Bring back support of FFmpeg >= 0.7
After planar audio codec support was added, the minimum required FFmpeg was bumped to 0.10, which was unfortunate because that version was only released late last year. Didn't find a way to keep the compatibility code local to ffmpeg_compat, so there are some ifdefs in audaspace and writeffmpeg. Not entirely happy with this, but a few ifdefs in the code are better than a lot of real pain for platform maintainers.
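In short, the approach is to derive FFMPEG_HAVE_ENCODE_AUDIO2 / FFMPEG_HAVE_DECODE_AUDIO4 from the libavcodec version macros and pick between the old and new audio APIs at compile time. A minimal sketch of that pattern follows (the version thresholds match the diff below; the demo main() and its messages are illustrative only, not Blender code):

/* Sketch: compile-time selection between old and new FFmpeg audio APIs.
 * The FFMPEG_HAVE_* names and version cut-offs follow the ffmpeg_compat.h
 * changes below; the main() here is just a demo, not part of Blender. */
#include <stdio.h>
#include <libavcodec/avcodec.h>

#if (LIBAVCODEC_VERSION_MAJOR > 53) || \
    ((LIBAVCODEC_VERSION_MAJOR == 53) && (LIBAVCODEC_VERSION_MINOR >= 60))
#define FFMPEG_HAVE_ENCODE_AUDIO2   /* new avcodec_encode_audio2() path */
#endif

#if (LIBAVCODEC_VERSION_MAJOR > 53) || \
    ((LIBAVCODEC_VERSION_MAJOR == 53) && (LIBAVCODEC_VERSION_MINOR >= 42))
#define FFMPEG_HAVE_DECODE_AUDIO4   /* new avcodec_decode_audio4() path */
#endif

int main(void)
{
#ifdef FFMPEG_HAVE_ENCODE_AUDIO2
    printf("encode: avcodec_encode_audio2() with an AVFrame\n");
#else
    printf("encode: legacy avcodec_encode_audio() into a preallocated buffer\n");
#endif

#ifdef FFMPEG_HAVE_DECODE_AUDIO4
    printf("decode: avcodec_decode_audio4()\n");
#else
    printf("decode: legacy avcodec_decode_audio3() loop\n");
#endif
    return 0;
}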
audaspace (AUD_FFMPEGReader.cpp):

@@ -43,6 +43,7 @@ extern "C" {

int AUD_FFMPEGReader::decode(AVPacket& packet, AUD_Buffer& buffer)
{
#ifdef FFMPEG_HAVE_DECODE_AUDIO4
    AVFrame* frame = NULL;
    int got_frame;
    int read_length;

@@ -101,6 +102,54 @@ int AUD_FFMPEGReader::decode(AVPacket& packet, AUD_Buffer& buffer)
    av_free(frame);

    return buf_pos;
#else
    // save packet parameters
    uint8_t *audio_pkg_data = packet.data;
    int audio_pkg_size = packet.size;

    int buf_size = buffer.getSize();
    int buf_pos = 0;

    int read_length, data_size;

    AVPacket tmp_pkt;

    av_init_packet(&tmp_pkt);

    // as long as there is still data in the package
    while(audio_pkg_size > 0)
    {
        // resize buffer if needed
        if(buf_size - buf_pos < AVCODEC_MAX_AUDIO_FRAME_SIZE)
        {
            buffer.resize(buf_size + AVCODEC_MAX_AUDIO_FRAME_SIZE, true);
            buf_size += AVCODEC_MAX_AUDIO_FRAME_SIZE;
        }

        // read samples from the packet
        data_size = buf_size - buf_pos;

        tmp_pkt.data = audio_pkg_data;
        tmp_pkt.size = audio_pkg_size;

        read_length = avcodec_decode_audio3(
            m_codecCtx,
            (int16_t*)(((data_t*)buffer.getBuffer()) + buf_pos),
            &data_size, &tmp_pkt);

        // read error, next packet!
        if(read_length < 0)
            break;

        buf_pos += data_size;

        // move packet parameters
        audio_pkg_data += read_length;
        audio_pkg_size -= read_length;
    }

    return buf_pos;
#endif
}

static const char* streaminfo_error = "AUD_FFMPEGReader: Stream info couldn't "
ffmpeg_compat.h:

@@ -76,9 +76,26 @@
#define FFMPEG_HAVE_CANON_H264_RESOLUTION_FIX
#endif

#if ((LIBAVCODEC_VERSION_MAJOR > 53) || (LIBAVCODEC_VERSION_MAJOR >= 53) && (LIBAVCODEC_VERSION_MINOR >= 60))
#define FFMPEG_HAVE_ENCODE_AUDIO2
#endif

#if ((LIBAVCODEC_VERSION_MAJOR > 53) || (LIBAVCODEC_VERSION_MAJOR >= 53) && (LIBAVCODEC_VERSION_MINOR >= 42))
#define FFMPEG_HAVE_DECODE_AUDIO4
#endif

#if ((LIBAVUTIL_VERSION_MAJOR > 51) || (LIBAVUTIL_VERSION_MAJOR == 51) && (LIBAVUTIL_VERSION_MINOR >= 32))
#define FFMPEG_FFV1_ALPHA_SUPPORTED
#define FFMPEG_SAMPLE_FMT_S16P_SUPPORTED
#else

static inline
int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt)
{
    /* no planar formats in FFmpeg < 0.9 */
    return 0;
}

#endif

static inline
writeffmpeg.c:

@@ -90,6 +90,10 @@ static int video_buffersize = 0;
static uint8_t *audio_input_buffer = 0;
static uint8_t *audio_deinterleave_buffer = 0;
static int audio_input_samples = 0;
#ifndef FFMPEG_HAVE_ENCODE_AUDIO2
static uint8_t *audio_output_buffer = 0;
static int audio_outbuf_size = 0;
#endif
static double audio_time = 0.0f;
static bool audio_deinterleave = false;
static int audio_sample_size = 0;

@@ -123,7 +127,7 @@ static int write_audio_frame(void)
{
    AVCodecContext *c = NULL;
    AVPacket pkt;
    AVFrame *frame;
    AVFrame *frame = NULL;
    int got_output = 0;

    c = audio_stream->codec;

@@ -132,6 +136,10 @@ static int write_audio_frame(void)
    pkt.size = 0;
    pkt.data = NULL;

    AUD_readDevice(audio_mixdown_device, audio_input_buffer, audio_input_samples);
    audio_time += (double) audio_input_samples / (double) c->sample_rate;

#ifdef FFMPEG_HAVE_ENCODE_AUDIO2
    frame = avcodec_alloc_frame();
    frame->nb_samples = audio_input_samples;
    frame->format = c->sample_fmt;

@@ -139,9 +147,6 @@ static int write_audio_frame(void)
    frame->channel_layout = c->channel_layout;
#endif

    AUD_readDevice(audio_mixdown_device, audio_input_buffer, audio_input_samples);
    audio_time += (double) audio_input_samples / (double) c->sample_rate;

    if (audio_deinterleave) {
        int channel, i;
        uint8_t *temp;

@@ -166,6 +171,22 @@ static int write_audio_frame(void)
        return -1;
    }

    if (!got_output) {
        avcodec_free_frame(&frame);
        return 0;
    }
#else
    pkt.size = avcodec_encode_audio(c, audio_output_buffer, audio_outbuf_size, (short *) audio_input_buffer);

    if (pkt.size < 0) {
        // XXX error("Error writing audio packet");
        return -1;
    }

    pkt.data = audio_output_buffer;
    got_output = 1;
#endif

    if (got_output) {
        if (c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE) {
            pkt.pts = av_rescale_q(c->coded_frame->pts, c->time_base, audio_stream->time_base);

@@ -178,13 +199,16 @@ static int write_audio_frame(void)

        if (av_interleaved_write_frame(outfile, &pkt) != 0) {
            fprintf(stderr, "Error writing audio packet!\n");
            if (frame)
                avcodec_free_frame(&frame);
            return -1;
        }

        av_free_packet(&pkt);
    }

    avcodec_free_frame(&frame);
    if (frame)
        avcodec_free_frame(&frame);

    return 0;
}

@@ -718,6 +742,10 @@ static AVStream *alloc_audio_stream(RenderData *rd, int codec_id, AVFormatContex
    st->codec->time_base.num = 1;
    st->codec->time_base.den = st->codec->sample_rate;

#ifndef FFMPEG_HAVE_ENCODE_AUDIO2
    audio_outbuf_size = FF_MIN_BUFFER_SIZE;
#endif

    if (c->frame_size == 0)
        // used to be if((c->codec_id >= CODEC_ID_PCM_S16LE) && (c->codec_id <= CODEC_ID_PCM_DVD))
        // not sure if that is needed anymore, so let's try out if there are any

@@ -725,6 +753,10 @@ static AVStream *alloc_audio_stream(RenderData *rd, int codec_id, AVFormatContex
        audio_input_samples = FF_MIN_BUFFER_SIZE * 8 / c->bits_per_coded_sample / c->channels;
    else {
        audio_input_samples = c->frame_size;
#ifndef FFMPEG_HAVE_ENCODE_AUDIO2
        if (c->frame_size * c->channels * sizeof(int16_t) * 4 > audio_outbuf_size)
            audio_outbuf_size = c->frame_size * c->channels * sizeof(int16_t) * 4;
#endif
    }

    audio_deinterleave = av_sample_fmt_is_planar(c->sample_fmt);

@@ -732,6 +764,9 @@ static AVStream *alloc_audio_stream(RenderData *rd, int codec_id, AVFormatContex
    audio_sample_size = av_get_bytes_per_sample(c->sample_fmt);

    audio_input_buffer = (uint8_t *) av_malloc(audio_input_samples * c->channels * audio_sample_size);
#ifndef FFMPEG_HAVE_ENCODE_AUDIO2
    audio_output_buffer = (uint8_t *) av_malloc(audio_outbuf_size);
#endif

    if (audio_deinterleave)
        audio_deinterleave_buffer = (uint8_t *) av_malloc(audio_input_samples * c->channels * audio_sample_size);

@@ -1182,6 +1217,12 @@ static void end_ffmpeg_impl(int is_autosplit)
        av_free(audio_input_buffer);
        audio_input_buffer = 0;
    }
#ifndef FFMPEG_HAVE_ENCODE_AUDIO2
    if (audio_output_buffer) {
        av_free(audio_output_buffer);
        audio_output_buffer = 0;
    }
#endif

    if (audio_deinterleave_buffer) {
        av_free(audio_deinterleave_buffer);