summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJean-Yves Avenard <jyavenard@mythtv.org>2010-11-29 10:19:21 (GMT)
committer Jean-Yves Avenard <jyavenard@mythtv.org>2010-11-29 10:19:21 (GMT)
commit4064cade3e69f15dfa7cb62724b474e7d9aa38fe (patch)
treee1679573ba38eb569d76b03d6ae414bca3a8ce46
parent4f01704163e1336e4f63226c09584ee23cd46b8a (diff)
Fixes #9282. This ticket revealed massive issues with a few of the classes used with the audio framework.
- Resampler used a 32k and 128k statically allocated buffer. If the resampling ratio was greater than 4, memory corruption would occur. The resampler now uses a dynamically allocated memory buffer that is increased when required and freed when the audio class is closed. - Float audio processing: if the samples received were greater than the statically allocated buffer (32k), memory corruption would occur. The audio float processing is now done in 32k blocks at a time, which guarantees no memory overflow can occur. - Audio upmixer uses a statically allocated buffer (8192 bytes). If the samples converted were greater than 8k, corruption would occur. The upmixer is now called iteratively 8k at a time. - AC3 encoder uses a statically allocated buffer (128k). If the samples converted were greater than 128k, audio data would be lost and the audio buffer would be left in an undetermined state. The AC3 encoder is now called in blocks according to how much free space exists in the AC3 encoder buffer. All issues mentioned above are corner-case scenarios and unlikely to be experienced by the user; the size of audio frames received at a given time is typically between 80 and 300 bytes. However, in theory ffmpeg can decode and provide samples of up to 190kB. In the sample provided in #9282 (WM3 audio codec), the audio framework is called with 60-90kB of samples, triggering most of the bugs mentioned above. The resampler, upmixer and AC3 encoder have been in use in myth for several years, yet the issue had never been seen up to now. Now that was fun... Spent a whole weekend tracing what was going on. The ideal solution would be to change all the sub-classes so they do not work with static-size buffers and are able to work with anything you throw at them. git-svn-id: http://svn.mythtv.org/svn/trunk@27368 7dbf422c-18fa-0310-86e9-fd20926502f2
-rw-r--r--mythtv/libs/libmyth/audiooutputbase.cpp322
-rw-r--r--mythtv/libs/libmyth/audiooutputbase.h7
-rw-r--r--mythtv/libs/libmyth/audiooutpututil.cpp13
-rw-r--r--mythtv/libs/libmythfreesurround/freesurround.cpp2
-rw-r--r--mythtv/libs/libmythfreesurround/freesurround.h2
5 files changed, 208 insertions, 138 deletions
diff --git a/mythtv/libs/libmyth/audiooutputbase.cpp b/mythtv/libs/libmyth/audiooutputbase.cpp
index aa2ae32..dda0474 100644
--- a/mythtv/libs/libmyth/audiooutputbase.cpp
+++ b/mythtv/libs/libmyth/audiooutputbase.cpp
@@ -134,6 +134,9 @@ AudioOutputBase::~AudioOutputBase()
delete output_settings;
delete output_settingsraw;
+ if (kAudioSRCOutputSize > 0)
+ delete[] src_out;
+
assert(memory_corruption_test0 == 0xdeadbeef);
assert(memory_corruption_test1 == 0xdeadbeef);
assert(memory_corruption_test2 == 0xdeadbeef);
@@ -474,12 +477,13 @@ void AudioOutputBase::Reconfigure(const AudioSettings &orig_settings)
src_data.src_ratio = (double)samplerate / settings.samplerate;
src_data.data_in = src_in;
- if (kAudioSRCOutputSize < (int)
- ((float)kAudioSRCInputSize * samplerate / settings.samplerate) + 1)
+ int newsize = ((long)((float)kAudioSRCInputSize *
+ samplerate / settings.samplerate) +
+ 15) & ~0xf;
+ if (kAudioSRCOutputSize < newsize)
{
- kAudioSRCOutputSize = (int)
- ((float) kAudioSRCInputSize * samplerate /
- settings.samplerate) + 1;
+ kAudioSRCOutputSize = newsize;
+ VBAUDIO(QString("Resampler allocating %1").arg(newsize));
if (src_out)
delete[] src_out;
src_out = new float[kAudioSRCOutputSize];
@@ -655,12 +659,6 @@ void AudioOutputBase::KillAudio()
if (src_ctx)
{
src_delete(src_ctx);
- if (kAudioSRCOutputSize > 0)
- {
- kAudioSRCOutputSize = 0;
- delete[] src_out;
- src_out = NULL;
- }
src_ctx = NULL;
}
@@ -805,7 +803,7 @@ int64_t AudioOutputBase::GetAudiotime(void)
/* timecode is the stretch adjusted version
of major post-stretched buffer contents
- processing latencies are catered for in AddSamples/SetAudiotime
+ processing latencies are catered for in AddFrames/SetAudiotime
to eliminate race */
audiotime = audbuf_timecode - (
((main_buffer + soundcard_buffer) * eff_stretchfactor ) /
@@ -835,7 +833,7 @@ int64_t AudioOutputBase::GetAudiotime(void)
/**
* Set the timecode of the top of the ringbuffer
* Exclude all other processing elements as they dont vary
- * between AddSamples calls
+ * between AddFrames calls
*/
void AudioOutputBase::SetAudiotime(int frames, int64_t timecode)
{
@@ -857,12 +855,12 @@ void AudioOutputBase::SetAudiotime(int frames, int64_t timecode)
int64_t old_audbuf_timecode = audbuf_timecode;
- audbuf_timecode = timecode +
+ audbuf_timecode = timecode +
(((frames + processframes_unstretched) * 100000) +
(processframes_stretched * eff_stretchfactor )) / effdsp;
// check for timecode wrap and reset audiotime if detected
- // timecode will always be monotonic asc if not seeked and reset
+ // timecode will always be monotonic asc if not seeked and reset
// happens if seek or pause happens
if (audbuf_timecode < old_audbuf_timecode)
audiotime = 0;
@@ -995,32 +993,48 @@ int AudioOutputBase::CopyWithUpmix(char *buffer, int frames, int &org_waud)
}
// Upmix to 6ch via FreeSurround
-
// Calculate frame size of input
off = processing ? 4 : output_settings->SampleSize(format);
off *= source_channels;
- int i = 0;
- while (i < frames)
- i += upmixer->putFrames(buffer + i * off, frames - i, source_channels);
+ int remaining_frames = frames;
+ len = 0;
+ do
+ {
+ int i;
+ frames = remaining_frames;
+ if (frames * source_channels > SURROUND_BUFSIZE)
+ {
+ frames = SURROUND_BUFSIZE / source_channels;
+ }
- int nFrames = upmixer->numFrames();
- if (!nFrames)
- return 0;
+ i = 0;
+ while (i < frames)
+ i += upmixer->putFrames(buffer + i * off,
+ frames - i, source_channels);
- len = CheckFreeSpace(nFrames);
+ remaining_frames -= i;
+ buffer += i * off;
- int bdFrames = bdiff / bpf;
- if (bdFrames < nFrames)
- {
- upmixer->receiveFrames((float *)(WPOS), bdFrames);
- nFrames -= bdFrames;
- org_waud = 0;
- }
- if (nFrames > 0)
- upmixer->receiveFrames((float *)(WPOS), nFrames);
+ int nFrames = upmixer->numFrames();
+ if (!nFrames)
+ continue;
+
+ len += CheckFreeSpace(nFrames);
- org_waud += nFrames * bpf;
+ int bdFrames = (kAudioRingBufferSize - org_waud) / bpf;
+ if (bdFrames < nFrames)
+ {
+ upmixer->receiveFrames((float *)(WPOS), bdFrames);
+ nFrames -= bdFrames;
+ org_waud = 0;
+ }
+ if (nFrames > 0)
+ upmixer->receiveFrames((float *)(WPOS), nFrames);
+
+ org_waud += nFrames * bpf;
+ }
+ while (remaining_frames > 0);
return len;
}
@@ -1029,11 +1043,13 @@ int AudioOutputBase::CopyWithUpmix(char *buffer, int frames, int &org_waud)
*
* Returns false if there's not enough space right now
*/
-bool AudioOutputBase::AddFrames(void *buffer, int in_frames, int64_t timecode)
+bool AudioOutputBase::AddFrames(void *in_buffer, int in_frames,
+ int64_t timecode)
{
int org_waud = waud, afree = audiofree();
int frames = in_frames;
- int bpf = bytes_per_frame, len = frames * source_bytes_per_frame;
+ void *buffer = in_buffer;
+ int bpf = bytes_per_frame, len = frames * source_bytes_per_frame;
int used = kAudioRingBufferSize - afree;
bool music = false;
int bdiff;
@@ -1060,15 +1076,17 @@ bool AudioOutputBase::AddFrames(void *buffer, int in_frames, int64_t timecode)
// Send original samples to mythmusic visualisation
timecode = (int64_t)(frames_buffered) * 1000 / source_samplerate;
frames_buffered += frames;
- dispatchVisual((uchar *)buffer, len, timecode, source_channels,
+ dispatchVisual((uchar *)in_buffer, len, timecode, source_channels,
output_settings->FormatToBits(format));
music = true;
}
+ // Calculate amount of free space required in ringbuffer
if (processing)
{
- // Convert to floats
- len = AudioOutputUtil::toFloat(format, src_in, buffer, len);
+ // Final float conversion space requirement
+ len = sizeof(*src_in_buf) /
+ AudioOutputSettings::SampleSize(format) * len;
// Account for changes in number of channels
if (needs_upmix || needs_downmix)
@@ -1090,124 +1108,178 @@ bool AudioOutputBase::AddFrames(void *buffer, int in_frames, int64_t timecode)
if (len > afree)
{
- VBAUDIOTS("Buffer is full, AddFrames returning false");
- return false; // would overflow
+ VBAUDIOTS("Buffer is full, AddFrames returning false");
+ return false; // would overflow
}
- // Perform downmix if necessary
- if (needs_downmix)
- if(AudioOutputDownmix::DownmixFrames(source_channels, channels,
- src_in, src_in, frames) < 0)
- VBERROR("Error occurred while downmixing");
+ int frames_remaining = in_frames;
+ int frames_offset = 0;
+ int frames_final = 0;
- // Resample if necessary
- if (need_resampler && src_ctx)
+ while(frames_remaining > 0)
{
- src_data.input_frames = frames;
- int error = src_process(src_ctx, &src_data);
+ buffer = (char *)in_buffer + frames_offset;
+ frames = frames_remaining;
- if (error)
- VBERROR(QString("Error occurred while resampling audio: %1")
- .arg(src_strerror(error)));
+ len = frames * source_bytes_per_frame;
- buffer = src_out;
- in_frames = frames = src_data.output_frames_gen;
- }
- else if (processing)
- buffer = src_in;
+ if (processing)
+ {
+ if (frames * source_channels > (int)kAudioSRCInputSize)
+ {
+ frames = kAudioSRCInputSize / source_channels;
+ len = frames * source_bytes_per_frame;
+ frames_offset += len;
+ }
+ // Convert to floats
+ len = AudioOutputUtil::toFloat(format, src_in, buffer, len);
+ }
+ frames_remaining -= frames;
- /* we want the timecode of the last sample added but we are given the
- timecode of the first - add the time in ms that the frames added
- represent */
+ // Perform downmix if necessary
+ if (needs_downmix)
+ if(AudioOutputDownmix::DownmixFrames(source_channels, channels,
+ src_in, src_in, frames) < 0)
+ VBERROR("Error occurred while downmixing");
- // Copy samples into audiobuffer, with upmix if necessary
- if ((len = CopyWithUpmix((char *)buffer, frames, org_waud)) <= 0)
- {
- SetAudiotime(in_frames, timecode);
- return true;
- }
+ // Resample if necessary
+ if (need_resampler && src_ctx)
+ {
+ src_data.input_frames = frames;
+ int error = src_process(src_ctx, &src_data);
- frames = len / bpf;
+ if (error)
+ VBERROR(QString("Error occurred while resampling audio: %1")
+ .arg(src_strerror(error)));
- bdiff = kAudioRingBufferSize - waud;
+ buffer = src_out;
+ frames = src_data.output_frames_gen;
+ frames_final += frames;
+ }
+ else
+ {
+ frames_final += frames;
+ if (processing)
+ buffer = src_in;
+ }
- if (pSoundStretch)
- {
- // does not change the timecode, only the number of samples
- org_waud = waud;
- int bdFrames = bdiff / bpf;
+ /* we want the timecode of the last sample added but we are given the
+ timecode of the first - add the time in ms that the frames added
+ represent */
- if (bdiff < len)
+ // Copy samples into audiobuffer, with upmix if necessary
+ if ((len = CopyWithUpmix((char *)buffer, frames, org_waud)) <= 0)
{
- pSoundStretch->putSamples((STST *)(WPOS), bdFrames);
- pSoundStretch->putSamples((STST *)ABUF, (len - bdiff) / bpf);
+ continue;
}
- else
- pSoundStretch->putSamples((STST *)(WPOS), frames);
- int nFrames = pSoundStretch->numSamples();
- if (nFrames > frames)
- CheckFreeSpace(nFrames);
+ frames = len / bpf;
- len = nFrames * bpf;
+ bdiff = kAudioRingBufferSize - waud;
- if (nFrames > bdFrames)
+ if (pSoundStretch)
{
- nFrames -= pSoundStretch->receiveSamples((STST *)(WPOS), bdFrames);
- org_waud = 0;
- }
- if (nFrames > 0)
- nFrames = pSoundStretch->receiveSamples((STST *)(WPOS), nFrames);
+ // does not change the timecode, only the number of samples
+ org_waud = waud;
+ int bdFrames = bdiff / bpf;
- org_waud += nFrames * bpf;
- }
+ if (bdiff < len)
+ {
+ pSoundStretch->putSamples((STST *)(WPOS), bdFrames);
+ pSoundStretch->putSamples((STST *)ABUF, (len - bdiff) / bpf);
+ }
+ else
+ pSoundStretch->putSamples((STST *)(WPOS), frames);
- if (internal_vol && SWVolume())
- {
- org_waud = waud;
- int num = len;
+ int nFrames = pSoundStretch->numSamples();
+ if (nFrames > frames)
+ CheckFreeSpace(nFrames);
- if (bdiff <= num)
- {
- AudioOutputUtil::AdjustVolume(WPOS, bdiff, volume,
- music, needs_upmix && upmixer);
- num -= bdiff;
- org_waud = 0;
- }
- if (num > 0)
- AudioOutputUtil::AdjustVolume(WPOS, num, volume,
- music, needs_upmix && upmixer);
- org_waud += num;
- }
+ len = nFrames * bpf;
- if (encoder)
- {
- org_waud = waud;
- int to_get = 0;
+ if (nFrames > bdFrames)
+ {
+ nFrames -= pSoundStretch->receiveSamples((STST *)(WPOS),
+ bdFrames);
+ org_waud = 0;
+ }
+ if (nFrames > 0)
+ nFrames = pSoundStretch->receiveSamples((STST *)(WPOS),
+ nFrames);
+
+ org_waud += nFrames * bpf;
+ }
- if (bdiff < len)
+ if (internal_vol && SWVolume())
{
- encoder->Encode(WPOS, bdiff, processing);
- to_get = encoder->Encode(ABUF, len - bdiff, processing);
+ org_waud = waud;
+ int num = len;
+
+ if (bdiff <= num)
+ {
+ AudioOutputUtil::AdjustVolume(WPOS, bdiff, volume,
+ music, needs_upmix && upmixer);
+ num -= bdiff;
+ org_waud = 0;
+ }
+ if (num > 0)
+ AudioOutputUtil::AdjustVolume(WPOS, num, volume,
+ music, needs_upmix && upmixer);
+ org_waud += num;
}
- else
- to_get = encoder->Encode(WPOS, len, processing);
- if (bdiff <= to_get)
+ if (encoder)
{
- encoder->GetFrames(WPOS, bdiff);
- to_get -= bdiff;
- org_waud = 0;
+ org_waud = waud;
+ int org_waud2 = waud;
+ int remaining = len;
+ int to_get = 0;
+ // The AC3 encoder can only work on 128kB of data at a time
+ int maxframes = (INBUFSIZE / encoder->FrameSize()) *
+ encoder->FrameSize();
+
+ do
+ {
+ len = remaining;
+ if (len > maxframes)
+ {
+ len = maxframes;
+ }
+ remaining -= len;
+
+ bdiff = kAudioRingBufferSize - org_waud;
+ if (bdiff < len)
+ {
+ encoder->Encode(WPOS, bdiff, processing);
+ to_get = encoder->Encode(ABUF, len - bdiff, processing);
+ org_waud = len - bdiff;
+ }
+ else
+ {
+ to_get = encoder->Encode(WPOS, len, processing);
+ org_waud += len;
+ }
+
+ bdiff = kAudioRingBufferSize - org_waud2;
+ if (bdiff <= to_get)
+ {
+ encoder->GetFrames(audiobuffer + org_waud2, bdiff);
+ to_get -= bdiff ;
+ org_waud2 = 0;
+ }
+ if (to_get > 0)
+ encoder->GetFrames(audiobuffer + org_waud2, to_get);
+
+ org_waud2 += to_get;
+ }
+ while (remaining > 0);
+ org_waud = org_waud2;
}
- if (to_get > 0)
- encoder->GetFrames(WPOS, to_get);
- org_waud += to_get;
+ waud = org_waud;
}
- waud = org_waud;
-
- SetAudiotime(in_frames, timecode);
+ SetAudiotime(frames_final, timecode);
return true;
}
diff --git a/mythtv/libs/libmyth/audiooutputbase.h b/mythtv/libs/libmyth/audiooutputbase.h
index 3f82821..3e918da 100644
--- a/mythtv/libs/libmyth/audiooutputbase.h
+++ b/mythtv/libs/libmyth/audiooutputbase.h
@@ -19,10 +19,6 @@ using namespace std;
#include "samplerate.h"
#include "mythverbose.h"
-// make sure AVCODEC_MAX_AUDIO_FRAME_SIZE definition match the one in
-// libavcodec/avcodec.h
-#define AVCODEC_MAX_AUDIO_FRAME_SIZE 192000
-
#define VBAUDIO(str) VERBOSE(VB_AUDIO, LOC + str)
#define VBAUDIOTS(str) VERBOSE(VB_AUDIO+VB_TIMESTAMP, LOC + str)
#define VBGENERAL(str) VERBOSE(VB_GENERAL, LOC + str)
@@ -99,7 +95,8 @@ class AudioOutputBase : public AudioOutput, public QThread
virtual void bufferOutputData(bool y){ buffer_output_data_for_use = y; }
virtual int readOutputData(unsigned char *read_buffer, int max_length);
- static const uint kAudioSRCInputSize = AVCODEC_MAX_AUDIO_FRAME_SIZE;
+ static const uint kAudioSRCInputSize = 4096;
+
/// Audio Buffer Size -- should be divisible by 32,24,16,12,10,8,6,4,2..
static const uint kAudioRingBufferSize = 3072000;
diff --git a/mythtv/libs/libmyth/audiooutpututil.cpp b/mythtv/libs/libmyth/audiooutpututil.cpp
index 014ffb4..891bf9a 100644
--- a/mythtv/libs/libmyth/audiooutpututil.cpp
+++ b/mythtv/libs/libmyth/audiooutpututil.cpp
@@ -120,9 +120,8 @@ static int toFloat8(float *out, uchar *in, int len)
}
/*
- All fromFloat variants require 16 byte aligned output buffers on x86
- The SSE code processes 16 bytes at a time and leaves any remainder for the C
- - there is no remainder in practice */
+ The SSE code processes 16 bytes at a time and leaves any remainder for the C
+ - there is no remainder in practice */
static int fromFloat8(uchar *out, float *in, int len)
{
@@ -130,7 +129,7 @@ static int fromFloat8(uchar *out, float *in, int len)
float f = (1<<7) - 1;
#if ARCH_X86
- if (sse_check() && len >= 16)
+ if (sse_check() && len >= 16 && ((unsigned long)out & 0xf) == 0)
{
int loops = len >> 4;
i = loops << 4;
@@ -235,7 +234,7 @@ static int fromFloat16(short *out, float *in, int len)
float f = (1<<15) - 1;
#if ARCH_X86
- if (sse_check() && len >= 16)
+ if (sse_check() && len >= 16 && ((unsigned long)out & 0xf) == 0)
{
int loops = len >> 4;
i = loops << 4;
@@ -342,7 +341,7 @@ static int fromFloat32(AudioFormat format, int *out, float *in, int len)
shift = 0;
#if ARCH_X86
- if (sse_check() && len >= 16)
+ if (sse_check() && len >= 16 && ((unsigned long)out & 0xf) == 0)
{
float o = 1, mo = -1;
int loops = len >> 4;
@@ -407,7 +406,7 @@ static int fromFloatFLT(float *out, float *in, int len)
int i = 0;
#if ARCH_X86
- if (sse_check() && len >= 16)
+ if (sse_check() && len >= 16 && ((unsigned long)in & 0xf) == 0)
{
int loops = len >> 4;
float o = 1, mo = -1;
diff --git a/mythtv/libs/libmythfreesurround/freesurround.cpp b/mythtv/libs/libmythfreesurround/freesurround.cpp
index 61593f7..bad81d5 100644
--- a/mythtv/libs/libmythfreesurround/freesurround.cpp
+++ b/mythtv/libs/libmythfreesurround/freesurround.cpp
@@ -37,7 +37,7 @@ using namespace std;
#include <QDateTime>
// our default internal block size, in floats
-static const unsigned default_block_size = 8192;
+static const unsigned default_block_size = SURROUND_BUFSIZE;
// Gain of center and lfe channels in passive mode (sqrt 0.5)
static const float center_level = 0.707107;
diff --git a/mythtv/libs/libmythfreesurround/freesurround.h b/mythtv/libs/libmythfreesurround/freesurround.h
index a92afd5..837bc0a 100644
--- a/mythtv/libs/libmythfreesurround/freesurround.h
+++ b/mythtv/libs/libmythfreesurround/freesurround.h
@@ -21,6 +21,8 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#include "compat.h" // instead of sys/types.h, for MinGW compatibility
+#define SURROUND_BUFSIZE 8192
+
class FreeSurround
{
public: