commit 0653c6ed8a80d31f3ded6d4bc5dd8267279f6281
Author: Mark Spieth <mspieth@digivation.com.au>
Date: Tue Apr 27 07:51:51 2010 +1000
smoother vsync with predictive frame skipping
diff --git a/mythtv/libs/libmyth/audiooutput.h b/mythtv/libs/libmyth/audiooutput.h
index f3ec2b0..e522304 100644
a
|
b
|
|
3 | 3 | |
4 | 4 | #include <QString> |
5 | 5 | |
| 6 | #include "compat.h" |
6 | 7 | #include "audiosettings.h" |
7 | 8 | #include "mythcorecontext.h" |
8 | 9 | #include "volumebase.h" |
… |
… |
class MPUBLIC AudioOutput : public VolumeBase, public OutputListeners |
42 | 43 | |
43 | 44 | virtual void Reset(void) = 0; |
44 | 45 | |
45 | | virtual bool AddSamples(void *buffer, int samples, long long timecode) = 0; |
| 46 | virtual bool AddSamples(void *buffer, int samples, int64_t timecode) = 0; |
46 | 47 | |
47 | | virtual void SetTimecode(long long timecode) = 0; |
| 48 | virtual void SetTimecode(int64_t timecode) = 0; |
48 | 49 | virtual bool IsPaused(void) const = 0; |
49 | 50 | virtual void Pause(bool paused) = 0; |
50 | 51 | virtual void PauseUntilBuffered(void) = 0; |
… |
… |
class MPUBLIC AudioOutput : public VolumeBase, public OutputListeners |
52 | 53 | // Wait for all data to finish playing |
53 | 54 | virtual void Drain(void) = 0; |
54 | 55 | |
55 | | virtual int GetAudiotime(void) = 0; |
| 56 | virtual int64_t GetAudiotime(void) = 0; |
56 | 57 | |
57 | 58 | /// report amount of audio buffered in milliseconds. |
58 | | virtual int GetAudioBufferedTime(void) { return 0; } |
| 59 | virtual int64_t GetAudioBufferedTime(void) { return 0; } |
59 | 60 | |
60 | 61 | virtual void SetSourceBitrate(int ) { } |
61 | 62 | |
diff --git a/mythtv/libs/libmyth/audiooutputbase.cpp b/mythtv/libs/libmyth/audiooutputbase.cpp
index 9213adf..1cae7bc 100644
a
|
b
|
AudioOutputBase::AudioOutputBase(const AudioSettings &settings) : |
56 | 56 | passthru(false), enc(false), |
57 | 57 | reenc(false), |
58 | 58 | stretchfactor(1.0f), |
| 59 | eff_stretchfactor(100000), |
59 | 60 | |
60 | 61 | source(settings.source), killaudio(false), |
61 | 62 | |
… |
… |
void AudioOutputBase::SetStretchFactorLocked(float lstretchfactor) |
179 | 180 | return; |
180 | 181 | |
181 | 182 | stretchfactor = lstretchfactor; |
| 183 | eff_stretchfactor = (int)(100000.0f * lstretchfactor + 0.5); |
182 | 184 | if (pSoundStretch) |
183 | 185 | { |
184 | 186 | VBGENERAL(QString("Changing time stretch to %1").arg(stretchfactor)); |
… |
… |
void AudioOutputBase::Reset() |
596 | 598 | * Used by mythmusic for seeking since it doesn't provide timecodes to |
597 | 599 | * AddSamples() |
598 | 600 | */ |
599 | | void AudioOutputBase::SetTimecode(long long timecode) |
| 601 | void AudioOutputBase::SetTimecode(int64_t timecode) |
600 | 602 | { |
601 | 603 | audbuf_timecode = audiotime = timecode; |
602 | | frames_buffered = (long long)((timecode * source_samplerate) / 1000); |
| 604 | frames_buffered = (int64_t)((timecode * source_samplerate) / 1000); |
603 | 605 | } |
604 | 606 | |
605 | 607 | /** |
… |
… |
int AudioOutputBase::audioready() |
654 | 656 | /** |
655 | 657 | * Calculate the timecode of the samples that are about to become audible |
656 | 658 | */ |
657 | | int AudioOutputBase::GetAudiotime(void) |
| 659 | int64_t AudioOutputBase::GetAudiotime(void) |
658 | 660 | { |
659 | 661 | if (audbuf_timecode == 0) |
660 | 662 | return 0; |
661 | 663 | |
662 | | int soundcard_buffer = 0; |
663 | 664 | int obpf = output_bytes_per_frame; |
664 | | int totalbuffer; |
665 | | long long oldaudiotime; |
| 665 | int64_t oldaudiotime; |
666 | 666 | |
667 | 667 | /* We want to calculate 'audiotime', which is the timestamp of the audio |
668 | | which is leaving the sound card at this instant. |
| 668 | Which is leaving the sound card at this instant. |
669 | 669 | |
670 | 670 | We use these variables: |
671 | 671 | |
… |
… |
int AudioOutputBase::GetAudiotime(void) |
677 | 677 | 'totalbuffer' is the total # of bytes in our audio buffer, and the |
678 | 678 | sound card's buffer. */ |
679 | 679 | |
680 | | soundcard_buffer = GetBufferedOnSoundcard(); // bytes |
681 | 680 | |
682 | 681 | QMutexLocker lockav(&avsync_lock); |
683 | 682 | |
| 683 | int64_t soundcard_buffer = GetBufferedOnSoundcard(); // bytes |
| 684 | int64_t main_buffer = audioready(); |
| 685 | |
684 | 686 | /* audioready tells us how many bytes are in audiobuffer |
685 | 687 | scaled appropriately if output format != internal format */ |
686 | | totalbuffer = audioready() + soundcard_buffer; |
687 | | |
688 | | if (needs_upmix && upmixer) |
689 | | totalbuffer += upmixer->frameLatency() * obpf; |
690 | | |
691 | | if (pSoundStretch) |
692 | | { |
693 | | totalbuffer += pSoundStretch->numUnprocessedSamples() * obpf / |
694 | | stretchfactor; |
695 | | totalbuffer += pSoundStretch->numSamples() * obpf; |
696 | | } |
697 | | |
698 | | if (encoder) |
699 | | totalbuffer += encoder->Buffered(); |
700 | 688 | |
701 | 689 | oldaudiotime = audiotime; |
702 | 690 | |
703 | | audiotime = audbuf_timecode - (long long)(totalbuffer) * 100000 * |
704 | | stretchfactor / (obpf * effdsp); |
| 691 | // timecode is the stretch adjusted version |
| 692 | // of major post-stretched buffer contents |
| 693 | // processing latencies are catered for in AddSamples/SetAudiotime to eliminate |
| 694 | // race |
| 695 | audiotime = audbuf_timecode - (( (main_buffer + soundcard_buffer) * eff_stretchfactor ) / (effdsp * obpf)); |
705 | 696 | |
706 | 697 | /* audiotime should never go backwards, but we might get a negative |
707 | 698 | value if GetBufferedOnSoundcard() isn't updated by the driver very |
… |
… |
int AudioOutputBase::GetAudiotime(void) |
709 | 700 | if (audiotime < oldaudiotime) |
710 | 701 | audiotime = oldaudiotime; |
711 | 702 | |
712 | | VBAUDIOTS(QString("GetAudiotime audt=%3 atc=%4 tb=%5 sb=%6 " |
713 | | "sr=%7 obpf=%8 sf=%9") |
| 703 | VBAUDIOTS(QString("GetAudiotime audt=%1 atc=%2 mb=%3 sb=%4 tb=%5 " |
| 704 | "sr=%6 obpf=%7 bpf=%8 sf=%9 %10 %11") |
714 | 705 | .arg(audiotime).arg(audbuf_timecode) |
715 | | .arg(totalbuffer).arg(soundcard_buffer) |
716 | | .arg(samplerate).arg(obpf).arg(stretchfactor)); |
| 706 | .arg(main_buffer) |
| 707 | .arg(soundcard_buffer) |
| 708 | .arg(main_buffer+soundcard_buffer) |
| 709 | .arg(samplerate).arg(obpf).arg(bytes_per_frame).arg(stretchfactor) |
| 710 | .arg((main_buffer + soundcard_buffer) * eff_stretchfactor) |
| 711 | .arg(( (main_buffer + soundcard_buffer) * eff_stretchfactor ) / (effdsp * obpf)) |
| 712 | ); |
| 713 | |
| 714 | return audiotime; |
| 715 | } |
| 716 | |
| 717 | /** |
| 718 | * Set the timecode of the top of the ringbuffer |
| 719 | * Exclude all other processing elements as they don't vary |
| 720 | * between AddSamples calls |
| 721 | */ |
| 722 | void AudioOutputBase::SetAudiotime(int frames, int64_t timecode) |
| 723 | { |
| 724 | int64_t processframes_stretched = 0; |
| 725 | int64_t processframes_unstretched = 0; |
| 726 | |
| 727 | if (needs_upmix && upmixer) |
| 728 | processframes_unstretched -= upmixer->frameLatency(); |
| 729 | |
| 730 | if (pSoundStretch) |
| 731 | { |
| 732 | processframes_unstretched -= pSoundStretch->numUnprocessedSamples(); |
| 733 | processframes_stretched -= pSoundStretch->numSamples(); |
| 734 | } |
717 | 735 | |
718 | | return (int)audiotime; |
| 736 | if (encoder) |
| 737 | // the input buffered data is still in audio_bytes_per_sample format |
| 738 | processframes_stretched -= encoder->Buffered() / output_bytes_per_frame; |
| 739 | |
| 740 | audbuf_timecode = timecode + |
| 741 | (((frames + processframes_unstretched) * 100000) + |
| 742 | (processframes_stretched * eff_stretchfactor )) / effdsp; |
| 743 | |
| 744 | VBAUDIOTS(QString("SetAudiotime atc=%1 tc=%2 f=%3 pfu=%4 pfs=%5") |
| 745 | .arg(audbuf_timecode) |
| 746 | .arg(timecode) |
| 747 | .arg(frames) |
| 748 | .arg(processframes_unstretched) |
| 749 | .arg(processframes_stretched)); |
| 750 | #ifdef AUDIOTSTESTING |
| 751 | GetAudiotime(); |
| 752 | #endif |
719 | 753 | } |
720 | 754 | |
721 | 755 | /** |
… |
… |
int AudioOutputBase::GetAudiotime(void) |
723 | 757 | * audible and the samples most recently added to the audiobuffer, i.e. the |
724 | 758 | * time in ms representing the sum total of buffered samples |
725 | 759 | */ |
726 | | int AudioOutputBase::GetAudioBufferedTime(void) |
| 760 | int64_t AudioOutputBase::GetAudioBufferedTime(void) |
727 | 761 | { |
728 | 762 | int ret = audbuf_timecode - GetAudiotime(); |
729 | 763 | // Pulse can give us values that make this -ve |
… |
… |
int AudioOutputBase::CopyWithUpmix(char *buffer, int frames, int &org_waud) |
869 | 903 | * |
870 | 904 | * Returns false if there's not enough space right now |
871 | 905 | */ |
872 | | bool AudioOutputBase::AddSamples(void *buffer, int frames, long long timecode) |
| 906 | bool AudioOutputBase::AddSamples(void *buffer, int in_frames, int64_t timecode) |
873 | 907 | { |
874 | 908 | int org_waud = waud, afree = audiofree(); |
875 | | int bpf = bytes_per_frame, len = frames * source_bytes_per_frame; |
| 909 | int frames = in_frames; |
| 910 | int bpf = bytes_per_frame, len = in_frames * source_bytes_per_frame; |
876 | 911 | int used = kAudioRingBufferSize - afree; |
877 | 912 | bool music = false; |
| 913 | int bdiff; |
878 | 914 | |
879 | 915 | VBAUDIOTS(QString("AddSamples frames=%1, bytes=%2, used=%3, free=%4, " |
880 | 916 | "timecode=%5 needsupmix=%6") |
… |
… |
bool AudioOutputBase::AddSamples(void *buffer, int frames, long long timecode) |
896 | 932 | if (timecode < 0) |
897 | 933 | { |
898 | 934 | // Send original samples to mythmusic visualisation |
899 | | timecode = (long long)(frames_buffered) * 1000 / source_samplerate; |
| 935 | timecode = (int64_t)(frames_buffered) * 1000 / source_samplerate; |
900 | 936 | frames_buffered += frames; |
901 | 937 | dispatchVisual((uchar *)buffer, len, timecode, source_channels, |
902 | 938 | output_settings->FormatToBits(format)); |
… |
… |
bool AudioOutputBase::AddSamples(void *buffer, int frames, long long timecode) |
949 | 985 | .arg(src_strerror(error))); |
950 | 986 | |
951 | 987 | buffer = src_out; |
952 | | frames = src_data.output_frames_gen; |
| 988 | in_frames = frames = src_data.output_frames_gen; |
953 | 989 | } |
954 | 990 | else if (processing) |
955 | 991 | buffer = src_in; |
… |
… |
bool AudioOutputBase::AddSamples(void *buffer, int frames, long long timecode) |
957 | 993 | /* we want the timecode of the last sample added but we are given the |
958 | 994 | timecode of the first - add the time in ms that the frames added |
959 | 995 | represent */ |
960 | | audbuf_timecode = timecode + ((long long)(frames) * 100000 / effdsp); |
| 996 | //audbuf_timecode = timecode + ((int64_t)((frames) * 100000) / effdsp); |
961 | 997 | |
962 | 998 | // Copy samples into audiobuffer, with upmix if necessary |
963 | 999 | if ((len = CopyWithUpmix((char *)buffer, frames, org_waud)) <= 0) |
964 | | return true; |
| 1000 | { |
| 1001 | //return true; |
| 1002 | goto done; |
| 1003 | } |
965 | 1004 | |
966 | 1005 | frames = len / bpf; |
967 | 1006 | |
968 | | int bdiff = kAudioRingBufferSize - waud; |
| 1007 | bdiff = kAudioRingBufferSize - waud; |
969 | 1008 | |
970 | 1009 | if (pSoundStretch) |
971 | 1010 | { |
… |
… |
bool AudioOutputBase::AddSamples(void *buffer, int frames, long long timecode) |
1043 | 1082 | |
1044 | 1083 | waud = org_waud; |
1045 | 1084 | |
| 1085 | done: |
| 1086 | SetAudiotime(in_frames, timecode); |
| 1087 | |
1046 | 1088 | return true; |
1047 | 1089 | } |
1048 | 1090 | |
… |
… |
void AudioOutputBase::OutputAudioLoop(void) |
1090 | 1132 | uchar *fragment_buf = new uchar[fragment_size + 16]; |
1091 | 1133 | uchar *fragment = (uchar *)AOALIGN(fragment_buf[0]); |
1092 | 1134 | |
| 1135 | // to reduce startup latency, write silence in 8ms chunks |
| 1136 | int zero_fragment_size = (int)(0.008*samplerate/channels); |
| 1137 | zero_fragment_size *= bytes_per_frame; // make sure it's a multiple of bytes_per_frame |
| 1138 | if (zero_fragment_size > fragment_size) |
| 1139 | zero_fragment_size = fragment_size; |
| 1140 | |
1093 | 1141 | bzero(zeros, fragment_size); |
1094 | 1142 | |
1095 | 1143 | while (!killaudio) |
… |
… |
void AudioOutputBase::OutputAudioLoop(void) |
1138 | 1186 | continue; |
1139 | 1187 | } |
1140 | 1188 | |
| 1189 | #ifdef AUDIOTSTESTING |
| 1190 | VBAUDIOTS("WriteAudio Start"); |
| 1191 | #endif |
1141 | 1192 | Status(); |
1142 | 1193 | |
1143 | | if (GetAudioData(fragment, fragment_size, true)) |
| 1194 | // delay setting raud until after phys buffer is filled |
| 1195 | // so GetAudiotime will be accurate without locking |
| 1196 | int next_raud = raud; |
| 1197 | if (GetAudioData(fragment, fragment_size, true, &next_raud)) |
| 1198 | { |
1144 | 1199 | WriteAudio(fragment, fragment_size); |
| 1200 | raud = next_raud; |
| 1201 | } |
| 1202 | #ifdef AUDIOTSTESTING |
| 1203 | GetAudiotime(); |
| 1204 | VBAUDIOTS("WriteAudio Done"); |
| 1205 | #endif |
| 1206 | |
1145 | 1207 | } |
1146 | 1208 | |
1147 | 1209 | delete[] zeros; |
… |
… |
void AudioOutputBase::OutputAudioLoop(void) |
1158 | 1220 | * nothing. Otherwise, we'll copy less than 'size' bytes if that's all that's |
1159 | 1221 | * available. Returns the number of bytes copied. |
1160 | 1222 | */ |
1161 | | int AudioOutputBase::GetAudioData(uchar *buffer, int size, bool full_buffer) |
| 1223 | int AudioOutputBase::GetAudioData(uchar *buffer, int size, bool full_buffer, int *local_raud) |
1162 | 1224 | { |
1163 | 1225 | |
| 1226 | #define LRPOS audiobuffer + *local_raud |
1164 | 1227 | // re-check audioready() in case things changed. |
1165 | 1228 | // for example, ClearAfterSeek() might have run |
1166 | 1229 | int avail_size = audioready(); |
1167 | 1230 | int frag_size = size; |
1168 | 1231 | int written_size = size; |
1169 | 1232 | |
| 1233 | if (local_raud == NULL) |
| 1234 | local_raud = &raud; |
| 1235 | |
1170 | 1236 | if (!full_buffer && (size > avail_size)) |
1171 | 1237 | { |
1172 | 1238 | // when full_buffer is false, return any available data |
… |
… |
int AudioOutputBase::GetAudioData(uchar *buffer, int size, bool full_buffer) |
1192 | 1258 | { |
1193 | 1259 | if (fromFloats) |
1194 | 1260 | off = AudioOutputUtil::fromFloat(output_format, buffer, |
1195 | | RPOS, bdiff); |
| 1261 | LRPOS, bdiff); |
1196 | 1262 | else |
1197 | 1263 | { |
1198 | | memcpy(buffer, RPOS, bdiff); |
| 1264 | memcpy(buffer, LRPOS, bdiff); |
1199 | 1265 | off = bdiff; |
1200 | 1266 | } |
1201 | 1267 | |
1202 | 1268 | frag_size -= bdiff; |
1203 | | raud = 0; |
| 1269 | *local_raud = 0; |
1204 | 1270 | } |
1205 | 1271 | if (frag_size > 0) |
1206 | 1272 | { |
1207 | 1273 | if (fromFloats) |
1208 | 1274 | AudioOutputUtil::fromFloat(output_format, buffer + off, |
1209 | | RPOS, frag_size); |
| 1275 | LRPOS, frag_size); |
1210 | 1276 | else |
1211 | | memcpy(buffer + off, RPOS, frag_size); |
| 1277 | memcpy(buffer + off, LRPOS, frag_size); |
1212 | 1278 | } |
1213 | 1279 | |
1214 | | raud += frag_size; |
| 1280 | *local_raud += frag_size; |
1215 | 1281 | |
1216 | 1282 | // Mute individual channels through mono->stereo duplication |
1217 | 1283 | MuteState mute_state = GetMuteState(); |
diff --git a/mythtv/libs/libmyth/audiooutputbase.h b/mythtv/libs/libmyth/audiooutputbase.h
index 61ec79d..11a19ba 100644
a
|
b
|
class AudioOutputBase : public AudioOutput, public QThread |
57 | 57 | int GetSWVolume(void); |
58 | 58 | |
59 | 59 | // timecode is in milliseconds. |
60 | | virtual bool AddSamples(void *buffer, int frames, long long timecode); |
| 60 | virtual bool AddSamples(void *buffer, int frames, int64_t timecode); |
61 | 61 | |
62 | | virtual void SetTimecode(long long timecode); |
| 62 | virtual void SetTimecode(int64_t timecode); |
63 | 63 | virtual bool IsPaused(void) const { return actually_paused; } |
64 | 64 | virtual void Pause(bool paused); |
65 | 65 | void PauseUntilBuffered(void); |
… |
… |
class AudioOutputBase : public AudioOutput, public QThread |
67 | 67 | // Wait for all data to finish playing |
68 | 68 | virtual void Drain(void); |
69 | 69 | |
70 | | virtual int GetAudiotime(void); |
71 | | virtual int GetAudioBufferedTime(void); |
| 70 | virtual int64_t GetAudiotime(void); |
| 71 | virtual int64_t GetAudioBufferedTime(void); |
72 | 72 | |
73 | 73 | // Send output events showing current progress |
74 | 74 | virtual void Status(void); |
… |
… |
class AudioOutputBase : public AudioOutput, public QThread |
83 | 83 | |
84 | 84 | static const uint kAudioSRCInputSize = 16384<<1; |
85 | 85 | static const uint kAudioSRCOutputSize = 16384<<3; |
86 | | /// Audio Buffer Size -- should be divisible by 12,10,8,6,4,2.. |
87 | | static const uint kAudioRingBufferSize = 1536000; |
| 86 | /// Audio Buffer Size -- should be divisible by 32,24,16,12,10,8,6,4,2.. |
| 87 | static const uint kAudioRingBufferSize = 3072000; |
88 | 88 | |
89 | 89 | protected: |
90 | 90 | // You need to implement the following functions |
… |
… |
class AudioOutputBase : public AudioOutput, public QThread |
102 | 102 | virtual bool StartOutputThread(void); |
103 | 103 | virtual void StopOutputThread(void); |
104 | 104 | |
105 | | int GetAudioData(uchar *buffer, int buf_size, bool fill_buffer); |
| 105 | int GetAudioData(uchar *buffer, int buf_size, bool fill_buffer, int *local_raud = NULL); |
106 | 106 | |
107 | 107 | void OutputAudioLoop(void); |
108 | 108 | |
… |
… |
class AudioOutputBase : public AudioOutput, public QThread |
138 | 138 | bool passthru, enc, reenc; |
139 | 139 | |
140 | 140 | float stretchfactor; |
| 141 | int eff_stretchfactor; // scaled to 100000 as effdsp is |
141 | 142 | AudioOutputSource source; |
142 | 143 | |
143 | 144 | bool killaudio; |
… |
… |
class AudioOutputBase : public AudioOutput, public QThread |
153 | 154 | |
154 | 155 | private: |
155 | 156 | int CopyWithUpmix(char *buffer, int frames, int &org_waud); |
| 157 | void SetAudiotime(int frames, int64_t timecode); |
156 | 158 | AudioOutputSettings *output_settings; |
157 | 159 | bool need_resampler; |
158 | 160 | SRC_STATE *src_ctx; |
… |
… |
class AudioOutputBase : public AudioOutput, public QThread |
175 | 177 | |
176 | 178 | bool processing; |
177 | 179 | |
178 | | long long frames_buffered; |
| 180 | int64_t frames_buffered; |
179 | 181 | |
180 | 182 | bool audio_thread_exists; |
181 | 183 | |
… |
… |
class AudioOutputBase : public AudioOutput, public QThread |
188 | 190 | QMutex avsync_lock; |
189 | 191 | |
190 | 192 | // timecode of audio leaving the soundcard (same units as timecodes) |
191 | | long long audiotime; |
| 193 | int64_t audiotime; |
192 | 194 | |
193 | 195 | /* Audio circular buffer */ |
194 | 196 | int raud, waud; /* read and write positions */ |
195 | 197 | // timecode of audio most recently placed into buffer |
196 | | long long audbuf_timecode; |
| 198 | int64_t audbuf_timecode; |
197 | 199 | |
198 | 200 | QMutex killAudioLock; |
199 | 201 | |
diff --git a/mythtv/libs/libmythfreesurround/freesurround.cpp b/mythtv/libs/libmythfreesurround/freesurround.cpp
index 5e8b1f5..aef65a3 100644
a
|
b
|
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. |
29 | 29 | using namespace std; |
30 | 30 | |
31 | 31 | #include "compat.h" |
| 32 | #include "mythverbose.h" |
32 | 33 | #include "freesurround.h" |
33 | 34 | #include "el_processor.h" |
34 | 35 | |
35 | 36 | #include <QString> |
36 | 37 | #include <QDateTime> |
37 | 38 | |
38 | | #if 0 |
39 | | #define VERBOSE(args...) \ |
40 | | do { \ |
41 | | QDateTime dtmp = QDateTime::currentDateTime(); \ |
42 | | QString dtime = dtmp.toString("yyyy-MM-dd hh:mm:ss.zzz"); \ |
43 | | std::cout << dtime.toLocal8Bit().constData() << " " \ |
44 | | << QString(args).toLocal8Bit().constData() << std::endl; \ |
45 | | } while (0) |
46 | | #else |
47 | | #define VERBOSE(args...) |
48 | | #endif |
49 | | #if 0 |
50 | | #define VERBOSE1(args...) \ |
51 | | do { \ |
52 | | QDateTime dtmp = QDateTime::currentDateTime(); \ |
53 | | QString dtime = dtmp.toString("yyyy-MM-dd hh:mm:ss.zzz"); \ |
54 | | std::cout << dtime.toLocal8Bit().constData() << " " \ |
55 | | << QString(args).toLocal8Bit().constData() << std::endl; \ |
56 | | } while (0) |
57 | | #else |
58 | | #define VERBOSE1(args...) |
59 | | #endif |
60 | | |
61 | 39 | // our default internal block size, in floats |
62 | 40 | static const unsigned default_block_size = 8192; |
63 | 41 | // Gain of center and lfe channels in passive mode (sqrt 0.5) |
… |
… |
FreeSurround::FreeSurround(uint srate, bool moviemode, SurroundMode smode) : |
161 | 139 | processed_size(0), |
162 | 140 | surround_mode(smode) |
163 | 141 | { |
164 | | VERBOSE(QString("FreeSurround::FreeSurround rate %1 moviemode %2") |
| 142 | VERBOSE(VB_AUDIO+VB_EXTRA, QString("FreeSurround::FreeSurround rate %1 moviemode %2") |
165 | 143 | .arg(srate).arg(moviemode)); |
166 | 144 | |
167 | 145 | if (moviemode) |
… |
… |
FreeSurround::FreeSurround(uint srate, bool moviemode, SurroundMode smode) : |
193 | 171 | channel_select++; |
194 | 172 | if (channel_select>=6) |
195 | 173 | channel_select = 0; |
196 | | VERBOSE(QString("FreeSurround::FreeSurround channel_select %1") |
| 174 | VERBOSE(VB_AUDIO+VB_EXTRA, QString("FreeSurround::FreeSurround channel_select %1") |
197 | 175 | .arg(channel_select)); |
198 | 176 | #endif |
199 | | VERBOSE(QString("FreeSurround::FreeSurround done")); |
| 177 | VERBOSE(VB_AUDIO+VB_EXTRA, QString("FreeSurround::FreeSurround done")); |
200 | 178 | } |
201 | 179 | |
202 | 180 | void FreeSurround::SetParams() |
… |
… |
FreeSurround::fsurround_params::fsurround_params(int32_t center_width, |
224 | 202 | |
225 | 203 | FreeSurround::~FreeSurround() |
226 | 204 | { |
227 | | VERBOSE(QString("FreeSurround::~FreeSurround")); |
| 205 | VERBOSE(VB_AUDIO+VB_EXTRA, QString("FreeSurround::~FreeSurround")); |
228 | 206 | close(); |
229 | 207 | if (bufs) |
230 | 208 | { |
231 | 209 | bp.release((void*)1); |
232 | 210 | bufs = NULL; |
233 | 211 | } |
234 | | VERBOSE(QString("FreeSurround::~FreeSurround done")); |
| 212 | VERBOSE(VB_AUDIO+VB_EXTRA, QString("FreeSurround::~FreeSurround done")); |
235 | 213 | } |
236 | 214 | |
237 | 215 | uint FreeSurround::putFrames(void* buffer, uint numFrames, uint numChannels) |
… |
… |
uint FreeSurround::putFrames(void* buffer, uint numFrames, uint numChannels) |
289 | 267 | break; |
290 | 268 | } |
291 | 269 | ic += numFrames; |
292 | | in_count = ic; |
293 | 270 | processed = process; |
294 | 271 | if (ic != bs) |
| 272 | { |
| 273 | // don't modify unless no processing is to be done |
| 274 | // for audiotime consistency |
| 275 | in_count = ic; |
295 | 276 | break; |
296 | | in_count = 0; |
| 277 | } |
| 278 | // process_block takes some time so don't update in and out count |
| 279 | // before it's finished so that Audiotime is correctly calculated |
297 | 280 | if (process) |
298 | 281 | process_block(); |
| 282 | in_count = 0; |
299 | 283 | out_count = bs; |
300 | 284 | processed_size = bs; |
301 | 285 | break; |
302 | 286 | } |
303 | 287 | |
304 | | VERBOSE1(QString("FreeSurround::putFrames %1 %2 used %4 generated %5") |
| 288 | VERBOSE(VB_AUDIO+VB_TIMESTAMP+VB_EXTRA, QString("FreeSurround::putFrames %1 #ch %2 used %4 generated %5") |
305 | 289 | .arg(numFrames).arg(numChannels).arg(i).arg(out_count)); |
306 | 290 | |
307 | 291 | return i; |
… |
… |
uint FreeSurround::receiveFrames(void *buffer, uint maxFrames) |
318 | 302 | switch (surround_mode) |
319 | 303 | { |
320 | 304 | case SurroundModePassive: |
321 | | for (uint i = 0; i < maxFrames; i++) |
| 305 | for (i = 0; i < maxFrames; i++) |
322 | 306 | { |
323 | 307 | *output++ = bufs->l[outindex]; |
324 | 308 | *output++ = bufs->r[outindex]; |
… |
… |
uint FreeSurround::receiveFrames(void *buffer, uint maxFrames) |
341 | 325 | float *ls = &outputs[3][outindex]; |
342 | 326 | float *rs = &outputs[4][outindex]; |
343 | 327 | float *lfe = &outputs[5][outindex]; |
344 | | for (uint i = 0; i < maxFrames; i++) |
| 328 | for (i = 0; i < maxFrames; i++) |
345 | 329 | { |
346 | 330 | *output++ = *l++; |
347 | 331 | *output++ = *r++; |
… |
… |
uint FreeSurround::receiveFrames(void *buffer, uint maxFrames) |
361 | 345 | float *ls = &bufs->ls[outindex]; |
362 | 346 | float *rs = &bufs->rs[outindex]; |
363 | 347 | float *lfe = &bufs->lfe[outindex]; |
364 | | for (uint i = 0; i < maxFrames; i++) |
| 348 | for (i = 0; i < maxFrames; i++) |
365 | 349 | { |
366 | 350 | *output++ = *l++; |
367 | 351 | *output++ = *r++; |
… |
… |
uint FreeSurround::receiveFrames(void *buffer, uint maxFrames) |
376 | 360 | break; |
377 | 361 | } |
378 | 362 | out_count = oc; |
379 | | VERBOSE1(QString("FreeSurround::receiveFrames %1").arg(maxFrames)); |
| 363 | VERBOSE(VB_AUDIO+VB_TIMESTAMP+VB_EXTRA, QString("FreeSurround::receiveFrames %1").arg(maxFrames)); |
380 | 364 | return maxFrames; |
381 | 365 | } |
382 | 366 | |