commit 8ef87b6b0534a2656ec4307819e0749f6c49d4e5
Author: Mark Spieth <mspieth@digivation.com.au>
Date: Tue Apr 27 07:51:51 2010 +1000
smoother vsync with predictive frame skipping
diff --git a/mythtv/libs/libmyth/audiooutput.h b/mythtv/libs/libmyth/audiooutput.h
index 8947981..4fe5b8f 100644
a
|
b
|
|
3 | 3 | |
4 | 4 | #include <QString> |
5 | 5 | |
| 6 | #include "compat.h" |
6 | 7 | #include "audiosettings.h" |
7 | 8 | #include "mythcorecontext.h" |
8 | 9 | #include "volumebase.h" |
… |
… |
class MPUBLIC AudioOutput : public VolumeBase, public OutputListeners |
41 | 42 | |
42 | 43 | virtual void Reset(void) = 0; |
43 | 44 | |
44 | | virtual bool AddSamples(void *buffer, int samples, long long timecode) = 0; |
| 45 | virtual bool AddSamples(void *buffer, int samples, int64_t timecode) = 0; |
45 | 46 | |
46 | | virtual void SetTimecode(long long timecode) = 0; |
| 47 | virtual void SetTimecode(int64_t timecode) = 0; |
47 | 48 | virtual bool IsPaused(void) const = 0; |
48 | 49 | virtual void Pause(bool paused) = 0; |
49 | 50 | virtual void PauseUntilBuffered(void) = 0; |
… |
… |
class MPUBLIC AudioOutput : public VolumeBase, public OutputListeners |
51 | 52 | // Wait for all data to finish playing |
52 | 53 | virtual void Drain(void) = 0; |
53 | 54 | |
54 | | virtual int GetAudiotime(void) = 0; |
| 55 | virtual int64_t GetAudiotime(void) = 0; |
55 | 56 | |
56 | 57 | /// report amount of audio buffered in milliseconds. |
57 | | virtual int GetAudioBufferedTime(void) { return 0; } |
| 58 | virtual int64_t GetAudioBufferedTime(void) { return 0; } |
58 | 59 | |
59 | 60 | virtual void SetSourceBitrate(int ) { } |
60 | 61 | |
diff --git a/mythtv/libs/libmyth/audiooutputbase.cpp b/mythtv/libs/libmyth/audiooutputbase.cpp
index 9213adf..dbb3815 100644
a
|
b
|
AudioOutputBase::AudioOutputBase(const AudioSettings &settings) : |
56 | 56 | passthru(false), enc(false), |
57 | 57 | reenc(false), |
58 | 58 | stretchfactor(1.0f), |
| 59 | eff_stretchfactor(100000), |
59 | 60 | |
60 | 61 | source(settings.source), killaudio(false), |
61 | 62 | |
… |
… |
void AudioOutputBase::SetStretchFactorLocked(float lstretchfactor) |
179 | 180 | return; |
180 | 181 | |
181 | 182 | stretchfactor = lstretchfactor; |
| 183 | eff_stretchfactor = (int)(100000.0f * lstretchfactor + 0.5); |
182 | 184 | if (pSoundStretch) |
183 | 185 | { |
184 | 186 | VBGENERAL(QString("Changing time stretch to %1").arg(stretchfactor)); |
… |
… |
void AudioOutputBase::SetStretchFactorLocked(float lstretchfactor) |
202 | 204 | bytes_per_frame = source_channels * |
203 | 205 | AudioOutputSettings::SampleSize(FORMAT_FLT); |
204 | 206 | waud = raud = 0; |
| 207 | reset_active.Ref(); |
205 | 208 | } |
206 | 209 | } |
207 | 210 | } |
… |
… |
bool AudioOutputBase::ToggleUpmix(void) |
236 | 239 | audio_buflock.lock(); |
237 | 240 | avsync_lock.lock(); |
238 | 241 | waud = raud = 0; |
| 242 | reset_active.Ref(); |
239 | 243 | |
240 | 244 | configured_channels = |
241 | 245 | configured_channels == max_channels ? 2 : max_channels; |
… |
… |
void AudioOutputBase::Reconfigure(const AudioSettings &orig_settings) |
311 | 315 | QMutexLocker lockav(&avsync_lock); |
312 | 316 | |
313 | 317 | waud = raud = 0; |
| 318 | reset_active.Clear(); |
314 | 319 | actually_paused = processing = false; |
315 | 320 | |
316 | 321 | channels = settings.channels; |
… |
… |
void AudioOutputBase::Reset() |
582 | 587 | QMutexLocker lock(&audio_buflock); |
583 | 588 | QMutexLocker lockav(&avsync_lock); |
584 | 589 | |
585 | | raud = waud = audbuf_timecode = audiotime = frames_buffered = 0; |
| 590 | audbuf_timecode = audiotime = frames_buffered = 0; |
| 591 | waud = raud; // empty ring buffer |
| 592 | reset_active.Ref(); |
586 | 593 | current_seconds = -1; |
587 | 594 | was_paused = !pauseaudio; |
588 | 595 | |
… |
… |
void AudioOutputBase::Reset() |
596 | 603 | * Used by mythmusic for seeking since it doesn't provide timecodes to |
597 | 604 | * AddSamples() |
598 | 605 | */ |
599 | | void AudioOutputBase::SetTimecode(long long timecode) |
| 606 | void AudioOutputBase::SetTimecode(int64_t timecode) |
600 | 607 | { |
601 | 608 | audbuf_timecode = audiotime = timecode; |
602 | | frames_buffered = (long long)((timecode * source_samplerate) / 1000); |
| 609 | frames_buffered = (int64_t)((timecode * source_samplerate) / 1000); |
603 | 610 | } |
604 | 611 | |
605 | 612 | /** |
… |
… |
int AudioOutputBase::audioready() |
654 | 661 | /** |
655 | 662 | * Calculate the timecode of the samples that are about to become audible |
656 | 663 | */ |
657 | | int AudioOutputBase::GetAudiotime(void) |
| 664 | int64_t AudioOutputBase::GetAudiotime(void) |
658 | 665 | { |
659 | 666 | if (audbuf_timecode == 0) |
660 | 667 | return 0; |
661 | 668 | |
662 | | int soundcard_buffer = 0; |
663 | 669 | int obpf = output_bytes_per_frame; |
664 | | int totalbuffer; |
665 | | long long oldaudiotime; |
| 670 | int64_t oldaudiotime; |
666 | 671 | |
667 | 672 | /* We want to calculate 'audiotime', which is the timestamp of the audio |
668 | | which is leaving the sound card at this instant. |
| 673 | Which is leaving the sound card at this instant. |
669 | 674 | |
670 | 675 | We use these variables: |
671 | 676 | |
… |
… |
int AudioOutputBase::GetAudiotime(void) |
677 | 682 | 'totalbuffer' is the total # of bytes in our audio buffer, and the |
678 | 683 | sound card's buffer. */ |
679 | 684 | |
680 | | soundcard_buffer = GetBufferedOnSoundcard(); // bytes |
681 | 685 | |
682 | 686 | QMutexLocker lockav(&avsync_lock); |
683 | 687 | |
| 688 | int64_t soundcard_buffer = GetBufferedOnSoundcard(); // bytes |
| 689 | int64_t main_buffer = audioready(); |
| 690 | |
684 | 691 | /* audioready tells us how many bytes are in audiobuffer |
685 | 692 | scaled appropriately if output format != internal format */ |
686 | | totalbuffer = audioready() + soundcard_buffer; |
687 | | |
688 | | if (needs_upmix && upmixer) |
689 | | totalbuffer += upmixer->frameLatency() * obpf; |
690 | | |
691 | | if (pSoundStretch) |
692 | | { |
693 | | totalbuffer += pSoundStretch->numUnprocessedSamples() * obpf / |
694 | | stretchfactor; |
695 | | totalbuffer += pSoundStretch->numSamples() * obpf; |
696 | | } |
697 | | |
698 | | if (encoder) |
699 | | totalbuffer += encoder->Buffered(); |
700 | 693 | |
701 | 694 | oldaudiotime = audiotime; |
702 | 695 | |
703 | | audiotime = audbuf_timecode - (long long)(totalbuffer) * 100000 * |
704 | | stretchfactor / (obpf * effdsp); |
| 696 | // timecode is the stretch adjusted version |
| 697 | // of major post-stretched buffer contents |
| 698 | // processing latencies are catered for in AddSamples/SetAudiotime to eliminate |
| 699 | // race |
| 700 | audiotime = audbuf_timecode - (( (main_buffer + soundcard_buffer) * eff_stretchfactor ) / (effdsp * obpf)); |
705 | 701 | |
706 | 702 | /* audiotime should never go backwards, but we might get a negative |
707 | 703 | value if GetBufferedOnSoundcard() isn't updated by the driver very |
… |
… |
int AudioOutputBase::GetAudiotime(void) |
709 | 705 | if (audiotime < oldaudiotime) |
710 | 706 | audiotime = oldaudiotime; |
711 | 707 | |
712 | | VBAUDIOTS(QString("GetAudiotime audt=%3 atc=%4 tb=%5 sb=%6 " |
713 | | "sr=%7 obpf=%8 sf=%9") |
| 708 | VBAUDIOTS(QString("GetAudiotime audt=%1 atc=%2 mb=%3 sb=%4 tb=%5 " |
| 709 | "sr=%6 obpf=%7 bpf=%8 sf=%9 %10 %11") |
714 | 710 | .arg(audiotime).arg(audbuf_timecode) |
715 | | .arg(totalbuffer).arg(soundcard_buffer) |
716 | | .arg(samplerate).arg(obpf).arg(stretchfactor)); |
| 711 | .arg(main_buffer) |
| 712 | .arg(soundcard_buffer) |
| 713 | .arg(main_buffer+soundcard_buffer) |
| 714 | .arg(samplerate).arg(obpf).arg(bytes_per_frame).arg(stretchfactor) |
| 715 | .arg((main_buffer + soundcard_buffer) * eff_stretchfactor) |
| 716 | .arg(( (main_buffer + soundcard_buffer) * eff_stretchfactor ) / (effdsp * obpf)) |
| 717 | ); |
| 718 | |
| 719 | return audiotime; |
| 720 | } |
| 721 | |
| 722 | /** |
| 723 | * Set the timecode of the top of the ringbuffer |
| 724 | * Exclude all other processing elements as they don't vary |
| 725 | * between AddSamples calls |
| 726 | */ |
| 727 | void AudioOutputBase::SetAudiotime(int frames, int64_t timecode) |
| 728 | { |
| 729 | int64_t processframes_stretched = 0; |
| 730 | int64_t processframes_unstretched = 0; |
| 731 | |
| 732 | if (needs_upmix && upmixer) |
| 733 | processframes_unstretched -= upmixer->frameLatency(); |
717 | 734 | |
718 | | return (int)audiotime; |
| 735 | if (pSoundStretch) |
| 736 | { |
| 737 | processframes_unstretched -= pSoundStretch->numUnprocessedSamples(); |
| 738 | processframes_stretched -= pSoundStretch->numSamples(); |
| 739 | } |
| 740 | |
| 741 | if (encoder) |
| 742 | // the input buffered data is still in audio_bytes_per_sample format |
| 743 | processframes_stretched -= encoder->Buffered() / output_bytes_per_frame; |
| 744 | |
| 745 | audbuf_timecode = timecode + |
| 746 | (((frames + processframes_unstretched) * 100000) + |
| 747 | (processframes_stretched * eff_stretchfactor )) / effdsp; |
| 748 | |
| 749 | VBAUDIOTS(QString("SetAudiotime atc=%1 tc=%2 f=%3 pfu=%4 pfs=%5") |
| 750 | .arg(audbuf_timecode) |
| 751 | .arg(timecode) |
| 752 | .arg(frames) |
| 753 | .arg(processframes_unstretched) |
| 754 | .arg(processframes_stretched)); |
| 755 | #ifdef AUDIOTSTESTING |
| 756 | GetAudiotime(); |
| 757 | #endif |
719 | 758 | } |
720 | 759 | |
721 | 760 | /** |
… |
… |
int AudioOutputBase::GetAudiotime(void) |
723 | 762 | * audible and the samples most recently added to the audiobuffer, i.e. the |
724 | 763 | * time in ms representing the sum total of buffered samples |
725 | 764 | */ |
726 | | int AudioOutputBase::GetAudioBufferedTime(void) |
| 765 | int64_t AudioOutputBase::GetAudioBufferedTime(void) |
727 | 766 | { |
728 | 767 | int ret = audbuf_timecode - GetAudiotime(); |
729 | 768 | // Pulse can give us values that make this -ve |
… |
… |
int AudioOutputBase::CopyWithUpmix(char *buffer, int frames, int &org_waud) |
869 | 908 | * |
870 | 909 | * Returns false if there's not enough space right now |
871 | 910 | */ |
872 | | bool AudioOutputBase::AddSamples(void *buffer, int frames, long long timecode) |
| 911 | bool AudioOutputBase::AddSamples(void *buffer, int in_frames, int64_t timecode) |
873 | 912 | { |
874 | 913 | int org_waud = waud, afree = audiofree(); |
875 | | int bpf = bytes_per_frame, len = frames * source_bytes_per_frame; |
| 914 | int frames = in_frames; |
| 915 | int bpf = bytes_per_frame, len = in_frames * source_bytes_per_frame; |
876 | 916 | int used = kAudioRingBufferSize - afree; |
877 | 917 | bool music = false; |
| 918 | int bdiff; |
878 | 919 | |
879 | 920 | VBAUDIOTS(QString("AddSamples frames=%1, bytes=%2, used=%3, free=%4, " |
880 | 921 | "timecode=%5 needsupmix=%6") |
… |
… |
bool AudioOutputBase::AddSamples(void *buffer, int frames, long long timecode) |
896 | 937 | if (timecode < 0) |
897 | 938 | { |
898 | 939 | // Send original samples to mythmusic visualisation |
899 | | timecode = (long long)(frames_buffered) * 1000 / source_samplerate; |
| 940 | timecode = (int64_t)(frames_buffered) * 1000 / source_samplerate; |
900 | 941 | frames_buffered += frames; |
901 | 942 | dispatchVisual((uchar *)buffer, len, timecode, source_channels, |
902 | 943 | output_settings->FormatToBits(format)); |
… |
… |
bool AudioOutputBase::AddSamples(void *buffer, int frames, long long timecode) |
949 | 990 | .arg(src_strerror(error))); |
950 | 991 | |
951 | 992 | buffer = src_out; |
952 | | frames = src_data.output_frames_gen; |
| 993 | in_frames = frames = src_data.output_frames_gen; |
953 | 994 | } |
954 | 995 | else if (processing) |
955 | 996 | buffer = src_in; |
… |
… |
bool AudioOutputBase::AddSamples(void *buffer, int frames, long long timecode) |
957 | 998 | /* we want the timecode of the last sample added but we are given the |
958 | 999 | timecode of the first - add the time in ms that the frames added |
959 | 1000 | represent */ |
960 | | audbuf_timecode = timecode + ((long long)(frames) * 100000 / effdsp); |
| 1001 | //audbuf_timecode = timecode + ((int64_t)((frames) * 100000) / effdsp); |
961 | 1002 | |
962 | 1003 | // Copy samples into audiobuffer, with upmix if necessary |
963 | 1004 | if ((len = CopyWithUpmix((char *)buffer, frames, org_waud)) <= 0) |
964 | | return true; |
| 1005 | { |
| 1006 | //return true; |
| 1007 | goto done; |
| 1008 | } |
965 | 1009 | |
966 | 1010 | frames = len / bpf; |
967 | 1011 | |
968 | | int bdiff = kAudioRingBufferSize - waud; |
| 1012 | bdiff = kAudioRingBufferSize - waud; |
969 | 1013 | |
970 | 1014 | if (pSoundStretch) |
971 | 1015 | { |
… |
… |
bool AudioOutputBase::AddSamples(void *buffer, int frames, long long timecode) |
1043 | 1087 | |
1044 | 1088 | waud = org_waud; |
1045 | 1089 | |
| 1090 | done: |
| 1091 | SetAudiotime(in_frames, timecode); |
| 1092 | |
1046 | 1093 | return true; |
1047 | 1094 | } |
1048 | 1095 | |
… |
… |
void AudioOutputBase::OutputAudioLoop(void) |
1090 | 1137 | uchar *fragment_buf = new uchar[fragment_size + 16]; |
1091 | 1138 | uchar *fragment = (uchar *)AOALIGN(fragment_buf[0]); |
1092 | 1139 | |
| 1140 | // to reduce startup latency, write silence in 8ms chunks |
| 1141 | int zero_fragment_size = (int)(0.008*samplerate/channels); |
| 1142 | zero_fragment_size *= bytes_per_frame; // make sure it's a multiple of bytes_per_frame |
| 1143 | if (zero_fragment_size > fragment_size) |
| 1144 | zero_fragment_size = fragment_size; |
| 1145 | |
1093 | 1146 | bzero(zeros, fragment_size); |
1094 | 1147 | |
1095 | 1148 | while (!killaudio) |
… |
… |
void AudioOutputBase::OutputAudioLoop(void) |
1138 | 1191 | continue; |
1139 | 1192 | } |
1140 | 1193 | |
| 1194 | #ifdef AUDIOTSTESTING |
| 1195 | VBAUDIOTS("WriteAudio Start"); |
| 1196 | #endif |
1141 | 1197 | Status(); |
1142 | 1198 | |
1143 | | if (GetAudioData(fragment, fragment_size, true)) |
1144 | | WriteAudio(fragment, fragment_size); |
| 1199 | // delay setting raud until after phys buffer is filled |
| 1200 | // so GetAudiotime will be accurate without locking |
| 1201 | reset_active.TestAndDeref(); |
| 1202 | int next_raud = raud; |
| 1203 | if (GetAudioData(fragment, fragment_size, true, &next_raud)) |
| 1204 | { |
| 1205 | if (!reset_active.TestAndDeref()) |
| 1206 | { |
| 1207 | WriteAudio(fragment, fragment_size); |
| 1208 | if (!reset_active.TestAndDeref()) |
| 1209 | raud = next_raud; |
| 1210 | } |
| 1211 | } |
| 1212 | #ifdef AUDIOTSTESTING |
| 1213 | GetAudiotime(); |
| 1214 | VBAUDIOTS("WriteAudio Done"); |
| 1215 | #endif |
| 1216 | |
1145 | 1217 | } |
1146 | 1218 | |
1147 | 1219 | delete[] zeros; |
… |
… |
void AudioOutputBase::OutputAudioLoop(void) |
1158 | 1230 | * nothing. Otherwise, we'll copy less than 'size' bytes if that's all that's |
1159 | 1231 | * available. Returns the number of bytes copied. |
1160 | 1232 | */ |
1161 | | int AudioOutputBase::GetAudioData(uchar *buffer, int size, bool full_buffer) |
| 1233 | int AudioOutputBase::GetAudioData(uchar *buffer, int size, bool full_buffer, int *local_raud) |
1162 | 1234 | { |
1163 | 1235 | |
| 1236 | #define LRPOS audiobuffer + *local_raud |
1164 | 1237 | // re-check audioready() in case things changed. |
1165 | 1238 | // for example, ClearAfterSeek() might have run |
1166 | 1239 | int avail_size = audioready(); |
1167 | 1240 | int frag_size = size; |
1168 | 1241 | int written_size = size; |
1169 | 1242 | |
| 1243 | if (local_raud == NULL) |
| 1244 | local_raud = &raud; |
| 1245 | |
1170 | 1246 | if (!full_buffer && (size > avail_size)) |
1171 | 1247 | { |
1172 | 1248 | // when full_buffer is false, return any available data |
… |
… |
int AudioOutputBase::GetAudioData(uchar *buffer, int size, bool full_buffer) |
1192 | 1268 | { |
1193 | 1269 | if (fromFloats) |
1194 | 1270 | off = AudioOutputUtil::fromFloat(output_format, buffer, |
1195 | | RPOS, bdiff); |
| 1271 | LRPOS, bdiff); |
1196 | 1272 | else |
1197 | 1273 | { |
1198 | | memcpy(buffer, RPOS, bdiff); |
| 1274 | memcpy(buffer, LRPOS, bdiff); |
1199 | 1275 | off = bdiff; |
1200 | 1276 | } |
1201 | 1277 | |
1202 | 1278 | frag_size -= bdiff; |
1203 | | raud = 0; |
| 1279 | *local_raud = 0; |
1204 | 1280 | } |
1205 | 1281 | if (frag_size > 0) |
1206 | 1282 | { |
1207 | 1283 | if (fromFloats) |
1208 | 1284 | AudioOutputUtil::fromFloat(output_format, buffer + off, |
1209 | | RPOS, frag_size); |
| 1285 | LRPOS, frag_size); |
1210 | 1286 | else |
1211 | | memcpy(buffer + off, RPOS, frag_size); |
| 1287 | memcpy(buffer + off, LRPOS, frag_size); |
1212 | 1288 | } |
1213 | 1289 | |
1214 | | raud += frag_size; |
| 1290 | *local_raud += frag_size; |
1215 | 1291 | |
1216 | 1292 | // Mute individual channels through mono->stereo duplication |
1217 | 1293 | MuteState mute_state = GetMuteState(); |
diff --git a/mythtv/libs/libmyth/audiooutputbase.h b/mythtv/libs/libmyth/audiooutputbase.h
index 51e9be6..84e709d 100644
a
|
b
|
class FreeSurround; |
32 | 32 | class AudioOutputDigitalEncoder; |
33 | 33 | struct AVCodecContext; |
34 | 34 | |
| 35 | class AsyncLooseLock |
| 36 | { |
| 37 | public: |
| 38 | AsyncLooseLock() { head = tail = 0; } |
| 39 | void Clear() { head = tail = 0; } |
| 40 | void Ref() { head++; } |
| 41 | bool TestAndDeref() { bool r; if ((r=(head != tail))) tail++; return r; } |
| 42 | private: |
| 43 | int head; |
| 44 | int tail; |
| 45 | }; |
| 46 | |
35 | 47 | class AudioOutputBase : public AudioOutput, public QThread |
36 | 48 | { |
37 | 49 | public: |
… |
… |
class AudioOutputBase : public AudioOutput, public QThread |
57 | 69 | int GetSWVolume(void); |
58 | 70 | |
59 | 71 | // timecode is in milliseconds. |
60 | | virtual bool AddSamples(void *buffer, int frames, long long timecode); |
| 72 | virtual bool AddSamples(void *buffer, int frames, int64_t timecode); |
61 | 73 | |
62 | | virtual void SetTimecode(long long timecode); |
| 74 | virtual void SetTimecode(int64_t timecode); |
63 | 75 | virtual bool IsPaused(void) const { return actually_paused; } |
64 | 76 | virtual void Pause(bool paused); |
65 | 77 | void PauseUntilBuffered(void); |
… |
… |
class AudioOutputBase : public AudioOutput, public QThread |
67 | 79 | // Wait for all data to finish playing |
68 | 80 | virtual void Drain(void); |
69 | 81 | |
70 | | virtual int GetAudiotime(void); |
71 | | virtual int GetAudioBufferedTime(void); |
| 82 | virtual int64_t GetAudiotime(void); |
| 83 | virtual int64_t GetAudioBufferedTime(void); |
72 | 84 | |
73 | 85 | // Send output events showing current progress |
74 | 86 | virtual void Status(void); |
… |
… |
class AudioOutputBase : public AudioOutput, public QThread |
83 | 95 | |
84 | 96 | static const uint kAudioSRCInputSize = 16384<<1; |
85 | 97 | static const uint kAudioSRCOutputSize = 16384<<3; |
86 | | /// Audio Buffer Size -- should be divisible by 12,10,8,6,4,2.. |
87 | | static const uint kAudioRingBufferSize = 1536000; |
| 98 | /// Audio Buffer Size -- should be divisible by 32,24,16,12,10,8,6,4,2.. |
| 99 | static const uint kAudioRingBufferSize = 3072000; |
88 | 100 | |
89 | 101 | protected: |
90 | 102 | // You need to implement the following functions |
… |
… |
class AudioOutputBase : public AudioOutput, public QThread |
102 | 114 | virtual bool StartOutputThread(void); |
103 | 115 | virtual void StopOutputThread(void); |
104 | 116 | |
105 | | int GetAudioData(uchar *buffer, int buf_size, bool fill_buffer); |
| 117 | int GetAudioData(uchar *buffer, int buf_size, bool fill_buffer, int *local_raud = NULL); |
106 | 118 | |
107 | 119 | void OutputAudioLoop(void); |
108 | 120 | |
… |
… |
class AudioOutputBase : public AudioOutput, public QThread |
138 | 150 | bool passthru, enc, reenc; |
139 | 151 | |
140 | 152 | float stretchfactor; |
| 153 | int eff_stretchfactor; // scaled to 100000 as effdsp is |
141 | 154 | AudioOutputSource source; |
142 | 155 | |
143 | 156 | bool killaudio; |
… |
… |
class AudioOutputBase : public AudioOutput, public QThread |
153 | 166 | |
154 | 167 | private: |
155 | 168 | int CopyWithUpmix(char *buffer, int frames, int &org_waud); |
| 169 | void SetAudiotime(int frames, int64_t timecode); |
156 | 170 | AudioOutputSettings *output_settings; |
157 | 171 | bool need_resampler; |
158 | 172 | SRC_STATE *src_ctx; |
… |
… |
class AudioOutputBase : public AudioOutput, public QThread |
173 | 187 | |
174 | 188 | bool processing; |
175 | 189 | |
176 | | long long frames_buffered; |
| 190 | int64_t frames_buffered; |
177 | 191 | |
178 | 192 | bool audio_thread_exists; |
179 | 193 | |
… |
… |
class AudioOutputBase : public AudioOutput, public QThread |
186 | 200 | QMutex avsync_lock; |
187 | 201 | |
188 | 202 | // timecode of audio leaving the soundcard (same units as timecodes) |
189 | | long long audiotime; |
| 203 | int64_t audiotime; |
190 | 204 | |
191 | 205 | /* Audio circular buffer */ |
192 | 206 | int raud, waud; /* read and write positions */ |
193 | 207 | // timecode of audio most recently placed into buffer |
194 | | long long audbuf_timecode; |
| 208 | int64_t audbuf_timecode; |
| 209 | AsyncLooseLock reset_active; |
195 | 210 | |
196 | 211 | QMutex killAudioLock; |
197 | 212 | |
diff --git a/mythtv/libs/libmythfreesurround/freesurround.cpp b/mythtv/libs/libmythfreesurround/freesurround.cpp
index 5e8b1f5..aef65a3 100644
a
|
b
|
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. |
29 | 29 | using namespace std; |
30 | 30 | |
31 | 31 | #include "compat.h" |
| 32 | #include "mythverbose.h" |
32 | 33 | #include "freesurround.h" |
33 | 34 | #include "el_processor.h" |
34 | 35 | |
35 | 36 | #include <QString> |
36 | 37 | #include <QDateTime> |
37 | 38 | |
38 | | #if 0 |
39 | | #define VERBOSE(args...) \ |
40 | | do { \ |
41 | | QDateTime dtmp = QDateTime::currentDateTime(); \ |
42 | | QString dtime = dtmp.toString("yyyy-MM-dd hh:mm:ss.zzz"); \ |
43 | | std::cout << dtime.toLocal8Bit().constData() << " " \ |
44 | | << QString(args).toLocal8Bit().constData() << std::endl; \ |
45 | | } while (0) |
46 | | #else |
47 | | #define VERBOSE(args...) |
48 | | #endif |
49 | | #if 0 |
50 | | #define VERBOSE1(args...) \ |
51 | | do { \ |
52 | | QDateTime dtmp = QDateTime::currentDateTime(); \ |
53 | | QString dtime = dtmp.toString("yyyy-MM-dd hh:mm:ss.zzz"); \ |
54 | | std::cout << dtime.toLocal8Bit().constData() << " " \ |
55 | | << QString(args).toLocal8Bit().constData() << std::endl; \ |
56 | | } while (0) |
57 | | #else |
58 | | #define VERBOSE1(args...) |
59 | | #endif |
60 | | |
61 | 39 | // our default internal block size, in floats |
62 | 40 | static const unsigned default_block_size = 8192; |
63 | 41 | // Gain of center and lfe channels in passive mode (sqrt 0.5) |
… |
… |
FreeSurround::FreeSurround(uint srate, bool moviemode, SurroundMode smode) : |
161 | 139 | processed_size(0), |
162 | 140 | surround_mode(smode) |
163 | 141 | { |
164 | | VERBOSE(QString("FreeSurround::FreeSurround rate %1 moviemode %2") |
| 142 | VERBOSE(VB_AUDIO+VB_EXTRA, QString("FreeSurround::FreeSurround rate %1 moviemode %2") |
165 | 143 | .arg(srate).arg(moviemode)); |
166 | 144 | |
167 | 145 | if (moviemode) |
… |
… |
FreeSurround::FreeSurround(uint srate, bool moviemode, SurroundMode smode) : |
193 | 171 | channel_select++; |
194 | 172 | if (channel_select>=6) |
195 | 173 | channel_select = 0; |
196 | | VERBOSE(QString("FreeSurround::FreeSurround channel_select %1") |
| 174 | VERBOSE(VB_AUDIO+VB_EXTRA, QString("FreeSurround::FreeSurround channel_select %1") |
197 | 175 | .arg(channel_select)); |
198 | 176 | #endif |
199 | | VERBOSE(QString("FreeSurround::FreeSurround done")); |
| 177 | VERBOSE(VB_AUDIO+VB_EXTRA, QString("FreeSurround::FreeSurround done")); |
200 | 178 | } |
201 | 179 | |
202 | 180 | void FreeSurround::SetParams() |
… |
… |
FreeSurround::fsurround_params::fsurround_params(int32_t center_width, |
224 | 202 | |
225 | 203 | FreeSurround::~FreeSurround() |
226 | 204 | { |
227 | | VERBOSE(QString("FreeSurround::~FreeSurround")); |
| 205 | VERBOSE(VB_AUDIO+VB_EXTRA, QString("FreeSurround::~FreeSurround")); |
228 | 206 | close(); |
229 | 207 | if (bufs) |
230 | 208 | { |
231 | 209 | bp.release((void*)1); |
232 | 210 | bufs = NULL; |
233 | 211 | } |
234 | | VERBOSE(QString("FreeSurround::~FreeSurround done")); |
| 212 | VERBOSE(VB_AUDIO+VB_EXTRA, QString("FreeSurround::~FreeSurround done")); |
235 | 213 | } |
236 | 214 | |
237 | 215 | uint FreeSurround::putFrames(void* buffer, uint numFrames, uint numChannels) |
… |
… |
uint FreeSurround::putFrames(void* buffer, uint numFrames, uint numChannels) |
289 | 267 | break; |
290 | 268 | } |
291 | 269 | ic += numFrames; |
292 | | in_count = ic; |
293 | 270 | processed = process; |
294 | 271 | if (ic != bs) |
| 272 | { |
| 273 | // don't modify unless no processing is to be done |
| 274 | // for audiotime consistency |
| 275 | in_count = ic; |
295 | 276 | break; |
296 | | in_count = 0; |
| 277 | } |
| 278 | // process_block takes some time so don't update in and out count |
| 279 | // before it's finished so that Audiotime is correctly calculated |
297 | 280 | if (process) |
298 | 281 | process_block(); |
| 282 | in_count = 0; |
299 | 283 | out_count = bs; |
300 | 284 | processed_size = bs; |
301 | 285 | break; |
302 | 286 | } |
303 | 287 | |
304 | | VERBOSE1(QString("FreeSurround::putFrames %1 %2 used %4 generated %5") |
| 288 | VERBOSE(VB_AUDIO+VB_TIMESTAMP+VB_EXTRA, QString("FreeSurround::putFrames %1 #ch %2 used %4 generated %5") |
305 | 289 | .arg(numFrames).arg(numChannels).arg(i).arg(out_count)); |
306 | 290 | |
307 | 291 | return i; |
… |
… |
uint FreeSurround::receiveFrames(void *buffer, uint maxFrames) |
318 | 302 | switch (surround_mode) |
319 | 303 | { |
320 | 304 | case SurroundModePassive: |
321 | | for (uint i = 0; i < maxFrames; i++) |
| 305 | for (i = 0; i < maxFrames; i++) |
322 | 306 | { |
323 | 307 | *output++ = bufs->l[outindex]; |
324 | 308 | *output++ = bufs->r[outindex]; |
… |
… |
uint FreeSurround::receiveFrames(void *buffer, uint maxFrames) |
341 | 325 | float *ls = &outputs[3][outindex]; |
342 | 326 | float *rs = &outputs[4][outindex]; |
343 | 327 | float *lfe = &outputs[5][outindex]; |
344 | | for (uint i = 0; i < maxFrames; i++) |
| 328 | for (i = 0; i < maxFrames; i++) |
345 | 329 | { |
346 | 330 | *output++ = *l++; |
347 | 331 | *output++ = *r++; |
… |
… |
uint FreeSurround::receiveFrames(void *buffer, uint maxFrames) |
361 | 345 | float *ls = &bufs->ls[outindex]; |
362 | 346 | float *rs = &bufs->rs[outindex]; |
363 | 347 | float *lfe = &bufs->lfe[outindex]; |
364 | | for (uint i = 0; i < maxFrames; i++) |
| 348 | for (i = 0; i < maxFrames; i++) |
365 | 349 | { |
366 | 350 | *output++ = *l++; |
367 | 351 | *output++ = *r++; |
… |
… |
uint FreeSurround::receiveFrames(void *buffer, uint maxFrames) |
376 | 360 | break; |
377 | 361 | } |
378 | 362 | out_count = oc; |
379 | | VERBOSE1(QString("FreeSurround::receiveFrames %1").arg(maxFrames)); |
| 363 | VERBOSE(VB_AUDIO+VB_TIMESTAMP+VB_EXTRA, QString("FreeSurround::receiveFrames %1").arg(maxFrames)); |
380 | 364 | return maxFrames; |
381 | 365 | } |
382 | 366 | |
diff --git a/mythtv/libs/libmythtv/NuppelVideoPlayer.cpp b/mythtv/libs/libmythtv/NuppelVideoPlayer.cpp
index 78b048e..b8ff202 100644
a
|
b
|
NuppelVideoPlayer::NuppelVideoPlayer(bool muted) |
208 | 208 | videosync(NULL), delay(0), |
209 | 209 | vsynctol(30/4), avsync_delay(0), |
210 | 210 | avsync_adjustment(0), avsync_avg(0), |
211 | | avsync_oldavg(0), refreshrate(0), |
| 211 | avsync_oldavg(0), |
| 212 | avsync_predictor(0), avsync_predictor_enabled(false), |
| 213 | refreshrate(0), |
212 | 214 | lastsync(false), m_playing_slower(false), |
213 | 215 | m_stored_audio_stretchfactor(1.0), |
214 | 216 | audio_paused(false), |
… |
… |
void NuppelVideoPlayer::SetVideoParams(int width, int height, double fps, |
1141 | 1143 | video_frame_rate = fps; |
1142 | 1144 | float temp_speed = (play_speed == 0.0f) ? |
1143 | 1145 | audio_stretchfactor : play_speed; |
1144 | | frame_interval = (int)(1000000.0f / video_frame_rate / temp_speed); |
| 1146 | SetFrameInterval(kScan_Progressive, 1.0 / (video_frame_rate * temp_speed)); |
1145 | 1147 | } |
1146 | 1148 | |
1147 | 1149 | if (videoOutput) |
… |
… |
float NuppelVideoPlayer::WarpFactor(void) |
2358 | 2360 | return divergence; |
2359 | 2361 | } |
2360 | 2362 | |
| 2363 | void NuppelVideoPlayer::SetFrameInterval(FrameScanType scan, double frame_period) |
| 2364 | { |
| 2365 | frame_interval = (int)(1000000.0f * frame_period + 0.5f); |
| 2366 | if (!avsync_predictor_enabled) |
| 2367 | avsync_predictor = 0; |
| 2368 | avsync_predictor_enabled = false; |
| 2369 | |
| 2370 | VERBOSE(VB_PLAYBACK, LOC + QString("SetFrameInterval ps:%1 scan:%2") |
| 2371 | .arg(play_speed).arg(scan) |
| 2372 | ); |
| 2373 | if (play_speed < 1 || play_speed > 2 || refreshrate <= 0) |
| 2374 | return; |
| 2375 | |
| 2376 | avsync_predictor_enabled = ((frame_interval-(frame_interval/200)) < refreshrate); |
| 2377 | } |
| 2378 | |
| 2379 | void NuppelVideoPlayer::ResetAVSync(void) |
| 2380 | { |
| 2381 | avsync_avg = 0; |
| 2382 | avsync_oldavg = 0; |
| 2383 | if (!avsync_predictor_enabled || avsync_predictor >= refreshrate) |
| 2384 | avsync_predictor = 0; |
| 2385 | prevtc = 0; |
| 2386 | warpfactor = 1.0f; |
| 2387 | warpfactor_avg = 1.0f; |
| 2388 | VERBOSE(VB_PLAYBACK+VB_TIMESTAMP, LOC + "A/V sync reset"); |
| 2389 | } |
| 2390 | |
2361 | 2391 | void NuppelVideoPlayer::InitAVSync(void) |
2362 | 2392 | { |
2363 | 2393 | videosync->Start(); |
… |
… |
void NuppelVideoPlayer::InitAVSync(void) |
2379 | 2409 | VERBOSE(VB_GENERAL, msg); |
2380 | 2410 | msg = QString("Refresh rate: %1, frame interval: %2") |
2381 | 2411 | .arg(refreshrate).arg(frame_interval); |
2382 | | VERBOSE(VB_PLAYBACK, msg); |
| 2412 | VERBOSE(VB_PLAYBACK, LOC + msg); |
| 2413 | |
| 2414 | SetFrameInterval(m_scan, 1.0 / (video_frame_rate * play_speed)); |
2383 | 2415 | |
2384 | 2416 | // try to get preferential scheduling, but ignore if we fail to. |
2385 | 2417 | myth_nice(-19); |
2386 | 2418 | } |
2387 | 2419 | } |
2388 | 2420 | |
| 2421 | int64_t NuppelVideoPlayer::AVSyncGetAudiotime(void) |
| 2422 | { |
| 2423 | int64_t currentaudiotime = 0; |
| 2424 | audio_lock.lock(); |
| 2425 | if (audioOutput && normal_speed) |
| 2426 | { |
| 2427 | currentaudiotime = audioOutput->GetAudiotime(); |
| 2428 | } |
| 2429 | audio_lock.unlock(); |
| 2430 | return currentaudiotime; |
| 2431 | } |
| 2432 | |
2389 | 2433 | void NuppelVideoPlayer::AVSync(void) |
2390 | 2434 | { |
2391 | 2435 | float diverge = 0.0f; |
| 2436 | int vsync_delay_clock = 0; |
| 2437 | int64_t currentaudiotime = 0; |
| 2438 | |
2392 | 2439 | // attempt to reduce fps for standalone PIP |
2393 | 2440 | if (player_ctx->IsPIP() && framesPlayed % 2) |
2394 | 2441 | { |
… |
… |
void NuppelVideoPlayer::AVSync(void) |
2428 | 2475 | ps = kScan_Progressive; |
2429 | 2476 | |
2430 | 2477 | bool dropframe = false; |
| 2478 | QString dbg; |
| 2479 | |
| 2480 | if (avsync_predictor_enabled && !prebuffering) |
| 2481 | { |
| 2482 | avsync_predictor += frame_interval; |
| 2483 | if (avsync_predictor >= refreshrate) |
| 2484 | { |
| 2485 | int refreshperiodsinframe = avsync_predictor/refreshrate; |
| 2486 | avsync_predictor -= refreshrate * refreshperiodsinframe; |
| 2487 | } |
| 2488 | else |
| 2489 | { |
| 2490 | dropframe = true; |
| 2491 | dbg = "A/V predict drop frame, "; |
| 2492 | } |
| 2493 | } |
| 2494 | |
2431 | 2495 | if (diverge < -MAXDIVERGE) |
2432 | 2496 | { |
2433 | 2497 | dropframe = true; |
2434 | 2498 | // If video is way behind of audio, adjust for it... |
2435 | | QString dbg = QString("Video is %1 frames behind audio (too slow), ") |
| 2499 | dbg = QString("Video is %1 frames behind audio (too slow), ") |
2436 | 2500 | .arg(-diverge); |
| 2501 | } |
2437 | 2502 | |
| 2503 | if (dropframe) |
| 2504 | { |
2438 | 2505 | // Reset A/V Sync |
2439 | 2506 | lastsync = true; |
2440 | 2507 | |
| 2508 | currentaudiotime = AVSyncGetAudiotime(); |
| 2509 | |
2441 | 2510 | if (buffer && !using_null_videoout && |
2442 | 2511 | videoOutput->hasHWAcceleration() && |
2443 | 2512 | !videoOutput->IsSyncLocked()) |
… |
… |
void NuppelVideoPlayer::AVSync(void) |
2462 | 2531 | if (buffer) |
2463 | 2532 | videoOutput->PrepareFrame(buffer, ps); |
2464 | 2533 | |
2465 | | VERBOSE(VB_PLAYBACK|VB_TIMESTAMP, QString("AVSync waitforframe %1 %2") |
| 2534 | VERBOSE(VB_PLAYBACK|VB_TIMESTAMP, LOC + QString("AVSync waitforframe %1 %2") |
2466 | 2535 | .arg(avsync_adjustment).arg(m_double_framerate)); |
2467 | | videosync->WaitForFrame(avsync_adjustment + repeat_delay); |
2468 | | VERBOSE(VB_PLAYBACK|VB_TIMESTAMP, "AVSync show"); |
| 2536 | vsync_delay_clock = videosync->WaitForFrame(avsync_adjustment + repeat_delay); |
| 2537 | currentaudiotime = AVSyncGetAudiotime(); |
| 2538 | VERBOSE(VB_PLAYBACK|VB_TIMESTAMP, LOC + "AVSync show"); |
2469 | 2539 | if (!resetvideo) |
2470 | 2540 | videoOutput->Show(ps); |
2471 | 2541 | |
2472 | 2542 | if (videoOutput->IsErrored()) |
2473 | 2543 | { |
2474 | | VERBOSE(VB_IMPORTANT, "NVP: Error condition detected " |
| 2544 | VERBOSE(VB_IMPORTANT, LOC + "Error condition detected " |
2475 | 2545 | "in videoOutput after Show(), aborting playback."); |
2476 | 2546 | SetErrored(QObject::tr("Serious error detected in Video Output")); |
2477 | 2547 | return; |
… |
… |
void NuppelVideoPlayer::AVSync(void) |
2507 | 2577 | // Display the second field |
2508 | 2578 | videosync->AdvanceTrigger(); |
2509 | 2579 | #ifdef NEW_AVSYNC |
2510 | | videosync->WaitForFrame(avsync_adjustment); |
| 2580 | vsync_delay_clock = videosync->WaitForFrame(avsync_adjustment); |
2511 | 2581 | #else |
2512 | | videosync->WaitForFrame(0); |
| 2582 | vsync_delay_clock = videosync->WaitForFrame(0); |
2513 | 2583 | #endif |
2514 | 2584 | if (!resetvideo) |
2515 | 2585 | { |
… |
… |
void NuppelVideoPlayer::AVSync(void) |
2520 | 2590 | repeat_delay = frame_interval * buffer->repeat_pict * 0.5; |
2521 | 2591 | |
2522 | 2592 | if (repeat_delay) |
2523 | | VERBOSE(VB_TIMESTAMP, QString("A/V repeat_pict, adding %1 repeat " |
| 2593 | VERBOSE(VB_TIMESTAMP, LOC + QString("A/V repeat_pict, adding %1 repeat " |
2524 | 2594 | "delay").arg(repeat_delay)); |
2525 | 2595 | } |
2526 | 2596 | else |
2527 | 2597 | { |
2528 | | videosync->WaitForFrame(0); |
| 2598 | vsync_delay_clock = videosync->WaitForFrame(0); |
| 2599 | currentaudiotime = AVSyncGetAudiotime(); |
2529 | 2600 | } |
2530 | 2601 | |
2531 | 2602 | if (output_jmeter && output_jmeter->RecordCycleTime()) |
2532 | 2603 | { |
2533 | | VERBOSE(VB_PLAYBACK+VB_TIMESTAMP, QString("A/V avsync_delay: %1, " |
| 2604 | VERBOSE(VB_PLAYBACK+VB_TIMESTAMP, LOC + QString("A/V avsync_delay: %1, " |
2534 | 2605 | "avsync_avg: %2, warpfactor: %3, warpfactor_avg: %4") |
2535 | 2606 | .arg(avsync_delay / 1000).arg(avsync_avg / 1000) |
2536 | 2607 | .arg(warpfactor).arg(warpfactor_avg)); |
… |
… |
void NuppelVideoPlayer::AVSync(void) |
2546 | 2617 | // by cutting the frame rate in half for the length of this frame |
2547 | 2618 | |
2548 | 2619 | #ifdef NEW_AVSYNC |
2549 | | avsync_adjustment = refreshrate; |
| 2620 | //avsync_adjustment = refreshrate; |
| 2621 | avsync_adjustment = frame_interval; |
| 2622 | //avsync_adjustment = frame_interval*(((int)MAXDIVERGE)-1); |
2550 | 2623 | #else |
2551 | 2624 | avsync_adjustment = frame_interval; |
2552 | 2625 | #endif |
… |
… |
void NuppelVideoPlayer::AVSync(void) |
2556 | 2629 | "\t\t\tdoubling video frame interval to slow down.").arg(diverge)); |
2557 | 2630 | } |
2558 | 2631 | |
2559 | | audio_lock.lock(); |
2560 | 2632 | if (audioOutput && normal_speed) |
2561 | 2633 | { |
2562 | | long long currentaudiotime = audioOutput->GetAudiotime(); |
2563 | | audio_lock.unlock(); |
2564 | 2634 | #if 0 |
2565 | | VERBOSE(VB_PLAYBACK+VB_TIMESTAMP, QString( |
| 2635 | VERBOSE(VB_PLAYBACK+VB_TIMESTAMP, LOC + QString( |
2566 | 2636 | "A/V timecodes audio %1 video %2 frameinterval %3 " |
2567 | | "avdel %4 avg %5 tcoffset %6") |
| 2637 | "avdel %4 avg %5 tcoffset %6" |
| 2638 | " avp %7 avpen %8" |
| 2639 | " avdc %9" |
| 2640 | ) |
2568 | 2641 | .arg(currentaudiotime) |
2569 | 2642 | .arg(buffer->timecode) |
2570 | 2643 | .arg(frame_interval) |
2571 | | .arg(buffer->timecode - currentaudiotime) |
| 2644 | .arg(buffer->timecode - currentaudiotime - (int)(vsync_delay_clock*audio_stretchfactor+500)/1000) |
2572 | 2645 | .arg(avsync_avg) |
2573 | 2646 | .arg(tc_wrap[TC_AUDIO]) |
| 2647 | .arg(avsync_predictor) |
| 2648 | .arg(avsync_predictor_enabled) |
| 2649 | .arg(vsync_delay_clock) |
2574 | 2650 | ); |
2575 | 2651 | #endif |
2576 | 2652 | if (currentaudiotime != 0 && buffer->timecode != 0) |
2577 | 2653 | { // currentaudiotime == 0 after a seek |
2578 | 2654 | // The time at the start of this frame (ie, now) is given by |
2579 | 2655 | // last->timecode |
2580 | | int delta = (int)((buffer->timecode - prevtc)/play_speed) - (frame_interval / 1000); |
2581 | | prevtc = buffer->timecode; |
2582 | | //cerr << delta << " "; |
2583 | | |
2584 | | // If the timecode is off by a frame (dropped frame) wait to sync |
2585 | | if (delta > (int) frame_interval / 1200 && |
2586 | | delta < (int) frame_interval / 1000 * 3 && |
2587 | | prevrp == 0) |
| 2656 | if (prevtc != 0) |
2588 | 2657 | { |
2589 | | //cerr << "+ "; |
2590 | | videosync->AdvanceTrigger(); |
2591 | | if (m_double_framerate) |
| 2658 | int delta = (int)((buffer->timecode - prevtc)/play_speed) - (frame_interval / 1000); |
| 2659 | // If the timecode is off by a frame (dropped frame) wait to sync |
| 2660 | if (delta > (int) frame_interval / 1200 && |
| 2661 | delta < (int) frame_interval / 1000 * 3 && |
| 2662 | prevrp == 0) |
| 2663 | { |
| 2664 | //cerr << "+ "; |
| 2665 | VERBOSE(VB_PLAYBACK+VB_TIMESTAMP, LOC + QString("A/V delay %1").arg(delta)); |
2592 | 2666 | videosync->AdvanceTrigger(); |
| 2667 | if (m_double_framerate) |
| 2668 | videosync->AdvanceTrigger(); |
| 2669 | } |
2593 | 2670 | } |
| 2671 | prevtc = buffer->timecode; |
2594 | 2672 | prevrp = buffer->repeat_pict; |
2595 | 2673 | |
2596 | | avsync_delay = (buffer->timecode - currentaudiotime) * 1000;//usec |
| 2674 | avsync_delay = (buffer->timecode - currentaudiotime) * 1000 - (int)(vsync_delay_clock*audio_stretchfactor); //usec |
2597 | 2675 | // prevents major jitter when pts resets during dvd title |
2598 | 2676 | if (avsync_delay > 2000000 && player_ctx->buffer->isDVD()) |
2599 | 2677 | avsync_delay = 90000; |
2600 | 2678 | avsync_avg = (avsync_delay + (avsync_avg * 3)) / 4; |
2601 | 2679 | |
| 2680 | int avsync_used = avsync_avg; |
| 2681 | if (labs(avsync_used) > labs(avsync_delay)) |
| 2682 | avsync_used = avsync_delay; |
| 2683 | |
2602 | 2684 | /* If the audio time codes and video diverge, shift |
2603 | 2685 | the video by one interlaced field (1/2 frame) */ |
2604 | 2686 | if (!lastsync) |
2605 | 2687 | { |
2606 | | if (avsync_avg > frame_interval * 3 / 2) |
| 2688 | if (avsync_used > refreshrate) |
2607 | 2689 | { |
2608 | | avsync_adjustment = refreshrate; |
2609 | | lastsync = true; |
| 2690 | avsync_adjustment += refreshrate; |
| 2691 | //lastsync = true; |
| 2692 | VERBOSE(VB_PLAYBACK+VB_TIMESTAMP, LOC + "A/V avg high extend"); |
2610 | 2693 | } |
2611 | | else if (avsync_avg < 0 - frame_interval * 3 / 2) |
| 2694 | else if (avsync_used < 0 - refreshrate) |
2612 | 2695 | { |
2613 | | avsync_adjustment = -refreshrate; |
2614 | | lastsync = true; |
| 2696 | avsync_adjustment -= refreshrate; |
| 2697 | //lastsync = true; |
| 2698 | VERBOSE(VB_PLAYBACK+VB_TIMESTAMP, LOC + "A/V avg high skip"); |
2615 | 2699 | } |
2616 | 2700 | } |
2617 | 2701 | else |
… |
… |
void NuppelVideoPlayer::AVSync(void) |
2619 | 2703 | } |
2620 | 2704 | else |
2621 | 2705 | { |
2622 | | avsync_avg = 0; |
2623 | | avsync_oldavg = 0; |
| 2706 | ResetAVSync(); |
2624 | 2707 | } |
2625 | 2708 | } |
2626 | 2709 | else |
2627 | | audio_lock.unlock(); |
| 2710 | { |
| 2711 | VERBOSE(VB_PLAYBACK+VB_TIMESTAMP, LOC + QString("A/V no sync proc ns:%1 ao:%2").arg(normal_speed).arg(audioOutput != NULL)); |
| 2712 | } |
2628 | 2713 | } |
2629 | 2714 | |
2630 | 2715 | void NuppelVideoPlayer::DisplayPauseFrame(void) |
… |
… |
void NuppelVideoPlayer::DoPause(void) |
4237 | 4322 | } |
4238 | 4323 | |
4239 | 4324 | float temp_speed = audio_stretchfactor; |
4240 | | frame_interval = (int)(1000000.0 * ffrew_skip / video_frame_rate / temp_speed); |
| 4325 | SetFrameInterval(m_scan, ffrew_skip / (video_frame_rate * temp_speed)); |
4241 | 4326 | VERBOSE(VB_PLAYBACK, QString("rate: %1 speed: %2 skip: %3 = interval %4") |
4242 | 4327 | .arg(video_frame_rate).arg(temp_speed) |
4243 | 4328 | .arg(ffrew_skip).arg(frame_interval)); |
… |
… |
void NuppelVideoPlayer::DoPlay(void) |
4299 | 4384 | ClearAfterSeek(); |
4300 | 4385 | } |
4301 | 4386 | |
4302 | | frame_interval = (int) (1000000.0f * ffrew_skip / video_frame_rate / |
4303 | | play_speed); |
| 4387 | SetFrameInterval(m_scan, ffrew_skip / (video_frame_rate * play_speed)); |
4304 | 4388 | |
4305 | 4389 | VERBOSE(VB_PLAYBACK, LOC + "DoPlay: " + |
4306 | 4390 | QString("rate: %1 speed: %2 skip: %3 => new interval %4") |
… |
… |
void NuppelVideoPlayer::ClearAfterSeek(bool clearvideobuffers) |
4698 | 4782 | savedAudioTimecodeOffset = 0; |
4699 | 4783 | } |
4700 | 4784 | |
| 4785 | ResetAVSync(); |
4701 | 4786 | SetPrebuffering(true); |
4702 | 4787 | ResetAudio(); |
4703 | 4788 | |
diff --git a/mythtv/libs/libmythtv/NuppelVideoPlayer.h b/mythtv/libs/libmythtv/NuppelVideoPlayer.h
index af02d21..8d4017c 100644
a
|
b
|
class MPUBLIC NuppelVideoPlayer : public CC608Reader, public CC708Reader |
514 | 514 | float WarpFactor(void); |
515 | 515 | void WrapTimecode(long long &timecode, TCTypes tc_type); |
516 | 516 | void InitAVSync(void); |
| 517 | void ResetAVSync(void); |
| 518 | int64_t AVSyncGetAudiotime(void); |
| 519 | void SetFrameInterval(FrameScanType scan, double speed); |
517 | 520 | void AVSync(void); |
518 | 521 | void FallbackDeint(void); |
519 | 522 | void CheckExtraAudioDecode(void); |
… |
… |
class MPUBLIC NuppelVideoPlayer : public CC608Reader, public CC708Reader |
800 | 803 | int avsync_adjustment; |
801 | 804 | int avsync_avg; |
802 | 805 | int avsync_oldavg; |
| 806 | int avsync_predictor; |
| 807 | bool avsync_predictor_enabled; |
803 | 808 | int refreshrate; |
804 | 809 | bool lastsync; |
805 | 810 | bool m_playing_slower; |
diff --git a/mythtv/libs/libmythtv/avformatdecoder.cpp b/mythtv/libs/libmythtv/avformatdecoder.cpp
index d6547c4..a5a1899 100644
a
|
b
|
AvFormatDecoder::AvFormatDecoder(NuppelVideoPlayer *parent, |
482 | 482 | start_code_state(0xffffffff), |
483 | 483 | lastvpts(0), lastapts(0), |
484 | 484 | lastccptsu(0), |
| 485 | firstvpts(0), firstvptsinuse(false), |
485 | 486 | using_null_videoout(use_null_videoout), |
486 | 487 | video_codec_id(kCodec_NONE), |
487 | 488 | no_hardware_decoders(no_hardware_decode), |
… |
… |
void AvFormatDecoder::SeekReset(long long newKey, uint skipFrames, |
933 | 934 | if (decoded_video_frame) |
934 | 935 | GetNVP()->DiscardVideoFrame(decoded_video_frame); |
935 | 936 | } |
| 937 | |
| 938 | if (doflush) |
| 939 | { |
| 940 | firstvpts = 0; |
| 941 | firstvptsinuse = true; |
| 942 | } |
936 | 943 | } |
937 | 944 | |
938 | 945 | void AvFormatDecoder::Reset(bool reset_video_data, bool seek_reset) |
… |
… |
void AvFormatDecoder::MpegPreProcessPkt(AVStream *stream, AVPacket *pkt) |
2931 | 2938 | |
2932 | 2939 | gopset = false; |
2933 | 2940 | prevgoppos = 0; |
| 2941 | firstvpts = |
2934 | 2942 | lastapts = lastvpts = lastccptsu = 0; |
| 2943 | firstvptsinuse = true; |
2935 | 2944 | |
2936 | 2945 | // fps debugging info |
2937 | 2946 | float avFPS = normalized_fps(stream, context); |
… |
… |
bool AvFormatDecoder::H264PreProcessPkt(AVStream *stream, AVPacket *pkt) |
3041 | 3050 | |
3042 | 3051 | gopset = false; |
3043 | 3052 | prevgoppos = 0; |
| 3053 | firstvpts = |
3044 | 3054 | lastapts = lastvpts = lastccptsu = 0; |
| 3055 | firstvptsinuse = true; |
3045 | 3056 | |
3046 | 3057 | // fps debugging info |
3047 | 3058 | float avFPS = normalized_fps(stream, context); |
… |
… |
bool AvFormatDecoder::ProcessVideoPacket(AVStream *curstream, AVPacket *pkt) |
3270 | 3281 | framesPlayed++; |
3271 | 3282 | |
3272 | 3283 | lastvpts = temppts; |
| 3284 | if (!firstvpts && firstvptsinuse) |
| 3285 | firstvpts = temppts; |
3273 | 3286 | |
3274 | 3287 | return true; |
3275 | 3288 | } |
… |
… |
bool AvFormatDecoder::ProcessAudioPacket(AVStream *curstream, AVPacket *pkt, |
4044 | 4057 | skipaudio = false; |
4045 | 4058 | } |
4046 | 4059 | |
| 4060 | // skip any audio frames preceding first video frame |
| 4061 | if (firstvptsinuse && firstvpts && (lastapts < firstvpts)) |
| 4062 | { |
| 4063 | VERBOSE(VB_PLAYBACK+VB_TIMESTAMP, |
| 4064 | LOC + QString("discarding early audio timecode %1 %2 %3") |
| 4065 | .arg(pkt->pts).arg(pkt->dts).arg(lastapts)); |
| 4066 | break; |
| 4067 | } |
| 4068 | firstvptsinuse = false; |
| 4069 | |
4047 | 4070 | avcodeclock->lock(); |
4048 | 4071 | data_size = 0; |
4049 | 4072 | |
diff --git a/mythtv/libs/libmythtv/avformatdecoder.h b/mythtv/libs/libmythtv/avformatdecoder.h
index 3ad2c70..1192546 100644
a
|
b
|
class AvFormatDecoder : public DecoderBase |
261 | 261 | long long lastvpts; |
262 | 262 | long long lastapts; |
263 | 263 | long long lastccptsu; |
| 264 | long long firstvpts; |
| 265 | bool firstvptsinuse; |
264 | 266 | |
265 | 267 | bool using_null_videoout; |
266 | 268 | MythCodecID video_codec_id; |
diff --git a/mythtv/libs/libmythtv/vsync.cpp b/mythtv/libs/libmythtv/vsync.cpp
index 59b5db7..728a759 100644
a
|
b
|
VideoSync::VideoSync(VideoOutput *video_output, |
123 | 123 | bool halve_frame_interval) : |
124 | 124 | m_video_output(video_output), m_frame_interval(frameint), |
125 | 125 | m_refresh_interval(refreshint), m_interlaced(halve_frame_interval), |
126 | | m_delay(-1) |
| 126 | m_nexttrigger(0), |
| 127 | m_delay(-1), |
| 128 | m_synchronous(false) |
127 | 129 | { |
128 | | bzero(&m_nexttrigger, sizeof(m_nexttrigger)); |
129 | 130 | |
130 | 131 | int tolerance = m_refresh_interval / 200; |
131 | 132 | if (m_interlaced && m_refresh_interval > ((m_frame_interval/2) + tolerance)) |
… |
… |
VideoSync::VideoSync(VideoOutput *video_output, |
136 | 137 | |
137 | 138 | void VideoSync::Start(void) |
138 | 139 | { |
139 | | gettimeofday(&m_nexttrigger, NULL); // now |
| 140 | struct timeval now_tv; |
| 141 | gettimeofday(&now_tv, NULL); // now |
| 142 | m_nexttrigger = now_tv.tv_sec * 1000000LL + now_tv.tv_usec; |
140 | 143 | } |
141 | 144 | |
142 | 145 | /** \fn VideoSync::SetFrameInterval(int fr, bool intr) |
… |
… |
void VideoSync::SetFrameInterval(int fr, bool intr) |
147 | 150 | m_frame_interval = fr; |
148 | 151 | m_interlaced = intr; |
149 | 152 | int tolerance = m_refresh_interval / 200; |
| 153 | double sync_factor = fr * 2.0f / intr; |
| 154 | sync_factor = sync_factor - round(sync_factor); |
| 155 | m_synchronous = (sync_factor >= -0.005) && (sync_factor <= 0.005); |
150 | 156 | if (m_interlaced && m_refresh_interval > ((m_frame_interval/2) + tolerance)) |
151 | 157 | m_interlaced = false; // can't display both fields at 2x rate |
152 | 158 | |
153 | | VERBOSE(VB_PLAYBACK, QString("Set video sync frame interval to %1") |
154 | | .arg(m_frame_interval)); |
155 | | } |
156 | | |
157 | | void VideoSync::OffsetTimeval(struct timeval& tv, int offset) |
158 | | { |
159 | | tv.tv_usec += offset; |
160 | | while (tv.tv_usec > 999999) |
161 | | { |
162 | | tv.tv_sec++; |
163 | | tv.tv_usec -= 1000000; |
164 | | } |
165 | | while (tv.tv_usec < 0) |
166 | | { |
167 | | tv.tv_sec--; |
168 | | tv.tv_usec += 1000000; |
169 | | } |
| 159 | VERBOSE(VB_PLAYBACK, QString("Set video sync frame interval to %1 (synced:%2)") |
| 160 | .arg(m_frame_interval).arg(m_synchronous)); |
170 | 161 | } |
171 | 162 | |
172 | 163 | /** \fn VideoSync::UpdateNexttrigger() |
… |
… |
void VideoSync::UpdateNexttrigger() |
179 | 170 | // Offset by frame interval -- if interlaced, only delay by half |
180 | 171 | // frame interval |
181 | 172 | if (m_interlaced) |
182 | | OffsetTimeval(m_nexttrigger, m_frame_interval/2); |
| 173 | m_nexttrigger += m_frame_interval/2; |
183 | 174 | else |
184 | | OffsetTimeval(m_nexttrigger, m_frame_interval); |
| 175 | m_nexttrigger += m_frame_interval; |
185 | 176 | } |
186 | 177 | |
187 | 178 | /** \fn VideoSync::CalcDelay() |
… |
… |
void VideoSync::UpdateNexttrigger() |
197 | 188 | */ |
198 | 189 | int VideoSync::CalcDelay() |
199 | 190 | { |
200 | | struct timeval now; |
201 | | gettimeofday(&now, NULL); |
| 191 | struct timeval now_tv; |
| 192 | gettimeofday(&now_tv, NULL); |
202 | 193 | //cout << "CalcDelay: next: " << timeval_str(m_nexttrigger) << " now " |
203 | 194 | // << timeval_str(now) << endl; |
| 195 | int64_t now = now_tv.tv_sec * 1000000LL + now_tv.tv_usec; |
204 | 196 | |
205 | | int ret_val = (m_nexttrigger.tv_sec - now.tv_sec) * 1000000 + |
206 | | (m_nexttrigger.tv_usec - now.tv_usec); |
| 197 | int ret_val = m_nexttrigger - now; |
207 | 198 | |
208 | 199 | //cout << "delay " << ret_val << endl; |
209 | 200 | |
… |
… |
int VideoSync::CalcDelay() |
215 | 206 | ret_val = m_frame_interval * 4; |
216 | 207 | |
217 | 208 | // set nexttrigger to our new target time |
218 | | m_nexttrigger.tv_sec = now.tv_sec; |
219 | | m_nexttrigger.tv_usec = now.tv_usec; |
220 | | OffsetTimeval(m_nexttrigger, ret_val); |
| 209 | m_nexttrigger = now; |
| 210 | m_nexttrigger += ret_val; |
221 | 211 | } |
222 | 212 | |
223 | | if (ret_val < -m_frame_interval) |
| 213 | if ((ret_val < -m_frame_interval) && (m_frame_interval >= m_refresh_interval)) |
224 | 214 | { |
225 | 215 | ret_val = -m_frame_interval; |
226 | 216 | |
227 | 217 | // set nexttrigger to our new target time |
228 | | m_nexttrigger.tv_sec = now.tv_sec; |
229 | | m_nexttrigger.tv_usec = now.tv_usec; |
230 | | OffsetTimeval(m_nexttrigger, ret_val); |
| 218 | m_nexttrigger = now; |
| 219 | m_nexttrigger += ret_val; |
231 | 220 | } |
232 | 221 | |
233 | 222 | return ret_val; |
… |
… |
int VideoSync::CalcDelay() |
244 | 233 | void VideoSync::KeepPhase() |
245 | 234 | { |
246 | 235 | // cerr << m_delay << endl; |
247 | | if (m_delay < -(m_refresh_interval/2)) |
248 | | OffsetTimeval(m_nexttrigger, 200); |
249 | | else if (m_delay > -500) |
250 | | OffsetTimeval(m_nexttrigger, -2000); |
| 236 | if (m_synchronous) |
| 237 | { |
| 238 | if (m_delay < -(m_refresh_interval - 500)) |
| 239 | m_nexttrigger += 200; |
| 240 | else if (m_delay > -500) |
| 241 | m_nexttrigger += -2000; |
| 242 | } |
| 243 | else |
| 244 | { |
| 245 | if (m_delay < -(m_refresh_interval + 500)) |
| 246 | m_nexttrigger += 200; |
| 247 | else if (m_delay >= 0) |
| 248 | m_nexttrigger += -2000; |
| 249 | } |
251 | 250 | } |
252 | 251 | |
253 | 252 | #ifndef _WIN32 |
… |
… |
void DRMVideoSync::Start(void) |
337 | 336 | VideoSync::Start(); |
338 | 337 | } |
339 | 338 | |
340 | | void DRMVideoSync::WaitForFrame(int sync_delay) |
| 339 | int DRMVideoSync::WaitForFrame(int sync_delay) |
341 | 340 | { |
342 | 341 | // Offset for externally-provided A/V sync delay |
343 | | OffsetTimeval(m_nexttrigger, sync_delay); |
| 342 | m_nexttrigger += sync_delay; |
344 | 343 | |
345 | 344 | m_delay = CalcDelay(); |
346 | 345 | //cerr << "WaitForFrame at : " << m_delay; |
… |
… |
void DRMVideoSync::WaitForFrame(int sync_delay) |
360 | 359 | if (m_delay > 0) |
361 | 360 | { |
362 | 361 | // Wait for any remaining retrace intervals in one pass. |
363 | | int n = m_delay / m_refresh_interval + 1; |
| 362 | int n = (m_delay + m_refresh_interval - 1) / m_refresh_interval; |
364 | 363 | |
365 | 364 | drm_wait_vblank_t blank; |
366 | 365 | blank.request.type = DRM_VBLANK_RELATIVE; |
… |
… |
void DRMVideoSync::WaitForFrame(int sync_delay) |
370 | 369 | //cerr << "Wait " << n << " intervals. Count " << blank.request.sequence; |
371 | 370 | //cerr << " Delay " << m_delay << endl; |
372 | 371 | } |
| 372 | return m_delay; |
373 | 373 | } |
374 | 374 | |
375 | 375 | void DRMVideoSync::AdvanceTrigger(void) |
… |
… |
void OpenGLVideoSync::Start(void) |
497 | 497 | #endif /* USING_OPENGL_VSYNC */ |
498 | 498 | } |
499 | 499 | |
500 | | void OpenGLVideoSync::WaitForFrame(int sync_delay) |
| 500 | int OpenGLVideoSync::WaitForFrame(int sync_delay) |
501 | 501 | { |
502 | 502 | (void) sync_delay; |
503 | 503 | #ifdef USING_OPENGL_VSYNC |
| 504 | //#define GLVSYNCDEBUG |
| 505 | #ifdef GLVSYNCDEBUG |
| 506 | int refreshcount = 0; |
| 507 | #endif |
504 | 508 | const QString msg1("First A/V Sync"), msg2("Second A/V Sync"); |
505 | | OffsetTimeval(m_nexttrigger, sync_delay); |
| 509 | m_nexttrigger += sync_delay; |
506 | 510 | |
507 | 511 | VideoOutput *vo = dynamic_cast<VideoOutput*>(m_video_output); |
508 | 512 | if (vo && vo->IsEmbedding()) |
… |
… |
void OpenGLVideoSync::WaitForFrame(int sync_delay) |
510 | 514 | m_delay = CalcDelay(); |
511 | 515 | if (m_delay > 0) |
512 | 516 | usleep(m_delay); |
513 | | return; |
| 517 | return 0; |
514 | 518 | } |
515 | 519 | |
516 | 520 | int err; |
517 | 521 | if (!m_context) |
518 | | return; |
| 522 | return 0; |
519 | 523 | unsigned int frameNum = 0; |
520 | 524 | |
521 | 525 | OpenGLContextLocker ctx_lock(m_context); |
522 | 526 | err = gMythGLXGetVideoSyncSGI(&frameNum); |
523 | 527 | checkGLSyncError("Frame Number Query", err); |
524 | 528 | |
| 529 | #ifdef GLVSYNCDEBUG |
| 530 | int delay1 = m_delay; |
| 531 | int delay2; |
| 532 | #endif |
525 | 533 | // Always sync to the next retrace execpt when we are very late. |
526 | 534 | if ((m_delay = CalcDelay()) > -(m_refresh_interval/2)) |
527 | 535 | { |
| 536 | #ifdef GLVSYNCDEBUG |
| 537 | delay2 = m_delay; |
| 538 | #endif |
528 | 539 | err = gMythGLXWaitVideoSyncSGI(2, (frameNum+1)%2 ,&frameNum); |
529 | 540 | checkGLSyncError(msg1, err); |
530 | 541 | m_delay = CalcDelay(); |
| 542 | #ifdef GLVSYNCDEBUG |
| 543 | refreshcount++; |
| 544 | #endif |
531 | 545 | } |
| 546 | #ifdef GLVSYNCDEBUG |
| 547 | else |
| 548 | delay2 = m_delay; |
| 549 | #endif |
532 | 550 | |
| 551 | #ifdef GLVSYNCDEBUG |
| 552 | int delay3 = m_delay; |
| 553 | #endif |
533 | 554 | // Wait for any remaining retrace intervals in one pass. |
534 | 555 | if (m_delay > 0) |
535 | 556 | { |
536 | | uint n = m_delay / m_refresh_interval + 1; |
| 557 | uint n = (m_delay + m_refresh_interval - 1) / m_refresh_interval; |
| 558 | #ifdef GLVSYNCDEBUG |
| 559 | refreshcount += (int)n; |
| 560 | #endif |
537 | 561 | err = gMythGLXWaitVideoSyncSGI((n+1), (frameNum+n)%(n+1), &frameNum); |
538 | 562 | checkGLSyncError(msg2, err); |
539 | 563 | m_delay = CalcDelay(); |
540 | 564 | } |
| 565 | #ifdef GLVSYNCDEBUG |
| 566 | VERBOSE(VB_PLAYBACK+VB_TIMESTAMP, QString("VS: WFF: ri:%1 fi:%2 delay1:%3 delay2:%4 delay3:%5 skip:%6 finaldelay:%7") |
| 567 | .arg(m_refresh_interval) |
| 568 | .arg(m_frame_interval) |
| 569 | .arg(delay1) |
| 570 | .arg(delay2) |
| 571 | .arg(delay3) |
| 572 | .arg(refreshcount) |
| 573 | .arg(m_delay) |
| 574 | ); |
| 575 | #endif |
541 | 576 | |
542 | 577 | #endif /* USING_OPENGL_VSYNC */ |
| 578 | return m_delay; |
543 | 579 | } |
544 | 580 | |
545 | 581 | void OpenGLVideoSync::AdvanceTrigger(void) |
… |
… |
void OpenGLVideoSync::AdvanceTrigger(void) |
548 | 584 | |
549 | 585 | KeepPhase(); |
550 | 586 | UpdateNexttrigger(); |
| 587 | #ifdef GLVSYNCDEBUG |
| 588 | VERBOSE(VB_PLAYBACK+VB_TIMESTAMP, "VS: AdvanceTrigger"); |
| 589 | #endif |
551 | 590 | #endif /* USING_OPENGL_VSYNC */ |
552 | 591 | } |
553 | 592 | #endif /* !_WIN32 */ |
… |
… |
bool RTCVideoSync::TryInit(void) |
594 | 633 | return true; |
595 | 634 | } |
596 | 635 | |
597 | | void RTCVideoSync::WaitForFrame(int sync_delay) |
| 636 | int RTCVideoSync::WaitForFrame(int sync_delay) |
598 | 637 | { |
599 | | OffsetTimeval(m_nexttrigger, sync_delay); |
| 638 | m_nexttrigger += sync_delay; |
600 | 639 | |
601 | 640 | m_delay = CalcDelay(); |
602 | 641 | |
… |
… |
void RTCVideoSync::WaitForFrame(int sync_delay) |
609 | 648 | if ((val < 0) && (m_delay > 0)) |
610 | 649 | usleep(m_delay); |
611 | 650 | } |
| 651 | return 0; |
612 | 652 | } |
613 | 653 | |
614 | 654 | void RTCVideoSync::AdvanceTrigger(void) |
… |
… |
bool VDPAUVideoSync::TryInit(void) |
637 | 677 | return true; |
638 | 678 | } |
639 | 679 | |
640 | | void VDPAUVideoSync::WaitForFrame(int sync_delay) |
| 680 | int VDPAUVideoSync::WaitForFrame(int sync_delay) |
641 | 681 | { |
642 | 682 | // Offset for externally-provided A/V sync delay |
643 | | OffsetTimeval(m_nexttrigger, sync_delay); |
| 683 | m_nexttrigger += sync_delay; |
644 | 684 | m_delay = CalcDelay(); |
645 | 685 | |
646 | 686 | if (m_delay < 0) |
… |
… |
void VDPAUVideoSync::WaitForFrame(int sync_delay) |
648 | 688 | |
649 | 689 | VideoOutputVDPAU *vo = (VideoOutputVDPAU *)(m_video_output); |
650 | 690 | vo->SetNextFrameDisplayTimeOffset(m_delay); |
| 691 | return 0; |
651 | 692 | } |
652 | 693 | |
653 | 694 | void VDPAUVideoSync::AdvanceTrigger(void) |
… |
… |
bool BusyWaitVideoSync::TryInit(void) |
674 | 715 | return true; |
675 | 716 | } |
676 | 717 | |
677 | | void BusyWaitVideoSync::WaitForFrame(int sync_delay) |
| 718 | int BusyWaitVideoSync::WaitForFrame(int sync_delay) |
678 | 719 | { |
679 | 720 | // Offset for externally-provided A/V sync delay |
680 | | OffsetTimeval(m_nexttrigger, sync_delay); |
| 721 | m_nexttrigger += sync_delay; |
681 | 722 | |
682 | 723 | m_delay = CalcDelay(); |
683 | 724 | |
… |
… |
void BusyWaitVideoSync::WaitForFrame(int sync_delay) |
703 | 744 | if (cnt > 1) |
704 | 745 | m_cheat -= 200; |
705 | 746 | } |
| 747 | return 0; |
706 | 748 | } |
707 | 749 | |
708 | 750 | void BusyWaitVideoSync::AdvanceTrigger(void) |
… |
… |
bool USleepVideoSync::TryInit(void) |
725 | 767 | return true; |
726 | 768 | } |
727 | 769 | |
728 | | void USleepVideoSync::WaitForFrame(int sync_delay) |
| 770 | int USleepVideoSync::WaitForFrame(int sync_delay) |
729 | 771 | { |
730 | 772 | // Offset for externally-provided A/V sync delay |
731 | | OffsetTimeval(m_nexttrigger, sync_delay); |
| 773 | m_nexttrigger += sync_delay; |
732 | 774 | |
733 | 775 | m_delay = CalcDelay(); |
734 | 776 | if (m_delay > 0) |
735 | 777 | usleep(m_delay); |
| 778 | return 0; |
736 | 779 | } |
737 | 780 | |
738 | 781 | void USleepVideoSync::AdvanceTrigger(void) |
diff --git a/mythtv/libs/libmythtv/vsync.h b/mythtv/libs/libmythtv/vsync.h
index f077949..f8b1c4b 100644
a
|
b
|
class VideoSync |
70 | 70 | virtual void Start(void); |
71 | 71 | |
72 | 72 | /** \brief Waits for next a frame or field. |
| 73 | * Returns delay to real frame timing in usec |
73 | 74 | * |
74 | 75 | * Start(void), WaitForFrame(void), and Stop(void) should |
75 | 76 | * always be called from same thread, to prevent bad |
… |
… |
class VideoSync |
78 | 79 | * \param sync_delay time until the desired frame or field |
79 | 80 | * \sa CalcDelay(void), KeepPhase(void) |
80 | 81 | */ |
81 | | virtual void WaitForFrame(int sync_delay) = 0; |
| 82 | virtual int WaitForFrame(int sync_delay) = 0; |
82 | 83 | |
83 | 84 | /// \brief Use the next frame or field for CalcDelay(void) |
84 | 85 | /// and WaitForFrame(int). |
… |
… |
class VideoSync |
104 | 105 | uint frame_interval, uint refresh_interval, |
105 | 106 | bool interlaced); |
106 | 107 | protected: |
107 | | static void OffsetTimeval(struct timeval& tv, int offset); |
108 | 108 | void UpdateNexttrigger(void); |
109 | 109 | int CalcDelay(void); |
110 | 110 | void KeepPhase(void); |
… |
… |
class VideoSync |
113 | 113 | int m_frame_interval; // of video |
114 | 114 | int m_refresh_interval; // of display |
115 | 115 | bool m_interlaced; |
116 | | struct timeval m_nexttrigger; |
| 116 | int64_t m_nexttrigger; |
117 | 117 | int m_delay; |
| 118 | bool m_synchronous; |
118 | 119 | |
119 | 120 | static int m_forceskip; |
120 | 121 | }; |
… |
… |
class DRMVideoSync : public VideoSync |
136 | 137 | QString getName(void) const { return QString("DRM"); } |
137 | 138 | bool TryInit(void); |
138 | 139 | void Start(void); |
139 | | void WaitForFrame(int sync_delay); |
| 140 | int WaitForFrame(int sync_delay); |
140 | 141 | void AdvanceTrigger(void); |
141 | 142 | |
142 | 143 | private: |
… |
… |
class OpenGLVideoSync : public VideoSync |
178 | 179 | QString getName(void) const { return QString("SGI OpenGL"); } |
179 | 180 | bool TryInit(void); |
180 | 181 | void Start(void); |
181 | | void WaitForFrame(int sync_delay); |
| 182 | int WaitForFrame(int sync_delay); |
182 | 183 | void AdvanceTrigger(void); |
183 | 184 | |
184 | 185 | private: |
… |
… |
class RTCVideoSync : public VideoSync |
207 | 208 | |
208 | 209 | QString getName(void) const { return QString("RTC"); } |
209 | 210 | bool TryInit(void); |
210 | | void WaitForFrame(int sync_delay); |
| 211 | int WaitForFrame(int sync_delay); |
211 | 212 | void AdvanceTrigger(void); |
212 | 213 | |
213 | 214 | private: |
… |
… |
class VDPAUVideoSync : public VideoSync |
228 | 229 | |
229 | 230 | QString getName(void) const { return QString("VDPAU"); } |
230 | 231 | bool TryInit(void); |
231 | | void WaitForFrame(int sync_delay); |
| 232 | int WaitForFrame(int sync_delay); |
232 | 233 | void AdvanceTrigger(void); |
233 | 234 | |
234 | 235 | private: |
… |
… |
class BusyWaitVideoSync : public VideoSync |
256 | 257 | |
257 | 258 | QString getName(void) const { return QString("USleep with busy wait"); } |
258 | 259 | bool TryInit(void); |
259 | | void WaitForFrame(int sync_delay); |
| 260 | int WaitForFrame(int sync_delay); |
260 | 261 | void AdvanceTrigger(void); |
261 | 262 | |
262 | 263 | private: |
… |
… |
class USleepVideoSync : public VideoSync |
284 | 285 | |
285 | 286 | QString getName(void) const { return QString("USleep"); } |
286 | 287 | bool TryInit(void); |
287 | | void WaitForFrame(int sync_delay); |
| 288 | int WaitForFrame(int sync_delay); |
288 | 289 | void AdvanceTrigger(void); |
289 | 290 | }; |
290 | 291 | #endif /* VSYNC_H_INCLUDED */ |