commit 16d565b0b2ab5f059aa5ac655d8e852e1043ef79
Author: Mark Spieth <mspieth@digivation.com.au>
Date: Tue Apr 27 07:51:51 2010 +1000
smoother vsync with predictive frame skipping
diff --git a/mythtv/libs/libmyth/audiooutput.h b/mythtv/libs/libmyth/audiooutput.h
index e89147c..2889402 100644
a
|
b
|
class MPUBLIC AudioOutput : public VolumeBase, public OutputListeners |
43 | 43 | |
44 | 44 | // timecode is in milliseconds. |
45 | 45 | // Return true if all samples were written, false if none. |
46 | | virtual bool AddSamples(char *buffer, int samples, long long timecode) = 0; |
47 | | virtual bool AddSamples(char *buffers[], int samples, long long timecode) = 0; |
| 46 | virtual bool AddSamples(char *buffer, int samples, int64_t timecode) = 0; |
| 47 | virtual bool AddSamples(char *buffers[], int samples, int64_t timecode) = 0; |
48 | 48 | |
49 | | virtual void SetTimecode(long long timecode) = 0; |
| 49 | virtual void SetTimecode(int64_t timecode) = 0; |
50 | 50 | virtual bool IsPaused(void) const = 0; |
51 | 51 | virtual void Pause(bool paused) = 0; |
52 | 52 | |
53 | 53 | // Wait for all data to finish playing |
54 | 54 | virtual void Drain(void) = 0; |
55 | 55 | |
56 | | virtual int GetAudiotime(void) = 0; |
| 56 | virtual int64_t GetAudiotime(void) = 0; |
57 | 57 | |
58 | 58 | /// report amount of audio buffered in milliseconds. |
59 | | virtual int GetAudioBufferedTime(void) { return 0; } |
| 59 | virtual int64_t GetAudioBufferedTime(void) { return 0; } |
60 | 60 | |
61 | 61 | virtual void SetSourceBitrate(int ) { } |
62 | 62 | |
diff --git a/mythtv/libs/libmyth/audiooutputbase.cpp b/mythtv/libs/libmyth/audiooutputbase.cpp
index de6e0b0..f62eb06 100644
a
|
b
|
|
19 | 19 | #define LOC QString("AO: ") |
20 | 20 | #define LOC_ERR QString("AO, ERROR: ") |
21 | 21 | |
| 22 | #define EFF_FACTOR_F 100000.0 |
| 23 | #define EFF_FACTOR_I 100000 |
| 24 | #define EFF_FACTOR_LL 100000LL |
| 25 | |
22 | 26 | AudioOutputBase::AudioOutputBase(const AudioSettings &settings) : |
23 | 27 | // protected |
24 | 28 | effdsp(0), effdspstretched(0), |
… |
… |
AudioOutputBase::AudioOutputBase(const AudioSettings &settings) : |
31 | 35 | audio_passthru_device(settings.GetPassthruDevice()), |
32 | 36 | audio_passthru(false), audio_enc(false), |
33 | 37 | audio_reenc(false), audio_stretchfactor(1.0f), |
| 38 | eff_audio_stretchfactor(10000), |
34 | 39 | |
35 | 40 | source(settings.source), killaudio(false), |
36 | 41 | |
… |
… |
void AudioOutputBase::SetStretchFactorLocked(float laudio_stretchfactor) |
142 | 147 | if ((audio_stretchfactor != laudio_stretchfactor) || !pSoundStretch) |
143 | 148 | { |
144 | 149 | audio_stretchfactor = laudio_stretchfactor; |
| 150 | eff_audio_stretchfactor = (int)(EFF_FACTOR_F * laudio_stretchfactor); |
145 | 151 | if (pSoundStretch) |
146 | 152 | { |
147 | 153 | VERBOSE(VB_GENERAL, LOC + QString("Changing time stretch to %1") |
… |
… |
void AudioOutputBase::Reset() |
527 | 533 | gettimeofday(&audiotime_updated, NULL); |
528 | 534 | } |
529 | 535 | |
530 | | void AudioOutputBase::SetTimecode(long long timecode) |
| 536 | void AudioOutputBase::SetTimecode(int64_t timecode) |
531 | 537 | { |
532 | 538 | QMutexLocker locker(&audio_buflock); |
533 | 539 | audbuf_timecode = timecode; |
534 | | samples_buffered = (long long)((timecode * effdsp) / 100000.0); |
| 540 | samples_buffered = (int64_t)((timecode * effdsp) / EFF_FACTOR_I); |
535 | 541 | } |
536 | 542 | |
537 | 543 | void AudioOutputBase::SetEffDsp(int dsprate) |
… |
… |
int AudioOutputBase::audiofree(bool use_lock) |
573 | 579 | be is kAudioRingBufferSize - 1. */ |
574 | 580 | } |
575 | 581 | |
576 | | int AudioOutputBase::GetAudiotime(void) |
| 582 | int64_t AudioOutputBase::GetAudiotime(void) |
577 | 583 | { |
578 | 584 | /* Returns the current timecode of audio leaving the soundcard, based |
579 | 585 | on the 'audiotime' computed earlier, and the delay since it was computed. |
… |
… |
int AudioOutputBase::GetAudiotime(void) |
583 | 589 | The reason is that computing 'audiotime' requires acquiring the audio |
584 | 590 | lock, which the video thread should not do. So, we call 'SetAudioTime()' |
585 | 591 | from the audio thread, and then call this from the video thread. */ |
586 | | long long ret; |
| 592 | int64_t ret; |
587 | 593 | struct timeval now; |
588 | 594 | |
589 | 595 | if (audiotime == 0) |
… |
… |
int AudioOutputBase::GetAudiotime(void) |
595 | 601 | |
596 | 602 | ret = (now.tv_sec - audiotime_updated.tv_sec) * 1000; |
597 | 603 | ret += (now.tv_usec - audiotime_updated.tv_usec) / 1000; |
598 | | ret = (long long)(ret * audio_stretchfactor); |
| 604 | ret = (int64_t)(ret * audio_stretchfactor); |
599 | 605 | |
600 | 606 | #if 1 |
601 | 607 | VERBOSE(VB_AUDIO+VB_TIMESTAMP, |
… |
… |
int AudioOutputBase::GetAudiotime(void) |
610 | 616 | |
611 | 617 | ret += audiotime; |
612 | 618 | |
613 | | return (int)ret; |
| 619 | return ret; |
614 | 620 | } |
615 | 621 | |
616 | 622 | void AudioOutputBase::SetAudiotime(void) |
… |
… |
void AudioOutputBase::SetAudiotime(void) |
618 | 624 | if (audbuf_timecode == 0) |
619 | 625 | return; |
620 | 626 | |
621 | | int soundcard_buffer = 0; |
622 | | int totalbuffer; |
| 627 | int64_t soundcard_buffer = 0; |
| 628 | int64_t totalsamples_stretched; |
| 629 | int64_t totalsamples_unstretched = 0; |
623 | 630 | |
624 | 631 | /* We want to calculate 'audiotime', which is the timestamp of the audio |
625 | 632 | which is leaving the sound card at this instant. |
… |
… |
void AudioOutputBase::SetAudiotime(void) |
642 | 649 | QMutexLocker lock2(&avsync_lock); |
643 | 650 | |
644 | 651 | soundcard_buffer = GetBufferedOnSoundcard(); // bytes |
645 | | totalbuffer = audiolen(false) + soundcard_buffer; |
646 | | |
647 | | // include algorithmic latencies |
648 | | if (pSoundStretch) |
649 | | totalbuffer += (int)((pSoundStretch->numUnprocessedSamples() * |
650 | | audio_bytes_per_sample) / audio_stretchfactor); |
| 652 | // major post-stretched buffer contents |
| 653 | totalsamples_stretched = (audiolen(false) + soundcard_buffer) / audio_bytes_per_sample; |
651 | 654 | |
| 655 | // include algorithmic pre-stretch latencies |
652 | 656 | if (upmixer && needs_upmix) |
653 | | totalbuffer += upmixer->sampleLatency() * audio_bytes_per_sample; |
| 657 | totalsamples_unstretched += upmixer->sampleLatency(); |
654 | 658 | |
| 659 | if (pSoundStretch) |
| 660 | totalsamples_unstretched += pSoundStretch->numUnprocessedSamples(); |
| 661 | |
| 662 | // include algorithmic post-stretch latencies |
655 | 663 | if (encoder) |
656 | | totalbuffer += encoder->Buffered(); |
| 664 | // the input buffered data is still in audio_bytes_per_sample format |
| 665 | totalsamples_stretched += encoder->Buffered() / audio_bytes_per_sample; |
657 | 666 | |
658 | | audiotime = audbuf_timecode - (int)(totalbuffer * 100000.0 / |
659 | | (audio_bytes_per_sample * effdspstretched)); |
| 667 | // timecode is the stretch adjusted version |
| 668 | audiotime = audbuf_timecode - (int64_t)((totalsamples_unstretched * EFF_FACTOR_I + |
| 669 | totalsamples_stretched * eff_audio_stretchfactor ) / effdsp ); |
660 | 670 | |
661 | 671 | gettimeofday(&audiotime_updated, NULL); |
662 | 672 | #if 1 |
663 | 673 | VERBOSE(VB_AUDIO+VB_TIMESTAMP, |
664 | 674 | QString("SetAudiotime set=%1.%2, audt=%3 atc=%4 " |
665 | | "tb=%5 sb=%6 eds=%7 abps=%8 sf=%9") |
| 675 | "tss=%5 tsu=%6 sb=%7 eds=%8 abps=%9 sf=%10") |
666 | 676 | .arg(audiotime_updated.tv_sec).arg(audiotime_updated.tv_usec) |
667 | 677 | .arg(audiotime) |
668 | 678 | .arg(audbuf_timecode) |
669 | | .arg(totalbuffer) |
| 679 | .arg(totalsamples_stretched) |
| 680 | .arg(totalsamples_unstretched) |
670 | 681 | .arg(soundcard_buffer) |
671 | 682 | .arg(effdspstretched) |
672 | 683 | .arg(audio_bytes_per_sample) |
… |
… |
void AudioOutputBase::SetAudiotime(void) |
674 | 685 | #endif |
675 | 686 | } |
676 | 687 | |
677 | | int AudioOutputBase::GetAudioBufferedTime(void) |
| 688 | int64_t AudioOutputBase::GetAudioBufferedTime(void) |
678 | 689 | { |
679 | 690 | return audbuf_timecode - GetAudiotime(); |
680 | 691 | } |
… |
… |
void AudioOutputBase::_AdjustVolume(AudioDataType *buffer, int len, bool music) |
737 | 748 | } |
738 | 749 | |
739 | 750 | bool AudioOutputBase::AddSamples(char *buffers[], int samples, |
740 | | long long timecode) |
| 751 | int64_t timecode) |
741 | 752 | { |
742 | 753 | // NOTE: This function is not threadsafe |
743 | 754 | int afree = audiofree(true); |
… |
… |
bool AudioOutputBase::AddSamples(char *buffers[], int samples, |
805 | 816 | return true; |
806 | 817 | } |
807 | 818 | |
808 | | bool AudioOutputBase::AddSamples(char *buffer, int samples, long long timecode) |
| 819 | bool AudioOutputBase::AddSamples(char *buffer, int samples, int64_t timecode) |
809 | 820 | { |
810 | 821 | // NOTE: This function is not threadsafe |
811 | 822 | |
… |
… |
void *AudioOutputBase::_MonoToStereo(AudioDataType *s1, AudioDataType *s2, int s |
936 | 947 | return s2; |
937 | 948 | } |
938 | 949 | |
939 | | void AudioOutputBase::_AddSamples(void *buffer, bool interleaved, int samples, |
940 | | long long timecode) |
| 950 | void AudioOutputBase::_AddSamples(void *buffer, bool interleaved, int in_samples, |
| 951 | int64_t timecode) |
941 | 952 | { |
942 | 953 | int len; // = samples * audio_bytes_per_sample; |
943 | 954 | int audio_bytes = audio_bits / 8; |
944 | 955 | int org_waud = waud; |
| 956 | int samples = in_samples; |
945 | 957 | |
946 | 958 | int afree = audiofree(false); |
947 | 959 | |
… |
… |
void AudioOutputBase::_AddSamples(void *buffer, bool interleaved, int samples, |
980 | 992 | int out_samples = 0; |
981 | 993 | org_waud = waud; |
982 | 994 | int step = (interleaved)?source_audio_channels:1; |
983 | | |
| 995 | |
984 | 996 | for (int itemp = 0; itemp < samples; ) |
985 | 997 | { |
986 | 998 | if (audio_bytes == 2) |
… |
… |
void AudioOutputBase::_AddSamples(void *buffer, bool interleaved, int samples, |
1065 | 1077 | } |
1066 | 1078 | } |
1067 | 1079 | |
1068 | | if (samples <= 0) |
1069 | | return; |
1070 | | |
1071 | | if (pSoundStretch) |
| 1080 | if (samples > 0) |
1072 | 1081 | { |
1073 | | // does not change the timecode, only the number of samples |
1074 | | // back to orig pos |
1075 | | org_waud = waud; |
1076 | | int bdiff = kAudioRingBufferSize - org_waud; |
1077 | | int nSamplesToEnd = bdiff/abps; |
1078 | | if (bdiff < len) |
1079 | | { |
1080 | | pSoundStretch->putSamples((soundtouch::SAMPLETYPE*) |
1081 | | (audiobuffer + |
1082 | | org_waud), nSamplesToEnd); |
1083 | | pSoundStretch->putSamples((soundtouch::SAMPLETYPE*)audiobuffer, |
1084 | | (len - bdiff) / abps); |
1085 | | } |
1086 | | else |
1087 | | { |
1088 | | pSoundStretch->putSamples((soundtouch::SAMPLETYPE*) |
1089 | | (audiobuffer + org_waud), |
1090 | | len / abps); |
1091 | | } |
1092 | 1082 | |
1093 | | int nSamples = pSoundStretch->numSamples(); |
1094 | | len = WaitForFreeSpace(nSamples); |
1095 | | |
1096 | | while ((nSamples = pSoundStretch->numSamples())) |
| 1083 | if (pSoundStretch) |
1097 | 1084 | { |
1098 | | if (nSamples > nSamplesToEnd) |
1099 | | nSamples = nSamplesToEnd; |
1100 | | |
1101 | | nSamples = pSoundStretch->receiveSamples( |
1102 | | (soundtouch::SAMPLETYPE*) |
1103 | | (audiobuffer + org_waud), nSamples |
1104 | | ); |
1105 | | |
1106 | | if (nSamples == nSamplesToEnd) { |
1107 | | org_waud = 0; |
1108 | | nSamplesToEnd = kAudioRingBufferSize/abps; |
| 1085 | // does not change the timecode, only the number of samples |
| 1086 | // back to orig pos |
| 1087 | org_waud = waud; |
| 1088 | int bdiff = kAudioRingBufferSize - org_waud; |
| 1089 | int nSamplesToEnd = bdiff/abps; |
| 1090 | if (bdiff < len) |
| 1091 | { |
| 1092 | pSoundStretch->putSamples((soundtouch::SAMPLETYPE*) |
| 1093 | (audiobuffer + |
| 1094 | org_waud), nSamplesToEnd); |
| 1095 | pSoundStretch->putSamples((soundtouch::SAMPLETYPE*)audiobuffer, |
| 1096 | (len - bdiff) / abps); |
1109 | 1097 | } |
1110 | | else { |
1111 | | org_waud += nSamples * abps; |
1112 | | nSamplesToEnd -= nSamples; |
| 1098 | else |
| 1099 | { |
| 1100 | pSoundStretch->putSamples((soundtouch::SAMPLETYPE*) |
| 1101 | (audiobuffer + org_waud), |
| 1102 | len / abps); |
1113 | 1103 | } |
1114 | | } |
1115 | | } |
1116 | 1104 | |
1117 | | if (internal_vol && SWVolume()) |
1118 | | { |
1119 | | int bdiff = kAudioRingBufferSize - waud; |
1120 | | bool music = (timecode < 1); |
| 1105 | int nSamples = pSoundStretch->numSamples(); |
| 1106 | len = WaitForFreeSpace(nSamples); |
1121 | 1107 | |
1122 | | if (bdiff < len) |
1123 | | { |
1124 | | AdjustVolume(audiobuffer + waud, bdiff, music); |
1125 | | AdjustVolume(audiobuffer, len - bdiff, music); |
1126 | | } |
1127 | | else |
1128 | | AdjustVolume(audiobuffer + waud, len, music); |
1129 | | } |
| 1108 | while ((nSamples = pSoundStretch->numSamples())) |
| 1109 | { |
| 1110 | if (nSamples > nSamplesToEnd) |
| 1111 | nSamples = nSamplesToEnd; |
1130 | 1112 | |
1131 | | // Encode to AC-3? |
1132 | | if (encoder) |
1133 | | { |
1134 | | org_waud = waud; |
1135 | | int bdiff = kAudioRingBufferSize - org_waud; |
1136 | | int to_get = 0; |
| 1113 | nSamples = pSoundStretch->receiveSamples( |
| 1114 | (soundtouch::SAMPLETYPE*) |
| 1115 | (audiobuffer + org_waud), nSamples |
| 1116 | ); |
1137 | 1117 | |
1138 | | if (bdiff < len) |
| 1118 | if (nSamples == nSamplesToEnd) { |
| 1119 | org_waud = 0; |
| 1120 | nSamplesToEnd = kAudioRingBufferSize/abps; |
| 1121 | } |
| 1122 | else { |
| 1123 | org_waud += nSamples * abps; |
| 1124 | nSamplesToEnd -= nSamples; |
| 1125 | } |
| 1126 | } |
| 1127 | } |
| 1128 | |
| 1129 | if (internal_vol && SWVolume()) |
1139 | 1130 | { |
1140 | | encoder->Encode(audiobuffer + org_waud, bdiff); |
1141 | | to_get = encoder->Encode(audiobuffer, len - bdiff); |
| 1131 | int bdiff = kAudioRingBufferSize - waud; |
| 1132 | bool music = (timecode < 1); |
| 1133 | |
| 1134 | if (bdiff < len) |
| 1135 | { |
| 1136 | AdjustVolume(audiobuffer + waud, bdiff, music); |
| 1137 | AdjustVolume(audiobuffer, len - bdiff, music); |
| 1138 | } |
| 1139 | else |
| 1140 | AdjustVolume(audiobuffer + waud, len, music); |
1142 | 1141 | } |
1143 | | else |
1144 | | to_get = encoder->Encode(audiobuffer + org_waud, len); |
1145 | 1142 | |
1146 | | if (to_get > 0) |
| 1143 | // Encode to AC-3? |
| 1144 | if (encoder) |
1147 | 1145 | { |
1148 | | if (to_get >= bdiff) |
| 1146 | org_waud = waud; |
| 1147 | int bdiff = kAudioRingBufferSize - org_waud; |
| 1148 | int to_get = 0; |
| 1149 | |
| 1150 | if (bdiff < len) |
1149 | 1151 | { |
1150 | | encoder->GetFrames(audiobuffer + org_waud, bdiff); |
1151 | | to_get -= bdiff; |
1152 | | org_waud = 0; |
| 1152 | encoder->Encode(audiobuffer + org_waud, bdiff); |
| 1153 | to_get = encoder->Encode(audiobuffer, len - bdiff); |
1153 | 1154 | } |
1154 | | if (to_get > 0) |
1155 | | encoder->GetFrames(audiobuffer + org_waud, to_get); |
| 1155 | else |
| 1156 | to_get = encoder->Encode(audiobuffer + org_waud, len); |
| 1157 | |
| 1158 | if (to_get > 0) |
| 1159 | { |
| 1160 | if (to_get >= bdiff) |
| 1161 | { |
| 1162 | encoder->GetFrames(audiobuffer + org_waud, bdiff); |
| 1163 | to_get -= bdiff; |
| 1164 | org_waud = 0; |
| 1165 | } |
| 1166 | if (to_get > 0) |
| 1167 | encoder->GetFrames(audiobuffer + org_waud, to_get); |
1156 | 1168 | |
1157 | | org_waud += to_get; |
| 1169 | org_waud += to_get; |
| 1170 | } |
1158 | 1171 | } |
1159 | | } |
1160 | 1172 | |
1161 | | waud = org_waud; |
1162 | | lastaudiolen = audiolen(false); |
| 1173 | waud = org_waud; |
| 1174 | lastaudiolen = audiolen(false); |
| 1175 | } |
1163 | 1176 | |
1164 | 1177 | if (timecode < 0) |
1165 | 1178 | // mythmusic doesn't give timestamps.. |
1166 | | timecode = (int)((samples_buffered * 100000.0) / effdsp); |
| 1179 | timecode = (int64_t)((samples_buffered * EFF_FACTOR_I) / effdsp); |
1167 | 1180 | |
1168 | | samples_buffered += samples; |
| 1181 | samples_buffered += in_samples; |
1169 | 1182 | |
1170 | 1183 | /* we want the time at the end -- but the file format stores |
1171 | 1184 | time at the start of the chunk. */ |
1172 | 1185 | // even with timestretch, timecode is still calculated from original |
1173 | 1186 | // sample count |
1174 | | audbuf_timecode = timecode + (int)((samples * 100000.0) / effdsp); |
| 1187 | audbuf_timecode = timecode + (int64_t)((in_samples * EFF_FACTOR_I) / effdsp); |
1175 | 1188 | } |
1176 | 1189 | |
1177 | 1190 | void AudioOutputBase::Status() |
… |
… |
void AudioOutputBase::OutputAudioLoop(void) |
1208 | 1221 | unsigned char *zeros = new unsigned char[fragment_size]; |
1209 | 1222 | unsigned char *fragment = new unsigned char[fragment_size]; |
1210 | 1223 | |
| 1224 | // to reduce startup latency, write silence in 8ms chunks |
| 1225 | int zero_fragment_size = (int)(0.008*audio_samplerate/audio_channels); |
| 1226 | zero_fragment_size *= audio_channels * audio_bits / 16; // make sure it's a multiple of audio_channels |
| 1227 | if (zero_fragment_size > fragment_size) |
| 1228 | zero_fragment_size = fragment_size; |
| 1229 | |
1211 | 1230 | bzero(zeros, fragment_size); |
1212 | 1231 | last_space_on_soundcard = 0; |
1213 | 1232 | |
… |
… |
void AudioOutputBase::OutputAudioLoop(void) |
1240 | 1259 | |
1241 | 1260 | // only send zeros if card doesn't already have at least one |
1242 | 1261 | // fragment of zeros -dag |
1243 | | if (fragment_size >= soundcard_buffer_size - space_on_soundcard) |
| 1262 | if (zero_fragment_size >= soundcard_buffer_size - space_on_soundcard) |
1244 | 1263 | { |
1245 | | if (fragment_size <= space_on_soundcard) |
| 1264 | if (zero_fragment_size <= space_on_soundcard) |
1246 | 1265 | { |
1247 | | WriteAudio(zeros, fragment_size); |
| 1266 | WriteAudio(zeros, zero_fragment_size); |
1248 | 1267 | } |
1249 | 1268 | else |
1250 | 1269 | { |
… |
… |
void AudioOutputBase::OutputAudioLoop(void) |
1252 | 1271 | VERBOSE(VB_AUDIO+VB_TIMESTAMP, LOC + |
1253 | 1272 | QString("waiting for space on soundcard " |
1254 | 1273 | "to write zeros: have %1 need %2") |
1255 | | .arg(space_on_soundcard).arg(fragment_size)); |
| 1274 | .arg(space_on_soundcard).arg(zero_fragment_size)); |
1256 | 1275 | usleep(5000); |
1257 | 1276 | } |
1258 | 1277 | } |
diff --git a/mythtv/libs/libmyth/audiooutputbase.h b/mythtv/libs/libmyth/audiooutputbase.h
index 8d9e86b..5778694 100644
a
|
b
|
class AudioOutputBase : public AudioOutput, public QThread |
51 | 51 | int GetSWVolume(void); |
52 | 52 | |
53 | 53 | // timecode is in milliseconds. |
54 | | virtual bool AddSamples(char *buffer, int samples, long long timecode); |
55 | | virtual bool AddSamples(char *buffers[], int samples, long long timecode); |
| 54 | virtual bool AddSamples(char *buffer, int samples, int64_t timecode); |
| 55 | virtual bool AddSamples(char *buffers[], int samples, int64_t timecode); |
56 | 56 | |
57 | | virtual void SetTimecode(long long timecode); |
| 57 | virtual void SetTimecode(int64_t timecode); |
58 | 58 | virtual bool IsPaused(void) const { return audio_actually_paused; } |
59 | 59 | virtual void Pause(bool paused); |
60 | 60 | |
61 | 61 | // Wait for all data to finish playing |
62 | 62 | virtual void Drain(void); |
63 | 63 | |
64 | | virtual int GetAudiotime(void); |
65 | | virtual int GetAudioBufferedTime(void); |
| 64 | virtual int64_t GetAudiotime(void); |
| 65 | virtual int64_t GetAudioBufferedTime(void); |
66 | 66 | |
67 | 67 | // Send output events showing current progress |
68 | 68 | virtual void Status(void); |
… |
… |
class AudioOutputBase : public AudioOutput, public QThread |
100 | 100 | |
101 | 101 | int GetAudioData(unsigned char *buffer, int buf_size, bool fill_buffer); |
102 | 102 | |
103 | | void _AddSamples(void *buffer, bool interleaved, int samples, long long timecode); |
| 103 | void _AddSamples(void *buffer, bool interleaved, int samples, int64_t timecode); |
104 | 104 | |
105 | 105 | void OutputAudioLoop(void); |
106 | 106 | |
… |
… |
class AudioOutputBase : public AudioOutput, public QThread |
117 | 117 | |
118 | 118 | void SetStretchFactorLocked(float factor); |
119 | 119 | |
120 | | int GetBaseAudioTime() const { return audiotime; } |
121 | | int GetBaseAudBufTimeCode() const { return audbuf_timecode; } |
| 120 | int64_t GetBaseAudioTime() const { return audiotime; } |
| 121 | int64_t GetBaseAudBufTimeCode() const { return audbuf_timecode; } |
122 | 122 | soundtouch::SoundTouch *GetSoundStretch() const { return pSoundStretch; } |
123 | | void SetBaseAudioTime(const int inAudioTime) { audiotime = inAudioTime; } |
| 123 | void SetBaseAudioTime(const int64_t inAudioTime) { audiotime = inAudioTime; } |
124 | 124 | |
125 | 125 | protected: |
126 | 126 | int effdsp; // from the recorded stream |
… |
… |
class AudioOutputBase : public AudioOutput, public QThread |
143 | 143 | bool audio_reenc; |
144 | 144 | |
145 | 145 | float audio_stretchfactor; |
| 146 | int eff_audio_stretchfactor; // audio_stretchfactor scaled by EFF_FACTOR_I (100000); NOTE(review): ctor inits this to 10000 — confirm intended scale |
146 | 147 | AudioOutputSource source; |
147 | 148 | |
148 | 149 | bool killaudio; |
… |
… |
class AudioOutputBase : public AudioOutput, public QThread |
186 | 187 | bool blocking; // do AddSamples calls block? |
187 | 188 | |
188 | 189 | int lastaudiolen; |
189 | | long long samples_buffered; |
| 190 | int64_t samples_buffered; |
190 | 191 | |
191 | 192 | bool audio_thread_exists; |
192 | 193 | |
… |
… |
class AudioOutputBase : public AudioOutput, public QThread |
203 | 204 | QMutex avsync_lock; |
204 | 205 | |
205 | 206 | /// timecode of audio leaving the soundcard (same units as timecodes) |
206 | | long long audiotime; |
| 207 | int64_t audiotime; |
207 | 208 | struct timeval audiotime_updated; // ... which was last updated at this time |
208 | 209 | |
209 | 210 | /* Audio circular buffer */ |
210 | 211 | int raud, waud; /* read and write positions */ |
211 | 212 | /// timecode of audio most recently placed into buffer |
212 | | long long audbuf_timecode; |
| 213 | int64_t audbuf_timecode; |
213 | 214 | |
214 | 215 | int numlowbuffer; |
215 | 216 | |
diff --git a/mythtv/libs/libmythtv/NuppelVideoPlayer.cpp b/mythtv/libs/libmythtv/NuppelVideoPlayer.cpp
index cf1e690..bcda6ea 100644
a
|
b
|
NuppelVideoPlayer::NuppelVideoPlayer(bool muted) |
206 | 206 | videosync(NULL), delay(0), |
207 | 207 | vsynctol(30/4), avsync_delay(0), |
208 | 208 | avsync_adjustment(0), avsync_avg(0), |
209 | | avsync_oldavg(0), refreshrate(0), |
| 209 | avsync_oldavg(0), |
| 210 | avsync_predictor(0), avsync_predictor_enabled(false), |
| 211 | refreshrate(0), |
210 | 212 | lastsync(false), m_playing_slower(false), |
211 | 213 | m_stored_audio_stretchfactor(1.0), |
212 | 214 | audio_paused(false), |
… |
… |
NuppelVideoPlayer::NuppelVideoPlayer(bool muted) |
238 | 240 | db_prefer708 = gContext->GetNumSetting("Prefer708Captions", 1); |
239 | 241 | autocommercialskip = (CommSkipMode) |
240 | 242 | gContext->GetNumSetting("AutoCommercialSkip", kCommSkipOff); |
| 243 | usesmoothsync = gContext->GetNumSetting("UseSmoothSync", 1) != 0; |
241 | 244 | |
242 | 245 | lastIgnoredManualSkip = QDateTime::currentDateTime().addSecs(-10); |
243 | 246 | |
… |
… |
void NuppelVideoPlayer::SetVideoParams(int width, int height, double fps, |
1118 | 1121 | video_frame_rate = fps; |
1119 | 1122 | float temp_speed = (play_speed == 0.0f) ? |
1120 | 1123 | audio_stretchfactor : play_speed; |
1121 | | frame_interval = (int)(1000000.0f / video_frame_rate / temp_speed); |
| 1124 | SetFrameInterval(kScan_Progressive, 1.0 / (video_frame_rate * temp_speed)); |
1122 | 1125 | } |
1123 | 1126 | |
1124 | 1127 | if (videoOutput) |
… |
… |
float NuppelVideoPlayer::WarpFactor(void) |
2310 | 2313 | return divergence; |
2311 | 2314 | } |
2312 | 2315 | |
| 2316 | void NuppelVideoPlayer::SetFrameInterval(FrameScanType scan, double frame_period) |
| 2317 | { |
| 2318 | frame_interval = (int)(1000000.0f * frame_period + 0.5f); |
| 2319 | avsync_predictor = 0; |
| 2320 | avsync_predictor_enabled = false; |
| 2321 | |
| 2322 | VERBOSE(VB_PLAYBACK, LOC + QString("SetFrameInterval ps:%1 scan:%2 usesmoothsync:%3") |
| 2323 | .arg(play_speed).arg(scan).arg(usesmoothsync) |
| 2324 | ); |
| 2325 | //if (play_speed <= 1 || play_speed > 2 || scan != kScan_Progressive || !usesmoothsync) |
| 2326 | if (play_speed < 1 || play_speed > 2 || refreshrate <= 0 || !usesmoothsync) |
| 2327 | return; |
| 2328 | |
| 2329 | avsync_predictor_enabled = ((frame_interval-(frame_interval/200)) < refreshrate); |
| 2330 | } |
| 2331 | |
| 2332 | void NuppelVideoPlayer::ResetAVSync(void) |
| 2333 | { |
| 2334 | avsync_avg = 0; |
| 2335 | avsync_oldavg = 0; |
| 2336 | avsync_predictor = 0; |
| 2337 | prevtc = 0; |
| 2338 | warpfactor = 1.0f; |
| 2339 | warpfactor_avg = 1.0f; |
| 2340 | VERBOSE(VB_PLAYBACK+VB_TIMESTAMP, LOC + "A/V sync reset"); |
| 2341 | } |
| 2342 | |
2313 | 2343 | void NuppelVideoPlayer::InitAVSync(void) |
2314 | 2344 | { |
2315 | 2345 | videosync->Start(); |
… |
… |
void NuppelVideoPlayer::InitAVSync(void) |
2331 | 2361 | VERBOSE(VB_GENERAL, msg); |
2332 | 2362 | msg = QString("Refresh rate: %1, frame interval: %2") |
2333 | 2363 | .arg(refreshrate).arg(frame_interval); |
2334 | | VERBOSE(VB_PLAYBACK, msg); |
| 2364 | VERBOSE(VB_PLAYBACK, LOC + msg); |
| 2365 | |
| 2366 | SetFrameInterval(m_scan, 1.0 / (video_frame_rate * play_speed)); |
2335 | 2367 | |
2336 | 2368 | // try to get preferential scheduling, but ignore if we fail to. |
2337 | 2369 | myth_nice(-19); |
2338 | 2370 | } |
2339 | 2371 | } |
2340 | 2372 | |
| 2373 | int64_t NuppelVideoPlayer::AVSyncGetAudiotime(void) |
| 2374 | { |
| 2375 | int64_t currentaudiotime = 0; |
| 2376 | audio_lock.lock(); |
| 2377 | if (audioOutput && normal_speed) |
| 2378 | { |
| 2379 | currentaudiotime = audioOutput->GetAudiotime(); |
| 2380 | } |
| 2381 | audio_lock.unlock(); |
| 2382 | return currentaudiotime; |
| 2383 | } |
| 2384 | |
2341 | 2385 | void NuppelVideoPlayer::AVSync(void) |
2342 | 2386 | { |
2343 | 2387 | float diverge = 0.0f; |
| 2388 | int vsync_delay_clock = 0; |
| 2389 | int64_t currentaudiotime = 0; |
| 2390 | |
2344 | 2391 | // attempt to reduce fps for standalone PIP |
2345 | 2392 | if (player_ctx->IsPIP() && framesPlayed % 2) |
2346 | 2393 | { |
… |
… |
void NuppelVideoPlayer::AVSync(void) |
2380 | 2427 | ps = kScan_Progressive; |
2381 | 2428 | |
2382 | 2429 | bool dropframe = false; |
| 2430 | QString dbg; |
| 2431 | |
| 2432 | if (avsync_predictor_enabled) |
| 2433 | { |
| 2434 | avsync_predictor += frame_interval; |
| 2435 | if (avsync_predictor >= refreshrate) |
| 2436 | { |
| 2437 | int refreshperiodsinframe = avsync_predictor/refreshrate; |
| 2438 | avsync_predictor -= refreshrate * refreshperiodsinframe; |
| 2439 | } |
| 2440 | else |
| 2441 | { |
| 2442 | dropframe = true; |
| 2443 | dbg = "A/V predict drop frame, "; |
| 2444 | } |
| 2445 | } |
| 2446 | |
2383 | 2447 | if (diverge < -MAXDIVERGE) |
2384 | 2448 | { |
2385 | 2449 | dropframe = true; |
2386 | 2450 | // If video is way behind of audio, adjust for it... |
2387 | | QString dbg = QString("Video is %1 frames behind audio (too slow), ") |
| 2451 | dbg = QString("Video is %1 frames behind audio (too slow), ") |
2388 | 2452 | .arg(-diverge); |
| 2453 | } |
2389 | 2454 | |
| 2455 | if (dropframe) |
| 2456 | { |
2390 | 2457 | // Reset A/V Sync |
2391 | 2458 | lastsync = true; |
2392 | 2459 | |
| 2460 | currentaudiotime = AVSyncGetAudiotime(); |
| 2461 | |
2393 | 2462 | if (buffer && !using_null_videoout && |
2394 | 2463 | videoOutput->hasHWAcceleration() && |
2395 | 2464 | !videoOutput->IsSyncLocked()) |
… |
… |
void NuppelVideoPlayer::AVSync(void) |
2414 | 2483 | if (buffer) |
2415 | 2484 | videoOutput->PrepareFrame(buffer, ps); |
2416 | 2485 | |
2417 | | VERBOSE(VB_PLAYBACK|VB_TIMESTAMP, QString("AVSync waitforframe %1 %2") |
| 2486 | VERBOSE(VB_PLAYBACK|VB_TIMESTAMP, LOC + QString("AVSync waitforframe %1 %2") |
2418 | 2487 | .arg(avsync_adjustment).arg(m_double_framerate)); |
2419 | | videosync->WaitForFrame(avsync_adjustment + repeat_delay); |
2420 | | VERBOSE(VB_PLAYBACK|VB_TIMESTAMP, "AVSync show"); |
| 2488 | vsync_delay_clock = videosync->WaitForFrame(avsync_adjustment + repeat_delay); |
| 2489 | currentaudiotime = AVSyncGetAudiotime(); |
| 2490 | VERBOSE(VB_PLAYBACK|VB_TIMESTAMP, LOC + "AVSync show"); |
2421 | 2491 | if (!resetvideo) |
2422 | 2492 | videoOutput->Show(ps); |
2423 | 2493 | |
2424 | 2494 | if (videoOutput->IsErrored()) |
2425 | 2495 | { |
2426 | | VERBOSE(VB_IMPORTANT, "NVP: Error condition detected " |
| 2496 | VERBOSE(VB_IMPORTANT, LOC + "Error condition detected " |
2427 | 2497 | "in videoOutput after Show(), aborting playback."); |
2428 | 2498 | SetErrored(QObject::tr("Serious error detected in Video Output")); |
2429 | 2499 | return; |
… |
… |
void NuppelVideoPlayer::AVSync(void) |
2459 | 2529 | // Display the second field |
2460 | 2530 | videosync->AdvanceTrigger(); |
2461 | 2531 | #ifdef NEW_AVSYNC |
2462 | | videosync->WaitForFrame(avsync_adjustment); |
| 2532 | vsync_delay_clock = videosync->WaitForFrame(avsync_adjustment); |
2463 | 2533 | #else |
2464 | | videosync->WaitForFrame(0); |
| 2534 | vsync_delay_clock = videosync->WaitForFrame(0); |
2465 | 2535 | #endif |
2466 | 2536 | if (!resetvideo) |
2467 | 2537 | { |
… |
… |
void NuppelVideoPlayer::AVSync(void) |
2472 | 2542 | repeat_delay = frame_interval * buffer->repeat_pict * 0.5; |
2473 | 2543 | |
2474 | 2544 | if (repeat_delay) |
2475 | | VERBOSE(VB_TIMESTAMP, QString("A/V repeat_pict, adding %1 repeat " |
| 2545 | VERBOSE(VB_TIMESTAMP, LOC + QString("A/V repeat_pict, adding %1 repeat " |
2476 | 2546 | "delay").arg(repeat_delay)); |
2477 | 2547 | } |
2478 | 2548 | else |
2479 | 2549 | { |
2480 | | videosync->WaitForFrame(0); |
| 2550 | vsync_delay_clock = videosync->WaitForFrame(0); |
| 2551 | currentaudiotime = AVSyncGetAudiotime(); |
2481 | 2552 | } |
2482 | 2553 | |
2483 | 2554 | if (output_jmeter && output_jmeter->RecordCycleTime()) |
2484 | 2555 | { |
2485 | | VERBOSE(VB_PLAYBACK+VB_TIMESTAMP, QString("A/V avsync_delay: %1, " |
| 2556 | VERBOSE(VB_PLAYBACK+VB_TIMESTAMP, LOC + QString("A/V avsync_delay: %1, " |
2486 | 2557 | "avsync_avg: %2, warpfactor: %3, warpfactor_avg: %4") |
2487 | 2558 | .arg(avsync_delay / 1000).arg(avsync_avg / 1000) |
2488 | 2559 | .arg(warpfactor).arg(warpfactor_avg)); |
… |
… |
void NuppelVideoPlayer::AVSync(void) |
2498 | 2569 | // by cutting the frame rate in half for the length of this frame |
2499 | 2570 | |
2500 | 2571 | #ifdef NEW_AVSYNC |
2501 | | avsync_adjustment = refreshrate; |
| 2572 | //avsync_adjustment = refreshrate; |
| 2573 | avsync_adjustment = frame_interval; |
| 2574 | //avsync_adjustment = frame_interval*(((int)MAXDIVERGE)-1); |
2502 | 2575 | #else |
2503 | 2576 | avsync_adjustment = frame_interval; |
2504 | 2577 | #endif |
… |
… |
void NuppelVideoPlayer::AVSync(void) |
2508 | 2581 | "\t\t\tdoubling video frame interval to slow down.").arg(diverge)); |
2509 | 2582 | } |
2510 | 2583 | |
2511 | | audio_lock.lock(); |
2512 | 2584 | if (audioOutput && normal_speed) |
2513 | 2585 | { |
2514 | | long long currentaudiotime = audioOutput->GetAudiotime(); |
2515 | | audio_lock.unlock(); |
2516 | 2586 | #if 0 |
2517 | | VERBOSE(VB_PLAYBACK+VB_TIMESTAMP, QString( |
| 2587 | VERBOSE(VB_PLAYBACK+VB_TIMESTAMP, LOC + QString( |
2518 | 2588 | "A/V timecodes audio %1 video %2 frameinterval %3 " |
2519 | | "avdel %4 avg %5 tcoffset %6") |
| 2589 | "avdel %4 avg %5 tcoffset %6" |
| 2590 | " avp %7 avpen %8" |
| 2591 | " avdc %9" |
| 2592 | ) |
2520 | 2593 | .arg(currentaudiotime) |
2521 | 2594 | .arg(buffer->timecode) |
2522 | 2595 | .arg(frame_interval) |
2523 | | .arg(buffer->timecode - currentaudiotime) |
| 2596 | .arg(buffer->timecode - currentaudiotime - (int)(vsync_delay_clock*audio_stretchfactor+500)/1000) |
2524 | 2597 | .arg(avsync_avg) |
2525 | 2598 | .arg(tc_wrap[TC_AUDIO]) |
| 2599 | .arg(avsync_predictor) |
| 2600 | .arg(avsync_predictor_enabled) |
| 2601 | .arg(vsync_delay_clock) |
2526 | 2602 | ); |
2527 | 2603 | #endif |
2528 | 2604 | if (currentaudiotime != 0 && buffer->timecode != 0) |
2529 | 2605 | { // currentaudiotime == 0 after a seek |
2530 | 2606 | // The time at the start of this frame (ie, now) is given by |
2531 | 2607 | // last->timecode |
2532 | | int delta = (int)((buffer->timecode - prevtc)/play_speed) - (frame_interval / 1000); |
2533 | | prevtc = buffer->timecode; |
2534 | | //cerr << delta << " "; |
2535 | | |
2536 | | // If the timecode is off by a frame (dropped frame) wait to sync |
2537 | | if (delta > (int) frame_interval / 1200 && |
2538 | | delta < (int) frame_interval / 1000 * 3 && |
2539 | | prevrp == 0) |
| 2608 | if (prevtc != 0) |
2540 | 2609 | { |
2541 | | //cerr << "+ "; |
2542 | | videosync->AdvanceTrigger(); |
2543 | | if (m_double_framerate) |
| 2610 | int delta = (int)((buffer->timecode - prevtc)/play_speed) - (frame_interval / 1000); |
| 2611 | // If the timecode is off by a frame (dropped frame) wait to sync |
| 2612 | if (delta > (int) frame_interval / 1200 && |
| 2613 | delta < (int) frame_interval / 1000 * 3 && |
| 2614 | prevrp == 0) |
| 2615 | { |
| 2616 | //cerr << "+ "; |
| 2617 | VERBOSE(VB_PLAYBACK+VB_TIMESTAMP, LOC + QString("A/V delay %1").arg(delta)); |
2544 | 2618 | videosync->AdvanceTrigger(); |
| 2619 | if (m_double_framerate) |
| 2620 | videosync->AdvanceTrigger(); |
| 2621 | } |
2545 | 2622 | } |
| 2623 | prevtc = buffer->timecode; |
2546 | 2624 | prevrp = buffer->repeat_pict; |
2547 | 2625 | |
2548 | | avsync_delay = (buffer->timecode - currentaudiotime) * 1000;//usec |
| 2626 | avsync_delay = (buffer->timecode - currentaudiotime) * 1000 - (int)(vsync_delay_clock*audio_stretchfactor); //usec |
2549 | 2627 | // prevents major jitter when pts resets during dvd title |
2550 | 2628 | if (avsync_delay > 2000000 && player_ctx->buffer->isDVD()) |
2551 | 2629 | avsync_delay = 90000; |
… |
… |
void NuppelVideoPlayer::AVSync(void) |
2555 | 2633 | the video by one interlaced field (1/2 frame) */ |
2556 | 2634 | if (!lastsync) |
2557 | 2635 | { |
2558 | | if (avsync_avg > frame_interval * 3 / 2) |
| 2636 | if (avsync_delay > refreshrate) |
2559 | 2637 | { |
2560 | | avsync_adjustment = refreshrate; |
2561 | | lastsync = true; |
| 2638 | avsync_adjustment += refreshrate; |
| 2639 | //lastsync = true; |
| 2640 | VERBOSE(VB_PLAYBACK+VB_TIMESTAMP, LOC + "A/V avg high extend"); |
2562 | 2641 | } |
2563 | | else if (avsync_avg < 0 - frame_interval * 3 / 2) |
| 2642 | else if (avsync_delay < 0 - refreshrate) |
2564 | 2643 | { |
2565 | | avsync_adjustment = -refreshrate; |
2566 | | lastsync = true; |
| 2644 | avsync_adjustment -= refreshrate; |
| 2645 | //lastsync = true; |
| 2646 | VERBOSE(VB_PLAYBACK+VB_TIMESTAMP, LOC + "A/V avg high skip"); |
2567 | 2647 | } |
2568 | 2648 | } |
2569 | 2649 | else |
… |
… |
void NuppelVideoPlayer::AVSync(void) |
2571 | 2651 | } |
2572 | 2652 | else |
2573 | 2653 | { |
2574 | | avsync_avg = 0; |
2575 | | avsync_oldavg = 0; |
| 2654 | ResetAVSync(); |
2576 | 2655 | } |
2577 | 2656 | } |
2578 | 2657 | else |
2579 | | audio_lock.unlock(); |
| 2658 | { |
| 2659 | VERBOSE(VB_PLAYBACK+VB_TIMESTAMP, LOC + QString("A/V no sync proc ns:%1 ao:%2").arg(normal_speed).arg(audioOutput != NULL)); |
| 2660 | } |
2580 | 2661 | } |
2581 | 2662 | |
2582 | 2663 | void NuppelVideoPlayer::DisplayPauseFrame(void) |
… |
… |
void NuppelVideoPlayer::DoPause(void) |
4210 | 4291 | } |
4211 | 4292 | |
4212 | 4293 | float temp_speed = audio_stretchfactor; |
4213 | | frame_interval = (int)(1000000.0 * ffrew_skip / video_frame_rate / temp_speed); |
| 4294 | SetFrameInterval(m_scan, ffrew_skip / (video_frame_rate * temp_speed)); |
4214 | 4295 | VERBOSE(VB_PLAYBACK, QString("rate: %1 speed: %2 skip: %3 = interval %4") |
4215 | 4296 | .arg(video_frame_rate).arg(temp_speed) |
4216 | 4297 | .arg(ffrew_skip).arg(frame_interval)); |
… |
… |
void NuppelVideoPlayer::DoPlay(void) |
4272 | 4353 | ClearAfterSeek(); |
4273 | 4354 | } |
4274 | 4355 | |
4275 | | frame_interval = (int) (1000000.0f * ffrew_skip / video_frame_rate / |
4276 | | play_speed); |
| 4356 | SetFrameInterval(m_scan, ffrew_skip / (video_frame_rate * play_speed)); |
4277 | 4357 | |
4278 | 4358 | VERBOSE(VB_PLAYBACK, LOC + "DoPlay: " + |
4279 | 4359 | QString("rate: %1 speed: %2 skip: %3 => new interval %4") |
… |
… |
void NuppelVideoPlayer::ClearAfterSeek(bool clearvideobuffers) |
4671 | 4751 | savedAudioTimecodeOffset = 0; |
4672 | 4752 | } |
4673 | 4753 | |
| 4754 | ResetAVSync(); |
4674 | 4755 | SetPrebuffering(true); |
4675 | 4756 | audio_lock.lock(); |
4676 | 4757 | if (audioOutput) |
diff --git a/mythtv/libs/libmythtv/NuppelVideoPlayer.h b/mythtv/libs/libmythtv/NuppelVideoPlayer.h
index 33854f5..85e533a 100644
a
|
b
|
class MPUBLIC NuppelVideoPlayer : public CC608Reader, public CC708Reader |
511 | 511 | float WarpFactor(void); |
512 | 512 | void WrapTimecode(long long &timecode, TCTypes tc_type); |
513 | 513 | void InitAVSync(void); |
| 514 | void ResetAVSync(void); |
| 515 | int64_t AVSyncGetAudiotime(void); |
| 516 | void SetFrameInterval(FrameScanType scan, double speed); |
514 | 517 | void AVSync(void); |
515 | 518 | void FallbackDeint(void); |
516 | 519 | void CheckExtraAudioDecode(void); |
… |
… |
class MPUBLIC NuppelVideoPlayer : public CC608Reader, public CC708Reader |
797 | 800 | int avsync_adjustment; |
798 | 801 | int avsync_avg; |
799 | 802 | int avsync_oldavg; |
| 803 | bool usesmoothsync; |
| 804 | int avsync_predictor; |
| 805 | bool avsync_predictor_enabled; |
800 | 806 | int refreshrate; |
801 | 807 | bool lastsync; |
802 | 808 | bool m_playing_slower; |
diff --git a/mythtv/libs/libmythtv/avformatdecoder.cpp b/mythtv/libs/libmythtv/avformatdecoder.cpp
index edf3d59..f4d6f9c 100644
a
|
b
|
AvFormatDecoder::AvFormatDecoder(NuppelVideoPlayer *parent, |
481 | 481 | start_code_state(0xffffffff), |
482 | 482 | lastvpts(0), lastapts(0), |
483 | 483 | lastccptsu(0), |
| 484 | firstvpts(0), firstvptsinuse(false), |
484 | 485 | using_null_videoout(use_null_videoout), |
485 | 486 | video_codec_id(kCodec_NONE), |
486 | 487 | no_hardware_decoders(no_hardware_decode), |
… |
… |
void AvFormatDecoder::SeekReset(long long newKey, uint skipFrames, |
929 | 930 | if (decoded_video_frame) |
930 | 931 | GetNVP()->DiscardVideoFrame(decoded_video_frame); |
931 | 932 | } |
| 933 | |
| 934 | if (doflush) |
| 935 | { |
| 936 | firstvpts = 0; |
| 937 | firstvptsinuse = true; |
| 938 | } |
932 | 939 | } |
933 | 940 | |
934 | 941 | void AvFormatDecoder::Reset(bool reset_video_data, bool seek_reset) |
… |
… |
void AvFormatDecoder::MpegPreProcessPkt(AVStream *stream, AVPacket *pkt) |
2922 | 2929 | |
2923 | 2930 | gopset = false; |
2924 | 2931 | prevgoppos = 0; |
| 2932 | firstvpts = |
2925 | 2933 | lastapts = lastvpts = lastccptsu = 0; |
| 2934 | firstvptsinuse = true; |
2926 | 2935 | |
2927 | 2936 | // fps debugging info |
2928 | 2937 | float avFPS = normalized_fps(stream, context); |
… |
… |
bool AvFormatDecoder::H264PreProcessPkt(AVStream *stream, AVPacket *pkt) |
3032 | 3041 | |
3033 | 3042 | gopset = false; |
3034 | 3043 | prevgoppos = 0; |
| 3044 | firstvpts = |
3035 | 3045 | lastapts = lastvpts = lastccptsu = 0; |
| 3046 | firstvptsinuse = true; |
3036 | 3047 | |
3037 | 3048 | // fps debugging info |
3038 | 3049 | float avFPS = normalized_fps(stream, context); |
… |
… |
bool AvFormatDecoder::ProcessVideoPacket(AVStream *curstream, AVPacket *pkt) |
3254 | 3265 | framesPlayed++; |
3255 | 3266 | |
3256 | 3267 | lastvpts = temppts; |
| 3268 | if (!firstvpts && firstvptsinuse) |
| 3269 | firstvpts = temppts; |
3257 | 3270 | |
3258 | 3271 | return true; |
3259 | 3272 | } |
… |
… |
bool AvFormatDecoder::ProcessAudioPacket(AVStream *curstream, AVPacket *pkt, |
4022 | 4035 | skipaudio = false; |
4023 | 4036 | } |
4024 | 4037 | |
| 4038 | // skip any audio frames preceding first video frame |
| 4039 | if (firstvptsinuse && firstvpts && (lastapts < firstvpts)) |
| 4040 | { |
| 4041 | VERBOSE(VB_PLAYBACK+VB_TIMESTAMP, |
| 4042 | LOC + QString("discarding early audio timecode %1 %2 %3") |
| 4043 | .arg(pkt->pts).arg(pkt->dts).arg(lastapts)); |
| 4044 | break; |
| 4045 | } |
| 4046 | firstvptsinuse = false; |
| 4047 | |
4025 | 4048 | avcodeclock->lock(); |
4026 | 4049 | data_size = 0; |
4027 | 4050 | |
diff --git a/mythtv/libs/libmythtv/avformatdecoder.h b/mythtv/libs/libmythtv/avformatdecoder.h
index 90b8f58..f48bc18 100644
a
|
b
|
class AvFormatDecoder : public DecoderBase |
262 | 262 | long long lastvpts; |
263 | 263 | long long lastapts; |
264 | 264 | long long lastccptsu; |
| 265 | long long firstvpts; |
| 266 | bool firstvptsinuse; |
265 | 267 | |
266 | 268 | bool using_null_videoout; |
267 | 269 | MythCodecID video_codec_id; |
diff --git a/mythtv/libs/libmythtv/vsync.cpp b/mythtv/libs/libmythtv/vsync.cpp
index 060402d..4d6a2d2 100644
a
|
b
|
VideoSync::VideoSync(VideoOutput *video_output, |
123 | 123 | bool halve_frame_interval) : |
124 | 124 | m_video_output(video_output), m_frame_interval(frameint), |
125 | 125 | m_refresh_interval(refreshint), m_interlaced(halve_frame_interval), |
126 | | m_delay(-1) |
| 126 | m_nexttrigger(0), |
| 127 | m_delay(-1), |
| 128 | m_synchronous(false) |
127 | 129 | { |
128 | | bzero(&m_nexttrigger, sizeof(m_nexttrigger)); |
129 | 130 | |
130 | 131 | int tolerance = m_refresh_interval / 200; |
131 | 132 | if (m_interlaced && m_refresh_interval > ((m_frame_interval/2) + tolerance)) |
… |
… |
VideoSync::VideoSync(VideoOutput *video_output, |
136 | 137 | |
137 | 138 | void VideoSync::Start(void) |
138 | 139 | { |
139 | | gettimeofday(&m_nexttrigger, NULL); // now |
| 140 | struct timeval now_tv; |
| 141 | gettimeofday(&now_tv, NULL); // now |
| 142 | m_nexttrigger = now_tv.tv_sec * 1000000LL + now_tv.tv_usec; |
140 | 143 | } |
141 | 144 | |
142 | 145 | /** \fn VideoSync::SetFrameInterval(int fr, bool intr) |
… |
… |
void VideoSync::SetFrameInterval(int fr, bool intr) |
147 | 150 | m_frame_interval = fr; |
148 | 151 | m_interlaced = intr; |
149 | 152 | int tolerance = m_refresh_interval / 200; |
| 153 | double sync_factor = fr * 2.0f / intr; |
| 154 | sync_factor = sync_factor - round(sync_factor); |
| 155 | m_synchronous = (sync_factor >= -0.005) && (sync_factor <= 0.005); |
150 | 156 | if (m_interlaced && m_refresh_interval > ((m_frame_interval/2) + tolerance)) |
151 | 157 | m_interlaced = false; // can't display both fields at 2x rate |
152 | 158 | |
153 | | VERBOSE(VB_PLAYBACK, QString("Set video sync frame interval to %1") |
154 | | .arg(m_frame_interval)); |
155 | | } |
156 | | |
157 | | void VideoSync::OffsetTimeval(struct timeval& tv, int offset) |
158 | | { |
159 | | tv.tv_usec += offset; |
160 | | while (tv.tv_usec > 999999) |
161 | | { |
162 | | tv.tv_sec++; |
163 | | tv.tv_usec -= 1000000; |
164 | | } |
165 | | while (tv.tv_usec < 0) |
166 | | { |
167 | | tv.tv_sec--; |
168 | | tv.tv_usec += 1000000; |
169 | | } |
| 159 | VERBOSE(VB_PLAYBACK, QString("Set video sync frame interval to %1 (synced:%2)") |
| 160 | .arg(m_frame_interval).arg(m_synchronous)); |
170 | 161 | } |
171 | 162 | |
172 | 163 | /** \fn VideoSync::UpdateNexttrigger() |
… |
… |
void VideoSync::UpdateNexttrigger() |
179 | 170 | // Offset by frame interval -- if interlaced, only delay by half |
180 | 171 | // frame interval |
181 | 172 | if (m_interlaced) |
182 | | OffsetTimeval(m_nexttrigger, m_frame_interval/2); |
| 173 | m_nexttrigger += m_frame_interval/2; |
183 | 174 | else |
184 | | OffsetTimeval(m_nexttrigger, m_frame_interval); |
| 175 | m_nexttrigger += m_frame_interval; |
185 | 176 | } |
186 | 177 | |
187 | 178 | /** \fn VideoSync::CalcDelay() |
… |
… |
void VideoSync::UpdateNexttrigger() |
197 | 188 | */ |
198 | 189 | int VideoSync::CalcDelay() |
199 | 190 | { |
200 | | struct timeval now; |
201 | | gettimeofday(&now, NULL); |
| 191 | struct timeval now_tv; |
| 192 | gettimeofday(&now_tv, NULL); |
202 | 193 | //cout << "CalcDelay: next: " << timeval_str(m_nexttrigger) << " now " |
203 | 194 | // << timeval_str(now) << endl; |
| 195 | int64_t now = now_tv.tv_sec * 1000000LL + now_tv.tv_usec; |
204 | 196 | |
205 | | int ret_val = (m_nexttrigger.tv_sec - now.tv_sec) * 1000000 + |
206 | | (m_nexttrigger.tv_usec - now.tv_usec); |
| 197 | int ret_val = m_nexttrigger - now; |
207 | 198 | |
208 | 199 | //cout << "delay " << ret_val << endl; |
209 | 200 | |
… |
… |
int VideoSync::CalcDelay() |
215 | 206 | ret_val = m_frame_interval * 4; |
216 | 207 | |
217 | 208 | // set nexttrigger to our new target time |
218 | | m_nexttrigger.tv_sec = now.tv_sec; |
219 | | m_nexttrigger.tv_usec = now.tv_usec; |
220 | | OffsetTimeval(m_nexttrigger, ret_val); |
| 209 | m_nexttrigger = now; |
| 210 | m_nexttrigger += ret_val; |
221 | 211 | } |
222 | 212 | |
223 | 213 | if (ret_val < -m_frame_interval) |
… |
… |
int VideoSync::CalcDelay() |
225 | 215 | ret_val = -m_frame_interval; |
226 | 216 | |
227 | 217 | // set nexttrigger to our new target time |
228 | | m_nexttrigger.tv_sec = now.tv_sec; |
229 | | m_nexttrigger.tv_usec = now.tv_usec; |
230 | | OffsetTimeval(m_nexttrigger, ret_val); |
| 218 | m_nexttrigger = now; |
| 219 | m_nexttrigger += ret_val; |
231 | 220 | } |
232 | 221 | |
233 | 222 | return ret_val; |
… |
… |
int VideoSync::CalcDelay() |
244 | 233 | void VideoSync::KeepPhase() |
245 | 234 | { |
246 | 235 | // cerr << m_delay << endl; |
247 | | if (m_delay < -(m_refresh_interval/2)) |
248 | | OffsetTimeval(m_nexttrigger, 200); |
249 | | else if (m_delay > -500) |
250 | | OffsetTimeval(m_nexttrigger, -2000); |
| 236 | if (m_synchronous) |
| 237 | { |
| 238 | if (m_delay < -(m_refresh_interval - 500)) |
| 239 | m_nexttrigger += 200; |
| 240 | else if (m_delay > -500) |
| 241 | m_nexttrigger += -2000; |
| 242 | } |
| 243 | else |
| 244 | { |
| 245 | if (m_delay < -(m_refresh_interval + 500)) |
| 246 | m_nexttrigger += 200; |
| 247 | else if (m_delay >= 0) |
| 248 | m_nexttrigger += -2000; |
| 249 | } |
251 | 250 | } |
252 | 251 | |
253 | 252 | #ifndef _WIN32 |
… |
… |
void DRMVideoSync::Start(void) |
337 | 336 | VideoSync::Start(); |
338 | 337 | } |
339 | 338 | |
340 | | void DRMVideoSync::WaitForFrame(int sync_delay) |
| 339 | int DRMVideoSync::WaitForFrame(int sync_delay) |
341 | 340 | { |
342 | 341 | // Offset for externally-provided A/V sync delay |
343 | | OffsetTimeval(m_nexttrigger, sync_delay); |
| 342 | m_nexttrigger += sync_delay; |
344 | 343 | |
345 | 344 | m_delay = CalcDelay(); |
346 | 345 | //cerr << "WaitForFrame at : " << m_delay; |
… |
… |
void DRMVideoSync::WaitForFrame(int sync_delay) |
360 | 359 | if (m_delay > 0) |
361 | 360 | { |
362 | 361 | // Wait for any remaining retrace intervals in one pass. |
363 | | int n = m_delay / m_refresh_interval + 1; |
| 362 | int n = (m_delay + m_refresh_interval - 1) / m_refresh_interval; |
364 | 363 | |
365 | 364 | drm_wait_vblank_t blank; |
366 | 365 | blank.request.type = DRM_VBLANK_RELATIVE; |
… |
… |
void DRMVideoSync::WaitForFrame(int sync_delay) |
370 | 369 | //cerr << "Wait " << n << " intervals. Count " << blank.request.sequence; |
371 | 370 | //cerr << " Delay " << m_delay << endl; |
372 | 371 | } |
| 372 | return m_delay; |
373 | 373 | } |
374 | 374 | |
375 | 375 | void DRMVideoSync::AdvanceTrigger(void) |
… |
… |
void OpenGLVideoSync::Start(void) |
497 | 497 | #endif /* USING_OPENGL_VSYNC */ |
498 | 498 | } |
499 | 499 | |
500 | | void OpenGLVideoSync::WaitForFrame(int sync_delay) |
| 500 | int OpenGLVideoSync::WaitForFrame(int sync_delay) |
501 | 501 | { |
502 | 502 | (void) sync_delay; |
503 | 503 | #ifdef USING_OPENGL_VSYNC |
| 504 | //#define GLVSYNCDEBUG |
| 505 | #ifdef GLVSYNCDEBUG |
| 506 | int refreshcount = 0; |
| 507 | #endif |
504 | 508 | const QString msg1("First A/V Sync"), msg2("Second A/V Sync"); |
505 | | OffsetTimeval(m_nexttrigger, sync_delay); |
| 509 | m_nexttrigger += sync_delay; |
506 | 510 | |
507 | 511 | VideoOutput *vo = dynamic_cast<VideoOutput*>(m_video_output); |
508 | 512 | if (vo && vo->IsEmbedding()) |
… |
… |
void OpenGLVideoSync::WaitForFrame(int sync_delay) |
510 | 514 | m_delay = CalcDelay(); |
511 | 515 | if (m_delay > 0) |
512 | 516 | usleep(m_delay); |
513 | | return; |
| 517 | return 0; |
514 | 518 | } |
515 | 519 | |
516 | 520 | int err; |
517 | 521 | if (!m_context) |
518 | | return; |
| 522 | return 0; |
519 | 523 | unsigned int frameNum = 0; |
520 | 524 | |
521 | 525 | OpenGLContextLocker ctx_lock(m_context); |
522 | 526 | err = gMythGLXGetVideoSyncSGI(&frameNum); |
523 | 527 | checkGLSyncError("Frame Number Query", err); |
524 | 528 | |
| 529 | #ifdef GLVSYNCDEBUG |
| 530 | int delay1 = m_delay; |
| 531 | int delay2; |
| 532 | #endif |
525 | 533 | // Always sync to the next retrace execpt when we are very late. |
526 | 534 | if ((m_delay = CalcDelay()) > -(m_refresh_interval/2)) |
527 | 535 | { |
| 536 | #ifdef GLVSYNCDEBUG |
| 537 | delay2 = m_delay; |
| 538 | #endif |
528 | 539 | err = gMythGLXWaitVideoSyncSGI(2, (frameNum+1)%2 ,&frameNum); |
529 | 540 | checkGLSyncError(msg1, err); |
530 | 541 | m_delay = CalcDelay(); |
| 542 | #ifdef GLVSYNCDEBUG |
| 543 | refreshcount++; |
| 544 | #endif |
531 | 545 | } |
| 546 | #ifdef GLVSYNCDEBUG |
| 547 | else |
| 548 | delay2 = m_delay; |
| 549 | #endif |
532 | 550 | |
| 551 | #ifdef GLVSYNCDEBUG |
| 552 | int delay3 = m_delay; |
| 553 | #endif |
533 | 554 | // Wait for any remaining retrace intervals in one pass. |
534 | 555 | if (m_delay > 0) |
535 | 556 | { |
536 | | uint n = m_delay / m_refresh_interval + 1; |
| 557 | uint n = (m_delay + m_refresh_interval - 1) / m_refresh_interval; |
| 558 | #ifdef GLVSYNCDEBUG |
| 559 | refreshcount += (int)n; |
| 560 | #endif |
537 | 561 | err = gMythGLXWaitVideoSyncSGI((n+1), (frameNum+n)%(n+1), &frameNum); |
538 | 562 | checkGLSyncError(msg2, err); |
539 | 563 | m_delay = CalcDelay(); |
540 | 564 | } |
| 565 | #ifdef GLVSYNCDEBUG |
| 566 | VERBOSE(VB_PLAYBACK+VB_TIMESTAMP, QString("VS: WFF: ri:%1 fi:%2 delay1:%3 delay2:%4 delay3:%5 skip:%6 finaldelay:%7") |
| 567 | .arg(m_refresh_interval) |
| 568 | .arg(m_frame_interval) |
| 569 | .arg(delay1) |
| 570 | .arg(delay2) |
| 571 | .arg(delay3) |
| 572 | .arg(refreshcount) |
| 573 | .arg(m_delay) |
| 574 | ); |
| 575 | #endif |
541 | 576 | |
542 | 577 | #endif /* USING_OPENGL_VSYNC */ |
| 578 | return m_delay; |
543 | 579 | } |
544 | 580 | |
545 | 581 | void OpenGLVideoSync::AdvanceTrigger(void) |
… |
… |
void OpenGLVideoSync::AdvanceTrigger(void) |
548 | 584 | |
549 | 585 | KeepPhase(); |
550 | 586 | UpdateNexttrigger(); |
| 587 | #ifdef GLVSYNCDEBUG |
| 588 | VERBOSE(VB_PLAYBACK+VB_TIMESTAMP, "VS: AdvanceTrigger"); |
| 589 | #endif |
551 | 590 | #endif /* USING_OPENGL_VSYNC */ |
552 | 591 | } |
553 | 592 | #endif /* !_WIN32 */ |
… |
… |
bool RTCVideoSync::TryInit(void) |
594 | 633 | return true; |
595 | 634 | } |
596 | 635 | |
597 | | void RTCVideoSync::WaitForFrame(int sync_delay) |
| 636 | int RTCVideoSync::WaitForFrame(int sync_delay) |
598 | 637 | { |
599 | | OffsetTimeval(m_nexttrigger, sync_delay); |
| 638 | m_nexttrigger += sync_delay; |
600 | 639 | |
601 | 640 | m_delay = CalcDelay(); |
602 | 641 | |
… |
… |
void RTCVideoSync::WaitForFrame(int sync_delay) |
609 | 648 | if ((val < 0) && (m_delay > 0)) |
610 | 649 | usleep(m_delay); |
611 | 650 | } |
| 651 | return 0; |
612 | 652 | } |
613 | 653 | |
614 | 654 | void RTCVideoSync::AdvanceTrigger(void) |
… |
… |
bool VDPAUVideoSync::TryInit(void) |
637 | 677 | return true; |
638 | 678 | } |
639 | 679 | |
640 | | void VDPAUVideoSync::WaitForFrame(int sync_delay) |
| 680 | int VDPAUVideoSync::WaitForFrame(int sync_delay) |
641 | 681 | { |
642 | 682 | // Offset for externally-provided A/V sync delay |
643 | | OffsetTimeval(m_nexttrigger, sync_delay); |
| 683 | m_nexttrigger += sync_delay; |
644 | 684 | m_delay = CalcDelay(); |
645 | 685 | |
646 | 686 | if (m_delay < 0) |
… |
… |
void VDPAUVideoSync::WaitForFrame(int sync_delay) |
648 | 688 | |
649 | 689 | VideoOutputVDPAU *vo = (VideoOutputVDPAU *)(m_video_output); |
650 | 690 | vo->SetNextFrameDisplayTimeOffset(m_delay); |
| 691 | return 0; |
651 | 692 | } |
652 | 693 | |
653 | 694 | void VDPAUVideoSync::AdvanceTrigger(void) |
… |
… |
bool BusyWaitVideoSync::TryInit(void) |
674 | 715 | return true; |
675 | 716 | } |
676 | 717 | |
677 | | void BusyWaitVideoSync::WaitForFrame(int sync_delay) |
| 718 | int BusyWaitVideoSync::WaitForFrame(int sync_delay) |
678 | 719 | { |
679 | 720 | // Offset for externally-provided A/V sync delay |
680 | | OffsetTimeval(m_nexttrigger, sync_delay); |
| 721 | m_nexttrigger += sync_delay; |
681 | 722 | |
682 | 723 | m_delay = CalcDelay(); |
683 | 724 | |
… |
… |
void BusyWaitVideoSync::WaitForFrame(int sync_delay) |
703 | 744 | if (cnt > 1) |
704 | 745 | m_cheat -= 200; |
705 | 746 | } |
| 747 | return 0; |
706 | 748 | } |
707 | 749 | |
708 | 750 | void BusyWaitVideoSync::AdvanceTrigger(void) |
… |
… |
bool USleepVideoSync::TryInit(void) |
725 | 767 | return true; |
726 | 768 | } |
727 | 769 | |
728 | | void USleepVideoSync::WaitForFrame(int sync_delay) |
| 770 | int USleepVideoSync::WaitForFrame(int sync_delay) |
729 | 771 | { |
730 | 772 | // Offset for externally-provided A/V sync delay |
731 | | OffsetTimeval(m_nexttrigger, sync_delay); |
| 773 | m_nexttrigger += sync_delay; |
732 | 774 | |
733 | 775 | m_delay = CalcDelay(); |
734 | 776 | if (m_delay > 0) |
735 | 777 | usleep(m_delay); |
| 778 | return 0; |
736 | 779 | } |
737 | 780 | |
738 | 781 | void USleepVideoSync::AdvanceTrigger(void) |
diff --git a/mythtv/libs/libmythtv/vsync.h b/mythtv/libs/libmythtv/vsync.h
index f077949..f8b1c4b 100644
a
|
b
|
class VideoSync |
70 | 70 | virtual void Start(void); |
71 | 71 | |
72 | 72 | /** \brief Waits for next a frame or field. |
| 73 | * Returns delay to real frame timing in usec |
73 | 74 | * |
74 | 75 | * Start(void), WaitForFrame(void), and Stop(void) should |
75 | 76 | * always be called from same thread, to prevent bad |
… |
… |
class VideoSync |
78 | 79 | * \param sync_delay time until the desired frame or field |
79 | 80 | * \sa CalcDelay(void), KeepPhase(void) |
80 | 81 | */ |
81 | | virtual void WaitForFrame(int sync_delay) = 0; |
| 82 | virtual int WaitForFrame(int sync_delay) = 0; |
82 | 83 | |
83 | 84 | /// \brief Use the next frame or field for CalcDelay(void) |
84 | 85 | /// and WaitForFrame(int). |
… |
… |
class VideoSync |
104 | 105 | uint frame_interval, uint refresh_interval, |
105 | 106 | bool interlaced); |
106 | 107 | protected: |
107 | | static void OffsetTimeval(struct timeval& tv, int offset); |
108 | 108 | void UpdateNexttrigger(void); |
109 | 109 | int CalcDelay(void); |
110 | 110 | void KeepPhase(void); |
… |
… |
class VideoSync |
113 | 113 | int m_frame_interval; // of video |
114 | 114 | int m_refresh_interval; // of display |
115 | 115 | bool m_interlaced; |
116 | | struct timeval m_nexttrigger; |
| 116 | int64_t m_nexttrigger; |
117 | 117 | int m_delay; |
| 118 | bool m_synchronous; |
118 | 119 | |
119 | 120 | static int m_forceskip; |
120 | 121 | }; |
… |
… |
class DRMVideoSync : public VideoSync |
136 | 137 | QString getName(void) const { return QString("DRM"); } |
137 | 138 | bool TryInit(void); |
138 | 139 | void Start(void); |
139 | | void WaitForFrame(int sync_delay); |
| 140 | int WaitForFrame(int sync_delay); |
140 | 141 | void AdvanceTrigger(void); |
141 | 142 | |
142 | 143 | private: |
… |
… |
class OpenGLVideoSync : public VideoSync |
178 | 179 | QString getName(void) const { return QString("SGI OpenGL"); } |
179 | 180 | bool TryInit(void); |
180 | 181 | void Start(void); |
181 | | void WaitForFrame(int sync_delay); |
| 182 | int WaitForFrame(int sync_delay); |
182 | 183 | void AdvanceTrigger(void); |
183 | 184 | |
184 | 185 | private: |
… |
… |
class RTCVideoSync : public VideoSync |
207 | 208 | |
208 | 209 | QString getName(void) const { return QString("RTC"); } |
209 | 210 | bool TryInit(void); |
210 | | void WaitForFrame(int sync_delay); |
| 211 | int WaitForFrame(int sync_delay); |
211 | 212 | void AdvanceTrigger(void); |
212 | 213 | |
213 | 214 | private: |
… |
… |
class VDPAUVideoSync : public VideoSync |
228 | 229 | |
229 | 230 | QString getName(void) const { return QString("VDPAU"); } |
230 | 231 | bool TryInit(void); |
231 | | void WaitForFrame(int sync_delay); |
| 232 | int WaitForFrame(int sync_delay); |
232 | 233 | void AdvanceTrigger(void); |
233 | 234 | |
234 | 235 | private: |
… |
… |
class BusyWaitVideoSync : public VideoSync |
256 | 257 | |
257 | 258 | QString getName(void) const { return QString("USleep with busy wait"); } |
258 | 259 | bool TryInit(void); |
259 | | void WaitForFrame(int sync_delay); |
| 260 | int WaitForFrame(int sync_delay); |
260 | 261 | void AdvanceTrigger(void); |
261 | 262 | |
262 | 263 | private: |
… |
… |
class USleepVideoSync : public VideoSync |
284 | 285 | |
285 | 286 | QString getName(void) const { return QString("USleep"); } |
286 | 287 | bool TryInit(void); |
287 | | void WaitForFrame(int sync_delay); |
| 288 | int WaitForFrame(int sync_delay); |
288 | 289 | void AdvanceTrigger(void); |
289 | 290 | }; |
290 | 291 | #endif /* VSYNC_H_INCLUDED */ |