MythTV  master
audiooutputbase.cpp
1 // C++ headers
2 #include <algorithm>
3 #include <cmath>
4 #include <limits>
5 
6 using namespace std;
7 
8 // POSIX headers
9 #include <unistd.h>
10 #include <sys/time.h>
11 
12 // Qt headers
13 #include <QMutexLocker>
14 
15 // MythTV headers
16 #include "compat.h"
17 #include "audiooutputbase.h"
19 #include "audiooutpututil.h"
20 #include "audiooutputdownmix.h"
21 #include "SoundTouch.h"
22 #include "freesurround.h"
23 #include "spdifencoder.h"
24 #include "mythlogging.h"
25 #include "mythconfig.h"
26 
27 // AC3 encode currently disabled for Android
28 #if defined(Q_OS_ANDROID)
29 #define DISABLE_AC3_ENCODE
30 #endif
31 
32 #define LOC QString("AOBase: ")
33 
34 #define WPOS (m_audioBuffer + org_waud)
35 #define RPOS (m_audioBuffer + m_raud)
36 #define ABUF m_audioBuffer
37 #define STST soundtouch::SAMPLETYPE
38 #define AOALIGN(x) (((long)&(x) + 15) & ~0xf);
39 
40 // 1,2,5 and 7 channels are currently valid for upmixing if required
41 #define UPMIX_CHANNEL_MASK ((1<<1)|(1<<2)|(1<<5)|1<<7)
42 #define IS_VALID_UPMIX_CHANNEL(ch) ((1 << (ch)) & UPMIX_CHANNEL_MASK)
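// Illustration (not part of the original file): the mask above is
// (1<<1)|(1<<2)|(1<<5)|(1<<7) = 0xA6, so IS_VALID_UPMIX_CHANNEL(2) evaluates to
// (1<<2) & 0xA6 = 0x04 (stereo can be upmixed), while IS_VALID_UPMIX_CHANNEL(6)
// evaluates to 0x40 & 0xA6 = 0 (a 5.1 source cannot).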
43 
44 static const char *quality_string(int q)
45 {
46  switch(q)
47  {
48  case QUALITY_DISABLED: return "disabled";
49  case QUALITY_LOW: return "low";
50  case QUALITY_MEDIUM: return "medium";
51  case QUALITY_HIGH: return "high";
52  default: return "unknown";
53  }
54 }
55 
56 AudioOutputBase::AudioOutputBase(const AudioSettings &settings) :
57  MThread("AudioOutputBase"),
58  // protected
59  m_mainDevice(settings.GetMainDevice()),
60  m_passthruDevice(settings.GetPassthruDevice()),
61  m_source(settings.m_source),
62  m_setInitialVol(settings.m_setInitialVol)
63 {
65 
66  if (m_mainDevice.startsWith("AudioTrack:"))
67  m_usesSpdif = false;
68  // Handle override of SRC quality settings
69  if (gCoreContext->GetBoolSetting("SRCQualityOverride", false))
70  {
72  // Extra test to keep backward compatibility with earlier SRC setting
75 
76  VBAUDIO(QString("SRC quality = %1").arg(quality_string(m_srcQuality)));
77  }
78 }
79 
85 AudioOutputBase::~AudioOutputBase()
86 {
87  if (!m_killAudio)
88  VBERROR("Programmer Error: "
89  "~AudioOutputBase called, but KillAudio has not been called!");
90 
91  // We got this from a subclass, delete it
92  delete m_outputSettings;
93  delete m_outputSettingsRaw;
95  {
98  }
99 
100  if (m_kAudioSRCOutputSize > 0)
101  delete[] m_srcOut;
102 
103 #ifndef NDEBUG
104  assert(m_memoryCorruptionTest0 == 0xdeadbeef);
105  assert(m_memoryCorruptionTest1 == 0xdeadbeef);
106  assert(m_memoryCorruptionTest2 == 0xdeadbeef);
107  assert(m_memoryCorruptionTest3 == 0xdeadbeef);
108 #else
109  Q_UNUSED(m_memoryCorruptionTest0);
110  Q_UNUSED(m_memoryCorruptionTest1);
111  Q_UNUSED(m_memoryCorruptionTest2);
112  Q_UNUSED(m_memoryCorruptionTest3);
113 #endif
114 }
115 
116 void AudioOutputBase::InitSettings(const AudioSettings &settings)
117 {
118  if (settings.m_custom)
119  {
120  // got a custom audio report already, use it
121  // this was likely provided by the AudioTest utility
123  *m_outputSettings = *settings.m_custom;
127  return;
128  }
129 
130  // Ask the subclass what we can send to the device
133 
137 
138  m_upmixDefault = m_maxChannels > 2 ?
139  gCoreContext->GetBoolSetting("AudioDefaultUpmix", false) :
140  false;
141  if (settings.m_upmixer == 1) // music, upmixer off
142  m_upmixDefault = false;
143  else if (settings.m_upmixer == 2) // music, upmixer on
144  m_upmixDefault = true;
145 }
146 
152 AudioOutputSettings* AudioOutputBase::GetOutputSettingsCleaned(bool digital)
153 {
154  // If we've already checked the port, use the cache
155  // version instead
156  if (!m_discreteDigital || !digital)
157  {
158  digital = false;
159  if (m_outputSettingsRaw)
160  return m_outputSettingsRaw;
161  }
162  else if (m_outputSettingsDigitalRaw)
163  return m_outputSettingsDigitalRaw;
164 
165  AudioOutputSettings* aosettings = GetOutputSettings(digital);
166  if (aosettings)
167  aosettings->GetCleaned();
168  else
169  aosettings = new AudioOutputSettings(true);
170 
171  if (digital)
172  return (m_outputSettingsDigitalRaw = aosettings);
173  return (m_outputSettingsRaw = aosettings);
174 }
175 
181 AudioOutputSettings* AudioOutputBase::GetOutputSettingsUsers(bool digital)
182 {
183  if (!m_discreteDigital || !digital)
184  {
185  digital = false;
186  if (m_outputSettings)
187  return m_outputSettings;
188  }
189  else if (m_outputSettingsDigital)
190  return m_outputSettingsDigital;
191 
192  auto* aosettings = new AudioOutputSettings;
193 
194  *aosettings = *GetOutputSettingsCleaned(digital);
195  aosettings->GetUsers();
196 
197  if (digital)
198  return (m_outputSettingsDigital = aosettings);
199  return (m_outputSettings = aosettings);
200 }
201 
205 bool AudioOutputBase::CanPassthrough(int samplerate, int channels,
206  AVCodecID codec, int profile) const
207 {
208  DigitalFeature arg = FEATURE_NONE;
209  bool ret = !(m_internalVol && SWVolume());
210 
211  switch(codec)
212  {
213  case AV_CODEC_ID_AC3:
214  arg = FEATURE_AC3;
215  break;
216  case AV_CODEC_ID_DTS:
217  switch(profile)
218  {
219  case FF_PROFILE_DTS:
220  case FF_PROFILE_DTS_ES:
221  case FF_PROFILE_DTS_96_24:
222  arg = FEATURE_DTS;
223  break;
224  case FF_PROFILE_DTS_HD_HRA:
225  case FF_PROFILE_DTS_HD_MA:
226  arg = FEATURE_DTSHD;
227  break;
228  default:
229  break;
230  }
231  break;
232  case AV_CODEC_ID_EAC3:
233  arg = FEATURE_EAC3;
234  break;
235  case AV_CODEC_ID_TRUEHD:
236  arg = FEATURE_TRUEHD;
237  break;
238  default:
239  arg = FEATURE_NONE;
240  break;
241  }
242  // we can't passthrough any other codecs than those defined above
243  ret &= m_outputSettingsDigital->canFeature(arg);
245  ret &= m_outputSettingsDigital->IsSupportedRate(samplerate);
246  // if we must resample to 48kHz ; we can't passthrough
247  ret &= !((samplerate != 48000) &&
248  gCoreContext->GetBoolSetting("Audio48kOverride", false));
249  // Don't know any cards that support spdif clocked at < 44100
250  // Some US cable transmissions have 2ch 32k AC-3 streams
251  ret &= samplerate >= 44100;
252  if (!ret)
253  return false;
254  // Will passthrough if surround audio was defined. Amplifier will
255  // do the downmix if required
256  bool willupmix = m_maxChannels >= 6 && (channels <= 2 && m_upmixDefault);
257  ret &= !willupmix;
258  // unless audio is configured for stereo. We can passthrough otherwise
259  ret |= m_maxChannels == 2;
260 
261  return ret;
262 }
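A worked example of the decision logic above (hypothetical device capabilities, not taken from this file):
// DTS core, 48000 Hz, 6 ch source; device reports FEATURE_DTS, supports 48 kHz,
// m_maxChannels == 6, upmix disabled, no software volume:
//   canFeature(FEATURE_DTS)   -> true
//   IsSupportedRate(48000)    -> true
//   Audio48kOverride check    -> passes (already 48 kHz)
//   samplerate >= 44100       -> true
//   willupmix                 -> false (source already has 6 channels)
//   => CanPassthrough() returns true
// The same stream on a stereo-only configuration (m_maxChannels == 2) also
// passes, via the final "ret |= m_maxChannels == 2" test.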
263 
267 void AudioOutputBase::SetSourceBitrate(int rate)
268 {
269  if (rate > 0)
270  m_sourceBitRate = rate;
271 }
272 
278 void AudioOutputBase::SetStretchFactorLocked(float lstretchfactor)
279 {
280  if (m_stretchFactor == lstretchfactor && m_pSoundStretch)
281  return;
282 
283  m_stretchFactor = lstretchfactor;
284 
285  int channels = m_needsUpmix || m_needsDownmix ?
286  m_configuredChannels : m_sourceChannels;
287  if (channels < 1 || channels > 8 || !m_configureSucceeded)
288  return;
289 
290  bool willstretch = m_stretchFactor < 0.99F || m_stretchFactor > 1.01F;
291  m_effStretchFactor = lroundf(100000.0F * lstretchfactor);
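// Illustration (not in the original): a 1.25x stretch factor gives m_effStretchFactor == 125000.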
292 
293  if (m_pSoundStretch)
294  {
295  if (!willstretch && m_forcedProcessing)
296  {
297  m_forcedProcessing = false;
298  m_processing = false;
299  delete m_pSoundStretch;
300  m_pSoundStretch = nullptr;
301  VBGENERAL(QString("Cancelling time stretch"));
303  m_waud = m_raud = 0;
304  m_resetActive.Ref();
305  }
306  else
307  {
308  VBGENERAL(QString("Changing time stretch to %1")
309  .arg(m_stretchFactor));
310  m_pSoundStretch->setTempo(m_stretchFactor);
311  }
312  }
313  else if (willstretch)
314  {
315  VBGENERAL(QString("Using time stretch %1").arg(m_stretchFactor));
316  m_pSoundStretch = new soundtouch::SoundTouch();
317  m_pSoundStretch->setSampleRate(m_sampleRate);
318  m_pSoundStretch->setChannels(channels);
319  m_pSoundStretch->setTempo(m_stretchFactor);
320 #if ARCH_ARM || defined(Q_OS_ANDROID)
321  // use less demanding settings for ARM and Android devices such as the Raspberry Pi
322  m_pSoundStretch->setSetting(SETTING_SEQUENCE_MS, 82);
323  m_pSoundStretch->setSetting(SETTING_USE_AA_FILTER, 0);
324  m_pSoundStretch->setSetting(SETTING_USE_QUICKSEEK, 1);
325 #else
326  m_pSoundStretch->setSetting(SETTING_SEQUENCE_MS, 35);
327 #endif
328  /* If we weren't already processing we need to turn on float conversion
329  adjust sample and frame sizes accordingly and dump the contents of
330  the audiobuffer */
331  if (!m_processing)
332  {
333  m_processing = true;
334  m_forcedProcessing = true;
339  m_waud = m_raud = 0;
340  m_resetActive.Ref();
342  m_pauseAudio = true;
343  m_actuallyPaused = false;
344  m_unpauseWhenReady = true;
345  }
346  }
347 }
348 
352 void AudioOutputBase::SetStretchFactor(float lstretchfactor)
353 {
354  QMutexLocker lock(&m_audioBufLock);
355  SetStretchFactorLocked(lstretchfactor);
356 }
357 
361 float AudioOutputBase::GetStretchFactor(void) const
362 {
363  return m_stretchFactor;
364 }
365 
369 bool AudioOutputBase::IsUpmixing(void)
370 {
371  return m_needsUpmix && m_upmixer;
372 }
373 
377 bool AudioOutputBase::ToggleUpmix(void)
378 {
379  // Can only upmix from mono/stereo to 6 ch
380  if (m_maxChannels == 2 || m_sourceChannels > 2)
381  return false;
382 
384 
387  m_upmixDefault ? false : m_passthru);
388  Reconfigure(settings);
389  return IsUpmixing();
390 }
391 
395 bool AudioOutputBase::CanUpmix(void)
396 {
397  return m_sourceChannels <= 2 && m_maxChannels > 2;
398 }
399 
400 /*
401  * Setup samplerate and number of channels for passthrough
402  * Create SPDIF encoder and return true if successful
403  */
404 bool AudioOutputBase::SetupPassthrough(AVCodecID codec, int codec_profile,
405  int &samplerate_tmp, int &channels_tmp)
406 {
407  if (codec == AV_CODEC_ID_DTS &&
408  !m_outputSettingsDigital->canFeature(FEATURE_DTSHD))
409  {
410  // We do not support DTS-HD bitstream so force extraction of the
411  // DTS core track instead
412  codec_profile = FF_PROFILE_DTS;
413  }
415  codec, codec_profile,
416  samplerate_tmp, channels_tmp,
418  VBAUDIO("Setting " + log + " passthrough");
419 
420  delete m_spdifEnc;
421 
422  // No spdif encoder needed for certain devices
423  if (m_usesSpdif)
424  m_spdifEnc = new SPDIFEncoder("spdif", codec);
425  else
426  m_spdifEnc = nullptr;
427  if (m_spdifEnc && m_spdifEnc->Succeeded() && codec == AV_CODEC_ID_DTS)
428  {
429  switch(codec_profile)
430  {
431  case FF_PROFILE_DTS:
432  case FF_PROFILE_DTS_ES:
433  case FF_PROFILE_DTS_96_24:
435  break;
436  case FF_PROFILE_DTS_HD_HRA:
437  case FF_PROFILE_DTS_HD_MA:
438  m_spdifEnc->SetMaxHDRate(samplerate_tmp * channels_tmp / 2);
439  break;
440  }
441  }
442 
443  if (m_spdifEnc && !m_spdifEnc->Succeeded())
444  {
445  delete m_spdifEnc;
446  m_spdifEnc = nullptr;
447  return false;
448  }
449  return true;
450 }
451 
452 AudioOutputSettings* AudioOutputBase::OutputSettings(bool digital)
453 {
454  if (digital)
455  return m_outputSettingsDigital;
456  return m_outputSettings;
457 }
458 
464 void AudioOutputBase::Reconfigure(const AudioSettings &orig_settings)
465 {
466  AudioSettings settings = orig_settings;
467  int lsource_channels = settings.m_channels;
468  int lconfigured_channels = m_configuredChannels;
469  bool lneeds_upmix = false;
470  bool lneeds_downmix = false;
471  bool lreenc = false;
472  bool lenc = false;
473 
474  if (!settings.m_usePassthru)
475  {
476  // Do we upmix stereo or mono?
477  lconfigured_channels =
478  (m_upmixDefault && lsource_channels <= 2) ? 6 : lsource_channels;
479  bool cando_channels =
480  m_outputSettings->IsSupportedChannels(lconfigured_channels);
481 
482  // check if the number of channels could be transmitted via AC3 encoding
483 #ifndef DISABLE_AC3_ENCODE
486  lconfigured_channels > 2 && lconfigured_channels <= 6);
487 #endif
488  if (!lenc && !cando_channels)
489  {
490  // if hardware doesn't support source audio configuration
491  // we will upmix/downmix to what we can
492  // (can safely assume hardware supports stereo)
493  switch (lconfigured_channels)
494  {
495  case 7:
496  lconfigured_channels = 8;
497  break;
498  case 8:
499  case 5:
500  lconfigured_channels = 6;
501  break;
502  case 6:
503  case 4:
504  case 3:
505  case 2: //Will never happen
506  lconfigured_channels = 2;
507  break;
508  case 1:
509  lconfigured_channels = m_upmixDefault ? 6 : 2;
510  break;
511  default:
512  lconfigured_channels = 2;
513  break;
514  }
515  }
516  // Make sure we never attempt to output more than what we can
517  // the upmixer can only upmix to 6 channels when source < 6
518  if (lsource_channels <= 6)
519  lconfigured_channels = min(lconfigured_channels, 6);
520  lconfigured_channels = min(lconfigured_channels, m_maxChannels);
521  /* Encode to AC-3 if we're allowed to passthru but aren't currently
522  and we have more than 2 channels but multichannel PCM is not
523  supported or if the device just doesn't support the number of
524  channels */
525 #ifndef DISABLE_AC3_ENCODE
528  lconfigured_channels > 2) ||
529  !m_outputSettings->IsSupportedChannels(lconfigured_channels));
530  /* Might we reencode a bitstream that's been decoded for timestretch?
531  If the device doesn't support the number of channels - see below */
533  (settings.m_codec == AV_CODEC_ID_AC3 ||
534  settings.m_codec == AV_CODEC_ID_DTS))
535  {
536  lreenc = true;
537  }
538 #endif
539  // Enough channels? Upmix if not, but only from mono/stereo/5.0 to 5.1
540  if (IS_VALID_UPMIX_CHANNEL(settings.m_channels) &&
541  settings.m_channels < lconfigured_channels)
542  {
543  VBAUDIO(QString("Needs upmix from %1 -> %2 channels")
544  .arg(settings.m_channels).arg(lconfigured_channels));
545  settings.m_channels = lconfigured_channels;
546  lneeds_upmix = true;
547  }
548  else if (settings.m_channels > lconfigured_channels)
549  {
550  VBAUDIO(QString("Needs downmix from %1 -> %2 channels")
551  .arg(settings.m_channels).arg(lconfigured_channels));
552  settings.m_channels = lconfigured_channels;
553  lneeds_downmix = true;
554  }
555  }
556 
557  ClearError();
558 
559  bool general_deps = true;
560 
561  /* Set samplerate_tmp and channels_tmp to appropriate values
562  if passing through */
563  int samplerate_tmp = 0;
564  int channels_tmp = 0;
565  if (settings.m_usePassthru)
566  {
567  samplerate_tmp = settings.m_sampleRate;
568  SetupPassthrough(settings.m_codec, settings.m_codecProfile,
569  samplerate_tmp, channels_tmp);
570  general_deps = m_sampleRate == samplerate_tmp && m_channels == channels_tmp;
571  general_deps &= m_format == m_outputFormat && m_format == FORMAT_S16;
572  }
573  else
574  {
575  general_deps =
576  settings.m_format == m_format && lsource_channels == m_sourceChannels;
577  }
578 
579  // Check if anything has changed
580  general_deps &=
581  settings.m_sampleRate == m_sourceSampleRate &&
582  settings.m_usePassthru == m_passthru &&
583  lconfigured_channels == m_configuredChannels &&
584  lneeds_upmix == m_needsUpmix && lreenc == m_reEnc &&
585  lneeds_downmix == m_needsDownmix;
586 
587  if (general_deps && m_configureSucceeded)
588  {
589  VBAUDIO("Reconfigure(): No change -> exiting");
590  // if passthrough, source channels may have changed
591  m_sourceChannels = lsource_channels;
592  return;
593  }
594 
595  KillAudio();
596 
597  QMutexLocker lock(&m_audioBufLock);
598  QMutexLocker lockav(&m_avsyncLock);
599 
600  m_waud = m_raud = 0;
603 
604  m_channels = settings.m_channels;
605  m_sourceChannels = lsource_channels;
606  m_reEnc = lreenc;
607  m_codec = settings.m_codec;
608  m_passthru = settings.m_usePassthru;
609  m_configuredChannels = lconfigured_channels;
610  m_needsUpmix = lneeds_upmix;
611  m_needsDownmix = lneeds_downmix;
612  m_format = m_outputFormat = settings.m_format;
613  m_sourceSampleRate = m_sampleRate = settings.m_sampleRate;
614  m_enc = lenc;
615 
616  m_killAudio = m_pauseAudio = false;
617  m_wasPaused = true;
618 
619  // Don't try to do anything if audio hasn't been
620  // initialized yet (e.g. rubbish was provided)
621  if (m_sourceChannels <= 0 || m_format <= 0 || m_sampleRate <= 0)
622  {
623  SilentError(QString("Aborting Audio Reconfigure. ") +
624  QString("Invalid audio parameters ch %1 fmt %2 @ %3Hz")
625  .arg(m_sourceChannels).arg(m_format).arg(m_sampleRate));
626  return;
627  }
628 
629  VBAUDIO(QString("Original codec was %1, %2, %3 kHz, %4 channels")
630  .arg(ff_codec_id_string(m_codec))
632  .arg(m_sampleRate/1000)
633  .arg(m_sourceChannels));
634 
635  if (m_needsDownmix && m_sourceChannels > 8)
636  {
637  Error(QObject::tr("Aborting Audio Reconfigure. "
638  "Can't handle audio with more than 8 channels."));
639  return;
640  }
641 
642  VBAUDIO(QString("enc(%1), passthru(%2), features (%3) "
643  "configured_channels(%4), %5 channels supported(%6) "
644  "max_channels(%7)")
645  .arg(m_enc)
646  .arg(m_passthru)
649  .arg(m_channels)
650  .arg(OutputSettings(m_enc || m_passthru)->IsSupportedChannels(m_channels))
651  .arg(m_maxChannels));
652 
653  int dest_rate = 0;
654 
655  // Force resampling if we are encoding to AC3 and sr > 48k
656  // or if 48k override was checked in settings
657  if ((m_sampleRate != 48000 &&
658  gCoreContext->GetBoolSetting("Audio48kOverride", false)) ||
659  (m_enc && (m_sampleRate > 48000)))
660  {
661  VBAUDIO("Forcing resample to 48 kHz");
662  if (m_srcQuality < 0)
663  m_srcQuality = QUALITY_MEDIUM;
664  m_needResampler = true;
665  dest_rate = 48000;
666  }
667  // this will always be false for passthrough audio as
668  // CanPassthrough() already tested these conditions
669  else if ((m_needResampler =
670  !OutputSettings(m_enc || m_passthru)->IsSupportedRate(m_sampleRate)))
671  {
673  }
674 
676  {
677  m_sampleRate = dest_rate;
678 
679  VBGENERAL(QString("Resampling from %1 kHz to %2 kHz with quality %3")
680  .arg(settings.m_sampleRate/1000).arg(m_sampleRate/1000)
682 
684 
685  int error = 0;
686  m_srcCtx = src_new(2-m_srcQuality, chans, &error);
687  if (error)
688  {
689  Error(QObject::tr("Error creating resampler: %1")
690  .arg(src_strerror(error)));
691  m_srcCtx = nullptr;
692  return;
693  }
694 
695  m_srcData.src_ratio = (double)m_sampleRate / settings.m_sampleRate;
696  m_srcData.data_in = m_srcIn;
697  int newsize = (int)(kAudioSRCInputSize * m_srcData.src_ratio + 15)
698  & ~0xf;
699 
700  if (m_kAudioSRCOutputSize < newsize)
701  {
702  m_kAudioSRCOutputSize = newsize;
703  VBAUDIO(QString("Resampler allocating %1").arg(newsize));
704  delete[] m_srcOut;
705  m_srcOut = new float[m_kAudioSRCOutputSize];
706  }
707  m_srcData.data_out = m_srcOut;
708  m_srcData.output_frames = m_kAudioSRCOutputSize / chans;
709  m_srcData.end_of_input = 0;
710  }
711 
712  if (m_enc)
713  {
714  if (m_reEnc)
715  VBAUDIO("Reencoding decoded AC-3/DTS to AC-3");
716 
717  VBAUDIO(QString("Creating AC-3 Encoder with sr = %1, ch = %2")
719 
720  m_encoder = new AudioOutputDigitalEncoder();
721  if (!m_encoder->Init(AV_CODEC_ID_AC3, 448000, m_sampleRate,
722  m_configuredChannels))
723  {
724  Error(QObject::tr("AC-3 encoder initialization failed"));
725  delete m_encoder;
726  m_encoder = nullptr;
727  m_enc = false;
728  // upmixing will fail if we needed the encoder
729  m_needsUpmix = false;
730  }
731  }
732 
733  if (m_passthru)
734  {
735  //AC3, DTS, DTS-HD MA and TrueHD all use 16 bits samples
736  m_channels = channels_tmp;
737  m_sampleRate = samplerate_tmp;
741  }
742  else
743  {
746  }
747 
748  // Turn on float conversion?
749  if (m_needResampler || m_needsUpmix || m_needsDownmix ||
750  m_stretchFactor != 1.0F || (m_internalVol && SWVolume()) ||
751  (m_enc && m_outputFormat != FORMAT_S16) ||
752  !OutputSettings(m_enc || m_passthru)->IsSupportedFormat(m_outputFormat))
753  {
754  VBAUDIO("Audio processing enabled");
755  m_processing = true;
756  if (m_enc)
757  m_outputFormat = FORMAT_S16; // Output s16le for AC-3 encoder
758  else
759  m_outputFormat = m_outputSettings->BestSupportedFormat();
760  }
761 
762  m_bytesPerFrame = m_processing ?
763  sizeof(float) : AudioOutputSettings::SampleSize(m_format);
764  m_bytesPerFrame *= m_channels;
765 
766  if (m_enc)
767  m_channels = 2; // But only post-encoder
768 
771 
772  VBGENERAL(
773  QString("Opening audio device '%1' ch %2(%3) sr %4 sf %5 reenc %6")
776 
779  m_effDsp = m_sampleRate * 100;
780 
781  // Actually do the device specific open call
782  if (!OpenDevice())
783  {
784  if (GetError().isEmpty())
785  Error(QObject::tr("Aborting reconfigure"));
786  else
787  VBGENERAL("Aborting reconfigure");
788  m_configureSucceeded = false;
789  return;
790  }
791 
792  VBAUDIO(QString("Audio fragment size: %1").arg(m_fragmentSize));
793 
794  // Only used for software volume
796  {
797  VBAUDIO("Software volume enabled");
798  m_volumeControl = gCoreContext->GetSetting("MixerControl", "PCM");
799  m_volumeControl += "MixerVolume";
801  }
802 
806 
808  {
812  VBAUDIO(QString("Create %1 quality upmixer done")
814  }
815 
816  VBAUDIO(QString("Audio Stretch Factor: %1").arg(m_stretchFactor));
818 
819  // Setup visualisations, zero the visualisations buffers
820  prepareVisuals();
821 
822  if (m_unpauseWhenReady)
824 
825  m_configureSucceeded = true;
826 
828 
829  VBAUDIO("Ending Reconfigure()");
830 }
831 
832 bool AudioOutputBase::StartOutputThread(void)
833 {
834  if (m_audioThreadExists)
835  return true;
836 
837  start();
838  m_audioThreadExists = true;
839 
840  return true;
841 }
842 
843 
844 void AudioOutputBase::StopOutputThread(void)
845 {
846  if (m_audioThreadExists)
847  {
848  wait();
849  m_audioThreadExists = false;
850  }
851 }
852 
856 void AudioOutputBase::KillAudio(void)
857 {
858  m_killAudioLock.lock();
859 
860  VBAUDIO("Killing AudioOutputDSP");
861  m_killAudio = true;
862  StopOutputThread();
863  QMutexLocker lock(&m_audioBufLock);
864 
865  if (m_pSoundStretch)
866  {
867  delete m_pSoundStretch;
868  m_pSoundStretch = nullptr;
870  m_stretchFactor = 1.0F;
871  }
872 
873  if (m_encoder)
874  {
875  delete m_encoder;
876  m_encoder = nullptr;
877  }
878 
879  if (m_upmixer)
880  {
881  delete m_upmixer;
882  m_upmixer = nullptr;
883  }
884 
885  if (m_srcCtx)
886  {
887  src_delete(m_srcCtx);
888  m_srcCtx = nullptr;
889  }
890 
891  m_needsUpmix = m_needResampler = m_enc = false;
892 
893  CloseDevice();
894 
895  m_killAudioLock.unlock();
896 }
897 
898 void AudioOutputBase::Pause(bool paused)
899 {
900  if (!paused && m_unpauseWhenReady)
901  return;
902  VBAUDIO(QString("Pause %1").arg(paused));
903  if (m_pauseAudio != paused)
905  m_pauseAudio = paused;
906  m_unpauseWhenReady = false;
907  m_actuallyPaused = false;
908 }
909 
910 void AudioOutputBase::PauseUntilBuffered(void)
911 {
912  Reset();
913  Pause(true);
914  m_unpauseWhenReady = true;
915 }
916 
920 void AudioOutputBase::Reset(void)
921 {
922  QMutexLocker lock(&m_audioBufLock);
923  QMutexLocker lockav(&m_avsyncLock);
924 
926  if (m_encoder)
927  {
928  m_waud = m_raud = 0; // empty ring buffer
930  }
931  else
932  {
933  m_waud = m_raud; // empty ring buffer
934  }
935  m_resetActive.Ref();
936  m_currentSeconds = -1;
938  m_unpauseWhenReady = false;
939  // clear any state that could remember previous audio in any active filters
940  if (m_needsUpmix && m_upmixer)
941  m_upmixer->flush();
942  if (m_pSoundStretch)
943  m_pSoundStretch->clear();
944  if (m_encoder)
945  m_encoder->clear();
946 
947  // Setup visualisations, zero the visualisations buffers
948  prepareVisuals();
949 }
950 
957 void AudioOutputBase::SetTimecode(int64_t timecode)
958 {
959  m_audbufTimecode = m_audioTime = timecode;
960  m_framesBuffered = (timecode * m_sourceSampleRate) / 1000;
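// Illustration (not in the original): timecode 2000 ms at a 48 kHz source rate gives m_framesBuffered == 96000.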
961 }
962 
969 void AudioOutputBase::SetEffDsp(int dsprate)
970 {
971  VBAUDIO(QString("SetEffDsp: %1").arg(dsprate));
972  m_effDsp = dsprate;
973 }
974 
978 inline int AudioOutputBase::audiolen() const
979 {
980  if (m_waud >= m_raud)
981  return m_waud - m_raud;
982  return kAudioRingBufferSize - (m_raud - m_waud);
983 }
984 
988 int AudioOutputBase::audiofree() const
989 {
990  return kAudioRingBufferSize - audiolen() - 1;
991  /* There is one wasted byte in the buffer. The case where waud = raud is
992  interpreted as an empty buffer, so the fullest the buffer can ever
993  be is kAudioRingBufferSize - 1. */
994 }
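As a side note, here is a minimal standalone sketch of the wrap-around arithmetic used by audiolen() and audiofree(); it assumes a tiny stand-in buffer size and is an illustration, not code from this file:
// kSize plays the role of kAudioRingBufferSize; ringLen() mirrors audiolen().
#include <cstdio>

static constexpr unsigned kSize = 16;

static unsigned ringLen(unsigned raud, unsigned waud)
{
    return (waud >= raud) ? waud - raud : kSize - (raud - waud);
}

int main()
{
    std::printf("%u\n", ringLen(0, 0));              // empty buffer: 0 bytes used
    std::printf("%u\n", ringLen(12, 3));             // writer has wrapped: 16 - 9 = 7 bytes used
    std::printf("%u\n", kSize - ringLen(5, 4) - 1);  // "full" buffer: free space is 0, one byte wasted
    return 0;
}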
995 
1003 int AudioOutputBase::audioready() const
1004 {
1005  if (m_passthru || m_enc)
1006  return audiolen();
1007  return audiolen() * m_outputBytesPerFrame / m_bytesPerFrame;
1008 }
1009 
1013 int64_t AudioOutputBase::GetAudiotime(void)
1014 {
1015  if (m_audbufTimecode == 0 || !m_configureSucceeded)
1016  return 0;
1017 
1018  // output bits per 10 frames
1019  int64_t obpf = 0;
1020 
1021  if (m_passthru && !usesSpdif())
1022  obpf = m_sourceBitRate * 10 / m_sourceSampleRate;
1023  else if (m_enc && !usesSpdif())
1024  {
1025  // re-encode bitrate is hardcoded at 448000
1026  obpf = 448000 * 10 / m_sourceSampleRate;
1027  }
1028  else
1029  obpf = static_cast<int64_t>(m_outputBytesPerFrame) * 80;
1030 
1031  /* We want to calculate 'audiotime', which is the timestamp of the audio
1032  which is leaving the sound card at this instant.
1033 
1034  We use these variables:
1035 
1036  'effdsp' is 100 * frames/sec
1037 
1038  'audbuf_timecode' is the timecode in milliseconds of the
1039  audio that has just been written into the buffer.
1040 
1041  'eff_stretchfactor' is stretch factor * 100,000
1042 
1043  'totalbuffer' is the total # of bytes in our audio buffer, and the
1044  sound card's buffer. */
1045 
1046 
1047  QMutexLocker lockav(&m_avsyncLock);
1048 
1049  int64_t soundcard_buffer = GetBufferedOnSoundcard(); // bytes
1050 
1051  /* audioready tells us how many bytes are in audiobuffer
1052  scaled appropriately if output format != internal format */
1053  int64_t main_buffer = audioready();
1054 
1055  int64_t oldaudiotime = m_audioTime;
1056 
1057  /* timecode is the stretch adjusted version
1058  of major post-stretched buffer contents
1059  processing latencies are catered for in AddData/SetAudiotime
1060  to eliminate race */
1061 
1062  m_audioTime = m_audbufTimecode - (m_effDsp && obpf ?
1063  ((main_buffer + soundcard_buffer) * int64_t(m_effStretchFactor)
1064  * 80 / int64_t(m_effDsp) / obpf) : 0);
1065 
1066  /* audiotime should never go backwards, but we might get a negative
1067  value if GetBufferedOnSoundcard() isn't updated by the driver very
1068  quickly (e.g. ALSA) */
1069  if (m_audioTime < oldaudiotime)
1070  m_audioTime = oldaudiotime;
1071 
1072  VBAUDIOTS(QString("GetAudiotime audt=%1 abtc=%2 mb=%3 sb=%4 tb=%5 "
1073  "sr=%6 obpf=%7 bpf=%8 esf=%9 edsp=%10 sbr=%11")
1074  .arg(m_audioTime).arg(m_audbufTimecode) // 1, 2
1075  .arg(main_buffer) // 3
1076  .arg(soundcard_buffer) // 4
1077  .arg(main_buffer+soundcard_buffer) // 5
1078  .arg(m_sampleRate).arg(obpf) // 6, 7
1079  .arg(m_bytesPerFrame) // 8
1080  .arg(m_effStretchFactor) // 9
1081  .arg(m_effDsp).arg(m_sourceBitRate) // 10, 11
1082  );
1083 
1084  return m_audioTime;
1085 }
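To make the formula above concrete, here is a small standalone sketch with hypothetical values (48 kHz stereo S16 output, stretch factor 1.0); it is an illustration, not code from this file:
// The names mirror the members used in GetAudiotime() but the values are made up.
#include <cstdint>
#include <cstdio>

int main()
{
    const int64_t obpf     = 4 * 80;       // output bits per 10 frames (4 bytes per frame)
    const int64_t effdsp   = 48000 * 100;  // 100 * frames per second
    const int64_t esf      = 100000;       // stretch factor 1.0 scaled by 100,000
    const int64_t buffered = 19200;        // bytes in ring buffer + soundcard buffer (0.1 s)

    // same shape as the expression in GetAudiotime()
    const int64_t delay_ms = buffered * esf * 80 / effdsp / obpf;
    std::printf("buffered audio = %lld ms\n", static_cast<long long>(delay_ms)); // prints 100
    return 0;
}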
1086 
1092 void AudioOutputBase::SetAudiotime(int frames, int64_t timecode)
1093 {
1094  int64_t processframes_stretched = 0;
1095  int64_t processframes_unstretched = 0;
1096  int64_t old_audbuf_timecode = m_audbufTimecode;
1097 
1098  if (!m_configureSucceeded)
1099  return;
1100 
1101  if (m_needsUpmix && m_upmixer)
1102  processframes_unstretched -= m_upmixer->frameLatency();
1103 
1104  if (m_pSoundStretch)
1105  {
1106  processframes_unstretched -= m_pSoundStretch->numUnprocessedSamples();
1107  processframes_stretched -= m_pSoundStretch->numSamples();
1108  }
1109 
1110  if (m_encoder)
1111  {
1112  processframes_stretched -= m_encoder->Buffered();
1113  }
1114 
1115  m_audbufTimecode =
1116  timecode + (m_effDsp ? ((frames + processframes_unstretched) * 100000 +
1117  (processframes_stretched * m_effStretchFactor)
1118  ) / m_effDsp : 0);
1119 
1120  // check for timecode wrap and reset audiotime if detected
1121  // timecode will always be monotonic asc if not seeked and reset
1122  // happens if seek or pause happens
1123  if (m_audbufTimecode < old_audbuf_timecode)
1124  m_audioTime = 0;
1125 
1126  VBAUDIOTS(QString("SetAudiotime atc=%1 tc=%2 f=%3 pfu=%4 pfs=%5")
1127  .arg(m_audbufTimecode)
1128  .arg(timecode)
1129  .arg(frames)
1130  .arg(processframes_unstretched)
1131  .arg(processframes_stretched));
1132 #ifdef AUDIOTSTESTING
1133  GetAudiotime();
1134 #endif
1135 }
1136 
1142 int64_t AudioOutputBase::GetAudioBufferedTime(void)
1143 {
1144  int64_t ret = m_audbufTimecode - GetAudiotime();
1145  // Pulse can give us values that make this -ve
1146  if (ret < 0)
1147  return 0;
1148  return ret;
1149 }
1150 
1154 void AudioOutputBase::SetSWVolume(int new_volume, bool save)
1155 {
1156  m_volume = new_volume;
1157  if (save && m_volumeControl != nullptr)
1159 }
1160 
1164 int AudioOutputBase::GetSWVolume(void)
1165 {
1166  return m_volume;
1167 }
1168 
1177 int AudioOutputBase::CheckFreeSpace(int &frames)
1178 {
1179  int bpf = m_bytesPerFrame;
1180  int len = frames * bpf;
1181  int afree = audiofree();
1182 
1183  if (len <= afree)
1184  return len;
1185 
1186  VBERROR(QString("Audio buffer overflow, %1 frames lost!")
1187  .arg(frames - (afree / bpf)));
1188 
1189  frames = afree / bpf;
1190  len = frames * bpf;
1191 
1192  if (!m_srcCtx)
1193  return len;
1194 
1195  int error = src_reset(m_srcCtx);
1196  if (error)
1197  {
1198  VBERROR(QString("Error occurred while resetting resampler: %1")
1199  .arg(src_strerror(error)));
1200  m_srcCtx = nullptr;
1201  }
1202 
1203  return len;
1204 }
1205 
1212 int AudioOutputBase::CopyWithUpmix(char *buffer, int frames, uint &org_waud)
1213 {
1214  int len = CheckFreeSpace(frames);
1215  int bdiff = kAudioRingBufferSize - org_waud;
1216  int bpf = m_bytesPerFrame;
1217  int off = 0;
1218 
1219  if (!m_needsUpmix)
1220  {
1221  int num = len;
1222 
1223  if (bdiff <= num)
1224  {
1225  memcpy(WPOS, buffer, bdiff);
1226  num -= bdiff;
1227  off = bdiff;
1228  org_waud = 0;
1229  }
1230  if (num > 0)
1231  memcpy(WPOS, buffer + off, num);
1232  org_waud = (org_waud + num) % kAudioRingBufferSize;
1233  return len;
1234  }
1235 
1236  // Convert mono to stereo as most devices can't accept mono
1237  if (!m_upmixer)
1238  {
1239  // we're always in the case
1240  // m_configuredChannels == 2 && m_sourceChannels == 1
1241  int bdFrames = bdiff / bpf;
1242  if (bdFrames <= frames)
1243  {
1244  AudioOutputUtil::MonoToStereo(WPOS, buffer, bdFrames);
1245  frames -= bdFrames;
1246  off = bdFrames * sizeof(float); // 1 channel of floats
1247  org_waud = 0;
1248  }
1249  if (frames > 0)
1250  AudioOutputUtil::MonoToStereo(WPOS, buffer + off, frames);
1251 
1252  org_waud = (org_waud + frames * bpf) % kAudioRingBufferSize;
1253  return len;
1254  }
1255 
1256  // Upmix to 6ch via FreeSurround
1257  // Calculate frame size of input
1258  off = m_processing ? sizeof(float) : AudioOutputSettings::SampleSize(m_format);
1259  off *= m_sourceChannels;
1260 
1261  int i = 0;
1262  len = 0;
1263  while (i < frames)
1264  {
1265  i += m_upmixer->putFrames(buffer + i * off, frames - i, m_sourceChannels);
1266  int nFrames = m_upmixer->numFrames();
1267  if (!nFrames)
1268  continue;
1269 
1270  len += CheckFreeSpace(nFrames);
1271 
1272  int bdFrames = (kAudioRingBufferSize - org_waud) / bpf;
1273  if (bdFrames < nFrames)
1274  {
1275  if ((org_waud % bpf) != 0)
1276  {
1277  VBERROR(QString("Upmixing: org_waud = %1 (bpf = %2)")
1278  .arg(org_waud)
1279  .arg(bpf));
1280  }
1281  m_upmixer->receiveFrames((float *)(WPOS), bdFrames);
1282  nFrames -= bdFrames;
1283  org_waud = 0;
1284  }
1285  if (nFrames > 0)
1286  m_upmixer->receiveFrames((float *)(WPOS), nFrames);
1287 
1288  org_waud = (org_waud + nFrames * bpf) % kAudioRingBufferSize;
1289  }
1290  return len;
1291 }
1292 
1298 bool AudioOutputBase::AddFrames(void *in_buffer, int in_frames,
1299  int64_t timecode)
1300 {
1301  return AddData(in_buffer, in_frames * m_sourceBytesPerFrame, timecode,
1302  in_frames);
1303 }
1304 
1310 bool AudioOutputBase::AddData(void *in_buffer, int in_len,
1311  int64_t timecode, int /*in_frames*/)
1312 {
1313  int frames = in_len / m_sourceBytesPerFrame;
1314  int bpf = m_bytesPerFrame;
1315  int len = in_len;
1316  bool music = false;
1317 
1318  if (!m_configureSucceeded)
1319  {
1320  LOG(VB_GENERAL, LOG_ERR, "AddData called with audio framework not "
1321  "initialised");
1322  m_lengthLastData = 0;
1323  return false;
1324  }
1325 
1326  /* See if we're waiting for new samples to be buffered before we unpause
1327  post channel change, seek, etc. Wait for 4 fragments to be buffered */
1329  {
1330  m_unpauseWhenReady = false;
1331  Pause(false);
1332  }
1333 
1334  // Don't write new samples if we're resetting the buffer or reconfiguring
1335  QMutexLocker lock(&m_audioBufLock);
1336 
1337  uint org_waud = m_waud;
1338  int afree = audiofree();
1339  int used = kAudioRingBufferSize - afree;
1340 
1341  if (m_passthru && m_spdifEnc)
1342  {
1343  if (m_processing)
1344  {
1345  /*
1346  * We shouldn't encounter this case, but it can occur when
1347  * timestretch just got activated. So we will just drop the
1348  * data
1349  */
1350  LOG(VB_AUDIO, LOG_INFO,
1351  "Passthrough activated with audio processing. Dropping audio");
1352  return false;
1353  }
1354  // mux into an IEC958 packet
1355  m_spdifEnc->WriteFrame((unsigned char *)in_buffer, len);
1356  len = m_spdifEnc->GetProcessedSize();
1357  if (len > 0)
1358  {
1359  in_buffer = m_spdifEnc->GetProcessedBuffer();
1360  m_spdifEnc->Reset();
1361  frames = len / m_sourceBytesPerFrame;
1362  }
1363  else
1364  frames = 0;
1365  }
1366  m_lengthLastData = (int64_t)
1367  ((double)(len * 1000) / (m_sourceSampleRate * m_sourceBytesPerFrame));
1368 
1369  VBAUDIOTS(QString("AddData frames=%1, bytes=%2, used=%3, free=%4, "
1370  "timecode=%5 needsupmix=%6")
1371  .arg(frames).arg(len).arg(used).arg(afree).arg(timecode)
1372  .arg(m_needsUpmix));
1373 
1374  // Mythmusic doesn't give us timestamps
1375  if (timecode < 0)
1376  {
1377  timecode = (m_framesBuffered * 1000) / m_sourceSampleRate;
1378  m_framesBuffered += frames;
1379  music = true;
1380  }
1381 
1382  if (hasVisual())
1383  {
1384  // Send original samples to any attached visualisations
1385  dispatchVisual((uchar *)in_buffer, len, timecode, m_sourceChannels,
1386  AudioOutputSettings::FormatToBits(m_format));
1387  }
1388 
1389  // Calculate amount of free space required in ringbuffer
1390  if (m_processing)
1391  {
1392  int sampleSize = AudioOutputSettings::SampleSize(m_format);
1393  if (sampleSize <= 0)
1394  {
1395  // Would lead to division by zero (or unexpected results if negative)
1396  VBERROR("Sample size is <= 0, AddData returning false");
1397  return false;
1398  }
1399 
1400  // Final float conversion space requirement
1401  len = sizeof(*m_srcInBuf) / sampleSize * len;
1402 
1403  // Account for changes in number of channels
1404  if (m_needsDownmix)
1405  len = (len * m_configuredChannels ) / m_sourceChannels;
1406 
1407  // Check we have enough space to write the data
1408  if (m_needResampler && m_srcCtx)
1409  len = lround(ceil(static_cast<double>(len) * m_srcData.src_ratio));
1410 
1411  if (m_needsUpmix)
1412  len = (len * m_configuredChannels ) / m_sourceChannels;
1413 
1414  // Include samples in upmix buffer that may be flushed
1415  if (m_needsUpmix && m_upmixer)
1416  len += m_upmixer->numUnprocessedFrames() * bpf;
1417 
1418  // Include samples in soundstretch buffers
1419  if (m_pSoundStretch)
1420  len += (m_pSoundStretch->numUnprocessedSamples() +
1421  (int)(m_pSoundStretch->numSamples() / m_stretchFactor)) * bpf;
1422  }
1423 
1424  if (len > afree)
1425  {
1426  VBERROR("Buffer is full, AddData returning false");
1427  return false; // would overflow
1428  }
1429 
1430  int frames_remaining = frames;
1431  int frames_final = 0;
1432  int maxframes = (kAudioSRCInputSize / m_sourceChannels) & ~0xf;
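// Note (not in the original): "& ~0xf" rounds the per-pass frame count down to a
// multiple of 16; e.g. if kAudioSRCInputSize / m_sourceChannels were 2730, maxframes would be 2720.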
1433  int offset = 0;
1434 
1435  while(frames_remaining > 0)
1436  {
1437  void *buffer = (char *)in_buffer + offset;
1438  frames = frames_remaining;
1439  len = frames * m_sourceBytesPerFrame;
1440 
1441  if (m_processing)
1442  {
1443  if (frames > maxframes)
1444  {
1445  frames = maxframes;
1446  len = frames * m_sourceBytesPerFrame;
1447  offset += len;
1448  }
1449  // Convert to floats
1450  AudioOutputUtil::toFloat(m_format, m_srcIn, buffer, len);
1451  }
1452 
1453  frames_remaining -= frames;
1454 
1455  // Perform downmix if necessary
1456  if (m_needsDownmix)
1457  {
1458  if (AudioOutputDownmix::DownmixFrames(m_sourceChannels,
1459  m_configuredChannels,
1460  m_srcIn, m_srcIn, frames) < 0)
1461  VBERROR("Error occurred while downmixing");
1462  }
1463 
1464  // Resample if necessary
1465  if (m_needResampler && m_srcCtx)
1466  {
1467  m_srcData.input_frames = frames;
1468  int error = src_process(m_srcCtx, &m_srcData);
1469 
1470  if (error)
1471  VBERROR(QString("Error occurred while resampling audio: %1")
1472  .arg(src_strerror(error)));
1473 
1474  buffer = m_srcOut;
1475  frames = m_srcData.output_frames_gen;
1476  }
1477  else if (m_processing)
1478  buffer = m_srcIn;
1479 
1480  /* we want the timecode of the last sample added but we are given the
1481  timecode of the first - add the time in ms that the frames added
1482  represent */
1483 
1484  // Copy samples into audiobuffer, with upmix if necessary
1485  if ((len = CopyWithUpmix((char *)buffer, frames, org_waud)) <= 0)
1486  {
1487  continue;
1488  }
1489 
1490  frames = len / bpf;
1491  frames_final += frames;
1492 
1493  int bdiff = kAudioRingBufferSize - m_waud;
1494  if ((len % bpf) != 0 && bdiff < len)
1495  {
1496  VBERROR(QString("AddData: Corruption likely: len = %1 (bpf = %2)")
1497  .arg(len)
1498  .arg(bpf));
1499  }
1500  if ((bdiff % bpf) != 0 && bdiff < len)
1501  {
1502  VBERROR(QString("AddData: Corruption likely: bdiff = %1 (bpf = %2)")
1503  .arg(bdiff)
1504  .arg(bpf));
1505  }
1506 
1507  if (m_pSoundStretch)
1508  {
1509  // does not change the timecode, only the number of samples
1510  org_waud = m_waud;
1511  int bdFrames = bdiff / bpf;
1512 
1513  if (bdiff < len)
1514  {
1515  m_pSoundStretch->putSamples((STST *)(WPOS), bdFrames);
1516  m_pSoundStretch->putSamples((STST *)ABUF, (len - bdiff) / bpf);
1517  }
1518  else
1519  m_pSoundStretch->putSamples((STST *)(WPOS), frames);
1520 
1521  int nFrames = m_pSoundStretch->numSamples();
1522  if (nFrames > frames)
1523  CheckFreeSpace(nFrames);
1524 
1525  len = nFrames * bpf;
1526 
1527  if (nFrames > bdFrames)
1528  {
1529  nFrames -= m_pSoundStretch->receiveSamples((STST *)(WPOS),
1530  bdFrames);
1531  org_waud = 0;
1532  }
1533  if (nFrames > 0)
1534  nFrames = m_pSoundStretch->receiveSamples((STST *)(WPOS),
1535  nFrames);
1536 
1537  org_waud = (org_waud + nFrames * bpf) % kAudioRingBufferSize;
1538  }
1539 
1540  if (m_internalVol && SWVolume())
1541  {
1542  org_waud = m_waud;
1543  int num = len;
1544 
1545  if (bdiff <= num)
1546  {
1546  {
1547  AudioOutputUtil::AdjustVolume(WPOS, bdiff, m_volume,
1548  music, m_needsUpmix && m_upmixer);
1549  num -= bdiff;
1550  org_waud = 0;
1551  }
1552  if (num > 0)
1553  AudioOutputUtil::AdjustVolume(WPOS, num, m_volume,
1554  music, m_needsUpmix && m_upmixer);
1555  org_waud = (org_waud + num) % kAudioRingBufferSize;
1556  }
1557 
1558  if (m_encoder)
1559  {
1560  org_waud = m_waud;
1561  int to_get = 0;
1562 
1563  if (bdiff < len)
1564  {
1566  to_get = m_encoder->Encode(ABUF, len - bdiff,
1568  }
1569  else
1570  {
1571  to_get = m_encoder->Encode(WPOS, len,
1573  }
1574 
1575  if (bdiff <= to_get)
1576  {
1577  m_encoder->GetFrames(WPOS, bdiff);
1578  to_get -= bdiff ;
1579  org_waud = 0;
1580  }
1581  if (to_get > 0)
1582  m_encoder->GetFrames(WPOS, to_get);
1583 
1584  org_waud = (org_waud + to_get) % kAudioRingBufferSize;
1585  }
1586 
1587  m_waud = org_waud;
1588  }
1589 
1590  SetAudiotime(frames_final, timecode);
1591 
1592  return true;
1593 }
1594 
1598 void AudioOutputBase::Status(void)
1599 {
1600  long ct = GetAudiotime();
1601 
1602  if (ct < 0)
1603  ct = 0;
1604 
1605  if (m_sourceBitRate == -1)
1608 
1609  if (ct / 1000 != m_currentSeconds)
1610  {
1611  m_currentSeconds = ct / 1000;
1614  dispatch(e);
1615  }
1616 }
1617 
1622 void AudioOutputBase::GetBufferStatus(uint &fill, uint &total)
1623 {
1624  fill = kAudioRingBufferSize - audiofree();
1625  total = kAudioRingBufferSize;
1626 }
1627 
1633 void AudioOutputBase::OutputAudioLoop(void)
1634 {
1635  auto *zeros = new uchar[m_fragmentSize];
1636  auto *fragment_buf = new uchar[m_fragmentSize + 16];
1637  auto *fragment = (uchar *)AOALIGN(fragment_buf[0]);
1638  memset(zeros, 0, m_fragmentSize);
1639 
1640  // to reduce startup latency, write silence in 8ms chunks
1641  int zero_fragment_size = 8 * m_sampleRate * m_outputBytesPerFrame / 1000;
1642  if (zero_fragment_size > m_fragmentSize)
1643  zero_fragment_size = m_fragmentSize;
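// Example (not in the original): at 48 kHz with 4-byte output frames,
// 8 * 48000 * 4 / 1000 = 1536 bytes of silence per chunk.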
1644 
1645  while (!m_killAudio)
1646  {
1647  if (m_pauseAudio)
1648  {
1649  if (!m_actuallyPaused)
1650  {
1651  VBAUDIO("OutputAudioLoop: audio paused");
1652  OutputEvent e(OutputEvent::Paused);
1653  dispatch(e);
1654  m_wasPaused = true;
1655  }
1656 
1657  m_actuallyPaused = true;
1658  m_audioTime = 0; // mark 'audiotime' as invalid.
1659 
1660  WriteAudio(zeros, zero_fragment_size);
1661  continue;
1662  }
1663 
1664  if (m_wasPaused)
1665  {
1666  VBAUDIO("OutputAudioLoop: Play Event");
1667  OutputEvent e(OutputEvent::Playing);
1668  dispatch(e);
1669  m_wasPaused = false;
1670  }
1671 
1672  /* do audio output */
1673  int ready = audioready();
1674 
1675  // wait for the buffer to fill with enough to play
1676  if (m_fragmentSize > ready)
1677  {
1678  if (ready > 0) // only log if we're sending some audio
1679  {
1680  VBAUDIOTS(QString("audio waiting for buffer to fill: "
1681  "have %1 want %2")
1682  .arg(ready).arg(m_fragmentSize));
1683  }
1684 
1685  usleep(10000);
1686  continue;
1687  }
1688 
1689 #ifdef AUDIOTSTESTING
1690  VBAUDIOTS("WriteAudio Start");
1691 #endif
1692  Status();
1693 
1694  // delay setting raud until after phys buffer is filled
1695  // so GetAudiotime will be accurate without locking
1697  volatile uint next_raud = m_raud;
1698  if (GetAudioData(fragment, m_fragmentSize, true, &next_raud))
1699  {
1700  if (!m_resetActive.TestAndDeref())
1701  {
1702  WriteAudio(fragment, m_fragmentSize);
1703  if (!m_resetActive.TestAndDeref())
1704  m_raud = next_raud;
1705  }
1706  }
1707 #ifdef AUDIOTSTESTING
1708  GetAudiotime();
1709  VBAUDIOTS("WriteAudio Done");
1710 #endif
1711 
1712  }
1713 
1714  delete[] zeros;
1715  delete[] fragment_buf;
1716  VBAUDIO("OutputAudioLoop: Stop Event");
1717  OutputEvent e(OutputEvent::Stopped);
1718  dispatch(e);
1719 }
1720 
1728 int AudioOutputBase::GetAudioData(uchar *buffer, int size, bool full_buffer,
1729  volatile uint *local_raud)
1730 {
1731 
1732 #define LRPOS (m_audioBuffer + *local_raud)
1733  // re-check audioready() in case things changed.
1734  // for example, ClearAfterSeek() might have run
1735  int avail_size = audioready();
1736  int frag_size = size;
1737  int written_size = size;
1738 
1739  if (local_raud == nullptr)
1740  local_raud = &m_raud;
1741 
1742  if (!full_buffer && (size > avail_size))
1743  {
1744  // when full_buffer is false, return any available data
1745  frag_size = avail_size;
1746  written_size = frag_size;
1747  }
1748 
1749  if (!avail_size || (frag_size > avail_size))
1750  return 0;
1751 
1752  int bdiff = kAudioRingBufferSize - m_raud;
1753 
1754  int obytes = AudioOutputSettings::SampleSize(m_outputFormat);
1755 
1756  if (obytes <= 0)
1757  return 0;
1758 
1759  bool fromFloats = m_processing && !m_enc && m_outputFormat != FORMAT_FLT;
1760 
1761  // Scale if necessary
1762  if (fromFloats && obytes != sizeof(float))
1763  frag_size *= sizeof(float) / obytes;
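// Note (not in the original): e.g. for S16 output obytes == 2, so twice as many
// ring-buffer bytes (floats) are consumed as will be written to the caller's buffer.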
1764 
1765  int off = 0;
1766 
1767  if (bdiff <= frag_size)
1768  {
1769  if (fromFloats)
1770  {
1771  off = AudioOutputUtil::fromFloat(m_outputFormat, buffer,
1772  LRPOS, bdiff);
1773  }
1774  else
1775  {
1776  memcpy(buffer, LRPOS, bdiff);
1777  off = bdiff;
1778  }
1779 
1780  frag_size -= bdiff;
1781  *local_raud = 0;
1782  }
1783  if (frag_size > 0)
1784  {
1785  if (fromFloats)
1786  {
1787  AudioOutputUtil::fromFloat(m_outputFormat, buffer + off,
1788  LRPOS, frag_size);
1789  }
1790  else
1791  {
1792  memcpy(buffer + off, LRPOS, frag_size);
1793  }
1794  }
1795 
1796  *local_raud += frag_size;
1797 
1798  // Mute individual channels through mono->stereo duplication
1799  MuteState mute_state = GetMuteState();
1800  if (!m_enc && !m_passthru &&
1801  written_size && m_configuredChannels > 1 &&
1802  (mute_state == kMuteLeft || mute_state == kMuteRight))
1803  {
1804  AudioOutputUtil::MuteChannel(obytes << 3, m_configuredChannels,
1805  mute_state == kMuteLeft ? 0 : 1,
1806  buffer, written_size);
1807  }
1808 
1809  return written_size;
1810 }
1811 
1815 void AudioOutputBase::Drain(void)
1816 {
1817  while (!m_pauseAudio && audioready() > m_fragmentSize)
1818  usleep(1000);
1819  if (m_pauseAudio)
1820  {
1821  // Audio is paused and can't be drained, clear ringbuffer
1822  QMutexLocker lock(&m_audioBufLock);
1823 
1824  m_waud = m_raud = 0;
1825  }
1826 }
1827 
1831 void AudioOutputBase::run(void)
1832 {
1833  RunProlog();
1834  VBAUDIO(QString("kickoffOutputAudioLoop: pid = %1").arg(getpid()));
1835  OutputAudioLoop();
1836  VBAUDIO("kickoffOutputAudioLoop exiting");
1837  RunEpilog();
1838 }
1839 
1840 int AudioOutputBase::readOutputData(unsigned char* /*read_buffer*/, int /*max_length*/)
1841 {
1842  VBERROR("AudioOutputBase should not be getting asked to readOutputData()");
1843  return 0;
1844 }