Ticket #13446: 20190512_1505_working_latency_cleaned.patch
File 20190512_1505_working_latency_cleaned.patch, 29.3 KB (added by , 5 years ago)
new file mythtv/android-package-source/src/org/mythtv/audio/AudioOutputAudioTrack.java
diff --git a/mythtv/android-package-source/src/org/mythtv/audio/AudioOutputAudioTrack.java b/mythtv/android-package-source/src/org/mythtv/audio/AudioOutputAudioTrack.java
new file mode 100644
index 00000000000..7f2b7f296b3
package org.mythtv.audio;

import android.media.AudioTrack;
import android.media.AudioAttributes;
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioTimestamp;
import java.nio.ByteBuffer;

public class AudioOutputAudioTrack
{
    AudioTrack player;
    AudioTimestamp timestamp = new AudioTimestamp();
    long timelasttaken;
    int samplerate;
    long firstwritetime;
    long lastwritetime;
    int bufferedBytes;
    Object syncBuffer;
    int bufferSize;
    int channels;
    int bitsPer10Frames;
    long bytesWritten;
    int latency;
    int latencyTot;
    int latencyCount;
    boolean isSettled;
    boolean isBufferInFlux;

    public AudioOutputAudioTrack(int encoding, int sampleRate, int bufferSize, int channels)
    {
        syncBuffer = new Object();
        this.bufferSize = bufferSize;
        this.channels = channels;
        AudioAttributes.Builder aab = new AudioAttributes.Builder();
        aab.setUsage(AudioAttributes.USAGE_MEDIA);
        aab.setContentType(AudioAttributes.CONTENT_TYPE_MOVIE);
        AudioAttributes aa = aab.build();

        AudioFormat.Builder afb = new AudioFormat.Builder();
        afb.setEncoding(encoding);
        afb.setSampleRate(sampleRate);
        int channelMask = 0;
        switch (channels)
        {
            case 8:
                channelMask |= AudioFormat.CHANNEL_OUT_BACK_LEFT | AudioFormat.CHANNEL_OUT_BACK_RIGHT;
                // fall through
            case 6:
                channelMask |= AudioFormat.CHANNEL_OUT_SIDE_LEFT | AudioFormat.CHANNEL_OUT_SIDE_RIGHT
                    | AudioFormat.CHANNEL_OUT_FRONT_CENTER | AudioFormat.CHANNEL_OUT_LOW_FREQUENCY;
                // fall through
            case 2:
                channelMask |= AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
                break;
            case 1:
                channelMask |= AudioFormat.CHANNEL_OUT_FRONT_CENTER;
                break;
            default:
                // default treated as 2 channel (stereo)
                channelMask |= AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
                break;
        }
        afb.setChannelMask(channelMask);
        AudioFormat af = afb.build();
        samplerate = sampleRate;
        int state = 0;

        for (int i = 0; i < 10; i++)
        {
            player = new AudioTrack(aa, af, bufferSize,
                AudioTrack.MODE_STREAM, AudioManager.AUDIO_SESSION_ID_GENERATE);
            state = player.getState();
            if (state == AudioTrack.STATE_INITIALIZED)
                break;
            try
            {
                Thread.sleep(50);
            }
            catch (InterruptedException ex) { }
        }

        player.play();
    }

    public int write(byte[] audioData, int sizeInBytes)
    {
        // Guard before the getPlayState call below; the null check must
        // come before player is first dereferenced.
        if (player == null)
            return AudioTrack.ERROR;
        isBufferInFlux = false;
        if (player.getPlayState() != AudioTrack.PLAYSTATE_PLAYING)
            player.play();
        if (firstwritetime == 0)
            firstwritetime = System.nanoTime();
        ByteBuffer buf = ByteBuffer.wrap(audioData);
        int written = 0;
        int ret = 0;
        while (buf.hasRemaining())
        {
            ret = player.write(buf, buf.remaining(), AudioTrack.WRITE_NON_BLOCKING);
            if (ret < 0)
                break;
            written += ret;
            synchronized(syncBuffer)
            {
                bytesWritten += ret;
                lastwritetime = System.nanoTime();
                // Note that only after this method returns is this data
                // removed from the caller's buffer.
                // bufferedBytes may be negative because some data still
                // in the "Audio circular buffer" may have already played.
                bufferedBytes = buf.remaining() - sizeInBytes;
            }
            try
            {
                Thread.sleep(10);
            }
            catch (InterruptedException ex) {}
        }
        synchronized(syncBuffer)
        {
            // After we return to the caller, the data will be removed from
            // the "Audio circular buffer", so the buffered bytes are now zero.
            isBufferInFlux = true;
            bufferedBytes = 0;
        }
        return written;
    }

    public void resetFlux()
    {
        isBufferInFlux = false;
    }

    public int getBufferedBytes()
    {
        int ret;
        synchronized(syncBuffer)
        {
            if (isBufferInFlux)
            {
                try
                {
                    // Sleep 1 millisec to let the code get from write to
                    // data removal from the "Audio circular buffer".
                    // This is a crude way of doing it. A better way would
                    // be a call to resetFlux after "m_raud = next_raud"
                    // and a wait here to ensure this is only done after
                    // that resetFlux.
                    Thread.sleep(1);
                }
                catch (InterruptedException ex) {}
            }
            ret = bufferedBytes;
        }
        return ret;
    }

    // Get latency in milliseconds, averaged over the first 10 seconds
    // of playback plus at least 100 readings.
    public int getLatencyViaHeadPosition()
    {
        if (!isSettled && player != null && bytesWritten > 0
            && bitsPer10Frames > 0)
        {
            int headPos = player.getPlaybackHeadPosition();
            synchronized(syncBuffer)
            {
                long frameWritten = bytesWritten * 80 / bitsPer10Frames;
                long framesInProg = frameWritten - headPos;
                int new_latency = (int)(framesInProg * 1000 / samplerate);
                if (new_latency >= 0 && new_latency < 1500)
                {
                    latencyTot += new_latency;
                    latency = latencyTot / (++latencyCount);
                }
            }
            if ((headPos > samplerate * 10 && latencyCount > 100)
                || latencyCount > 1000)
                isSettled = true;
        }
        return latency;
    }

    public void setBitsPer10Frames(int bitsPer10Frames)
    {
        this.bitsPer10Frames = bitsPer10Frames;
    }

    public void pause(boolean doPause)
    {
        if (player == null)
            return;

        if (doPause)
        {
            if (player.getPlayState() == AudioTrack.PLAYSTATE_PLAYING)
                player.pause();
        }
        else
        {
            if (player.getPlayState() != AudioTrack.PLAYSTATE_PLAYING)
                player.play();
        }
    }

    public void release()
    {
        if (player != null)
            player.release();
        player = null;
    }
}
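A worked example of the head-position latency estimate in getLatencyViaHeadPosition may help review. This sketch is illustrative only, not part of the patch; the sample values (48 kHz, 16-bit stereo, five seconds written) are assumptions chosen to make the arithmetic visible.

// Illustrative only, not part of the patch. Assumes 48000 Hz, 16-bit
// stereo PCM: 4 bytes per frame, so 320 bits per 10 frames.
long bytesWritten = 960000;      // 5 seconds of audio handed to write()
int bitsPer10Frames = 320;
int samplerate = 48000;
int headPos = 236160;            // frames the device has actually played

long frameWritten = bytesWritten * 80 / bitsPer10Frames;   // 240000 frames
long framesInProg = frameWritten - headPos;                // 3840 frames queued
int latencyMs = (int)(framesInProg * 1000 / samplerate);   // 80 ms
// getLatencyViaHeadPosition averages such readings until "settled".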
mythtv/libs/libmyth/audio/audiooutput.cpp
diff --git a/mythtv/libs/libmyth/audio/audiooutput.cpp b/mythtv/libs/libmyth/audio/audiooutput.cpp
index 73f8259bda7..54f1edacea7 100644
--- a/mythtv/libs/libmyth/audio/audiooutput.cpp
+++ b/mythtv/libs/libmyth/audio/audiooutput.cpp
@@ … @@ using namespace std;
 #endif
 #ifdef Q_OS_ANDROID
 #include "audiooutputopensles.h"
+#include "audiooutputaudiotrack.h"
 #endif
 #ifdef USING_OPENMAX
 #include "audiooutput_omx.h"
@@ … @@ AudioOutput *AudioOutput::OpenAudio(AudioSettings &settings,
         LOG(VB_GENERAL, LOG_ERR, "Audio output device is set to a OpenSLES "
                                  "device but Android support is not compiled "
                                  "in!");
+#endif
+    }
+    else if (main_device.startsWith("AudioTrack:"))
+    {
+#ifdef Q_OS_ANDROID
+        ret = new AudioOutputAudioTrack(settings);
+#else
+        LOG(VB_GENERAL, LOG_ERR, "Audio output device is set to AudioTrack "
+                                 "device but Android support is not compiled "
+                                 "in!");
 #endif
     }
     else if (main_device.startsWith("OpenMAX:"))
@@ … @@ AudioOutput::ADCVect* AudioOutput::GetOutputList(void)
 #ifdef ANDROID
     {
         QString name = "OpenSLES:";
-        QString desc = tr("OpenSLES default output.");
+        QString desc = tr("OpenSLES default output. Stereo support only.");
+        adc = GetAudioDeviceConfig(name, desc);
+        if (adc)
+        {
+            list->append(*adc);
+            delete adc;
+        }
+    }
+    {
+        QString name = "AudioTrack:";
+        QString desc = tr("Android AudioTrack output. Supports surround sound.");
         adc = GetAudioDeviceConfig(name, desc);
         if (adc)
         {
mythtv/libs/libmyth/audio/audiooutput_omx.cpp
diff --git a/mythtv/libs/libmyth/audio/audiooutput_omx.cpp b/mythtv/libs/libmyth/audio/audiooutput_omx.cpp
index d79b786053d..1691ebffecd 100644
--- a/mythtv/libs/libmyth/audio/audiooutput_omx.cpp
+++ b/mythtv/libs/libmyth/audio/audiooutput_omx.cpp
@@ … @@ int AudioOutputOMX::GetBufferedOnSoundcard(void) const
 }
 
 #ifdef USING_BROADCOM
+    // output bits per 10 frames
+    int obpf;
+    if (m_passthru && !usesSpdif())
+        obpf = m_source_bitrate * 10 / m_source_samplerate;
+    else
+        obpf = m_output_bytes_per_frame * 80;
+
     OMX_PARAM_U32TYPE u;
     OMX_DATA_INIT(u);
     u.nPortIndex = m_audiorender.Base();
@@ … @@ int AudioOutputOMX::GetBufferedOnSoundcard(void) const
             "GetConfig AudioRenderingLatency error %1").arg(Error2String(e)));
         return 0;
     }
-    return u.nU32 * m_output_bytes_per_frame;
+    return u.nU32 * obpf / 80;
 #else
     return m_pending;
 #endif
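The reason for counting bits per 10 frames rather than bytes per frame is integer precision on passthrough streams, where the bitrate rarely divides evenly by the sample rate. A quick illustration (not from the patch; the AC3 figures are typical values assumed for the example):

// Illustrative only. PCM: 16-bit stereo = 4 bytes/frame -> 320 bits/10 frames.
int pcmObpf = 4 * 80;                              // 320

// AC3 passthrough at 448000 bits/sec over 48000 frames/sec:
// bytes per frame would be 448000 / 8 / 48000 = 1.166..., which truncates
// to 1 in integer math; bits per 10 frames keeps useful precision.
int ac3Obpf = 448000 * 10 / 48000;                 // 93

// Converting a reported frame count back to buffered bytes, as the
// patched return statement does with "u.nU32 * obpf / 80":
long frames = 48000;                               // one second of audio
long pcmBytes = frames * pcmObpf / 80;             // 192000
long ac3Bytes = frames * ac3Obpf / 80;             // 55800 (~56000 exact)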
new file mythtv/libs/libmyth/audio/audiooutputaudiotrack.cpp
diff --git a/mythtv/libs/libmyth/audio/audiooutputaudiotrack.cpp b/mythtv/libs/libmyth/audio/audiooutputaudiotrack.cpp
new file mode 100644
index 00000000000..1d3d7cdf7b2
#include "config.h"

using namespace std;

#include <QAndroidJniObject>
#include <QAndroidJniEnvironment>
#include <android/log.h>

#include "mythlogging.h"
#include "audiooutputaudiotrack.h"

#define CHANNELS_MIN 1
#define CHANNELS_MAX 8

#define ANDROID_EXCEPTION_CHECK \
    if (env->ExceptionCheck()) { \
        env->ExceptionDescribe(); \
        env->ExceptionClear(); \
        exception=true; \
    } else \
        exception=false;

// clear exception without checking
#define ANDROID_EXCEPTION_CLEAR \
    if (env->ExceptionCheck()) { \
        env->ExceptionDescribe(); \
        env->ExceptionClear(); \
    }

#define LOC QString("AudioTrack: ")

// Constants from the Android Java API
// class android.media.AudioFormat
#define AF_CHANNEL_OUT_MONO 4
#define AF_CHANNEL_OUT_STEREO 12
#define AF_CHANNEL_OUT_SURROUND 1052
#define AF_ENCODING_AC3 5
#define AF_ENCODING_E_AC3 6
#define AF_ENCODING_DTS 7
#define AF_ENCODING_DOLBY_TRUEHD 14
#define AF_ENCODING_PCM_8BIT 3
#define AF_ENCODING_PCM_16BIT 2
#define AF_ENCODING_PCM_FLOAT 4

AudioOutputAudioTrack::AudioOutputAudioTrack(const AudioSettings &settings) :
    AudioOutputBase(settings)
{
    InitSettings(settings);
    if (settings.m_init)
        Reconfigure(settings);
}

AudioOutputAudioTrack::~AudioOutputAudioTrack()
{
    KillAudio();
    CloseDevice();
}

bool AudioOutputAudioTrack::OpenDevice()
{
    bool exception=false;
    QAndroidJniEnvironment env;
    jint encoding = 0;
    jint sampleRate = m_samplerate;

    // m_bitsPer10Frames = output bits per 10 frames
    m_bitsPer10Frames = m_output_bytes_per_frame * 80;

    if ((m_passthru || m_enc) && m_source_bitrate > 0)
        m_bitsPer10Frames = m_source_bitrate * 10 / m_source_samplerate;

    // 50 milliseconds
    m_fragment_size = m_bitsPer10Frames * m_source_samplerate * 5 / 8000;

    if (m_fragment_size < 1536)
        m_fragment_size = 1536;

    if (m_passthru || m_enc)
    {
        switch (m_codec)
        {
            case AV_CODEC_ID_AC3:
                encoding = AF_ENCODING_AC3;
                break;
            case AV_CODEC_ID_DTS:
                encoding = AF_ENCODING_DTS;
                break;
            case AV_CODEC_ID_EAC3:
                encoding = AF_ENCODING_E_AC3;
                break;
            case AV_CODEC_ID_TRUEHD:
                encoding = AF_ENCODING_DOLBY_TRUEHD;
                break;
            default:
                LOG(VB_GENERAL, LOG_ERR, LOC + __func__
                    + QString(" No support for audio passthru encoding %1")
                    .arg(m_codec));
                return false;
        }
    }
    else
    {
        switch (m_output_format)
        {
            case FORMAT_U8:
                // This could be used to get the value from java instead
                // of hard-coding these constants in our header file.
                // encoding = QAndroidJniObject::getStaticField<jint>
                //     ("android.media.AudioFormat","ENCODING_PCM_8BIT");
                encoding = AF_ENCODING_PCM_8BIT;
                break;
            case FORMAT_S16:
                encoding = AF_ENCODING_PCM_16BIT;
                break;
            case FORMAT_FLT:
                encoding = AF_ENCODING_PCM_FLOAT;
                break;
            default:
                LOG(VB_GENERAL, LOG_ERR, LOC + __func__
                    + QString(" No support for audio format %1")
                    .arg(m_output_format));
                return false;
        }
    }

    jint minBufferSize = m_fragment_size * 4;
    m_soundcard_buffer_size = minBufferSize;
    jint channels = m_channels;

    m_audioTrack = new QAndroidJniObject("org/mythtv/audio/AudioOutputAudioTrack",
        "(IIII)V", encoding, sampleRate, minBufferSize, channels);
    ANDROID_EXCEPTION_CHECK

    if (exception)
    {
        LOG(VB_GENERAL, LOG_ERR, LOC + __func__
            + QString(" Java Exception when creating AudioTrack"));
        m_audioTrack = nullptr;
        return false;
    }
    if (!m_passthru && !m_enc)
    {
        jint bitsPer10Frames = m_bitsPer10Frames;
        m_audioTrack->callMethod<void>("setBitsPer10Frames","(I)V",bitsPer10Frames);
    }
    return true;
}

void AudioOutputAudioTrack::CloseDevice()
{
    QAndroidJniEnvironment env;
    if (m_audioTrack)
    {
        m_audioTrack->callMethod<void>("release");
        ANDROID_EXCEPTION_CLEAR
        delete m_audioTrack;
        m_audioTrack = nullptr;
    }
}

AudioOutputSettings* AudioOutputAudioTrack::GetOutputSettings(bool /* digital */)
{
    bool exception=false;
    QAndroidJniEnvironment env;
    jint bufsize = 0;

    AudioOutputSettings *settings = new AudioOutputSettings();

    int supportedrate = 0;
    while (int rate = settings->GetNextRate())
    {
        // Checking for valid rates using getMinBufferSize.
        // See https://stackoverflow.com/questions/8043387/android-audiorecord-supported-sampling-rates/22317382
        bufsize = QAndroidJniObject::callStaticMethod<jint>
            ("android/media/AudioTrack", "getMinBufferSize", "(III)I",
             rate, AF_CHANNEL_OUT_MONO, AF_ENCODING_PCM_16BIT);
        ANDROID_EXCEPTION_CHECK
        if (bufsize > 0 && !exception)
        {
            settings->AddSupportedRate(rate);
            // save any supported rate for later
            supportedrate = rate;
        }
    }

    // Checking for valid formats using getMinBufferSize.
    bufsize = QAndroidJniObject::callStaticMethod<jint>
        ("android/media/AudioTrack", "getMinBufferSize", "(III)I",
         supportedrate, AF_CHANNEL_OUT_MONO, AF_ENCODING_PCM_8BIT);
    ANDROID_EXCEPTION_CHECK
    if (bufsize > 0 && !exception)
        settings->AddSupportedFormat(FORMAT_U8);
    // 16bit always supported
    settings->AddSupportedFormat(FORMAT_S16);

    bufsize = QAndroidJniObject::callStaticMethod<jint>
        ("android/media/AudioTrack", "getMinBufferSize", "(III)I",
         supportedrate, AF_CHANNEL_OUT_MONO, AF_ENCODING_PCM_FLOAT);
    ANDROID_EXCEPTION_CHECK
    if (bufsize > 0 && !exception)
        settings->AddSupportedFormat(FORMAT_FLT);

    for (uint channels = CHANNELS_MIN; channels <= CHANNELS_MAX; channels++)
    {
        settings->AddSupportedChannels(channels);
    }
    settings->setPassthrough(0);

    return settings;
}

void AudioOutputAudioTrack::WriteAudio(unsigned char* aubuf, int size)
{
    bool exception=false;
    QAndroidJniEnvironment env;
    if (m_actually_paused)
    {
        jboolean param = true;
        m_audioTrack->callMethod<void>("pause","(Z)V",param);
        ANDROID_EXCEPTION_CLEAR
        return;
    }
    // create a java byte array
    jbyteArray arr = env->NewByteArray(size);
    env->SetByteArrayRegion(arr, 0, size, reinterpret_cast<jbyte*>(aubuf));
    jint ret = -99;
    if (m_audioTrack)
    {
        ret = m_audioTrack->callMethod<jint>("write","([BI)I", arr, size);
        ANDROID_EXCEPTION_CHECK
    }
    env->DeleteLocalRef(arr);
    if (ret != size || exception)
        LOG(VB_GENERAL, LOG_ERR, LOC + __func__
            + QString(" Audio Write failed, size %1 return %2 exception %3")
            .arg(size).arg(ret).arg(exception));

    LOG(VB_AUDIO | VB_TIMESTAMP, LOG_INFO, LOC + __func__
        + QString(" WriteAudio size=%1 written=%2")
        .arg(size).arg(ret));
}

int AudioOutputAudioTrack::GetBufferedOnSoundcard(void) const
{
    bool exception=false;
    QAndroidJniEnvironment env;
    int buffered (0);
    if (m_audioTrack)
    {
        // This may return a negative value, because there is data already
        // played that is still in the "Audio circular buffer".
        buffered
            = m_audioTrack->callMethod<jint>("getBufferedBytes");
        ANDROID_EXCEPTION_CHECK
        if (exception)
            buffered = 0;
        int latency
            = m_audioTrack->callMethod<jint>("getLatencyViaHeadPosition");
        ANDROID_EXCEPTION_CHECK
        if (exception)
            latency = 0;
        buffered += latency * m_samplerate / 1000 * m_bitsPer10Frames / 80;
    }

    return buffered;
}

bool AudioOutputAudioTrack::AddData(void *in_buffer, int in_len,
                                    int64_t timecode, int in_frames)
{
    bool ret = AudioOutputBase::AddData
        (in_buffer, in_len, timecode, in_frames);

    return ret;
}

void AudioOutputAudioTrack::Pause(bool paused)
{
    AudioOutputBase::Pause(paused);
    jboolean param = paused;
    m_audioTrack->callMethod<void>("pause","(Z)V",param);
}

void AudioOutputAudioTrack::SetSourceBitrate(int rate)
{
    AudioOutputBase::SetSourceBitrate(rate);
    if (m_source_bitrate > 0 && (m_passthru || m_enc))
    {
        m_bitsPer10Frames = m_source_bitrate * 10 / m_source_samplerate;
        jint bitsPer10Frames = m_bitsPer10Frames;
        m_audioTrack->callMethod<void>("setBitsPer10Frames","(I)V",bitsPer10Frames);
    }
}
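The getMinBufferSize probing used in GetOutputSettings above is a standard Android capability-discovery trick: the call returns a positive buffer size only for a valid rate/format combination. A minimal standalone Java sketch of the same idea (the class name and rate list here are hypothetical; the patch iterates settings->GetNextRate() instead):

import android.media.AudioFormat;
import android.media.AudioTrack;
import android.util.Log;

public class RateProbe
{
    // Hypothetical candidate list for illustration.
    static final int[] CANDIDATE_RATES = { 44100, 48000, 88200, 96000, 192000 };

    public static void probe()
    {
        for (int rate : CANDIDATE_RATES)
        {
            int size = AudioTrack.getMinBufferSize(rate,
                    AudioFormat.CHANNEL_OUT_MONO,
                    AudioFormat.ENCODING_PCM_16BIT);
            // A non-positive result (e.g. ERROR_BAD_VALUE) means the
            // combination is not supported on this device.
            if (size > 0)
                Log.i("RateProbe", rate + " Hz supported, min buffer " + size);
        }
    }
}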
new file mythtv/libs/libmyth/audio/audiooutputaudiotrack.h
diff --git a/mythtv/libs/libmyth/audio/audiooutputaudiotrack.h b/mythtv/libs/libmyth/audio/audiooutputaudiotrack.h
new file mode 100644
index 00000000000..9f407448945
#ifndef _AUDIOOUTPUTAUDIOTRACK_H_
#define _AUDIOOUTPUTAUDIOTRACK_H_

#include "audiooutputbase.h"

class QAndroidJniObject;

/*
  Audio output for Android based on android.media.AudioTrack.

  This uses the java class org.mythtv.audio.AudioOutputAudioTrack
  to invoke android media playback methods.
*/

class AudioOutputAudioTrack : public AudioOutputBase
{
  public:
    explicit AudioOutputAudioTrack(const AudioSettings &settings);
    ~AudioOutputAudioTrack() override;

    bool AddData(void *buffer, int len, int64_t timecode, int frames) override; // AudioOutput

    // Volume control
    int GetVolumeChannel(int /* channel */) const override // VolumeBase
        { return 100; }
    void SetVolumeChannel(int /* channel */, int /* volume */) override // VolumeBase
        {}
    void Pause(bool paused) override; // AudioOutput

  protected:
    bool OpenDevice(void) override; // AudioOutputBase
    void CloseDevice(void) override; // AudioOutputBase
    void WriteAudio(unsigned char *aubuf, int size) override; // AudioOutputBase
    int GetBufferedOnSoundcard(void) const override; // AudioOutputBase
    AudioOutputSettings* GetOutputSettings(bool digital) override; // AudioOutputBase
    void SetSourceBitrate(int rate) override; // AudioOutputBase

    QAndroidJniObject *m_audioTrack {nullptr};
    int m_bitsPer10Frames {0};
};

#endif // _AUDIOOUTPUTAUDIOTRACK_H_
mythtv/libs/libmyth/audio/audiooutputbase.cpp
diff --git a/mythtv/libs/libmyth/audio/audiooutputbase.cpp b/mythtv/libs/libmyth/audio/audiooutputbase.cpp
index 1200bc2106c..1e07886c6b4 100644
--- a/mythtv/libs/libmyth/audio/audiooutputbase.cpp
+++ b/mythtv/libs/libmyth/audio/audiooutputbase.cpp
@@ … @@ using namespace std;
 #include "mythlogging.h"
 #include "mythconfig.h"
 
+// AC3 encode currently disabled for Android
+#if defined(Q_OS_ANDROID)
+#define DISABLE_AC3_ENCODE
+#endif
+
 #define LOC QString("AOBase: ")
 
 #define WPOS (m_audiobuffer + org_waud)
@@ … @@ AudioOutputBase::AudioOutputBase(const AudioSettings &settings) :
     memset(m_src_in_buf, 0, sizeof(m_src_in_buf));
     memset(m_audiobuffer, 0, sizeof(m_audiobuffer));
 
+    if (m_main_device.startsWith("OpenMAX:")
+        || m_main_device.startsWith("AudioTrack:"))
+        m_usesSpdif = false;
     // Handle override of SRC quality settings
     if (gCoreContext->GetBoolSetting("SRCQualityOverride", false))
     {
@@ … @@ void AudioOutputBase::SetStretchFactorLocked(float lstretchfactor)
         m_pSoundStretch->setSampleRate(m_samplerate);
         m_pSoundStretch->setChannels(channels);
         m_pSoundStretch->setTempo(m_stretchfactor);
-#if ARCH_ARM
+#if ARCH_ARM || defined(Q_OS_ANDROID)
         // use less demanding settings for Raspberry pi
         m_pSoundStretch->setSetting(SETTING_SEQUENCE_MS, 82);
         m_pSoundStretch->setSetting(SETTING_USE_AA_FILTER, 0);
@@ … @@ bool AudioOutputBase::SetupPassthrough(AVCodecID codec, int codec_profile,
 
     delete m_spdifenc;
 
-    // No spdif encoder if using openmax audio
-    if (m_main_device.startsWith("OpenMAX:"))
-        m_spdifenc = nullptr;
-    else
+    // No spdif encoder needed for certain devices
+    if (m_usesSpdif)
         m_spdifenc = new SPDIFEncoder("spdif", codec);
+    else
+        m_spdifenc = nullptr;
     if (m_spdifenc && m_spdifenc->Succeeded() && codec == AV_CODEC_ID_DTS)
     {
         switch(codec_profile)
@@ … @@ void AudioOutputBase::Reconfigure(const AudioSettings &orig_settings)
         m_output_settings->IsSupportedChannels(lconfigured_channels);
 
     // check if the number of channels could be transmitted via AC3 encoding
+#ifndef DISABLE_AC3_ENCODE
     lenc = m_output_settingsdigital->canFeature(FEATURE_AC3) &&
         (!m_output_settings->canFeature(FEATURE_LPCM) &&
          lconfigured_channels > 2 && lconfigured_channels <= 6);
-
+#endif
     if (!lenc && !cando_channels)
     {
         // if hardware doesn't support source audio configuration
@@ … @@ void AudioOutputBase::Reconfigure(const AudioSettings &orig_settings)
        and we have more than 2 channels but multichannel PCM is not
        supported or if the device just doesn't support the number of
        channels */
+#ifndef DISABLE_AC3_ENCODE
     lenc = m_output_settingsdigital->canFeature(FEATURE_AC3) &&
         ((!m_output_settings->canFeature(FEATURE_LPCM) &&
           lconfigured_channels > 2) ||
          !m_output_settings->IsSupportedChannels(lconfigured_channels));
-
     /* Might we reencode a bitstream that's been decoded for timestretch?
        If the device doesn't support the number of channels - see below */
     if (m_output_settingsdigital->canFeature(FEATURE_AC3) &&
@@ … @@ void AudioOutputBase::Reconfigure(const AudioSettings &orig_settings)
     {
         lreenc = true;
     }
-
+#endif
     // Enough channels? Upmix if not, but only from mono/stereo/5.0 to 5.1
     if (IS_VALID_UPMIX_CHANNEL(settings.m_channels) &&
         settings.m_channels < lconfigured_channels)
@@ … @@ void AudioOutputBase::SetEffDsp(int dsprate)
 /**
  * Get the number of bytes in the audiobuffer
  */
-inline int AudioOutputBase::audiolen()
+inline int AudioOutputBase::audiolen() const
 {
     if (m_waud >= m_raud)
         return m_waud - m_raud;
@@ … @@ inline int AudioOutputBase::audiolen()
 /**
  * Get the free space in the audiobuffer in bytes
  */
-int AudioOutputBase::audiofree()
+int AudioOutputBase::audiofree() const
 {
     return kAudioRingBufferSize - audiolen() - 1;
     /* There is one wasted byte in the buffer. The case where waud = raud is
@@ … @@ int AudioOutputBase::audiofree()
  * This value can differ from that returned by audiolen if samples are
  * being converted to floats and the output sample format is not 32 bits
  */
-int AudioOutputBase::audioready()
+int AudioOutputBase::audioready() const
 {
     if (m_passthru || m_enc || m_bytes_per_frame == m_output_bytes_per_frame)
         return audiolen();
@@ … @@ int64_t AudioOutputBase::GetAudiotime(void)
     if (m_audbuf_timecode == 0 || !m_configure_succeeded)
         return 0;
 
-    int obpf = m_output_bytes_per_frame;
+    // output bits per 10 frames
+    int64_t obpf;
+
+    if (m_passthru && !usesSpdif())
+        obpf = m_source_bitrate * 10 / m_source_samplerate;
+    else if (m_enc && !usesSpdif())
+    {
+        // re-encode bitrate is hardcoded at 448000
+        obpf = 448000 * 10 / m_source_samplerate;
+    }
+    else
+        obpf = m_output_bytes_per_frame * 80;
+
     int64_t oldaudiotime;
 
     /* We want to calculate 'audiotime', which is the timestamp of the audio
@@ … @@ int64_t AudioOutputBase::GetAudiotime(void)
 
     QMutexLocker lockav(&m_avsync_lock);
 
-    int soundcard_buffer = GetBufferedOnSoundcard(); // bytes
+    int64_t soundcard_buffer = GetBufferedOnSoundcard(); // bytes
 
     /* audioready tells us how many bytes are in audiobuffer
        scaled appropriately if output format != internal format */
-    int main_buffer = audioready();
+    int64_t main_buffer = audioready();
 
     oldaudiotime = m_audiotime;
 
@@ … @@ int64_t AudioOutputBase::GetAudiotime(void)
        of major post-stretched buffer contents
        processing latencies are catered for in AddData/SetAudiotime
        to eliminate race */
-    m_audiotime = m_audbuf_timecode - (m_effdsp && obpf ? (
-        ((int64_t)(main_buffer + soundcard_buffer) * m_eff_stretchfactor) /
-        (m_effdsp * obpf)) : 0);
+    m_audiotime = m_audbuf_timecode - (m_effdsp && obpf ?
+        ((main_buffer + soundcard_buffer) * int64_t(m_eff_stretchfactor)
+         * 80 / int64_t(m_effdsp) / obpf) : 0);
 
     /* audiotime should never go backwards, but we might get a negative
        value if GetBufferedOnSoundcard() isn't updated by the driver very
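To sanity-check the reworked GetAudiotime expression, here is the arithmetic with concrete numbers. This sketch assumes m_effdsp is the sample rate scaled by 100 and m_eff_stretchfactor is the play-speed stretch factor scaled by 100000; both scalings are assumptions made for the illustration, not asserted by the patch.

// Illustrative only. 16-bit stereo PCM at 48 kHz, 1.0x play speed.
long mainBuffer = 32000;        // bytes still in the audio ring buffer
long soundcardBuffer = 16000;   // bytes from GetBufferedOnSoundcard()
long effStretch = 100000;       // assumed: 1.0x scaled by 100000
long effdsp = 4800000;          // assumed: 48000 Hz scaled by 100
long obpf = 320;                // bits per 10 frames (4 bytes/frame * 80)

// (bytes * 80 / obpf) frames, converted to ms at the effective rate:
long behindMs = (mainBuffer + soundcardBuffer) * effStretch * 80 / effdsp / obpf;
// 48000 bytes = 12000 frames = 250 ms -> behindMs == 250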
mythtv/libs/libmyth/audio/audiooutputbase.h
diff --git a/mythtv/libs/libmyth/audio/audiooutputbase.h b/mythtv/libs/libmyth/audio/audiooutputbase.h
index ea830b43585..b2a44e80403 100644
--- a/mythtv/libs/libmyth/audio/audiooutputbase.h
+++ b/mythtv/libs/libmyth/audio/audiooutputbase.h
@@ … @@ class AudioOutputBase : public AudioOutput, public MThread
 
     int CheckFreeSpace(int &frames);
 
-    inline int audiolen(); // number of valid bytes in audio buffer
-    int audiofree();       // number of free bytes in audio buffer
-    int audioready();      // number of bytes ready to be written
+    inline int audiolen() const; // number of valid bytes in audio buffer
+    int audiofree() const;       // number of free bytes in audio buffer
+    int audioready() const;      // number of bytes ready to be written
 
     void SetStretchFactorLocked(float factor);
 
     // For audiooutputca
     int GetBaseAudBufTimeCode() const { return m_audbuf_timecode; }
 
+    bool usesSpdif() const { return m_usesSpdif; }
+
 protected:
     // Basic details about the audio stream
     int m_channels {-1};
@@ … @@ class AudioOutputBase : public AudioOutput, public MThread
     int64_t m_length_last_data {0};
 
     // SPDIF Encoder for digital passthrough
+    bool m_usesSpdif {true};
     SPDIFEncoder *m_spdifenc {nullptr};
 
     // Flag indicating if SetStretchFactor enabled audio float processing
mythtv/libs/libmyth/libmyth.pro
diff --git a/mythtv/libs/libmyth/libmyth.pro b/mythtv/libs/libmyth/libmyth.pro
index bb5f4a9e200..3c2de90068e 100644
--- a/mythtv/libs/libmyth/libmyth.pro
+++ b/mythtv/libs/libmyth/libmyth.pro
@@ … @@ unix:!cygwin {
 
 android {
     SOURCES += audio/audiooutputopensles.cpp
+    SOURCES += audio/audiooutputaudiotrack.cpp
     HEADERS += audio/audiooutputopensles.h
+    HEADERS += audio/audiooutputaudiotrack.h
 }
 
 linux:DEFINES += linux