/*
This file is part of Telegram Desktop,
the official desktop version of Telegram messaging app, see https://telegram.org

Telegram Desktop is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

It is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

In addition, as a special exception, the copyright holders give permission
to link the code of portions of this program with the OpenSSL library.

Full license: https://github.com/telegramdesktop/tdesktop/blob/master/LICENSE
Copyright (c) 2014-2016 John Preston, https://desktop.telegram.org
*/
# include "stdafx.h"
2016-07-05 20:44:02 +03:00
# include "media/media_audio.h"
2016-03-24 15:57:10 +03:00
2016-07-05 20:44:02 +03:00
# include "media/media_audio_ffmpeg_loader.h"
# include "media/media_child_ffmpeg_loader.h"
# include "media/media_audio_loaders.h"
2014-09-04 11:33:44 +04:00
# include <AL/al.h>
# include <AL/alc.h>
2015-05-24 20:58:39 +03:00
2015-06-03 15:18:46 +03:00
# define AL_ALEXT_PROTOTYPES
# include <AL/alext.h>
2016-08-29 23:24:16 -06:00
# include <numeric>
2015-05-30 19:30:47 +03:00
extern "C" {

#ifdef Q_OS_MAC

#include <iconv.h>

#undef iconv_open
#undef iconv
#undef iconv_close

// On macOS the SDK maps iconv_open()/iconv()/iconv_close() to libiconv_*
// via macros. We #undef those macros above and provide real extern "C"
// definitions forwarding to libiconv, so code that links against the
// plain iconv_* symbols resolves to these wrappers.
iconv_t iconv_open(const char *tocode, const char *fromcode) {
	return libiconv_open(tocode, fromcode);
}
size_t iconv(iconv_t cd, char **inbuf, size_t *inbytesleft, char **outbuf, size_t *outbytesleft) {
	return libiconv(cd, inbuf, inbytesleft, outbuf, outbytesleft);
}
int iconv_close(iconv_t cd) {
	return libiconv_close(cd);
}

#endif // Q_OS_MAC

} // extern "C"
2014-09-04 11:33:44 +04:00
namespace {
ALCdevice * audioDevice = 0 ;
ALCcontext * audioContext = 0 ;
ALuint notifySource = 0 ;
ALuint notifyBuffer = 0 ;
2015-05-29 21:52:43 +03:00
2016-12-01 22:20:33 +03:00
TimeMs notifyLengthMs = 0 ;
2015-07-01 00:07:05 +03:00
2015-05-29 21:52:43 +03:00
QMutex playerMutex ;
AudioPlayer * player = 0 ;
2015-07-01 00:07:05 +03:00
float64 suppressAllGain = 1. , suppressSongGain = 1. ;
2015-05-29 21:52:43 +03:00
AudioCapture * capture = 0 ;
2014-09-04 11:33:44 +04:00
}
bool _checkALCError ( ) {
ALenum errCode ;
if ( ( errCode = alcGetError ( audioDevice ) ) ! = ALC_NO_ERROR ) {
2015-05-29 21:52:43 +03:00
LOG ( ( " Audio Error: (alc) %1, %2 " ) . arg ( errCode ) . arg ( ( const char * ) alcGetString ( audioDevice , errCode ) ) ) ;
return false ;
}
return true ;
}
// Returns true when the given capture device has no pending ALC error;
// otherwise logs the error code and description and returns false.
bool _checkCaptureError(ALCdevice *device) {
	ALenum errCode;
	if ((errCode = alcGetError(device)) != ALC_NO_ERROR) {
		// Query the error string from the capture device itself, not from
		// the global playback device (the latter was a copy-paste mistake).
		LOG(("Audio Error: (capture) %1, %2").arg(errCode).arg((const char *)alcGetString(device, errCode)));
		return false;
	}
	return true;
}
bool _checkALError ( ) {
ALenum errCode ;
if ( ( errCode = alGetError ( ) ) ! = AL_NO_ERROR ) {
2015-05-29 21:52:43 +03:00
LOG ( ( " Audio Error: (al) %1, %2 " ) . arg ( errCode ) . arg ( ( const char * ) alGetString ( errCode ) ) ) ;
2014-09-04 11:33:44 +04:00
return false ;
}
return true ;
}
2015-07-01 00:07:05 +03:00
Q_DECLARE_METATYPE ( AudioMsgId ) ;
2016-02-12 19:35:06 +03:00
Q_DECLARE_METATYPE ( VoiceWaveform ) ;
2014-09-04 11:33:44 +04:00
// Initializes audio capture and playback: creates the capture helper,
// opens the default OpenAL device/context, configures the listener and
// the notification source, then parses ":/gui/art/newmsg.wav" (PCM WAV)
// into the notification buffer. On any failure calls audioFinish() to
// tear everything back down.
void audioInit() {
	if (!capture) {
		capture = new AudioCapture();
		cSetHasAudioCapture(capture->check());
	}

	if (audioDevice) return; // already initialized

	audioDevice = alcOpenDevice(0);
	if (!audioDevice) {
		LOG(("Audio Error: default sound device not present."));
		return;
	}

	ALCint attributes[] = { ALC_STEREO_SOURCES, 8, 0 };
	audioContext = alcCreateContext(audioDevice, attributes);
	alcMakeContextCurrent(audioContext);
	if (!_checkALCError()) return audioFinish();

	// Listener at the origin, default orientation, no distance attenuation.
	ALfloat v[] = { 0.f, 0.f, -1.f, 0.f, 1.f, 0.f };
	alListener3f(AL_POSITION, 0.f, 0.f, 0.f);
	alListener3f(AL_VELOCITY, 0.f, 0.f, 0.f);
	alListenerfv(AL_ORIENTATION, v);

	alDistanceModel(AL_NONE);

	alGenSources(1, &notifySource);
	alSourcef(notifySource, AL_PITCH, 1.f);
	alSourcef(notifySource, AL_GAIN, 1.f);
	alSource3f(notifySource, AL_POSITION, 0, 0, 0);
	alSource3f(notifySource, AL_VELOCITY, 0, 0, 0);
	alSourcei(notifySource, AL_LOOPING, 0);

	alGenBuffers(1, &notifyBuffer);
	if (!_checkALError()) return audioFinish();

	QFile notify(":/gui/art/newmsg.wav");
	if (!notify.open(QIODevice::ReadOnly)) return audioFinish();

	QByteArray blob = notify.readAll();
	const char *data = blob.constData();
	if (blob.size() < 44) return audioFinish(); // minimum RIFF/WAVE header size

	// Manual RIFF/WAVE header validation; the magic constants are the
	// little-endian chunk tags from the WAV file format.
	if (*((const uint32*)(data + 0)) != 0x46464952) return audioFinish(); // ChunkID - "RIFF"
	if (*((const uint32*)(data + 4)) != uint32(blob.size() - 8)) return audioFinish(); // ChunkSize
	if (*((const uint32*)(data + 8)) != 0x45564157) return audioFinish(); // Format - "WAVE"
	if (*((const uint32*)(data + 12)) != 0x20746d66) return audioFinish(); // Subchunk1ID - "fmt "
	uint32 subchunk1Size = *((const uint32*)(data + 16)), extra = subchunk1Size - 16;
	if (subchunk1Size < 16 || (extra && extra < 2)) return audioFinish();
	if (*((const uint16*)(data + 20)) != 1) return audioFinish(); // AudioFormat - PCM (1)

	uint16 numChannels = *((const uint16*)(data + 22));
	if (numChannels != 1 && numChannels != 2) return audioFinish();

	uint32 sampleRate = *((const uint32*)(data + 24));
	uint32 byteRate = *((const uint32*)(data + 28));

	uint16 blockAlign = *((const uint16*)(data + 32));
	uint16 bitsPerSample = *((const uint16*)(data + 34));
	if (bitsPerSample % 8) return audioFinish();
	uint16 bytesPerSample = bitsPerSample / 8;
	if (bytesPerSample != 1 && bytesPerSample != 2) return audioFinish();
	if (blockAlign != numChannels * bytesPerSample) return audioFinish();
	if (byteRate != sampleRate * blockAlign) return audioFinish();

	if (extra) {
		// Optional extension area inside the "fmt " chunk.
		uint16 extraSize = *((const uint16*)(data + 36));
		if (uint32(extraSize + 2) != extra) return audioFinish();
		if (uint32(blob.size()) < 44 + extra) return audioFinish();
	}

	if (*((const uint32*)(data + extra + 36)) != 0x61746164) return audioFinish(); // Subchunk2ID - "data"
	uint32 subchunk2Size = *((const uint32*)(data + extra + 40));

	if (subchunk2Size % (numChannels * bytesPerSample)) return audioFinish();
	uint32 numSamples = subchunk2Size / (numChannels * bytesPerSample);

	if (uint32(blob.size()) < 44 + extra + subchunk2Size) return audioFinish();
	data += 44 + extra;

	// Map channel count / sample size to the matching OpenAL buffer format.
	ALenum format = 0;
	switch (bytesPerSample) {
	case 1:
		switch (numChannels) {
		case 1: format = AL_FORMAT_MONO8; break;
		case 2: format = AL_FORMAT_STEREO8; break;
		}
	break;

	case 2:
		switch (numChannels) {
		case 1: format = AL_FORMAT_MONO16; break;
		case 2: format = AL_FORMAT_STEREO16; break;
		}
	break;
	}
	if (!format) return audioFinish();

	int32 addBytes = (sampleRate * 15 / 100) * bytesPerSample * numChannels; // add 150ms of silence
	// PCM silence is the midpoint sample: 128 for unsigned 8-bit, 0 for signed 16-bit.
	QByteArray fullData(addBytes + subchunk2Size, (bytesPerSample == 1) ? 128 : 0);
	memcpy(fullData.data() + addBytes, data, subchunk2Size);
	alBufferData(notifyBuffer, format, fullData.constData(), fullData.size(), sampleRate);
	alSourcei(notifySource, AL_BUFFER, notifyBuffer);
	notifyLengthMs = (numSamples * 1000ULL / sampleRate);

	if (!_checkALError()) return audioFinish();

	// Register types passed through queued signal-slot connections.
	qRegisterMetaType<AudioMsgId>();
	qRegisterMetaType<VoiceWaveform>();

	player = new AudioPlayer();
	alcDevicePauseSOFT(audioDevice); // keep the device paused until needed

	cSetHasAudioPlayer(true);
}
// Plays the "new message" notification sound: resumes the (possibly
// paused) device, starts the notification source and asks the fader to
// temporarily suppress all other playing audio.
void audioPlayNotify() {
	if (!audioPlayer()) return;

	audioPlayer()->resumeDevice();
	alSourcePlay(notifySource);
	emit audioPlayer()->suppressAll();
	emit audioPlayer()->faderOnTimer();
}
2016-03-25 23:46:35 +03:00
// can be called at any moment when audio error
// Tears down all audio state: deletes the player and capture helpers,
// releases the notification source/buffer and closes the OpenAL
// context/device, then clears the capability flags.
void audioFinish() {
	if (player) {
		delete player;
		player = nullptr;
	}
	if (capture) {
		delete capture;
		capture = nullptr;
	}

	alSourceStop(notifySource);
	if (alIsBuffer(notifyBuffer)) {
		alDeleteBuffers(1, &notifyBuffer);
		notifyBuffer = 0;
	}
	if (alIsSource(notifySource)) {
		alDeleteSources(1, &notifySource);
		notifySource = 0;
	}

	if (audioContext) {
		// The context must be deselected before it can be destroyed.
		alcMakeContextCurrent(nullptr);
		alcDestroyContext(audioContext);
		audioContext = nullptr;
	}

	if (audioDevice) {
		alcCloseDevice(audioDevice);
		audioDevice = nullptr;
	}

	cSetHasAudioCapture(false);
	cSetHasAudioPlayer(false);
}
2016-06-30 15:03:32 +03:00
// Resets this track slot to its default empty state: clears the media
// identity and data, stops the OpenAL source and drops queued buffers.
void AudioPlayer::AudioMsg::clear() {
	audio = AudioMsgId();
	file = FileLocation();
	data = QByteArray();
	playbackState = defaultState();
	skipStart = skipEnd = 0;
	loading = false;
	started = 0;
	if (alIsSource(source)) {
		alSourceStop(source);
	}
	for (int i = 0; i < 3; ++i) {
		if (samplesCount[i]) {
			ALuint buffer = 0;
			// This cleans some random queued buffer, not exactly the buffers[i].
			alSourceUnqueueBuffers(source, 1, &buffer);
			samplesCount[i] = 0;
		}
	}
	nextBuffer = 0;

	videoData = nullptr;
	videoPlayId = 0;
}
2016-10-12 22:34:25 +03:00
// Constructs the player with its two worker objects: a fader (smooth
// volume changes, periodic position checks) and a loader (decoding),
// each living on its own thread, and wires all cross-thread
// signal-slot connections between them.
AudioPlayer::AudioPlayer()
: _fader(new AudioPlayerFader(&_faderThread))
, _loader(new AudioPlayerLoaders(&_loaderThread)) {
	connect(this, SIGNAL(faderOnTimer()), _fader, SLOT(onTimer()));
	connect(this, SIGNAL(suppressSong()), _fader, SLOT(onSuppressSong()));
	connect(this, SIGNAL(unsuppressSong()), _fader, SLOT(onUnsuppressSong()));
	connect(this, SIGNAL(suppressAll()), _fader, SLOT(onSuppressAll()));

	// Volume changes arrive through the global observable settings;
	// invokeMethod dispatches them onto the fader's thread.
	subscribe(Global::RefSongVolumeChanged(), [this] {
		QMetaObject::invokeMethod(_fader, "onSongVolumeChanged");
	});
	subscribe(Global::RefVideoVolumeChanged(), [this] {
		QMetaObject::invokeMethod(_fader, "onVideoVolumeChanged");
	});

	connect(this, SIGNAL(loaderOnStart(const AudioMsgId&,qint64)), _loader, SLOT(onStart(const AudioMsgId&,qint64)));
	connect(this, SIGNAL(loaderOnCancel(const AudioMsgId&)), _loader, SLOT(onCancel(const AudioMsgId&)));
	connect(_loader, SIGNAL(needToCheck()), _fader, SLOT(onTimer()));
	connect(_loader, SIGNAL(error(const AudioMsgId&)), this, SLOT(onError(const AudioMsgId&)));
	connect(_fader, SIGNAL(needToPreload(const AudioMsgId&)), _loader, SLOT(onLoad(const AudioMsgId&)));
	connect(_fader, SIGNAL(playPositionUpdated(const AudioMsgId&)), this, SIGNAL(updated(const AudioMsgId&)));
	connect(_fader, SIGNAL(audioStopped(const AudioMsgId&)), this, SLOT(onStopped(const AudioMsgId&)));
	connect(_fader, SIGNAL(error(const AudioMsgId&)), this, SLOT(onError(const AudioMsgId&)));
	// Queued so the updated() notification runs on the next event loop turn.
	connect(this, SIGNAL(stoppedOnError(const AudioMsgId&)), this, SIGNAL(updated(const AudioMsgId&)), Qt::QueuedConnection);
	connect(this, SIGNAL(updated(const AudioMsgId&)), this, SLOT(onUpdated(const AudioMsgId&)));

	_loaderThread.start();
	_faderThread.start();
}
2015-05-29 21:52:43 +03:00
// Destroys the player: detaches the global pointer under the mutex,
// releases every OpenAL source/buffer owned by the voice/song/video
// track slots and joins both worker threads.
AudioPlayer::~AudioPlayer() {
	{
		// Other threads check this pointer under playerMutex.
		QMutexLocker lock(&playerMutex);
		player = nullptr;
	}

	// Releases the OpenAL objects of a single track slot.
	auto clearAudioMsg = [](AudioMsg *msg) {
		alSourceStop(msg->source);
		if (alIsBuffer(msg->buffers[0])) {
			alDeleteBuffers(3, msg->buffers);
			for (int j = 0; j < 3; ++j) {
				msg->buffers[j] = msg->samplesCount[j] = 0;
			}
		}
		if (alIsSource(msg->source)) {
			alDeleteSources(1, &msg->source);
			msg->source = 0;
		}
	};
	for (int i = 0; i < AudioSimultaneousLimit; ++i) {
		clearAudioMsg(dataForType(AudioMsgId::Type::Voice, i));
		clearAudioMsg(dataForType(AudioMsgId::Type::Song, i));
	}
	clearAudioMsg(&_videoData);

	_faderThread.quit();
	_loaderThread.quit();
	_faderThread.wait();
	_loaderThread.wait();
}
2016-09-21 14:44:20 +03:00
void AudioPlayer : : onUpdated ( const AudioMsgId & audio ) {
if ( audio . type ( ) = = AudioMsgId : : Type : : Video ) {
videoSoundProgress ( audio ) ;
}
2016-09-23 19:04:26 +03:00
notify ( audio ) ;
2016-09-21 14:44:20 +03:00
}
2015-07-01 00:07:05 +03:00
void AudioPlayer : : onError ( const AudioMsgId & audio ) {
2015-07-03 11:47:16 +03:00
emit stoppedOnError ( audio ) ;
2016-06-30 15:03:32 +03:00
if ( audio . type ( ) = = AudioMsgId : : Type : : Voice ) {
emit unsuppressSong ( ) ;
}
2015-07-01 00:07:05 +03:00
}
void AudioPlayer : : onStopped ( const AudioMsgId & audio ) {
2016-07-10 16:02:22 +03:00
emit updated ( audio ) ;
2016-06-30 15:03:32 +03:00
if ( audio . type ( ) = = AudioMsgId : : Type : : Voice ) {
emit unsuppressSong ( ) ;
}
2014-09-04 11:33:44 +04:00
}
2016-06-30 15:03:32 +03:00
// Returns the mutable per-track slot for the given media type. A negative
// index means "the currently active slot" of that type. May return
// nullptr for unknown types.
AudioPlayer::AudioMsg *AudioPlayer::dataForType(AudioMsgId::Type type, int index) {
	if (index < 0) {
		auto indexPtr = currentIndex(type);
		if (!indexPtr) {
			return nullptr;
		}
		index = *indexPtr;
	}
	if (type == AudioMsgId::Type::Voice) {
		return &_audioData[index];
	} else if (type == AudioMsgId::Type::Song) {
		return &_songData[index];
	} else if (type == AudioMsgId::Type::Video) {
		return &_videoData; // single shared slot, index is ignored
	}
	return nullptr;
}
2016-06-30 15:03:32 +03:00
// Const overload; delegates to the non-const version.
const AudioPlayer::AudioMsg *AudioPlayer::dataForType(AudioMsgId::Type type, int index) const {
	return const_cast<AudioPlayer*>(this)->dataForType(type, index);
}
int * AudioPlayer : : currentIndex ( AudioMsgId : : Type type ) {
2015-07-01 00:07:05 +03:00
switch ( type ) {
2016-06-30 15:03:32 +03:00
case AudioMsgId : : Type : : Voice : return & _audioCurrent ;
case AudioMsgId : : Type : : Song : return & _songCurrent ;
2016-07-05 20:44:02 +03:00
case AudioMsgId : : Type : : Video : { static int videoIndex = 0 ; return & videoIndex ; }
2015-07-01 00:07:05 +03:00
}
2016-06-30 15:03:32 +03:00
return nullptr ;
}
// Const overload; delegates to the non-const version.
const int *AudioPlayer::currentIndex(AudioMsgId::Type type) const {
	return const_cast<AudioPlayer*>(this)->currentIndex(type);
}
// Records the "started" playback position for the current track of the
// given type. With pos < 0 the offset is queried from the OpenAL source.
// Returns false (after reporting the error) when the query fails.
bool AudioPlayer::updateCurrentStarted(AudioMsgId::Type type, int32 pos) {
	auto data = dataForType(type);
	if (!data) return false;

	if (pos < 0) {
		if (alIsSource(data->source)) {
			alGetSourcei(data->source, AL_SAMPLE_OFFSET, &pos);
		} else {
			pos = 0;
		}
		if (!_checkALError()) {
			setStoppedState(data, AudioPlayerStoppedAtError);
			onError(data->audio);
			return false;
		}
	}
	// skipStart converts the source-relative offset into a track position.
	data->started = data->playbackState.position = pos + data->skipStart;
	return true;
}
2016-06-30 15:03:32 +03:00
// Begins a smooth stop for the current track of the given type. Sets
// *fadedStart = true when the fader thread still has to fade the sound
// out. Returns true only when the track was paused and could be stopped
// immediately.
bool AudioPlayer::fadedStop(AudioMsgId::Type type, bool *fadedStart) {
	auto current = dataForType(type);
	if (!current) return false;

	switch (current->playbackState.state) {
	case AudioPlayerStarting:
	case AudioPlayerResuming:
	case AudioPlayerPlaying:
		// Playing: remember the position and let the fader finish it.
		current->playbackState.state = AudioPlayerFinishing;
		updateCurrentStarted(type);
		if (fadedStart) *fadedStart = true;
		break;
	case AudioPlayerPausing:
		current->playbackState.state = AudioPlayerFinishing;
		if (fadedStart) *fadedStart = true;
		break;
	case AudioPlayerPaused:
	case AudioPlayerPausedAtEnd:
		// Already silent: stop right away.
		setStoppedState(current);
		return true;
	}
	return false;
}
2015-07-03 11:47:16 +03:00
// Starts playing the given voice/song track from the given position.
// Fades out the previous track of the same type, picks (or rotates to)
// the slot for this audio and kicks off the loader. When the media is
// not downloaded yet: songs trigger a document open/download, voice
// messages report an error.
void AudioPlayer::play(const AudioMsgId &audio, int64 position) {
	auto type = audio.type();
	AudioMsgId stopped;
	auto notLoadedYet = false;
	{
		QMutexLocker lock(&playerMutex);

		bool fadedStart = false;
		auto current = dataForType(type);
		if (!current) return;

		if (current->audio != audio) {
			// Fade out whatever was playing for this media type.
			if (fadedStop(type, &fadedStart)) {
				stopped = current->audio;
			}
			if (current->audio) {
				emit loaderOnCancel(current->audio);
				emit faderOnTimer();
			}

			// Reuse the slot that already holds this audio, or advance
			// round robin over the AudioSimultaneousLimit slots.
			auto foundCurrent = currentIndex(type);
			int index = 0;
			for (; index < AudioSimultaneousLimit; ++index) {
				if (dataForType(type, index)->audio == audio) {
					*foundCurrent = index;
					break;
				}
			}
			if (index == AudioSimultaneousLimit && ++*foundCurrent >= AudioSimultaneousLimit) {
				*foundCurrent -= AudioSimultaneousLimit;
			}
			current = dataForType(type);
		}
		current->audio = audio;
		current->file = audio.audio()->location(true);
		current->data = audio.audio()->data();
		if (current->file.isEmpty() && current->data.isEmpty()) {
			// Media not available locally yet.
			notLoadedYet = true;
			if (audio.type() == AudioMsgId::Type::Song) {
				setStoppedState(current);
			} else {
				setStoppedState(current, AudioPlayerStoppedAtError);
			}
		} else {
			current->playbackState.position = position;
			current->playbackState.state = fadedStart ? AudioPlayerStarting : AudioPlayerPlaying;
			current->loading = true;
			emit loaderOnStart(audio, position);
			if (type == AudioMsgId::Type::Voice) {
				emit suppressSong();
			}
		}
	}
	// Follow-up actions happen outside of the mutex.
	if (notLoadedYet) {
		if (audio.type() == AudioMsgId::Type::Song) {
			DocumentOpenClickHandler::doOpen(audio.audio(), App::histItemById(audio.contextId()));
		} else {
			onError(audio);
		}
	}
	if (stopped) {
		emit updated(stopped);
	}
}
2016-07-13 20:34:57 +03:00
void AudioPlayer : : initFromVideo ( uint64 videoPlayId , std_ : : unique_ptr < VideoSoundData > & & data , int64 position ) {
2016-07-05 20:44:02 +03:00
AudioMsgId stopped ;
{
QMutexLocker lock ( & playerMutex ) ;
2016-07-14 14:20:46 +03:00
// Pause current song.
auto currentSong = dataForType ( AudioMsgId : : Type : : Song ) ;
float64 suppressGain = suppressSongGain * Global : : SongVolume ( ) ;
switch ( currentSong - > playbackState . state ) {
case AudioPlayerStarting :
case AudioPlayerResuming :
case AudioPlayerPlaying :
currentSong - > playbackState . state = AudioPlayerPausing ;
updateCurrentStarted ( AudioMsgId : : Type : : Song ) ;
break ;
case AudioPlayerFinishing : currentSong - > playbackState . state = AudioPlayerPausing ; break ;
}
2016-07-13 20:34:57 +03:00
auto type = AudioMsgId : : Type : : Video ;
2016-07-05 20:44:02 +03:00
auto current = dataForType ( type ) ;
t_assert ( current ! = nullptr ) ;
if ( current - > audio ) {
fadedStop ( type ) ;
stopped = current - > audio ;
emit loaderOnCancel ( current - > audio ) ;
}
emit faderOnTimer ( ) ;
current - > clear ( ) ;
2016-07-13 20:34:57 +03:00
current - > audio = AudioMsgId ( AudioMsgId : : Type : : Video ) ;
2016-07-10 22:44:55 +03:00
current - > videoPlayId = videoPlayId ;
2016-07-05 20:44:02 +03:00
current - > videoData = std_ : : move ( data ) ;
2016-07-10 22:44:55 +03:00
{
QMutexLocker videoLock ( & _lastVideoMutex ) ;
_lastVideoPlayId = current - > videoPlayId ;
_lastVideoPlaybackWhen = 0 ;
_lastVideoPlaybackCorrectedMs = 0 ;
}
_loader - > startFromVideo ( current - > videoPlayId ) ;
2016-07-05 20:44:02 +03:00
2016-07-13 14:24:31 +03:00
current - > playbackState . state = AudioPlayerPaused ;
2016-07-05 20:44:02 +03:00
current - > loading = true ;
2016-07-13 20:34:57 +03:00
emit loaderOnStart ( current - > audio , position ) ;
2016-07-05 20:44:02 +03:00
}
if ( stopped ) emit updated ( stopped ) ;
}
2016-07-12 21:04:34 +03:00
void AudioPlayer : : stopFromVideo ( uint64 videoPlayId ) {
AudioMsgId current ;
{
QMutexLocker lock ( & playerMutex ) ;
auto data = dataForType ( AudioMsgId : : Type : : Video ) ;
t_assert ( data ! = nullptr ) ;
if ( data - > videoPlayId ! = videoPlayId ) {
return ;
}
current = data - > audio ;
fadedStop ( AudioMsgId : : Type : : Video ) ;
data - > clear ( ) ;
}
if ( current ) emit updated ( current ) ;
}
2016-07-13 14:24:31 +03:00
// Pauses the video soundtrack identified by videoPlayId (no-op for any
// other play id) and invalidates its time-correction state.
void AudioPlayer::pauseFromVideo(uint64 videoPlayId) {
	AudioMsgId current;
	{
		QMutexLocker lock(&playerMutex);
		auto type = AudioMsgId::Type::Video;
		auto data = dataForType(type);
		t_assert(data != nullptr);
		if (data->videoPlayId != videoPlayId) {
			return;
		}
		current = data->audio;
		switch (data->playbackState.state) {
		case AudioPlayerStarting:
		case AudioPlayerResuming:
		case AudioPlayerPlaying: {
			data->playbackState.state = AudioPlayerPaused;
			updateCurrentStarted(type);

			ALint state = AL_INITIAL;
			alGetSourcei(data->source, AL_SOURCE_STATE, &state);
			if (!checkCurrentALError(type)) return;

			if (state == AL_PLAYING) {
				alSourcePause(data->source);
				if (!checkCurrentALError(type)) return;
			}
		} break;
		}
		emit faderOnTimer();

		// While paused, the corrected-time extrapolation must not run;
		// videoLock is held until the end of the outer locked scope.
		QMutexLocker videoLock(&_lastVideoMutex);
		if (_lastVideoPlayId == videoPlayId) {
			_lastVideoPlaybackWhen = 0;
			_lastVideoPlaybackCorrectedMs = 0;
		}
	}
	if (current) emit updated(current);
}
// Resumes the paused video soundtrack identified by videoPlayId,
// restoring the sample offset when it was paused at the end and
// restarting the OpenAL source with the current video volume.
void AudioPlayer::resumeFromVideo(uint64 videoPlayId) {
	AudioMsgId current;
	{
		QMutexLocker lock(&playerMutex);
		auto type = AudioMsgId::Type::Video;
		auto data = dataForType(type);
		t_assert(data != nullptr);
		if (data->videoPlayId != videoPlayId) {
			return;
		}
		float64 suppressGain = suppressSongGain * Global::VideoVolume();

		current = data->audio;
		switch (data->playbackState.state) {
		case AudioPlayerPausing:
		case AudioPlayerPaused:
		case AudioPlayerPausedAtEnd: {
			if (data->playbackState.state == AudioPlayerPaused) {
				updateCurrentStarted(type);
			} else if (data->playbackState.state == AudioPlayerPausedAtEnd) {
				// Rewind to the stored position (source offsets exclude skipStart).
				if (alIsSource(data->source)) {
					alSourcei(data->source, AL_SAMPLE_OFFSET, qMax(data->playbackState.position - data->skipStart, 0LL));
					if (!checkCurrentALError(type)) return;
				}
			}
			data->playbackState.state = AudioPlayerPlaying;

			ALint state = AL_INITIAL;
			alGetSourcei(data->source, AL_SOURCE_STATE, &state);
			if (!checkCurrentALError(type)) return;

			if (state != AL_PLAYING) {
				audioPlayer()->resumeDevice();

				alSourcef(data->source, AL_GAIN, suppressGain);
				if (!checkCurrentALError(type)) return;

				alSourcePlay(data->source);
				if (!checkCurrentALError(type)) return;
			}
		} break;
		}
		emit faderOnTimer();
	}
	if (current) emit updated(current);
}
2016-07-05 20:44:02 +03:00
// Forwards a chunk of decoded video sound data to the loaders thread.
void AudioPlayer::feedFromVideo(VideoSoundPart &&part) {
	_loader->feedFromVideo(std_::move(part));
}
2016-12-01 22:20:33 +03:00
// Maps a video frame timestamp onto the audio-corrected timeline: when a
// fresh playback position exists for this play id, extrapolate from it by
// the elapsed system time; otherwise return the raw frame timestamp.
TimeMs AudioPlayer::getVideoCorrectedTime(uint64 playId, TimeMs frameMs, TimeMs systemMs) {
	QMutexLocker videoLock(&_lastVideoMutex);
	auto hasCorrection = (_lastVideoPlayId == playId) && (_lastVideoPlaybackWhen > 0);
	if (!hasCorrection) {
		return frameMs;
	}
	auto corrected = static_cast<TimeMs>(_lastVideoPlaybackCorrectedMs);
	if (systemMs > _lastVideoPlaybackWhen) {
		corrected += (systemMs - _lastVideoPlaybackWhen);
	}
	return corrected;
}
// Called when the video soundtrack position updates: while it is actually
// playing, records "corrected ms at this system time" so that
// getVideoCorrectedTime() can extrapolate the audio timeline.
void AudioPlayer::videoSoundProgress(const AudioMsgId &audio) {
	auto type = audio.type();
	t_assert(type == AudioMsgId::Type::Video);

	QMutexLocker lock(&playerMutex);
	QMutexLocker videoLock(&_lastVideoMutex);

	auto current = dataForType(type);
	t_assert(current != nullptr);

	if (current->videoPlayId == _lastVideoPlayId && current->playbackState.duration && current->playbackState.frequency) {
		if (current->playbackState.state == AudioPlayerPlaying) {
			_lastVideoPlaybackWhen = getms();
			// Convert the sample position to milliseconds.
			_lastVideoPlaybackCorrectedMs = (current->playbackState.position * 1000ULL) / current->playbackState.frequency;
		}
	}
}
2016-06-30 15:03:32 +03:00
bool AudioPlayer : : checkCurrentALError ( AudioMsgId : : Type type ) {
2015-07-03 11:47:16 +03:00
if ( _checkALError ( ) ) return true ;
2016-01-05 14:59:57 +08:00
2016-06-30 15:03:32 +03:00
auto data = dataForType ( type ) ;
if ( ! data ) {
setStoppedState ( data , AudioPlayerStoppedAtError ) ;
onError ( data - > audio ) ;
2015-07-03 11:47:16 +03:00
}
return false ;
}
2016-06-30 15:03:32 +03:00
// Toggles pause/resume for the current track of the given type. With
// fast == true the resume skips the smooth fade-in and jumps straight
// to the playing state.
void AudioPlayer::pauseresume(AudioMsgId::Type type, bool fast) {
	QMutexLocker lock(&playerMutex);

	auto current = dataForType(type);
	// Pick the gain multiplier appropriate for this media type.
	float64 suppressGain = 1.;
	switch (type) {
	case AudioMsgId::Type::Voice: suppressGain = suppressAllGain; break;
	case AudioMsgId::Type::Song: suppressGain = suppressSongGain * Global::SongVolume(); break;
	case AudioMsgId::Type::Video: suppressGain = suppressSongGain * Global::VideoVolume(); break;
	}

	switch (current->playbackState.state) {
	case AudioPlayerPausing:
	case AudioPlayerPaused:
	case AudioPlayerPausedAtEnd: {
		// Resume.
		if (current->playbackState.state == AudioPlayerPaused) {
			updateCurrentStarted(type);
		} else if (current->playbackState.state == AudioPlayerPausedAtEnd) {
			// Rewind to the stored position (source offsets exclude skipStart).
			if (alIsSource(current->source)) {
				alSourcei(current->source, AL_SAMPLE_OFFSET, qMax(current->playbackState.position - current->skipStart, 0LL));
				if (!checkCurrentALError(type)) return;
			}
		}
		current->playbackState.state = fast ? AudioPlayerPlaying : AudioPlayerResuming;

		ALint state = AL_INITIAL;
		alGetSourcei(current->source, AL_SOURCE_STATE, &state);
		if (!checkCurrentALError(type)) return;

		if (state != AL_PLAYING) {
			audioPlayer()->resumeDevice();

			alSourcef(current->source, AL_GAIN, suppressGain);
			if (!checkCurrentALError(type)) return;

			alSourcePlay(current->source);
			if (!checkCurrentALError(type)) return;
		}
		if (type == AudioMsgId::Type::Voice) emit suppressSong();
	} break;
	case AudioPlayerStarting:
	case AudioPlayerResuming:
	case AudioPlayerPlaying:
		// Pause; the fader thread performs the actual fade-out.
		current->playbackState.state = AudioPlayerPausing;
		updateCurrentStarted(type);
		if (type == AudioMsgId::Type::Voice) emit unsuppressSong();
		break;
	case AudioPlayerFinishing: current->playbackState.state = AudioPlayerPausing; break;
	}
	emit faderOnTimer();
}
2015-07-03 11:47:16 +03:00
// Seeks the current song to the given position. Seeks the OpenAL source
// in place when the position is inside the buffered range; otherwise
// stops and replays the track from the requested position.
void AudioPlayer::seek(int64 position) {
	QMutexLocker lock(&playerMutex);

	auto type = AudioMsgId::Type::Song;
	auto current = dataForType(type);
	float64 suppressGain = 1.;
	switch (type) {
	case AudioMsgId::Type::Voice: suppressGain = suppressAllGain; break;
	case AudioMsgId::Type::Song: suppressGain = suppressSongGain * Global::SongVolume(); break;
	}
	auto audio = current->audio;

	bool isSource = alIsSource(current->source);
	// Fast seek only works inside the currently decoded range.
	bool fastSeek = (position >= current->skipStart && position < current->playbackState.duration - current->skipEnd - (current->skipEnd ? AudioVoiceMsgFrequency : 0));
	if (fastSeek && isSource) {
		alSourcei(current->source, AL_SAMPLE_OFFSET, position - current->skipStart);
		if (!checkCurrentALError(type)) return;
		alSourcef(current->source, AL_GAIN, 1. * suppressGain);
		if (!checkCurrentALError(type)) return;
		updateCurrentStarted(type, position - current->skipStart);
	} else {
		setStoppedState(current);
		if (isSource) alSourceStop(current->source);
	}
	switch (current->playbackState.state) {
	case AudioPlayerPausing:
	case AudioPlayerPaused:
	case AudioPlayerPausedAtEnd: {
		if (current->playbackState.state == AudioPlayerPausedAtEnd) {
			current->playbackState.state = AudioPlayerPaused;
		}
		lock.unlock();
		return pauseresume(type, true);
	} break;
	case AudioPlayerStarting:
	case AudioPlayerResuming:
	case AudioPlayerPlaying:
		current->playbackState.state = AudioPlayerPausing;
		updateCurrentStarted(type);
		if (type == AudioMsgId::Type::Voice) emit unsuppressSong();
		break;
	case AudioPlayerFinishing:
	case AudioPlayerStopped:
	case AudioPlayerStoppedAtEnd:
	case AudioPlayerStoppedAtError:
	case AudioPlayerStoppedAtStart:
		// Not seekable in place — restart playback from the new position.
		lock.unlock();
		return play(audio, position);
	}
	emit faderOnTimer();
}
2016-06-30 15:03:32 +03:00
void AudioPlayer : : stop ( AudioMsgId : : Type type ) {
AudioMsgId current ;
{
QMutexLocker lock ( & playerMutex ) ;
2016-07-05 20:44:22 +03:00
auto data = dataForType ( type ) ;
t_assert ( data ! = nullptr ) ;
current = data - > audio ;
2016-06-30 15:03:32 +03:00
fadedStop ( type ) ;
2016-07-05 20:44:22 +03:00
if ( type = = AudioMsgId : : Type : : Video ) {
data - > clear ( ) ;
}
2015-11-24 19:19:18 +03:00
}
2016-06-30 15:03:32 +03:00
if ( current ) emit updated ( current ) ;
2015-11-24 19:19:18 +03:00
}
// Stops every voice and song track and clears all player slots, cancelling
// any in-flight loader work. The updated() signals are emitted between the
// two lock scopes — presumably to avoid re-entering slots while holding
// playerMutex (NOTE(review): confirm against the connected handlers).
void AudioPlayer::stopAndClear() {
	AudioMsg *current_audio = nullptr, *current_song = nullptr;
	{
		QMutexLocker lock(&playerMutex);
		if ((current_audio = dataForType(AudioMsgId::Type::Voice))) {
			setStoppedState(current_audio);
		}
		if ((current_song = dataForType(AudioMsgId::Type::Song))) {
			setStoppedState(current_song);
		}
	}
	if (current_song) {
		emit updated(current_song->audio);
	}
	if (current_audio) {
		emit updated(current_audio->audio);
	}
	{
		QMutexLocker lock(&playerMutex);
		// Cancel the loader for a slot (if it holds a track) and reset the slot.
		auto clearAndCancel = [this](AudioMsgId::Type type, int index) {
			auto data = dataForType(type, index);
			if (data->audio) {
				emit loaderOnCancel(data->audio);
			}
			data->clear();
		};
		for (int index = 0; index < AudioSimultaneousLimit; ++index) {
			clearAndCancel(AudioMsgId::Type::Voice, index);
			clearAndCancel(AudioMsgId::Type::Song, index);
		}
		// Video audio has a single dedicated slot and its own loader shutdown.
		_videoData.clear();
		_loader->stopFromVideo();
	}
}
2016-07-12 14:38:16 +03:00
// Returns the playback state of the video sound track, but only when it still
// belongs to the requested play session; otherwise a default (empty) state.
AudioPlaybackState AudioPlayer::currentVideoState(uint64 videoPlayId) {
	QMutexLocker lock(&playerMutex);
	auto track = dataForType(AudioMsgId::Type::Video);
	if (track && track->videoPlayId == videoPlayId) {
		return track->playbackState;
	}
	return AudioPlaybackState();
}
2016-07-10 16:02:22 +03:00
// Returns the playback state for the active track of |type|, optionally
// reporting which AudioMsgId is playing through the |audio| out-parameter.
AudioPlaybackState AudioPlayer::currentState(AudioMsgId *audio, AudioMsgId::Type type) {
	QMutexLocker lock(&playerMutex);
	auto track = dataForType(type);
	if (!track) {
		return AudioPlaybackState();
	}
	if (audio) {
		*audio = track->audio;
	}
	return track->playbackState;
}
2016-06-30 15:03:32 +03:00
void AudioPlayer : : setStoppedState ( AudioMsg * current , AudioPlayerState state ) {
2016-07-10 16:02:22 +03:00
current - > playbackState . state = state ;
current - > playbackState . position = 0 ;
2015-07-03 11:47:16 +03:00
}
2015-07-01 00:07:05 +03:00
void AudioPlayer : : clearStoppedAtStart ( const AudioMsgId & audio ) {
2015-05-29 21:52:43 +03:00
QMutexLocker lock ( & playerMutex ) ;
2016-06-30 15:03:32 +03:00
auto data = dataForType ( audio . type ( ) ) ;
2016-07-10 16:02:22 +03:00
if ( data & & data - > audio = = audio & & data - > playbackState . state = = AudioPlayerStoppedAtStart ) {
2016-06-30 15:03:32 +03:00
setStoppedState ( data ) ;
2015-05-24 20:58:39 +03:00
}
2014-09-04 11:33:44 +04:00
}
2015-06-03 15:18:46 +03:00
// Forwards the device-resume request to the fader worker object.
void AudioPlayer::resumeDevice() {
	_fader->resumeDevice();
}
2016-07-05 20:44:02 +03:00
namespace internal {

// Accessors exposing the player's shared synchronization primitive and the
// current fade-suppression gains to other audio modules (loaders, child players).
QMutex *audioPlayerMutex() {
	return &playerMutex;
}

// Gain applied to all audio while a notification sound is suppressing playback.
float64 audioSuppressGain() {
	return suppressAllGain;
}

// Gain applied to music while a voice message is suppressing it.
float64 audioSuppressSongGain() {
	return suppressSongGain;
}

// True when the last OpenAL call left no pending error.
bool audioCheckError() {
	return _checkALError();
}

} // namespace internal
2015-05-29 21:52:43 +03:00
// Creates the capture worker and moves it to its own thread. Public start/stop
// requests and worker results are bridged through queued signal connections.
AudioCapture::AudioCapture() : _capture(new AudioCaptureInner(&_captureThread)) {
	// Forward the public start()/stop() signals to the worker slots.
	connect(this, SIGNAL(start()), _capture, SLOT(onStart()));
	connect(this, SIGNAL(stop(bool)), _capture, SLOT(onStop(bool)));
	// Re-emit worker progress and results from this (owner-thread) object.
	connect(_capture, SIGNAL(done(QByteArray,VoiceWaveform,qint32)), this, SIGNAL(done(QByteArray,VoiceWaveform,qint32)));
	connect(_capture, SIGNAL(updated(quint16,qint32)), this, SIGNAL(updated(quint16,qint32)));
	connect(_capture, SIGNAL(error()), this, SIGNAL(error()));
	connect(&_captureThread, SIGNAL(started()), _capture, SLOT(onInit()));
	// The worker deletes itself once its thread finishes.
	connect(&_captureThread, SIGNAL(finished()), _capture, SLOT(deleteLater()));
	_captureThread.start();
}
2015-06-01 13:58:46 +03:00
bool AudioCapture : : check ( ) {
2016-04-27 15:02:17 +03:00
if ( auto defaultDevice = alcGetString ( 0 , ALC_CAPTURE_DEFAULT_DEVICE_SPECIFIER ) ) {
if ( auto device = alcCaptureOpenDevice ( defaultDevice , AudioVoiceMsgFrequency , AL_FORMAT_MONO16 , AudioVoiceMsgFrequency / 5 ) ) {
alcCaptureCloseDevice ( device ) ;
2015-06-01 13:58:46 +03:00
return _checkALCError ( ) ;
}
}
return false ;
}
2015-05-29 21:52:43 +03:00
// Clears the global accessor first so no one uses the capture object while
// its worker thread is being shut down.
AudioCapture::~AudioCapture() {
	capture = nullptr;
	_captureThread.quit();
	_captureThread.wait();
}
// Global accessor for the single AudioPlayer instance (may be nullptr).
AudioPlayer *audioPlayer() {
	return player;
}
// Global accessor for the single AudioCapture instance (may be nullptr).
AudioCapture *audioCapture() {
	return capture;
}
2016-10-12 22:34:25 +03:00
// Constructed on the creating thread, then moved (together with its timers)
// onto the fader thread, which drives fades, gain suppression and position
// polling via the single-shot timers below.
AudioPlayerFader::AudioPlayerFader(QThread *thread) : QObject()
, _timer(this)
, _suppressAllGain(1., 1.)
, _suppressSongGain(1., 1.) {
	moveToThread(thread);
	_timer.moveToThread(thread);
	_pauseTimer.moveToThread(thread);
	connect(thread, SIGNAL(started()), this, SLOT(onInit()));
	connect(thread, SIGNAL(finished()), this, SLOT(deleteLater()));

	_timer.setSingleShot(true);
	connect(&_timer, SIGNAL(timeout()), this, SLOT(onTimer()));

	_pauseTimer.setSingleShot(true);
	connect(&_pauseTimer, SIGNAL(timeout()), this, SLOT(onPauseTimer()));
	// Queued so the stop happens on the fader thread that owns _pauseTimer.
	connect(this, SIGNAL(stopPauseDevice()), this, SLOT(onPauseTimerStop()), Qt::QueuedConnection);
}
2015-05-29 21:52:43 +03:00
// Slot invoked when the fader thread starts; nothing to initialize currently.
void AudioPlayerFader::onInit() {
}
2015-05-29 21:52:43 +03:00
// Fader tick: advances the notification ("suppress all") and voice-over-music
// ("suppress song") gain animations, applies per-track fade/gain updates, and
// reschedules itself — fast while fading, slow while merely playing, or arms
// the device-pause timer when nothing is audible.
void AudioPlayerFader::onTimer() {
	QMutexLocker lock(&playerMutex);
	AudioPlayer *voice = audioPlayer();
	if (!voice) return;

	bool suppressAudioChanged = false, suppressSongChanged = false;
	if (_suppressAll || _suppressSongAnim) {
		auto ms = getms();
		float64 wasSong = suppressSongGain;
		if (_suppressAll) {
			// Notification suppression timeline: fade down, hold, fade back up,
			// then reset once notifyLengthMs has fully elapsed.
			float64 wasAudio = suppressAllGain;
			if (ms >= _suppressAllStart + notifyLengthMs || ms < _suppressAllStart) {
				_suppressAll = _suppressAllAnim = false;
				_suppressAllGain = anim::value(1., 1.);
			} else if (ms > _suppressAllStart + notifyLengthMs - AudioFadeDuration) {
				// Final stretch: animate back to full gain.
				if (_suppressAllGain.to() != 1.) _suppressAllGain.start(1.);
				_suppressAllGain.update(1. - ((_suppressAllStart + notifyLengthMs - ms) / float64(AudioFadeDuration)), anim::linear);
			} else if (ms >= _suppressAllStart + st::mediaPlayerSuppressDuration) {
				// Hold phase: suppression fully applied.
				if (_suppressAllAnim) {
					_suppressAllGain.finish();
					_suppressAllAnim = false;
				}
			} else if (ms > _suppressAllStart) {
				_suppressAllGain.update((ms - _suppressAllStart) / st::mediaPlayerSuppressDuration, anim::linear);
			}
			suppressAllGain = _suppressAllGain.current();
			suppressAudioChanged = (suppressAllGain != wasAudio);
		}
		if (_suppressSongAnim) {
			if (ms >= _suppressSongStart + AudioFadeDuration) {
				_suppressSongGain.finish();
				_suppressSongAnim = false;
			} else {
				_suppressSongGain.update((ms - _suppressSongStart) / float64(AudioFadeDuration), anim::linear);
			}
		}
		// Music can never be louder than the global notification suppression allows.
		suppressSongGain = qMin(suppressAllGain, _suppressSongGain.current());
		suppressSongChanged = (suppressSongGain != wasSong);
	}
	bool hasFading = (_suppressAll || _suppressSongAnim);
	bool hasPlaying = false;

	// Applies the gain/fade update to one track slot and emits the signals
	// requested by updateOnePlayback().
	auto updatePlayback = [this, voice, &hasPlaying, &hasFading](AudioMsgId::Type type, int index, float64 suppressGain, bool suppressGainChanged) {
		auto data = voice->dataForType(type, index);
		if ((data->playbackState.state & AudioPlayerStoppedMask) || data->playbackState.state == AudioPlayerPaused || !data->source) return;

		int32 emitSignals = updateOnePlayback(data, hasPlaying, hasFading, suppressGain, suppressGainChanged);
		if (emitSignals & EmitError) emit error(data->audio);
		if (emitSignals & EmitStopped) emit audioStopped(data->audio);
		if (emitSignals & EmitPositionUpdated) emit playPositionUpdated(data->audio);
		if (emitSignals & EmitNeedToPreload) emit needToPreload(data->audio);
	};
	auto suppressGainForMusic = suppressSongGain * Global::SongVolume();
	auto suppressGainForMusicChanged = suppressSongChanged || _songVolumeChanged;
	for (int i = 0; i < AudioSimultaneousLimit; ++i) {
		updatePlayback(AudioMsgId::Type::Voice, i, suppressAllGain, suppressAudioChanged);
		updatePlayback(AudioMsgId::Type::Song, i, suppressGainForMusic, suppressGainForMusicChanged);
	}
	auto suppressGainForVideo = suppressSongGain * Global::VideoVolume();
	auto suppressGainForVideoChanged = suppressSongChanged || _videoVolumeChanged;
	updatePlayback(AudioMsgId::Type::Video, 0, suppressGainForVideo, suppressGainForVideoChanged);

	_songVolumeChanged = _videoVolumeChanged = false;
	if (!hasFading) {
		if (!hasPlaying) {
			// Even with no tracks playing, a notification sound may still be.
			ALint state = AL_INITIAL;
			alGetSourcei(notifySource, AL_SOURCE_STATE, &state);
			if (_checkALError() && state == AL_PLAYING) {
				hasPlaying = true;
			}
		}
	}
	if (hasFading) {
		_timer.start(AudioFadeTimeout);
		resumeDevice();
	} else if (hasPlaying) {
		_timer.start(AudioCheckPositionTimeout);
		resumeDevice();
	} else {
		// Nothing audible: schedule pausing the output device to save power.
		QMutexLocker lock(&_pauseMutex);
		_pauseFlag = true;
		_pauseTimer.start(AudioPauseDeviceTimeout);
	}
}
2016-06-30 15:03:32 +03:00
// Advances fade and position state for one track. Reads the OpenAL sample
// offset and source state, drives fade-in/out gain while Starting/Resuming/
// Pausing/Finishing, detects natural end of playback, and returns a bitmask
// of Emit* flags telling the caller which signals to emit. Any OpenAL error
// puts the track into AudioPlayerStoppedAtError and returns EmitError.
int32 AudioPlayerFader::updateOnePlayback(AudioPlayer::AudioMsg *m, bool &hasPlaying, bool &hasFading, float64 suppressGain, bool suppressGainChanged) {
	bool playing = false, fading = false;

	ALint pos = 0;
	ALint state = AL_INITIAL;
	alGetSourcei(m->source, AL_SAMPLE_OFFSET, &pos);
	if (!_checkALError()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; }
	alGetSourcei(m->source, AL_SOURCE_STATE, &state);
	if (!_checkALError()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; }

	int32 emitSignals = 0;
	switch (m->playbackState.state) {
	case AudioPlayerFinishing:
	case AudioPlayerPausing:
	case AudioPlayerStarting:
	case AudioPlayerResuming:
		fading = true;
		break;
	case AudioPlayerPlaying:
		playing = true;
		break;
	}
	if (fading && (state == AL_PLAYING || !m->loading)) {
		if (state != AL_PLAYING) {
			// The source ran out of data while the fade was still in progress.
			fading = false;
			if (m->source) {
				alSourceStop(m->source);
				if (!_checkALError()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; }
				alSourcef(m->source, AL_GAIN, 1);
				if (!_checkALError()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; }
			}
			if (m->playbackState.state == AudioPlayerPausing) {
				m->playbackState.state = AudioPlayerPausedAtEnd;
			} else {
				setStoppedState(m, AudioPlayerStoppedAtEnd);
			}
			emitSignals |= EmitStopped;
		} else if (1000 * (pos + m->skipStart - m->started) >= AudioFadeDuration * m->playbackState.frequency) {
			// Fade completed: settle on the final gain and target state.
			fading = false;
			alSourcef(m->source, AL_GAIN, 1. * suppressGain);
			if (!_checkALError()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; }
			switch (m->playbackState.state) {
			case AudioPlayerFinishing:
				alSourceStop(m->source);
				if (!_checkALError()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; }
				setStoppedState(m);
				state = AL_STOPPED;
				break;
			case AudioPlayerPausing:
				alSourcePause(m->source);
				if (!_checkALError()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; }
				m->playbackState.state = AudioPlayerPaused;
				break;
			case AudioPlayerStarting:
			case AudioPlayerResuming:
				m->playbackState.state = AudioPlayerPlaying;
				playing = true;
				break;
			}
		} else {
			// Mid-fade: interpolate gain linearly over AudioFadeDuration.
			float64 newGain = 1000. * (pos + m->skipStart - m->started) / (AudioFadeDuration * m->playbackState.frequency);
			if (m->playbackState.state == AudioPlayerPausing || m->playbackState.state == AudioPlayerFinishing) {
				newGain = 1. - newGain; // these states fade out, not in
			}
			alSourcef(m->source, AL_GAIN, newGain * suppressGain);
			if (!_checkALError()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; }
		}
	} else if (playing && (state == AL_PLAYING || !m->loading)) {
		if (state != AL_PLAYING) {
			// Playback reached the end of the queued data.
			playing = false;
			if (m->source) {
				alSourceStop(m->source);
				if (!_checkALError()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; }
				alSourcef(m->source, AL_GAIN, 1);
				if (!_checkALError()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; }
			}
			setStoppedState(m, AudioPlayerStoppedAtEnd);
			emitSignals |= EmitStopped;
		} else if (suppressGainChanged) {
			// Steady playback only needs a gain refresh when suppression changed.
			alSourcef(m->source, AL_GAIN, suppressGain);
			if (!_checkALError()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; }
		}
	}
	// Only report position updates once they moved at least AudioCheckPositionDelta.
	if (state == AL_PLAYING && pos + m->skipStart - m->playbackState.position >= AudioCheckPositionDelta) {
		m->playbackState.position = pos + m->skipStart;
		emitSignals |= EmitPositionUpdated;
	}
	if (playing || m->playbackState.state == AudioPlayerStarting || m->playbackState.state == AudioPlayerResuming) {
		// Ask for more data before the already-loaded part runs out.
		if (!m->loading && m->skipEnd > 0 && m->playbackState.position + AudioPreloadSamples + m->skipEnd > m->playbackState.duration) {
			m->loading = true;
			emitSignals |= EmitNeedToPreload;
		}
	}
	if (playing) hasPlaying = true;
	if (fading) hasFading = true;
	return emitSignals;
}
2016-06-30 15:03:32 +03:00
void AudioPlayerFader : : setStoppedState ( AudioPlayer : : AudioMsg * m , AudioPlayerState state ) {
2016-07-10 16:02:22 +03:00
m - > playbackState . state = state ;
m - > playbackState . position = 0 ;
2015-07-03 11:47:16 +03:00
}
2015-06-03 15:18:46 +03:00
void AudioPlayerFader : : onPauseTimer ( ) {
QMutexLocker lock ( & _pauseMutex ) ;
if ( _pauseFlag ) {
_paused = true ;
alcDevicePauseSOFT ( audioDevice ) ;
2015-01-10 16:08:30 +03:00
}
}
2015-06-03 15:18:46 +03:00
void AudioPlayerFader : : onPauseTimerStop ( ) {
if ( _pauseTimer . isActive ( ) ) _pauseTimer . stop ( ) ;
2015-01-10 16:08:30 +03:00
}
2015-07-01 00:07:05 +03:00
void AudioPlayerFader : : onSuppressSong ( ) {
if ( ! _suppressSong ) {
_suppressSong = true ;
_suppressSongAnim = true ;
_suppressSongStart = getms ( ) ;
_suppressSongGain . start ( st : : suppressSong ) ;
onTimer ( ) ;
}
}
void AudioPlayerFader : : onUnsuppressSong ( ) {
if ( _suppressSong ) {
_suppressSong = false ;
_suppressSongAnim = true ;
_suppressSongStart = getms ( ) ;
_suppressSongGain . start ( 1. ) ;
onTimer ( ) ;
}
}
void AudioPlayerFader : : onSuppressAll ( ) {
_suppressAll = true ;
_suppressAllStart = getms ( ) ;
_suppressAllGain . start ( st : : suppressAll ) ;
onTimer ( ) ;
}
2015-07-03 11:47:16 +03:00
// Marks the song volume dirty so the next tick pushes the new gain to sources.
void AudioPlayerFader::onSongVolumeChanged() {
	_songVolumeChanged = true;
	onTimer();
}
2016-07-12 17:11:59 +03:00
// Marks the video volume dirty so the next tick pushes the new gain to sources.
void AudioPlayerFader::onVideoVolumeChanged() {
	_videoVolumeChanged = true;
	onTimer();
}
2015-06-03 15:18:46 +03:00
void AudioPlayerFader : : resumeDevice ( ) {
QMutexLocker lock ( & _pauseMutex ) ;
_pauseFlag = false ;
emit stopPauseDevice ( ) ;
if ( _paused ) {
_paused = false ;
alcDeviceResumeSOFT ( audioDevice ) ;
2015-06-03 14:57:14 +03:00
}
2015-01-10 16:08:30 +03:00
}
2015-05-29 21:52:43 +03:00
// All state for one voice-message recording session: the OpenAL capture
// device, the FFmpeg (opus) encoding pipeline with a custom in-memory
// AVIOContext, the resampler, and waveform/level statistics.
struct AudioCapturePrivate {
	AudioCapturePrivate()
		: device(0)
		, fmt(0)
		, ioBuffer(0)
		, ioContext(0)
		, fmtContext(0)
		, stream(0)
		, codec(0)
		, codecContext(0)
		, opened(false)
		, srcSamples(0)
		, dstSamples(0)
		, maxDstSamples(0)
		, dstSamplesSize(0)
		, fullSamples(0)
		, srcSamplesData(0)
		, dstSamplesData(0)
		, swrContext(0)
		, lastUpdate(0)
		, levelMax(0)
		, dataPos(0)
		, waveformMod(0)
		, waveformEach(AudioVoiceMsgFrequency / 100)
		, waveformPeak(0) {
	}
	ALCdevice *device; // OpenAL capture device (mono, 16 bit).
	AVOutputFormat *fmt;
	uchar *ioBuffer; // buffer handed to the custom AVIOContext below.
	AVIOContext *ioContext;
	AVFormatContext *fmtContext;
	AVStream *stream;
	AVCodec *codec;
	AVCodecContext *codecContext;
	bool opened; // set once fmtContext uses the custom IO context.
	int32 srcSamples, dstSamples, maxDstSamples, dstSamplesSize, fullSamples;
	uint8_t **srcSamplesData, **dstSamplesData;
	SwrContext *swrContext; // S16 -> encoder sample format resampler.
	int32 lastUpdate;
	uint16 levelMax;
	QByteArray data; // encoded output, written via _write_data().
	int32 dataPos; // current read/write offset inside |data|.
	// waveformEach = frequency/100: one waveform sample per 10ms of audio.
	int64 waveformMod, waveformEach;
	uint16 waveformPeak;
	QVector<uchar> waveform;

	// AVIO read callback: copy up to buf_size bytes from |data| at dataPos.
	static int _read_data(void *opaque, uint8_t *buf, int buf_size) {
		AudioCapturePrivate *l = reinterpret_cast<AudioCapturePrivate*>(opaque);
		int32 nbytes = qMin(l->data.size() - l->dataPos, int32(buf_size));
		if (nbytes <= 0) {
			return 0;
		}
		memcpy(buf, l->data.constData() + l->dataPos, nbytes);
		l->dataPos += nbytes;
		return nbytes;
	}
	// AVIO write callback: grow |data| as needed and write at dataPos.
	static int _write_data(void *opaque, uint8_t *buf, int buf_size) {
		AudioCapturePrivate *l = reinterpret_cast<AudioCapturePrivate*>(opaque);
		if (buf_size <= 0) return 0;
		if (l->dataPos + buf_size > l->data.size()) l->data.resize(l->dataPos + buf_size);
		memcpy(l->data.data() + l->dataPos, buf, buf_size);
		l->dataPos += buf_size;
		return buf_size;
	}
	// AVIO seek callback over the in-memory buffer.
	// NOTE(review): AVSEEK_SIZE is not handled — confirm the opus muxer never requests it.
	static int64_t _seek_data(void *opaque, int64_t offset, int whence) {
		AudioCapturePrivate *l = reinterpret_cast<AudioCapturePrivate*>(opaque);
		int32 newPos = -1;
		switch (whence) {
		case SEEK_SET: newPos = offset; break;
		case SEEK_CUR: newPos = l->dataPos + offset; break;
		case SEEK_END: newPos = l->data.size() + offset; break;
		}
		if (newPos < 0) {
			return -1;
		}
		l->dataPos = newPos;
		return l->dataPos;
	}
};
// Worker object that performs the actual recording; lives on |thread|.
AudioCaptureInner::AudioCaptureInner(QThread *thread) : d(new AudioCapturePrivate()) {
	moveToThread(thread);
	_timer.moveToThread(thread);
	connect(&_timer, SIGNAL(timeout()), this, SLOT(onTimeout()));
}
// Stops any in-progress recording (discarding the result) before freeing state.
AudioCaptureInner::~AudioCaptureInner() {
	onStop(false);
	delete d;
}
// Slot invoked when the capture thread starts; nothing to initialize currently.
void AudioCaptureInner::onInit() {
}
// Starts a voice-message recording: opens the default OpenAL capture device,
// builds an opus encoder with an in-memory output (via the AVIO callbacks in
// AudioCapturePrivate), writes the container header and starts the poll timer.
// Emits error() and tears down partial state on any failure.
void AudioCaptureInner::onStart() {

	// Start OpenAL Capture
	const ALCchar *dName = alcGetString(0, ALC_CAPTURE_DEFAULT_DEVICE_SPECIFIER);
	DEBUG_LOG(("Audio Info: Capture device name '%1'").arg(dName));
	// Ring buffer of 1/5 second of mono 16-bit samples.
	d->device = alcCaptureOpenDevice(dName, AudioVoiceMsgFrequency, AL_FORMAT_MONO16, AudioVoiceMsgFrequency / 5);
	if (!d->device) {
		LOG(("Audio Error: capture device not present!"));
		emit error();
		return;
	}
	alcCaptureStart(d->device);
	if (!_checkCaptureError(d->device)) {
		alcCaptureCloseDevice(d->device);
		d->device = 0;
		emit error();
		return;
	}

	// Create encoding context
	d->ioBuffer = (uchar*)av_malloc(AVBlockSize);

	d->ioContext = avio_alloc_context(d->ioBuffer, AVBlockSize, 1, static_cast<void*>(d), &AudioCapturePrivate::_read_data, &AudioCapturePrivate::_write_data, &AudioCapturePrivate::_seek_data);
	int res = 0;
	char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
	// Find the opus output format among the registered muxers.
	AVOutputFormat *fmt = 0;
	while ((fmt = av_oformat_next(fmt))) {
		if (fmt->name == qstr("opus")) {
			break;
		}
	}
	if (!fmt) {
		LOG(("Audio Error: Unable to find opus AVOutputFormat for capture"));
		onStop(false);
		emit error();
		return;
	}
	if ((res = avformat_alloc_output_context2(&d->fmtContext, fmt, 0, 0)) < 0) {
		LOG(("Audio Error: Unable to avformat_alloc_output_context2 for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
		onStop(false);
		emit error();
		return;
	}
	d->fmtContext->pb = d->ioContext;
	d->fmtContext->flags |= AVFMT_FLAG_CUSTOM_IO;
	d->opened = true;

	// Add audio stream
	d->codec = avcodec_find_encoder(fmt->audio_codec);
	if (!d->codec) {
		LOG(("Audio Error: Unable to avcodec_find_encoder for capture"));
		onStop(false);
		emit error();
		return;
	}
	d->stream = avformat_new_stream(d->fmtContext, d->codec);
	if (!d->stream) {
		LOG(("Audio Error: Unable to avformat_new_stream for capture"));
		onStop(false);
		emit error();
		return;
	}
	d->stream->id = d->fmtContext->nb_streams - 1;
	d->codecContext = avcodec_alloc_context3(d->codec);
	if (!d->codecContext) {
		LOG(("Audio Error: Unable to avcodec_alloc_context3 for capture"));
		onStop(false);
		emit error();
		return;
	}
	av_opt_set_int(d->codecContext, "refcounted_frames", 1, 0);
	// Mono 64 kbit/s opus at the voice-message frequency.
	d->codecContext->sample_fmt = AV_SAMPLE_FMT_FLTP;
	d->codecContext->bit_rate = 64000;
	d->codecContext->channel_layout = AV_CH_LAYOUT_MONO;
	d->codecContext->sample_rate = AudioVoiceMsgFrequency;
	d->codecContext->channels = 1;
	if (d->fmtContext->oformat->flags & AVFMT_GLOBALHEADER) {
		d->codecContext->flags |= CODEC_FLAG_GLOBAL_HEADER;
	}

	// Open audio stream
	if ((res = avcodec_open2(d->codecContext, d->codec, nullptr)) < 0) {
		LOG(("Audio Error: Unable to avcodec_open2 for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
		onStop(false);
		emit error();
		return;
	}

	// Alloc source samples
	d->srcSamples = (d->codecContext->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE) ? 10000 : d->codecContext->frame_size;
	//if ((res = av_samples_alloc_array_and_samples(&d->srcSamplesData, 0, d->codecContext->channels, d->srcSamples, d->codecContext->sample_fmt, 0)) < 0) {
	//	LOG(("Audio Error: Unable to av_samples_alloc_array_and_samples for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
	//	onStop(false);
	//	emit error();
	//	return;
	//}
	// Using _captured directly

	// Prepare resampling from captured S16 to the encoder's sample format.
	d->swrContext = swr_alloc();
	if (!d->swrContext) {
		fprintf(stderr, "Could not allocate resampler context\n");
		exit(1);
	}

	av_opt_set_int(d->swrContext, "in_channel_count", d->codecContext->channels, 0);
	av_opt_set_int(d->swrContext, "in_sample_rate", d->codecContext->sample_rate, 0);
	av_opt_set_sample_fmt(d->swrContext, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
	av_opt_set_int(d->swrContext, "out_channel_count", d->codecContext->channels, 0);
	av_opt_set_int(d->swrContext, "out_sample_rate", d->codecContext->sample_rate, 0);
	av_opt_set_sample_fmt(d->swrContext, "out_sample_fmt", d->codecContext->sample_fmt, 0);

	if ((res = swr_init(d->swrContext)) < 0) {
		LOG(("Audio Error: Unable to swr_init for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
		onStop(false);
		emit error();
		return;
	}

	d->maxDstSamples = d->srcSamples;
	if ((res = av_samples_alloc_array_and_samples(&d->dstSamplesData, 0, d->codecContext->channels, d->maxDstSamples, d->codecContext->sample_fmt, 0)) < 0) {
		LOG(("Audio Error: Unable to av_samples_alloc_array_and_samples for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
		onStop(false);
		emit error();
		return;
	}
	d->dstSamplesSize = av_samples_get_buffer_size(0, d->codecContext->channels, d->maxDstSamples, d->codecContext->sample_fmt, 0);

	if ((res = avcodec_parameters_from_context(d->stream->codecpar, d->codecContext)) < 0) {
		LOG(("Audio Error: Unable to avcodec_parameters_from_context for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
		onStop(false);
		emit error();
		return;
	}

	// Write file header
	if ((res = avformat_write_header(d->fmtContext, 0)) < 0) {
		LOG(("Audio Error: Unable to avformat_write_header for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
		onStop(false);
		emit error();
		return;
	}

	// Poll the capture device every 50 ms.
	_timer.start(50);
	_captured.clear();
	_captured.reserve(AudioVoiceMsgBufferSize);
	DEBUG_LOG(("Audio Capture: started!"));
}
void AudioCaptureInner : : onStop ( bool needResult ) {
if ( ! _timer . isActive ( ) ) return ; // in onStop() already
_timer . stop ( ) ;
2015-06-01 14:15:07 +03:00
if ( d - > device ) {
alcCaptureStop ( d - > device ) ;
onTimeout ( ) ; // get last data
}
2015-05-29 21:52:43 +03:00
// Write what is left
if ( ! _captured . isEmpty ( ) ) {
int32 fadeSamples = AudioVoiceMsgFade * AudioVoiceMsgFrequency / 1000 , capturedSamples = _captured . size ( ) / sizeof ( short ) ;
if ( ( _captured . size ( ) % sizeof ( short ) ) | | ( d - > fullSamples + capturedSamples < AudioVoiceMsgFrequency ) | | ( capturedSamples < fadeSamples ) ) {
d - > fullSamples = 0 ;
d - > dataPos = 0 ;
d - > data . clear ( ) ;
2016-02-12 19:35:06 +03:00
d - > waveformMod = 0 ;
d - > waveformPeak = 0 ;
d - > waveform . clear ( ) ;
2015-05-29 21:52:43 +03:00
} else {
float64 coef = 1. / fadeSamples , fadedFrom = 0 ;
for ( short * ptr = ( ( short * ) _captured . data ( ) ) + capturedSamples , * end = ptr - fadeSamples ; ptr ! = end ; + + fadedFrom ) {
- - ptr ;
* ptr = qRound ( fadedFrom * coef * * ptr ) ;
}
if ( capturedSamples % d - > srcSamples ) {
int32 s = _captured . size ( ) ;
_captured . resize ( s + ( d - > srcSamples - ( capturedSamples % d - > srcSamples ) ) * sizeof ( short ) ) ;
memset ( _captured . data ( ) + s , 0 , _captured . size ( ) - s ) ;
}
int32 framesize = d - > srcSamples * d - > codecContext - > channels * sizeof ( short ) , encoded = 0 ;
while ( _captured . size ( ) > = encoded + framesize ) {
2016-07-22 18:01:24 +03:00
processFrame ( encoded , framesize ) ;
2015-05-29 21:52:43 +03:00
encoded + = framesize ;
}
2016-07-22 18:01:24 +03:00
writeFrame ( nullptr ) ; // drain the codec
2015-05-29 21:52:43 +03:00
if ( encoded ! = _captured . size ( ) ) {
d - > fullSamples = 0 ;
d - > dataPos = 0 ;
d - > data . clear ( ) ;
2016-02-12 19:35:06 +03:00
d - > waveformMod = 0 ;
d - > waveformPeak = 0 ;
d - > waveform . clear ( ) ;
2015-05-29 21:52:43 +03:00
}
}
}
2016-01-11 23:43:29 +08:00
DEBUG_LOG ( ( " Audio Capture: stopping (need result: %1), size: %2, samples: %3 " ) . arg ( Logs : : b ( needResult ) ) . arg ( d - > data . size ( ) ) . arg ( d - > fullSamples ) ) ;
2015-05-29 21:52:43 +03:00
_captured = QByteArray ( ) ;
// Finish stream
if ( d - > device ) {
av_write_trailer ( d - > fmtContext ) ;
}
QByteArray result = d - > fullSamples ? d - > data : QByteArray ( ) ;
2016-02-12 19:35:06 +03:00
VoiceWaveform waveform ;
2015-05-29 21:52:43 +03:00
qint32 samples = d - > fullSamples ;
2016-02-12 19:35:06 +03:00
if ( samples & & ! d - > waveform . isEmpty ( ) ) {
int64 count = d - > waveform . size ( ) , sum = 0 ;
if ( count > = WaveformSamplesCount ) {
QVector < uint16 > peaks ;
peaks . reserve ( WaveformSamplesCount ) ;
uint16 peak = 0 ;
for ( int32 i = 0 ; i < count ; + + i ) {
uint16 sample = uint16 ( d - > waveform . at ( i ) ) * 256 ;
if ( peak < sample ) {
peak = sample ;
}
sum + = WaveformSamplesCount ;
if ( sum > = count ) {
sum - = count ;
peaks . push_back ( peak ) ;
peak = 0 ;
}
}
int64 sum = std : : accumulate ( peaks . cbegin ( ) , peaks . cend ( ) , 0ULL ) ;
peak = qMax ( int32 ( sum * 1.8 / peaks . size ( ) ) , 2500 ) ;
waveform . resize ( peaks . size ( ) ) ;
for ( int32 i = 0 , l = peaks . size ( ) ; i ! = l ; + + i ) {
waveform [ i ] = char ( qMin ( 31U , uint32 ( qMin ( peaks . at ( i ) , peak ) ) * 31 / peak ) ) ;
}
}
}
2015-05-29 21:52:43 +03:00
if ( d - > device ) {
alcCaptureStop ( d - > device ) ;
alcCaptureCloseDevice ( d - > device ) ;
2016-07-13 20:34:57 +03:00
d - > device = nullptr ;
2015-05-29 21:52:43 +03:00
if ( d - > codecContext ) {
2016-07-22 18:01:24 +03:00
avcodec_free_context ( & d - > codecContext ) ;
2016-07-13 20:34:57 +03:00
d - > codecContext = nullptr ;
2015-05-29 21:52:43 +03:00
}
if ( d - > srcSamplesData ) {
if ( d - > srcSamplesData [ 0 ] ) {
av_freep ( & d - > srcSamplesData [ 0 ] ) ;
}
av_freep ( & d - > srcSamplesData ) ;
}
if ( d - > dstSamplesData ) {
if ( d - > dstSamplesData [ 0 ] ) {
av_freep ( & d - > dstSamplesData [ 0 ] ) ;
}
av_freep ( & d - > dstSamplesData ) ;
}
d - > fullSamples = 0 ;
if ( d - > swrContext ) {
swr_free ( & d - > swrContext ) ;
2016-07-13 20:34:57 +03:00
d - > swrContext = nullptr ;
2015-05-29 21:52:43 +03:00
}
if ( d - > opened ) {
avformat_close_input ( & d - > fmtContext ) ;
d - > opened = false ;
2016-07-13 20:34:57 +03:00
}
if ( d - > ioContext ) {
2017-01-09 17:12:53 +04:00
av_freep ( & d - > ioContext - > buffer ) ;
av_freep ( & d - > ioContext ) ;
2016-07-13 20:34:57 +03:00
d - > ioBuffer = nullptr ;
2015-05-29 21:52:43 +03:00
} else if ( d - > ioBuffer ) {
2017-01-09 17:12:53 +04:00
av_freep ( & d - > ioBuffer ) ;
2015-05-29 21:52:43 +03:00
}
if ( d - > fmtContext ) {
avformat_free_context ( d - > fmtContext ) ;
2016-07-13 20:34:57 +03:00
d - > fmtContext = nullptr ;
2015-05-29 21:52:43 +03:00
}
2016-07-13 20:34:57 +03:00
d - > fmt = nullptr ;
d - > stream = nullptr ;
d - > codec = nullptr ;
2015-05-29 21:52:43 +03:00
d - > lastUpdate = 0 ;
2016-02-12 19:35:06 +03:00
d - > levelMax = 0 ;
2015-05-29 21:52:43 +03:00
d - > dataPos = 0 ;
d - > data . clear ( ) ;
2016-02-12 19:35:06 +03:00
d - > waveformMod = 0 ;
d - > waveformPeak = 0 ;
d - > waveform . clear ( ) ;
2015-05-29 21:52:43 +03:00
}
2016-02-12 19:35:06 +03:00
if ( needResult ) emit done ( result , waveform , samples ) ;
2015-05-29 21:52:43 +03:00
}
void AudioCaptureInner::onTimeout() {
	// Periodic capture tick: drains freshly recorded samples from the OpenAL
	// capture device into _captured, updates the recording-level meter shown
	// in the UI, and encodes as many complete frames as are available.
	if (!d->device) {
		// Capture already stopped elsewhere - nothing to poll anymore.
		_timer.stop();
		return;
	}
	ALint samples;
	alcGetIntegerv(d->device, ALC_CAPTURE_SAMPLES, sizeof(samples), &samples);
	if (!_checkCaptureError(d->device)) {
		onStop(false);
		emit error();
		return;
	}
	if (samples > 0) {
		// Get samples from OpenAL
		// Grow _captured by whole AudioVoiceMsgBufferSize chunks so repeated
		// small resizes don't keep reallocating.
		int32 s = _captured.size(), news = s + samples * sizeof(short);
		if (news / AudioVoiceMsgBufferSize > s / AudioVoiceMsgBufferSize) {
			_captured.reserve(((news / AudioVoiceMsgBufferSize) + 1) * AudioVoiceMsgBufferSize);
		}
		_captured.resize(news);
		alcCaptureSamples(d->device, (ALCvoid *)(_captured.data() + s), samples);
		if (!_checkCaptureError(d->device)) {
			onStop(false);
			emit error();
			return;
		}

		// Count new recording level and update view
		// Samples inside the initial "skip" window are ignored; samples in the
		// following "fade" window are scaled up linearly, mirroring the fade-in
		// applied to the encoded data in processFrame().
		int32 skipSamples = AudioVoiceMsgSkip * AudioVoiceMsgFrequency / 1000, fadeSamples = AudioVoiceMsgFade * AudioVoiceMsgFrequency / 1000;
		int32 levelindex = d->fullSamples + (s / sizeof(short));
		for (const short *ptr = (const short *)(_captured.constData() + s), *end = (const short *)(_captured.constData() + news); ptr < end; ++ptr, ++levelindex) {
			if (levelindex > skipSamples) {
				uint16 value = qAbs(*ptr);
				if (levelindex < skipSamples + fadeSamples) {
					value = qRound(value * float64(levelindex - skipSamples) / fadeSamples);
				}
				if (d->levelMax < value) {
					d->levelMax = value;
				}
			}
		}

		// Notify the UI at most every AudioVoiceMsgUpdateView milliseconds,
		// then reset the peak so the next window is measured from scratch.
		qint32 samplesFull = d->fullSamples + _captured.size() / sizeof(short), samplesSinceUpdate = samplesFull - d->lastUpdate;
		if (samplesSinceUpdate > AudioVoiceMsgUpdateView * AudioVoiceMsgFrequency / 1000) {
			emit updated(d->levelMax, samplesFull);
			d->lastUpdate = samplesFull;
			d->levelMax = 0;
		}

		// Write frames
		// Keep at least fadeSamples worth of trailing data unencoded so that
		// onStop() can apply a fade-out to the tail of the recording.
		int32 framesize = d->srcSamples * d->codecContext->channels * sizeof(short), encoded = 0;
		while (uint32(_captured.size()) >= encoded + framesize + fadeSamples * sizeof(short)) {
			processFrame(encoded, framesize);
			encoded += framesize;
		}

		// Collapse the buffer: drop the encoded prefix, keep the remainder.
		if (encoded > 0) {
			int32 goodSize = _captured.size() - encoded;
			memmove(_captured.data(), _captured.constData() + encoded, goodSize);
			_captured.resize(goodSize);
		}
	} else {
		DEBUG_LOG(("Audio Capture: no samples to capture."));
	}
}
2016-07-22 18:01:24 +03:00
void AudioCaptureInner : : processFrame ( int32 offset , int32 framesize ) {
2015-05-29 21:52:43 +03:00
// Prepare audio frame
if ( framesize % sizeof ( short ) ) { // in the middle of a sample
LOG ( ( " Audio Error: Bad framesize in writeFrame() for capture, framesize %1, %2 " ) . arg ( framesize ) ) ;
onStop ( false ) ;
emit error ( ) ;
return ;
}
int32 samplesCnt = framesize / sizeof ( short ) ;
int res = 0 ;
char err [ AV_ERROR_MAX_STRING_SIZE ] = { 0 } ;
2017-01-05 13:08:16 +04:00
auto srcSamplesDataChannel = ( short * ) ( _captured . data ( ) + offset ) ;
auto srcSamplesData = & srcSamplesDataChannel ;
2015-05-29 21:52:43 +03:00
// memcpy(d->srcSamplesData[0], _captured.constData() + offset, framesize);
int32 skipSamples = AudioVoiceMsgSkip * AudioVoiceMsgFrequency / 1000 , fadeSamples = AudioVoiceMsgFade * AudioVoiceMsgFrequency / 1000 ;
if ( d - > fullSamples < skipSamples + fadeSamples ) {
int32 fadedCnt = qMin ( samplesCnt , skipSamples + fadeSamples - d - > fullSamples ) ;
float64 coef = 1. / fadeSamples , fadedFrom = d - > fullSamples - skipSamples ;
2016-02-12 19:35:06 +03:00
short * ptr = srcSamplesDataChannel , * zeroEnd = ptr + qMin ( samplesCnt , qMax ( 0 , skipSamples - d - > fullSamples ) ) , * end = ptr + fadedCnt ;
2015-05-29 21:52:43 +03:00
for ( ; ptr ! = zeroEnd ; + + ptr , + + fadedFrom ) {
* ptr = 0 ;
}
for ( ; ptr ! = end ; + + ptr , + + fadedFrom ) {
* ptr = qRound ( fadedFrom * coef * * ptr ) ;
}
}
2016-02-12 19:35:06 +03:00
d - > waveform . reserve ( d - > waveform . size ( ) + ( samplesCnt / d - > waveformEach ) + 1 ) ;
for ( short * ptr = srcSamplesDataChannel , * end = ptr + samplesCnt ; ptr ! = end ; + + ptr ) {
uint16 value = qAbs ( * ptr ) ;
if ( d - > waveformPeak < value ) {
d - > waveformPeak = value ;
}
if ( + + d - > waveformMod = = d - > waveformEach ) {
d - > waveformMod - = d - > waveformEach ;
d - > waveform . push_back ( uchar ( d - > waveformPeak / 256 ) ) ;
d - > waveformPeak = 0 ;
}
}
2015-05-29 21:52:43 +03:00
// Convert to final format
d - > dstSamples = av_rescale_rnd ( swr_get_delay ( d - > swrContext , d - > codecContext - > sample_rate ) + d - > srcSamples , d - > codecContext - > sample_rate , d - > codecContext - > sample_rate , AV_ROUND_UP ) ;
if ( d - > dstSamples > d - > maxDstSamples ) {
d - > maxDstSamples = d - > dstSamples ;
2017-01-09 17:12:53 +04:00
av_freep ( & d - > dstSamplesData [ 0 ] ) ;
if ( ( res = av_samples_alloc ( d - > dstSamplesData , 0 , d - > codecContext - > channels , d - > dstSamples , d - > codecContext - > sample_fmt , 1 ) ) < 0 ) {
2015-05-29 21:52:43 +03:00
LOG ( ( " Audio Error: Unable to av_samples_alloc for capture, error %1, %2 " ) . arg ( res ) . arg ( av_make_error_string ( err , sizeof ( err ) , res ) ) ) ;
onStop ( false ) ;
emit error ( ) ;
return ;
}
d - > dstSamplesSize = av_samples_get_buffer_size ( 0 , d - > codecContext - > channels , d - > maxDstSamples , d - > codecContext - > sample_fmt , 0 ) ;
}
if ( ( res = swr_convert ( d - > swrContext , d - > dstSamplesData , d - > dstSamples , ( const uint8_t * * ) srcSamplesData , d - > srcSamples ) ) < 0 ) {
LOG ( ( " Audio Error: Unable to swr_convert for capture, error %1, %2 " ) . arg ( res ) . arg ( av_make_error_string ( err , sizeof ( err ) , res ) ) ) ;
onStop ( false ) ;
emit error ( ) ;
return ;
}
// Write audio frame
2015-05-30 19:30:47 +03:00
AVFrame * frame = av_frame_alloc ( ) ;
2015-05-29 21:52:43 +03:00
frame - > nb_samples = d - > dstSamples ;
2016-07-22 19:22:25 +03:00
frame - > pts = av_rescale_q ( d - > fullSamples , AVRational { 1 , d - > codecContext - > sample_rate } , d - > codecContext - > time_base ) ;
2016-07-22 18:01:24 +03:00
2015-05-29 21:52:43 +03:00
avcodec_fill_audio_frame ( frame , d - > codecContext - > channels , d - > codecContext - > sample_fmt , d - > dstSamplesData [ 0 ] , d - > dstSamplesSize , 0 ) ;
2016-07-22 18:01:24 +03:00
writeFrame ( frame ) ;
d - > fullSamples + = samplesCnt ;
av_frame_free ( & frame ) ;
}
void AudioCaptureInner::writeFrame(AVFrame *frame) {
	// Submit one frame to the encoder and flush any resulting packets to the
	// muxer. Passing frame == nullptr requests a full drain of the codec
	// (used once from onStop() to flush the last buffered audio).
	int res = 0;
	char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };

	res = avcodec_send_frame(d->codecContext, frame);
	if (res == AVERROR(EAGAIN)) {
		// Encoder input queue is full: pull pending packets out first,
		// then retry the send exactly once.
		int packetsWritten = writePackets();
		if (packetsWritten < 0) {
			// AVERROR_EOF while we still have a real frame to send means the
			// encoder was already drained - that is a protocol error here.
			// (Other negative values: writePackets() already reported/stopped.)
			if (frame && packetsWritten == AVERROR_EOF) {
				LOG(("Audio Error: EOF in packets received when EAGAIN was got in avcodec_send_frame()"));
				onStop(false);
				emit error();
			}
			return;
		} else if (!packetsWritten) {
			// EAGAIN from send but nothing to receive - inconsistent codec state.
			LOG(("Audio Error: No packets received when EAGAIN was got in avcodec_send_frame()"));
			onStop(false);
			emit error();
			return;
		}
		res = avcodec_send_frame(d->codecContext, frame);
	}
	if (res < 0) {
		LOG(("Audio Error: Unable to avcodec_send_frame for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
		onStop(false);
		emit error();
		return;
	}

	if (!frame) { // drain
		// A successful drain must end with AVERROR_EOF from the receive side.
		if ((res = writePackets()) != AVERROR_EOF) {
			LOG(("Audio Error: not EOF in packets received when draining the codec, result %1").arg(res));
			onStop(false);
			emit error();
		}
	}
}
int AudioCaptureInner : : writePackets ( ) {
AVPacket pkt ;
memset ( & pkt , 0 , sizeof ( pkt ) ) ; // data and size must be 0;
int res = 0 ;
char err [ AV_ERROR_MAX_STRING_SIZE ] = { 0 } ;
int written = 0 ;
do {
av_init_packet ( & pkt ) ;
if ( ( res = avcodec_receive_packet ( d - > codecContext , & pkt ) ) < 0 ) {
if ( res = = AVERROR ( EAGAIN ) ) {
return written ;
} else if ( res = = AVERROR_EOF ) {
return res ;
}
LOG ( ( " Audio Error: Unable to avcodec_receive_packet for capture, error %1, %2 " ) . arg ( res ) . arg ( av_make_error_string ( err , sizeof ( err ) , res ) ) ) ;
onStop ( false ) ;
emit error ( ) ;
return res ;
}
av_packet_rescale_ts ( & pkt , d - > codecContext - > time_base , d - > stream - > time_base ) ;
2015-05-29 21:52:43 +03:00
pkt . stream_index = d - > stream - > index ;
if ( ( res = av_interleaved_write_frame ( d - > fmtContext , & pkt ) ) < 0 ) {
LOG ( ( " Audio Error: Unable to av_interleaved_write_frame for capture, error %1, %2 " ) . arg ( res ) . arg ( av_make_error_string ( err , sizeof ( err ) , res ) ) ) ;
onStop ( false ) ;
emit error ( ) ;
2016-07-22 18:01:24 +03:00
return - 1 ;
2015-05-29 21:52:43 +03:00
}
2016-07-22 18:01:24 +03:00
+ + written ;
av_packet_unref ( & pkt ) ;
} while ( true ) ;
return written ;
2015-06-01 14:15:07 +03:00
}
2015-07-01 00:07:05 +03:00
2016-02-12 19:35:06 +03:00
// Opens an audio file only to read its metadata (title/performer) and an
// optional embedded cover image; never decodes any audio. Used by
// audioReadSongAttributes() below.
class FFMpegAttributesReader : public AbstractFFMpegLoader {
public:

	FFMpegAttributesReader(const FileLocation &file, const QByteArray &data) : AbstractFFMpegLoader(file, data) {
	}

	bool open(qint64 &position) override {
		// Returns false when the file has a real video stream (it is not a
		// song) or cannot be opened; on success fills the metadata fields.
		if (!AbstractFFMpegLoader::open(position)) {
			return false;
		}

		int res = 0;
		char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };

		int videoStreamId = av_find_best_stream(fmtContext, AVMEDIA_TYPE_VIDEO, -1, -1, &codec, 0);
		if (videoStreamId >= 0) {
			// NOTE(review): the error string is built from streamId (the audio
			// stream index inherited from AbstractFFMpegLoader), not from an
			// actual error code - looks like a copy-paste slip; confirm the
			// intended value before changing.
			DEBUG_LOG(("Audio Read Error: Found video stream in file '%1', data size '%2', error %3, %4").arg(file.name()).arg(data.size()).arg(videoStreamId).arg(av_make_error_string(err, sizeof(err), streamId)));
			return false;
		}

		// An attached picture stream (AV_DISPOSITION_ATTACHED_PIC) carries the
		// album cover as a single pre-read packet; take the first decodable one.
		for (int32 i = 0, l = fmtContext->nb_streams; i < l; ++i) {
			AVStream *stream = fmtContext->streams[i];
			if (stream->disposition & AV_DISPOSITION_ATTACHED_PIC) {
				const AVPacket &packet(stream->attached_pic);
				if (packet.size) {
					bool animated = false;
					QByteArray cover((const char*)packet.data, packet.size), format;
					_cover = App::readImage(cover, &format, true, &animated);
					if (!_cover.isNull()) {
						_coverBytes = cover;
						_coverFormat = format;
						break;
					}
				}
			}
		}

		// Stream-level tags take precedence; container-level tags fill the gaps
		// (trySet() only writes fields that are still empty).
		extractMetaData(fmtContext->streams[streamId]->metadata);
		extractMetaData(fmtContext->metadata);

		return true;
	}

	// Copies the dictionary entry for `key` into `to`, but only if `to` is
	// still empty - earlier sources win.
	void trySet(QString &to, AVDictionary *dict, const char *key) {
		if (!to.isEmpty()) return;
		if (AVDictionaryEntry *tag = av_dict_get(dict, key, 0, 0)) {
			to = QString::fromUtf8(tag->value);
		}
	}
	void extractMetaData(AVDictionary *dict) {
		trySet(_title, dict, "title");
		// Performer is looked up under several common tag names.
		trySet(_performer, dict, "artist");
		trySet(_performer, dict, "performer");
		trySet(_performer, dict, "album_artist");
		//for (AVDictionaryEntry *tag = av_dict_get(dict, "", 0, AV_DICT_IGNORE_SUFFIX); tag; tag = av_dict_get(dict, "", tag, AV_DICT_IGNORE_SUFFIX)) {
		//	const char *key = tag->key;
		//	const char *value = tag->value;
		//	QString tmp = QString::fromUtf8(value);
		//}
	}

	// This reader never produces audio, so the OpenAL format is irrelevant.
	int32 format() override {
		return 0;
	}

	QString title() {
		return _title;
	}

	QString performer() {
		return _performer;
	}

	QImage cover() {
		return _cover;
	}

	QByteArray coverBytes() {
		return _coverBytes;
	}

	QByteArray coverFormat() {
		return _coverFormat;
	}

	// Decoding is intentionally unsupported - metadata only.
	ReadResult readMore(QByteArray &result, int64 &samplesAdded) override {
		DEBUG_LOG(("Audio Read Error: should not call this"));
		return ReadResult::Error;
	}

	~FFMpegAttributesReader() {
	}

private:

	QString _title, _performer;
	QImage _cover;
	QByteArray _coverBytes, _coverFormat;

};
2015-07-01 00:07:05 +03:00
2016-02-12 19:35:06 +03:00
// Builds the MTP document attribute for an audio file: a full audio
// attribute (duration, title, performer) when the file opens as a song,
// plus the extracted cover image through the out-parameters; otherwise
// falls back to a plain filename attribute.
MTPDocumentAttribute audioReadSongAttributes(const QString &fname, const QByteArray &data, QImage &cover, QByteArray &coverBytes, QByteArray &coverFormat) {
	FFMpegAttributesReader reader(FileLocation(StorageFilePartial, fname), data);
	qint64 position = 0;
	if (reader.open(position) && reader.duration() > 0) {
		auto duration = int32(reader.duration() / reader.frequency());
		cover = reader.cover();
		coverBytes = reader.coverBytes();
		coverFormat = reader.coverFormat();
		return MTP_documentAttributeAudio(MTP_flags(MTPDdocumentAttributeAudio::Flag::f_title | MTPDdocumentAttributeAudio::Flag::f_performer), MTP_int(duration), MTP_string(reader.title()), MTP_string(reader.performer()), MTPstring());
	}
	return MTP_documentAttributeFilename(MTP_string(fname));
}
2015-07-01 00:07:05 +03:00
2016-02-12 19:35:06 +03:00
// Decodes an already-recorded voice message and condenses it into a
// WaveformSamplesCount-entry preview waveform (5-bit values, 0..31),
// using the same peak-picking scheme as the live capture path.
class FFMpegWaveformCounter : public FFMpegLoader {
public:
	FFMpegWaveformCounter(const FileLocation &file, const QByteArray &data) : FFMpegLoader(file, data) {
	}

	bool open(qint64 &position) override {
		// Returns true and fills `result` on success; false when the file
		// cannot be opened, is shorter than WaveformSamplesCount samples,
		// or yields no peaks.
		if (!FFMpegLoader::open(position)) {
			return false;
		}

		QByteArray buffer;
		buffer.reserve(AudioVoiceMsgBufferSize);
		int64 countbytes = sampleSize * duration(), processed = 0, sumbytes = 0;
		if (duration() < WaveformSamplesCount) {
			return false;
		}

		QVector<uint16> peaks;
		peaks.reserve(WaveformSamplesCount);

		int32 fmt = format();
		uint16 peak = 0;
		while (processed < countbytes) {
			buffer.resize(0);

			int64 samples = 0;
			auto res = readMore(buffer, samples);
			if (res == ReadResult::Error || res == ReadResult::EndOfFile) {
				break;
			}
			if (buffer.isEmpty()) {
				continue;
			}

			const char *data = buffer.data();
			if (fmt == AL_FORMAT_MONO8 || fmt == AL_FORMAT_STEREO8) {
				// 8-bit unsigned samples: recenter around zero and scale to the
				// 16-bit range so both branches feed comparable peak values.
				for (int32 i = 0, l = buffer.size(); i + int32(sizeof(uchar)) <= l;) {
					uint16 sample = qAbs((int32(*(uchar*)(data + i)) - 128) * 256);
					if (peak < sample) {
						peak = sample;
					}

					i += sizeof(uchar);
					// Bresenham-style accumulator: emit one peak roughly every
					// countbytes / (WaveformSamplesCount * bytes-per-sample) samples.
					sumbytes += WaveformSamplesCount;
					if (sumbytes >= countbytes) {
						sumbytes -= countbytes;
						peaks.push_back(peak);
						peak = 0;
					}
				}
			} else if (fmt == AL_FORMAT_MONO16 || fmt == AL_FORMAT_STEREO16) {
				for (int32 i = 0, l = buffer.size(); i + int32(sizeof(uint16)) <= l;) {
					uint16 sample = qAbs(int32(*(int16*)(data + i)));
					if (peak < sample) {
						peak = sample;
					}

					i += sizeof(uint16);
					// Step scaled by sizeof(uint16) because countbytes is in
					// bytes while this loop advances two bytes per sample.
					sumbytes += sizeof(uint16) * WaveformSamplesCount;
					if (sumbytes >= countbytes) {
						sumbytes -= countbytes;
						peaks.push_back(peak);
						peak = 0;
					}
				}
			}
			processed += sampleSize * samples;
		}
		// Flush the final, possibly partial, bucket.
		if (sumbytes > 0 && peaks.size() < WaveformSamplesCount) {
			peaks.push_back(peak);
		}

		if (peaks.isEmpty()) {
			return false;
		}

		// Normalize against 1.8x the average peak (clamped to at least 2500)
		// so quiet recordings still produce a visible waveform.
		int64 sum = std::accumulate(peaks.cbegin(), peaks.cend(), 0ULL);
		peak = qMax(int32(sum * 1.8 / peaks.size()), 2500);

		result.resize(peaks.size());
		for (int32 i = 0, l = peaks.size(); i != l; ++i) {
			// Quantize each peak into 5 bits (0..31) for compact storage.
			result[i] = char(qMin(31U, uint32(qMin(peaks.at(i), peak)) * 31 / peak));
		}

		return true;
	}

	const VoiceWaveform &waveform() const {
		return result;
	}

	~FFMpegWaveformCounter() {
	}

private:
	VoiceWaveform result;

};
2016-02-12 19:35:06 +03:00
// Computes the preview waveform for a stored voice message.
// Returns an empty waveform when the file cannot be decoded.
VoiceWaveform audioCountWaveform(const FileLocation &file, const QByteArray &data) {
	FFMpegWaveformCounter counter(file, data);
	qint64 position = 0;
	return counter.open(position) ? counter.waveform() : VoiceWaveform();
}