updateNewMessage can now request getDifference() if data is absent.
Video sync and frame duration counting improved. Seek fixed for audio streams that are neither 44100 nor 48000 Hz.
Parent: fa708ada3b
Commit: 9fe714189d
12 changed files with 50 additions and 32 deletions
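Why open() now takes the seek position by reference: when the source sample rate is neither 44100 nor 48000 Hz, the loaders resample to a supported output rate, so a seek position counted in source samples has to be rescaled and handed back to the caller (the patched code does this with av_rescale_rnd, as the hunks below show). A minimal standalone sketch of that idea; SketchLoader and rescaleDown are hypothetical names, not tdesktop code:

// Minimal sketch (not tdesktop code): a loader that resamples must rescale
// an in/out seek position from source samples to output samples.
#include <cstdint>
#include <iostream>

// Hypothetical stand-in for av_rescale_rnd(position, dstRate, srcRate, AV_ROUND_DOWN).
int64_t rescaleDown(int64_t value, int64_t dstRate, int64_t srcRate) {
	return value * dstRate / srcRate; // truncates, matching AV_ROUND_DOWN for non-negative input
}

struct SketchLoader {
	int64_t srcRate = 22050; // e.g. a stream that is neither 44100 nor 48000 Hz
	int64_t dstRate = 48000; // playback rate after resampling

	bool open(int64_t &position) { // position is in/out, as in the patched loaders
		if (srcRate != 44100 && srcRate != 48000) {
			position = rescaleDown(position, dstRate, srcRate);
		}
		return true;
	}
};

int main() {
	SketchLoader loader;
	int64_t position = 22050; // one second into the source stream
	loader.open(position);
	std::cout << position << "\n"; // 48000: one second in output samples
}
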
@@ -4107,7 +4107,16 @@ void MainWidget::feedUpdate(const MTPUpdate &update) {
 	switch (update.type()) {
 	case mtpc_updateNewMessage: {
-		const auto &d(update.c_updateNewMessage());
+		auto &d = update.c_updateNewMessage();
+
+		DataIsLoadedResult isDataLoaded = allDataLoadedForMessage(d.vmessage);
+		if (!requestingDifference() && isDataLoaded != DataIsLoadedResult::Ok) {
+			MTP_LOG(0, ("getDifference { good - after not all data loaded in updateNewMessage }%1").arg(cTestMode() ? " TESTMODE" : ""));
+
+			// This can be if this update was created by grouping
+			// some short message update into an updates vector.
+			return getDifference();
+		}
+
 		if (!ptsUpdated(d.vpts.v, d.vpts_count.v, update)) {
 			return;

@@ -4462,7 +4471,7 @@ void MainWidget::feedUpdate(const MTPUpdate &update) {
 	} break;
 
 	case mtpc_updateNewChannelMessage: {
-		const auto &d(update.c_updateNewChannelMessage());
+		auto &d = update.c_updateNewChannelMessage();
 		ChannelData *channel = App::channelLoaded(peerToChannel(peerFromMessage(d.vmessage)));
 		DataIsLoadedResult isDataLoaded = allDataLoadedForMessage(d.vmessage);
 		if (!requestingDifference() && (!channel || isDataLoaded != DataIsLoadedResult::Ok)) {
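A minimal sketch of the guard pattern added above: an incremental update is applied only when every entity it references is already cached; otherwise the client falls back to a full resync (getDifference in the MTProto scheme). All names here (UpdateHandler, knownUsers_) are hypothetical illustrations, not tdesktop code:

// Minimal sketch (not tdesktop code) of the "apply only if all data is loaded,
// otherwise request the full difference" fallback.
#include <iostream>
#include <set>
#include <vector>

struct Update { std::vector<int> referencedUserIds; };

class UpdateHandler {
public:
	void feedUpdate(const Update &update) {
		if (!requestingDifference_ && !allDataLoaded(update)) {
			// Same idea as the new updateNewMessage branch: do not apply a
			// message whose referenced data is unknown, ask for the difference.
			return getDifference();
		}
		applyUpdate(update);
	}

private:
	bool allDataLoaded(const Update &update) const {
		for (auto id : update.referencedUserIds) {
			if (!knownUsers_.count(id)) return false;
		}
		return true;
	}
	void getDifference() { requestingDifference_ = true; std::cout << "getDifference()\n"; }
	void applyUpdate(const Update &) { std::cout << "applied\n"; }

	std::set<int> knownUsers_ = {1, 2, 3};
	bool requestingDifference_ = false;
};

int main() {
	UpdateHandler handler;
	handler.feedUpdate({{2}});  // all referenced data cached: applied
	handler.feedUpdate({{42}}); // unknown user: falls back to getDifference()
}
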
@@ -1765,8 +1765,8 @@ public:
 	FFMpegAttributesReader(const FileLocation &file, const QByteArray &data) : AbstractFFMpegLoader(file, data) {
 	}
 
-	bool open(qint64 position = 0) override {
-		if (!AbstractFFMpegLoader::open()) {
+	bool open(qint64 &position) override {
+		if (!AbstractFFMpegLoader::open(position)) {
 			return false;
 		}

@@ -1862,7 +1862,8 @@ private:
 
 MTPDocumentAttribute audioReadSongAttributes(const QString &fname, const QByteArray &data, QImage &cover, QByteArray &coverBytes, QByteArray &coverFormat) {
 	FFMpegAttributesReader reader(FileLocation(StorageFilePartial, fname), data);
-	if (reader.open()) {
+	qint64 position = 0;
+	if (reader.open(position)) {
 		int32 duration = reader.duration() / reader.frequency();
 		if (reader.duration() > 0) {
 			cover = reader.cover();

@@ -1880,7 +1881,7 @@ public:
 	FFMpegWaveformCounter(const FileLocation &file, const QByteArray &data) : FFMpegLoader(file, data) {
 	}
 
-	bool open(qint64 position = 0) override {
+	bool open(qint64 &position) override {
 		if (!FFMpegLoader::open(position)) {
 			return false;
 		}

@@ -1976,7 +1977,8 @@ private:
 
 VoiceWaveform audioCountWaveform(const FileLocation &file, const QByteArray &data) {
 	FFMpegWaveformCounter counter(file, data);
-	if (counter.open()) {
+	qint64 position = 0;
+	if (counter.open(position)) {
 		return counter.waveform();
 	}
 	return VoiceWaveform();
@@ -25,7 +25,7 @@ constexpr AVSampleFormat AudioToFormat = AV_SAMPLE_FMT_S16;
 constexpr int64_t AudioToChannelLayout = AV_CH_LAYOUT_STEREO;
 constexpr int32 AudioToChannels = 2;
 
-bool AbstractFFMpegLoader::open(qint64 position) {
+bool AbstractFFMpegLoader::open(qint64 &position) {
 	if (!AudioPlayerLoader::openFile()) {
 		return false;
 	}

@@ -137,7 +137,7 @@ FFMpegLoader::FFMpegLoader(const FileLocation &file, const QByteArray &data) : A
 	frame = av_frame_alloc();
 }
 
-bool FFMpegLoader::open(qint64 position) {
+bool FFMpegLoader::open(qint64 &position) {
 	if (!AbstractFFMpegLoader::open(position)) {
 		return false;
 	}

@@ -210,6 +210,7 @@ bool FFMpegLoader::open(qint64 position) {
 		sampleSize = AudioToChannels * sizeof(short);
 		freq = dstRate;
 		len = av_rescale_rnd(len, dstRate, srcRate, AV_ROUND_UP);
+		position = av_rescale_rnd(position, dstRate, srcRate, AV_ROUND_DOWN);
 		fmt = AL_FORMAT_STEREO16;
 
 		maxResampleSamples = av_rescale_rnd(AVBlockSize / sampleSize, dstRate, srcRate, AV_ROUND_UP);
@@ -36,7 +36,7 @@ public:
 	AbstractFFMpegLoader(const FileLocation &file, const QByteArray &data) : AudioPlayerLoader(file, data) {
 	}
 
-	bool open(qint64 position = 0) override;
+	bool open(qint64 &position) override;
 
 	int64 duration() override {
 		return len;

@@ -72,7 +72,7 @@ class FFMpegLoader : public AbstractFFMpegLoader {
 public:
 	FFMpegLoader(const FileLocation &file, const QByteArray &data);
 
-	bool open(qint64 position = 0) override;
+	bool open(qint64 &position) override;
 
 	int32 format() override {
 		return fmt;
@@ -27,7 +27,7 @@ public:
 
 	virtual bool check(const FileLocation &file, const QByteArray &data);
 
-	virtual bool open(qint64 position = 0) = 0;
+	virtual bool open(qint64 &position) = 0;
 	virtual int64 duration() = 0;
 	virtual int32 frequency() = 0;
 	virtual int32 format() = 0;
@@ -321,7 +321,7 @@ void AudioPlayerLoaders::loadData(AudioMsgId audio, qint64 position) {
 	}
 }
 
-AudioPlayerLoader *AudioPlayerLoaders::setupLoader(const AudioMsgId &audio, SetupError &err, qint64 position) {
+AudioPlayerLoader *AudioPlayerLoaders::setupLoader(const AudioMsgId &audio, SetupError &err, qint64 &position) {
 	err = SetupErrorAtStart;
 	QMutexLocker lock(internal::audioPlayerMutex());
 	AudioPlayer *voice = audioPlayer();
@@ -73,7 +73,7 @@ private:
 		SetupNoErrorStarted = 3,
 	};
 	void loadData(AudioMsgId audio, qint64 position);
-	AudioPlayerLoader *setupLoader(const AudioMsgId &audio, SetupError &err, qint64 position);
+	AudioPlayerLoader *setupLoader(const AudioMsgId &audio, SetupError &err, qint64 &position);
 	AudioPlayer::AudioMsg *checkLoader(AudioMsgId::Type type);
 
 };
@@ -39,7 +39,7 @@ ChildFFMpegLoader::ChildFFMpegLoader(uint64 videoPlayId, std_::unique_ptr<VideoS
 	_frame = av_frame_alloc();
 }
 
-bool ChildFFMpegLoader::open(qint64 position) {
+bool ChildFFMpegLoader::open(qint64 &position) {
 	int res = 0;
 	char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
 

@@ -100,6 +100,7 @@ bool ChildFFMpegLoader::open(qint64 position) {
 		_sampleSize = AudioToChannels * sizeof(short);
 		_parentData->frequency = _dstRate;
 		_parentData->length = av_rescale_rnd(_parentData->length, _dstRate, _srcRate, AV_ROUND_UP);
+		position = av_rescale_rnd(position, _dstRate, _srcRate, AV_ROUND_DOWN);
 		_format = AL_FORMAT_STEREO16;
 
 		_maxResampleSamples = av_rescale_rnd(AVBlockSize / _sampleSize, _dstRate, _srcRate, AV_ROUND_UP);
@@ -83,7 +83,7 @@ class ChildFFMpegLoader : public AudioPlayerLoader {
 public:
	ChildFFMpegLoader(uint64 videoPlayId, std_::unique_ptr<VideoSoundData> &&data);
 
-	bool open(qint64 position = 0) override;
+	bool open(qint64 &position) override;
 
 	bool check(const FileLocation &file, const QByteArray &data) override {
 		return true;
@@ -96,7 +96,10 @@ ReaderImplementation::ReadResult FFMpegReaderImplementation::readNextFrame() {
 		_currentFrameDelay = _nextFrameDelay;
 		if (_frameMs + _currentFrameDelay < frameMs) {
 			_currentFrameDelay = int32(frameMs - _frameMs);
+		} else if (frameMs < _frameMs + _currentFrameDelay) {
+			frameMs = _frameMs + _currentFrameDelay;
 		}
 
 		if (duration == AV_NOPTS_VALUE) {
 			_nextFrameDelay = 0;
 		} else {

@@ -129,7 +132,7 @@ ReaderImplementation::ReadResult FFMpegReaderImplementation::readNextFrame() {
 		avcodec_flush_buffers(_codecContext);
 		_hadFrame = false;
 		_frameMs = 0;
-		_lastReadPacketMs = 0;
+		_lastReadVideoMs = _lastReadAudioMs = 0;
 	}
 }
 

@@ -154,7 +157,6 @@ ReaderImplementation::ReadResult FFMpegReaderImplementation::readFramesTill(int6
 
 	// sync by audio stream
 	auto correctMs = (frameMs >= 0) ? audioPlayer()->getVideoCorrectedTime(_playId, frameMs, systemMs) : frameMs;
-
 	if (!_frameRead) {
 		auto readResult = readNextFrame();
 		if (readResult != ReadResult::Success) {
@@ -237,7 +239,8 @@ bool FFMpegReaderImplementation::renderFrame(QImage &to, bool &hasAlpha, const Q
 
 	// Read some future packets for audio stream.
 	if (_audioStreamId >= 0) {
-		while (_frameMs + 5000 > _lastReadPacketMs) {
+		while (_frameMs + 5000 > _lastReadAudioMs
+			&& _frameMs + 15000 > _lastReadVideoMs) {
 			auto packetResult = readAndProcessPacket();
 			if (packetResult != PacketResult::Ok) {
 				break;
@@ -327,16 +330,16 @@ bool FFMpegReaderImplementation::start(Mode mode, int64 &positionMs) {
 		if ((res = avcodec_open2(audioContext, audioCodec, 0)) < 0) {
 			avcodec_free_context(&audioContext);
 			LOG(("Gif Error: Unable to avcodec_open2 %1, error %2, %3").arg(logData()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
 			return false;
 		}
 
-		soundData = std_::make_unique<VideoSoundData>();
-		soundData->context = audioContext;
-		soundData->frequency = audioContextOriginal->sample_rate;
-		if (_fmtContext->streams[_audioStreamId]->duration == AV_NOPTS_VALUE) {
-			soundData->length = (_fmtContext->duration * soundData->frequency) / AV_TIME_BASE;
-			_audioStreamId = -1;
-		} else {
-			soundData->length = (_fmtContext->streams[_audioStreamId]->duration * soundData->frequency * _fmtContext->streams[_audioStreamId]->time_base.num) / _fmtContext->streams[_audioStreamId]->time_base.den;
+		soundData = std_::make_unique<VideoSoundData>();
+		soundData->context = audioContext;
+		soundData->frequency = audioContextOriginal->sample_rate;
+		if (_fmtContext->streams[_audioStreamId]->duration == AV_NOPTS_VALUE) {
+			soundData->length = (_fmtContext->duration * soundData->frequency) / AV_TIME_BASE;
+		} else {
+			soundData->length = (_fmtContext->streams[_audioStreamId]->duration * soundData->frequency * _fmtContext->streams[_audioStreamId]->time_base.num) / _fmtContext->streams[_audioStreamId]->time_base.den;
+		}
 		}
 	}
@@ -425,11 +428,13 @@ void FFMpegReaderImplementation::processPacket(AVPacket *packet) {
 	bool videoPacket = (packet->stream_index == _streamId);
 	bool audioPacket = (_audioStreamId >= 0 && packet->stream_index == _audioStreamId);
 	if (audioPacket || videoPacket) {
-		_lastReadPacketMs = countPacketMs(packet);
-
 		if (videoPacket) {
+			_lastReadVideoMs = countPacketMs(packet);
+
 			_packetQueue.enqueue(FFMpeg::dataWrapFromPacket(*packet));
 		} else if (audioPacket) {
+			_lastReadAudioMs = countPacketMs(packet);
+
 			// queue packet to audio player
 			VideoSoundPart part;
 			part.packet = packet;
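A minimal sketch of the new read-ahead condition from renderFrame() above: packets are pulled until the audio stream is buffered roughly 5 seconds past the current frame and the video stream roughly 15 seconds past it, instead of using the single shared _lastReadPacketMs threshold. Demuxer and its fields are hypothetical stand-ins, not tdesktop code:

// Minimal sketch (not tdesktop code) of per-stream read-ahead bounds.
#include <cstdint>
#include <iostream>

struct Demuxer {
	int64_t lastReadVideoMs = 0;
	int64_t lastReadAudioMs = 0;
	int64_t readPos = 0;

	// Pretend every packet advances both streams by 100 ms of media time.
	bool readAndProcessPacket() {
		readPos += 100;
		lastReadVideoMs = lastReadAudioMs = readPos;
		return readPos < 60000; // stop at a fake end of file
	}
};

int main() {
	Demuxer demuxer;
	int64_t frameMs = 1000; // timestamp of the frame being rendered
	while (frameMs + 5000 > demuxer.lastReadAudioMs
		&& frameMs + 15000 > demuxer.lastReadVideoMs) {
		if (!demuxer.readAndProcessPacket()) break;
	}
	// Audio is now buffered to at least frameMs + 5000 (here 6000 ms).
	std::cout << demuxer.lastReadAudioMs << "\n";
}
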
@@ -92,7 +92,8 @@ private:
 
 	int _audioStreamId = 0;
 	uint64 _playId = 0;
-	int64 _lastReadPacketMs = 0;
+	int64 _lastReadVideoMs = 0;
+	int64 _lastReadAudioMs = 0;
 
 	QQueue<FFMpeg::AVPacketDataWrap> _packetQueue;
 	AVPacket _packetNull; // for final decoding
@@ -489,7 +489,6 @@ public:
 		int64 delta = static_cast<int64>(ms) - static_cast<int64>(_videoPausedAtMs);
 		_animationStarted += delta;
 		_nextFrameWhen += delta;
-		LOG(("RESUME VIDEO: next when: %1, started: %2, delta: %3").arg(_nextFrameWhen).arg(_animationStarted).arg(delta));
 
 		_videoPausedAtMs = 0;
 		_implementation->resumeAudio();