mirror of https://github.com/rwengine/openrw.git synced 2024-10-06 09:07:19 +02:00

Divide SoundSource (for future streaming)

This commit is contained in:
Filip Gawin 2018-11-21 00:34:29 +01:00
parent a3916ca677
commit 50c6eedf4f
2 changed files with 397 additions and 266 deletions
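The split turns the two monolithic loaders into small steps (allocate frame, open the format context, find the audio stream, prepare the codec context, decode, clean up), so that only kNrFramesToPreload frames are decoded up front and the rest can be finished later. A minimal caller-side sketch of the new entry points — the LoaderSDT instance, file path and sfx index are purely illustrative, only the loadFromFile/loadSfx signatures come from this commit:

// Illustrative sketch only; everything except the SoundSource/LoaderSDT
// calls added in this commit is assumed.
#include "audio/SoundSource.hpp"
#include <loaders/LoaderSDT.hpp>

void preloadSounds(LoaderSDT& sdt) {
    SoundSource music;
    // streaming == true: preload kNrFramesToPreload frames, then finish
    // decoding the rest asynchronously so playback can start earlier.
    music.loadFromFile("audio/track.mp3", /*streaming=*/true);

    SoundSource sfx;
    // streaming == false: decode the whole SDT entry before returning.
    sfx.loadSfx(sdt, /*index=*/42, /*asWave=*/true, /*streaming=*/false);
}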

View File

@@ -1,12 +1,11 @@
#include "audio/SoundSource.hpp"
#include <loaders/LoaderSDT.hpp>
#include <rw/types.hpp>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
#include <libavutil/avutil.h>
#include <libavutil/opt.h>
#include <libswresample/swresample.h>
}
@@ -24,29 +23,91 @@ extern "C" {
constexpr int kNumOutputChannels = 2;
constexpr AVSampleFormat kOutputFMT = AV_SAMPLE_FMT_S16;
constexpr size_t kNrFramesToPreload = 50;
void SoundSource::loadFromFile(const rwfs::path& filePath) {
// Allocate audio frame
AVFrame* frame = av_frame_alloc();
bool SoundSource::allocateAudioFrame() {
frame = av_frame_alloc();
if (!frame) {
RW_ERROR("Error allocating the audio frame");
return;
return false;
}
return true;
}
// Allocate formatting context
AVFormatContext* formatContext = nullptr;
bool SoundSource::allocateFormatContext(const rwfs::path& filePath) {
formatContext = nullptr;
if (avformat_open_input(&formatContext, filePath.string().c_str(), nullptr,
nullptr) != 0) {
av_frame_free(&frame);
RW_ERROR("Error opening audio file (" << filePath << ")");
return;
return false;
}
return true;
}
namespace {
/// Low level function for copying data from handler (opaque)
/// to buffer.
int read_packet(void* opaque, uint8_t* buf, int buf_size) {
auto* input = reinterpret_cast<InputData*>(opaque);
buf_size = std::min(buf_size, static_cast<int>(input->size));
/* copy internal data to buf */
memcpy(buf, input->ptr, buf_size);
input->ptr += buf_size;
input->size -= buf_size;
return buf_size;
}
} // namespace
bool SoundSource::prepareFormatContextSfx(LoaderSDT& sdt, size_t index,
bool asWave) {
/// Now we need to prepare "custom" format context
/// We need sdt loader for that purpose
raw_sound = sdt.loadToMemory(index, asWave);
if (!raw_sound) {
av_frame_free(&frame);
RW_ERROR("Error loading sound");
return false;
}
/// Prepare input
input.size = sizeof(WaveHeader) + sdt.assetInfo.size;
/// Store start ptr of data to be able to free the memory later
inputDataStart = std::make_unique<uint8_t[]>(input.size);
input.ptr = inputDataStart.get();
/// Allocate memory for buffer
/// Memory freed at the end
static constexpr size_t ioBufferSize = 4096;
auto ioBuffer = static_cast<uint8_t*>(av_malloc(ioBufferSize));
/// Cast pointer, in order to match required layout for ffmpeg
input.ptr = reinterpret_cast<uint8_t*>(raw_sound.get());
/// Finally prepare our "custom" format context
avioContext = avio_alloc_context(ioBuffer, ioBufferSize, 0, &input,
&read_packet, nullptr, nullptr);
formatContext = avformat_alloc_context();
formatContext->pb = avioContext;
if (avformat_open_input(&formatContext, "SDT", nullptr, nullptr) != 0) {
av_free(formatContext->pb->buffer);
avio_context_free(&formatContext->pb);
av_frame_free(&frame);
RW_ERROR("Error opening audio file (" << index << ")");
return false;
}
return true;
}
bool SoundSource::findAudioStream(const rwfs::path& filePath) {
RW_UNUSED(filePath); // it's used by the macro
if (avformat_find_stream_info(formatContext, nullptr) < 0) {
av_frame_free(&frame);
avformat_close_input(&formatContext);
RW_ERROR("Error finding audio stream info");
return;
return false;
}
// Find the audio stream
@@ -56,14 +117,45 @@ void SoundSource::loadFromFile(const rwfs::path& filePath) {
av_frame_free(&frame);
avformat_close_input(&formatContext);
RW_ERROR("Could not find any audio stream in the file " << filePath);
return;
return false;
}
AVStream* audioStream = formatContext->streams[streamIndex];
AVCodec* codec = avcodec_find_decoder(audioStream->codecpar->codec_id);
audioStream = formatContext->streams[streamIndex];
codec = avcodec_find_decoder(audioStream->codecpar->codec_id);
return true;
}
bool SoundSource::findAudioStreamSfx() {
if (avformat_find_stream_info(formatContext, nullptr) < 0) {
av_free(formatContext->pb->buffer);
avio_context_free(&formatContext->pb);
av_frame_free(&frame);
avformat_close_input(&formatContext);
RW_ERROR("Error finding audio stream info");
return false;
}
// Find the audio stream
int streamIndex = av_find_best_stream(formatContext, AVMEDIA_TYPE_AUDIO, -1,
-1, nullptr, 0);
if (streamIndex < 0) {
av_free(formatContext->pb->buffer);
avio_context_free(&formatContext->pb);
av_frame_free(&frame);
avformat_close_input(&formatContext);
RW_ERROR("Could not find any audio stream in the file ");
return false;
}
audioStream = formatContext->streams[streamIndex];
codec = avcodec_find_decoder(audioStream->codecpar->codec_id);
return true;
}
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57, 5, 0)
AVCodecContext* codecContext = audioStream->codec;
bool SoundSource::prepareCodecContextLegacy() {
codecContext = audioStream->codec;
codecContext->codec = codec;
// Open the codec
@@ -71,16 +163,45 @@ void SoundSource::loadFromFile(const rwfs::path& filePath) {
av_frame_free(&frame);
avformat_close_input(&formatContext);
RW_ERROR("Couldn't open the audio codec context");
return;
return false;
}
return true;
}
bool SoundSource::prepareCodecContextSfxLegacy() {
AVCodecContext* codecContext = audioStream->codec;
codecContext->codec = codec;
// Open the codec
if (avcodec_open2(codecContext, codecContext->codec, nullptr) != 0) {
av_free(formatContext->pb->buffer);
avio_context_free(&formatContext->pb);
av_frame_free(&frame);
avformat_close_input(&formatContext);
RW_ERROR("Couldn't open the audio codec context");
return false;
}
return true;
}
#endif
bool SoundSource::prepareCodecContextWrap() {
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57, 5, 0)
return prepareCodecContextLegacy();
#else
return prepareCodecContext();
#endif
}
bool SoundSource::prepareCodecContext() {
// Initialize codec context for the decoder.
AVCodecContext* codecContext = avcodec_alloc_context3(codec);
codecContext = avcodec_alloc_context3(codec);
if (!codecContext) {
av_frame_free(&frame);
avformat_close_input(&formatContext);
RW_ERROR("Couldn't allocate a decoding context.");
return;
return false;
}
// Fill the codecCtx with the parameters of the codec used in the read file.
@@ -90,6 +211,7 @@ void SoundSource::loadFromFile(const rwfs::path& filePath) {
avcodec_free_context(&codecContext);
avformat_close_input(&formatContext);
RW_ERROR("Couldn't find parametrs for context");
return false;
}
// Initialize the decoder.
@@ -98,24 +220,63 @@ void SoundSource::loadFromFile(const rwfs::path& filePath) {
avcodec_free_context(&codecContext);
avformat_close_input(&formatContext);
RW_ERROR("Couldn't open the audio codec context");
return;
return false;
}
return true;
}
bool SoundSource::prepareCodecContextSfxWrap() {
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57, 5, 0)
return prepareCodecContextSfxLegacy();
#else
return prepareCodecContextSfx();
#endif
}
// Expose audio metadata
channels = kNumOutputChannels;
sampleRate = static_cast<size_t>(codecContext->sample_rate);
bool SoundSource::prepareCodecContextSfx() {
// Initialize codec context for the decoder.
codecContext = avcodec_alloc_context3(codec);
if (!codecContext) {
av_free(formatContext->pb->buffer);
avio_context_free(&formatContext->pb);
av_frame_free(&frame);
avformat_close_input(&formatContext);
RW_ERROR("Couldn't allocate a decoding context.");
return false;
}
// prepare resampler
SwrContext* swr = nullptr;
// Fill the codecCtx with the parameters of the codec used in the read file.
if (avcodec_parameters_to_context(codecContext, audioStream->codecpar) !=
0) {
av_free(formatContext->pb->buffer);
avio_context_free(&formatContext->pb);
avcodec_close(codecContext);
avcodec_free_context(&codecContext);
avformat_close_input(&formatContext);
RW_ERROR("Couldn't find parametrs for context");
return false;
}
// Start reading audio packets
AVPacket readingPacket;
av_init_packet(&readingPacket);
// Initialize the decoder.
if (avcodec_open2(codecContext, codec, nullptr) != 0) {
av_free(formatContext->pb->buffer);
avio_context_free(&formatContext->pb);
avcodec_close(codecContext);
avcodec_free_context(&codecContext);
avformat_close_input(&formatContext);
RW_ERROR("Couldn't open the audio codec context");
return false;
}
return true;
}
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57, 37, 100)
void SoundSource::decodeFramesLegacy(size_t framesToDecode) {
size_t decoded = 0;
while (av_read_frame(formatContext, &readingPacket) == 0) {
while ((framesToDecode == 0 || decoded < framesToDecode) &&
av_read_frame(formatContext, &readingPacket) == 0) {
if (readingPacket.stream_index == audioStream->index) {
AVPacket decodingPacket = readingPacket;
@@ -147,12 +308,72 @@ void SoundSource::loadFromFile(const rwfs::path& filePath) {
}
}
av_free_packet(&readingPacket);
++decoded;
}
#else
}
#endif
void SoundSource::decodeFramesSfxWrap() {
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57, 37, 100)
decodeFramesLegacy(kNrFramesToPreload);
#else
decodeFrames(kNrFramesToPreload);
#endif
}
void SoundSource::decodeFrames(size_t framesToDecode) {
size_t decoded = 0;
while ((framesToDecode == 0 || decoded < framesToDecode) &&
av_read_frame(formatContext, &readingPacket) == 0) {
if (readingPacket.stream_index == audioStream->index) {
AVPacket decodingPacket = readingPacket;
int sendPacket = avcodec_send_packet(codecContext, &decodingPacket);
int receiveFrame = 0;
while ((receiveFrame =
avcodec_receive_frame(codecContext, frame)) == 0) {
// Decode audio packet
if (receiveFrame == 0 && sendPacket == 0) {
// Write samples to audio buffer
for (size_t i = 0;
i < static_cast<size_t>(frame->nb_samples); i++) {
// Interleave left/right channels
for (size_t channel = 0; channel < channels;
channel++) {
int16_t sample = reinterpret_cast<int16_t*>(
frame->data[channel])[i];
data.push_back(sample);
}
}
}
}
}
av_packet_unref(&readingPacket);
++decoded;
}
}
void SoundSource::decodeFramesWrap(const rwfs::path& filePath) {
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57, 37, 100)
decodeFramesLegacy(kNrFramesToPreload);
#else
decodeAndResampleFrames(filePath, kNrFramesToPreload);
#endif
}
void SoundSource::decodeAndResampleFrames(const rwfs::path& filePath,
size_t framesToDecode) {
RW_UNUSED(filePath); // it's used by the macro
AVFrame* resampled = av_frame_alloc();
while (av_read_frame(formatContext, &readingPacket) == 0) {
size_t decoded = 0;
while ((framesToDecode == 0 || decoded < framesToDecode) &&
av_read_frame(formatContext, &readingPacket) == 0) {
if (readingPacket.stream_index == audioStream->index) {
int sendPacket = avcodec_send_packet(codecContext, &readingPacket);
av_packet_unref(&readingPacket);
@@ -213,19 +434,20 @@ void SoundSource::loadFromFile(const rwfs::path& filePath) {
}
}
}
++decoded;
}
/// Free all data used by the resampled frame.
av_frame_free(&resampled);
#endif
// Cleanup
/// Free all data used by the frame.
av_frame_free(&frame);
/// Free resampler
swr_free(&swr);
}
void SoundSource::cleanupAfterSoundLoading() {
/// Free all data used by the frame.
av_frame_free(&frame);
/// Close the context and free all data associated to it, but not the
/// context itself.
@@ -238,235 +460,7 @@ void SoundSource::loadFromFile(const rwfs::path& filePath) {
avformat_close_input(&formatContext);
}
/// Structure for input data
struct InputData {
uint8_t* ptr = nullptr;
size_t size{}; ///< size left in the buffer
};
/// Low level function for copying data from handler (opaque)
/// to buffer.
static int read_packet(void* opaque, uint8_t* buf, int buf_size) {
auto* input = reinterpret_cast<InputData*>(opaque);
buf_size = std::min(buf_size, static_cast<int>(input->size));
/* copy internal data to buf */
memcpy(buf, input->ptr, buf_size);
input->ptr += buf_size;
input->size -= buf_size;
return buf_size;
}
void SoundSource::loadSfx(LoaderSDT& sdt, size_t index, bool asWave) {
// Allocate audio frame
AVFrame* frame = av_frame_alloc();
if (!frame) {
RW_ERROR("Error allocating the audio frame");
return;
}
/// Now we need to prepare "custom" format context
/// We need sdt loader for that purpose
std::unique_ptr<char[]> raw_sound = sdt.loadToMemory(index, asWave);
if (!raw_sound) {
av_frame_free(&frame);
RW_ERROR("Error loading sound");
return;
}
/// Prepare input
InputData input{};
input.size = sizeof(WaveHeader) + sdt.assetInfo.size;
/// Store start ptr of data to be able to free the memory later
auto inputDataStart = std::make_unique<uint8_t[]>(
input.size);
input.ptr = inputDataStart.get();
/// Allocate memory for buffer
/// Memory freed at the end
static constexpr size_t ioBufferSize = 4096;
auto ioBuffer = static_cast<uint8_t*>(av_malloc(ioBufferSize));
/// Cast pointer, in order to match required layout for ffmpeg
input.ptr = reinterpret_cast<uint8_t*>(raw_sound.get());
/// Finally prepare our "custom" format context
AVIOContext* avioContext = avio_alloc_context(
ioBuffer, ioBufferSize, 0, &input, &read_packet, nullptr, nullptr);
AVFormatContext* formatContext = avformat_alloc_context();
formatContext->pb = avioContext;
if (avformat_open_input(&formatContext, "nothint", nullptr, nullptr) != 0) {
av_free(formatContext->pb->buffer);
avio_context_free(&formatContext->pb);
av_frame_free(&frame);
RW_ERROR("Error opening audio file (" << index << ")");
return;
}
if (avformat_find_stream_info(formatContext, nullptr) < 0) {
av_free(formatContext->pb->buffer);
avio_context_free(&formatContext->pb);
av_frame_free(&frame);
avformat_close_input(&formatContext);
RW_ERROR("Error finding audio stream info");
return;
}
// Find the audio stream
// AVCodec* codec = nullptr;
int streamIndex = av_find_best_stream(formatContext, AVMEDIA_TYPE_AUDIO, -1,
-1, nullptr, 0);
if (streamIndex < 0) {
av_free(formatContext->pb->buffer);
avio_context_free(&formatContext->pb);
av_frame_free(&frame);
avformat_close_input(&formatContext);
RW_ERROR("Could not find any audio stream in the file ");
return;
}
AVStream* audioStream = formatContext->streams[streamIndex];
AVCodec* codec = avcodec_find_decoder(audioStream->codecpar->codec_id);
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57, 5, 0)
AVCodecContext* codecContext = audioStream->codec;
codecContext->codec = codec;
// Open the codec
if (avcodec_open2(codecContext, codecContext->codec, nullptr) != 0) {
av_free(formatContext->pb->buffer);
avio_context_free(&formatContext->pb);
av_frame_free(&frame);
avformat_close_input(&formatContext);
RW_ERROR("Couldn't open the audio codec context");
return;
}
#else
// Initialize codec context for the decoder.
AVCodecContext* codecContext = avcodec_alloc_context3(codec);
if (!codecContext) {
av_free(formatContext->pb->buffer);
avio_context_free(&formatContext->pb);
av_frame_free(&frame);
avformat_close_input(&formatContext);
RW_ERROR("Couldn't allocate a decoding context.");
return;
}
// Fill the codecCtx with the parameters of the codec used in the read file.
if (avcodec_parameters_to_context(codecContext, audioStream->codecpar) !=
0) {
av_free(formatContext->pb->buffer);
avio_context_free(&formatContext->pb);
avcodec_close(codecContext);
avcodec_free_context(&codecContext);
avformat_close_input(&formatContext);
RW_ERROR("Couldn't find parametrs for context");
return;
}
// Initialize the decoder.
if (avcodec_open2(codecContext, codec, nullptr) != 0) {
av_free(formatContext->pb->buffer);
avio_context_free(&formatContext->pb);
avcodec_close(codecContext);
avcodec_free_context(&codecContext);
avformat_close_input(&formatContext);
RW_ERROR("Couldn't open the audio codec context");
return;
}
#endif
// Expose audio metadata
channels = static_cast<size_t>(codecContext->channels);
sampleRate = sdt.assetInfo.sampleRate;
// OpenAL only supports mono or stereo, so error on more than 2 channels
if (channels > 2) {
RW_ERROR("Audio has more than two channels");
av_free(formatContext->pb->buffer);
avio_context_free(&formatContext->pb);
av_frame_free(&frame);
avcodec_close(codecContext);
avformat_close_input(&formatContext);
return;
}
// Start reading audio packets
AVPacket readingPacket;
av_init_packet(&readingPacket);
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57, 37, 100)
while (av_read_frame(formatContext, &readingPacket) == 0) {
if (readingPacket.stream_index == audioStream->index) {
AVPacket decodingPacket = readingPacket;
while (decodingPacket.size > 0) {
// Decode audio packet
int gotFrame = 0;
int len = avcodec_decode_audio4(codecContext, frame, &gotFrame,
&decodingPacket);
if (len >= 0 && gotFrame) {
// Write samples to audio buffer
for (size_t i = 0;
i < static_cast<size_t>(frame->nb_samples); i++) {
// Interleave left/right channels
for (size_t channel = 0; channel < channels;
channel++) {
int16_t sample = reinterpret_cast<int16_t*>(
frame->data[channel])[i];
data.push_back(sample);
}
}
decodingPacket.size -= len;
decodingPacket.data += len;
} else {
decodingPacket.size = 0;
decodingPacket.data = nullptr;
}
}
}
av_free_packet(&readingPacket);
}
#else
while (av_read_frame(formatContext, &readingPacket) == 0) {
if (readingPacket.stream_index == audioStream->index) {
AVPacket decodingPacket = readingPacket;
int sendPacket = avcodec_send_packet(codecContext, &decodingPacket);
int receiveFrame = 0;
while ((receiveFrame =
avcodec_receive_frame(codecContext, frame)) == 0) {
// Decode audio packet
if (receiveFrame == 0 && sendPacket == 0) {
// Write samples to audio buffer
for (size_t i = 0;
i < static_cast<size_t>(frame->nb_samples); i++) {
// Interleave left/right channels
for (size_t channel = 0; channel < channels;
channel++) {
int16_t sample = reinterpret_cast<int16_t*>(
frame->data[channel])[i];
data.push_back(sample);
}
}
}
}
}
av_packet_unref(&readingPacket);
}
#endif
// Cleanup
void SoundSource::cleanupAfterSfxLoading() {
/// Free all data used by the frame.
av_frame_free(&frame);
@@ -484,3 +478,70 @@ void SoundSource::loadSfx(LoaderSDT& sdt, size_t index, bool asWave) {
/// We are done here. Close the input.
avformat_close_input(&formatContext);
}
void SoundSource::exposeSoundMetadata() {
channels = kNumOutputChannels;
sampleRate = static_cast<size_t>(codecContext->sample_rate);
}
void SoundSource::exposeSfxMetadata(LoaderSDT& sdt) {
channels = static_cast<size_t>(codecContext->channels);
sampleRate = sdt.assetInfo.sampleRate;
}
void SoundSource::decodeRestSoundFramesAndCleanup(const rwfs::path& filePath) {
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57, 37, 100)
decodeFramesLegacy(0);
#else
decodeAndResampleFrames(filePath, 0);
#endif
cleanupAfterSoundLoading();
}
void SoundSource::decodeRestSfxFramesAndCleanup() {
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57, 37, 100)
decodeFramesLegacy(0);
#else
decodeFrames(0);
#endif
cleanupAfterSfxLoading();
}
void SoundSource::loadFromFile(const rwfs::path& filePath, bool streaming) {
if (allocateAudioFrame() && allocateFormatContext(filePath) &&
findAudioStream(filePath) && prepareCodecContextWrap()) {
exposeSoundMetadata();
av_init_packet(&readingPacket);
decodeFramesWrap(filePath);
if (streaming) {
auto loadingThread = std::async(
std::launch::async,
&SoundSource::decodeRestSoundFramesAndCleanup, this, filePath);
} else {
decodeRestSoundFramesAndCleanup(filePath);
}
}
}
void SoundSource::loadSfx(LoaderSDT& sdt, size_t index, bool asWave,
bool streaming) {
if (allocateAudioFrame() && prepareFormatContextSfx(sdt, index, asWave) &&
findAudioStreamSfx() && prepareCodecContextSfxWrap()) {
exposeSfxMetadata(sdt);
av_init_packet(&readingPacket);
decodeFramesSfxWrap();
if (streaming) {
auto loadingThread =
std::async(std::launch::async,
&SoundSource::decodeRestSfxFramesAndCleanup, this);
} else {
decodeRestSfxFramesAndCleanup();
}
}
}
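One caveat in the streaming branches above: a std::future obtained from std::async blocks in its destructor until the task finishes, so the local loadingThread variable makes the "asynchronous" tail decode complete before loadFromFile/loadSfx return. A hedged sketch of keeping the task alive instead — the member name is hypothetical and not part of this diff:

// Hypothetical SoundSource member (not in this commit):
//     std::future<void> loadingTask;
if (streaming) {
    // Assigning to a member keeps the shared state alive, so the remaining
    // frames really are decoded in the background.
    loadingTask = std::async(std::launch::async,
                             &SoundSource::decodeRestSoundFramesAndCleanup,
                             this, filePath);
} else {
    decodeRestSoundFramesAndCleanup(filePath);
}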

View File

@@ -3,7 +3,25 @@
#include <rw/filesystem.hpp>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/avutil.h>
}
#include <cstdint>
#include <future>
/// Structure for input data
struct InputData {
uint8_t* ptr = nullptr;
size_t size{}; ///< size left in the buffer
};
class SwrContext;
class AVFormatContext;
class AVStream;
class AVIOContext;
class LoaderSDT;
@@ -15,11 +33,49 @@ class SoundSource {
friend struct SoundBuffer;
public:
bool allocateAudioFrame();
bool allocateFormatContext(const rwfs::path& filePath);
bool prepareFormatContextSfx(LoaderSDT& sdt, size_t index, bool asWave);
bool findAudioStream(const rwfs::path& filePath);
bool findAudioStreamSfx();
bool prepareCodecContextWrap();
bool prepareCodecContext();
bool prepareCodecContextSfxWrap();
bool prepareCodecContextSfx();
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57, 5, 0)
bool prepareCodecContextLegacy();
bool prepareCodecContextSfxLegacy();
#endif
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57, 37, 100)
void decodeFramesLegacy(size_t framesToDecode);
#endif
void decodeFramesWrap(const rwfs::path& filePath);
void decodeFramesSfxWrap();
void decodeFrames(size_t framesToDecode);
void decodeAndResampleFrames(const rwfs::path& filePath, size_t framesToDecode);
void cleanupAfterSoundLoading();
void cleanupAfterSfxLoading();
void exposeSoundMetadata();
void exposeSfxMetadata(LoaderSDT& sdt);
void decodeRestSoundFramesAndCleanup(const rwfs::path& filePath);
void decodeRestSfxFramesAndCleanup();
/// Load sound from mp3/wav file
void loadFromFile(const rwfs::path& filePath);
void loadFromFile(const rwfs::path& filePath, bool streaming = false);
/// Load sound from sdt file
void loadSfx(LoaderSDT& sdt, std::size_t index, bool asWave = true);
void loadSfx(LoaderSDT& sdt, std::size_t index, bool asWave = true,
bool streaming = false);
private:
/// Raw data
@@ -27,6 +83,20 @@ private:
std::uint32_t channels;
std::uint32_t sampleRate;
AVFrame* frame = nullptr;
AVFormatContext* formatContext = nullptr;
AVStream* audioStream = nullptr;
AVCodec* codec = nullptr;
SwrContext* swr = nullptr;
AVCodecContext* codecContext = nullptr;
AVPacket readingPacket;
// For sfx
AVIOContext* avioContext;
std::unique_ptr<char[]> raw_sound;
std::unique_ptr<uint8_t[]> inputDataStart;
InputData input{};
};
#endif