1
0
mirror of https://github.com/rwengine/openrw.git synced 2024-09-15 06:52:34 +02:00

Implement methods for sfx, and refactor Sound system

SoundBuffer and SoundSource got their own
files.
This commit is contained in:
Filip Gawin 2018-07-26 23:28:00 +02:00
parent 844d1f89b3
commit 11bc35c3f2
8 changed files with 954 additions and 564 deletions

View File

@ -21,10 +21,15 @@ set(RWENGINE_SOURCES
src/ai/TrafficDirector.cpp
src/ai/TrafficDirector.hpp
src/audio/SoundManager.cpp
src/audio/SoundManager.hpp
src/audio/alCheck.cpp
src/audio/alCheck.hpp
src/audio/Sound.hpp
src/audio/SoundBuffer.cpp
src/audio/SoundBuffer.hpp
src/audio/SoundManager.cpp
src/audio/SoundManager.hpp
src/audio/SoundSource.cpp
src/audio/SoundSource.hpp
src/core/Logger.cpp
src/core/Logger.hpp

View File

@ -0,0 +1,77 @@
#ifndef _RWENGINE_SOUND_HPP_
#define _RWENGINE_SOUND_HPP_
#include <memory>
#include <string>
#include <vector>
#include <al.h>
#include <alc.h>
#include <glm/glm.hpp>
#include <rw/filesystem.hpp>
#include <rw/types.hpp>
#include "audio/SoundBuffer.hpp"
#include "audio/SoundSource.hpp"
/// Wrapper for SoundBuffer and SoundSource.
/// Each command connected
/// with playback is passed to SoundBuffer
/// Wrapper for SoundBuffer and SoundSource.
/// Each playback command is forwarded to the underlying SoundBuffer.
struct Sound {
    size_t id = 0;          ///< Script-visible identifier of this sound
    bool isLoaded = false;  ///< True once the buffer received sample data
    std::shared_ptr<SoundSource> source = nullptr;  ///< Shared decoded samples
    std::unique_ptr<SoundBuffer> buffer = nullptr;  ///< Per-instance AL state

    Sound() = default;

    // NOTE(review): all forwarding members below dereference `buffer`
    // without a null check; callers must not use them before a
    // SoundBuffer has been attached.

    bool isPlaying() const {
        return buffer->isPlaying();
    }

    bool isPaused() const {
        return buffer->isPaused();
    }

    bool isStopped() const {
        return buffer->isStopped();
    }

    void play() {
        buffer->play();
    }

    void pause() {
        buffer->pause();
    }

    void stop() {
        buffer->stop();
    }

    void setPosition(const glm::vec3& position) {
        buffer->setPosition(position);
    }

    void setLooping(bool looping) {
        buffer->setLooping(looping);
    }

    void setPitch(float pitch) {
        buffer->setPitch(pitch);
    }

    void setGain(float gain) {
        buffer->setGain(gain);
    }

    void setMaxDistance(float maxDist) {
        buffer->setMaxDistance(maxDist);
    }

    /// Identifier exposed to the script layer.
    /// `id` is a size_t; the conversion to the script-facing int is now
    /// explicit instead of an implicit narrowing.
    int getScriptObjectID() const {
        return static_cast<int>(id);
    }
};
#endif

View File

@ -0,0 +1,78 @@
#include "audio/SoundBuffer.hpp"
#include <rw/types.hpp>
#include "audio/alCheck.hpp"
/// Create one OpenAL source/buffer pair with neutral defaults:
/// unity pitch and gain, placed at the origin, not moving, not looping.
SoundBuffer::SoundBuffer() {
    alCheck(alGenSources(1, &source));
    alCheck(alGenBuffers(1, &buffer));
    alCheck(alSourcef(source, AL_PITCH, 1));
    alCheck(alSourcef(source, AL_GAIN, 1));
    alCheck(alSource3f(source, AL_POSITION, 0, 0, 0));
    alCheck(alSource3f(source, AL_VELOCITY, 0, 0, 0));
    alCheck(alSourcei(source, AL_LOOPING, AL_FALSE));
}
/// Upload the decoded samples of `soundSource` into the OpenAL buffer and
/// attach the buffer to this object's source.
/// Mono input maps to AL_FORMAT_MONO16; anything else is treated as
/// AL_FORMAT_STEREO16 (16-bit signed samples either way).
/// NOTE(review): always returns true; AL failures are only reported via
/// alCheck and never propagated to the caller.
bool SoundBuffer::bufferData(SoundSource& soundSource) {
    alCheck(alBufferData(
        buffer,
        soundSource.channels == 1 ? AL_FORMAT_MONO16 : AL_FORMAT_STEREO16,
        &soundSource.data.front(), soundSource.data.size() * sizeof(int16_t),
        soundSource.sampleRate));
    alCheck(alSourcei(source, AL_BUFFER, buffer));
    return true;
}
bool SoundBuffer::isPlaying() const {
ALint sourceState;
alCheck(alGetSourcei(source, AL_SOURCE_STATE, &sourceState));
return AL_PLAYING == sourceState;
}
bool SoundBuffer::isPaused() const {
ALint sourceState;
alCheck(alGetSourcei(source, AL_SOURCE_STATE, &sourceState));
return AL_PAUSED == sourceState;
}
bool SoundBuffer::isStopped() const {
ALint sourceState;
alCheck(alGetSourcei(source, AL_SOURCE_STATE, &sourceState));
return AL_STOPPED == sourceState;
}
/// Start (or resume) playback of the attached buffer.
void SoundBuffer::play() {
    alCheck(alSourcePlay(source));
}

/// Pause playback; a subsequent play() resumes from the same position.
void SoundBuffer::pause() {
    alCheck(alSourcePause(source));
}

/// Stop playback and rewind the source.
void SoundBuffer::stop() {
    alCheck(alSourceStop(source));
}
/// Place the source at `position` (world coordinates) for 3D attenuation.
void SoundBuffer::setPosition(const glm::vec3& position) {
    alCheck(
        alSource3f(source, AL_POSITION, position.x, position.y, position.z));
}
/// Enable or disable looped playback on the underlying source.
void SoundBuffer::setLooping(bool looping) {
    alCheck(alSourcei(source, AL_LOOPING, looping ? AL_TRUE : AL_FALSE));
}
/// Set the playback pitch multiplier (1.0 = unchanged).
void SoundBuffer::setPitch(float pitch) {
    alCheck(alSourcef(source, AL_PITCH, pitch));
}

/// Set the playback gain (volume) multiplier (1.0 = unchanged).
void SoundBuffer::setGain(float gain) {
    alCheck(alSourcef(source, AL_GAIN, gain));
}

/// Set the distance beyond which attenuation no longer increases;
/// effective together with a clamped distance model.
void SoundBuffer::setMaxDistance(float maxDist) {
    alCheck(alSourcef(source, AL_MAX_DISTANCE, maxDist));
}

View File

@ -0,0 +1,38 @@
#ifndef _RWENGINE_SOUND_BUFFER_HPP_
#define _RWENGINE_SOUND_BUFFER_HPP_
#include <al.h>
#include <alc.h>
#include <glm/glm.hpp>
#include "audio/SoundSource.hpp"
/// OpenAL tool for playing
/// sound instance.
/// Owns one OpenAL source + one buffer and exposes playback controls
/// for a single sound instance.
/// NOTE(review): no destructor — the generated AL source/buffer handles
/// are never deleted here; confirm whether cleanup happens elsewhere.
class SoundBuffer {
    friend class SoundManager;

public:
    /// Generates the AL source/buffer and applies neutral defaults.
    SoundBuffer();
    /// Upload samples from soundSource and attach them to the source.
    bool bufferData(SoundSource& soundSource);

    /// State queries (wrap alGetSourcei(AL_SOURCE_STATE)).
    bool isPlaying() const;
    bool isPaused() const;
    bool isStopped() const;

    /// Playback control (wrap alSourcePlay/Pause/Stop).
    void play();
    void pause();
    void stop();

    /// Per-source properties forwarded to OpenAL.
    void setPosition(const glm::vec3& position);
    void setLooping(bool looping);
    void setPitch(float pitch);
    void setGain(float gain);
    void setMaxDistance(float maxDist);

private:
    ALuint source;  ///< OpenAL source handle
    ALuint buffer;  ///< OpenAL buffer handle
};
#endif

View File

@ -1,30 +1,29 @@
#include "audio/alCheck.hpp"
#include "audio/SoundManager.hpp"
#include "loaders/LoaderSDT.hpp"
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
#include <libavutil/avutil.h>
#include <libavutil/opt.h>
#include <libswresample/swresample.h>
}
//ab
#include <rw/debug.hpp>
// Rename some functions for older libavcodec/ffmpeg versions (e.g. Ubuntu Trusty)
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,28,1)
#define av_frame_alloc avcodec_alloc_frame
#define av_frame_free avcodec_free_frame
#endif
#include "audio/alCheck.hpp"
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(57,80,100)
#define avio_context_free av_freep
#endif
#include <rw/types.hpp>
constexpr int kNumOutputChannels = 2;
constexpr AVSampleFormat kOutputFMT = AV_SAMPLE_FMT_S16;
/// Return a playable buffer instance for sfx `name`, creating one on
/// demand.
Sound& SoundManager::getSoundRef(size_t name) {
    auto ref = buffers.find(name);
    if (ref != buffers.end()) {
        return ref->second;
    }
    // Bug fix: createSfxInstance() keys the new entry by its own buffer
    // counter, not by `name`; the original `buffers[name]` could
    // default-construct an empty Sound instead of returning the
    // freshly-created instance. Look it up by the returned id.
    const size_t id = createSfxInstance(name);
    return buffers[id];
}
/// Return the named (file-loaded) sound; default-constructs an empty
/// entry when the name is unknown.
Sound& SoundManager::getSoundRef(const std::string& name) {
    return sounds[name];  // @todo reloading, how to check is it wav/mp3?
}
SoundManager::SoundManager() {
initializeOpenAL();
@ -59,476 +58,24 @@ bool SoundManager::initializeOpenAL() {
return false;
}
// Needed for max distance
alDistanceModel(AL_LINEAR_DISTANCE_CLAMPED);
return true;
}
/// Configure libav logging verbosity: warnings in verbose debug builds,
/// errors only otherwise. Always succeeds.
bool SoundManager::initializeAVCodec() {
    // Bug fix: each branch called av_log_set_level twice with the same
    // value (leftover duplication); one call suffices.
#if RW_DEBUG && RW_VERBOSE_DEBUG_MESSAGES
    av_log_set_level(AV_LOG_WARNING);
#else
    av_log_set_level(AV_LOG_ERROR);
#endif
    return true;
}
/// Decode an audio file with libav and append interleaved 16-bit samples
/// to `data`; fills `channels` (always stereo on the resampling path)
/// and `sampleRate`. On failure an error is logged and any
/// partially-decoded samples are left in place.
void SoundManager::SoundSource::loadFromFile(const rwfs::path& filePath) {
    // Allocate audio frame
    AVFrame* frame = av_frame_alloc();
    if (!frame) {
        RW_ERROR("Error allocating the audio frame");
        return;
    }

    // Allocate formatting context
    AVFormatContext* formatContext = nullptr;
    if (avformat_open_input(&formatContext, filePath.string().c_str(), nullptr, nullptr) != 0) {
        av_frame_free(&frame);
        RW_ERROR("Error opening audio file (" << filePath << ")");
        return;
    }
    if (avformat_find_stream_info(formatContext, nullptr) < 0) {
        av_frame_free(&frame);
        avformat_close_input(&formatContext);
        RW_ERROR("Error finding audio stream info");
        return;
    }

    // Find the audio stream
    int streamIndex = av_find_best_stream(formatContext, AVMEDIA_TYPE_AUDIO, -1, -1, nullptr, 0);
    if (streamIndex < 0) {
        av_frame_free(&frame);
        avformat_close_input(&formatContext);
        RW_ERROR("Could not find any audio stream in the file " << filePath);
        return;
    }

    AVStream* audioStream = formatContext->streams[streamIndex];
    AVCodec* codec = avcodec_find_decoder(audioStream->codecpar->codec_id);

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57,5,0)
    // Legacy path: the stream owns its codec context.
    AVCodecContext* codecContext = audioStream->codec;
    codecContext->codec = codec;

    // Open the codec
    if (avcodec_open2(codecContext, codecContext->codec, nullptr) != 0) {
        av_frame_free(&frame);
        avformat_close_input(&formatContext);
        RW_ERROR("Couldn't open the audio codec context");
        return;
    }
#else
    // Initialize codec context for the decoder.
    AVCodecContext* codecContext = avcodec_alloc_context3(codec);
    if (!codecContext) {
        av_frame_free(&frame);
        avformat_close_input(&formatContext);
        RW_ERROR("Couldn't allocate a decoding context.");
        return;
    }

    // Fill the codecCtx with the parameters of the codec used in the read file.
    if (avcodec_parameters_to_context(codecContext, audioStream->codecpar) != 0) {
        // Bug fix: the original fell through after freeing codecContext
        // here and kept using the freed context; bail out instead.
        av_frame_free(&frame);
        avcodec_close(codecContext);
        avcodec_free_context(&codecContext);
        avformat_close_input(&formatContext);
        RW_ERROR("Couldn't find parametrs for context");
        return;
    }

    // Initialize the decoder.
    if (avcodec_open2(codecContext, codec, nullptr) != 0) {
        av_frame_free(&frame);
        avcodec_close(codecContext);
        avcodec_free_context(&codecContext);
        avformat_close_input(&formatContext);
        RW_ERROR("Couldn't open the audio codec context");
        return;
    }
#endif

    // Expose audio metadata: output is resampled to stereo on the
    // modern path below.
    channels = kNumOutputChannels;
    sampleRate = static_cast<size_t>(codecContext->sample_rate);

    // Prepare resampler — created lazily once the first frame's layout
    // is known.
    SwrContext* swr = nullptr;

    // Start reading audio packets
    AVPacket readingPacket;
    av_init_packet(&readingPacket);

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57,37,100)
    while (av_read_frame(formatContext, &readingPacket) == 0) {
        if (readingPacket.stream_index == audioStream->index) {
            AVPacket decodingPacket = readingPacket;
            while (decodingPacket.size > 0) {
                // Decode audio packet
                int gotFrame = 0;
                int len = avcodec_decode_audio4(codecContext, frame, &gotFrame, &decodingPacket);
                if (len >= 0 && gotFrame) {
                    // Write samples to audio buffer
                    for (size_t i = 0; i < static_cast<size_t>(frame->nb_samples); i++) {
                        // Interleave left/right channels
                        for (size_t channel = 0; channel < channels; channel++) {
                            int16_t sample = reinterpret_cast<int16_t*>(frame->data[channel])[i];
                            data.push_back(sample);
                        }
                    }
                    decodingPacket.size -= len;
                    decodingPacket.data += len;
                } else {
                    decodingPacket.size = 0;
                    decodingPacket.data = nullptr;
                }
            }
        }
        av_free_packet(&readingPacket);
    }
#else
    // One reusable frame for the resampled output; the original
    // allocated a fresh AVFrame per packet and only unref'd it,
    // leaking the frame structs.
    AVFrame* resampled = av_frame_alloc();
    while (av_read_frame(formatContext, &readingPacket) == 0) {
        if (readingPacket.stream_index == audioStream->index) {
            int sendPacket = avcodec_send_packet(codecContext, &readingPacket);
            av_packet_unref(&readingPacket);

            int receiveFrame = 0;
            while ((receiveFrame = avcodec_receive_frame(codecContext, frame)) == 0) {
                if (!swr) {
                    if (frame->channels == 1 || frame->channel_layout == 0) {
                        frame->channel_layout = av_get_default_channel_layout(1);
                    }
                    swr = swr_alloc_set_opts(
                        nullptr,
                        AV_CH_LAYOUT_STEREO,     // output channel layout
                        kOutputFMT,              // output format
                        frame->sample_rate,      // output sample rate
                        frame->channel_layout,   // input channel layout
                        static_cast<AVSampleFormat>(frame->format),  // input format
                        frame->sample_rate,      // input sample rate
                        0, nullptr);
                    if (!swr) {
                        RW_ERROR("Resampler has not been successfully allocated.");
                        // Bug fix: release everything before bailing out
                        // (the original leaked frame/codec/format here).
                        av_frame_free(&resampled);
                        av_frame_free(&frame);
                        avcodec_close(codecContext);
                        avcodec_free_context(&codecContext);
                        avformat_close_input(&formatContext);
                        return;
                    }
                    swr_init(swr);
                    if (!swr_is_initialized(swr)) {
                        RW_ERROR("Resampler has not been properly initialized.");
                        swr_free(&swr);
                        av_frame_free(&resampled);
                        av_frame_free(&frame);
                        avcodec_close(codecContext);
                        avcodec_free_context(&codecContext);
                        avformat_close_input(&formatContext);
                        return;
                    }
                }
                // Decode audio packet
                if (receiveFrame == 0 && sendPacket == 0) {
                    // Resample into interleaved stereo and append to the
                    // audio buffer.
                    resampled->channel_layout = AV_CH_LAYOUT_STEREO;
                    resampled->sample_rate = frame->sample_rate;
                    resampled->format = kOutputFMT;
                    resampled->channels = kNumOutputChannels;
                    swr_config_frame(swr, resampled, frame);
                    if (swr_convert_frame(swr, resampled, frame) < 0) {
                        RW_ERROR("Error resampling "<< filePath << '\n');
                    }
                    for (size_t i = 0; i < static_cast<size_t>(resampled->nb_samples) * channels; i++) {
                        data.push_back(reinterpret_cast<int16_t*>(resampled->data[0])[i]);
                    }
                    av_frame_unref(resampled);
                }
            }
        } else {
            // Bug fix: packets from other streams were never unref'd.
            av_packet_unref(&readingPacket);
        }
    }
    av_frame_free(&resampled);
#endif

    // Cleanup
    /// Free all data used by the frame.
    av_frame_free(&frame);
    /// Free resampler
    swr_free(&swr);
    /// Close the context and free all data associated to it, but not the context itself.
    avcodec_close(codecContext);
    /// Free the context itself.
    avcodec_free_context(&codecContext);
    /// We are done here. Close the input.
    avformat_close_input(&formatContext);
}
/// Structure for input data: an in-memory byte stream handed to
/// ffmpeg's custom AVIO read callback. `ptr` advances as read_packet()
/// consumes bytes; `size` counts what is left.
struct InputData {
    uint8_t *ptr = nullptr;  ///< current read position in the buffer
    size_t size{}; ///< size left in the buffer
};
/// Low level function for copying data from handler (opaque)
/// to buffer.
static int read_packet(void *opaque, uint8_t *buf, int buf_size) {
auto* input = reinterpret_cast<InputData*>(opaque);
buf_size = FFMIN(buf_size, input->size);
/* copy internal data to buf */
memcpy(buf, input->ptr, buf_size);
input->ptr += buf_size;
input->size -= buf_size;
return buf_size;
}
/// Decode sfx entry `index` from the game's sfx archive (SDT under
/// <path>/audio/sfx) through an in-memory AVIO stream and append the
/// interleaved 16-bit samples to `data`; fills `channels` and
/// `sampleRate` (the latter from the SDT asset info).
/// `asWave` is forwarded to LoaderSDT so the raw asset gets a wave
/// header ffmpeg can demux.
void SoundManager::SoundSource::loadSfx(const rwfs::path& path, const size_t& index, const bool asWave) {
    // Allocate audio frame
    AVFrame* frame = av_frame_alloc();
    if (!frame) {
        RW_ERROR("Error allocating the audio frame");
        return;
    }

    /// Now we need to prepare "custom" format context
    /// We need sdt loader for that purpose
    LoaderSDT sdt{};
    sdt.load(path / "audio/sfx");

    std::unique_ptr<char[]> raw_sound = sdt.loadToMemory(index, asWave);
    if (!raw_sound) {
        av_frame_free(&frame);
        RW_ERROR("Error loading sound");
        return;
    }

    /// Prepare input: the AVIO cursor reads straight out of the loaded
    /// asset; raw_sound stays alive for the whole function.
    /// (Bug fix: the original also allocated a same-sized scratch buffer
    /// here whose pointer was immediately overwritten — dead allocation,
    /// removed.)
    InputData input{};
    input.size = sizeof(WaveHeader) + sdt.assetInfo.size;
    input.ptr = reinterpret_cast<uint8_t*>(raw_sound.get());

    /// Allocate the AVIO scratch buffer; it is released in the cleanup
    /// paths below via formatContext->pb->buffer.
    static constexpr size_t ioBufferSize = 4096;
    auto ioBuffer = static_cast<uint8_t*>(av_malloc(ioBufferSize));

    /// Finally prepare our "custom" format context
    AVIOContext* avioContext = avio_alloc_context(ioBuffer, ioBufferSize, 0, &input, &read_packet, nullptr, nullptr);
    AVFormatContext* formatContext = avformat_alloc_context();
    formatContext->pb = avioContext;

    if (avformat_open_input(&formatContext, "nothint", nullptr, nullptr) != 0) {
        av_free(formatContext->pb->buffer);
        avio_context_free(&formatContext->pb);
        av_frame_free(&frame);
        RW_ERROR("Error opening audio file (" << index << ")");
        return;
    }
    if (avformat_find_stream_info(formatContext, nullptr) < 0) {
        av_free(formatContext->pb->buffer);
        avio_context_free(&formatContext->pb);
        av_frame_free(&frame);
        avformat_close_input(&formatContext);
        RW_ERROR("Error finding audio stream info");
        return;
    }

    // Find the audio stream
    int streamIndex = av_find_best_stream(formatContext, AVMEDIA_TYPE_AUDIO, -1, -1, nullptr, 0);
    if (streamIndex < 0) {
        av_free(formatContext->pb->buffer);
        avio_context_free(&formatContext->pb);
        av_frame_free(&frame);
        avformat_close_input(&formatContext);
        RW_ERROR("Could not find any audio stream in the file ");
        return;
    }

    AVStream* audioStream = formatContext->streams[streamIndex];
    AVCodec* codec = avcodec_find_decoder(audioStream->codecpar->codec_id);

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57,5,0)
    // Legacy path: the stream owns its codec context.
    AVCodecContext* codecContext = audioStream->codec;
    codecContext->codec = codec;

    // Open the codec
    if (avcodec_open2(codecContext, codecContext->codec, nullptr) != 0) {
        av_free(formatContext->pb->buffer);
        avio_context_free(&formatContext->pb);
        av_frame_free(&frame);
        avformat_close_input(&formatContext);
        RW_ERROR("Couldn't open the audio codec context");
        return;
    }
#else
    // Initialize codec context for the decoder.
    AVCodecContext* codecContext = avcodec_alloc_context3(codec);
    if (!codecContext) {
        av_free(formatContext->pb->buffer);
        avio_context_free(&formatContext->pb);
        av_frame_free(&frame);
        avformat_close_input(&formatContext);
        RW_ERROR("Couldn't allocate a decoding context.");
        return;
    }

    // Fill the codecCtx with the parameters of the codec used in the read file.
    if (avcodec_parameters_to_context(codecContext, audioStream->codecpar) != 0) {
        av_free(formatContext->pb->buffer);
        avio_context_free(&formatContext->pb);
        avcodec_close(codecContext);
        avcodec_free_context(&codecContext);
        avformat_close_input(&formatContext);
        RW_ERROR("Couldn't find parametrs for context");
        return;
    }

    // Initialize the decoder.
    if (avcodec_open2(codecContext, codec, nullptr) != 0) {
        av_free(formatContext->pb->buffer);
        avio_context_free(&formatContext->pb);
        avcodec_close(codecContext);
        avcodec_free_context(&codecContext);
        avformat_close_input(&formatContext);
        RW_ERROR("Couldn't open the audio codec context");
        return;
    }
#endif

    // Expose audio metadata
    channels = static_cast<size_t>(codecContext->channels);
    sampleRate = sdt.assetInfo.sampleRate;

    // OpenAL only supports mono or stereo, so error on more than 2 channels
    if (channels > 2) {
        RW_ERROR("Audio has more than two channels");
        av_free(formatContext->pb->buffer);
        avio_context_free(&formatContext->pb);
        av_frame_free(&frame);
        avcodec_close(codecContext);
        // Bug fix: this path closed the context but leaked the context
        // struct itself, unlike every other cleanup path.
        avcodec_free_context(&codecContext);
        avformat_close_input(&formatContext);
        return;
    }

    // Start reading audio packets
    AVPacket readingPacket;
    av_init_packet(&readingPacket);

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57,37,100)
    while (av_read_frame(formatContext, &readingPacket) == 0) {
        if (readingPacket.stream_index == audioStream->index) {
            AVPacket decodingPacket = readingPacket;
            while (decodingPacket.size > 0) {
                // Decode audio packet
                int gotFrame = 0;
                int len = avcodec_decode_audio4(codecContext, frame, &gotFrame, &decodingPacket);
                if (len >= 0 && gotFrame) {
                    // Write samples to audio buffer
                    for (size_t i = 0; i < static_cast<size_t>(frame->nb_samples); i++) {
                        // Interleave left/right channels
                        for (size_t channel = 0; channel < channels; channel++) {
                            int16_t sample = reinterpret_cast<int16_t*>(frame->data[channel])[i];
                            data.push_back(sample);
                        }
                    }
                    decodingPacket.size -= len;
                    decodingPacket.data += len;
                } else {
                    decodingPacket.size = 0;
                    decodingPacket.data = nullptr;
                }
            }
        }
        av_free_packet(&readingPacket);
    }
#else
    while (av_read_frame(formatContext, &readingPacket) == 0) {
        if (readingPacket.stream_index == audioStream->index) {
            AVPacket decodingPacket = readingPacket;
            int sendPacket = avcodec_send_packet(codecContext, &decodingPacket);
            int receiveFrame = 0;
            while ((receiveFrame = avcodec_receive_frame(codecContext, frame)) == 0) {
                // Decode audio packet
                if (receiveFrame == 0 && sendPacket == 0) {
                    // Write samples to audio buffer
                    for (size_t i = 0; i < static_cast<size_t>(frame->nb_samples); i++) {
                        // Interleave left/right channels
                        for (size_t channel = 0; channel < channels; channel++) {
                            int16_t sample = reinterpret_cast<int16_t*>(frame->data[channel])[i];
                            data.push_back(sample);
                        }
                    }
                }
            }
        }
        av_packet_unref(&readingPacket);
    }
#endif

    // Cleanup
    /// Free all data used by the frame.
    av_frame_free(&frame);
    /// Close the context and free all data associated to it, but not the context itself.
    avcodec_close(codecContext);
    /// Free the context itself.
    avcodec_free_context(&codecContext);
    /// Free our custom AVIO.
    av_free(formatContext->pb->buffer);
    avio_context_free(&formatContext->pb);
    /// We are done here. Close the input.
    avformat_close_input(&formatContext);
}
SoundManager::SoundBuffer::SoundBuffer() {
alCheck(alGenSources(1, &source));
alCheck(alGenBuffers(1, &buffer));
alCheck(alSourcef(source, AL_PITCH, 1));
alCheck(alSourcef(source, AL_GAIN, 1));
alCheck(alSource3f(source, AL_POSITION, 0, 0, 0));
alCheck(alSource3f(source, AL_VELOCITY, 0, 0, 0));
alCheck(alSourcei(source, AL_LOOPING, AL_FALSE));
}
bool SoundManager::SoundBuffer::bufferData(SoundSource& soundSource) {
alCheck(alBufferData(
buffer, soundSource.channels == 1 ? AL_FORMAT_MONO16
: AL_FORMAT_STEREO16,
&soundSource.data.front(), soundSource.data.size() * sizeof(int16_t),
soundSource.sampleRate));
alCheck(alSourcei(source, AL_BUFFER, buffer));
return true;
}
bool SoundManager::loadSound(const std::string& name,
const rwfs::path& path) {
const std::string& fileName) {
Sound* sound = nullptr;
auto sound_iter = sounds.find(name);
@ -540,71 +87,148 @@ bool SoundManager::loadSound(const std::string& name,
std::forward_as_tuple());
sound = &emplaced.first->second;
sound->source.loadFromFile(path);
sound->isLoaded = sound->buffer.bufferData(sound->source);
sound->source = std::make_shared<SoundSource>();
sound->buffer = std::make_unique<SoundBuffer>();
sound->source->loadFromFile(fileName);
sound->isLoaded = sound->buffer->bufferData(*sound->source);
}
return sound->isLoaded;
}
/// Decode sfx entry `index` from the game's audio archive and cache the
/// decoded samples in `sfx`. No OpenAL buffer is created here — see
/// createSfxInstance() for playable instances.
void SoundManager::loadSfxSound(const rwfs::path& path, size_t index) {
    Sound* sound = nullptr;
    auto emplaced =
        sfx.emplace(std::piecewise_construct, std::forward_as_tuple(index),
                    std::forward_as_tuple());
    sound = &emplaced.first->second;
    // Note: if the entry already existed, its source is replaced and the
    // sfx is decoded again.
    sound->source = std::make_shared<SoundSource>();
    sound->source->loadSfx(path, sdt, index);
}
/// Create a playable buffer instance for the already-loaded sfx `index`
/// and return the instance's id in `buffers`.
size_t SoundManager::createSfxInstance(size_t index) {
    Sound* sound = nullptr;
    // NOTE(review): no check that `index` was loaded via loadSfxSound();
    // if it wasn't, `soundRef == sfx.end()` and the dereferences below
    // are undefined behaviour — confirm callers guarantee the load.
    auto soundRef = sfx.find(index);
    // Try to reuse first available buffer
    // (aka with stopped state)
    for (auto& sound : buffers) {
        if (sound.second.buffer && sound.second.isStopped()) {
            // Let's use this buffer
            // NOTE(review): "reuse" replaces the SoundBuffer outright;
            // SoundBuffer has no destructor freeing its AL handles, so
            // the old source/buffer appear to leak — verify.
            sound.second.buffer = std::make_unique<SoundBuffer>();
            sound.second.source = soundRef->second.source;
            sound.second.isLoaded =
                sound.second.buffer->bufferData(*sound.second.source);
            return sound.first;
        }
    }
    // There's no available free buffer, so
    // we should create a new one.
    auto emplaced = buffers.emplace(std::piecewise_construct,
                                    std::forward_as_tuple(bufferNr),
                                    std::forward_as_tuple());
    sound = &emplaced.first->second;
    sound->id = bufferNr;
    sound->buffer = std::make_unique<SoundBuffer>();
    sound->source = soundRef->second.source;
    sound->isLoaded = sound->buffer->bufferData(*sound->source);
    bufferNr++;
    return sound->id;
}
bool SoundManager::isLoaded(const std::string& name) {
if (sounds.find(name) != sounds.end()) {
return sounds[name].isLoaded;
auto sound = sounds.find(name);
if (sound != sounds.end()) {
return sound->second.isLoaded;
}
return false;
}
void SoundManager::playSound(const std::string& name) {
if (sounds.find(name) != sounds.end()) {
alCheck(alSourcePlay(sounds[name].buffer.source));
}
}
void SoundManager::pauseSound(const std::string& name) {
if (sounds.find(name) != sounds.end()) {
alCheck(alSourcePause(sounds[name].buffer.source));
}
}
bool SoundManager::isPaused(const std::string& name) {
if (sounds.find(name) != sounds.end()) {
ALint sourceState;
alCheck(alGetSourcei(sounds[name].buffer.source, AL_SOURCE_STATE,
&sourceState));
return AL_PAUSED == sourceState;
}
return false;
}
bool SoundManager::isPlaying(const std::string& name) {
if (sounds.find(name) != sounds.end()) {
ALint sourceState;
alCheck(alGetSourcei(sounds[name].buffer.source, AL_SOURCE_STATE,
&sourceState));
return AL_PLAYING == sourceState;
auto sound = sounds.find(name);
if (sound != sounds.end()) {
return sound->second.isPlaying();
}
return false;
}
bool SoundManager::isStopped(const std::string& name) {
auto sound = sounds.find(name);
if (sound != sounds.end()) {
return sound->second.isStopped();
}
return false;
}
bool SoundManager::isPaused(const std::string& name) {
auto sound = sounds.find(name);
if (sound != sounds.end()) {
return sound->second.isPaused();
}
return false;
}
void SoundManager::playSound(const std::string& name) {
auto sound = sounds.find(name);
if (sound != sounds.end()) {
return sound->second.play();
}
}
/// Position and play an sfx buffer instance previously obtained via
/// createSfxInstance()/getSoundRef(size_t). Pitch and gain are reset to
/// 1; maxDist == -1 leaves the attenuation distance unlimited.
void SoundManager::playSfx(size_t name, const glm::vec3& position, bool looping,
                           int maxDist) {
    const auto it = buffers.find(name);
    if (it == buffers.end()) {
        return;
    }
    auto& sound = it->second;
    sound.setPosition(position);
    if (looping) {
        sound.setLooping(looping);
    }
    sound.setPitch(1.f);
    sound.setGain(1.f);
    if (maxDist != -1) {
        sound.setMaxDistance(maxDist);
    }
    sound.play();
}
void SoundManager::pauseAllSounds() {
for (auto &sound : sounds) {
if(isPlaying(sound.first)) {
pauseSound(sound.first);
for (auto& sound : sounds) {
if (sound.second.isPlaying()) {
sound.second.pause();
}
}
for (auto& sound : buffers) {
if (sound.second.isPlaying()) {
sound.second.pause();
}
}
}
void SoundManager::resumeAllSounds() {
for (auto &sound : sounds) {
if(isPaused(sound.first))
playSound(sound.first);
for (auto& sound : sounds) {
if (sound.second.isPaused()) {
sound.second.play();
}
}
for (auto& sound : buffers) {
if (sound.second.isPaused()) {
sound.second.play();
}
}
}
bool SoundManager::playBackground(const std::string& fileName) {
if (this->loadSound(fileName, fileName)) {
backgroundNoise = fileName;
this->playSound(fileName);
auto& sound = getSoundRef(fileName);
sound.play();
return true;
}
@ -612,16 +236,21 @@ bool SoundManager::playBackground(const std::string& fileName) {
}
bool SoundManager::loadMusic(const std::string& name,
const rwfs::path& path) {
return loadSound(name, path);
const std::string& fileName) {
return loadSound(name, fileName);
}
void SoundManager::playMusic(const std::string& name) {
playSound(name);
auto sound = sounds.find(name);
if (sound != sounds.end()) {
sound->second.play();
}
}
void SoundManager::stopMusic(const std::string& name) {
if (sounds.find(name) != sounds.end()) {
alCheck(alSourceStop(sounds[name].buffer.source));
auto sound = sounds.find(name);
if (sound != sounds.end()) {
sound->second.stop();
}
}
@ -634,3 +263,24 @@ void SoundManager::pause(bool p) {
}
}
}
/// Set the OpenAL listener position (world coordinates).
void SoundManager::setListenerPosition(const glm::vec3& position) {
    // Consistency: wrap in alCheck like every other AL call in this file.
    alCheck(alListener3f(AL_POSITION, position.x, position.y, position.z));
}
/// Set the OpenAL listener velocity (used for Doppler effects).
void SoundManager::setListenerVelocity(const glm::vec3& vel) {
    // Consistency: wrap in alCheck like every other AL call in this file.
    alCheck(alListener3f(AL_VELOCITY, vel.x, vel.y, vel.z));
}
/// Set the OpenAL listener orientation.
/// OpenAL expects {at.x, at.y, at.z, up.x, up.y, up.z};
/// NOTE(review): this passes {0, at.y, 0, 0, 0, at.z}, which does not
/// match that layout — confirm the intended forward/up vectors before
/// changing the values (kept as-is here).
void SoundManager::setListenerOrientation(const glm::vec3& at) {
    float v[6] = {0, at.y, 0, 0, 0, at.z};
    // Consistency: wrap in alCheck like every other AL call in this file.
    alCheck(alListenerfv(AL_ORIENTATION, v));
}
/// Set the 3D position of the named sound's OpenAL source; no-op when
/// the sound is not loaded.
void SoundManager::setSoundPosition(const std::string& name,
                                    const glm::vec3& position) {
    // Single map lookup (was find() followed by operator[]), matching
    // the iterator style used by the sibling accessors.
    auto sound = sounds.find(name);
    if (sound != sounds.end()) {
        alCheck(alSource3f(sound->second.buffer->source, AL_POSITION,
                           position.x, position.y, position.z));
    }
}

View File

@ -1,85 +1,111 @@
#ifndef _RWENGINE_SOUNDMANAGER_HPP_
#define _RWENGINE_SOUNDMANAGER_HPP_
#include "audio/Sound.hpp"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include <rw/filesystem.hpp>
#include <al.h>
#include <alc.h>
#include <glm/glm.hpp>
#include <rw/filesystem.hpp>
#include <loaders/LoaderSDT.hpp>
/// Game's sound manager.
/// It handles everything related to sounds.
/// Worth noting: there are three kinds of sounds —
/// those containing both the raw source and an OpenAL buffer for playing
/// (only one instance at a time), and those containing only a source or
/// only a buffer (allowing multiple simultaneous instances without
/// duplicating the raw source data).
class SoundManager {
public:
SoundManager();
~SoundManager();
bool loadSound(const std::string& name, const rwfs::path& path);
bool isLoaded(const std::string& name);
void playSound(const std::string& name);
void pauseSound(const std::string& name);
/// Load sound from file and store it with selected name
bool loadSound(const std::string& name, const std::string& fileName);
bool isPaused(const std::string& name);
/// Load all sfx sounds
void loadSfxSound(const rwfs::path& path, size_t index);
Sound& getSoundRef(size_t name);
Sound& getSoundRef(const std::string& name);
size_t createSfxInstance(size_t index);
/// Checking is selected sound loaded.
bool isLoaded(const std::string& name);
/// Checking is selected sound playing.
bool isPlaying(const std::string& name);
/// Checking is selected sound stopped.
bool isStopped(const std::string& name);
/// Checking is selected sound paused.
bool isPaused(const std::string& name);
/// Play sound with selected name
void playSound(const std::string& name);
/// Same effect as playSound with one parameter,
/// but this function works for sfx and
/// also allows setting the position,
/// looping and max distance.
/// -1 means no limit on max distance.
void playSfx(size_t name, const glm::vec3& position, bool looping = false,
int maxDist = -1);
void pauseAllSounds();
void resumeAllSounds();
bool playBackground(const std::string& name);
/// Play background from selected file.
bool playBackground(const std::string& fileName);
bool loadMusic(const std::string& name, const rwfs::path& path);
bool loadMusic(const std::string& name, const std::string& fileName);
void playMusic(const std::string& name);
void stopMusic(const std::string& name);
/// Setting position of listener for openAL.
void setListenerPosition(const glm::vec3& position);
/// Setting velocity of listener for openAL.
void setListenerVelocity(const glm::vec3& vel);
/// Setting orientation of listener for openAL.
/// Worth noted v = { at.x, at.y, at.z, up.x, up.y, up.z}
void setListenerOrientation(const glm::vec3& at);
/// Setting position of sound source in buffer.
void setSoundPosition(const std::string& name, const glm::vec3& position);
void pause(bool p);
private:
class SoundSource {
friend class SoundManager;
friend class SoundBuffer;
public:
void loadFromFile(const rwfs::path& filePath);
void loadSfx(const rwfs::path& path, const size_t& index, const bool asWave = true);
private:
std::vector<int16_t> data;
size_t channels;
size_t sampleRate;
};
class SoundBuffer {
friend class SoundManager;
public:
SoundBuffer();
bool bufferData(SoundSource& soundSource);
private:
ALuint source;
ALuint buffer;
};
struct Sound {
SoundSource source;
SoundBuffer buffer;
bool isLoaded = false;
};
bool initializeOpenAL();
bool initializeAVCodec();
ALCcontext* alContext = nullptr;
ALCdevice* alDevice = nullptr;
std::map<std::string, Sound> sounds;
/// Containers for sounds
std::unordered_map<std::string, Sound> sounds;
std::unordered_map<size_t, Sound> sfx;
std::unordered_map<size_t, Sound> buffers;
std::string backgroundNoise;
/// Nr of already created buffers
size_t bufferNr = 0;
LoaderSDT sdt{};
};
#endif

View File

@ -0,0 +1,487 @@
#include "audio/SoundSource.hpp"
#include <loaders/LoaderSDT.hpp>
#include <rw/types.hpp>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
#include <libavutil/avutil.h>
#include <libavutil/opt.h>
#include <libswresample/swresample.h>
}
// Rename some functions for older libavcodec/ffmpeg versions (e.g. Ubuntu
// Trusty)
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55, 28, 1)
#define av_frame_alloc avcodec_alloc_frame
#define av_frame_free avcodec_free_frame
#endif
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(57, 80, 100)
#define avio_context_free av_freep
#endif
constexpr int kNumOutputChannels = 2;
constexpr AVSampleFormat kOutputFMT = AV_SAMPLE_FMT_S16;
void SoundSource::loadFromFile(const rwfs::path& filePath) {
// Allocate audio frame
AVFrame* frame = av_frame_alloc();
if (!frame) {
RW_ERROR("Error allocating the audio frame");
return;
}
// Allocate formatting context
AVFormatContext* formatContext = nullptr;
if (avformat_open_input(&formatContext, filePath.string().c_str(), nullptr,
nullptr) != 0) {
av_frame_free(&frame);
RW_ERROR("Error opening audio file (" << filePath << ")");
return;
}
if (avformat_find_stream_info(formatContext, nullptr) < 0) {
av_frame_free(&frame);
avformat_close_input(&formatContext);
RW_ERROR("Error finding audio stream info");
return;
}
// Find the audio stream
int streamIndex = av_find_best_stream(formatContext, AVMEDIA_TYPE_AUDIO, -1,
-1, nullptr, 0);
if (streamIndex < 0) {
av_frame_free(&frame);
avformat_close_input(&formatContext);
RW_ERROR("Could not find any audio stream in the file " << filePath);
return;
}
AVStream* audioStream = formatContext->streams[streamIndex];
AVCodec* codec = avcodec_find_decoder(audioStream->codecpar->codec_id);
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57, 5, 0)
AVCodecContext* codecContext = audioStream->codec;
codecContext->codec = codec;
// Open the codec
if (avcodec_open2(codecContext, codecContext->codec, nullptr) != 0) {
av_frame_free(&frame);
avformat_close_input(&formatContext);
RW_ERROR("Couldn't open the audio codec context");
return;
}
#else
// Initialize codec context for the decoder.
AVCodecContext* codecContext = avcodec_alloc_context3(codec);
if (!codecContext) {
av_frame_free(&frame);
avformat_close_input(&formatContext);
RW_ERROR("Couldn't allocate a decoding context.");
return;
}
// Fill the codecCtx with the parameters of the codec used in the read file.
if (avcodec_parameters_to_context(codecContext, audioStream->codecpar) !=
0) {
avcodec_close(codecContext);
avcodec_free_context(&codecContext);
avformat_close_input(&formatContext);
RW_ERROR("Couldn't find parametrs for context");
}
// Initialize the decoder.
if (avcodec_open2(codecContext, codec, nullptr) != 0) {
avcodec_close(codecContext);
avcodec_free_context(&codecContext);
avformat_close_input(&formatContext);
RW_ERROR("Couldn't open the audio codec context");
return;
}
#endif
// Expose audio metadata
channels = kNumOutputChannels;
sampleRate = static_cast<size_t>(codecContext->sample_rate);
// prepare resampler
SwrContext* swr = nullptr;
// Start reading audio packets
AVPacket readingPacket;
av_init_packet(&readingPacket);
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57, 37, 100)
while (av_read_frame(formatContext, &readingPacket) == 0) {
if (readingPacket.stream_index == audioStream->index) {
AVPacket decodingPacket = readingPacket;
while (decodingPacket.size > 0) {
// Decode audio packet
int gotFrame = 0;
int len = avcodec_decode_audio4(codecContext, frame, &gotFrame,
&decodingPacket);
if (len >= 0 && gotFrame) {
// Write samples to audio buffer
for (size_t i = 0;
i < static_cast<size_t>(frame->nb_samples); i++) {
// Interleave left/right channels
for (size_t channel = 0; channel < channels;
channel++) {
int16_t sample = reinterpret_cast<int16_t*>(
frame->data[channel])[i];
data.push_back(sample);
}
}
decodingPacket.size -= len;
decodingPacket.data += len;
} else {
decodingPacket.size = 0;
decodingPacket.data = nullptr;
}
}
}
av_free_packet(&readingPacket);
}
#else
AVFrame* resampled = nullptr;
while (av_read_frame(formatContext, &readingPacket) == 0) {
if (readingPacket.stream_index == audioStream->index) {
int sendPacket = avcodec_send_packet(codecContext, &readingPacket);
av_packet_unref(&readingPacket);
int receiveFrame = 0;
while ((receiveFrame =
avcodec_receive_frame(codecContext, frame)) == 0) {
if (!swr) {
if (frame->channels == 1 || frame->channel_layout == 0) {
frame->channel_layout =
av_get_default_channel_layout(1);
}
swr = swr_alloc_set_opts(
nullptr,
AV_CH_LAYOUT_STEREO, // output channel layout
kOutputFMT, // output format
frame->sample_rate, // output sample rate
frame->channel_layout, // input channel layout
static_cast<AVSampleFormat>(
frame->format), // input format
frame->sample_rate, // input sample rate
0, nullptr);
if (!swr) {
RW_ERROR(
"Resampler has not been successfully allocated.");
return;
}
swr_init(swr);
if (!swr_is_initialized(swr)) {
RW_ERROR(
"Resampler has not been properly initialized.");
return;
}
}
// Decode audio packet
if (receiveFrame == 0 && sendPacket == 0) {
// Write samples to audio buffer
resampled = av_frame_alloc();
resampled->channel_layout = AV_CH_LAYOUT_STEREO;
resampled->sample_rate = frame->sample_rate;
resampled->format = kOutputFMT;
resampled->channels = kNumOutputChannels;
swr_config_frame(swr, resampled, frame);
if (swr_convert_frame(swr, resampled, frame) < 0) {
RW_ERROR("Error resampling " << filePath << '\n');
}
for (size_t i = 0;
i <
static_cast<size_t>(resampled->nb_samples) * channels;
i++) {
data.push_back(
reinterpret_cast<int16_t*>(resampled->data[0])[i]);
}
av_frame_unref(resampled);
}
}
}
}
#endif
// Cleanup
/// Free all data used by the frame.
av_frame_free(&frame);
/// Free resampler
swr_free(&swr);
/// Close the context and free all data associated to it, but not the
/// context itself.
avcodec_close(codecContext);
/// Free the context itself.
avcodec_free_context(&codecContext);
/// We are done here. Close the input.
avformat_close_input(&formatContext);
}
/// In-memory read cursor handed to ffmpeg's custom AVIO layer
/// (consumed by read_packet below): `ptr` advances and `size`
/// shrinks as the demuxer reads from the sound blob.
struct InputData {
    uint8_t* ptr = nullptr;  ///< current read position in the sound blob
    size_t size{}; ///< size left in the buffer
};
/// Low level function for copying data from handler (opaque)
/// to buffer.
static int read_packet(void* opaque, uint8_t* buf, int buf_size) {
auto* input = reinterpret_cast<InputData*>(opaque);
buf_size = FFMIN(buf_size, input->size);
/* copy internal data to buf */
memcpy(buf, input->ptr, buf_size);
input->ptr += buf_size;
input->size -= buf_size;
return buf_size;
}
/// Decode one sfx entry out of the game's sdt/raw archive into `data`
/// (interleaved 16-bit samples), exposing `channels` and `sampleRate`.
/// @param path   game root; the archive is loaded from path / "audio/sfx"
/// @param sdt    loader used to locate and extract the entry
/// @param index  index of the sound inside the archive
/// @param asWave whether loadToMemory should prepend a wave header
void SoundSource::loadSfx(const rwfs::path& path, LoaderSDT& sdt, size_t index, bool asWave) {
    // Allocate audio frame
    AVFrame* frame = av_frame_alloc();
    if (!frame) {
        RW_ERROR("Error allocating the audio frame");
        return;
    }

    sdt.load(path / "audio/sfx");

    /// Now we need to prepare "custom" format context
    /// We need sdt loader for that purpose
    std::unique_ptr<char[]> raw_sound = sdt.loadToMemory(index, asWave);
    if (!raw_sound) {
        av_frame_free(&frame);
        RW_ERROR("Error loading sound");
        return;
    }

    /// Prepare input. raw_sound owns the bytes for the whole decode, so
    /// the AVIO read callback can consume them directly — no extra copy.
    /// NOTE(review): the size assumes loadToMemory prepended a WaveHeader;
    /// confirm this also holds when asWave == false.
    InputData input{};
    input.size = sizeof(WaveHeader) + sdt.assetInfo.size;
    input.ptr = reinterpret_cast<uint8_t*>(raw_sound.get());

    /// Allocate scratch buffer for ffmpeg's AVIO layer
    /// (released together with the AVIO context during cleanup)
    static constexpr size_t ioBufferSize = 4096;
    auto ioBuffer = static_cast<uint8_t*>(av_malloc(ioBufferSize));
    if (!ioBuffer) {
        av_frame_free(&frame);
        RW_ERROR("Error allocating the io buffer");
        return;
    }

    /// Finally prepare our "custom" format context
    AVIOContext* avioContext = avio_alloc_context(
        ioBuffer, ioBufferSize, 0, &input, &read_packet, nullptr, nullptr);
    AVFormatContext* formatContext = avformat_alloc_context();
    formatContext->pb = avioContext;

    /// On failure avformat_open_input frees the user-supplied context and
    /// nulls the pointer, so the custom AVIO must be cleaned up through the
    /// saved avioContext rather than formatContext->pb.
    if (avformat_open_input(&formatContext, "nothint", nullptr, nullptr) != 0) {
        av_free(avioContext->buffer);
        avio_context_free(&avioContext);
        av_frame_free(&frame);
        RW_ERROR("Error opening audio file (" << index << ")");
        return;
    }

    if (avformat_find_stream_info(formatContext, nullptr) < 0) {
        av_free(formatContext->pb->buffer);
        avio_context_free(&formatContext->pb);
        av_frame_free(&frame);
        avformat_close_input(&formatContext);
        RW_ERROR("Error finding audio stream info");
        return;
    }

    // Find the audio stream
    int streamIndex = av_find_best_stream(formatContext, AVMEDIA_TYPE_AUDIO, -1,
                                          -1, nullptr, 0);
    if (streamIndex < 0) {
        av_free(formatContext->pb->buffer);
        avio_context_free(&formatContext->pb);
        av_frame_free(&frame);
        avformat_close_input(&formatContext);
        RW_ERROR("Could not find any audio stream in the file ");
        return;
    }

    AVStream* audioStream = formatContext->streams[streamIndex];
    AVCodec* codec = avcodec_find_decoder(audioStream->codecpar->codec_id);

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57, 5, 0)
    /// Old API: the codec context is owned by the stream, so it must not
    /// be freed by us later.
    AVCodecContext* codecContext = audioStream->codec;
    codecContext->codec = codec;

    // Open the codec
    if (avcodec_open2(codecContext, codecContext->codec, nullptr) != 0) {
        av_free(formatContext->pb->buffer);
        avio_context_free(&formatContext->pb);
        av_frame_free(&frame);
        avformat_close_input(&formatContext);
        RW_ERROR("Couldn't open the audio codec context");
        return;
    }
#else
    // Initialize codec context for the decoder.
    AVCodecContext* codecContext = avcodec_alloc_context3(codec);
    if (!codecContext) {
        av_free(formatContext->pb->buffer);
        avio_context_free(&formatContext->pb);
        av_frame_free(&frame);
        avformat_close_input(&formatContext);
        RW_ERROR("Couldn't allocate a decoding context.");
        return;
    }

    // Fill the codecCtx with the parameters of the codec used in the read file.
    if (avcodec_parameters_to_context(codecContext, audioStream->codecpar) !=
        0) {
        av_free(formatContext->pb->buffer);
        avio_context_free(&formatContext->pb);
        avcodec_close(codecContext);
        avcodec_free_context(&codecContext);
        avformat_close_input(&formatContext);
        RW_ERROR("Couldn't find parameters for context");
        return;
    }

    // Initialize the decoder.
    if (avcodec_open2(codecContext, codec, nullptr) != 0) {
        av_free(formatContext->pb->buffer);
        avio_context_free(&formatContext->pb);
        avcodec_close(codecContext);
        avcodec_free_context(&codecContext);
        avformat_close_input(&formatContext);
        RW_ERROR("Couldn't open the audio codec context");
        return;
    }
#endif

    // Expose audio metadata
    channels = static_cast<size_t>(codecContext->channels);
    sampleRate = sdt.assetInfo.sampleRate;

    // OpenAL only supports mono or stereo, so error on more than 2 channels
    if (channels > 2) {
        RW_ERROR("Audio has more than two channels");
        av_free(formatContext->pb->buffer);
        avio_context_free(&formatContext->pb);
        av_frame_free(&frame);
        avcodec_close(codecContext);
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 5, 0)
        /// Only the new API's context is ours to free; the old API's
        /// belongs to the stream.
        avcodec_free_context(&codecContext);
#endif
        avformat_close_input(&formatContext);
        return;
    }

    // Start reading audio packets
    AVPacket readingPacket;
    av_init_packet(&readingPacket);

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57, 37, 100)
    while (av_read_frame(formatContext, &readingPacket) == 0) {
        if (readingPacket.stream_index == audioStream->index) {
            AVPacket decodingPacket = readingPacket;

            while (decodingPacket.size > 0) {
                // Decode audio packet
                int gotFrame = 0;
                int len = avcodec_decode_audio4(codecContext, frame, &gotFrame,
                                                &decodingPacket);

                if (len >= 0 && gotFrame) {
                    // Write samples to audio buffer
                    for (size_t i = 0;
                         i < static_cast<size_t>(frame->nb_samples); i++) {
                        // Interleave left/right channels
                        for (size_t channel = 0; channel < channels;
                             channel++) {
                            int16_t sample = reinterpret_cast<int16_t*>(
                                frame->data[channel])[i];
                            data.push_back(sample);
                        }
                    }
                    decodingPacket.size -= len;
                    decodingPacket.data += len;
                } else {
                    decodingPacket.size = 0;
                    decodingPacket.data = nullptr;
                }
            }
        }
        av_free_packet(&readingPacket);
    }
#else
    while (av_read_frame(formatContext, &readingPacket) == 0) {
        if (readingPacket.stream_index == audioStream->index) {
            AVPacket decodingPacket = readingPacket;

            int sendPacket = avcodec_send_packet(codecContext, &decodingPacket);
            int receiveFrame = 0;

            while ((receiveFrame =
                        avcodec_receive_frame(codecContext, frame)) == 0) {
                // Decode audio packet
                if (receiveFrame == 0 && sendPacket == 0) {
                    // Write samples to audio buffer
                    for (size_t i = 0;
                         i < static_cast<size_t>(frame->nb_samples); i++) {
                        // Interleave left/right channels
                        for (size_t channel = 0; channel < channels;
                             channel++) {
                            int16_t sample = reinterpret_cast<int16_t*>(
                                frame->data[channel])[i];
                            data.push_back(sample);
                        }
                    }
                }
            }
        }
        av_packet_unref(&readingPacket);
    }
#endif

    // Cleanup
    /// Free all data used by the frame.
    av_frame_free(&frame);

    /// Close the context and free all data associated to it, but not the
    /// context itself.
    avcodec_close(codecContext);

#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 5, 0)
    /// Free the context itself. On the old API it is stream-owned and
    /// released by avformat_close_input, so it must not be freed here.
    avcodec_free_context(&codecContext);
#endif

    /// Free our custom AVIO.
    av_free(formatContext->pb->buffer);
    avio_context_free(&formatContext->pb);

    /// We are done here. Close the input.
    avformat_close_input(&formatContext);
}

View File

@ -0,0 +1,29 @@
#ifndef _RWENGINE_SOUND_SOURCE_HPP_
#define _RWENGINE_SOUND_SOURCE_HPP_
#include <cstddef>
#include <cstdint>
#include <vector>

#include <rw/filesystem.hpp>

#include <loaders/LoaderSDT.hpp>
/// Opaque for raw sound,
/// cooperate with ffmpeg
/// (loading and decoding sound)
class SoundSource {
    friend class SoundManager;
    friend class SoundBuffer;

public:
    /// Load sound from mp3/wav file
    void loadFromFile(const rwfs::path& filePath);

    /// Load sound from sdt file
    /// @param asWave whether the entry is extracted with a wave header
    void loadSfx(const rwfs::path& path, LoaderSDT& sdt, size_t index,
                 bool asWave = true);

private:
    /// Raw data (interleaved 16-bit samples)
    std::vector<int16_t> data;

    // Zero until one of the load* methods succeeds; initialized so a
    // SoundSource that never loaded anything is in a defined state.
    size_t channels = 0;
    size_t sampleRate = 0;
};
#endif