Convert RhythmGame to float and other tidy-ups
diff --git a/samples/MegaDrone/src/main/cpp/Synth.h b/samples/MegaDrone/src/main/cpp/Synth.h
index 1836fc0..c8c5548 100644
--- a/samples/MegaDrone/src/main/cpp/Synth.h
+++ b/samples/MegaDrone/src/main/cpp/Synth.h
@@ -20,7 +20,7 @@
#include <array>
#include "shared/Oscillator.h"
-#include "shared/MixerMono.h"
+#include "shared/Mixer.h"
#include "shared/MonoToStereo.h"
constexpr int kNumOscillators = 100;
@@ -65,7 +65,7 @@
// Rendering objects
std::array<Oscillator, kNumOscillators> mOscs;
- MixerMono mMixer;
+ Mixer mMixer;
std::shared_ptr<IRenderableAudio> mOutputStage;
};
diff --git a/samples/RhythmGame/README.md b/samples/RhythmGame/README.md
index 82e1a11..ceb81f8 100644
--- a/samples/RhythmGame/README.md
+++ b/samples/RhythmGame/README.md
@@ -39,9 +39,9 @@
### Audio rendering
-The `RenderableAudio` interface (abstract class) represents objects which can produce frames of audio data. The `SoundRecording` and `Mixer` objects both implement this interface.
+The `IRenderableAudio` interface (abstract class) represents objects which can produce frames of audio data. The `Player` and `Mixer` objects both implement this interface.
-Both the clap sound and backing tracks are represented by `SoundRecording` objects which are then mixed together using a `Mixer`.
+Both the clap sound and backing tracks are represented by `Player` objects which are then mixed together using a `Mixer`.
![Audio rendering](images/4-audio-rendering.png "Audio rendering")
diff --git a/samples/RhythmGame/src/main/cpp/Game.cpp b/samples/RhythmGame/src/main/cpp/Game.cpp
index ed82372..1adef91 100644
--- a/samples/RhythmGame/src/main/cpp/Game.cpp
+++ b/samples/RhythmGame/src/main/cpp/Game.cpp
@@ -51,6 +51,7 @@
// simultaneously using a single audio stream.
mMixer.addTrack(mClap);
mMixer.addTrack(mBackingTrack);
+ mMixer.setChannelCount(kChannelCount);
// Add the audio frame numbers on which the clap sound should be played to the clap event queue.
// The backing track tempo is 120 beats per minute, which is 2 beats per second. At a sample
@@ -68,7 +69,6 @@
// Create a builder
AudioStreamBuilder builder;
- builder.setFormat(AudioFormat::I16);
builder.setChannelCount(kChannelCount);
builder.setSampleRate(kSampleRateHz);
builder.setCallback(this);
@@ -78,6 +78,12 @@
Result result = builder.openStream(&mAudioStream);
if (result != Result::OK){
LOGE("Failed to open stream. Error: %s", convertToText(result));
+ return;
+ }
+
+ if (mAudioStream->getFormat() == AudioFormat::I16){
+ mConversionBuffer = std::make_unique<float[]>(mAudioStream->getBufferCapacityInFrames() *
+ kChannelCount);
}
// Reduce stream latency by setting the buffer size to a multiple of the burst size
@@ -138,6 +144,11 @@
DataCallbackResult Game::onAudioReady(AudioStream *oboeStream, void *audioData, int32_t numFrames) {
+ // If we're outputting in 16-bit we need to render into a separate buffer then convert that
+ // buffer to int16s
+ bool is16Bit = (oboeStream->getFormat() == AudioFormat::I16);
+ float *outputBuffer = (is16Bit) ? mConversionBuffer.get() : static_cast<float *>(audioData);
+
int64_t nextClapEvent;
for (int i = 0; i < numFrames; ++i) {
@@ -146,10 +157,16 @@
mClap->setPlaying(true);
mClapEvents.pop(nextClapEvent);
}
- mMixer.renderAudio(static_cast<int16_t*>(audioData)+(kChannelCount*i), 1);
+ mMixer.renderAudio(outputBuffer+(kChannelCount*i), 1);
mCurrentFrame++;
}
+ if (is16Bit){
+ oboe::convertFloatToPcm16(outputBuffer,
+ static_cast<int16_t*>(audioData),
+ numFrames * kChannelCount);
+ }
+
mLastUpdateTime = nowUptimeMillis();
return DataCallbackResult::Continue;
diff --git a/samples/RhythmGame/src/main/cpp/Game.h b/samples/RhythmGame/src/main/cpp/Game.h
index 3a17658..07d396a 100644
--- a/samples/RhythmGame/src/main/cpp/Game.h
+++ b/samples/RhythmGame/src/main/cpp/Game.h
@@ -52,7 +52,8 @@
AudioStream *mAudioStream{nullptr};
std::shared_ptr<Player> mClap;
std::shared_ptr<Player> mBackingTrack;
- Mixer<int16_t> mMixer;
+ Mixer mMixer;
+ std::unique_ptr<float[]> mConversionBuffer { nullptr }; // For float->int16 conversion
LockFreeQueue<int64_t, kMaxQueueItems> mClapEvents;
std::atomic<int64_t> mCurrentFrame { 0 };
diff --git a/samples/RhythmGame/src/main/cpp/audio/AAssetDataSource.cpp b/samples/RhythmGame/src/main/cpp/audio/AAssetDataSource.cpp
index 0d6dbc4..b9b6bbe 100644
--- a/samples/RhythmGame/src/main/cpp/audio/AAssetDataSource.cpp
+++ b/samples/RhythmGame/src/main/cpp/audio/AAssetDataSource.cpp
@@ -16,6 +16,7 @@
#include <utils/logging.h>
+#include <oboe/Oboe.h>
#include "AAssetDataSource.h"
@@ -33,17 +34,23 @@
// Get the length of the track (we assume it is stereo 48kHz)
off_t trackSizeInBytes = AAsset_getLength(asset);
+ auto numSamples = static_cast<int32_t>(trackSizeInBytes / sizeof(int16_t));
+ auto numFrames = static_cast<int32_t>(numSamples / channelCount);
- // Load it into memory
- auto *audioBuffer = static_cast<const int16_t*>(AAsset_getBuffer(asset));
+ // Load it into memory (we assume it is 16 bit signed integers)
+ auto *sourceBuffer = static_cast<const int16_t*>(AAsset_getBuffer(asset));
- if (audioBuffer == nullptr){
+ if (sourceBuffer == nullptr){
LOGE("Could not get buffer for track");
return nullptr;
}
- auto numFrames = static_cast<int32_t>(trackSizeInBytes / (sizeof(int16_t) * channelCount));
- LOGD("Opened audio data source, bytes: %ld frames: %d", trackSizeInBytes, numFrames);
+ auto outputBuffer = std::make_unique<float[]>(numSamples);
+ oboe::convertPcm16ToFloat(sourceBuffer, outputBuffer.get(), numSamples);
- return new AAssetDataSource(asset, audioBuffer, numFrames, channelCount);
+ LOGD("Opened audio data source %s, bytes: %ld samples: %d frames: %d", filename, trackSizeInBytes, numSamples, numFrames);
+
+ AAsset_close(asset);
+
+ return new AAssetDataSource(std::move(outputBuffer), numFrames, channelCount);
}
diff --git a/samples/RhythmGame/src/main/cpp/audio/AAssetDataSource.h b/samples/RhythmGame/src/main/cpp/audio/AAssetDataSource.h
index dd64650..c9ee3d1 100644
--- a/samples/RhythmGame/src/main/cpp/audio/AAssetDataSource.h
+++ b/samples/RhythmGame/src/main/cpp/audio/AAssetDataSource.h
@@ -23,31 +23,22 @@
class AAssetDataSource : public DataSource {
public:
-
- ~AAssetDataSource(){
-
- // Note that this will also delete the data at mBuffer
- AAsset_close(mAsset);
- }
-
- int32_t getTotalFrames() const override { return mTotalFrames; } ;
- int32_t getChannelCount() const override { return mChannelCount; } ;
- const int16_t* getData() const override { return mBuffer; };
+ int32_t getTotalFrames() const override { return mTotalFrames; }
+ int32_t getChannelCount() const override { return mChannelCount; }
+ const float* getData() const override { return mBuffer.get(); }
static AAssetDataSource* newFromAssetManager(AAssetManager&, const char *, const int32_t);
private:
- AAssetDataSource(AAsset *asset, const int16_t *data, int32_t frames,
- int32_t channelCount)
- : mAsset(asset)
- , mBuffer(data)
+ AAssetDataSource(std::unique_ptr<float[]> data, int32_t frames,
+ const int32_t channelCount)
+ : mBuffer(std::move(data))
, mTotalFrames(frames)
, mChannelCount(channelCount) {
- };
+ }
- AAsset *mAsset = nullptr;
- const int16_t* mBuffer;
+ const std::unique_ptr<float[]> mBuffer;
const int32_t mTotalFrames;
const int32_t mChannelCount;
diff --git a/samples/RhythmGame/src/main/cpp/audio/DataSource.h b/samples/RhythmGame/src/main/cpp/audio/DataSource.h
index 6694399..377e045 100644
--- a/samples/RhythmGame/src/main/cpp/audio/DataSource.h
+++ b/samples/RhythmGame/src/main/cpp/audio/DataSource.h
@@ -24,7 +24,7 @@
virtual ~DataSource(){};
virtual int32_t getTotalFrames() const = 0;
virtual int32_t getChannelCount() const = 0;
- virtual const int16_t* getData() const = 0;
+ virtual const float* getData() const = 0;
};
diff --git a/samples/RhythmGame/src/main/cpp/audio/Player.cpp b/samples/RhythmGame/src/main/cpp/audio/Player.cpp
index 2ecd616..13646fb 100644
--- a/samples/RhythmGame/src/main/cpp/audio/Player.cpp
+++ b/samples/RhythmGame/src/main/cpp/audio/Player.cpp
@@ -17,7 +17,7 @@
#include "Player.h"
#include "utils/logging.h"
-void Player::renderAudio(int16_t *targetData, int32_t numFrames){
+void Player::renderAudio(float *targetData, int32_t numFrames){
const int32_t channelCount = mSource->getChannelCount();
@@ -25,7 +25,7 @@
int32_t framesToRenderFromData = numFrames;
int32_t totalSourceFrames = mSource->getTotalFrames();
- const int16_t *data = mSource->getData();
+ const float *data = mSource->getData();
// Check whether we're about to reach the end of the recording
if (!mIsLooping && mReadFrameIndex + numFrames >= totalSourceFrames){
@@ -52,7 +52,7 @@
}
}
-void Player::renderSilence(int16_t *start, int32_t numSamples){
+void Player::renderSilence(float *start, int32_t numSamples){
for (int i = 0; i < numSamples; ++i) {
start[i] = 0;
}
diff --git a/samples/RhythmGame/src/main/cpp/audio/Player.h b/samples/RhythmGame/src/main/cpp/audio/Player.h
index 1899ed0..9c7eeba 100644
--- a/samples/RhythmGame/src/main/cpp/audio/Player.h
+++ b/samples/RhythmGame/src/main/cpp/audio/Player.h
@@ -26,10 +26,10 @@
#include <android/asset_manager.h>
-#include "shared/RenderableAudio.h"
+#include "shared/IRenderableAudio.h"
#include "DataSource.h"
-class Player : public RenderableAudio<int16_t>{
+class Player : public IRenderableAudio{
public:
/**
@@ -43,7 +43,7 @@
: mSource(source)
{};
- void renderAudio(int16_t *targetData, int32_t numFrames);
+ void renderAudio(float *targetData, int32_t numFrames);
void resetPlayHead() { mReadFrameIndex = 0; };
void setPlaying(bool isPlaying) { mIsPlaying = isPlaying; resetPlayHead(); };
void setLooping(bool isLooping) { mIsLooping = isLooping; };
@@ -54,7 +54,7 @@
std::atomic<bool> mIsLooping { false };
std::shared_ptr<DataSource> mSource;
- void renderSilence(int16_t*, int32_t);
+ void renderSilence(float*, int32_t);
};
#endif //RHYTHMGAME_SOUNDRECORDING_H
diff --git a/samples/hello-oboe/src/main/cpp/PlayAudioEngine.cpp b/samples/hello-oboe/src/main/cpp/PlayAudioEngine.cpp
index c682c79..65c3adf 100644
--- a/samples/hello-oboe/src/main/cpp/PlayAudioEngine.cpp
+++ b/samples/hello-oboe/src/main/cpp/PlayAudioEngine.cpp
@@ -25,15 +25,13 @@
#include "SoundGenerator.h"
constexpr int64_t kNanosPerMillisecond = 1000000; // Use int64_t to avoid overflows in calculations
-constexpr int32_t kDefaultChannelCount = 2; // Stereo
+
PlayAudioEngine::PlayAudioEngine() {
// Initialize the trace functions, this enables you to output trace statements without
// blocking. See https://developer.android.com/studio/profile/systrace-commandline.html
Trace::initialize();
-
- mChannelCount = kDefaultChannelCount;
createPlaybackStream();
}
diff --git a/samples/hello-oboe/src/main/cpp/PlayAudioEngine.h b/samples/hello-oboe/src/main/cpp/PlayAudioEngine.h
index 9e7922a..08c8ba0 100644
--- a/samples/hello-oboe/src/main/cpp/PlayAudioEngine.h
+++ b/samples/hello-oboe/src/main/cpp/PlayAudioEngine.h
@@ -21,7 +21,7 @@
#include <array>
#include <oboe/Oboe.h>
-#include "shared/MixerMono.h"
+#include "shared/Mixer.h"
#include "SineGenerator.h"
#include "SoundGenerator.h"
@@ -59,7 +59,7 @@
oboe::AudioApi mAudioApi = oboe::AudioApi::Unspecified;
int32_t mPlaybackDeviceId = oboe::kUnspecified;
int32_t mSampleRate;
- int32_t mChannelCount;
+ int32_t mChannelCount = 2; // Stereo
int32_t mFramesPerBurst;
double mCurrentOutputLatencyMillis = 0;
int32_t mBufferSizeSelection = kBufferSizeAutomatic;
diff --git a/samples/shared/MixerMono.h b/samples/shared/Mixer.h
similarity index 69%
rename from samples/shared/MixerMono.h
rename to samples/shared/Mixer.h
index 425095d..52ccf37 100644
--- a/samples/shared/MixerMono.h
+++ b/samples/shared/Mixer.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef SHARED_MIXER_MONO_H
-#define SHARED_MIXER_MONO_H
+#ifndef SHARED_MIXER_H
+#define SHARED_MIXER_H
#include <array>
#include "IRenderableAudio.h"
@@ -24,20 +24,22 @@
constexpr uint8_t kMaxTracks = 100;
/**
- * A Mixer object which sums the output from multiple mono tracks into a single mono output
+ * A Mixer object which sums the output from multiple tracks into a single output. The number of
+ * input channels on each track must match the number of output channels (default 1=mono). This can
+ * be changed by calling `setChannelCount`.
*/
-class MixerMono : public IRenderableAudio {
+class Mixer : public IRenderableAudio {
public:
void renderAudio(float *audioData, int32_t numFrames) {
// Zero out the incoming container array
- memset(audioData, 0, sizeof(float) * numFrames);
+ memset(audioData, 0, sizeof(float) * numFrames * mChannelCount);
for (int i = 0; i < mNextFreeTrackIndex; ++i) {
mTracks[i]->renderAudio(mixingBuffer, numFrames);
- for (int j = 0; j < numFrames; ++j) {
+ for (int j = 0; j < numFrames * mChannelCount; ++j) {
audioData[j] += mixingBuffer[j];
}
}
@@ -47,11 +49,14 @@
mTracks[mNextFreeTrackIndex++] = renderer;
}
+ void setChannelCount(int32_t channelCount){ mChannelCount = channelCount; }
+
private:
float mixingBuffer[kBufferSize];
std::array<std::shared_ptr<IRenderableAudio>, kMaxTracks> mTracks;
uint8_t mNextFreeTrackIndex = 0;
+ int32_t mChannelCount = 1; // Default to mono
};
-#endif //SHARED_MIXER_MONO_H
+#endif //SHARED_MIXER_H