Skip to content
Merged
Show file tree
Hide file tree
Changes from 9 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -131,16 +131,14 @@ const AudioVisualizer: React.FC = () => {
}
/>
<View
style={{ flex: 0.5, justifyContent: 'center', alignItems: 'center' }}
>
style={{ flex: 0.5, justifyContent: 'center', alignItems: 'center' }}>
{isLoading && <ActivityIndicator color="#FFFFFF" />}
<View
style={{
justifyContent: 'center',
flexDirection: 'row',
marginTop: layout.spacing * 2,
}}
>
}}>
<Button
onPress={handlePlayPause}
title={isPlaying ? 'Pause' : 'Play'}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -39,8 +39,8 @@ function App() {
const audioBufferQueue = audioContextRef.current.createBufferQueueSource();
const buffer1 = ...; // Load your audio buffer here
const buffer2 = ...; // Load another audio buffer if needed
audioBufferQueue.enqueueBuffer(buffer1, false);
audioBufferQueue.enqueueBuffer(buffer2, true); // Last buffer should be marked as the last one
audioBufferQueue.enqueueBuffer(buffer1);
audioBufferQueue.enqueueBuffer(buffer2);
audioBufferQueue.connect(audioContextRef.current.destination);
audioBufferQueue.start(audioContextRef.current.currentTime);
}
Expand Down
35 changes: 34 additions & 1 deletion packages/audiodocs/docs/sources/audio-buffer-source-node.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,6 @@ import { useGainAdsrPlayground } from '@site/src/components/InteractivePlaygroun

# AudioBufferSourceNode

The `AudioBufferSourceNode` is an [`AudioBufferBaseSourceNode`](/docs/sources/audio-buffer-base-source-node) which represents audio source with in-memory audio data, stored in
The `AudioBufferSourceNode` is an [`AudioBufferBaseSourceNode`](/docs/sources/audio-buffer-base-source-node) which represents an audio source with in-memory audio data, stored in
[`AudioBuffer`](/docs/sources/audio-buffer). You can use it for audio playback, including standard pause and resume functionalities.

Expand Down Expand Up @@ -40,6 +39,12 @@ interface AudioBufferBaseSourceNodeOptions {
}
```

:::caution
The pitch correction algorithm introduces processing latency.
As a result, when scheduling precise playback times, you should start input samples slightly ahead of the intended playback time.
For more details, see [getLatency()](/docs/sources/audio-buffer-source-node#getlatency).
:::

## Example

```tsx
Expand Down Expand Up @@ -100,6 +105,34 @@ Schedules the `AudioBufferSourceNode` to start playback of audio data contained

#### Returns `undefined`.


### `getLatency`

Returns the playback latency introduced by the pitch correction algorithm, in seconds.
When scheduling precise playback times, start input samples this many seconds earlier to compensate for processing delay.
Typically around `0.06s` when pitch correction is enabled, and `0` otherwise.

If you plan to play multiple buffers one after another, consider using [`AudioBufferQueueSourceNode`](/docs/sources/audio-buffer-queue-source-node).


#### Returns `number`.

<details>
<summary>Example usage</summary>
```tsx
const source = audioContext.createBufferSource({ pitchCorrection: true });
source.buffer = buffer;
source.connect(audioContext.destination);

const latency = source.getLatency();

// Schedule playback slightly earlier to compensate for latency
const startTime = audioContext.currentTime + 1.0; // play in 1 second
source.start(startTime - latency);
```
</details>


## Events

### `onLoopEnded`
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,9 @@ AudioBufferSourceNodeHostObject::AudioBufferSourceNodeHostObject(

addFunctions(
JSI_EXPORT_FUNCTION(AudioBufferSourceNodeHostObject, start),
JSI_EXPORT_FUNCTION(AudioBufferSourceNodeHostObject, setBuffer));
JSI_EXPORT_FUNCTION(AudioBufferSourceNodeHostObject, setBuffer),
JSI_EXPORT_FUNCTION(AudioBufferSourceNodeHostObject, getInputLatency),
JSI_EXPORT_FUNCTION(AudioBufferSourceNodeHostObject, getOutputLatency));
}

AudioBufferSourceNodeHostObject::~AudioBufferSourceNodeHostObject() {
Expand Down Expand Up @@ -149,4 +151,18 @@ JSI_HOST_FUNCTION_IMPL(AudioBufferSourceNodeHostObject, setBuffer) {
return jsi::Value::undefined();
}

JSI_HOST_FUNCTION_IMPL(AudioBufferSourceNodeHostObject, getInputLatency) {
  // Bridge method: forwards to the underlying AudioBufferSourceNode and
  // returns its input latency (a double, implicitly wrapped as a JS number).
  const auto source = std::static_pointer_cast<AudioBufferSourceNode>(node_);
  return source->getInputLatency();
}

JSI_HOST_FUNCTION_IMPL(AudioBufferSourceNodeHostObject, getOutputLatency) {
  // Bridge method: forwards to the underlying AudioBufferSourceNode and
  // returns its output latency (a double, implicitly wrapped as a JS number).
  const auto source = std::static_pointer_cast<AudioBufferSourceNode>(node_);
  return source->getOutputLatency();
}

} // namespace audioapi
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,8 @@ class AudioBufferSourceNodeHostObject

JSI_HOST_FUNCTION_DECL(start);
JSI_HOST_FUNCTION_DECL(setBuffer);
JSI_HOST_FUNCTION_DECL(getInputLatency);
JSI_HOST_FUNCTION_DECL(getOutputLatency);
};

} // namespace audioapi
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,12 @@ AudioBufferQueueSourceNode::AudioBufferQueueSourceNode(
buffers_ = {};
stretch_->presetDefault(channelCount_, context_->getSampleRate());

if (pitchCorrection) {
// If pitch correction is enabled, add extra frames at the end
// to compensate for processing latency.
addExtraTailFrames_ = true;
}

isInitialized_ = true;
}

Expand Down Expand Up @@ -161,7 +167,19 @@ void AudioBufferQueueSourceNode::processWithoutInterpolation(
context_->audioEventHandlerRegistry_->invokeHandlerWithEventBody(
"ended", onEndedCallbackId_, body);

if (buffers_.empty()) {
if (buffers_.empty() && addExtraTailFrames_) {
int extraTailFrames = static_cast<int>(
stretch_->inputLatency() + stretch_->outputLatency());
auto tailBuffer = std::make_shared<AudioBuffer>(
channelCount_, extraTailFrames, context_->getSampleRate());

tailBuffer->bus_->zero();

buffers_.emplace(bufferId_, tailBuffer);
bufferId_++;

addExtraTailFrames_ = false;
} else if (buffers_.empty()) {
processingBus->zero(writeIndex, framesLeft);
readIndex = 0;

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@ class AudioBufferQueueSourceNode : public AudioBufferBaseSourceNode {
size_t bufferId_ = 0;

bool isPaused_ = false;
bool addExtraTailFrames_ = false;

double playedBuffersDuration_ = 0;

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,22 @@ std::shared_ptr<AudioBuffer> AudioBufferSourceNode::getBuffer() const {
return buffer_;
}

double AudioBufferSourceNode::getInputLatency() const {
  // Input latency of the time-stretch processor, converted from frames to
  // seconds. Zero whenever pitch correction is disabled (the stretcher is
  // not in the signal path then).
  if (!pitchCorrection_) {
    return 0;
  }
  return static_cast<double>(stretch_->inputLatency()) /
      context_->getSampleRate();
}

double AudioBufferSourceNode::getOutputLatency() const {
  // Output latency of the time-stretch processor, converted from frames to
  // seconds. Zero whenever pitch correction is disabled (the stretcher is
  // not in the signal path then).
  if (!pitchCorrection_) {
    return 0;
  }
  return static_cast<double>(stretch_->outputLatency()) /
      context_->getSampleRate();
}

// Enables or disables looping of the source buffer (stores the flag;
// consumers of loop_ are outside this chunk).
void AudioBufferSourceNode::setLoop(bool loop) {
  loop_ = loop;
}
Expand Down Expand Up @@ -82,17 +98,30 @@ void AudioBufferSourceNode::setBuffer(
}

buffer_ = buffer;
alignedBus_ = std::make_shared<AudioBus>(*buffer_->bus_);
channelCount_ = buffer_->getNumberOfChannels();

stretch_->presetDefault(channelCount_, buffer_->getSampleRate());

if (pitchCorrection_) {
int extraTailFrames = static_cast<int>(
(getInputLatency() + getOutputLatency()) * context_->getSampleRate());
size_t totalSize = buffer_->getLength() + extraTailFrames;

alignedBus_ = std::make_shared<AudioBus>(
totalSize, channelCount_, buffer_->getSampleRate());
alignedBus_->copy(buffer_->bus_.get(), 0, 0, buffer_->getLength());

alignedBus_->zero(buffer_->getLength(), extraTailFrames);
} else {
alignedBus_ = std::make_shared<AudioBus>(*buffer_->bus_);
}

audioBus_ = std::make_shared<AudioBus>(
RENDER_QUANTUM_SIZE, channelCount_, context_->getSampleRate());
playbackRateBus_ = std::make_shared<AudioBus>(
RENDER_QUANTUM_SIZE * 3, channelCount_, context_->getSampleRate());

loopEnd_ = buffer_->getDuration();

stretch_->presetDefault(channelCount_, buffer_->getSampleRate());
}

void AudioBufferSourceNode::start(double when, double offset, double duration) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -4,9 +4,9 @@
#include <audioapi/core/sources/AudioBufferBaseSourceNode.h>
#include <audioapi/libs/signalsmith-stretch/signalsmith-stretch.h>

#include <memory>
#include <cstddef>
#include <algorithm>
#include <cstddef>
#include <memory>
#include <string>

namespace audioapi {
Expand All @@ -24,6 +24,8 @@ class AudioBufferSourceNode : public AudioBufferBaseSourceNode {
[[nodiscard]] double getLoopStart() const;
[[nodiscard]] double getLoopEnd() const;
[[nodiscard]] std::shared_ptr<AudioBuffer> getBuffer() const;
[[nodiscard]] double getInputLatency() const;
[[nodiscard]] double getOutputLatency() const;

void setLoop(bool loop);
void setLoopSkip(bool loopSkip);
Expand All @@ -38,7 +40,7 @@ class AudioBufferSourceNode : public AudioBufferBaseSourceNode {
void setOnLoopEndedCallbackId(uint64_t callbackId);

protected:
std::shared_ptr<AudioBus> processNode(const std::shared_ptr<AudioBus>& processingBus, int framesToProcess) override;
std::shared_ptr<AudioBus> processNode(const std::shared_ptr<AudioBus> &processingBus, int framesToProcess) override;
double getCurrentPosition() const override;

private:
Expand All @@ -56,13 +58,13 @@ class AudioBufferSourceNode : public AudioBufferBaseSourceNode {
void sendOnLoopEndedEvent();

void processWithoutInterpolation(
const std::shared_ptr<AudioBus>& processingBus,
const std::shared_ptr<AudioBus> &processingBus,
size_t startOffset,
size_t offsetLength,
float playbackRate) override;

void processWithInterpolation(
const std::shared_ptr<AudioBus>& processingBus,
const std::shared_ptr<AudioBus> &processingBus,
size_t startOffset,
size_t offsetLength,
float playbackRate) override;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -118,4 +118,12 @@ export default class AudioBufferSourceNode extends AudioBufferBaseSourceNode {
(this.node as IAudioBufferSourceNode).onLoopEnded =
this.onLoopEndedSubscription.subscriptionId;
}

public getLatency(): number {
  // Total scheduling latency in seconds: the node's output latency plus its
  // input latency scaled by the current playback rate.
  const source = this.node as IAudioBufferSourceNode;
  return (
    source.getOutputLatency() +
    source.getInputLatency() * source.playbackRate.value
  );
}
}
2 changes: 2 additions & 0 deletions packages/react-native-audio-api/src/interfaces.ts
Original file line number Diff line number Diff line change
Expand Up @@ -175,6 +175,8 @@ export interface IAudioBufferSourceNode extends IAudioBufferBaseSourceNode {

start: (when?: number, offset?: number, duration?: number) => void;
setBuffer: (audioBuffer: IAudioBuffer | null) => void;
getInputLatency: () => number;
getOutputLatency: () => number;

// passing subscriptionId(uint_64 in cpp, string in js) to the cpp
onLoopEnded: string;
Expand Down