Web: update docs, make tonalityHz controllable, not config
parent 94a41f9436
commit 7c13c1dbbd
@@ -32,7 +32,7 @@ namespace _impl {

template<typename Sample=float, class RandomEngine=void>
struct SignalsmithStretch {
static constexpr size_t version[3] = {1, 3, 0};
static constexpr size_t version[3] = {1, 3, 1};

SignalsmithStretch() : randomEngine(std::random_device{}()) {}
SignalsmithStretch(long seed) : randomEngine(seed) {}

@@ -55,6 +55,10 @@
#controls input[type=range], #controls input[type=checkbox] {
grid-column: 2;
font: inherit;
flex-grow: 0.25;
}
#controls input[type=range] {
flex-grow: 1;
}
#controls input[type=number] {
grid-column: 3;
@@ -110,7 +114,7 @@
<input type="range" min="2000" max="20000" step="100" data-key="tonalityHz" class="diagram-red">
<input type="number" min="2000" max="20000" step="1" data-key="tonalityHz" class="diagram-red">
<label>formant</label>
<div>
<div style="display: flex">
<input type="checkbox" data-key="formantCompensation" class="diagram-yellow">
<input type="range" min="50" max="500" step="1" data-key="formantBaseHz" class="diagram-brown">
</div>
@@ -137,6 +141,7 @@
active: false,
rate: 1,
semitones: 0,
tonalityHz: 8000,
formantSemitones: 0,
formantCompensation: false,
formantBaseHz: 200,
@@ -145,7 +150,6 @@
};
let controlValues = Object.assign({}, controlValuesInitial);
let configValuesInitial = {
tonalityHz: 8000,
blockMs: 120,
overlap: 4
};

@@ -6,7 +6,7 @@ This is an official release of the Signalsmith Stretch library for Web Audio, us

Call `SignalsmithStretch(audioContext, ?channelOptions)` from the main thread. This returns a Promise which resolves to an `AudioNode`, with extra methods attached to it. The optional [`channelOptions` object](https://developer.mozilla.org/en-US/docs/Web/API/AudioWorkletNode/AudioWorkletNode#options) can specify the number of inputs/outputs and channels.

It can operate either on live/streaming input (if you configure it to have an input), or on a sample buffer you load into it.
It can operate either on live input (if connected to input audio), or on sample buffers you load into it (which can be added/removed dynamically, for streaming). Either way, you need to call `.start()` (or equivalently `.schedule({active: true})`) for it to start processing audio.
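
For orientation, a minimal sketch of the setup described above (the import path and default export are assumptions, not confirmed by this diff):

```js
import SignalsmithStretch from "signalsmith-stretch"; // assumed entry point

const ctx = new AudioContext();
const stretch = await SignalsmithStretch(ctx); // resolves to an AudioNode with extra methods
stretch.connect(ctx.destination);
stretch.start(); // equivalent to stretch.schedule({active: true})
```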

### `stretch.inputTime`

@@ -21,7 +21,12 @@ This adds a scheduled change, removing any scheduled changes occuring after this
* `input` (seconds): position in input buffer
* `rate` (number): playback rate, e.g. 0.5 == half speed
* `semitones` (number): pitch shift
* `loopStart` / `loopEnd`: sets a section of the input to auto-loop. Disabled if both are set to the same value.
* `tonalityHz` (number): tonality limit (default 8000)
* `formantSemitones` (number) / `formantCompensation` (bool): formant shift/compensation
* `formantBaseHz` (number): rough fundamental used for formant analysis (e.g. 100 for low voice, 400 for high voice), or `0` to attempt pitch-tracking
* `loopStart` (seconds) / `loopEnd` (seconds): sets a section of the input buffer to auto-loop. Disabled if both are set to the same value.

If the node is processing live input (not a buffer) then `input`/`rate`/`loopStart`/`loopEnd` are ignored.
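
As a hedged illustration of the fields above (values are arbitrary; `stretch` is a node created as in the earlier sketch):

```js
stretch.schedule({
  rate: 0.5,                  // half speed
  semitones: 3,               // pitch shift up three semitones
  tonalityHz: 8000,           // tonality limit, now schedulable per this commit
  formantCompensation: true,  // enable formant compensation
  formantBaseHz: 200,         // rough fundamental for formant analysis (0 = attempt pitch-tracking)
  loopStart: 0, loopEnd: 0,   // equal values disable looping
});
```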

### `stretch.start(?when)` / `stretch.stop(?when)`

@@ -31,28 +36,30 @@ Starts/stops playback or processing, immediately or at some future time. These

### `stretch.addBuffers([...])`

This adds buffers to the end of the current input. Buffers should be typed arrays of equal length, one per channel.
This adds buffers to the end of the current input sample buffers. Buffers should be typed arrays of equal length, one per channel.

It can be called multiple times, and the new buffers are inserted after the existing ones, which lets you start playback before the entire audio is loaded. It returns (as a Promise) the new end time for the stored input, in seconds.
It can be called multiple times, and the new buffers are inserted immediately after the existing ones, which lets you start playback before the entire audio is loaded. It returns a Promise for the new sample buffer end time, in seconds.
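
A hedged sketch of feeding decoded audio to the node (the fetch URL is a placeholder; `ctx`/`stretch` as in the earlier sketch):

```js
const response = await fetch("example.wav"); // placeholder URL
const decoded = await ctx.decodeAudioData(await response.arrayBuffer());

// One typed array per channel, all the same length
const channels = [];
for (let c = 0; c < decoded.numberOfChannels; ++c) {
  channels.push(decoded.getChannelData(c));
}
const endSeconds = await stretch.addBuffers(channels); // new end time of the stored input
```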

### `stretch.dropBuffers()`

This drops all input buffers, and resets the input buffer start time to 0.
This drops all input buffers, and resets the input buffer end time to 0.

### `stretch.dropBuffers(toSeconds)`

This drops all input buffers before the given time. It returns (as a Promise) the an object with the current input buffer extent: `{start: ..., end: ...}`.
This drops all input buffers before the given time, but doesn't change the end time. It returns a Promise for an object with the current input buffer extent: `{start: ..., end: ...}`.

This can be useful when processing streams or very long audio files, letting the Stretch node release old buffers once that section of the input will no longer be played back.
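
For example (a hedged sketch; `playedUpToSeconds` is a position you track yourself, not part of the API):

```js
// Release input that playback has already passed, keeping a one-second margin
const {start, end} = await stretch.dropBuffers(playedUpToSeconds - 1);
console.log(`input buffers now cover ${start}s to ${end}s`);
```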

### `stretch.latency()`

Returns the latency when used in "live" mode. This is also how far ahead you might want to schedule things (`output` in `.schedule()`) to give the node enough time to fully compensate for its own latency.
Returns the latency when used in "live input" mode, in seconds. This is also how far ahead you might want to schedule things (`output` in `.schedule()`) to give the node enough time to fully compensate for its own latency.
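
A hedged sketch of using this as a scheduling margin (the extra headroom factor is arbitrary):

```js
const latencySeconds = await stretch.latency();
// Use this as a minimum horizon when choosing `output` times for .schedule()
const safeMarginSeconds = latencySeconds * 1.5;
```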

### `stretch.configure({...})`

Optionally reconfigure, with the following fields:

* `blockMs`: block length in ms (default 120ms)
* `intervalMs`: interval (default is 30ms)
* `splitComputation`: spread computation more evenly across time (default `false`, but worth trying if you're getting dropouts)
* `blockMs`: block length in ms (e.g. 120ms)
* `intervalMs`: interval (default `blockMs/4`)
* `splitComputation`: spread computation more evenly across time (default `false`)

If you set `blockMs` to `0` or `null`, it will check for a `preset` field (with the values `"default"`/`"cheaper"`).
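
A hedged sketch of both configuration paths described above:

```js
// Explicit timings
stretch.configure({
  blockMs: 120,            // block length in ms
  intervalMs: 30,          // defaults to blockMs/4 if omitted
  splitComputation: true,  // spread computation more evenly across time
});

// Or a preset, selected when blockMs is 0/null
stretch.configure({blockMs: null, preset: "cheaper"});
```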

@@ -40,6 +40,7 @@ function registerWorkletProcessor(Module, audioNodeKey) {
output: 0,
rate: 1,
semitones: 0,
tonalityHz: 8000,
formantSemitones: 0,
formantCompensation: false,
formantBaseHz: 0, /* 0 = attempt to detect */
@@ -130,7 +131,7 @@ function registerWorkletProcessor(Module, audioNodeKey) {
},
dropBuffers: toSeconds => {
if (typeof toSeconds !== 'number') {
let buffers = this.audioBuffers.flat(1).map(b => b.buffer);;
let buffers = this.audioBuffers.flat(1).map(b => b.buffer);
this.audioBuffers = [];
this.audioBuffersStart = this.audioBuffersEnd = 0;
return {
@@ -200,7 +201,7 @@ function registerWorkletProcessor(Module, audioNodeKey) {
}

config = {
tonalityHz: 8000
preset: 'default'
};
configure() {
if (this.config.blockMs) {
@@ -252,7 +253,7 @@ function registerWorkletProcessor(Module, audioNodeKey) {
let currentMapSegment = this.timeMap[0];

let wasmModule = this.wasmModule;
wasmModule._setTransposeSemitones(currentMapSegment.semitones, this.config.tonalityHz/sampleRate);
wasmModule._setTransposeSemitones(currentMapSegment.semitones, currentMapSegment.tonalityHz/sampleRate);
wasmModule._setFormantSemitones(currentMapSegment.formantSemitones, currentMapSegment.formantCompensation);
wasmModule._setFormantBase(currentMapSegment.formantBaseHz/sampleRate);

@@ -41,6 +41,7 @@ function registerWorkletProcessor(Module, audioNodeKey) {
output: 0,
rate: 1,
semitones: 0,
tonalityHz: 8000,
formantSemitones: 0,
formantCompensation: false,
formantBaseHz: 0, /* 0 = attempt to detect */
@@ -131,7 +132,7 @@ function registerWorkletProcessor(Module, audioNodeKey) {
},
dropBuffers: toSeconds => {
if (typeof toSeconds !== 'number') {
let buffers = this.audioBuffers.flat(1).map(b => b.buffer);;
let buffers = this.audioBuffers.flat(1).map(b => b.buffer);
this.audioBuffers = [];
this.audioBuffersStart = this.audioBuffersEnd = 0;
return {
@@ -201,7 +202,7 @@ function registerWorkletProcessor(Module, audioNodeKey) {
}

config = {
tonalityHz: 8000
preset: 'default'
};
configure() {
if (this.config.blockMs) {
@@ -253,7 +254,7 @@ function registerWorkletProcessor(Module, audioNodeKey) {
let currentMapSegment = this.timeMap[0];

let wasmModule = this.wasmModule;
wasmModule._setTransposeSemitones(currentMapSegment.semitones, this.config.tonalityHz/sampleRate);
wasmModule._setTransposeSemitones(currentMapSegment.semitones, currentMapSegment.tonalityHz/sampleRate);
wasmModule._setFormantSemitones(currentMapSegment.formantSemitones, currentMapSegment.formantCompensation);
wasmModule._setFormantBase(currentMapSegment.formantBaseHz/sampleRate);

@@ -1,6 +1,6 @@
{
"name": "signalsmith-stretch",
"version": "1.3.0",
"version": "1.3.1",
"description": "JS/WASM release of the Signalsmith Stretch library",
"main": "SignalsmithStretch.mjs",
"exports": {

@@ -21,6 +21,7 @@ function registerWorkletProcessor(Module, audioNodeKey) {
output: 0,
rate: 1,
semitones: 0,
tonalityHz: 8000,
formantSemitones: 0,
formantCompensation: false,
formantBaseHz: 0, /* 0 = attempt to detect */
@@ -111,7 +112,7 @@ function registerWorkletProcessor(Module, audioNodeKey) {
},
dropBuffers: toSeconds => {
if (typeof toSeconds !== 'number') {
let buffers = this.audioBuffers.flat(1).map(b => b.buffer);;
let buffers = this.audioBuffers.flat(1).map(b => b.buffer);
this.audioBuffers = [];
this.audioBuffersStart = this.audioBuffersEnd = 0;
return {
@@ -181,7 +182,7 @@ function registerWorkletProcessor(Module, audioNodeKey) {
}

config = {
tonalityHz: 8000
preset: 'default'
};
configure() {
if (this.config.blockMs) {
@@ -233,7 +234,7 @@ function registerWorkletProcessor(Module, audioNodeKey) {
let currentMapSegment = this.timeMap[0];

let wasmModule = this.wasmModule;
wasmModule._setTransposeSemitones(currentMapSegment.semitones, this.config.tonalityHz/sampleRate);
wasmModule._setTransposeSemitones(currentMapSegment.semitones, currentMapSegment.tonalityHz/sampleRate);
wasmModule._setFormantSemitones(currentMapSegment.formantSemitones, currentMapSegment.formantCompensation);
wasmModule._setFormantBase(currentMapSegment.formantBaseHz/sampleRate);