diff --git a/packages/desktopbridge/webaudiobridge.mjs b/packages/desktopbridge/webaudiobridge.mjs index 895b15460..124b6edf8 100644 --- a/packages/desktopbridge/webaudiobridge.mjs +++ b/packages/desktopbridge/webaudiobridge.mjs @@ -6,8 +6,8 @@ export const desktopAudio = async (value, deadline, hapDuration) => { const ac = getAudioContext(); if (typeof value !== 'object') { throw new Error( - `expected hap.value to be an object, but got "${value}". Hint: append .note() or .s() to the end`, - 'error', + `expected hap.value to be an object, but got "${value}". Hint: append .note() or .s() to the end`, + 'error', ); } @@ -49,29 +49,31 @@ export const desktopAudio = async (value, deadline, hapDuration) => { loop = 0, loopBegin = 0, loopEnd = 1, - attack = 0.001, - decay = 0.005, - sustain = 1, - release = 0.001, - lpattack = 0.0001, - lpdecay = 0.2, - lpsustain = 0.6, - lprelease = 0.2, - lpenv = 0, - hpattack = 0.0001, - hpdecay = 0.2, - hpsustain = 0.6, - hprelease = 0.2, - hpenv = 0, - bpattack = 0.0001, - bpdecay = 0.2, - bpsustain = 0.6, - bprelease = 0.2, - bpenv = 0, + attack, + decay, + sustain, + release, + lpattack, + lpdecay, + lpsustain, + lprelease, + lpenv, + hpattack, + hpdecay, + hpsustain, + hprelease, + hpenv, + bpattack, + bpdecay, + bpsustain, + bprelease, + bpenv, n = 0, freq, + unit, } = value; + value.duration = hapDuration; if (bank && s) { s = `${bank}_${s}`; } @@ -79,15 +81,15 @@ export const desktopAudio = async (value, deadline, hapDuration) => { logger('[sampler] hap has note and freq. 
ignoring note', 'warning'); } let midi = valueToMidi({ freq, note }, 36); - value.duration = hapDuration; - let transpose; - transpose = midi - 36; + let transpose = midi - 36; + let sampleUrl; let baseUrl; - if (s === 'sine' || s === 'square' || s === 'saw' || s === 'sawtooth' || s === 'triangle') { + let path; + const waveForms = new Set(['sine', 'square', 'saw', 'sawtooth', 'triangle']); + if (waveForms.has(s)) { sampleUrl = 'none'; } else { - let path; if (getSound(s).data.baseUrl !== undefined) { baseUrl = getSound(s).data.baseUrl; if (baseUrl === './piano/') { @@ -101,26 +103,28 @@ export const desktopAudio = async (value, deadline, hapDuration) => { let map = getSound(s).data.samples; if (Array.isArray(map)) { sampleUrl = - path !== undefined ? path + map[n % map.length].replace('./', '') : map[n % map.length].replace('./', ''); + path !== undefined ? path + map[n % map.length].replace('./', '') : map[n % map.length].replace('./', ''); } else { const midiDiff = (noteA) => noteToMidi(noteA) - midi; // object format will expect keys as notes const closest = Object.keys(map) - .filter((k) => !k.startsWith('_')) - .reduce( - (closest, key, j) => (!closest || Math.abs(midiDiff(key)) < Math.abs(midiDiff(closest)) ? key : closest), - null, - ); + .filter((k) => !k.startsWith('_')) + .reduce( + (closest, key, j) => (!closest || Math.abs(midiDiff(key)) < Math.abs(midiDiff(closest)) ? key : closest), + null, + ); transpose = -midiDiff(closest); // semitones to repitch sampleUrl = - path !== undefined - ? path + map[closest][n % map[closest].length].replace('./', '') - : map[closest][n % map[closest].length].replace('./', ''); + path !== undefined + ? 
path + map[closest][n % map[closest].length].replace('./', '') + : map[closest][n % map[closest].length].replace('./', ''); } } + if (isNote(note)) { note = noteToMidi(note); } + const playbackRate = 1.0 * Math.pow(2, transpose / 12); if (delay !== 0) { @@ -129,22 +133,26 @@ export const desktopAudio = async (value, deadline, hapDuration) => { delaytime = Math.abs(delaytime); } - let adsr_on = attack !== 0.001 || decay !== 0.05 || sustain !== 1 || release !== 0.01 ? 1 : 0; - const packages = { loop: [loop, loopBegin, loopEnd], delay: [delay, delaytime, delayfeedback], lpf: [cutoff, resonance], hpf: [hcutoff, hresonance], bpf: [bandf, bandq], - adsr: [attack, decay, sustain, release, adsr_on], + adsr: [attack, decay, sustain, release], lpenv: [lpattack, lpdecay, lpsustain, lprelease, lpenv], hpenv: [hpattack, hpdecay, hpsustain, hprelease, hpenv], bpenv: [bpattack, bpdecay, bpsustain, bprelease, bpenv], }; - const dirname = bank + '/' + s + '/'; - + console.log('unit', unit); + let folder; + if (baseUrl !== undefined) { + folder = baseUrl.replace('./', '') + '/' + s + '/'; + } else { + folder = 'misc'; + } + const dirname = folder + '/' + s + '/'; const offset = (t - getAudioContext().currentTime) * 1000; const roundedOffset = Math.round(offset); const messagesfromjs = []; @@ -159,7 +167,7 @@ export const desktopAudio = async (value, deadline, hapDuration) => { velocity: velocity, delay: packages.delay, orbit: orbit, - speed: speed * playbackRate, + speed: speed, begin: begin, end: end, looper: packages.loop, @@ -170,6 +178,8 @@ export const desktopAudio = async (value, deadline, hapDuration) => { n: n, sampleurl: sampleUrl, dirname: dirname, + unit: unit, + playbackrate: playbackRate, }); if (messagesfromjs.length) { @@ -178,14 +188,15 @@ export const desktopAudio = async (value, deadline, hapDuration) => { }); } }; + const hap2value = (hap) => { hap.ensureObjectValue(); return { ...hap.value, velocity: hap.context.velocity }; }; export const 
webaudioDesktopOutputTrigger = (t, hap, ct, cps) => - desktopAudio(hap2value(hap), t - ct, hap.duration / cps, cps); + desktopAudio(hap2value(hap), t - ct, hap.duration / cps, cps); export const webaudioDesktopOutput = (hap, deadline, hapDuration) => - desktopAudio(hap2value(hap), deadline, hapDuration); + desktopAudio(hap2value(hap), deadline, hapDuration); Pattern.prototype.webaudio = function () { return this.onTrigger(webaudioDesktopOutputTrigger); diff --git a/src-tauri/src/superdough.rs b/src-tauri/src/superdough.rs index 09f11a127..bde81ca4b 100644 --- a/src-tauri/src/superdough.rs +++ b/src-tauri/src/superdough.rs @@ -1,7 +1,9 @@ -use web_audio_api::AudioBuffer; -use web_audio_api::context::{AudioContext, BaseAudioContext}; -use web_audio_api::node::{AudioBufferSourceNode, AudioNode, AudioScheduledSourceNode, BiquadFilterNode, BiquadFilterType, DelayNode, DynamicsCompressorNode, GainNode, OscillatorNode, OscillatorType}; -use web_audio_api::node::BiquadFilterType::{Bandpass, Highpass, Lowpass}; +use web_audio_api::{ + context::{AudioContext, BaseAudioContext}, + AudioBuffer, + node::{AudioBufferSourceNode, AudioNode, AudioScheduledSourceNode, BiquadFilterNode, BiquadFilterType, GainNode, OscillatorNode, OscillatorType}, + node::BiquadFilterType::{Bandpass, Highpass, Lowpass} +}; use crate::webaudiobridge::WebAudioMessage; #[derive(Clone, Copy, Debug)] @@ -38,26 +40,25 @@ pub struct BPF { #[derive(Debug, Copy, Clone)] pub struct ADSR { - pub attack: f64, - pub decay: f64, - pub sustain: f32, - pub release: f64, - pub adsr_on: u8, + pub attack: Option<f64>, + pub decay: Option<f64>, + pub sustain: Option<f32>, + pub release: Option<f64>, } #[derive(Debug, Copy, Clone)] pub struct FilterADSR { - pub attack: f64, - pub decay: f64, - pub sustain: f64, - pub release: f64, - pub env: f64, + pub attack: Option<f64>, + pub decay: Option<f64>, + pub sustain: Option<f64>, + pub release: Option<f64>, + pub env: Option<f64>, } -pub trait WebAudioPlayer { +pub trait WebAudioInstrument { fn set_adsr(&mut self, t:
f64, adsr: &ADSR, velocity: f32, duration: f64); - fn play(&mut self, t: f64, message: &WebAudioMessage, duration: f64,); + fn play(&mut self, t: f64, message: &WebAudioMessage, duration: f64); fn set_filters(&mut self, context: &mut AudioContext, message: &WebAudioMessage) -> Vec<BiquadFilterNode>; } @@ -67,7 +68,7 @@ pub struct Synth { } impl Synth { - pub fn new(context: &mut AudioContext, message: &WebAudioMessage) -> Self { + pub fn new(context: &mut AudioContext) -> Self { let oscillator = context.create_oscillator(); let envelope = context.create_gain(); Self { oscillator, envelope } } @@ -88,16 +89,21 @@ impl Synth { } } -impl WebAudioPlayer for Synth { +impl WebAudioInstrument for Synth { fn set_adsr(&mut self, t: f64, adsr: &ADSR, velocity: f32, duration: f64) { + let attack = adsr.attack.unwrap_or(0.001); + let decay = adsr.decay.unwrap_or(0.05); + let sustain = adsr.sustain.unwrap_or(0.6); + let release = adsr.release.unwrap_or(0.01); self.envelope.gain() .set_value_at_time(0.0, t) - .linear_ramp_to_value_at_time(velocity, t + adsr.attack) - .linear_ramp_to_value_at_time((adsr.sustain + 0.001) * velocity, t + adsr.attack + adsr.decay) - .set_value_at_time(adsr.sustain * velocity, t + duration) - .linear_ramp_to_value_at_time(0.0, t + duration + adsr.release); + .linear_ramp_to_value_at_time(velocity, t + attack) + .exponential_ramp_to_value_at_time((sustain + 0.0001) * velocity, t + attack + decay) + // .set_value_at_time((sustain + 0.00001) * velocity, t + duration) + .exponential_ramp_to_value_at_time(0.000001, t + duration + release); } + fn play(&mut self, t: f64, message: &WebAudioMessage, release: f64) { self.oscillator.start(); self.oscillator.stop_at(t + message.duration + release); @@ -142,7 +148,7 @@ pub struct Sampler { } impl Sampler { - pub fn new(context: &mut AudioContext, message: &WebAudioMessage, audio_buffer: AudioBuffer) -> Self { + pub fn new(context: &mut AudioContext, audio_buffer: AudioBuffer) -> Self { let mut sample =
context.create_buffer_source(); sample.set_buffer(audio_buffer); let envelope = context.create_gain(); @@ -150,14 +156,18 @@ impl Sampler { } } -impl WebAudioPlayer for Sampler { +impl WebAudioInstrument for Sampler { fn set_adsr(&mut self, t: f64, adsr: &ADSR, velocity: f32, duration: f64) { + let attack = adsr.attack.unwrap_or(0.001); + let decay = adsr.decay.unwrap_or(0.001); + let sustain = adsr.sustain.unwrap_or(1.0); + let release = adsr.release.unwrap_or(0.01); self.envelope.gain() .set_value_at_time(0.0, t) - .linear_ramp_to_value_at_time(velocity, t + adsr.attack) - .linear_ramp_to_value_at_time((adsr.sustain + 0.00001) * velocity, t + adsr.attack + adsr.decay) - .set_value_at_time(adsr.sustain * velocity, t + duration) - .linear_ramp_to_value_at_time(0.0, t + duration + adsr.release); + .linear_ramp_to_value_at_time(velocity, t + attack) + .linear_ramp_to_value_at_time((sustain + 0.00001) * velocity, t + attack + decay) + .set_value_at_time((sustain + 0.00001) * velocity, t + duration) + .linear_ramp_to_value_at_time(0.0, t + duration + release); } fn play(&mut self, t: f64, message: &WebAudioMessage, release: f64) { @@ -165,7 +175,7 @@ impl WebAudioPlayer for Sampler { let (start_at, stop_at) = if message.speed < 0.0 { (buffer_duration, t + message.duration + 0.2) } else { - (message.begin * buffer_duration, t + message.duration + message.adsr.release) + (message.begin * buffer_duration, t + message.duration + message.adsr.release.unwrap_or(0.01)) }; if message.looper.is_loop > 0 { self.sample.set_loop(true); @@ -175,7 +185,7 @@ impl WebAudioPlayer for Sampler { t, self.sample.loop_start(), ); - self.sample.stop_at(t + message.duration + message.adsr.release); + self.sample.stop_at(t + message.duration + message.adsr.release.unwrap_or(0.01)); } else { self.sample.start_at_with_offset( t, @@ -232,16 +242,16 @@ pub fn apply_filter_adsr(filter_node: &BiquadFilterNode, message: &WebAudioMessa _ => 8000.0, }; - let offset = env.env * 0.5; + let offset = 
env.env.unwrap_or(1.0) * 0.5; let min = (2f32.powf(-offset as f32) * freq).clamp(0.0, 20000.0); - let max = (2f32.powf((env.env - offset) as f32) * freq).clamp(0.0, 20000.0); + let max = (2f32.powf((env.env.unwrap_or(1.0) - offset) as f32) * freq).clamp(0.0, 20000.0); let range = max - min; let peak = min + range; - let sustain_level = min + env.sustain as f32 * range; + let sustain_level = min + env.sustain.unwrap_or(1.0) as f32 * range; - filter_node.frequency().set_value_at_time(min, now); - filter_node.frequency().linear_ramp_to_value_at_time(peak, now + env.attack); - filter_node.frequency().linear_ramp_to_value_at_time(sustain_level, now + env.attack + env.decay); - filter_node.frequency().set_value_at_time(sustain_level, now + message.duration); - filter_node.frequency().linear_ramp_to_value_at_time(min, now + message.duration + env.release.max(0.1)); + filter_node.frequency().set_value_at_time(min, now) + .linear_ramp_to_value_at_time(peak, now + env.attack.unwrap_or(0.01)) + .linear_ramp_to_value_at_time(sustain_level, now + env.attack.unwrap_or(0.01) + env.decay.unwrap_or(0.01)) + // .set_value_at_time(sustain_level, now + message.duration) + .linear_ramp_to_value_at_time(min, now + message.duration + env.release.unwrap_or(0.01)); } diff --git a/src-tauri/src/webaudiobridge.rs b/src-tauri/src/webaudiobridge.rs index 340625df4..bcabeb2a8 100644 --- a/src-tauri/src/webaudiobridge.rs +++ b/src-tauri/src/webaudiobridge.rs @@ -1,22 +1,25 @@ use std::{ sync::Arc, time::Duration, + fs::File, + path::Path, + time::Instant, }; -use std::fs::File; -use std::path::Path; -use std::sync::mpsc::{RecvError, SendError}; -use std::time::{Instant, SystemTime}; use mini_moka::sync::Cache; use reqwest::Url; - -use tokio::{fs, sync::{mpsc, Mutex}}; +use tokio::{ + fs, + sync::{mpsc, Mutex}, + io::AsyncWriteExt, +}; use serde::Deserialize; -use tokio::io::AsyncWriteExt; -use web_audio_api::{AudioBuffer, context::{AudioContext, AudioContextLatencyCategory, 
AudioContextOptions}}; -use web_audio_api::context::BaseAudioContext; -use web_audio_api::node::{AudioBufferSourceNode, AudioNode, AudioScheduledSourceNode, BiquadFilterNode, DelayNode, DynamicsCompressorNode, GainNode, OscillatorNode, OscillatorType}; -use web_audio_api::node::BiquadFilterType::{Bandpass, Highpass, Lowpass}; -use crate::superdough::{ADSR, apply_filter_adsr, BPF, Delay, FilterADSR, HPF, Loop, LPF, Sampler, Synth, WebAudioPlayer}; +use web_audio_api::{ + context::BaseAudioContext, + AudioBuffer, + context::{AudioContext, AudioContextLatencyCategory, AudioContextOptions}, + node::AudioNode, +}; +use crate::superdough::{ADSR, apply_filter_adsr, BPF, Delay, FilterADSR, HPF, Loop, LPF, Sampler, Synth, WebAudioInstrument}; #[derive(Debug, Clone)] @@ -43,6 +46,8 @@ pub struct WebAudioMessage { pub n: usize, pub sampleurl: String, pub dirname: String, + pub unit: Option<String>, + pub playbackrate: f32, } pub struct AsyncInputTransmitWebAudio { @@ -111,7 +116,7 @@ pub fn init( latency_hint, ..AudioContextOptions::default() }); - let mut temp_src = audio_context.create_buffer_source(); + let _ = audio_context.create_buffer_source(); let compressor = audio_context.create_dynamics_compressor(); compressor.threshold().set_value(-50.0); compressor.connect(&audio_context.destination()); @@ -140,7 +145,7 @@ pub fn init( feedback.gain().set_value(message.delay.feedback); pre_gain.gain().set_value_at_time(message.delay.wet, t + message.delay.delay_time as f64); - let mut synth = Synth::new(&mut audio_context, &message); + let mut synth = Synth::new(&mut audio_context); synth.set_frequency(&message.note); synth.set_waveform(&message.waveform); let filters = synth.set_filters(&mut audio_context, &message); @@ -148,11 +153,11 @@ if message.delay.wet > 0.0 { synth.envelope.connect(&input); } synth.envelope.connect(&compressor); - synth.play(t, &message, message.adsr.release); + synth.play(t, &message, message.adsr.release.unwrap_or(0.001)); synth.set_adsr(t, &message.adsr, message.velocity,
message.duration); - if message.lpenv.env > 0.0 || message.hpenv.env > 0.0 || message.bpenv.env > 0.0 { - for f in filters { - apply_filter_adsr(&f, &message, &f.type_(), t); + if is_filter_envelope_on(&message.lpenv) || is_filter_envelope_on(&message.hpenv) || is_filter_envelope_on(&message.bpenv) { + for f in &filters { + apply_filter_adsr(f, &message, &f.type_(), t); } } } @@ -162,7 +167,7 @@ pub fn init( .and_then(Iterator::last) .and_then(|name| if name.is_empty() { None } else { Some(name) }) .unwrap_or("tmp.ben"); - let file_path = format!("/Users/vasiliymilovidov/samples/{}{}", message.dirname, filename); + let file_path = format!("samples/{}{}", message.dirname, filename); let file_path_clone = file_path.clone(); tokio::spawn(async move { @@ -186,13 +191,20 @@ pub fn init( feedback.gain().set_value(message.delay.feedback); pre_gain.gain().set_value_at_time(message.delay.wet, t + message.delay.delay_time as f64); let audio_buffer_duration = audio_buffer.duration(); - let mut sampler = Sampler::new(&mut audio_context, &message, audio_buffer); - sampler.sample.playback_rate().set_value(message.speed); + let mut sampler = Sampler::new(&mut audio_context, audio_buffer); + match message.unit { + Some(_) => { + sampler.sample.playback_rate().set_value(message.speed * audio_buffer_duration as f32 * 1.0); + } + _ => { + sampler.sample.playback_rate().set_value(message.speed * message.playbackrate); + } + } sampler.set_adsr(t, &message.adsr, message.velocity, message.duration); let filters = sampler.set_filters(&mut audio_context, &message); - if message.lpenv.env > 0.0 || message.hpenv.env > 0.0 || message.bpenv.env > 0.0 { - for f in filters { - apply_filter_adsr(&f, &message, &f.type_(), t); + if is_filter_envelope_on(&message.lpenv) || is_filter_envelope_on(&message.hpenv) || is_filter_envelope_on(&message.bpenv) { + for f in &filters { + apply_filter_adsr(f, &message, &f.type_(), t); } } if message.delay.wet > 0.0 { @@ -241,13 +253,15 @@ pub struct 
MessageFromJS { begin: f64, end: f64, looper: (u8, f64, f64), - adsr: (f64, f64, f32, f64, u8), - lpenv: (f64, f64, f64, f64, f64), - hpenv: (f64, f64, f64, f64, f64), - bpenv: (f64, f64, f64, f64, f64), + adsr: (Option<f64>, Option<f64>, Option<f32>, Option<f64>), + lpenv: (Option<f64>, Option<f64>, Option<f64>, Option<f64>, Option<f64>), + hpenv: (Option<f64>, Option<f64>, Option<f64>, Option<f64>, Option<f64>), + bpenv: (Option<f64>, Option<f64>, Option<f64>, Option<f64>, Option<f64>), n: usize, sampleurl: String, dirname: String, + unit: Option<String>, + playbackrate: f32, } // Called from JS @@ -298,7 +312,6 @@ pub async fn sendwebaudio( decay: m.adsr.1, sustain: m.adsr.2, release: m.adsr.3, - adsr_on: m.adsr.4, }, lpenv: FilterADSR { attack: m.lpenv.0, @@ -324,6 +337,8 @@ pub async fn sendwebaudio( n: m.n, sampleurl: m.sampleurl, dirname: m.dirname, + unit: m.unit, + playbackrate: m.playbackrate, }; messages_to_process.push(message_to_process); } @@ -338,3 +353,7 @@ async fn create_file_and_dirs(path: &Path) -> fs::File { let file = fs::File::create(path).await.unwrap(); file } + +fn is_filter_envelope_on(adsr: &FilterADSR) -> bool { + adsr.env.is_some() || adsr.attack.is_some() || adsr.decay.is_some() || adsr.sustain.is_some() || adsr.release.is_some() +} \ No newline at end of file