diff --git a/Cargo.toml b/Cargo.toml
index def60bb..6c43929 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -12,9 +12,6 @@ elided-lifetimes-in-paths = "allow"
 [workspace.lints.clippy]
 all = "deny"
 
-[profile.release]
-lto = "fat"
-
 [workspace.dependencies]
 anyhow = "1.0.75"
 thiserror = "1.0.48"
diff --git a/Justfile b/Justfile
index 6d52c73..106e9af 100644
--- a/Justfile
+++ b/Justfile
@@ -59,6 +59,9 @@ log FILE='./out/aud.log':
 udpdump:
     sudo tcpdump -i lo0 udp port 8080 -v # -X
 
+profile ARGS:
+    CARGO_PROFILE_RELEASE_DEBUG=true cargo flamegraph --root --bin aud -- {{ARGS}}
+
 # run-once setup your development environment for this project
 setup: (_setup_packages)
     cargo install cargo-deny cargo-watch cargo-nextest bat
diff --git a/aud/cli/src/auscope/mod.rs b/aud/cli/src/auscope/mod.rs
index 5ea0a3a..aa98365 100644
--- a/aud/cli/src/auscope/mod.rs
+++ b/aud/cli/src/auscope/mod.rs
@@ -8,14 +8,15 @@ use ratatui::prelude::*;
 struct TerminalApp {
     app: App,
     ui: ui::Ui,
+    fps: f32,
 }
 
 impl TerminalApp {
-    fn new(audio_provider: Box) -> Self {
+    fn new(audio_provider: Box, fps: f32) -> Self {
         let app = App::new(audio_provider);
         let mut ui = ui::Ui::default();
         ui.update_device_names(app.devices());
-        Self { app, ui }
+        Self { app, ui, fps }
     }
 
     fn try_connect_to_audio_input(&mut self, index: usize) -> anyhow::Result<()> {
@@ -53,7 +54,7 @@ impl crate::app::Base for TerminalApp {
     }
 
     fn render(&mut self, f: &mut Frame) {
-        self.ui.render(f, &mut self.app);
+        self.ui.render(f, &mut self.app, self.fps);
     }
 }
 
@@ -119,7 +120,7 @@ pub fn run(
         Box::::default()
     };
 
-    let mut app = TerminalApp::new(audio_provider);
+    let mut app = TerminalApp::new(audio_provider, opts.fps);
 
     let scripts = opts
         .script
diff --git a/aud/cli/src/auscope/ui.rs b/aud/cli/src/auscope/ui.rs
index f5c6e6b..7a3795f 100644
--- a/aud/cli/src/auscope/ui.rs
+++ b/aud/cli/src/auscope/ui.rs
@@ -106,7 +106,7 @@ impl Ui {
         UiEvent::Continue
     }
 
-    pub fn render(&mut self, f: &mut Frame, app: &mut App) {
+    pub fn render(&mut self, f: &mut Frame, app: &mut App, fps: f32) {
         let sections = Layout::default()
             .direction(Direction::Horizontal)
             .margin(1)
@@ -165,9 +165,21 @@ impl Ui {
             None => "".to_owned(),
         };
 
+        const DOWNSAMPLE: usize = 128;
+        const SAMPLE_RATE: usize = 48000;
+
         let audio = app.audio_mut();
-        let num_samples_rendered =
-            widgets::scope::render(f, sections[1], &selected_device_name, audio);
-        let _ = audio.data.drain(0..num_samples_rendered);
+        widgets::scope::render(f, sections[1], &selected_device_name, audio, 128);
+
+        let num_renderable_samples = f.size().width as usize * DOWNSAMPLE;
+        let num_samples_to_purge =
+            ((SAMPLE_RATE as f32 / fps) * audio.num_channels as f32) as usize;
+
+        if audio.data.len() > num_renderable_samples {
+            let num_samples_to_purge =
+                num_samples_to_purge.max(audio.data.len() - num_renderable_samples);
+
+            let _ = audio.data.drain(0..num_samples_to_purge);
+        }
     }
 }
diff --git a/aud/cli/src/ui/widgets/scope.rs b/aud/cli/src/ui/widgets/scope.rs
index 8dcf7a0..671b77a 100644
--- a/aud/cli/src/ui/widgets/scope.rs
+++ b/aud/cli/src/ui/widgets/scope.rs
@@ -1,7 +1,6 @@
-use aud::audio::AudioBuffer;
+use aud::{audio::AudioBuffer, dsp};
 use ratatui::{prelude::*, widgets::*};
 
-const DOWNSAMPLE: usize = 8;
 const COLORS: [Color; 8] = [
     Color::Cyan,
     Color::Yellow,
@@ -13,24 +12,33 @@
     Color::LightRed,
 ];
 
-type ChannelData = Vec<(f64, f64)>;
+type SamplePoint = (f64, f64);
+type SamplePoints = Vec<SamplePoint>;
 
-fn prepare_audio_data(audio: &AudioBuffer) -> Vec<ChannelData> {
-    audio
-        .data
-        .chunks(audio.num_channels.max(1) as usize)
-        .map(|channel: &[f32]| -> ChannelData {
-            channel
-                .iter()
-                .step_by(DOWNSAMPLE)
-                .enumerate()
-                .map(|(i, &sample)| (i as f64, sample as f64))
-                .collect()
-        })
-        .collect()
+fn prepare_audio_data(
+    audio: &AudioBuffer,
+    downsample: usize,
+    num_samples_to_render: usize,
+) -> Vec<SamplePoints> {
+    let num_channels = audio.num_channels.max(1) as usize;
+    let audio = dsp::deinterleave(&audio.data, num_channels);
+    let mut channels = Vec::<SamplePoints>::with_capacity(num_channels);
+    for chan in audio {
+        let data = chan
+            .iter()
+            .take(num_samples_to_render * downsample)
+            .rev()
+            .step_by(num_channels)
+            .step_by(downsample)
+            .enumerate()
+            .map(|(i, &sample)| (i as f64, sample as f64))
+            .collect();
+        channels.push(data);
+    }
+    channels
 }
 
-fn create_datasets(data: &[ChannelData]) -> Vec<Dataset> {
+fn create_datasets(data: &[SamplePoints]) -> Vec<Dataset> {
     data.iter()
         .enumerate()
         .map(|(i, points)| {
@@ -43,8 +51,11 @@ fn create_datasets(data: &[ChannelData]) -> Vec<Dataset> {
         .collect()
 }
 
-pub fn render(f: &mut Frame, area: Rect, title: &str, audio: &AudioBuffer) -> usize {
-    let data = prepare_audio_data(audio);
+pub fn render(f: &mut Frame, area: Rect, title: &str, audio: &AudioBuffer, downsample: usize) {
+    let width = f.size().width as usize;
+    let num_samples_to_render = (audio.num_frames() / downsample).min(width);
+    let data = prepare_audio_data(audio, downsample, num_samples_to_render);
+
     let datasets = create_datasets(&data);
 
     let chart = Chart::new(datasets)
@@ -67,5 +78,4 @@ pub fn render(f: &mut Frame, area: Rect, title: &str, audio: &AudioBuffer) -> us
     );
 
     f.render_widget(chart, area);
-    data.iter().map(Vec::len).sum()
 }
diff --git a/aud/lib/src/apps/auscope/app.rs b/aud/lib/src/apps/auscope/app.rs
index 438dc6a..fad2b71 100644
--- a/aud/lib/src/apps/auscope/app.rs
+++ b/aud/lib/src/apps/auscope/app.rs
@@ -64,7 +64,6 @@ impl App {
         let mut audio = self.receiver.retrieve_audio_buffer();
 
         if !audio.data.is_empty() {
-            log::info!("num received {}", self.num_received);
             self.num_received += 1;
         }
 
diff --git a/aud/lib/src/audio/interface.rs b/aud/lib/src/audio/interface.rs
index c0c48bc..4a50a62 100644
--- a/aud/lib/src/audio/interface.rs
+++ b/aud/lib/src/audio/interface.rs
@@ -47,12 +47,21 @@ pub trait AudioConsuming {
 /// The API favors interleaved data since it is typically
 /// what lower-level APIs use, and it is easier (and more compact)
 /// for transferring or processing the audio data.
-#[derive(Serialize, Deserialize, Default, Debug, Clone, PartialEq)]
+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
 pub struct AudioBuffer {
     pub data: Vec<f32>,
     pub num_channels: u32,
 }
 
+impl Default for AudioBuffer {
+    fn default() -> Self {
+        Self {
+            data: vec![],
+            num_channels: 1,
+        }
+    }
+}
+
 impl AudioBuffer {
     /// Creates a new `AudioBuffer` with a preallocated interleaved buffer.
     ///
@@ -144,7 +153,7 @@ impl AudioBuffer {
     /// Number of "frames" in this interleaved buffer. This is effectively
     /// the same as "number of samples per channel" for this buffer.
     pub fn num_frames(&self) -> usize {
-        self.data.len() / self.num_channels as usize
+        self.data.len() / self.num_channels.max(1) as usize
     }
 }
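
Note, for illustration only (not part of the patch above): a minimal standalone sketch of the per-frame purge budget the new auscope UI applies in aud/cli/src/auscope/ui.rs, assuming the hard-coded 48 kHz SAMPLE_RATE and DOWNSAMPLE of 128 from that hunk. The helper name purge_budget and the concrete values in main are hypothetical, not part of the codebase.

// Drain at least one UI frame's worth of interleaved samples
// (sample_rate / fps, times the channel count), but only once the buffer
// exceeds what the scope can draw (terminal width * downsample).
fn purge_budget(
    buffer_len: usize,
    num_channels: usize,
    sample_rate: f32,
    fps: f32,
    terminal_width: usize,
    downsample: usize,
) -> usize {
    let per_frame = ((sample_rate / fps) * num_channels as f32) as usize;
    let renderable = terminal_width * downsample;
    if buffer_len > renderable {
        per_frame.max(buffer_len - renderable)
    } else {
        0
    }
}

fn main() {
    // e.g. stereo audio at 48 kHz, a 30 fps UI, and a 120-column terminal
    let n = purge_budget(100_000, 2, 48_000.0, 30.0, 120, 128);
    assert_eq!(n, 100_000 - 120 * 128);
    println!("drain {n} samples this frame");
}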