diff --git a/README.md b/README.md index 63d3066..11fe704 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ 3. If you are using **linux**, make sure `gcc`, `g++`, `cmake`, `libssl-dev`, `pkg-config`, `libfontconfig1-dev` are installed 4. Compile and build the binaries with `cargo build --release --bins` 5. Install the binaries if you want to use it anywhere you want. `cargo install --path .` -6. Use `vv` and `vvplay` in other directory. Now you are good to go! +6. Use `vv`, `vvplay` and `vvplay_async` in other directory. Now you are good to go! 7. Download the [8i_dataset](https://plenodb.jpeg.org/pc/8ilabs/) to use and test our tool! ## Commands @@ -468,6 +468,72 @@ Options: -h, --help Print help ``` + +### `vvplay_async` + +Plays a folder of ply files in lexicographical order, leveraging prefetching and playback caching for optimized performance. A window will appear upon running the binary from which you can navigate using your mouse and keyboard. Controls are described further below. + +```shell +Usage: vvplay_async [OPTIONS] + +Arguments: + src can be: + +Options: + -f, --fps + [default: 30] + -x, --camera-x + [default: 0] + -y, --camera-y + [default: 0] + -z, --camera-z + [default: 1.5] + --pitch + [default: 0] + --yaw + [default: -90] + -W, --width + Set the screen width [default: 1600] + -H, --height + Set the screen height [default: 900] + --controls + + -b, --buffer-capacity + buffer capacity in seconds + -m, --metrics + + --abr + [default: quetra] [possible values: quetra, quetra-multiview, mckp] + --decoder + [default: noop] [possible values: noop, draco, tmc2rs] + --multiview + Set this flag if each view is encoded separately, i.e. multiview + --decoder-path + Path to the decoder binary (only for Draco) + --tp + [default: last] [possible values: last, avg, ema, gaema, lpema] + --throughput-alpha + Alpha for throughput prediction. 
Only used for EMA, GAEMA, and LPEMA [default: 0.1] + --vp + [default: last] [possible values: last] + --network-trace + Path to network trace for repeatable simulation. + Network trace is expected to be given in Kbps + --camera-trace + Path to camera trace for repeatable simulation. + Camera trace is expected to be given in + (pos_x, pos_y, pos_z, rot_pitch, rot_yaw, rot_roll). + Rotation is in degrees + --record-camera-trace + Path to record camera trace from the player + --enable-fetcher-optimizations + Enable fetcher optimizations + --bg-color + [default: rgb(255,255,255)] + -h, --help + Print help (see more with '--help') +``` + ### Controls With the main screen focused, diff --git a/src/bin/vvplay_async.rs b/src/bin/vvplay_async.rs index 11a9297..248fbc2 100644 --- a/src/bin/vvplay_async.rs +++ b/src/bin/vvplay_async.rs @@ -109,7 +109,8 @@ fn main() { let (total_frames_tx, total_frames_rx) = tokio::sync::oneshot::channel(); // initialize variables based on args - let buffer_capacity = args.buffer_capacity.unwrap_or(11); + // the real buffer capacity is buffer capacity in seconds * fps + let buffer_capacity = args.buffer_capacity.unwrap_or(11) * args.fps as u64; let simulated_network_trace = args.network_trace.map(|path| NetworkTrace::new(&path)); let simulated_camera_trace = args.camera_trace.map(|path| CameraTrace::new(&path, false)); let record_camera_trace = args @@ -326,7 +327,11 @@ fn main() { }, }; decoder.start().unwrap(); + //t: the unbounded receiver for PointCloud is here, trace this down + // Every time a PointCloud is ready, an unbounded channel is created + // t: For the case of Noop, only one PointCloud will be produced each time; not sure about other decoders let (output_sx, output_rx) = tokio::sync::mpsc::unbounded_channel(); + // Send BufMsg to inform the buffer that the PointCloud is ready _ = to_buf_sx .send(BufMsg::PointCloud(( PCMetadata { diff --git a/src/dash/buffer.rs b/src/dash/buffer.rs index ae5cbe6..e887390 100644 --- a/src/dash/buffer.rs 
+++ b/src/dash/buffer.rs @@ -177,4 +177,19 @@ impl Buffer { pub fn iter_mut(&mut self) -> impl Iterator { self.frames.iter_mut() } + + pub fn clear(&mut self) { + self.frames.clear(); + } + + pub fn is_frame_in_buffer(&self, req: FrameRequest) -> bool { + // This implementation assumes that the frame indices stored in the buffer form a contiguous sequence. + // If the first frame offset is 2 and the last frame offset is 5, then frames 3 and 4 will also exist in the current buffer. + if req.frame_offset >= self.front().unwrap().req.frame_offset + && req.frame_offset <= self.back().unwrap().req.frame_offset + { + return true; + } + return false; + } } diff --git a/src/render/wgpu/reader.rs b/src/render/wgpu/reader.rs index 083b232..4d834d6 100644 --- a/src/render/wgpu/reader.rs +++ b/src/render/wgpu/reader.rs @@ -4,16 +4,16 @@ use crate::pcd::read_pcd_file; use crate::BufMsg; use crate::utils::read_file_to_point_cloud; +use std::collections::VecDeque; use std::fmt::Debug; use std::path::{Path, PathBuf}; use std::sync::mpsc::Receiver; use tokio::sync::mpsc::UnboundedSender; -use super::camera::CameraPosition; -use super::camera::CameraState; +use super::camera::{CameraPosition, CameraState}; use super::renderable::Renderable; -//RenderReader for the original RenderReader +// RenderReader for the original RenderReader used by vvplay pub trait RenderReader { fn start(&mut self) -> Option; fn get_at(&mut self, index: usize) -> Option; @@ -22,7 +22,7 @@ pub trait RenderReader { fn set_len(&mut self, len: usize); fn set_camera_state(&mut self, camera_state: Option); } -//RenderReaderCameraPos for the one with CameraPosition +// RenderReaderCameraPos for the one with CameraPosition pub trait RenderReaderCameraPos { /// Initialize the input reader for our renderer. Returns the first frame, if any. 
fn start(&mut self) -> (Option, Option); @@ -35,6 +35,7 @@ pub trait RenderReaderCameraPos { fn len(&self) -> usize; fn is_empty(&self) -> bool; fn set_len(&mut self, len: usize); + fn set_cache_size(&mut self, size: usize); fn set_camera_state(&mut self, camera_state: Option); } @@ -119,7 +120,7 @@ impl RenderReader> for PointCloudFileReader { impl RenderReaderCameraPos> for PointCloudFileReader { fn start(&mut self) -> (Option, Option>) { - RenderReaderCameraPos::get_at(self, 0, None) + RenderReaderCameraPos::get_at(self, 0, Some(CameraPosition::default())) } fn get_at( @@ -128,7 +129,10 @@ impl RenderReaderCameraPos> for PointCloudFileReader { _camera_pos: Option, ) -> (Option, Option>) { let file_path = self.files.get(index).unwrap(); - (None, read_file_to_point_cloud(file_path)) + ( + Some(CameraPosition::default()), + read_file_to_point_cloud(file_path), + ) } fn len(&self) -> usize { @@ -141,6 +145,8 @@ impl RenderReaderCameraPos> for PointCloudFileReader { fn set_len(&mut self, _len: usize) {} + fn set_cache_size(&mut self, _size: usize) {} + fn set_camera_state(&mut self, _camera_state: Option) {} } @@ -205,7 +211,9 @@ impl RenderReader> for PcdMemoryReader { pub struct PcdAsyncReader { total_frames: u64, rx: Receiver<(FrameRequest, PointCloud)>, - cache: Vec<(u64, PointCloud)>, + //playback cache + cache: VecDeque<(u64, PointCloud)>, + cache_size: usize, tx: UnboundedSender, } @@ -240,7 +248,8 @@ impl PcdAsyncReader { tx, // buffer_size, // cache: HashMap::with_capacity(buffer_size as usize), - cache: vec![], + cache: VecDeque::new(), + cache_size: 10, //default number of size, Use `set_cache_size` to overwrite this value total_frames: 30, // default number of frames. 
Use `set_len` to overwrite this value } } @@ -249,7 +258,7 @@ impl PcdAsyncReader { #[cfg(feature = "dash")] impl RenderReaderCameraPos> for PcdAsyncReader { fn start(&mut self) -> (Option, Option>) { - RenderReaderCameraPos::get_at(self, 0, None) + RenderReaderCameraPos::get_at(self, 0, Some(CameraPosition::default())) } fn get_at( @@ -257,15 +266,12 @@ impl RenderReaderCameraPos> for PcdAsyncReader { index: usize, camera_pos: Option, ) -> (Option, Option>) { - /* println!("----------------------------------"); - println!{"get at request index: {}", index}; - */ + println! {"get at request index: {}", index}; let index = index as u64; if let Some(&ref result) = self.cache.iter().find(|&i| i.0 == index) { - //t: - //it: f the result is already inside the cache, just return - //can improve this find algorithm + //if the result is already inside the cache, just return + //can improve this O(size) find algorithm return (camera_pos, Some(result.1.clone())); } _ = self.tx.send(BufMsg::FrameRequest(FrameRequest { @@ -274,14 +280,14 @@ impl RenderReaderCameraPos> for PcdAsyncReader { camera_pos, })); if let Ok((frame_req, pc)) = self.rx.recv() { - if self.cache.len() >= 10 { - self.cache.pop(); + if self.cache.len() >= self.cache_size { + self.cache.pop_front(); } - println!( - "one frame is added to the point cloud cache: index:{}", - index - ); - self.cache.push((index, pc.clone())); + //println!( + // "one frame is added to the point cloud cache: index:{}", + // index + //); + self.cache.push_back((index, pc.clone())); (frame_req.camera_pos, Some(pc)) } else { (None, None) @@ -300,38 +306,46 @@ impl RenderReaderCameraPos> for PcdAsyncReader { self.total_frames = len as u64; } + fn set_cache_size(&mut self, size: usize) { + self.cache_size = size; + } + fn set_camera_state(&mut self, _camera_state: Option) {} } +// This is used by vvplay_async impl RenderReader> for PcdAsyncReader { fn start(&mut self) -> Option> { RenderReader::get_at(self, 0) } fn get_at(&mut self, 
index: usize) -> Option> { - /* println!("----------------------------------"); - println!{"get at request index: {}", index}; - */ + println! {"get at request index: {}", index}; let index = index as u64; // Everytime a request is made, find it from the playback cache first if let Some(&ref result) = self.cache.iter().find(|&i| i.0 == index) { //can improve this O(n) find algorithm in future + println!("----------------------------------"); + println! {"{} is found in the cache", index}; return Some(result.1.clone()); } // Send request to prepare for the frame _ = self.tx.send(BufMsg::FrameRequest(FrameRequest { object_id: 0, frame_offset: index % self.total_frames, - camera_pos: None, + camera_pos: Some(CameraPosition::default()), })); // Wait for the point cloud to be ready, cache it then return if let Ok((_frame_req, pc)) = self.rx.recv() { if self.cache.len() >= 10 { - self.cache.pop(); + self.cache.pop_front(); } - //println!("one frame is added to the point cloud cache: index:{}", index); - self.cache.push((index, pc.clone())); + println!( + "one frame is added to the point cloud cache: index:{}", + index + ); + self.cache.push_back((index, pc.clone())); Some(pc) } else { None diff --git a/src/render/wgpu/result.txt b/src/render/wgpu/result.txt deleted file mode 100644 index e69de29..0000000 diff --git a/src/result.txt b/src/result.txt deleted file mode 100644 index e69de29..0000000 diff --git a/src/vvplay_async_prefetch/args.rs b/src/vvplay_async_prefetch/args.rs index f07ed21..59d71bd 100644 --- a/src/vvplay_async_prefetch/args.rs +++ b/src/vvplay_async_prefetch/args.rs @@ -6,9 +6,6 @@ use crate::vvplay_async_prefetch::enums::AbrType; use crate::vvplay_async_prefetch::enums::DecoderType; use crate::vvplay_async_prefetch::enums::ThroughputPredictionType; use crate::vvplay_async_prefetch::enums::ViewportPredictionType; -/** - * This file contains all the command line argumentfor vvplay_async_prefetch.rs - */ #[derive(Parser)] pub struct Args { diff --git 
a/src/vvplay_async_prefetch/buffer_manager.rs b/src/vvplay_async_prefetch/buffer_manager.rs index 5c20db8..c3aa97b 100644 --- a/src/vvplay_async_prefetch/buffer_manager.rs +++ b/src/vvplay_async_prefetch/buffer_manager.rs @@ -75,7 +75,8 @@ impl BufferManager { pub fn prefetch_frame(&mut self, camera_pos: Option) { assert!(camera_pos.is_some()); let last_req = FrameRequest { - camera_pos, + //camera_pos, + camera_pos: camera_pos, ..self.buffer.back().unwrap().req }; // The frame prefetched is the next frame of the frame at the back of the buffer @@ -114,46 +115,42 @@ impl BufferManager { // Since we prefetch after a `FetchDone` event, once the buffer is full, we can't prefetch anymore. // So, we set this flag to true once the buffer is full, so that when the frames are consumed and the first channels are discarded, we can prefetch again. let mut is_desired_buffer_level_reached = false; + // last_req is only used for buffer size = 1, and it is updated after the last PointCloud is received. let mut last_req: Option = None; loop { - /* - println!{"---------------------------"}; + println! {"---------------------------"}; println!("buffer: {:?}", &self.buffer); - */ - //wait for message in self.shutdown_recv and self.to_buf_Rx - //if a message is received, match the message with the bufmsg enum + //This logic can be improved but it needs to be thoroughly tested. if !self.buffer.is_full() && !self.buffer.is_empty() { + // This is executed when there is some frame inside the buffer, and the buffer is not full. 
+//println!("---------------------------"); + //println!{"buffer is not full neither empty, prefetching frame"}; self.prefetch_frame(Some(CameraPosition::default())); } else if self.buffer.is_empty() && last_req.is_some() { - //temporary fix: right not just assign default camera position + // If the buffer is currently empty, we continue to prefetch the frame, necessary for buffer size = 1 + //println!{"---------------------------"}; + //println!{"buffer is empty and there is last request, prefetching frame"}; self.prefetch_frame_with_request( - Some(CameraPosition::default()), + Some(last_req.unwrap().camera_pos.unwrap()), last_req.unwrap(), ); } tokio::select! { _ = self.shutdown_recv.changed() => { - /* - println!{"---------------------------"}; - println!{"in vvplay_async:"} - println!{"[buffer mgr] received shutdown signal"}; - */ break; } Some(msg) = self.to_buf_rx.recv() => { match msg { BufMsg::FrameRequest(mut renderer_req) => { - /* - println!{"---------------------------"}; - println!{"[buffer mgr] renderer sent a frame request {:?}", &renderer_req}; - */ - // record camera trace + //println!{"---------------------------"}; + //println!{"[buffer mgr] renderer sent a frame request {:?}", &renderer_req}; if record_camera_trace.is_some() && renderer_req.camera_pos.is_some() { if let Some(ct) = record_camera_trace.as_mut() { ct.add(renderer_req.camera_pos.unwrap()) } } - // If the camera trace is provided, we will use the camera trace to override the camera position for the next frame // else we will feed this into the viewport predictor + // If a camera trace is provided, it overrides the camera position for the whole duration; otherwise the viewport predictor predicts where the user will be in the future + // just use the same frame request for prefetching if camera_trace.is_some() { renderer_req.camera_pos = camera_trace.as_ref().map(|ct| ct.next()); } else { @@ -161,18 +158,29 @@ impl BufferManager { renderer_req.camera_pos = viewport_predictor.predict(); } - // First, attempt to fulfill the request from the buffer. 
- // Check in cache whether it exists + // First, attempt to fulfill the request from the renderer. + // If the requested frame is not inside the buffer, we will clear the buffer. + if !self.buffer.is_empty() && !self.buffer.is_frame_in_buffer(renderer_req) { + self.buffer.clear(); + } else if !self.buffer.is_empty() && self.buffer.is_frame_in_buffer(renderer_req) { + // If the frame requested is inside the buffer, we will pop all previous frames such that the requested frame is at the front. + let num_frames_to_remove = renderer_req.frame_offset - self.buffer.front().unwrap().req.frame_offset; + for _ in 0..num_frames_to_remove { + self.buffer.pop_front(); + } + } + + // When the requested frame is in front of the buffer if !self.buffer.is_empty() && self.buffer.front().unwrap().req.frame_offset == renderer_req.frame_offset { let mut front = self.buffer.pop_front().unwrap(); match front.state { FrameStatus::Fetching | FrameStatus::Decoding => { - // we update frame_to_answer to indicate that we are waiting to send back this data to renderer. + // We update frame_to_answer to indicate that we are waiting to send back this data to renderer. 
self.frame_to_answer = Some(renderer_req); self.buffer.push_front(front); } FrameStatus::Ready(remaining_frames, mut rx) => { - // send to the renderer + // Receive the point cloud from the UnboundedReceiver match rx.recv().await { Some(pc) => { // if camera trace is not provided, we should not send camera_pos back to the renderer @@ -187,7 +195,6 @@ impl BufferManager { self.frame_to_answer = None; front.req.frame_offset += 1; front.state = FrameStatus::Ready(remaining_frames - 1, rx); - //println!("In FrameStatus::Ready, the front is {:?}", front); if remaining_frames > 1 { // we only reinsert it if there are more frames to render self.buffer.push_front(front); @@ -207,7 +214,7 @@ impl BufferManager { } } } else { - // It has not been requested, so we send a request to the fetcher to fetch the data + // If the requested frame is not inside the buffer, we send a request to the fetcher to fetch the data _ = self.buf_in_sx.send(FetchRequest::new(renderer_req, self.buffer.len())); // we update frame_to_answer to indicate that we are waiting to send back this data to renderer. 
@@ -219,10 +226,8 @@ impl BufferManager { } BufMsg::FetchDone(req) => { // upon receiving fetch result, immediately schedule the next fetch request - /* - println!{"---------------------------"}; - println!("the current buffer message is fetch done for {:?}", req); - */ + //println!{"---------------------------"}; + //println!("the current buffer message is fetch done for {:?}", req); self.buffer.update_state(req, FrameStatus::Decoding); if !self.buffer.is_full() { @@ -233,27 +238,30 @@ } } BufMsg::PointCloud((mut metadata, mut rx)) => { - /* - println!{"---------------------------"}; - println!("[buffer mgr] received a point cloud result {:?}", &metadata); - */ + //println!{"---------------------------"}; + //println!("[buffer mgr] received a point cloud result {:?}", &metadata); + // When using PCMetadata::into(), there is no CameraPosition by default let orig_metadata: FrameRequest = metadata.into(); - //if this frame is the one that the renderer is awaiting, do not put it back and send it to the renderer + // Only update the frame state in buffer when the frame is still in the buffer + if !self.buffer.is_empty() && self.buffer.is_frame_in_buffer(orig_metadata) { let mut remaining = self.segment_size as usize; if self.frame_to_answer.is_some() && metadata.frame_offset == self.frame_to_answer.as_ref().unwrap().frame_offset { let pc = rx.recv().await.unwrap(); - // send results to the renderer + // Send results to the renderer _ = self.buf_out_sx.send((self.frame_to_answer.unwrap(), pc)); self.frame_to_answer = None; metadata.frame_offset += 1; remaining -= 1; } - // cache the point cloud if there is still point clouds to render + // Cache the point cloud if there are still point clouds to render self.buffer.update(orig_metadata, metadata.into(), FrameStatus::Ready(remaining, rx)); + // Store the metadata and assign default CameraPosition for the next frame. 
last_req = Some(orig_metadata); + last_req.unwrap().camera_pos = Some(CameraPosition::default()); + } } } }