Merge branch 'main' into feat/lod-integration
weitsang authored May 26, 2024
2 parents cf80f05 + 00c6d8a commit 9d840fd
Showing 8 changed files with 176 additions and 70 deletions.
68 changes: 67 additions & 1 deletion README.md
@@ -10,7 +10,7 @@
3. If you are using **Linux**, make sure `gcc`, `g++`, `cmake`, `libssl-dev`, `pkg-config`, and `libfontconfig1-dev` are installed
4. Compile and build the binaries with `cargo build --release --bins`
5. Install the binaries if you want to use them from anywhere: `cargo install --path .`
6. Use `vv` and `vvplay` in other directory. Now you are good to go!
6. Use `vv`, `vvplay`, and `vvplay_async` from any other directory. Now you are good to go!
7. Download the [8i_dataset](https://plenodb.jpeg.org/pc/8ilabs/) to use and test our tool!

## Commands
@@ -508,6 +508,72 @@ Options:
-h, --help Print help
```
### `vvplay_async`
Plays a folder of ply files in lexicographical order, leveraging prefetching and playback caching for better performance. A window will appear upon running the binary, from which you can navigate using your mouse and keyboard. Controls are described further below, and an example invocation is shown after the options.
```shell
Usage: vvplay_async [OPTIONS] <SRC>
Arguments:
<SRC> src can be:
Options:
-f, --fps <FPS>
[default: 30]
-x, --camera-x <CAMERA_X>
[default: 0]
-y, --camera-y <CAMERA_Y>
[default: 0]
-z, --camera-z <CAMERA_Z>
[default: 1.5]
--pitch <CAMERA_PITCH>
[default: 0]
--yaw <CAMERA_YAW>
[default: -90]
-W, --width <WIDTH>
Set the screen width [default: 1600]
-H, --height <HEIGHT>
Set the screen height [default: 900]
--controls
-b, --buffer-capacity <BUFFER_CAPACITY>
buffer capacity in seconds
-m, --metrics <METRICS>
--abr <ABR_TYPE>
[default: quetra] [possible values: quetra, quetra-multiview, mckp]
--decoder <DECODER_TYPE>
[default: noop] [possible values: noop, draco, tmc2rs]
--multiview
Set this flag if each view is encoded separately, i.e. multiview
--decoder-path <DECODER_PATH>
Path to the decoder binary (only for Draco)
--tp <THROUGHPUT_PREDICTION_TYPE>
[default: last] [possible values: last, avg, ema, gaema, lpema]
--throughput-alpha <THROUGHPUT_ALPHA>
Alpha for throughput prediction. Only used for EMA, GAEMA, and LPEMA [default: 0.1]
--vp <VIEWPORT_PREDICTION_TYPE>
[default: last] [possible values: last]
--network-trace <NETWORK_TRACE>
Path to network trace for repeatable simulation.
Network trace is expected to be given in Kbps
--camera-trace <CAMERA_TRACE>
Path to camera trace for repeatable simulation.
Camera trace is expected to be given in
(pos_x, pos_y, pos_z, rot_pitch, rot_yaw, rot_roll).
Rotation is in degrees
--record-camera-trace <RECORD_CAMERA_TRACE>
Path to record camera trace from the player
--enable-fetcher-optimizations
Enable fetcher optimizations
--bg-color <BG_COLOR>
[default: rgb(255,255,255)]
-h, --help
Print help (see more with '--help')
```
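For example, assuming `<SRC>` may be a local folder of ply files (as the description above suggests) and the 8i longdress sequence has been extracted to `./8i/longdress/Ply` (a placeholder path), an invocation might look like this:
```shell
# Play the folder at 30 fps with a 10-second playback buffer
vvplay_async ./8i/longdress/Ply -f 30 -b 10
```
If the frames are Draco-encoded, add `--decoder draco --decoder-path <path-to-decoder-binary>`.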
### Controls
With the main screen focused,
7 changes: 6 additions & 1 deletion src/bin/vvplay_async.rs
@@ -109,7 +109,8 @@ fn main() {
let (total_frames_tx, total_frames_rx) = tokio::sync::oneshot::channel();

// initialize variables based on args
let buffer_capacity = args.buffer_capacity.unwrap_or(11);
// The actual buffer capacity (in frames) is the buffer capacity in seconds multiplied by the fps.
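// For example, `--buffer-capacity 10` at the default 30 fps gives a capacity of 10 * 30 = 300 frames.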
let buffer_capacity = args.buffer_capacity.unwrap_or(11) * args.fps as u64;
let simulated_network_trace = args.network_trace.map(|path| NetworkTrace::new(&path));
let simulated_camera_trace = args.camera_trace.map(|path| CameraTrace::new(&path, false));
let record_camera_trace = args
@@ -326,7 +327,11 @@
},
};
decoder.start().unwrap();
// The unbounded receiver for the decoded PointCloud is created here.
// An unbounded channel is created every time a PointCloud is ready.
// For the Noop decoder, only one PointCloud is produced each time; other decoders may differ.
let (output_sx, output_rx) = tokio::sync::mpsc::unbounded_channel();
// Send BufMsg to inform the buffer that the PointCloud is ready
_ = to_buf_sx
.send(BufMsg::PointCloud((
PCMetadata {
15 changes: 15 additions & 0 deletions src/dash/buffer.rs
@@ -177,4 +177,19 @@ impl Buffer {
pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut RequestStatus> {
self.frames.iter_mut()
}

pub fn clear(&mut self) {
self.frames.clear();
}

pub fn is_frame_in_buffer(&self, req: FrameRequest) -> bool {
// This implementation assumes that the frame offsets stored in the buffer form a contiguous sequence:
// if the first frame offset is 2 and the last frame offset is 5, then frames 3 and 4 are also in the buffer.
// Returns false if the buffer is empty.
match (self.front(), self.back()) {
(Some(front), Some(back)) => {
req.frame_offset >= front.req.frame_offset
&& req.frame_offset <= back.req.frame_offset
}
_ => false,
}
}
}
73 changes: 44 additions & 29 deletions src/render/wgpu/reader.rs
@@ -4,25 +4,26 @@ use crate::pcd::read_pcd_file;
use crate::utils::{read_file_to_point_cloud, read_files_to_point_cloud};
use crate::BufMsg;

use crate::utils::read_file_to_point_cloud;
use std::collections::VecDeque;
use std::fmt::Debug;
use std::path::{Path, PathBuf};
use std::process::exit;
use std::sync::mpsc::Receiver;
use tokio::sync::mpsc::UnboundedSender;

use super::camera::CameraPosition;
use super::camera::CameraState;
use super::camera::{CameraPosition, CameraState};
use super::renderable::Renderable;

//RenderReader for the original RenderReader
// RenderReader is the original reader trait, used by vvplay
pub trait RenderReader<T: Renderable> {
fn start(&mut self) -> Option<T>;
fn get_at(&mut self, index: usize) -> Option<T>;
fn len(&self) -> usize;
fn is_empty(&self) -> bool;
fn set_len(&mut self, len: usize);
}
//RenderReaderCameraPos for the one with CameraPosition
// RenderReaderCameraPos is the variant of the reader that also carries a CameraPosition
pub trait RenderReaderCameraPos<T: Renderable> {
/// Initialize the input reader for our renderer. Returns the first frame, if any.
fn start(&mut self) -> (Option<CameraPosition>, Option<T>);
@@ -35,6 +36,7 @@ pub trait RenderReaderCameraPos<T: Renderable> {
fn len(&self) -> usize;
fn is_empty(&self) -> bool;
fn set_len(&mut self, len: usize);
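/// Set the maximum number of frames kept in the playback cache (may be a no-op for readers without a cache).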
fn set_cache_size(&mut self, size: usize);
fn set_camera_state(&mut self, camera_state: Option<CameraState>);
}

@@ -117,7 +119,7 @@ impl RenderReader<PointCloud<PointXyzRgba>> for PointCloudFileReader {

impl RenderReaderCameraPos<PointCloud<PointXyzRgba>> for PointCloudFileReader {
fn start(&mut self) -> (Option<CameraPosition>, Option<PointCloud<PointXyzRgba>>) {
RenderReaderCameraPos::get_at(self, 0, None)
RenderReaderCameraPos::get_at(self, 0, Some(CameraPosition::default()))
}

fn get_at(
@@ -126,7 +128,10 @@ impl RenderReaderCameraPos<PointCloud<PointXyzRgba>> for PointCloudFileReader {
_camera_pos: Option<CameraPosition>,
) -> (Option<CameraPosition>, Option<PointCloud<PointXyzRgba>>) {
let file_path = self.files.get(index).unwrap();
(None, read_file_to_point_cloud(file_path))
(
Some(CameraPosition::default()),
read_file_to_point_cloud(file_path),
)
}

fn len(&self) -> usize {
Expand All @@ -139,6 +144,8 @@ impl RenderReaderCameraPos<PointCloud<PointXyzRgba>> for PointCloudFileReader {

fn set_len(&mut self, _len: usize) {}

fn set_cache_size(&mut self, _size: usize) {}

fn set_camera_state(&mut self, _camera_state: Option<CameraState>) {}
}

@@ -293,7 +300,9 @@ impl RenderReader<PointCloud<PointXyzRgba>> for LODFileReader {
pub struct PcdAsyncReader {
total_frames: u64,
rx: Receiver<(FrameRequest, PointCloud<PointXyzRgba>)>,
cache: Vec<(u64, PointCloud<PointXyzRgba>)>,
// Playback cache of recently decoded frames, keyed by frame index
cache: VecDeque<(u64, PointCloud<PointXyzRgba>)>,
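// Maximum number of frames kept in the playback cache; the oldest entry is evicted first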
cache_size: usize,
tx: UnboundedSender<BufMsg>,
}

@@ -328,7 +337,8 @@ impl PcdAsyncReader {
tx,
// buffer_size,
// cache: HashMap::with_capacity(buffer_size as usize),
cache: vec![],
cache: VecDeque::new(),
cache_size: 10, // default cache size. Use `set_cache_size` to overwrite this value
total_frames: 30, // default number of frames. Use `set_len` to overwrite this value
}
}
@@ -337,23 +347,20 @@ impl PcdAsyncReader {
#[cfg(feature = "dash")]
impl RenderReaderCameraPos<PointCloud<PointXyzRgba>> for PcdAsyncReader {
fn start(&mut self) -> (Option<CameraPosition>, Option<PointCloud<PointXyzRgba>>) {
RenderReaderCameraPos::get_at(self, 0, None)
RenderReaderCameraPos::get_at(self, 0, Some(CameraPosition::default()))
}

fn get_at(
&mut self,
index: usize,
camera_pos: Option<CameraPosition>,
) -> (Option<CameraPosition>, Option<PointCloud<PointXyzRgba>>) {
/*
println!("----------------------------------");
println!{"get at request index: {}", index};
*/
println! {"get at request index: {}", index};
let index = index as u64;
if let Some(&ref result) = self.cache.iter().find(|&i| i.0 == index) {
//t:
//it: f the result is already inside the cache, just return
//can improve this find algorithm
// If the result is already in the cache, just return it.
// This O(cache_size) linear search could be improved.
return (camera_pos, Some(result.1.clone()));
}
_ = self.tx.send(BufMsg::FrameRequest(FrameRequest {
@@ -362,14 +369,14 @@ impl RenderReaderCameraPos<PointCloud<PointXyzRgba>> for PcdAsyncReader {
camera_pos,
}));
if let Ok((frame_req, pc)) = self.rx.recv() {
if self.cache.len() >= 10 {
self.cache.pop();
if self.cache.len() >= self.cache_size {
self.cache.pop_front();
}
println!(
"one frame is added to the point cloud cache: index:{}",
index
);
self.cache.push((index, pc.clone()));
//println!(
// "one frame is added to the point cloud cache: index:{}",
// index
//);
self.cache.push_back((index, pc.clone()));
(frame_req.camera_pos, Some(pc))
} else {
(None, None)
@@ -388,38 +395,46 @@ impl RenderReaderCameraPos<PointCloud<PointXyzRgba>> for PcdAsyncReader {
self.total_frames = len as u64;
}

fn set_cache_size(&mut self, size: usize) {
self.cache_size = size;
}

fn set_camera_state(&mut self, _camera_state: Option<CameraState>) {}
}

// This is used by vvplay_async
impl RenderReader<PointCloud<PointXyzRgba>> for PcdAsyncReader {
fn start(&mut self) -> Option<PointCloud<PointXyzRgba>> {
RenderReader::get_at(self, 0)
}

fn get_at(&mut self, index: usize) -> Option<PointCloud<PointXyzRgba>> {
/*
println!("----------------------------------");
println!{"get at request index: {}", index};
*/
println! {"get at request index: {}", index};
let index = index as u64;
// Every time a request is made, look it up in the playback cache first
if let Some(&ref result) = self.cache.iter().find(|&i| i.0 == index) {
// This O(n) linear search could be improved in the future.
println!("----------------------------------");
println!("{} is found in the cache", index);
return Some(result.1.clone());
}
// Send request to prepare for the frame
_ = self.tx.send(BufMsg::FrameRequest(FrameRequest {
object_id: 0,
frame_offset: index % self.total_frames,
camera_pos: None,
camera_pos: Some(CameraPosition::default()),
}));
// Wait for the point cloud to be ready, cache it, then return it
if let Ok((_frame_req, pc)) = self.rx.recv() {
if self.cache.len() >= 10 {
self.cache.pop();
self.cache.pop_front();
}
//println!("one frame is added to the point cloud cache: index:{}", index);
self.cache.push((index, pc.clone()));
println!(
"one frame is added to the point cloud cache: index:{}",
index
);
self.cache.push_back((index, pc.clone()));
Some(pc)
} else {
None
Empty file removed src/render/wgpu/result.txt
Empty file.
Empty file removed src/result.txt
Empty file.
3 changes: 0 additions & 3 deletions src/vvplay_async_prefetch/args.rs
@@ -6,9 +6,6 @@ use crate::vvplay_async_prefetch::enums::AbrType;
use crate::vvplay_async_prefetch::enums::DecoderType;
use crate::vvplay_async_prefetch::enums::ThroughputPredictionType;
use crate::vvplay_async_prefetch::enums::ViewportPredictionType;
/**
* This file contains all the command line argumentfor vvplay_async_prefetch.rs
*/

#[derive(Parser)]
pub struct Args {
