From c4f87f872d82d1403bd36daf22fdcac0ec335932 Mon Sep 17 00:00:00 2001 From: Matt Keeter Date: Fri, 22 Sep 2023 18:06:03 -0700 Subject: [PATCH 1/3] Remove unused async --- downstairs/src/lib.rs | 5 +- downstairs/src/repair.rs | 14 +-- integration_tests/src/lib.rs | 7 +- pantry/src/lib.rs | 3 +- pantry/src/main.rs | 5 +- pantry/src/server.rs | 2 +- upstairs/src/lib.rs | 19 ++-- upstairs/src/live_repair.rs | 184 ++++++++++++++--------------------- upstairs/src/volume.rs | 58 ++++------- 9 files changed, 119 insertions(+), 178 deletions(-) diff --git a/downstairs/src/lib.rs b/downstairs/src/lib.rs index fa308edfa..d7d453064 100644 --- a/downstairs/src/lib.rs +++ b/downstairs/src/lib.rs @@ -1,6 +1,7 @@ // Copyright 2023 Oxide Computer Company #![cfg_attr(usdt_need_asm, feature(asm))] #![cfg_attr(all(target_os = "macos", usdt_need_asm_sym), feature(asm_sym))] +#![warn(clippy::unused_async)] use futures::executor; use futures::lock::{Mutex, MutexGuard}; @@ -1981,7 +1982,7 @@ impl Downstairs { ) -> Result> { let job = { let mut work = self.work_lock(upstairs_connection).await?; - let job = work.get_ready_job(job_id).await; + let job = work.get_ready_job(job_id); // `promote_to_active` can clear out the Work struct for this // UpstairsConnection, but the tasks can still be working on @@ -2913,7 +2914,7 @@ impl Work { } // Return a job that's ready to have the work done - async fn get_ready_job(&mut self, job_id: JobId) -> Option { + fn get_ready_job(&mut self, job_id: JobId) -> Option { match self.active.get(&job_id) { Some(job) => { assert_eq!(job.state, WorkState::InProgress); diff --git a/downstairs/src/repair.rs b/downstairs/src/repair.rs index 18efd3ea3..ac90cc7b2 100644 --- a/downstairs/src/repair.rs +++ b/downstairs/src/repair.rs @@ -211,7 +211,7 @@ async fn get_files_for_extent( format!("Expected {:?} to be a directory", extent_dir), )) } else { - let files = extent_file_list(extent_dir, eid).await?; + let files = extent_file_list(extent_dir, eid)?; Ok(HttpResponseOk(files)) } } @@ -221,7 +221,7 @@ async fn get_files_for_extent( * that correspond to the given extent. Return an error if any * of the required files are missing. */ -async fn extent_file_list( +fn extent_file_list( extent_dir: PathBuf, eid: u32, ) -> Result, HttpError> { @@ -281,7 +281,7 @@ mod test { // Determine the directory and name for expected extent files. 
let ed = extent_dir(&dir, 1); - let mut ex_files = extent_file_list(ed, 1).await.unwrap(); + let mut ex_files = extent_file_list(ed, 1).unwrap(); ex_files.sort(); let expected = vec!["001", "001.db", "001.db-shm", "001.db-wal"]; println!("files: {:?}", ex_files); @@ -311,7 +311,7 @@ mod test { rm_file.set_extension("db-shm"); std::fs::remove_file(rm_file).unwrap(); - let mut ex_files = extent_file_list(extent_dir, 1).await.unwrap(); + let mut ex_files = extent_file_list(extent_dir, 1).unwrap(); ex_files.sort(); let expected = vec!["001", "001.db"]; println!("files: {:?}", ex_files); @@ -346,7 +346,7 @@ mod test { rm_file.set_extension("db-shm"); let _ = std::fs::remove_file(rm_file); - let mut ex_files = extent_file_list(extent_dir, 1).await.unwrap(); + let mut ex_files = extent_file_list(extent_dir, 1).unwrap(); ex_files.sort(); let expected = vec!["001", "001.db"]; println!("files: {:?}", ex_files); @@ -373,7 +373,7 @@ mod test { rm_file.set_extension("db"); std::fs::remove_file(&rm_file).unwrap(); - assert!(extent_file_list(extent_dir, 2).await.is_err()); + assert!(extent_file_list(extent_dir, 2).is_err()); Ok(()) } @@ -395,7 +395,7 @@ mod test { rm_file.push(extent_file_name(1, ExtentType::Data)); std::fs::remove_file(&rm_file).unwrap(); - assert!(extent_file_list(extent_dir, 1).await.is_err()); + assert!(extent_file_list(extent_dir, 1).is_err()); Ok(()) } diff --git a/integration_tests/src/lib.rs b/integration_tests/src/lib.rs index 06371f47e..2d083135b 100644 --- a/integration_tests/src/lib.rs +++ b/integration_tests/src/lib.rs @@ -1,4 +1,5 @@ // Copyright 2023 Oxide Computer Company +#![warn(clippy::unused_async)] #[cfg(test)] mod test { @@ -3395,13 +3396,12 @@ mod test { // Start a new pantry - let (log, pantry) = crucible_pantry::initialize_pantry().await.unwrap(); + let (log, pantry) = crucible_pantry::initialize_pantry().unwrap(); let (pantry_addr, _join_handle) = crucible_pantry::server::run_server( &log, "127.0.0.1:0".parse().unwrap(), &pantry, ) - .await .unwrap(); // Create a Volume out of it, and attach a CruciblePantryClient @@ -3929,13 +3929,12 @@ mod test { // Start the pantry, then use it to scrub - let (log, pantry) = crucible_pantry::initialize_pantry().await.unwrap(); + let (log, pantry) = crucible_pantry::initialize_pantry().unwrap(); let (pantry_addr, _join_handle) = crucible_pantry::server::run_server( &log, "127.0.0.1:0".parse().unwrap(), &pantry, ) - .await .unwrap(); let client = diff --git a/pantry/src/lib.rs b/pantry/src/lib.rs index 8d39090e0..aa30507f5 100644 --- a/pantry/src/lib.rs +++ b/pantry/src/lib.rs @@ -1,5 +1,6 @@ // Copyright 2022 Oxide Computer Company +#![warn(clippy::unused_async)] use std::sync::Arc; use anyhow::Result; @@ -11,7 +12,7 @@ pub const PROG: &str = "crucible-pantry"; pub mod pantry; pub mod server; -pub async fn initialize_pantry() -> Result<(Logger, Arc)> { +pub fn initialize_pantry() -> Result<(Logger, Arc)> { let log = ConfigLogging::File { level: ConfigLoggingLevel::Info, path: "/dev/stdout".into(), diff --git a/pantry/src/main.rs b/pantry/src/main.rs index cfcbc9323..244419180 100644 --- a/pantry/src/main.rs +++ b/pantry/src/main.rs @@ -45,10 +45,9 @@ async fn main() -> Result<()> { write_openapi(&mut f) } Args::Run { listen } => { - let (log, pantry) = initialize_pantry().await?; + let (log, pantry) = initialize_pantry()?; - let (_, join_handle) = - server::run_server(&log, listen, &pantry).await?; + let (_, join_handle) = server::run_server(&log, listen, &pantry)?; join_handle.await?.map_err(|e| anyhow!(e)) } diff --git 
a/pantry/src/server.rs b/pantry/src/server.rs index 8d53fc6a2..71f538cb2 100644 --- a/pantry/src/server.rs +++ b/pantry/src/server.rs @@ -349,7 +349,7 @@ pub fn make_api() -> Result>, String> { Ok(api) } -pub async fn run_server( +pub fn run_server( log: &Logger, bind_address: SocketAddr, df: &Arc, diff --git a/upstairs/src/lib.rs b/upstairs/src/lib.rs index b427f1506..f30dc2f91 100644 --- a/upstairs/src/lib.rs +++ b/upstairs/src/lib.rs @@ -2,6 +2,7 @@ #![cfg_attr(usdt_need_asm, feature(asm))] #![cfg_attr(all(target_os = "macos", usdt_need_asm_sym), feature(asm_sym))] #![allow(clippy::mutex_atomic)] +#![warn(clippy::unused_async)] use std::clone::Clone; use std::collections::{BTreeSet, HashMap, HashSet, VecDeque}; @@ -3716,7 +3717,7 @@ impl Downstairs { /** * Enqueue a new downstairs live repair request. */ - async fn enqueue_repair(&mut self, mut io: DownstairsIO) { + fn enqueue_repair(&mut self, mut io: DownstairsIO) { // Puts the repair IO onto the downstairs work queue. for cid in ClientId::iter() { assert_eq!(io.state[cid], IOState::New); @@ -9503,7 +9504,7 @@ struct Condition { * Send work to all the targets. * If a send fails, report an error. */ -async fn send_work(t: &[Target], val: u64, log: &Logger) { +fn send_work(t: &[Target], val: u64, log: &Logger) { for (client_id, d_client) in t.iter().enumerate() { let res = d_client.ds_work_tx.try_send(val); if let Err(e) = res { @@ -9754,7 +9755,7 @@ async fn process_new_io( return; } - send_work(dst, *lastcast, &up.log).await; + send_work(dst, *lastcast, &up.log); *lastcast += 1; } BlockOp::Read { offset, data } => { @@ -9765,7 +9766,7 @@ async fn process_new_io( { return; } - send_work(dst, *lastcast, &up.log).await; + send_work(dst, *lastcast, &up.log); *lastcast += 1; } BlockOp::Write { offset, data } => { @@ -9776,7 +9777,7 @@ async fn process_new_io( { return; } - send_work(dst, *lastcast, &up.log).await; + send_work(dst, *lastcast, &up.log); *lastcast += 1; } BlockOp::WriteUnwritten { offset, data } => { @@ -9787,7 +9788,7 @@ async fn process_new_io( { return; } - send_work(dst, *lastcast, &up.log).await; + send_work(dst, *lastcast, &up.log); *lastcast += 1; } BlockOp::Flush { snapshot_details } => { @@ -9811,7 +9812,7 @@ async fn process_new_io( return; } - send_work(dst, *lastcast, &up.log).await; + send_work(dst, *lastcast, &up.log); *lastcast += 1; } BlockOp::RepairOp => { @@ -9915,7 +9916,7 @@ async fn process_new_io( req.send_err(CrucibleError::UpstairsInactive); return; } - send_work(dst, *lastcast, &up.log).await; + send_work(dst, *lastcast, &up.log); *lastcast += 1; } } @@ -10133,7 +10134,7 @@ async fn up_listen( error!(up.log, "flush send failed:{:?}", e); // XXX What to do here? } else { - send_work(&dst, 1, &up.log).await; + send_work(&dst, 1, &up.log); } } diff --git a/upstairs/src/live_repair.rs b/upstairs/src/live_repair.rs index 4c51d2f9d..416a92d8b 100644 --- a/upstairs/src/live_repair.rs +++ b/upstairs/src/live_repair.rs @@ -76,7 +76,7 @@ use tokio::sync::{mpsc, oneshot}; // When determining if an extent needs repair, we collect its current // information from a downstairs and store the results in this struct. -#[derive(Debug, Clone)] +#[derive(Debug, Copy, Clone)] pub struct ExtentInfo { pub generation: u64, pub flush_number: u64, @@ -254,14 +254,14 @@ async fn live_repair_main( // rejoins. 
if source_downstairs.is_none() { error!(log, "Failed to find source downstairs for repair"); - up.abort_repair_ds(&mut ds, up_state, &ds_done_tx).await; + up.abort_repair_ds(&mut ds, up_state, &ds_done_tx); ds.end_live_repair(); bail!("Failed to find a valid source downstairs for repair"); } if repair_downstairs.is_empty() { error!(log, "Failed to find a downstairs needing repair"); - up.abort_repair_ds(&mut ds, up_state, &ds_done_tx).await; + up.abort_repair_ds(&mut ds, up_state, &ds_done_tx); ds.end_live_repair(); bail!("Failed to find a downstairs needing repair"); } @@ -590,7 +590,7 @@ fn repair_or_noop( } #[allow(clippy::too_many_arguments)] -async fn create_and_enqueue_reopen_io( +fn create_and_enqueue_reopen_io( ds: &mut Downstairs, gw: &mut GuestWork, eid: u32, @@ -620,13 +620,13 @@ async fn create_and_enqueue_reopen_io( { gw.active.insert(gw_reopen_id, new_gtos); } - ds.enqueue_repair(reopen_io).await; + ds.enqueue_repair(reopen_io); reopen_brw } // This creates and enqueues an ExtentFlushClose operation onto the work queue. #[allow(clippy::too_many_arguments)] -async fn create_and_enqueue_close_io( +fn create_and_enqueue_close_io( ds: &mut Downstairs, gw: &mut GuestWork, eid: u32, @@ -648,7 +648,7 @@ async fn create_and_enqueue_close_io( next_flush, gen, source, - repair.clone(), + repair, ); let mut sub = HashMap::new(); @@ -664,12 +664,12 @@ async fn create_and_enqueue_close_io( { gw.active.insert(gw_close_id, new_gtos); } - ds.enqueue_repair(close_io).await; + ds.enqueue_repair(close_io); close_brw } #[allow(clippy::too_many_arguments)] -async fn create_and_enqueue_repair_io( +fn create_and_enqueue_repair_io( ds: &mut Downstairs, gw: &mut GuestWork, eid: u32, @@ -704,11 +704,11 @@ async fn create_and_enqueue_repair_io( { gw.active.insert(gw_repair_id, new_gtos); } - ds.enqueue_repair(repair_io).await; + ds.enqueue_repair(repair_io); repair_brw } -async fn create_and_enqueue_noop_io( +fn create_and_enqueue_noop_io( ds: &mut Downstairs, gw: &mut GuestWork, deps: Vec, @@ -731,7 +731,7 @@ async fn create_and_enqueue_noop_io( { gw.active.insert(gw_noop_id, new_gtos); } - ds.enqueue_repair(nio).await; + ds.enqueue_repair(nio); noop_brw } @@ -755,7 +755,7 @@ impl Upstairs { // Abort a repair in progress on all downstairs and clear out any // repair state. If our setting faulted on an IO will complete that // IO, then we must indicate that to the ds_done_tx channel. - async fn abort_repair_ds( + fn abort_repair_ds( &self, ds: &mut Downstairs, up_state: UpState, @@ -863,8 +863,7 @@ impl Upstairs { JobId(id), gw_id, impacted_blocks, - ) - .await; + ); noops.push(noop_brw); } } @@ -902,7 +901,7 @@ impl Upstairs { let up_state = active.up_state; let mut ds = self.downstairs.lock().await; drop(active); - self.abort_repair_ds(&mut ds, up_state, ds_done_tx).await; + self.abort_repair_ds(&mut ds, up_state, ds_done_tx); abort_repair = true; } } @@ -1000,7 +999,7 @@ impl Upstairs { // Since we have done nothing yet, we can call abort here and // not have to issue jobs if none have been reserved. - self.abort_repair_ds(&mut ds, up_state, ds_done_tx).await; + self.abort_repair_ds(&mut ds, up_state, ds_done_tx); self.abort_repair_extent(&mut gw, &mut ds, eid).await; drop(ds); @@ -1092,8 +1091,7 @@ impl Upstairs { reopen_id, gw_reopen_id, impacted_blocks, - ) - .await; + ); // Next we create and insert the close job on the work queue. 
let next_flush = self.next_flush_id(); @@ -1111,8 +1109,7 @@ impl Upstairs { impacted_blocks, source, repair.clone(), - ) - .await; + ); // Now that we have enqueued both the close and the reopen, we // can release all the locks and wait for the result from our close. @@ -1161,7 +1158,7 @@ impl Upstairs { // Verify none of the downstairs changed state if !abort_repair && repair_ds_state_change(&mut ds, source, &repair) { warn!(self.log, "RE: downstairs state change, aborting repair now"); - self.abort_repair_ds(&mut ds, up_state, ds_done_tx).await; + self.abort_repair_ds(&mut ds, up_state, ds_done_tx); abort_repair = true; } @@ -1176,7 +1173,6 @@ impl Upstairs { gw_repair_id, impacted_blocks, ) - .await } else { create_and_enqueue_repair_io( &mut ds, @@ -1189,7 +1185,6 @@ impl Upstairs { source, &repair, ) - .await }; drop(gw); drop(ds); @@ -1230,7 +1225,7 @@ impl Upstairs { drop(active); if !abort_repair && repair_ds_state_change(&mut ds, source, &repair) { warn!(self.log, "RE: downstairs state change, aborting repair now"); - self.abort_repair_ds(&mut ds, up_state, ds_done_tx).await; + self.abort_repair_ds(&mut ds, up_state, ds_done_tx); abort_repair = true; } @@ -1242,8 +1237,7 @@ impl Upstairs { noop_id, gw_noop_id, impacted_blocks, - ) - .await; + ); drop(gw); drop(ds); @@ -1315,7 +1309,7 @@ impl Upstairs { self.log, "RE: downstairs state change, aborting repair now" ); - self.abort_repair_ds(&mut ds, up_state, ds_done_tx).await; + self.abort_repair_ds(&mut ds, up_state, ds_done_tx); } } @@ -1726,7 +1720,7 @@ pub mod repair_test { Ok(vec![]), &None, UpState::Active, - Some(ei.clone()), + Some(ei), ) .unwrap(); } @@ -1887,19 +1881,14 @@ pub mod repair_test { ds_close_id, cid, Ok(vec![]), - Some(bad_ei.clone()), + Some(bad_ei), ) .await .unwrap(); } else { - up.process_ds_operation( - ds_close_id, - cid, - Ok(vec![]), - Some(ei.clone()), - ) - .await - .unwrap(); + up.process_ds_operation(ds_close_id, cid, Ok(vec![]), Some(ei)) + .await + .unwrap(); } } @@ -2049,14 +2038,9 @@ pub mod repair_test { .await .unwrap(); } else { - up.process_ds_operation( - ds_close_id, - cid, - Ok(vec![]), - Some(ei.clone()), - ) - .await - .unwrap(); + up.process_ds_operation(ds_close_id, cid, Ok(vec![]), Some(ei)) + .await + .unwrap(); } } @@ -2238,7 +2222,7 @@ pub mod repair_test { Ok(vec![]), &None, UpState::Active, - Some(ei.clone()), + Some(ei), ) .unwrap(); } @@ -2275,7 +2259,7 @@ pub mod repair_test { ds_repair_id, cid, Ok(vec![]), - Some(ei.clone()), + Some(ei), ) .await .unwrap(); @@ -2422,7 +2406,7 @@ pub mod repair_test { Ok(vec![]), &None, UpState::Active, - Some(ei.clone()), + Some(ei), ) .unwrap(); } @@ -2576,7 +2560,7 @@ pub mod repair_test { Ok(vec![]), &None, UpState::Active, - Some(ei.clone()), + Some(ei), ) .unwrap(); } @@ -3260,7 +3244,7 @@ pub mod repair_test { gw.active.insert(gw_close_id, new_gtos); } - ds.enqueue_repair(close_io).await; + ds.enqueue_repair(close_io); ds.in_progress(close_id, ClientId::new(0)); ds.in_progress(close_id, ClientId::new(1)); @@ -3362,18 +3346,9 @@ pub mod repair_test { flush_number: 3, dirty: false, }; - assert!(ds - .repair_info - .insert(ClientId::new(0), ei.clone()) - .is_none()); - assert!(ds - .repair_info - .insert(ClientId::new(1), ei.clone()) - .is_none()); - assert!(ds - .repair_info - .insert(ClientId::new(2), ei.clone()) - .is_none()); + assert!(ds.repair_info.insert(ClientId::new(0), ei).is_none()); + assert!(ds.repair_info.insert(ClientId::new(1), ei).is_none()); + assert!(ds.repair_info.insert(ClientId::new(2), ei).is_none()); let 
repair_extent = if source == ClientId::new(0) { vec![ClientId::new(1), ClientId::new(2)] @@ -3473,29 +3448,29 @@ pub mod repair_test { let repair = if source == ClientId::new(0) { assert!(ds .repair_info - .insert(ClientId::new(0), good_ei.clone()) + .insert(ClientId::new(0), good_ei) .is_none()); assert!(ds .repair_info - .insert(ClientId::new(1), bad_ei.clone()) + .insert(ClientId::new(1), bad_ei) .is_none()); assert!(ds .repair_info - .insert(ClientId::new(2), good_ei.clone()) + .insert(ClientId::new(2), good_ei) .is_none()); vec![ClientId::new(1)] } else { assert!(ds .repair_info - .insert(ClientId::new(0), bad_ei.clone()) + .insert(ClientId::new(0), bad_ei) .is_none()); assert!(ds .repair_info - .insert(ClientId::new(1), good_ei.clone()) + .insert(ClientId::new(1), good_ei) .is_none()); assert!(ds .repair_info - .insert(ClientId::new(2), good_ei.clone()) + .insert(ClientId::new(2), good_ei) .is_none()); vec![ClientId::new(0)] }; @@ -3507,29 +3482,29 @@ pub mod repair_test { let repair = if source == ClientId::new(2) { assert!(ds .repair_info - .insert(ClientId::new(0), good_ei.clone()) + .insert(ClientId::new(0), good_ei) .is_none()); assert!(ds .repair_info - .insert(ClientId::new(1), bad_ei.clone()) + .insert(ClientId::new(1), bad_ei) .is_none()); assert!(ds .repair_info - .insert(ClientId::new(2), good_ei.clone()) + .insert(ClientId::new(2), good_ei) .is_none()); vec![ClientId::new(1)] } else { assert!(ds .repair_info - .insert(ClientId::new(0), good_ei.clone()) + .insert(ClientId::new(0), good_ei) .is_none()); assert!(ds .repair_info - .insert(ClientId::new(1), good_ei.clone()) + .insert(ClientId::new(1), good_ei) .is_none()); assert!(ds .repair_info - .insert(ClientId::new(2), bad_ei.clone()) + .insert(ClientId::new(2), bad_ei) .is_none()); vec![ClientId::new(2)] }; @@ -3559,43 +3534,43 @@ pub mod repair_test { let repair = if source == ClientId::new(0) { assert!(ds .repair_info - .insert(ClientId::new(0), good_ei.clone()) + .insert(ClientId::new(0), good_ei) .is_none()); assert!(ds .repair_info - .insert(ClientId::new(1), bad_ei.clone()) + .insert(ClientId::new(1), bad_ei) .is_none()); assert!(ds .repair_info - .insert(ClientId::new(2), bad_ei.clone()) + .insert(ClientId::new(2), bad_ei) .is_none()); vec![ClientId::new(1), ClientId::new(2)] } else if source == ClientId::new(1) { assert!(ds .repair_info - .insert(ClientId::new(0), bad_ei.clone()) + .insert(ClientId::new(0), bad_ei) .is_none()); assert!(ds .repair_info - .insert(ClientId::new(1), good_ei.clone()) + .insert(ClientId::new(1), good_ei) .is_none()); assert!(ds .repair_info - .insert(ClientId::new(2), bad_ei.clone()) + .insert(ClientId::new(2), bad_ei) .is_none()); vec![ClientId::new(0), ClientId::new(2)] } else { assert!(ds .repair_info - .insert(ClientId::new(0), bad_ei.clone()) + .insert(ClientId::new(0), bad_ei) .is_none()); assert!(ds .repair_info - .insert(ClientId::new(1), bad_ei.clone()) + .insert(ClientId::new(1), bad_ei) .is_none()); assert!(ds .repair_info - .insert(ClientId::new(2), good_ei.clone()) + .insert(ClientId::new(2), good_ei) .is_none()); vec![ClientId::new(0), ClientId::new(1)] }; @@ -3848,8 +3823,7 @@ pub mod repair_test { r_id, gw_r_id, impacted_blocks, - ) - .await; + ); let job = ds.ds_active.get(&r_id).unwrap(); @@ -3916,8 +3890,7 @@ pub mod repair_test { impacted_blocks, source, repair.clone(), - ) - .await; + ); let job = ds.ds_active.get(&close_id).unwrap(); @@ -3986,7 +3959,7 @@ pub mod repair_test { dirty: false, }; for cid in ClientId::iter() { - ds.repair_info.insert(cid, ei.clone()); 
+ ds.repair_info.insert(cid, ei); } let _repair_brw = create_and_enqueue_repair_io( @@ -3999,8 +3972,7 @@ pub mod repair_test { impacted_blocks, source, &repair, - ) - .await; + ); let job = ds.ds_active.get(&repair_id).unwrap(); @@ -4054,14 +4026,14 @@ pub mod repair_test { flush_number: 3, dirty: false, }; - ds.repair_info.insert(ClientId::new(0), ei.clone()); - ds.repair_info.insert(ClientId::new(1), ei.clone()); + ds.repair_info.insert(ClientId::new(0), ei); + ds.repair_info.insert(ClientId::new(1), ei); let bad_ei = ExtentInfo { generation: 5, flush_number: 2, dirty: false, }; - ds.repair_info.insert(ClientId::new(2), bad_ei.clone()); + ds.repair_info.insert(ClientId::new(2), bad_ei); // We also need a fake repair address for cid in ClientId::iter() { ds.ds_repair.insert(cid, "127.0.0.1:1234".parse().unwrap()); @@ -4077,8 +4049,7 @@ pub mod repair_test { impacted_blocks, source, &repair, - ) - .await; + ); let job = ds.ds_active.get(&repair_id).unwrap(); @@ -4153,8 +4124,7 @@ pub mod repair_test { impacted_blocks, ClientId::new(0), // source downstairs vec![ClientId::new(1)], // repair downstairs - ) - .await; + ); drop(gw); drop(ds); } @@ -4189,8 +4159,7 @@ pub mod repair_test { repair_id, gw_repair_id, impacted_blocks, - ) - .await; + ); drop(gw); drop(ds); } @@ -5416,8 +5385,7 @@ pub mod repair_test { let mut gw = up.guest.guest_work.lock().await; let mut ds = up.downstairs.lock().await; - up.abort_repair_ds(&mut ds, UpState::Active, &ds_done_tx) - .await; + up.abort_repair_ds(&mut ds, UpState::Active, &ds_done_tx); up.abort_repair_extent(&mut gw, &mut ds, eid as u32).await; assert_eq!(ds.ds_state[ClientId::new(0)], DsState::Active); @@ -5457,8 +5425,7 @@ pub mod repair_test { // Reserve some repair IDs let reserved_ids = ds.reserve_repair_ids(eid as u32); - up.abort_repair_ds(&mut ds, UpState::Active, &ds_done_tx) - .await; + up.abort_repair_ds(&mut ds, UpState::Active, &ds_done_tx); up.abort_repair_extent(&mut gw, &mut ds, eid as u32).await; // Check all three IOs again, downstairs 1 will be skipped.. @@ -5506,8 +5473,7 @@ pub mod repair_test { // Reserve some repair IDs let _reserved_ids = ds.reserve_repair_ids(eid as u32); - up.abort_repair_ds(&mut ds, UpState::Active, &ds_done_tx) - .await; + up.abort_repair_ds(&mut ds, UpState::Active, &ds_done_tx); up.abort_repair_extent(&mut gw, &mut ds, eid as u32).await; // Check all three IOs again, all downstairs will be skipped.. @@ -6069,8 +6035,7 @@ pub mod repair_test { reopen_id, gw_reopen_id, impacted_blocks, - ) - .await; + ); // Next we create and insert the close job on the work queue. @@ -6086,8 +6051,7 @@ pub mod repair_test { impacted_blocks, ClientId::new(0), vec![ClientId::new(1)], - ) - .await; + ); drop(ds); drop(gw); diff --git a/upstairs/src/volume.rs b/upstairs/src/volume.rs index da96de26a..cfbf4b203 100644 --- a/upstairs/src/volume.rs +++ b/upstairs/src/volume.rs @@ -1139,12 +1139,12 @@ impl Volume { // 3B, and 3C are all met. This would mean that the VCRs are valid for // a downstairs replacement. - pub async fn compare_vcr_for_migration( + pub fn compare_vcr_for_migration( original: VolumeConstructionRequest, replacement: VolumeConstructionRequest, log: &Logger, ) -> Result<(), CrucibleError> { - match Self::compare_vcr_for_update(original, replacement, log).await? { + match Self::compare_vcr_for_update(original, replacement, log)? 
{ Some((_o, _n)) => crucible_bail!( ReplaceRequestInvalid, "VCR targets are different" @@ -1153,12 +1153,12 @@ impl Volume { } } - pub async fn compare_vcr_for_target_replacement( + pub fn compare_vcr_for_target_replacement( original: VolumeConstructionRequest, replacement: VolumeConstructionRequest, log: &Logger, ) -> Result<(SocketAddr, SocketAddr), CrucibleError> { - match Self::compare_vcr_for_update(original, replacement, log).await? { + match Self::compare_vcr_for_update(original, replacement, log)? { Some((o, n)) => Ok((o, n)), None => crucible_bail!( ReplaceRequestInvalid, @@ -1167,7 +1167,7 @@ impl Volume { } } - pub async fn compare_vcr_for_update( + pub fn compare_vcr_for_update( original: VolumeConstructionRequest, replacement: VolumeConstructionRequest, log: &Logger, @@ -1484,8 +1484,7 @@ impl Volume { original, replacement, &self.log, - ) - .await?; + )?; info!( self.log, @@ -3107,17 +3106,17 @@ mod test { } } - #[tokio::test] - async fn volume_replace_basic() { + #[test] + fn volume_replace_basic() { // A valid replacement VCR is provided with only one target being // different. // Test all three targets for replacement. for cid in 0..3 { - test_volume_replace_cid(cid).await.unwrap(); + test_volume_replace_cid(cid).unwrap(); } } - async fn test_volume_replace_cid(cid: usize) -> Result<()> { + fn test_volume_replace_cid(cid: usize) -> Result<()> { // A valid replacement VCR is provided with a larger generation // number and only one target being different. let block_size = 512; @@ -3167,8 +3166,7 @@ mod test { original, replacement, &log, - ) - .await?; + )?; info!(log, "replace {old_t} with {new_t}"); assert_eq!(original_target, old_t); @@ -3176,8 +3174,8 @@ mod test { Ok(()) } - #[tokio::test] - async fn volume_replace_rop() { + #[test] + fn volume_replace_rop() { // A replacement VCR is provided with one target being // different, both new and old have a read_only_parent let block_size = 512; @@ -3234,7 +3232,6 @@ mod test { replacement, &log, ) - .await .unwrap(); assert_eq!(original_target, old_t); @@ -3300,7 +3297,6 @@ mod test { replacement, &log, ) - .await .unwrap(); assert_eq!(original_target, old_t); @@ -3326,7 +3322,7 @@ mod test { block_size, blocks_per_extent, extent_count, - opts: opts.clone(), + opts, gen: 2, }], read_only_parent: None, @@ -3339,7 +3335,6 @@ mod test { original, &log ) - .await .is_err()); } @@ -3377,7 +3372,7 @@ mod test { block_size, blocks_per_extent, extent_count, - opts: opts.clone(), + opts, gen: 3, }], read_only_parent: None, @@ -3391,13 +3386,10 @@ mod test { replacement.clone(), &log, ) - .await .is_err()); // Migration is valid with these VCRs - Volume::compare_vcr_for_migration(original.clone(), replacement, &log) - .await - .unwrap(); + Volume::compare_vcr_for_migration(original, replacement, &log).unwrap(); } #[tokio::test] @@ -3447,7 +3439,6 @@ mod test { replacement, &log ) - .await .is_err()); } @@ -3496,7 +3487,6 @@ mod test { replacement, &log ) - .await .is_err()); } @@ -3555,7 +3545,6 @@ mod test { replacement, &log ) - .await .is_err()); } @@ -3605,7 +3594,6 @@ mod test { replacement, &log ) - .await .is_err()); } @@ -3656,7 +3644,6 @@ mod test { replacement, &log ) - .await .is_err()); } @@ -3702,7 +3689,6 @@ mod test { let log = csl(); assert!(Volume::compare_vcr_for_update(original, replacement, &log) - .await .is_err()); } @@ -3710,7 +3696,7 @@ mod test { // We create two Volumes with the provided information, and use o_opts // for one Volume and n_opts for the other. 
We return the result of // the compare_vcr_for_target_replacement function. - async fn test_volume_replace_opts( + fn test_volume_replace_opts( id: Uuid, block_size: u64, blocks_per_extent: u64, @@ -3748,7 +3734,6 @@ mod test { let log = csl(); Volume::compare_vcr_for_target_replacement(original, replacement, &log) - .await } #[tokio::test] @@ -3774,7 +3759,6 @@ mod test { o_opts, n_opts ) - .await .is_err()); } @@ -3801,7 +3785,6 @@ mod test { o_opts, n_opts ) - .await .is_err()); } @@ -3828,7 +3811,6 @@ mod test { o_opts, n_opts ) - .await .is_err()); } @@ -3857,7 +3839,6 @@ mod test { o_opts, n_opts ) - .await .is_err()); } @@ -3884,7 +3865,6 @@ mod test { o_opts, n_opts ) - .await .is_err()); } @@ -3911,7 +3891,6 @@ mod test { o_opts, n_opts ) - .await .is_err()); } @@ -3938,7 +3917,6 @@ mod test { o_opts, n_opts ) - .await .is_err()); } @@ -3965,7 +3943,6 @@ mod test { o_opts, n_opts ) - .await .is_err()); } @@ -3992,7 +3969,6 @@ mod test { o_opts, n_opts ) - .await .is_err()); } } From 94edb6026302541929ec505dfc762494a8cdc040 Mon Sep 17 00:00:00 2001 From: Matt Keeter Date: Sat, 23 Sep 2023 17:56:16 -0400 Subject: [PATCH 2/3] Enable lint using rustflags hack --- .cargo/config.toml | 6 ++++++ Cargo.toml | 1 - downstairs/src/lib.rs | 1 - integration_tests/src/lib.rs | 1 - pantry/src/lib.rs | 1 - upstairs/src/lib.rs | 1 - 6 files changed, 6 insertions(+), 5 deletions(-) create mode 100644 .cargo/config.toml diff --git a/.cargo/config.toml b/.cargo/config.toml new file mode 100644 index 000000000..be0f6d2ad --- /dev/null +++ b/.cargo/config.toml @@ -0,0 +1,6 @@ +[build] +# Workaround to enable this lint for all packages in the workspace +# +# Once https://github.com/rust-lang/cargo/issues/12115 makes it to our +# toolchain, we'll be able to put this in the `Cargo.toml` manifest instead. 
+rustflags = ["-Wclippy::unused-async"] diff --git a/Cargo.toml b/Cargo.toml index 6d30e7d95..8e0a2f16b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -124,4 +124,3 @@ repair-client = { path = "./repair-client" } [profile.dev] panic = 'abort' - diff --git a/downstairs/src/lib.rs b/downstairs/src/lib.rs index d7d453064..0f0d8706b 100644 --- a/downstairs/src/lib.rs +++ b/downstairs/src/lib.rs @@ -1,7 +1,6 @@ // Copyright 2023 Oxide Computer Company #![cfg_attr(usdt_need_asm, feature(asm))] #![cfg_attr(all(target_os = "macos", usdt_need_asm_sym), feature(asm_sym))] -#![warn(clippy::unused_async)] use futures::executor; use futures::lock::{Mutex, MutexGuard}; diff --git a/integration_tests/src/lib.rs b/integration_tests/src/lib.rs index 2d083135b..9437bf6f5 100644 --- a/integration_tests/src/lib.rs +++ b/integration_tests/src/lib.rs @@ -1,5 +1,4 @@ // Copyright 2023 Oxide Computer Company -#![warn(clippy::unused_async)] #[cfg(test)] mod test { diff --git a/pantry/src/lib.rs b/pantry/src/lib.rs index aa30507f5..814b2af79 100644 --- a/pantry/src/lib.rs +++ b/pantry/src/lib.rs @@ -1,6 +1,5 @@ // Copyright 2022 Oxide Computer Company -#![warn(clippy::unused_async)] use std::sync::Arc; use anyhow::Result; diff --git a/upstairs/src/lib.rs b/upstairs/src/lib.rs index f30dc2f91..4dff50237 100644 --- a/upstairs/src/lib.rs +++ b/upstairs/src/lib.rs @@ -2,7 +2,6 @@ #![cfg_attr(usdt_need_asm, feature(asm))] #![cfg_attr(all(target_os = "macos", usdt_need_asm_sym), feature(asm_sym))] #![allow(clippy::mutex_atomic)] -#![warn(clippy::unused_async)] use std::clone::Clone; use std::collections::{BTreeSet, HashMap, HashSet, VecDeque}; From 369c36749e458ec42ef5b65674b2bf6e04218da2 Mon Sep 17 00:00:00 2001 From: Matt Keeter Date: Sun, 24 Sep 2023 09:55:47 -0400 Subject: [PATCH 3/3] Post-rebase fixes --- upstairs/src/lib.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/upstairs/src/lib.rs b/upstairs/src/lib.rs index 4dff50237..c1af803a9 100644 --- a/upstairs/src/lib.rs +++ b/upstairs/src/lib.rs @@ -5042,7 +5042,7 @@ impl UpstairsState { * that happens on initial startup. This is because the running * upstairs has some state it can use to re-verify a downstairs. */ - async fn set_active(&mut self) -> Result<(), CrucibleError> { + fn set_active(&mut self) -> Result<(), CrucibleError> { if self.up_state == UpState::Active { crucible_bail!(UpstairsAlreadyActive); } else if self.up_state == UpState::Deactivating { @@ -5359,7 +5359,7 @@ impl Upstairs { async fn set_active(&self) -> Result<(), CrucibleError> { let mut active = self.active.lock().await; self.stats.add_activation().await; - active.set_active().await?; + active.set_active()?; info!( self.log, "{} is now active with session: {}", self.uuid, self.session_id @@ -6614,7 +6614,7 @@ impl Upstairs { * Verify the guest given gen number is highest. * Decide if we need repair, and if so create the repair list */ - async fn collate_downstairs( + fn collate_downstairs( &self, ds: &mut Downstairs, ) -> Result { @@ -6939,7 +6939,7 @@ impl Upstairs { * downstairs out, forget any activation requests, and the * upstairs goes back to waiting for another activation request. 
*/ - self.collate_downstairs(&mut ds).await + self.collate_downstairs(&mut ds) }; match collate_status { @@ -7048,7 +7048,7 @@ impl Upstairs { for s in ds.ds_state.iter_mut() { *s = DsState::Active; } - active.set_active().await?; + active.set_active()?; info!( self.log, "{} is now active with session: {}", @@ -7084,7 +7084,7 @@ impl Upstairs { for s in ds.ds_state.iter_mut() { *s = DsState::Active; } - active.set_active().await?; + active.set_active()?; info!( self.log, "{} is now active with session: {}", @@ -8822,7 +8822,7 @@ impl GtoS { /* * Notify corresponding BlockReqWaiter */ - pub async fn notify(self, result: Result<(), CrucibleError>) { + pub fn notify(self, result: Result<(), CrucibleError>) { /* * If present, send the result to the guest. If this is a flush * issued on behalf of crucible, then there is no place to send @@ -8943,7 +8943,7 @@ impl GuestWork { gtos_job.transfer().await; } - gtos_job.notify(result).await; + gtos_job.notify(result); self.completed.push(gw_id); } else {
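
A note on the lint driving this series: clippy::unused_async fires on any `async fn` whose body contains no await point, since such a function forces every caller to build and `.await` a Future that does no asynchronous work. A minimal, self-contained sketch of the before/after shape that patch 1 applies mechanically across the workspace — the function names here are hypothetical illustrations, not taken from the diff (compare `get_ready_job`, `extent_file_list`, and `send_work` above):

    // Illustration only: the pattern clippy::unused_async flags, and the
    // synchronous form this series rewrites such functions into.
    #![warn(clippy::unused_async)]

    // Flagged: declared `async`, but the body never awaits.
    async fn double_async(x: u64) -> u64 {
        x * 2
    }

    // Fix: drop `async`; each call site then drops its `.await` too,
    // which is exactly the two-sided change the hunks above make.
    fn double(x: u64) -> u64 {
        x * 2
    }

    #[tokio::main]
    async fn main() {
        assert_eq!(double_async(21).await, double(21));
    }

Patch 2's `.cargo/config.toml` workaround exists because Cargo, at the time, had no manifest syntax for workspace-wide lint levels. Once rust-lang/cargo#12115 (the `[lints]` tables) reached stable — around Cargo 1.74, though treat the exact version as an assumption — the same effect can be expressed directly in the manifests, roughly:

    # Sketch of the manifest form the config.toml comment anticipates.
    # In the workspace-root Cargo.toml:
    [workspace.lints.clippy]
    unused_async = "warn"

    # And in each member crate's Cargo.toml:
    [lints]
    workspace = true

Unlike the RUSTFLAGS route, the manifest form doesn't invalidate the build cache for unrelated flag changes and is scoped per-crate, which is why the config.toml comment frames the rustflags entry as a temporary hack.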