From 520ca00920ce3904a4e71d72c97c24c826afe019 Mon Sep 17 00:00:00 2001
From: Sander Bosma
Date: Wed, 17 May 2023 14:46:29 +0200
Subject: [PATCH] fix: save rescanning progress

---
 Cargo.lock                             |  11 +-
 service/src/lib.rs                     |   5 +
 vault/Cargo.toml                       |   2 +
 vault/src/error.rs                     |  10 +
 vault/src/issue.rs                     | 305 +++++++++++++++++++++++--
 vault/src/main.rs                      |   7 +
 vault/src/system.rs                    |  72 +++++-
 vault/tests/vault_integration_tests.rs |  59 ++++-
 8 files changed, 428 insertions(+), 43 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 05db2d8d3..cbc37fef4 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -14746,6 +14746,7 @@ dependencies = [
  "mockall 0.8.3",
  "nonzero_ext",
  "parity-scale-codec",
+ "rocksdb",
  "runtime",
  "secp256k1 0.24.3",
  "serde",
@@ -15970,12 +15971,12 @@ dependencies = [
  "pkg-config",
 ]
 
-[[patch.unused]]
-name = "sp-serializer"
-version = "4.0.0-dev"
-source = "git+https://github.com/paritytech//substrate?branch=polkadot-v0.9.37#f38bd6671d460293c93062cc1e4fe9e9e490cb29"
-
 [[patch.unused]]
 name = "orml-xcm"
 version = "0.4.1-dev"
 source = "git+https://github.com/open-web3-stack//open-runtime-module-library?rev=24f0a8b6e04e1078f70d0437fb816337cdf4f64c#24f0a8b6e04e1078f70d0437fb816337cdf4f64c"
+
+[[patch.unused]]
+name = "sp-serializer"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech//substrate?branch=polkadot-v0.9.37#f38bd6671d460293c93062cc1e4fe9e9e490cb29"
diff --git a/service/src/lib.rs b/service/src/lib.rs
index 5012961ae..cf9b831f1 100644
--- a/service/src/lib.rs
+++ b/service/src/lib.rs
@@ -33,6 +33,7 @@ pub trait Service {
         monitoring_config: MonitoringConfig,
         shutdown: ShutdownSender,
         constructor: Box<dyn Fn(VaultId) -> Result<DynBitcoinCoreApi, BitcoinError> + Send + Sync>,
+        keyname: String,
     ) -> Self;
     async fn start(&self) -> Result<(), Error>;
 }
@@ -46,6 +47,7 @@ pub struct ConnectionManager {
     monitoring_config: MonitoringConfig,
     config: Config,
     increment_restart_counter: F,
+    db_path: String,
 }
 
 impl ConnectionManager {
@@ -59,6 +61,7 @@ impl ConnectionManager {
         monitoring_config: MonitoringConfig,
         config: Config,
         increment_restart_counter: F,
+        db_path: String,
     ) -> Self {
         Self {
             signer,
@@ -69,6 +72,7 @@ impl ConnectionManager {
             monitoring_config,
             config,
             increment_restart_counter,
+            db_path,
         }
     }
 
@@ -122,6 +126,7 @@ impl ConnectionManager {
             self.monitoring_config.clone(),
             shutdown_tx.clone(),
             Box::new(constructor),
+            self.db_path.clone(),
         );
         match service.start().await {
             Err(err @ Error::Abort(_)) => {
diff --git a/vault/Cargo.toml b/vault/Cargo.toml
index 6b99baa66..589c6ef1f 100644
--- a/vault/Cargo.toml
+++ b/vault/Cargo.toml
@@ -35,6 +35,8 @@ lazy_static = "1.4"
 governor = "0.5.0"
 nonzero_ext = "0.3.0"
 
+rocksdb = { version = "0.19.0", features = ["snappy"], default-features = false }
+
 tracing = { version = "0.1", features = ["log"] }
 tracing-subscriber = { version = "0.2.12", features = ["registry", "env-filter", "fmt"] }
 tracing-futures = { version = "0.2.5" }
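The new `rocksdb` dependency is only exercised through the small wrapper added to vault/src/system.rs further down, and the three error variants introduced next in vault/src/error.rs correspond one-to-one to the failure points of its JSON-in-RocksDB round-trip. A minimal sketch of that round-trip (illustrative only; `example.db` and the `u32` payload are placeholders, but `DB::open_default`, `put` and `get` are the rocksdb 0.19 calls used by this patch):

    use rocksdb::DB;

    fn roundtrip() -> Result<Option<u32>, Box<dyn std::error::Error>> {
        let db = DB::open_default("example.db")?; // can fail -> rocksdb::Error
        db.put(b"key", serde_json::to_vec(&42u32)?)?; // can fail -> serde_json::Error
        match db.get(b"key")? {
            None => Ok(None),
            Some(bytes) => {
                let text = String::from_utf8(bytes)?; // can fail -> FromUtf8Error
                Ok(Some(serde_json::from_str(&text)?)) // can fail -> serde_json::Error
            }
        }
    }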
diff --git a/vault/src/error.rs b/vault/src/error.rs
index 9705413d3..1c6bce64a 100644
--- a/vault/src/error.rs
+++ b/vault/src/error.rs
@@ -1,7 +1,11 @@
+use std::string::FromUtf8Error;
+
 use bitcoin::Error as BitcoinError;
 use jsonrpc_core_client::RpcError;
 use parity_scale_codec::Error as CodecError;
+use rocksdb::Error as RocksDbError;
 use runtime::Error as RuntimeError;
+use serde_json::Error as SerdeJsonError;
 use thiserror::Error;
 use tokio_stream::wrappers::errors::BroadcastStreamRecvError;
 
@@ -32,6 +36,12 @@ pub enum Error {
     RuntimeError(#[from] RuntimeError),
     #[error("CodecError: {0}")]
     CodecError(#[from] CodecError),
+    #[error("DatabaseError: {0}")]
+    DatabaseError(#[from] RocksDbError),
+    #[error("SerdeJsonError: {0}")]
+    SerdeJsonError(#[from] SerdeJsonError),
+    #[error("FromUtf8Error: {0}")]
+    FromUtf8Error(#[from] FromUtf8Error),
     #[error("BroadcastStreamRecvError: {0}")]
     BroadcastStreamRecvError(#[from] BroadcastStreamRecvError),
 }
diff --git a/vault/src/issue.rs b/vault/src/issue.rs
index 73203c7b6..615d0f143 100644
--- a/vault/src/issue.rs
+++ b/vault/src/issue.rs
@@ -4,12 +4,16 @@ use crate::{
 use bitcoin::{BlockHash, Error as BitcoinError, PublicKey, Transaction, TransactionExt};
 use futures::{channel::mpsc::Sender, future, SinkExt, StreamExt, TryFutureExt};
 use runtime::{
-    BtcAddress, BtcPublicKey, BtcRelayPallet, CancelIssueEvent, ExecuteIssueEvent, H256Le, InterBtcParachain,
-    IssuePallet, IssueRequestStatus, PartialAddress, PrettyPrint, RequestIssueEvent, UtilFuncs, VaultId, H256,
+    BtcAddress, BtcPublicKey, BtcRelayPallet, CancelIssueEvent, ExecuteIssueEvent, H256Le, InterBtcIssueRequest,
+    InterBtcParachain, IssuePallet, IssueRequestStatus, PartialAddress, PrettyPrint, RequestIssueEvent, UtilFuncs,
+    VaultId, H256,
 };
 use service::{DynBitcoinCoreApi, Error as ServiceError};
 use sha2::{Digest, Sha256};
-use std::sync::Arc;
+use std::{
+    sync::Arc,
+    time::{Duration, Instant},
+};
 
 // initialize `issue_set` with currently open issues, and return the block height
 // from which to start watching the bitcoin chain
@@ -73,11 +77,72 @@ pub async fn process_issue_requests(
     Err(ServiceError::ClientShutdown)
 }
 
+#[derive(serde::Serialize, serde::Deserialize, Clone, Default, PartialEq, Debug)]
+struct RescanStatus {
+    newest_issue_height: u32,
+    queued_rescan_range: Option<(usize, usize)>, // start, end (inclusive)
+}
+
+impl RescanStatus {
+    const KEY: &str = "rescan-status";
+
+    fn update(&mut self, mut issues: Vec<InterBtcIssueRequest>, current_bitcoin_height: usize) {
+        // Only look at issues that haven't been processed yet
+        issues.retain(|issue| issue.opentime > self.newest_issue_height);
+
+        for issue in issues {
+            self.newest_issue_height = self.newest_issue_height.max(issue.opentime);
+            let begin = match self.queued_rescan_range {
+                Some((begin, _)) => begin.min(issue.btc_height as usize),
+                None => issue.btc_height as usize,
+            };
+            self.queued_rescan_range = Some((begin, current_bitcoin_height));
+        }
+    }
+
+    /// prune the scanning range: bitcoin core can't scan blocks below the pruned
+    /// height. This function clamps the range in self to the scannable range, and
+    /// returns the unscannable range, if any
+    fn prune(&mut self, btc_pruned_start_height: usize) -> Option<(usize, usize)> {
+        if let Some((ref mut start, _)) = self.queued_rescan_range {
+            if *start < btc_pruned_start_height {
+                let ret = (*start, btc_pruned_start_height.saturating_sub(1));
+                *start = btc_pruned_start_height;
+                return Some(ret);
+            }
+        }
+        None
+    }
+
+    /// updates self as if max_blocks were processed. Returns the chunk to rescan now.
+    fn process_blocks(&mut self, max_blocks: usize) -> Option<(usize, usize)> {
+        let (start, end) = self.queued_rescan_range?;
+        let chunk_end = end.min(start.saturating_add(max_blocks).saturating_sub(1));
+
+        if chunk_end == end {
+            self.queued_rescan_range = None; // this will be the last chunk to scan
+        } else {
+            self.queued_rescan_range = Some((chunk_end + 1, end));
+        }
+        Some((start, chunk_end))
+    }
+
+    fn get(vault_id: &VaultId, db: &crate::system::DatabaseConfig) -> Result<Self, Error> {
+        Ok(db.get(vault_id, Self::KEY)?.unwrap_or_default())
+    }
+
+    fn store(&self, vault_id: &VaultId, db: &crate::system::DatabaseConfig) -> Result<(), Error> {
+        db.put(vault_id, Self::KEY, self)?;
+        Ok(())
+    }
+}
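+
+// Worked example of the bookkeeping above (values are illustrative; this mirrors
+// the `process_blocks` unit tests added at the bottom of this file):
+//
+//     let mut status = RescanStatus {
+//         newest_issue_height: 0,
+//         queued_rescan_range: Some((100, 109)), // 10 blocks queued
+//     };
+//     assert_eq!(status.process_blocks(4), Some((100, 103)));    // first chunk of 4
+//     assert_eq!(status.queued_rescan_range, Some((104, 109)));  // progress kept
+//     assert_eq!(status.process_blocks(100), Some((104, 109)));  // final chunk
+//     assert_eq!(status.queued_rescan_range, None);              // nothing left to scan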
 
 pub async fn add_keys_from_past_issue_request(
     bitcoin_core: &DynBitcoinCoreApi,
     btc_parachain: &InterBtcParachain,
     vault_id: &VaultId,
+    db: &crate::system::DatabaseConfig,
 ) -> Result<(), Error> {
+    let mut scanning_status = RescanStatus::get(vault_id, db)?;
+    tracing::info!("initial status: {scanning_status:?}");
+
     let issue_requests: Vec<_> = btc_parachain
         .get_vault_issue_requests(btc_parachain.get_account_id().clone())
         .await?
@@ -85,39 +150,27 @@ pub async fn add_keys_from_past_issue_request(
         .into_iter()
         .filter(|(_, issue)| &issue.vault == vault_id)
         .collect();
 
-    let btc_start_height = match issue_requests.iter().map(|(_, request)| request.btc_height).min() {
-        Some(x) => x as usize,
-        None => return Ok(()), // the iterator is empty so we have nothing to do
-    };
-
     for (issue_id, request) in issue_requests.clone().into_iter() {
         if let Err(e) = add_new_deposit_key(bitcoin_core, issue_id, request.btc_public_key).await {
             tracing::error!("Failed to add deposit key #{}: {}", issue_id, e.to_string());
         }
     }
 
-    // read height only _after_ the last add_new_deposit_height.If a new block arrives
+    // read height only _after_ the last add_new_deposit_key. If a new block arrives
     // while we rescan, bitcoin core will correctly recognize addresses associated with the
     // privkey
-    let btc_end_height = bitcoin_core.get_block_count().await? as usize - 1;
-
-    // check if the blockchain was pruned after the point where we would need to scan
-    // if it was, we only rescan from the pruned height
+    let btc_end_height = bitcoin_core.get_block_count().await? as usize;
     let btc_pruned_start_height = bitcoin_core.get_pruned_height().await? as usize;
-    let rescan_start_height = btc_start_height.max(btc_pruned_start_height);
 
-    // in parallel, rescan what blockchain we do have stored locally
-    tracing::info!("Rescanning bitcoin chain from height {}...", rescan_start_height);
-    bitcoin_core
-        .rescan_blockchain(rescan_start_height, btc_end_height)
-        .await?;
+    let issues = issue_requests.clone().into_iter().map(|(_key, issue)| issue).collect();
+    scanning_status.update(issues, btc_end_height);
 
-    // also check in electrs in case there were any requests from before the pruned height
-    if btc_start_height < btc_pruned_start_height {
+    // use electrs to scan the portion that is not scannable by bitcoin core
+    if let Some((start, end)) = scanning_status.prune(btc_pruned_start_height) {
         tracing::info!(
             "Also checking electrs for issue requests between {} and {}...",
-            btc_start_height,
-            btc_pruned_start_height
+            start,
+            end
         );
         bitcoin_core
             .rescan_electrs_for_addresses(
@@ -135,6 +188,30 @@ pub async fn add_keys_from_past_issue_request(
             )
             .await?;
     }
 
+    // save progress s.t. we don't rescan the pruned range again if we crash now
+    scanning_status.store(vault_id, db)?;
+
+    let mut chunk_size = 1;
+    // rescan the blockchain in chunks, so that we can save progress. The code below
+    // aims to have each chunk take about 10 seconds (arbitrarily chosen value).
+    while let Some((chunk_start, chunk_end)) = scanning_status.process_blocks(chunk_size) {
+        tracing::info!("Rescanning bitcoin chain from {} to {}...", chunk_start, chunk_end);
+
+        let start_time = Instant::now();
+
+        bitcoin_core.rescan_blockchain(chunk_start, chunk_end).await?;
+
+        // with the code below the rescan time should remain between 5 and 20 seconds
+        // after the first couple of rounds.
+        if start_time.elapsed() < Duration::from_secs(10) {
+            chunk_size = chunk_size.saturating_mul(2);
+        } else {
+            chunk_size = (chunk_size / 2).max(1);
+        }
+
+        scanning_status.store(vault_id, db)?;
+    }
+
     Ok(())
 }
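The chunk-size control in the loop above is multiplicative in both directions: a chunk that scans in under 10 seconds doubles the next chunk, a slower one halves it, never dropping below 1. Factored out, the policy is just the following (sketch; `next_chunk_size` is not a function in this patch):

    use std::time::Duration;

    fn next_chunk_size(current: usize, elapsed: Duration) -> usize {
        if elapsed < Duration::from_secs(10) {
            current.saturating_mul(2) // fast scan: try a bigger chunk next time
        } else {
            (current / 2).max(1) // slow scan: back off, but keep making progress
        }
    }

Because the size starts at 1 and at most doubles or halves per round, the steady state oscillates around the 10-second target, which is why the comment can promise roughly 5 to 20 seconds per chunk after the first few rounds.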
@@ -374,3 +451,185 @@ pub async fn listen_for_issue_cancels(
         .await?;
     Ok(())
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use runtime::{
+        AccountId,
+        CurrencyId::Token,
+        TokenSymbol::{DOT, IBTC},
+    };
+
+    fn dummy_issues(heights: Vec<(u32, usize)>) -> Vec<InterBtcIssueRequest> {
+        heights
+            .into_iter()
+            .map(|(opentime, btc_height)| InterBtcIssueRequest {
+                opentime,
+                btc_height: btc_height as u32,
+                amount: Default::default(),
+                btc_address: Default::default(),
+                fee: Default::default(),
+                griefing_collateral: Default::default(),
+                period: Default::default(),
+                requester: AccountId::new([1u8; 32]),
+                btc_public_key: BtcPublicKey([0; 33]),
+                status: IssueRequestStatus::Pending,
+                vault: VaultId::new(AccountId::new([1u8; 32]), Token(DOT), Token(IBTC)),
+            })
+            .collect()
+    }
+
+    #[test]
+    fn test_rescan_status_update() {
+        let mut status = RescanStatus::default();
+        let current_height = 50;
+        let issues = dummy_issues(vec![(2, 23), (4, 20), (3, 30)]);
+
+        status.update(issues, current_height);
+
+        assert_eq!(
+            status,
+            RescanStatus {
+                newest_issue_height: 4,
+                queued_rescan_range: Some((20, current_height))
+            }
+        );
+
+        // check that status does not change if issues have already been registered
+        let processed_issues = dummy_issues(vec![
+            (2, current_height * 2),
+            (4, current_height * 2),
+            (3, current_height * 2),
+        ]);
+        status.update(processed_issues, current_height);
+        assert_eq!(
+            status,
+            RescanStatus {
+                newest_issue_height: 4,
+                queued_rescan_range: Some((20, current_height))
+            }
+        );
+
+        // check that status does not change if a new issue doesn't expand the current range
+        let processed_issues = dummy_issues(vec![
+            (2, current_height * 2),
+            (5, 45), // new, but already included in the to-scan range
+            (3, current_height * 2),
+        ]);
+        status.update(processed_issues.clone(), current_height);
+        assert_eq!(
+            status,
+            RescanStatus {
+                newest_issue_height: 5,
+                queued_rescan_range: Some((20, current_height))
+            }
+        );
+
+        // check that status decreases the start of the range if an issue requires it
+        let more_issues = dummy_issues(vec![
+            (2, 41),
+            (6, 15), // new: this one has not been processed yet, and it expands the range
+            (3, 41),
+        ]);
+        status.update(more_issues, current_height);
+        assert_eq!(
+            status,
+            RescanStatus {
+                newest_issue_height: 6,
+                queued_rescan_range: Some((15, current_height))
+            }
+        );
+
+        // check that the end of the range does not expand if there are no new issues
+        status.update(processed_issues, current_height + 1);
+        assert_eq!(
+            status,
+            RescanStatus {
+                newest_issue_height: 6,
+                queued_rescan_range: Some((15, current_height))
+            }
+        );
+
+        // check that the end of the range does expand if there are new issues
+        let more_issues = dummy_issues(vec![
+            (2, 41),
+            (7, current_height + 2), // new: this one has not been processed yet, and it expands the range
+            (3, 41),
+        ]);
+        status.update(more_issues, current_height + 2);
+        assert_eq!(
+            status,
+            RescanStatus {
+                newest_issue_height: 7,
+                queued_rescan_range: Some((15, current_height + 2))
+            }
+        );
+    }
+
+    #[test]
+    fn test_process_blocks() {
+        let mut status = RescanStatus {
+            newest_issue_height: 4,
+            queued_rescan_range: Some((20, 40)),
+        };
+
+        assert_eq!(status.process_blocks(15), Some((20, 34)));
+        assert_eq!(
+            status,
+            RescanStatus {
+                newest_issue_height: 4,
+                queued_rescan_range: Some((35, 40))
+            }
+        );
+
+        assert_eq!(status.process_blocks(15), Some((35, 40)));
+        assert_eq!(
+            status,
+            RescanStatus {
+                newest_issue_height: 4,
+                queued_rescan_range: None
+            }
+        );
+
+        assert_eq!(status.process_blocks(15), None);
+        assert_eq!(
+            status,
+            RescanStatus {
+                newest_issue_height: 4,
+                queued_rescan_range: None
+            }
+        );
+    }
+
+    #[test]
+    fn test_process_blocks_boundary() {
+        let mut status = RescanStatus {
+            newest_issue_height: 4,
+            queued_rescan_range: Some((20, 40)),
+        };
+
+        assert_eq!(status.process_blocks(21), Some((20, 40)));
+        assert_eq!(
+            status,
+            RescanStatus {
+                newest_issue_height: 4,
+                queued_rescan_range: None
+            }
+        );
+
+        assert_eq!(status.process_blocks(15), None);
+    }
+}
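Taken together, the changes to this file make the initial rescan restartable: a `RescanStatus` checkpoint is written after the electrs pass and again after every chunk, so a crash loses at most one chunk of work. On the next startup the flow reduces to the following (sketch only, not code from this patch; `new_issues`, `chunk_size` and the surrounding setup are as in `add_keys_from_past_issue_request` above):

    // load where we left off (Default::default() on the first run)
    let mut status = RescanStatus::get(&vault_id, &db)?;
    // fold in any issue requests created since the last run
    status.update(new_issues, btc_end_height);
    // hand the unscannable (pruned) prefix to electrs...
    let unscannable = status.prune(btc_pruned_start_height);
    // ...then chunk through the rest, checkpointing after every chunk
    while let Some((start, end)) = status.process_blocks(chunk_size) {
        bitcoin_core.rescan_blockchain(start, end).await?;
        status.store(&vault_id, &db)?;
    }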
diff --git a/vault/src/main.rs b/vault/src/main.rs
index 0ea87ecef..272fda403 100644
--- a/vault/src/main.rs
+++ b/vault/src/main.rs
@@ -164,6 +164,12 @@ async fn start() -> Result<(), ServiceError> {
     let (pair, wallet_name) = opts.account_info.get_key_pair()?;
     let signer = InterBtcSigner::new(pair);
 
+    let db_path = opts
+        .vault
+        .db_path
+        .clone()
+        .unwrap_or_else(|| format!("{}.db", wallet_name));
+
     let vault_connection_manager = ConnectionManager::new(
         signer.clone(),
         Some(wallet_name.to_string()),
@@ -173,6 +179,7 @@ async fn start() -> Result<(), ServiceError> {
         opts.monitoring.clone(),
         opts.vault,
         increment_restart_counter,
+        db_path,
     );
 
     if !opts.monitoring.no_prometheus {
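For reference, the value persisted by the `DatabaseConfig` below is plain JSON, so the RocksDB contents stay inspectable with generic tooling. A status record round-trips like this (standalone sketch; the struct is repeated here with the same serde derives as `RescanStatus` in vault/src/issue.rs):

    use serde::{Deserialize, Serialize};

    #[derive(Serialize, Deserialize, Debug, PartialEq)]
    struct RescanStatus {
        newest_issue_height: u32,
        queued_rescan_range: Option<(usize, usize)>,
    }

    fn main() {
        let status = RescanStatus {
            newest_issue_height: 4,
            queued_rescan_range: Some((20, 40)),
        };
        let json = serde_json::to_string(&status).unwrap();
        assert_eq!(json, r#"{"newest_issue_height":4,"queued_rescan_range":[20,40]}"#);
        let back: RescanStatus = serde_json::from_str(&json).unwrap();
        assert_eq!(back, status);
    }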
diff --git a/vault/src/system.rs b/vault/src/system.rs
index 64cf2bffa..84733bdea 100644
--- a/vault/src/system.rs
+++ b/vault/src/system.rs
@@ -19,8 +19,8 @@ use git_version::git_version;
 use runtime::{
     cli::{parse_duration_minutes, parse_duration_ms},
     BtcRelayPallet, CollateralBalancesPallet, CurrencyId, Error as RuntimeError, InterBtcParachain, PrettyPrint,
-    RegisterVaultEvent, StoreMainChainHeaderEvent, TryFromSymbol, UpdateActiveBlockEvent, UtilFuncs, VaultCurrencyPair,
-    VaultId, VaultRegistryPallet,
+    RegisterVaultEvent, RuntimeCurrencyInfo, StoreMainChainHeaderEvent, TryFromSymbol, UpdateActiveBlockEvent,
+    UtilFuncs, VaultCurrencyPair, VaultId, VaultRegistryPallet,
 };
 use service::{wait_or_shutdown, DynBitcoinCoreApi, Error as ServiceError, MonitoringConfig, Service, ShutdownSender};
 use std::{collections::HashMap, pin::Pin, sync::Arc, time::Duration};
@@ -119,6 +119,12 @@ pub struct VaultServiceConfig {
     /// higher inclusion fee estimate.
     #[clap(long)]
     pub auto_rbf: bool,
+
+    /// Path of the caching database. To create a new database, set this to a path
+    /// that does not exist yet, e.g. `${pwd}/myvault.db`. If not set, the path is
+    /// generated from the --keyname argument.
+    #[clap(long)]
+    pub db_path: Option<String>,
 }
 
 async fn active_block_listener(
@@ -166,6 +172,53 @@ pub struct VaultData {
     pub metrics: PerCurrencyMetrics,
 }
 
+#[derive(Clone)]
+pub struct DatabaseConfig {
+    path: String,
+}
+
+impl DatabaseConfig {
+    fn prefixed_key(vault_id: &VaultId, key: &str) -> Result<String, Error> {
+        Ok(format!(
+            "{}-{}-{}-{}",
+            vault_id.account_id.pretty_print(), /* technically not needed since each client should have their own
+                                                 * db, but it doesn't hurt to be safe */
+            vault_id
+                .currencies
+                .collateral
+                .symbol()
+                .map_err(|_| BitcoinError::FailedToConstructWalletName)?,
+            vault_id
+                .currencies
+                .wrapped
+                .symbol()
+                .map_err(|_| BitcoinError::FailedToConstructWalletName)?,
+            key
+        ))
+    }
+
+    pub fn put<V: serde::Serialize>(&self, vault_id: &VaultId, key: &str, value: &V) -> Result<(), Error> {
+        let db = rocksdb::DB::open_default(self.path.clone())?;
+        let key = Self::prefixed_key(vault_id, key)?;
+        db.put(key, serde_json::to_vec(value)?)?;
+        Ok(())
+    }
+
+    pub fn get<V: serde::de::DeserializeOwned>(&self, vault_id: &VaultId, key: &str) -> Result<Option<V>, Error> {
+        let db = rocksdb::DB::open_default(self.path.clone())?;
+        let key = Self::prefixed_key(vault_id, key)?;
+
+        let value = match db.get(key)? {
+            None => return Ok(None),
+            Some(x) => {
+                let value = String::from_utf8(x)?;
+                serde_json::from_str(&value)?
+            }
+        };
+        Ok(value)
+    }
+}
+
 #[derive(Clone)]
 pub struct VaultIdManager {
     vault_data: Arc<RwLock<HashMap<VaultId, VaultData>>>,
@@ -174,6 +227,7 @@ pub struct VaultIdManager {
     // TODO: refactor this
     #[allow(clippy::type_complexity)]
     constructor: Arc<Box<dyn Fn(VaultId) -> Result<DynBitcoinCoreApi, BitcoinError> + Send + Sync>>,
+    db: DatabaseConfig,
 }
 
 impl VaultIdManager {
     pub fn new(
         btc_parachain: InterBtcParachain,
         btc_rpc_master_wallet: DynBitcoinCoreApi,
         constructor: impl Fn(VaultId) -> Result<DynBitcoinCoreApi, BitcoinError> + Send + Sync + 'static,
+        db_path: String,
     ) -> Self {
         Self {
             vault_data: Arc::new(RwLock::new(HashMap::new())),
             constructor: Arc::new(Box::new(constructor)),
             btc_rpc_master_wallet,
             btc_parachain,
+            db: DatabaseConfig { path: db_path },
         }
     }
 
@@ -195,6 +251,7 @@ impl VaultIdManager {
         btc_parachain: InterBtcParachain,
         btc_rpc_master_wallet: DynBitcoinCoreApi,
         map: HashMap<VaultId, DynBitcoinCoreApi>,
+        db_path: &str,
     ) -> Self {
         let vault_data = map
             .into_iter()
@@ -214,6 +271,9 @@ impl VaultIdManager {
             constructor: Arc::new(Box::new(|_| unimplemented!())),
             btc_rpc_master_wallet,
             btc_parachain,
+            db: DatabaseConfig {
+                path: db_path.to_string(),
+            },
         }
     }
 
@@ -253,7 +313,8 @@ impl VaultIdManager {
         }
 
         tracing::info!("Adding keys from past issues...");
-        issue::add_keys_from_past_issue_request(&btc_rpc, &self.btc_parachain, &vault_id).await?;
+
+        issue::add_keys_from_past_issue_request(&btc_rpc, &self.btc_parachain, &vault_id, &self.db).await?;
 
         tracing::info!("Initializing metrics...");
         let metrics = PerCurrencyMetrics::new(&vault_id);
@@ -366,6 +427,7 @@ impl Service for VaultService {
         monitoring_config: MonitoringConfig,
         shutdown: ShutdownSender,
         constructor: Box<dyn Fn(VaultId) -> Result<DynBitcoinCoreApi, BitcoinError> + Send + Sync>,
+        db_path: String,
     ) -> Self {
         VaultService::new(
             btc_parachain,
@@ -374,6 +436,7 @@ impl Service for VaultService {
             monitoring_config,
             shutdown,
             constructor,
+            db_path,
         )
     }
 
@@ -449,6 +512,7 @@ impl VaultService {
         monitoring_config: MonitoringConfig,
         shutdown: ShutdownSender,
         constructor: impl Fn(VaultId) -> Result<DynBitcoinCoreApi, BitcoinError> + Send + Sync + 'static,
+        db_path: String,
     ) -> Self {
         Self {
             btc_parachain: btc_parachain.clone(),
@@ -456,7 +520,7 @@ impl VaultService {
             config,
             monitoring_config,
             shutdown,
-            vault_id_manager: VaultIdManager::new(btc_parachain, btc_rpc_master_wallet, constructor),
+            vault_id_manager: VaultIdManager::new(btc_parachain, btc_rpc_master_wallet, constructor, db_path),
         }
     }
 
diff --git a/vault/tests/vault_integration_tests.rs b/vault/tests/vault_integration_tests.rs
index f90e850bc..1a4c3c617 100644
--- a/vault/tests/vault_integration_tests.rs
+++ b/vault/tests/vault_integration_tests.rs
@@ -63,7 +63,12 @@ async fn test_redeem_succeeds() {
     let btc_rpcs = vec![(vault_id.clone(), btc_rpc.clone())].into_iter().collect();
     let btc_rpc_master_wallet = btc_rpc.clone();
-    let vault_id_manager = VaultIdManager::from_map(vault_provider.clone(), btc_rpc_master_wallet, btc_rpcs);
+    let vault_id_manager = VaultIdManager::from_map(
+        vault_provider.clone(),
+        btc_rpc_master_wallet,
+        btc_rpcs,
+        "test_redeem_succeeds",
+    );
 
     let issue_amount = 100000;
     let vault_collateral =
@@ -126,8 +131,12 @@ async fn test_replace_succeeds() {
     let btc_rpcs = vec![(new_vault_id.clone(), btc_rpc.clone())].into_iter().collect();
     let new_btc_rpc_master_wallet = btc_rpc.clone();
-    let _vault_id_manager =
-        VaultIdManager::from_map(new_vault_provider.clone(), new_btc_rpc_master_wallet, btc_rpcs);
+    let _vault_id_manager = VaultIdManager::from_map(
+        new_vault_provider.clone(),
+        new_btc_rpc_master_wallet,
+        btc_rpcs,
+        "test_replace_succeeds1",
+    );
     let btc_rpcs = vec![
         (old_vault_id.clone(), btc_rpc.clone()),
         (new_vault_id.clone(), btc_rpc.clone()),
     ]
     .into_iter()
     .collect();
     let old_btc_rpc_master_wallet = btc_rpc.clone();
-    let vault_id_manager =
-        VaultIdManager::from_map(old_vault_provider.clone(), old_btc_rpc_master_wallet, btc_rpcs);
+    let vault_id_manager = VaultIdManager::from_map(
+        old_vault_provider.clone(),
+        old_btc_rpc_master_wallet,
+        btc_rpcs,
+        "test_replace_succeeds2",
+    );
 
     let issue_amount = 100000;
     let vault_collateral = get_required_vault_collateral_for_issue(
@@ -303,8 +316,12 @@ async fn test_cancellation_succeeds() {
     let btc_rpcs = vec![(new_vault_id.clone(), btc_rpc.clone())].into_iter().collect();
     let new_btc_rpc_master_wallet = btc_rpc.clone();
-    let vault_id_manager =
-        VaultIdManager::from_map(new_vault_provider.clone(), new_btc_rpc_master_wallet, btc_rpcs);
+    let vault_id_manager = VaultIdManager::from_map(
+        new_vault_provider.clone(),
+        new_btc_rpc_master_wallet,
+        btc_rpcs,
+        "test_cancellation_succeeds",
+    );
 
     let issue_amount = 100000;
     let vault_collateral = get_required_vault_collateral_for_issue(
@@ -565,7 +582,12 @@ async fn test_automatic_issue_execution_succeeds() {
     let btc_rpcs = vec![(vault2_id.clone(), btc_rpc.clone())].into_iter().collect();
     let btc_rpc_master_wallet = btc_rpc.clone();
-    let vault_id_manager = VaultIdManager::from_map(vault2_provider.clone(), btc_rpc_master_wallet, btc_rpcs);
+    let vault_id_manager = VaultIdManager::from_map(
+        vault2_provider.clone(),
+        btc_rpc_master_wallet,
+        btc_rpcs,
+        "test_automatic_issue_execution_succeeds",
+    );
 
     let issue_amount = 100000;
     let vault_collateral =
@@ -658,7 +680,12 @@ async fn test_automatic_issue_execution_succeeds_with_big_transaction() {
     let btc_rpcs = vec![(vault2_id.clone(), btc_rpc.clone())].into_iter().collect();
     let btc_rpc_master_wallet = btc_rpc.clone();
-    let vault_id_manager = VaultIdManager::from_map(vault2_provider.clone(), btc_rpc_master_wallet, btc_rpcs);
+    let vault_id_manager = VaultIdManager::from_map(
+        vault2_provider.clone(),
+        btc_rpc_master_wallet,
+        btc_rpcs,
+        "test_automatic_issue_execution_succeeds_with_big_transaction",
+    );
 
     let issue_amount = 100000;
     let vault_collateral =
@@ -740,7 +767,12 @@ async fn test_execute_open_requests_succeeds() {
     let btc_rpcs = vec![(vault_id.clone(), btc_rpc.clone())].into_iter().collect();
     let btc_rpc_master_wallet = btc_rpc.clone();
-    let vault_id_manager = VaultIdManager::from_map(vault_provider.clone(), btc_rpc_master_wallet, btc_rpcs);
+    let vault_id_manager = VaultIdManager::from_map(
+        vault_provider.clone(),
+        btc_rpc_master_wallet,
+        btc_rpcs,
+        "test_execute_open_requests_succeeds",
+    );
 
     let issue_amount = 100000;
     let vault_collateral =
@@ -1137,7 +1169,12 @@ mod test_with_bitcoind {
     // setup vault id manager
     let btc_rpcs = vec![(vault_id.clone(), btc_rpc.clone())].into_iter().collect();
     let btc_rpc_master_wallet = btc_rpc.clone();
-    let vault_id_manager = VaultIdManager::from_map(vault_provider.clone(), btc_rpc_master_wallet, btc_rpcs);
+    let vault_id_manager = VaultIdManager::from_map(
+        vault_provider.clone(),
+        btc_rpc_master_wallet,
+        btc_rpcs,
+        "test_automatic_rbf_succeeds",
+    );
 
     let issue_amount = 100000;
     let vault_collateral =