wip - wallet - IDB scaffolding
aspect committed Sep 17, 2023
1 parent 3dcd910 commit fe47e79
Showing 4 changed files with 62 additions and 73 deletions.
38 changes: 0 additions & 38 deletions Cargo.lock

Some generated files are not rendered by default.

40 changes: 20 additions & 20 deletions Cargo.toml
@@ -204,31 +204,31 @@ pbkdf2 = { version = "0.12.1" }
# pbkdf2 = { version = "0.11", default-features = false}

# workflow dependencies
workflow-d3 = { version = "0.6.0" }
workflow-nw = { version = "0.6.0" }
workflow-log = { version = "0.6.0" }
workflow-core = { version = "0.6.0" }
workflow-wasm = { version = "0.6.1" }
workflow-dom = { version = "0.6.0" }
workflow-rpc = { version = "0.6.0" }
workflow-node = { version = "0.6.0" }
workflow-store = { version = "0.6.0" }
workflow-terminal = { version = "0.6.0" }
# workflow-d3 = { version = "0.6.0" }
# workflow-nw = { version = "0.6.0" }
# workflow-log = { version = "0.6.0" }
# workflow-core = { version = "0.6.0" }
# workflow-wasm = { version = "0.6.1" }
# workflow-dom = { version = "0.6.0" }
# workflow-rpc = { version = "0.6.0" }
# workflow-node = { version = "0.6.0" }
# workflow-store = { version = "0.6.0" }
# workflow-terminal = { version = "0.6.0" }
nw-sys = "0.1.5"

# if below is enabled, this means that there is an ongoing work
# on the workflow-rs crate. This requires that you clone workflow-rs
# into a sibling folder from https://github.com/workflow-rs/workflow-rs
# workflow-d3 = { path = "../workflow-rs/d3" }
# workflow-nw = { path = "../workflow-rs/nw" }
# workflow-log = { path = "../workflow-rs/log" }
# workflow-core = { path = "../workflow-rs/core" }
# workflow-wasm = { path = "../workflow-rs/wasm" }
# workflow-dom = { path = "../workflow-rs/dom" }
# workflow-rpc = { path = "../workflow-rs/rpc" }
# workflow-node = { path = "../workflow-rs/node" }
# workflow-store = { path = "../workflow-rs/store" }
# workflow-terminal = { path = "../workflow-rs/terminal" }
workflow-d3 = { path = "../workflow-rs/d3" }
workflow-nw = { path = "../workflow-rs/nw" }
workflow-log = { path = "../workflow-rs/log" }
workflow-core = { path = "../workflow-rs/core" }
workflow-wasm = { path = "../workflow-rs/wasm" }
workflow-dom = { path = "../workflow-rs/dom" }
workflow-rpc = { path = "../workflow-rs/rpc" }
workflow-node = { path = "../workflow-rs/node" }
workflow-store = { path = "../workflow-rs/store" }
workflow-terminal = { path = "../workflow-rs/terminal" }
# https://github.com/aspectron/nw-sys
# nw-sys = { path = "../nw-sys" }

9 changes: 9 additions & 0 deletions wallet/core/src/error.rs
@@ -216,6 +216,9 @@ pub enum Error {

#[error("Requested transaction is too heavy")]
GeneratorTransactionIsTooHeavy,

#[error("{0}")]
IdbError(String),
}

impl From<Aborted> for Error {
@@ -230,6 +233,12 @@ impl Error {
}
}

impl From<idb::Error> for Error {
fn from(e: idb::Error) -> Self {
Error::IdbError(e.to_string())
}
}

impl From<chacha20poly1305::Error> for Error {
fn from(e: chacha20poly1305::Error) -> Self {
Error::Chacha20poly1305(e)
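The From<idb::Error> conversion added here lets IndexedDB failures propagate through the wallet's Error type with the ? operator. The sketch below is not part of the commit: it assumes the crate's usual Result<T> alias over this Error, and the open_wallet_db name is purely illustrative.

// Sketch only: Result<T> is assumed to be the wallet crate's alias for
// std::result::Result<T, Error>, with the Error enum extended as above.
async fn open_wallet_db(name: &str) -> Result<idb::Database> {
    // Each idb::Error converts into Error::IdbError(String) via the new
    // From impl, so `?` can be used directly on idb calls.
    let factory = idb::Factory::new()?;
    let open_request = factory.open(name, Some(1))?;
    Ok(open_request.await?)
}
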
48 changes: 33 additions & 15 deletions wallet/core/src/storage/local/transaction/indexdb.rs
@@ -13,9 +13,12 @@ const IDB_TRANSACTION_METADATA_INDEX: &str = "metadata";

pub struct Inner {
known_databases: HashMap<String, HashSet<String>>,
databases: HashMap<String, Mutex<idb::Database>>,
databases: HashMap<String, Arc<Mutex<Sendable<idb::Database>>>>,
}

unsafe impl Sync for Inner {}
unsafe impl Send for Inner {}

pub struct TransactionStore {
inner: Arc<Mutex<Inner>>,
name: String,
@@ -75,24 +78,32 @@ impl TransactionStore {
Ok(())
}

async fn init_or_get_db(&self, binding: &Binding, network_id: &NetworkId) -> Result<MutexGuard<idb::Database>> {
async fn init_or_get_db(&self, binding: &Binding, network_id: &NetworkId) -> Result<Arc<Mutex<Sendable<idb::Database>>>> {
let db_name = self.make_db_name(binding, network_id);

{
let inner = &mut self.inner();

if let Some(db_mutex) = inner.databases.get_mut(&db_name) {
let db = db_mutex.lock().unwrap();
return Ok(db);
if let Some(db_mutex) = inner.databases.get(&db_name) {
return Ok(db_mutex.clone());
}
// inner.databases.get(&db_name).map(|db_mutex| db_mutex.lock().unwrap());

// // if let Some(db_mutex) = inner.databases.get_mut(&db_name) {
// if let Some(db_mutex) = inner.databases.get(&db_name) {
// return Ok(db_mutex.lock().unwrap());
// }
}

// Get a factory instance from global scope
let factory = idb::Factory::new().map_err(|err| format!("Error creating indexed db factory: {}", err))?;

// Create an open request for the database
let db_name_ = db_name.clone();
let (tx, rx) = oneshot();
dispatch(async move {
let mut open_request = factory.open(&db_name, Some(1)).unwrap();
let factory =
idb::Factory::new().map_err(|err| format!("Error creating indexed db factory: {}", err)).expect("factory failure");
let mut open_request = factory.open(&db_name_, Some(1)).unwrap();

// Add an upgrade handler for database
open_request.on_upgrade_needed(|event| {
@@ -115,20 +126,25 @@ impl TransactionStore {
.unwrap();
});

let db_result = open_request.await;
tx.send(db_result).await.expect("Error sending database result oneshot channel");
// let db_result = open_request.await.map_err(|err|err.to_string());
let db_result = open_request.await.map(Sendable::new).map_err(|err| format!("Error opening database: {}", err));
tx.send(Sendable(db_result)).await.expect("Error sending database result oneshot channel");
// tx.send(Sendable(db_result)).await.expect("Error sending database result oneshot channel");
});
let db = rx
let db_result = rx
.recv()
.await
.map_err(|err| Error::Custom(format!("Error opening database recv error oneshot channel: {}", err)))?
.unwrap()
// .unwrap()
.map_err(|err| Error::Custom(format!("Error opening database: {}", err)))?;

let inner = &mut self.inner();
inner.databases.insert(db_name, Mutex::new(db));

let db_mutex = inner.databases.get(&db_name).expect("Error getting database from inner databases hashmap");
let db = db_mutex.lock().unwrap();
let db = Arc::new(Mutex::new(db_result));
inner.databases.insert(db_name, db.clone());
// Sendable
// let db_mutex = inner.databases.get(&db_name).expect("Error getting database from inner databases hashmap");
// let db = db_mutex.lock().unwrap();
Ok(db)
}
}
@@ -144,7 +160,9 @@ impl TransactionRecordStore for TransactionStore {
// }

async fn load_single(&self, binding: &Binding, network_id: &NetworkId, _id: &TransactionId) -> Result<Arc<TransactionRecord>> {
let db = self.init_or_get_db(binding, network_id).await?;
let _db = self.init_or_get_db(binding, network_id).await?;

// let local_db_mutex = db.lock().unwrap();

Err(Error::NotImplemented)
}
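The scaffolding above hinges on a Sendable wrapper: the idb::Database handle is not Send, so it is wrapped before being cached in Arc<Mutex<..>> and sent back through the oneshot channel from the dispatched task. Below is a rough sketch of such a wrapper under a single-threaded wasm32 assumption; it mirrors the names used in the diff rather than a confirmed workflow-rs API, and the cache_entry helper is hypothetical.

use std::sync::{Arc, Mutex};

// Newtype asserting Send/Sync for a value that is only ever touched from the
// single wasm/browser thread. The unsafe impls are sound only under that
// single-threaded assumption.
pub struct Sendable<T>(pub T);

unsafe impl<T> Send for Sendable<T> {}
unsafe impl<T> Sync for Sendable<T> {}

impl<T> Sendable<T> {
    pub fn new(inner: T) -> Self {
        Sendable(inner)
    }
}

// Hypothetical helper mirroring the cache in init_or_get_db: wrap the freshly
// opened handle and share it behind Arc<Mutex<..>> across tasks on one thread.
fn cache_entry<T>(value: T) -> Arc<Mutex<Sendable<T>>> {
    Arc::new(Mutex::new(Sendable::new(value)))
}
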
