From 10980a05a24b5942699c6f4fc8c09debdb84f79b Mon Sep 17 00:00:00 2001 From: Roman Podoliaka Date: Sun, 5 Nov 2023 12:47:36 +0000 Subject: [PATCH 1/2] Update to diesel v2 The changes are trivial: * all methods now require a &mut reference to a Connection instance. Connections could not be shared between threads before, so all the references we had already were exclusive anyway * begin_transaction() now calls the closure with one argument -- a &mut reference to the Connection instance --- Cargo.lock | 64 +++++++++++++++++----------------- Cargo.toml | 2 +- src/storage/sql/mod.rs | 78 ++++++++++++++++++++++++------------------ 3 files changed, 78 insertions(+), 66 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 98453f8..1bcfd71 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -60,7 +60,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn", ] [[package]] @@ -71,7 +71,7 @@ checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn", ] [[package]] @@ -266,32 +266,43 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.38", + "syn", ] [[package]] name = "diesel" -version = "1.4.8" +version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b28135ecf6b7d446b43e27e225622a038cc4e2930a1022f51cdb97ada19b8e4d" +checksum = "2268a214a6f118fce1838edba3d1561cf0e78d8de785475957a580a7f8c69d33" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.4.1", "byteorder", "chrono", "diesel_derives", + "itoa", "pq-sys", "r2d2", ] [[package]] name = "diesel_derives" -version = "1.4.1" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45f5098f628d02a7a0f68ddba586fb61e80edec3bdc1be3b921f4ceec60858d3" +checksum = "ef8337737574f55a468005a83499da720f20c65586241ffea339db9ecdfd2b44" dependencies = [ + 
"diesel_table_macro_syntax", "proc-macro2", "quote", - "syn 1.0.109", + "syn", +] + +[[package]] +name = "diesel_table_macro_syntax" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc5557efc453706fed5e4fa85006fe9817c224c3f480a34c7e5959fd700921c5" +dependencies = [ + "syn", ] [[package]] @@ -709,9 +720,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.149" +version = "0.2.150" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b" +checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c" [[package]] name = "linux-raw-sys" @@ -917,7 +928,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn", ] [[package]] @@ -987,7 +998,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.38", + "syn", ] [[package]] @@ -1063,7 +1074,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn", "version_check", "yansi", ] @@ -1185,7 +1196,7 @@ checksum = "7f7473c2cfcf90008193dd0e3e16599455cb601a9fce322b5bb55de799664925" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn", ] [[package]] @@ -1335,7 +1346,7 @@ dependencies = [ "proc-macro2", "quote", "rocket_http", - "syn 2.0.38", + "syn", "unicode-xid", "version_check", ] @@ -1468,7 +1479,7 @@ checksum = "67c5609f394e5c2bd7fc51efda478004ea80ef42fee983d5c67a65e34f32c0e3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn", ] [[package]] @@ -1597,17 +1608,6 @@ dependencies = [ "loom", ] -[[package]] -name = "syn" -version = "1.0.109" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - [[package]] name = "syn" version = "2.0.38" @@ -1733,7 +1733,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn", ] [[package]] @@ -1830,7 +1830,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn", ] [[package]] @@ -2010,7 +2010,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.38", + "syn", "wasm-bindgen-shared", ] @@ -2044,7 +2044,7 @@ checksum = "c5353b8dab669f5e10f5bd76df26a9360c748f054f862ff5f3f8aae0c7fb3907" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn", "wasm-bindgen-backend", "wasm-bindgen-shared", ] diff --git a/Cargo.toml b/Cargo.toml index 2626932..20cf39b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,7 +20,7 @@ debug = true [dependencies] chrono = { version = "0.4.19", features = ["serde"] } -diesel = { version = "1.4.5", features = ["chrono", "postgres", "r2d2"] } +diesel = { version = "2.1.0", features = ["chrono", "postgres", "r2d2"] } jsonwebtoken = "7.2.0" rand = "0.7.3" reqwest = { version = "0.11.2", features = ["json"] } diff --git a/src/storage/sql/mod.rs b/src/storage/sql/mod.rs index d33c769..58649d6 100644 --- a/src/storage/sql/mod.rs +++ b/src/storage/sql/mod.rs @@ -24,7 +24,11 @@ impl SqlStorage { Ok(Self { pool }) } - fn get_snippet(&self, conn: &PgConnection, id: &str) -> Result<(i32, Snippet), StorageError> { + fn get_snippet( + &self, + conn: &mut PgConnection, + id: &str, + ) -> Result<(i32, Snippet), StorageError> { let result = snippets::table .filter(snippets::slug.eq(id)) .get_result::(conn); @@ -44,7 +48,11 @@ impl SqlStorage { Ok((snippet.id, Snippet::from((snippet, changesets, tags)))) } - fn insert_snippet(&self, conn: &PgConnection, snippet: &Snippet) -> Result { 
+ fn insert_snippet( + &self, + conn: &mut PgConnection, + snippet: &Snippet, + ) -> Result { let now = chrono::Utc::now(); let result = diesel::insert_into(snippets::table) .values(( @@ -68,7 +76,11 @@ impl SqlStorage { } } - fn update_snippet(&self, conn: &PgConnection, snippet: &Snippet) -> Result { + fn update_snippet( + &self, + conn: &mut PgConnection, + snippet: &Snippet, + ) -> Result { let now = chrono::Utc::now(); let result = diesel::update(snippets::table.filter(snippets::slug.eq(&snippet.id))) .set(( @@ -90,7 +102,7 @@ impl SqlStorage { fn upsert_changesets( &self, - conn: &PgConnection, + conn: &mut PgConnection, snippet_id: i32, snippet: &Snippet, ) -> Result<(), StorageError> { @@ -123,7 +135,7 @@ impl SqlStorage { fn upsert_tags( &self, - conn: &PgConnection, + conn: &mut PgConnection, snippet_id: i32, snippet: &Snippet, ) -> Result<(), StorageError> { @@ -143,7 +155,7 @@ impl SqlStorage { fn trim_removed_tags( &self, - conn: &PgConnection, + conn: &mut PgConnection, snippet_id: i32, snippet: &Snippet, ) -> Result<(), StorageError> { @@ -162,26 +174,26 @@ impl SqlStorage { impl Storage for SqlStorage { fn create(&self, snippet: &Snippet) -> Result { - let conn = self.pool.get()?; - conn.transaction::<_, StorageError, _>(|| { + let mut conn = self.pool.get()?; + conn.transaction::<_, StorageError, _>(|conn| { // insert the new snippet row first to get the generated primary key - let snippet_id = self.insert_snippet(&conn, snippet)?; + let snippet_id = self.insert_snippet(conn, snippet)?; // insert the associated changesets - self.upsert_changesets(&conn, snippet_id, snippet)?; + self.upsert_changesets(conn, snippet_id, snippet)?; // insert the associated tags - self.upsert_tags(&conn, snippet_id, snippet)?; + self.upsert_tags(conn, snippet_id, snippet)?; Ok(()) })?; // reconstruct the created snippet from the state persisted to the database - let (_, created) = self.get_snippet(&conn, &snippet.id)?; + let (_, created) = self.get_snippet(&mut conn, 
&snippet.id)?; Ok(created) } fn list(&self, criteria: ListSnippetsQuery) -> Result, StorageError> { - let conn = self.pool.get()?; - conn.transaction::<_, StorageError, _>(|| { + let mut conn = self.pool.get()?; + conn.transaction::<_, StorageError, _>(|conn| { let mut query = snippets::table.into_boxed(); // Filters @@ -205,7 +217,7 @@ impl Storage for SqlStorage { // expected in tests and, potentially, in snippets imported from // Mongo that have second precision if let Some(marker) = criteria.pagination.marker { - let (marker_internal_id, marker) = self.get_snippet(&conn, &marker)?; + let (marker_internal_id, marker) = self.get_snippet(conn, &marker)?; if let Some(marker_created_at) = marker.created_at { query = match criteria.pagination.direction { Direction::Desc => query @@ -242,25 +254,25 @@ impl Storage for SqlStorage { } query = query.limit(criteria.pagination.limit as i64); - let snippets = query.get_results::(&conn)?; + let snippets = query.get_results::(conn)?; let snippet_ids = snippets .iter() .map(|snippet| snippet.id) .collect::>(); let changesets = changesets::table .filter(changesets::snippet_id.eq_any(&snippet_ids)) - .get_results::(&conn)?; + .get_results::(conn)?; let tags = tags::table .filter(tags::snippet_id.eq_any(&snippet_ids)) - .get_results::(&conn)?; + .get_results::(conn)?; Ok(models::combine_rows(snippets, changesets, tags)) }) } fn get(&self, id: &str) -> Result { - let conn = self.pool.get()?; - let (_, snippet) = self.get_snippet(&conn, id)?; + let mut conn = self.pool.get()?; + let (_, snippet) = self.get_snippet(&mut conn, id)?; Ok(snippet) } @@ -272,21 +284,21 @@ impl Storage for SqlStorage { Ok(persisted_state) } else { // otherwise, potentially update the title and the syntax - let conn = self.pool.get()?; - conn.transaction::<_, StorageError, _>(|| { - let snippet_id = self.update_snippet(&conn, snippet)?; + let mut conn = self.pool.get()?; + conn.transaction::<_, StorageError, _>(|conn| { + let snippet_id = 
self.update_snippet(conn, snippet)?; // insert new changesets and tags - self.upsert_changesets(&conn, snippet_id, snippet)?; - self.upsert_tags(&conn, snippet_id, snippet)?; + self.upsert_changesets(conn, snippet_id, snippet)?; + self.upsert_tags(conn, snippet_id, snippet)?; // and delete the removed tags - self.trim_removed_tags(&conn, snippet_id, snippet)?; + self.trim_removed_tags(conn, snippet_id, snippet)?; Ok(()) })?; // reconstruct the updated snippet from the state persisted to the database // (e.g. so that updated_at fields are correctly populated) - let (_, updated) = self.get_snippet(&conn, &snippet.id)?; + let (_, updated) = self.get_snippet(&mut conn, &snippet.id)?; Ok(updated) } } @@ -294,9 +306,9 @@ impl Storage for SqlStorage { fn delete(&self, id: &str) -> Result<(), StorageError> { // CASCADE on foreign keys will take care of deleting associated changesets and // tags - let conn = self.pool.get()?; + let mut conn = self.pool.get()?; let deleted_rows = - diesel::delete(snippets::table.filter(snippets::slug.eq(id))).execute(&conn)?; + diesel::delete(snippets::table.filter(snippets::slug.eq(id))).execute(&mut conn)?; if deleted_rows == 0 { Err(StorageError::NotFound { id: id.to_owned() }) } else { @@ -371,7 +383,7 @@ mod tests { .expect("Failed to build a db connection pool"); { - let conn: PooledConnection> = + let mut conn: PooledConnection> = pool.get().expect("Failed to establish a db connection"); // start a db transaction that will never be committed. 
All @@ -384,13 +396,13 @@ mod tests { // drop all existing rows in the very beginning of the // transaction, so that tests always start with an empty db diesel::delete(tags::table) - .execute(&conn) + .execute(&mut conn) .expect("could not delete tags"); diesel::delete(changesets::table) - .execute(&conn) + .execute(&mut conn) .expect("could not delete changesets"); diesel::delete(snippets::table) - .execute(&conn) + .execute(&mut conn) .expect("could not delete snippets"); // return the connection with an open transaction back to the From 18048b812a9b5c5d84992c99c38271b728ecd2db Mon Sep 17 00:00:00 2001 From: Roman Podoliaka Date: Sun, 5 Nov 2023 17:24:51 +0000 Subject: [PATCH 2/2] Migrate to diesel-async Now that we are using an asynchronous web framework and routes, we want to get rid of blocking database calls. Diesel is synchronous, but there is this diesel-async extension that we can use, so let's migrate to that. This is mostly straightforward as diesel-async provides direct replacements for Diesel APIs, but we still have to put async everywhere and play by Future's rules. The most notable change is a different connection pool implementation, as diesel-async does not support the one we used before. The API is almost the same, but the default max pool size would probably be insufficient, so we can override that. 
--- Cargo.lock | 287 ++++++++++++++++++++++++++++--- Cargo.toml | 3 +- src/routes/snippets.rs | 17 +- src/storage/errors.rs | 2 +- src/storage/mod.rs | 11 +- src/storage/sql/mod.rs | 378 ++++++++++++++++++++++++----------------- 6 files changed, 505 insertions(+), 193 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1bcfd71..9e174c0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -146,6 +146,15 @@ version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + [[package]] name = "bumpalo" version = "3.14.0" @@ -227,6 +236,44 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" +[[package]] +name = "cpufeatures" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce420fe07aecd3e67c5f910618fe65e94158f6dcc0adf44e00d69ce2bdfe0fd0" +dependencies = [ + "libc", +] + +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "deadpool" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "421fe0f90f2ab22016f32a9881be5134fdd71c65298917084b0c7477cbc3856e" +dependencies = [ + "async-trait", + "deadpool-runtime", + "num_cpus", + "retain_mut", + "tokio", +] + +[[package]] +name = "deadpool-runtime" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"63dfa964fe2a66f3fde91fc70b267fe193d822c7e603e2a675a49a7f46ad3f49" + [[package]] name = "deranged" version = "0.3.9" @@ -280,8 +327,21 @@ dependencies = [ "chrono", "diesel_derives", "itoa", - "pq-sys", - "r2d2", +] + +[[package]] +name = "diesel-async" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acada1517534c92d3f382217b485db8a8638f111b0e3f2a2a8e26165050f77be" +dependencies = [ + "async-trait", + "deadpool", + "diesel", + "futures-util", + "scoped-futures", + "tokio", + "tokio-postgres", ] [[package]] @@ -305,6 +365,17 @@ dependencies = [ "syn", ] +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "crypto-common", + "subtle", +] + [[package]] name = "either" version = "1.9.0" @@ -336,6 +407,12 @@ dependencies = [ "windows-sys", ] +[[package]] +name = "fallible-iterator" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" + [[package]] name = "fastrand" version = "2.0.1" @@ -356,6 +433,12 @@ dependencies = [ "version_check", ] +[[package]] +name = "finl_unicode" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fcfdc7a0362c9f4444381a9e697c79d435fe65b52a37466fc2c1184cee9edc6" + [[package]] name = "fnv" version = "1.0.7" @@ -422,6 +505,17 @@ version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa" +[[package]] +name = "futures-macro" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = 
"futures-sink" version = "0.3.29" @@ -443,6 +537,7 @@ dependencies = [ "futures-channel", "futures-core", "futures-io", + "futures-macro", "futures-sink", "futures-task", "memchr", @@ -464,6 +559,16 @@ dependencies = [ "windows", ] +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + [[package]] name = "getrandom" version = "0.1.16" @@ -535,6 +640,15 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest", +] + [[package]] name = "http" version = "0.2.9" @@ -770,6 +884,16 @@ dependencies = [ "regex-automata 0.1.10", ] +[[package]] +name = "md-5" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" +dependencies = [ + "cfg-if", + "digest", +] + [[package]] name = "memchr" version = "2.6.4" @@ -1018,6 +1142,24 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" +[[package]] +name = "phf" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc" +dependencies = [ + "phf_shared", +] + +[[package]] +name = "phf_shared" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b" +dependencies = [ + "siphasher", +] + 
[[package]] name = "pin-project-lite" version = "0.2.13" @@ -1036,6 +1178,35 @@ version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" +[[package]] +name = "postgres-protocol" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49b6c5ef183cd3ab4ba005f1ca64c21e8bd97ce4699cfea9e8d9a2c4958ca520" +dependencies = [ + "base64 0.21.5", + "byteorder", + "bytes", + "fallible-iterator", + "hmac", + "md-5", + "memchr", + "rand 0.8.5", + "sha2", + "stringprep", +] + +[[package]] +name = "postgres-types" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d2234cdee9408b523530a9b6d2d6b373d1db34f6a8e51dc03ded1828d7fb67c" +dependencies = [ + "bytes", + "fallible-iterator", + "postgres-protocol", +] + [[package]] name = "powerfmt" version = "0.2.0" @@ -1048,15 +1219,6 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" -[[package]] -name = "pq-sys" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31c0052426df997c0cbd30789eb44ca097e3541717a7b8fa36b1c464ee7edebd" -dependencies = [ - "vcpkg", -] - [[package]] name = "proc-macro2" version = "1.0.69" @@ -1088,17 +1250,6 @@ dependencies = [ "proc-macro2", ] -[[package]] -name = "r2d2" -version = "0.8.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51de85fb3fb6524929c8a2eb85e6b6d363de4e8c48f9e2c2eac4944abc181c93" -dependencies = [ - "log", - "parking_lot", - "scheduled-thread-pool", -] - [[package]] name = "rand" version = "0.7.3" @@ -1281,6 +1432,12 @@ dependencies = [ "winreg", ] +[[package]] +name = "retain_mut" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"4389f1d5789befaf6029ebd9f7dac4af7f7e3d61b69d4f30e2ac02b57e7712b0" + [[package]] name = "ring" version = "0.16.20" @@ -1419,12 +1576,13 @@ dependencies = [ ] [[package]] -name = "scheduled-thread-pool" -version = "0.2.7" +name = "scoped-futures" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19" +checksum = "b1473e24c637950c9bd38763220bea91ec3e095a89f672bbd7a10d03e77ba467" dependencies = [ - "parking_lot", + "cfg-if", + "pin-utils", ] [[package]] @@ -1514,6 +1672,17 @@ dependencies = [ "serde", ] +[[package]] +name = "sha2" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + [[package]] name = "sharded-slab" version = "0.1.7" @@ -1543,6 +1712,12 @@ dependencies = [ "num-traits", ] +[[package]] +name = "siphasher" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" + [[package]] name = "slab" version = "0.4.9" @@ -1608,6 +1783,23 @@ dependencies = [ "loom", ] +[[package]] +name = "stringprep" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb41d74e231a107a1b4ee36bd1214b11285b77768d2e3824aedafa988fd36ee6" +dependencies = [ + "finl_unicode", + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "subtle" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" + [[package]] name = "syn" version = "2.0.38" @@ -1746,6 +1938,32 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-postgres" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d340244b32d920260ae7448cb72b6e238bddc3d4f7603394e7dd46ed8e48f5b8" +dependencies = [ + "async-trait", + "byteorder", + "bytes", + "fallible-iterator", + "futures-channel", + "futures-util", + "log", + "parking_lot", + "percent-encoding", + "phf", + "pin-project-lite", + "postgres-protocol", + "postgres-types", + "rand 0.8.5", + "socket2 0.5.5", + "tokio", + "tokio-util", + "whoami", +] + [[package]] name = "tokio-stream" version = "0.1.14" @@ -1878,6 +2096,12 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" +[[package]] +name = "typenum" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" + [[package]] name = "ubyte" version = "0.10.4" @@ -2065,6 +2289,16 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "whoami" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22fc3756b8a9133049b26c7f61ab35416c130e8c09b660f5b3958b446f52cc50" +dependencies = [ + "wasm-bindgen", + "web-sys", +] + [[package]] name = "winapi" version = "0.3.9" @@ -2196,6 +2430,7 @@ version = "5.0.0" dependencies = [ "chrono", "diesel", + "diesel-async", "jsonwebtoken", "percent-encoding", "rand 0.7.3", diff --git a/Cargo.toml b/Cargo.toml index 20cf39b..48b2d14 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,7 +20,8 @@ debug = true [dependencies] chrono = { version = "0.4.19", features = ["serde"] } -diesel = { version = "2.1.0", features = ["chrono", "postgres", "r2d2"] } +diesel = { version = "2.1.0", features = ["chrono"] } +diesel-async = { version = "0.4.1", features = ["deadpool", "postgres"] } jsonwebtoken = "7.2.0" rand = "0.7.3" reqwest = { version = "0.11.2", features = ["json"] } diff --git a/src/routes/snippets.rs b/src/routes/snippets.rs index 8ed19fa..2ffa3c2 100644 --- a/src/routes/snippets.rs +++ 
b/src/routes/snippets.rs @@ -15,12 +15,12 @@ use crate::web::{ WithHttpHeaders, }; -fn create_snippet_impl( +async fn create_snippet_impl( storage: &dyn Storage, snippet: &Snippet, base_path: &str, ) -> Result>, ApiError> { - let new_snippet = storage.create(snippet)?; + let new_snippet = storage.create(snippet).await?; let location = [base_path, new_snippet.id.as_str()].join("/"); Ok(Created::new(location).body(Output(new_snippet))) @@ -141,7 +141,7 @@ pub async fn create_snippet( _user: BearerAuth, ) -> Result>, ApiError> { let snippet = Snippet::try_from((config.inner(), body?.0))?; - create_snippet_impl(storage.as_ref(), &snippet, origin.path().as_str()) + create_snippet_impl(storage.as_ref(), &snippet, origin.path().as_str()).await } fn split_marker(mut snippets: Vec, limit: usize) -> (Option, Vec) { @@ -181,7 +181,7 @@ pub async fn list_snippets<'h>( criteria.pagination.limit = limit + 1; criteria.pagination.marker = marker; - let snippets = storage.list(criteria.clone())?; + let snippets = storage.list(criteria.clone()).await?; let mut prev_needed = false; let (next_marker, snippets) = split_marker(snippets, limit); let prev_marker = if criteria.pagination.marker.is_some() && !snippets.is_empty() { @@ -189,7 +189,7 @@ pub async fn list_snippets<'h>( // but to issue the query one more time into opposite direction. 
criteria.pagination.direction = Direction::Asc; criteria.pagination.marker = Some(snippets[0].id.to_owned()); - let prev_snippets = storage.list(criteria)?; + let prev_snippets = storage.list(criteria).await?; prev_needed = !prev_snippets.is_empty(); prev_snippets.get(limit).map(|m| m.id.to_owned()) @@ -258,7 +258,7 @@ pub async fn import_snippet( let base_path = path .strip_suffix("/import") .ok_or_else(|| ApiError::InternalError(format!("Invalid URI path: {}", path)))?; - create_snippet_impl(storage.as_ref(), &snippet, base_path.as_str()) + create_snippet_impl(storage.as_ref(), &snippet, base_path.as_str()).await } #[get("/snippets/", format = "text/plain", rank = 1)] @@ -273,7 +273,8 @@ pub async fn get_raw_snippet( _not_any: DoNotAcceptAny, ) -> Result { Ok(storage - .get(&id)? + .get(&id) + .await? .changesets .into_iter() .last() @@ -288,5 +289,5 @@ pub async fn get_snippet( _content_type: &NegotiatedContentType, _user: BearerAuth, ) -> Result, ApiError> { - Ok(Output(storage.get(&id)?)) + Ok(Output(storage.get(&id).await?)) } diff --git a/src/storage/errors.rs b/src/storage/errors.rs index 53eedc0..e810134 100644 --- a/src/storage/errors.rs +++ b/src/storage/errors.rs @@ -10,7 +10,7 @@ pub enum StorageError { /// Snippet with this id can't be found NotFound { id: String }, /// All other errors that can't be handled by the storage layer - InternalError(Box), + InternalError(Box), } impl fmt::Display for StorageError { diff --git a/src/storage/mod.rs b/src/storage/mod.rs index 7254c74..32159a0 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -10,21 +10,22 @@ pub use sql::SqlStorage; /// /// Types implementing this trait are required to be both Send and Sync, so /// that their instances can be safely shared between multiple threads. +#[rocket::async_trait] pub trait Storage: Send + Sync { /// Save the state of the given snippet to the persistent storage. 
- fn create(&self, snippet: &Snippet) -> Result; + async fn create(&self, snippet: &Snippet) -> Result; /// Returns a list of snippets that satisfy the given criteria. - fn list(&self, criteria: ListSnippetsQuery) -> Result, StorageError>; + async fn list(&self, criteria: ListSnippetsQuery) -> Result, StorageError>; /// Returns the snippet uniquely identified by a given id (a slug or a /// legacy numeric id) - fn get(&self, id: &str) -> Result; + async fn get(&self, id: &str) -> Result; /// Update the state of the given snippet in the persistent storage - fn update(&self, snippet: &Snippet) -> Result; + async fn update(&self, snippet: &Snippet) -> Result; /// Delete the snippet uniquely identified by a given id (a slug or a legacy /// numeric id) - fn delete(&self, id: &str) -> Result<(), StorageError>; + async fn delete(&self, id: &str) -> Result<(), StorageError>; } diff --git a/src/storage/sql/mod.rs b/src/storage/sql/mod.rs index 58649d6..cb359a4 100644 --- a/src/storage/sql/mod.rs +++ b/src/storage/sql/mod.rs @@ -5,33 +5,42 @@ use std::convert::From; use diesel::pg::upsert; use diesel::prelude::*; -use diesel::r2d2::{ConnectionManager, Pool, PoolError}; use diesel::result::{DatabaseErrorKind, Error::DatabaseError, Error::NotFound}; +use diesel_async::pooled_connection::deadpool::{BuildError, Pool, PoolError}; +use diesel_async::pooled_connection::AsyncDieselConnectionManager; +use diesel_async::scoped_futures::ScopedFutureExt; +use diesel_async::{AsyncConnection, AsyncPgConnection, RunQueryDsl}; use super::{errors::StorageError, Direction, ListSnippetsQuery, Snippet, Storage}; use schema::{changesets, snippets, tags}; +// deadpool's default pool size is # of CPUs * 4, which might be too low. Instead, +// let's default to PostgreSQL's max_connections value (100) and reserve a small +// buffer for "admin" connections (like periodic database backups or local sessions). 
+const MAX_POOL_SIZE: usize = 96; + /// A Storage implementation which persists snippets' data in a SQL database. pub struct SqlStorage { - pool: Pool>, + pool: Pool, } impl SqlStorage { pub fn new(database_url: &str) -> Result { - let manager = ConnectionManager::new(database_url); - let pool = Pool::builder().build(manager)?; + let manager = AsyncDieselConnectionManager::new(database_url); + let pool = Pool::builder(manager).max_size(MAX_POOL_SIZE).build()?; Ok(Self { pool }) } - fn get_snippet( + async fn get_snippet( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, id: &str, ) -> Result<(i32, Snippet), StorageError> { let result = snippets::table .filter(snippets::slug.eq(id)) - .get_result::(conn); + .get_result::(conn) + .await; let snippet = match result { Ok(snippet) => snippet, Err(diesel::NotFound) => return Err(StorageError::NotFound { id: id.to_owned() }), @@ -40,17 +49,19 @@ impl SqlStorage { let changesets = changesets::table .filter(changesets::snippet_id.eq(snippet.id)) - .get_results::(conn)?; + .get_results::(conn) + .await?; let tags = tags::table .filter(tags::snippet_id.eq(snippet.id)) - .get_results::(conn)?; + .get_results::(conn) + .await?; Ok((snippet.id, Snippet::from((snippet, changesets, tags)))) } - fn insert_snippet( + async fn insert_snippet( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, snippet: &Snippet, ) -> Result { let now = chrono::Utc::now(); @@ -63,7 +74,8 @@ impl SqlStorage { snippets::updated_at.eq(snippet.updated_at.unwrap_or(now)), )) .returning(snippets::id) - .get_result::(conn); + .get_result::(conn) + .await; match result { Ok(snippet_id) => Ok(snippet_id), @@ -76,9 +88,9 @@ impl SqlStorage { } } - fn update_snippet( + async fn update_snippet( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, snippet: &Snippet, ) -> Result { let now = chrono::Utc::now(); @@ -89,7 +101,8 @@ impl SqlStorage { snippets::updated_at.eq(snippet.updated_at.unwrap_or(now)), )) 
.returning(snippets::id) - .get_result::(conn); + .get_result::(conn) + .await; match result { Ok(snippet_id) => Ok(snippet_id), @@ -100,9 +113,9 @@ impl SqlStorage { } } - fn upsert_changesets( + async fn upsert_changesets( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, snippet_id: i32, snippet: &Snippet, ) -> Result<(), StorageError> { @@ -128,14 +141,15 @@ impl SqlStorage { changesets::content.eq(upsert::excluded(changesets::content)), changesets::updated_at.eq(snippet.updated_at.unwrap_or(now)), )) - .execute(conn)?; + .execute(conn) + .await?; Ok(()) } - fn upsert_tags( + async fn upsert_tags( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, snippet_id: i32, snippet: &Snippet, ) -> Result<(), StorageError> { @@ -148,14 +162,15 @@ impl SqlStorage { .collect::>(), ) .on_conflict_do_nothing() - .execute(conn)?; + .execute(conn) + .await?; Ok(()) } - fn trim_removed_tags( + async fn trim_removed_tags( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, snippet_id: i32, snippet: &Snippet, ) -> Result<(), StorageError> { @@ -166,149 +181,166 @@ impl SqlStorage { .and(tags::value.ne_all(&snippet.tags)), ), ) - .execute(conn)?; + .execute(conn) + .await?; Ok(()) } } +#[rocket::async_trait] impl Storage for SqlStorage { - fn create(&self, snippet: &Snippet) -> Result { - let mut conn = self.pool.get()?; + async fn create(&self, snippet: &Snippet) -> Result { + let mut conn = self.pool.get().await?; conn.transaction::<_, StorageError, _>(|conn| { - // insert the new snippet row first to get the generated primary key - let snippet_id = self.insert_snippet(conn, snippet)?; - // insert the associated changesets - self.upsert_changesets(conn, snippet_id, snippet)?; - // insert the associated tags - self.upsert_tags(conn, snippet_id, snippet)?; + async { + // insert the new snippet row first to get the generated primary key + let snippet_id = self.insert_snippet(conn, snippet).await?; + // insert the associated changesets + 
self.upsert_changesets(conn, snippet_id, snippet).await?; + // insert the associated tags + self.upsert_tags(conn, snippet_id, snippet).await?; - Ok(()) - })?; + Ok(()) + } + .scope_boxed() + }) + .await?; // reconstruct the created snippet from the state persisted to the database - let (_, created) = self.get_snippet(&mut conn, &snippet.id)?; + let (_, created) = self.get_snippet(&mut conn, &snippet.id).await?; Ok(created) } - fn list(&self, criteria: ListSnippetsQuery) -> Result, StorageError> { - let mut conn = self.pool.get()?; + async fn list(&self, criteria: ListSnippetsQuery) -> Result, StorageError> { + let mut conn = self.pool.get().await?; conn.transaction::<_, StorageError, _>(|conn| { - let mut query = snippets::table.into_boxed(); + async { + let mut query = snippets::table.into_boxed(); - // Filters - if let Some(title) = criteria.title { - query = query.filter(snippets::title.eq(title)); - } - if let Some(syntax) = criteria.syntax { - query = query.filter(snippets::syntax.eq(syntax)); - } - if let Some(tags) = criteria.tags { - let snippet_ids = tags::table - .select(tags::snippet_id) - .filter(tags::value.eq_any(tags)); + // Filters + if let Some(title) = criteria.title { + query = query.filter(snippets::title.eq(title)); + } + if let Some(syntax) = criteria.syntax { + query = query.filter(snippets::syntax.eq(syntax)); + } + if let Some(tags) = criteria.tags { + let snippet_ids = tags::table + .select(tags::snippet_id) + .filter(tags::value.eq_any(tags)); - query = query.filter(snippets::id.eq_any(snippet_ids)); - } + query = query.filter(snippets::id.eq_any(snippet_ids)); + } - // Pagination. marker_internal_id is used to resolve the ties because the value - // of created_at is not guaranteed to be unique. 
In practice, we use - // microsecond precision for datetime fields, so duplicates are only - // expected in tests and, potentially, in snippets imported from - // Mongo that have second precision - if let Some(marker) = criteria.pagination.marker { - let (marker_internal_id, marker) = self.get_snippet(conn, &marker)?; - if let Some(marker_created_at) = marker.created_at { + // Pagination. marker_internal_id is used to resolve the ties because the value + // of created_at is not guaranteed to be unique. In practice, we use + // microsecond precision for datetime fields, so duplicates are only + // expected in tests and, potentially, in snippets imported from + // Mongo that have second precision + if let Some(marker) = criteria.pagination.marker { + let (marker_internal_id, marker) = self.get_snippet(conn, &marker).await?; + if let Some(marker_created_at) = marker.created_at { + query = match criteria.pagination.direction { + Direction::Desc => query + .filter( + snippets::created_at.lt(marker_created_at).or( + snippets::created_at + .eq(marker_created_at) + .and(snippets::id.lt(marker_internal_id)), + ), + ) + .order_by(snippets::created_at.desc()) + .then_order_by(snippets::id.desc()), + Direction::Asc => query + .filter( + snippets::created_at.gt(marker_created_at).or( + snippets::created_at + .eq(marker_created_at) + .and(snippets::id.gt(marker_internal_id)), + ), + ) + .order_by(snippets::created_at.asc()) + .then_order_by(snippets::id.asc()), + }; + } + } else { query = match criteria.pagination.direction { Direction::Desc => query - .filter( - snippets::created_at - .lt(marker_created_at) - .or(snippets::created_at - .eq(marker_created_at) - .and(snippets::id.lt(marker_internal_id))), - ) .order_by(snippets::created_at.desc()) .then_order_by(snippets::id.desc()), Direction::Asc => query - .filter( - snippets::created_at - .gt(marker_created_at) - .or(snippets::created_at - .eq(marker_created_at) - .and(snippets::id.gt(marker_internal_id))), - ) 
.order_by(snippets::created_at.asc()) .then_order_by(snippets::id.asc()), }; } - } else { - query = match criteria.pagination.direction { - Direction::Desc => query - .order_by(snippets::created_at.desc()) - .then_order_by(snippets::id.desc()), - Direction::Asc => query - .order_by(snippets::created_at.asc()) - .then_order_by(snippets::id.asc()), - }; + query = query.limit(criteria.pagination.limit as i64); + + let snippets = query.get_results::(conn).await?; + let snippet_ids = snippets + .iter() + .map(|snippet| snippet.id) + .collect::>(); + let changesets = changesets::table + .filter(changesets::snippet_id.eq_any(&snippet_ids)) + .get_results::(conn) + .await?; + let tags = tags::table + .filter(tags::snippet_id.eq_any(&snippet_ids)) + .get_results::(conn) + .await?; + + Ok(models::combine_rows(snippets, changesets, tags)) } - query = query.limit(criteria.pagination.limit as i64); - - let snippets = query.get_results::(conn)?; - let snippet_ids = snippets - .iter() - .map(|snippet| snippet.id) - .collect::>(); - let changesets = changesets::table - .filter(changesets::snippet_id.eq_any(&snippet_ids)) - .get_results::(conn)?; - let tags = tags::table - .filter(tags::snippet_id.eq_any(&snippet_ids)) - .get_results::(conn)?; - - Ok(models::combine_rows(snippets, changesets, tags)) + .scope_boxed() }) + .await } - fn get(&self, id: &str) -> Result { - let mut conn = self.pool.get()?; - let (_, snippet) = self.get_snippet(&mut conn, id)?; + async fn get(&self, id: &str) -> Result { + let mut conn = self.pool.get().await?; + let (_, snippet) = self.get_snippet(&mut conn, id).await?; Ok(snippet) } - fn update(&self, snippet: &Snippet) -> Result { + async fn update(&self, snippet: &Snippet) -> Result { // load the snippet from the db to check if we need to update anything - let persisted_state = self.get(&snippet.id)?; + let persisted_state = self.get(&snippet.id).await?; if persisted_state == *snippet { // if not, simply return the current state Ok(persisted_state) } 
else { // otherwise, potentially update the title and the syntax - let mut conn = self.pool.get()?; + let mut conn = self.pool.get().await?; conn.transaction::<_, StorageError, _>(|conn| { - let snippet_id = self.update_snippet(conn, snippet)?; - // insert new changesets and tags - self.upsert_changesets(conn, snippet_id, snippet)?; - self.upsert_tags(conn, snippet_id, snippet)?; - // and delete the removed tags - self.trim_removed_tags(conn, snippet_id, snippet)?; - - Ok(()) - })?; + async { + let snippet_id = self.update_snippet(conn, snippet).await?; + // insert new changesets and tags + self.upsert_changesets(conn, snippet_id, snippet).await?; + self.upsert_tags(conn, snippet_id, snippet).await?; + // and delete the removed tags + self.trim_removed_tags(conn, snippet_id, snippet).await?; + + Ok(()) + } + .scope_boxed() + }) + .await?; // reconstruct the updated snippet from the state persisted to the database // (e.g. so that updated_at fields are correctly populated) - let (_, updated) = self.get_snippet(&mut conn, &snippet.id)?; + let (_, updated) = self.get_snippet(&mut conn, &snippet.id).await?; Ok(updated) } } - fn delete(&self, id: &str) -> Result<(), StorageError> { + async fn delete(&self, id: &str) -> Result<(), StorageError> { // CASCADE on foreign keys will take care of deleting associated changesets and // tags - let mut conn = self.pool.get()?; - let deleted_rows = - diesel::delete(snippets::table.filter(snippets::slug.eq(id))).execute(&mut conn)?; + let mut conn = self.pool.get().await?; + let deleted_rows = diesel::delete(snippets::table.filter(snippets::slug.eq(id))) + .execute(&mut conn) + .await?; if deleted_rows == 0 { Err(StorageError::NotFound { id: id.to_owned() }) } else { @@ -337,11 +369,16 @@ impl From for StorageError { } } +impl From for StorageError { + fn from(error: BuildError) -> Self { + StorageError::InternalError(Box::new(error)) + } +} + #[cfg(test)] mod tests { use std::collections::HashSet; - - use 
diesel::r2d2::PooledConnection; + use std::future::Future; use super::super::Changeset; use super::*; @@ -375,34 +412,43 @@ mod tests { /// are never seen by other transactions, so multiple tests can operate /// on the same tables in parallel, as if they had exclusive access to /// the database. - fn with_storage)>(test_function: F) { + async fn with_storage) -> R>(test_function: F) + where + R: Future, + { if let Ok(database_url) = std::env::var("ROCKET_DATABASE_URL") { - let pool = Pool::builder() - .max_size(1) - .build(ConnectionManager::new(database_url)) - .expect("Failed to build a db connection pool"); - + let pool: Pool = + Pool::builder(AsyncDieselConnectionManager::new(database_url)) + .max_size(1) + .build() + .expect("Failed to build a db connection pool"); { - let mut conn: PooledConnection> = - pool.get().expect("Failed to establish a db connection"); + let mut conn = pool + .get() + .await + .expect("Failed to establish a db connection"); // start a db transaction that will never be committed. All // modifications will be automatically rolled back when the // test completes. Any updates will only be seen by this // transaction (connection) and no one else conn.begin_test_transaction() + .await .expect("Failed to start a test transaction"); // drop all existing rows in the very beginning of the // transaction, so that tests always start with an empty db diesel::delete(tags::table) .execute(&mut conn) + .await .expect("could not delete tags"); diesel::delete(changesets::table) .execute(&mut conn) + .await .expect("could not delete changesets"); diesel::delete(snippets::table) .execute(&mut conn) + .await .expect("could not delete snippets"); // return the connection with an open transaction back to the @@ -451,11 +497,11 @@ mod tests { snippets } - #[test] - fn crud() { + #[tokio::test] + async fn crud() { // This will be properly covered by higher level tests, so we just // want to perform a basic smoke check here. 
- with_storage(|storage| { + with_storage(|storage| async move { let reference = reference_snippets(None).into_iter().next().unwrap(); let mut updated_reference = Snippet::new( Some("Hello world!".to_string()), @@ -472,12 +518,14 @@ mod tests { // create a new snippet from the reference value let new_snippet = storage .create(&reference) + .await .expect("Failed to create a snippet"); compare_snippets(&reference, &new_snippet); // retrieve the state of the snippet that was just persisted let retrieved_snippet = storage .get(&new_snippet.id) + .await .expect("Failed to retrieve a snippet"); // the snippet's state must be exactly the same as the one returned // by create() above, including the value of created_at/updated_at @@ -487,29 +535,33 @@ mod tests { // try to update the snippet state somehow let updated_snippet = storage .update(&updated_reference) + .await .expect("Failed to update a snippet"); compare_snippets(&updated_reference, &updated_snippet); // finally, delete the snippet storage .delete(&new_snippet.id) + .await .expect("Failed to delete a snippet"); // and verify that it can't be found in the database anymore - assert!(match storage.get(&new_snippet.id) { + assert!(match storage.get(&new_snippet.id).await { Err(StorageError::NotFound { id }) => id == new_snippet.id, _ => false, }); - }); + }) + .await; } - #[test] - fn list() { - with_storage(|storage| { + #[tokio::test] + async fn list() { + with_storage(|storage| async move { // at this point, listing of snippets should return an empty result assert_eq!( storage .list(ListSnippetsQuery::default()) + .await .expect("Failed to list snippets"), vec![] ); @@ -517,12 +569,16 @@ mod tests { // now insert reference snippets and try some queries let reference = reference_snippets(None); for snippet in reference.iter() { - storage.create(snippet).expect("Failed to create a snippet"); + storage + .create(snippet) + .await + .expect("Failed to create a snippet"); } let default_filters = 
ListSnippetsQuery::default(); let result = storage .list(default_filters) + .await .expect("Failed to list snippets"); for (actual, expected) in result.iter().rev().zip(reference.iter()) { compare_snippets(expected, actual); @@ -530,7 +586,7 @@ mod tests { let mut by_tag = ListSnippetsQuery::default(); by_tag.tags = Some(vec!["spam".to_string(), "foo".to_string()]); - let result = storage.list(by_tag).expect("Failed to list snippets"); + let result = storage.list(by_tag).await.expect("Failed to list snippets"); assert_eq!(result.len(), 2); for (actual, expected) in result.iter().rev().zip(reference.iter()) { compare_snippets(expected, actual); @@ -538,7 +594,10 @@ mod tests { let mut by_title = ListSnippetsQuery::default(); by_title.title = Some("Hello world".to_string()); - let result = storage.list(by_title).expect("Failed to list snippets"); + let result = storage + .list(by_title) + .await + .expect("Failed to list snippets"); assert_eq!(result.len(), 1); compare_snippets( reference @@ -551,7 +610,10 @@ mod tests { let mut by_syntax = ListSnippetsQuery::default(); by_syntax.syntax = Some("rust".to_string()); - let result = storage.list(by_syntax).expect("Failed to list snippets"); + let result = storage + .list(by_syntax) + .await + .expect("Failed to list snippets"); assert_eq!(result.len(), 1); compare_snippets( reference @@ -561,19 +623,26 @@ mod tests { .unwrap(), &result[0], ); - }); + }) + .await; } - fn pagination(reference: Vec) { - with_storage(|storage| { + async fn pagination(reference: Vec) { + with_storage(|storage| async move { for snippet in reference.iter() { - storage.create(snippet).expect("Failed to create a snippet"); + storage + .create(snippet) + .await + .expect("Failed to create a snippet"); } let mut pagination = ListSnippetsQuery::default(); pagination.pagination.direction = Direction::Asc; pagination.pagination.limit = 2; - let result = storage.list(pagination).expect("Failed to list snippets"); + let result = storage + 
.list(pagination) + .await + .expect("Failed to list snippets"); assert_eq!(result.len(), 2); for (actual, expected) in result.iter().zip(reference.iter()) { compare_snippets(expected, actual); @@ -582,7 +651,10 @@ mod tests { let mut with_marker = ListSnippetsQuery::default(); with_marker.pagination.direction = Direction::Asc; with_marker.pagination.marker = Some(result.last().unwrap().id.clone()); - let result = storage.list(with_marker).expect("Failed to list snippets"); + let result = storage + .list(with_marker) + .await + .expect("Failed to list snippets"); assert_eq!(result.len(), 1); for (actual, expected) in result.iter().skip(1).zip(reference.iter()) { compare_snippets(expected, actual); @@ -594,21 +666,23 @@ mod tests { with_marker_backward.pagination.marker = Some(result.last().unwrap().id.clone()); let result = storage .list(with_marker_backward) + .await .expect("Failed to list snippets"); assert_eq!(result.len(), 2); for (actual, expected) in result.iter().skip(1).take(2).zip(reference.iter()) { compare_snippets(expected, actual); } - }); + }) + .await; } - #[test] - fn pagination_with_monotonically_increasing_created_at() { - pagination(reference_snippets(None)); + #[tokio::test] + async fn pagination_with_monotonically_increasing_created_at() { + pagination(reference_snippets(None)).await; } - #[test] - fn pagination_with_identical_created_at() { - pagination(reference_snippets(Some(chrono::Utc::now()))); + #[tokio::test] + async fn pagination_with_identical_created_at() { + pagination(reference_snippets(Some(chrono::Utc::now()))).await; } }