From e2758fe2768172081028e9b1a7bf4ee6ae573d0f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wojciech=20Przytu=C5=82a?= Date: Wed, 7 Aug 2024 16:15:35 +0200 Subject: [PATCH 01/19] statement: introduce strongly-typed PageSize Before, a bare i32 was passed. Some API methods would accept nonpositive integers, while others would panic on such input. Both those kinds of behaviour are bad, and the inconsistency between them is even worse. For robustness, a popular Rust tactic is employed, namely strong typing. PageSize's public constructor returns an error for nonpositive integers, ensuring that if an instance of that type exists, its validity is guaranteed. I originally wanted to put the burden of creating a correct PageSize and handling possible errors on the user, but it would introduce boilerplate (`try_into().unwrap()`) that is deemed unacceptable. Therefore, PageSize is kept pub(crate) and it's constructed by the driver's code, panicking on invalid user input. As the page size is always chosen by the user explicitly (not by some convoluted logic), such panics should appear early and be clear to users. 
--- scylla/src/statement/mod.rs | 56 ++++++++++++++++++++++ scylla/src/statement/prepared_statement.rs | 23 ++++++--- scylla/src/statement/query.rs | 26 +++++++--- scylla/src/transport/caching_session.rs | 2 +- scylla/src/transport/connection.rs | 6 +-- scylla/src/transport/session.rs | 6 ++- scylla/src/transport/topology.rs | 12 +++-- 7 files changed, 107 insertions(+), 24 deletions(-) diff --git a/scylla/src/statement/mod.rs b/scylla/src/statement/mod.rs index 642ea06ad3..3ede1fea97 100644 --- a/scylla/src/statement/mod.rs +++ b/scylla/src/statement/mod.rs @@ -1,5 +1,7 @@ use std::{sync::Arc, time::Duration}; +use thiserror::Error; + use crate::transport::execution_profile::ExecutionProfileHandle; use crate::{history::HistoryListener, retry_policy::RetryPolicy}; @@ -9,6 +11,9 @@ pub mod query; pub use crate::frame::types::{Consistency, SerialConsistency}; +// This is the default common to drivers. +const DEFAULT_PAGE_SIZE: i32 = 5000; + #[derive(Debug, Clone, Default)] pub(crate) struct StatementConfig { pub(crate) consistency: Option, @@ -34,3 +39,54 @@ impl StatementConfig { self.consistency.unwrap_or(default_consistency) } } + +#[derive(Debug, Clone, Copy, Error)] +#[error("Invalid page size provided: {0}; valid values are [1, i32::MAX]")] +/// Invalid page size was provided. +pub(crate) struct InvalidPageSize(i32); + +/// Size of a single page when performing paged queries to the DB. +/// Configurable on statements, used in `Session::{query,execute}_{iter,single_page}`. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) struct PageSize(i32); + +impl PageSize { + /// Creates a new positive page size. If a non-positive number is passed, + /// returns an [InvalidPageSize] error. 
+ #[inline] + pub(crate) fn new(size: i32) -> Result { + if size > 0 { + Ok(Self(size)) + } else { + Err(InvalidPageSize(size)) + } + } + + #[inline] + pub(crate) fn inner(&self) -> i32 { + self.0 + } +} + +impl Default for PageSize { + #[inline] + fn default() -> Self { + Self(DEFAULT_PAGE_SIZE) + } +} + +impl TryFrom for PageSize { + type Error = InvalidPageSize; + + #[inline] + fn try_from(value: i32) -> Result { + Self::new(value) + } +} + +impl From for i32 { + #[inline] + fn from(page_size: PageSize) -> Self { + page_size.inner() + } +} diff --git a/scylla/src/statement/prepared_statement.rs b/scylla/src/statement/prepared_statement.rs index 01656bdc26..0b7cb814c0 100644 --- a/scylla/src/statement/prepared_statement.rs +++ b/scylla/src/statement/prepared_statement.rs @@ -13,7 +13,7 @@ use std::time::Duration; use thiserror::Error; use uuid::Uuid; -use super::StatementConfig; +use super::{PageSize, StatementConfig}; use crate::frame::response::result::PreparedMetadata; use crate::frame::types::{Consistency, SerialConsistency}; use crate::history::HistoryListener; @@ -92,7 +92,7 @@ pub struct PreparedStatement { id: Bytes, shared: Arc, - page_size: Option, + page_size: Option, partitioner_name: PartitionerName, is_confirmed_lwt: bool, } @@ -125,7 +125,7 @@ impl PreparedStatement { metadata: PreparedMetadata, result_metadata: ResultMetadata, statement: String, - page_size: Option, + page_size: Option, config: StatementConfig, ) -> Self { Self { @@ -152,9 +152,14 @@ impl PreparedStatement { } /// Sets the page size for this CQL query. + /// + /// Panics if given number is nonpositive. pub fn set_page_size(&mut self, page_size: i32) { - assert!(page_size > 0, "page size must be larger than 0"); - self.page_size = Some(page_size); + self.page_size = Some( + page_size + .try_into() + .unwrap_or_else(|err| panic!("PreparedStatement::set_page_size: {err}")), + ); } /// Disables paging for this CQL query. 
@@ -163,10 +168,16 @@ impl PreparedStatement { } /// Returns the page size for this CQL query. - pub fn get_page_size(&self) -> Option { + #[allow(dead_code)] + pub(crate) fn get_validated_page_size(&self) -> Option { self.page_size } + /// Returns the page size for this CQL query. + pub fn get_page_size(&self) -> Option { + self.page_size.as_ref().map(PageSize::inner) + } + /// Gets tracing ids of queries used to prepare this statement pub fn get_prepare_tracing_ids(&self) -> &[Uuid] { &self.prepare_tracing_ids diff --git a/scylla/src/statement/query.rs b/scylla/src/statement/query.rs index 913aebd1d7..855550b398 100644 --- a/scylla/src/statement/query.rs +++ b/scylla/src/statement/query.rs @@ -1,4 +1,4 @@ -use super::StatementConfig; +use super::{PageSize, StatementConfig}; use crate::frame::types::{Consistency, SerialConsistency}; use crate::history::HistoryListener; use crate::retry_policy::RetryPolicy; @@ -14,7 +14,7 @@ pub struct Query { pub(crate) config: StatementConfig, pub contents: String, - page_size: Option, + page_size: Option, } impl Query { @@ -27,16 +27,23 @@ impl Query { } } - /// Returns self with page size set to the given value + /// Returns self with page size set to the given value. + /// + /// Panics if given number is nonpositive. pub fn with_page_size(mut self, page_size: i32) -> Self { - self.page_size = Some(page_size); + self.set_page_size(page_size); self } /// Sets the page size for this CQL query. + /// + /// Panics if given number is nonpositive. pub fn set_page_size(&mut self, page_size: i32) { - assert!(page_size > 0, "page size must be larger than 0"); - self.page_size = Some(page_size); + self.page_size = Some( + page_size + .try_into() + .unwrap_or_else(|err| panic!("Query::set_page_size: {err}")), + ); } /// Disables paging for this CQL query. @@ -45,10 +52,15 @@ impl Query { } /// Returns the page size for this CQL query. 
- pub fn get_page_size(&self) -> Option { + pub(crate) fn get_validated_page_size(&self) -> Option { self.page_size } + /// Returns the page size for this CQL query. + pub fn get_page_size(&self) -> Option { + self.page_size.as_ref().map(PageSize::inner) + } + /// Sets the consistency to be used when executing this statement. pub fn set_consistency(&mut self, c: Consistency) { self.config.consistency = Some(c); diff --git a/scylla/src/transport/caching_session.rs b/scylla/src/transport/caching_session.rs index 903eb4e346..4d637e9380 100644 --- a/scylla/src/transport/caching_session.rs +++ b/scylla/src/transport/caching_session.rs @@ -164,7 +164,7 @@ where let query = query.into(); if let Some(raw) = self.cache.get(&query.contents) { - let page_size = query.get_page_size(); + let page_size = query.get_validated_page_size(); let mut stmt = PreparedStatement::new( raw.id.clone(), raw.is_confirmed_lwt, diff --git a/scylla/src/transport/connection.rs b/scylla/src/transport/connection.rs index 52795c30d3..339cc6aaf1 100644 --- a/scylla/src/transport/connection.rs +++ b/scylla/src/transport/connection.rs @@ -756,7 +756,7 @@ impl Connection { p.prepared_metadata, p.result_metadata, query.contents.clone(), - query.get_page_size(), + query.get_validated_page_size(), query.config.clone(), ), _ => { @@ -856,7 +856,7 @@ impl Connection { consistency, serial_consistency, values: Cow::Borrowed(SerializedValues::EMPTY), - page_size: query.get_page_size(), + page_size: query.get_page_size().map(Into::into), paging_state, skip_metadata: false, timestamp: query.get_timestamp(), @@ -901,7 +901,7 @@ impl Connection { consistency, serial_consistency, values: Cow::Borrowed(values), - page_size: prepared_statement.get_page_size(), + page_size: prepared_statement.get_page_size().map(Into::into), timestamp: prepared_statement.get_timestamp(), skip_metadata: prepared_statement.get_use_cached_result_metadata(), paging_state, diff --git a/scylla/src/transport/session.rs 
b/scylla/src/transport/session.rs index 80a23dace8..6331f494ef 100644 --- a/scylla/src/transport/session.rs +++ b/scylla/src/transport/session.rs @@ -81,6 +81,8 @@ use scylla_cql::errors::BadQuery; pub(crate) const TABLET_CHANNEL_SIZE: usize = 8192; +const TRACING_QUERY_PAGE_SIZE: i32 = 1024; + /// Translates IP addresses received from ScyllaDB nodes into locally reachable addresses. /// /// The driver auto-detects new ScyllaDB nodes added to the cluster through server side pushed @@ -1455,12 +1457,12 @@ impl Session { // Query system_traces.sessions for TracingInfo let mut traces_session_query = Query::new(crate::tracing::TRACES_SESSION_QUERY_STR); traces_session_query.config.consistency = consistency; - traces_session_query.set_page_size(1024); + traces_session_query.set_page_size(TRACING_QUERY_PAGE_SIZE); // Query system_traces.events for TracingEvents let mut traces_events_query = Query::new(crate::tracing::TRACES_EVENTS_QUERY_STR); traces_events_query.config.consistency = consistency; - traces_events_query.set_page_size(1024); + traces_events_query.set_page_size(TRACING_QUERY_PAGE_SIZE); let (traces_session_res, traces_events_res) = tokio::try_join!( self.query(traces_session_query, (tracing_id,)), diff --git a/scylla/src/transport/topology.rs b/scylla/src/transport/topology.rs index fafa8afdca..0dc0c12e79 100644 --- a/scylla/src/transport/topology.rs +++ b/scylla/src/transport/topology.rs @@ -791,10 +791,12 @@ impl NodeInfoSource { } } +const METADATA_QUERY_PAGE_SIZE: i32 = 1024; + async fn query_peers(conn: &Arc, connect_port: u16) -> Result, QueryError> { let mut peers_query = Query::new("select host_id, rpc_address, data_center, rack, tokens from system.peers"); - peers_query.set_page_size(1024); + peers_query.set_page_size(METADATA_QUERY_PAGE_SIZE); let peers_query_stream = conn .clone() .query_iter(peers_query) @@ -804,7 +806,7 @@ async fn query_peers(conn: &Arc, connect_port: u16) -> Result( let fut = async move { if keyspaces_to_fetch.is_empty() { let 
mut query = Query::new(query_str); - query.set_page_size(1024); + query.set_page_size(METADATA_QUERY_PAGE_SIZE); conn.query_iter(query).await } else { @@ -922,7 +924,7 @@ fn query_filter_keyspace_name<'a>( let query_str = format!("{query_str} where keyspace_name in ?"); let mut query = Query::new(query_str); - query.set_page_size(1024); + query.set_page_size(METADATA_QUERY_PAGE_SIZE); let prepared = conn.prepare(&query).await?; let serialized_values = prepared.serialize_values(&keyspaces)?; @@ -1658,7 +1660,7 @@ async fn query_table_partitioners( let mut partitioner_query = Query::new( "select keyspace_name, table_name, partitioner from system_schema.scylla_tables", ); - partitioner_query.set_page_size(1024); + partitioner_query.set_page_size(METADATA_QUERY_PAGE_SIZE); let rows = conn .clone() From 22f558b07928175431c8a635dcdc41a0e6b60c27 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wojciech=20Przytu=C5=82a?= Date: Thu, 22 Aug 2024 11:23:59 +0200 Subject: [PATCH 02/19] underive Default for QueryResult & ResultMetadata The value of everything being empty does not seem to be a reasonable default value for both QueryResult and ResultMetadata. It's better to have a mock_empty() constructor for the specific case when such special value is needed. 
--- scylla-cql/src/frame/response/result.rs | 13 ++++++++++++- scylla/src/transport/iterator.rs | 7 +++++-- scylla/src/transport/query_result.rs | 13 ++++++++++++- scylla/src/transport/session.rs | 2 +- 4 files changed, 30 insertions(+), 5 deletions(-) diff --git a/scylla-cql/src/frame/response/result.rs b/scylla-cql/src/frame/response/result.rs index 8c89f2ad6d..1a948b524a 100644 --- a/scylla-cql/src/frame/response/result.rs +++ b/scylla-cql/src/frame/response/result.rs @@ -427,13 +427,24 @@ pub struct ColumnSpec { pub typ: ColumnType, } -#[derive(Debug, Clone, Default)] +#[derive(Debug, Clone)] pub struct ResultMetadata { col_count: usize, pub paging_state: Option, pub col_specs: Vec, } +impl ResultMetadata { + #[inline] + pub fn mock_empty() -> Self { + Self { + col_count: 0, + paging_state: None, + col_specs: Vec::new(), + } + } +} + #[derive(Debug, Copy, Clone)] pub struct PartitionKeyIndex { /// index in the serialized values diff --git a/scylla/src/transport/iterator.rs b/scylla/src/transport/iterator.rs index 6c340b3453..4023a95f1e 100644 --- a/scylla/src/transport/iterator.rs +++ b/scylla/src/transport/iterator.rs @@ -425,7 +425,10 @@ impl RowIterator { // A separate module is used here so that the parent module cannot construct // SendAttemptedProof directly. 
mod checked_channel_sender { - use scylla_cql::{errors::QueryError, frame::response::result::Rows}; + use scylla_cql::{ + errors::QueryError, + frame::response::result::{ResultMetadata, Rows}, + }; use std::marker::PhantomData; use tokio::sync::mpsc; use uuid::Uuid; @@ -467,7 +470,7 @@ mod checked_channel_sender { ) { let empty_page = ReceivedPage { rows: Rows { - metadata: Default::default(), + metadata: ResultMetadata::mock_empty(), rows_count: 0, rows: Vec::new(), serialized_size: 0, diff --git a/scylla/src/transport/query_result.rs b/scylla/src/transport/query_result.rs index e368a11745..ed0dd16962 100644 --- a/scylla/src/transport/query_result.rs +++ b/scylla/src/transport/query_result.rs @@ -9,7 +9,7 @@ use uuid::Uuid; /// Result of a single query\ /// Contains all rows returned by the database and some more information #[non_exhaustive] -#[derive(Default, Debug)] +#[derive(Debug)] pub struct QueryResult { /// Rows returned by the database.\ /// Queries like `SELECT` will have `Some(Vec)`, while queries like `INSERT` will have `None`.\ @@ -28,6 +28,17 @@ pub struct QueryResult { } impl QueryResult { + pub(crate) fn mock_empty() -> Self { + Self { + rows: None, + warnings: Vec::new(), + tracing_id: None, + paging_state: None, + col_specs: Vec::new(), + serialized_size: 0, + } + } + /// Returns the number of received rows.\ /// Fails when the query isn't of a type that could return rows, same as [`rows()`](QueryResult::rows). 
pub fn rows_num(&self) -> Result { diff --git a/scylla/src/transport/session.rs b/scylla/src/transport/session.rs index 6331f494ef..a37bc79dce 100644 --- a/scylla/src/transport/session.rs +++ b/scylla/src/transport/session.rs @@ -1274,7 +1274,7 @@ impl Session { .await?; let result = match run_query_result { - RunQueryResult::IgnoredWriteError => QueryResult::default(), + RunQueryResult::IgnoredWriteError => QueryResult::mock_empty(), RunQueryResult::Completed(response) => response, }; span.record_result_fields(&result); From 9648b3d502e902f5aabf89e22dfd01e2e3873688 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wojciech=20Przytu=C5=82a?= Date: Tue, 6 Aug 2024 12:39:22 +0200 Subject: [PATCH 03/19] requests: make paging state strongly typed As an effort of more robust paging API, paging state is now made typed. Two new types are introduced: PagingState and PagingStateResponse. PagingState is, underneath, just an Arc over the raw bytes from the DB. The idea is that PagingState is passed as an argument to request execution API to ask for resuming the query (_continue it_) from the given PagingState. PagingStateResponse is returned always by the DB from paged requests - with information about either more or no more pages available. PagingState can be easily retrieved from PagingStateResponse (if it contains some), so that user can use it in a loop. The select_paging example is modernised to showcase the idea. 
--- examples/select-paging.rs | 101 ++++++++++-------- scylla-cql/benches/benchmark.rs | 3 +- scylla-cql/src/frame/request/mod.rs | 8 +- scylla-cql/src/frame/request/query.rs | 95 ++++++++++++++-- scylla-cql/src/frame/response/result.rs | 19 ++-- scylla/src/statement/mod.rs | 2 + scylla/src/transport/caching_session.rs | 8 +- scylla/src/transport/connection.rs | 47 +++++--- scylla/src/transport/iterator.rs | 57 ++++++---- scylla/src/transport/mod.rs | 1 + scylla/src/transport/query_result.rs | 8 +- scylla/src/transport/session.rs | 16 +-- scylla/src/transport/session_test.rs | 24 +++-- .../integration/skip_metadata_optimization.rs | 23 ++-- 14 files changed, 273 insertions(+), 139 deletions(-) diff --git a/examples/select-paging.rs b/examples/select-paging.rs index ea6cb256e0..77a28fa6fb 100644 --- a/examples/select-paging.rs +++ b/examples/select-paging.rs @@ -1,7 +1,9 @@ use anyhow::Result; use futures::stream::StreamExt; +use scylla::statement::PagingState; use scylla::{query::Query, Session, SessionBuilder}; use std::env; +use std::ops::ControlFlow; #[tokio::main] async fn main() -> Result<()> { @@ -41,54 +43,65 @@ async fn main() -> Result<()> { } let paged_query = Query::new("SELECT a, b, c FROM examples_ks.select_paging").with_page_size(6); - let res1 = session.query(paged_query.clone(), &[]).await?; - println!( - "Paging state: {:#?} ({} rows)", - res1.paging_state, - res1.rows_num()?, - ); - let res2 = session - .query_paged(paged_query.clone(), &[], res1.paging_state) - .await?; - println!( - "Paging state: {:#?} ({} rows)", - res2.paging_state, - res2.rows_num()?, - ); - let res3 = session - .query_paged(paged_query.clone(), &[], res2.paging_state) - .await?; - println!( - "Paging state: {:#?} ({} rows)", - res3.paging_state, - res3.rows_num()?, - ); + + // Manual paging in a loop, unprepared statement. 
+ let mut paging_state = PagingState::start(); + loop { + let res = session + .query_paged(paged_query.clone(), &[], paging_state) + .await?; + + let paging_state_response = res.paging_state_response.clone(); + println!( + "Paging state: {:#?} ({} rows)", + paging_state_response, + res.rows_num()?, + ); + + match paging_state_response.into_paging_control_flow() { + ControlFlow::Break(()) => { + // No more pages to be fetched. + break; + } + ControlFlow::Continue(new_paging_state) => { + // Update paging paging state from the response, so that query + // will be resumed from where it ended the last time. + paging_state = new_paging_state; + } + } + } let paged_prepared = session .prepare(Query::new("SELECT a, b, c FROM examples_ks.select_paging").with_page_size(7)) .await?; - let res4 = session.execute(&paged_prepared, &[]).await?; - println!( - "Paging state from the prepared statement execution: {:#?} ({} rows)", - res4.paging_state, - res4.rows_num()?, - ); - let res5 = session - .execute_paged(&paged_prepared, &[], res4.paging_state) - .await?; - println!( - "Paging state from the second prepared statement execution: {:#?} ({} rows)", - res5.paging_state, - res5.rows_num()?, - ); - let res6 = session - .execute_paged(&paged_prepared, &[], res5.paging_state) - .await?; - println!( - "Paging state from the third prepared statement execution: {:#?} ({} rows)", - res6.paging_state, - res6.rows_num()?, - ); + + // Manual paging in a loop, prepared statement. + let mut paging_state = PagingState::default(); + loop { + let res = session + .execute_paged(&paged_prepared, &[], paging_state) + .await?; + + let paging_state_response = res.paging_state_response.clone(); + println!( + "Paging state from the prepared statement execution: {:#?} ({} rows)", + paging_state_response, + res.rows_num()?, + ); + + match paging_state_response.into_paging_control_flow() { + ControlFlow::Break(()) => { + // No more pages to be fetched. 
+ break; + } + ControlFlow::Continue(new_paging_state) => { + // Update paging paging state from the response, so that query + // will be resumed from where it ended the last time. + paging_state = new_paging_state; + } + } + } + println!("Ok."); Ok(()) diff --git a/scylla-cql/benches/benchmark.rs b/scylla-cql/benches/benchmark.rs index ec0a26213b..72d0493bfb 100644 --- a/scylla-cql/benches/benchmark.rs +++ b/scylla-cql/benches/benchmark.rs @@ -2,6 +2,7 @@ use std::borrow::Cow; use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; +use scylla_cql::frame::request::query::PagingState; use scylla_cql::frame::request::SerializableRequest; use scylla_cql::frame::response::result::ColumnType; use scylla_cql::frame::{request::query, Compression, SerializedRequest}; @@ -16,7 +17,7 @@ fn make_query(contents: &str, values: SerializedValues) -> query::Query<'_> { values: Cow::Owned(values), skip_metadata: false, page_size: None, - paging_state: None, + paging_state: PagingState::start(), timestamp: None, }, } diff --git a/scylla-cql/src/frame/request/mod.rs b/scylla-cql/src/frame/request/mod.rs index 1a5d3511f2..076939d841 100644 --- a/scylla-cql/src/frame/request/mod.rs +++ b/scylla-cql/src/frame/request/mod.rs @@ -140,6 +140,8 @@ mod tests { Consistency, }; + use super::query::PagingState; + #[test] fn request_ser_de_identity() { // Query @@ -149,7 +151,7 @@ mod tests { serial_consistency: Some(SerialConsistency::Serial), timestamp: None, page_size: Some(323), - paging_state: Some(vec![2, 1, 3, 7].into()), + paging_state: PagingState::new_from_raw_bytes(&[2_u8, 1, 3, 7] as &[u8]), skip_metadata: false, values: { let mut vals = SerializedValues::new(); @@ -177,7 +179,7 @@ mod tests { serial_consistency: None, timestamp: Some(3423434), page_size: None, - paging_state: None, + paging_state: PagingState::start(), skip_metadata: false, values: { let mut vals = SerializedValues::new(); @@ -235,7 +237,7 @@ mod tests { serial_consistency: 
Some(SerialConsistency::LocalSerial), timestamp: None, page_size: None, - paging_state: None, + paging_state: PagingState::start(), skip_metadata: false, values: Cow::Borrowed(SerializedValues::EMPTY), }; diff --git a/scylla-cql/src/frame/request/query.rs b/scylla-cql/src/frame/request/query.rs index 31a281f512..e4a9fcfaf0 100644 --- a/scylla-cql/src/frame/request/query.rs +++ b/scylla-cql/src/frame/request/query.rs @@ -1,10 +1,10 @@ -use std::borrow::Cow; +use std::{borrow::Cow, ops::ControlFlow, sync::Arc}; use crate::{ frame::{frame_errors::ParseError, types::SerialConsistency}, types::serialize::row::SerializedValues, }; -use bytes::{Buf, BufMut, Bytes}; +use bytes::{Buf, BufMut}; use crate::{ frame::request::{RequestOpcode, SerializableRequest}, @@ -62,7 +62,7 @@ pub struct QueryParameters<'a> { pub serial_consistency: Option, pub timestamp: Option, pub page_size: Option, - pub paging_state: Option, + pub paging_state: PagingState, pub skip_metadata: bool, pub values: Cow<'a, SerializedValues>, } @@ -74,7 +74,7 @@ impl Default for QueryParameters<'_> { serial_consistency: None, timestamp: None, page_size: None, - paging_state: None, + paging_state: PagingState::start(), skip_metadata: false, values: Cow::Borrowed(SerializedValues::EMPTY), } @@ -85,6 +85,8 @@ impl QueryParameters<'_> { pub fn serialize(&self, buf: &mut impl BufMut) -> Result<(), ParseError> { types::write_consistency(self.consistency, buf); + let paging_state_bytes = self.paging_state.as_bytes_slice(); + let mut flags = 0; if !self.values.is_empty() { flags |= FLAG_VALUES; @@ -98,7 +100,7 @@ impl QueryParameters<'_> { flags |= FLAG_PAGE_SIZE; } - if self.paging_state.is_some() { + if paging_state_bytes.is_some() { flags |= FLAG_WITH_PAGING_STATE; } @@ -120,8 +122,8 @@ impl QueryParameters<'_> { types::write_int(page_size, buf); } - if let Some(paging_state) = &self.paging_state { - types::write_bytes(paging_state, buf)?; + if let Some(paging_state_bytes) = paging_state_bytes { + 
types::write_bytes(paging_state_bytes, buf)?; } if let Some(serial_consistency) = self.serial_consistency { @@ -170,9 +172,9 @@ impl<'q> QueryParameters<'q> { let page_size = page_size_flag.then(|| types::read_int(buf)).transpose()?; let paging_state = if paging_state_flag { - Some(Bytes::copy_from_slice(types::read_bytes(buf)?)) + PagingState::new_from_raw_bytes(types::read_bytes(buf)?) } else { - None + PagingState::start() }; let serial_consistency = serial_consistency_flag .then(|| types::read_consistency(buf)) @@ -204,3 +206,78 @@ impl<'q> QueryParameters<'q> { }) } } + +#[derive(Debug, Clone)] +pub enum PagingStateResponse { + HasMorePages { state: PagingState }, + NoMorePages, +} + +impl PagingStateResponse { + /// Determines if the query has finished or it should be resumed with given + /// [PagingState] in order to fetch next pages. + #[inline] + pub fn finished(&self) -> bool { + matches!(*self, Self::NoMorePages) + } + + pub(crate) fn new_from_raw_bytes(raw_paging_state: Option<&[u8]>) -> Self { + match raw_paging_state { + Some(raw_bytes) => Self::HasMorePages { + state: PagingState::new_from_raw_bytes(raw_bytes), + }, + None => Self::NoMorePages, + } + } + + /// Converts the response into [ControlFlow], signalling whether the query has finished + /// or it should be resumed with given [PagingState] in order to fetch next pages. + #[inline] + pub fn into_paging_control_flow(self) -> ControlFlow<(), PagingState> { + match self { + Self::HasMorePages { + state: next_page_handle, + } => ControlFlow::Continue(next_page_handle), + Self::NoMorePages => ControlFlow::Break(()), + } + } +} + +/// The state of a paged query, i.e. where to resume fetching result rows +/// upon next request. +/// +/// Cheaply clonable. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct PagingState(Option>); + +impl PagingState { + /// A start state - the state of a not-yet-started paged query. 
+ #[inline] + pub fn start() -> Self { + Self(None) + } + + /// Returns the inner representation of [PagingState]. + /// One can use this to store paging state for a longer time, + /// and later restore it using [Self::new_from_raw_bytes]. + /// In case None is returned, this signifies + /// [PagingState::start()] being underneath. + #[inline] + pub fn as_bytes_slice(&self) -> Option<&Arc<[u8]>> { + self.0.as_ref() + } + + /// Creates PagingState from its inner representation. + /// One can use this to restore paging state after longer time, + /// having previously stored it using [Self::as_bytes_slice]. + #[inline] + pub fn new_from_raw_bytes(raw_bytes: impl Into>) -> Self { + Self(Some(raw_bytes.into())) + } +} + +impl Default for PagingState { + fn default() -> Self { + Self::start() + } +} diff --git a/scylla-cql/src/frame/response/result.rs b/scylla-cql/src/frame/response/result.rs index 1a948b524a..b6b51dc7df 100644 --- a/scylla-cql/src/frame/response/result.rs +++ b/scylla-cql/src/frame/response/result.rs @@ -4,6 +4,7 @@ use crate::frame::frame_errors::{ PreparedParseError, ResultMetadataParseError, RowsParseError, SchemaChangeEventParseError, SetKeyspaceParseError, TableSpecParseError, }; +use crate::frame::request::query::PagingStateResponse; use crate::frame::response::event::SchemaChangeEvent; use crate::frame::types; use crate::frame::value::{ @@ -430,7 +431,7 @@ pub struct ColumnSpec { #[derive(Debug, Clone)] pub struct ResultMetadata { col_count: usize, - pub paging_state: Option, + pub paging_state: PagingStateResponse, pub col_specs: Vec, } @@ -439,7 +440,7 @@ impl ResultMetadata { pub fn mock_empty() -> Self { Self { col_count: 0, - paging_state: None, + paging_state: PagingStateResponse::NoMorePages, col_specs: Vec::new(), } } @@ -629,16 +630,10 @@ fn deser_result_metadata(buf: &mut &[u8]) -> StdResult, values: impl SerializeRow, - paging_state: Option, + paging_state: PagingState, ) -> Result { let query = query.into(); let prepared = 
self.add_prepared_statement_owned(query).await?; self.session - .execute_paged(&prepared, values, paging_state.clone()) + .execute_paged(&prepared, values, paging_state) .await } @@ -218,6 +219,7 @@ where #[cfg(test)] mod tests { use crate::query::Query; + use crate::statement::PagingState; use crate::test_utils::{create_new_session_builder, scylla_supports_tablets, setup_tracing}; use crate::transport::partitioner::PartitionerName; use crate::utils::test_utils::unique_keyspace_name; @@ -371,7 +373,7 @@ mod tests { assert!(session.cache.is_empty()); let result = session - .execute_paged("select * from test_table", &[], None) + .execute_paged("select * from test_table", &[], PagingState::start()) .await .unwrap(); diff --git a/scylla/src/transport/connection.rs b/scylla/src/transport/connection.rs index 339cc6aaf1..e1d9298acb 100644 --- a/scylla/src/transport/connection.rs +++ b/scylla/src/transport/connection.rs @@ -62,7 +62,7 @@ use crate::frame::{ use crate::query::Query; use crate::routing::ShardInfo; use crate::statement::prepared_statement::PreparedStatement; -use crate::statement::Consistency; +use crate::statement::{Consistency, PagingState, PagingStateResponse}; use crate::transport::Compression; use crate::QueryResult; @@ -265,7 +265,7 @@ impl NonErrorQueryResponse { rs.metadata.col_specs, rs.serialized_size, ), - NonErrorResponse::Result(_) => (None, None, vec![], 0), + NonErrorResponse::Result(_) => (None, PagingStateResponse::NoMorePages, vec![], 0), _ => { return Err(QueryError::ProtocolError( "Unexpected server response, expected Result or Error", @@ -277,7 +277,7 @@ impl NonErrorQueryResponse { rows, warnings: self.warnings, tracing_id: self.tracing_id, - paging_state, + paging_state_response: paging_state, col_specs, serialized_size, }) @@ -821,15 +821,20 @@ impl Connection { serial_consistency: Option, ) -> Result { let query: Query = query.into(); - self.query_with_consistency(&query, consistency, serial_consistency, None) - .await? 
- .into_query_result() + self.query_with_consistency( + &query, + consistency, + serial_consistency, + PagingState::start(), + ) + .await? + .into_query_result() } pub(crate) async fn query( &self, query: &Query, - paging_state: Option, + paging_state: PagingState, ) -> Result { // This method is used only for driver internal queries, so no need to consult execution profile here. self.query_with_consistency( @@ -848,7 +853,7 @@ impl Connection { query: &Query, consistency: Consistency, serial_consistency: Option, - paging_state: Option, + paging_state: PagingState, ) -> Result { let query_frame = query::Query { contents: Cow::Borrowed(&query.contents), @@ -872,7 +877,7 @@ impl Connection { &self, prepared: PreparedStatement, values: SerializedValues, - paging_state: Option, + paging_state: PagingState, ) -> Result { // This method is used only for driver internal queries, so no need to consult execution profile here. self.execute_with_consistency( @@ -893,7 +898,7 @@ impl Connection { values: &SerializedValues, consistency: Consistency, serial_consistency: Option, - paging_state: Option, + paging_state: PagingState, ) -> Result { let execute_frame = execute::Execute { id: prepared_statement.get_id().to_owned(), @@ -1137,7 +1142,7 @@ impl Connection { false => format!("USE {}", keyspace_name.as_str()).into(), }; - let query_response = self.query(&query, None).await?; + let query_response = self.query(&query, PagingState::start()).await?; match query_response.response { Response::Result(result::Result::SetKeyspace(set_keyspace)) => { @@ -2137,6 +2142,7 @@ mod tests { use super::ConnectionConfig; use crate::query::Query; + use crate::statement::PagingState; use crate::test_utils::setup_tracing; use crate::transport::connection::open_connection; use crate::transport::node::ResolvedContactPoint; @@ -2237,7 +2243,11 @@ mod tests { for v in &values { let prepared_clone = prepared.clone(); let values = prepared_clone.serialize_values(&(*v,)).unwrap(); - let fut = async { 
connection.execute(prepared_clone, values, None).await }; + let fut = async { + connection + .execute(prepared_clone, values, PagingState::start()) + .await + }; insert_futures.push(fut); } @@ -2320,7 +2330,10 @@ mod tests { .await .unwrap(); - connection.query(&"TRUNCATE t".into(), None).await.unwrap(); + connection + .query(&"TRUNCATE t".into(), PagingState::start()) + .await + .unwrap(); let mut futs = Vec::new(); @@ -2339,8 +2352,10 @@ mod tests { let values = prepared .serialize_values(&(j, vec![j as u8; j as usize])) .unwrap(); - let response = - conn.execute(prepared.clone(), values, None).await.unwrap(); + let response = conn + .execute(prepared.clone(), values, PagingState::start()) + .await + .unwrap(); // QueryResponse might contain an error - make sure that there were no errors let _nonerror_response = response.into_non_error_query_response().unwrap(); @@ -2357,7 +2372,7 @@ mod tests { // Check that everything was written properly let range_end = arithmetic_sequence_sum(NUM_BATCHES); let mut results = connection - .query(&"SELECT p, v FROM t".into(), None) + .query(&"SELECT p, v FROM t".into(), PagingState::start()) .await .unwrap() .into_query_result() diff --git a/scylla/src/transport/iterator.rs b/scylla/src/transport/iterator.rs index 4023a95f1e..0d614c86b0 100644 --- a/scylla/src/transport/iterator.rs +++ b/scylla/src/transport/iterator.rs @@ -8,7 +8,6 @@ use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; -use bytes::Bytes; use futures::Stream; use scylla_cql::frame::response::NonErrorResponse; use scylla_cql::frame::types::SerialConsistency; @@ -27,8 +26,8 @@ use crate::frame::response::{ result::{ColumnSpec, Row, Rows}, }; use crate::history::{self, HistoryListener}; -use crate::statement::Consistency; use crate::statement::{prepared_statement::PreparedStatement, query::Query}; +use crate::statement::{Consistency, PagingState}; use crate::transport::cluster::ClusterData; use crate::transport::connection::{Connection, 
NonErrorQueryResponse, QueryResponse}; use crate::transport::load_balancing::{self, RoutingInfo}; @@ -161,7 +160,7 @@ impl RowIterator { let page_query = |connection: Arc, consistency: Consistency, - paging_state: Option| { + paging_state: PagingState| { async move { connection .query_with_consistency( @@ -191,7 +190,7 @@ impl RowIterator { retry_session, execution_profile, metrics, - paging_state: None, + paging_state: PagingState::start(), history_listener: query.config.history_listener.clone(), current_query_id: None, current_attempt_id: None, @@ -258,7 +257,7 @@ impl RowIterator { let page_query = |connection: Arc, consistency: Consistency, - paging_state: Option| async move { + paging_state: PagingState| async move { connection .execute_with_consistency( prepared_ref, @@ -308,7 +307,7 @@ impl RowIterator { retry_session, execution_profile: config.execution_profile, metrics: config.metrics, - paging_state: None, + paging_state: PagingState::start(), history_listener: config.prepared.config.history_listener.clone(), current_query_id: None, current_attempt_id: None, @@ -492,7 +491,7 @@ struct RowIteratorWorker<'a, QueryFunc, SpanCreatorFunc> { sender: ProvingSender>, // Closure used to perform a single page query - // AsyncFn(Arc, Option) -> Result + // AsyncFn(Arc, Option>) -> Result page_query: QueryFunc, statement_info: RoutingInfo<'a>, @@ -502,7 +501,7 @@ struct RowIteratorWorker<'a, QueryFunc, SpanCreatorFunc> { execution_profile: Arc, metrics: Arc, - paging_state: Option, + paging_state: PagingState, history_listener: Option>, current_query_id: Option, @@ -514,7 +513,7 @@ struct RowIteratorWorker<'a, QueryFunc, SpanCreatorFunc> { impl RowIteratorWorker<'_, QueryFunc, SpanCreator> where - QueryFunc: Fn(Arc, Consistency, Option) -> QueryFut, + QueryFunc: Fn(Arc, Consistency, PagingState) -> QueryFut, QueryFut: Future>, SpanCreator: Fn() -> RequestSpan, { @@ -677,7 +676,7 @@ where match query_response { Ok(NonErrorQueryResponse { - response: 
NonErrorResponse::Result(result::Result::Rows(mut rows)), + response: NonErrorResponse::Result(result::Result::Rows(rows)), tracing_id, .. }) => { @@ -688,10 +687,10 @@ where .load_balancing_policy .on_query_success(&self.statement_info, elapsed, node); - self.paging_state = rows.metadata.paging_state.take(); - request_span.record_rows_fields(&rows); + let paging_state_response = rows.metadata.paging_state.clone(); + let received_page = ReceivedPage { rows, tracing_id }; // Send next page to RowIterator @@ -701,9 +700,14 @@ where return Ok(ControlFlow::Break(proof)); } - if self.paging_state.is_none() { - // Reached the last query, shutdown - return Ok(ControlFlow::Break(proof)); + match paging_state_response.into_paging_control_flow() { + ControlFlow::Continue(paging_state) => { + self.paging_state = paging_state; + } + ControlFlow::Break(()) => { + // Reached the last query, shutdown + return Ok(ControlFlow::Break(proof)); + } } // Query succeeded, reset retry policy for future retries @@ -833,7 +837,7 @@ struct SingleConnectionRowIteratorWorker { impl SingleConnectionRowIteratorWorker where - Fetcher: Fn(Option) -> FetchFut + Send + Sync, + Fetcher: Fn(PagingState) -> FetchFut + Send + Sync, FetchFut: Future> + Send, { async fn work(mut self) -> PageSendAttemptedProof { @@ -847,13 +851,14 @@ where } async fn do_work(&mut self) -> Result { - let mut paging_state = None; + let mut paging_state = PagingState::start(); loop { let result = (self.fetcher)(paging_state).await?; let response = result.into_non_error_query_response()?; match response.response { - NonErrorResponse::Result(result::Result::Rows(mut rows)) => { - paging_state = rows.metadata.paging_state.take(); + NonErrorResponse::Result(result::Result::Rows(rows)) => { + let paging_state_response = rows.metadata.paging_state.clone(); + let (proof, send_result) = self .sender .send(Ok(ReceivedPage { @@ -861,9 +866,21 @@ where tracing_id: response.tracing_id, })) .await; - if paging_state.is_none() || 
send_result.is_err() { + + if send_result.is_err() { + // channel was closed, RowIterator was dropped - should shutdown return Ok(proof); } + + match paging_state_response.into_paging_control_flow() { + ControlFlow::Continue(new_paging_state) => { + paging_state = new_paging_state; + } + ControlFlow::Break(()) => { + // Reached the last query, shutdown + return Ok(proof); + } + } } NonErrorResponse::Result(_) => { // We have most probably sent a modification statement (e.g. INSERT or UPDATE), diff --git a/scylla/src/transport/mod.rs b/scylla/src/transport/mod.rs index 620c1fafb7..bf6310d356 100644 --- a/scylla/src/transport/mod.rs +++ b/scylla/src/transport/mod.rs @@ -22,6 +22,7 @@ pub use crate::frame::{Authenticator, Compression}; pub use connection::SelfIdentity; pub use execution_profile::ExecutionProfile; pub use scylla_cql::errors; +pub use scylla_cql::frame::request::query::{PagingState, PagingStateResponse}; #[cfg(test)] mod authenticate_test; diff --git a/scylla/src/transport/query_result.rs b/scylla/src/transport/query_result.rs index ed0dd16962..793800a23d 100644 --- a/scylla/src/transport/query_result.rs +++ b/scylla/src/transport/query_result.rs @@ -1,8 +1,8 @@ use crate::frame::response::cql_to_rust::{FromRow, FromRowError}; use crate::frame::response::result::ColumnSpec; use crate::frame::response::result::Row; +use crate::statement::PagingStateResponse; use crate::transport::session::{IntoTypedRows, TypedRowIter}; -use bytes::Bytes; use thiserror::Error; use uuid::Uuid; @@ -20,7 +20,7 @@ pub struct QueryResult { /// CQL Tracing uuid - can only be Some if tracing is enabled for this query pub tracing_id: Option, /// Paging state returned from the server - pub paging_state: Option, + pub paging_state_response: PagingStateResponse, /// Column specification returned from the server pub col_specs: Vec, /// The original size of the serialized rows in request @@ -33,7 +33,7 @@ impl QueryResult { rows: None, warnings: Vec::new(), tracing_id: None, - 
paging_state: None, + paging_state_response: PagingStateResponse::NoMorePages, col_specs: Vec::new(), serialized_size: 0, } @@ -318,7 +318,7 @@ mod tests { rows: None, warnings: vec![], tracing_id: None, - paging_state: None, + paging_state_response: PagingStateResponse::NoMorePages, col_specs: vec![column_spec], serialized_size: 0, } diff --git a/scylla/src/transport/session.rs b/scylla/src/transport/session.rs index a37bc79dce..ac5dddfa32 100644 --- a/scylla/src/transport/session.rs +++ b/scylla/src/transport/session.rs @@ -10,7 +10,6 @@ use crate::history::HistoryListener; use crate::utils::pretty::{CommaSeparatedDisplayer, CqlValueDisplayer}; use arc_swap::ArcSwapOption; use async_trait::async_trait; -use bytes::Bytes; use futures::future::join_all; use futures::future::try_join_all; use itertools::{Either, Itertools}; @@ -53,7 +52,7 @@ use crate::frame::response::result; use crate::prepared_statement::PreparedStatement; use crate::query::Query; use crate::routing::{Shard, Token}; -use crate::statement::Consistency; +use crate::statement::{Consistency, PagingState}; use crate::tracing::{TracingEvent, TracingInfo}; use crate::transport::cluster::{Cluster, ClusterData, ClusterNeatDebug}; use crate::transport::connection::{Connection, ConnectionConfig, VerifiedKeyspaceName}; @@ -624,7 +623,7 @@ impl Session { query: impl Into, values: impl SerializeRow, ) -> Result { - self.query_paged(query, values, None).await + self.query_paged(query, values, PagingState::start()).await } /// Queries the database with a custom paging state. 
@@ -637,12 +636,12 @@ impl Session { /// /// * `query` - query to be performed /// * `values` - values bound to the query - /// * `paging_state` - previously received paging state or None + /// * `paging_state` - previously received paging state or [PagingState::start()] pub async fn query_paged( &self, query: impl Into, values: impl SerializeRow, - paging_state: Option, + paging_state: PagingState, ) -> Result { let query: Query = query.into(); @@ -977,7 +976,8 @@ impl Session { prepared: &PreparedStatement, values: impl SerializeRow, ) -> Result { - self.execute_paged(prepared, values, None).await + self.execute_paged(prepared, values, PagingState::start()) + .await } /// Executes a previously prepared statement with previously received paging state @@ -985,12 +985,12 @@ impl Session { /// /// * `prepared` - a statement prepared with [prepare](crate::transport::session::Session::prepare) /// * `values` - values bound to the query - /// * `paging_state` - paging state from the previous query or None + /// * `paging_state` - paging state from the previous query or [PagingState::start()] pub async fn execute_paged( &self, prepared: &PreparedStatement, values: impl SerializeRow, - paging_state: Option, + paging_state: PagingState, ) -> Result { let serialized_values = prepared.serialize_values(&values)?; let values_ref = &serialized_values; diff --git a/scylla/src/transport/session_test.rs b/scylla/src/transport/session_test.rs index 617f3535b4..fe39bd0f46 100644 --- a/scylla/src/transport/session_test.rs +++ b/scylla/src/transport/session_test.rs @@ -25,9 +25,9 @@ use crate::ExecutionProfile; use crate::QueryResult; use crate::{Session, SessionBuilder}; use assert_matches::assert_matches; -use bytes::Bytes; use futures::{FutureExt, StreamExt, TryStreamExt}; use itertools::Itertools; +use scylla_cql::frame::request::query::{PagingState, PagingStateResponse}; use scylla_cql::frame::response::result::ColumnType; use scylla_cql::types::serialize::row::{SerializeRow, 
SerializedValues}; use scylla_cql::types::serialize::value::SerializeValue; @@ -145,7 +145,7 @@ async fn test_unprepared_statement() { } let mut results_from_manual_paging: Vec = vec![]; let query = Query::new(format!("SELECT a, b, c FROM {}.t", ks)).with_page_size(1); - let mut paging_state: Option = None; + let mut paging_state = PagingState::start(); let mut watchdog = 0; loop { let rs_manual = session @@ -153,11 +153,14 @@ async fn test_unprepared_statement() { .await .unwrap(); results_from_manual_paging.append(&mut rs_manual.rows.unwrap()); - if watchdog > 30 || rs_manual.paging_state.is_none() { - break; + match rs_manual.paging_state_response { + PagingStateResponse::HasMorePages { state } => { + paging_state = state; + } + _ if watchdog > 30 => break, + PagingStateResponse::NoMorePages => break, } watchdog += 1; - paging_state = rs_manual.paging_state; } assert_eq!(results_from_manual_paging, rs); } @@ -281,7 +284,7 @@ async fn test_prepared_statement() { let mut results_from_manual_paging: Vec = vec![]; let query = Query::new(format!("SELECT a, b, c FROM {}.t2", ks)).with_page_size(1); let prepared_paged = session.prepare(query).await.unwrap(); - let mut paging_state: Option = None; + let mut paging_state = PagingState::start(); let mut watchdog = 0; loop { let rs_manual = session @@ -289,11 +292,14 @@ async fn test_prepared_statement() { .await .unwrap(); results_from_manual_paging.append(&mut rs_manual.rows.unwrap()); - if watchdog > 30 || rs_manual.paging_state.is_none() { - break; + match rs_manual.paging_state_response { + PagingStateResponse::HasMorePages { state } => { + paging_state = state; + } + _ if watchdog > 30 => break, + PagingStateResponse::NoMorePages => break, } watchdog += 1; - paging_state = rs_manual.paging_state; } assert_eq!(results_from_manual_paging, rs); } diff --git a/scylla/tests/integration/skip_metadata_optimization.rs b/scylla/tests/integration/skip_metadata_optimization.rs index 6ee9ea2319..16a72b5d31 100644 --- 
a/scylla/tests/integration/skip_metadata_optimization.rs +++ b/scylla/tests/integration/skip_metadata_optimization.rs @@ -2,10 +2,11 @@ use crate::utils::{setup_tracing, test_with_3_node_cluster}; use scylla::transport::session::Session; use scylla::SessionBuilder; use scylla::{prepared_statement::PreparedStatement, test_utils::unique_keyspace_name}; +use scylla_cql::frame::request::query::{PagingState, PagingStateResponse}; use scylla_cql::frame::types; use scylla_proxy::{ - Condition, ProxyError, Reaction, ResponseFrame, ResponseReaction, ShardAwareness, TargetShard, - WorkerError, + Condition, ProxyError, Reaction, ResponseFrame, ResponseOpcode, ResponseReaction, ResponseRule, + ShardAwareness, TargetShard, WorkerError, }; use std::sync::Arc; @@ -14,8 +15,6 @@ use std::sync::Arc; #[cfg(not(scylla_cloud_tests))] async fn test_skip_result_metadata() { setup_tracing(); - use bytes::Bytes; - use scylla_proxy::{ResponseOpcode, ResponseRule}; const NO_METADATA_FLAG: i32 = 0x0004; @@ -124,19 +123,23 @@ async fn test_skip_result_metadata() { let mut prepared_paged = session.prepare(select_query).await.unwrap(); prepared_paged.set_use_cached_result_metadata(true); prepared_paged.set_page_size(1); - let mut paging_state: Option = None; + let mut paging_state = PagingState::start(); let mut watchdog = 0; loop { - let mut rs_manual = session + let rs_manual = session .execute_paged(&prepared_paged, &[], paging_state) .await .unwrap(); - eprintln!("Paging state: {:?}", rs_manual.paging_state); - paging_state = rs_manual.paging_state.take(); + let paging_state_response = rs_manual.paging_state_response.clone(); results_from_manual_paging .extend(rs_manual.rows_typed::().unwrap().map(Result::unwrap)); - if watchdog > 30 || paging_state.is_none() { - break; + + match paging_state_response { + PagingStateResponse::HasMorePages { state } => { + paging_state = state; + } + _ if watchdog > 30 => break, + PagingStateResponse::NoMorePages => break, } watchdog += 1; } From 
6d9249f5667599a0aa78db4e63d442d49843b30c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wojciech=20Przytu=C5=82a?= Date: Mon, 19 Aug 2024 16:39:13 +0200 Subject: [PATCH 04/19] statement: make page size mandatory As users' experience showed that it optional page size on statements is error-prone, it is made mandatory. This means that at the moment no unpaged queries are possible. However, in the next commits it is going to be brought back again. --- scylla/src/statement/prepared_statement.rs | 23 +++++-------- scylla/src/statement/query.rs | 23 +++++-------- scylla/src/transport/connection.rs | 4 +-- scylla/src/transport/iterator.rs | 39 +++------------------- scylla/src/transport/session_test.rs | 2 +- 5 files changed, 24 insertions(+), 67 deletions(-) diff --git a/scylla/src/statement/prepared_statement.rs b/scylla/src/statement/prepared_statement.rs index 0b7cb814c0..74d972fa29 100644 --- a/scylla/src/statement/prepared_statement.rs +++ b/scylla/src/statement/prepared_statement.rs @@ -92,7 +92,7 @@ pub struct PreparedStatement { id: Bytes, shared: Arc, - page_size: Option, + page_size: PageSize, partitioner_name: PartitionerName, is_confirmed_lwt: bool, } @@ -125,7 +125,7 @@ impl PreparedStatement { metadata: PreparedMetadata, result_metadata: ResultMetadata, statement: String, - page_size: Option, + page_size: PageSize, config: StatementConfig, ) -> Self { Self { @@ -155,27 +155,20 @@ impl PreparedStatement { /// /// Panics if given number is nonpositive. pub fn set_page_size(&mut self, page_size: i32) { - self.page_size = Some( - page_size - .try_into() - .unwrap_or_else(|err| panic!("PreparedStatement::set_page_size: {err}")), - ); - } - - /// Disables paging for this CQL query. - pub fn disable_paging(&mut self) { - self.page_size = None; + self.page_size = page_size + .try_into() + .unwrap_or_else(|err| panic!("PreparedStatement::set_page_size: {err}")); } /// Returns the page size for this CQL query. 
#[allow(dead_code)] - pub(crate) fn get_validated_page_size(&self) -> Option { + pub(crate) fn get_validated_page_size(&self) -> PageSize { self.page_size } /// Returns the page size for this CQL query. - pub fn get_page_size(&self) -> Option { - self.page_size.as_ref().map(PageSize::inner) + pub fn get_page_size(&self) -> i32 { + self.page_size.inner() } /// Gets tracing ids of queries used to prepare this statement diff --git a/scylla/src/statement/query.rs b/scylla/src/statement/query.rs index 855550b398..7fc2424d92 100644 --- a/scylla/src/statement/query.rs +++ b/scylla/src/statement/query.rs @@ -14,7 +14,7 @@ pub struct Query { pub(crate) config: StatementConfig, pub contents: String, - page_size: Option, + page_size: PageSize, } impl Query { @@ -22,7 +22,7 @@ impl Query { pub fn new(query_text: impl Into) -> Self { Self { contents: query_text.into(), - page_size: None, + page_size: PageSize::default(), config: Default::default(), } } @@ -39,26 +39,19 @@ impl Query { /// /// Panics if given number is nonpositive. pub fn set_page_size(&mut self, page_size: i32) { - self.page_size = Some( - page_size - .try_into() - .unwrap_or_else(|err| panic!("Query::set_page_size: {err}")), - ); - } - - /// Disables paging for this CQL query. - pub fn disable_paging(&mut self) { - self.page_size = None; + self.page_size = page_size + .try_into() + .unwrap_or_else(|err| panic!("Query::set_page_size: {err}")); } /// Returns the page size for this CQL query. - pub(crate) fn get_validated_page_size(&self) -> Option { + pub(crate) fn get_validated_page_size(&self) -> PageSize { self.page_size } /// Returns the page size for this CQL query. - pub fn get_page_size(&self) -> Option { - self.page_size.as_ref().map(PageSize::inner) + pub fn get_page_size(&self) -> i32 { + self.page_size.inner() } /// Sets the consistency to be used when executing this statement. 
diff --git a/scylla/src/transport/connection.rs b/scylla/src/transport/connection.rs index e1d9298acb..351de2c1f5 100644 --- a/scylla/src/transport/connection.rs +++ b/scylla/src/transport/connection.rs @@ -861,7 +861,7 @@ impl Connection { consistency, serial_consistency, values: Cow::Borrowed(SerializedValues::EMPTY), - page_size: query.get_page_size().map(Into::into), + page_size: Some(query.get_page_size().into()), paging_state, skip_metadata: false, timestamp: query.get_timestamp(), @@ -906,7 +906,7 @@ impl Connection { consistency, serial_consistency, values: Cow::Borrowed(values), - page_size: prepared_statement.get_page_size().map(Into::into), + page_size: Some(prepared_statement.get_page_size().into()), timestamp: prepared_statement.get_timestamp(), skip_metadata: prepared_statement.get_use_cached_result_metadata(), paging_state, diff --git a/scylla/src/transport/iterator.rs b/scylla/src/transport/iterator.rs index 0d614c86b0..d2ec751273 100644 --- a/scylla/src/transport/iterator.rs +++ b/scylla/src/transport/iterator.rs @@ -10,7 +10,6 @@ use std::task::{Context, Poll}; use futures::Stream; use scylla_cql::frame::response::NonErrorResponse; -use scylla_cql::frame::types::SerialConsistency; use scylla_cql::types::serialize::row::SerializedValues; use std::result::Result; use thiserror::Error; @@ -27,7 +26,7 @@ use crate::frame::response::{ }; use crate::history::{self, HistoryListener}; use crate::statement::{prepared_statement::PreparedStatement, query::Query}; -use crate::statement::{Consistency, PagingState}; +use crate::statement::{Consistency, PagingState, SerialConsistency}; use crate::transport::cluster::ClusterData; use crate::transport::connection::{Connection, NonErrorQueryResponse, QueryResponse}; use crate::transport::load_balancing::{self, RoutingInfo}; @@ -37,22 +36,6 @@ use crate::transport::NodeRef; use tracing::{trace, trace_span, warn, Instrument}; use uuid::Uuid; -// #424 -// -// Both `Query` and `PreparedStatement` have page size set to 
`None` as default, -// which means unlimited page size. This is a problem for `query_iter` -// and `execute_iter` because using them with such queries causes everything -// to be fetched in one page, despite them being meant to fetch data -// page-by-page. -// -// We can't really change the default page size for `Query` -// and `PreparedStatement` because it also affects `Session::{query,execute}` -// and this could break existing code. -// -// In order to work around the problem we just set the page size to a default -// value at the beginning of `query_iter` and `execute_iter`. -const DEFAULT_ITER_PAGE_SIZE: i32 = 5000; - /// Iterator over rows returned by paged queries\ /// Allows to easily access rows without worrying about handling multiple pages pub struct RowIterator { @@ -123,14 +106,11 @@ impl RowIterator { } pub(crate) async fn new_for_query( - mut query: Query, + query: Query, execution_profile: Arc, cluster_data: Arc, metrics: Arc, ) -> Result { - if query.get_page_size().is_none() { - query.set_page_size(DEFAULT_ITER_PAGE_SIZE); - } let (sender, receiver) = mpsc::channel(1); let consistency = query @@ -205,11 +185,8 @@ impl RowIterator { } pub(crate) async fn new_for_prepared_statement( - mut config: PreparedIteratorConfig, + config: PreparedIteratorConfig, ) -> Result { - if config.prepared.get_page_size().is_none() { - config.prepared.set_page_size(DEFAULT_ITER_PAGE_SIZE); - } let (sender, receiver) = mpsc::channel(1); let consistency = config @@ -322,14 +299,11 @@ impl RowIterator { } pub(crate) async fn new_for_connection_query_iter( - mut query: Query, + query: Query, connection: Arc, consistency: Consistency, serial_consistency: Option, ) -> Result { - if query.get_page_size().is_none() { - query.set_page_size(DEFAULT_ITER_PAGE_SIZE); - } let (sender, receiver) = mpsc::channel::>(1); let worker_task = async move { @@ -351,15 +325,12 @@ impl RowIterator { } pub(crate) async fn new_for_connection_execute_iter( - mut prepared: PreparedStatement, + 
prepared: PreparedStatement, values: SerializedValues, connection: Arc, consistency: Consistency, serial_consistency: Option, ) -> Result { - if prepared.get_page_size().is_none() { - prepared.set_page_size(DEFAULT_ITER_PAGE_SIZE); - } let (sender, receiver) = mpsc::channel::>(1); let worker_task = async move { diff --git a/scylla/src/transport/session_test.rs b/scylla/src/transport/session_test.rs index fe39bd0f46..1b8a8909cc 100644 --- a/scylla/src/transport/session_test.rs +++ b/scylla/src/transport/session_test.rs @@ -1357,7 +1357,7 @@ async fn test_prepared_config() { let prepared_statement = session.prepare(query).await.unwrap(); assert!(prepared_statement.get_is_idempotent()); - assert_eq!(prepared_statement.get_page_size(), Some(42)); + assert_eq!(prepared_statement.get_page_size(), 42); } fn udt_type_a_def(ks: &str) -> Arc { From 5dc11b2785cdf4694b114ee1cbfc1b5d5dccad05 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wojciech=20Przytu=C5=82a?= Date: Mon, 26 Aug 2024 12:45:24 +0200 Subject: [PATCH 05/19] connection: add `_raw` {suf/in}fix to relevant methods Some methods on Connection return QueryResponse instead of QueryResult. To make those methods stand out, they get "_raw" particle put into their names. 
--- scylla/src/transport/connection.rs | 24 ++++++++++++------------ scylla/src/transport/iterator.rs | 8 ++++---- scylla/src/transport/session.rs | 6 +++--- 3 files changed, 19 insertions(+), 19 deletions(-) diff --git a/scylla/src/transport/connection.rs b/scylla/src/transport/connection.rs index 351de2c1f5..e60ca4e46b 100644 --- a/scylla/src/transport/connection.rs +++ b/scylla/src/transport/connection.rs @@ -821,7 +821,7 @@ impl Connection { serial_consistency: Option, ) -> Result { let query: Query = query.into(); - self.query_with_consistency( + self.query_raw_with_consistency( &query, consistency, serial_consistency, @@ -831,13 +831,13 @@ impl Connection { .into_query_result() } - pub(crate) async fn query( + pub(crate) async fn query_raw( &self, query: &Query, paging_state: PagingState, ) -> Result { // This method is used only for driver internal queries, so no need to consult execution profile here. - self.query_with_consistency( + self.query_raw_with_consistency( query, query .config @@ -848,7 +848,7 @@ impl Connection { .await } - pub(crate) async fn query_with_consistency( + pub(crate) async fn query_raw_with_consistency( &self, query: &Query, consistency: Consistency, @@ -873,14 +873,14 @@ impl Connection { } #[allow(dead_code)] - pub(crate) async fn execute( + pub(crate) async fn execute_raw( &self, prepared: PreparedStatement, values: SerializedValues, paging_state: PagingState, ) -> Result { // This method is used only for driver internal queries, so no need to consult execution profile here. 
- self.execute_with_consistency( + self.execute_raw_with_consistency( &prepared, &values, prepared @@ -892,7 +892,7 @@ impl Connection { .await } - pub(crate) async fn execute_with_consistency( + pub(crate) async fn execute_raw_with_consistency( &self, prepared_statement: &PreparedStatement, values: &SerializedValues, @@ -1142,7 +1142,7 @@ impl Connection { false => format!("USE {}", keyspace_name.as_str()).into(), }; - let query_response = self.query(&query, PagingState::start()).await?; + let query_response = self.query_raw(&query, PagingState::start()).await?; match query_response.response { Response::Result(result::Result::SetKeyspace(set_keyspace)) => { @@ -2245,7 +2245,7 @@ mod tests { let values = prepared_clone.serialize_values(&(*v,)).unwrap(); let fut = async { connection - .execute(prepared_clone, values, PagingState::start()) + .execute_raw(prepared_clone, values, PagingState::start()) .await }; insert_futures.push(fut); @@ -2331,7 +2331,7 @@ mod tests { .unwrap(); connection - .query(&"TRUNCATE t".into(), PagingState::start()) + .query_raw(&"TRUNCATE t".into(), PagingState::start()) .await .unwrap(); @@ -2353,7 +2353,7 @@ mod tests { .serialize_values(&(j, vec![j as u8; j as usize])) .unwrap(); let response = conn - .execute(prepared.clone(), values, PagingState::start()) + .execute_raw(prepared.clone(), values, PagingState::start()) .await .unwrap(); // QueryResponse might contain an error - make sure that there were no errors @@ -2372,7 +2372,7 @@ mod tests { // Check that everything was written properly let range_end = arithmetic_sequence_sum(NUM_BATCHES); let mut results = connection - .query(&"SELECT p, v FROM t".into(), PagingState::start()) + .query_raw(&"SELECT p, v FROM t".into(), PagingState::start()) .await .unwrap() .into_query_result() diff --git a/scylla/src/transport/iterator.rs b/scylla/src/transport/iterator.rs index d2ec751273..d223053576 100644 --- a/scylla/src/transport/iterator.rs +++ b/scylla/src/transport/iterator.rs @@ -143,7 
+143,7 @@ impl RowIterator { paging_state: PagingState| { async move { connection - .query_with_consistency( + .query_raw_with_consistency( query_ref, consistency, serial_consistency, @@ -236,7 +236,7 @@ impl RowIterator { consistency: Consistency, paging_state: PagingState| async move { connection - .execute_with_consistency( + .execute_raw_with_consistency( prepared_ref, values_ref, consistency, @@ -310,7 +310,7 @@ impl RowIterator { let worker = SingleConnectionRowIteratorWorker { sender: sender.into(), fetcher: |paging_state| { - connection.query_with_consistency( + connection.query_raw_with_consistency( &query, consistency, serial_consistency, @@ -337,7 +337,7 @@ impl RowIterator { let worker = SingleConnectionRowIteratorWorker { sender: sender.into(), fetcher: |paging_state| { - connection.execute_with_consistency( + connection.execute_raw_with_consistency( &prepared, &values, consistency, diff --git a/scylla/src/transport/session.rs b/scylla/src/transport/session.rs index ac5dddfa32..3978044c25 100644 --- a/scylla/src/transport/session.rs +++ b/scylla/src/transport/session.rs @@ -684,7 +684,7 @@ impl Session { if values_ref.is_empty() { span_ref.record_request_size(0); connection - .query_with_consistency( + .query_raw_with_consistency( query_ref, consistency, serial_consistency, @@ -697,7 +697,7 @@ impl Session { let serialized = prepared.serialize_values(values_ref)?; span_ref.record_request_size(serialized.buffer_size()); connection - .execute_with_consistency( + .execute_raw_with_consistency( &prepared, &serialized, consistency, @@ -1051,7 +1051,7 @@ impl Session { .unwrap_or(execution_profile.serial_consistency); async move { connection - .execute_with_consistency( + .execute_raw_with_consistency( prepared, values_ref, consistency, From e6f0268488952814701e02e2704a8066db8b1817 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wojciech=20Przytu=C5=82a?= Date: Mon, 26 Aug 2024 15:28:15 +0200 Subject: [PATCH 06/19] connection: execute_raw takes prepared by ref 
Without a clear reason, Connection::execute_raw would take PreparedStatement by value, involving a clone. The signature was changed to accept a shared reference and its usages are adjusted. --- scylla/src/transport/connection.rs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/scylla/src/transport/connection.rs b/scylla/src/transport/connection.rs index e60ca4e46b..b212b22390 100644 --- a/scylla/src/transport/connection.rs +++ b/scylla/src/transport/connection.rs @@ -875,13 +875,13 @@ impl Connection { #[allow(dead_code)] pub(crate) async fn execute_raw( &self, - prepared: PreparedStatement, + prepared: &PreparedStatement, values: SerializedValues, paging_state: PagingState, ) -> Result { // This method is used only for driver internal queries, so no need to consult execution profile here. self.execute_raw_with_consistency( - &prepared, + prepared, &values, prepared .config @@ -2236,16 +2236,15 @@ mod tests { // 2. Insert 100 and select using query_iter with page_size 7 let values: Vec = (0..100).collect(); - let mut insert_futures = Vec::new(); let insert_query = Query::new("INSERT INTO connection_query_iter_tab (p) VALUES (?)").with_page_size(7); let prepared = connection.prepare(&insert_query).await.unwrap(); + let mut insert_futures = Vec::new(); for v in &values { - let prepared_clone = prepared.clone(); - let values = prepared_clone.serialize_values(&(*v,)).unwrap(); + let values = prepared.serialize_values(&(*v,)).unwrap(); let fut = async { connection - .execute_raw(prepared_clone, values, PagingState::start()) + .execute_raw(&prepared, values, PagingState::start()) .await }; insert_futures.push(fut); @@ -2353,7 +2352,7 @@ mod tests { .serialize_values(&(j, vec![j as u8; j as usize])) .unwrap(); let response = conn - .execute_raw(prepared.clone(), values, PagingState::start()) + .execute_raw(&prepared, values, PagingState::start()) .await .unwrap(); // QueryResponse might contain an error - make sure that there were no errors 
From e4152670cbc58975eb4d2e3153b4b71317e55de5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wojciech=20Przytu=C5=82a?= Date: Mon, 26 Aug 2024 13:24:54 +0200 Subject: [PATCH 07/19] connection: introduce non-raw query() and execute() These methods are analogous to `Session`'s `{query,execute}`. Similarly, they don't accept a non-start PagingState. --- scylla/src/transport/connection.rs | 36 +++++++++++++++++++++--------- 1 file changed, 26 insertions(+), 10 deletions(-) diff --git a/scylla/src/transport/connection.rs b/scylla/src/transport/connection.rs index b212b22390..30afb6102d 100644 --- a/scylla/src/transport/connection.rs +++ b/scylla/src/transport/connection.rs @@ -831,6 +831,16 @@ impl Connection { .into_query_result() } + #[allow(dead_code)] + pub(crate) async fn query(&self, query: impl Into) -> Result { + // This method is used only for driver internal queries, so no need to consult execution profile here. + let query: Query = query.into(); + + self.query_raw(&query, PagingState::start()) + .await + .and_then(QueryResponse::into_query_result) + } + pub(crate) async fn query_raw( &self, query: &Query, @@ -872,6 +882,18 @@ impl Connection { .await } + #[allow(dead_code)] + pub(crate) async fn execute( + &self, + prepared: &PreparedStatement, + values: SerializedValues, + ) -> Result { + // This method is used only for driver internal queries, so no need to consult execution profile here. 
+ self.execute_raw(prepared, values, PagingState::start()) + .await + .and_then(QueryResponse::into_query_result) + } + #[allow(dead_code)] pub(crate) async fn execute_raw( &self, @@ -2148,7 +2170,7 @@ mod tests { use crate::transport::node::ResolvedContactPoint; use crate::transport::topology::UntranslatedEndpoint; use crate::utils::test_utils::unique_keyspace_name; - use crate::{IntoTypedRows, SessionBuilder}; + use crate::SessionBuilder; use futures::{StreamExt, TryStreamExt}; use std::collections::HashMap; use std::net::SocketAddr; @@ -2329,10 +2351,7 @@ mod tests { .await .unwrap(); - connection - .query_raw(&"TRUNCATE t".into(), PagingState::start()) - .await - .unwrap(); + connection.query("TRUNCATE t").await.unwrap(); let mut futs = Vec::new(); @@ -2371,14 +2390,11 @@ mod tests { // Check that everything was written properly let range_end = arithmetic_sequence_sum(NUM_BATCHES); let mut results = connection - .query_raw(&"SELECT p, v FROM t".into(), PagingState::start()) + .query("SELECT p, v FROM t") .await .unwrap() - .into_query_result() - .unwrap() - .rows() + .rows_typed::<(i32, Vec)>() .unwrap() - .into_typed::<(i32, Vec)>() .collect::, _>>() .unwrap(); results.sort(); From c008d76a343e6ec713c4e5360fd21e632aded43d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wojciech=20Przytu=C5=82a?= Date: Mon, 19 Aug 2024 17:04:07 +0200 Subject: [PATCH 08/19] connection: pass page size through request API --- scylla/src/statement/prepared_statement.rs | 1 - scylla/src/transport/connection.rs | 20 ++++++++++++++++---- scylla/src/transport/iterator.rs | 13 +++++++++++++ scylla/src/transport/session.rs | 7 +++++++ 4 files changed, 36 insertions(+), 5 deletions(-) diff --git a/scylla/src/statement/prepared_statement.rs b/scylla/src/statement/prepared_statement.rs index 74d972fa29..0d00fe6566 100644 --- a/scylla/src/statement/prepared_statement.rs +++ b/scylla/src/statement/prepared_statement.rs @@ -161,7 +161,6 @@ impl PreparedStatement { } /// Returns the page size for this 
CQL query. - #[allow(dead_code)] pub(crate) fn get_validated_page_size(&self) -> PageSize { self.page_size } diff --git a/scylla/src/transport/connection.rs b/scylla/src/transport/connection.rs index 30afb6102d..d39f36dd0d 100644 --- a/scylla/src/transport/connection.rs +++ b/scylla/src/transport/connection.rs @@ -62,7 +62,7 @@ use crate::frame::{ use crate::query::Query; use crate::routing::ShardInfo; use crate::statement::prepared_statement::PreparedStatement; -use crate::statement::{Consistency, PagingState, PagingStateResponse}; +use crate::statement::{Consistency, PageSize, PagingState, PagingStateResponse}; use crate::transport::Compression; use crate::QueryResult; @@ -821,10 +821,13 @@ impl Connection { serial_consistency: Option, ) -> Result { let query: Query = query.into(); + let page_size = query.get_validated_page_size(); + self.query_raw_with_consistency( &query, consistency, serial_consistency, + Some(page_size), PagingState::start(), ) .await? @@ -847,12 +850,15 @@ impl Connection { paging_state: PagingState, ) -> Result { // This method is used only for driver internal queries, so no need to consult execution profile here. 
+ let page_size = query.get_validated_page_size(); + self.query_raw_with_consistency( query, query .config .determine_consistency(self.config.default_consistency), query.config.serial_consistency.flatten(), + Some(page_size), paging_state, ) .await @@ -863,6 +869,7 @@ impl Connection { query: &Query, consistency: Consistency, serial_consistency: Option, + page_size: Option, paging_state: PagingState, ) -> Result { let query_frame = query::Query { @@ -871,7 +878,7 @@ impl Connection { consistency, serial_consistency, values: Cow::Borrowed(SerializedValues::EMPTY), - page_size: Some(query.get_page_size().into()), + page_size: page_size.map(Into::into), paging_state, skip_metadata: false, timestamp: query.get_timestamp(), @@ -902,6 +909,8 @@ impl Connection { paging_state: PagingState, ) -> Result { // This method is used only for driver internal queries, so no need to consult execution profile here. + let page_size = prepared.get_validated_page_size(); + self.execute_raw_with_consistency( prepared, &values, @@ -909,6 +918,7 @@ impl Connection { .config .determine_consistency(self.config.default_consistency), prepared.config.serial_consistency.flatten(), + Some(page_size), paging_state, ) .await @@ -920,6 +930,7 @@ impl Connection { values: &SerializedValues, consistency: Consistency, serial_consistency: Option, + page_size: Option, paging_state: PagingState, ) -> Result { let execute_frame = execute::Execute { @@ -928,7 +939,7 @@ impl Connection { consistency, serial_consistency, values: Cow::Borrowed(values), - page_size: Some(prepared_statement.get_page_size().into()), + page_size: page_size.map(Into::into), timestamp: prepared_statement.get_timestamp(), skip_metadata: prepared_statement.get_use_cached_result_metadata(), paging_state, @@ -1207,7 +1218,7 @@ impl Connection { } pub(crate) async fn fetch_schema_version(&self) -> Result { - let (version_id,): (Uuid,) = self + let (version_id,) = self .query_single_page(LOCAL_VERSION) .await? 
.single_row_typed() @@ -2499,6 +2510,7 @@ mod tests { #[cfg(not(scylla_cloud_tests))] async fn connection_is_closed_on_no_response_to_keepalives() { setup_tracing(); + let proxy_addr = SocketAddr::new(scylla_proxy::get_exclusive_local_address(), 9042); let uri = std::env::var("SCYLLA_URI").unwrap_or_else(|_| "127.0.0.1:9042".to_string()); let node_addr: SocketAddr = resolve_hostname(&uri).await; diff --git a/scylla/src/transport/iterator.rs b/scylla/src/transport/iterator.rs index d223053576..cb5c26ca87 100644 --- a/scylla/src/transport/iterator.rs +++ b/scylla/src/transport/iterator.rs @@ -122,6 +122,8 @@ impl RowIterator { .serial_consistency .unwrap_or(execution_profile.serial_consistency); + let page_size = query.get_validated_page_size(); + let routing_info = RoutingInfo { consistency, serial_consistency, @@ -147,6 +149,7 @@ impl RowIterator { query_ref, consistency, serial_consistency, + Some(page_size), paging_state, ) .await @@ -199,6 +202,9 @@ impl RowIterator { .config .serial_consistency .unwrap_or(config.execution_profile.serial_consistency); + + let page_size = config.prepared.get_validated_page_size(); + let retry_session = config .prepared .get_retry_policy() @@ -241,6 +247,7 @@ impl RowIterator { values_ref, consistency, serial_consistency, + Some(page_size), paging_state, ) .await @@ -306,6 +313,8 @@ impl RowIterator { ) -> Result { let (sender, receiver) = mpsc::channel::>(1); + let page_size = query.get_validated_page_size(); + let worker_task = async move { let worker = SingleConnectionRowIteratorWorker { sender: sender.into(), @@ -314,6 +323,7 @@ impl RowIterator { &query, consistency, serial_consistency, + Some(page_size), paging_state, ) }, @@ -333,6 +343,8 @@ impl RowIterator { ) -> Result { let (sender, receiver) = mpsc::channel::>(1); + let page_size = prepared.get_validated_page_size(); + let worker_task = async move { let worker = SingleConnectionRowIteratorWorker { sender: sender.into(), @@ -342,6 +354,7 @@ impl RowIterator { &values, 
consistency, serial_consistency, + Some(page_size), paging_state, ) }, diff --git a/scylla/src/transport/session.rs b/scylla/src/transport/session.rs index 3978044c25..957a4ee925 100644 --- a/scylla/src/transport/session.rs +++ b/scylla/src/transport/session.rs @@ -662,6 +662,8 @@ impl Session { ..Default::default() }; + let page_size = Some(query.get_validated_page_size()); + let span = RequestSpan::new_query(&query.contents); let span_ref = &span; let run_query_result = self @@ -688,6 +690,7 @@ impl Session { query_ref, consistency, serial_consistency, + page_size, paging_state_ref.clone(), ) .await @@ -702,6 +705,7 @@ impl Session { &serialized, consistency, serial_consistency, + page_size, paging_state_ref.clone(), ) .await @@ -1021,6 +1025,8 @@ impl Session { is_confirmed_lwt: prepared.is_confirmed_lwt(), }; + let page_size = Some(prepared.get_validated_page_size()); + let span = RequestSpan::new_prepared( partition_key.as_ref().map(|pk| pk.iter()), token, @@ -1056,6 +1062,7 @@ impl Session { values_ref, consistency, serial_consistency, + page_size, paging_state_ref.clone(), ) .await From 1eaf42cd6280e6e8a3aee47b661037563b4bf23b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wojciech=20Przytu=C5=82a?= Date: Mon, 19 Aug 2024 17:23:02 +0200 Subject: [PATCH 09/19] session: make {query,execute} methods unpaged The logic of {query,execute} is extracted to {query_inner,execute_inner} pub(crate) methods, respectively. {query,execute}_paged are unchanged. {query,execute} now are unpaged unconditionally, i.e. they ignore the page size set on a statement and pass None to the connection layer. In the next commit, both {query,execute} are appended the `_unpaged` suffix to explicitly state that they perform unpaged queries. 
--- scylla/src/transport/session.rs | 65 ++++++++++++++++++++++++++------- 1 file changed, 51 insertions(+), 14 deletions(-) diff --git a/scylla/src/transport/session.rs b/scylla/src/transport/session.rs index 957a4ee925..d25f12a31f 100644 --- a/scylla/src/transport/session.rs +++ b/scylla/src/transport/session.rs @@ -17,7 +17,7 @@ pub use scylla_cql::errors::TranslationError; use scylla_cql::frame::response::result::{deser_cql_value, ColumnSpec, Rows}; use scylla_cql::frame::response::NonErrorResponse; use scylla_cql::types::serialize::batch::BatchValues; -use scylla_cql::types::serialize::row::SerializeRow; +use scylla_cql::types::serialize::row::{SerializeRow, SerializedValues}; use std::borrow::Borrow; use std::collections::HashMap; use std::fmt::Display; @@ -52,7 +52,7 @@ use crate::frame::response::result; use crate::prepared_statement::PreparedStatement; use crate::query::Query; use crate::routing::{Shard, Token}; -use crate::statement::{Consistency, PagingState}; +use crate::statement::{Consistency, PageSize, PagingState}; use crate::tracing::{TracingEvent, TracingInfo}; use crate::transport::cluster::{Cluster, ClusterData, ClusterNeatDebug}; use crate::transport::connection::{Connection, ConnectionConfig, VerifiedKeyspaceName}; @@ -623,7 +623,9 @@ impl Session { query: impl Into, values: impl SerializeRow, ) -> Result { - self.query_paged(query, values, PagingState::start()).await + let query = query.into(); + self.query_inner(&query, values, None, PagingState::start()) + .await } /// Queries the database with a custom paging state. @@ -643,8 +645,34 @@ impl Session { values: impl SerializeRow, paging_state: PagingState, ) -> Result { - let query: Query = query.into(); + let query = query.into(); + self.query_inner( + &query, + values, + Some(query.get_validated_page_size()), + paging_state, + ) + .await + } + /// Queries the database with a custom paging state. 
+ /// + /// It is discouraged to use this method with non-empty values argument (`is_empty()` method from `SerializeRow` + /// trait returns false). In such case, query first needs to be prepared (on a single connection), so + /// driver will perform 2 round trips instead of 1. Please use [`Session::execute_paged()`] instead. + /// + /// # Arguments + /// + /// * `query` - query to be performed + /// * `values` - values bound to the query + /// * `paging_state` - previously received paging state or [PagingState::start()] + async fn query_inner( + &self, + query: &Query, + values: impl SerializeRow, + page_size: Option, + paging_state: PagingState, + ) -> Result { let execution_profile = query .get_execution_profile_handle() .unwrap_or_else(|| self.get_default_execution_profile_handle()) @@ -662,8 +690,6 @@ impl Session { ..Default::default() }; - let page_size = Some(query.get_validated_page_size()); - let span = RequestSpan::new_query(&query.contents); let span_ref = &span; let run_query_result = self @@ -980,7 +1006,20 @@ impl Session { prepared: &PreparedStatement, values: impl SerializeRow, ) -> Result { - self.execute_paged(prepared, values, PagingState::start()) + let serialized_values = prepared.serialize_values(&values)?; + self.execute_inner(prepared, &serialized_values, None, PagingState::start()) + .await + } + + pub async fn execute_paged( + &self, + prepared: &PreparedStatement, + values: impl SerializeRow, + paging_state: PagingState, + ) -> Result { + let serialized_values = prepared.serialize_values(&values)?; + let page_size = prepared.get_validated_page_size(); + self.execute_inner(prepared, &serialized_values, Some(page_size), paging_state) .await } @@ -988,15 +1027,15 @@ impl Session { /// # Arguments /// /// * `prepared` - a statement prepared with [prepare](crate::transport::session::Session::prepare) - /// * `values` - values bound to the query - /// * `paging_state` - paging state from the previous query or [PagingState::start()] - pub async 
fn execute_paged( + /// * `values` - values bound to the statement + /// * `paging_state` - paging state from the previous execution or [PagingState::start()] + async fn execute_inner( &self, prepared: &PreparedStatement, - values: impl SerializeRow, + serialized_values: &SerializedValues, + page_size: Option, paging_state: PagingState, ) -> Result { - let serialized_values = prepared.serialize_values(&values)?; let values_ref = &serialized_values; let paging_state_ref = &paging_state; @@ -1025,8 +1064,6 @@ impl Session { is_confirmed_lwt: prepared.is_confirmed_lwt(), }; - let page_size = Some(prepared.get_validated_page_size()); - let span = RequestSpan::new_prepared( partition_key.as_ref().map(|pk| pk.iter()), token, From bfa853cdee9a4b391dfe884a35bd8499d0119a54 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wojciech=20Przytu=C5=82a?= Date: Mon, 19 Aug 2024 15:14:06 +0200 Subject: [PATCH 10/19] session: add "_unpaged" suffix to {query,execute} In order to make it explicit that the requests using those methods are performed without paging, their names are adjusted. 
--- README.md | 2 +- docs/source/data-types/blob.md | 4 +- docs/source/data-types/collections.md | 24 +- docs/source/data-types/counter.md | 2 +- docs/source/data-types/date.md | 12 +- docs/source/data-types/decimal.md | 8 +- docs/source/data-types/duration.md | 4 +- docs/source/data-types/inet.md | 4 +- docs/source/data-types/primitive.md | 28 +- docs/source/data-types/text.md | 6 +- docs/source/data-types/time.md | 12 +- docs/source/data-types/timestamp.md | 12 +- docs/source/data-types/timeuuid.md | 8 +- docs/source/data-types/tuple.md | 4 +- docs/source/data-types/udt.md | 4 +- docs/source/data-types/uuid.md | 4 +- docs/source/data-types/varint.md | 4 +- docs/source/execution-profiles/priority.md | 6 +- docs/source/logging/logging.md | 8 +- docs/source/queries/lwt.md | 2 +- docs/source/queries/prepared.md | 8 +- docs/source/queries/result.md | 18 +- docs/source/queries/simple.md | 8 +- docs/source/queries/timeouts.md | 4 +- docs/source/queries/usekeyspace.md | 6 +- docs/source/queries/values.md | 26 +- docs/source/quickstart/example.md | 8 +- docs/source/retry-policy/default.md | 4 +- .../retry-policy/downgrading-consistency.md | 4 +- docs/source/retry-policy/fallthrough.md | 4 +- docs/source/tracing/basic.md | 4 +- docs/source/tracing/query-history.md | 2 +- docs/source/tracing/tracing.md | 4 +- examples/allocations.rs | 6 +- examples/auth.rs | 4 +- examples/basic.rs | 20 +- examples/cloud.rs | 4 +- examples/compare-tokens.rs | 8 +- examples/cql-time-types.rs | 44 +- examples/cqlsh-rs.rs | 2 +- examples/custom_deserialization.rs | 10 +- examples/execution_profile.rs | 18 +- examples/get_by_name.rs | 10 +- examples/logging.rs | 4 +- examples/logging_log.rs | 4 +- examples/parallel-prepared.rs | 6 +- examples/parallel.rs | 6 +- examples/query_history.rs | 10 +- examples/schema_agreement.rs | 16 +- examples/select-paging.rs | 6 +- examples/speculative-execution.rs | 6 +- examples/tls.rs | 16 +- examples/tower.rs | 2 +- examples/tracing.rs | 8 +- 
examples/user-defined-type.rs | 10 +- examples/value_list.rs | 10 +- scylla-cql/src/frame/request/query.rs | 1 + scylla/src/history.rs | 12 +- scylla/src/transport/authenticate_test.rs | 14 +- scylla/src/transport/caching_session.rs | 30 +- scylla/src/transport/connection.rs | 10 +- scylla/src/transport/cql_collections_test.rs | 8 +- scylla/src/transport/cql_types_test.rs | 197 ++++----- scylla/src/transport/cql_value_test.rs | 16 +- .../transport/large_batch_statements_test.rs | 4 +- .../src/transport/load_balancing/default.rs | 2 +- scylla/src/transport/session.rs | 8 +- scylla/src/transport/session_test.rs | 398 ++++++++++-------- .../transport/silent_prepare_batch_test.rs | 6 +- scylla/src/utils/test_utils.rs | 4 +- scylla/tests/integration/consistency.rs | 18 +- .../tests/integration/execution_profiles.rs | 28 +- scylla/tests/integration/lwt_optimisation.rs | 8 +- scylla/tests/integration/new_session.rs | 2 +- scylla/tests/integration/retries.rs | 24 +- scylla/tests/integration/shards.rs | 6 +- .../tests/integration/silent_prepare_query.rs | 12 +- .../integration/skip_metadata_optimization.rs | 16 +- scylla/tests/integration/tablets.rs | 28 +- 79 files changed, 707 insertions(+), 633 deletions(-) diff --git a/README.md b/README.md index 02364982b4..e215a80319 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ let uri = "127.0.0.1:9042"; let session: Session = SessionBuilder::new().known_node(uri).build().await?; -let result = session.query("SELECT a, b, c FROM ks.t", &[]).await?; +let result = session.query_unpaged("SELECT a, b, c FROM ks.t", &[]).await?; let mut iter = result.rows_typed::<(i32, i32, String)>()?; while let Some((a, b, c)) = iter.next().transpose()? 
{ println!("a, b, c: {}, {}, {}", a, b, c); diff --git a/docs/source/data-types/blob.md b/docs/source/data-types/blob.md index 83ef1306e8..db4aaa1d99 100644 --- a/docs/source/data-types/blob.md +++ b/docs/source/data-types/blob.md @@ -13,11 +13,11 @@ use scylla::IntoTypedRows; // We can insert it by reference to not move the whole blob let to_insert: Vec = vec![1, 2, 3, 4, 5]; session - .query("INSERT INTO keyspace.table (a) VALUES(?)", (&to_insert,)) + .query_unpaged("INSERT INTO keyspace.table (a) VALUES(?)", (&to_insert,)) .await?; // Read blobs from the table -let result = session.query("SELECT a FROM keyspace.table", &[]).await?; +let result = session.query_unpaged("SELECT a FROM keyspace.table", &[]).await?; let mut iter = result.rows_typed::<(Vec,)>()?; while let Some((blob_value,)) = iter.next().transpose()? { println!("{:?}", blob_value); diff --git a/docs/source/data-types/collections.md b/docs/source/data-types/collections.md index 5a1570ad3d..ac06fd566b 100644 --- a/docs/source/data-types/collections.md +++ b/docs/source/data-types/collections.md @@ -13,11 +13,11 @@ use scylla::IntoTypedRows; // Insert a list of ints into the table let my_list: Vec = vec![1, 2, 3, 4, 5]; session - .query("INSERT INTO keyspace.table (a) VALUES(?)", (&my_list,)) + .query_unpaged("INSERT INTO keyspace.table (a) VALUES(?)", (&my_list,)) .await?; // Read a list of ints from the table -let result = session.query("SELECT a FROM keyspace.table", &[]).await?; +let result = session.query_unpaged("SELECT a FROM keyspace.table", &[]).await?; let mut iter = result.rows_typed::<(Vec,)>()?; while let Some((list_value,)) = iter.next().transpose()? 
{ println!("{:?}", list_value); @@ -39,11 +39,11 @@ use scylla::IntoTypedRows; // Insert a set of ints into the table let my_set: Vec = vec![1, 2, 3, 4, 5]; session - .query("INSERT INTO keyspace.table (a) VALUES(?)", (&my_set,)) + .query_unpaged("INSERT INTO keyspace.table (a) VALUES(?)", (&my_set,)) .await?; // Read a set of ints from the table -let result = session.query("SELECT a FROM keyspace.table", &[]).await?; +let result = session.query_unpaged("SELECT a FROM keyspace.table", &[]).await?; let mut iter = result.rows_typed::<(Vec,)>()?; while let Some((list_value,)) = iter.next().transpose()? { println!("{:?}", list_value); @@ -63,11 +63,11 @@ use std::collections::HashSet; // Insert a set of ints into the table let my_set: HashSet = vec![1, 2, 3, 4, 5].into_iter().collect(); session - .query("INSERT INTO keyspace.table (a) VALUES(?)", (&my_set,)) + .query_unpaged("INSERT INTO keyspace.table (a) VALUES(?)", (&my_set,)) .await?; // Read a set of ints from the table -let result = session.query("SELECT a FROM keyspace.table", &[]).await?; +let result = session.query_unpaged("SELECT a FROM keyspace.table", &[]).await?; let mut iter = result.rows_typed::<(HashSet,)>()?; while let Some((list_value,)) = iter.next().transpose()? { println!("{:?}", list_value); @@ -87,11 +87,11 @@ use std::collections::BTreeSet; // Insert a set of ints into the table let my_set: BTreeSet = vec![1, 2, 3, 4, 5].into_iter().collect(); session - .query("INSERT INTO keyspace.table (a) VALUES(?)", (&my_set,)) + .query_unpaged("INSERT INTO keyspace.table (a) VALUES(?)", (&my_set,)) .await?; // Read a set of ints from the table -let result = session.query("SELECT a FROM keyspace.table", &[]).await?; +let result = session.query_unpaged("SELECT a FROM keyspace.table", &[]).await?; let mut iter = result.rows_typed::<(BTreeSet,)>()?; while let Some((list_value,)) = iter.next().transpose()? 
{ println!("{:?}", list_value); @@ -116,11 +116,11 @@ let mut my_map: HashMap = HashMap::new(); my_map.insert("abcd".to_string(), 16); session - .query("INSERT INTO keyspace.table (a) VALUES(?)", (&my_map,)) + .query_unpaged("INSERT INTO keyspace.table (a) VALUES(?)", (&my_map,)) .await?; // Read a map from the table -let result = session.query("SELECT a FROM keyspace.table", &[]).await?; +let result = session.query_unpaged("SELECT a FROM keyspace.table", &[]).await?; let mut iter = result.rows_typed::<(HashMap,)>()?; while let Some((map_value,)) = iter.next().transpose()? { println!("{:?}", map_value); @@ -142,11 +142,11 @@ let mut my_map: BTreeMap = BTreeMap::new(); my_map.insert("abcd".to_string(), 16); session - .query("INSERT INTO keyspace.table (a) VALUES(?)", (&my_map,)) + .query_unpaged("INSERT INTO keyspace.table (a) VALUES(?)", (&my_map,)) .await?; // Read a map from the table -let result = session.query("SELECT a FROM keyspace.table", &[]).await?; +let result = session.query_unpaged("SELECT a FROM keyspace.table", &[]).await?; let mut iter = result.rows_typed::<(BTreeMap,)>()?; while let Some((map_value,)) = iter.next().transpose()? { println!("{:?}", map_value); diff --git a/docs/source/data-types/counter.md b/docs/source/data-types/counter.md index 37eb46439f..4d45b4422f 100644 --- a/docs/source/data-types/counter.md +++ b/docs/source/data-types/counter.md @@ -11,7 +11,7 @@ use scylla::IntoTypedRows; use scylla::frame::value::Counter; // Read counter from the table -let result = session.query("SELECT c FROM keyspace.table", &[]).await?; +let result = session.query_unpaged("SELECT c FROM keyspace.table", &[]).await?; let mut iter = result.rows_typed::<(Counter,)>()?; while let Some((counter_value,)) = iter.next().transpose()? 
{ let counter_int_value: i64 = counter_value.0; diff --git a/docs/source/data-types/date.md b/docs/source/data-types/date.md index 202665295a..d1af135125 100644 --- a/docs/source/data-types/date.md +++ b/docs/source/data-types/date.md @@ -25,12 +25,12 @@ let to_insert = CqlDate((1 << 31) + 7); // Insert date into the table session - .query("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) + .query_unpaged("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) .await?; // Read raw Date from the table if let Some(rows) = session - .query("SELECT a FROM keyspace.table", &[]) + .query_unpaged("SELECT a FROM keyspace.table", &[]) .await? .rows { @@ -63,11 +63,11 @@ let to_insert = NaiveDate::from_ymd_opt(2021, 3, 24).unwrap(); // Insert date into the table session - .query("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) + .query_unpaged("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) .await?; // Read NaiveDate from the table -let result = session.query("SELECT a FROM keyspace.table", &[]).await?; +let result = session.query_unpaged("SELECT a FROM keyspace.table", &[]).await?; let mut iter = result.rows_typed::<(NaiveDate,)>()?; while let Some((date_value,)) = iter.next().transpose()? { println!("{:?}", date_value); @@ -97,11 +97,11 @@ let to_insert = Date::from_calendar_date(2021, Month::March, 24).unwrap(); // Insert date into the table session - .query("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) + .query_unpaged("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) .await?; // Read Date from the table -let result = session.query("SELECT a FROM keyspace.table", &[]).await?; +let result = session.query_unpaged("SELECT a FROM keyspace.table", &[]).await?; let mut iter = result.rows_typed::<(Date,)>()?; while let Some((date_value,)) = iter.next().transpose()? 
{ println!("{:?}", date_value); diff --git a/docs/source/data-types/decimal.md b/docs/source/data-types/decimal.md index 6eb3776f69..1590fab626 100644 --- a/docs/source/data-types/decimal.md +++ b/docs/source/data-types/decimal.md @@ -18,11 +18,11 @@ use std::str::FromStr; let to_insert: CqlDecimal = CqlDecimal::from_signed_be_bytes_and_exponent(vec![0x01, 0xE2, 0x40], 3); session - .query("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) + .query_unpaged("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) .await?; // Read a decimal from the table -if let Some(rows) = session.query("SELECT a FROM keyspace.table", &[]).await?.rows { +if let Some(rows) = session.query_unpaged("SELECT a FROM keyspace.table", &[]).await?.rows { for row in rows.into_typed::<(CqlDecimal,)>() { let (decimal_value,): (CqlDecimal,) = row?; } @@ -48,11 +48,11 @@ use std::str::FromStr; // Insert a decimal into the table let to_insert: BigDecimal = BigDecimal::from_str("12345.0")?; session - .query("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) + .query_unpaged("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) .await?; // Read a decimal from the table -let result = session.query("SELECT a FROM keyspace.table", &[]).await?; +let result = session.query_unpaged("SELECT a FROM keyspace.table", &[]).await?; let mut iter = result.rows_typed::<(BigDecimal,)>()?; while let Some((decimal_value,)) = iter.next().transpose()? 
{ println!("{:?}", decimal_value); diff --git a/docs/source/data-types/duration.md b/docs/source/data-types/duration.md index 79f9f47080..bd4faa1823 100644 --- a/docs/source/data-types/duration.md +++ b/docs/source/data-types/duration.md @@ -12,11 +12,11 @@ use scylla::frame::value::CqlDuration; // Insert some duration into the table let to_insert: CqlDuration = CqlDuration { months: 1, days: 2, nanoseconds: 3 }; session - .query("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) + .query_unpaged("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) .await?; // Read duration from the table -let result = session.query("SELECT a FROM keyspace.table", &[]).await?; +let result = session.query_unpaged("SELECT a FROM keyspace.table", &[]).await?; let mut iter = result.rows_typed::<(CqlDuration,)>()?; while let Some((duration_value,)) = iter.next().transpose()? { println!("{:?}", duration_value); diff --git a/docs/source/data-types/inet.md b/docs/source/data-types/inet.md index c7c9f26ee9..15eb408c58 100644 --- a/docs/source/data-types/inet.md +++ b/docs/source/data-types/inet.md @@ -12,11 +12,11 @@ use std::net::{IpAddr, Ipv4Addr}; // Insert some ip address into the table let to_insert: IpAddr = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1));; session - .query("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) + .query_unpaged("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) .await?; // Read inet from the table -let result = session.query("SELECT a FROM keyspace.table", &[]).await?; +let result = session.query_unpaged("SELECT a FROM keyspace.table", &[]).await?; let mut iter = result.rows_typed::<(IpAddr,)>()?; while let Some((inet_value,)) = iter.next().transpose()? 
{ println!("{:?}", inet_value); diff --git a/docs/source/data-types/primitive.md b/docs/source/data-types/primitive.md index 2bc69ce6f0..74fbcf122d 100644 --- a/docs/source/data-types/primitive.md +++ b/docs/source/data-types/primitive.md @@ -14,11 +14,11 @@ use scylla::IntoTypedRows; // Insert a bool into the table let to_insert: bool = true; session - .query("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) + .query_unpaged("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) .await?; // Read a bool from the table -let result = session.query("SELECT a FROM keyspace.table", &[]).await?; +let result = session.query_unpaged("SELECT a FROM keyspace.table", &[]).await?; let mut iter = result.rows_typed::<(bool,)>()?; while let Some((bool_value,)) = iter.next().transpose()? { println!("{}", bool_value); @@ -41,11 +41,11 @@ use scylla::IntoTypedRows; // Insert a tinyint into the table let to_insert: i8 = 123; session - .query("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) + .query_unpaged("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) .await?; // Read a tinyint from the table -let result = session.query("SELECT a FROM keyspace.table", &[]).await?; +let result = session.query_unpaged("SELECT a FROM keyspace.table", &[]).await?; let mut iter = result.rows_typed::<(i8,)>()?; while let Some((tinyint_value,)) = iter.next().transpose()? { println!("{:?}", tinyint_value); @@ -68,11 +68,11 @@ use scylla::IntoTypedRows; // Insert a smallint into the table let to_insert: i16 = 12345; session - .query("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) + .query_unpaged("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) .await?; // Read a smallint from the table -let result = session.query("SELECT a FROM keyspace.table", &[]).await?; +let result = session.query_unpaged("SELECT a FROM keyspace.table", &[]).await?; let mut iter = result.rows_typed::<(i16,)>()?; while let Some((smallint_value,)) = iter.next().transpose()? 
{ println!("{}", smallint_value); @@ -95,11 +95,11 @@ use scylla::IntoTypedRows; // Insert an int into the table let to_insert: i32 = 12345; session - .query("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) + .query_unpaged("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) .await?; // Read an int from the table -let result = session.query("SELECT a FROM keyspace.table", &[]).await?; +let result = session.query_unpaged("SELECT a FROM keyspace.table", &[]).await?; let mut iter = result.rows_typed::<(i32,)>()?; while let Some((int_value,)) = iter.next().transpose()? { println!("{}", int_value); @@ -122,11 +122,11 @@ use scylla::IntoTypedRows; // Insert a bigint into the table let to_insert: i64 = 12345; session - .query("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) + .query_unpaged("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) .await?; // Read a bigint from the table -let result = session.query("SELECT a FROM keyspace.table", &[]).await?; +let result = session.query_unpaged("SELECT a FROM keyspace.table", &[]).await?; let mut iter = result.rows_typed::<(i64,)>()?; while let Some((bigint_value,)) = iter.next().transpose()? { println!("{:?}", bigint_value); @@ -149,11 +149,11 @@ use scylla::IntoTypedRows; // Insert a float into the table let to_insert: f32 = 123.0; session - .query("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) + .query_unpaged("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) .await?; // Read a float from the table -let result = session.query("SELECT a FROM keyspace.table", &[]).await?; +let result = session.query_unpaged("SELECT a FROM keyspace.table", &[]).await?; let mut iter = result.rows_typed::<(f32,)>()?; while let Some((float_value,)) = iter.next().transpose()? 
{ println!("{:?}", float_value); @@ -176,11 +176,11 @@ use scylla::IntoTypedRows; // Insert a double into the table let to_insert: f64 = 12345.0; session - .query("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) + .query_unpaged("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) .await?; // Read a double from the table -let result = session.query("SELECT a FROM keyspace.table", &[]).await?; +let result = session.query_unpaged("SELECT a FROM keyspace.table", &[]).await?; let mut iter = result.rows_typed::<(f64,)>()?; while let Some((double_value,)) = iter.next().transpose()? { println!("{:?}", double_value); diff --git a/docs/source/data-types/text.md b/docs/source/data-types/text.md index 6d8fbf2b37..20741f5825 100644 --- a/docs/source/data-types/text.md +++ b/docs/source/data-types/text.md @@ -11,17 +11,17 @@ use scylla::IntoTypedRows; // Insert some text into the table as a &str let to_insert_str: &str = "abcdef"; session - .query("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert_str,)) + .query_unpaged("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert_str,)) .await?; // Insert some text into the table as a String let to_insert_string: String = "abcdef".to_string(); session - .query("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert_string,)) + .query_unpaged("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert_string,)) .await?; // Read ascii/text/varchar from the table -let result = session.query("SELECT a FROM keyspace.table", &[]).await?; +let result = session.query_unpaged("SELECT a FROM keyspace.table", &[]).await?; let mut iter = result.rows_typed::<(String,)>()?; while let Some((text_value,)) = iter.next().transpose()? 
{ println!("{}", text_value); diff --git a/docs/source/data-types/time.md b/docs/source/data-types/time.md index 588a1f6c2e..e85d9cb0cd 100644 --- a/docs/source/data-types/time.md +++ b/docs/source/data-types/time.md @@ -25,12 +25,12 @@ let to_insert = CqlTime(64 * 1_000_000_000); // Insert time into the table session - .query("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) + .query_unpaged("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) .await?; // Read time from the table if let Some(rows) = session - .query("SELECT a FROM keyspace.table", &[]) + .query_unpaged("SELECT a FROM keyspace.table", &[]) .await? .rows { @@ -63,11 +63,11 @@ let to_insert = NaiveTime::from_hms_nano_opt(1, 2, 3, 456_789_012); // Insert time into the table session - .query("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) + .query_unpaged("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) .await?; // Read time from the table -let result = session.query("SELECT a FROM keyspace.table", &[]).await?; +let result = session.query_unpaged("SELECT a FROM keyspace.table", &[]).await?; let mut iter = result.rows_typed::<(NaiveTime,)>()?; while let Some((time_value,)) = iter.next().transpose()? { println!("{:?}", time_value); @@ -95,11 +95,11 @@ let to_insert = Time::from_hms_nano(1, 2, 3, 456_789_012).unwrap(); // Insert time into the table session - .query("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) + .query_unpaged("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) .await?; // Read time from the table -let result = session.query("SELECT a FROM keyspace.table", &[]).await?; +let result = session.query_unpaged("SELECT a FROM keyspace.table", &[]).await?; let mut iter = result.rows_typed::<(Time,)>()?; while let Some((time_value,)) = iter.next().transpose()? 
{ println!("{:?}", time_value); diff --git a/docs/source/data-types/timestamp.md b/docs/source/data-types/timestamp.md index 9a9e3e4754..3b44a59031 100644 --- a/docs/source/data-types/timestamp.md +++ b/docs/source/data-types/timestamp.md @@ -26,12 +26,12 @@ let to_insert = CqlTimestamp(64 * 1000); // Write timestamp to the table session - .query("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) + .query_unpaged("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) .await?; // Read timestamp from the table if let Some(rows) = session - .query("SELECT a FROM keyspace.table", &[]) + .query_unpaged("SELECT a FROM keyspace.table", &[]) .await? .rows { @@ -68,11 +68,11 @@ let to_insert = NaiveDateTime::new( // Write timestamp to the table session - .query("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) + .query_unpaged("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) .await?; // Read timestamp from the table -let result = session.query("SELECT a FROM keyspace.table", &[]).await?; +let result = session.query_unpaged("SELECT a FROM keyspace.table", &[]).await?; let mut iter = result.rows_typed::<(DateTime,)>()?; while let Some((timestamp_value,)) = iter.next().transpose()? { println!("{:?}", timestamp_value); @@ -107,11 +107,11 @@ let to_insert = PrimitiveDateTime::new( // Write timestamp to the table session - .query("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) + .query_unpaged("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) .await?; // Read timestamp from the table -let result = session.query("SELECT a FROM keyspace.table", &[]).await?; +let result = session.query_unpaged("SELECT a FROM keyspace.table", &[]).await?; let mut iter = result.rows_typed::<(OffsetDateTime,)>()?; while let Some((timestamp_value,)) = iter.next().transpose()? 
{ println!("{:?}", timestamp_value); diff --git a/docs/source/data-types/timeuuid.md b/docs/source/data-types/timeuuid.md index 0f0349648f..612dbb7f35 100644 --- a/docs/source/data-types/timeuuid.md +++ b/docs/source/data-types/timeuuid.md @@ -17,11 +17,11 @@ use scylla::frame::value::CqlTimeuuid; let to_insert: CqlTimeuuid = CqlTimeuuid::from_str("8e14e760-7fa8-11eb-bc66-000000000001")?; session - .query("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) + .query_unpaged("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) .await?; // Read Timeuuid from the table -let result = session.query("SELECT a FROM keyspace.table", &[]).await?; +let result = session.query_unpaged("SELECT a FROM keyspace.table", &[]).await?; let mut iter = result.rows_typed::<(CqlTimeuuid, )>()?; @@ -60,11 +60,11 @@ let node_id = [0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC]; let to_insert = CqlTimeuuid::from(Uuid::now_v1(&node_id)); session - .query("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) + .query_unpaged("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) .await?; // Read Timeuuid from the table -let result = session.query("SELECT a FROM keyspace.table", &[]).await?; +let result = session.query_unpaged("SELECT a FROM keyspace.table", &[]).await?; let mut iter = result.rows_typed::<(CqlTimeuuid, )>()?; diff --git a/docs/source/data-types/tuple.md b/docs/source/data-types/tuple.md index c56c814913..cce4aa7767 100644 --- a/docs/source/data-types/tuple.md +++ b/docs/source/data-types/tuple.md @@ -12,11 +12,11 @@ use scylla::IntoTypedRows; // Insert a tuple of int and string into the table let to_insert: (i32, String) = (1, "abc".to_string()); session - .query("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) + .query_unpaged("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) .await?; // Read a tuple of int and string from the table -let result = session.query("SELECT a FROM keyspace.table", &[]).await?; +let result = session.query_unpaged("SELECT a 
FROM keyspace.table", &[]).await?; let mut iter = result.rows_typed::<((i32, String),)>()?; while let Some((tuple_value,)) = iter.next().transpose()? { let int_value: i32 = tuple_value.0; diff --git a/docs/source/data-types/udt.md b/docs/source/data-types/udt.md index eadbfe1d48..0be5b39e78 100644 --- a/docs/source/data-types/udt.md +++ b/docs/source/data-types/udt.md @@ -66,11 +66,11 @@ let to_insert = MyType { }; session - .query("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) + .query_unpaged("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) .await?; // Read MyType from the table -let result = session.query("SELECT a FROM keyspace.table", &[]).await?; +let result = session.query_unpaged("SELECT a FROM keyspace.table", &[]).await?; let mut iter = result.rows_typed::<(MyType,)>()?; while let Some((my_type_value,)) = iter.next().transpose()? { println!("{:?}", my_type_value); diff --git a/docs/source/data-types/uuid.md b/docs/source/data-types/uuid.md index 0e63b9ca71..47e68094f7 100644 --- a/docs/source/data-types/uuid.md +++ b/docs/source/data-types/uuid.md @@ -14,11 +14,11 @@ use uuid::Uuid; // Insert some uuid into the table let to_insert: Uuid = Uuid::parse_str("8e14e760-7fa8-11eb-bc66-000000000001")?; session - .query("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) + .query_unpaged("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) .await?; // Read uuid from the table -let result = session.query("SELECT a FROM keyspace.table", &[]).await?; +let result = session.query_unpaged("SELECT a FROM keyspace.table", &[]).await?; let mut iter = result.rows_typed::<(Uuid,)>()?; while let Some((uuid_value,)) = iter.next().transpose()? 
{ println!("{:?}", uuid_value); diff --git a/docs/source/data-types/varint.md b/docs/source/data-types/varint.md index 87b0f7e1d0..dcd9b14615 100644 --- a/docs/source/data-types/varint.md +++ b/docs/source/data-types/varint.md @@ -25,11 +25,11 @@ use std::str::FromStr; // Insert a varint into the table let to_insert: BigInt = BigInt::from_str("12345")?; session - .query("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) + .query_unpaged("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) .await?; // Read a varint from the table -let result = session.query("SELECT a FROM keyspace.table", &[]).await?; +let result = session.query_unpaged("SELECT a FROM keyspace.table", &[]).await?; let mut iter = result.rows_typed::<(BigInt,)>()?; while let Some((varint_value,)) = iter.next().transpose()? { println!("{:?}", varint_value); diff --git a/docs/source/execution-profiles/priority.md b/docs/source/execution-profiles/priority.md index 4ae22d2c86..ccc57b73b0 100644 --- a/docs/source/execution-profiles/priority.md +++ b/docs/source/execution-profiles/priority.md @@ -36,17 +36,17 @@ let mut query = Query::from("SELECT * FROM ks.table"); // Query is not assigned any specific profile, so session's profile is applied. // Therefore, the query will be executed with Consistency::One. -session.query(query.clone(), ()).await?; +session.query_unpaged(query.clone(), ()).await?; query.set_execution_profile_handle(Some(query_profile.into_handle())); // Query's profile is applied. // Therefore, the query will be executed with Consistency::Two. -session.query(query.clone(), ()).await?; +session.query_unpaged(query.clone(), ()).await?; query.set_consistency(Consistency::Three); // An option is set directly on the query. // Therefore, the query will be executed with Consistency::Three. 
-session.query(query, ()).await?; +session.query_unpaged(query, ()).await?; # Ok(()) # } diff --git a/docs/source/logging/logging.md b/docs/source/logging/logging.md index c99d1b1e0f..1a946d82d1 100644 --- a/docs/source/logging/logging.md +++ b/docs/source/logging/logging.md @@ -32,7 +32,7 @@ async fn main() -> Result<(), Box> { let session: Session = SessionBuilder::new().known_node(uri).build().await?; session - .query( + .query_unpaged( "CREATE KEYSPACE IF NOT EXISTS ks WITH REPLICATION = \ {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[], @@ -40,7 +40,7 @@ async fn main() -> Result<(), Box> { .await?; // This query should generate a warning message - session.query("USE ks", &[]).await?; + session.query_unpaged("USE ks", &[]).await?; Ok(()) } @@ -82,9 +82,9 @@ async fn main() -> Result<(), Box> { info!("Connecting to {}", uri); let session: Session = SessionBuilder::new().known_node(uri).build().await?; - session.query("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; + session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; - session.query("USE examples_ks", &[]).await?; + session.query_unpaged("USE examples_ks", &[]).await?; Ok(()) } diff --git a/docs/source/queries/lwt.md b/docs/source/queries/lwt.md index afe0d9dc5c..89c2831eb0 100644 --- a/docs/source/queries/lwt.md +++ b/docs/source/queries/lwt.md @@ -21,7 +21,7 @@ my_query.set_serial_consistency(Some(SerialConsistency::Serial)); // Insert a value into the table let to_insert: i32 = 12345; -session.query(my_query, (to_insert,)).await?; +session.query_unpaged(my_query, (to_insert,)).await?; # Ok(()) # } ``` diff --git a/docs/source/queries/prepared.md b/docs/source/queries/prepared.md index caddb3c913..d362d00d9f 100644 --- a/docs/source/queries/prepared.md +++ 
b/docs/source/queries/prepared.md @@ -23,7 +23,7 @@ let prepared: PreparedStatement = session // Run the prepared query with some values, just like a simple query let to_insert: i32 = 12345; -session.execute(&prepared, (to_insert,)).await?; +session.execute_unpaged(&prepared, (to_insert,)).await?; # Ok(()) # } ``` @@ -71,7 +71,7 @@ prepared.set_consistency(Consistency::One); // Run the prepared query with some values, just like a simple query let to_insert: i32 = 12345; -session.execute(&prepared, (to_insert,)).await?; +session.execute_unpaged(&prepared, (to_insert,)).await?; # Ok(()) # } ``` @@ -117,7 +117,7 @@ let wrong_prepared: PreparedStatement = session .prepare("INSERT INTO ks.prepare_table (a, b, c) VALUES(12345, ?, 16)") .await?; -session.execute(&wrong_prepared, (54321,)).await?; +session.execute_unpaged(&wrong_prepared, (54321,)).await?; // GOOD - partition key values are sent as bound values // Other values can be sent any way you like, it doesn't matter @@ -125,7 +125,7 @@ let good_prepared: PreparedStatement = session .prepare("INSERT INTO ks.prepare_table (a, b, c) VALUES(?, ?, 16)") .await?; -session.execute(&good_prepared, (12345, 54321)).await?; +session.execute_unpaged(&good_prepared, (12345, 54321)).await?; # Ok(()) # } diff --git a/docs/source/queries/result.md b/docs/source/queries/result.md index 6350eab9ad..3663e23726 100644 --- a/docs/source/queries/result.md +++ b/docs/source/queries/result.md @@ -9,7 +9,7 @@ # use scylla::Session; # use std::error::Error; # async fn check_only_compiles(session: &Session) -> Result<(), Box> { -if let Some(rows) = session.query("SELECT a from ks.tab", &[]).await?.rows { +if let Some(rows) = session.query_unpaged("SELECT a from ks.tab", &[]).await?.rows { for row in rows { let int_value: i32 = row.columns[0].as_ref().unwrap().as_int().unwrap(); } @@ -28,21 +28,21 @@ The driver provides a way to parse a row as a tuple of Rust types: use scylla::IntoTypedRows; // Parse row as a single column containing an int 
value -if let Some(rows) = session.query("SELECT a from ks.tab", &[]).await?.rows { +if let Some(rows) = session.query_unpaged("SELECT a from ks.tab", &[]).await?.rows { for row in rows { let (int_value,): (i32,) = row.into_typed::<(i32,)>()?; } } // rows.into_typed() converts a Vec of Rows to an iterator of parsing results -if let Some(rows) = session.query("SELECT a from ks.tab", &[]).await?.rows { +if let Some(rows) = session.query_unpaged("SELECT a from ks.tab", &[]).await?.rows { for row in rows.into_typed::<(i32,)>() { let (int_value,): (i32,) = row?; } } // Parse row as two columns containing an int and text columns -if let Some(rows) = session.query("SELECT a, b from ks.tab", &[]).await?.rows { +if let Some(rows) = session.query_unpaged("SELECT a, b from ks.tab", &[]).await?.rows { for row in rows.into_typed::<(i32, String)>() { let (int_value, text_value): (i32, String) = row?; } @@ -68,7 +68,7 @@ Here are a few of them: # async fn check_only_compiles(session: &Session) -> Result<(), Box> { // Parse row as a single column containing an int value let rows = session - .query("SELECT a from ks.tab", &[]) + .query_unpaged("SELECT a from ks.tab", &[]) .await? .rows_typed::<(i32,)>()?; // Same as .rows()?.into_typed() for row in rows { @@ -77,12 +77,12 @@ for row in rows { // maybe_first_row_typed gets the first row and parses it as the given type let first_int_val: Option<(i32,)> = session - .query("SELECT a from ks.tab", &[]) + .query_unpaged("SELECT a from ks.tab", &[]) .await? 
.maybe_first_row_typed::<(i32,)>()?; // no_rows fails when the response is rows -session.query("INSERT INTO ks.tab (a) VALUES (0)", &[]).await?.result_not_rows()?; +session.query_unpaged("INSERT INTO ks.tab (a) VALUES (0)", &[]).await?.result_not_rows()?; # Ok(()) # } ``` @@ -99,7 +99,7 @@ To properly handle `NULL` values parse column as an `Option<>`: use scylla::IntoTypedRows; // Parse row as two columns containing an int and text which might be null -if let Some(rows) = session.query("SELECT a, b from ks.tab", &[]).await?.rows { +if let Some(rows) = session.query_unpaged("SELECT a, b from ks.tab", &[]).await?.rows { for row in rows.into_typed::<(i32, Option)>() { let (int_value, str_or_null): (i32, Option) = row?; } @@ -132,7 +132,7 @@ struct MyRow { } // Parse row as two columns containing an int and text which might be null -if let Some(rows) = session.query("SELECT a, b from ks.tab", &[]).await?.rows { +if let Some(rows) = session.query_unpaged("SELECT a, b from ks.tab", &[]).await?.rows { for row in rows.into_typed::() { let my_row: MyRow = row?; } diff --git a/docs/source/queries/simple.md b/docs/source/queries/simple.md index d065f52d2d..ca0a4269e2 100644 --- a/docs/source/queries/simple.md +++ b/docs/source/queries/simple.md @@ -9,7 +9,7 @@ Simple query takes query text and values and simply executes them on a `Session` // Insert a value into the table let to_insert: i32 = 12345; session - .query("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) + .query_unpaged("INSERT INTO keyspace.table (a) VALUES(?)", (to_insert,)) .await?; # Ok(()) # } @@ -44,7 +44,7 @@ my_query.set_consistency(Consistency::One); // Insert a value into the table let to_insert: i32 = 12345; -session.query(my_query, (to_insert,)).await?; +session.query_unpaged(my_query, (to_insert,)).await?; # Ok(()) # } ``` @@ -63,7 +63,7 @@ The easiest way is to pass values using a tuple: # async fn check_only_compiles(session: &Session) -> Result<(), Box> { // Sending an integer and a string 
using a tuple session - .query("INSERT INTO ks.tab (a, b, c) VALUES(?, ?, 'text2')", (2_i32, "Some text")) + .query_unpaged("INSERT INTO ks.tab (a, b, c) VALUES(?, ?, 'text2')", (2_i32, "Some text")) .await?; # Ok(()) # } @@ -84,7 +84,7 @@ Each row can be parsed as a tuple of rust types using `rows_typed`: use scylla::IntoTypedRows; // Query rows from the table and print them -let result = session.query("SELECT a FROM ks.tab", &[]).await?; +let result = session.query_unpaged("SELECT a FROM ks.tab", &[]).await?; let mut iter = result.rows_typed::<(i32,)>()?; while let Some(read_row) = iter.next().transpose()? { println!("Read a value from row: {}", read_row.0); diff --git a/docs/source/queries/timeouts.md b/docs/source/queries/timeouts.md index 92c11dfd6a..f99b186cab 100644 --- a/docs/source/queries/timeouts.md +++ b/docs/source/queries/timeouts.md @@ -37,7 +37,7 @@ let session: Session = SessionBuilder::new() // This query, having no timeout, could block indefinitely if a queried node hangs. 
session - .query("TRUNCATE keyspace.table", ()) + .query_unpaged("TRUNCATE keyspace.table", ()) .await?; let three_sec_timeout_profile_handle = ExecutionProfile::builder() @@ -50,7 +50,7 @@ let three_sec_timeout_profile_handle = ExecutionProfile::builder() let mut query: Query = "TRUNCATE keyspace.table".into(); query.set_execution_profile_handle(Some(three_sec_timeout_profile_handle)); session - .query(query, ()) + .query_unpaged(query, ()) .await?; # Ok(()) diff --git a/docs/source/queries/usekeyspace.md b/docs/source/queries/usekeyspace.md index d35030dda0..2879a26275 100644 --- a/docs/source/queries/usekeyspace.md +++ b/docs/source/queries/usekeyspace.md @@ -33,14 +33,14 @@ In the driver this can be achieved using `Session::use_keyspace`: # use std::error::Error; # async fn check_only_compiles(session: &Session) -> Result<(), Box> { session - .query("INSERT INTO my_keyspace.tab (a) VALUES ('test1')", &[]) + .query_unpaged("INSERT INTO my_keyspace.tab (a) VALUES ('test1')", &[]) .await?; session.use_keyspace("my_keyspace", false).await?; // Now we can omit keyspace name in the query session - .query("INSERT INTO tab (a) VALUES ('test2')", &[]) + .query_unpaged("INSERT INTO tab (a) VALUES ('test2')", &[]) .await?; # Ok(()) # } @@ -55,7 +55,7 @@ It is also possible to send raw use keyspace query using `Session::query` instea # use scylla::Session; # use std::error::Error; # async fn check_only_compiles(session: &Session) -> Result<(), Box> { -session.query("USE my_keyspace", &[]).await?; +session.query_unpaged("USE my_keyspace", &[]).await?; # Ok(()) # } ``` diff --git a/docs/source/queries/values.md b/docs/source/queries/values.md index a8ba9dcf71..e5ae36532b 100644 --- a/docs/source/queries/values.md +++ b/docs/source/queries/values.md @@ -17,19 +17,19 @@ A few examples: # use std::collections::HashMap; # async fn check_only_compiles(session: &Session) -> Result<(), Box> { // Empty slice means that there are no values to send -session.query("INSERT INTO ks.tab 
(a) VALUES(1)", &[]).await?; +session.query_unpaged("INSERT INTO ks.tab (a) VALUES(1)", &[]).await?; // Empty tuple/unit also means that there are no values to send -session.query("INSERT INTO ks.tab (a) VALUES(1)", ()).await?; +session.query_unpaged("INSERT INTO ks.tab (a) VALUES(1)", ()).await?; // Sending three integers using a slice: session - .query("INSERT INTO ks.tab (a, b, c) VALUES(?, ?, ?)", [1_i32, 2, 3].as_ref()) + .query_unpaged("INSERT INTO ks.tab (a, b, c) VALUES(?, ?, ?)", [1_i32, 2, 3].as_ref()) .await?; // Sending an integer and a string using a tuple session - .query("INSERT INTO ks.tab (a, b) VALUES(?, ?)", (2_i32, "Some text")) + .query_unpaged("INSERT INTO ks.tab (a, b) VALUES(?, ?)", (2_i32, "Some text")) .await?; // Sending an integer and a string using a named struct. @@ -52,7 +52,7 @@ let int_string = IntString { }; session - .query("INSERT INTO ks.tab (a, b) VALUES(?, ?)", int_string) + .query_unpaged("INSERT INTO ks.tab (a, b) VALUES(?, ?)", int_string) .await?; // You can use named bind markers in query if you want @@ -69,15 +69,15 @@ let int_string_custom = IntStringCustom { }; session - .query("INSERT INTO ks.tab (a, b) VALUES(:first_value, :second_value)", int_string_custom) + .query_unpaged("INSERT INTO ks.tab (a, b) VALUES(:first_value, :second_value)", int_string_custom) .await?; // Sending a single value as a tuple requires a trailing coma (Rust syntax): -session.query("INSERT INTO ks.tab (a) VALUES(?)", (2_i32,)).await?; +session.query_unpaged("INSERT INTO ks.tab (a) VALUES(?)", (2_i32,)).await?; // Each value can also be sent using a reference: session - .query("INSERT INTO ks.tab (a, b) VALUES(?, ?)", &(&2_i32, &"Some text")) + .query_unpaged("INSERT INTO ks.tab (a, b) VALUES(?, ?)", &(&2_i32, &"Some text")) .await?; // A map of named values can also be provided: @@ -85,7 +85,7 @@ let mut vals: HashMap<&str, CqlValue> = HashMap::new(); vals.insert("avalue", CqlValue::Text("hello".to_string())); vals.insert("bvalue", 
CqlValue::Int(17)); session - .query("INSERT INTO ks.tab (a, b) VALUES(:avalue, :bvalue)", &vals) + .query_unpaged("INSERT INTO ks.tab (a, b) VALUES(:avalue, :bvalue)", &vals) .await?; # Ok(()) @@ -101,7 +101,7 @@ Null values can be sent using `Option<>` - sending a `None` will make the value # async fn check_only_compiles(session: &Session) -> Result<(), Box> { let null_i32: Option = None; session - .query("INSERT INTO ks.tab (a) VALUES(?)", (null_i32,)) + .query_unpaged("INSERT INTO ks.tab (a) VALUES(?)", (null_i32,)) .await?; # Ok(()) # } @@ -122,18 +122,18 @@ use scylla::frame::value::{MaybeUnset, Unset}; // Inserting a null results in suboptimal performance let null_i32: Option = None; session - .query("INSERT INTO ks.tab (a) VALUES(?)", (null_i32,)) + .query_unpaged("INSERT INTO ks.tab (a) VALUES(?)", (null_i32,)) .await?; // Using MaybeUnset enum is better let unset_i32: MaybeUnset = MaybeUnset::Unset; session - .query("INSERT INTO ks.tab (a) VALUES(?)", (unset_i32,)) + .query_unpaged("INSERT INTO ks.tab (a) VALUES(?)", (unset_i32,)) .await?; // If we are sure that a value should be unset we can simply use Unset session - .query("INSERT INTO ks.tab (a) VALUES(?)", (Unset,)) + .query_unpaged("INSERT INTO ks.tab (a) VALUES(?)", (Unset,)) .await?; # Ok(()) # } diff --git a/docs/source/quickstart/example.md b/docs/source/quickstart/example.md index 32e01b2d5f..fde78fd474 100644 --- a/docs/source/quickstart/example.md +++ b/docs/source/quickstart/example.md @@ -22,7 +22,7 @@ async fn main() -> Result<(), Box> { // Create an example keyspace and table session - .query( + .query_unpaged( "CREATE KEYSPACE IF NOT EXISTS ks WITH REPLICATION = \ {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[], @@ -30,7 +30,7 @@ async fn main() -> Result<(), Box> { .await?; session - .query( + .query_unpaged( "CREATE TABLE IF NOT EXISTS ks.extab (a int primary key)", &[], ) @@ -39,11 +39,11 @@ async fn main() -> Result<(), Box> { // Insert a value into the table 
let to_insert: i32 = 12345; session - .query("INSERT INTO ks.extab (a) VALUES(?)", (to_insert,)) + .query_unpaged("INSERT INTO ks.extab (a) VALUES(?)", (to_insert,)) .await?; // Query rows from the table and print them - let result = session.query("SELECT a FROM ks.extab", &[]).await?; + let result = session.query_unpaged("SELECT a FROM ks.extab", &[]).await?; let mut iter = result.rows_typed::<(i32,)>()?; while let Some(read_row) = iter.next().transpose()? { println!("Read a value from row: {}", read_row.0); diff --git a/docs/source/retry-policy/default.md b/docs/source/retry-policy/default.md index e1f8514ed4..9e5a3697e5 100644 --- a/docs/source/retry-policy/default.md +++ b/docs/source/retry-policy/default.md @@ -52,7 +52,7 @@ my_query.set_execution_profile_handle(Some(handle)); // Run the query using this retry policy let to_insert: i32 = 12345; -session.query(my_query, (to_insert,)).await?; +session.query_unpaged(my_query, (to_insert,)).await?; # Ok(()) # } ``` @@ -83,7 +83,7 @@ prepared.set_execution_profile_handle(Some(handle)); // Run the query using this retry policy let to_insert: i32 = 12345; -session.execute(&prepared, (to_insert,)).await?; +session.execute_unpaged(&prepared, (to_insert,)).await?; # Ok(()) # } ``` diff --git a/docs/source/retry-policy/downgrading-consistency.md b/docs/source/retry-policy/downgrading-consistency.md index 711329ec80..0fea0b6a02 100644 --- a/docs/source/retry-policy/downgrading-consistency.md +++ b/docs/source/retry-policy/downgrading-consistency.md @@ -90,7 +90,7 @@ my_query.set_execution_profile_handle(Some(handle)); // Run the query using this retry policy let to_insert: i32 = 12345; -session.query(my_query, (to_insert,)).await?; +session.query_unpaged(my_query, (to_insert,)).await?; # Ok(()) # } ``` @@ -120,7 +120,7 @@ prepared.set_execution_profile_handle(Some(handle)); // Run the query using this retry policy let to_insert: i32 = 12345; -session.execute(&prepared, (to_insert,)).await?; 
+session.execute_unpaged(&prepared, (to_insert,)).await?; # Ok(()) # } ``` diff --git a/docs/source/retry-policy/fallthrough.md b/docs/source/retry-policy/fallthrough.md index 0f6ab59388..089bb1eb41 100644 --- a/docs/source/retry-policy/fallthrough.md +++ b/docs/source/retry-policy/fallthrough.md @@ -48,7 +48,7 @@ my_query.set_execution_profile_handle(Some(handle)); // Run the query using this retry policy let to_insert: i32 = 12345; -session.query(my_query, (to_insert,)).await?; +session.query_unpaged(my_query, (to_insert,)).await?; # Ok(()) # } ``` @@ -77,7 +77,7 @@ prepared.set_execution_profile_handle(Some(handle)); // Run the query using this retry policy let to_insert: i32 = 12345; -session.execute(&prepared, (to_insert,)).await?; +session.execute_unpaged(&prepared, (to_insert,)).await?; # Ok(()) # } ``` diff --git a/docs/source/tracing/basic.md b/docs/source/tracing/basic.md index 4ee5bc5737..0302478ebf 100644 --- a/docs/source/tracing/basic.md +++ b/docs/source/tracing/basic.md @@ -19,7 +19,7 @@ use uuid::Uuid; let mut query: Query = Query::new("INSERT INTO ks.tab (a) VALUES(4)"); query.set_tracing(true); -let res: QueryResult = session.query(query, &[]).await?; +let res: QueryResult = session.query_unpaged(query, &[]).await?; let tracing_id: Option = res.tracing_id; if let Some(id) = tracing_id { @@ -51,7 +51,7 @@ let mut prepared: PreparedStatement = session // Enable tracing for the prepared query prepared.set_tracing(true); -let res: QueryResult = session.execute(&prepared, &[]).await?; +let res: QueryResult = session.execute_unpaged(&prepared, &[]).await?; let tracing_id: Option = res.tracing_id; if let Some(id) = tracing_id { diff --git a/docs/source/tracing/query-history.md b/docs/source/tracing/query-history.md index 1c0779259e..c7c0e0b3b6 100644 --- a/docs/source/tracing/query-history.md +++ b/docs/source/tracing/query-history.md @@ -23,7 +23,7 @@ let history_listener = Arc::new(HistoryCollector::new()); 
query.set_history_listener(history_listener.clone()); // Run the query, doesn't matter if it failed, the history will still be saved -let _ignore_error = session.query(query.clone(), ()).await; +let _ignore_error = session.query_unpaged(query.clone(), ()).await; // Access the collected history and print it let structured_history: StructuredHistory = history_listener.clone_structured_history(); diff --git a/docs/source/tracing/tracing.md b/docs/source/tracing/tracing.md index 75ea2d5fee..ec6cd4ac87 100644 --- a/docs/source/tracing/tracing.md +++ b/docs/source/tracing/tracing.md @@ -9,9 +9,9 @@ Tracing is a feature provided by Scylla. When sending a query we can set a flag After completing the query Scylla provides a `tracing_id` which can be used to fetch information about it - which nodes it was sent to, what operations were performed etc. Queries that support tracing: -* [`Session::query()`](basic.md) +* [`Session::query_unpaged()`](basic.md) * [`Session::query_iter()`](paged.md) -* [`Session::execute()`](basic.md) +* [`Session::execute_unpaged()`](basic.md) * [`Session::execute_iter()`](paged.md) * [`Session::batch()`](basic.md) * [`Session::prepare()`](prepare.md) diff --git a/examples/allocations.rs b/examples/allocations.rs index 3148bb51c2..a3ec2a5cba 100644 --- a/examples/allocations.rs +++ b/examples/allocations.rs @@ -93,7 +93,7 @@ async fn measure( std::io::stdout().flush().unwrap(); } session - .execute(&prepared, (i as i32, 2 * i as i32)) + .execute_unpaged(&prepared, (i as i32, 2 * i as i32)) .await .unwrap(); } @@ -131,11 +131,11 @@ async fn main() -> Result<()> { let session: Session = SessionBuilder::new().known_node(args.node).build().await?; let session = Arc::new(session); - session.query("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; + session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 
'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; session.await_schema_agreement().await.unwrap(); session - .query( + .query_unpaged( "CREATE TABLE IF NOT EXISTS examples_ks.allocations (a int, b int, c text, primary key (a, b))", &[], ) diff --git a/examples/auth.rs b/examples/auth.rs index 982ccf5d3a..22fbee0077 100644 --- a/examples/auth.rs +++ b/examples/auth.rs @@ -14,9 +14,9 @@ async fn main() -> Result<()> { .await .unwrap(); - session.query("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await.unwrap(); + session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await.unwrap(); session - .query("DROP TABLE IF EXISTS examples_ks.auth;", &[]) + .query_unpaged("DROP TABLE IF EXISTS examples_ks.auth;", &[]) .await .unwrap(); diff --git a/examples/basic.rs b/examples/basic.rs index 6225451398..c8e5510d80 100644 --- a/examples/basic.rs +++ b/examples/basic.rs @@ -12,24 +12,24 @@ async fn main() -> Result<()> { let session: Session = SessionBuilder::new().known_node(uri).build().await?; - session.query("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; + session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; session - .query( + .query_unpaged( "CREATE TABLE IF NOT EXISTS examples_ks.basic (a int, b int, c text, primary key (a, b))", &[], ) .await?; session - .query( + .query_unpaged( "INSERT INTO examples_ks.basic (a, b, c) VALUES (?, ?, ?)", (3, 4, "def"), ) .await?; session - .query( + .query_unpaged( "INSERT INTO examples_ks.basic (a, b, c) VALUES (1, 2, 'abc')", &[], ) @@ -39,18 +39,18 @@ async fn main() -> Result<()> { .prepare("INSERT INTO examples_ks.basic (a, b, c) 
VALUES (?, 7, ?)") .await?; session - .execute(&prepared, (42_i32, "I'm prepared!")) + .execute_unpaged(&prepared, (42_i32, "I'm prepared!")) .await?; session - .execute(&prepared, (43_i32, "I'm prepared 2!")) + .execute_unpaged(&prepared, (43_i32, "I'm prepared 2!")) .await?; session - .execute(&prepared, (44_i32, "I'm prepared 3!")) + .execute_unpaged(&prepared, (44_i32, "I'm prepared 3!")) .await?; // Rows can be parsed as tuples let result = session - .query("SELECT a, b, c FROM examples_ks.basic", &[]) + .query_unpaged("SELECT a, b, c FROM examples_ks.basic", &[]) .await?; let mut iter = result.rows_typed::<(i32, i32, String)>()?; while let Some((a, b, c)) = iter.next().transpose()? { @@ -66,7 +66,7 @@ async fn main() -> Result<()> { } let result = session - .query("SELECT a, b, c FROM examples_ks.basic", &[]) + .query_unpaged("SELECT a, b, c FROM examples_ks.basic", &[]) .await?; let mut iter = result.rows_typed::()?; while let Some(row_data) = iter.next().transpose()? { @@ -75,7 +75,7 @@ async fn main() -> Result<()> { // Or simply as untyped rows let result = session - .query("SELECT a, b, c FROM examples_ks.basic", &[]) + .query_unpaged("SELECT a, b, c FROM examples_ks.basic", &[]) .await?; let rows = result.rows.unwrap(); for row in rows { diff --git a/examples/cloud.rs b/examples/cloud.rs index e469ae3241..63265e41f8 100644 --- a/examples/cloud.rs +++ b/examples/cloud.rs @@ -16,10 +16,10 @@ async fn main() -> Result<()> { .await .unwrap(); - session.query("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", + session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await.unwrap(); session - .query("DROP TABLE IF EXISTS examples_ks.cloud;", &[]) + .query_unpaged("DROP TABLE IF EXISTS examples_ks.cloud;", &[]) .await .unwrap(); diff --git a/examples/compare-tokens.rs 
b/examples/compare-tokens.rs index bafeb73bdf..9e9431d869 100644 --- a/examples/compare-tokens.rs +++ b/examples/compare-tokens.rs @@ -12,10 +12,10 @@ async fn main() -> Result<()> { let session: Session = SessionBuilder::new().known_node(uri).build().await?; - session.query("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; + session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; session - .query( + .query_unpaged( "CREATE TABLE IF NOT EXISTS examples_ks.compare_tokens (pk bigint primary key)", &[], ) @@ -27,7 +27,7 @@ async fn main() -> Result<()> { for pk in (0..100_i64).chain(99840..99936_i64) { session - .query( + .query_unpaged( "INSERT INTO examples_ks.compare_tokens (pk) VALUES (?)", (pk,), ) @@ -46,7 +46,7 @@ async fn main() -> Result<()> { ); let (qt,) = session - .query( + .query_unpaged( "SELECT token(pk) FROM examples_ks.compare_tokens where pk = ?", (pk,), ) diff --git a/examples/cql-time-types.rs b/examples/cql-time-types.rs index 93a6a372eb..e7e24725d8 100644 --- a/examples/cql-time-types.rs +++ b/examples/cql-time-types.rs @@ -17,13 +17,13 @@ async fn main() -> Result<()> { let session: Session = SessionBuilder::new().known_node(uri).build().await?; - session.query("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; + session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; // Date // Date is a year, month and day in the range -5877641-06-23 to -5877641-06-23 session - .query( + .query_unpaged( "CREATE TABLE IF NOT EXISTS examples_ks.dates (d date primary key)", &[], ) @@ -34,14 +34,14 @@ async fn main() -> Result<()> { let chrono_date = 
NaiveDate::from_ymd_opt(2020, 2, 20).unwrap(); session - .query( + .query_unpaged( "INSERT INTO examples_ks.dates (d) VALUES (?)", (chrono_date,), ) .await?; let result = session - .query("SELECT d from examples_ks.dates", &[]) + .query_unpaged("SELECT d from examples_ks.dates", &[]) .await?; for row in result.rows_typed::<(NaiveDate,)>()? { let (read_date,): (NaiveDate,) = match row { @@ -58,11 +58,11 @@ async fn main() -> Result<()> { let time_date = time::Date::from_calendar_date(2020, time::Month::March, 21).unwrap(); session - .query("INSERT INTO examples_ks.dates (d) VALUES (?)", (time_date,)) + .query_unpaged("INSERT INTO examples_ks.dates (d) VALUES (?)", (time_date,)) .await?; let result = session - .query("SELECT d from examples_ks.dates", &[]) + .query_unpaged("SELECT d from examples_ks.dates", &[]) .await?; for row in result.rows_typed::<(time::Date,)>()? { let (read_date,) = match row { @@ -76,14 +76,14 @@ async fn main() -> Result<()> { // Dates outside this range must be represented in the raw form - an u32 describing days since -5877641-06-23 let example_big_date: CqlDate = CqlDate(u32::MAX); session - .query( + .query_unpaged( "INSERT INTO examples_ks.dates (d) VALUES (?)", (example_big_date,), ) .await?; let result = session - .query("SELECT d from examples_ks.dates", &[]) + .query_unpaged("SELECT d from examples_ks.dates", &[]) .await?; let mut iter = result.rows_typed::<(CqlValue,)>()?; while let Some((value,)) = iter.next().transpose()? 
{ @@ -99,7 +99,7 @@ async fn main() -> Result<()> { // Time is represented as nanosecond count since midnight in range 0..=86399999999999 session - .query( + .query_unpaged( "CREATE TABLE IF NOT EXISTS examples_ks.times (t time primary key)", &[], ) @@ -112,14 +112,14 @@ async fn main() -> Result<()> { let chrono_time = NaiveTime::from_hms_nano_opt(1, 2, 3, 456_789_012).unwrap(); session - .query( + .query_unpaged( "INSERT INTO examples_ks.times (t) VALUES (?)", (chrono_time,), ) .await?; let result = session - .query("SELECT t from examples_ks.times", &[]) + .query_unpaged("SELECT t from examples_ks.times", &[]) .await?; let mut iter = result.rows_typed::<(NaiveTime,)>()?; while let Some((read_time,)) = iter.next().transpose()? { @@ -130,11 +130,11 @@ async fn main() -> Result<()> { let time_time = time::Time::from_hms_nano(2, 3, 4, 567_890_123).unwrap(); session - .query("INSERT INTO examples_ks.times (t) VALUES (?)", (time_time,)) + .query_unpaged("INSERT INTO examples_ks.times (t) VALUES (?)", (time_time,)) .await?; let result = session - .query("SELECT t from examples_ks.times", &[]) + .query_unpaged("SELECT t from examples_ks.times", &[]) .await?; let mut iter = result.rows_typed::<(time::Time,)>()?; while let Some((read_time,)) = iter.next().transpose()? { @@ -145,11 +145,11 @@ async fn main() -> Result<()> { let time_time = CqlTime(((3 * 60 + 4) * 60 + 5) * 1_000_000_000 + 678_901_234); session - .query("INSERT INTO examples_ks.times (t) VALUES (?)", (time_time,)) + .query_unpaged("INSERT INTO examples_ks.times (t) VALUES (?)", (time_time,)) .await?; let result = session - .query("SELECT t from examples_ks.times", &[]) + .query_unpaged("SELECT t from examples_ks.times", &[]) .await?; let mut iter = result.rows_typed::<(CqlTime,)>()?; while let Some((read_time,)) = iter.next().transpose()? { @@ -160,7 +160,7 @@ async fn main() -> Result<()> { // Timestamp is represented as milliseconds since unix epoch - 1970-01-01. 
Negative values are also possible session - .query( + .query_unpaged( "CREATE TABLE IF NOT EXISTS examples_ks.timestamps (t timestamp primary key)", &[], ) @@ -173,14 +173,14 @@ async fn main() -> Result<()> { let chrono_datetime = Utc::now(); session - .query( + .query_unpaged( "INSERT INTO examples_ks.timestamps (t) VALUES (?)", (chrono_datetime,), ) .await?; let result = session - .query("SELECT t from examples_ks.timestamps", &[]) + .query_unpaged("SELECT t from examples_ks.timestamps", &[]) .await?; let mut iter = result.rows_typed::<(DateTime,)>()?; while let Some((read_time,)) = iter.next().transpose()? { @@ -194,14 +194,14 @@ async fn main() -> Result<()> { let time_datetime = time::OffsetDateTime::now_utc(); session - .query( + .query_unpaged( "INSERT INTO examples_ks.timestamps (t) VALUES (?)", (time_datetime,), ) .await?; let result = session - .query("SELECT t from examples_ks.timestamps", &[]) + .query_unpaged("SELECT t from examples_ks.timestamps", &[]) .await?; let mut iter = result.rows_typed::<(time::OffsetDateTime,)>()?; while let Some((read_time,)) = iter.next().transpose()? { @@ -215,14 +215,14 @@ async fn main() -> Result<()> { let cql_datetime = CqlTimestamp(1 << 31); session - .query( + .query_unpaged( "INSERT INTO examples_ks.timestamps (t) VALUES (?)", (cql_datetime,), ) .await?; let result = session - .query("SELECT t from examples_ks.timestamps", &[]) + .query_unpaged("SELECT t from examples_ks.timestamps", &[]) .await?; let mut iter = result.rows_typed::<(CqlTimestamp,)>()?; while let Some((read_time,)) = iter.next().transpose()? 
{ diff --git a/examples/cqlsh-rs.rs b/examples/cqlsh-rs.rs index 877b4af596..8d9ca8ea6a 100644 --- a/examples/cqlsh-rs.rs +++ b/examples/cqlsh-rs.rs @@ -219,7 +219,7 @@ async fn main() -> Result<()> { continue; } rl.add_history_entry(line.as_str()); - let maybe_res = session.query(line, &[]).await; + let maybe_res = session.query_unpaged(line, &[]).await; match maybe_res { Err(err) => println!("Error: {}", err), Ok(res) => print_result(&res), diff --git a/examples/custom_deserialization.rs b/examples/custom_deserialization.rs index ea9a0d6284..1d0173ca59 100644 --- a/examples/custom_deserialization.rs +++ b/examples/custom_deserialization.rs @@ -13,16 +13,16 @@ async fn main() -> Result<()> { let session: Session = SessionBuilder::new().known_node(uri).build().await?; - session.query("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; + session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; session - .query( + .query_unpaged( "CREATE TABLE IF NOT EXISTS examples_ks.custom_deserialization (pk int primary key, v text)", &[], ) .await?; session - .query( + .query_unpaged( "INSERT INTO examples_ks.custom_deserialization (pk, v) VALUES (1, 'asdf')", (), ) @@ -41,7 +41,7 @@ async fn main() -> Result<()> { } let (v,) = session - .query( + .query_unpaged( "SELECT v FROM examples_ks.custom_deserialization WHERE pk = 1", (), ) @@ -68,7 +68,7 @@ async fn main() -> Result<()> { impl_from_cql_value_from_method!(MyOtherType, into_my_other_type); let (v,) = session - .query( + .query_unpaged( "SELECT v FROM examples_ks.custom_deserialization WHERE pk = 1", (), ) diff --git a/examples/execution_profile.rs b/examples/execution_profile.rs index 317604d9b0..b912c2780c 100644 --- a/examples/execution_profile.rs +++ b/examples/execution_profile.rs @@ -59,10 +59,10 @@ async fn main() -> 
Result<()> { session_3_config.add_known_node(uri); let session3: Session = Session::connect(session_3_config).await?; - session1.query("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; + session1.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; session2 - .query( + .query_unpaged( "CREATE TABLE IF NOT EXISTS examples_ks.execution_profile (a int, b int, c text, primary key (a, b))", &[], ) @@ -74,23 +74,27 @@ async fn main() -> Result<()> { // As `query_insert` is set another handle than session1, the execution profile pointed by query's handle // will be preferred, so the query below will be executed with `profile2`, even though `session1` is set `profile1`. query_insert.set_execution_profile_handle(Some(handle2.clone())); - session1.query(query_insert.clone(), (3, 4, "def")).await?; + session1 + .query_unpaged(query_insert.clone(), (3, 4, "def")) + .await?; // One can, however, change the execution profile referred by a handle: handle2.map_to_another_profile(profile1); // And now the following queries are executed with profile1: - session1.query(query_insert.clone(), (3, 4, "def")).await?; + session1 + .query_unpaged(query_insert.clone(), (3, 4, "def")) + .await?; session2 - .query("SELECT * FROM examples_ks.execution_profile", ()) + .query_unpaged("SELECT * FROM examples_ks.execution_profile", ()) .await?; // One can unset a profile handle from a statement and, since then, execute it with session's default profile. query_insert.set_execution_profile_handle(None); session3 - .query("SELECT * FROM examples_ks.execution_profile", ()) + .query_unpaged("SELECT * FROM examples_ks.execution_profile", ()) .await?; // This executes with default session profile. 
session2 - .query("SELECT * FROM examples_ks.execution_profile", ()) + .query_unpaged("SELECT * FROM examples_ks.execution_profile", ()) .await?; // This executes with profile1. Ok(()) diff --git a/examples/get_by_name.rs b/examples/get_by_name.rs index 03c14b8721..bb750de1b4 100644 --- a/examples/get_by_name.rs +++ b/examples/get_by_name.rs @@ -12,31 +12,31 @@ async fn main() -> Result<()> { let session: Session = SessionBuilder::new().known_node(uri).build().await?; - session.query("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; + session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; session - .query( + .query_unpaged( "CREATE TABLE IF NOT EXISTS examples_ks.get_by_name (pk int, ck int, value text, primary key (pk, ck))", &[], ) .await?; session - .query( + .query_unpaged( "INSERT INTO examples_ks.get_by_name (pk, ck, value) VALUES (?, ?, ?)", (3, 4, "def"), ) .await?; session - .query( + .query_unpaged( "INSERT INTO examples_ks.get_by_name (pk, ck, value) VALUES (1, 2, 'abc')", &[], ) .await?; let query_result = session - .query("SELECT pk, ck, value FROM examples_ks.get_by_name", &[]) + .query_unpaged("SELECT pk, ck, value FROM examples_ks.get_by_name", &[]) .await?; let (ck_idx, _) = query_result .get_column_spec("ck") diff --git a/examples/logging.rs b/examples/logging.rs index 19b22d73cb..6b090acbcb 100644 --- a/examples/logging.rs +++ b/examples/logging.rs @@ -17,9 +17,9 @@ async fn main() -> Result<()> { info!("Connecting to {}", uri); let session: Session = SessionBuilder::new().known_node(uri).build().await?; - session.query("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; + session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' 
: 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; - session.query("USE examples_ks", &[]).await?; + session.query_unpaged("USE examples_ks", &[]).await?; Ok(()) } diff --git a/examples/logging_log.rs b/examples/logging_log.rs index 33a2b794c8..da82f42241 100644 --- a/examples/logging_log.rs +++ b/examples/logging_log.rs @@ -19,9 +19,9 @@ async fn main() -> Result<()> { info!("Connecting to {}", uri); let session: Session = SessionBuilder::new().known_node(uri).build().await?; - session.query("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; + session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; - session.query("USE examples_ks", &[]).await?; + session.query_unpaged("USE examples_ks", &[]).await?; Ok(()) } diff --git a/examples/parallel-prepared.rs b/examples/parallel-prepared.rs index ae2a11e3f0..167b583944 100644 --- a/examples/parallel-prepared.rs +++ b/examples/parallel-prepared.rs @@ -14,10 +14,10 @@ async fn main() -> Result<()> { let session: Session = SessionBuilder::new().known_node(uri).build().await?; let session = Arc::new(session); - session.query("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; + session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; session - .query( + .query_unpaged( "CREATE TABLE IF NOT EXISTS examples_ks.parallel_prepared (a int, b int, c text, primary key (a, b))", &[], ) @@ -42,7 +42,7 @@ async fn main() -> Result<()> { let permit = sem.clone().acquire_owned().await; tokio::task::spawn(async move { session - .execute(&prepared, (i as i32, 2 * i as i32)) + .execute_unpaged(&prepared, (i as i32, 2 * i as i32)) .await 
.unwrap(); diff --git a/examples/parallel.rs b/examples/parallel.rs index 63b22225c9..716225fb77 100644 --- a/examples/parallel.rs +++ b/examples/parallel.rs @@ -14,10 +14,10 @@ async fn main() -> Result<()> { let session: Session = SessionBuilder::new().known_node(uri).build().await?; let session = Arc::new(session); - session.query("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; + session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; session - .query( + .query_unpaged( "CREATE TABLE IF NOT EXISTS examples_ks.parallel (a int, b int, c text, primary key (a, b))", &[], ) @@ -34,7 +34,7 @@ async fn main() -> Result<()> { let permit = sem.clone().acquire_owned().await; tokio::task::spawn(async move { session - .query( + .query_unpaged( format!( "INSERT INTO examples_ks.parallel (a, b, c) VALUES ({}, {}, 'abc')", i, diff --git a/examples/query_history.rs b/examples/query_history.rs index e75a30514f..d5e361f0ec 100644 --- a/examples/query_history.rs +++ b/examples/query_history.rs @@ -17,10 +17,10 @@ async fn main() -> Result<()> { let session: Session = SessionBuilder::new().known_node(uri).build().await?; - session.query("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; + session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; session - .query( + .query_unpaged( "CREATE TABLE IF NOT EXISTS examples_ks.query_history (a int, b int, c text, primary key (a, b))", &[], ) @@ -32,7 +32,7 @@ async fn main() -> Result<()> { query.set_history_listener(history_listener.clone()); // Run the query, doesn't matter if it failed, the history will still be saved - let _ignore_error = 
session.query(query.clone(), ()).await; + let _ignore_error = session.query_unpaged(query.clone(), ()).await; // Access the collected history and print it let structured_history: StructuredHistory = history_listener.clone_structured_history(); @@ -40,14 +40,14 @@ async fn main() -> Result<()> { // A single history collector can contain histories of multiple queries. // To clear a collector create a new one and set it again. - let _second_execution = session.query(query, ()).await; + let _second_execution = session.query_unpaged(query, ()).await; let structured_history: StructuredHistory = history_listener.clone_structured_history(); println!("Two queries history: {}", structured_history); // The same works for other types of queries, e.g iterators for i in 0..32 { session - .query( + .query_unpaged( "INSERT INTO examples_ks.query_history (a, b, c) VALUES (?, ?, 't')", (i, i), ) diff --git a/examples/schema_agreement.rs b/examples/schema_agreement.rs index ed12af95c8..c337f083ec 100644 --- a/examples/schema_agreement.rs +++ b/examples/schema_agreement.rs @@ -22,7 +22,7 @@ async fn main() -> Result<()> { println!("Schema version: {}", schema_version); - session.query("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; + session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; match session.await_schema_agreement().await { Ok(_schema_version) => println!("Schema is in agreement in time"), @@ -30,7 +30,7 @@ async fn main() -> Result<()> { Err(err) => bail!(err), }; session - .query( + .query_unpaged( "CREATE TABLE IF NOT EXISTS examples_ks.schema_agreement (a int, b int, c text, primary key (a, b))", &[], ) @@ -38,7 +38,7 @@ async fn main() -> Result<()> { session.await_schema_agreement().await?; session - .query( + .query_unpaged( "INSERT INTO examples_ks.schema_agreement (a, 
b, c) VALUES (?, ?, ?)", (3, 4, "def"), ) @@ -46,7 +46,7 @@ async fn main() -> Result<()> { session.await_schema_agreement().await?; session - .query( + .query_unpaged( "INSERT INTO examples_ks.schema_agreement (a, b, c) VALUES (1, 2, 'abc')", &[], ) @@ -56,18 +56,18 @@ async fn main() -> Result<()> { .prepare("INSERT INTO examples_ks.schema_agreement (a, b, c) VALUES (?, 7, ?)") .await?; session - .execute(&prepared, (42_i32, "I'm prepared!")) + .execute_unpaged(&prepared, (42_i32, "I'm prepared!")) .await?; session - .execute(&prepared, (43_i32, "I'm prepared 2!")) + .execute_unpaged(&prepared, (43_i32, "I'm prepared 2!")) .await?; session - .execute(&prepared, (44_i32, "I'm prepared 3!")) + .execute_unpaged(&prepared, (44_i32, "I'm prepared 3!")) .await?; // Rows can be parsed as tuples let result = session - .query("SELECT a, b, c FROM examples_ks.schema_agreement", &[]) + .query_unpaged("SELECT a, b, c FROM examples_ks.schema_agreement", &[]) .await?; let mut iter = result.rows_typed::<(i32, i32, String)>()?; while let Some((a, b, c)) = iter.next().transpose()? 
{ diff --git a/examples/select-paging.rs b/examples/select-paging.rs index 77a28fa6fb..2c6b4e2d2a 100644 --- a/examples/select-paging.rs +++ b/examples/select-paging.rs @@ -13,10 +13,10 @@ async fn main() -> Result<()> { let session: Session = SessionBuilder::new().known_node(uri).build().await?; - session.query("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; + session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; session - .query( + .query_unpaged( "CREATE TABLE IF NOT EXISTS examples_ks.select_paging (a int, b int, c text, primary key (a, b))", &[], ) @@ -24,7 +24,7 @@ async fn main() -> Result<()> { for i in 0..16_i32 { session - .query( + .query_unpaged( "INSERT INTO examples_ks.select_paging (a, b, c) VALUES (?, ?, 'abc')", (i, 2 * i), ) diff --git a/examples/speculative-execution.rs b/examples/speculative-execution.rs index 792b325c6d..e6c64e3ad7 100644 --- a/examples/speculative-execution.rs +++ b/examples/speculative-execution.rs @@ -26,10 +26,10 @@ async fn main() -> Result<()> { .build() .await?; - session.query("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; + session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; session - .query( + .query_unpaged( "CREATE TABLE IF NOT EXISTS examples_ks.speculative_execution (a int, b int, c text, primary key (a, b))", &[], ) @@ -43,7 +43,7 @@ async fn main() -> Result<()> { select_stmt.set_is_idempotent(true); // This will trigger speculative execution - session.execute(&select_stmt, &[]).await?; + session.execute_unpaged(&select_stmt, &[]).await?; Ok(()) } diff --git a/examples/tls.rs b/examples/tls.rs index 
b54296bae5..b011e1ff30 100644 --- a/examples/tls.rs +++ b/examples/tls.rs @@ -49,24 +49,24 @@ async fn main() -> Result<()> { .build() .await?; - session.query("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; + session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; session - .query( + .query_unpaged( "CREATE TABLE IF NOT EXISTS examples_ks.tls (a int, b int, c text, primary key (a, b))", &[], ) .await?; session - .query( + .query_unpaged( "INSERT INTO examples_ks.tls (a, b, c) VALUES (?, ?, ?)", (3, 4, "def"), ) .await?; session - .query( + .query_unpaged( "INSERT INTO examples_ks.tls (a, b, c) VALUES (1, 2, 'abc')", &[], ) @@ -76,18 +76,18 @@ async fn main() -> Result<()> { .prepare("INSERT INTO examples_ks.tls (a, b, c) VALUES (?, 7, ?)") .await?; session - .execute(&prepared, (42_i32, "I'm prepared!")) + .execute_unpaged(&prepared, (42_i32, "I'm prepared!")) .await?; session - .execute(&prepared, (43_i32, "I'm prepared 2!")) + .execute_unpaged(&prepared, (43_i32, "I'm prepared 2!")) .await?; session - .execute(&prepared, (44_i32, "I'm prepared 3!")) + .execute_unpaged(&prepared, (44_i32, "I'm prepared 3!")) .await?; // Rows can be parsed as tuples let result = session - .query("SELECT a, b, c FROM examples_ks.tls", &[]) + .query_unpaged("SELECT a, b, c FROM examples_ks.tls", &[]) .await?; let mut iter = result.rows_typed::<(i32, i32, String)>()?; while let Some((a, b, c)) = iter.next().transpose()? 
{ diff --git a/examples/tower.rs b/examples/tower.rs index 1c3bb2112a..b45b08ae19 100644 --- a/examples/tower.rs +++ b/examples/tower.rs @@ -22,7 +22,7 @@ impl Service for SessionService { fn call(&mut self, req: scylla::query::Query) -> Self::Future { let session = self.session.clone(); - Box::pin(async move { session.query(req, &[]).await }) + Box::pin(async move { session.query_unpaged(req, &[]).await }) } } diff --git a/examples/tracing.rs b/examples/tracing.rs index e4c9eb8047..d742de7e5f 100644 --- a/examples/tracing.rs +++ b/examples/tracing.rs @@ -26,10 +26,10 @@ async fn main() -> Result<()> { .build() .await?; - session.query("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; + session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; session - .query( + .query_unpaged( "CREATE TABLE IF NOT EXISTS examples_ks.tracing (val text primary key)", &[], ) @@ -42,7 +42,7 @@ async fn main() -> Result<()> { query.set_serial_consistency(Some(SerialConsistency::LocalSerial)); // QueryResult will contain a tracing_id which can be used to query tracing information - let query_result: QueryResult = session.query(query.clone(), &[]).await?; + let query_result: QueryResult = session.query_unpaged(query.clone(), &[]).await?; let query_tracing_id: Uuid = query_result .tracing_id .ok_or_else(|| anyhow!("Tracing id is None!"))?; @@ -79,7 +79,7 @@ async fn main() -> Result<()> { // To trace execution of a prepared statement tracing must be enabled for it prepared.set_tracing(true); - let execute_result: QueryResult = session.execute(&prepared, &[]).await?; + let execute_result: QueryResult = session.execute_unpaged(&prepared, &[]).await?; println!("Execute tracing id: {:?}", execute_result.tracing_id); // PAGED QUERY_ITER EXECUTE_ITER diff --git a/examples/user-defined-type.rs 
b/examples/user-defined-type.rs index 4db980b791..dd16346996 100644 --- a/examples/user-defined-type.rs +++ b/examples/user-defined-type.rs @@ -11,17 +11,17 @@ async fn main() -> Result<()> { let session: Session = SessionBuilder::new().known_node(uri).build().await?; - session.query("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; + session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; session - .query( + .query_unpaged( "CREATE TYPE IF NOT EXISTS examples_ks.my_type (int_val int, text_val text)", &[], ) .await?; session - .query( + .query_unpaged( "CREATE TABLE IF NOT EXISTS examples_ks.user_defined_type_table (k int, my my_type, primary key (k))", &[], ) @@ -42,7 +42,7 @@ async fn main() -> Result<()> { // It can be inserted like a normal value session - .query( + .query_unpaged( "INSERT INTO examples_ks.user_defined_type_table (k, my) VALUES (5, ?)", (to_insert,), ) @@ -50,7 +50,7 @@ async fn main() -> Result<()> { // And read like any normal value let result = session - .query("SELECT my FROM examples_ks.user_defined_type_table", &[]) + .query_unpaged("SELECT my FROM examples_ks.user_defined_type_table", &[]) .await?; let mut iter = result.rows_typed::<(MyType,)>()?; while let Some((my_val,)) = iter.next().transpose()? 
{ diff --git a/examples/value_list.rs b/examples/value_list.rs index 8cb4157e87..54a0b0bed3 100644 --- a/examples/value_list.rs +++ b/examples/value_list.rs @@ -9,10 +9,10 @@ async fn main() { let session: Session = SessionBuilder::new().known_node(uri).build().await.unwrap(); - session.query("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await.unwrap(); + session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await.unwrap(); session - .query( + .query_unpaged( "CREATE TABLE IF NOT EXISTS examples_ks.my_type (k int, my text, primary key (k))", &[], ) @@ -31,7 +31,7 @@ async fn main() { }; session - .query( + .query_unpaged( "INSERT INTO examples_ks.my_type (k, my) VALUES (?, ?)", to_insert, ) @@ -51,7 +51,7 @@ async fn main() { }; session - .query( + .query_unpaged( "INSERT INTO examples_ks.my_type (k, my) VALUES (?, ?)", to_insert_2, ) @@ -59,7 +59,7 @@ async fn main() { .unwrap(); let q = session - .query("SELECT * FROM examples_ks.my_type", &[]) + .query_unpaged("SELECT * FROM examples_ks.my_type", &[]) .await .unwrap(); diff --git a/scylla-cql/src/frame/request/query.rs b/scylla-cql/src/frame/request/query.rs index e4a9fcfaf0..9c755c3db9 100644 --- a/scylla-cql/src/frame/request/query.rs +++ b/scylla-cql/src/frame/request/query.rs @@ -56,6 +56,7 @@ impl<'q> DeserializableRequest for Query<'q> { }) } } + #[cfg_attr(test, derive(Debug, PartialEq, Eq))] pub struct QueryParameters<'a> { pub consistency: types::Consistency, diff --git a/scylla/src/history.rs b/scylla/src/history.rs index f7c9acf6d9..c0880514bc 100644 --- a/scylla/src/history.rs +++ b/scylla/src/history.rs @@ -924,7 +924,7 @@ mod tests { let history_collector = Arc::new(HistoryCollector::new()); query.set_history_listener(history_collector.clone()); - session.query(query.clone(), ()).await.unwrap(); + 
session.query_unpaged(query.clone(), ()).await.unwrap(); let history: StructuredHistory = history_collector.clone_structured_history(); @@ -949,7 +949,7 @@ mod tests { // Prepared queries retain the history listener set in Query. let prepared = session.prepare(query).await.unwrap(); - session.execute(&prepared, ()).await.unwrap(); + session.execute_unpaged(&prepared, ()).await.unwrap(); let history2: StructuredHistory = history_collector.clone_structured_history(); @@ -991,7 +991,7 @@ mod tests { let history_collector = Arc::new(HistoryCollector::new()); query.set_history_listener(history_collector.clone()); - assert!(session.query(query.clone(), ()).await.is_err()); + assert!(session.query_unpaged(query.clone(), ()).await.is_err()); let history: StructuredHistory = history_collector.clone_structured_history(); @@ -1025,18 +1025,18 @@ mod tests { let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session - .query(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]) + .query_unpaged(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]) .await .unwrap(); session.use_keyspace(ks, true).await.unwrap(); session - .query("CREATE TABLE t (p int primary key)", ()) + .query_unpaged("CREATE TABLE t (p int primary key)", ()) .await .unwrap(); for i in 0..32 { session - .query("INSERT INTO t (p) VALUES (?)", (i,)) + .query_unpaged("INSERT INTO t (p) VALUES (?)", (i,)) .await .unwrap(); } diff --git a/scylla/src/transport/authenticate_test.rs b/scylla/src/transport/authenticate_test.rs index 0383c69d34..78e72dea40 100644 --- a/scylla/src/transport/authenticate_test.rs +++ b/scylla/src/transport/authenticate_test.rs @@ -21,9 +21,12 @@ async fn authenticate_superuser() { .unwrap(); let ks = unique_keyspace_name(); - session.query(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = 
{{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); session.use_keyspace(ks, false).await.unwrap(); - session.query("DROP TABLE IF EXISTS t;", &[]).await.unwrap(); + session + .query_unpaged("DROP TABLE IF EXISTS t;", &[]) + .await + .unwrap(); println!("Ok."); } @@ -77,9 +80,12 @@ async fn custom_authentication() { .unwrap(); let ks = unique_keyspace_name(); - session.query(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); session.use_keyspace(ks, false).await.unwrap(); - session.query("DROP TABLE IF EXISTS t;", &[]).await.unwrap(); + session + .query_unpaged("DROP TABLE IF EXISTS t;", &[]) + .await + .unwrap(); println!("Ok."); } diff --git a/scylla/src/transport/caching_session.rs b/scylla/src/transport/caching_session.rs index a584d82774..5b3d6ca1bf 100644 --- a/scylla/src/transport/caching_session.rs +++ b/scylla/src/transport/caching_session.rs @@ -69,15 +69,15 @@ where } } - /// Does the same thing as [`Session::execute`] but uses the prepared statement cache - pub async fn execute( + /// Does the same thing as [`Session::execute_unpaged`] but uses the prepared statement cache + pub async fn execute_unpaged( &self, query: impl Into, values: impl SerializeRow, ) -> Result { let query = query.into(); let prepared = self.add_prepared_statement_owned(query).await?; - self.session.execute(&prepared, values).await + self.session.execute_unpaged(&prepared, values).await } /// Does the same thing as [`Session::execute_iter`] but uses the prepared statement cache @@ 
-247,12 +247,12 @@ mod tests { } session - .query(create_ks, &[]) + .query_unpaged(create_ks, &[]) .await .expect("Could not create keyspace"); session - .query( + .query_unpaged( format!( "CREATE TABLE IF NOT EXISTS {}.test_table (a int primary key, b int)", ks @@ -275,7 +275,7 @@ mod tests { // Add a row, this makes it easier to check if the caching works combined with the regular execute fn on Session session - .execute("insert into test_table(a, b) values (1, 2)", &[]) + .execute_unpaged("insert into test_table(a, b) values (1, 2)", &[]) .await .unwrap(); @@ -329,7 +329,7 @@ mod tests { setup_tracing(); let session = create_caching_session().await; let result = session - .execute("select * from test_table", &[]) + .execute_unpaged("select * from test_table", &[]) .await .unwrap(); @@ -337,7 +337,7 @@ mod tests { assert_eq!(1, result.rows_num().unwrap()); let result = session - .execute("select * from test_table", &[]) + .execute_unpaged("select * from test_table", &[]) .await .unwrap(); @@ -386,7 +386,7 @@ mod tests { expected_rows: &[(i32, i32)], ) { let selected_rows: BTreeSet<(i32, i32)> = sess - .execute("SELECT a, b FROM test_batch_table", ()) + .execute_unpaged("SELECT a, b FROM test_batch_table", ()) .await .unwrap() .rows_typed::<(i32, i32)>() @@ -442,7 +442,7 @@ mod tests { let session: CachingSession = create_caching_session().await; session - .execute( + .execute_unpaged( "CREATE TABLE IF NOT EXISTS test_batch_table (a int, b int, primary key (a, b))", (), ) @@ -565,7 +565,7 @@ mod tests { let session: CachingSession = CachingSession::from(new_for_test(true).await, 100); session - .execute("CREATE TABLE tbl (a int PRIMARY KEY, b int)", ()) + .execute_unpaged("CREATE TABLE tbl (a int PRIMARY KEY, b int)", ()) .await .unwrap(); @@ -576,7 +576,7 @@ mod tests { q1.set_timestamp(Some(1000)); session - .execute(q1, (1, 1)) + .execute_unpaged(q1, (1, 1)) .await .unwrap() .result_not_rows() @@ -587,7 +587,7 @@ mod tests { q2.set_timestamp(Some(2000)); 
session - .execute(q2, (2, 2)) + .execute_unpaged(q2, (2, 2)) .await .unwrap() .result_not_rows() @@ -595,7 +595,7 @@ mod tests { // Fetch both rows with their timestamps let mut rows = session - .execute("SELECT b, WRITETIME(b) FROM tbl", ()) + .execute_unpaged("SELECT b, WRITETIME(b) FROM tbl", ()) .await .unwrap() .rows_typed_or_empty::<(i32, i64)>() @@ -618,7 +618,7 @@ mod tests { let session: CachingSession = CachingSession::from(new_for_test(false).await, 100); session - .execute( + .execute_unpaged( "CREATE TABLE tbl (a int PRIMARY KEY) with cdc = {'enabled': true}", &(), ) diff --git a/scylla/src/transport/connection.rs b/scylla/src/transport/connection.rs index d39f36dd0d..7470fbf0ad 100644 --- a/scylla/src/transport/connection.rs +++ b/scylla/src/transport/connection.rs @@ -2235,14 +2235,14 @@ mod tests { .build() .await .unwrap(); - session.query(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks.clone()), &[]).await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks.clone()), &[]).await.unwrap(); session.use_keyspace(ks.clone(), false).await.unwrap(); session - .query("DROP TABLE IF EXISTS connection_query_iter_tab", &[]) + .query_unpaged("DROP TABLE IF EXISTS connection_query_iter_tab", &[]) .await .unwrap(); session - .query( + .query_unpaged( "CREATE TABLE IF NOT EXISTS connection_query_iter_tab (p int primary key)", &[], ) @@ -2330,10 +2330,10 @@ mod tests { .build() .await .unwrap(); - session.query(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks.clone()), &[]).await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks.clone()), &[]).await.unwrap(); 
session.use_keyspace(ks.clone(), false).await.unwrap(); session - .query( + .query_unpaged( "CREATE TABLE IF NOT EXISTS t (p int primary key, v blob)", &[], ) diff --git a/scylla/src/transport/cql_collections_test.rs b/scylla/src/transport/cql_collections_test.rs index 24f8ee51c4..d9fb521500 100644 --- a/scylla/src/transport/cql_collections_test.rs +++ b/scylla/src/transport/cql_collections_test.rs @@ -8,7 +8,7 @@ use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; async fn connect() -> Session { let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); - session.query(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); session.use_keyspace(ks, false).await.unwrap(); session @@ -16,7 +16,7 @@ async fn connect() -> Session { async fn create_table(session: &Session, table_name: &str, value_type: &str) { session - .query( + .query_unpaged( format!( "CREATE TABLE IF NOT EXISTS {} (p int PRIMARY KEY, val {})", table_name, value_type @@ -37,7 +37,7 @@ async fn insert_and_select( SelectT: FromCqlVal> + PartialEq + std::fmt::Debug, { session - .query( + .query_unpaged( format!("INSERT INTO {} (p, val) VALUES (0, ?)", table_name), (&to_insert,), ) @@ -45,7 +45,7 @@ async fn insert_and_select( .unwrap(); let selected_value: SelectT = session - .query(format!("SELECT val FROM {} WHERE p = 0", table_name), ()) + .query_unpaged(format!("SELECT val FROM {} WHERE p = 0", table_name), ()) .await .unwrap() .single_row_typed::<(SelectT,)>() diff --git a/scylla/src/transport/cql_types_test.rs b/scylla/src/transport/cql_types_test.rs index 778666f490..072e7b8fdf 100644 --- a/scylla/src/transport/cql_types_test.rs +++ b/scylla/src/transport/cql_types_test.rs 
@@ -36,16 +36,16 @@ async fn init_test_maybe_without_tablets( create_ks += " AND TABLETS = {'enabled': false}" } - session.query(create_ks, &[]).await.unwrap(); + session.query_unpaged(create_ks, &[]).await.unwrap(); session.use_keyspace(ks, false).await.unwrap(); session - .query(format!("DROP TABLE IF EXISTS {}", table_name), &[]) + .query_unpaged(format!("DROP TABLE IF EXISTS {}", table_name), &[]) .await .unwrap(); session - .query( + .query_unpaged( format!( "CREATE TABLE IF NOT EXISTS {} (id int PRIMARY KEY, val {})", table_name, type_name @@ -84,20 +84,20 @@ where let insert_string_encoded_value = format!("INSERT INTO {} (id, val) VALUES (0, {})", type_name, test); session - .query(insert_string_encoded_value, &[]) + .query_unpaged(insert_string_encoded_value, &[]) .await .unwrap(); let insert_bound_value = format!("INSERT INTO {} (id, val) VALUES (1, ?)", type_name); let value_to_bound = T::from_str(test).ok().unwrap(); session - .query(insert_bound_value, (value_to_bound,)) + .query_unpaged(insert_bound_value, (value_to_bound,)) .await .unwrap(); let select_values = format!("SELECT val from {}", type_name); let read_values: Vec = session - .query(select_values, &[]) + .query_unpaged(select_values, &[]) .await .unwrap() .rows_typed::<(T,)>() @@ -172,7 +172,7 @@ async fn test_cql_varint() { let ks = unique_keyspace_name(); session - .query( + .query_unpaged( format!( "CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = \ {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", @@ -185,7 +185,7 @@ async fn test_cql_varint() { session.use_keyspace(ks, false).await.unwrap(); session - .query( + .query_unpaged( format!( "CREATE TABLE IF NOT EXISTS {} (id int PRIMARY KEY, val varint)", table_name @@ -210,12 +210,12 @@ async fn test_cql_varint() { for test in tests { let cql_varint = CqlVarint::from_signed_bytes_be_slice(&test); session - .execute(&prepared_insert, (&cql_varint,)) + .execute_unpaged(&prepared_insert, (&cql_varint,)) .await .unwrap(); 
let read_values: Vec = session - .execute(&prepared_select, &[]) + .execute_unpaged(&prepared_select, &[]) .await .unwrap() .rows_typed::<(CqlVarint,)>() @@ -284,13 +284,13 @@ async fn test_counter() { let update_bound_value = format!("UPDATE {} SET val = val + ? WHERE id = ?", type_name); let value_to_bound = Counter(i64::from_str(test).unwrap()); session - .query(update_bound_value, (value_to_bound, i as i32)) + .query_unpaged(update_bound_value, (value_to_bound, i as i32)) .await .unwrap(); let select_values = format!("SELECT val FROM {} WHERE id = ?", type_name); let read_values: Vec = session - .query(select_values, (i as i32,)) + .query_unpaged(select_values, (i as i32,)) .await .unwrap() .rows_typed::<(Counter,)>() @@ -355,7 +355,7 @@ async fn test_naive_date_04() { for (date_text, date) in tests.iter() { session - .query( + .query_unpaged( format!( "INSERT INTO chrono_naive_date_tests (id, val) VALUES (0, '{}')", date_text @@ -366,7 +366,7 @@ async fn test_naive_date_04() { .unwrap(); let read_date: Option = session - .query("SELECT val from chrono_naive_date_tests", &[]) + .query_unpaged("SELECT val from chrono_naive_date_tests", &[]) .await .unwrap() .rows_typed::<(NaiveDate,)>() @@ -381,7 +381,7 @@ async fn test_naive_date_04() { // If date is representable by NaiveDate try inserting it and reading again if let Some(naive_date) = date { session - .query( + .query_unpaged( "INSERT INTO chrono_naive_date_tests (id, val) VALUES (0, ?)", (naive_date,), ) @@ -389,7 +389,7 @@ async fn test_naive_date_04() { .unwrap(); let (read_date,): (NaiveDate,) = session - .query("SELECT val from chrono_naive_date_tests", &[]) + .query_unpaged("SELECT val from chrono_naive_date_tests", &[]) .await .unwrap() .single_row_typed::<(NaiveDate,)>() @@ -417,7 +417,7 @@ async fn test_cql_date() { for (date_text, date) in &tests { session - .query( + .query_unpaged( format!( "INSERT INTO cql_date_tests (id, val) VALUES (0, '{}')", date_text @@ -428,7 +428,7 @@ async fn 
test_cql_date() { .unwrap(); let read_date: CqlDate = session - .query("SELECT val from cql_date_tests", &[]) + .query_unpaged("SELECT val from cql_date_tests", &[]) .await .unwrap() .rows @@ -443,7 +443,7 @@ async fn test_cql_date() { // 1 less/more than min/max values allowed by the database should cause error session - .query( + .query_unpaged( "INSERT INTO cql_date_tests (id, val) VALUES (0, '-5877641-06-22')", &[], ) @@ -451,7 +451,7 @@ async fn test_cql_date() { .unwrap_err(); session - .query( + .query_unpaged( "INSERT INTO cql_date_tests (id, val) VALUES (0, '5881580-07-12')", &[], ) @@ -504,7 +504,7 @@ async fn test_date_03() { for (date_text, date) in tests.iter() { session - .query( + .query_unpaged( format!( "INSERT INTO time_date_tests (id, val) VALUES (0, '{}')", date_text @@ -515,7 +515,7 @@ async fn test_date_03() { .unwrap(); let read_date = session - .query("SELECT val from time_date_tests", &[]) + .query_unpaged("SELECT val from time_date_tests", &[]) .await .unwrap() .first_row_typed::<(Date,)>() @@ -527,7 +527,7 @@ async fn test_date_03() { // If date is representable by time::Date try inserting it and reading again if let Some(date) = date { session - .query( + .query_unpaged( "INSERT INTO time_date_tests (id, val) VALUES (0, ?)", (date,), ) @@ -535,7 +535,7 @@ async fn test_date_03() { .unwrap(); let (read_date,) = session - .query("SELECT val from time_date_tests", &[]) + .query_unpaged("SELECT val from time_date_tests", &[]) .await .unwrap() .first_row_typed::<(Date,)>() @@ -567,7 +567,7 @@ async fn test_cql_time() { for (time_str, time_duration) in &tests { // Insert time as a string and verify that it matches session - .query( + .query_unpaged( format!( "INSERT INTO cql_time_tests (id, val) VALUES (0, '{}')", time_str @@ -578,7 +578,7 @@ async fn test_cql_time() { .unwrap(); let (read_time,) = session - .query("SELECT val from cql_time_tests", &[]) + .query_unpaged("SELECT val from cql_time_tests", &[]) .await .unwrap() 
.single_row_typed::<(CqlTime,)>() @@ -588,7 +588,7 @@ async fn test_cql_time() { // Insert time as a bound CqlTime value and verify that it matches session - .query( + .query_unpaged( "INSERT INTO cql_time_tests (id, val) VALUES (0, ?)", (*time_duration,), ) @@ -596,7 +596,7 @@ async fn test_cql_time() { .unwrap(); let (read_time,) = session - .query("SELECT val from cql_time_tests", &[]) + .query_unpaged("SELECT val from cql_time_tests", &[]) .await .unwrap() .single_row_typed::<(CqlTime,)>() @@ -619,7 +619,7 @@ async fn test_cql_time() { for time_str in &invalid_tests { session - .query( + .query_unpaged( format!( "INSERT INTO cql_time_tests (id, val) VALUES (0, '{}')", time_str @@ -663,7 +663,7 @@ async fn test_naive_time_04() { for (time_text, time) in tests.iter() { // Insert as string and read it again session - .query( + .query_unpaged( format!( "INSERT INTO chrono_time_tests (id, val) VALUES (0, '{}')", time_text @@ -674,7 +674,7 @@ async fn test_naive_time_04() { .unwrap(); let (read_time,) = session - .query("SELECT val from chrono_time_tests", &[]) + .query_unpaged("SELECT val from chrono_time_tests", &[]) .await .unwrap() .first_row_typed::<(NaiveTime,)>() @@ -684,7 +684,7 @@ async fn test_naive_time_04() { // Insert as type and read it again session - .query( + .query_unpaged( "INSERT INTO chrono_time_tests (id, val) VALUES (0, ?)", (time,), ) @@ -692,7 +692,7 @@ async fn test_naive_time_04() { .unwrap(); let (read_time,) = session - .query("SELECT val from chrono_time_tests", &[]) + .query_unpaged("SELECT val from chrono_time_tests", &[]) .await .unwrap() .first_row_typed::<(NaiveTime,)>() @@ -703,7 +703,7 @@ async fn test_naive_time_04() { // chrono can represent leap seconds, this should not panic let leap_second = NaiveTime::from_hms_nano_opt(23, 59, 59, 1_500_000_000); session - .query( + .query_unpaged( "INSERT INTO cql_time_tests (id, val) VALUES (0, ?)", (leap_second,), ) @@ -743,7 +743,7 @@ async fn test_time_03() { for (time_text, time) in 
tests.iter() { // Insert as string and read it again session - .query( + .query_unpaged( format!( "INSERT INTO time_time_tests (id, val) VALUES (0, '{}')", time_text @@ -754,7 +754,7 @@ async fn test_time_03() { .unwrap(); let (read_time,) = session - .query("SELECT val from time_time_tests", &[]) + .query_unpaged("SELECT val from time_time_tests", &[]) .await .unwrap() .first_row_typed::<(Time,)>() @@ -764,7 +764,7 @@ async fn test_time_03() { // Insert as type and read it again session - .query( + .query_unpaged( "INSERT INTO time_time_tests (id, val) VALUES (0, ?)", (time,), ) @@ -772,7 +772,7 @@ async fn test_time_03() { .unwrap(); let (read_time,) = session - .query("SELECT val from time_time_tests", &[]) + .query_unpaged("SELECT val from time_time_tests", &[]) .await .unwrap() .first_row_typed::<(Time,)>() @@ -814,7 +814,7 @@ async fn test_cql_timestamp() { for (timestamp_str, timestamp_duration) in &tests { // Insert timestamp as a string and verify that it matches session - .query( + .query_unpaged( format!( "INSERT INTO cql_timestamp_tests (id, val) VALUES (0, '{}')", timestamp_str @@ -825,7 +825,7 @@ async fn test_cql_timestamp() { .unwrap(); let (read_timestamp,) = session - .query("SELECT val from cql_timestamp_tests", &[]) + .query_unpaged("SELECT val from cql_timestamp_tests", &[]) .await .unwrap() .single_row_typed::<(CqlTimestamp,)>() @@ -835,7 +835,7 @@ async fn test_cql_timestamp() { // Insert timestamp as a bound CqlTimestamp value and verify that it matches session - .query( + .query_unpaged( "INSERT INTO cql_timestamp_tests (id, val) VALUES (0, ?)", (*timestamp_duration,), ) @@ -843,7 +843,7 @@ async fn test_cql_timestamp() { .unwrap(); let (read_timestamp,) = session - .query("SELECT val from cql_timestamp_tests", &[]) + .query_unpaged("SELECT val from cql_timestamp_tests", &[]) .await .unwrap() .single_row_typed::<(CqlTimestamp,)>() @@ -909,7 +909,7 @@ async fn test_date_time_04() { for (datetime_text, datetime) in tests.iter() { // Insert as 
string and read it again session - .query( + .query_unpaged( format!( "INSERT INTO chrono_datetime_tests (id, val) VALUES (0, '{}')", datetime_text @@ -920,7 +920,7 @@ async fn test_date_time_04() { .unwrap(); let (read_datetime,) = session - .query("SELECT val from chrono_datetime_tests", &[]) + .query_unpaged("SELECT val from chrono_datetime_tests", &[]) .await .unwrap() .first_row_typed::<(DateTime,)>() @@ -930,7 +930,7 @@ async fn test_date_time_04() { // Insert as type and read it again session - .query( + .query_unpaged( "INSERT INTO chrono_datetime_tests (id, val) VALUES (0, ?)", (datetime,), ) @@ -938,7 +938,7 @@ async fn test_date_time_04() { .unwrap(); let (read_datetime,) = session - .query("SELECT val from chrono_datetime_tests", &[]) + .query_unpaged("SELECT val from chrono_datetime_tests", &[]) .await .unwrap() .first_row_typed::<(DateTime,)>() @@ -958,7 +958,7 @@ async fn test_date_time_04() { ) .and_utc(); session - .query( + .query_unpaged( "INSERT INTO chrono_datetime_tests (id, val) VALUES (0, ?)", (nanosecond_precision_1st_half,), ) @@ -966,7 +966,7 @@ async fn test_date_time_04() { .unwrap(); let (read_datetime,) = session - .query("SELECT val from chrono_datetime_tests", &[]) + .query_unpaged("SELECT val from chrono_datetime_tests", &[]) .await .unwrap() .first_row_typed::<(DateTime,)>() @@ -984,7 +984,7 @@ async fn test_date_time_04() { ) .and_utc(); session - .query( + .query_unpaged( "INSERT INTO chrono_datetime_tests (id, val) VALUES (0, ?)", (nanosecond_precision_2nd_half,), ) @@ -992,7 +992,7 @@ async fn test_date_time_04() { .unwrap(); let (read_datetime,) = session - .query("SELECT val from chrono_datetime_tests", &[]) + .query_unpaged("SELECT val from chrono_datetime_tests", &[]) .await .unwrap() .first_row_typed::<(DateTime,)>() @@ -1006,7 +1006,7 @@ async fn test_date_time_04() { ) .and_utc(); session - .query( + .query_unpaged( "INSERT INTO cql_datetime_tests (id, val) VALUES (0, ?)", (leap_second,), ) @@ -1070,7 +1070,7 @@ async 
fn test_offset_date_time_03() { for (datetime_text, datetime) in tests.iter() { // Insert as string and read it again session - .query( + .query_unpaged( format!( "INSERT INTO time_datetime_tests (id, val) VALUES (0, '{}')", datetime_text @@ -1081,7 +1081,7 @@ async fn test_offset_date_time_03() { .unwrap(); let (read_datetime,) = session - .query("SELECT val from time_datetime_tests", &[]) + .query_unpaged("SELECT val from time_datetime_tests", &[]) .await .unwrap() .first_row_typed::<(OffsetDateTime,)>() @@ -1091,7 +1091,7 @@ async fn test_offset_date_time_03() { // Insert as type and read it again session - .query( + .query_unpaged( "INSERT INTO time_datetime_tests (id, val) VALUES (0, ?)", (datetime,), ) @@ -1099,7 +1099,7 @@ async fn test_offset_date_time_03() { .unwrap(); let (read_datetime,) = session - .query("SELECT val from time_datetime_tests", &[]) + .query_unpaged("SELECT val from time_datetime_tests", &[]) .await .unwrap() .first_row_typed::<(OffsetDateTime,)>() @@ -1119,7 +1119,7 @@ async fn test_offset_date_time_03() { ) .assume_utc(); session - .query( + .query_unpaged( "INSERT INTO time_datetime_tests (id, val) VALUES (0, ?)", (nanosecond_precision_1st_half,), ) @@ -1127,7 +1127,7 @@ async fn test_offset_date_time_03() { .unwrap(); let (read_datetime,) = session - .query("SELECT val from time_datetime_tests", &[]) + .query_unpaged("SELECT val from time_datetime_tests", &[]) .await .unwrap() .first_row_typed::<(OffsetDateTime,)>() @@ -1145,7 +1145,7 @@ async fn test_offset_date_time_03() { ) .assume_utc(); session - .query( + .query_unpaged( "INSERT INTO time_datetime_tests (id, val) VALUES (0, ?)", (nanosecond_precision_2nd_half,), ) @@ -1153,7 +1153,7 @@ async fn test_offset_date_time_03() { .unwrap(); let (read_datetime,) = session - .query("SELECT val from time_datetime_tests", &[]) + .query_unpaged("SELECT val from time_datetime_tests", &[]) .await .unwrap() .first_row_typed::<(OffsetDateTime,)>() @@ -1191,7 +1191,7 @@ async fn test_timeuuid() 
{ for (timeuuid_str, timeuuid_bytes) in &tests { // Insert timeuuid as a string and verify that it matches session - .query( + .query_unpaged( format!( "INSERT INTO timeuuid_tests (id, val) VALUES (0, {})", timeuuid_str @@ -1202,7 +1202,7 @@ async fn test_timeuuid() { .unwrap(); let (read_timeuuid,): (CqlTimeuuid,) = session - .query("SELECT val from timeuuid_tests", &[]) + .query_unpaged("SELECT val from timeuuid_tests", &[]) .await .unwrap() .single_row_typed::<(CqlTimeuuid,)>() @@ -1213,7 +1213,7 @@ async fn test_timeuuid() { // Insert timeuuid as a bound value and verify that it matches let test_uuid: CqlTimeuuid = CqlTimeuuid::from_slice(timeuuid_bytes.as_ref()).unwrap(); session - .query( + .query_unpaged( "INSERT INTO timeuuid_tests (id, val) VALUES (0, ?)", (test_uuid,), ) @@ -1221,7 +1221,7 @@ async fn test_timeuuid() { .unwrap(); let (read_timeuuid,): (CqlTimeuuid,) = session - .query("SELECT val from timeuuid_tests", &[]) + .query_unpaged("SELECT val from timeuuid_tests", &[]) .await .unwrap() .single_row_typed::<(CqlTimeuuid,)>() @@ -1238,7 +1238,7 @@ async fn test_timeuuid_ordering() { let ks = unique_keyspace_name(); session - .query( + .query_unpaged( format!( "CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = \ {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", @@ -1251,7 +1251,7 @@ async fn test_timeuuid_ordering() { session.use_keyspace(ks, false).await.unwrap(); session - .query( + .query_unpaged( "CREATE TABLE tab (p int, t timeuuid, PRIMARY KEY (p, t))", (), ) @@ -1283,11 +1283,14 @@ async fn test_timeuuid_ordering() { .await .unwrap(); for timeuuid_val in &perms[0] { - session.execute(&prepared, (timeuuid_val,)).await.unwrap(); + session + .execute_unpaged(&prepared, (timeuuid_val,)) + .await + .unwrap(); } let scylla_order_timeuuids: Vec = session - .query("SELECT t FROM tab WHERE p = 0", ()) + .query_unpaged("SELECT t FROM tab WHERE p = 0", ()) .await .unwrap() .rows_typed::<(CqlTimeuuid,)>() @@ -1355,7 +1358,7 @@ async fn 
test_inet() { for (inet_str, inet) in &tests { // Insert inet as a string and verify that it matches session - .query( + .query_unpaged( format!( "INSERT INTO inet_tests (id, val) VALUES (0, '{}')", inet_str @@ -1366,7 +1369,7 @@ async fn test_inet() { .unwrap(); let (read_inet,): (IpAddr,) = session - .query("SELECT val from inet_tests WHERE id = 0", &[]) + .query_unpaged("SELECT val from inet_tests WHERE id = 0", &[]) .await .unwrap() .single_row_typed::<(IpAddr,)>() @@ -1376,12 +1379,12 @@ async fn test_inet() { // Insert inet as a bound value and verify that it matches session - .query("INSERT INTO inet_tests (id, val) VALUES (0, ?)", (inet,)) + .query_unpaged("INSERT INTO inet_tests (id, val) VALUES (0, ?)", (inet,)) .await .unwrap(); let (read_inet,): (IpAddr,) = session - .query("SELECT val from inet_tests WHERE id = 0", &[]) + .query_unpaged("SELECT val from inet_tests WHERE id = 0", &[]) .await .unwrap() .single_row_typed::<(IpAddr,)>() @@ -1424,7 +1427,7 @@ async fn test_blob() { for (blob_str, blob) in &tests { // Insert blob as a string and verify that it matches session - .query( + .query_unpaged( format!("INSERT INTO blob_tests (id, val) VALUES (0, {})", blob_str), &[], ) @@ -1432,7 +1435,7 @@ async fn test_blob() { .unwrap(); let (read_blob,): (Vec,) = session - .query("SELECT val from blob_tests WHERE id = 0", &[]) + .query_unpaged("SELECT val from blob_tests WHERE id = 0", &[]) .await .unwrap() .single_row_typed::<(Vec,)>() @@ -1442,12 +1445,12 @@ async fn test_blob() { // Insert blob as a bound value and verify that it matches session - .query("INSERT INTO blob_tests (id, val) VALUES (0, ?)", (blob,)) + .query_unpaged("INSERT INTO blob_tests (id, val) VALUES (0, ?)", (blob,)) .await .unwrap(); let (read_blob,): (Vec,) = session - .query("SELECT val from blob_tests WHERE id = 0", &[]) + .query_unpaged("SELECT val from blob_tests WHERE id = 0", &[]) .await .unwrap() .single_row_typed::<(Vec,)>() @@ -1467,7 +1470,7 @@ async fn 
test_udt_after_schema_update() { let ks = unique_keyspace_name(); session - .query( + .query_unpaged( format!( "CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = \ {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", @@ -1480,17 +1483,17 @@ async fn test_udt_after_schema_update() { session.use_keyspace(ks, false).await.unwrap(); session - .query(format!("DROP TABLE IF EXISTS {}", table_name), &[]) + .query_unpaged(format!("DROP TABLE IF EXISTS {}", table_name), &[]) .await .unwrap(); session - .query(format!("DROP TYPE IF EXISTS {}", type_name), &[]) + .query_unpaged(format!("DROP TYPE IF EXISTS {}", type_name), &[]) .await .unwrap(); session - .query( + .query_unpaged( format!( "CREATE TYPE IF NOT EXISTS {} (first int, second boolean)", type_name @@ -1501,7 +1504,7 @@ async fn test_udt_after_schema_update() { .unwrap(); session - .query( + .query_unpaged( format!( "CREATE TABLE IF NOT EXISTS {} (id int PRIMARY KEY, val {})", table_name, type_name @@ -1524,7 +1527,7 @@ async fn test_udt_after_schema_update() { }; session - .query( + .query_unpaged( format!( "INSERT INTO {}(id,val) VALUES (0, {})", table_name, "{first: 123, second: true}" @@ -1535,7 +1538,7 @@ async fn test_udt_after_schema_update() { .unwrap(); let (read_udt,): (UdtV1,) = session - .query(format!("SELECT val from {} WHERE id = 0", table_name), &[]) + .query_unpaged(format!("SELECT val from {} WHERE id = 0", table_name), &[]) .await .unwrap() .single_row_typed::<(UdtV1,)>() @@ -1544,7 +1547,7 @@ async fn test_udt_after_schema_update() { assert_eq!(read_udt, v1); session - .query( + .query_unpaged( format!("INSERT INTO {}(id,val) VALUES (0, ?)", table_name), &(&v1,), ) @@ -1552,7 +1555,7 @@ async fn test_udt_after_schema_update() { .unwrap(); let (read_udt,): (UdtV1,) = session - .query(format!("SELECT val from {} WHERE id = 0", table_name), &[]) + .query_unpaged(format!("SELECT val from {} WHERE id = 0", table_name), &[]) .await .unwrap() .single_row_typed::<(UdtV1,)>() @@ -1561,7 
+1564,7 @@ async fn test_udt_after_schema_update() { assert_eq!(read_udt, v1); session - .query(format!("ALTER TYPE {} ADD third text;", type_name), &[]) + .query_unpaged(format!("ALTER TYPE {} ADD third text;", type_name), &[]) .await .unwrap(); @@ -1573,7 +1576,7 @@ async fn test_udt_after_schema_update() { } let (read_udt,): (UdtV2,) = session - .query(format!("SELECT val from {} WHERE id = 0", table_name), &[]) + .query_unpaged(format!("SELECT val from {} WHERE id = 0", table_name), &[]) .await .unwrap() .single_row_typed::<(UdtV2,)>() @@ -1595,7 +1598,7 @@ async fn test_empty() { let session: Session = init_test("empty_tests", "int").await; session - .query( + .query_unpaged( "INSERT INTO empty_tests (id, val) VALUES (0, blobasint(0x))", (), ) @@ -1603,7 +1606,7 @@ async fn test_empty() { .unwrap(); let (empty,) = session - .query("SELECT val FROM empty_tests WHERE id = 0", ()) + .query_unpaged("SELECT val FROM empty_tests WHERE id = 0", ()) .await .unwrap() .first_row_typed::<(CqlValue,)>() @@ -1612,7 +1615,7 @@ async fn test_empty() { assert_eq!(empty, CqlValue::Empty); session - .query( + .query_unpaged( "INSERT INTO empty_tests (id, val) VALUES (1, ?)", (CqlValue::Empty,), ) @@ -1620,7 +1623,7 @@ async fn test_empty() { .unwrap(); let (empty,) = session - .query("SELECT val FROM empty_tests WHERE id = 1", ()) + .query_unpaged("SELECT val FROM empty_tests WHERE id = 1", ()) .await .unwrap() .first_row_typed::<(CqlValue,)>() @@ -1639,7 +1642,7 @@ async fn test_udt_with_missing_field() { let ks = unique_keyspace_name(); session - .query( + .query_unpaged( format!( "CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = \ {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", @@ -1652,17 +1655,17 @@ async fn test_udt_with_missing_field() { session.use_keyspace(ks, false).await.unwrap(); session - .query(format!("DROP TABLE IF EXISTS {}", table_name), &[]) + .query_unpaged(format!("DROP TABLE IF EXISTS {}", table_name), &[]) .await .unwrap(); session 
- .query(format!("DROP TYPE IF EXISTS {}", type_name), &[]) + .query_unpaged(format!("DROP TYPE IF EXISTS {}", type_name), &[]) .await .unwrap(); session - .query( + .query_unpaged( format!( "CREATE TYPE IF NOT EXISTS {} (first int, second boolean, third float, fourth blob)", type_name @@ -1673,7 +1676,7 @@ async fn test_udt_with_missing_field() { .unwrap(); session - .query( + .query_unpaged( format!( "CREATE TABLE IF NOT EXISTS {} (id int PRIMARY KEY, val {})", table_name, type_name @@ -1696,14 +1699,14 @@ async fn test_udt_with_missing_field() { TR: FromCqlVal + PartialEq + Debug, { session - .query( + .query_unpaged( format!("INSERT INTO {}(id,val) VALUES (?,?)", table_name), &(id, &element), ) .await .unwrap(); let result = session - .query( + .query_unpaged( format!("SELECT val from {} WHERE id = ?", table_name), &(id,), ) diff --git a/scylla/src/transport/cql_value_test.rs b/scylla/src/transport/cql_value_test.rs index 082667c858..781ab919b5 100644 --- a/scylla/src/transport/cql_value_test.rs +++ b/scylla/src/transport/cql_value_test.rs @@ -10,7 +10,7 @@ async fn test_cqlvalue_udt() { let session: Session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session - .query( + .query_unpaged( format!( "CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = \ {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", @@ -23,14 +23,14 @@ async fn test_cqlvalue_udt() { session.use_keyspace(&ks, false).await.unwrap(); session - .query( + .query_unpaged( "CREATE TYPE IF NOT EXISTS cqlvalue_udt_type (int_val int, text_val text)", &[], ) .await .unwrap(); session - .query( + .query_unpaged( "CREATE TABLE IF NOT EXISTS cqlvalue_udt_test (k int, my cqlvalue_udt_type, primary key (k))", &[], ) @@ -47,7 +47,7 @@ async fn test_cqlvalue_udt() { }; session - .query( + .query_unpaged( "INSERT INTO cqlvalue_udt_test (k, my) VALUES (5, ?)", (&udt_cql_value,), ) @@ -55,7 +55,7 @@ async fn test_cqlvalue_udt() { .unwrap(); let rows = 
session - .query("SELECT my FROM cqlvalue_udt_test", &[]) + .query_unpaged("SELECT my FROM cqlvalue_udt_test", &[]) .await .unwrap() .rows @@ -76,7 +76,7 @@ async fn test_cqlvalue_duration() { let ks = unique_keyspace_name(); session - .query( + .query_unpaged( format!( "CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = \ {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", @@ -103,11 +103,11 @@ async fn test_cqlvalue_duration() { ]; for query in fixture_queries { - session.query(query.0, query.1).await.unwrap(); + session.query_unpaged(query.0, query.1).await.unwrap(); } let rows = session - .query( + .query_unpaged( "SELECT v FROM cqlvalue_duration_test WHERE pk = ?", (CqlValue::Int(0),), ) diff --git a/scylla/src/transport/large_batch_statements_test.rs b/scylla/src/transport/large_batch_statements_test.rs index 1cd92295e8..5c59adb723 100644 --- a/scylla/src/transport/large_batch_statements_test.rs +++ b/scylla/src/transport/large_batch_statements_test.rs @@ -34,13 +34,13 @@ async fn test_large_batch_statements() { async fn create_test_session(session: Session, ks: &String) -> Session { session - .query( + .query_unpaged( format!("CREATE KEYSPACE {} WITH REPLICATION = {{ 'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1 }}",ks), &[], ) .await.unwrap(); session - .query( + .query_unpaged( format!( "CREATE TABLE {}.pairs (dummy int, k blob, v blob, primary key (dummy, k))", ks diff --git a/scylla/src/transport/load_balancing/default.rs b/scylla/src/transport/load_balancing/default.rs index e2d5ad54da..962cbd2440 100644 --- a/scylla/src/transport/load_balancing/default.rs +++ b/scylla/src/transport/load_balancing/default.rs @@ -3857,7 +3857,7 @@ mod latency_awareness { .await .unwrap(); - session.query("whatever", ()).await.unwrap_err(); + session.query_unpaged("whatever", ()).await.unwrap_err(); } #[tokio::test(start_paused = true)] diff --git a/scylla/src/transport/session.rs b/scylla/src/transport/session.rs index 
d25f12a31f..7ed0bc301c 100644 --- a/scylla/src/transport/session.rs +++ b/scylla/src/transport/session.rs @@ -618,7 +618,7 @@ impl Session { /// # Ok(()) /// # } /// ``` - pub async fn query( + pub async fn query_unpaged( &self, query: impl Into, values: impl SerializeRow, @@ -1001,7 +1001,7 @@ impl Session { /// # Ok(()) /// # } /// ``` - pub async fn execute( + pub async fn execute_unpaged( &self, prepared: &PreparedStatement, values: impl SerializeRow, @@ -1509,8 +1509,8 @@ impl Session { traces_events_query.set_page_size(TRACING_QUERY_PAGE_SIZE); let (traces_session_res, traces_events_res) = tokio::try_join!( - self.query(traces_session_query, (tracing_id,)), - self.query(traces_events_query, (tracing_id,)) + self.query_unpaged(traces_session_query, (tracing_id,)), + self.query_unpaged(traces_events_query, (tracing_id,)) )?; // Get tracing info diff --git a/scylla/src/transport/session_test.rs b/scylla/src/transport/session_test.rs index 1b8a8909cc..2d8d1d880e 100644 --- a/scylla/src/transport/session_test.rs +++ b/scylla/src/transport/session_test.rs @@ -69,9 +69,9 @@ async fn test_unprepared_statement() { let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); - session.query(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); session - .query( + .query_unpaged( format!( "CREATE TABLE IF NOT EXISTS {}.t (a int, b int, c text, primary key (a, b))", ks @@ -82,21 +82,21 @@ async fn test_unprepared_statement() { .unwrap(); session - .query( + .query_unpaged( format!("INSERT INTO {}.t (a, b, c) VALUES (1, 2, 'abc')", ks), &[], ) .await .unwrap(); session - .query( + .query_unpaged( format!("INSERT INTO {}.t (a, b, c) VALUES (7, 11, '')", ks), 
&[], ) .await .unwrap(); session - .query( + .query_unpaged( format!("INSERT INTO {}.t (a, b, c) VALUES (1, 4, 'hello')", ks), &[], ) @@ -104,7 +104,7 @@ async fn test_unprepared_statement() { .unwrap(); let query_result = session - .query(format!("SELECT a, b, c FROM {}.t", ks), &[]) + .query_unpaged(format!("SELECT a, b, c FROM {}.t", ks), &[]) .await .unwrap(); @@ -171,9 +171,9 @@ async fn test_prepared_statement() { let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); - session.query(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); session - .query( + .query_unpaged( format!( "CREATE TABLE IF NOT EXISTS {}.t2 (a int, b int, c text, primary key (a, b))", ks @@ -183,7 +183,7 @@ async fn test_prepared_statement() { .await .unwrap(); session - .query(format!("CREATE TABLE IF NOT EXISTS {}.complex_pk (a int, b int, c text, d int, e int, primary key ((a,b,c),d))", ks), &[]) + .query_unpaged(format!("CREATE TABLE IF NOT EXISTS {}.complex_pk (a int, b int, c text, d int, e int, primary key ((a,b,c),d))", ks), &[]) .await .unwrap(); @@ -220,16 +220,19 @@ async fn test_prepared_statement() { .serialize_values(&values) .unwrap(); - session.execute(&prepared_statement, &values).await.unwrap(); session - .execute(&prepared_complex_pk_statement, &values) + .execute_unpaged(&prepared_statement, &values) + .await + .unwrap(); + session + .execute_unpaged(&prepared_complex_pk_statement, &values) .await .unwrap(); // Verify that token calculation is compatible with Scylla { let (value,): (i64,) = session - .query(format!("SELECT token(a) FROM {}.t2", ks), &[]) + .query_unpaged(format!("SELECT token(a) FROM {}.t2", ks), &[]) .await .unwrap() 
.single_row_typed() @@ -248,7 +251,7 @@ async fn test_prepared_statement() { } { let (value,): (i64,) = session - .query(format!("SELECT token(a,b,c) FROM {}.complex_pk", ks), &[]) + .query_unpaged(format!("SELECT token(a,b,c) FROM {}.complex_pk", ks), &[]) .await .unwrap() .single_row_typed() @@ -270,7 +273,7 @@ async fn test_prepared_statement() { // Verify that correct data was inserted { let rs = session - .query(format!("SELECT a,b,c FROM {}.t2", ks), &[]) + .query_unpaged(format!("SELECT a,b,c FROM {}.t2", ks), &[]) .await .unwrap() .rows @@ -305,7 +308,7 @@ async fn test_prepared_statement() { } { let (a, b, c, d, e): (i32, i32, String, i32, Option) = session - .query(format!("SELECT a,b,c,d,e FROM {}.complex_pk", ks), &[]) + .query_unpaged(format!("SELECT a,b,c,d,e FROM {}.complex_pk", ks), &[]) .await .unwrap() .single_row_typed() @@ -335,7 +338,7 @@ async fn test_prepared_statement() { e: 5, }; session - .query( + .query_unpaged( format!( "INSERT INTO {}.complex_pk (a,b,c,d,e) VALUES (?,?,?,?,?)", ks @@ -345,7 +348,7 @@ async fn test_prepared_statement() { .await .unwrap(); let output: ComplexPk = session - .query( + .query_unpaged( format!( "SELECT * FROM {}.complex_pk WHERE a = 9 and b = 8 and c = 'seven'", ks @@ -376,9 +379,9 @@ async fn test_counter_batch() { create_ks += " AND TABLETS = {'enabled': false}" } - session.query(create_ks, &[]).await.unwrap(); + session.query_unpaged(create_ks, &[]).await.unwrap(); session - .query( + .query_unpaged( format!( "CREATE TABLE IF NOT EXISTS {}.t_batch (key int PRIMARY KEY, value counter)", ks @@ -424,9 +427,9 @@ async fn test_batch() { let session = Arc::new(create_new_session_builder().build().await.unwrap()); let ks = unique_keyspace_name(); - session.query(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 
'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); session - .query( + .query_unpaged( format!( "CREATE TABLE IF NOT EXISTS {}.t_batch (a int, b int, c text, primary key (a, b))", ks @@ -472,7 +475,7 @@ async fn test_batch() { .unwrap(); let mut results: Vec<(i32, i32, String)> = session - .query(format!("SELECT a, b, c FROM {}.t_batch", ks), &[]) + .query_unpaged(format!("SELECT a, b, c FROM {}.t_batch", ks), &[]) .await .unwrap() .rows_typed() @@ -497,7 +500,7 @@ async fn test_batch() { // This statement flushes the prepared statement cache session - .query( + .query_unpaged( format!("ALTER TABLE {}.t_batch WITH gc_grace_seconds = 42", ks), &[], ) @@ -506,7 +509,7 @@ async fn test_batch() { session.batch(&batch, values).await.unwrap(); let results: Vec<(i32, i32, String)> = session - .query( + .query_unpaged( format!("SELECT a, b, c FROM {}.t_batch WHERE a = 4", ks), &[], ) @@ -526,9 +529,9 @@ async fn test_token_calculation() { let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); - session.query(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); session - .query( + .query_unpaged( format!("CREATE TABLE IF NOT EXISTS {}.t3 (a text primary key)", ks), &[], ) @@ -553,10 +556,13 @@ async fn test_token_calculation() { } let values = (&s,); let serialized_values = prepared_statement.serialize_values(&values).unwrap(); - session.execute(&prepared_statement, &values).await.unwrap(); + session + .execute_unpaged(&prepared_statement, &values) + .await + .unwrap(); let (value,): (i64,) = session - .query( + .query_unpaged( format!("SELECT token(a) FROM {}.t3 WHERE a = ?", ks), &values, ) @@ -592,9 +598,9 @@ async fn 
test_token_awareness() { create_ks += " AND TABLETS = {'enabled': false}" } - session.query(create_ks, &[]).await.unwrap(); + session.query_unpaged(create_ks, &[]).await.unwrap(); session - .query( + .query_unpaged( format!("CREATE TABLE IF NOT EXISTS {}.t (a text primary key)", ks), &[], ) @@ -613,7 +619,10 @@ async fn test_token_awareness() { let values = (&key,); // Execute a query and observe tracing info - let res = session.execute(&prepared_statement, values).await.unwrap(); + let res = session + .execute_unpaged(&prepared_statement, values) + .await + .unwrap(); let tracing_info = session .get_tracing_info(res.tracing_id.as_ref().unwrap()) .await @@ -641,10 +650,10 @@ async fn test_use_keyspace() { let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); - session.query(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); session - .query( + .query_unpaged( format!("CREATE TABLE IF NOT EXISTS {}.tab (a text primary key)", ks), &[], ) @@ -652,19 +661,19 @@ async fn test_use_keyspace() { .unwrap(); session - .query(format!("INSERT INTO {}.tab (a) VALUES ('test1')", ks), &[]) + .query_unpaged(format!("INSERT INTO {}.tab (a) VALUES ('test1')", ks), &[]) .await .unwrap(); session.use_keyspace(ks.clone(), false).await.unwrap(); session - .query("INSERT INTO tab (a) VALUES ('test2')", &[]) + .query_unpaged("INSERT INTO tab (a) VALUES ('test2')", &[]) .await .unwrap(); let mut rows: Vec = session - .query("SELECT * FROM tab", &[]) + .query_unpaged("SELECT * FROM tab", &[]) .await .unwrap() .rows_typed::<(String,)>() @@ -713,7 +722,7 @@ async fn test_use_keyspace() { .unwrap(); let mut rows2: Vec = session2 - .query("SELECT * FROM tab", &[]) + 
.query_unpaged("SELECT * FROM tab", &[]) .await .unwrap() .rows_typed::<(String,)>() @@ -733,11 +742,11 @@ async fn test_use_keyspace_case_sensitivity() { let ks_lower = unique_keyspace_name().to_lowercase(); let ks_upper = ks_lower.to_uppercase(); - session.query(format!("CREATE KEYSPACE IF NOT EXISTS \"{}\" WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks_lower), &[]).await.unwrap(); - session.query(format!("CREATE KEYSPACE IF NOT EXISTS \"{}\" WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks_upper), &[]).await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS \"{}\" WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks_lower), &[]).await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS \"{}\" WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks_upper), &[]).await.unwrap(); session - .query( + .query_unpaged( format!("CREATE TABLE {}.tab (a text primary key)", ks_lower), &[], ) @@ -745,7 +754,7 @@ async fn test_use_keyspace_case_sensitivity() { .unwrap(); session - .query( + .query_unpaged( format!("CREATE TABLE \"{}\".tab (a text primary key)", ks_upper), &[], ) @@ -753,7 +762,7 @@ async fn test_use_keyspace_case_sensitivity() { .unwrap(); session - .query( + .query_unpaged( format!("INSERT INTO {}.tab (a) VALUES ('lowercase')", ks_lower), &[], ) @@ -761,7 +770,7 @@ async fn test_use_keyspace_case_sensitivity() { .unwrap(); session - .query( + .query_unpaged( format!("INSERT INTO \"{}\".tab (a) VALUES ('uppercase')", ks_upper), &[], ) @@ -773,7 +782,7 @@ async fn test_use_keyspace_case_sensitivity() { session.use_keyspace(ks_upper.clone(), false).await.unwrap(); let rows: Vec = session - .query("SELECT * from tab", &[]) + .query_unpaged("SELECT * from tab", &[]) .await .unwrap() .rows_typed::<(String,)>() @@ -788,7 +797,7 @@ async fn 
test_use_keyspace_case_sensitivity() { session.use_keyspace(ks_upper, true).await.unwrap(); let rows: Vec = session - .query("SELECT * from tab", &[]) + .query_unpaged("SELECT * from tab", &[]) .await .unwrap() .rows_typed::<(String,)>() @@ -805,10 +814,10 @@ async fn test_raw_use_keyspace() { let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); - session.query(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); session - .query( + .query_unpaged( format!("CREATE TABLE IF NOT EXISTS {}.tab (a text primary key)", ks), &[], ) @@ -816,7 +825,7 @@ async fn test_raw_use_keyspace() { .unwrap(); session - .query( + .query_unpaged( format!("INSERT INTO {}.tab (a) VALUES ('raw_test')", ks), &[], ) @@ -824,12 +833,12 @@ async fn test_raw_use_keyspace() { .unwrap(); session - .query(format!("use \"{}\" ;", ks), &[]) + .query_unpaged(format!("use \"{}\" ;", ks), &[]) .await .unwrap(); let rows: Vec = session - .query("SELECT * FROM tab", &[]) + .query_unpaged("SELECT * FROM tab", &[]) .await .unwrap() .rows_typed::<(String,)>() @@ -841,12 +850,12 @@ async fn test_raw_use_keyspace() { // Check if case sensitivity is correctly detected assert!(session - .query(format!("use \"{}\" ;", ks.to_uppercase()), &[]) + .query_unpaged(format!("use \"{}\" ;", ks.to_uppercase()), &[]) .await .is_err()); assert!(session - .query(format!("use {} ;", ks.to_uppercase()), &[]) + .query_unpaged(format!("use {} ;", ks.to_uppercase()), &[]) .await .is_ok()); } @@ -861,7 +870,10 @@ async fn test_fetch_system_keyspace() { .await .unwrap(); - session.execute(&prepared_statement, &[]).await.unwrap(); + session + .execute_unpaged(&prepared_statement, &[]) + .await + .unwrap(); } 
// Test that some Database Errors are parsed correctly @@ -873,14 +885,14 @@ async fn test_db_errors() { // SyntaxError on bad query assert!(matches!( - session.query("gibberish", &[]).await, + session.query_unpaged("gibberish", &[]).await, Err(QueryError::DbError(DbError::SyntaxError, _)) )); // AlreadyExists when creating a keyspace for the second time - session.query(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); - let create_keyspace_res = session.query(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await; + let create_keyspace_res = session.query_unpaged(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await; let keyspace_exists_error: DbError = match create_keyspace_res { Err(QueryError::DbError(e, _)) => e, _ => panic!("Second CREATE KEYSPACE didn't return an error!"), @@ -896,7 +908,7 @@ async fn test_db_errors() { // AlreadyExists when creating a table for the second time session - .query( + .query_unpaged( format!("CREATE TABLE IF NOT EXISTS {}.tab (a text primary key)", ks), &[], ) @@ -904,7 +916,7 @@ async fn test_db_errors() { .unwrap(); let create_table_res = session - .query(format!("CREATE TABLE {}.tab (a text primary key)", ks), &[]) + .query_unpaged(format!("CREATE TABLE {}.tab (a text primary key)", ks), &[]) .await; let create_tab_error: DbError = match create_table_res { Err(QueryError::DbError(e, _)) => e, @@ -926,10 +938,10 @@ async fn test_tracing() { let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); - session.query(format!("CREATE KEYSPACE IF NOT EXISTS {} 
WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); session - .query( + .query_unpaged( format!("CREATE TABLE IF NOT EXISTS {}.tab (a text primary key)", ks), &[], ) @@ -948,7 +960,8 @@ async fn test_tracing() { async fn test_tracing_query(session: &Session, ks: String) { // A query without tracing enabled has no tracing uuid in result let untraced_query: Query = Query::new(format!("SELECT * FROM {}.tab", ks)); - let untraced_query_result: QueryResult = session.query(untraced_query, &[]).await.unwrap(); + let untraced_query_result: QueryResult = + session.query_unpaged(untraced_query, &[]).await.unwrap(); assert!(untraced_query_result.tracing_id.is_none()); @@ -956,7 +969,7 @@ async fn test_tracing_query(session: &Session, ks: String) { let mut traced_query: Query = Query::new(format!("SELECT * FROM {}.tab", ks)); traced_query.config.tracing = true; - let traced_query_result: QueryResult = session.query(traced_query, &[]).await.unwrap(); + let traced_query_result: QueryResult = session.query_unpaged(traced_query, &[]).await.unwrap(); assert!(traced_query_result.tracing_id.is_some()); // Querying this uuid from tracing table gives some results @@ -970,8 +983,10 @@ async fn test_tracing_execute(session: &Session, ks: String) { .await .unwrap(); - let untraced_prepared_result: QueryResult = - session.execute(&untraced_prepared, &[]).await.unwrap(); + let untraced_prepared_result: QueryResult = session + .execute_unpaged(&untraced_prepared, &[]) + .await + .unwrap(); assert!(untraced_prepared_result.tracing_id.is_none()); @@ -983,7 +998,10 @@ async fn test_tracing_execute(session: &Session, ks: String) { traced_prepared.config.tracing = true; - let traced_prepared_result: QueryResult = session.execute(&traced_prepared, 
&[]).await.unwrap(); + let traced_prepared_result: QueryResult = session + .execute_unpaged(&traced_prepared, &[]) + .await + .unwrap(); assert!(traced_prepared_result.tracing_id.is_some()); // Querying this uuid from tracing table gives some results @@ -1017,7 +1035,7 @@ async fn test_get_tracing_info(session: &Session, ks: String) { let mut traced_query: Query = Query::new(format!("SELECT * FROM {}.tab", ks)); traced_query.config.tracing = true; - let traced_query_result: QueryResult = session.query(traced_query, &[]).await.unwrap(); + let traced_query_result: QueryResult = session.query_unpaged(traced_query, &[]).await.unwrap(); let tracing_id: Uuid = traced_query_result.tracing_id.unwrap(); // Getting tracing info from session using this uuid works @@ -1134,7 +1152,7 @@ async fn assert_in_tracing_table(session: &Session, tracing_uuid: Uuid) { // we stick to a not-so-much-terribly-long interval here. for _ in 0..200 { let rows_num = session - .query(traces_query.clone(), (tracing_uuid,)) + .query_unpaged(traces_query.clone(), (tracing_uuid,)) .await .unwrap() .rows_num() @@ -1173,9 +1191,9 @@ async fn test_timestamp() { let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); - session.query(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); session - .query( + .query_unpaged( format!( "CREATE TABLE IF NOT EXISTS {}.t_timestamp (a text, b text, primary key (a))", ks @@ -1195,13 +1213,13 @@ async fn test_timestamp() { regular_query.set_timestamp(Some(420)); session - .query(regular_query.clone(), ("regular query", "higher timestamp")) + .query_unpaged(regular_query.clone(), ("regular query", "higher timestamp")) .await .unwrap(); 
regular_query.set_timestamp(Some(42)); session - .query(regular_query.clone(), ("regular query", "lower timestamp")) + .query_unpaged(regular_query.clone(), ("regular query", "lower timestamp")) .await .unwrap(); @@ -1211,13 +1229,13 @@ async fn test_timestamp() { prepared_statement.set_timestamp(Some(420)); session - .execute(&prepared_statement, ("prepared query", "higher timestamp")) + .execute_unpaged(&prepared_statement, ("prepared query", "higher timestamp")) .await .unwrap(); prepared_statement.set_timestamp(Some(42)); session - .execute(&prepared_statement, ("prepared query", "lower timestamp")) + .execute_unpaged(&prepared_statement, ("prepared query", "lower timestamp")) .await .unwrap(); @@ -1252,7 +1270,7 @@ async fn test_timestamp() { .unwrap(); let mut results = session - .query( + .query_unpaged( format!("SELECT a, b, WRITETIME(b) FROM {}.t_timestamp", ks), &[], ) @@ -1293,7 +1311,7 @@ async fn test_request_timeout() { let mut query: Query = Query::new("SELECT * FROM system_schema.tables"); query.set_request_timeout(Some(Duration::from_millis(1))); - match session.query(query, &[]).await { + match session.query_unpaged(query, &[]).await { Ok(_) => panic!("the query should have failed due to a client-side timeout"), Err(e) => assert_matches!(e, QueryError::RequestTimeout(_)), } @@ -1304,7 +1322,7 @@ async fn test_request_timeout() { .unwrap(); prepared.set_request_timeout(Some(Duration::from_millis(1))); - match session.execute(&prepared, &[]).await { + match session.execute_unpaged(&prepared, &[]).await { Ok(_) => panic!("the prepared query should have failed due to a client-side timeout"), Err(e) => assert_matches!(e, QueryError::RequestTimeout(_)), }; @@ -1318,14 +1336,14 @@ async fn test_request_timeout() { let mut query = Query::new("SELECT * FROM system_schema.tables"); - match timeouting_session.query(query.clone(), &[]).await { + match timeouting_session.query_unpaged(query.clone(), &[]).await { Ok(_) => panic!("the query should have failed 
due to a client-side timeout"), Err(e) => assert_matches!(e, QueryError::RequestTimeout(_)), }; query.set_request_timeout(Some(Duration::from_secs(10000))); - timeouting_session.query(query, &[]).await.expect( + timeouting_session.query_unpaged(query, &[]).await.expect( "the query should have not failed, because no client-side timeout was specified", ); @@ -1334,14 +1352,14 @@ async fn test_request_timeout() { .await .unwrap(); - match timeouting_session.execute(&prepared, &[]).await { + match timeouting_session.execute_unpaged(&prepared, &[]).await { Ok(_) => panic!("the prepared query should have failed due to a client-side timeout"), Err(e) => assert_matches!(e, QueryError::RequestTimeout(_)), }; prepared.set_request_timeout(Some(Duration::from_secs(10000))); - timeouting_session.execute(&prepared, &[]).await.expect("the prepared query should have not failed, because no client-side timeout was specified"); + timeouting_session.execute_unpaged(&prepared, &[]).await.expect("the prepared query should have not failed, because no client-side timeout was specified"); } } @@ -1439,14 +1457,17 @@ async fn test_schema_types_in_metadata() { let ks = unique_keyspace_name(); session - .query(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]) + .query_unpaged(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]) .await .unwrap(); - session.query(format!("USE {}", ks), &[]).await.unwrap(); + session + .query_unpaged(format!("USE {}", ks), &[]) + .await + .unwrap(); session - .query( + .query_unpaged( "CREATE TYPE IF NOT EXISTS type_a ( a map>, text>, b frozen>, frozen>>> @@ -1457,12 +1478,12 @@ async fn test_schema_types_in_metadata() { .unwrap(); session - .query("CREATE TYPE IF NOT EXISTS type_b (a int, b text)", &[]) + .query_unpaged("CREATE TYPE IF NOT EXISTS type_b (a int, b text)", &[]) .await .unwrap(); session - .query( + 
.query_unpaged( "CREATE TYPE IF NOT EXISTS type_c (a map>, frozen>)", &[], ) @@ -1470,7 +1491,7 @@ async fn test_schema_types_in_metadata() { .unwrap(); session - .query( + .query_unpaged( "CREATE TABLE IF NOT EXISTS table_a ( a frozen PRIMARY KEY, b type_b, @@ -1484,7 +1505,7 @@ async fn test_schema_types_in_metadata() { .unwrap(); session - .query( + .query_unpaged( "CREATE TABLE IF NOT EXISTS table_b ( a text PRIMARY KEY, b frozen> @@ -1595,14 +1616,17 @@ async fn test_user_defined_types_in_metadata() { let ks = unique_keyspace_name(); session - .query(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]) + .query_unpaged(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]) .await .unwrap(); - session.query(format!("USE {}", ks), &[]).await.unwrap(); + session + .query_unpaged(format!("USE {}", ks), &[]) + .await + .unwrap(); session - .query( + .query_unpaged( "CREATE TYPE IF NOT EXISTS type_a ( a map>, text>, b frozen>, frozen>>> @@ -1613,12 +1637,12 @@ async fn test_user_defined_types_in_metadata() { .unwrap(); session - .query("CREATE TYPE IF NOT EXISTS type_b (a int, b text)", &[]) + .query_unpaged("CREATE TYPE IF NOT EXISTS type_b (a int, b text)", &[]) .await .unwrap(); session - .query( + .query_unpaged( "CREATE TYPE IF NOT EXISTS type_c (a map>, frozen>)", &[], ) @@ -1656,14 +1680,17 @@ async fn test_column_kinds_in_metadata() { let ks = unique_keyspace_name(); session - .query(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]) + .query_unpaged(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]) .await .unwrap(); - session.query(format!("USE {}", ks), &[]).await.unwrap(); + session + .query_unpaged(format!("USE {}", ks), &[]) + .await + .unwrap(); session - .query( + 
.query_unpaged( "CREATE TABLE IF NOT EXISTS t ( a int, b int, @@ -1699,14 +1726,17 @@ async fn test_primary_key_ordering_in_metadata() { let ks = unique_keyspace_name(); session - .query(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]) + .query_unpaged(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]) .await .unwrap(); - session.query(format!("USE {}", ks), &[]).await.unwrap(); + session + .query_unpaged(format!("USE {}", ks), &[]) + .await + .unwrap(); session - .query( + .query_unpaged( "CREATE TABLE IF NOT EXISTS t ( a int, b int, @@ -1752,12 +1782,15 @@ async fn test_table_partitioner_in_metadata() { create_ks += " AND TABLETS = {'enabled': false}"; } - session.query(create_ks, &[]).await.unwrap(); + session.query_unpaged(create_ks, &[]).await.unwrap(); - session.query(format!("USE {}", ks), &[]).await.unwrap(); + session + .query_unpaged(format!("USE {}", ks), &[]) + .await + .unwrap(); session - .query( + .query_unpaged( "CREATE TABLE t (pk int, ck int, v int, PRIMARY KEY (pk, ck, v))WITH cdc = {'enabled':true}", &[], ) @@ -1790,14 +1823,17 @@ async fn test_turning_off_schema_fetching() { let ks = unique_keyspace_name(); session - .query(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]) + .query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]) .await .unwrap(); - session.query(format!("USE {}", ks), &[]).await.unwrap(); + session + .query_unpaged(format!("USE {}", ks), &[]) + .await + .unwrap(); session - .query( + .query_unpaged( "CREATE TYPE IF NOT EXISTS type_a ( a map>, text>, b frozen>, frozen>>> @@ -1808,12 +1844,12 @@ async fn test_turning_off_schema_fetching() { .unwrap(); session - .query("CREATE TYPE IF NOT EXISTS 
type_b (a int, b text)", &[]) + .query_unpaged("CREATE TYPE IF NOT EXISTS type_b (a int, b text)", &[]) .await .unwrap(); session - .query( + .query_unpaged( "CREATE TYPE IF NOT EXISTS type_c (a map>, frozen>)", &[], ) @@ -1821,7 +1857,7 @@ async fn test_turning_off_schema_fetching() { .unwrap(); session - .query( + .query_unpaged( "CREATE TABLE IF NOT EXISTS table_a ( a frozen PRIMARY KEY, b type_b, @@ -1860,14 +1896,17 @@ async fn test_named_bind_markers() { let ks = unique_keyspace_name(); session - .query(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]) + .query_unpaged(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]) .await .unwrap(); - session.query(format!("USE {}", ks), &[]).await.unwrap(); + session + .query_unpaged(format!("USE {}", ks), &[]) + .await + .unwrap(); session - .query( + .query_unpaged( "CREATE TABLE t (pk int, ck int, v int, PRIMARY KEY (pk, ck, v))", &[], ) @@ -1881,13 +1920,13 @@ async fn test_named_bind_markers() { .await .unwrap(); let hashmap: HashMap<&str, i32> = HashMap::from([("pk", 7), ("v", 42), ("ck", 13)]); - session.execute(&prepared, &hashmap).await.unwrap(); + session.execute_unpaged(&prepared, &hashmap).await.unwrap(); let btreemap: BTreeMap<&str, i32> = BTreeMap::from([("ck", 113), ("v", 142), ("pk", 17)]); - session.execute(&prepared, &btreemap).await.unwrap(); + session.execute_unpaged(&prepared, &btreemap).await.unwrap(); let rows: Vec<(i32, i32, i32)> = session - .query("SELECT pk, ck, v FROM t", &[]) + .query_unpaged("SELECT pk, ck, v FROM t", &[]) .await .unwrap() .rows_typed::<(i32, i32, i32)>() @@ -1904,7 +1943,7 @@ async fn test_named_bind_markers() { HashMap::from([("ck", 9)]), ]; for wrongmap in wrongmaps { - assert!(session.execute(&prepared, &wrongmap).await.is_err()); + assert!(session.execute_unpaged(&prepared, &wrongmap).await.is_err()); } } @@ -1920,11 
+1959,11 @@ async fn test_prepared_partitioner() { create_ks += " AND TABLETS = {'enabled': false}" } - session.query(create_ks, &[]).await.unwrap(); + session.query_unpaged(create_ks, &[]).await.unwrap(); session.use_keyspace(ks, false).await.unwrap(); session - .query("CREATE TABLE IF NOT EXISTS t1 (a int primary key)", &[]) + .query_unpaged("CREATE TABLE IF NOT EXISTS t1 (a int primary key)", &[]) .await .unwrap(); @@ -1946,7 +1985,7 @@ async fn test_prepared_partitioner() { } session - .query( + .query_unpaged( "CREATE TABLE IF NOT EXISTS t2 (a int primary key) WITH cdc = {'enabled':true}", &[], ) @@ -1969,14 +2008,14 @@ async fn test_prepared_partitioner() { async fn rename(session: &Session, rename_str: &str) { session - .query(format!("ALTER TABLE tab RENAME {}", rename_str), ()) + .query_unpaged(format!("ALTER TABLE tab RENAME {}", rename_str), ()) .await .unwrap(); } async fn rename_caching(session: &CachingSession, rename_str: &str) { session - .execute(format!("ALTER TABLE tab RENAME {}", rename_str), &()) + .execute_unpaged(format!("ALTER TABLE tab RENAME {}", rename_str), &()) .await .unwrap(); } @@ -1995,11 +2034,11 @@ async fn test_unprepared_reprepare_in_execute() { let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); - session.query(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); session.use_keyspace(ks, false).await.unwrap(); session - .query( + .query_unpaged( "CREATE TABLE IF NOT EXISTS tab (a int, b int, c int, primary key (a, b, c))", &[], ) @@ -2011,22 +2050,34 @@ async fn test_unprepared_reprepare_in_execute() { .await .unwrap(); - session.execute(&insert_a_b_c, (1, 2, 3)).await.unwrap(); + session + 
.execute_unpaged(&insert_a_b_c, (1, 2, 3)) + .await + .unwrap(); // Swap names of columns b and c rename(&session, "b TO tmp_name").await; // During rename the query should fail - assert!(session.execute(&insert_a_b_c, (1, 2, 3)).await.is_err()); + assert!(session + .execute_unpaged(&insert_a_b_c, (1, 2, 3)) + .await + .is_err()); rename(&session, "c TO b").await; - assert!(session.execute(&insert_a_b_c, (1, 2, 3)).await.is_err()); + assert!(session + .execute_unpaged(&insert_a_b_c, (1, 2, 3)) + .await + .is_err()); rename(&session, "tmp_name TO c").await; // Insert values again (b and c are swapped so those are different inserts) - session.execute(&insert_a_b_c, (1, 2, 3)).await.unwrap(); + session + .execute_unpaged(&insert_a_b_c, (1, 2, 3)) + .await + .unwrap(); let mut all_rows: Vec<(i32, i32, i32)> = session - .query("SELECT a, b, c FROM tab", ()) + .query_unpaged("SELECT a, b, c FROM tab", ()) .await .unwrap() .rows_typed::<(i32, i32, i32)>() @@ -2044,11 +2095,11 @@ async fn test_unusual_valuelists() { let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); - session.query(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); session.use_keyspace(ks, false).await.unwrap(); session - .query( + .query_unpaged( "CREATE TABLE IF NOT EXISTS tab (a int, b int, c varchar, primary key (a, b, c))", &[], ) @@ -2065,7 +2116,10 @@ async fn test_unusual_valuelists() { &2 as &dyn SerializeValue, &"&dyn" as &dyn SerializeValue, ]; - session.execute(&insert_a_b_c, values_dyn).await.unwrap(); + session + .execute_unpaged(&insert_a_b_c, values_dyn) + .await + .unwrap(); let values_box_dyn: Vec> = vec![ Box::new(1) as Box, @@ -2073,12 +2127,12 @@ async fn 
test_unusual_valuelists() { Box::new("Box dyn") as Box, ]; session - .execute(&insert_a_b_c, values_box_dyn) + .execute_unpaged(&insert_a_b_c, values_box_dyn) .await .unwrap(); let mut all_rows: Vec<(i32, i32, String)> = session - .query("SELECT a, b, c FROM tab", ()) + .query_unpaged("SELECT a, b, c FROM tab", ()) .await .unwrap() .rows_typed::<(i32, i32, String)>() @@ -2109,11 +2163,11 @@ async fn test_unprepared_reprepare_in_batch() { let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); - session.query(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); session.use_keyspace(ks, false).await.unwrap(); session - .query( + .query_unpaged( "CREATE TABLE IF NOT EXISTS tab (a int, b int, c int, primary key (a, b, c))", &[], ) @@ -2149,7 +2203,7 @@ async fn test_unprepared_reprepare_in_batch() { session.batch(&batch, ((1, 2, 3), (4, 5))).await.unwrap(); let mut all_rows: Vec<(i32, i32, i32)> = session - .query("SELECT a, b, c FROM tab", ()) + .query_unpaged("SELECT a, b, c FROM tab", ()) .await .unwrap() .rows_typed::<(i32, i32, i32)>() @@ -2174,13 +2228,13 @@ async fn test_unprepared_reprepare_in_caching_session_execute() { let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); - session.query(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); session.use_keyspace(ks, false).await.unwrap(); let caching_session: 
CachingSession = CachingSession::from(session, 64); caching_session - .execute( + .execute_unpaged( "CREATE TABLE IF NOT EXISTS tab (a int, b int, c int, primary key (a, b, c))", &[], ) @@ -2190,7 +2244,7 @@ async fn test_unprepared_reprepare_in_caching_session_execute() { let insert_a_b_c = "INSERT INTO tab (a, b, c) VALUES (?, ?, ?)"; caching_session - .execute(insert_a_b_c, &(1, 2, 3)) + .execute_unpaged(insert_a_b_c, &(1, 2, 3)) .await .unwrap(); @@ -2199,24 +2253,24 @@ async fn test_unprepared_reprepare_in_caching_session_execute() { // During rename the query should fail assert!(caching_session - .execute(insert_a_b_c, &(1, 2, 3)) + .execute_unpaged(insert_a_b_c, &(1, 2, 3)) .await .is_err()); rename_caching(&caching_session, "c TO b").await; assert!(caching_session - .execute(insert_a_b_c, &(1, 2, 3)) + .execute_unpaged(insert_a_b_c, &(1, 2, 3)) .await .is_err()); rename_caching(&caching_session, "tmp_name TO c").await; // Insert values again (b and c are swapped so those are different inserts) caching_session - .execute(insert_a_b_c, &(1, 2, 3)) + .execute_unpaged(insert_a_b_c, &(1, 2, 3)) .await .unwrap(); let mut all_rows: Vec<(i32, i32, i32)> = caching_session - .execute("SELECT a, b, c FROM tab", &()) + .execute_unpaged("SELECT a, b, c FROM tab", &()) .await .unwrap() .rows_typed::<(i32, i32, i32)>() @@ -2234,16 +2288,16 @@ async fn test_views_in_schema_info() { let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); - session.query(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); session.use_keyspace(ks.clone(), false).await.unwrap(); session - .query("CREATE TABLE t(id int PRIMARY KEY, v int)", &[]) + .query_unpaged("CREATE 
TABLE t(id int PRIMARY KEY, v int)", &[]) .await .unwrap(); - session.query("CREATE MATERIALIZED VIEW mv1 AS SELECT * FROM t WHERE v IS NOT NULL PRIMARY KEY (v, id)", &[]).await.unwrap(); - session.query("CREATE MATERIALIZED VIEW mv2 AS SELECT id, v FROM t WHERE v IS NOT NULL PRIMARY KEY (v, id)", &[]).await.unwrap(); + session.query_unpaged("CREATE MATERIALIZED VIEW mv1 AS SELECT * FROM t WHERE v IS NOT NULL PRIMARY KEY (v, id)", &[]).await.unwrap(); + session.query_unpaged("CREATE MATERIALIZED VIEW mv2 AS SELECT id, v FROM t WHERE v IS NOT NULL PRIMARY KEY (v, id)", &[]).await.unwrap(); session.await_schema_agreement().await.unwrap(); session.refresh_metadata().await.unwrap(); @@ -2283,7 +2337,7 @@ async fn test_views_in_schema_info() { async fn assert_test_batch_table_rows_contain(sess: &Session, expected_rows: &[(i32, i32)]) { let selected_rows: BTreeSet<(i32, i32)> = sess - .query("SELECT a, b FROM test_batch_table", ()) + .query_unpaged("SELECT a, b FROM test_batch_table", ()) .await .unwrap() .rows_typed::<(i32, i32)>() @@ -2305,11 +2359,11 @@ async fn test_prepare_batch() { let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); - session.query(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); session.use_keyspace(ks.clone(), false).await.unwrap(); session - .query( + .query_unpaged( "CREATE TABLE test_batch_table (a int, b int, primary key (a, b))", (), ) @@ -2402,11 +2456,11 @@ async fn test_refresh_metadata_after_schema_agreement() { let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); - session.query(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 
'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); session.use_keyspace(ks.clone(), false).await.unwrap(); session - .query( + .query_unpaged( "CREATE TYPE udt (field1 int, field2 uuid, field3 text)", &[], ) @@ -2448,9 +2502,9 @@ async fn test_rate_limit_exceeded_exception() { } let ks = unique_keyspace_name(); - session.query(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); session.use_keyspace(ks.clone(), false).await.unwrap(); - session.query("CREATE TABLE tbl (pk int PRIMARY KEY, v int) WITH per_partition_rate_limit = {'max_writes_per_second': 1}", ()).await.unwrap(); + session.query_unpaged("CREATE TABLE tbl (pk int PRIMARY KEY, v int) WITH per_partition_rate_limit = {'max_writes_per_second': 1}", ()).await.unwrap(); let stmt = session .prepare("INSERT INTO tbl (pk, v) VALUES (?, ?)") @@ -2463,7 +2517,7 @@ async fn test_rate_limit_exceeded_exception() { let mut maybe_err = None; for _ in 0..1000 { - match session.execute(&stmt, (123, 456)).await { + match session.execute_unpaged(&stmt, (123, 456)).await { Ok(_) => {} // Try again Err(err) => { maybe_err = Some(err); @@ -2492,11 +2546,11 @@ async fn test_batch_lwts() { if scylla_supports_tablets(&session).await { create_ks += " and TABLETS = { 'enabled': false}"; } - session.query(create_ks, &[]).await.unwrap(); + session.query_unpaged(create_ks, &[]).await.unwrap(); session.use_keyspace(ks.clone(), false).await.unwrap(); session - .query( + .query_unpaged( "CREATE TABLE tab (p1 int, c1 int, r1 int, r2 int, primary key (p1, 
c1))", (), ) @@ -2504,7 +2558,7 @@ async fn test_batch_lwts() { .unwrap(); session - .query("INSERT INTO tab (p1, c1, r1, r2) VALUES (0, 0, 0, 0)", ()) + .query_unpaged("INSERT INTO tab (p1, c1, r1, r2) VALUES (0, 0, 0, 0)", ()) .await .unwrap(); @@ -2607,7 +2661,7 @@ async fn test_keyspaces_to_fetch() { let session_default = create_new_session_builder().build().await.unwrap(); for ks in [&ks1, &ks2] { session_default - .query(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]) + .query_unpaged(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]) .await .unwrap(); } @@ -2687,10 +2741,10 @@ async fn test_iter_works_when_retry_policy_returns_ignore_write_error() { if scylla_supports_tablets(&session).await { create_ks += " and TABLETS = { 'enabled': false}"; } - session.query(create_ks, ()).await.unwrap(); + session.query_unpaged(create_ks, ()).await.unwrap(); session.use_keyspace(ks, true).await.unwrap(); session - .query("CREATE TABLE t (pk int PRIMARY KEY, v int)", ()) + .query_unpaged("CREATE TABLE t (pk int PRIMARY KEY, v int)", ()) .await .unwrap(); @@ -2722,9 +2776,9 @@ async fn test_iter_methods_with_modification_statements() { let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); - session.query(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); session - .query( + .query_unpaged( format!( "CREATE TABLE IF NOT EXISTS {}.t (a int, b int, c text, primary key (a, b))", ks @@ -2762,7 +2816,7 @@ async fn test_get_keyspace_name() { // No keyspace is set in config, so get_keyspace() should return 
None. let session = create_new_session_builder().build().await.unwrap(); assert_eq!(session.get_keyspace(), None); - session.query(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); assert_eq!(session.get_keyspace(), None); // Call use_keyspace(), get_keyspace now should return the new keyspace name @@ -2788,7 +2842,7 @@ async fn simple_strategy_test() { let session = create_new_session_builder().build().await.unwrap(); session - .query( + .query_unpaged( format!( "CREATE KEYSPACE {} WITH REPLICATION = \ {{'class': 'SimpleStrategy', 'replication_factor': 1}}", @@ -2800,7 +2854,7 @@ async fn simple_strategy_test() { .unwrap(); session - .query( + .query_unpaged( format!( "CREATE TABLE {}.tab (p int, c int, r int, PRIMARY KEY (p, c, r))", ks @@ -2811,7 +2865,7 @@ async fn simple_strategy_test() { .unwrap(); session - .query( + .query_unpaged( format!("INSERT INTO {}.tab (p, c, r) VALUES (1, 2, 3)", ks), (), ) @@ -2819,7 +2873,7 @@ async fn simple_strategy_test() { .unwrap(); session - .query( + .query_unpaged( format!("INSERT INTO {}.tab (p, c, r) VALUES (?, ?, ?)", ks), (4, 5, 6), ) @@ -2831,10 +2885,10 @@ async fn simple_strategy_test() { .await .unwrap(); - session.execute(&prepared, (7, 8, 9)).await.unwrap(); + session.execute_unpaged(&prepared, (7, 8, 9)).await.unwrap(); let mut rows: Vec<(i32, i32, i32)> = session - .query(format!("SELECT p, c, r FROM {}.tab", ks), ()) + .query_unpaged(format!("SELECT p, c, r FROM {}.tab", ks), ()) .await .unwrap() .rows_typed::<(i32, i32, i32)>() @@ -2851,7 +2905,7 @@ async fn test_manual_primary_key_computation() { // Setup session let ks = unique_keyspace_name(); let session = create_new_session_builder().build().await.unwrap(); - 
session.query(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); session.use_keyspace(&ks, true).await.unwrap(); async fn assert_tokens_equal( @@ -2866,7 +2920,7 @@ async fn test_manual_primary_key_computation() { .unwrap(); session - .execute(prepared, all_values_in_query_order) + .execute_unpaged(prepared, all_values_in_query_order) .await .unwrap(); @@ -2886,7 +2940,7 @@ async fn test_manual_primary_key_computation() { // Single-column partition key { session - .query( + .query_unpaged( "CREATE TABLE IF NOT EXISTS t2 (a int, b int, c text, primary key (a, b))", &[], ) @@ -2917,7 +2971,7 @@ async fn test_manual_primary_key_computation() { // Composite partition key { session - .query("CREATE TABLE IF NOT EXISTS complex_pk (a int, b int, c text, d int, e int, primary key ((a,b,c),d))", &[]) + .query_unpaged("CREATE TABLE IF NOT EXISTS complex_pk (a int, b int, c text, d int, e int, primary key ((a,b,c),d))", &[]) .await .unwrap(); diff --git a/scylla/src/transport/silent_prepare_batch_test.rs b/scylla/src/transport/silent_prepare_batch_test.rs index dd5ef0a129..ece8d1d3fc 100644 --- a/scylla/src/transport/silent_prepare_batch_test.rs +++ b/scylla/src/transport/silent_prepare_batch_test.rs @@ -12,11 +12,11 @@ async fn test_quietly_prepare_batch() { let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); - session.query(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), 
&[]).await.unwrap(); session.use_keyspace(ks.clone(), false).await.unwrap(); session - .query( + .query_unpaged( "CREATE TABLE test_batch_table (a int, b int, primary key (a, b))", (), ) @@ -93,7 +93,7 @@ async fn test_quietly_prepare_batch() { async fn assert_test_batch_table_rows_contain(sess: &Session, expected_rows: &[(i32, i32)]) { let selected_rows: BTreeSet<(i32, i32)> = sess - .query("SELECT a, b FROM test_batch_table", ()) + .query_unpaged("SELECT a, b FROM test_batch_table", ()) .await .unwrap() .rows_typed::<(i32, i32)>() diff --git a/scylla/src/utils/test_utils.rs b/scylla/src/utils/test_utils.rs index 1b0e0a02d4..6c52fde355 100644 --- a/scylla/src/utils/test_utils.rs +++ b/scylla/src/utils/test_utils.rs @@ -43,7 +43,7 @@ pub(crate) async fn supports_feature(session: &Session, feature: &str) -> bool { } let (features,): (Option,) = session - .query("SELECT supported_features FROM system.local", ()) + .query_unpaged("SELECT supported_features FROM system.local", ()) .await .unwrap() .single_row_typed() @@ -94,7 +94,7 @@ pub fn create_new_session_builder() -> GenericSessionBuilder bool { let result = session - .query( + .query_unpaged( "select column_name from system_schema.columns where keyspace_name = 'system_schema' and table_name = 'scylla_keyspaces' diff --git a/scylla/tests/integration/consistency.rs b/scylla/tests/integration/consistency.rs index 2962795834..6402a9ba9d 100644 --- a/scylla/tests/integration/consistency.rs +++ b/scylla/tests/integration/consistency.rs @@ -60,10 +60,10 @@ const CREATE_TABLE_STR: &str = "CREATE TABLE consistency_tests (a int, b int, PR const QUERY_STR: &str = "INSERT INTO consistency_tests (a, b) VALUES (?, 1)"; async fn create_schema(session: &Session, ks: &str) { - session.query(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 3}}", ks), &[]).await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = 
{{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 3}}", ks), &[]).await.unwrap(); session.use_keyspace(ks, false).await.unwrap(); - session.query(CREATE_TABLE_STR, &[]).await.unwrap(); + session.query_unpaged(CREATE_TABLE_STR, &[]).await.unwrap(); } // The following functions perform a request with consistencies set directly on a statement. @@ -76,7 +76,7 @@ async fn query_consistency_set_directly( let mut query = query.clone(); query.set_consistency(c); query.set_serial_consistency(sc); - session.query(query.clone(), (1,)).await.unwrap(); + session.query_unpaged(query.clone(), (1,)).await.unwrap(); session.query_iter(query, (1,)).await.unwrap(); } @@ -89,7 +89,7 @@ async fn execute_consistency_set_directly( let mut prepared = prepared.clone(); prepared.set_consistency(c); prepared.set_serial_consistency(sc); - session.execute(&prepared, (1,)).await.unwrap(); + session.execute_unpaged(&prepared, (1,)).await.unwrap(); session.execute_iter(prepared, (1,)).await.unwrap(); } @@ -113,7 +113,7 @@ async fn query_consistency_set_on_exec_profile( ) { let mut query = query.clone(); query.set_execution_profile_handle(Some(profile)); - session.query(query.clone(), (1,)).await.unwrap(); + session.query_unpaged(query.clone(), (1,)).await.unwrap(); session.query_iter(query, (1,)).await.unwrap(); } @@ -124,7 +124,7 @@ async fn execute_consistency_set_on_exec_profile( ) { let mut prepared = prepared.clone(); prepared.set_execution_profile_handle(Some(profile)); - session.execute(&prepared, (1,)).await.unwrap(); + session.execute_unpaged(&prepared, (1,)).await.unwrap(); session.execute_iter(prepared, (1,)).await.unwrap(); } @@ -222,7 +222,7 @@ async fn check_for_all_consistencies_and_setting_options< rx = after_session_init(rx).await; session_with_consistencies - .query(QUERY_STR, (1,)) + .query_unpaged(QUERY_STR, (1,)) .await .unwrap(); rx = check_consistencies(consistency, serial_consistency, rx).await; @@ -233,7 +233,7 @@ async fn 
check_for_all_consistencies_and_setting_options< rx = check_consistencies(consistency, serial_consistency, rx).await; session_with_consistencies - .execute(&prepared, (1,)) + .execute_unpaged(&prepared, (1,)) .await .unwrap(); rx = check_consistencies(consistency, serial_consistency, rx).await; @@ -479,5 +479,5 @@ async fn consistency_allows_for_paxos_selects() { let mut query = Query::from("SELECT host_id FROM system.peers WHERE peer = '127.0.0.1'"); query.set_consistency(Consistency::Serial); - session.query(query, ()).await.unwrap(); + session.query_unpaged(query, ()).await.unwrap(); } diff --git a/scylla/tests/integration/execution_profiles.rs b/scylla/tests/integration/execution_profiles.rs index 59f95dfa88..cc571ef71b 100644 --- a/scylla/tests/integration/execution_profiles.rs +++ b/scylla/tests/integration/execution_profiles.rs @@ -169,9 +169,9 @@ async fn test_execution_profiles() { let ks = unique_keyspace_name(); /* Prepare schema */ - session.query(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 3}}", ks), &[]).await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 3}}", ks), &[]).await.unwrap(); session - .query( + .query_unpaged( format!( "CREATE TABLE IF NOT EXISTS {}.t (a int, b int, c text, primary key (a, b))", ks @@ -192,13 +192,13 @@ async fn test_execution_profiles() { /* Test load balancing and retry policy */ // Run on default per-session execution profile - session.query(query.clone(), &[]).await.unwrap(); + session.query_unpaged(query.clone(), &[]).await.unwrap(); let report1 = profile_rx.recv().await.unwrap(); let report2 = profile_rx.recv().await.unwrap(); assert_matches!((report1, report2), ((Report::LoadBalancing, 1), (Report::RetryPolicy, 1)) | ((Report::RetryPolicy, 1), (Report::LoadBalancing, 1))); profile_rx.try_recv().unwrap_err(); - 
session.execute(&prepared, &[]).await.unwrap(); + session.execute_unpaged(&prepared, &[]).await.unwrap(); let report1 = profile_rx.recv().await.unwrap(); let report2 = profile_rx.recv().await.unwrap(); assert_matches!((report1, report2), ((Report::LoadBalancing, 1), (Report::RetryPolicy, 1)) | ((Report::RetryPolicy, 1), (Report::LoadBalancing, 1))); @@ -212,14 +212,14 @@ async fn test_execution_profiles() { // Run on query-specific execution profile query.set_execution_profile_handle(Some(profile2.clone().into_handle())); - session.query(query.clone(), &[]).await.unwrap(); + session.query_unpaged(query.clone(), &[]).await.unwrap(); let report1 = profile_rx.recv().await.unwrap(); let report2 = profile_rx.recv().await.unwrap(); assert_matches!((report1, report2), ((Report::LoadBalancing, 2), (Report::RetryPolicy, 2)) | ((Report::RetryPolicy, 2), (Report::LoadBalancing, 2))); profile_rx.try_recv().unwrap_err(); prepared.set_execution_profile_handle(Some(profile2.clone().into_handle())); - session.execute(&prepared, &[]).await.unwrap(); + session.execute_unpaged(&prepared, &[]).await.unwrap(); let report1 = profile_rx.recv().await.unwrap(); let report2 = profile_rx.recv().await.unwrap(); assert_matches!((report1, report2), ((Report::LoadBalancing, 2), (Report::RetryPolicy, 2)) | ((Report::RetryPolicy, 2), (Report::LoadBalancing, 2))); @@ -234,14 +234,14 @@ async fn test_execution_profiles() { // Run again on default per-session execution profile query.set_execution_profile_handle(None); - session.query(query.clone(), &[]).await.unwrap(); + session.query_unpaged(query.clone(), &[]).await.unwrap(); let report1 = profile_rx.recv().await.unwrap(); let report2 = profile_rx.recv().await.unwrap(); assert_matches!((report1, report2), ((Report::LoadBalancing, 1), (Report::RetryPolicy, 1)) | ((Report::RetryPolicy, 1), (Report::LoadBalancing, 1))); profile_rx.try_recv().unwrap_err(); prepared.set_execution_profile_handle(None); - session.execute(&prepared, &[]).await.unwrap(); + 
session.execute_unpaged(&prepared, &[]).await.unwrap(); let report1 = profile_rx.recv().await.unwrap(); let report2 = profile_rx.recv().await.unwrap(); assert_matches!((report1, report2), ((Report::LoadBalancing, 1), (Report::RetryPolicy, 1)) | ((Report::RetryPolicy, 1), (Report::LoadBalancing, 1))); @@ -267,12 +267,12 @@ async fn test_execution_profiles() { profile_rx.try_recv().unwrap_err(); // Run non-LWT on default per-session execution profile - session.query(query.clone(), &[]).await.unwrap_err(); + session.query_unpaged(query.clone(), &[]).await.unwrap_err(); let report_consistency = consistency_rx.recv().await.unwrap(); assert_matches!(report_consistency, Consistency::One); consistency_rx.try_recv().unwrap_err(); - session.execute(&prepared, &[]).await.unwrap_err(); + session.execute_unpaged(&prepared, &[]).await.unwrap_err(); let report_consistency = consistency_rx.recv().await.unwrap(); assert_matches!(report_consistency, Consistency::One); consistency_rx.try_recv().unwrap_err(); @@ -284,13 +284,13 @@ async fn test_execution_profiles() { // Run on statement-specific execution profile query.set_execution_profile_handle(Some(profile2.clone().into_handle())); - session.query(query.clone(), &[]).await.unwrap_err(); + session.query_unpaged(query.clone(), &[]).await.unwrap_err(); let report_consistency = consistency_rx.recv().await.unwrap(); assert_matches!(report_consistency, Consistency::Two); consistency_rx.try_recv().unwrap_err(); prepared.set_execution_profile_handle(Some(profile2.clone().into_handle())); - session.execute(&prepared, &[]).await.unwrap_err(); + session.execute_unpaged(&prepared, &[]).await.unwrap_err(); let report_consistency = consistency_rx.recv().await.unwrap(); assert_matches!(report_consistency, Consistency::Two); consistency_rx.try_recv().unwrap_err(); @@ -303,13 +303,13 @@ async fn test_execution_profiles() { // Run with statement-set specific options query.set_consistency(Consistency::Three); - session.query(query.clone(), 
&[]).await.unwrap_err(); + session.query_unpaged(query.clone(), &[]).await.unwrap_err(); let report_consistency = consistency_rx.recv().await.unwrap(); assert_matches!(report_consistency, Consistency::Three); consistency_rx.try_recv().unwrap_err(); prepared.set_consistency(Consistency::Three); - session.execute(&prepared, &[]).await.unwrap_err(); + session.execute_unpaged(&prepared, &[]).await.unwrap_err(); let report_consistency = consistency_rx.recv().await.unwrap(); assert_matches!(report_consistency, Consistency::Three); consistency_rx.try_recv().unwrap_err(); diff --git a/scylla/tests/integration/lwt_optimisation.rs b/scylla/tests/integration/lwt_optimisation.rs index da2c89bdef..689a352b39 100644 --- a/scylla/tests/integration/lwt_optimisation.rs +++ b/scylla/tests/integration/lwt_optimisation.rs @@ -73,11 +73,11 @@ async fn if_lwt_optimisation_mark_offered_then_negotiatied_and_lwt_routed_optima if scylla_supports_tablets(&session).await { create_ks += " and TABLETS = { 'enabled': false}"; } - session.query(create_ks, &[]).await.unwrap(); + session.query_unpaged(create_ks, &[]).await.unwrap(); session.use_keyspace(ks, false).await.unwrap(); session - .query("CREATE TABLE t (a int primary key, b int)", &[]) + .query_unpaged("CREATE TABLE t (a int primary key, b int)", &[]) .await .unwrap(); @@ -141,14 +141,14 @@ async fn if_lwt_optimisation_mark_offered_then_negotiatied_and_lwt_routed_optima // Alternatively, we could give up this part of the test and only test LWT part, but then // we couldn't be sure that in non-LWT case the driver truly chooses various replicas. for _ in 0..30 { - session.execute(&prepared_non_lwt, (MAGIC_MARK,)).await.unwrap(); + session.execute_unpaged(&prepared_non_lwt, (MAGIC_MARK,)).await.unwrap(); } assert_multiple_replicas_queried(&mut prepared_rxs); // We execute LWT statements, and... 
for _ in 0..15 { - session.execute(&prepared_lwt, (MAGIC_MARK,)).await.unwrap(); + session.execute_unpaged(&prepared_lwt, (MAGIC_MARK,)).await.unwrap(); } if supports_optimisation_mark { diff --git a/scylla/tests/integration/new_session.rs b/scylla/tests/integration/new_session.rs index 8d3d351bed..2f41c39e32 100644 --- a/scylla/tests/integration/new_session.rs +++ b/scylla/tests/integration/new_session.rs @@ -20,7 +20,7 @@ async fn proceed_if_only_some_hostnames_are_invalid() { .await .unwrap(); session - .query("SELECT host_id FROM system.local", &[]) + .query_unpaged("SELECT host_id FROM system.local", &[]) .await .unwrap(); } diff --git a/scylla/tests/integration/retries.rs b/scylla/tests/integration/retries.rs index bd928aca6e..92bf1613c7 100644 --- a/scylla/tests/integration/retries.rs +++ b/scylla/tests/integration/retries.rs @@ -36,10 +36,10 @@ async fn speculative_execution_is_fired() { .unwrap(); let ks = unique_keyspace_name(); - session.query(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 3}}", ks), &[]).await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 3}}", ks), &[]).await.unwrap(); session.use_keyspace(ks, false).await.unwrap(); session - .query("CREATE TABLE t (a int primary key)", &[]) + .query_unpaged("CREATE TABLE t (a int primary key)", &[]) .await .unwrap(); @@ -56,7 +56,7 @@ async fn speculative_execution_is_fired() { info!("--------------------- first query - no rules ----------------"); // first run before any rules - session.query(q.clone(), (3,)).await.unwrap(); + session.query_unpaged(q.clone(), (3,)).await.unwrap(); info!("--------------------- second query - 0 and 2 nodes not responding ----------------"); running_proxy.running_nodes[0] @@ -64,7 +64,7 @@ async fn speculative_execution_is_fired() { running_proxy.running_nodes[2] 
.change_request_rules(Some(vec![drop_frame_rule.clone()])); - session.query(q.clone(), (2,)).await.unwrap(); + session.query_unpaged(q.clone(), (2,)).await.unwrap(); info!("--------------------- third query - 0 and 1 nodes not responding ----------------"); running_proxy.running_nodes[2] @@ -72,7 +72,7 @@ async fn speculative_execution_is_fired() { running_proxy.running_nodes[1] .change_request_rules(Some(vec![drop_frame_rule.clone()])); - session.query(q.clone(), (1,)).await.unwrap(); + session.query_unpaged(q.clone(), (1,)).await.unwrap(); info!("--------------------- fourth query - all nodes not responding ----------------"); @@ -80,7 +80,7 @@ async fn speculative_execution_is_fired() { .change_request_rules(Some(vec![drop_frame_rule])); tokio::select! { - res = session.query(q, (0,)) => panic!("Rules did not work: received response {:?}", res), + res = session.query_unpaged(q, (0,)) => panic!("Rules did not work: received response {:?}", res), _ = tokio::time::sleep(TIMEOUT_PER_REQUEST) => (), }; @@ -112,10 +112,10 @@ async fn retries_occur() { .unwrap(); let ks = unique_keyspace_name(); - session.query(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 3}}", ks), &[]).await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 3}}", ks), &[]).await.unwrap(); session.use_keyspace(ks, false).await.unwrap(); session - .query("CREATE TABLE t (a int primary key)", &[]) + .query_unpaged("CREATE TABLE t (a int primary key)", &[]) .await .unwrap(); @@ -131,7 +131,7 @@ async fn retries_occur() { info!("--------------------- BEGINNING main test part ----------------"); info!("--------------------- first query - no rules ----------------"); - session.query(q.clone(), (3,)).await.unwrap(); + session.query_unpaged(q.clone(), (3,)).await.unwrap(); info!("--------------------- second query - 0 and 2 
nodes not responding ----------------"); running_proxy.running_nodes[0] @@ -139,19 +139,19 @@ async fn retries_occur() { running_proxy.running_nodes[2] .change_request_rules(Some(vec![forge_error_rule.clone()])); - session.query(q.clone(), (2,)).await.unwrap(); + session.query_unpaged(q.clone(), (2,)).await.unwrap(); info!("--------------------- third query - all nodes not responding ----------------"); running_proxy.running_nodes[1] .change_request_rules(Some(vec![forge_error_rule])); - session.query(q.clone(), (1,)).await.unwrap_err(); + session.query_unpaged(q.clone(), (1,)).await.unwrap_err(); info!("--------------------- fourth query - 0 and 1 nodes not responding ----------------"); running_proxy.running_nodes[2] .change_request_rules(None); - session.query(q, (1,)).await.unwrap(); + session.query_unpaged(q, (1,)).await.unwrap(); info!("--------------------- FINISHING main test part ----------------"); diff --git a/scylla/tests/integration/shards.rs b/scylla/tests/integration/shards.rs index ae084cca7a..b22cfc397b 100644 --- a/scylla/tests/integration/shards.rs +++ b/scylla/tests/integration/shards.rs @@ -42,9 +42,9 @@ async fn test_consistent_shard_awareness() { if scylla_supports_tablets(&session).await { create_ks += " and TABLETS = { 'enabled': false}"; } - session.query(create_ks, &[]).await.unwrap(); + session.query_unpaged(create_ks, &[]).await.unwrap(); session - .query( + .query_unpaged( format!( "CREATE TABLE IF NOT EXISTS {}.t (a int, b int, c text, primary key (a, b))", ks @@ -71,7 +71,7 @@ async fn test_consistent_shard_awareness() { for values in value_lists { for _ in 0..10 { - session.execute(&prepared, values).await.unwrap(); + session.execute_unpaged(&prepared, values).await.unwrap(); } for rx in feedback_rxs.iter_mut() { assert_one_shard_queried(rx); diff --git a/scylla/tests/integration/silent_prepare_query.rs b/scylla/tests/integration/silent_prepare_query.rs index 658aa96edf..d814f70a8a 100644 --- 
a/scylla/tests/integration/silent_prepare_query.rs +++ b/scylla/tests/integration/silent_prepare_query.rs @@ -27,10 +27,10 @@ async fn test_prepare_query_with_values() { .unwrap(); let ks = unique_keyspace_name(); - session.query(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 3}}", ks), &[]).await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 3}}", ks), &[]).await.unwrap(); session.use_keyspace(ks, false).await.unwrap(); session - .query("CREATE TABLE t (a int primary key)", &[]) + .query_unpaged("CREATE TABLE t (a int primary key)", &[]) .await .unwrap(); @@ -46,7 +46,7 @@ async fn test_prepare_query_with_values() { .change_request_rules(Some(vec![drop_unprepared_frame_rule])); tokio::select! { - _res = session.query(q, (0,)) => (), + _res = session.query_unpaged(q, (0,)) => (), _ = tokio::time::sleep(TIMEOUT_PER_REQUEST) => panic!("Rules did not work: no received response"), }; @@ -78,10 +78,10 @@ async fn test_query_with_no_values() { .unwrap(); let ks = unique_keyspace_name(); - session.query(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 3}}", ks), &[]).await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 3}}", ks), &[]).await.unwrap(); session.use_keyspace(ks, false).await.unwrap(); session - .query("CREATE TABLE t (a int primary key)", &[]) + .query_unpaged("CREATE TABLE t (a int primary key)", &[]) .await .unwrap(); @@ -97,7 +97,7 @@ async fn test_query_with_no_values() { .change_request_rules(Some(vec![drop_prepared_frame_rule])); tokio::select! 
{ - _res = session.query(q, ()) => (), + _res = session.query_unpaged(q, ()) => (), _ = tokio::time::sleep(TIMEOUT_PER_REQUEST) => panic!("Rules did not work: no received response"), }; diff --git a/scylla/tests/integration/skip_metadata_optimization.rs b/scylla/tests/integration/skip_metadata_optimization.rs index 16a72b5d31..f530e5bd43 100644 --- a/scylla/tests/integration/skip_metadata_optimization.rs +++ b/scylla/tests/integration/skip_metadata_optimization.rs @@ -28,13 +28,13 @@ async fn test_skip_result_metadata() { .unwrap(); let ks = unique_keyspace_name(); - session.query(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 3}}", ks), &[]).await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 3}}", ks), &[]).await.unwrap(); session.use_keyspace(ks, false).await.unwrap(); session - .query("CREATE TABLE t (a int primary key, b int, c text)", &[]) + .query_unpaged("CREATE TABLE t (a int primary key, b int, c text)", &[]) .await .unwrap(); - session.query("INSERT INTO t (a, b, c) VALUES (1, 2, 'foo_filter_data')", &[]).await.unwrap(); + session.query_unpaged("INSERT INTO t (a, b, c) VALUES (1, 2, 'foo_filter_data')", &[]).await.unwrap(); let mut prepared = session.prepare("SELECT a, b, c FROM t").await.unwrap(); @@ -56,7 +56,7 @@ async fn test_skip_result_metadata() { rx: &mut tokio::sync::mpsc::UnboundedReceiver<(ResponseFrame, Option)>, predicate: impl FnOnce(i32) -> bool ) { - session.execute(prepared, &[]).await.unwrap(); + session.execute_unpaged(prepared, &[]).await.unwrap(); let (frame, _shard) = rx.recv().await.unwrap(); let mut buf = &*frame.body; @@ -83,12 +83,12 @@ async fn test_skip_result_metadata() { { let ks = unique_keyspace_name(); - session.query(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 
1}}", ks), &[]).await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); session.use_keyspace(ks, true).await.unwrap(); type RowT = (i32, i32, String); session - .query( + .query_unpaged( "CREATE TABLE IF NOT EXISTS t2 (a int, b int, c text, primary key (a, b))", &[], ) @@ -102,7 +102,7 @@ async fn test_skip_result_metadata() { for idx in 0..10 { session - .execute(&insert_stmt, (idx, idx + 1, "Some text")) + .execute_unpaged(&insert_stmt, (idx, idx + 1, "Some text")) .await .unwrap(); } @@ -111,7 +111,7 @@ async fn test_skip_result_metadata() { let select_query = "SELECT a, b, c FROM t2"; let rs = session - .query(select_query, ()) + .query_unpaged(select_query, ()) .await .unwrap() .rows_typed::() diff --git a/scylla/tests/integration/tablets.rs b/scylla/tests/integration/tablets.rs index 5f91b43d39..be52780fe8 100644 --- a/scylla/tests/integration/tablets.rs +++ b/scylla/tests/integration/tablets.rs @@ -199,7 +199,7 @@ async fn send_statement_everywhere( .build(); stmt.set_execution_profile_handle(Some(execution_profile.into_handle())); - async move { session.execute(&stmt, values_ref).await } + async move { session.execute_unpaged(&stmt, values_ref).await } }) }); @@ -223,7 +223,7 @@ async fn send_unprepared_query_everywhere( .build(); stmt.set_execution_profile_handle(Some(execution_profile.into_handle())); - async move { session.query(stmt, &()).await } + async move { session.query_unpaged(stmt, &()).await } }) }); @@ -251,7 +251,7 @@ fn count_tablet_feedbacks( async fn prepare_schema(session: &Session, ks: &str, table: &str, tablet_count: usize) { session - .query( + .query_unpaged( format!( "CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 2}} @@ -263,7 +263,7 @@ async fn prepare_schema(session: &Session, ks: &str, table: &str, tablet_count: .await 
.unwrap(); session - .query( + .query_unpaged( format!( "CREATE TABLE IF NOT EXISTS {}.{} (a int, b int, c text, primary key (a, b))", ks, table @@ -366,9 +366,11 @@ async fn test_default_policy_is_tablet_aware() { values, prepared.calculate_token(&values).unwrap().unwrap().value() ); - try_join_all((0..100).map(|_| async { session.execute(&prepared, values).await })) - .await - .unwrap(); + try_join_all( + (0..100).map(|_| async { session.execute_unpaged(&prepared, values).await }), + ) + .await + .unwrap(); let feedbacks: usize = feedback_rxs.iter_mut().map(count_tablet_feedbacks).sum(); if feedbacks > 0 { total_tablets_with_feedback += 1; @@ -386,9 +388,11 @@ async fn test_default_policy_is_tablet_aware() { values, prepared.calculate_token(&values).unwrap().unwrap().value() ); - try_join_all((0..100).map(|_| async { session.execute(&prepared, values).await })) - .await - .unwrap(); + try_join_all( + (0..100).map(|_| async { session.execute_unpaged(&prepared, values).await }), + ) + .await + .unwrap(); let feedbacks: usize = feedback_rxs.iter_mut().map(count_tablet_feedbacks).sum(); assert_eq!(feedbacks, 0); } @@ -578,7 +582,9 @@ async fn test_lwt_optimization_works_with_tablets() { .value() ); try_join_all((0..100).map(|_| async { - session.execute(&prepared_lwt_update, &("abc", a, b)).await + session + .execute_unpaged(&prepared_lwt_update, &("abc", a, b)) + .await })) .await .unwrap(); From 343b5006778ec21d4c646650dfc7d1a3aae2c1c6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wojciech=20Przytu=C5=82a?= Date: Mon, 26 Aug 2024 14:36:29 +0200 Subject: [PATCH 11/19] connection: make {query,execute}(_raw) methods unpaged As these methods are analogous to those on Session, they are made unpaged in a manner similar to those. In the next commit, all of them have the `_unpaged` suffix appended to explicitly state that they perform unpaged queries.
--- scylla/src/transport/connection.rs | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/scylla/src/transport/connection.rs b/scylla/src/transport/connection.rs index 7470fbf0ad..23911ae8d1 100644 --- a/scylla/src/transport/connection.rs +++ b/scylla/src/transport/connection.rs @@ -850,15 +850,13 @@ impl Connection { paging_state: PagingState, ) -> Result { // This method is used only for driver internal queries, so no need to consult execution profile here. - let page_size = query.get_validated_page_size(); - self.query_raw_with_consistency( query, query .config .determine_consistency(self.config.default_consistency), query.config.serial_consistency.flatten(), - Some(page_size), + None, paging_state, ) .await @@ -909,8 +907,6 @@ impl Connection { paging_state: PagingState, ) -> Result { // This method is used only for driver internal queries, so no need to consult execution profile here. - let page_size = prepared.get_validated_page_size(); - self.execute_raw_with_consistency( prepared, &values, @@ -918,7 +914,7 @@ impl Connection { .config .determine_consistency(self.config.default_consistency), prepared.config.serial_consistency.flatten(), - Some(page_size), + None, paging_state, ) .await From dfe79e963b1b5e972a1612ad08981184e1ba98ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wojciech=20Przytu=C5=82a?= Date: Mon, 26 Aug 2024 15:14:30 +0200 Subject: [PATCH 12/19] connection: append "_unpaged" to {query,execute}_{raw} In order to make it explicit that the requests using those methods are performed without paging, their names are adjusted. 
--- scylla/src/transport/connection.rs | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/scylla/src/transport/connection.rs b/scylla/src/transport/connection.rs index 23911ae8d1..e5129e7425 100644 --- a/scylla/src/transport/connection.rs +++ b/scylla/src/transport/connection.rs @@ -835,16 +835,19 @@ impl Connection { } #[allow(dead_code)] - pub(crate) async fn query(&self, query: impl Into) -> Result { + pub(crate) async fn query_unpaged( + &self, + query: impl Into, + ) -> Result { // This method is used only for driver internal queries, so no need to consult execution profile here. let query: Query = query.into(); - self.query_raw(&query, PagingState::start()) + self.query_raw_unpaged(&query, PagingState::start()) .await .and_then(QueryResponse::into_query_result) } - pub(crate) async fn query_raw( + pub(crate) async fn query_raw_unpaged( &self, query: &Query, paging_state: PagingState, @@ -888,19 +891,19 @@ impl Connection { } #[allow(dead_code)] - pub(crate) async fn execute( + pub(crate) async fn execute_unpaged( &self, prepared: &PreparedStatement, values: SerializedValues, ) -> Result { // This method is used only for driver internal queries, so no need to consult execution profile here. 
- self.execute_raw(prepared, values, PagingState::start()) + self.execute_raw_unpaged(prepared, values, PagingState::start()) .await .and_then(QueryResponse::into_query_result) } #[allow(dead_code)] - pub(crate) async fn execute_raw( + pub(crate) async fn execute_raw_unpaged( &self, prepared: &PreparedStatement, values: SerializedValues, @@ -1171,7 +1174,7 @@ impl Connection { false => format!("USE {}", keyspace_name.as_str()).into(), }; - let query_response = self.query_raw(&query, PagingState::start()).await?; + let query_response = self.query_raw_unpaged(&query, PagingState::start()).await?; match query_response.response { Response::Result(result::Result::SetKeyspace(set_keyspace)) => { @@ -2273,7 +2276,7 @@ mod tests { let values = prepared.serialize_values(&(*v,)).unwrap(); let fut = async { connection - .execute_raw(&prepared, values, PagingState::start()) + .execute_raw_unpaged(&prepared, values, PagingState::start()) .await }; insert_futures.push(fut); @@ -2358,7 +2361,7 @@ mod tests { .await .unwrap(); - connection.query("TRUNCATE t").await.unwrap(); + connection.query_unpaged("TRUNCATE t").await.unwrap(); let mut futs = Vec::new(); @@ -2378,7 +2381,7 @@ mod tests { .serialize_values(&(j, vec![j as u8; j as usize])) .unwrap(); let response = conn - .execute_raw(&prepared, values, PagingState::start()) + .execute_raw_unpaged(&prepared, values, PagingState::start()) .await .unwrap(); // QueryResponse might contain an error - make sure that there were no errors @@ -2397,7 +2400,7 @@ mod tests { // Check that everything was written properly let range_end = arithmetic_sequence_sum(NUM_BATCHES); let mut results = connection - .query("SELECT p, v FROM t") + .query_unpaged("SELECT p, v FROM t") .await .unwrap() .rows_typed::<(i32, Vec)>() From 6ae5fa6a090e4c7072edd81620dcdca874d8fa86 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wojciech=20Przytu=C5=82a?= Date: Mon, 26 Aug 2024 15:38:46 +0200 Subject: [PATCH 13/19] connection: use query_unpaged for some internal 
queries In no case for internal queries should we fetch only one page and ignore possible further ones. As both queries that were using query_single_page would return only one row anyway, the change should not affect semantics after all. --- scylla/src/transport/connection.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/scylla/src/transport/connection.rs b/scylla/src/transport/connection.rs index e5129e7425..df96c33df8 100644 --- a/scylla/src/transport/connection.rs +++ b/scylla/src/transport/connection.rs @@ -798,6 +798,7 @@ impl Connection { .await } + #[allow(dead_code)] pub(crate) async fn query_single_page( &self, query: impl Into, @@ -814,6 +815,7 @@ impl Connection { .await } + #[allow(dead_code)] pub(crate) async fn query_single_page_with_consistency( &self, query: impl Into, @@ -1218,7 +1220,7 @@ impl Connection { pub(crate) async fn fetch_schema_version(&self) -> Result { let (version_id,) = self - .query_single_page(LOCAL_VERSION) + .query_unpaged(LOCAL_VERSION) .await? .single_row_typed() .map_err(|err| match err { @@ -2553,7 +2555,7 @@ mod tests { // As everything is normal, these queries should succeed. for _ in 0..3 { tokio::time::sleep(Duration::from_millis(500)).await; - conn.query_single_page("SELECT host_id FROM system.local") + conn.query_unpaged("SELECT host_id FROM system.local") .await .unwrap(); } @@ -2573,7 +2575,7 @@ mod tests { // As the router is invalidated, all further queries should immediately // return error. - conn.query_single_page("SELECT host_id FROM system.local") + conn.query_unpaged("SELECT host_id FROM system.local") .await .unwrap_err(); From c5140c9cc8e87e891448d0de4c7dd7e4d0937867 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wojciech=20Przytu=C5=82a?= Date: Fri, 23 Aug 2024 07:24:02 +0200 Subject: [PATCH 14/19] codewide: rename {query,execute}_paged to %_single_page The name %_paged has long confused users. 
The new name indicates explicitly that only a single page is fetched with a single call to those methods. --- docs/source/queries/paged.md | 4 ++-- examples/select-paging.rs | 4 ++-- scylla/src/transport/caching_session.rs | 12 ++++++------ scylla/src/transport/session.rs | 8 ++++---- scylla/src/transport/session_test.rs | 4 ++-- .../tests/integration/skip_metadata_optimization.rs | 2 +- 6 files changed, 17 insertions(+), 17 deletions(-) diff --git a/docs/source/queries/paged.md b/docs/source/queries/paged.md index 8112c9308b..cff040d79d 100644 --- a/docs/source/queries/paged.md +++ b/docs/source/queries/paged.md @@ -121,7 +121,7 @@ use scylla::query::Query; let paged_query = Query::new("SELECT a, b, c FROM ks.t").with_page_size(6); let res1 = session.query(paged_query.clone(), &[]).await?; let res2 = session - .query_paged(paged_query.clone(), &[], res1.paging_state) + .query_single_page(paged_query.clone(), &[], res1.paging_state) .await?; # Ok(()) # } @@ -145,7 +145,7 @@ let paged_prepared = session .await?; let res1 = session.execute(&paged_prepared, &[]).await?; let res2 = session - .execute_paged(&paged_prepared, &[], res1.paging_state) + .execute_single_page(&paged_prepared, &[], res1.paging_state) .await?; # Ok(()) # } diff --git a/examples/select-paging.rs b/examples/select-paging.rs index 2c6b4e2d2a..340daea986 100644 --- a/examples/select-paging.rs +++ b/examples/select-paging.rs @@ -48,7 +48,7 @@ async fn main() -> Result<()> { let mut paging_state = PagingState::start(); loop { let res = session - .query_paged(paged_query.clone(), &[], paging_state) + .query_single_page(paged_query.clone(), &[], paging_state) .await?; let paging_state_response = res.paging_state_response.clone(); @@ -79,7 +79,7 @@ async fn main() -> Result<()> { let mut paging_state = PagingState::default(); loop { let res = session - .execute_paged(&paged_prepared, &[], paging_state) + .execute_single_page(&paged_prepared, &[], paging_state) .await?; let paging_state_response = 
res.paging_state_response.clone(); diff --git a/scylla/src/transport/caching_session.rs b/scylla/src/transport/caching_session.rs index 5b3d6ca1bf..74771e9799 100644 --- a/scylla/src/transport/caching_session.rs +++ b/scylla/src/transport/caching_session.rs @@ -91,8 +91,8 @@ where self.session.execute_iter(prepared, values).await } - /// Does the same thing as [`Session::execute_paged`] but uses the prepared statement cache - pub async fn execute_paged( + /// Does the same thing as [`Session::execute_single_page`] but uses the prepared statement cache + pub async fn execute_single_page( &self, query: impl Into, values: impl SerializeRow, @@ -101,7 +101,7 @@ where let query = query.into(); let prepared = self.add_prepared_statement_owned(query).await?; self.session - .execute_paged(&prepared, values, paging_state) + .execute_single_page(&prepared, values, paging_state) .await } @@ -364,16 +364,16 @@ mod tests { assert_eq!(1, session.cache.len()); } - /// Checks that caching works with execute_paged + /// Checks that caching works with execute_single_page #[tokio::test] - async fn test_execute_paged_cached() { + async fn test_execute_single_page_cached() { setup_tracing(); let session = create_caching_session().await; assert!(session.cache.is_empty()); let result = session - .execute_paged("select * from test_table", &[], PagingState::start()) + .execute_single_page("select * from test_table", &[], PagingState::start()) .await .unwrap(); diff --git a/scylla/src/transport/session.rs b/scylla/src/transport/session.rs index 7ed0bc301c..fe4ac88abb 100644 --- a/scylla/src/transport/session.rs +++ b/scylla/src/transport/session.rs @@ -632,14 +632,14 @@ impl Session { /// /// It is discouraged to use this method with non-empty values argument (`is_empty()` method from `SerializeRow` /// trait returns false). In such case, query first needs to be prepared (on a single connection), so - /// driver will perform 2 round trips instead of 1. 
Please use [`Session::execute_paged()`] instead. + /// driver will perform 2 round trips instead of 1. Please use [`Session::execute_single_page()`] instead. /// /// # Arguments /// /// * `query` - query to be performed /// * `values` - values bound to the query /// * `paging_state` - previously received paging state or [PagingState::start()] - pub async fn query_paged( + pub async fn query_single_page( &self, query: impl Into, values: impl SerializeRow, @@ -659,7 +659,7 @@ impl Session { /// /// It is discouraged to use this method with non-empty values argument (`is_empty()` method from `SerializeRow` /// trait returns false). In such case, query first needs to be prepared (on a single connection), so - /// driver will perform 2 round trips instead of 1. Please use [`Session::execute_paged()`] instead. + /// driver will perform 2 round trips instead of 1. Please use [`Session::execute_single_page()`] instead. /// /// # Arguments /// @@ -1011,7 +1011,7 @@ impl Session { .await } - pub async fn execute_paged( + pub async fn execute_single_page( &self, prepared: &PreparedStatement, values: impl SerializeRow, diff --git a/scylla/src/transport/session_test.rs b/scylla/src/transport/session_test.rs index 2d8d1d880e..0a634dbe11 100644 --- a/scylla/src/transport/session_test.rs +++ b/scylla/src/transport/session_test.rs @@ -149,7 +149,7 @@ async fn test_unprepared_statement() { let mut watchdog = 0; loop { let rs_manual = session - .query_paged(query.clone(), &[], paging_state) + .query_single_page(query.clone(), &[], paging_state) .await .unwrap(); results_from_manual_paging.append(&mut rs_manual.rows.unwrap()); @@ -291,7 +291,7 @@ async fn test_prepared_statement() { let mut watchdog = 0; loop { let rs_manual = session - .execute_paged(&prepared_paged, &[], paging_state) + .execute_single_page(&prepared_paged, &[], paging_state) .await .unwrap(); results_from_manual_paging.append(&mut rs_manual.rows.unwrap()); diff --git 
a/scylla/tests/integration/skip_metadata_optimization.rs b/scylla/tests/integration/skip_metadata_optimization.rs index f530e5bd43..5b419c65fb 100644 --- a/scylla/tests/integration/skip_metadata_optimization.rs +++ b/scylla/tests/integration/skip_metadata_optimization.rs @@ -127,7 +127,7 @@ async fn test_skip_result_metadata() { let mut watchdog = 0; loop { let rs_manual = session - .execute_paged(&prepared_paged, &[], paging_state) + .execute_single_page(&prepared_paged, &[], paging_state) .await .unwrap(); let paging_state_response = rs_manual.paging_state_response.clone(); From 4f8cf187e63037bf7e37679a33cbfb37bed6ca78 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wojciech=20Przytu=C5=82a?= Date: Tue, 20 Aug 2024 07:32:43 +0200 Subject: [PATCH 15/19] session: {query,execute}_single_page pass page state explicitly As an attempt to make paged queries' API more explicit, robust and self-explanatory, PagingState is decoupled from QueryResult. Instead, it's returned from {query,execute}_single_page as a second field of a pair, helping users not to forget about it. Moreover, internal and external APIs which expect that the query is not paged now issue an error tracing message and return ProtocolError error if a "more pages" page state is returned from the unpaged query. --- examples/select-paging.rs | 6 +-- scylla/src/lib.rs | 4 +- scylla/src/transport/caching_session.rs | 8 +-- scylla/src/transport/connection.rs | 48 ++++++++++++----- scylla/src/transport/query_result.rs | 5 -- scylla/src/transport/session.rs | 54 ++++++++++++------- scylla/src/transport/session_test.rs | 8 +-- .../integration/skip_metadata_optimization.rs | 3 +- 8 files changed, 83 insertions(+), 53 deletions(-) diff --git a/examples/select-paging.rs b/examples/select-paging.rs index 340daea986..b3a19e3249 100644 --- a/examples/select-paging.rs +++ b/examples/select-paging.rs @@ -47,11 +47,10 @@ async fn main() -> Result<()> { // Manual paging in a loop, unprepared statement. 
let mut paging_state = PagingState::start(); loop { - let res = session + let (res, paging_state_response) = session .query_single_page(paged_query.clone(), &[], paging_state) .await?; - let paging_state_response = res.paging_state_response.clone(); println!( "Paging state: {:#?} ({} rows)", paging_state_response, @@ -78,11 +77,10 @@ async fn main() -> Result<()> { // Manual paging in a loop, prepared statement. let mut paging_state = PagingState::default(); loop { - let res = session + let (res, paging_state_response) = session .execute_single_page(&paged_prepared, &[], paging_state) .await?; - let paging_state_response = res.paging_state_response.clone(); println!( "Paging state from the prepared statement execution: {:#?} ({} rows)", paging_state_response, diff --git a/scylla/src/lib.rs b/scylla/src/lib.rs index e7b9afb7ee..e55196b6e7 100644 --- a/scylla/src/lib.rs +++ b/scylla/src/lib.rs @@ -55,7 +55,7 @@ //! # async fn check_only_compiles(session: &Session) -> Result<(), Box> { //! // Insert an int and text into the table //! session -//! .query( +//! .query_unpaged( //! "INSERT INTO ks.tab (a, b) VALUES(?, ?)", //! (2_i32, "some text") //! ) @@ -76,7 +76,7 @@ //! //! // Read rows containing an int and text //! let rows_opt = session -//! .query("SELECT a, b FROM ks.tab", &[]) +//! .query_unpaged("SELECT a, b FROM ks.tab", &[]) //! .await? //! .rows; //! 
diff --git a/scylla/src/transport/caching_session.rs b/scylla/src/transport/caching_session.rs index 74771e9799..0449937956 100644 --- a/scylla/src/transport/caching_session.rs +++ b/scylla/src/transport/caching_session.rs @@ -1,7 +1,7 @@ use crate::batch::{Batch, BatchStatement}; use crate::prepared_statement::PreparedStatement; use crate::query::Query; -use crate::statement::PagingState; +use crate::statement::{PagingState, PagingStateResponse}; use crate::transport::errors::QueryError; use crate::transport::iterator::RowIterator; use crate::transport::partitioner::PartitionerName; @@ -97,7 +97,7 @@ where query: impl Into, values: impl SerializeRow, paging_state: PagingState, - ) -> Result { + ) -> Result<(QueryResult, PagingStateResponse), QueryError> { let query = query.into(); let prepared = self.add_prepared_statement_owned(query).await?; self.session @@ -325,7 +325,7 @@ mod tests { /// Checks that the same prepared statement is reused when executing the same query twice #[tokio::test] - async fn test_execute_cached() { + async fn test_execute_unpaged_cached() { setup_tracing(); let session = create_caching_session().await; let result = session @@ -372,7 +372,7 @@ mod tests { assert!(session.cache.is_empty()); - let result = session + let (result, _paging_state) = session .execute_single_page("select * from test_table", &[], PagingState::start()) .await .unwrap(); diff --git a/scylla/src/transport/connection.rs b/scylla/src/transport/connection.rs index df96c33df8..dcd23e680f 100644 --- a/scylla/src/transport/connection.rs +++ b/scylla/src/transport/connection.rs @@ -237,6 +237,13 @@ impl QueryResponse { }) } + pub(crate) fn into_query_result_and_paging_state( + self, + ) -> Result<(QueryResult, PagingStateResponse), QueryError> { + self.into_non_error_query_response()? 
+ .into_query_result_and_paging_state() + } + pub(crate) fn into_query_result(self) -> Result { self.into_non_error_query_response()?.into_query_result() } @@ -257,7 +264,9 @@ impl NonErrorQueryResponse { } } - pub(crate) fn into_query_result(self) -> Result { + pub(crate) fn into_query_result_and_paging_state( + self, + ) -> Result<(QueryResult, PagingStateResponse), QueryError> { let (rows, paging_state, col_specs, serialized_size) = match self.response { NonErrorResponse::Result(result::Result::Rows(rs)) => ( Some(rs.rows), @@ -273,14 +282,29 @@ impl NonErrorQueryResponse { } }; - Ok(QueryResult { - rows, - warnings: self.warnings, - tracing_id: self.tracing_id, - paging_state_response: paging_state, - col_specs, - serialized_size, - }) + Ok(( + QueryResult { + rows, + warnings: self.warnings, + tracing_id: self.tracing_id, + col_specs, + serialized_size, + }, + paging_state, + )) + } + + pub(crate) fn into_query_result(self) -> Result { + let (result, paging_state) = self.into_query_result_and_paging_state()?; + + if !paging_state.finished() { + let error_msg = "Internal driver API misuse or a server bug: nonfinished paging state\ + would be discarded by `NonErrorQueryResponse::into_query_result`"; + error!(error_msg); + return Err(QueryError::ProtocolError(error_msg)); + } + + Ok(result) } } #[cfg(feature = "ssl")] @@ -802,7 +826,7 @@ impl Connection { pub(crate) async fn query_single_page( &self, query: impl Into, - ) -> Result { + ) -> Result<(QueryResult, PagingStateResponse), QueryError> { let query: Query = query.into(); // This method is used only for driver internal queries, so no need to consult execution profile here. 
@@ -821,7 +845,7 @@ impl Connection { query: impl Into, consistency: Consistency, serial_consistency: Option, - ) -> Result { + ) -> Result<(QueryResult, PagingStateResponse), QueryError> { let query: Query = query.into(); let page_size = query.get_validated_page_size(); @@ -833,7 +857,7 @@ impl Connection { PagingState::start(), ) .await? - .into_query_result() + .into_query_result_and_paging_state() } #[allow(dead_code)] diff --git a/scylla/src/transport/query_result.rs b/scylla/src/transport/query_result.rs index 793800a23d..49a68f5c73 100644 --- a/scylla/src/transport/query_result.rs +++ b/scylla/src/transport/query_result.rs @@ -1,7 +1,6 @@ use crate::frame::response::cql_to_rust::{FromRow, FromRowError}; use crate::frame::response::result::ColumnSpec; use crate::frame::response::result::Row; -use crate::statement::PagingStateResponse; use crate::transport::session::{IntoTypedRows, TypedRowIter}; use thiserror::Error; use uuid::Uuid; @@ -19,8 +18,6 @@ pub struct QueryResult { pub warnings: Vec, /// CQL Tracing uuid - can only be Some if tracing is enabled for this query pub tracing_id: Option, - /// Paging state returned from the server - pub paging_state_response: PagingStateResponse, /// Column specification returned from the server pub col_specs: Vec, /// The original size of the serialized rows in request @@ -33,7 +30,6 @@ impl QueryResult { rows: None, warnings: Vec::new(), tracing_id: None, - paging_state_response: PagingStateResponse::NoMorePages, col_specs: Vec::new(), serialized_size: 0, } @@ -318,7 +314,6 @@ mod tests { rows: None, warnings: vec![], tracing_id: None, - paging_state_response: PagingStateResponse::NoMorePages, col_specs: vec![column_spec], serialized_size: 0, } diff --git a/scylla/src/transport/session.rs b/scylla/src/transport/session.rs index fe4ac88abb..727eddb1d3 100644 --- a/scylla/src/transport/session.rs +++ b/scylla/src/transport/session.rs @@ -30,7 +30,7 @@ use std::sync::atomic::Ordering; use std::sync::Arc; use 
std::time::Duration; use tokio::time::timeout; -use tracing::{debug, trace, trace_span, Instrument}; +use tracing::{debug, error, trace, trace_span, Instrument}; use uuid::Uuid; use super::connection::NonErrorQueryResponse; @@ -52,7 +52,7 @@ use crate::frame::response::result; use crate::prepared_statement::PreparedStatement; use crate::query::Query; use crate::routing::{Shard, Token}; -use crate::statement::{Consistency, PageSize, PagingState}; +use crate::statement::{Consistency, PageSize, PagingState, PagingStateResponse}; use crate::tracing::{TracingEvent, TracingInfo}; use crate::transport::cluster::{Cluster, ClusterData, ClusterNeatDebug}; use crate::transport::connection::{Connection, ConnectionConfig, VerifiedKeyspaceName}; @@ -589,7 +589,7 @@ impl Session { /// # async fn check_only_compiles(session: &Session) -> Result<(), Box> { /// // Insert an int and text into a table /// session - /// .query( + /// .query_unpaged( /// "INSERT INTO ks.tab (a, b) VALUES(?, ?)", /// (2_i32, "some text") /// ) @@ -605,7 +605,7 @@ impl Session { /// /// // Read rows containing an int and text /// let rows_opt = session - /// .query("SELECT a, b FROM ks.tab", &[]) + /// .query_unpaged("SELECT a, b FROM ks.tab", &[]) /// .await? /// .rows; /// @@ -624,8 +624,15 @@ impl Session { values: impl SerializeRow, ) -> Result { let query = query.into(); - self.query_inner(&query, values, None, PagingState::start()) - .await + let (result, paging_state_response) = self + .query_inner(&query, values, None, PagingState::start()) + .await?; + if !paging_state_response.finished() { + let err_msg = "Unpaged unprepared query returned a non-empty paging state! This is a driver-side or server-side bug."; + error!(err_msg); + return Err(QueryError::ProtocolError(err_msg)); + } + Ok(result) } /// Queries the database with a custom paging state. 
@@ -644,7 +651,7 @@ impl Session { query: impl Into, values: impl SerializeRow, paging_state: PagingState, - ) -> Result { + ) -> Result<(QueryResult, PagingStateResponse), QueryError> { let query = query.into(); self.query_inner( &query, @@ -672,7 +679,7 @@ impl Session { values: impl SerializeRow, page_size: Option, paging_state: PagingState, - ) -> Result { + ) -> Result<(QueryResult, PagingStateResponse), QueryError> { let execution_profile = query .get_execution_profile_handle() .unwrap_or_else(|| self.get_default_execution_profile_handle()) @@ -756,9 +763,9 @@ impl Session { self.handle_set_keyspace_response(&response).await?; self.handle_auto_await_schema_agreement(&response).await?; - let result = response.into_query_result()?; + let (result, paging_state) = response.into_query_result_and_paging_state()?; span.record_result_fields(&result); - Ok(result) + Ok((result, paging_state)) } async fn handle_set_keyspace_response( @@ -902,7 +909,7 @@ impl Session { /// /// // Run the prepared query with some values, just like a simple query /// let to_insert: i32 = 12345; - /// session.execute(&prepared, (to_insert,)).await?; + /// session.execute_unpaged(&prepared, (to_insert,)).await?; /// # Ok(()) /// # } /// ``` @@ -997,7 +1004,7 @@ impl Session { /// /// // Run the prepared query with some values, just like a simple query /// let to_insert: i32 = 12345; - /// session.execute(&prepared, (to_insert,)).await?; + /// session.execute_unpaged(&prepared, (to_insert,)).await?; /// # Ok(()) /// # } /// ``` @@ -1007,8 +1014,15 @@ impl Session { values: impl SerializeRow, ) -> Result { let serialized_values = prepared.serialize_values(&values)?; - self.execute_inner(prepared, &serialized_values, None, PagingState::start()) - .await + let (result, paging_state) = self + .execute_inner(prepared, &serialized_values, None, PagingState::start()) + .await?; + if !paging_state.finished() { + let err_msg = "Unpaged prepared query returned a non-empty paging state! 
This is a driver-side or server-side bug."; + error!(err_msg); + return Err(QueryError::ProtocolError(err_msg)); + } + Ok(result) } pub async fn execute_single_page( @@ -1016,7 +1030,7 @@ impl Session { prepared: &PreparedStatement, values: impl SerializeRow, paging_state: PagingState, - ) -> Result { + ) -> Result<(QueryResult, PagingStateResponse), QueryError> { let serialized_values = prepared.serialize_values(&values)?; let page_size = prepared.get_validated_page_size(); self.execute_inner(prepared, &serialized_values, Some(page_size), paging_state) @@ -1035,7 +1049,7 @@ impl Session { serialized_values: &SerializedValues, page_size: Option, paging_state: PagingState, - ) -> Result { + ) -> Result<(QueryResult, PagingStateResponse), QueryError> { let values_ref = &serialized_values; let paging_state_ref = &paging_state; @@ -1123,9 +1137,9 @@ impl Session { self.handle_set_keyspace_response(&response).await?; self.handle_auto_await_schema_agreement(&response).await?; - let result = response.into_query_result()?; + let (result, paging_state) = response.into_query_result_and_paging_state()?; span.record_result_fields(&result); - Ok(result) + Ok((result, paging_state)) } /// Run a prepared query with paging\ @@ -1396,14 +1410,14 @@ impl Session { /// # async fn example() -> Result<(), Box> { /// # let session = SessionBuilder::new().known_node("127.0.0.1:9042").build().await?; /// session - /// .query("INSERT INTO my_keyspace.tab (a) VALUES ('test1')", &[]) + /// .query_unpaged("INSERT INTO my_keyspace.tab (a) VALUES ('test1')", &[]) /// .await?; /// /// session.use_keyspace("my_keyspace", false).await?; /// /// // Now we can omit keyspace name in the query /// session - /// .query("INSERT INTO tab (a) VALUES ('test2')", &[]) + /// .query_unpaged("INSERT INTO tab (a) VALUES ('test2')", &[]) /// .await?; /// # Ok(()) /// # } diff --git a/scylla/src/transport/session_test.rs b/scylla/src/transport/session_test.rs index 0a634dbe11..a88fa73018 100644 --- 
a/scylla/src/transport/session_test.rs +++ b/scylla/src/transport/session_test.rs @@ -148,12 +148,12 @@ async fn test_unprepared_statement() { let mut paging_state = PagingState::start(); let mut watchdog = 0; loop { - let rs_manual = session + let (rs_manual, paging_state_response) = session .query_single_page(query.clone(), &[], paging_state) .await .unwrap(); results_from_manual_paging.append(&mut rs_manual.rows.unwrap()); - match rs_manual.paging_state_response { + match paging_state_response { PagingStateResponse::HasMorePages { state } => { paging_state = state; } @@ -290,12 +290,12 @@ async fn test_prepared_statement() { let mut paging_state = PagingState::start(); let mut watchdog = 0; loop { - let rs_manual = session + let (rs_manual, paging_state_response) = session .execute_single_page(&prepared_paged, &[], paging_state) .await .unwrap(); results_from_manual_paging.append(&mut rs_manual.rows.unwrap()); - match rs_manual.paging_state_response { + match paging_state_response { PagingStateResponse::HasMorePages { state } => { paging_state = state; } diff --git a/scylla/tests/integration/skip_metadata_optimization.rs b/scylla/tests/integration/skip_metadata_optimization.rs index 5b419c65fb..1c84569e75 100644 --- a/scylla/tests/integration/skip_metadata_optimization.rs +++ b/scylla/tests/integration/skip_metadata_optimization.rs @@ -126,11 +126,10 @@ async fn test_skip_result_metadata() { let mut paging_state = PagingState::start(); let mut watchdog = 0; loop { - let rs_manual = session + let (rs_manual, paging_state_response) = session .execute_single_page(&prepared_paged, &[], paging_state) .await .unwrap(); - let paging_state_response = rs_manual.paging_state_response.clone(); results_from_manual_paging .extend(rs_manual.rows_typed::().unwrap().map(Result::unwrap)); From d0f0fb43c715321405328d63f381a5edd30662ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wojciech=20Przytu=C5=82a?= Date: Mon, 26 Aug 2024 15:49:41 +0200 Subject: [PATCH 16/19] connection: 
query_single_page takes paging state It appears that query_single_page's previous semantics was to only fetch the first page and ignore others. It makes much more sense, though, to have its semantics consistent with Session::query_single_page: to take PagingState and query a single page pointed by that state. This allows using it in a loop, similarly to Session::query_single_page. --- scylla/src/transport/connection.rs | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/scylla/src/transport/connection.rs b/scylla/src/transport/connection.rs index dcd23e680f..76a226ce54 100644 --- a/scylla/src/transport/connection.rs +++ b/scylla/src/transport/connection.rs @@ -826,6 +826,7 @@ impl Connection { pub(crate) async fn query_single_page( &self, query: impl Into, + paging_state: PagingState, ) -> Result<(QueryResult, PagingStateResponse), QueryError> { let query: Query = query.into(); @@ -835,14 +836,20 @@ impl Connection { .determine_consistency(self.config.default_consistency); let serial_consistency = query.config.serial_consistency; - self.query_single_page_with_consistency(query, consistency, serial_consistency.flatten()) - .await + self.query_single_page_with_consistency( + query, + paging_state, + consistency, + serial_consistency.flatten(), + ) + .await } #[allow(dead_code)] pub(crate) async fn query_single_page_with_consistency( &self, query: impl Into, + paging_state: PagingState, consistency: Consistency, serial_consistency: Option, ) -> Result<(QueryResult, PagingStateResponse), QueryError> { @@ -854,7 +861,7 @@ impl Connection { consistency, serial_consistency, Some(page_size), - PagingState::start(), + paging_state, ) .await? 
.into_query_result_and_paging_state() From ec1c2e7247ea89c1d42ea8322e929654a549f816 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wojciech=20Przytu=C5=82a?= Date: Tue, 20 Aug 2024 08:25:23 +0200 Subject: [PATCH 17/19] session: rename {query,execute}_inner to % As {query,execute} methods got the _unpaged prefix, now the inner methods can drop the _inner prefix. This gives us one subtle advantage: users now see "method {query,execute} is private" instead of "{query,execute} method not found", which makes them more likely to think that the API was changed on purpose. Assuming that users see docstrings of private methods when they hover a call to them, this also lets them read the new docstrings of {query,execute} that explain the reasons behind the change and point to proper public methods: {query,execute}_{unpaged,single_page,iter}. --- scylla/src/transport/session.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/scylla/src/transport/session.rs b/scylla/src/transport/session.rs index 727eddb1d3..ed4029668e 100644 --- a/scylla/src/transport/session.rs +++ b/scylla/src/transport/session.rs @@ -625,7 +625,7 @@ impl Session { ) -> Result { let query = query.into(); let (result, paging_state_response) = self - .query_inner(&query, values, None, PagingState::start()) + .query(&query, values, None, PagingState::start()) .await?; if !paging_state_response.finished() { let err_msg = "Unpaged unprepared query returned a non-empty paging state! 
This is a driver-side or server-side bug."; @@ -653,7 +653,7 @@ impl Session { paging_state: PagingState, ) -> Result<(QueryResult, PagingStateResponse), QueryError> { let query = query.into(); - self.query_inner( + self.query( &query, values, Some(query.get_validated_page_size()), @@ -673,7 +673,7 @@ impl Session { /// * `query` - query to be performed /// * `values` - values bound to the query /// * `paging_state` - previously received paging state or [PagingState::start()] - async fn query_inner( + async fn query( &self, query: &Query, values: impl SerializeRow, @@ -1015,7 +1015,7 @@ impl Session { ) -> Result { let serialized_values = prepared.serialize_values(&values)?; let (result, paging_state) = self - .execute_inner(prepared, &serialized_values, None, PagingState::start()) + .execute(prepared, &serialized_values, None, PagingState::start()) .await?; if !paging_state.finished() { let err_msg = "Unpaged prepared query returned a non-empty paging state! This is a driver-side or server-side bug."; @@ -1033,7 +1033,7 @@ impl Session { ) -> Result<(QueryResult, PagingStateResponse), QueryError> { let serialized_values = prepared.serialize_values(&values)?; let page_size = prepared.get_validated_page_size(); - self.execute_inner(prepared, &serialized_values, Some(page_size), paging_state) + self.execute(prepared, &serialized_values, Some(page_size), paging_state) .await } @@ -1043,7 +1043,7 @@ impl Session { /// * `prepared` - a statement prepared with [prepare](crate::transport::session::Session::prepare) /// * `values` - values bound to the statement /// * `paging_state` - paging state from the previous execution or [PagingState::start()] - async fn execute_inner( + async fn execute( &self, prepared: &PreparedStatement, serialized_values: &SerializedValues, From f82d85ab0fff75525f572630ca88fdcb35671324 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wojciech=20Przytu=C5=82a?= Date: Tue, 20 Aug 2024 10:06:04 +0200 Subject: [PATCH 18/19] session: update docstrings wrt 
paging After the API changes regarding paging, docstrings are updated with those changes in mind. --- scylla/src/history.rs | 8 +- scylla/src/lib.rs | 7 +- scylla/src/statement/prepared_statement.rs | 12 +- scylla/src/transport/session.rs | 204 ++++++++++++++++----- 4 files changed, 172 insertions(+), 59 deletions(-) diff --git a/scylla/src/history.rs b/scylla/src/history.rs index c0880514bc..9f2dfe5531 100644 --- a/scylla/src/history.rs +++ b/scylla/src/history.rs @@ -13,7 +13,7 @@ use chrono::{DateTime, Utc}; use scylla_cql::errors::QueryError; use tracing::warn; -/// Id of a single query, i.e. a single call to Session::query/execute/etc. +/// Id of a single query, i.e. a single call to Session::{query,execute}_{unpaged,single_page}/etc. #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] pub struct QueryId(pub usize); @@ -36,13 +36,13 @@ pub struct SpeculativeId(pub usize); /// It's important to note that even after a query is finished there still might come events related to it. /// These events come from speculative futures that didn't notice the query is done already. pub trait HistoryListener: Debug + Send + Sync { - /// Log that a query has started on query start - right after the call to Session::query. + /// Log that a query has started on query start - right after the call to Session::{query,execute}_*/batch. fn log_query_start(&self) -> QueryId; - /// Log that query was successful - called right before returning the result from Session::query, execute, etc. + /// Log that query was successful - called right before returning the result from Session::query_*, execute_*, etc. fn log_query_success(&self, query_id: QueryId); - /// Log that query ended with an error - called right before returning the error from Session::query, execute, etc. + /// Log that query ended with an error - called right before returning the error from Session::query_*, execute_*, etc. 
fn log_query_error(&self, query_id: QueryId, error: &QueryError); /// Log that a new speculative fiber has started. diff --git a/scylla/src/lib.rs b/scylla/src/lib.rs index e55196b6e7..07ba8aaef5 100644 --- a/scylla/src/lib.rs +++ b/scylla/src/lib.rs @@ -37,9 +37,9 @@ //! ### Making queries //! After successfully connecting to the cluster we can make queries.\ //! The driver supports multiple query types: -//! * [Simple](crate::Session::query) +//! * [Simple](crate::Session::query_unpaged) //! * [Simple paged](crate::Session::query_iter) -//! * [Prepare](crate::Session::execute) (need to be [prepared](crate::Session::prepare) before use) +//! * [Prepared](crate::Session::execute_unpaged) (need to be [prepared](crate::Session::prepare) before use) //! * [Prepared paged](crate::Session::execute_iter) //! * [Batch](crate::Session::batch) //! @@ -75,6 +75,9 @@ //! use scylla::IntoTypedRows; //! //! // Read rows containing an int and text +//! // Keep in mind that all results come in one response (no paging is done!), +//! // so the memory footprint and latency may be huge! +//! // To prevent that, use `Session::query_iter` or `Session::query_single_page`. //! let rows_opt = session //! .query_unpaged("SELECT a, b FROM ks.tab", &[]) //! .await? diff --git a/scylla/src/statement/prepared_statement.rs b/scylla/src/statement/prepared_statement.rs index 0d00fe6566..2899c40dd4 100644 --- a/scylla/src/statement/prepared_statement.rs +++ b/scylla/src/statement/prepared_statement.rs @@ -27,15 +27,17 @@ use crate::transport::partitioner::{Partitioner, PartitionerHasher, PartitionerN /// To prepare a statement, simply execute [`Session::prepare`](crate::transport::session::Session::prepare). /// /// If you plan on reusing the statement, or bounding some values to it during execution, always -/// prefer using prepared statements over [`Session::query`](crate::transport::session::Session::query). +/// prefer using prepared statements over `Session::query_*` methods, +/// e.g. 
[`Session::query_unpaged`](crate::transport::session::Session::query_unpaged). /// /// Benefits that prepared statements have to offer: /// * Performance - a prepared statement holds information about metadata /// that allows to carry out a statement execution in a type safe manner. -/// When [`Session::query`](crate::transport::session::Session::query) is called with -/// non-empty bound values, the driver has to prepare the statement before execution (to provide type safety). -/// This implies 2 round trips per [`Session::query`](crate::transport::session::Session::query). -/// On the other hand, the cost of [`Session::execute`](crate::transport::session::Session::execute) is only 1 round trip. +/// When any of `Session::query_*` methods is called with non-empty bound values, +/// the driver has to prepare the statement before execution (to provide type safety). +/// This implies 2 round trips per [`Session::query_unpaged`](crate::transport::session::Session::query_unpaged). +/// On the other hand, the cost of [`Session::execute_unpaged`](crate::transport::session::Session::execute_unpaged) +/// is only 1 round trip. /// * Increased type-safety - bound values' types are validated with /// the [`PreparedMetadata`] received from the server during the serialization. /// * Improved load balancing - thanks to statement metadata, the driver is able diff --git a/scylla/src/transport/session.rs b/scylla/src/transport/session.rs index ed4029668e..52a2f9cbe4 100644 --- a/scylla/src/transport/session.rs +++ b/scylla/src/transport/session.rs @@ -568,26 +568,32 @@ impl Session { Ok(session) } - /// Sends a query to the database and receives a response.\ - /// Returns only a single page of results, to receive multiple pages use [query_iter](Session::query_iter) + /// Sends a request to the database and receives a response.\ + /// Performs an unpaged query, i.e. all results are received in a single response. 
/// /// This is the easiest way to make a query, but performance is worse than that of prepared queries. /// /// It is discouraged to use this method with non-empty values argument (`is_empty()` method from `SerializeRow` /// trait returns false). In such case, query first needs to be prepared (on a single connection), so - /// driver will perform 2 round trips instead of 1. Please use [`Session::execute()`] instead. + /// driver will perform 2 round trips instead of 1. Please use [`Session::execute_unpaged()`] instead. + /// + /// As all results come in one response (no paging is done!), the memory footprint and latency may be huge + /// for statements returning rows (i.e. SELECTs)! Prefer this method for non-SELECTs, and for SELECTs + /// it is best to use paged queries: + /// - to receive multiple pages and transparently iterate through them, use [query_iter](Session::query_iter). + /// - to manually receive multiple pages and iterate through them, use [query_single_page](Session::query_single_page). /// /// See [the book](https://rust-driver.docs.scylladb.com/stable/queries/simple.html) for more information /// # Arguments - /// * `query` - query to perform, can be just a `&str` or the [Query] struct. - /// * `values` - values bound to the query, easiest way is to use a tuple of bound values + /// * `query` - statement to be executed, can be just a `&str` or the [Query] struct. + /// * `values` - values bound to the query, the easiest way is to use a tuple of bound values. /// /// # Examples /// ```rust /// # use scylla::Session; /// # use std::error::Error; /// # async fn check_only_compiles(session: &Session) -> Result<(), Box> { - /// // Insert an int and text into a table + /// // Insert an int and text into a table. 
/// session /// .query_unpaged( /// "INSERT INTO ks.tab (a, b) VALUES(?, ?)", @@ -603,7 +609,10 @@ impl Session { /// # async fn check_only_compiles(session: &Session) -> Result<(), Box> { /// use scylla::IntoTypedRows; /// - /// // Read rows containing an int and text + /// // Read rows containing an int and text. + /// // Keep in mind that all results come in one response (no paging is done!), + /// // so the memory footprint and latency may be huge! + /// // To prevent that, use `Session::query_iter` or `Session::query_single_page`. /// let rows_opt = session /// .query_unpaged("SELECT a, b FROM ks.tab", &[]) /// .await? @@ -635,7 +644,7 @@ impl Session { Ok(result) } - /// Queries the database with a custom paging state. + /// Queries a single page from the database, optionally continuing from a saved point. /// /// It is discouraged to use this method with non-empty values argument (`is_empty()` method from `SerializeRow` /// trait returns false). In such case, query first needs to be prepared (on a single connection), so @@ -643,9 +652,46 @@ impl Session { /// /// # Arguments /// - /// * `query` - query to be performed + /// * `query` - statement to be executed /// * `values` - values bound to the query /// * `paging_state` - previously received paging state or [PagingState::start()] + /// + /// # Example + /// + /// ```rust + /// # use scylla::Session; + /// # use std::error::Error; + /// # async fn check_only_compiles(session: &Session) -> Result<(), Box> { + /// use std::ops::ControlFlow; + /// use scylla::statement::PagingState; + /// + /// // Manual paging in a loop, unprepared statement. + /// let mut paging_state = PagingState::start(); + /// loop { + /// let (res, paging_state_response) = session + /// .query_single_page("SELECT a, b, c FROM ks.tbl", &[], paging_state) + /// .await?; + /// + /// // Do something with a single page of results. + /// for row in res.rows_typed::<(i32, String)>()? 
{ + /// let (a, b) = row?; + /// } + /// + /// match paging_state_response.into_paging_control_flow() { + /// ControlFlow::Break(()) => { + /// // No more pages to be fetched. + /// break; + /// } + /// ControlFlow::Continue(new_paging_state) => { + /// // Update paging state from the response, so that query + /// // will be resumed from where it ended the last time. + /// paging_state = new_paging_state; + /// } + /// } + /// } + /// # Ok(()) + /// # } + /// ``` pub async fn query_single_page( &self, query: impl Into, @@ -662,17 +708,17 @@ impl Session { .await } - /// Queries the database with a custom paging state. + /// Sends a request to the database. + /// Optionally continues fetching results from a saved point. /// - /// It is discouraged to use this method with non-empty values argument (`is_empty()` method from `SerializeRow` - /// trait returns false). In such case, query first needs to be prepared (on a single connection), so - /// driver will perform 2 round trips instead of 1. Please use [`Session::execute_single_page()`] instead. + /// This is now an internal method only. /// - /// # Arguments + /// Tl;dr: use [Session::query_unpaged], [Session::query_single_page] or [Session::query_iter] instead. /// - /// * `query` - query to be performed - /// * `values` - values bound to the query - /// * `paging_state` - previously received paging state or [PagingState::start()] + /// The rationale is that we believe that paging is such an important concept (and it has shown itself to be error-prone as well) + /// that we need to require users to make a conscious decision to use paging or not. For that, we expose + /// the aforementioned 3 methods clearly differing in naming and API, so that no unconscious choices about paging + /// should be made. 
async fn query( &self, query: &Query, @@ -803,7 +849,7 @@ impl Session { Ok(()) } - /// Run a simple query with paging\ + /// Run an unprepared query with paging\ /// This method will query all pages of the result\ /// /// Returns an async iterator (stream) over all received rows\ @@ -813,11 +859,11 @@ impl Session { /// trait returns false). In such case, query first needs to be prepared (on a single connection), so /// driver will initially perform 2 round trips instead of 1. Please use [`Session::execute_iter()`] instead. /// - /// See [the book](https://rust-driver.docs.scylladb.com/stable/queries/paged.html) for more information + /// See [the book](https://rust-driver.docs.scylladb.com/stable/queries/paged.html) for more information. /// /// # Arguments - /// * `query` - query to perform, can be just a `&str` or the [Query] struct. - /// * `values` - values bound to the query, easiest way is to use a tuple of bound values + /// * `query` - statement to be executed, can be just a `&str` or the [Query] struct. + /// * `values` - values bound to the query, the easiest way is to use a tuple of bound values. /// /// # Example /// @@ -971,9 +1017,15 @@ impl Session { .as_deref() } - /// Execute a prepared query. Requires a [PreparedStatement] - /// generated using [`Session::prepare`](Session::prepare)\ - /// Returns only a single page of results, to receive multiple pages use [execute_iter](Session::execute_iter) + /// Execute a prepared statement. Requires a [PreparedStatement] + /// generated using [`Session::prepare`](Session::prepare).\ + /// Performs an unpaged query, i.e. all results are received in a single response. + /// + /// As all results come in one response (no paging is done!), the memory footprint and latency may be huge + /// for statements returning rows (i.e. SELECTs)! 
Prefer this method for non-SELECTs, and for SELECTs + /// it is best to use paged queries: + /// - to receive multiple pages and transparently iterate through them, use [execute_iter](Session::execute_iter). + /// - to manually receive multiple pages and iterate through them, use [execute_single_page](Session::execute_single_page). /// /// Prepared queries are much faster than simple queries: /// * Database doesn't need to parse the query @@ -982,13 +1034,13 @@ impl Session { /// > ***Warning***\ /// > For token/shard aware load balancing to work properly, all partition key values /// > must be sent as bound values - /// > (see [performance section](https://rust-driver.docs.scylladb.com/stable/queries/prepared.html#performance)) + /// > (see [performance section](https://rust-driver.docs.scylladb.com/stable/queries/prepared.html#performance)). /// - /// See [the book](https://rust-driver.docs.scylladb.com/stable/queries/prepared.html) for more information + /// See [the book](https://rust-driver.docs.scylladb.com/stable/queries/prepared.html) for more information. /// /// # Arguments /// * `prepared` - the prepared statement to execute, generated using [`Session::prepare`](Session::prepare) - /// * `values` - values bound to the query, easiest way is to use a tuple of bound values + /// * `values` - values bound to the query, the easiest way is to use a tuple of bound values /// /// # Example /// ```rust @@ -1002,7 +1054,7 @@ impl Session { /// .prepare("INSERT INTO ks.tab (a) VALUES(?)") /// .await?; /// - /// // Run the prepared query with some values, just like a simple query + /// // Run the prepared query with some values, just like a simple query. /// let to_insert: i32 = 12345; /// session.execute_unpaged(&prepared, (to_insert,)).await?; /// # Ok(()) @@ -1025,6 +1077,59 @@ impl Session { Ok(result) } + /// Executes a prepared statement, restricting results to single page. + /// Optionally continues fetching results from a saved point. 
+ /// + /// # Arguments + /// + /// * `prepared` - a statement prepared with [prepare](crate::Session::prepare) + /// * `values` - values bound to the query + /// * `paging_state` - continuation based on a paging state received from a previous paged query or None + /// + /// # Example + /// + /// ```rust + /// # use scylla::Session; + /// # use std::error::Error; + /// # async fn check_only_compiles(session: &Session) -> Result<(), Box> { + /// use std::ops::ControlFlow; + /// use scylla::query::Query; + /// use scylla::statement::{PagingState, PagingStateResponse}; + /// + /// let paged_prepared = session + /// .prepare( + /// Query::new("SELECT a, b FROM ks.tbl") + /// .with_page_size(100.try_into().unwrap()), + /// ) + /// .await?; + /// + /// // Manual paging in a loop, prepared statement. + /// let mut paging_state = PagingState::start(); + /// loop { + /// let (res, paging_state_response) = session + /// .execute_single_page(&paged_prepared, &[], paging_state) + /// .await?; + /// + /// // Do something with a single page of results. + /// for row in res.rows_typed::<(i32, String)>()? { + /// let (a, b) = row?; + /// } + /// + /// match paging_state_response.into_paging_control_flow() { + /// ControlFlow::Break(()) => { + /// // No more pages to be fetched. + /// break; + /// } + /// ControlFlow::Continue(new_paging_state) => { + /// // Update paging continuation from the paging state, so that query + /// // will be resumed from where it ended the last time. + /// paging_state = new_paging_state; + /// } + /// } + /// } + /// # Ok(()) + /// # } + /// ``` pub async fn execute_single_page( &self, prepared: &PreparedStatement, @@ -1037,12 +1142,16 @@ impl Session { .await } - /// Executes a previously prepared statement with previously received paging state - /// # Arguments + /// Sends a prepared request to the database, optionally continuing from a saved point. + /// + /// This is now an internal method only. 
+ /// + /// Tl;dr: use [Session::execute_unpaged], [Session::execute_single_page] or [Session::execute_iter] instead. /// - /// * `prepared` - a statement prepared with [prepare](crate::transport::session::Session::prepare) - /// * `values` - values bound to the statement - /// * `paging_state` - paging state from the previous execution or [PagingState::start()] + /// The rationale is that we believe that paging is such an important concept (and it has shown itself to be error-prone as well) + /// that we need to require users to make a conscious decision to use paging or not. For that, we expose + /// the aforementioned 3 methods clearly differing in naming and API, so that no unconscious choices about paging + /// should be made. async fn execute( &self, prepared: &PreparedStatement, @@ -1142,18 +1251,17 @@ Ok((result, paging_state)) } - /// Run a prepared query with paging\ - /// This method will query all pages of the result\ + /// Run a prepared query with paging.\ + /// This method will query all pages of the result.\ /// - /// Returns an async iterator (stream) over all received rows\ - /// Page size can be specified in the [PreparedStatement] - /// passed to the function + /// Returns an async iterator (stream) over all received rows.\ + /// Page size can be specified in the [PreparedStatement] passed to the function. /// - /// See [the book](https://rust-driver.docs.scylladb.com/stable/queries/paged.html) for more information + /// See [the book](https://rust-driver.docs.scylladb.com/stable/queries/paged.html) for more information. 
/// /// # Arguments /// * `prepared` - the prepared statement to execute, generated using [`Session::prepare`](Session::prepare) - /// * `values` - values bound to the query, easiest way is to use a tuple of bound values + /// * `values` - values bound to the query, the easiest way is to use a tuple of bound values /// /// # Example /// @@ -1206,18 +1314,18 @@ .await } - /// Perform a batch query\ - /// Batch contains many `simple` or `prepared` queries which are executed at once\ - /// Batch doesn't return any rows + /// Perform a batch request.\ + /// Batch contains many `simple` or `prepared` queries which are executed at once.\ + /// Batch doesn't return any rows. /// - /// Batch values must contain values for each of the queries + /// Batch values must contain values for each of the queries. /// - /// Avoid using non-empty values (`SerializeRow::is_empty()` return false) for simple queries - /// inside the batch. Such queries will first need to be prepared, so the driver will need to - /// send (numer_of_unprepared_queries_with_values + 1) requests instead of 1 request, severly + /// Avoid using non-empty values (`SerializeRow::is_empty()` returns false) for unprepared statements + /// inside the batch. Such statements will first need to be prepared, so the driver will need to + /// send (number_of_unprepared_statements_with_values + 1) requests instead of 1 request, severely /// affecting performance. /// - /// See [the book](https://rust-driver.docs.scylladb.com/stable/queries/batch.html) for more information + /// See [the book](https://rust-driver.docs.scylladb.com/stable/queries/batch.html) for more information. 
/// /// # Arguments /// * `batch` - [Batch] to be performed From a1286deafe8d3f167edcf44ef1ac911b8f19ab45 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wojciech=20Przytu=C5=82a?= Date: Tue, 20 Aug 2024 11:30:10 +0200 Subject: [PATCH 19/19] docs: update for new paging API --- docs/source/queries/paged.md | 77 +++++++++++++++++++++++------- docs/source/queries/result.md | 3 +- docs/source/queries/simple.md | 9 ++-- docs/source/queries/usekeyspace.md | 4 +- 4 files changed, 70 insertions(+), 23 deletions(-) diff --git a/docs/source/queries/paged.md b/docs/source/queries/paged.md index cff040d79d..f6f56c4e60 100644 --- a/docs/source/queries/paged.md +++ b/docs/source/queries/paged.md @@ -1,9 +1,10 @@ # Paged query -Sometimes query results might not fit in a single page. Paged queries -allow to receive the whole result page by page. +Sometimes query results might be so big that one prefers not to fetch them all at once, +e.g. to reduce latency and/or memory footprint. +Paged queries allow to receive the whole result page by page, with a configurable page size. -`Session::query_iter` and `Session::execute_iter` take a [simple query](simple.md) or a [prepared query](prepared.md) -and return an `async` iterator over result `Rows`. +`Session::query_iter` and `Session::execute_iter` take a [simple query](simple.md) +or a [prepared query](prepared.md) and return an `async` iterator over result `Rows`. > ***Warning***\ > In case of unprepared variant (`Session::query_iter`) if the values are not empty @@ -79,7 +80,7 @@ On a `Query`: use scylla::query::Query; let mut query: Query = Query::new("SELECT a, b FROM ks.t"); -query.set_page_size(16); +query.set_page_size(16.try_into().unwrap()); let _ = session.query_iter(query, &[]).await?; // ... 
# Ok(()) @@ -98,7 +99,7 @@ let mut prepared: PreparedStatement = session .prepare("SELECT a, b FROM ks.t") .await?; -prepared.set_page_size(16); +prepared.set_page_size(16.try_into().unwrap()); let _ = session.execute_iter(prepared, &[]).await?; // ... # Ok(()) @@ -117,12 +118,33 @@ On a `Query`: # use std::error::Error; # async fn check_only_compiles(session: &Session) -> Result<(), Box> { use scylla::query::Query; +use scylla::statement::{PagingState, PagingStateResponse}; +use std::ops::ControlFlow; + +let paged_query = Query::new("SELECT a, b, c FROM ks.t").with_page_size(6.try_into().unwrap()); + +let mut paging_state = PagingState::start(); +loop { + let (res, paging_state_response) = session + .query_single_page(paged_query.clone(), &[], paging_state) + .await?; + + // Do something with `res`. + // ... + + match paging_state_response.into_paging_control_flow() { + ControlFlow::Break(()) => { + // No more pages to be fetched. + break; + } + ControlFlow::Continue(new_paging_state) => { + // Update paging state from the response, so that query + // will be resumed from where it ended the last time. 
+ paging_state = new_paging_state + } + } +} -let paged_query = Query::new("SELECT a, b, c FROM ks.t").with_page_size(6); -let res1 = session.query(paged_query.clone(), &[]).await?; -let res2 = session - .query_single_page(paged_query.clone(), &[], res1.paging_state) - .await?; # Ok(()) # } ``` @@ -139,14 +161,37 @@ On a `PreparedStatement`: # use std::error::Error; # async fn check_only_compiles(session: &Session) -> Result<(), Box> { use scylla::query::Query; +use scylla::statement::{PagingState, PagingStateResponse}; +use std::ops::ControlFlow; let paged_prepared = session - .prepare(Query::new("SELECT a, b, c FROM ks.t").with_page_size(7)) - .await?; -let res1 = session.execute(&paged_prepared, &[]).await?; -let res2 = session - .execute_single_page(&paged_prepared, &[], res1.paging_state) + .prepare(Query::new("SELECT a, b, c FROM ks.t").with_page_size(7.try_into().unwrap())) .await?; + +let mut paging_state = PagingState::start(); +loop { + let (res, paging_state_response) = session + .execute_single_page(&paged_prepared, &[], paging_state) + .await?; + + println!( + "Paging state response from the prepared statement execution: {:#?} ({} rows)", + paging_state_response, + res.rows_num()?, + ); + + match paging_state_response.into_paging_control_flow() { + ControlFlow::Break(()) => { + // No more pages to be fetched. + break; + } + ControlFlow::Continue(new_paging_state) => { + // Update paging state from the response, so that query + // will be resumed from where it ended the last time. + paging_state = new_paging_state + } + } +} # Ok(()) # } ``` diff --git a/docs/source/queries/result.md b/docs/source/queries/result.md index 3663e23726..7eed7fc416 100644 --- a/docs/source/queries/result.md +++ b/docs/source/queries/result.md @@ -1,6 +1,7 @@ # Query result -`Session::query` and `Session::execute` return a `QueryResult` with rows represented as `Option>`. 
+`Session::query_unpaged`, `Session::query_single_page`, `Session::execute_unpaged` and `Session::execute_single_page` +return a `QueryResult` with rows represented as `Option>`. ### Basic representation `Row` is a basic representation of a received row. It can be used by itself, but it's a bit awkward to use: diff --git a/docs/source/queries/simple.md b/docs/source/queries/simple.md index ca0a4269e2..1eaf9d8be9 100644 --- a/docs/source/queries/simple.md +++ b/docs/source/queries/simple.md @@ -20,7 +20,8 @@ session > By default the query is unpaged and might cause heavy load on the cluster.\ > In such cases set a page size and use [paged query](paged.md) instead.\ > -> When page size is set, `query` will return only the first page of results. +> `query_unpaged` will return all results in one, possibly giant, piece +> (unless a timeout occurs due to high load incurred by the cluster). > ***Warning***\ > If the values are not empty, driver first needs to send a `PREPARE` request @@ -28,7 +29,7 @@ session > performance because 2 round trips will be required instead of 1. ### First argument - the query -As the first argument `Session::query` takes anything implementing `Into`.\ +As the first argument `Session::query_unpaged` takes anything implementing `Into`.\ You can create a query manually to set custom options. For example to change query consistency: ```rust # extern crate scylla; @@ -74,7 +75,7 @@ Here the first `?` will be filled with `2` and the second with `"Some text"`. See [Query values](values.md) for more information about sending values in queries ### Query result -`Session::query` returns `QueryResult` with rows represented as `Option>`.\ +`Session::query_unpaged` returns `QueryResult` with rows represented as `Option>`.\ Each row can be parsed as a tuple of rust types using `rows_typed`: ```rust # extern crate scylla; @@ -92,8 +93,6 @@ while let Some(read_row) = iter.next().transpose()? 
{ # Ok(()) # } ``` -> In cases where page size is set, simple query returns only a single page of results.\ -> To receive all pages use a [paged query](paged.md) instead.\ See [Query result](result.md) for more information about handling query results diff --git a/docs/source/queries/usekeyspace.md b/docs/source/queries/usekeyspace.md index 2879a26275..85c92b1c8a 100644 --- a/docs/source/queries/usekeyspace.md +++ b/docs/source/queries/usekeyspace.md @@ -49,7 +49,8 @@ session The first argument is the keyspace name.\ The second argument states whether this name is case sensitive. -It is also possible to send raw use keyspace query using `Session::query` instead of `Session::use_keyspace` such as: +It is also possible to send raw use keyspace query using `Session::query_*` instead of `Session::use_keyspace` such as: + ```rust # extern crate scylla; # use scylla::Session; @@ -59,6 +60,7 @@ session.query_unpaged("USE my_keyspace", &[]).await?; # Ok(()) # } ``` + This method has a slightly worse latency than `Session::use_keyspace` - there are two roundtrips needed instead of one. Therefore, `Session::use_keyspace` is the preferred method for setting keyspaces.