feat: Significantly decrease startup times for WAL #25643

Merged Dec 12, 2024 (2 commits)

38 changes: 37 additions & 1 deletion Cargo.lock

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions Cargo.toml
@@ -50,6 +50,7 @@ async-trait = "0.1"
backtrace = "0.3"
base64 = "0.22.0"
bimap = "0.6.3"
+bitcode = { version = "0.6.3", features = ["serde"] }
byteorder = "1.3.4"
bytes = "1.9"
chrono = "0.4"
4 changes: 3 additions & 1 deletion influxdb3/src/commands/serve.rs

@@ -40,6 +40,7 @@ use std::{collections::HashMap, path::Path, str::FromStr};
use std::{num::NonZeroUsize, sync::Arc};
use thiserror::Error;
use tokio::net::TcpListener;
use tokio::time::Instant;
use tokio_util::sync::CancellationToken;
use trace_exporters::TracingConfig;
use trace_http::ctx::TraceHeaderParser;
@@ -359,6 +360,7 @@ fn ensure_directory_exists(p: &Path) {
}

pub async fn command(config: Config) -> Result<()> {
let startup_timer = Instant::now();
let num_cpus = num_cpus::get();
let build_malloc_conf = build_malloc_conf();
info!(
@@ -542,7 +544,7 @@ pub async fn command(config: Config) -> Result<()> {
} else {
builder.build()
};
-    serve(server, frontend_shutdown).await?;
+    serve(server, frontend_shutdown, startup_timer).await?;

Ok(())
}
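The timing hook above is the whole measurement: capture an Instant before initialization, thread it through to serve, and report the elapsed time once the listener is up. A minimal standalone sketch of the pattern (tokio::time::Instant mirrors std::time::Instant's API):

```rust
use tokio::time::Instant;

#[tokio::main]
async fn main() {
    let startup_timer = Instant::now();

    // ... server construction and initialization would run here ...

    // Measure right before the server starts accepting connections.
    let startup_time = Instant::now().duration_since(startup_timer);
    println!("Server Startup Time: {}ms", startup_time.as_millis());
}
```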
@@ -1,6 +1,7 @@
---
source: influxdb3_cache/src/last_cache/mod.rs
expression: caches
snapshot_kind: text
---
[
{
@@ -11,9 +12,7 @@ expression: caches
0,
1
],
"value_columns": {
"type": "all_non_key_columns"
},
"value_columns": "all_non_key_columns",
"count": 1,
"ttl": 600
},
@@ -25,11 +24,12 @@ expression: caches
6
],
"value_columns": {
"type": "explicit",
"columns": [
8,
7
]
"explicit": {
"columns": [
8,
7
]
}
},
"count": 5,
"ttl": 60
@@ -40,11 +40,12 @@
"name": "test_cache_3",
"key_columns": [],
"value_columns": {
"type": "explicit",
"columns": [
9,
7
]
"explicit": {
"columns": [
9,
7
]
}
},
"count": 10,
"ttl": 500
7 changes: 4 additions & 3 deletions influxdb3_client/src/lib.rs

@@ -845,7 +845,7 @@ pub struct LastCacheCreatedResponse {
/// A last cache will either store values for an explicit set of columns, or will accept all
/// non-key columns
#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone)]
#[serde(tag = "type", rename_all = "snake_case")]
#[serde(rename_all = "snake_case")]
pub enum LastCacheValueColumnsDef {
/// Explicit list of column names
Explicit { columns: Vec<u32> },
@@ -1220,8 +1220,9 @@ mod tests {
"name": "cache_name",
"key_columns": [0, 1],
"value_columns": {
"type": "explicit",
"columns": [2, 3]
"explicit": {
"columns": [2, 3]
}
},
"ttl": 120,
"count": 5
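Dropping serde's `tag = "type"` attribute (here and in influxdb3_wal below) switches the enum from the internally tagged representation to the default externally tagged one, likely because bitcode, like most compact binary codecs, is not self-describing and cannot deserialize internally tagged enums. A minimal sketch of the resulting JSON shapes (illustrative only, rendered with serde_json):

```rust
use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")] // externally tagged: the new wire shape
enum LastCacheValueColumnsDef {
    Explicit { columns: Vec<u32> },
    AllNonKeyColumns,
}

fn main() {
    let explicit = LastCacheValueColumnsDef::Explicit { columns: vec![2, 3] };
    // Prints {"explicit":{"columns":[2,3]}} -- the variant name wraps the body.
    println!("{}", serde_json::to_string(&explicit).unwrap());

    // Unit-like variants collapse to a bare string: "all_non_key_columns"
    let all = LastCacheValueColumnsDef::AllNonKeyColumns;
    println!("{}", serde_json::to_string(&all).unwrap());
}
```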
14 changes: 12 additions & 2 deletions influxdb3_server/src/lib.rs
@@ -35,12 +35,14 @@ use iox_query::QueryDatabase;
use iox_query_params::StatementParams;
use iox_time::TimeProvider;
use observability_deps::tracing::error;
use observability_deps::tracing::info;
use service::hybrid;
use std::convert::Infallible;
use std::fmt::Debug;
use std::sync::Arc;
use thiserror::Error;
use tokio::net::TcpListener;
use tokio::time::Instant;
use tokio_util::sync::CancellationToken;
use tower::Layer;
use trace::ctx::SpanContext;
@@ -174,7 +176,11 @@ impl<T> Server<T> {
}
}

-pub async fn serve<T>(server: Server<T>, shutdown: CancellationToken) -> Result<()>
+pub async fn serve<T>(
+    server: Server<T>,
+    shutdown: CancellationToken,
+    startup_timer: Instant,
+) -> Result<()>
where
T: TimeProvider,
{
@@ -206,6 +212,9 @@
let hybrid_make_service = hybrid(rest_service, grpc_service);

let addr = AddrIncoming::from_listener(server.listener)?;
let timer_end = Instant::now();
let startup_time = timer_end.duration_since(startup_timer);
info!("Server Startup Time: {}ms", startup_time.as_millis());
Review discussion:

Member: maybe better to output this using duration's formatting? I think it'll do things like 1m32s...

@mgattozzi (Contributor, Author), Dec 12, 2024: @pauldix it does not have formatting available by default; Duration does not impl Display. This is why you need to specify the unit you want, and the available methods mostly give fractional values (e.g. 2.7 seconds) rather than absolute ones (e.g. 2s 700ms). I figured ms was the most accurate, but I'm willing to switch to get something like 2.7s or 800.5s instead.

Member: nah, just keep the milliseconds. Perhaps I was thinking of Duration's debug output. Or maybe a Chrono duration to string.
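For reference, a minimal sketch of the trade-off discussed here (std::time::Duration implements Debug but not Display, so a unit must be chosen explicitly otherwise):

```rust
use std::time::Duration;

fn main() {
    let startup_time = Duration::from_millis(2700);

    // No Display impl exists, so a unit is picked explicitly:
    println!("{}ms", startup_time.as_millis()); // 2700ms

    // Debug formatting chooses fractional units automatically:
    println!("{:?}", startup_time); // 2.7s
}
```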

hyper::server::Builder::new(addr, Http::new())
.tcp_nodelay(true)
.serve(hybrid_make_service)
@@ -764,6 +773,7 @@ mod tests {
}

async fn setup_server(start_time: i64) -> (String, CancellationToken, Arc<dyn WriteBuffer>) {
let server_start_time = tokio::time::Instant::now();
let trace_header_parser = trace_http::ctx::TraceHeaderParser::new();
let metrics = Arc::new(metric::Registry::new());
let object_store: Arc<DynObjectStore> = Arc::new(object_store::memory::InMemory::new());
@@ -856,7 +866,7 @@
let frontend_shutdown = CancellationToken::new();
let shutdown = frontend_shutdown.clone();

-        tokio::spawn(async move { serve(server, frontend_shutdown).await });
+        tokio::spawn(async move { serve(server, frontend_shutdown, server_start_time).await });

(format!("http://{addr}"), shutdown, write_buffer)
}
2 changes: 1 addition & 1 deletion influxdb3_wal/Cargo.toml

@@ -18,6 +18,7 @@ influxdb3_id = { path = "../influxdb3_id" }

# crates.io dependencies
async-trait.workspace = true
+bitcode.workspace = true
bytes.workspace = true
byteorder.workspace = true
crc32fast.workspace = true
@@ -27,7 +28,6 @@ indexmap.workspace = true
object_store.workspace = true
parking_lot.workspace = true
serde.workspace = true
-serde_json.workspace = true
serde_with.workspace = true
thiserror.workspace = true
tokio.workspace = true
5 changes: 4 additions & 1 deletion influxdb3_wal/src/lib.rs

@@ -40,6 +40,9 @@ pub enum Error {
#[error("deserialize error: {0}")]
Serialize(#[from] crate::serialize::Error),

#[error("join error: {0}")]
Join(#[from] tokio::task::JoinError),

#[error("object store error: {0}")]
ObjectStoreError(#[from] ::object_store::Error),

@@ -426,7 +429,7 @@ impl LastCacheDefinition {
/// A last cache will either store values for an explicit set of columns, or will accept all
/// non-key columns
#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone)]
#[serde(tag = "type", rename_all = "snake_case")]
#[serde(rename_all = "snake_case")]
pub enum LastCacheValueColumnsDef {
/// Explicit list of column names
Explicit { columns: Vec<ColumnId> },
18 changes: 16 additions & 2 deletions influxdb3_wal/src/object_store.rs

@@ -104,9 +104,22 @@ impl WalObjectStore {
.last_snapshot_sequence_number()
};

async fn get_contents(
object_store: Arc<dyn ObjectStore>,
path: Path,
) -> Result<WalContents, crate::Error> {
let file_bytes = object_store.get(&path).await?.bytes().await?;
Ok(verify_file_type_and_deserialize(file_bytes)?)
}

let mut replay_tasks = Vec::new();
for path in paths {
-            let file_bytes = self.object_store.get(&path).await?.bytes().await?;
-            let wal_contents = verify_file_type_and_deserialize(file_bytes)?;
+            let object_store = Arc::clone(&self.object_store);
+            replay_tasks.push(tokio::spawn(get_contents(object_store, path)));
}

for wal_contents in replay_tasks {
let wal_contents = wal_contents.await??;

// add this to the snapshot tracker, so we know what to clear out later if the replay
// was a wal file that had a snapshot
@@ -120,6 +133,7 @@ impl WalObjectStore {
));

match wal_contents.snapshot {
// This branch uses so much time
None => self.file_notifier.notify(wal_contents),
Some(snapshot_details) => {
let snapshot_info = {
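This is the core of the startup win: instead of fetching and deserializing each WAL file sequentially, every download is spawned up front and the results are awaited in submission order, so object-store I/O overlaps while replay ordering is preserved. A self-contained sketch of the pattern (the Store type and byte payloads are stand-ins for the real object store and WalContents):

```rust
use std::sync::Arc;

// Stand-in for the object store; the real code fetches WAL files here.
struct Store;
impl Store {
    async fn get(&self, path: &str) -> Result<Vec<u8>, String> {
        Ok(path.as_bytes().to_vec())
    }
}

async fn get_contents(store: Arc<Store>, path: String) -> Result<Vec<u8>, String> {
    store.get(&path).await
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let store = Arc::new(Store);
    let paths: Vec<String> = (0..4).map(|i| format!("wal/{i:04}.wal")).collect();

    // Spawn every fetch up front so the downloads run concurrently...
    let replay_tasks: Vec<_> = paths
        .into_iter()
        .map(|path| tokio::spawn(get_contents(Arc::clone(&store), path)))
        .collect();

    // ...then await the handles in submission order: I/O overlaps, but each
    // file is still applied in the order it was written.
    for task in replay_tasks {
        let contents = task.await??; // JoinError first, then the fetch error
        println!("replaying {} bytes", contents.len());
    }
    Ok(())
}
```

The double `?` is why the PR adds a `Join(#[from] tokio::task::JoinError)` variant to the WAL error enum: awaiting a spawned task can fail on its own, before the task's inner result is even inspected.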
11 changes: 6 additions & 5 deletions influxdb3_wal/src/serialize.rs

@@ -17,8 +17,8 @@ pub enum Error {
#[error("crc32 checksum mismatch")]
Crc32Mismatch,

#[error("Serde error: {0}")]
Serde(#[from] serde_json::Error),
#[error("bitcode error: {0}")]
Bitcode(#[from] bitcode::Error),

#[error("IO error: {0}")]
Io(#[from] std::io::Error),
@@ -32,6 +32,7 @@ pub(crate) type Result<T, E = Error> = std::result::Result<T, E>;
/// The first bytes written into a wal file to identify it and its version.
const FILE_TYPE_IDENTIFIER: &[u8] = b"idb3.001";

#[inline(always)]
pub fn verify_file_type_and_deserialize(b: Bytes) -> Result<WalContents> {
let contents = b.to_vec();

@@ -61,7 +62,7 @@ pub fn verify_file_type_and_deserialize(b: Bytes) -> Result<WalContents> {
}

// Deserialize the data into a WalContents
-    let contents: WalContents = serde_json::from_slice(data)?;
+    let contents: WalContents = bitcode::deserialize(data)?;

Ok(contents)
}
@@ -70,8 +71,8 @@ pub(crate) fn serialize_to_file_bytes(contents: &WalContents) -> Result<Vec<u8>>
let mut buf = Vec::new();
buf.extend_from_slice(FILE_TYPE_IDENTIFIER);

-    // serialize the contents into json bytes
-    let data = serde_json::to_vec(contents)?;
+    // serialize the contents into bitcode bytes
+    let data = bitcode::serialize(contents)?;

// calculate the crc32 checksum
let mut hasher = crc32fast::Hasher::new();
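The codec swap is the other half of the speedup: bitcode produces a much smaller, faster-to-parse payload than JSON while keeping the same serde derives. A minimal round-trip sketch of the file framing shown above (the identifier-then-checksum-then-payload layout, the endianness, and the Payload type are illustrative simplifications of the real WalContents format):

```rust
use serde::{Deserialize, Serialize};

// Illustrative stand-in; the real WalContents carries full WAL operations.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct Payload {
    wal_file_number: u64,
    ops: Vec<String>,
}

const FILE_TYPE_IDENTIFIER: &[u8] = b"idb3.001";

fn serialize_to_file_bytes(contents: &Payload) -> Result<Vec<u8>, bitcode::Error> {
    let mut buf = Vec::new();
    buf.extend_from_slice(FILE_TYPE_IDENTIFIER);
    let data = bitcode::serialize(contents)?;
    // crc32 of the payload, stored ahead of it (byte order assumed here).
    let mut hasher = crc32fast::Hasher::new();
    hasher.update(&data);
    buf.extend_from_slice(&hasher.finalize().to_be_bytes());
    buf.extend_from_slice(&data);
    Ok(buf)
}

fn deserialize_file_bytes(b: &[u8]) -> Result<Payload, bitcode::Error> {
    // Skip the 8-byte identifier and 4-byte checksum (verification elided).
    bitcode::deserialize(&b[FILE_TYPE_IDENTIFIER.len() + 4..])
}

fn main() {
    let contents = Payload { wal_file_number: 1, ops: vec!["write".into()] };
    let bytes = serialize_to_file_bytes(&contents).unwrap();
    assert_eq!(deserialize_file_bytes(&bytes).unwrap(), contents);
}
```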
5 changes: 2 additions & 3 deletions influxdb3_write/src/write_buffer/queryable_buffer.rs

@@ -513,11 +513,10 @@ impl BufferState {
let sort_key = table_def
.series_key
.iter()
-            .map(|c| table_def.column_id_to_name_unchecked(c).to_string())
-            .collect::<Vec<_>>();
+            .map(|c| Arc::clone(&table_def.column_id_to_name_unchecked(c)));
        let index_columns = table_def.index_column_ids();

-        TableBuffer::new(index_columns, SortKey::from(sort_key))
+        TableBuffer::new(index_columns, SortKey::from_columns(sort_key))
});
for (chunk_time, chunk) in table_chunks.chunk_time_to_chunk {
table_buffer.buffer_chunk(chunk_time, chunk.rows);
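This last change is a small allocation fix in the same spirit: column names are already stored behind Arc, so cloning the Arc bumps a reference count instead of copying every name into a fresh String. A minimal sketch of the difference (the names here are illustrative):

```rust
use std::sync::Arc;

fn main() {
    let column_names: Vec<Arc<str>> = vec![Arc::from("region"), Arc::from("host")];

    // Before: every name was copied into a newly allocated String.
    let owned: Vec<String> = column_names.iter().map(|n| n.to_string()).collect();

    // After: cloning the Arc is a cheap refcount bump; the underlying
    // string data is shared, not copied.
    let shared: Vec<Arc<str>> = column_names.iter().map(Arc::clone).collect();

    assert_eq!(owned.len(), shared.len());
}
```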