Skip to content

Commit

Permalink
pageserver: reorder upload queue when possible
Browse files Browse the repository at this point in the history
  • Loading branch information
erikgrinaker committed Dec 27, 2024
1 parent ff4a061 commit fdb6fbb
Show file tree
Hide file tree
Showing 3 changed files with 121 additions and 43 deletions.
109 changes: 67 additions & 42 deletions pageserver/src/tenant/remote_timeline_client.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1797,55 +1797,64 @@ impl RemoteTimelineClient {
Ok(())
}

/// Returns and removes the next ready operation from the queue, if any. This isn't necessarily
/// the first operation in the queue, to avoid head-of-line blocking. None may be returned even
/// if the queue isn't empty, if the queued operations conflict with in-progress operations.
///
/// Pick next tasks from the queue, and start as many of them as possible without violating
/// the ordering constraints.
///
/// The caller needs to already hold the `upload_queue` lock.
fn launch_queued_tasks(self: &Arc<Self>, upload_queue: &mut UploadQueueInitialized) {
while let Some(next_op) = upload_queue.queued_operations.front() {
// Can we run this task now?
let can_run_now = match next_op {
UploadOp::UploadLayer(..) => {
// Can always be scheduled.
true
}
UploadOp::UploadMetadata { .. } => {
// These can only be performed after all the preceding operations
// have finished.
upload_queue.inprogress_tasks.is_empty()
}
UploadOp::Delete(..) => {
// Wait for preceding uploads to finish. Concurrent deletions are OK, though.
upload_queue.num_inprogress_deletions == upload_queue.inprogress_tasks.len()
}

UploadOp::Barrier(_) | UploadOp::Shutdown => {
upload_queue.inprogress_tasks.is_empty()
}
};
/// TODO: consider limiting the number of in-progress tasks.
fn next_queued_task(self: &Arc<Self>, queue: &mut UploadQueueInitialized) -> Option<UploadOp> {
// For each queued operations, starting at the front, check if it can bypass any in-progress
// operations (which it might race with) and queued operations ahead of it, and return it.
//
// TODO: this is quadratic. Is that a problem? Consider optimizing.
for (i, candidate) in queue.queued_operations.iter().enumerate() {
let can_bypass = queue
// Look at in-progress operations, in random order.
.inprogress_tasks
.values()
.map(|task| &task.op)
// Then queued operations ahead of this operation, front-to-back.
.chain(queue.queued_operations.iter().take(i))
// Keep track of the active index ahead of each operation. It's okay that
// in-progress operations are emitted in random order above, since at most one of
// them can be an index upload.
.scan(&queue.clean.0, |next_active_index, op| {
let active_index = *next_active_index;
if let UploadOp::UploadMetadata { ref uploaded } = op {
*next_active_index = uploaded; // stash index for next operation after this
}
Some((op, active_index))
})
// Check if the current candidate operation can bypass all of them.
.all(|(op, active_index)| candidate.can_bypass(op, active_index));

// If we cannot launch this task, don't look any further.
//
// In some cases, we could let some non-frontmost tasks to "jump the queue" and launch
// them now, but we don't try to do that currently. For example, if the frontmost task
// is an index-file upload that cannot proceed until preceding uploads have finished, we
// could still start layer uploads that were scheduled later.
if !can_run_now {
break;
// If we can bypass all preceding operations, go for it.
if can_bypass {
return queue.queued_operations.remove(i);
}

if let UploadOp::Shutdown = next_op {
// leave the op in the queue but do not start more tasks; it will be dropped when
// the stop is called.
upload_queue.shutdown_ready.close();
break;
// Nothing can bypass a barrier or shutdown. If it wasn't scheduled above, give up.
if matches!(candidate, UploadOp::Barrier(_) | UploadOp::Shutdown) {
return None;
}
}
None
}

// We can launch this task. Remove it from the queue first.
let mut next_op = upload_queue.queued_operations.pop_front().unwrap();
/// Pick next tasks from the queue, and start as many of them as possible without violating
/// the ordering constraints.
///
/// The caller needs to already hold the `upload_queue` lock.
fn launch_queued_tasks(self: &Arc<Self>, upload_queue: &mut UploadQueueInitialized) {
// Check for a shutdown. Leave it in the queue, but don't start more tasks. It will be
// dropped on stop.
if let Some(UploadOp::Shutdown) = upload_queue.queued_operations.front() {
upload_queue.shutdown_ready.close();
return;
}

debug!("starting op: {}", next_op);
while let Some(mut next_op) = self.next_queued_task(upload_queue) {
debug!("starting op: {next_op}");

// Update the counters and prepare
match &mut next_op {
Expand Down Expand Up @@ -1967,6 +1976,8 @@ impl RemoteTimelineClient {

let upload_result: anyhow::Result<()> = match &task.op {
UploadOp::UploadLayer(ref layer, ref layer_metadata, mode) => {
// TODO: check if this mechanism can be removed now that can_bypass() performs
// conflict checks during scheduling.
if let Some(OpType::FlushDeletion) = mode {
if self.config.read().unwrap().block_deletions {
// Of course, this is not efficient... but usually the queue should be empty.
Expand Down Expand Up @@ -2531,6 +2542,20 @@ pub fn remote_layer_path(
RemotePath::from_string(&path).expect("Failed to construct path")
}

/// Reports whether two layers refer to the same physical path within a tenant/timeline.
///
/// Two layers are the same path when their names match and they belong to the same shard
/// and generation. Behaves like `remote_layer_path(a) == remote_layer_path(b)`, but without
/// allocating the path strings.
///
/// TODO: there should be a variant of LayerName for the physical path that contains information
/// about the shard and generation, such that this could be replaced by a simple comparison.
pub fn is_same_layer_path(
    aname: &LayerName,
    ameta: &LayerFileMetadata,
    bname: &LayerName,
    bmeta: &LayerFileMetadata,
) -> bool {
    // Different names can never be the same path; bail out early.
    if aname != bname {
        return false;
    }
    // Same name: the path coincides iff shard and generation also match.
    ameta.shard == bmeta.shard && ameta.generation == bmeta.generation
}

pub fn remote_initdb_archive_path(tenant_id: &TenantId, timeline_id: &TimelineId) -> RemotePath {
RemotePath::from_string(&format!(
"tenants/{tenant_id}/{TIMELINES_SEGMENT_NAME}/{timeline_id}/{INITDB_PATH}"
Expand Down
2 changes: 1 addition & 1 deletion pageserver/src/tenant/remote_timeline_client/index.rs
Original file line number Diff line number Diff line change
Expand Up @@ -152,7 +152,7 @@ impl IndexPart {
let Some(index_metadata) = self.layer_metadata.get(name) else {
return false;
};
metadata.shard == index_metadata.shard && metadata.generation == index_metadata.generation
super::is_same_layer_path(name, metadata, name, index_metadata)
}
}

Expand Down
53 changes: 53 additions & 0 deletions pageserver/src/tenant/upload_queue.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
use super::remote_timeline_client::is_same_layer_path;
use super::storage_layer::AsLayerDesc as _;
use super::storage_layer::LayerName;
use super::storage_layer::ResidentLayer;
use crate::tenant::metadata::TimelineMetadata;
Expand Down Expand Up @@ -338,3 +340,54 @@ impl std::fmt::Display for UploadOp {
}
}
}

impl UploadOp {
    /// Returns true if self can bypass other, i.e. if the operations don't conflict. index is the
    /// active index just before either self or other would be uploaded.
    ///
    /// Used by the upload queue scheduler to reorder operations: a queued operation may only be
    /// started ahead of `other` (an in-progress or earlier-queued operation) when this returns
    /// true for every such `other`.
    pub fn can_bypass(&self, other: &UploadOp, index: &IndexPart) -> bool {
        match (self, other) {
            // Nothing can bypass a barrier or shutdown, and it can't bypass anything.
            (UploadOp::Barrier(_), _) | (_, UploadOp::Barrier(_)) => false,
            (UploadOp::Shutdown, _) | (_, UploadOp::Shutdown) => false,

            // Two layer uploads can bypass each other unless they target the same file, in
            // which case reordering them would let the loser of the race clobber the winner.
            (UploadOp::UploadLayer(a, ameta, _), UploadOp::UploadLayer(b, bmeta, _)) => {
                let aname = &a.layer_desc().layer_name();
                let bname = &b.layer_desc().layer_name();
                !is_same_layer_path(aname, ameta, bname, bmeta)
            }

            // An upload and a delete can bypass each other unless the delete covers the
            // uploaded file. The layer name is loop-invariant, so compute it once rather than
            // per deleted layer (mirrors the UploadMetadata arm below).
            (UploadOp::UploadLayer(u, umeta, _), UploadOp::Delete(d))
            | (UploadOp::Delete(d), UploadOp::UploadLayer(u, umeta, _)) => {
                let uname = u.layer_desc().layer_name();
                d.layers
                    .iter()
                    .all(|(dname, dmeta)| !is_same_layer_path(&uname, umeta, dname, dmeta))
            }

            // Deletes are idempotent and can always bypass each other.
            (UploadOp::Delete(_), UploadOp::Delete(_)) => true,

            // Uploads and deletes can bypass an index upload as long as neither the uploaded index
            // nor the active index below it references the file. A layer can't be modified or
            // deleted while referenced by an index.
            //
            // Similarly, index uploads can bypass uploads and deletes as long as neither the
            // uploaded index nor the active index references the file (the latter would be
            // incorrect use by the caller).
            (UploadOp::UploadLayer(u, umeta, _), UploadOp::UploadMetadata { uploaded: i })
            | (UploadOp::UploadMetadata { uploaded: i }, UploadOp::UploadLayer(u, umeta, _)) => {
                let uname = u.layer_desc().layer_name();
                !i.references(&uname, umeta) && !index.references(&uname, umeta)
            }
            (UploadOp::Delete(d), UploadOp::UploadMetadata { uploaded: i })
            | (UploadOp::UploadMetadata { uploaded: i }, UploadOp::Delete(d)) => {
                d.layers.iter().all(|(dname, dmeta)| {
                    !i.references(dname, dmeta) && !index.references(dname, dmeta)
                })
            }

            // Indexes can never bypass each other: index uploads must be strictly ordered,
            // since each one supersedes the previous.
            // TODO: they can coalesce though, consider this.
            (UploadOp::UploadMetadata { .. }, UploadOp::UploadMetadata { .. }) => false,
        }
    }
}

0 comments on commit fdb6fbb

Please sign in to comment.