From 59f50252c202b6cffb14088f64136b8ad597d6db Mon Sep 17 00:00:00 2001
From: Christian Schwarz
Date: Mon, 29 Jul 2024 11:23:16 +0000
Subject: [PATCH] WIP

---
 pageserver/src/tenant/ephemeral_file.rs              |  4 ++--
 .../src/tenant/ephemeral_file/page_caching.rs        |  2 +-
 .../ephemeral_file/zero_padded_read_write.rs         | 25 +++++++++++++------------
 3 files changed, 16 insertions(+), 15 deletions(-)

diff --git a/pageserver/src/tenant/ephemeral_file.rs b/pageserver/src/tenant/ephemeral_file.rs
index bb65ae24fc5e..7a544c318553 100644
--- a/pageserver/src/tenant/ephemeral_file.rs
+++ b/pageserver/src/tenant/ephemeral_file.rs
@@ -58,7 +58,7 @@ impl EphemeralFile {
         })
     }
 
-    pub(crate) fn len(&self) -> u64 {
+    pub(crate) fn len(&self) -> u32 {
         self.rw.bytes_written()
     }
 
@@ -83,7 +83,7 @@ impl EphemeralFile {
         &mut self,
         srcbuf: &[u8],
         ctx: &RequestContext,
-    ) -> Result<u64, io::Error> {
+    ) -> Result<u32, io::Error> {
         let pos = self.rw.bytes_written();
 
         // Write the length field
diff --git a/pageserver/src/tenant/ephemeral_file/page_caching.rs b/pageserver/src/tenant/ephemeral_file/page_caching.rs
index 43b9fff28d98..31054b9888be 100644
--- a/pageserver/src/tenant/ephemeral_file/page_caching.rs
+++ b/pageserver/src/tenant/ephemeral_file/page_caching.rs
@@ -55,7 +55,7 @@ impl RW {
         self.rw.write_all_borrowed(srcbuf, ctx).await
     }
 
-    pub(crate) fn bytes_written(&self) -> u64 {
+    pub(crate) fn bytes_written(&self) -> u32 {
         self.rw.bytes_written()
     }
 
diff --git a/pageserver/src/tenant/ephemeral_file/zero_padded_read_write.rs b/pageserver/src/tenant/ephemeral_file/zero_padded_read_write.rs
index fe310acab888..85c207cc16f1 100644
--- a/pageserver/src/tenant/ephemeral_file/zero_padded_read_write.rs
+++ b/pageserver/src/tenant/ephemeral_file/zero_padded_read_write.rs
@@ -69,10 +69,10 @@ where
         self.buffered_writer.write_buffered_borrowed(buf, ctx).await
     }
 
-    pub fn bytes_written(&self) -> u64 {
+    pub fn bytes_written(&self) -> u32 {
         let flushed_offset = self.buffered_writer.as_inner().bytes_written();
         let buffer: &zero_padded::Buffer<TAIL_SZ> = self.buffered_writer.inspect_buffer();
-        flushed_offset + u64::try_from(buffer.pending()).unwrap()
+        u32::try_from(flushed_offset).unwrap() + u32::try_from(buffer.pending()).unwrap()
     }
 
     /// Get a slice of all blocks that [`Self::read_blk`] would return as [`ReadResult::ServedFromZeroPaddedMutableTail`].
@@ -91,39 +91,40 @@ where
     }
 
     pub(crate) async fn read_blk(&self, blknum: u32) -> Result<ReadResult<'_, W>, std::io::Error> {
-        let flushed_offset = self.buffered_writer.as_inner().bytes_written();
+        let flushed_offset = u32::try_from(self.buffered_writer.as_inner().bytes_written()).expect("flushed offset must fit into u32");
         let buffer: &zero_padded::Buffer<TAIL_SZ> = self.buffered_writer.inspect_buffer();
-        let buffered_offset = flushed_offset + u64::try_from(buffer.pending()).unwrap();
-        let read_offset = (blknum as u64) * (PAGE_SZ as u64);
+        let buffered_offset = flushed_offset + u32::try_from(buffer.pending()).unwrap();
+        let page_sz = u32::try_from(PAGE_SZ).unwrap();
+        let read_offset = blknum.checked_mul(page_sz).unwrap();
 
         // The trailing page ("block") might only be partially filled,
         // yet the blob_io code relies on us to return a full PAGE_SZed slice anyway.
         // Moreover, the page may have been modified since the last flush, and we need to return the modified state.
         // => serve the read from the in-memory buffer.
         // DeltaLayer probably has the same issue, not sure why it needs no special treatment.
         // => check here that the read doesn't go beyond this potentially trailing page
         // => the zero-padding is done in the `else` branch below
-        let blocks_written = if buffered_offset % (PAGE_SZ as u64) == 0 {
-            buffered_offset / (PAGE_SZ as u64)
+        let blocks_written = if buffered_offset % page_sz == 0 {
+            buffered_offset / page_sz
         } else {
-            (buffered_offset / (PAGE_SZ as u64)) + 1
+            (buffered_offset / page_sz) + 1
         };
-        if (blknum as u64) >= blocks_written {
+        if blknum >= blocks_written {
             return Err(std::io::Error::new(std::io::ErrorKind::Other, anyhow::anyhow!("read past end of ephemeral_file: read=0x{read_offset:x} buffered=0x{buffered_offset:x} flushed=0x{flushed_offset}")));
         }
 
         // assertions for the `if-else` below
         assert_eq!(
-            flushed_offset % (TAIL_SZ as u64), 0,
+            flushed_offset % (u32::try_from(TAIL_SZ).unwrap()), 0,
             "we only use write_buffered_borrowed to write to the buffered writer, so it's guaranteed that flushes happen buffer.cap()-sized chunks"
         );
         assert_eq!(
-            flushed_offset % (PAGE_SZ as u64),
+            flushed_offset % page_sz,
             0,
             "the logic below can't handle if the page is spread across the flushed part and the buffer"
         );
         if read_offset < flushed_offset {
-            assert!(read_offset + (PAGE_SZ as u64) <= flushed_offset);
+            assert!(read_offset + page_sz <= flushed_offset);
             Ok(ReadResult::NeedsReadFromWriter {
                 writer: self.as_writer(),
             })
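Note (reviewer sketch, not part of the patch): the recurring pattern above is
replacing u64 byte arithmetic with checked u32 arithmetic, so an offset that no
longer fits the narrower type panics instead of silently wrapping. The
standalone Rust sketch below illustrates that pattern; the PAGE_SZ value and
both helper names are illustrative assumptions, not the pageserver's API:

// Standalone sketch; assumes PAGE_SZ = 8192 for illustration.
const PAGE_SZ: usize = 8192;

// Block number -> byte offset as u32, panicking on overflow, mirroring
// the `blknum.checked_mul(page_sz)` the patch introduces in read_blk.
fn block_read_offset(blknum: u32) -> u32 {
    let page_sz = u32::try_from(PAGE_SZ).unwrap(); // 8192 always fits in u32
    blknum
        .checked_mul(page_sz)
        .expect("read offset must fit into u32")
}

// Number of blocks that contain at least one written byte; a partially
// filled trailing page counts as a whole block, matching the if/else
// rounding in read_blk.
fn blocks_written(buffered_offset: u32) -> u32 {
    let page_sz = u32::try_from(PAGE_SZ).unwrap();
    buffered_offset.div_ceil(page_sz) // round-up division
}

fn main() {
    assert_eq!(block_read_offset(2), 16384);
    assert_eq!(blocks_written(1), 1); // partial trailing page
    assert_eq!(blocks_written(8192), 1); // exactly one full page
    assert_eq!(blocks_written(8193), 2);
}

u32::div_ceil expresses the same round-up that the patch spells out with an
if/else; the patch keeps the explicit form, which changes nothing beyond the
integer width.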