Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

tests: use high IO concurrency in test_pgdata_import_smoke, use effective_io_concurrency=2 in tests by default #10114

Merged
merged 4 commits
Dec 19, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions control_plane/src/endpoint.rs
Original file line number Diff line number Diff line change
Expand Up @@ -316,6 +316,10 @@ impl Endpoint {
// and can cause errors like 'no unpinned buffers available', see
// <https://github.com/neondatabase/neon/issues/9956>
conf.append("shared_buffers", "1MB");
// Postgres defaults to effective_io_concurrency=1, which does not exercise the pageserver's
// batching logic. Set this to 2 so that we exercise the code a bit without letting
// individual tests do a lot of concurrent work on underpowered test machines
conf.append("effective_io_concurrency", "2");
conf.append("fsync", "off");
conf.append("max_connections", "100");
conf.append("wal_level", "logical");
Expand Down
14 changes: 11 additions & 3 deletions test_runner/regress/test_import_pgdata.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,6 +84,8 @@ def handler(request: Request) -> Response:
elif rel_block_size == RelBlockSize.TWO_STRPES_PER_SHARD:
target_relblock_size = (shard_count or 1) * stripe_size * 8192 * 2
elif rel_block_size == RelBlockSize.MULTIPLE_RELATION_SEGMENTS:
# Postgres uses a 1GiB segment size, fixed at compile time, so we must use >2GB of data
# to exercise multiple segments.
target_relblock_size = int(((2.333 * 1024 * 1024 * 1024) // 8192) * 8192)
else:
raise ValueError
Expand Down Expand Up @@ -111,9 +113,15 @@ def handler(request: Request) -> Response:

def validate_vanilla_equivalence(ep):
    """Assert that endpoint `ep` holds the expected contents of table `t`.

    Compares a count/checksum aggregate against the closure-captured
    `expect_nrows` / `expect_sum` values. The caller passes either the
    vanilla-Postgres endpoint or an imported Neon endpoint, so this acts
    as the equivalence check between the two.
    """
    # TODO: would be nicer to just compare pgdump

    # Enable IO concurrency for batching on large sequential scan, to avoid making
    # this test unnecessarily onerous on CPU.
    # safe_psql_many returns one result list per statement; the SET returns
    # no rows, hence the leading empty list in the expected value.
    assert ep.safe_psql_many(
        [
            "set effective_io_concurrency=32;",
            "select count(*), sum(data::bigint)::bigint from t",
        ]
    ) == [[], [(expect_nrows, expect_sum)]]

validate_vanilla_equivalence(vanilla_pg)

Expand Down
Loading