From 446c2c9b93578e5a25c63dedee28640701c49b31 Mon Sep 17 00:00:00 2001 From: Mehmet YILMAZ Date: Fri, 8 Nov 2024 14:49:50 +0300 Subject: [PATCH 1/2] Fix Test Failure in multi-mx in PG17 (#7722) This PR addresses a regression test failure in the multi-mx feature of Citus with the new PostgreSQL 17 version. The regression was identified during the execution of multi-node tests, specifically targeting compatibility issues introduced with PostgreSQL 17. --------- Co-authored-by: Mehmet YILMAZ (cherry picked from commit 70cf729ba3725bd000e9f8c3fc99bd981cd46ec1) --- src/test/regress/bin/normalize.sed | 3 + .../regress/expected/ch_bench_having_mx.out | 5 + .../regress/expected/ch_bench_having_mx_0.out | 383 +++ src/test/regress/expected/multi_extension.out | 6 + .../regress/expected/multi_extension_0.out | 2084 +++++++++++++++++ .../expected/multi_mx_hide_shard_names.out | 26 +- .../expected/multi_mx_router_planner.out | 3 + .../expected/multi_mx_router_planner_0.out | 1514 ++++++++++++ src/test/regress/sql/ch_bench_having_mx.sql | 6 + src/test/regress/sql/multi_extension.sql | 6 + .../regress/sql/multi_mx_hide_shard_names.sql | 27 +- .../regress/sql/multi_mx_router_planner.sql | 3 + 12 files changed, 4045 insertions(+), 21 deletions(-) create mode 100644 src/test/regress/expected/ch_bench_having_mx_0.out create mode 100644 src/test/regress/expected/multi_extension_0.out create mode 100644 src/test/regress/expected/multi_mx_router_planner_0.out diff --git a/src/test/regress/bin/normalize.sed b/src/test/regress/bin/normalize.sed index 1cbd0b404fd..57c7c66a0dc 100644 --- a/src/test/regress/bin/normalize.sed +++ b/src/test/regress/bin/normalize.sed @@ -319,3 +319,6 @@ s/COPY DEFAULT only available using COPY FROM/COPY DEFAULT cannot be used with C s/COPY delimiter must not appear in the DEFAULT specification/COPY delimiter character must not appear in the DEFAULT specification/ #endif /* PG_VERSION_NUM < PG_VERSION_17 */ + +# multi_mx_router_planner normalize public.pg_temp_xxxxx +s/DEBUG: drop auto-cascades to type public\.pg_temp_[0-9]+/DEBUG: drop auto-cascades to type public.pg_temp_xxxxx/g diff --git a/src/test/regress/expected/ch_bench_having_mx.out b/src/test/regress/expected/ch_bench_having_mx.out index f4664fba53c..83ca36d05ab 100644 --- a/src/test/regress/expected/ch_bench_having_mx.out +++ b/src/test/regress/expected/ch_bench_having_mx.out @@ -1,3 +1,8 @@ +-- Two alternative test outputs: +-- ch_bench_having_mx.out for PG16 and before +-- ch_bench_having_mx_0.out for PG17 +-- related commit +-- https://github.com/postgres/postgres/commit/fd0398fc ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1640000; SET citus.shard_replication_factor to 1; SET citus.shard_count to 4; diff --git a/src/test/regress/expected/ch_bench_having_mx_0.out b/src/test/regress/expected/ch_bench_having_mx_0.out new file mode 100644 index 00000000000..b3cf22dc6c2 --- /dev/null +++ b/src/test/regress/expected/ch_bench_having_mx_0.out @@ -0,0 +1,383 @@ +-- Two alternative test outputs: +-- ch_bench_having_mx.out for PG16 and before +-- ch_bench_having_mx_0.out for PG17 +-- related commit +-- https://github.com/postgres/postgres/commit/fd0398fc +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1640000; +SET citus.shard_replication_factor to 1; +SET citus.shard_count to 4; +CREATE SCHEMA ch_bench_having; +SET search_path = ch_bench_having; +CREATE TABLE stock ( + s_w_id int NOT NULL, + s_i_id int NOT NULL, + s_order_cnt int NOT NULL +); +SELECT create_distributed_table('stock','s_w_id'); + 
create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +\c - - - :worker_1_port +SET search_path = ch_bench_having; +explain (costs false, summary false, timing false) +select s_i_id, sum(s_order_cnt) as ordercount +from stock +where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock) +group by s_i_id +having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) +order by s_i_id; + QUERY PLAN +--------------------------------------------------------------------- + Sort + Sort Key: remote_scan.s_i_id + InitPlan 1 + -> Function Scan on read_intermediate_result intermediate_result + -> HashAggregate + Group Key: remote_scan.s_i_id + Filter: ((pg_catalog.sum(remote_scan.worker_column_3))::bigint > (InitPlan 1).col1) + -> Custom Scan (Citus Adaptive) + -> Distributed Subplan XXX_1 + -> Aggregate + -> Custom Scan (Citus Adaptive) + Task Count: 4 + Tasks Shown: One of 4 + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Aggregate + -> Seq Scan on stock_1640000 stock + -> Distributed Subplan XXX_2 + -> Aggregate + -> Custom Scan (Citus Adaptive) + Task Count: 4 + Tasks Shown: One of 4 + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Aggregate + -> Seq Scan on stock_1640000 stock + Task Count: 4 + Tasks Shown: One of 4 + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> HashAggregate + Group Key: stock.s_i_id + InitPlan 1 + -> Function Scan on read_intermediate_result intermediate_result + -> Seq Scan on stock_1640000 stock + Filter: ((s_order_cnt)::numeric > (InitPlan 1).col1) +(36 rows) + +explain (costs false, summary false, timing false) +select s_i_id, sum(s_order_cnt) as ordercount +from stock +group by s_i_id +having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) +order by s_i_id; + QUERY PLAN +--------------------------------------------------------------------- + Sort + Sort Key: remote_scan.s_i_id + InitPlan 1 + -> Function Scan on read_intermediate_result intermediate_result + -> HashAggregate + Group Key: remote_scan.s_i_id + Filter: ((pg_catalog.sum(remote_scan.worker_column_3))::bigint > (InitPlan 1).col1) + -> Custom Scan (Citus Adaptive) + -> Distributed Subplan XXX_1 + -> Aggregate + -> Custom Scan (Citus Adaptive) + Task Count: 4 + Tasks Shown: One of 4 + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Aggregate + -> Seq Scan on stock_1640000 stock + Task Count: 4 + Tasks Shown: One of 4 + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> HashAggregate + Group Key: stock.s_i_id + -> Seq Scan on stock_1640000 stock +(24 rows) + +explain (costs false, summary false, timing false) +select s_i_id, sum(s_order_cnt) as ordercount +from stock +group by s_i_id +having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock); + QUERY PLAN +--------------------------------------------------------------------- + HashAggregate + Group Key: remote_scan.s_i_id + Filter: ((pg_catalog.sum(remote_scan.worker_column_3))::bigint > (InitPlan 1).col1) + InitPlan 1 + -> Function Scan on read_intermediate_result intermediate_result + -> Custom Scan (Citus Adaptive) + -> Distributed Subplan XXX_1 + -> Aggregate + -> Custom Scan (Citus Adaptive) + Task Count: 4 + Tasks Shown: One of 4 + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Aggregate + -> Seq Scan on stock_1640000 stock + Task Count: 4 + Tasks Shown: One of 4 + -> Task + Node: host=localhost port=xxxxx 
dbname=regression + -> HashAggregate + Group Key: stock.s_i_id + -> Seq Scan on stock_1640000 stock +(22 rows) + +explain (costs false) +select s_i_id, sum(s_order_cnt) as ordercount +from stock s +group by s_i_id +having (select true) +order by s_i_id; + QUERY PLAN +--------------------------------------------------------------------- + Sort + Sort Key: remote_scan.s_i_id + InitPlan 1 + -> Result + -> HashAggregate + Group Key: remote_scan.s_i_id + -> Result + One-Time Filter: (InitPlan 1).col1 + -> Custom Scan (Citus Adaptive) + Filter: (InitPlan 1).col1 + Task Count: 4 + Tasks Shown: One of 4 + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> HashAggregate + Group Key: s.s_i_id + -> Seq Scan on stock_1640000 s +(17 rows) + +explain select s_i_id, sum(s_order_cnt) as ordercount +from stock s +group by s_i_id +having (select true); + QUERY PLAN +--------------------------------------------------------------------- + HashAggregate (cost=500.01..503.01 rows=200 width=12) + Group Key: remote_scan.s_i_id + InitPlan 1 + -> Result (cost=0.00..0.01 rows=1 width=1) + -> Result (cost=0.00..0.00 rows=100000 width=12) + One-Time Filter: (InitPlan 1).col1 + -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=100000 width=12) + Filter: (InitPlan 1).col1 + Task Count: 4 + Tasks Shown: One of 4 + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> HashAggregate (cost=40.60..42.60 rows=200 width=12) + Group Key: s.s_i_id + -> Seq Scan on stock_1640000 s (cost=0.00..30.40 rows=2040 width=8) +(15 rows) + +select s_i_id, sum(s_order_cnt) as ordercount +from stock +where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock) +group by s_i_id +having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) +order by s_i_id; + s_i_id | ordercount +--------------------------------------------------------------------- +(0 rows) + +INSERT INTO stock SELECT c, c, c FROM generate_series(1, 5) as c; +select s_i_id, sum(s_order_cnt) as ordercount +from stock +where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock) +group by s_i_id +having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) +order by s_i_id; + s_i_id | ordercount +--------------------------------------------------------------------- + 3 | 3 + 4 | 4 + 5 | 5 +(3 rows) + +select s_i_id, sum(s_order_cnt) as ordercount +from stock +group by s_i_id +having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) +order by s_i_id; + s_i_id | ordercount +--------------------------------------------------------------------- + 3 | 3 + 4 | 4 + 5 | 5 +(3 rows) + +select s_i_id, sum(s_order_cnt) as ordercount +from stock s +where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock) +group by s_i_id +having (select true) +order by s_i_id; + s_i_id | ordercount +--------------------------------------------------------------------- + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 +(5 rows) + +select s_i_id, sum(s_order_cnt) as ordercount +from stock s +where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock) +group by s_i_id +having (select false) +order by s_i_id; + s_i_id | ordercount +--------------------------------------------------------------------- +(0 rows) + +select s_i_id, sum(s_order_cnt) as ordercount +from stock s +group by s_i_id +having (select true) +order by s_i_id; + s_i_id | ordercount +--------------------------------------------------------------------- + 1 | 1 + 2 | 2 + 3 | 3 + 4 
| 4 + 5 | 5 +(5 rows) + +select s_i_id, sum(s_order_cnt) as ordercount +from stock s +group by s_i_id +having (select false) +order by s_i_id; + s_i_id | ordercount +--------------------------------------------------------------------- +(0 rows) + +select s_i_id, sum(s_order_cnt) as ordercount +from stock s +group by s_i_id +having (select true) +order by s_i_id; + s_i_id | ordercount +--------------------------------------------------------------------- + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 +(5 rows) + +-- We don't support correlated subqueries in having +select s_i_id, sum(s_order_cnt) as ordercount +from stock s +where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock) +group by s_i_id +having (select max(s_order_cnt) > 2 as having_query from stock where s_i_id = s.s_i_id) +order by s_i_id; +ERROR: Subqueries in HAVING cannot refer to outer query +-- We don't support correlated subqueries in having +select s_i_id, sum(s_order_cnt) as ordercount +from stock s +group by s_i_id +having (select max(s_order_cnt) > 2 as having_query from stock where s_i_id = s.s_i_id) +order by s_i_id; +ERROR: Subqueries in HAVING cannot refer to outer query +\c - - - :master_port +SET citus.shard_replication_factor to 1; +SET citus.shard_count to 4; +SET search_path = ch_bench_having, public; +DROP TABLE stock; +CREATE TABLE stock ( + s_w_id int NOT NULL, + s_i_id int NOT NULL, + s_quantity decimal(4,0) NOT NULL, + s_ytd decimal(8,2) NOT NULL, + s_order_cnt int NOT NULL, + s_remote_cnt int NOT NULL, + s_data varchar(50) NOT NULL, + s_dist_01 char(24) NOT NULL, + s_dist_02 char(24) NOT NULL, + s_dist_03 char(24) NOT NULL, + s_dist_04 char(24) NOT NULL, + s_dist_05 char(24) NOT NULL, + s_dist_06 char(24) NOT NULL, + s_dist_07 char(24) NOT NULL, + s_dist_08 char(24) NOT NULL, + s_dist_09 char(24) NOT NULL, + s_dist_10 char(24) NOT NULL, + PRIMARY KEY (s_w_id,s_i_id) +); +insert into stock VALUES +(1, 33, 1, 1, 1, 1, '', '','','','','','','','','',''), +(33, 1, 1, 1, 1, 1, '', '','','','','','','','','',''), +(32, 1, 1, 1, 1, 1, '', '','','','','','','','','',''); +SELECT create_distributed_table('stock','s_w_id'); +NOTICE: Copying data from local table... +NOTICE: copying the data has completed +DETAIL: The local data in the table is no longer visible, but is still on disk. 
+HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$ch_bench_having.stock$$) + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +\c - - - :worker_1_port +SET search_path = ch_bench_having, public; +select s_i_id, sum(s_order_cnt) as ordercount +from stock, supplier_mx, nation_mx +where mod((s_w_id * s_i_id),10000) = s_suppkey + and s_nationkey = n_nationkey + and n_name = 'GERMANY' +group by s_i_id +having sum(s_order_cnt) > + (select sum(s_order_cnt) * .005 + from stock, supplier_mx, nation_mx + where mod((s_w_id * s_i_id),10000) = s_suppkey + and s_nationkey = n_nationkey + and n_name = 'GERMANY') +order by s_i_id, ordercount desc; + s_i_id | ordercount +--------------------------------------------------------------------- + 1 | 1 + 33 | 1 +(2 rows) + +insert into stock VALUES +(10033, 1, 1, 1, 100000, 1, '', '','','','','','','','','',''); +select s_i_id, sum(s_order_cnt) as ordercount +from stock, supplier_mx, nation_mx +where mod((s_w_id * s_i_id),10000) = s_suppkey + and s_nationkey = n_nationkey + and n_name = 'GERMANY' +group by s_i_id +having sum(s_order_cnt) > + (select sum(s_order_cnt) * .005 + from stock, supplier_mx, nation_mx + where mod((s_w_id * s_i_id),10000) = s_suppkey + and s_nationkey = n_nationkey + and n_name = 'GERMANY') +order by s_i_id, ordercount desc; + s_i_id | ordercount +--------------------------------------------------------------------- + 1 | 100001 +(1 row) + +\c - - - :master_port +BEGIN; +SET LOCAL client_min_messages TO WARNING; +DROP SCHEMA ch_bench_having CASCADE; +COMMIT; diff --git a/src/test/regress/expected/multi_extension.out b/src/test/regress/expected/multi_extension.out index b2badd878c2..f86e93146b4 100644 --- a/src/test/regress/expected/multi_extension.out +++ b/src/test/regress/expected/multi_extension.out @@ -7,6 +7,12 @@ -- not done yet. -- -- Upgrade tests verify the objects are added in citus_finish_pg_upgrade() +-- +-- Two alternative test outputs: +-- multi_extension.out for PG16 and before +-- multi_extension_0.out for PG17 +-- related commit +-- https://git.postgresql.org/gitweb/?p=postgresql.git;a=commit;h=e5bc9454e527b1cba97553531d8d4992892fdeef SET citus.next_shard_id TO 580000; CREATE SCHEMA multi_extension; SELECT $definition$ diff --git a/src/test/regress/expected/multi_extension_0.out b/src/test/regress/expected/multi_extension_0.out new file mode 100644 index 00000000000..7b6eb2afc28 --- /dev/null +++ b/src/test/regress/expected/multi_extension_0.out @@ -0,0 +1,2084 @@ +-- +-- MULTI_EXTENSION +-- +-- Tests around extension creation / upgrades +-- +-- It'd be nice to script generation of this file, but alas, that's +-- not done yet. +-- +-- Upgrade tests verify the objects are added in citus_finish_pg_upgrade() +-- +-- Two alternative test outputs: +-- multi_extension.out for PG16 and before +-- multi_extension_0.out for PG17 +-- related commit +-- https://git.postgresql.org/gitweb/?p=postgresql.git;a=commit;h=e5bc9454e527b1cba97553531d8d4992892fdeef +SET citus.next_shard_id TO 580000; +CREATE SCHEMA multi_extension; +SELECT $definition$ +CREATE OR REPLACE FUNCTION test.maintenance_worker() + RETURNS pg_stat_activity + LANGUAGE plpgsql +AS $$ +DECLARE + activity record; +BEGIN + DO 'BEGIN END'; -- Force maintenance daemon to start + -- we don't want to wait forever; loop will exit after 20 seconds + FOR i IN 1 .. 
200 LOOP + PERFORM pg_stat_clear_snapshot(); + SELECT * INTO activity FROM pg_stat_activity + WHERE application_name = 'Citus Maintenance Daemon' AND datname = current_database(); + IF activity.pid IS NOT NULL THEN + RETURN activity; + ELSE + PERFORM pg_sleep(0.1); + END IF ; + END LOOP; + -- fail if we reach the end of this loop + raise 'Waited too long for maintenance daemon to start'; +END; +$$; +$definition$ create_function_test_maintenance_worker +\gset +CREATE TABLE multi_extension.prev_objects(description text); +CREATE TABLE multi_extension.extension_diff(previous_object text COLLATE "C", + current_object text COLLATE "C"); +CREATE FUNCTION multi_extension.print_extension_changes() +RETURNS TABLE(previous_object text, current_object text) +AS $func$ +BEGIN + SET LOCAL search_path TO multi_extension; + TRUNCATE TABLE extension_diff; + + CREATE TABLE current_objects AS + SELECT pg_catalog.pg_describe_object(classid, objid, 0) + || ' ' || + coalesce(pg_catalog.pg_get_function_result(objid), '') AS description + FROM pg_catalog.pg_depend, pg_catalog.pg_extension e + WHERE refclassid = 'pg_catalog.pg_extension'::pg_catalog.regclass + AND refobjid = e.oid + AND deptype = 'e' + AND e.extname='citus'; + + INSERT INTO extension_diff + SELECT p.description previous_object, c.description current_object + FROM current_objects c FULL JOIN prev_objects p + ON p.description = c.description + WHERE (p.description is null OR c.description is null) + AND c.description IS DISTINCT FROM 'function any_value(anyelement) anyelement' + AND c.description IS DISTINCT FROM 'function any_value_agg(anyelement,anyelement) anyelement'; + + DROP TABLE prev_objects; + ALTER TABLE current_objects RENAME TO prev_objects; + + RETURN QUERY SELECT * FROM extension_diff ORDER BY 1, 2; +END +$func$ LANGUAGE plpgsql; +CREATE SCHEMA test; +:create_function_test_maintenance_worker +-- check maintenance daemon is started +SELECT datname, current_database(), + usename, (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus') +FROM test.maintenance_worker(); + datname | current_database | usename | extowner +--------------------------------------------------------------------- + regression | regression | postgres | postgres +(1 row) + +-- ensure no unexpected objects were created outside pg_catalog +SELECT pgio.type, pgio.identity +FROM pg_depend AS pgd, + pg_extension AS pge, + LATERAL pg_identify_object(pgd.classid, pgd.objid, pgd.objsubid) AS pgio +WHERE pgd.refclassid = 'pg_extension'::regclass AND + pgd.refobjid = pge.oid AND + pge.extname = 'citus' AND + pgio.schema NOT IN ('pg_catalog', 'citus', 'citus_internal', 'test', 'columnar', 'columnar_internal') +ORDER BY 1, 2; + type | identity +--------------------------------------------------------------------- + type | public.citus_schemas + type | public.citus_schemas[] + type | public.citus_tables + type | public.citus_tables[] + view | public.citus_schemas + view | public.citus_tables +(6 rows) + +-- DROP EXTENSION pre-created by the regression suite +DROP EXTENSION citus; +DROP EXTENSION citus_columnar; +\c +-- these tests switch between citus versions and call ddl's that require pg_dist_object to be created +SET citus.enable_metadata_sync TO 'false'; +SET citus.enable_version_checks TO 'false'; +SET columnar.enable_version_checks TO 'false'; +CREATE EXTENSION citus VERSION '8.0-1'; +ALTER EXTENSION citus UPDATE TO '8.0-2'; +ALTER EXTENSION citus UPDATE TO '8.0-3'; +ALTER EXTENSION citus UPDATE TO '8.0-4'; +ALTER EXTENSION citus UPDATE TO '8.0-5'; 
+ALTER EXTENSION citus UPDATE TO '8.0-6'; +ALTER EXTENSION citus UPDATE TO '8.0-7'; +ALTER EXTENSION citus UPDATE TO '8.0-8'; +ALTER EXTENSION citus UPDATE TO '8.0-9'; +ALTER EXTENSION citus UPDATE TO '8.0-10'; +ALTER EXTENSION citus UPDATE TO '8.0-11'; +ALTER EXTENSION citus UPDATE TO '8.0-12'; +ALTER EXTENSION citus UPDATE TO '8.0-13'; +ALTER EXTENSION citus UPDATE TO '8.1-1'; +ALTER EXTENSION citus UPDATE TO '8.2-1'; +ALTER EXTENSION citus UPDATE TO '8.2-2'; +ALTER EXTENSION citus UPDATE TO '8.2-3'; +ALTER EXTENSION citus UPDATE TO '8.2-4'; +ALTER EXTENSION citus UPDATE TO '8.3-1'; +ALTER EXTENSION citus UPDATE TO '9.0-1'; +ALTER EXTENSION citus UPDATE TO '9.0-2'; +ALTER EXTENSION citus UPDATE TO '9.1-1'; +ALTER EXTENSION citus UPDATE TO '9.2-1'; +ALTER EXTENSION citus UPDATE TO '9.2-2'; +-- Snapshot of state at 9.2-2 +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- + | event trigger citus_cascade_to_partition + | function alter_role_if_exists(text,text) boolean + | function array_cat_agg(anycompatiblearray) anycompatiblearray + | function assign_distributed_transaction_id(integer,bigint,timestamp with time zone) void + | function authinfo_valid(text) boolean + | function broadcast_intermediate_result(text,text) bigint + | function check_distributed_deadlocks() boolean + | function citus_add_rebalance_strategy(name,regproc,regproc,regproc,real,real) void + | function citus_blocking_pids(integer) integer[] + | function citus_create_restore_point(text) pg_lsn + | function citus_dist_stat_activity() SETOF record + | function citus_drop_trigger() event_trigger + | function citus_executor_name(integer) text + | function citus_extradata_container(internal) void + | function citus_finish_pg_upgrade() void + | function citus_internal.find_groupid_for_node(text,integer) integer + | function citus_internal.pg_dist_node_trigger_func() trigger + | function citus_internal.pg_dist_rebalance_strategy_enterprise_check() trigger + | function citus_internal.pg_dist_rebalance_strategy_trigger_func() trigger + | function citus_internal.pg_dist_shard_placement_trigger_func() trigger + | function citus_internal.refresh_isolation_tester_prepared_statement() void + | function citus_internal.replace_isolation_tester_func() void + | function citus_internal.restore_isolation_tester_func() void + | function citus_isolation_test_session_is_blocked(integer,integer[]) boolean + | function citus_json_concatenate(json,json) json + | function citus_json_concatenate_final(json) json + | function citus_jsonb_concatenate(jsonb,jsonb) jsonb + | function citus_jsonb_concatenate_final(jsonb) jsonb + | function citus_node_capacity_1(integer) real + | function citus_prepare_pg_upgrade() void + | function citus_query_stats() SETOF record + | function citus_relation_size(regclass) bigint + | function citus_server_id() uuid + | function citus_set_default_rebalance_strategy(text) void + | function citus_shard_allowed_on_node_true(bigint,integer) boolean + | function citus_shard_cost_1(bigint) real + | function citus_shard_cost_by_disk_size(bigint) real + | function citus_stat_statements() SETOF record + | function citus_stat_statements_reset() void + | function citus_table_is_visible(oid) boolean + | function citus_table_size(regclass) bigint + | function citus_text_send_as_jsonb(text) bytea + | function citus_total_relation_size(regclass) bigint + | function citus_truncate_trigger() trigger + | function 
citus_validate_rebalance_strategy_functions(regproc,regproc,regproc) void + | function citus_version() text + | function citus_worker_stat_activity() SETOF record + | function column_name_to_column(regclass,text) text + | function column_to_column_name(regclass,text) text + | function coord_combine_agg(oid,cstring,anyelement) anyelement + | function coord_combine_agg_ffunc(internal,oid,cstring,anyelement) anyelement + | function coord_combine_agg_sfunc(internal,oid,cstring,anyelement) internal + | function create_distributed_function(regprocedure,text,text) void + | function create_distributed_table(regclass,text,citus.distribution_type,text) void + | function create_intermediate_result(text,text) bigint + | function create_reference_table(regclass) void + | function distributed_tables_colocated(regclass,regclass) boolean + | function dump_global_wait_edges() SETOF record + | function dump_local_wait_edges() SETOF record + | function fetch_intermediate_results(text[],text,integer) bigint + | function get_all_active_transactions() SETOF record + | function get_colocated_shard_array(bigint) bigint[] + | function get_colocated_table_array(regclass) regclass[] + | function get_current_transaction_id() record + | function get_global_active_transactions() SETOF record + | function get_rebalance_progress() TABLE(sessionid integer, table_name regclass, shardid bigint, shard_size bigint, sourcename text, sourceport integer, targetname text, targetport integer, progress bigint) + | function get_rebalance_table_shards_plan(regclass,real,integer,bigint[],boolean,name) TABLE(table_name regclass, shardid bigint, shard_size bigint, sourcename text, sourceport integer, targetname text, targetport integer) + | function get_shard_id_for_distribution_column(regclass,"any") bigint + | function isolate_tenant_to_new_shard(regclass,"any",text) bigint + | function json_cat_agg(json) json + | function jsonb_cat_agg(jsonb) jsonb + | function lock_relation_if_exists(text,text) boolean + | function lock_shard_metadata(integer,bigint[]) void + | function lock_shard_resources(integer,bigint[]) void + | function mark_tables_colocated(regclass,regclass[]) void + | function master_activate_node(text,integer) integer + | function master_add_inactive_node(text,integer,integer,noderole,name) integer + | function master_add_node(text,integer,integer,noderole,name) integer + | function master_add_secondary_node(text,integer,text,integer,name) integer + | function master_append_table_to_shard(bigint,text,text,integer) real + | function master_apply_delete_command(text) integer + | function master_conninfo_cache_invalidate() trigger + | function master_copy_shard_placement(bigint,text,integer,text,integer,boolean,citus.shard_transfer_mode) void + | function master_create_distributed_table(regclass,text,citus.distribution_type) void + | function master_create_empty_shard(text) bigint + | function master_create_worker_shards(text,integer,integer) void + | function master_disable_node(text,integer) void + | function master_dist_local_group_cache_invalidate() trigger + | function master_dist_node_cache_invalidate() trigger + | function master_dist_object_cache_invalidate() trigger + | function master_dist_partition_cache_invalidate() trigger + | function master_dist_placement_cache_invalidate() trigger + | function master_dist_shard_cache_invalidate() trigger + | function master_drain_node(text,integer,citus.shard_transfer_mode,name) void + | function master_drop_all_shards(regclass,text,text) integer + | function 
master_drop_sequences(text[]) void + | function master_get_active_worker_nodes() SETOF record + | function master_get_new_placementid() bigint + | function master_get_new_shardid() bigint + | function master_get_table_ddl_events(text) SETOF text + | function master_get_table_metadata(text) record + | function master_modify_multiple_shards(text) integer + | function master_move_shard_placement(bigint,text,integer,text,integer,citus.shard_transfer_mode) void + | function master_remove_distributed_table_metadata_from_workers(regclass,text,text) void + | function master_remove_node(text,integer) void + | function master_remove_partition_metadata(regclass,text,text) void + | function master_run_on_worker(text[],integer[],text[],boolean) SETOF record + | function master_set_node_property(text,integer,text,boolean) void + | function master_unmark_object_distributed(oid,oid,integer) void + | function master_update_node(integer,text,integer,boolean,integer) void + | function master_update_shard_statistics(bigint) bigint + | function master_update_table_statistics(regclass) void + | function poolinfo_valid(text) boolean + | function read_intermediate_result(text,citus_copy_format) SETOF record + | function read_intermediate_results(text[],citus_copy_format) SETOF record + | function rebalance_table_shards(regclass,real,integer,bigint[],citus.shard_transfer_mode,boolean,name) void + | function recover_prepared_transactions() integer + | function relation_is_a_known_shard(regclass) boolean + | function replicate_table_shards(regclass,integer,integer,bigint[],citus.shard_transfer_mode) void + | function role_exists(name) boolean + | function run_command_on_colocated_placements(regclass,regclass,text,boolean) SETOF record + | function run_command_on_placements(regclass,text,boolean) SETOF record + | function run_command_on_shards(regclass,text,boolean) SETOF record + | function run_command_on_workers(text,boolean) SETOF record + | function shard_name(regclass,bigint) text + | function start_metadata_sync_to_node(text,integer) void + | function stop_metadata_sync_to_node(text,integer) void + | function task_tracker_assign_task(bigint,integer,text) void + | function task_tracker_cleanup_job(bigint) void + | function task_tracker_conninfo_cache_invalidate() trigger + | function task_tracker_task_status(bigint,integer) integer + | function upgrade_to_reference_table(regclass) void + | function worker_append_table_to_shard(text,text,text,integer) void + | function worker_apply_inter_shard_ddl_command(bigint,text,bigint,text,text) void + | function worker_apply_sequence_command(text) void + | function worker_apply_sequence_command(text,regtype) void + | function worker_apply_shard_ddl_command(bigint,text) void + | function worker_apply_shard_ddl_command(bigint,text,text) void + | function worker_cleanup_job_schema_cache() void + | function worker_create_or_replace_object(text) boolean + | function worker_create_schema(bigint,text) void + | function worker_create_truncate_trigger(regclass) void + | function worker_drop_distributed_table(text) void + | function worker_execute_sql_task(bigint,integer,text,boolean) bigint + | function worker_fetch_foreign_file(text,text,bigint,text[],integer[]) void + | function worker_fetch_partition_file(bigint,integer,integer,integer,text,integer) void + | function worker_hash("any") integer + | function worker_hash_partition_table(bigint,integer,text,text,oid,anyarray) void + | function worker_merge_files_and_run_query(bigint,integer,text,text) void + | function 
worker_merge_files_into_table(bigint,integer,text[],text[]) void + | function worker_partial_agg(oid,anyelement) cstring + | function worker_partial_agg_ffunc(internal) cstring + | function worker_partial_agg_sfunc(internal,oid,anyelement) internal + | function worker_partition_query_result(text,text,integer,citus.distribution_type,text[],text[],boolean) SETOF record + | function worker_range_partition_table(bigint,integer,text,text,oid,anyarray) void + | function worker_repartition_cleanup(bigint) void + | schema citus + | schema citus_internal + | sequence pg_dist_colocationid_seq + | sequence pg_dist_groupid_seq + | sequence pg_dist_node_nodeid_seq + | sequence pg_dist_placement_placementid_seq + | sequence pg_dist_shardid_seq + | table citus.pg_dist_object + | table pg_dist_authinfo + | table pg_dist_colocation + | table pg_dist_local_group + | table pg_dist_node + | table pg_dist_node_metadata + | table pg_dist_partition + | table pg_dist_placement + | table pg_dist_poolinfo + | table pg_dist_rebalance_strategy + | table pg_dist_shard + | table pg_dist_transaction + | type citus.distribution_type + | type citus.distribution_type[] + | type citus.pg_dist_object + | type citus.pg_dist_object[] + | type citus.shard_transfer_mode + | type citus.shard_transfer_mode[] + | type citus_copy_format + | type citus_copy_format[] + | type citus_dist_stat_activity + | type citus_dist_stat_activity[] + | type citus_lock_waits + | type citus_lock_waits[] + | type citus_shard_indexes_on_worker + | type citus_shard_indexes_on_worker[] + | type citus_shards_on_worker + | type citus_shards_on_worker[] + | type citus_stat_statements + | type citus_stat_statements[] + | type citus_worker_stat_activity + | type citus_worker_stat_activity[] + | type noderole + | type noderole[] + | type pg_dist_authinfo + | type pg_dist_authinfo[] + | type pg_dist_colocation + | type pg_dist_colocation[] + | type pg_dist_local_group + | type pg_dist_local_group[] + | type pg_dist_node + | type pg_dist_node[] + | type pg_dist_node_metadata + | type pg_dist_node_metadata[] + | type pg_dist_partition + | type pg_dist_partition[] + | type pg_dist_placement + | type pg_dist_placement[] + | type pg_dist_poolinfo + | type pg_dist_poolinfo[] + | type pg_dist_rebalance_strategy + | type pg_dist_rebalance_strategy[] + | type pg_dist_shard + | type pg_dist_shard[] + | type pg_dist_shard_placement + | type pg_dist_shard_placement[] + | type pg_dist_transaction + | type pg_dist_transaction[] + | view citus_dist_stat_activity + | view citus_lock_waits + | view citus_shard_indexes_on_worker + | view citus_shards_on_worker + | view citus_stat_statements + | view citus_worker_stat_activity + | view pg_dist_shard_placement +(228 rows) + +-- Test downgrade to 9.2-2 from 9.2-4 +ALTER EXTENSION citus UPDATE TO '9.2-4'; +ALTER EXTENSION citus UPDATE TO '9.2-2'; +-- Should be empty result since upgrade+downgrade should be a no-op +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +/* + * As we mistakenly bumped schema version to 9.3-1 in a bad release, we support + * updating citus schema from 9.3-1 to 9.2-4, but we do not support updates to 9.3-1. + * + * Hence the query below should fail. 
+ */ +ALTER EXTENSION citus UPDATE TO '9.3-1'; +ERROR: extension "citus" has no update path from version "9.2-2" to version "9.3-1" +ALTER EXTENSION citus UPDATE TO '9.2-4'; +-- Snapshot of state at 9.2-4 +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Test downgrade to 9.2-4 from 9.3-2 +ALTER EXTENSION citus UPDATE TO '9.3-2'; +ALTER EXTENSION citus UPDATE TO '9.2-4'; +-- Should be empty result since upgrade+downgrade should be a no-op +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Snapshot of state at 9.3-2 +ALTER EXTENSION citus UPDATE TO '9.3-2'; +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- + function citus_extradata_container(internal) void | + | function citus_extradata_container(internal) SETOF record + | function citus_remote_connection_stats() SETOF record + | function replicate_reference_tables() void + | function truncate_local_data_after_distributing_table(regclass) void + | function update_distributed_table_colocation(regclass,text) void + | function worker_create_or_alter_role(text,text,text) boolean +(7 rows) + +-- Test downgrade to 9.3-2 from 9.4-1 +ALTER EXTENSION citus UPDATE TO '9.4-1'; +ALTER EXTENSION citus UPDATE TO '9.3-2'; +-- Should be empty result since upgrade+downgrade should be a no-op +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Snapshot of state at 9.4-1 +ALTER EXTENSION citus UPDATE TO '9.4-1'; +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- + | function worker_last_saved_explain_analyze() TABLE(explain_analyze_output text, execution_duration double precision) + | function worker_save_query_explain_analyze(text,jsonb) SETOF record +(2 rows) + +-- Test upgrade paths for backported citus_pg_upgrade functions +ALTER EXTENSION citus UPDATE TO '9.4-2'; +ALTER EXTENSION citus UPDATE TO '9.4-1'; +-- Should be empty result, even though the downgrade doesn't undo the upgrade, the +-- function signature doesn't change, which is reflected here. 
+SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +ALTER EXTENSION citus UPDATE TO '9.4-2'; +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Snapshot of state at 9.4-1 +ALTER EXTENSION citus UPDATE TO '9.4-1'; +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Test upgrade paths for backported improvement of master_update_table_statistics function +ALTER EXTENSION citus UPDATE TO '9.4-3'; +-- should see the new source code with internal function citus_update_table_statistics +SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDER BY 1; + prosrc +--------------------------------------------------------------------- + citus_update_table_statistics +(1 row) + +ALTER EXTENSION citus UPDATE TO '9.4-2'; +-- should see the old source code +SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDER BY 1; + prosrc +--------------------------------------------------------------------- + + + DECLARE + + colocated_tables regclass[]; + + BEGIN + + SELECT get_colocated_table_array(relation) INTO colocated_tables;+ + PERFORM + + master_update_shard_statistics(shardid) + + FROM + + pg_dist_shard + + WHERE + + logicalrelid = ANY (colocated_tables); + + END; + + +(1 row) + +-- Should be empty result +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +ALTER EXTENSION citus UPDATE TO '9.4-3'; +-- should see the new source code with internal function citus_update_table_statistics +SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDER BY 1; + prosrc +--------------------------------------------------------------------- + citus_update_table_statistics +(1 row) + +-- Should be empty result +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Snapshot of state at 9.4-1 +ALTER EXTENSION citus UPDATE TO '9.4-1'; +-- should see the old source code +SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDER BY 1; + prosrc +--------------------------------------------------------------------- + + + DECLARE + + colocated_tables regclass[]; + + BEGIN + + SELECT get_colocated_table_array(relation) INTO colocated_tables;+ + PERFORM + + master_update_shard_statistics(shardid) + + FROM + + pg_dist_shard + + WHERE + + logicalrelid = ANY (colocated_tables); + + END; + + +(1 row) + +-- Should be empty result +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Test downgrade to 9.4-1 from 9.5-1 +ALTER EXTENSION citus UPDATE TO '9.5-1'; +-- TODO: This test should be moved to a valid downgrade testing suite where the downgrade is done, both on the schema and the binaries. 
Later changes in Citus made a C vs Schema discrepancy error here +-- BEGIN; +-- SET citus.enable_metadata_sync TO on; +-- SELECT master_add_node('localhost', :master_port, groupId=>0); +-- CREATE TABLE citus_local_table (a int); +-- SELECT create_citus_local_table('citus_local_table'); +-- RESET citus.enable_metadata_sync; +-- +-- -- downgrade from 9.5-1 to 9.4-1 should fail as we have a citus local table +-- ALTER EXTENSION citus UPDATE TO '9.4-1'; +-- ROLLBACK; +-- now we can downgrade as there is no citus local table +ALTER EXTENSION citus UPDATE TO '9.4-1'; +-- Should be empty result since upgrade+downgrade should be a no-op +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Snapshot of state at 9.5-1 +ALTER EXTENSION citus UPDATE TO '9.5-1'; +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- + function master_drop_sequences(text[]) void | + function task_tracker_assign_task(bigint,integer,text) void | + function task_tracker_cleanup_job(bigint) void | + function task_tracker_conninfo_cache_invalidate() trigger | + function task_tracker_task_status(bigint,integer) integer | + function worker_execute_sql_task(bigint,integer,text,boolean) bigint | + function worker_merge_files_and_run_query(bigint,integer,text,text) void | + | function create_citus_local_table(regclass) void + | function undistribute_table(regclass) void + | function worker_record_sequence_dependency(regclass,regclass,name) void +(10 rows) + +-- Test upgrade paths for backported citus_pg_upgrade functions +ALTER EXTENSION citus UPDATE TO '9.5-2'; +ALTER EXTENSION citus UPDATE TO '9.5-1'; +-- Should be empty result, even though the downgrade doesn't undo the upgrade, the +-- function signature doesn't change, which is reflected here. 
+SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +ALTER EXTENSION citus UPDATE TO '9.5-2'; +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Snapshot of state at 9.5-1 +ALTER EXTENSION citus UPDATE TO '9.5-1'; +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Test upgrade paths for backported improvement of master_update_table_statistics function +ALTER EXTENSION citus UPDATE TO '9.5-3'; +-- should see the new source code with internal function citus_update_table_statistics +SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDER BY 1; + prosrc +--------------------------------------------------------------------- + citus_update_table_statistics +(1 row) + +ALTER EXTENSION citus UPDATE TO '9.5-2'; +-- should see the old source code +SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDER BY 1; + prosrc +--------------------------------------------------------------------- + + + DECLARE + + colocated_tables regclass[]; + + BEGIN + + SELECT get_colocated_table_array(relation) INTO colocated_tables;+ + PERFORM + + master_update_shard_statistics(shardid) + + FROM + + pg_dist_shard + + WHERE + + logicalrelid = ANY (colocated_tables); + + END; + + +(1 row) + +-- Should be empty result +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +ALTER EXTENSION citus UPDATE TO '9.5-3'; +-- should see the new source code with internal function citus_update_table_statistics +SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDER BY 1; + prosrc +--------------------------------------------------------------------- + citus_update_table_statistics +(1 row) + +-- Should be empty result +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Snapshot of state at 9.5-1 +ALTER EXTENSION citus UPDATE TO '9.5-1'; +-- should see the old source code +SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDER BY 1; + prosrc +--------------------------------------------------------------------- + + + DECLARE + + colocated_tables regclass[]; + + BEGIN + + SELECT get_colocated_table_array(relation) INTO colocated_tables;+ + PERFORM + + master_update_shard_statistics(shardid) + + FROM + + pg_dist_shard + + WHERE + + logicalrelid = ANY (colocated_tables); + + END; + + +(1 row) + +-- Should be empty result +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- We removed the upgrade paths to 10.0-1, 10.0-2 and 10.0-3 due to a bug that blocked +-- upgrades to 10.0, Therefore we test upgrades to 10.0-4 instead +-- Test downgrade to 9.5-1 from 10.0-4 +ALTER EXTENSION citus UPDATE TO '10.0-4'; +ALTER EXTENSION citus UPDATE TO '9.5-1'; +-- Should be empty result since upgrade+downgrade should be a no-op +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | 
current_object +--------------------------------------------------------------------- +(0 rows) + +-- Snapshot of state at 10.0-4 +ALTER EXTENSION citus UPDATE TO '10.0-4'; +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- + function citus_total_relation_size(regclass) bigint | + function create_citus_local_table(regclass) void | + function mark_tables_colocated(regclass,regclass[]) void | + function master_conninfo_cache_invalidate() trigger | + function master_create_distributed_table(regclass,text,citus.distribution_type) void | + function master_create_worker_shards(text,integer,integer) void | + function master_dist_local_group_cache_invalidate() trigger | + function master_dist_node_cache_invalidate() trigger | + function master_dist_object_cache_invalidate() trigger | + function master_dist_partition_cache_invalidate() trigger | + function master_dist_placement_cache_invalidate() trigger | + function master_dist_shard_cache_invalidate() trigger | + function master_drop_all_shards(regclass,text,text) integer | + function master_modify_multiple_shards(text) integer | + function undistribute_table(regclass) void | + function upgrade_to_reference_table(regclass) void | + | access method columnar + | function alter_columnar_table_reset(regclass,boolean,boolean,boolean,boolean) void + | function alter_columnar_table_set(regclass,integer,integer,name,integer) void + | function alter_distributed_table(regclass,text,integer,text,boolean) void + | function alter_old_partitions_set_access_method(regclass,timestamp with time zone,name) + | function alter_table_set_access_method(regclass,text) void + | function citus_activate_node(text,integer) integer + | function citus_add_inactive_node(text,integer,integer,noderole,name) integer + | function citus_add_local_table_to_metadata(regclass,boolean) void + | function citus_add_node(text,integer,integer,noderole,name) integer + | function citus_add_secondary_node(text,integer,text,integer,name) integer + | function citus_conninfo_cache_invalidate() trigger + | function citus_copy_shard_placement(bigint,text,integer,text,integer,boolean,citus.shard_transfer_mode) void + | function citus_disable_node(text,integer) void + | function citus_dist_local_group_cache_invalidate() trigger + | function citus_dist_node_cache_invalidate() trigger + | function citus_dist_object_cache_invalidate() trigger + | function citus_dist_partition_cache_invalidate() trigger + | function citus_dist_placement_cache_invalidate() trigger + | function citus_dist_shard_cache_invalidate() trigger + | function citus_drain_node(text,integer,citus.shard_transfer_mode,name) void + | function citus_drop_all_shards(regclass,text,text) integer + | function citus_get_active_worker_nodes() SETOF record + | function citus_internal.columnar_ensure_objects_exist() void + | function citus_move_shard_placement(bigint,text,integer,text,integer,citus.shard_transfer_mode) void + | function citus_remove_node(text,integer) void + | function citus_set_coordinator_host(text,integer,noderole,name) void + | function citus_set_node_property(text,integer,text,boolean) void + | function citus_shard_sizes() SETOF record + | function citus_total_relation_size(regclass,boolean) bigint + | function citus_unmark_object_distributed(oid,oid,integer) void + | function citus_update_node(integer,text,integer,boolean,integer) void + | function citus_update_shard_statistics(bigint) bigint + | function 
citus_update_table_statistics(regclass) void + | function columnar.columnar_handler(internal) table_am_handler + | function fix_pre_citus10_partitioned_table_constraint_names() SETOF regclass + | function fix_pre_citus10_partitioned_table_constraint_names(regclass) void + | function notify_constraint_dropped() void + | function remove_local_tables_from_metadata() void + | function time_partition_range(regclass) record + | function undistribute_table(regclass,boolean) void + | function worker_change_sequence_dependency(regclass,regclass,regclass) void + | function worker_fix_pre_citus10_partitioned_table_constraint_names(regclass,bigint,text) void + | schema columnar + | sequence columnar.storageid_seq + | table columnar.chunk + | table columnar.chunk_group + | table columnar.options + | table columnar.stripe + | type citus_shards + | type citus_shards[] + | type columnar.chunk + | type columnar.chunk[] + | type columnar.chunk_group + | type columnar.chunk_group[] + | type columnar.options + | type columnar.options[] + | type columnar.stripe + | type columnar.stripe[] + | type public.citus_tables + | type public.citus_tables[] + | type time_partitions + | type time_partitions[] + | view citus_shards + | view public.citus_tables + | view time_partitions +(82 rows) + +-- check that we depend on the existence of public schema, and we can not drop it now +DROP SCHEMA public; +ERROR: cannot drop schema public because other objects depend on it +DETAIL: extension citus depends on schema public +HINT: Use DROP ... CASCADE to drop the dependent objects too. +-- verify that citus_tables view is on pg_catalog if public schema is absent. +ALTER EXTENSION citus UPDATE TO '9.5-1'; +DROP SCHEMA public; +ALTER EXTENSION citus UPDATE TO '10.0-4'; +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- + type public.citus_tables | + type public.citus_tables[] | + view public.citus_tables | + | type citus_tables + | type citus_tables[] + | view citus_tables +(6 rows) + +-- recreate public schema, and recreate citus_tables in the public schema by default +CREATE SCHEMA public; +-- In PG15, public schema is owned by pg_database_owner role +-- Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62 +SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15 +\gset +\if :server_version_ge_15 +ALTER SCHEMA public OWNER TO pg_database_owner; +\endif +GRANT ALL ON SCHEMA public TO public; +ALTER EXTENSION citus UPDATE TO '9.5-1'; +ALTER EXTENSION citus UPDATE TO '10.0-4'; +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- + type citus_tables | + type citus_tables[] | + view citus_tables | + | type public.citus_tables + | type public.citus_tables[] + | view public.citus_tables +(6 rows) + +-- not print "HINT: " to hide current lib version +\set VERBOSITY terse +CREATE TABLE columnar_table(a INT, b INT) USING columnar; +SET citus.enable_version_checks TO ON; +SET columnar.enable_version_checks TO ON; +-- all should throw an error due to version mismatch +VACUUM FULL columnar_table; +ERROR: loaded Citus library version differs from installed extension version +INSERT INTO columnar_table SELECT i FROM generate_series(1, 10) i; +ERROR: loaded Citus library version differs from installed extension version +VACUUM columnar_table; +WARNING: loaded Citus 
library version differs from installed extension version +TRUNCATE columnar_table; +ERROR: loaded Citus library version differs from installed extension version +DROP TABLE columnar_table; +ERROR: loaded Citus library version differs from installed extension version +CREATE INDEX ON columnar_table (a); +ERROR: loaded Citus library version differs from installed extension version +ALTER TABLE columnar_table SET(columnar.compression = pglz); +ERROR: loaded Citus library version differs from installed extension version +ALTER TABLE columnar_table RESET (columnar.compression); +ERROR: loaded Citus library version differs from installed extension version +INSERT INTO columnar_table SELECT * FROM columnar_table; +ERROR: loaded Citus library version differs from installed extension version +SELECT 1 FROM columnar_table; -- columnar custom scan +ERROR: loaded Citus library version differs from installed extension version +SET columnar.enable_custom_scan TO OFF; +SELECT 1 FROM columnar_table; -- seq scan +ERROR: loaded Citus library version differs from installed extension version +CREATE TABLE new_columnar_table (a int) USING columnar; +ERROR: loaded Citus library version differs from installed extension version +-- disable version checks for other sessions too +ALTER SYSTEM SET citus.enable_version_checks TO OFF; +SELECT pg_reload_conf(); + pg_reload_conf +--------------------------------------------------------------------- + t +(1 row) + +-- do cleanup for the rest of the tests +SET citus.enable_version_checks TO OFF; +SET columnar.enable_version_checks TO OFF; +DROP TABLE columnar_table; +RESET columnar.enable_custom_scan; +\set VERBOSITY default +-- Test downgrade to 10.0-4 from 10.1-1 +ALTER EXTENSION citus UPDATE TO '10.1-1'; +ALTER EXTENSION citus UPDATE TO '10.0-4'; +-- Should be empty result since upgrade+downgrade should be a no-op +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Snapshot of state at 10.1-1 +ALTER EXTENSION citus UPDATE TO '10.1-1'; +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- + function citus_add_rebalance_strategy(name,regproc,regproc,regproc,real,real) void | + function citus_internal.columnar_ensure_objects_exist() void | + function citus_internal.pg_dist_rebalance_strategy_enterprise_check() trigger | + function create_distributed_table(regclass,text,citus.distribution_type,text) void | + function get_rebalance_progress() TABLE(sessionid integer, table_name regclass, shardid bigint, shard_size bigint, sourcename text, sourceport integer, targetname text, targetport integer, progress bigint) | + function get_rebalance_table_shards_plan(regclass,real,integer,bigint[],boolean,name) TABLE(table_name regclass, shardid bigint, shard_size bigint, sourcename text, sourceport integer, targetname text, targetport integer) | + | function citus_add_rebalance_strategy(name,regproc,regproc,regproc,real,real,real) void + | function citus_cleanup_orphaned_shards() + | function citus_local_disk_space_stats() record + | function create_distributed_table(regclass,text,citus.distribution_type,text,integer) void + | function get_rebalance_progress() TABLE(sessionid integer, table_name regclass, shardid bigint, shard_size bigint, sourcename text, sourceport integer, targetname text, targetport integer, progress bigint, source_shard_size 
bigint, target_shard_size bigint) + | function get_rebalance_table_shards_plan(regclass,real,integer,bigint[],boolean,name,real) TABLE(table_name regclass, shardid bigint, shard_size bigint, sourcename text, sourceport integer, targetname text, targetport integer) + | function worker_partitioned_relation_size(regclass) bigint + | function worker_partitioned_relation_total_size(regclass) bigint + | function worker_partitioned_table_size(regclass) bigint +(15 rows) + +-- Test downgrade to 10.1-1 from 10.2-1 +ALTER EXTENSION citus UPDATE TO '10.2-1'; +ALTER EXTENSION citus UPDATE TO '10.1-1'; +-- Should be empty result since upgrade+downgrade should be a no-op +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Snapshot of state at 10.2-1 +ALTER EXTENSION citus UPDATE TO '10.2-1'; +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- + function citus_drop_all_shards(regclass,text,text) integer | + function stop_metadata_sync_to_node(text,integer) void | + | function citus_drop_all_shards(regclass,text,text,boolean) integer + | function citus_internal.downgrade_columnar_storage(regclass) void + | function citus_internal.upgrade_columnar_storage(regclass) void + | function citus_internal_add_partition_metadata(regclass,"char",text,integer,"char") void + | function citus_internal_add_placement_metadata(bigint,integer,bigint,integer,bigint) void + | function citus_internal_add_shard_metadata(regclass,bigint,"char",text,text) void + | function citus_internal_delete_shard_metadata(bigint) void + | function citus_internal_update_placement_metadata(bigint,integer,integer) void + | function citus_internal_update_relation_colocation(oid,integer) void + | function create_time_partitions(regclass,interval,timestamp with time zone,timestamp with time zone) boolean + | function drop_old_time_partitions(regclass,timestamp with time zone) + | function get_missing_time_partition_ranges(regclass,interval,timestamp with time zone,timestamp with time zone) TABLE(partition_name text, range_from_value text, range_to_value text) + | function stop_metadata_sync_to_node(text,integer,boolean) void + | function worker_nextval(regclass) integer +(16 rows) + +-- Test downgrade to 10.2-1 from 10.2-2 +ALTER EXTENSION citus UPDATE TO '10.2-2'; +ALTER EXTENSION citus UPDATE TO '10.2-1'; +-- Should be empty result since upgrade+downgrade should be a no-op +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Snapshot of state at 10.2-2 +ALTER EXTENSION citus UPDATE TO '10.2-2'; +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Test downgrade to 10.2-2 from 10.2-3 +ALTER EXTENSION citus UPDATE TO '10.2-3'; +ALTER EXTENSION citus UPDATE TO '10.2-2'; +-- Should be empty result since upgrade+downgrade should be a no-op +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Snapshot of state at 10.2-3 +ALTER EXTENSION citus UPDATE TO '10.2-3'; +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | 
current_object +--------------------------------------------------------------------- +(0 rows) + +-- Test downgrade to 10.2-3 from 10.2-4 +ALTER EXTENSION citus UPDATE TO '10.2-4'; +ALTER EXTENSION citus UPDATE TO '10.2-3'; +-- Make sure that we don't delete pg_depend entries added in +-- columnar--10.2-3--10.2-4.sql when downgrading to 10.2-3. +SELECT COUNT(*)=10 +FROM pg_depend +WHERE classid = 'pg_am'::regclass::oid AND + objid = (select oid from pg_am where amname = 'columnar') AND + objsubid = 0 AND + refclassid = 'pg_class'::regclass::oid AND + refobjsubid = 0 AND + deptype = 'n'; + ?column? +--------------------------------------------------------------------- + t +(1 row) + +-- Should be empty result since upgrade+downgrade should be a no-op +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Snapshot of state at 10.2-4 +ALTER EXTENSION citus UPDATE TO '10.2-4'; +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- + | function citus_internal.columnar_ensure_am_depends_catalog() void + | function fix_all_partition_shard_index_names() SETOF regclass + | function fix_partition_shard_index_names(regclass) void + | function worker_fix_partition_shard_index_names(regclass,text,text) void +(4 rows) + +-- There was a bug when downgrading to 10.2-2 from 10.2-4 +-- Test that we do not have any issues with this particular downgrade +ALTER EXTENSION citus UPDATE TO '10.2-2'; +ALTER EXTENSION citus UPDATE TO '10.2-4'; +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Test downgrade to 10.2-4 from 10.2-5 +ALTER EXTENSION citus UPDATE TO '10.2-5'; +ALTER EXTENSION citus UPDATE TO '10.2-4'; +-- Should be empty result since upgrade+downgrade should be a no-op +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Snapshot of state at 10.2-5 +ALTER EXTENSION citus UPDATE TO '10.2-5'; +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Make sure that we defined dependencies from all rel objects (tables, +-- indexes, sequences ..) to columnar table access method ... +SELECT pg_class.oid INTO columnar_schema_members +FROM pg_class, pg_namespace +WHERE pg_namespace.oid=pg_class.relnamespace AND + pg_namespace.nspname='columnar'; +SELECT refobjid INTO columnar_schema_members_pg_depend +FROM pg_depend +WHERE classid = 'pg_am'::regclass::oid AND + objid = (select oid from pg_am where amname = 'columnar') AND + objsubid = 0 AND + refclassid = 'pg_class'::regclass::oid AND + refobjsubid = 0 AND + deptype = 'n'; +-- ... , so this should be empty, +(TABLE columnar_schema_members EXCEPT TABLE columnar_schema_members_pg_depend) +UNION +(TABLE columnar_schema_members_pg_depend EXCEPT TABLE columnar_schema_members); + oid +--------------------------------------------------------------------- +(0 rows) + +-- ... , and both columnar_schema_members_pg_depend & columnar_schema_members +-- should have 10 entries. +SELECT COUNT(*)=10 FROM columnar_schema_members_pg_depend; + ?column? 
+--------------------------------------------------------------------- + t +(1 row) + +DROP TABLE columnar_schema_members, columnar_schema_members_pg_depend; +-- Use a synthetic pg_dist_shard record to show that upgrade fails +-- when there are cstore_fdw tables +INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage) VALUES ('pg_dist_shard', 1, 'c'); +ALTER EXTENSION citus UPDATE TO '11.0-1'; +ERROR: cstore_fdw tables are deprecated as of Citus 11.0 +HINT: Install Citus 10.2 and convert your cstore_fdw tables to the columnar access method before upgrading further +CONTEXT: PL/pgSQL function inline_code_block line XX at RAISE +DELETE FROM pg_dist_shard WHERE shardid = 1; +-- partitioned table count is tracked on Citus 11 upgrade +CREATE TABLE e_transactions(order_id varchar(255) NULL, transaction_id int) PARTITION BY LIST(transaction_id); +CREATE TABLE orders_2020_07_01 +PARTITION OF e_transactions FOR VALUES IN (1,2,3); +INSERT INTO pg_dist_partition VALUES ('e_transactions'::regclass,'h', '{VAR :varno 1 :varattno 1 :vartype 1043 :vartypmod 259 :varcollid 100 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1}', 7, 's'); +SELECT + (metadata->>'partitioned_citus_table_exists_pre_11')::boolean as partitioned_citus_table_exists_pre_11, + (metadata->>'partitioned_citus_table_exists_pre_11') IS NULL as is_null +FROM + pg_dist_node_metadata; + partitioned_citus_table_exists_pre_11 | is_null +--------------------------------------------------------------------- + | t +(1 row) + +-- Test downgrade to 10.2-5 from 11.0-1 +ALTER EXTENSION citus UPDATE TO '11.0-1'; +SELECT + (metadata->>'partitioned_citus_table_exists_pre_11')::boolean as partitioned_citus_table_exists_pre_11, + (metadata->>'partitioned_citus_table_exists_pre_11') IS NULL as is_null +FROM + pg_dist_node_metadata; + partitioned_citus_table_exists_pre_11 | is_null +--------------------------------------------------------------------- + t | f +(1 row) + +DELETE FROM pg_dist_partition WHERE logicalrelid = 'e_transactions'::regclass; +DROP TABLE e_transactions; +ALTER EXTENSION citus UPDATE TO '10.2-5'; +-- Should be empty result since upgrade+downgrade should be a no-op +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Snapshot of state at 11.0-1 +ALTER EXTENSION citus UPDATE TO '11.0-1'; +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- + function citus_disable_node(text,integer) void | + function citus_dist_stat_activity() SETOF record | + function citus_worker_stat_activity() SETOF record | + function create_distributed_function(regprocedure,text,text) void | + function master_append_table_to_shard(bigint,text,text,integer) real | + function master_apply_delete_command(text) integer | + function master_get_table_metadata(text) record | + function worker_partition_query_result(text,text,integer,citus.distribution_type,text[],text[],boolean) SETOF record | + table citus.pg_dist_object | + type citus.pg_dist_object | + type citus.pg_dist_object[] | + type citus_worker_stat_activity | + type citus_worker_stat_activity[] | + view citus_worker_stat_activity | + | function citus_backend_gpid() bigint + | function citus_calculate_gpid(integer,integer) bigint + | function citus_check_cluster_node_health() SETOF record + | function 
citus_check_connection_to_node(text,integer) boolean + | function citus_coordinator_nodeid() integer + | function citus_disable_node(text,integer,boolean) void + | function citus_finalize_upgrade_to_citus11(boolean) boolean + | function citus_internal_add_colocation_metadata(integer,integer,integer,regtype,oid) void + | function citus_internal_add_object_metadata(text,text[],text[],integer,integer,boolean) void + | function citus_internal_delete_colocation_metadata(integer) void + | function citus_internal_global_blocked_processes() SETOF record + | function citus_internal_local_blocked_processes() SETOF record + | function citus_nodeid_for_gpid(bigint) integer + | function citus_nodename_for_nodeid(integer) text + | function citus_nodeport_for_nodeid(integer) integer + | function citus_pid_for_gpid(bigint) integer + | function citus_run_local_command(text) void + | function citus_shard_indexes_on_worker() SETOF record + | function citus_shards_on_worker() SETOF record + | function citus_stat_activity() SETOF record + | function create_distributed_function(regprocedure,text,text,boolean) void + | function get_nodeid_for_groupid(integer) integer + | function pg_cancel_backend(bigint) boolean + | function pg_terminate_backend(bigint,bigint) boolean + | function run_command_on_all_nodes(text,boolean,boolean) SETOF record + | function worker_create_or_replace_object(text[]) boolean + | function worker_drop_sequence_dependency(text) void + | function worker_drop_shell_table(text) void + | function worker_partition_query_result(text,text,integer,citus.distribution_type,text[],text[],boolean,boolean,boolean) SETOF record + | table pg_dist_object + | type citus_stat_activity + | type citus_stat_activity[] + | type pg_dist_object + | type pg_dist_object[] + | view citus_stat_activity +(49 rows) + +-- Test downgrade to 11.0-1 from 11.0-2 +ALTER EXTENSION citus UPDATE TO '11.0-2'; +ALTER EXTENSION citus UPDATE TO '11.0-1'; +-- Should be empty result since upgrade+downgrade should be a no-op +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Snapshot of state at 11.0-2 +ALTER EXTENSION citus UPDATE TO '11.0-2'; +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- + | function citus_finish_citus_upgrade() + | function citus_is_coordinator() boolean + | function run_command_on_coordinator(text,boolean) SETOF record + | function start_metadata_sync_to_all_nodes() boolean +(4 rows) + +-- Test downgrade to 11.0-2 from 11.0-3 +ALTER EXTENSION citus UPDATE TO '11.0-3'; +ALTER EXTENSION citus UPDATE TO '11.0-2'; +-- Should be empty result since upgrade+downgrade should be a no-op +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Snapshot of state at 11.0-3 +ALTER EXTENSION citus UPDATE TO '11.0-3'; +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Test downgrade to 11.0-3 from 11.0-4 +ALTER EXTENSION citus UPDATE TO '11.0-4'; +ALTER EXTENSION citus UPDATE TO '11.0-3'; +-- Should be empty result since upgrade+downgrade should be a no-op +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | 
current_object +--------------------------------------------------------------------- +(0 rows) + +-- Snapshot of state at 11.0-4 +ALTER EXTENSION citus UPDATE TO '11.0-4'; +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Test downgrade to 11.0-4 from 11.1-1 +ALTER EXTENSION citus UPDATE TO '11.1-1'; +ALTER EXTENSION citus UPDATE TO '11.0-4'; +-- Should be empty result since upgrade+downgrade should be a no-op +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Test CREATE EXTENSION when Citus already exists but Citus_Columnar does not. Should skip +CREATE EXTENSION IF NOT EXISTS citus; +NOTICE: extension "citus" already exists, skipping +CREATE EXTENSION citus; +ERROR: extension "citus" already exists +-- Snapshot of state at 11.1-1 +ALTER EXTENSION citus UPDATE TO '11.1-1'; +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- + access method columnar | + function alter_columnar_table_reset(regclass,boolean,boolean,boolean,boolean) void | + function alter_columnar_table_set(regclass,integer,integer,name,integer) void | + function citus_copy_shard_placement(bigint,text,integer,text,integer,boolean,citus.shard_transfer_mode) void | + function citus_internal.columnar_ensure_am_depends_catalog() void | + function citus_internal.downgrade_columnar_storage(regclass) void | + function citus_internal.upgrade_columnar_storage(regclass) void | + function columnar.columnar_handler(internal) table_am_handler | + function get_rebalance_progress() TABLE(sessionid integer, table_name regclass, shardid bigint, shard_size bigint, sourcename text, sourceport integer, targetname text, targetport integer, progress bigint, source_shard_size bigint, target_shard_size bigint) | + function isolate_tenant_to_new_shard(regclass,"any",text) bigint | + function replicate_reference_tables() void | + function worker_cleanup_job_schema_cache() void | + function worker_create_schema(bigint,text) void | + function worker_fetch_foreign_file(text,text,bigint,text[],integer[]) void | + function worker_fetch_partition_file(bigint,integer,integer,integer,text,integer) void | + function worker_hash_partition_table(bigint,integer,text,text,oid,anyarray) void | + function worker_merge_files_into_table(bigint,integer,text[],text[]) void | + function worker_range_partition_table(bigint,integer,text,text,oid,anyarray) void | + function worker_repartition_cleanup(bigint) void | + schema columnar | + sequence columnar.storageid_seq | + table columnar.chunk | + table columnar.chunk_group | + table columnar.options | + table columnar.stripe | + type columnar.chunk | + type columnar.chunk[] | + type columnar.chunk_group | + type columnar.chunk_group[] | + type columnar.options | + type columnar.options[] | + type columnar.stripe | + type columnar.stripe[] | + | function citus_cleanup_orphaned_resources() + | function citus_copy_shard_placement(bigint,text,integer,text,integer,citus.shard_transfer_mode) void + | function citus_internal_delete_partition_metadata(regclass) void + | function citus_job_cancel(bigint) void + | function citus_job_wait(bigint,citus_job_status) void + | function citus_locks() SETOF record + | function 
citus_rebalance_start(name,boolean,citus.shard_transfer_mode) bigint + | function citus_rebalance_stop() void + | function citus_rebalance_wait() void + | function citus_split_shard_by_split_points(bigint,text[],integer[],citus.shard_transfer_mode) void + | function create_distributed_table_concurrently(regclass,text,citus.distribution_type,text,integer) void + | function get_rebalance_progress() TABLE(sessionid integer, table_name regclass, shardid bigint, shard_size bigint, sourcename text, sourceport integer, targetname text, targetport integer, progress bigint, source_shard_size bigint, target_shard_size bigint, operation_type text) + | function isolate_tenant_to_new_shard(regclass,"any",text,citus.shard_transfer_mode) bigint + | function replicate_reference_tables(citus.shard_transfer_mode) void + | function worker_copy_table_to_node(regclass,integer) void + | function worker_split_copy(bigint,text,split_copy_info[]) void + | function worker_split_shard_release_dsm() void + | function worker_split_shard_replication_setup(split_shard_info[]) SETOF replication_slot_info + | sequence pg_dist_background_job_job_id_seq + | sequence pg_dist_background_task_task_id_seq + | sequence pg_dist_cleanup_recordid_seq + | sequence pg_dist_operationid_seq + | table pg_dist_background_job + | table pg_dist_background_task + | table pg_dist_background_task_depend + | table pg_dist_cleanup + | type citus_job_status + | type citus_job_status[] + | type citus_locks + | type citus_locks[] + | type citus_task_status + | type citus_task_status[] + | type pg_dist_background_job + | type pg_dist_background_job[] + | type pg_dist_background_task + | type pg_dist_background_task[] + | type pg_dist_background_task_depend + | type pg_dist_background_task_depend[] + | type pg_dist_cleanup + | type pg_dist_cleanup[] + | type replication_slot_info + | type replication_slot_info[] + | type split_copy_info + | type split_copy_info[] + | type split_shard_info + | type split_shard_info[] + | view citus_locks +(80 rows) + +-- Test downgrade to 11.1-1 from 11.2-1 +ALTER EXTENSION citus UPDATE TO '11.2-1'; +ALTER EXTENSION citus UPDATE TO '11.1-1'; +-- Should be empty result since upgrade+downgrade should be a no-op +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Snapshot of state at 11.2-1 +ALTER EXTENSION citus UPDATE TO '11.2-1'; +ALTER EXTENSION citus_columnar UPDATE TO '11.2-1'; +-- Make sure that we defined dependencies from all rel objects (tables, +-- indexes, sequences ..) to columnar table access method ... +SELECT pg_class.oid INTO columnar_schema_members +FROM pg_class, pg_namespace +WHERE pg_namespace.oid=pg_class.relnamespace AND + pg_namespace.nspname='columnar_internal' AND + pg_class.relname NOT IN ('chunk_group_pkey', + 'chunk_pkey', + 'options_pkey', + 'stripe_first_row_number_idx', + 'stripe_pkey'); +SELECT refobjid INTO columnar_schema_members_pg_depend +FROM pg_depend +WHERE classid = 'pg_am'::regclass::oid AND + objid = (select oid from pg_am where amname = 'columnar') AND + objsubid = 0 AND + refclassid = 'pg_class'::regclass::oid AND + refobjsubid = 0 AND + deptype = 'n'; +-- ... , so this should be empty, +(TABLE columnar_schema_members EXCEPT TABLE columnar_schema_members_pg_depend) +UNION +(TABLE columnar_schema_members_pg_depend EXCEPT TABLE columnar_schema_members); + oid +--------------------------------------------------------------------- +(0 rows) + +-- ... 
, and both columnar_schema_members_pg_depend & columnar_schema_members +-- should have 5 entries. +SELECT COUNT(*)=5 FROM columnar_schema_members_pg_depend; + ?column? +--------------------------------------------------------------------- + t +(1 row) + +DROP TABLE columnar_schema_members, columnar_schema_members_pg_depend; +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- + function get_rebalance_progress() TABLE(sessionid integer, table_name regclass, shardid bigint, shard_size bigint, sourcename text, sourceport integer, targetname text, targetport integer, progress bigint, source_shard_size bigint, target_shard_size bigint, operation_type text) | + function worker_append_table_to_shard(text,text,text,integer) void | + function worker_split_shard_replication_setup(split_shard_info[]) SETOF replication_slot_info | + | function citus_copy_shard_placement(bigint,integer,integer,citus.shard_transfer_mode) void + | function citus_get_node_clock() cluster_clock + | function citus_get_transaction_clock() cluster_clock + | function citus_internal_add_placement_metadata(bigint,bigint,integer,bigint) void + | function citus_internal_adjust_local_clock_to_remote(cluster_clock) void + | function citus_is_clock_after(cluster_clock,cluster_clock) boolean + | function citus_job_list() TABLE(job_id bigint, state citus_job_status, job_type name, description text, started_at timestamp with time zone, finished_at timestamp with time zone) + | function citus_job_status(bigint,boolean) TABLE(job_id bigint, state citus_job_status, job_type name, description text, started_at timestamp with time zone, finished_at timestamp with time zone, details jsonb) + | function citus_move_shard_placement(bigint,integer,integer,citus.shard_transfer_mode) void + | function citus_rebalance_status(boolean) TABLE(job_id bigint, state citus_job_status, job_type name, description text, started_at timestamp with time zone, finished_at timestamp with time zone, details jsonb) + | function citus_task_wait(bigint,citus_task_status) void + | function cluster_clock_cmp(cluster_clock,cluster_clock) integer + | function cluster_clock_eq(cluster_clock,cluster_clock) boolean + | function cluster_clock_ge(cluster_clock,cluster_clock) boolean + | function cluster_clock_gt(cluster_clock,cluster_clock) boolean + | function cluster_clock_in(cstring) cluster_clock + | function cluster_clock_le(cluster_clock,cluster_clock) boolean + | function cluster_clock_logical(cluster_clock) bigint + | function cluster_clock_lt(cluster_clock,cluster_clock) boolean + | function cluster_clock_ne(cluster_clock,cluster_clock) boolean + | function cluster_clock_out(cluster_clock) cstring + | function cluster_clock_recv(internal) cluster_clock + | function cluster_clock_send(cluster_clock) bytea + | function get_rebalance_progress() TABLE(sessionid integer, table_name regclass, shardid bigint, shard_size bigint, sourcename text, sourceport integer, targetname text, targetport integer, progress bigint, source_shard_size bigint, target_shard_size bigint, operation_type text, source_lsn pg_lsn, target_lsn pg_lsn, status text) + | function worker_split_shard_replication_setup(split_shard_info[],bigint) SETOF replication_slot_info + | operator <(cluster_clock,cluster_clock) + | operator <=(cluster_clock,cluster_clock) + | operator <>(cluster_clock,cluster_clock) + | operator =(cluster_clock,cluster_clock) + | operator >(cluster_clock,cluster_clock) + | 
operator >=(cluster_clock,cluster_clock) + | operator class cluster_clock_ops for access method btree + | operator family cluster_clock_ops for access method btree + | sequence pg_dist_clock_logical_seq + | type cluster_clock + | type cluster_clock[] +(39 rows) + +-- Test downgrade to 11.2-1 from 11.2-2 +ALTER EXTENSION citus UPDATE TO '11.2-2'; +ALTER EXTENSION citus UPDATE TO '11.2-1'; +-- Should be empty result since upgrade+downgrade should be a no-op +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Snapshot of state at 11.2-2 +ALTER EXTENSION citus UPDATE TO '11.2-2'; +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- + | function worker_adjust_identity_column_seq_ranges(regclass) void +(1 row) + +-- Test downgrade to 11.2-2 from 11.3-1 +ALTER EXTENSION citus UPDATE TO '11.3-1'; +ALTER EXTENSION citus UPDATE TO '11.2-2'; +-- Should be empty result since upgrade+downgrade should be a no-op +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Snapshot of state at 11.3-1 +ALTER EXTENSION citus UPDATE TO '11.3-1'; +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- + | function citus_internal_is_replication_origin_tracking_active() boolean + | function citus_internal_mark_node_not_synced(integer,integer) void + | function citus_internal_start_replication_origin_tracking() void + | function citus_internal_stop_replication_origin_tracking() void + | function citus_stat_tenants(boolean) SETOF record + | function citus_stat_tenants_local(boolean) SETOF record + | function citus_stat_tenants_local_reset() void + | function citus_stat_tenants_reset() void + | function worker_drop_all_shell_tables(boolean) + | type citus_stat_tenants + | type citus_stat_tenants[] + | type citus_stat_tenants_local + | type citus_stat_tenants_local[] + | view citus_stat_tenants + | view citus_stat_tenants_local +(15 rows) + +-- Test downgrade to 11.3-1 from 11.3-2 +ALTER EXTENSION citus UPDATE TO '11.3-2'; +ALTER EXTENSION citus UPDATE TO '11.3-1'; +-- Should be empty result since upgrade+downgrade should be a no-op +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Snapshot of state at 11.3-2 +ALTER EXTENSION citus UPDATE TO '11.3-2'; +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Test downgrade to 11.3-2 from 12.0-1 +ALTER EXTENSION citus UPDATE TO '12.0-1'; +CREATE TABLE null_shard_key (x int, y int); +SET citus.shard_replication_factor TO 1; +SELECT create_distributed_table('null_shard_key', null); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- Show that we cannot downgrade to 11.3-2 becuase the cluster has a +-- distributed table with single-shard. +ALTER EXTENSION citus UPDATE TO '11.3-2'; +ERROR: cannot downgrade Citus because there are distributed tables without a shard key. 
+DETAIL: To downgrade Citus to an older version, you should first convert those tables to Postgres tables by executing SELECT undistribute_table("%s"). +HINT: You can find the distributed tables without a shard key in the cluster by using the following query: "SELECT * FROM citus_tables WHERE distribution_column = '' AND colocation_id > 0". +CONTEXT: PL/pgSQL function inline_code_block line XX at RAISE +DROP TABLE null_shard_key; +ALTER EXTENSION citus UPDATE TO '11.3-2'; +-- Should be empty result since upgrade+downgrade should be a no-op +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Snapshot of state at 12.0-1 +ALTER EXTENSION citus UPDATE TO '12.0-1'; +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- + | function citus_internal_add_tenant_schema(oid,integer) void + | function citus_internal_delete_tenant_schema(oid) void + | function citus_internal_unregister_tenant_schema_globally(oid,text) void + | function citus_schema_distribute(regnamespace) void + | function citus_schema_undistribute(regnamespace) void + | function citus_stat_tenants_local_internal(boolean) SETOF record + | table pg_dist_schema + | type pg_dist_schema + | type pg_dist_schema[] + | type public.citus_schemas + | type public.citus_schemas[] + | view public.citus_schemas +(12 rows) + +-- Test downgrade to 12.0-1 from 12.1-1 +ALTER EXTENSION citus UPDATE TO '12.1-1'; +ALTER EXTENSION citus UPDATE TO '12.0-1'; +-- Should be empty result since upgrade+downgrade should be a no-op +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Snapshot of state at 12.1-1 +ALTER EXTENSION citus UPDATE TO '12.1-1'; +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- + | function citus_internal_delete_placement_metadata(bigint) void + | function citus_internal_update_none_dist_table_metadata(oid,"char",bigint,boolean) void + | function citus_pause_node_within_txn(integer,boolean,integer) void + | function citus_schema_move(regnamespace,integer,citus.shard_transfer_mode) void + | function citus_schema_move(regnamespace,text,integer,citus.shard_transfer_mode) void +(5 rows) + +-- Test downgrade to 12.1-1 from 12.2-1 +ALTER EXTENSION citus UPDATE TO '12.2-1'; +ALTER EXTENSION citus UPDATE TO '12.1-1'; +-- Should be empty result since upgrade+downgrade should be a no-op +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Snapshot of state at 12.2-1 +ALTER EXTENSION citus UPDATE TO '12.2-1'; +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- + function citus_unmark_object_distributed(oid,oid,integer) void | + | function citus_internal.acquire_citus_advisory_object_class_lock(integer,cstring) void + | function citus_internal.add_colocation_metadata(integer,integer,integer,regtype,oid) void + | function citus_internal.add_object_metadata(text,text[],text[],integer,integer,boolean) void + | function 
citus_internal.add_partition_metadata(regclass,"char",text,integer,"char") void + | function citus_internal.add_placement_metadata(bigint,bigint,integer,bigint) void + | function citus_internal.add_shard_metadata(regclass,bigint,"char",text,text) void + | function citus_internal.add_tenant_schema(oid,integer) void + | function citus_internal.adjust_local_clock_to_remote(cluster_clock) void + | function citus_internal.commit_management_command_2pc() void + | function citus_internal.database_command(text) void + | function citus_internal.delete_colocation_metadata(integer) void + | function citus_internal.delete_partition_metadata(regclass) void + | function citus_internal.delete_placement_metadata(bigint) void + | function citus_internal.delete_shard_metadata(bigint) void + | function citus_internal.delete_tenant_schema(oid) void + | function citus_internal.execute_command_on_remote_nodes_as_user(text,text) void + | function citus_internal.global_blocked_processes() SETOF record + | function citus_internal.is_replication_origin_tracking_active() boolean + | function citus_internal.local_blocked_processes() SETOF record + | function citus_internal.mark_node_not_synced(integer,integer) void + | function citus_internal.mark_object_distributed(oid,text,oid,text) void + | function citus_internal.start_management_transaction(xid8) void + | function citus_internal.start_replication_origin_tracking() void + | function citus_internal.stop_replication_origin_tracking() void + | function citus_internal.unregister_tenant_schema_globally(oid,text) void + | function citus_internal.update_none_dist_table_metadata(oid,"char",bigint,boolean) void + | function citus_internal.update_placement_metadata(bigint,integer,integer) void + | function citus_internal.update_relation_colocation(oid,integer) void + | function citus_unmark_object_distributed(oid,oid,integer,boolean) void +(30 rows) + +DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff; +-- show running version +SHOW citus.version; + citus.version +--------------------------------------------------------------------- + 12.2devel +(1 row) + +-- ensure no unexpected objects were created outside pg_catalog +SELECT pgio.type, pgio.identity +FROM pg_depend AS pgd, + pg_extension AS pge, + LATERAL pg_identify_object(pgd.classid, pgd.objid, pgd.objsubid) AS pgio +WHERE pgd.refclassid = 'pg_extension'::regclass AND + pgd.refobjid = pge.oid AND + pge.extname = 'citus' AND + pgio.schema NOT IN ('pg_catalog', 'citus', 'citus_internal', 'test', 'columnar', 'columnar_internal') +ORDER BY 1, 2; + type | identity +--------------------------------------------------------------------- + type | public.citus_schemas + type | public.citus_schemas[] + type | public.citus_tables + type | public.citus_tables[] + view | public.citus_schemas + view | public.citus_tables +(6 rows) + +-- see incompatible version errors out +RESET citus.enable_version_checks; +RESET columnar.enable_version_checks; +-- reset version check config for other sessions too +ALTER SYSTEM RESET citus.enable_version_checks; +SELECT pg_reload_conf(); + pg_reload_conf +--------------------------------------------------------------------- + t +(1 row) + +DROP EXTENSION citus; +DROP EXTENSION citus_columnar; +CREATE EXTENSION citus VERSION '8.0-1'; +ERROR: specified version incompatible with loaded Citus library +DETAIL: Loaded library requires 12.2, but 8.0-1 was specified. +HINT: If a newer library is present, restart the database and try the command again. 
+-- Test non-distributed queries work even in version mismatch +SET citus.enable_version_checks TO 'false'; +SET columnar.enable_version_checks TO 'false'; +CREATE EXTENSION citus VERSION '8.1-1'; +SET citus.enable_version_checks TO 'true'; +SET columnar.enable_version_checks TO 'true'; +-- Test CREATE TABLE +CREATE TABLE version_mismatch_table(column1 int); +-- Test COPY +\copy version_mismatch_table FROM STDIN; +-- Test INSERT +INSERT INTO version_mismatch_table(column1) VALUES(5); +-- Test SELECT +SELECT * FROM version_mismatch_table ORDER BY column1; + column1 +--------------------------------------------------------------------- + 0 + 1 + 2 + 3 + 4 + 5 +(6 rows) + +-- Test SELECT from pg_catalog +SELECT d.datname as "Name", + pg_catalog.pg_get_userbyid(d.datdba) as "Owner", + pg_catalog.array_to_string(d.datacl, E'\n') AS "Access privileges" +FROM pg_catalog.pg_database d +ORDER BY 1; + Name | Owner | Access privileges +--------------------------------------------------------------------- + postgres | postgres | + regression | postgres | + template0 | postgres | =c/postgres + + | | postgres=CTc/postgres + template1 | postgres | =c/postgres + + | | postgres=CTc/postgres +(4 rows) + +-- We should not distribute table in version mistmatch +SELECT create_distributed_table('version_mismatch_table', 'column1'); +ERROR: loaded Citus library version differs from installed extension version +DETAIL: Loaded library requires 12.2, but the installed extension version is 8.1-1. +HINT: Run ALTER EXTENSION citus UPDATE and try again. +-- This function will cause fail in next ALTER EXTENSION +CREATE OR REPLACE FUNCTION pg_catalog.relation_is_a_known_shard(regclass) +RETURNS void LANGUAGE plpgsql +AS $function$ +BEGIN +END; +$function$; +ERROR: cannot change return type of existing function +HINT: Use DROP FUNCTION relation_is_a_known_shard(regclass) first. +SET citus.enable_version_checks TO 'false'; +SET columnar.enable_version_checks TO 'false'; +-- This will fail because of previous function declaration +ALTER EXTENSION citus UPDATE TO '9.1-1'; +-- We can DROP problematic function and continue ALTER EXTENSION even when version checks are on +SET citus.enable_version_checks TO 'true'; +SET columnar.enable_version_checks TO 'true'; +DROP FUNCTION pg_catalog.relation_is_a_known_shard(regclass); +ERROR: cannot drop function relation_is_a_known_shard(regclass) because extension citus requires it +HINT: You can drop extension citus instead. 
+SET citus.enable_version_checks TO 'false'; +SET columnar.enable_version_checks TO 'false'; +ALTER EXTENSION citus UPDATE TO '9.1-1'; +NOTICE: version "9.1-1" of extension "citus" is already installed +-- Test updating to the latest version without specifying the version number +ALTER EXTENSION citus UPDATE; +-- re-create in newest version +DROP EXTENSION citus; +DROP EXTENSION citus_columnar; +\c +CREATE EXTENSION citus; +-- test cache invalidation in workers +\c - - - :worker_1_port +DROP EXTENSION citus; +DROP EXTENSION citus_columnar; +SET citus.enable_version_checks TO 'false'; +SET columnar.enable_version_checks TO 'false'; +CREATE EXTENSION citus VERSION '8.0-1'; +SET citus.enable_version_checks TO 'true'; +SET columnar.enable_version_checks TO 'true'; +-- during ALTER EXTENSION, we should invalidate the cache +ALTER EXTENSION citus UPDATE; +-- if cache is invalidated succesfull, this \d should work without any problem +\d + List of relations + Schema | Name | Type | Owner +--------------------------------------------------------------------- + public | citus_schemas | view | postgres + public | citus_tables | view | postgres +(2 rows) + +\c - - - :master_port +-- test https://github.com/citusdata/citus/issues/3409 +CREATE USER testuser2 SUPERUSER; +SET ROLE testuser2; +DROP EXTENSION Citus; +-- Loop until we see there's no maintenance daemon running +DO $$begin + for i in 0 .. 100 loop + if i = 100 then raise 'Waited too long'; end if; + PERFORM pg_stat_clear_snapshot(); + perform * from pg_stat_activity where application_name = 'Citus Maintenance Daemon'; + if not found then exit; end if; + perform pg_sleep(0.1); + end loop; +end$$; +SELECT datid, datname, usename FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon'; + datid | datname | usename +--------------------------------------------------------------------- +(0 rows) + +CREATE EXTENSION Citus; +-- Loop until we there's a maintenance daemon running +DO $$begin + for i in 0 .. 100 loop + if i = 100 then raise 'Waited too long'; end if; + PERFORM pg_stat_clear_snapshot(); + perform * from pg_stat_activity where application_name = 'Citus Maintenance Daemon'; + if found then exit; end if; + perform pg_sleep(0.1); + end loop; +end$$; +SELECT datid, datname, usename FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon'; + datid | datname | usename +--------------------------------------------------------------------- + 16384 | regression | testuser2 +(1 row) + +RESET ROLE; +-- check that maintenance daemon gets (re-)started for the right user +DROP EXTENSION citus; +CREATE USER testuser SUPERUSER; +SET ROLE testuser; +CREATE EXTENSION citus; +SELECT datname, current_database(), + usename, (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus') +FROM test.maintenance_worker(); + datname | current_database | usename | extowner +--------------------------------------------------------------------- + regression | regression | testuser | testuser +(1 row) + +-- and recreate as the right owner +RESET ROLE; +DROP EXTENSION citus; +CREATE EXTENSION citus; +-- Check that maintenance daemon can also be started in another database +CREATE DATABASE another; +NOTICE: Citus partially supports CREATE DATABASE for distributed databases +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. 
+\c another +CREATE EXTENSION citus; +CREATE SCHEMA test; +:create_function_test_maintenance_worker +-- see that the daemon started +SELECT datname, current_database(), + usename, (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus') +FROM test.maintenance_worker(); + datname | current_database | usename | extowner +--------------------------------------------------------------------- + another | another | postgres | postgres +(1 row) + +-- Test that database with active worker can be dropped. +\c regression +CREATE SCHEMA test_daemon; +-- we create a similar function on the regression database +-- note that this function checks for the existence of the daemon +-- when not found, returns true else tries for 5 times and +-- returns false +CREATE OR REPLACE FUNCTION test_daemon.maintenance_daemon_died(p_dbname text) + RETURNS boolean + LANGUAGE plpgsql +AS $$ +DECLARE + activity record; +BEGIN + PERFORM pg_stat_clear_snapshot(); + SELECT * INTO activity FROM pg_stat_activity + WHERE application_name = 'Citus Maintenance Daemon' AND datname = p_dbname; + IF activity.pid IS NULL THEN + RETURN true; + ELSE + RETURN false; + END IF; +END; +$$; +-- drop the database and see that the daemon is dead +DROP DATABASE another; +SELECT + * +FROM + test_daemon.maintenance_daemon_died('another'); + maintenance_daemon_died +--------------------------------------------------------------------- + t +(1 row) + +-- we don't need the schema and the function anymore +DROP SCHEMA test_daemon CASCADE; +NOTICE: drop cascades to function test_daemon.maintenance_daemon_died(text) +-- verify citus does not crash while creating a table when run against an older worker +-- create_distributed_table piggybacks multiple commands into single one, if one worker +-- did not have the required UDF it should fail instead of crash. +-- create a test database, configure citus with single node +CREATE DATABASE another; +NOTICE: Citus partially supports CREATE DATABASE for distributed databases +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. +\c - - - :worker_1_port +CREATE DATABASE another; +NOTICE: Citus partially supports CREATE DATABASE for distributed databases +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. 
+\c - - - :master_port +\c another +CREATE EXTENSION citus; +\c - - - :worker_1_port +CREATE EXTENSION citus; +\c - - - :master_port +SELECT FROM master_add_node('localhost', :worker_1_port); +-- +(1 row) + +\c - - - :worker_1_port +ALTER FUNCTION assign_distributed_transaction_id(initiator_node_identifier integer, transaction_number bigint, transaction_stamp timestamp with time zone) +RENAME TO dummy_assign_function; +\c - - - :master_port +SET citus.shard_replication_factor to 1; +-- create_distributed_table command should fail +CREATE TABLE t1(a int, b int); +SET client_min_messages TO ERROR; +DO $$ +BEGIN + BEGIN + SELECT create_distributed_table('t1', 'a'); + EXCEPTION WHEN OTHERS THEN + RAISE 'create distributed table failed'; + END; +END; +$$; +ERROR: create distributed table failed +CONTEXT: PL/pgSQL function inline_code_block line XX at RAISE +\c regression +\c - - - :master_port +DROP DATABASE another; +\c - - - :worker_1_port +DROP DATABASE another; +\c - - - :master_port +-- only the regression database should have a maintenance daemon +SELECT count(*) FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon'; + count +--------------------------------------------------------------------- + 1 +(1 row) + +-- recreate the extension immediately after the maintenancae daemon errors +SELECT pg_cancel_backend(pid) FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon'; + pg_cancel_backend +--------------------------------------------------------------------- + t +(1 row) + +DROP EXTENSION citus; +CREATE EXTENSION citus; +-- wait for maintenance daemon restart +SELECT datname, current_database(), + usename, (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus') +FROM test.maintenance_worker(); + datname | current_database | usename | extowner +--------------------------------------------------------------------- + regression | regression | postgres | postgres +(1 row) + +-- confirm that there is only one maintenance daemon +SELECT count(*) FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon'; + count +--------------------------------------------------------------------- + 1 +(1 row) + +-- kill the maintenance daemon +SELECT pg_cancel_backend(pid) FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon'; + pg_cancel_backend +--------------------------------------------------------------------- + t +(1 row) + +-- reconnect +\c - - - :master_port +-- run something that goes through planner hook and therefore kicks of maintenance daemon +SELECT 1; + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +-- wait for maintenance daemon restart +SELECT datname, current_database(), + usename, (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus') +FROM test.maintenance_worker(); + datname | current_database | usename | extowner +--------------------------------------------------------------------- + regression | regression | postgres | postgres +(1 row) + +-- confirm that there is only one maintenance daemon +SELECT count(*) FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon'; + count +--------------------------------------------------------------------- + 1 +(1 row) + +-- confirm that we can create a distributed table concurrently on an empty node +DROP EXTENSION citus; +CREATE EXTENSION citus; +CREATE TABLE test (x int, y int); +INSERT INTO test VALUES (1,2); +SET citus.shard_replication_factor TO 1; +SELECT create_distributed_table_concurrently('test','x'); +NOTICE: relation test does not have a REPLICA IDENTITY or PRIMARY KEY +DETAIL: UPDATE and DELETE commands on the relation will error out during create_distributed_table_concurrently unless there is a REPLICA IDENTITY or PRIMARY KEY. INSERT commands will still work. + create_distributed_table_concurrently +--------------------------------------------------------------------- + +(1 row) + +DROP TABLE test; +TRUNCATE pg_dist_node; +-- confirm that we can create a distributed table on an empty node +CREATE TABLE test (x int, y int); +INSERT INTO test VALUES (1,2); +SET citus.shard_replication_factor TO 1; +SELECT create_distributed_table('test','x'); +NOTICE: Copying data from local table... +NOTICE: copying the data has completed +DETAIL: The local data in the table is no longer visible, but is still on disk. +HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.test$$) + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +DROP TABLE test; +TRUNCATE pg_dist_node; +-- confirm that we can create a single-shard table on an empty node +CREATE TABLE test (x int, y int); +INSERT INTO test VALUES (1,2); +SET citus.shard_replication_factor TO 1; +SELECT create_distributed_table('test', null, colocate_with=>'none', distribution_type=>null); +NOTICE: Copying data from local table... +NOTICE: copying the data has completed +DETAIL: The local data in the table is no longer visible, but is still on disk. 
+HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.test$$) + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- and make sure that we can't remove the coordinator due to "test" +SELECT citus_remove_node('localhost', :master_port); +ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx +DETAIL: One of the table(s) that prevents the operation complete successfully is public.test +HINT: To proceed, either drop the tables or use undistribute_table() function to convert them to local tables +DROP TABLE test; +-- and now we should be able to remove the coordinator +SELECT citus_remove_node('localhost', :master_port); + citus_remove_node +--------------------------------------------------------------------- + +(1 row) + +-- confirm that we can create a tenant schema / table on an empty node +SET citus.enable_schema_based_sharding TO ON; +CREATE SCHEMA tenant_schema; +CREATE TABLE tenant_schema.test(x int, y int); +SELECT colocationid = ( + SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'tenant_schema.test'::regclass +) +FROM pg_dist_schema +WHERE schemaid::regnamespace::text = 'tenant_schema'; + ?column? +--------------------------------------------------------------------- + t +(1 row) + +-- and make sure that we can't remove the coordinator due to "test" +SELECT citus_remove_node('localhost', :master_port); +ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx +DETAIL: One of the table(s) that prevents the operation complete successfully is tenant_schema.test +HINT: To proceed, either drop the tables or use undistribute_table() function to convert them to local tables +BEGIN; + SET LOCAL client_min_messages TO WARNING; + DROP SCHEMA tenant_schema CASCADE; +COMMIT; +-- and now we should be able to remove the coordinator +SELECT citus_remove_node('localhost', :master_port); + citus_remove_node +--------------------------------------------------------------------- + +(1 row) + +CREATE SCHEMA tenant_schema; +-- Make sure that we can sync metadata for empty tenant schemas +-- when adding the first node to the cluster. +SELECT 1 FROM citus_add_node('localhost', :worker_1_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +DROP SCHEMA tenant_schema; +SELECT citus_remove_node('localhost', :worker_1_port); + citus_remove_node +--------------------------------------------------------------------- + +(1 row) + +RESET citus.enable_schema_based_sharding; +-- confirm that we can create a reference table on an empty node +CREATE TABLE test (x int, y int); +INSERT INTO test VALUES (1,2); +SELECT create_reference_table('test'); +NOTICE: Copying data from local table... +NOTICE: copying the data has completed +DETAIL: The local data in the table is no longer visible, but is still on disk. 
+HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.test$$) + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +DROP TABLE test; +TRUNCATE pg_dist_node; +-- confirm that we can create a local table on an empty node +CREATE TABLE test (x int, y int); +INSERT INTO test VALUES (1,2); +SELECT citus_add_local_table_to_metadata('test'); + citus_add_local_table_to_metadata +--------------------------------------------------------------------- + +(1 row) + +DROP TABLE test; +-- Verify that we don't consider the schemas created by extensions as tenant schemas. +-- Easiest way of verifying this is to drop and re-create columnar extension. +DROP EXTENSION citus_columnar; +SET citus.enable_schema_based_sharding TO ON; +CREATE EXTENSION citus_columnar; +SELECT COUNT(*)=0 FROM pg_dist_schema +WHERE schemaid IN ('columnar'::regnamespace, 'columnar_internal'::regnamespace); + ?column? +--------------------------------------------------------------------- + t +(1 row) + +RESET citus.enable_schema_based_sharding; +DROP EXTENSION citus; +CREATE EXTENSION citus; +DROP TABLE version_mismatch_table; +DROP SCHEMA multi_extension; +ERROR: cannot drop schema multi_extension because other objects depend on it +DETAIL: function multi_extension.print_extension_changes() depends on schema multi_extension +HINT: Use DROP ... CASCADE to drop the dependent objects too. diff --git a/src/test/regress/expected/multi_mx_hide_shard_names.out b/src/test/regress/expected/multi_mx_hide_shard_names.out index 762c6a30b54..1f6e06ab1cc 100644 --- a/src/test/regress/expected/multi_mx_hide_shard_names.out +++ b/src/test/regress/expected/multi_mx_hide_shard_names.out @@ -473,19 +473,25 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name -- PG16 added one more backend type B_STANDALONE_BACKEND -- and also alphabetized the backend types, hence the orders changed --- Relevant PG commit: +-- Relevant PG16 commit: -- https://github.com/postgres/postgres/commit/0c679464a837079acc75ff1d45eaa83f79e05690 +-- Relevant Pg17 commit: +-- https://github.com/postgres/postgres/commit/067701f57758f9baed5bd9d868539738d77bfa92#diff-afc0ebd67534b71b5b94b29a1387aa6eedffe342a5539f52d686428be323e802 SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 -\gset -\if :server_version_ge_16 -SELECT 4 AS client_backend \gset -SELECT 5 AS bgworker \gset -SELECT 12 AS walsender \gset +SELECT substring(:'server_version', '\d+')::int >= 17 AS server_version_ge_17 \gset +SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 \gset +\if :server_version_ge_17 + SELECT 1 AS client_backend \gset + SELECT 4 AS bgworker \gset + SELECT 5 AS walsender \gset +\elif :server_version_ge_16 + SELECT 4 AS client_backend \gset + SELECT 5 AS bgworker \gset + SELECT 12 AS walsender \gset \else -SELECT 3 AS client_backend \gset -SELECT 4 AS bgworker \gset -SELECT 9 AS walsender \gset + SELECT 3 AS client_backend \gset + SELECT 4 AS bgworker \gset + SELECT 9 AS walsender \gset \endif -- say, we set it to bgworker -- the shards and indexes do not show up diff --git a/src/test/regress/expected/multi_mx_router_planner.out b/src/test/regress/expected/multi_mx_router_planner.out index 5ac6093cb99..a5b863b096e 100644 --- a/src/test/regress/expected/multi_mx_router_planner.out +++ b/src/test/regress/expected/multi_mx_router_planner.out @@ -1,3 +1,6 @@ +-- Two alternative test outputs: 
+-- multi_mx_router_planner.out for PG16 and before +-- multi_mx_router_planner_0.out for PG17 -- =================================================================== -- test router planner functionality for single shard select queries -- =================================================================== diff --git a/src/test/regress/expected/multi_mx_router_planner_0.out b/src/test/regress/expected/multi_mx_router_planner_0.out new file mode 100644 index 00000000000..d66834e01cd --- /dev/null +++ b/src/test/regress/expected/multi_mx_router_planner_0.out @@ -0,0 +1,1514 @@ +-- Two alternative test outputs: +-- multi_mx_router_planner.out for PG16 and before +-- multi_mx_router_planner_0.out for PG17 +-- =================================================================== +-- test router planner functionality for single shard select queries +-- =================================================================== +-- run all the router queries from the one of the workers +-- and CTE inlining is not relevant to router plannery anyway +\c - - - :worker_1_port +-- this table is used in a CTE test +CREATE TABLE authors_hash_mx ( name text, id bigint ); +-- create a bunch of test data +INSERT INTO articles_hash_mx VALUES ( 1, 1, 'arsenous', 9572); +INSERT INTO articles_hash_mx VALUES ( 2, 2, 'abducing', 13642); +INSERT INTO articles_hash_mx VALUES ( 3, 3, 'asternal', 10480); +INSERT INTO articles_hash_mx VALUES ( 4, 4, 'altdorfer', 14551); +INSERT INTO articles_hash_mx VALUES ( 5, 5, 'aruru', 11389); +INSERT INTO articles_hash_mx VALUES ( 6, 6, 'atlases', 15459); +INSERT INTO articles_hash_mx VALUES ( 7, 7, 'aseptic', 12298); +INSERT INTO articles_hash_mx VALUES ( 8, 8, 'agatized', 16368); +INSERT INTO articles_hash_mx VALUES ( 9, 9, 'alligate', 438); +INSERT INTO articles_hash_mx VALUES (10, 10, 'aggrandize', 17277); +INSERT INTO articles_hash_mx VALUES (11, 1, 'alamo', 1347); +INSERT INTO articles_hash_mx VALUES (12, 2, 'archiblast', 18185); +INSERT INTO articles_hash_mx VALUES (13, 3, 'aseyev', 2255); +INSERT INTO articles_hash_mx VALUES (14, 4, 'andesite', 19094); +INSERT INTO articles_hash_mx VALUES (15, 5, 'adversa', 3164); +INSERT INTO articles_hash_mx VALUES (16, 6, 'allonym', 2); +INSERT INTO articles_hash_mx VALUES (17, 7, 'auriga', 4073); +INSERT INTO articles_hash_mx VALUES (18, 8, 'assembly', 911); +INSERT INTO articles_hash_mx VALUES (19, 9, 'aubergiste', 4981); +INSERT INTO articles_hash_mx VALUES (20, 10, 'absentness', 1820); +INSERT INTO articles_hash_mx VALUES (21, 1, 'arcading', 5890); +INSERT INTO articles_hash_mx VALUES (22, 2, 'antipope', 2728); +INSERT INTO articles_hash_mx VALUES (23, 3, 'abhorring', 6799); +INSERT INTO articles_hash_mx VALUES (24, 4, 'audacious', 3637); +INSERT INTO articles_hash_mx VALUES (25, 5, 'antehall', 7707); +INSERT INTO articles_hash_mx VALUES (26, 6, 'abington', 4545); +INSERT INTO articles_hash_mx VALUES (27, 7, 'arsenous', 8616); +INSERT INTO articles_hash_mx VALUES (28, 8, 'aerophyte', 5454); +INSERT INTO articles_hash_mx VALUES (29, 9, 'amateur', 9524); +INSERT INTO articles_hash_mx VALUES (30, 10, 'andelee', 6363); +INSERT INTO articles_hash_mx VALUES (31, 1, 'athwartships', 7271); +INSERT INTO articles_hash_mx VALUES (32, 2, 'amazon', 11342); +INSERT INTO articles_hash_mx VALUES (33, 3, 'autochrome', 8180); +INSERT INTO articles_hash_mx VALUES (34, 4, 'amnestied', 12250); +INSERT INTO articles_hash_mx VALUES (35, 5, 'aminate', 9089); +INSERT INTO articles_hash_mx VALUES (36, 6, 'ablation', 13159); +INSERT INTO articles_hash_mx VALUES (37, 7, 
'archduchies', 9997); +INSERT INTO articles_hash_mx VALUES (38, 8, 'anatine', 14067); +INSERT INTO articles_hash_mx VALUES (39, 9, 'anchises', 10906); +INSERT INTO articles_hash_mx VALUES (40, 10, 'attemper', 14976); +INSERT INTO articles_hash_mx VALUES (41, 1, 'aznavour', 11814); +INSERT INTO articles_hash_mx VALUES (42, 2, 'ausable', 15885); +INSERT INTO articles_hash_mx VALUES (43, 3, 'affixal', 12723); +INSERT INTO articles_hash_mx VALUES (44, 4, 'anteport', 16793); +INSERT INTO articles_hash_mx VALUES (45, 5, 'afrasia', 864); +INSERT INTO articles_hash_mx VALUES (46, 6, 'atlanta', 17702); +INSERT INTO articles_hash_mx VALUES (47, 7, 'abeyance', 1772); +INSERT INTO articles_hash_mx VALUES (48, 8, 'alkylic', 18610); +INSERT INTO articles_hash_mx VALUES (49, 9, 'anyone', 2681); +INSERT INTO articles_hash_mx VALUES (50, 10, 'anjanette', 19519); +SET client_min_messages TO 'DEBUG2'; +-- insert a single row for the test +INSERT INTO articles_single_shard_hash_mx VALUES (50, 10, 'anjanette', 19519); +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 10 +-- single-shard tests +-- many of the tests in this file is intended for testing non-fast-path +-- router planner, so we're explicitly disabling it in this file. +-- We've bunch of other tests that triggers fast-path-router +SET citus.enable_fast_path_router_planner TO false; +-- test simple select for a single row +SELECT * FROM articles_hash_mx WHERE author_id = 10 AND id = 50; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 10 + id | author_id | title | word_count +--------------------------------------------------------------------- + 50 | 10 | anjanette | 19519 +(1 row) + +-- get all titles by a single author +SELECT title FROM articles_hash_mx WHERE author_id = 10; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 10 + title +--------------------------------------------------------------------- + aggrandize + absentness + andelee + attemper + anjanette +(5 rows) + +-- try ordering them by word count +SELECT title, word_count FROM articles_hash_mx + WHERE author_id = 10 + ORDER BY word_count DESC NULLS LAST; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 10 + title | word_count +--------------------------------------------------------------------- + anjanette | 19519 + aggrandize | 17277 + attemper | 14976 + andelee | 6363 + absentness | 1820 +(5 rows) + +-- look at last two articles by an author +SELECT title, id FROM articles_hash_mx + WHERE author_id = 5 + ORDER BY id + LIMIT 2; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 5 + title | id +--------------------------------------------------------------------- + aruru | 5 + adversa | 15 +(2 rows) + +-- find all articles by two authors in same shard +-- but plan is not router executable due to order by +SELECT title, author_id FROM articles_hash_mx + WHERE author_id = 7 OR author_id = 8 + ORDER BY author_id ASC, id; +DEBUG: Creating router plan + title | author_id +--------------------------------------------------------------------- + aseptic | 7 + auriga | 7 + arsenous | 7 + archduchies | 7 + abeyance | 7 + agatized | 8 + assembly | 8 + aerophyte | 8 + anatine | 8 + alkylic | 8 +(10 rows) + +-- same query is router executable with no order by +SELECT title, author_id FROM articles_hash_mx + WHERE author_id = 7 OR author_id = 8; +DEBUG: Creating router plan + title | author_id 
+--------------------------------------------------------------------- + aseptic | 7 + agatized | 8 + auriga | 7 + assembly | 8 + arsenous | 7 + aerophyte | 8 + archduchies | 7 + anatine | 8 + abeyance | 7 + alkylic | 8 +(10 rows) + +-- add in some grouping expressions, still on same shard +-- having queries unsupported in Citus +SELECT author_id, sum(word_count) AS corpus_size FROM articles_hash_mx + WHERE author_id = 1 OR author_id = 7 OR author_id = 8 OR author_id = 10 + GROUP BY author_id + HAVING sum(word_count) > 1000 + ORDER BY sum(word_count) DESC; +DEBUG: Creating router plan + author_id | corpus_size +--------------------------------------------------------------------- + 10 | 59955 + 8 | 55410 + 7 | 36756 + 1 | 35894 +(4 rows) + +-- however having clause is supported if it goes to a single shard +SELECT author_id, sum(word_count) AS corpus_size FROM articles_hash_mx + WHERE author_id = 1 + GROUP BY author_id + HAVING sum(word_count) > 1000 + ORDER BY sum(word_count) DESC; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 + author_id | corpus_size +--------------------------------------------------------------------- + 1 | 35894 +(1 row) + +-- query is a single shard query but can't do shard pruning, +-- not router-plannable due to <= and IN +SELECT * FROM articles_hash_mx WHERE author_id <= 1; +DEBUG: Router planner cannot handle multi-shard select queries + id | author_id | title | word_count +--------------------------------------------------------------------- + 1 | 1 | arsenous | 9572 + 11 | 1 | alamo | 1347 + 21 | 1 | arcading | 5890 + 31 | 1 | athwartships | 7271 + 41 | 1 | aznavour | 11814 +(5 rows) + +SELECT * FROM articles_hash_mx WHERE author_id IN (1, 3); +DEBUG: Creating router plan + id | author_id | title | word_count +--------------------------------------------------------------------- + 1 | 1 | arsenous | 9572 + 3 | 3 | asternal | 10480 + 11 | 1 | alamo | 1347 + 13 | 3 | aseyev | 2255 + 21 | 1 | arcading | 5890 + 23 | 3 | abhorring | 6799 + 31 | 1 | athwartships | 7271 + 33 | 3 | autochrome | 8180 + 41 | 1 | aznavour | 11814 + 43 | 3 | affixal | 12723 +(10 rows) + +-- queries with CTEs are supported +WITH first_author AS ( SELECT id FROM articles_hash_mx WHERE author_id = 1) +SELECT * FROM first_author; +DEBUG: CTE first_author is going to be inlined via distributed planning +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 + id +--------------------------------------------------------------------- + 1 + 11 + 21 + 31 + 41 +(5 rows) + +-- queries with CTEs are supported even if CTE is not referenced inside query +WITH first_author AS ( SELECT id FROM articles_hash_mx WHERE author_id = 1) +SELECT title FROM articles_hash_mx WHERE author_id = 1; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 + title +--------------------------------------------------------------------- + arsenous + alamo + arcading + athwartships + aznavour +(5 rows) + +-- two CTE joins are supported if they go to the same worker +WITH id_author AS ( SELECT id, author_id FROM articles_hash_mx WHERE author_id = 1), +id_title AS (SELECT id, title from articles_hash_mx WHERE author_id = 1) +SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id; +DEBUG: CTE id_author is going to be inlined via distributed planning +DEBUG: CTE id_title is going to be inlined via distributed planning +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 + id | author_id | 
id | title +--------------------------------------------------------------------- + 1 | 1 | 1 | arsenous + 11 | 1 | 11 | alamo + 21 | 1 | 21 | arcading + 31 | 1 | 31 | athwartships + 41 | 1 | 41 | aznavour +(5 rows) + +WITH id_author AS ( SELECT id, author_id FROM articles_hash_mx WHERE author_id = 1), +id_title AS (SELECT id, title from articles_hash_mx WHERE author_id = 3) +SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id; +DEBUG: CTE id_author is going to be inlined via distributed planning +DEBUG: CTE id_title is going to be inlined via distributed planning +DEBUG: Creating router plan + id | author_id | id | title +--------------------------------------------------------------------- +(0 rows) + +-- CTE joins on different workers are supported because they are both planned recursively +WITH id_author AS ( SELECT id, author_id FROM articles_hash_mx WHERE author_id = 1), +id_title AS (SELECT id, title from articles_hash_mx WHERE author_id = 2) +SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id; +DEBUG: CTE id_author is going to be inlined via distributed planning +DEBUG: CTE id_title is going to be inlined via distributed planning +DEBUG: router planner does not support queries that reference non-colocated distributed tables +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 2 +DEBUG: generating subplan XXX_1 for subquery SELECT id, title FROM public.articles_hash_mx WHERE (author_id OPERATOR(pg_catalog.=) 2) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT id_author.id, id_author.author_id, id_title.id, id_title.title FROM (SELECT articles_hash_mx.id, articles_hash_mx.author_id FROM public.articles_hash_mx WHERE (articles_hash_mx.author_id OPERATOR(pg_catalog.=) 1)) id_author, (SELECT intermediate_result.id, intermediate_result.title FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, title character varying(20))) id_title WHERE (id_author.id OPERATOR(pg_catalog.=) id_title.id) +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 + id | author_id | id | title +--------------------------------------------------------------------- +(0 rows) + +WITH update_article AS ( + UPDATE articles_hash_mx SET word_count = 11 WHERE id = 1 AND word_count = 10 RETURNING * +) +SELECT coalesce(1,random()); +DEBUG: Router planner cannot handle multi-shard select queries +DEBUG: generating subplan XXX_1 for CTE update_article: UPDATE public.articles_hash_mx SET word_count = 11 WHERE ((id OPERATOR(pg_catalog.=) 1) AND (word_count OPERATOR(pg_catalog.=) 10)) RETURNING id, author_id, title, word_count +DEBUG: Creating router plan +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT COALESCE((1)::double precision, random()) AS "coalesce" +DEBUG: Creating router plan + coalesce +--------------------------------------------------------------------- + 1 +(1 row) + +WITH update_article AS ( + UPDATE articles_hash_mx SET word_count = 10 WHERE author_id = 1 AND id = 1 AND word_count = 11 RETURNING * +) +SELECT coalesce(1,random()); +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 + coalesce +--------------------------------------------------------------------- + 1 +(1 row) + +-- recursive CTEs are supported when filtered on partition column +INSERT INTO company_employees_mx values(1, 1, 0); +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 +INSERT INTO 
company_employees_mx values(1, 2, 1); +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 +INSERT INTO company_employees_mx values(1, 3, 1); +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 +INSERT INTO company_employees_mx values(1, 4, 2); +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 +INSERT INTO company_employees_mx values(1, 5, 4); +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 +INSERT INTO company_employees_mx values(3, 1, 0); +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 3 +INSERT INTO company_employees_mx values(3, 15, 1); +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 3 +INSERT INTO company_employees_mx values(3, 3, 1); +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 3 +-- find employees at top 2 level within company hierarchy +WITH RECURSIVE hierarchy as ( + SELECT *, 1 AS level + FROM company_employees_mx + WHERE company_id = 1 and manager_id = 0 + UNION + SELECT ce.*, (h.level+1) + FROM hierarchy h JOIN company_employees_mx ce + ON (h.employee_id = ce.manager_id AND + h.company_id = ce.company_id AND + ce.company_id = 1)) +SELECT * FROM hierarchy WHERE LEVEL <= 2; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 + company_id | employee_id | manager_id | level +--------------------------------------------------------------------- + 1 | 1 | 0 | 1 + 1 | 2 | 1 | 2 + 1 | 3 | 1 | 2 +(3 rows) + +-- query becomes not router plannble and gets rejected +-- if filter on company is dropped +WITH RECURSIVE hierarchy as ( + SELECT *, 1 AS level + FROM company_employees_mx + WHERE company_id = 1 and manager_id = 0 + UNION + SELECT ce.*, (h.level+1) + FROM hierarchy h JOIN company_employees_mx ce + ON (h.employee_id = ce.manager_id AND + h.company_id = ce.company_id)) +SELECT * FROM hierarchy WHERE LEVEL <= 2; +DEBUG: Router planner cannot handle multi-shard select queries +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column +-- logically wrong query, query involves different shards +-- from the same table, but still router plannable due to +-- shard being placed on the same worker. 
+WITH RECURSIVE hierarchy as ( + SELECT *, 1 AS level + FROM company_employees_mx + WHERE company_id = 3 and manager_id = 0 + UNION + SELECT ce.*, (h.level+1) + FROM hierarchy h JOIN company_employees_mx ce + ON (h.employee_id = ce.manager_id AND + h.company_id = ce.company_id AND + ce.company_id = 2)) +SELECT * FROM hierarchy WHERE LEVEL <= 2; +DEBUG: router planner does not support queries that reference non-colocated distributed tables +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column +-- grouping sets are supported on single shard +SELECT + id, substring(title, 2, 1) AS subtitle, count(*) + FROM articles_hash_mx + WHERE author_id = 1 or author_id = 3 + GROUP BY GROUPING SETS ((id),(subtitle)) + ORDER BY id, subtitle; +DEBUG: Creating router plan + id | subtitle | count +--------------------------------------------------------------------- + 1 | | 1 + 3 | | 1 + 11 | | 1 + 13 | | 1 + 21 | | 1 + 23 | | 1 + 31 | | 1 + 33 | | 1 + 41 | | 1 + 43 | | 1 + | b | 1 + | f | 1 + | l | 1 + | r | 2 + | s | 2 + | t | 1 + | u | 1 + | z | 1 +(18 rows) + +-- grouping sets are not supported on multiple shards +SELECT + id, substring(title, 2, 1) AS subtitle, count(*) + FROM articles_hash_mx + WHERE author_id = 1 or author_id = 2 + GROUP BY GROUPING SETS ((id),(subtitle)) + ORDER BY id, subtitle; +DEBUG: Router planner cannot handle multi-shard select queries +ERROR: could not run distributed query with GROUPING SETS, CUBE, or ROLLUP +HINT: Consider using an equality filter on the distributed table's partition column. +-- queries which involve functions in FROM clause are supported if it goes to a single worker. +SELECT * FROM articles_hash_mx, position('om' in 'Thomas') WHERE author_id = 1; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 + id | author_id | title | word_count | position +--------------------------------------------------------------------- + 1 | 1 | arsenous | 9572 | 3 + 11 | 1 | alamo | 1347 | 3 + 21 | 1 | arcading | 5890 | 3 + 31 | 1 | athwartships | 7271 | 3 + 41 | 1 | aznavour | 11814 | 3 +(5 rows) + +SELECT * FROM articles_hash_mx, position('om' in 'Thomas') WHERE author_id = 1 or author_id = 3; +DEBUG: Creating router plan + id | author_id | title | word_count | position +--------------------------------------------------------------------- + 1 | 1 | arsenous | 9572 | 3 + 3 | 3 | asternal | 10480 | 3 + 11 | 1 | alamo | 1347 | 3 + 13 | 3 | aseyev | 2255 | 3 + 21 | 1 | arcading | 5890 | 3 + 23 | 3 | abhorring | 6799 | 3 + 31 | 1 | athwartships | 7271 | 3 + 33 | 3 | autochrome | 8180 | 3 + 41 | 1 | aznavour | 11814 | 3 + 43 | 3 | affixal | 12723 | 3 +(10 rows) + +-- they are supported via (sub)query pushdown if multiple workers are involved +SELECT * FROM articles_hash_mx, position('om' in 'Thomas') WHERE author_id = 1 or author_id = 2 ORDER BY 4 DESC, 1 DESC, 2 DESC LIMIT 5; +DEBUG: Router planner cannot handle multi-shard select queries +DEBUG: push down of limit count: 5 + id | author_id | title | word_count | position +--------------------------------------------------------------------- + 12 | 2 | archiblast | 18185 | 3 + 42 | 2 | ausable | 15885 | 3 + 2 | 2 | abducing | 13642 | 3 + 41 | 1 | aznavour | 11814 | 3 + 32 | 2 | amazon | 11342 | 3 +(5 rows) + +-- subqueries are supported in FROM clause but they are not router plannable +SELECT articles_hash_mx.id,test.word_count +FROM articles_hash_mx, (SELECT id, word_count FROM articles_hash_mx) AS test WHERE test.id = articles_hash_mx.id +ORDER BY 
test.word_count DESC, articles_hash_mx.id LIMIT 5; +DEBUG: Router planner cannot handle multi-shard select queries +DEBUG: Router planner cannot handle multi-shard select queries +DEBUG: generating subplan XXX_1 for subquery SELECT id, word_count FROM public.articles_hash_mx +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT articles_hash_mx.id, test.word_count FROM public.articles_hash_mx, (SELECT intermediate_result.id, intermediate_result.word_count FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, word_count integer)) test WHERE (test.id OPERATOR(pg_catalog.=) articles_hash_mx.id) ORDER BY test.word_count DESC, articles_hash_mx.id LIMIT 5 +DEBUG: Router planner cannot handle multi-shard select queries +DEBUG: push down of limit count: 5 + id | word_count +--------------------------------------------------------------------- + 50 | 19519 + 14 | 19094 + 48 | 18610 + 12 | 18185 + 46 | 17702 +(5 rows) + +SELECT articles_hash_mx.id,test.word_count +FROM articles_hash_mx, (SELECT id, word_count FROM articles_hash_mx) AS test +WHERE test.id = articles_hash_mx.id and articles_hash_mx.author_id = 1 +ORDER BY articles_hash_mx.id; +DEBUG: Router planner cannot handle multi-shard select queries +DEBUG: Router planner cannot handle multi-shard select queries +DEBUG: generating subplan XXX_1 for subquery SELECT id, word_count FROM public.articles_hash_mx +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT articles_hash_mx.id, test.word_count FROM public.articles_hash_mx, (SELECT intermediate_result.id, intermediate_result.word_count FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, word_count integer)) test WHERE ((test.id OPERATOR(pg_catalog.=) articles_hash_mx.id) AND (articles_hash_mx.author_id OPERATOR(pg_catalog.=) 1)) ORDER BY articles_hash_mx.id +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 + id | word_count +--------------------------------------------------------------------- + 1 | 9572 + 11 | 1347 + 21 | 5890 + 31 | 7271 + 41 | 11814 +(5 rows) + +-- subqueries in SELECT clause +SELECT a.title AS name, (SELECT a2.id FROM articles_single_shard_hash_mx a2 WHERE a.id = a2.id LIMIT 1) + AS special_price FROM articles_hash_mx a; +DEBUG: Router planner cannot handle multi-shard select queries +ERROR: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns +-- simple lookup query +SELECT * + FROM articles_hash_mx + WHERE author_id = 1; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 + id | author_id | title | word_count +--------------------------------------------------------------------- + 1 | 1 | arsenous | 9572 + 11 | 1 | alamo | 1347 + 21 | 1 | arcading | 5890 + 31 | 1 | athwartships | 7271 + 41 | 1 | aznavour | 11814 +(5 rows) + +-- below query hits a single shard, router plannable +SELECT * + FROM articles_hash_mx + WHERE author_id = 1 OR author_id = 17; +DEBUG: Creating router plan + id | author_id | title | word_count +--------------------------------------------------------------------- + 1 | 1 | arsenous | 9572 + 11 | 1 | alamo | 1347 + 21 | 1 | arcading | 5890 + 31 | 1 | athwartships | 7271 + 41 | 1 | aznavour | 11814 +(5 rows) + +-- below query hits two shards, not router plannable + not router executable +-- handled by real-time executor +SELECT * + FROM articles_hash_mx + WHERE author_id = 1 OR author_id = 18; 
+DEBUG: Router planner cannot handle multi-shard select queries + id | author_id | title | word_count +--------------------------------------------------------------------- + 1 | 1 | arsenous | 9572 + 11 | 1 | alamo | 1347 + 21 | 1 | arcading | 5890 + 31 | 1 | athwartships | 7271 + 41 | 1 | aznavour | 11814 +(5 rows) + +-- rename the output columns +SELECT id as article_id, word_count * id as random_value + FROM articles_hash_mx + WHERE author_id = 1; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 + article_id | random_value +--------------------------------------------------------------------- + 1 | 9572 + 11 | 14817 + 21 | 123690 + 31 | 225401 + 41 | 484374 +(5 rows) + +-- we can push down co-located joins to a single worker +SELECT a.author_id as first_author, b.word_count as second_word_count + FROM articles_hash_mx a, articles_hash_mx b + WHERE a.author_id = 10 and a.author_id = b.author_id + LIMIT 3; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 10 + first_author | second_word_count +--------------------------------------------------------------------- + 10 | 17277 + 10 | 1820 + 10 | 6363 +(3 rows) + +-- following join is router plannable since the same worker +-- has both shards when citus.enable_non_colocated_router_query_pushdown +-- is enabled +SET citus.enable_non_colocated_router_query_pushdown TO ON; +SELECT a.author_id as first_author, b.word_count as second_word_count + FROM articles_hash_mx a, articles_single_shard_hash_mx b + WHERE a.author_id = 10 and a.author_id = b.author_id + ORDER by 1,2 LIMIT 3; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 10 + first_author | second_word_count +--------------------------------------------------------------------- + 10 | 19519 + 10 | 19519 + 10 | 19519 +(3 rows) + +SET citus.enable_non_colocated_router_query_pushdown TO OFF; +-- but this is not the case otherwise +SELECT a.author_id as first_author, b.word_count as second_word_count + FROM articles_hash_mx a, articles_single_shard_hash_mx b + WHERE a.author_id = 10 and a.author_id = b.author_id + ORDER by 1,2 LIMIT 3; +DEBUG: router planner does not support queries that reference non-colocated distributed tables +DEBUG: push down of limit count: 3 +DEBUG: join prunable for task partitionId 0 and 1 +DEBUG: join prunable for task partitionId 0 and 2 +DEBUG: join prunable for task partitionId 0 and 3 +DEBUG: join prunable for task partitionId 1 and 0 +DEBUG: join prunable for task partitionId 1 and 2 +DEBUG: join prunable for task partitionId 1 and 3 +DEBUG: join prunable for task partitionId 2 and 0 +DEBUG: join prunable for task partitionId 2 and 1 +DEBUG: join prunable for task partitionId 2 and 3 +DEBUG: join prunable for task partitionId 3 and 0 +DEBUG: join prunable for task partitionId 3 and 1 +DEBUG: join prunable for task partitionId 3 and 2 +DEBUG: pruning merge fetch taskId 1 +DETAIL: Creating dependency on merge taskId 2 +DEBUG: pruning merge fetch taskId 2 +DETAIL: Creating dependency on merge taskId 2 +DEBUG: pruning merge fetch taskId 4 +DETAIL: Creating dependency on merge taskId 4 +DEBUG: pruning merge fetch taskId 5 +DETAIL: Creating dependency on merge taskId 4 +DEBUG: pruning merge fetch taskId 7 +DETAIL: Creating dependency on merge taskId 6 +DEBUG: pruning merge fetch taskId 8 +DETAIL: Creating dependency on merge taskId 6 +DEBUG: pruning merge fetch taskId 10 +DETAIL: Creating dependency on merge taskId 8 +DEBUG: pruning merge fetch taskId 11 +DETAIL: 
Creating dependency on merge taskId 8 +ERROR: the query contains a join that requires repartitioning +HINT: Set citus.enable_repartition_joins to on to enable repartitioning +RESET citus.enable_non_colocated_router_query_pushdown; +-- following join is not router plannable since there are no +-- workers containing both shards, but will work through recursive +-- planning +SET client_min_messages TO INFO; +WITH single_shard as (SELECT * FROM articles_single_shard_hash_mx) +SELECT a.author_id as first_author, b.word_count as second_word_count + FROM articles_hash_mx a, single_shard b + WHERE a.author_id = 2 and a.author_id = b.author_id + LIMIT 3; + first_author | second_word_count +--------------------------------------------------------------------- +(0 rows) + +SET client_min_messages TO DEBUG; +-- single shard select with limit is router plannable +SELECT * + FROM articles_hash_mx + WHERE author_id = 1 + LIMIT 3; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 + id | author_id | title | word_count +--------------------------------------------------------------------- + 1 | 1 | arsenous | 9572 + 11 | 1 | alamo | 1347 + 21 | 1 | arcading | 5890 +(3 rows) + +-- single shard select with limit + offset is router plannable +SELECT * + FROM articles_hash_mx + WHERE author_id = 1 + LIMIT 2 + OFFSET 1; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 + id | author_id | title | word_count +--------------------------------------------------------------------- + 11 | 1 | alamo | 1347 + 21 | 1 | arcading | 5890 +(2 rows) + +-- single shard select with limit + offset + order by is router plannable +SELECT * + FROM articles_hash_mx + WHERE author_id = 1 + ORDER BY id desc + LIMIT 2 + OFFSET 1; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 + id | author_id | title | word_count +--------------------------------------------------------------------- + 31 | 1 | athwartships | 7271 + 21 | 1 | arcading | 5890 +(2 rows) + +-- single shard select with group by on non-partition column is router plannable +SELECT id + FROM articles_hash_mx + WHERE author_id = 1 + GROUP BY id + ORDER BY id; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 + id +--------------------------------------------------------------------- + 1 + 11 + 21 + 31 + 41 +(5 rows) + +-- single shard select with distinct is router plannable +SELECT distinct id + FROM articles_hash_mx + WHERE author_id = 1 + ORDER BY id; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 + id +--------------------------------------------------------------------- + 1 + 11 + 21 + 31 + 41 +(5 rows) + +-- single shard aggregate is router plannable +SELECT avg(word_count) + FROM articles_hash_mx + WHERE author_id = 2; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 2 + avg +--------------------------------------------------------------------- + 12356.400000000000 +(1 row) + +-- max, min, sum, count are router plannable on single shard +SELECT max(word_count) as max, min(word_count) as min, + sum(word_count) as sum, count(word_count) as cnt + FROM articles_hash_mx + WHERE author_id = 2; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 2 + max | min | sum | cnt +--------------------------------------------------------------------- + 18185 | 2728 | 61782 | 5 +(1 row) + +-- queries with aggregates and group by supported on single 
shard +SELECT max(word_count) + FROM articles_hash_mx + WHERE author_id = 1 + GROUP BY author_id; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 + max +--------------------------------------------------------------------- + 11814 +(1 row) + +-- router plannable union queries are supported +SELECT * FROM ( + SELECT * FROM articles_hash_mx WHERE author_id = 1 + UNION + SELECT * FROM articles_hash_mx WHERE author_id = 3 +) AS combination +ORDER BY id; +DEBUG: Creating router plan + id | author_id | title | word_count +--------------------------------------------------------------------- + 1 | 1 | arsenous | 9572 + 3 | 3 | asternal | 10480 + 11 | 1 | alamo | 1347 + 13 | 3 | aseyev | 2255 + 21 | 1 | arcading | 5890 + 23 | 3 | abhorring | 6799 + 31 | 1 | athwartships | 7271 + 33 | 3 | autochrome | 8180 + 41 | 1 | aznavour | 11814 + 43 | 3 | affixal | 12723 +(10 rows) + +(SELECT LEFT(title, 1) FROM articles_hash_mx WHERE author_id = 1) +UNION +(SELECT LEFT(title, 1) FROM articles_hash_mx WHERE author_id = 3); +DEBUG: Creating router plan + left +--------------------------------------------------------------------- + a +(1 row) + +(SELECT LEFT(title, 1) FROM articles_hash_mx WHERE author_id = 1) +INTERSECT +(SELECT LEFT(title, 1) FROM articles_hash_mx WHERE author_id = 3); +DEBUG: Creating router plan + left +--------------------------------------------------------------------- + a +(1 row) + +SELECT * FROM ( + SELECT LEFT(title, 2) FROM articles_hash_mx WHERE author_id = 1 + EXCEPT + SELECT LEFT(title, 2) FROM articles_hash_mx WHERE author_id = 3 +) AS combination +ORDER BY 1; +DEBUG: Creating router plan + left +--------------------------------------------------------------------- + al + ar + at + az +(4 rows) + +-- union queries are supported through recursive planning +SET client_min_messages TO DEBUG1; +(SELECT * FROM articles_hash_mx WHERE author_id = 1) +UNION +(SELECT * FROM articles_hash_mx WHERE author_id = 2) +ORDER BY 1,2; +DEBUG: generating subplan XXX_1 for subquery SELECT id, author_id, title, word_count FROM public.articles_hash_mx WHERE (author_id OPERATOR(pg_catalog.=) 1) +DEBUG: generating subplan XXX_2 for subquery SELECT id, author_id, title, word_count FROM public.articles_hash_mx WHERE (author_id OPERATOR(pg_catalog.=) 2) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.id, intermediate_result.author_id, intermediate_result.title, intermediate_result.word_count FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint, title character varying(20), word_count integer) UNION SELECT intermediate_result.id, intermediate_result.author_id, intermediate_result.title, intermediate_result.word_count FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint, title character varying(20), word_count integer) ORDER BY 1, 2 + id | author_id | title | word_count +--------------------------------------------------------------------- + 1 | 1 | arsenous | 9572 + 2 | 2 | abducing | 13642 + 11 | 1 | alamo | 1347 + 12 | 2 | archiblast | 18185 + 21 | 1 | arcading | 5890 + 22 | 2 | antipope | 2728 + 31 | 1 | athwartships | 7271 + 32 | 2 | amazon | 11342 + 41 | 1 | aznavour | 11814 + 42 | 2 | ausable | 15885 +(10 rows) + +SELECT * FROM ( + (SELECT * FROM articles_hash_mx WHERE author_id = 1) + UNION + (SELECT * FROM articles_hash_mx WHERE author_id = 2)) uu +ORDER BY 1, 2 +LIMIT 5; +DEBUG: push 
down of limit count: 5 + id | author_id | title | word_count +--------------------------------------------------------------------- + 1 | 1 | arsenous | 9572 + 2 | 2 | abducing | 13642 + 11 | 1 | alamo | 1347 + 12 | 2 | archiblast | 18185 + 21 | 1 | arcading | 5890 +(5 rows) + +-- error out for queries with repartition jobs +SELECT * + FROM articles_hash_mx a, articles_hash_mx b + WHERE a.id = b.id AND a.author_id = 1; +ERROR: the query contains a join that requires repartitioning +HINT: Set citus.enable_repartition_joins to on to enable repartitioning +-- queries which hit more than 1 shards are not router plannable or executable +-- handled by real-time executor +SELECT * + FROM articles_hash_mx + WHERE author_id >= 1 AND author_id <= 3 +ORDER BY 1,2,3,4; + id | author_id | title | word_count +--------------------------------------------------------------------- + 1 | 1 | arsenous | 9572 + 2 | 2 | abducing | 13642 + 3 | 3 | asternal | 10480 + 11 | 1 | alamo | 1347 + 12 | 2 | archiblast | 18185 + 13 | 3 | aseyev | 2255 + 21 | 1 | arcading | 5890 + 22 | 2 | antipope | 2728 + 23 | 3 | abhorring | 6799 + 31 | 1 | athwartships | 7271 + 32 | 2 | amazon | 11342 + 33 | 3 | autochrome | 8180 + 41 | 1 | aznavour | 11814 + 42 | 2 | ausable | 15885 + 43 | 3 | affixal | 12723 +(15 rows) + +-- Test various filtering options for router plannable check +SET client_min_messages to 'DEBUG2'; +-- this is definitely single shard +-- and router plannable +SELECT * + FROM articles_hash_mx + WHERE author_id = 1 and author_id >= 1; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 + id | author_id | title | word_count +--------------------------------------------------------------------- + 1 | 1 | arsenous | 9572 + 11 | 1 | alamo | 1347 + 21 | 1 | arcading | 5890 + 31 | 1 | athwartships | 7271 + 41 | 1 | aznavour | 11814 +(5 rows) + +-- not router plannable due to or +SELECT * + FROM articles_hash_mx + WHERE author_id = 1 or id = 1; +DEBUG: Router planner cannot handle multi-shard select queries + id | author_id | title | word_count +--------------------------------------------------------------------- + 1 | 1 | arsenous | 9572 + 11 | 1 | alamo | 1347 + 21 | 1 | arcading | 5890 + 31 | 1 | athwartships | 7271 + 41 | 1 | aznavour | 11814 +(5 rows) + +-- router plannable +SELECT * + FROM articles_hash_mx + WHERE author_id = 1 and (id = 1 or id = 41); +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 + id | author_id | title | word_count +--------------------------------------------------------------------- + 1 | 1 | arsenous | 9572 + 41 | 1 | aznavour | 11814 +(2 rows) + +-- router plannable +SELECT * + FROM articles_hash_mx + WHERE author_id = 1 and (id = random()::int * 0); +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 + id | author_id | title | word_count +--------------------------------------------------------------------- +(0 rows) + +-- not router plannable due to function call on the right side +SELECT * + FROM articles_hash_mx + WHERE author_id = (random()::int * 0 + 1); +DEBUG: Router planner cannot handle multi-shard select queries + id | author_id | title | word_count +--------------------------------------------------------------------- + 1 | 1 | arsenous | 9572 + 11 | 1 | alamo | 1347 + 21 | 1 | arcading | 5890 + 31 | 1 | athwartships | 7271 + 41 | 1 | aznavour | 11814 +(5 rows) + +-- not router plannable due to or +SELECT * + FROM articles_hash_mx + WHERE author_id = 1 or id = 1; +DEBUG: 
Router planner cannot handle multi-shard select queries + id | author_id | title | word_count +--------------------------------------------------------------------- + 1 | 1 | arsenous | 9572 + 11 | 1 | alamo | 1347 + 21 | 1 | arcading | 5890 + 31 | 1 | athwartships | 7271 + 41 | 1 | aznavour | 11814 +(5 rows) + +-- router plannable due to abs(-1) getting converted to 1 by postgresql +SELECT * + FROM articles_hash_mx + WHERE author_id = abs(-1); +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 + id | author_id | title | word_count +--------------------------------------------------------------------- + 1 | 1 | arsenous | 9572 + 11 | 1 | alamo | 1347 + 21 | 1 | arcading | 5890 + 31 | 1 | athwartships | 7271 + 41 | 1 | aznavour | 11814 +(5 rows) + +-- not router plannable due to abs() function +SELECT * + FROM articles_hash_mx + WHERE 1 = abs(author_id); +DEBUG: Router planner cannot handle multi-shard select queries + id | author_id | title | word_count +--------------------------------------------------------------------- + 1 | 1 | arsenous | 9572 + 11 | 1 | alamo | 1347 + 21 | 1 | arcading | 5890 + 31 | 1 | athwartships | 7271 + 41 | 1 | aznavour | 11814 +(5 rows) + +-- not router plannable due to abs() function +SELECT * + FROM articles_hash_mx + WHERE author_id = abs(author_id - 2); +DEBUG: Router planner cannot handle multi-shard select queries + id | author_id | title | word_count +--------------------------------------------------------------------- + 1 | 1 | arsenous | 9572 + 11 | 1 | alamo | 1347 + 21 | 1 | arcading | 5890 + 31 | 1 | athwartships | 7271 + 41 | 1 | aznavour | 11814 +(5 rows) + +-- router plannable, function on different field +SELECT * + FROM articles_hash_mx + WHERE author_id = 1 and (id = abs(id - 2)); +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 + id | author_id | title | word_count +--------------------------------------------------------------------- + 1 | 1 | arsenous | 9572 +(1 row) + +-- not router plannable due to is true +SELECT * + FROM articles_hash_mx + WHERE (author_id = 1) is true; +DEBUG: Router planner cannot handle multi-shard select queries + id | author_id | title | word_count +--------------------------------------------------------------------- + 1 | 1 | arsenous | 9572 + 11 | 1 | alamo | 1347 + 21 | 1 | arcading | 5890 + 31 | 1 | athwartships | 7271 + 41 | 1 | aznavour | 11814 +(5 rows) + +-- router plannable, (boolean expression) = true is collapsed to (boolean expression) +SELECT * + FROM articles_hash_mx + WHERE (author_id = 1) = true; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 + id | author_id | title | word_count +--------------------------------------------------------------------- + 1 | 1 | arsenous | 9572 + 11 | 1 | alamo | 1347 + 21 | 1 | arcading | 5890 + 31 | 1 | athwartships | 7271 + 41 | 1 | aznavour | 11814 +(5 rows) + +-- router plannable, between operator is on another column +SELECT * + FROM articles_hash_mx + WHERE (author_id = 1) and id between 0 and 20; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 + id | author_id | title | word_count +--------------------------------------------------------------------- + 1 | 1 | arsenous | 9572 + 11 | 1 | alamo | 1347 +(2 rows) + +-- router plannable, partition column expression is and'ed to rest +SELECT * + FROM articles_hash_mx + WHERE (author_id = 1) and (id = 1 or id = 31) and title like '%s'; +DEBUG: Creating router plan +DEBUG: 
query has a single distribution column value: 1 + id | author_id | title | word_count +--------------------------------------------------------------------- + 1 | 1 | arsenous | 9572 + 31 | 1 | athwartships | 7271 +(2 rows) + +-- router plannable, order is changed +SELECT * + FROM articles_hash_mx + WHERE (id = 1 or id = 31) and title like '%s' and (author_id = 1); +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 + id | author_id | title | word_count +--------------------------------------------------------------------- + 1 | 1 | arsenous | 9572 + 31 | 1 | athwartships | 7271 +(2 rows) + +-- router plannable +SELECT * + FROM articles_hash_mx + WHERE (title like '%s' or title like 'a%') and (author_id = 1); +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 + id | author_id | title | word_count +--------------------------------------------------------------------- + 1 | 1 | arsenous | 9572 + 11 | 1 | alamo | 1347 + 21 | 1 | arcading | 5890 + 31 | 1 | athwartships | 7271 + 41 | 1 | aznavour | 11814 +(5 rows) + +-- router plannable +SELECT * + FROM articles_hash_mx + WHERE (title like '%s' or title like 'a%') and (author_id = 1) and (word_count < 3000 or word_count > 8000); +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 + id | author_id | title | word_count +--------------------------------------------------------------------- + 1 | 1 | arsenous | 9572 + 11 | 1 | alamo | 1347 + 41 | 1 | aznavour | 11814 +(3 rows) + +-- window functions are supported if query is router plannable +SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count + FROM articles_hash_mx + WHERE author_id = 5; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 5 + prev | title | word_count +--------------------------------------------------------------------- + | afrasia | 864 + afrasia | adversa | 3164 + adversa | antehall | 7707 + antehall | aminate | 9089 + aminate | aruru | 11389 +(5 rows) + +SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count + FROM articles_hash_mx + WHERE author_id = 5 + ORDER BY word_count DESC; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 5 + prev | title | word_count +--------------------------------------------------------------------- + aminate | aruru | 11389 + antehall | aminate | 9089 + adversa | antehall | 7707 + afrasia | adversa | 3164 + | afrasia | 864 +(5 rows) + +SELECT id, MIN(id) over (order by word_count) + FROM articles_hash_mx + WHERE author_id = 1; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 + id | min +--------------------------------------------------------------------- + 11 | 11 + 21 | 11 + 31 | 11 + 1 | 1 + 41 | 1 +(5 rows) + +SELECT id, word_count, AVG(word_count) over (order by word_count) + FROM articles_hash_mx + WHERE author_id = 1; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 + id | word_count | avg +--------------------------------------------------------------------- + 11 | 1347 | 1347.0000000000000000 + 21 | 5890 | 3618.5000000000000000 + 31 | 7271 | 4836.0000000000000000 + 1 | 9572 | 6020.0000000000000000 + 41 | 11814 | 7178.8000000000000000 +(5 rows) + +SELECT word_count, rank() OVER (PARTITION BY author_id ORDER BY word_count) + FROM articles_hash_mx + WHERE author_id = 1; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 + word_count | 
rank +--------------------------------------------------------------------- + 1347 | 1 + 5890 | 2 + 7271 | 3 + 9572 | 4 + 11814 | 5 +(5 rows) + +SELECT id, MIN(id) over (order by word_count) + FROM articles_hash_mx + WHERE author_id = 1 or author_id = 2 + ORDER BY 1; +DEBUG: Router planner cannot handle multi-shard select queries + id | min +--------------------------------------------------------------------- + 1 | 1 + 2 | 1 + 11 | 11 + 12 | 1 + 21 | 11 + 22 | 11 + 31 | 11 + 32 | 1 + 41 | 1 + 42 | 1 +(10 rows) + +SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count + FROM articles_hash_mx + WHERE author_id = 5 or author_id = 2 + ORDER BY 2; +DEBUG: Router planner cannot handle multi-shard select queries + prev | title | word_count +--------------------------------------------------------------------- + aruru | abducing | 13642 + antipope | adversa | 3164 + | afrasia | 864 + aminate | amazon | 11342 + antehall | aminate | 9089 + adversa | antehall | 7707 + afrasia | antipope | 2728 + ausable | archiblast | 18185 + amazon | aruru | 11389 + abducing | ausable | 15885 +(10 rows) + +-- complex query hitting a single shard +SELECT + count(DISTINCT CASE + WHEN + word_count > 100 + THEN + id + ELSE + NULL + END) as c + FROM + articles_hash_mx + WHERE + author_id = 5; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 5 + c +--------------------------------------------------------------------- + 5 +(1 row) + +-- same query is not router plannable if hits multiple shards +SELECT + count(DISTINCT CASE + WHEN + word_count > 100 + THEN + id + ELSE + NULL + END) as c + FROM + articles_hash_mx + GROUP BY + author_id + ORDER BY c; +DEBUG: Router planner cannot handle multi-shard select queries + c +--------------------------------------------------------------------- + 4 + 5 + 5 + 5 + 5 + 5 + 5 + 5 + 5 + 5 +(10 rows) + +-- queries inside transactions can be router plannable +BEGIN; +SELECT * + FROM articles_hash_mx + WHERE author_id = 1 + ORDER BY id; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 + id | author_id | title | word_count +--------------------------------------------------------------------- + 1 | 1 | arsenous | 9572 + 11 | 1 | alamo | 1347 + 21 | 1 | arcading | 5890 + 31 | 1 | athwartships | 7271 + 41 | 1 | aznavour | 11814 +(5 rows) + +END; +-- cursor queries are router plannable +BEGIN; +DECLARE test_cursor CURSOR FOR + SELECT * + FROM articles_hash_mx + WHERE author_id = 1 + ORDER BY id; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 +FETCH test_cursor; + id | author_id | title | word_count +--------------------------------------------------------------------- + 1 | 1 | arsenous | 9572 +(1 row) + +FETCH test_cursor; + id | author_id | title | word_count +--------------------------------------------------------------------- + 11 | 1 | alamo | 1347 +(1 row) + +FETCH BACKWARD test_cursor; + id | author_id | title | word_count +--------------------------------------------------------------------- + 1 | 1 | arsenous | 9572 +(1 row) + +END; +-- queries inside copy can be router plannable +COPY ( + SELECT * + FROM articles_hash_mx + WHERE author_id = 1 + ORDER BY id) TO STDOUT; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 +1 1 arsenous 9572 +11 1 alamo 1347 +21 1 arcading 5890 +31 1 athwartships 7271 +41 1 aznavour 11814 +-- table creation queries inside can be router plannable +CREATE TEMP TABLE temp_articles_hash_mx as + SELECT 
* + FROM articles_hash_mx + WHERE author_id = 1 + ORDER BY id; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 +-- router plannable queries may include filter for aggregates +SELECT count(*), count(*) FILTER (WHERE id < 3) + FROM articles_hash_mx + WHERE author_id = 1; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 + count | count +--------------------------------------------------------------------- + 5 | 1 +(1 row) + +-- non-router plannable queries support filters as well +SELECT count(*), count(*) FILTER (WHERE id < 3) + FROM articles_hash_mx + WHERE author_id = 1 or author_id = 2; +DEBUG: Router planner cannot handle multi-shard select queries + count | count +--------------------------------------------------------------------- + 10 | 2 +(1 row) + +-- prepare queries can be router plannable +PREPARE author_1_articles as + SELECT * + FROM articles_hash_mx + WHERE author_id = 1; +EXECUTE author_1_articles; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 + id | author_id | title | word_count +--------------------------------------------------------------------- + 1 | 1 | arsenous | 9572 + 11 | 1 | alamo | 1347 + 21 | 1 | arcading | 5890 + 31 | 1 | athwartships | 7271 + 41 | 1 | aznavour | 11814 +(5 rows) + +-- parametric prepare queries can be router plannable +PREPARE author_articles(int) as + SELECT * + FROM articles_hash_mx + WHERE author_id = $1; +EXECUTE author_articles(1); +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 + id | author_id | title | word_count +--------------------------------------------------------------------- + 1 | 1 | arsenous | 9572 + 11 | 1 | alamo | 1347 + 21 | 1 | arcading | 5890 + 31 | 1 | athwartships | 7271 + 41 | 1 | aznavour | 11814 +(5 rows) + +-- queries inside plpgsql functions could be router plannable +SET citus.enable_metadata_sync TO OFF; +CREATE OR REPLACE FUNCTION author_articles_max_id() RETURNS int AS $$ +DECLARE + max_id integer; +BEGIN + SELECT MAX(id) FROM articles_hash_mx ah + WHERE author_id = 1 + into max_id; + return max_id; +END; +$$ LANGUAGE plpgsql; +SELECT author_articles_max_id(); +DEBUG: Creating router plan +CONTEXT: SQL statement "SELECT MAX(id) FROM articles_hash_mx ah + WHERE author_id = 1" +PL/pgSQL function author_articles_max_id() line XX at SQL statement +DEBUG: query has a single distribution column value: 1 +CONTEXT: SQL statement "SELECT MAX(id) FROM articles_hash_mx ah + WHERE author_id = 1" +PL/pgSQL function author_articles_max_id() line XX at SQL statement + author_articles_max_id +--------------------------------------------------------------------- + 41 +(1 row) + +-- plpgsql function that return query results are not router plannable +CREATE OR REPLACE FUNCTION author_articles_id_word_count() RETURNS TABLE(id bigint, word_count int) AS $$ +DECLARE +BEGIN + RETURN QUERY + SELECT ah.id, ah.word_count + FROM articles_hash_mx ah + WHERE author_id = 1; + +END; +$$ LANGUAGE plpgsql; +SELECT * FROM author_articles_id_word_count(); +DEBUG: Creating router plan +CONTEXT: SQL statement "SELECT ah.id, ah.word_count + FROM articles_hash_mx ah + WHERE author_id = 1" +PL/pgSQL function author_articles_id_word_count() line XX at RETURN QUERY +DEBUG: query has a single distribution column value: 1 +CONTEXT: SQL statement "SELECT ah.id, ah.word_count + FROM articles_hash_mx ah + WHERE author_id = 1" +PL/pgSQL function author_articles_id_word_count() line XX at RETURN QUERY + id | 
word_count +--------------------------------------------------------------------- + 1 | 9572 + 11 | 1347 + 21 | 5890 + 31 | 7271 + 41 | 11814 +(5 rows) + +RESET citus.enable_metadata_sync; +-- materialized views can be created for router plannable queries +CREATE MATERIALIZED VIEW mv_articles_hash_mx AS + SELECT * FROM articles_hash_mx WHERE author_id = 1; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 +DEBUG: drop auto-cascades to type public.pg_temp_xxxxx +DEBUG: drop auto-cascades to type public.pg_temp_xxxxx[] +SELECT * FROM mv_articles_hash_mx; + id | author_id | title | word_count +--------------------------------------------------------------------- + 1 | 1 | arsenous | 9572 + 11 | 1 | alamo | 1347 + 21 | 1 | arcading | 5890 + 31 | 1 | athwartships | 7271 + 41 | 1 | aznavour | 11814 +(5 rows) + +SET client_min_messages to 'INFO'; +DROP MATERIALIZED VIEW mv_articles_hash_mx; +SET client_min_messages to 'DEBUG2'; +CREATE MATERIALIZED VIEW mv_articles_hash_mx_error AS + SELECT * FROM articles_hash_mx WHERE author_id in (1,2); +DEBUG: Router planner cannot handle multi-shard select queries +DEBUG: drop auto-cascades to type public.pg_temp_xxxxx +DEBUG: drop auto-cascades to type public.pg_temp_xxxxx[] +INSERT INTO articles_hash_mx VALUES (51, 1, 'amateus', 1814); +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 +-- verify insert is successfull (not router plannable and executable) +SELECT id + FROM articles_hash_mx + WHERE author_id = 1; +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 + id +--------------------------------------------------------------------- + 1 + 11 + 21 + 31 + 41 + 51 +(6 rows) + +SET client_min_messages to WARNING; +TRUNCATE articles_hash_mx, company_employees_mx, articles_single_shard_hash_mx; +DROP MATERIALIZED VIEW mv_articles_hash_mx_error; +DROP TABLE authors_hash_mx; diff --git a/src/test/regress/sql/ch_bench_having_mx.sql b/src/test/regress/sql/ch_bench_having_mx.sql index 798591a5f87..902e4767884 100644 --- a/src/test/regress/sql/ch_bench_having_mx.sql +++ b/src/test/regress/sql/ch_bench_having_mx.sql @@ -1,3 +1,9 @@ +-- Two alternative test outputs: +-- ch_bench_having_mx.out for PG16 and before +-- ch_bench_having_mx_0.out for PG17 +-- related commit +-- https://github.com/postgres/postgres/commit/fd0398fc + ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1640000; SET citus.shard_replication_factor to 1; SET citus.shard_count to 4; diff --git a/src/test/regress/sql/multi_extension.sql b/src/test/regress/sql/multi_extension.sql index 1726c260f37..8025051441b 100644 --- a/src/test/regress/sql/multi_extension.sql +++ b/src/test/regress/sql/multi_extension.sql @@ -7,6 +7,12 @@ -- not done yet. 
-- -- Upgrade tests verify the objects are added in citus_finish_pg_upgrade() +-- +-- Two alternative test outputs: +-- multi_extension.out for PG16 and before +-- multi_extension_0.out for PG17 +-- related commit +-- https://git.postgresql.org/gitweb/?p=postgresql.git;a=commit;h=e5bc9454e527b1cba97553531d8d4992892fdeef SET citus.next_shard_id TO 580000; CREATE SCHEMA multi_extension; diff --git a/src/test/regress/sql/multi_mx_hide_shard_names.sql b/src/test/regress/sql/multi_mx_hide_shard_names.sql index addc7f90ede..70f87a875e3 100644 --- a/src/test/regress/sql/multi_mx_hide_shard_names.sql +++ b/src/test/regress/sql/multi_mx_hide_shard_names.sql @@ -246,20 +246,25 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name -- PG16 added one more backend type B_STANDALONE_BACKEND -- and also alphabetized the backend types, hence the orders changed --- Relevant PG commit: +-- Relevant PG16 commit: -- https://github.com/postgres/postgres/commit/0c679464a837079acc75ff1d45eaa83f79e05690 +-- Relevant Pg17 commit: +-- https://github.com/postgres/postgres/commit/067701f57758f9baed5bd9d868539738d77bfa92#diff-afc0ebd67534b71b5b94b29a1387aa6eedffe342a5539f52d686428be323e802 SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 -\gset - -\if :server_version_ge_16 -SELECT 4 AS client_backend \gset -SELECT 5 AS bgworker \gset -SELECT 12 AS walsender \gset +SELECT substring(:'server_version', '\d+')::int >= 17 AS server_version_ge_17 \gset +SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 \gset +\if :server_version_ge_17 + SELECT 1 AS client_backend \gset + SELECT 4 AS bgworker \gset + SELECT 5 AS walsender \gset +\elif :server_version_ge_16 + SELECT 4 AS client_backend \gset + SELECT 5 AS bgworker \gset + SELECT 12 AS walsender \gset \else -SELECT 3 AS client_backend \gset -SELECT 4 AS bgworker \gset -SELECT 9 AS walsender \gset + SELECT 3 AS client_backend \gset + SELECT 4 AS bgworker \gset + SELECT 9 AS walsender \gset \endif -- say, we set it to bgworker diff --git a/src/test/regress/sql/multi_mx_router_planner.sql b/src/test/regress/sql/multi_mx_router_planner.sql index 3593c2ac8d0..2b8f27d6901 100644 --- a/src/test/regress/sql/multi_mx_router_planner.sql +++ b/src/test/regress/sql/multi_mx_router_planner.sql @@ -1,4 +1,7 @@ +-- Two alternative test outputs: +-- multi_mx_router_planner.out for PG16 and before +-- multi_mx_router_planner_0.out for PG17 -- =================================================================== -- test router planner functionality for single shard select queries From b844f8506ac9616a1029613fde2536bc35ebb241 Mon Sep 17 00:00:00 2001 From: naisila Date: Thu, 21 Nov 2024 23:18:18 +0300 Subject: [PATCH 2/2] Revert these changes --- src/test/regress/bin/normalize.sed | 3 - .../regress/expected/ch_bench_having_mx.out | 5 - .../regress/expected/ch_bench_having_mx_0.out | 383 --- src/test/regress/expected/multi_extension.out | 6 - .../regress/expected/multi_extension_0.out | 2084 ----------------- .../expected/multi_mx_router_planner.out | 3 - .../expected/multi_mx_router_planner_0.out | 1514 ------------ src/test/regress/sql/ch_bench_having_mx.sql | 6 - src/test/regress/sql/multi_extension.sql | 6 - .../regress/sql/multi_mx_router_planner.sql | 3 - 10 files changed, 4013 deletions(-) delete mode 100644 src/test/regress/expected/ch_bench_having_mx_0.out delete mode 100644 src/test/regress/expected/multi_extension_0.out delete mode 100644 
src/test/regress/expected/multi_mx_router_planner_0.out diff --git a/src/test/regress/bin/normalize.sed b/src/test/regress/bin/normalize.sed index 57c7c66a0dc..1cbd0b404fd 100644 --- a/src/test/regress/bin/normalize.sed +++ b/src/test/regress/bin/normalize.sed @@ -319,6 +319,3 @@ s/COPY DEFAULT only available using COPY FROM/COPY DEFAULT cannot be used with C s/COPY delimiter must not appear in the DEFAULT specification/COPY delimiter character must not appear in the DEFAULT specification/ #endif /* PG_VERSION_NUM < PG_VERSION_17 */ - -# multi_mx_router_planner normalize public.pg_temp_xxxxx -s/DEBUG: drop auto-cascades to type public\.pg_temp_[0-9]+/DEBUG: drop auto-cascades to type public.pg_temp_xxxxx/g diff --git a/src/test/regress/expected/ch_bench_having_mx.out b/src/test/regress/expected/ch_bench_having_mx.out index 83ca36d05ab..f4664fba53c 100644 --- a/src/test/regress/expected/ch_bench_having_mx.out +++ b/src/test/regress/expected/ch_bench_having_mx.out @@ -1,8 +1,3 @@ --- Two alternative test outputs: --- ch_bench_having_mx.out for PG16 and before --- ch_bench_having_mx_0.out for PG17 --- related commit --- https://github.com/postgres/postgres/commit/fd0398fc ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1640000; SET citus.shard_replication_factor to 1; SET citus.shard_count to 4; diff --git a/src/test/regress/expected/ch_bench_having_mx_0.out b/src/test/regress/expected/ch_bench_having_mx_0.out deleted file mode 100644 index b3cf22dc6c2..00000000000 --- a/src/test/regress/expected/ch_bench_having_mx_0.out +++ /dev/null @@ -1,383 +0,0 @@ --- Two alternative test outputs: --- ch_bench_having_mx.out for PG16 and before --- ch_bench_having_mx_0.out for PG17 --- related commit --- https://github.com/postgres/postgres/commit/fd0398fc -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1640000; -SET citus.shard_replication_factor to 1; -SET citus.shard_count to 4; -CREATE SCHEMA ch_bench_having; -SET search_path = ch_bench_having; -CREATE TABLE stock ( - s_w_id int NOT NULL, - s_i_id int NOT NULL, - s_order_cnt int NOT NULL -); -SELECT create_distributed_table('stock','s_w_id'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -\c - - - :worker_1_port -SET search_path = ch_bench_having; -explain (costs false, summary false, timing false) -select s_i_id, sum(s_order_cnt) as ordercount -from stock -where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock) -group by s_i_id -having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) -order by s_i_id; - QUERY PLAN ---------------------------------------------------------------------- - Sort - Sort Key: remote_scan.s_i_id - InitPlan 1 - -> Function Scan on read_intermediate_result intermediate_result - -> HashAggregate - Group Key: remote_scan.s_i_id - Filter: ((pg_catalog.sum(remote_scan.worker_column_3))::bigint > (InitPlan 1).col1) - -> Custom Scan (Citus Adaptive) - -> Distributed Subplan XXX_1 - -> Aggregate - -> Custom Scan (Citus Adaptive) - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> Aggregate - -> Seq Scan on stock_1640000 stock - -> Distributed Subplan XXX_2 - -> Aggregate - -> Custom Scan (Citus Adaptive) - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> Aggregate - -> Seq Scan on stock_1640000 stock - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Node: host=localhost port=xxxxx 
dbname=regression - -> HashAggregate - Group Key: stock.s_i_id - InitPlan 1 - -> Function Scan on read_intermediate_result intermediate_result - -> Seq Scan on stock_1640000 stock - Filter: ((s_order_cnt)::numeric > (InitPlan 1).col1) -(36 rows) - -explain (costs false, summary false, timing false) -select s_i_id, sum(s_order_cnt) as ordercount -from stock -group by s_i_id -having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) -order by s_i_id; - QUERY PLAN ---------------------------------------------------------------------- - Sort - Sort Key: remote_scan.s_i_id - InitPlan 1 - -> Function Scan on read_intermediate_result intermediate_result - -> HashAggregate - Group Key: remote_scan.s_i_id - Filter: ((pg_catalog.sum(remote_scan.worker_column_3))::bigint > (InitPlan 1).col1) - -> Custom Scan (Citus Adaptive) - -> Distributed Subplan XXX_1 - -> Aggregate - -> Custom Scan (Citus Adaptive) - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> Aggregate - -> Seq Scan on stock_1640000 stock - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> HashAggregate - Group Key: stock.s_i_id - -> Seq Scan on stock_1640000 stock -(24 rows) - -explain (costs false, summary false, timing false) -select s_i_id, sum(s_order_cnt) as ordercount -from stock -group by s_i_id -having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock); - QUERY PLAN ---------------------------------------------------------------------- - HashAggregate - Group Key: remote_scan.s_i_id - Filter: ((pg_catalog.sum(remote_scan.worker_column_3))::bigint > (InitPlan 1).col1) - InitPlan 1 - -> Function Scan on read_intermediate_result intermediate_result - -> Custom Scan (Citus Adaptive) - -> Distributed Subplan XXX_1 - -> Aggregate - -> Custom Scan (Citus Adaptive) - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> Aggregate - -> Seq Scan on stock_1640000 stock - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> HashAggregate - Group Key: stock.s_i_id - -> Seq Scan on stock_1640000 stock -(22 rows) - -explain (costs false) -select s_i_id, sum(s_order_cnt) as ordercount -from stock s -group by s_i_id -having (select true) -order by s_i_id; - QUERY PLAN ---------------------------------------------------------------------- - Sort - Sort Key: remote_scan.s_i_id - InitPlan 1 - -> Result - -> HashAggregate - Group Key: remote_scan.s_i_id - -> Result - One-Time Filter: (InitPlan 1).col1 - -> Custom Scan (Citus Adaptive) - Filter: (InitPlan 1).col1 - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> HashAggregate - Group Key: s.s_i_id - -> Seq Scan on stock_1640000 s -(17 rows) - -explain select s_i_id, sum(s_order_cnt) as ordercount -from stock s -group by s_i_id -having (select true); - QUERY PLAN ---------------------------------------------------------------------- - HashAggregate (cost=500.01..503.01 rows=200 width=12) - Group Key: remote_scan.s_i_id - InitPlan 1 - -> Result (cost=0.00..0.01 rows=1 width=1) - -> Result (cost=0.00..0.00 rows=100000 width=12) - One-Time Filter: (InitPlan 1).col1 - -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=100000 width=12) - Filter: (InitPlan 1).col1 - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> HashAggregate 
(cost=40.60..42.60 rows=200 width=12) - Group Key: s.s_i_id - -> Seq Scan on stock_1640000 s (cost=0.00..30.40 rows=2040 width=8) -(15 rows) - -select s_i_id, sum(s_order_cnt) as ordercount -from stock -where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock) -group by s_i_id -having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) -order by s_i_id; - s_i_id | ordercount ---------------------------------------------------------------------- -(0 rows) - -INSERT INTO stock SELECT c, c, c FROM generate_series(1, 5) as c; -select s_i_id, sum(s_order_cnt) as ordercount -from stock -where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock) -group by s_i_id -having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) -order by s_i_id; - s_i_id | ordercount ---------------------------------------------------------------------- - 3 | 3 - 4 | 4 - 5 | 5 -(3 rows) - -select s_i_id, sum(s_order_cnt) as ordercount -from stock -group by s_i_id -having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) -order by s_i_id; - s_i_id | ordercount ---------------------------------------------------------------------- - 3 | 3 - 4 | 4 - 5 | 5 -(3 rows) - -select s_i_id, sum(s_order_cnt) as ordercount -from stock s -where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock) -group by s_i_id -having (select true) -order by s_i_id; - s_i_id | ordercount ---------------------------------------------------------------------- - 1 | 1 - 2 | 2 - 3 | 3 - 4 | 4 - 5 | 5 -(5 rows) - -select s_i_id, sum(s_order_cnt) as ordercount -from stock s -where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock) -group by s_i_id -having (select false) -order by s_i_id; - s_i_id | ordercount ---------------------------------------------------------------------- -(0 rows) - -select s_i_id, sum(s_order_cnt) as ordercount -from stock s -group by s_i_id -having (select true) -order by s_i_id; - s_i_id | ordercount ---------------------------------------------------------------------- - 1 | 1 - 2 | 2 - 3 | 3 - 4 | 4 - 5 | 5 -(5 rows) - -select s_i_id, sum(s_order_cnt) as ordercount -from stock s -group by s_i_id -having (select false) -order by s_i_id; - s_i_id | ordercount ---------------------------------------------------------------------- -(0 rows) - -select s_i_id, sum(s_order_cnt) as ordercount -from stock s -group by s_i_id -having (select true) -order by s_i_id; - s_i_id | ordercount ---------------------------------------------------------------------- - 1 | 1 - 2 | 2 - 3 | 3 - 4 | 4 - 5 | 5 -(5 rows) - --- We don't support correlated subqueries in having -select s_i_id, sum(s_order_cnt) as ordercount -from stock s -where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock) -group by s_i_id -having (select max(s_order_cnt) > 2 as having_query from stock where s_i_id = s.s_i_id) -order by s_i_id; -ERROR: Subqueries in HAVING cannot refer to outer query --- We don't support correlated subqueries in having -select s_i_id, sum(s_order_cnt) as ordercount -from stock s -group by s_i_id -having (select max(s_order_cnt) > 2 as having_query from stock where s_i_id = s.s_i_id) -order by s_i_id; -ERROR: Subqueries in HAVING cannot refer to outer query -\c - - - :master_port -SET citus.shard_replication_factor to 1; -SET citus.shard_count to 4; -SET search_path = ch_bench_having, public; -DROP TABLE stock; -CREATE TABLE stock ( - s_w_id int NOT NULL, - s_i_id int NOT 
NULL, - s_quantity decimal(4,0) NOT NULL, - s_ytd decimal(8,2) NOT NULL, - s_order_cnt int NOT NULL, - s_remote_cnt int NOT NULL, - s_data varchar(50) NOT NULL, - s_dist_01 char(24) NOT NULL, - s_dist_02 char(24) NOT NULL, - s_dist_03 char(24) NOT NULL, - s_dist_04 char(24) NOT NULL, - s_dist_05 char(24) NOT NULL, - s_dist_06 char(24) NOT NULL, - s_dist_07 char(24) NOT NULL, - s_dist_08 char(24) NOT NULL, - s_dist_09 char(24) NOT NULL, - s_dist_10 char(24) NOT NULL, - PRIMARY KEY (s_w_id,s_i_id) -); -insert into stock VALUES -(1, 33, 1, 1, 1, 1, '', '','','','','','','','','',''), -(33, 1, 1, 1, 1, 1, '', '','','','','','','','','',''), -(32, 1, 1, 1, 1, 1, '', '','','','','','','','','',''); -SELECT create_distributed_table('stock','s_w_id'); -NOTICE: Copying data from local table... -NOTICE: copying the data has completed -DETAIL: The local data in the table is no longer visible, but is still on disk. -HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$ch_bench_having.stock$$) - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -\c - - - :worker_1_port -SET search_path = ch_bench_having, public; -select s_i_id, sum(s_order_cnt) as ordercount -from stock, supplier_mx, nation_mx -where mod((s_w_id * s_i_id),10000) = s_suppkey - and s_nationkey = n_nationkey - and n_name = 'GERMANY' -group by s_i_id -having sum(s_order_cnt) > - (select sum(s_order_cnt) * .005 - from stock, supplier_mx, nation_mx - where mod((s_w_id * s_i_id),10000) = s_suppkey - and s_nationkey = n_nationkey - and n_name = 'GERMANY') -order by s_i_id, ordercount desc; - s_i_id | ordercount ---------------------------------------------------------------------- - 1 | 1 - 33 | 1 -(2 rows) - -insert into stock VALUES -(10033, 1, 1, 1, 100000, 1, '', '','','','','','','','','',''); -select s_i_id, sum(s_order_cnt) as ordercount -from stock, supplier_mx, nation_mx -where mod((s_w_id * s_i_id),10000) = s_suppkey - and s_nationkey = n_nationkey - and n_name = 'GERMANY' -group by s_i_id -having sum(s_order_cnt) > - (select sum(s_order_cnt) * .005 - from stock, supplier_mx, nation_mx - where mod((s_w_id * s_i_id),10000) = s_suppkey - and s_nationkey = n_nationkey - and n_name = 'GERMANY') -order by s_i_id, ordercount desc; - s_i_id | ordercount ---------------------------------------------------------------------- - 1 | 100001 -(1 row) - -\c - - - :master_port -BEGIN; -SET LOCAL client_min_messages TO WARNING; -DROP SCHEMA ch_bench_having CASCADE; -COMMIT; diff --git a/src/test/regress/expected/multi_extension.out b/src/test/regress/expected/multi_extension.out index f86e93146b4..b2badd878c2 100644 --- a/src/test/regress/expected/multi_extension.out +++ b/src/test/regress/expected/multi_extension.out @@ -7,12 +7,6 @@ -- not done yet. 
-- -- Upgrade tests verify the objects are added in citus_finish_pg_upgrade() --- --- Two alternative test outputs: --- multi_extension.out for PG16 and before --- multi_extension_0.out for PG17 --- related commit --- https://git.postgresql.org/gitweb/?p=postgresql.git;a=commit;h=e5bc9454e527b1cba97553531d8d4992892fdeef SET citus.next_shard_id TO 580000; CREATE SCHEMA multi_extension; SELECT $definition$ diff --git a/src/test/regress/expected/multi_extension_0.out b/src/test/regress/expected/multi_extension_0.out deleted file mode 100644 index 7b6eb2afc28..00000000000 --- a/src/test/regress/expected/multi_extension_0.out +++ /dev/null @@ -1,2084 +0,0 @@ --- --- MULTI_EXTENSION --- --- Tests around extension creation / upgrades --- --- It'd be nice to script generation of this file, but alas, that's --- not done yet. --- --- Upgrade tests verify the objects are added in citus_finish_pg_upgrade() --- --- Two alternative test outputs: --- multi_extension.out for PG16 and before --- multi_extension_0.out for PG17 --- related commit --- https://git.postgresql.org/gitweb/?p=postgresql.git;a=commit;h=e5bc9454e527b1cba97553531d8d4992892fdeef -SET citus.next_shard_id TO 580000; -CREATE SCHEMA multi_extension; -SELECT $definition$ -CREATE OR REPLACE FUNCTION test.maintenance_worker() - RETURNS pg_stat_activity - LANGUAGE plpgsql -AS $$ -DECLARE - activity record; -BEGIN - DO 'BEGIN END'; -- Force maintenance daemon to start - -- we don't want to wait forever; loop will exit after 20 seconds - FOR i IN 1 .. 200 LOOP - PERFORM pg_stat_clear_snapshot(); - SELECT * INTO activity FROM pg_stat_activity - WHERE application_name = 'Citus Maintenance Daemon' AND datname = current_database(); - IF activity.pid IS NOT NULL THEN - RETURN activity; - ELSE - PERFORM pg_sleep(0.1); - END IF ; - END LOOP; - -- fail if we reach the end of this loop - raise 'Waited too long for maintenance daemon to start'; -END; -$$; -$definition$ create_function_test_maintenance_worker -\gset -CREATE TABLE multi_extension.prev_objects(description text); -CREATE TABLE multi_extension.extension_diff(previous_object text COLLATE "C", - current_object text COLLATE "C"); -CREATE FUNCTION multi_extension.print_extension_changes() -RETURNS TABLE(previous_object text, current_object text) -AS $func$ -BEGIN - SET LOCAL search_path TO multi_extension; - TRUNCATE TABLE extension_diff; - - CREATE TABLE current_objects AS - SELECT pg_catalog.pg_describe_object(classid, objid, 0) - || ' ' || - coalesce(pg_catalog.pg_get_function_result(objid), '') AS description - FROM pg_catalog.pg_depend, pg_catalog.pg_extension e - WHERE refclassid = 'pg_catalog.pg_extension'::pg_catalog.regclass - AND refobjid = e.oid - AND deptype = 'e' - AND e.extname='citus'; - - INSERT INTO extension_diff - SELECT p.description previous_object, c.description current_object - FROM current_objects c FULL JOIN prev_objects p - ON p.description = c.description - WHERE (p.description is null OR c.description is null) - AND c.description IS DISTINCT FROM 'function any_value(anyelement) anyelement' - AND c.description IS DISTINCT FROM 'function any_value_agg(anyelement,anyelement) anyelement'; - - DROP TABLE prev_objects; - ALTER TABLE current_objects RENAME TO prev_objects; - - RETURN QUERY SELECT * FROM extension_diff ORDER BY 1, 2; -END -$func$ LANGUAGE plpgsql; -CREATE SCHEMA test; -:create_function_test_maintenance_worker --- check maintenance daemon is started -SELECT datname, current_database(), - usename, (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 
'citus') -FROM test.maintenance_worker(); - datname | current_database | usename | extowner ---------------------------------------------------------------------- - regression | regression | postgres | postgres -(1 row) - --- ensure no unexpected objects were created outside pg_catalog -SELECT pgio.type, pgio.identity -FROM pg_depend AS pgd, - pg_extension AS pge, - LATERAL pg_identify_object(pgd.classid, pgd.objid, pgd.objsubid) AS pgio -WHERE pgd.refclassid = 'pg_extension'::regclass AND - pgd.refobjid = pge.oid AND - pge.extname = 'citus' AND - pgio.schema NOT IN ('pg_catalog', 'citus', 'citus_internal', 'test', 'columnar', 'columnar_internal') -ORDER BY 1, 2; - type | identity ---------------------------------------------------------------------- - type | public.citus_schemas - type | public.citus_schemas[] - type | public.citus_tables - type | public.citus_tables[] - view | public.citus_schemas - view | public.citus_tables -(6 rows) - --- DROP EXTENSION pre-created by the regression suite -DROP EXTENSION citus; -DROP EXTENSION citus_columnar; -\c --- these tests switch between citus versions and call ddl's that require pg_dist_object to be created -SET citus.enable_metadata_sync TO 'false'; -SET citus.enable_version_checks TO 'false'; -SET columnar.enable_version_checks TO 'false'; -CREATE EXTENSION citus VERSION '8.0-1'; -ALTER EXTENSION citus UPDATE TO '8.0-2'; -ALTER EXTENSION citus UPDATE TO '8.0-3'; -ALTER EXTENSION citus UPDATE TO '8.0-4'; -ALTER EXTENSION citus UPDATE TO '8.0-5'; -ALTER EXTENSION citus UPDATE TO '8.0-6'; -ALTER EXTENSION citus UPDATE TO '8.0-7'; -ALTER EXTENSION citus UPDATE TO '8.0-8'; -ALTER EXTENSION citus UPDATE TO '8.0-9'; -ALTER EXTENSION citus UPDATE TO '8.0-10'; -ALTER EXTENSION citus UPDATE TO '8.0-11'; -ALTER EXTENSION citus UPDATE TO '8.0-12'; -ALTER EXTENSION citus UPDATE TO '8.0-13'; -ALTER EXTENSION citus UPDATE TO '8.1-1'; -ALTER EXTENSION citus UPDATE TO '8.2-1'; -ALTER EXTENSION citus UPDATE TO '8.2-2'; -ALTER EXTENSION citus UPDATE TO '8.2-3'; -ALTER EXTENSION citus UPDATE TO '8.2-4'; -ALTER EXTENSION citus UPDATE TO '8.3-1'; -ALTER EXTENSION citus UPDATE TO '9.0-1'; -ALTER EXTENSION citus UPDATE TO '9.0-2'; -ALTER EXTENSION citus UPDATE TO '9.1-1'; -ALTER EXTENSION citus UPDATE TO '9.2-1'; -ALTER EXTENSION citus UPDATE TO '9.2-2'; --- Snapshot of state at 9.2-2 -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- - | event trigger citus_cascade_to_partition - | function alter_role_if_exists(text,text) boolean - | function array_cat_agg(anycompatiblearray) anycompatiblearray - | function assign_distributed_transaction_id(integer,bigint,timestamp with time zone) void - | function authinfo_valid(text) boolean - | function broadcast_intermediate_result(text,text) bigint - | function check_distributed_deadlocks() boolean - | function citus_add_rebalance_strategy(name,regproc,regproc,regproc,real,real) void - | function citus_blocking_pids(integer) integer[] - | function citus_create_restore_point(text) pg_lsn - | function citus_dist_stat_activity() SETOF record - | function citus_drop_trigger() event_trigger - | function citus_executor_name(integer) text - | function citus_extradata_container(internal) void - | function citus_finish_pg_upgrade() void - | function citus_internal.find_groupid_for_node(text,integer) integer - | function citus_internal.pg_dist_node_trigger_func() trigger - | function 
citus_internal.pg_dist_rebalance_strategy_enterprise_check() trigger - | function citus_internal.pg_dist_rebalance_strategy_trigger_func() trigger - | function citus_internal.pg_dist_shard_placement_trigger_func() trigger - | function citus_internal.refresh_isolation_tester_prepared_statement() void - | function citus_internal.replace_isolation_tester_func() void - | function citus_internal.restore_isolation_tester_func() void - | function citus_isolation_test_session_is_blocked(integer,integer[]) boolean - | function citus_json_concatenate(json,json) json - | function citus_json_concatenate_final(json) json - | function citus_jsonb_concatenate(jsonb,jsonb) jsonb - | function citus_jsonb_concatenate_final(jsonb) jsonb - | function citus_node_capacity_1(integer) real - | function citus_prepare_pg_upgrade() void - | function citus_query_stats() SETOF record - | function citus_relation_size(regclass) bigint - | function citus_server_id() uuid - | function citus_set_default_rebalance_strategy(text) void - | function citus_shard_allowed_on_node_true(bigint,integer) boolean - | function citus_shard_cost_1(bigint) real - | function citus_shard_cost_by_disk_size(bigint) real - | function citus_stat_statements() SETOF record - | function citus_stat_statements_reset() void - | function citus_table_is_visible(oid) boolean - | function citus_table_size(regclass) bigint - | function citus_text_send_as_jsonb(text) bytea - | function citus_total_relation_size(regclass) bigint - | function citus_truncate_trigger() trigger - | function citus_validate_rebalance_strategy_functions(regproc,regproc,regproc) void - | function citus_version() text - | function citus_worker_stat_activity() SETOF record - | function column_name_to_column(regclass,text) text - | function column_to_column_name(regclass,text) text - | function coord_combine_agg(oid,cstring,anyelement) anyelement - | function coord_combine_agg_ffunc(internal,oid,cstring,anyelement) anyelement - | function coord_combine_agg_sfunc(internal,oid,cstring,anyelement) internal - | function create_distributed_function(regprocedure,text,text) void - | function create_distributed_table(regclass,text,citus.distribution_type,text) void - | function create_intermediate_result(text,text) bigint - | function create_reference_table(regclass) void - | function distributed_tables_colocated(regclass,regclass) boolean - | function dump_global_wait_edges() SETOF record - | function dump_local_wait_edges() SETOF record - | function fetch_intermediate_results(text[],text,integer) bigint - | function get_all_active_transactions() SETOF record - | function get_colocated_shard_array(bigint) bigint[] - | function get_colocated_table_array(regclass) regclass[] - | function get_current_transaction_id() record - | function get_global_active_transactions() SETOF record - | function get_rebalance_progress() TABLE(sessionid integer, table_name regclass, shardid bigint, shard_size bigint, sourcename text, sourceport integer, targetname text, targetport integer, progress bigint) - | function get_rebalance_table_shards_plan(regclass,real,integer,bigint[],boolean,name) TABLE(table_name regclass, shardid bigint, shard_size bigint, sourcename text, sourceport integer, targetname text, targetport integer) - | function get_shard_id_for_distribution_column(regclass,"any") bigint - | function isolate_tenant_to_new_shard(regclass,"any",text) bigint - | function json_cat_agg(json) json - | function jsonb_cat_agg(jsonb) jsonb - | function lock_relation_if_exists(text,text) boolean - | function 
lock_shard_metadata(integer,bigint[]) void - | function lock_shard_resources(integer,bigint[]) void - | function mark_tables_colocated(regclass,regclass[]) void - | function master_activate_node(text,integer) integer - | function master_add_inactive_node(text,integer,integer,noderole,name) integer - | function master_add_node(text,integer,integer,noderole,name) integer - | function master_add_secondary_node(text,integer,text,integer,name) integer - | function master_append_table_to_shard(bigint,text,text,integer) real - | function master_apply_delete_command(text) integer - | function master_conninfo_cache_invalidate() trigger - | function master_copy_shard_placement(bigint,text,integer,text,integer,boolean,citus.shard_transfer_mode) void - | function master_create_distributed_table(regclass,text,citus.distribution_type) void - | function master_create_empty_shard(text) bigint - | function master_create_worker_shards(text,integer,integer) void - | function master_disable_node(text,integer) void - | function master_dist_local_group_cache_invalidate() trigger - | function master_dist_node_cache_invalidate() trigger - | function master_dist_object_cache_invalidate() trigger - | function master_dist_partition_cache_invalidate() trigger - | function master_dist_placement_cache_invalidate() trigger - | function master_dist_shard_cache_invalidate() trigger - | function master_drain_node(text,integer,citus.shard_transfer_mode,name) void - | function master_drop_all_shards(regclass,text,text) integer - | function master_drop_sequences(text[]) void - | function master_get_active_worker_nodes() SETOF record - | function master_get_new_placementid() bigint - | function master_get_new_shardid() bigint - | function master_get_table_ddl_events(text) SETOF text - | function master_get_table_metadata(text) record - | function master_modify_multiple_shards(text) integer - | function master_move_shard_placement(bigint,text,integer,text,integer,citus.shard_transfer_mode) void - | function master_remove_distributed_table_metadata_from_workers(regclass,text,text) void - | function master_remove_node(text,integer) void - | function master_remove_partition_metadata(regclass,text,text) void - | function master_run_on_worker(text[],integer[],text[],boolean) SETOF record - | function master_set_node_property(text,integer,text,boolean) void - | function master_unmark_object_distributed(oid,oid,integer) void - | function master_update_node(integer,text,integer,boolean,integer) void - | function master_update_shard_statistics(bigint) bigint - | function master_update_table_statistics(regclass) void - | function poolinfo_valid(text) boolean - | function read_intermediate_result(text,citus_copy_format) SETOF record - | function read_intermediate_results(text[],citus_copy_format) SETOF record - | function rebalance_table_shards(regclass,real,integer,bigint[],citus.shard_transfer_mode,boolean,name) void - | function recover_prepared_transactions() integer - | function relation_is_a_known_shard(regclass) boolean - | function replicate_table_shards(regclass,integer,integer,bigint[],citus.shard_transfer_mode) void - | function role_exists(name) boolean - | function run_command_on_colocated_placements(regclass,regclass,text,boolean) SETOF record - | function run_command_on_placements(regclass,text,boolean) SETOF record - | function run_command_on_shards(regclass,text,boolean) SETOF record - | function run_command_on_workers(text,boolean) SETOF record - | function shard_name(regclass,bigint) text - | function 
start_metadata_sync_to_node(text,integer) void - | function stop_metadata_sync_to_node(text,integer) void - | function task_tracker_assign_task(bigint,integer,text) void - | function task_tracker_cleanup_job(bigint) void - | function task_tracker_conninfo_cache_invalidate() trigger - | function task_tracker_task_status(bigint,integer) integer - | function upgrade_to_reference_table(regclass) void - | function worker_append_table_to_shard(text,text,text,integer) void - | function worker_apply_inter_shard_ddl_command(bigint,text,bigint,text,text) void - | function worker_apply_sequence_command(text) void - | function worker_apply_sequence_command(text,regtype) void - | function worker_apply_shard_ddl_command(bigint,text) void - | function worker_apply_shard_ddl_command(bigint,text,text) void - | function worker_cleanup_job_schema_cache() void - | function worker_create_or_replace_object(text) boolean - | function worker_create_schema(bigint,text) void - | function worker_create_truncate_trigger(regclass) void - | function worker_drop_distributed_table(text) void - | function worker_execute_sql_task(bigint,integer,text,boolean) bigint - | function worker_fetch_foreign_file(text,text,bigint,text[],integer[]) void - | function worker_fetch_partition_file(bigint,integer,integer,integer,text,integer) void - | function worker_hash("any") integer - | function worker_hash_partition_table(bigint,integer,text,text,oid,anyarray) void - | function worker_merge_files_and_run_query(bigint,integer,text,text) void - | function worker_merge_files_into_table(bigint,integer,text[],text[]) void - | function worker_partial_agg(oid,anyelement) cstring - | function worker_partial_agg_ffunc(internal) cstring - | function worker_partial_agg_sfunc(internal,oid,anyelement) internal - | function worker_partition_query_result(text,text,integer,citus.distribution_type,text[],text[],boolean) SETOF record - | function worker_range_partition_table(bigint,integer,text,text,oid,anyarray) void - | function worker_repartition_cleanup(bigint) void - | schema citus - | schema citus_internal - | sequence pg_dist_colocationid_seq - | sequence pg_dist_groupid_seq - | sequence pg_dist_node_nodeid_seq - | sequence pg_dist_placement_placementid_seq - | sequence pg_dist_shardid_seq - | table citus.pg_dist_object - | table pg_dist_authinfo - | table pg_dist_colocation - | table pg_dist_local_group - | table pg_dist_node - | table pg_dist_node_metadata - | table pg_dist_partition - | table pg_dist_placement - | table pg_dist_poolinfo - | table pg_dist_rebalance_strategy - | table pg_dist_shard - | table pg_dist_transaction - | type citus.distribution_type - | type citus.distribution_type[] - | type citus.pg_dist_object - | type citus.pg_dist_object[] - | type citus.shard_transfer_mode - | type citus.shard_transfer_mode[] - | type citus_copy_format - | type citus_copy_format[] - | type citus_dist_stat_activity - | type citus_dist_stat_activity[] - | type citus_lock_waits - | type citus_lock_waits[] - | type citus_shard_indexes_on_worker - | type citus_shard_indexes_on_worker[] - | type citus_shards_on_worker - | type citus_shards_on_worker[] - | type citus_stat_statements - | type citus_stat_statements[] - | type citus_worker_stat_activity - | type citus_worker_stat_activity[] - | type noderole - | type noderole[] - | type pg_dist_authinfo - | type pg_dist_authinfo[] - | type pg_dist_colocation - | type pg_dist_colocation[] - | type pg_dist_local_group - | type pg_dist_local_group[] - | type pg_dist_node - | type pg_dist_node[] - | type 
pg_dist_node_metadata - | type pg_dist_node_metadata[] - | type pg_dist_partition - | type pg_dist_partition[] - | type pg_dist_placement - | type pg_dist_placement[] - | type pg_dist_poolinfo - | type pg_dist_poolinfo[] - | type pg_dist_rebalance_strategy - | type pg_dist_rebalance_strategy[] - | type pg_dist_shard - | type pg_dist_shard[] - | type pg_dist_shard_placement - | type pg_dist_shard_placement[] - | type pg_dist_transaction - | type pg_dist_transaction[] - | view citus_dist_stat_activity - | view citus_lock_waits - | view citus_shard_indexes_on_worker - | view citus_shards_on_worker - | view citus_stat_statements - | view citus_worker_stat_activity - | view pg_dist_shard_placement -(228 rows) - --- Test downgrade to 9.2-2 from 9.2-4 -ALTER EXTENSION citus UPDATE TO '9.2-4'; -ALTER EXTENSION citus UPDATE TO '9.2-2'; --- Should be empty result since upgrade+downgrade should be a no-op -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - -/* - * As we mistakenly bumped schema version to 9.3-1 in a bad release, we support - * updating citus schema from 9.3-1 to 9.2-4, but we do not support updates to 9.3-1. - * - * Hence the query below should fail. - */ -ALTER EXTENSION citus UPDATE TO '9.3-1'; -ERROR: extension "citus" has no update path from version "9.2-2" to version "9.3-1" -ALTER EXTENSION citus UPDATE TO '9.2-4'; --- Snapshot of state at 9.2-4 -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - --- Test downgrade to 9.2-4 from 9.3-2 -ALTER EXTENSION citus UPDATE TO '9.3-2'; -ALTER EXTENSION citus UPDATE TO '9.2-4'; --- Should be empty result since upgrade+downgrade should be a no-op -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - --- Snapshot of state at 9.3-2 -ALTER EXTENSION citus UPDATE TO '9.3-2'; -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- - function citus_extradata_container(internal) void | - | function citus_extradata_container(internal) SETOF record - | function citus_remote_connection_stats() SETOF record - | function replicate_reference_tables() void - | function truncate_local_data_after_distributing_table(regclass) void - | function update_distributed_table_colocation(regclass,text) void - | function worker_create_or_alter_role(text,text,text) boolean -(7 rows) - --- Test downgrade to 9.3-2 from 9.4-1 -ALTER EXTENSION citus UPDATE TO '9.4-1'; -ALTER EXTENSION citus UPDATE TO '9.3-2'; --- Should be empty result since upgrade+downgrade should be a no-op -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - --- Snapshot of state at 9.4-1 -ALTER EXTENSION citus UPDATE TO '9.4-1'; -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- - | function worker_last_saved_explain_analyze() TABLE(explain_analyze_output text, execution_duration double precision) - | function worker_save_query_explain_analyze(text,jsonb) SETOF record -(2 rows) - --- Test upgrade 
paths for backported citus_pg_upgrade functions -ALTER EXTENSION citus UPDATE TO '9.4-2'; -ALTER EXTENSION citus UPDATE TO '9.4-1'; --- Should be empty result, even though the downgrade doesn't undo the upgrade, the --- function signature doesn't change, which is reflected here. -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - -ALTER EXTENSION citus UPDATE TO '9.4-2'; -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - --- Snapshot of state at 9.4-1 -ALTER EXTENSION citus UPDATE TO '9.4-1'; -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - --- Test upgrade paths for backported improvement of master_update_table_statistics function -ALTER EXTENSION citus UPDATE TO '9.4-3'; --- should see the new source code with internal function citus_update_table_statistics -SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDER BY 1; - prosrc ---------------------------------------------------------------------- - citus_update_table_statistics -(1 row) - -ALTER EXTENSION citus UPDATE TO '9.4-2'; --- should see the old source code -SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDER BY 1; - prosrc ---------------------------------------------------------------------- - + - DECLARE + - colocated_tables regclass[]; + - BEGIN + - SELECT get_colocated_table_array(relation) INTO colocated_tables;+ - PERFORM + - master_update_shard_statistics(shardid) + - FROM + - pg_dist_shard + - WHERE + - logicalrelid = ANY (colocated_tables); + - END; + - -(1 row) - --- Should be empty result -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - -ALTER EXTENSION citus UPDATE TO '9.4-3'; --- should see the new source code with internal function citus_update_table_statistics -SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDER BY 1; - prosrc ---------------------------------------------------------------------- - citus_update_table_statistics -(1 row) - --- Should be empty result -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - --- Snapshot of state at 9.4-1 -ALTER EXTENSION citus UPDATE TO '9.4-1'; --- should see the old source code -SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDER BY 1; - prosrc ---------------------------------------------------------------------- - + - DECLARE + - colocated_tables regclass[]; + - BEGIN + - SELECT get_colocated_table_array(relation) INTO colocated_tables;+ - PERFORM + - master_update_shard_statistics(shardid) + - FROM + - pg_dist_shard + - WHERE + - logicalrelid = ANY (colocated_tables); + - END; + - -(1 row) - --- Should be empty result -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - --- Test downgrade to 9.4-1 from 9.5-1 -ALTER EXTENSION citus UPDATE TO '9.5-1'; --- TODO: This test should be moved to a valid downgrade testing 
suite where the downgrade is done, both on the schema and the binaries. Later changes in Citus made a C vs Schema discrepancy error here --- BEGIN; --- SET citus.enable_metadata_sync TO on; --- SELECT master_add_node('localhost', :master_port, groupId=>0); --- CREATE TABLE citus_local_table (a int); --- SELECT create_citus_local_table('citus_local_table'); --- RESET citus.enable_metadata_sync; --- --- -- downgrade from 9.5-1 to 9.4-1 should fail as we have a citus local table --- ALTER EXTENSION citus UPDATE TO '9.4-1'; --- ROLLBACK; --- now we can downgrade as there is no citus local table -ALTER EXTENSION citus UPDATE TO '9.4-1'; --- Should be empty result since upgrade+downgrade should be a no-op -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - --- Snapshot of state at 9.5-1 -ALTER EXTENSION citus UPDATE TO '9.5-1'; -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- - function master_drop_sequences(text[]) void | - function task_tracker_assign_task(bigint,integer,text) void | - function task_tracker_cleanup_job(bigint) void | - function task_tracker_conninfo_cache_invalidate() trigger | - function task_tracker_task_status(bigint,integer) integer | - function worker_execute_sql_task(bigint,integer,text,boolean) bigint | - function worker_merge_files_and_run_query(bigint,integer,text,text) void | - | function create_citus_local_table(regclass) void - | function undistribute_table(regclass) void - | function worker_record_sequence_dependency(regclass,regclass,name) void -(10 rows) - --- Test upgrade paths for backported citus_pg_upgrade functions -ALTER EXTENSION citus UPDATE TO '9.5-2'; -ALTER EXTENSION citus UPDATE TO '9.5-1'; --- Should be empty result, even though the downgrade doesn't undo the upgrade, the --- function signature doesn't change, which is reflected here. 
-SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - -ALTER EXTENSION citus UPDATE TO '9.5-2'; -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - --- Snapshot of state at 9.5-1 -ALTER EXTENSION citus UPDATE TO '9.5-1'; -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - --- Test upgrade paths for backported improvement of master_update_table_statistics function -ALTER EXTENSION citus UPDATE TO '9.5-3'; --- should see the new source code with internal function citus_update_table_statistics -SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDER BY 1; - prosrc ---------------------------------------------------------------------- - citus_update_table_statistics -(1 row) - -ALTER EXTENSION citus UPDATE TO '9.5-2'; --- should see the old source code -SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDER BY 1; - prosrc ---------------------------------------------------------------------- - + - DECLARE + - colocated_tables regclass[]; + - BEGIN + - SELECT get_colocated_table_array(relation) INTO colocated_tables;+ - PERFORM + - master_update_shard_statistics(shardid) + - FROM + - pg_dist_shard + - WHERE + - logicalrelid = ANY (colocated_tables); + - END; + - -(1 row) - --- Should be empty result -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - -ALTER EXTENSION citus UPDATE TO '9.5-3'; --- should see the new source code with internal function citus_update_table_statistics -SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDER BY 1; - prosrc ---------------------------------------------------------------------- - citus_update_table_statistics -(1 row) - --- Should be empty result -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - --- Snapshot of state at 9.5-1 -ALTER EXTENSION citus UPDATE TO '9.5-1'; --- should see the old source code -SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDER BY 1; - prosrc ---------------------------------------------------------------------- - + - DECLARE + - colocated_tables regclass[]; + - BEGIN + - SELECT get_colocated_table_array(relation) INTO colocated_tables;+ - PERFORM + - master_update_shard_statistics(shardid) + - FROM + - pg_dist_shard + - WHERE + - logicalrelid = ANY (colocated_tables); + - END; + - -(1 row) - --- Should be empty result -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - --- We removed the upgrade paths to 10.0-1, 10.0-2 and 10.0-3 due to a bug that blocked --- upgrades to 10.0, Therefore we test upgrades to 10.0-4 instead --- Test downgrade to 9.5-1 from 10.0-4 -ALTER EXTENSION citus UPDATE TO '10.0-4'; -ALTER EXTENSION citus UPDATE TO '9.5-1'; --- Should be empty result since upgrade+downgrade should be a no-op -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | 
current_object ---------------------------------------------------------------------- -(0 rows) - --- Snapshot of state at 10.0-4 -ALTER EXTENSION citus UPDATE TO '10.0-4'; -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- - function citus_total_relation_size(regclass) bigint | - function create_citus_local_table(regclass) void | - function mark_tables_colocated(regclass,regclass[]) void | - function master_conninfo_cache_invalidate() trigger | - function master_create_distributed_table(regclass,text,citus.distribution_type) void | - function master_create_worker_shards(text,integer,integer) void | - function master_dist_local_group_cache_invalidate() trigger | - function master_dist_node_cache_invalidate() trigger | - function master_dist_object_cache_invalidate() trigger | - function master_dist_partition_cache_invalidate() trigger | - function master_dist_placement_cache_invalidate() trigger | - function master_dist_shard_cache_invalidate() trigger | - function master_drop_all_shards(regclass,text,text) integer | - function master_modify_multiple_shards(text) integer | - function undistribute_table(regclass) void | - function upgrade_to_reference_table(regclass) void | - | access method columnar - | function alter_columnar_table_reset(regclass,boolean,boolean,boolean,boolean) void - | function alter_columnar_table_set(regclass,integer,integer,name,integer) void - | function alter_distributed_table(regclass,text,integer,text,boolean) void - | function alter_old_partitions_set_access_method(regclass,timestamp with time zone,name) - | function alter_table_set_access_method(regclass,text) void - | function citus_activate_node(text,integer) integer - | function citus_add_inactive_node(text,integer,integer,noderole,name) integer - | function citus_add_local_table_to_metadata(regclass,boolean) void - | function citus_add_node(text,integer,integer,noderole,name) integer - | function citus_add_secondary_node(text,integer,text,integer,name) integer - | function citus_conninfo_cache_invalidate() trigger - | function citus_copy_shard_placement(bigint,text,integer,text,integer,boolean,citus.shard_transfer_mode) void - | function citus_disable_node(text,integer) void - | function citus_dist_local_group_cache_invalidate() trigger - | function citus_dist_node_cache_invalidate() trigger - | function citus_dist_object_cache_invalidate() trigger - | function citus_dist_partition_cache_invalidate() trigger - | function citus_dist_placement_cache_invalidate() trigger - | function citus_dist_shard_cache_invalidate() trigger - | function citus_drain_node(text,integer,citus.shard_transfer_mode,name) void - | function citus_drop_all_shards(regclass,text,text) integer - | function citus_get_active_worker_nodes() SETOF record - | function citus_internal.columnar_ensure_objects_exist() void - | function citus_move_shard_placement(bigint,text,integer,text,integer,citus.shard_transfer_mode) void - | function citus_remove_node(text,integer) void - | function citus_set_coordinator_host(text,integer,noderole,name) void - | function citus_set_node_property(text,integer,text,boolean) void - | function citus_shard_sizes() SETOF record - | function citus_total_relation_size(regclass,boolean) bigint - | function citus_unmark_object_distributed(oid,oid,integer) void - | function citus_update_node(integer,text,integer,boolean,integer) void - | function citus_update_shard_statistics(bigint) bigint - | function 
citus_update_table_statistics(regclass) void - | function columnar.columnar_handler(internal) table_am_handler - | function fix_pre_citus10_partitioned_table_constraint_names() SETOF regclass - | function fix_pre_citus10_partitioned_table_constraint_names(regclass) void - | function notify_constraint_dropped() void - | function remove_local_tables_from_metadata() void - | function time_partition_range(regclass) record - | function undistribute_table(regclass,boolean) void - | function worker_change_sequence_dependency(regclass,regclass,regclass) void - | function worker_fix_pre_citus10_partitioned_table_constraint_names(regclass,bigint,text) void - | schema columnar - | sequence columnar.storageid_seq - | table columnar.chunk - | table columnar.chunk_group - | table columnar.options - | table columnar.stripe - | type citus_shards - | type citus_shards[] - | type columnar.chunk - | type columnar.chunk[] - | type columnar.chunk_group - | type columnar.chunk_group[] - | type columnar.options - | type columnar.options[] - | type columnar.stripe - | type columnar.stripe[] - | type public.citus_tables - | type public.citus_tables[] - | type time_partitions - | type time_partitions[] - | view citus_shards - | view public.citus_tables - | view time_partitions -(82 rows) - --- check that we depend on the existence of public schema, and we can not drop it now -DROP SCHEMA public; -ERROR: cannot drop schema public because other objects depend on it -DETAIL: extension citus depends on schema public -HINT: Use DROP ... CASCADE to drop the dependent objects too. --- verify that citus_tables view is on pg_catalog if public schema is absent. -ALTER EXTENSION citus UPDATE TO '9.5-1'; -DROP SCHEMA public; -ALTER EXTENSION citus UPDATE TO '10.0-4'; -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- - type public.citus_tables | - type public.citus_tables[] | - view public.citus_tables | - | type citus_tables - | type citus_tables[] - | view citus_tables -(6 rows) - --- recreate public schema, and recreate citus_tables in the public schema by default -CREATE SCHEMA public; --- In PG15, public schema is owned by pg_database_owner role --- Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62 -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15 -\gset -\if :server_version_ge_15 -ALTER SCHEMA public OWNER TO pg_database_owner; -\endif -GRANT ALL ON SCHEMA public TO public; -ALTER EXTENSION citus UPDATE TO '9.5-1'; -ALTER EXTENSION citus UPDATE TO '10.0-4'; -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- - type citus_tables | - type citus_tables[] | - view citus_tables | - | type public.citus_tables - | type public.citus_tables[] - | view public.citus_tables -(6 rows) - --- not print "HINT: " to hide current lib version -\set VERBOSITY terse -CREATE TABLE columnar_table(a INT, b INT) USING columnar; -SET citus.enable_version_checks TO ON; -SET columnar.enable_version_checks TO ON; --- all should throw an error due to version mismatch -VACUUM FULL columnar_table; -ERROR: loaded Citus library version differs from installed extension version -INSERT INTO columnar_table SELECT i FROM generate_series(1, 10) i; -ERROR: loaded Citus library version differs from installed extension version -VACUUM columnar_table; -WARNING: loaded Citus 
library version differs from installed extension version -TRUNCATE columnar_table; -ERROR: loaded Citus library version differs from installed extension version -DROP TABLE columnar_table; -ERROR: loaded Citus library version differs from installed extension version -CREATE INDEX ON columnar_table (a); -ERROR: loaded Citus library version differs from installed extension version -ALTER TABLE columnar_table SET(columnar.compression = pglz); -ERROR: loaded Citus library version differs from installed extension version -ALTER TABLE columnar_table RESET (columnar.compression); -ERROR: loaded Citus library version differs from installed extension version -INSERT INTO columnar_table SELECT * FROM columnar_table; -ERROR: loaded Citus library version differs from installed extension version -SELECT 1 FROM columnar_table; -- columnar custom scan -ERROR: loaded Citus library version differs from installed extension version -SET columnar.enable_custom_scan TO OFF; -SELECT 1 FROM columnar_table; -- seq scan -ERROR: loaded Citus library version differs from installed extension version -CREATE TABLE new_columnar_table (a int) USING columnar; -ERROR: loaded Citus library version differs from installed extension version --- disable version checks for other sessions too -ALTER SYSTEM SET citus.enable_version_checks TO OFF; -SELECT pg_reload_conf(); - pg_reload_conf ---------------------------------------------------------------------- - t -(1 row) - --- do cleanup for the rest of the tests -SET citus.enable_version_checks TO OFF; -SET columnar.enable_version_checks TO OFF; -DROP TABLE columnar_table; -RESET columnar.enable_custom_scan; -\set VERBOSITY default --- Test downgrade to 10.0-4 from 10.1-1 -ALTER EXTENSION citus UPDATE TO '10.1-1'; -ALTER EXTENSION citus UPDATE TO '10.0-4'; --- Should be empty result since upgrade+downgrade should be a no-op -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - --- Snapshot of state at 10.1-1 -ALTER EXTENSION citus UPDATE TO '10.1-1'; -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- - function citus_add_rebalance_strategy(name,regproc,regproc,regproc,real,real) void | - function citus_internal.columnar_ensure_objects_exist() void | - function citus_internal.pg_dist_rebalance_strategy_enterprise_check() trigger | - function create_distributed_table(regclass,text,citus.distribution_type,text) void | - function get_rebalance_progress() TABLE(sessionid integer, table_name regclass, shardid bigint, shard_size bigint, sourcename text, sourceport integer, targetname text, targetport integer, progress bigint) | - function get_rebalance_table_shards_plan(regclass,real,integer,bigint[],boolean,name) TABLE(table_name regclass, shardid bigint, shard_size bigint, sourcename text, sourceport integer, targetname text, targetport integer) | - | function citus_add_rebalance_strategy(name,regproc,regproc,regproc,real,real,real) void - | function citus_cleanup_orphaned_shards() - | function citus_local_disk_space_stats() record - | function create_distributed_table(regclass,text,citus.distribution_type,text,integer) void - | function get_rebalance_progress() TABLE(sessionid integer, table_name regclass, shardid bigint, shard_size bigint, sourcename text, sourceport integer, targetname text, targetport integer, progress bigint, source_shard_size 
bigint, target_shard_size bigint) - | function get_rebalance_table_shards_plan(regclass,real,integer,bigint[],boolean,name,real) TABLE(table_name regclass, shardid bigint, shard_size bigint, sourcename text, sourceport integer, targetname text, targetport integer) - | function worker_partitioned_relation_size(regclass) bigint - | function worker_partitioned_relation_total_size(regclass) bigint - | function worker_partitioned_table_size(regclass) bigint -(15 rows) - --- Test downgrade to 10.1-1 from 10.2-1 -ALTER EXTENSION citus UPDATE TO '10.2-1'; -ALTER EXTENSION citus UPDATE TO '10.1-1'; --- Should be empty result since upgrade+downgrade should be a no-op -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - --- Snapshot of state at 10.2-1 -ALTER EXTENSION citus UPDATE TO '10.2-1'; -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- - function citus_drop_all_shards(regclass,text,text) integer | - function stop_metadata_sync_to_node(text,integer) void | - | function citus_drop_all_shards(regclass,text,text,boolean) integer - | function citus_internal.downgrade_columnar_storage(regclass) void - | function citus_internal.upgrade_columnar_storage(regclass) void - | function citus_internal_add_partition_metadata(regclass,"char",text,integer,"char") void - | function citus_internal_add_placement_metadata(bigint,integer,bigint,integer,bigint) void - | function citus_internal_add_shard_metadata(regclass,bigint,"char",text,text) void - | function citus_internal_delete_shard_metadata(bigint) void - | function citus_internal_update_placement_metadata(bigint,integer,integer) void - | function citus_internal_update_relation_colocation(oid,integer) void - | function create_time_partitions(regclass,interval,timestamp with time zone,timestamp with time zone) boolean - | function drop_old_time_partitions(regclass,timestamp with time zone) - | function get_missing_time_partition_ranges(regclass,interval,timestamp with time zone,timestamp with time zone) TABLE(partition_name text, range_from_value text, range_to_value text) - | function stop_metadata_sync_to_node(text,integer,boolean) void - | function worker_nextval(regclass) integer -(16 rows) - --- Test downgrade to 10.2-1 from 10.2-2 -ALTER EXTENSION citus UPDATE TO '10.2-2'; -ALTER EXTENSION citus UPDATE TO '10.2-1'; --- Should be empty result since upgrade+downgrade should be a no-op -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - --- Snapshot of state at 10.2-2 -ALTER EXTENSION citus UPDATE TO '10.2-2'; -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - --- Test downgrade to 10.2-2 from 10.2-3 -ALTER EXTENSION citus UPDATE TO '10.2-3'; -ALTER EXTENSION citus UPDATE TO '10.2-2'; --- Should be empty result since upgrade+downgrade should be a no-op -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - --- Snapshot of state at 10.2-3 -ALTER EXTENSION citus UPDATE TO '10.2-3'; -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | 
current_object ---------------------------------------------------------------------- -(0 rows) - --- Test downgrade to 10.2-3 from 10.2-4 -ALTER EXTENSION citus UPDATE TO '10.2-4'; -ALTER EXTENSION citus UPDATE TO '10.2-3'; --- Make sure that we don't delete pg_depend entries added in --- columnar--10.2-3--10.2-4.sql when downgrading to 10.2-3. -SELECT COUNT(*)=10 -FROM pg_depend -WHERE classid = 'pg_am'::regclass::oid AND - objid = (select oid from pg_am where amname = 'columnar') AND - objsubid = 0 AND - refclassid = 'pg_class'::regclass::oid AND - refobjsubid = 0 AND - deptype = 'n'; - ?column? ---------------------------------------------------------------------- - t -(1 row) - --- Should be empty result since upgrade+downgrade should be a no-op -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - --- Snapshot of state at 10.2-4 -ALTER EXTENSION citus UPDATE TO '10.2-4'; -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- - | function citus_internal.columnar_ensure_am_depends_catalog() void - | function fix_all_partition_shard_index_names() SETOF regclass - | function fix_partition_shard_index_names(regclass) void - | function worker_fix_partition_shard_index_names(regclass,text,text) void -(4 rows) - --- There was a bug when downgrading to 10.2-2 from 10.2-4 --- Test that we do not have any issues with this particular downgrade -ALTER EXTENSION citus UPDATE TO '10.2-2'; -ALTER EXTENSION citus UPDATE TO '10.2-4'; -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - --- Test downgrade to 10.2-4 from 10.2-5 -ALTER EXTENSION citus UPDATE TO '10.2-5'; -ALTER EXTENSION citus UPDATE TO '10.2-4'; --- Should be empty result since upgrade+downgrade should be a no-op -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - --- Snapshot of state at 10.2-5 -ALTER EXTENSION citus UPDATE TO '10.2-5'; -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - --- Make sure that we defined dependencies from all rel objects (tables, --- indexes, sequences ..) to columnar table access method ... -SELECT pg_class.oid INTO columnar_schema_members -FROM pg_class, pg_namespace -WHERE pg_namespace.oid=pg_class.relnamespace AND - pg_namespace.nspname='columnar'; -SELECT refobjid INTO columnar_schema_members_pg_depend -FROM pg_depend -WHERE classid = 'pg_am'::regclass::oid AND - objid = (select oid from pg_am where amname = 'columnar') AND - objsubid = 0 AND - refclassid = 'pg_class'::regclass::oid AND - refobjsubid = 0 AND - deptype = 'n'; --- ... , so this should be empty, -(TABLE columnar_schema_members EXCEPT TABLE columnar_schema_members_pg_depend) -UNION -(TABLE columnar_schema_members_pg_depend EXCEPT TABLE columnar_schema_members); - oid ---------------------------------------------------------------------- -(0 rows) - --- ... , and both columnar_schema_members_pg_depend & columnar_schema_members --- should have 10 entries. -SELECT COUNT(*)=10 FROM columnar_schema_members_pg_depend; - ?column? 
---------------------------------------------------------------------- - t -(1 row) - -DROP TABLE columnar_schema_members, columnar_schema_members_pg_depend; --- Use a synthetic pg_dist_shard record to show that upgrade fails --- when there are cstore_fdw tables -INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage) VALUES ('pg_dist_shard', 1, 'c'); -ALTER EXTENSION citus UPDATE TO '11.0-1'; -ERROR: cstore_fdw tables are deprecated as of Citus 11.0 -HINT: Install Citus 10.2 and convert your cstore_fdw tables to the columnar access method before upgrading further -CONTEXT: PL/pgSQL function inline_code_block line XX at RAISE -DELETE FROM pg_dist_shard WHERE shardid = 1; --- partitioned table count is tracked on Citus 11 upgrade -CREATE TABLE e_transactions(order_id varchar(255) NULL, transaction_id int) PARTITION BY LIST(transaction_id); -CREATE TABLE orders_2020_07_01 -PARTITION OF e_transactions FOR VALUES IN (1,2,3); -INSERT INTO pg_dist_partition VALUES ('e_transactions'::regclass,'h', '{VAR :varno 1 :varattno 1 :vartype 1043 :vartypmod 259 :varcollid 100 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1}', 7, 's'); -SELECT - (metadata->>'partitioned_citus_table_exists_pre_11')::boolean as partitioned_citus_table_exists_pre_11, - (metadata->>'partitioned_citus_table_exists_pre_11') IS NULL as is_null -FROM - pg_dist_node_metadata; - partitioned_citus_table_exists_pre_11 | is_null ---------------------------------------------------------------------- - | t -(1 row) - --- Test downgrade to 10.2-5 from 11.0-1 -ALTER EXTENSION citus UPDATE TO '11.0-1'; -SELECT - (metadata->>'partitioned_citus_table_exists_pre_11')::boolean as partitioned_citus_table_exists_pre_11, - (metadata->>'partitioned_citus_table_exists_pre_11') IS NULL as is_null -FROM - pg_dist_node_metadata; - partitioned_citus_table_exists_pre_11 | is_null ---------------------------------------------------------------------- - t | f -(1 row) - -DELETE FROM pg_dist_partition WHERE logicalrelid = 'e_transactions'::regclass; -DROP TABLE e_transactions; -ALTER EXTENSION citus UPDATE TO '10.2-5'; --- Should be empty result since upgrade+downgrade should be a no-op -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - --- Snapshot of state at 11.0-1 -ALTER EXTENSION citus UPDATE TO '11.0-1'; -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- - function citus_disable_node(text,integer) void | - function citus_dist_stat_activity() SETOF record | - function citus_worker_stat_activity() SETOF record | - function create_distributed_function(regprocedure,text,text) void | - function master_append_table_to_shard(bigint,text,text,integer) real | - function master_apply_delete_command(text) integer | - function master_get_table_metadata(text) record | - function worker_partition_query_result(text,text,integer,citus.distribution_type,text[],text[],boolean) SETOF record | - table citus.pg_dist_object | - type citus.pg_dist_object | - type citus.pg_dist_object[] | - type citus_worker_stat_activity | - type citus_worker_stat_activity[] | - view citus_worker_stat_activity | - | function citus_backend_gpid() bigint - | function citus_calculate_gpid(integer,integer) bigint - | function citus_check_cluster_node_health() SETOF record - | function 
citus_check_connection_to_node(text,integer) boolean - | function citus_coordinator_nodeid() integer - | function citus_disable_node(text,integer,boolean) void - | function citus_finalize_upgrade_to_citus11(boolean) boolean - | function citus_internal_add_colocation_metadata(integer,integer,integer,regtype,oid) void - | function citus_internal_add_object_metadata(text,text[],text[],integer,integer,boolean) void - | function citus_internal_delete_colocation_metadata(integer) void - | function citus_internal_global_blocked_processes() SETOF record - | function citus_internal_local_blocked_processes() SETOF record - | function citus_nodeid_for_gpid(bigint) integer - | function citus_nodename_for_nodeid(integer) text - | function citus_nodeport_for_nodeid(integer) integer - | function citus_pid_for_gpid(bigint) integer - | function citus_run_local_command(text) void - | function citus_shard_indexes_on_worker() SETOF record - | function citus_shards_on_worker() SETOF record - | function citus_stat_activity() SETOF record - | function create_distributed_function(regprocedure,text,text,boolean) void - | function get_nodeid_for_groupid(integer) integer - | function pg_cancel_backend(bigint) boolean - | function pg_terminate_backend(bigint,bigint) boolean - | function run_command_on_all_nodes(text,boolean,boolean) SETOF record - | function worker_create_or_replace_object(text[]) boolean - | function worker_drop_sequence_dependency(text) void - | function worker_drop_shell_table(text) void - | function worker_partition_query_result(text,text,integer,citus.distribution_type,text[],text[],boolean,boolean,boolean) SETOF record - | table pg_dist_object - | type citus_stat_activity - | type citus_stat_activity[] - | type pg_dist_object - | type pg_dist_object[] - | view citus_stat_activity -(49 rows) - --- Test downgrade to 11.0-1 from 11.0-2 -ALTER EXTENSION citus UPDATE TO '11.0-2'; -ALTER EXTENSION citus UPDATE TO '11.0-1'; --- Should be empty result since upgrade+downgrade should be a no-op -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - --- Snapshot of state at 11.0-2 -ALTER EXTENSION citus UPDATE TO '11.0-2'; -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- - | function citus_finish_citus_upgrade() - | function citus_is_coordinator() boolean - | function run_command_on_coordinator(text,boolean) SETOF record - | function start_metadata_sync_to_all_nodes() boolean -(4 rows) - --- Test downgrade to 11.0-2 from 11.0-3 -ALTER EXTENSION citus UPDATE TO '11.0-3'; -ALTER EXTENSION citus UPDATE TO '11.0-2'; --- Should be empty result since upgrade+downgrade should be a no-op -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - --- Snapshot of state at 11.0-3 -ALTER EXTENSION citus UPDATE TO '11.0-3'; -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - --- Test downgrade to 11.0-3 from 11.0-4 -ALTER EXTENSION citus UPDATE TO '11.0-4'; -ALTER EXTENSION citus UPDATE TO '11.0-3'; --- Should be empty result since upgrade+downgrade should be a no-op -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | 
current_object ---------------------------------------------------------------------- -(0 rows) - --- Snapshot of state at 11.0-4 -ALTER EXTENSION citus UPDATE TO '11.0-4'; -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - --- Test downgrade to 11.0-4 from 11.1-1 -ALTER EXTENSION citus UPDATE TO '11.1-1'; -ALTER EXTENSION citus UPDATE TO '11.0-4'; --- Should be empty result since upgrade+downgrade should be a no-op -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - --- Test CREATE EXTENSION when Citus already exists but Citus_Columnar does not. Should skip -CREATE EXTENSION IF NOT EXISTS citus; -NOTICE: extension "citus" already exists, skipping -CREATE EXTENSION citus; -ERROR: extension "citus" already exists --- Snapshot of state at 11.1-1 -ALTER EXTENSION citus UPDATE TO '11.1-1'; -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- - access method columnar | - function alter_columnar_table_reset(regclass,boolean,boolean,boolean,boolean) void | - function alter_columnar_table_set(regclass,integer,integer,name,integer) void | - function citus_copy_shard_placement(bigint,text,integer,text,integer,boolean,citus.shard_transfer_mode) void | - function citus_internal.columnar_ensure_am_depends_catalog() void | - function citus_internal.downgrade_columnar_storage(regclass) void | - function citus_internal.upgrade_columnar_storage(regclass) void | - function columnar.columnar_handler(internal) table_am_handler | - function get_rebalance_progress() TABLE(sessionid integer, table_name regclass, shardid bigint, shard_size bigint, sourcename text, sourceport integer, targetname text, targetport integer, progress bigint, source_shard_size bigint, target_shard_size bigint) | - function isolate_tenant_to_new_shard(regclass,"any",text) bigint | - function replicate_reference_tables() void | - function worker_cleanup_job_schema_cache() void | - function worker_create_schema(bigint,text) void | - function worker_fetch_foreign_file(text,text,bigint,text[],integer[]) void | - function worker_fetch_partition_file(bigint,integer,integer,integer,text,integer) void | - function worker_hash_partition_table(bigint,integer,text,text,oid,anyarray) void | - function worker_merge_files_into_table(bigint,integer,text[],text[]) void | - function worker_range_partition_table(bigint,integer,text,text,oid,anyarray) void | - function worker_repartition_cleanup(bigint) void | - schema columnar | - sequence columnar.storageid_seq | - table columnar.chunk | - table columnar.chunk_group | - table columnar.options | - table columnar.stripe | - type columnar.chunk | - type columnar.chunk[] | - type columnar.chunk_group | - type columnar.chunk_group[] | - type columnar.options | - type columnar.options[] | - type columnar.stripe | - type columnar.stripe[] | - | function citus_cleanup_orphaned_resources() - | function citus_copy_shard_placement(bigint,text,integer,text,integer,citus.shard_transfer_mode) void - | function citus_internal_delete_partition_metadata(regclass) void - | function citus_job_cancel(bigint) void - | function citus_job_wait(bigint,citus_job_status) void - | function citus_locks() SETOF record - | function 
citus_rebalance_start(name,boolean,citus.shard_transfer_mode) bigint - | function citus_rebalance_stop() void - | function citus_rebalance_wait() void - | function citus_split_shard_by_split_points(bigint,text[],integer[],citus.shard_transfer_mode) void - | function create_distributed_table_concurrently(regclass,text,citus.distribution_type,text,integer) void - | function get_rebalance_progress() TABLE(sessionid integer, table_name regclass, shardid bigint, shard_size bigint, sourcename text, sourceport integer, targetname text, targetport integer, progress bigint, source_shard_size bigint, target_shard_size bigint, operation_type text) - | function isolate_tenant_to_new_shard(regclass,"any",text,citus.shard_transfer_mode) bigint - | function replicate_reference_tables(citus.shard_transfer_mode) void - | function worker_copy_table_to_node(regclass,integer) void - | function worker_split_copy(bigint,text,split_copy_info[]) void - | function worker_split_shard_release_dsm() void - | function worker_split_shard_replication_setup(split_shard_info[]) SETOF replication_slot_info - | sequence pg_dist_background_job_job_id_seq - | sequence pg_dist_background_task_task_id_seq - | sequence pg_dist_cleanup_recordid_seq - | sequence pg_dist_operationid_seq - | table pg_dist_background_job - | table pg_dist_background_task - | table pg_dist_background_task_depend - | table pg_dist_cleanup - | type citus_job_status - | type citus_job_status[] - | type citus_locks - | type citus_locks[] - | type citus_task_status - | type citus_task_status[] - | type pg_dist_background_job - | type pg_dist_background_job[] - | type pg_dist_background_task - | type pg_dist_background_task[] - | type pg_dist_background_task_depend - | type pg_dist_background_task_depend[] - | type pg_dist_cleanup - | type pg_dist_cleanup[] - | type replication_slot_info - | type replication_slot_info[] - | type split_copy_info - | type split_copy_info[] - | type split_shard_info - | type split_shard_info[] - | view citus_locks -(80 rows) - --- Test downgrade to 11.1-1 from 11.2-1 -ALTER EXTENSION citus UPDATE TO '11.2-1'; -ALTER EXTENSION citus UPDATE TO '11.1-1'; --- Should be empty result since upgrade+downgrade should be a no-op -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - --- Snapshot of state at 11.2-1 -ALTER EXTENSION citus UPDATE TO '11.2-1'; -ALTER EXTENSION citus_columnar UPDATE TO '11.2-1'; --- Make sure that we defined dependencies from all rel objects (tables, --- indexes, sequences ..) to columnar table access method ... -SELECT pg_class.oid INTO columnar_schema_members -FROM pg_class, pg_namespace -WHERE pg_namespace.oid=pg_class.relnamespace AND - pg_namespace.nspname='columnar_internal' AND - pg_class.relname NOT IN ('chunk_group_pkey', - 'chunk_pkey', - 'options_pkey', - 'stripe_first_row_number_idx', - 'stripe_pkey'); -SELECT refobjid INTO columnar_schema_members_pg_depend -FROM pg_depend -WHERE classid = 'pg_am'::regclass::oid AND - objid = (select oid from pg_am where amname = 'columnar') AND - objsubid = 0 AND - refclassid = 'pg_class'::regclass::oid AND - refobjsubid = 0 AND - deptype = 'n'; --- ... , so this should be empty, -(TABLE columnar_schema_members EXCEPT TABLE columnar_schema_members_pg_depend) -UNION -(TABLE columnar_schema_members_pg_depend EXCEPT TABLE columnar_schema_members); - oid ---------------------------------------------------------------------- -(0 rows) - --- ... 
, and both columnar_schema_members_pg_depend & columnar_schema_members --- should have 5 entries. -SELECT COUNT(*)=5 FROM columnar_schema_members_pg_depend; - ?column? ---------------------------------------------------------------------- - t -(1 row) - -DROP TABLE columnar_schema_members, columnar_schema_members_pg_depend; -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- - function get_rebalance_progress() TABLE(sessionid integer, table_name regclass, shardid bigint, shard_size bigint, sourcename text, sourceport integer, targetname text, targetport integer, progress bigint, source_shard_size bigint, target_shard_size bigint, operation_type text) | - function worker_append_table_to_shard(text,text,text,integer) void | - function worker_split_shard_replication_setup(split_shard_info[]) SETOF replication_slot_info | - | function citus_copy_shard_placement(bigint,integer,integer,citus.shard_transfer_mode) void - | function citus_get_node_clock() cluster_clock - | function citus_get_transaction_clock() cluster_clock - | function citus_internal_add_placement_metadata(bigint,bigint,integer,bigint) void - | function citus_internal_adjust_local_clock_to_remote(cluster_clock) void - | function citus_is_clock_after(cluster_clock,cluster_clock) boolean - | function citus_job_list() TABLE(job_id bigint, state citus_job_status, job_type name, description text, started_at timestamp with time zone, finished_at timestamp with time zone) - | function citus_job_status(bigint,boolean) TABLE(job_id bigint, state citus_job_status, job_type name, description text, started_at timestamp with time zone, finished_at timestamp with time zone, details jsonb) - | function citus_move_shard_placement(bigint,integer,integer,citus.shard_transfer_mode) void - | function citus_rebalance_status(boolean) TABLE(job_id bigint, state citus_job_status, job_type name, description text, started_at timestamp with time zone, finished_at timestamp with time zone, details jsonb) - | function citus_task_wait(bigint,citus_task_status) void - | function cluster_clock_cmp(cluster_clock,cluster_clock) integer - | function cluster_clock_eq(cluster_clock,cluster_clock) boolean - | function cluster_clock_ge(cluster_clock,cluster_clock) boolean - | function cluster_clock_gt(cluster_clock,cluster_clock) boolean - | function cluster_clock_in(cstring) cluster_clock - | function cluster_clock_le(cluster_clock,cluster_clock) boolean - | function cluster_clock_logical(cluster_clock) bigint - | function cluster_clock_lt(cluster_clock,cluster_clock) boolean - | function cluster_clock_ne(cluster_clock,cluster_clock) boolean - | function cluster_clock_out(cluster_clock) cstring - | function cluster_clock_recv(internal) cluster_clock - | function cluster_clock_send(cluster_clock) bytea - | function get_rebalance_progress() TABLE(sessionid integer, table_name regclass, shardid bigint, shard_size bigint, sourcename text, sourceport integer, targetname text, targetport integer, progress bigint, source_shard_size bigint, target_shard_size bigint, operation_type text, source_lsn pg_lsn, target_lsn pg_lsn, status text) - | function worker_split_shard_replication_setup(split_shard_info[],bigint) SETOF replication_slot_info - | operator <(cluster_clock,cluster_clock) - | operator <=(cluster_clock,cluster_clock) - | operator <>(cluster_clock,cluster_clock) - | operator =(cluster_clock,cluster_clock) - | operator >(cluster_clock,cluster_clock) - | 
operator >=(cluster_clock,cluster_clock) - | operator class cluster_clock_ops for access method btree - | operator family cluster_clock_ops for access method btree - | sequence pg_dist_clock_logical_seq - | type cluster_clock - | type cluster_clock[] -(39 rows) - --- Test downgrade to 11.2-1 from 11.2-2 -ALTER EXTENSION citus UPDATE TO '11.2-2'; -ALTER EXTENSION citus UPDATE TO '11.2-1'; --- Should be empty result since upgrade+downgrade should be a no-op -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - --- Snapshot of state at 11.2-2 -ALTER EXTENSION citus UPDATE TO '11.2-2'; -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- - | function worker_adjust_identity_column_seq_ranges(regclass) void -(1 row) - --- Test downgrade to 11.2-2 from 11.3-1 -ALTER EXTENSION citus UPDATE TO '11.3-1'; -ALTER EXTENSION citus UPDATE TO '11.2-2'; --- Should be empty result since upgrade+downgrade should be a no-op -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - --- Snapshot of state at 11.3-1 -ALTER EXTENSION citus UPDATE TO '11.3-1'; -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- - | function citus_internal_is_replication_origin_tracking_active() boolean - | function citus_internal_mark_node_not_synced(integer,integer) void - | function citus_internal_start_replication_origin_tracking() void - | function citus_internal_stop_replication_origin_tracking() void - | function citus_stat_tenants(boolean) SETOF record - | function citus_stat_tenants_local(boolean) SETOF record - | function citus_stat_tenants_local_reset() void - | function citus_stat_tenants_reset() void - | function worker_drop_all_shell_tables(boolean) - | type citus_stat_tenants - | type citus_stat_tenants[] - | type citus_stat_tenants_local - | type citus_stat_tenants_local[] - | view citus_stat_tenants - | view citus_stat_tenants_local -(15 rows) - --- Test downgrade to 11.3-1 from 11.3-2 -ALTER EXTENSION citus UPDATE TO '11.3-2'; -ALTER EXTENSION citus UPDATE TO '11.3-1'; --- Should be empty result since upgrade+downgrade should be a no-op -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - --- Snapshot of state at 11.3-2 -ALTER EXTENSION citus UPDATE TO '11.3-2'; -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - --- Test downgrade to 11.3-2 from 12.0-1 -ALTER EXTENSION citus UPDATE TO '12.0-1'; -CREATE TABLE null_shard_key (x int, y int); -SET citus.shard_replication_factor TO 1; -SELECT create_distributed_table('null_shard_key', null); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - --- Show that we cannot downgrade to 11.3-2 becuase the cluster has a --- distributed table with single-shard. -ALTER EXTENSION citus UPDATE TO '11.3-2'; -ERROR: cannot downgrade Citus because there are distributed tables without a shard key. 
-DETAIL: To downgrade Citus to an older version, you should first convert those tables to Postgres tables by executing SELECT undistribute_table("%s"). -HINT: You can find the distributed tables without a shard key in the cluster by using the following query: "SELECT * FROM citus_tables WHERE distribution_column = '' AND colocation_id > 0". -CONTEXT: PL/pgSQL function inline_code_block line XX at RAISE -DROP TABLE null_shard_key; -ALTER EXTENSION citus UPDATE TO '11.3-2'; --- Should be empty result since upgrade+downgrade should be a no-op -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - --- Snapshot of state at 12.0-1 -ALTER EXTENSION citus UPDATE TO '12.0-1'; -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- - | function citus_internal_add_tenant_schema(oid,integer) void - | function citus_internal_delete_tenant_schema(oid) void - | function citus_internal_unregister_tenant_schema_globally(oid,text) void - | function citus_schema_distribute(regnamespace) void - | function citus_schema_undistribute(regnamespace) void - | function citus_stat_tenants_local_internal(boolean) SETOF record - | table pg_dist_schema - | type pg_dist_schema - | type pg_dist_schema[] - | type public.citus_schemas - | type public.citus_schemas[] - | view public.citus_schemas -(12 rows) - --- Test downgrade to 12.0-1 from 12.1-1 -ALTER EXTENSION citus UPDATE TO '12.1-1'; -ALTER EXTENSION citus UPDATE TO '12.0-1'; --- Should be empty result since upgrade+downgrade should be a no-op -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - --- Snapshot of state at 12.1-1 -ALTER EXTENSION citus UPDATE TO '12.1-1'; -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- - | function citus_internal_delete_placement_metadata(bigint) void - | function citus_internal_update_none_dist_table_metadata(oid,"char",bigint,boolean) void - | function citus_pause_node_within_txn(integer,boolean,integer) void - | function citus_schema_move(regnamespace,integer,citus.shard_transfer_mode) void - | function citus_schema_move(regnamespace,text,integer,citus.shard_transfer_mode) void -(5 rows) - --- Test downgrade to 12.1-1 from 12.2-1 -ALTER EXTENSION citus UPDATE TO '12.2-1'; -ALTER EXTENSION citus UPDATE TO '12.1-1'; --- Should be empty result since upgrade+downgrade should be a no-op -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) - --- Snapshot of state at 12.2-1 -ALTER EXTENSION citus UPDATE TO '12.2-1'; -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- - function citus_unmark_object_distributed(oid,oid,integer) void | - | function citus_internal.acquire_citus_advisory_object_class_lock(integer,cstring) void - | function citus_internal.add_colocation_metadata(integer,integer,integer,regtype,oid) void - | function citus_internal.add_object_metadata(text,text[],text[],integer,integer,boolean) void - | function 
citus_internal.add_partition_metadata(regclass,"char",text,integer,"char") void - | function citus_internal.add_placement_metadata(bigint,bigint,integer,bigint) void - | function citus_internal.add_shard_metadata(regclass,bigint,"char",text,text) void - | function citus_internal.add_tenant_schema(oid,integer) void - | function citus_internal.adjust_local_clock_to_remote(cluster_clock) void - | function citus_internal.commit_management_command_2pc() void - | function citus_internal.database_command(text) void - | function citus_internal.delete_colocation_metadata(integer) void - | function citus_internal.delete_partition_metadata(regclass) void - | function citus_internal.delete_placement_metadata(bigint) void - | function citus_internal.delete_shard_metadata(bigint) void - | function citus_internal.delete_tenant_schema(oid) void - | function citus_internal.execute_command_on_remote_nodes_as_user(text,text) void - | function citus_internal.global_blocked_processes() SETOF record - | function citus_internal.is_replication_origin_tracking_active() boolean - | function citus_internal.local_blocked_processes() SETOF record - | function citus_internal.mark_node_not_synced(integer,integer) void - | function citus_internal.mark_object_distributed(oid,text,oid,text) void - | function citus_internal.start_management_transaction(xid8) void - | function citus_internal.start_replication_origin_tracking() void - | function citus_internal.stop_replication_origin_tracking() void - | function citus_internal.unregister_tenant_schema_globally(oid,text) void - | function citus_internal.update_none_dist_table_metadata(oid,"char",bigint,boolean) void - | function citus_internal.update_placement_metadata(bigint,integer,integer) void - | function citus_internal.update_relation_colocation(oid,integer) void - | function citus_unmark_object_distributed(oid,oid,integer,boolean) void -(30 rows) - -DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff; --- show running version -SHOW citus.version; - citus.version ---------------------------------------------------------------------- - 12.2devel -(1 row) - --- ensure no unexpected objects were created outside pg_catalog -SELECT pgio.type, pgio.identity -FROM pg_depend AS pgd, - pg_extension AS pge, - LATERAL pg_identify_object(pgd.classid, pgd.objid, pgd.objsubid) AS pgio -WHERE pgd.refclassid = 'pg_extension'::regclass AND - pgd.refobjid = pge.oid AND - pge.extname = 'citus' AND - pgio.schema NOT IN ('pg_catalog', 'citus', 'citus_internal', 'test', 'columnar', 'columnar_internal') -ORDER BY 1, 2; - type | identity ---------------------------------------------------------------------- - type | public.citus_schemas - type | public.citus_schemas[] - type | public.citus_tables - type | public.citus_tables[] - view | public.citus_schemas - view | public.citus_tables -(6 rows) - --- see incompatible version errors out -RESET citus.enable_version_checks; -RESET columnar.enable_version_checks; --- reset version check config for other sessions too -ALTER SYSTEM RESET citus.enable_version_checks; -SELECT pg_reload_conf(); - pg_reload_conf ---------------------------------------------------------------------- - t -(1 row) - -DROP EXTENSION citus; -DROP EXTENSION citus_columnar; -CREATE EXTENSION citus VERSION '8.0-1'; -ERROR: specified version incompatible with loaded Citus library -DETAIL: Loaded library requires 12.2, but 8.0-1 was specified. -HINT: If a newer library is present, restart the database and try the command again. 
--- Test non-distributed queries work even in version mismatch -SET citus.enable_version_checks TO 'false'; -SET columnar.enable_version_checks TO 'false'; -CREATE EXTENSION citus VERSION '8.1-1'; -SET citus.enable_version_checks TO 'true'; -SET columnar.enable_version_checks TO 'true'; --- Test CREATE TABLE -CREATE TABLE version_mismatch_table(column1 int); --- Test COPY -\copy version_mismatch_table FROM STDIN; --- Test INSERT -INSERT INTO version_mismatch_table(column1) VALUES(5); --- Test SELECT -SELECT * FROM version_mismatch_table ORDER BY column1; - column1 ---------------------------------------------------------------------- - 0 - 1 - 2 - 3 - 4 - 5 -(6 rows) - --- Test SELECT from pg_catalog -SELECT d.datname as "Name", - pg_catalog.pg_get_userbyid(d.datdba) as "Owner", - pg_catalog.array_to_string(d.datacl, E'\n') AS "Access privileges" -FROM pg_catalog.pg_database d -ORDER BY 1; - Name | Owner | Access privileges ---------------------------------------------------------------------- - postgres | postgres | - regression | postgres | - template0 | postgres | =c/postgres + - | | postgres=CTc/postgres - template1 | postgres | =c/postgres + - | | postgres=CTc/postgres -(4 rows) - --- We should not distribute table in version mistmatch -SELECT create_distributed_table('version_mismatch_table', 'column1'); -ERROR: loaded Citus library version differs from installed extension version -DETAIL: Loaded library requires 12.2, but the installed extension version is 8.1-1. -HINT: Run ALTER EXTENSION citus UPDATE and try again. --- This function will cause fail in next ALTER EXTENSION -CREATE OR REPLACE FUNCTION pg_catalog.relation_is_a_known_shard(regclass) -RETURNS void LANGUAGE plpgsql -AS $function$ -BEGIN -END; -$function$; -ERROR: cannot change return type of existing function -HINT: Use DROP FUNCTION relation_is_a_known_shard(regclass) first. -SET citus.enable_version_checks TO 'false'; -SET columnar.enable_version_checks TO 'false'; --- This will fail because of previous function declaration -ALTER EXTENSION citus UPDATE TO '9.1-1'; --- We can DROP problematic function and continue ALTER EXTENSION even when version checks are on -SET citus.enable_version_checks TO 'true'; -SET columnar.enable_version_checks TO 'true'; -DROP FUNCTION pg_catalog.relation_is_a_known_shard(regclass); -ERROR: cannot drop function relation_is_a_known_shard(regclass) because extension citus requires it -HINT: You can drop extension citus instead. 
-SET citus.enable_version_checks TO 'false'; -SET columnar.enable_version_checks TO 'false'; -ALTER EXTENSION citus UPDATE TO '9.1-1'; -NOTICE: version "9.1-1" of extension "citus" is already installed --- Test updating to the latest version without specifying the version number -ALTER EXTENSION citus UPDATE; --- re-create in newest version -DROP EXTENSION citus; -DROP EXTENSION citus_columnar; -\c -CREATE EXTENSION citus; --- test cache invalidation in workers -\c - - - :worker_1_port -DROP EXTENSION citus; -DROP EXTENSION citus_columnar; -SET citus.enable_version_checks TO 'false'; -SET columnar.enable_version_checks TO 'false'; -CREATE EXTENSION citus VERSION '8.0-1'; -SET citus.enable_version_checks TO 'true'; -SET columnar.enable_version_checks TO 'true'; --- during ALTER EXTENSION, we should invalidate the cache -ALTER EXTENSION citus UPDATE; --- if cache is invalidated succesfull, this \d should work without any problem -\d - List of relations - Schema | Name | Type | Owner ---------------------------------------------------------------------- - public | citus_schemas | view | postgres - public | citus_tables | view | postgres -(2 rows) - -\c - - - :master_port --- test https://github.com/citusdata/citus/issues/3409 -CREATE USER testuser2 SUPERUSER; -SET ROLE testuser2; -DROP EXTENSION Citus; --- Loop until we see there's no maintenance daemon running -DO $$begin - for i in 0 .. 100 loop - if i = 100 then raise 'Waited too long'; end if; - PERFORM pg_stat_clear_snapshot(); - perform * from pg_stat_activity where application_name = 'Citus Maintenance Daemon'; - if not found then exit; end if; - perform pg_sleep(0.1); - end loop; -end$$; -SELECT datid, datname, usename FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon'; - datid | datname | usename ---------------------------------------------------------------------- -(0 rows) - -CREATE EXTENSION Citus; --- Loop until we there's a maintenance daemon running -DO $$begin - for i in 0 .. 100 loop - if i = 100 then raise 'Waited too long'; end if; - PERFORM pg_stat_clear_snapshot(); - perform * from pg_stat_activity where application_name = 'Citus Maintenance Daemon'; - if found then exit; end if; - perform pg_sleep(0.1); - end loop; -end$$; -SELECT datid, datname, usename FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon'; - datid | datname | usename ---------------------------------------------------------------------- - 16384 | regression | testuser2 -(1 row) - -RESET ROLE; --- check that maintenance daemon gets (re-)started for the right user -DROP EXTENSION citus; -CREATE USER testuser SUPERUSER; -SET ROLE testuser; -CREATE EXTENSION citus; -SELECT datname, current_database(), - usename, (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus') -FROM test.maintenance_worker(); - datname | current_database | usename | extowner ---------------------------------------------------------------------- - regression | regression | testuser | testuser -(1 row) - --- and recreate as the right owner -RESET ROLE; -DROP EXTENSION citus; -CREATE EXTENSION citus; --- Check that maintenance daemon can also be started in another database -CREATE DATABASE another; -NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to other nodes -HINT: You can manually create a database and its extensions on other nodes. 
-\c another -CREATE EXTENSION citus; -CREATE SCHEMA test; -:create_function_test_maintenance_worker --- see that the daemon started -SELECT datname, current_database(), - usename, (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus') -FROM test.maintenance_worker(); - datname | current_database | usename | extowner ---------------------------------------------------------------------- - another | another | postgres | postgres -(1 row) - --- Test that database with active worker can be dropped. -\c regression -CREATE SCHEMA test_daemon; --- we create a similar function on the regression database --- note that this function checks for the existence of the daemon --- when not found, returns true else tries for 5 times and --- returns false -CREATE OR REPLACE FUNCTION test_daemon.maintenance_daemon_died(p_dbname text) - RETURNS boolean - LANGUAGE plpgsql -AS $$ -DECLARE - activity record; -BEGIN - PERFORM pg_stat_clear_snapshot(); - SELECT * INTO activity FROM pg_stat_activity - WHERE application_name = 'Citus Maintenance Daemon' AND datname = p_dbname; - IF activity.pid IS NULL THEN - RETURN true; - ELSE - RETURN false; - END IF; -END; -$$; --- drop the database and see that the daemon is dead -DROP DATABASE another; -SELECT - * -FROM - test_daemon.maintenance_daemon_died('another'); - maintenance_daemon_died ---------------------------------------------------------------------- - t -(1 row) - --- we don't need the schema and the function anymore -DROP SCHEMA test_daemon CASCADE; -NOTICE: drop cascades to function test_daemon.maintenance_daemon_died(text) --- verify citus does not crash while creating a table when run against an older worker --- create_distributed_table piggybacks multiple commands into single one, if one worker --- did not have the required UDF it should fail instead of crash. --- create a test database, configure citus with single node -CREATE DATABASE another; -NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to other nodes -HINT: You can manually create a database and its extensions on other nodes. -\c - - - :worker_1_port -CREATE DATABASE another; -NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to other nodes -HINT: You can manually create a database and its extensions on other nodes. 
-\c - - - :master_port -\c another -CREATE EXTENSION citus; -\c - - - :worker_1_port -CREATE EXTENSION citus; -\c - - - :master_port -SELECT FROM master_add_node('localhost', :worker_1_port); --- -(1 row) - -\c - - - :worker_1_port -ALTER FUNCTION assign_distributed_transaction_id(initiator_node_identifier integer, transaction_number bigint, transaction_stamp timestamp with time zone) -RENAME TO dummy_assign_function; -\c - - - :master_port -SET citus.shard_replication_factor to 1; --- create_distributed_table command should fail -CREATE TABLE t1(a int, b int); -SET client_min_messages TO ERROR; -DO $$ -BEGIN - BEGIN - SELECT create_distributed_table('t1', 'a'); - EXCEPTION WHEN OTHERS THEN - RAISE 'create distributed table failed'; - END; -END; -$$; -ERROR: create distributed table failed -CONTEXT: PL/pgSQL function inline_code_block line XX at RAISE -\c regression -\c - - - :master_port -DROP DATABASE another; -\c - - - :worker_1_port -DROP DATABASE another; -\c - - - :master_port --- only the regression database should have a maintenance daemon -SELECT count(*) FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon'; - count ---------------------------------------------------------------------- - 1 -(1 row) - --- recreate the extension immediately after the maintenancae daemon errors -SELECT pg_cancel_backend(pid) FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon'; - pg_cancel_backend ---------------------------------------------------------------------- - t -(1 row) - -DROP EXTENSION citus; -CREATE EXTENSION citus; --- wait for maintenance daemon restart -SELECT datname, current_database(), - usename, (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus') -FROM test.maintenance_worker(); - datname | current_database | usename | extowner ---------------------------------------------------------------------- - regression | regression | postgres | postgres -(1 row) - --- confirm that there is only one maintenance daemon -SELECT count(*) FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon'; - count ---------------------------------------------------------------------- - 1 -(1 row) - --- kill the maintenance daemon -SELECT pg_cancel_backend(pid) FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon'; - pg_cancel_backend ---------------------------------------------------------------------- - t -(1 row) - --- reconnect -\c - - - :master_port --- run something that goes through planner hook and therefore kicks of maintenance daemon -SELECT 1; - ?column? 
---------------------------------------------------------------------- - 1 -(1 row) - --- wait for maintenance daemon restart -SELECT datname, current_database(), - usename, (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus') -FROM test.maintenance_worker(); - datname | current_database | usename | extowner ---------------------------------------------------------------------- - regression | regression | postgres | postgres -(1 row) - --- confirm that there is only one maintenance daemon -SELECT count(*) FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon'; - count ---------------------------------------------------------------------- - 1 -(1 row) - --- confirm that we can create a distributed table concurrently on an empty node -DROP EXTENSION citus; -CREATE EXTENSION citus; -CREATE TABLE test (x int, y int); -INSERT INTO test VALUES (1,2); -SET citus.shard_replication_factor TO 1; -SELECT create_distributed_table_concurrently('test','x'); -NOTICE: relation test does not have a REPLICA IDENTITY or PRIMARY KEY -DETAIL: UPDATE and DELETE commands on the relation will error out during create_distributed_table_concurrently unless there is a REPLICA IDENTITY or PRIMARY KEY. INSERT commands will still work. - create_distributed_table_concurrently ---------------------------------------------------------------------- - -(1 row) - -DROP TABLE test; -TRUNCATE pg_dist_node; --- confirm that we can create a distributed table on an empty node -CREATE TABLE test (x int, y int); -INSERT INTO test VALUES (1,2); -SET citus.shard_replication_factor TO 1; -SELECT create_distributed_table('test','x'); -NOTICE: Copying data from local table... -NOTICE: copying the data has completed -DETAIL: The local data in the table is no longer visible, but is still on disk. -HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.test$$) - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -DROP TABLE test; -TRUNCATE pg_dist_node; --- confirm that we can create a single-shard table on an empty node -CREATE TABLE test (x int, y int); -INSERT INTO test VALUES (1,2); -SET citus.shard_replication_factor TO 1; -SELECT create_distributed_table('test', null, colocate_with=>'none', distribution_type=>null); -NOTICE: Copying data from local table... -NOTICE: copying the data has completed -DETAIL: The local data in the table is no longer visible, but is still on disk. 
-HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.test$$) - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - --- and make sure that we can't remove the coordinator due to "test" -SELECT citus_remove_node('localhost', :master_port); -ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx -DETAIL: One of the table(s) that prevents the operation complete successfully is public.test -HINT: To proceed, either drop the tables or use undistribute_table() function to convert them to local tables -DROP TABLE test; --- and now we should be able to remove the coordinator -SELECT citus_remove_node('localhost', :master_port); - citus_remove_node ---------------------------------------------------------------------- - -(1 row) - --- confirm that we can create a tenant schema / table on an empty node -SET citus.enable_schema_based_sharding TO ON; -CREATE SCHEMA tenant_schema; -CREATE TABLE tenant_schema.test(x int, y int); -SELECT colocationid = ( - SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'tenant_schema.test'::regclass -) -FROM pg_dist_schema -WHERE schemaid::regnamespace::text = 'tenant_schema'; - ?column? ---------------------------------------------------------------------- - t -(1 row) - --- and make sure that we can't remove the coordinator due to "test" -SELECT citus_remove_node('localhost', :master_port); -ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx -DETAIL: One of the table(s) that prevents the operation complete successfully is tenant_schema.test -HINT: To proceed, either drop the tables or use undistribute_table() function to convert them to local tables -BEGIN; - SET LOCAL client_min_messages TO WARNING; - DROP SCHEMA tenant_schema CASCADE; -COMMIT; --- and now we should be able to remove the coordinator -SELECT citus_remove_node('localhost', :master_port); - citus_remove_node ---------------------------------------------------------------------- - -(1 row) - -CREATE SCHEMA tenant_schema; --- Make sure that we can sync metadata for empty tenant schemas --- when adding the first node to the cluster. -SELECT 1 FROM citus_add_node('localhost', :worker_1_port); - ?column? ---------------------------------------------------------------------- - 1 -(1 row) - -DROP SCHEMA tenant_schema; -SELECT citus_remove_node('localhost', :worker_1_port); - citus_remove_node ---------------------------------------------------------------------- - -(1 row) - -RESET citus.enable_schema_based_sharding; --- confirm that we can create a reference table on an empty node -CREATE TABLE test (x int, y int); -INSERT INTO test VALUES (1,2); -SELECT create_reference_table('test'); -NOTICE: Copying data from local table... -NOTICE: copying the data has completed -DETAIL: The local data in the table is no longer visible, but is still on disk. 
-HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.test$$) - create_reference_table ---------------------------------------------------------------------- - -(1 row) - -DROP TABLE test; -TRUNCATE pg_dist_node; --- confirm that we can create a local table on an empty node -CREATE TABLE test (x int, y int); -INSERT INTO test VALUES (1,2); -SELECT citus_add_local_table_to_metadata('test'); - citus_add_local_table_to_metadata ---------------------------------------------------------------------- - -(1 row) - -DROP TABLE test; --- Verify that we don't consider the schemas created by extensions as tenant schemas. --- Easiest way of verifying this is to drop and re-create columnar extension. -DROP EXTENSION citus_columnar; -SET citus.enable_schema_based_sharding TO ON; -CREATE EXTENSION citus_columnar; -SELECT COUNT(*)=0 FROM pg_dist_schema -WHERE schemaid IN ('columnar'::regnamespace, 'columnar_internal'::regnamespace); - ?column? ---------------------------------------------------------------------- - t -(1 row) - -RESET citus.enable_schema_based_sharding; -DROP EXTENSION citus; -CREATE EXTENSION citus; -DROP TABLE version_mismatch_table; -DROP SCHEMA multi_extension; -ERROR: cannot drop schema multi_extension because other objects depend on it -DETAIL: function multi_extension.print_extension_changes() depends on schema multi_extension -HINT: Use DROP ... CASCADE to drop the dependent objects too. diff --git a/src/test/regress/expected/multi_mx_router_planner.out b/src/test/regress/expected/multi_mx_router_planner.out index a5b863b096e..5ac6093cb99 100644 --- a/src/test/regress/expected/multi_mx_router_planner.out +++ b/src/test/regress/expected/multi_mx_router_planner.out @@ -1,6 +1,3 @@ --- Two alternative test outputs: --- multi_mx_router_planner.out for PG16 and before --- multi_mx_router_planner_0.out for PG17 -- =================================================================== -- test router planner functionality for single shard select queries -- =================================================================== diff --git a/src/test/regress/expected/multi_mx_router_planner_0.out b/src/test/regress/expected/multi_mx_router_planner_0.out deleted file mode 100644 index d66834e01cd..00000000000 --- a/src/test/regress/expected/multi_mx_router_planner_0.out +++ /dev/null @@ -1,1514 +0,0 @@ --- Two alternative test outputs: --- multi_mx_router_planner.out for PG16 and before --- multi_mx_router_planner_0.out for PG17 --- =================================================================== --- test router planner functionality for single shard select queries --- =================================================================== --- run all the router queries from the one of the workers --- and CTE inlining is not relevant to router plannery anyway -\c - - - :worker_1_port --- this table is used in a CTE test -CREATE TABLE authors_hash_mx ( name text, id bigint ); --- create a bunch of test data -INSERT INTO articles_hash_mx VALUES ( 1, 1, 'arsenous', 9572); -INSERT INTO articles_hash_mx VALUES ( 2, 2, 'abducing', 13642); -INSERT INTO articles_hash_mx VALUES ( 3, 3, 'asternal', 10480); -INSERT INTO articles_hash_mx VALUES ( 4, 4, 'altdorfer', 14551); -INSERT INTO articles_hash_mx VALUES ( 5, 5, 'aruru', 11389); -INSERT INTO articles_hash_mx VALUES ( 6, 6, 'atlases', 15459); -INSERT INTO articles_hash_mx VALUES ( 7, 7, 'aseptic', 12298); -INSERT INTO articles_hash_mx VALUES ( 8, 8, 'agatized', 16368); -INSERT INTO articles_hash_mx VALUES ( 9, 9, 
'alligate', 438); -INSERT INTO articles_hash_mx VALUES (10, 10, 'aggrandize', 17277); -INSERT INTO articles_hash_mx VALUES (11, 1, 'alamo', 1347); -INSERT INTO articles_hash_mx VALUES (12, 2, 'archiblast', 18185); -INSERT INTO articles_hash_mx VALUES (13, 3, 'aseyev', 2255); -INSERT INTO articles_hash_mx VALUES (14, 4, 'andesite', 19094); -INSERT INTO articles_hash_mx VALUES (15, 5, 'adversa', 3164); -INSERT INTO articles_hash_mx VALUES (16, 6, 'allonym', 2); -INSERT INTO articles_hash_mx VALUES (17, 7, 'auriga', 4073); -INSERT INTO articles_hash_mx VALUES (18, 8, 'assembly', 911); -INSERT INTO articles_hash_mx VALUES (19, 9, 'aubergiste', 4981); -INSERT INTO articles_hash_mx VALUES (20, 10, 'absentness', 1820); -INSERT INTO articles_hash_mx VALUES (21, 1, 'arcading', 5890); -INSERT INTO articles_hash_mx VALUES (22, 2, 'antipope', 2728); -INSERT INTO articles_hash_mx VALUES (23, 3, 'abhorring', 6799); -INSERT INTO articles_hash_mx VALUES (24, 4, 'audacious', 3637); -INSERT INTO articles_hash_mx VALUES (25, 5, 'antehall', 7707); -INSERT INTO articles_hash_mx VALUES (26, 6, 'abington', 4545); -INSERT INTO articles_hash_mx VALUES (27, 7, 'arsenous', 8616); -INSERT INTO articles_hash_mx VALUES (28, 8, 'aerophyte', 5454); -INSERT INTO articles_hash_mx VALUES (29, 9, 'amateur', 9524); -INSERT INTO articles_hash_mx VALUES (30, 10, 'andelee', 6363); -INSERT INTO articles_hash_mx VALUES (31, 1, 'athwartships', 7271); -INSERT INTO articles_hash_mx VALUES (32, 2, 'amazon', 11342); -INSERT INTO articles_hash_mx VALUES (33, 3, 'autochrome', 8180); -INSERT INTO articles_hash_mx VALUES (34, 4, 'amnestied', 12250); -INSERT INTO articles_hash_mx VALUES (35, 5, 'aminate', 9089); -INSERT INTO articles_hash_mx VALUES (36, 6, 'ablation', 13159); -INSERT INTO articles_hash_mx VALUES (37, 7, 'archduchies', 9997); -INSERT INTO articles_hash_mx VALUES (38, 8, 'anatine', 14067); -INSERT INTO articles_hash_mx VALUES (39, 9, 'anchises', 10906); -INSERT INTO articles_hash_mx VALUES (40, 10, 'attemper', 14976); -INSERT INTO articles_hash_mx VALUES (41, 1, 'aznavour', 11814); -INSERT INTO articles_hash_mx VALUES (42, 2, 'ausable', 15885); -INSERT INTO articles_hash_mx VALUES (43, 3, 'affixal', 12723); -INSERT INTO articles_hash_mx VALUES (44, 4, 'anteport', 16793); -INSERT INTO articles_hash_mx VALUES (45, 5, 'afrasia', 864); -INSERT INTO articles_hash_mx VALUES (46, 6, 'atlanta', 17702); -INSERT INTO articles_hash_mx VALUES (47, 7, 'abeyance', 1772); -INSERT INTO articles_hash_mx VALUES (48, 8, 'alkylic', 18610); -INSERT INTO articles_hash_mx VALUES (49, 9, 'anyone', 2681); -INSERT INTO articles_hash_mx VALUES (50, 10, 'anjanette', 19519); -SET client_min_messages TO 'DEBUG2'; --- insert a single row for the test -INSERT INTO articles_single_shard_hash_mx VALUES (50, 10, 'anjanette', 19519); -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 10 --- single-shard tests --- many of the tests in this file is intended for testing non-fast-path --- router planner, so we're explicitly disabling it in this file. 
--- We've bunch of other tests that triggers fast-path-router -SET citus.enable_fast_path_router_planner TO false; --- test simple select for a single row -SELECT * FROM articles_hash_mx WHERE author_id = 10 AND id = 50; -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 10 - id | author_id | title | word_count ---------------------------------------------------------------------- - 50 | 10 | anjanette | 19519 -(1 row) - --- get all titles by a single author -SELECT title FROM articles_hash_mx WHERE author_id = 10; -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 10 - title ---------------------------------------------------------------------- - aggrandize - absentness - andelee - attemper - anjanette -(5 rows) - --- try ordering them by word count -SELECT title, word_count FROM articles_hash_mx - WHERE author_id = 10 - ORDER BY word_count DESC NULLS LAST; -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 10 - title | word_count ---------------------------------------------------------------------- - anjanette | 19519 - aggrandize | 17277 - attemper | 14976 - andelee | 6363 - absentness | 1820 -(5 rows) - --- look at last two articles by an author -SELECT title, id FROM articles_hash_mx - WHERE author_id = 5 - ORDER BY id - LIMIT 2; -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 5 - title | id ---------------------------------------------------------------------- - aruru | 5 - adversa | 15 -(2 rows) - --- find all articles by two authors in same shard --- but plan is not router executable due to order by -SELECT title, author_id FROM articles_hash_mx - WHERE author_id = 7 OR author_id = 8 - ORDER BY author_id ASC, id; -DEBUG: Creating router plan - title | author_id ---------------------------------------------------------------------- - aseptic | 7 - auriga | 7 - arsenous | 7 - archduchies | 7 - abeyance | 7 - agatized | 8 - assembly | 8 - aerophyte | 8 - anatine | 8 - alkylic | 8 -(10 rows) - --- same query is router executable with no order by -SELECT title, author_id FROM articles_hash_mx - WHERE author_id = 7 OR author_id = 8; -DEBUG: Creating router plan - title | author_id ---------------------------------------------------------------------- - aseptic | 7 - agatized | 8 - auriga | 7 - assembly | 8 - arsenous | 7 - aerophyte | 8 - archduchies | 7 - anatine | 8 - abeyance | 7 - alkylic | 8 -(10 rows) - --- add in some grouping expressions, still on same shard --- having queries unsupported in Citus -SELECT author_id, sum(word_count) AS corpus_size FROM articles_hash_mx - WHERE author_id = 1 OR author_id = 7 OR author_id = 8 OR author_id = 10 - GROUP BY author_id - HAVING sum(word_count) > 1000 - ORDER BY sum(word_count) DESC; -DEBUG: Creating router plan - author_id | corpus_size ---------------------------------------------------------------------- - 10 | 59955 - 8 | 55410 - 7 | 36756 - 1 | 35894 -(4 rows) - --- however having clause is supported if it goes to a single shard -SELECT author_id, sum(word_count) AS corpus_size FROM articles_hash_mx - WHERE author_id = 1 - GROUP BY author_id - HAVING sum(word_count) > 1000 - ORDER BY sum(word_count) DESC; -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 1 - author_id | corpus_size ---------------------------------------------------------------------- - 1 | 35894 -(1 row) - --- query is a single shard query but can't do shard pruning, --- not router-plannable due to <= 
and IN -SELECT * FROM articles_hash_mx WHERE author_id <= 1; -DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count ---------------------------------------------------------------------- - 1 | 1 | arsenous | 9572 - 11 | 1 | alamo | 1347 - 21 | 1 | arcading | 5890 - 31 | 1 | athwartships | 7271 - 41 | 1 | aznavour | 11814 -(5 rows) - -SELECT * FROM articles_hash_mx WHERE author_id IN (1, 3); -DEBUG: Creating router plan - id | author_id | title | word_count ---------------------------------------------------------------------- - 1 | 1 | arsenous | 9572 - 3 | 3 | asternal | 10480 - 11 | 1 | alamo | 1347 - 13 | 3 | aseyev | 2255 - 21 | 1 | arcading | 5890 - 23 | 3 | abhorring | 6799 - 31 | 1 | athwartships | 7271 - 33 | 3 | autochrome | 8180 - 41 | 1 | aznavour | 11814 - 43 | 3 | affixal | 12723 -(10 rows) - --- queries with CTEs are supported -WITH first_author AS ( SELECT id FROM articles_hash_mx WHERE author_id = 1) -SELECT * FROM first_author; -DEBUG: CTE first_author is going to be inlined via distributed planning -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 1 - id ---------------------------------------------------------------------- - 1 - 11 - 21 - 31 - 41 -(5 rows) - --- queries with CTEs are supported even if CTE is not referenced inside query -WITH first_author AS ( SELECT id FROM articles_hash_mx WHERE author_id = 1) -SELECT title FROM articles_hash_mx WHERE author_id = 1; -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 1 - title ---------------------------------------------------------------------- - arsenous - alamo - arcading - athwartships - aznavour -(5 rows) - --- two CTE joins are supported if they go to the same worker -WITH id_author AS ( SELECT id, author_id FROM articles_hash_mx WHERE author_id = 1), -id_title AS (SELECT id, title from articles_hash_mx WHERE author_id = 1) -SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id; -DEBUG: CTE id_author is going to be inlined via distributed planning -DEBUG: CTE id_title is going to be inlined via distributed planning -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 1 - id | author_id | id | title ---------------------------------------------------------------------- - 1 | 1 | 1 | arsenous - 11 | 1 | 11 | alamo - 21 | 1 | 21 | arcading - 31 | 1 | 31 | athwartships - 41 | 1 | 41 | aznavour -(5 rows) - -WITH id_author AS ( SELECT id, author_id FROM articles_hash_mx WHERE author_id = 1), -id_title AS (SELECT id, title from articles_hash_mx WHERE author_id = 3) -SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id; -DEBUG: CTE id_author is going to be inlined via distributed planning -DEBUG: CTE id_title is going to be inlined via distributed planning -DEBUG: Creating router plan - id | author_id | id | title ---------------------------------------------------------------------- -(0 rows) - --- CTE joins on different workers are supported because they are both planned recursively -WITH id_author AS ( SELECT id, author_id FROM articles_hash_mx WHERE author_id = 1), -id_title AS (SELECT id, title from articles_hash_mx WHERE author_id = 2) -SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id; -DEBUG: CTE id_author is going to be inlined via distributed planning -DEBUG: CTE id_title is going to be inlined via distributed planning -DEBUG: router planner does not support queries that reference non-colocated distributed tables -DEBUG: Creating 
router plan -DEBUG: query has a single distribution column value: 2 -DEBUG: generating subplan XXX_1 for subquery SELECT id, title FROM public.articles_hash_mx WHERE (author_id OPERATOR(pg_catalog.=) 2) -DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT id_author.id, id_author.author_id, id_title.id, id_title.title FROM (SELECT articles_hash_mx.id, articles_hash_mx.author_id FROM public.articles_hash_mx WHERE (articles_hash_mx.author_id OPERATOR(pg_catalog.=) 1)) id_author, (SELECT intermediate_result.id, intermediate_result.title FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, title character varying(20))) id_title WHERE (id_author.id OPERATOR(pg_catalog.=) id_title.id) -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 1 - id | author_id | id | title ---------------------------------------------------------------------- -(0 rows) - -WITH update_article AS ( - UPDATE articles_hash_mx SET word_count = 11 WHERE id = 1 AND word_count = 10 RETURNING * -) -SELECT coalesce(1,random()); -DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan XXX_1 for CTE update_article: UPDATE public.articles_hash_mx SET word_count = 11 WHERE ((id OPERATOR(pg_catalog.=) 1) AND (word_count OPERATOR(pg_catalog.=) 10)) RETURNING id, author_id, title, word_count -DEBUG: Creating router plan -DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT COALESCE((1)::double precision, random()) AS "coalesce" -DEBUG: Creating router plan - coalesce ---------------------------------------------------------------------- - 1 -(1 row) - -WITH update_article AS ( - UPDATE articles_hash_mx SET word_count = 10 WHERE author_id = 1 AND id = 1 AND word_count = 11 RETURNING * -) -SELECT coalesce(1,random()); -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 1 - coalesce ---------------------------------------------------------------------- - 1 -(1 row) - --- recursive CTEs are supported when filtered on partition column -INSERT INTO company_employees_mx values(1, 1, 0); -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 1 -INSERT INTO company_employees_mx values(1, 2, 1); -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 1 -INSERT INTO company_employees_mx values(1, 3, 1); -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 1 -INSERT INTO company_employees_mx values(1, 4, 2); -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 1 -INSERT INTO company_employees_mx values(1, 5, 4); -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 1 -INSERT INTO company_employees_mx values(3, 1, 0); -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 3 -INSERT INTO company_employees_mx values(3, 15, 1); -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 3 -INSERT INTO company_employees_mx values(3, 3, 1); -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 3 --- find employees at top 2 level within company hierarchy -WITH RECURSIVE hierarchy as ( - SELECT *, 1 AS level - FROM company_employees_mx - WHERE company_id = 1 and manager_id = 0 - UNION - SELECT ce.*, (h.level+1) - FROM hierarchy h JOIN company_employees_mx ce - ON (h.employee_id = ce.manager_id AND - h.company_id = ce.company_id AND - ce.company_id = 1)) -SELECT * 
FROM hierarchy WHERE LEVEL <= 2; -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 1 - company_id | employee_id | manager_id | level ---------------------------------------------------------------------- - 1 | 1 | 0 | 1 - 1 | 2 | 1 | 2 - 1 | 3 | 1 | 2 -(3 rows) - --- query becomes not router plannble and gets rejected --- if filter on company is dropped -WITH RECURSIVE hierarchy as ( - SELECT *, 1 AS level - FROM company_employees_mx - WHERE company_id = 1 and manager_id = 0 - UNION - SELECT ce.*, (h.level+1) - FROM hierarchy h JOIN company_employees_mx ce - ON (h.employee_id = ce.manager_id AND - h.company_id = ce.company_id)) -SELECT * FROM hierarchy WHERE LEVEL <= 2; -DEBUG: Router planner cannot handle multi-shard select queries -ERROR: recursive CTEs are only supported when they contain a filter on the distribution column --- logically wrong query, query involves different shards --- from the same table, but still router plannable due to --- shard being placed on the same worker. -WITH RECURSIVE hierarchy as ( - SELECT *, 1 AS level - FROM company_employees_mx - WHERE company_id = 3 and manager_id = 0 - UNION - SELECT ce.*, (h.level+1) - FROM hierarchy h JOIN company_employees_mx ce - ON (h.employee_id = ce.manager_id AND - h.company_id = ce.company_id AND - ce.company_id = 2)) -SELECT * FROM hierarchy WHERE LEVEL <= 2; -DEBUG: router planner does not support queries that reference non-colocated distributed tables -ERROR: recursive CTEs are only supported when they contain a filter on the distribution column --- grouping sets are supported on single shard -SELECT - id, substring(title, 2, 1) AS subtitle, count(*) - FROM articles_hash_mx - WHERE author_id = 1 or author_id = 3 - GROUP BY GROUPING SETS ((id),(subtitle)) - ORDER BY id, subtitle; -DEBUG: Creating router plan - id | subtitle | count ---------------------------------------------------------------------- - 1 | | 1 - 3 | | 1 - 11 | | 1 - 13 | | 1 - 21 | | 1 - 23 | | 1 - 31 | | 1 - 33 | | 1 - 41 | | 1 - 43 | | 1 - | b | 1 - | f | 1 - | l | 1 - | r | 2 - | s | 2 - | t | 1 - | u | 1 - | z | 1 -(18 rows) - --- grouping sets are not supported on multiple shards -SELECT - id, substring(title, 2, 1) AS subtitle, count(*) - FROM articles_hash_mx - WHERE author_id = 1 or author_id = 2 - GROUP BY GROUPING SETS ((id),(subtitle)) - ORDER BY id, subtitle; -DEBUG: Router planner cannot handle multi-shard select queries -ERROR: could not run distributed query with GROUPING SETS, CUBE, or ROLLUP -HINT: Consider using an equality filter on the distributed table's partition column. --- queries which involve functions in FROM clause are supported if it goes to a single worker. 
-SELECT * FROM articles_hash_mx, position('om' in 'Thomas') WHERE author_id = 1; -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 1 - id | author_id | title | word_count | position ---------------------------------------------------------------------- - 1 | 1 | arsenous | 9572 | 3 - 11 | 1 | alamo | 1347 | 3 - 21 | 1 | arcading | 5890 | 3 - 31 | 1 | athwartships | 7271 | 3 - 41 | 1 | aznavour | 11814 | 3 -(5 rows) - -SELECT * FROM articles_hash_mx, position('om' in 'Thomas') WHERE author_id = 1 or author_id = 3; -DEBUG: Creating router plan - id | author_id | title | word_count | position ---------------------------------------------------------------------- - 1 | 1 | arsenous | 9572 | 3 - 3 | 3 | asternal | 10480 | 3 - 11 | 1 | alamo | 1347 | 3 - 13 | 3 | aseyev | 2255 | 3 - 21 | 1 | arcading | 5890 | 3 - 23 | 3 | abhorring | 6799 | 3 - 31 | 1 | athwartships | 7271 | 3 - 33 | 3 | autochrome | 8180 | 3 - 41 | 1 | aznavour | 11814 | 3 - 43 | 3 | affixal | 12723 | 3 -(10 rows) - --- they are supported via (sub)query pushdown if multiple workers are involved -SELECT * FROM articles_hash_mx, position('om' in 'Thomas') WHERE author_id = 1 or author_id = 2 ORDER BY 4 DESC, 1 DESC, 2 DESC LIMIT 5; -DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: push down of limit count: 5 - id | author_id | title | word_count | position ---------------------------------------------------------------------- - 12 | 2 | archiblast | 18185 | 3 - 42 | 2 | ausable | 15885 | 3 - 2 | 2 | abducing | 13642 | 3 - 41 | 1 | aznavour | 11814 | 3 - 32 | 2 | amazon | 11342 | 3 -(5 rows) - --- subqueries are supported in FROM clause but they are not router plannable -SELECT articles_hash_mx.id,test.word_count -FROM articles_hash_mx, (SELECT id, word_count FROM articles_hash_mx) AS test WHERE test.id = articles_hash_mx.id -ORDER BY test.word_count DESC, articles_hash_mx.id LIMIT 5; -DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan XXX_1 for subquery SELECT id, word_count FROM public.articles_hash_mx -DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT articles_hash_mx.id, test.word_count FROM public.articles_hash_mx, (SELECT intermediate_result.id, intermediate_result.word_count FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, word_count integer)) test WHERE (test.id OPERATOR(pg_catalog.=) articles_hash_mx.id) ORDER BY test.word_count DESC, articles_hash_mx.id LIMIT 5 -DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: push down of limit count: 5 - id | word_count ---------------------------------------------------------------------- - 50 | 19519 - 14 | 19094 - 48 | 18610 - 12 | 18185 - 46 | 17702 -(5 rows) - -SELECT articles_hash_mx.id,test.word_count -FROM articles_hash_mx, (SELECT id, word_count FROM articles_hash_mx) AS test -WHERE test.id = articles_hash_mx.id and articles_hash_mx.author_id = 1 -ORDER BY articles_hash_mx.id; -DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: generating subplan XXX_1 for subquery SELECT id, word_count FROM public.articles_hash_mx -DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT articles_hash_mx.id, test.word_count FROM public.articles_hash_mx, (SELECT intermediate_result.id, intermediate_result.word_count FROM 
read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, word_count integer)) test WHERE ((test.id OPERATOR(pg_catalog.=) articles_hash_mx.id) AND (articles_hash_mx.author_id OPERATOR(pg_catalog.=) 1)) ORDER BY articles_hash_mx.id -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 1 - id | word_count ---------------------------------------------------------------------- - 1 | 9572 - 11 | 1347 - 21 | 5890 - 31 | 7271 - 41 | 11814 -(5 rows) - --- subqueries in SELECT clause -SELECT a.title AS name, (SELECT a2.id FROM articles_single_shard_hash_mx a2 WHERE a.id = a2.id LIMIT 1) - AS special_price FROM articles_hash_mx a; -DEBUG: Router planner cannot handle multi-shard select queries -ERROR: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns --- simple lookup query -SELECT * - FROM articles_hash_mx - WHERE author_id = 1; -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 1 - id | author_id | title | word_count ---------------------------------------------------------------------- - 1 | 1 | arsenous | 9572 - 11 | 1 | alamo | 1347 - 21 | 1 | arcading | 5890 - 31 | 1 | athwartships | 7271 - 41 | 1 | aznavour | 11814 -(5 rows) - --- below query hits a single shard, router plannable -SELECT * - FROM articles_hash_mx - WHERE author_id = 1 OR author_id = 17; -DEBUG: Creating router plan - id | author_id | title | word_count ---------------------------------------------------------------------- - 1 | 1 | arsenous | 9572 - 11 | 1 | alamo | 1347 - 21 | 1 | arcading | 5890 - 31 | 1 | athwartships | 7271 - 41 | 1 | aznavour | 11814 -(5 rows) - --- below query hits two shards, not router plannable + not router executable --- handled by real-time executor -SELECT * - FROM articles_hash_mx - WHERE author_id = 1 OR author_id = 18; -DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count ---------------------------------------------------------------------- - 1 | 1 | arsenous | 9572 - 11 | 1 | alamo | 1347 - 21 | 1 | arcading | 5890 - 31 | 1 | athwartships | 7271 - 41 | 1 | aznavour | 11814 -(5 rows) - --- rename the output columns -SELECT id as article_id, word_count * id as random_value - FROM articles_hash_mx - WHERE author_id = 1; -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 1 - article_id | random_value ---------------------------------------------------------------------- - 1 | 9572 - 11 | 14817 - 21 | 123690 - 31 | 225401 - 41 | 484374 -(5 rows) - --- we can push down co-located joins to a single worker -SELECT a.author_id as first_author, b.word_count as second_word_count - FROM articles_hash_mx a, articles_hash_mx b - WHERE a.author_id = 10 and a.author_id = b.author_id - LIMIT 3; -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 10 - first_author | second_word_count ---------------------------------------------------------------------- - 10 | 17277 - 10 | 1820 - 10 | 6363 -(3 rows) - --- following join is router plannable since the same worker --- has both shards when citus.enable_non_colocated_router_query_pushdown --- is enabled -SET citus.enable_non_colocated_router_query_pushdown TO ON; -SELECT a.author_id as first_author, b.word_count as second_word_count - FROM articles_hash_mx a, articles_single_shard_hash_mx b - WHERE a.author_id = 10 and a.author_id = b.author_id - ORDER by 1,2 LIMIT 3; -DEBUG: Creating 
router plan -DEBUG: query has a single distribution column value: 10 - first_author | second_word_count ---------------------------------------------------------------------- - 10 | 19519 - 10 | 19519 - 10 | 19519 -(3 rows) - -SET citus.enable_non_colocated_router_query_pushdown TO OFF; --- but this is not the case otherwise -SELECT a.author_id as first_author, b.word_count as second_word_count - FROM articles_hash_mx a, articles_single_shard_hash_mx b - WHERE a.author_id = 10 and a.author_id = b.author_id - ORDER by 1,2 LIMIT 3; -DEBUG: router planner does not support queries that reference non-colocated distributed tables -DEBUG: push down of limit count: 3 -DEBUG: join prunable for task partitionId 0 and 1 -DEBUG: join prunable for task partitionId 0 and 2 -DEBUG: join prunable for task partitionId 0 and 3 -DEBUG: join prunable for task partitionId 1 and 0 -DEBUG: join prunable for task partitionId 1 and 2 -DEBUG: join prunable for task partitionId 1 and 3 -DEBUG: join prunable for task partitionId 2 and 0 -DEBUG: join prunable for task partitionId 2 and 1 -DEBUG: join prunable for task partitionId 2 and 3 -DEBUG: join prunable for task partitionId 3 and 0 -DEBUG: join prunable for task partitionId 3 and 1 -DEBUG: join prunable for task partitionId 3 and 2 -DEBUG: pruning merge fetch taskId 1 -DETAIL: Creating dependency on merge taskId 2 -DEBUG: pruning merge fetch taskId 2 -DETAIL: Creating dependency on merge taskId 2 -DEBUG: pruning merge fetch taskId 4 -DETAIL: Creating dependency on merge taskId 4 -DEBUG: pruning merge fetch taskId 5 -DETAIL: Creating dependency on merge taskId 4 -DEBUG: pruning merge fetch taskId 7 -DETAIL: Creating dependency on merge taskId 6 -DEBUG: pruning merge fetch taskId 8 -DETAIL: Creating dependency on merge taskId 6 -DEBUG: pruning merge fetch taskId 10 -DETAIL: Creating dependency on merge taskId 8 -DEBUG: pruning merge fetch taskId 11 -DETAIL: Creating dependency on merge taskId 8 -ERROR: the query contains a join that requires repartitioning -HINT: Set citus.enable_repartition_joins to on to enable repartitioning -RESET citus.enable_non_colocated_router_query_pushdown; --- following join is not router plannable since there are no --- workers containing both shards, but will work through recursive --- planning -SET client_min_messages TO INFO; -WITH single_shard as (SELECT * FROM articles_single_shard_hash_mx) -SELECT a.author_id as first_author, b.word_count as second_word_count - FROM articles_hash_mx a, single_shard b - WHERE a.author_id = 2 and a.author_id = b.author_id - LIMIT 3; - first_author | second_word_count ---------------------------------------------------------------------- -(0 rows) - -SET client_min_messages TO DEBUG; --- single shard select with limit is router plannable -SELECT * - FROM articles_hash_mx - WHERE author_id = 1 - LIMIT 3; -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 1 - id | author_id | title | word_count ---------------------------------------------------------------------- - 1 | 1 | arsenous | 9572 - 11 | 1 | alamo | 1347 - 21 | 1 | arcading | 5890 -(3 rows) - --- single shard select with limit + offset is router plannable -SELECT * - FROM articles_hash_mx - WHERE author_id = 1 - LIMIT 2 - OFFSET 1; -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 1 - id | author_id | title | word_count ---------------------------------------------------------------------- - 11 | 1 | alamo | 1347 - 21 | 1 | arcading | 5890 -(2 rows) - --- single shard select 
with limit + offset + order by is router plannable -SELECT * - FROM articles_hash_mx - WHERE author_id = 1 - ORDER BY id desc - LIMIT 2 - OFFSET 1; -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 1 - id | author_id | title | word_count ---------------------------------------------------------------------- - 31 | 1 | athwartships | 7271 - 21 | 1 | arcading | 5890 -(2 rows) - --- single shard select with group by on non-partition column is router plannable -SELECT id - FROM articles_hash_mx - WHERE author_id = 1 - GROUP BY id - ORDER BY id; -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 1 - id ---------------------------------------------------------------------- - 1 - 11 - 21 - 31 - 41 -(5 rows) - --- single shard select with distinct is router plannable -SELECT distinct id - FROM articles_hash_mx - WHERE author_id = 1 - ORDER BY id; -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 1 - id ---------------------------------------------------------------------- - 1 - 11 - 21 - 31 - 41 -(5 rows) - --- single shard aggregate is router plannable -SELECT avg(word_count) - FROM articles_hash_mx - WHERE author_id = 2; -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 2 - avg ---------------------------------------------------------------------- - 12356.400000000000 -(1 row) - --- max, min, sum, count are router plannable on single shard -SELECT max(word_count) as max, min(word_count) as min, - sum(word_count) as sum, count(word_count) as cnt - FROM articles_hash_mx - WHERE author_id = 2; -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 2 - max | min | sum | cnt ---------------------------------------------------------------------- - 18185 | 2728 | 61782 | 5 -(1 row) - --- queries with aggregates and group by supported on single shard -SELECT max(word_count) - FROM articles_hash_mx - WHERE author_id = 1 - GROUP BY author_id; -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 1 - max ---------------------------------------------------------------------- - 11814 -(1 row) - --- router plannable union queries are supported -SELECT * FROM ( - SELECT * FROM articles_hash_mx WHERE author_id = 1 - UNION - SELECT * FROM articles_hash_mx WHERE author_id = 3 -) AS combination -ORDER BY id; -DEBUG: Creating router plan - id | author_id | title | word_count ---------------------------------------------------------------------- - 1 | 1 | arsenous | 9572 - 3 | 3 | asternal | 10480 - 11 | 1 | alamo | 1347 - 13 | 3 | aseyev | 2255 - 21 | 1 | arcading | 5890 - 23 | 3 | abhorring | 6799 - 31 | 1 | athwartships | 7271 - 33 | 3 | autochrome | 8180 - 41 | 1 | aznavour | 11814 - 43 | 3 | affixal | 12723 -(10 rows) - -(SELECT LEFT(title, 1) FROM articles_hash_mx WHERE author_id = 1) -UNION -(SELECT LEFT(title, 1) FROM articles_hash_mx WHERE author_id = 3); -DEBUG: Creating router plan - left ---------------------------------------------------------------------- - a -(1 row) - -(SELECT LEFT(title, 1) FROM articles_hash_mx WHERE author_id = 1) -INTERSECT -(SELECT LEFT(title, 1) FROM articles_hash_mx WHERE author_id = 3); -DEBUG: Creating router plan - left ---------------------------------------------------------------------- - a -(1 row) - -SELECT * FROM ( - SELECT LEFT(title, 2) FROM articles_hash_mx WHERE author_id = 1 - EXCEPT - SELECT LEFT(title, 2) FROM articles_hash_mx WHERE author_id = 3 -) AS combination -ORDER BY 1; 
-DEBUG: Creating router plan - left ---------------------------------------------------------------------- - al - ar - at - az -(4 rows) - --- union queries are supported through recursive planning -SET client_min_messages TO DEBUG1; -(SELECT * FROM articles_hash_mx WHERE author_id = 1) -UNION -(SELECT * FROM articles_hash_mx WHERE author_id = 2) -ORDER BY 1,2; -DEBUG: generating subplan XXX_1 for subquery SELECT id, author_id, title, word_count FROM public.articles_hash_mx WHERE (author_id OPERATOR(pg_catalog.=) 1) -DEBUG: generating subplan XXX_2 for subquery SELECT id, author_id, title, word_count FROM public.articles_hash_mx WHERE (author_id OPERATOR(pg_catalog.=) 2) -DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.id, intermediate_result.author_id, intermediate_result.title, intermediate_result.word_count FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint, title character varying(20), word_count integer) UNION SELECT intermediate_result.id, intermediate_result.author_id, intermediate_result.title, intermediate_result.word_count FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint, title character varying(20), word_count integer) ORDER BY 1, 2 - id | author_id | title | word_count ---------------------------------------------------------------------- - 1 | 1 | arsenous | 9572 - 2 | 2 | abducing | 13642 - 11 | 1 | alamo | 1347 - 12 | 2 | archiblast | 18185 - 21 | 1 | arcading | 5890 - 22 | 2 | antipope | 2728 - 31 | 1 | athwartships | 7271 - 32 | 2 | amazon | 11342 - 41 | 1 | aznavour | 11814 - 42 | 2 | ausable | 15885 -(10 rows) - -SELECT * FROM ( - (SELECT * FROM articles_hash_mx WHERE author_id = 1) - UNION - (SELECT * FROM articles_hash_mx WHERE author_id = 2)) uu -ORDER BY 1, 2 -LIMIT 5; -DEBUG: push down of limit count: 5 - id | author_id | title | word_count ---------------------------------------------------------------------- - 1 | 1 | arsenous | 9572 - 2 | 2 | abducing | 13642 - 11 | 1 | alamo | 1347 - 12 | 2 | archiblast | 18185 - 21 | 1 | arcading | 5890 -(5 rows) - --- error out for queries with repartition jobs -SELECT * - FROM articles_hash_mx a, articles_hash_mx b - WHERE a.id = b.id AND a.author_id = 1; -ERROR: the query contains a join that requires repartitioning -HINT: Set citus.enable_repartition_joins to on to enable repartitioning --- queries which hit more than 1 shards are not router plannable or executable --- handled by real-time executor -SELECT * - FROM articles_hash_mx - WHERE author_id >= 1 AND author_id <= 3 -ORDER BY 1,2,3,4; - id | author_id | title | word_count ---------------------------------------------------------------------- - 1 | 1 | arsenous | 9572 - 2 | 2 | abducing | 13642 - 3 | 3 | asternal | 10480 - 11 | 1 | alamo | 1347 - 12 | 2 | archiblast | 18185 - 13 | 3 | aseyev | 2255 - 21 | 1 | arcading | 5890 - 22 | 2 | antipope | 2728 - 23 | 3 | abhorring | 6799 - 31 | 1 | athwartships | 7271 - 32 | 2 | amazon | 11342 - 33 | 3 | autochrome | 8180 - 41 | 1 | aznavour | 11814 - 42 | 2 | ausable | 15885 - 43 | 3 | affixal | 12723 -(15 rows) - --- Test various filtering options for router plannable check -SET client_min_messages to 'DEBUG2'; --- this is definitely single shard --- and router plannable -SELECT * - FROM articles_hash_mx - WHERE author_id = 1 and author_id >= 1; -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 1 - id | author_id 
| title | word_count ---------------------------------------------------------------------- - 1 | 1 | arsenous | 9572 - 11 | 1 | alamo | 1347 - 21 | 1 | arcading | 5890 - 31 | 1 | athwartships | 7271 - 41 | 1 | aznavour | 11814 -(5 rows) - --- not router plannable due to or -SELECT * - FROM articles_hash_mx - WHERE author_id = 1 or id = 1; -DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count ---------------------------------------------------------------------- - 1 | 1 | arsenous | 9572 - 11 | 1 | alamo | 1347 - 21 | 1 | arcading | 5890 - 31 | 1 | athwartships | 7271 - 41 | 1 | aznavour | 11814 -(5 rows) - --- router plannable -SELECT * - FROM articles_hash_mx - WHERE author_id = 1 and (id = 1 or id = 41); -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 1 - id | author_id | title | word_count ---------------------------------------------------------------------- - 1 | 1 | arsenous | 9572 - 41 | 1 | aznavour | 11814 -(2 rows) - --- router plannable -SELECT * - FROM articles_hash_mx - WHERE author_id = 1 and (id = random()::int * 0); -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 1 - id | author_id | title | word_count ---------------------------------------------------------------------- -(0 rows) - --- not router plannable due to function call on the right side -SELECT * - FROM articles_hash_mx - WHERE author_id = (random()::int * 0 + 1); -DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count ---------------------------------------------------------------------- - 1 | 1 | arsenous | 9572 - 11 | 1 | alamo | 1347 - 21 | 1 | arcading | 5890 - 31 | 1 | athwartships | 7271 - 41 | 1 | aznavour | 11814 -(5 rows) - --- not router plannable due to or -SELECT * - FROM articles_hash_mx - WHERE author_id = 1 or id = 1; -DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count ---------------------------------------------------------------------- - 1 | 1 | arsenous | 9572 - 11 | 1 | alamo | 1347 - 21 | 1 | arcading | 5890 - 31 | 1 | athwartships | 7271 - 41 | 1 | aznavour | 11814 -(5 rows) - --- router plannable due to abs(-1) getting converted to 1 by postgresql -SELECT * - FROM articles_hash_mx - WHERE author_id = abs(-1); -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 1 - id | author_id | title | word_count ---------------------------------------------------------------------- - 1 | 1 | arsenous | 9572 - 11 | 1 | alamo | 1347 - 21 | 1 | arcading | 5890 - 31 | 1 | athwartships | 7271 - 41 | 1 | aznavour | 11814 -(5 rows) - --- not router plannable due to abs() function -SELECT * - FROM articles_hash_mx - WHERE 1 = abs(author_id); -DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count ---------------------------------------------------------------------- - 1 | 1 | arsenous | 9572 - 11 | 1 | alamo | 1347 - 21 | 1 | arcading | 5890 - 31 | 1 | athwartships | 7271 - 41 | 1 | aznavour | 11814 -(5 rows) - --- not router plannable due to abs() function -SELECT * - FROM articles_hash_mx - WHERE author_id = abs(author_id - 2); -DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count ---------------------------------------------------------------------- - 1 | 1 | arsenous | 9572 - 11 | 1 | alamo | 1347 - 21 | 1 | arcading | 5890 - 31 | 1 | athwartships | 7271 - 41 | 1 | aznavour 
| 11814 -(5 rows) - --- router plannable, function on different field -SELECT * - FROM articles_hash_mx - WHERE author_id = 1 and (id = abs(id - 2)); -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 1 - id | author_id | title | word_count ---------------------------------------------------------------------- - 1 | 1 | arsenous | 9572 -(1 row) - --- not router plannable due to is true -SELECT * - FROM articles_hash_mx - WHERE (author_id = 1) is true; -DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count ---------------------------------------------------------------------- - 1 | 1 | arsenous | 9572 - 11 | 1 | alamo | 1347 - 21 | 1 | arcading | 5890 - 31 | 1 | athwartships | 7271 - 41 | 1 | aznavour | 11814 -(5 rows) - --- router plannable, (boolean expression) = true is collapsed to (boolean expression) -SELECT * - FROM articles_hash_mx - WHERE (author_id = 1) = true; -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 1 - id | author_id | title | word_count ---------------------------------------------------------------------- - 1 | 1 | arsenous | 9572 - 11 | 1 | alamo | 1347 - 21 | 1 | arcading | 5890 - 31 | 1 | athwartships | 7271 - 41 | 1 | aznavour | 11814 -(5 rows) - --- router plannable, between operator is on another column -SELECT * - FROM articles_hash_mx - WHERE (author_id = 1) and id between 0 and 20; -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 1 - id | author_id | title | word_count ---------------------------------------------------------------------- - 1 | 1 | arsenous | 9572 - 11 | 1 | alamo | 1347 -(2 rows) - --- router plannable, partition column expression is and'ed to rest -SELECT * - FROM articles_hash_mx - WHERE (author_id = 1) and (id = 1 or id = 31) and title like '%s'; -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 1 - id | author_id | title | word_count ---------------------------------------------------------------------- - 1 | 1 | arsenous | 9572 - 31 | 1 | athwartships | 7271 -(2 rows) - --- router plannable, order is changed -SELECT * - FROM articles_hash_mx - WHERE (id = 1 or id = 31) and title like '%s' and (author_id = 1); -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 1 - id | author_id | title | word_count ---------------------------------------------------------------------- - 1 | 1 | arsenous | 9572 - 31 | 1 | athwartships | 7271 -(2 rows) - --- router plannable -SELECT * - FROM articles_hash_mx - WHERE (title like '%s' or title like 'a%') and (author_id = 1); -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 1 - id | author_id | title | word_count ---------------------------------------------------------------------- - 1 | 1 | arsenous | 9572 - 11 | 1 | alamo | 1347 - 21 | 1 | arcading | 5890 - 31 | 1 | athwartships | 7271 - 41 | 1 | aznavour | 11814 -(5 rows) - --- router plannable -SELECT * - FROM articles_hash_mx - WHERE (title like '%s' or title like 'a%') and (author_id = 1) and (word_count < 3000 or word_count > 8000); -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 1 - id | author_id | title | word_count ---------------------------------------------------------------------- - 1 | 1 | arsenous | 9572 - 11 | 1 | alamo | 1347 - 41 | 1 | aznavour | 11814 -(3 rows) - --- window functions are supported if query is router plannable -SELECT LAG(title, 1) over (ORDER 
BY word_count) prev, title, word_count - FROM articles_hash_mx - WHERE author_id = 5; -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 5 - prev | title | word_count ---------------------------------------------------------------------- - | afrasia | 864 - afrasia | adversa | 3164 - adversa | antehall | 7707 - antehall | aminate | 9089 - aminate | aruru | 11389 -(5 rows) - -SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count - FROM articles_hash_mx - WHERE author_id = 5 - ORDER BY word_count DESC; -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 5 - prev | title | word_count ---------------------------------------------------------------------- - aminate | aruru | 11389 - antehall | aminate | 9089 - adversa | antehall | 7707 - afrasia | adversa | 3164 - | afrasia | 864 -(5 rows) - -SELECT id, MIN(id) over (order by word_count) - FROM articles_hash_mx - WHERE author_id = 1; -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 1 - id | min ---------------------------------------------------------------------- - 11 | 11 - 21 | 11 - 31 | 11 - 1 | 1 - 41 | 1 -(5 rows) - -SELECT id, word_count, AVG(word_count) over (order by word_count) - FROM articles_hash_mx - WHERE author_id = 1; -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 1 - id | word_count | avg ---------------------------------------------------------------------- - 11 | 1347 | 1347.0000000000000000 - 21 | 5890 | 3618.5000000000000000 - 31 | 7271 | 4836.0000000000000000 - 1 | 9572 | 6020.0000000000000000 - 41 | 11814 | 7178.8000000000000000 -(5 rows) - -SELECT word_count, rank() OVER (PARTITION BY author_id ORDER BY word_count) - FROM articles_hash_mx - WHERE author_id = 1; -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 1 - word_count | rank ---------------------------------------------------------------------- - 1347 | 1 - 5890 | 2 - 7271 | 3 - 9572 | 4 - 11814 | 5 -(5 rows) - -SELECT id, MIN(id) over (order by word_count) - FROM articles_hash_mx - WHERE author_id = 1 or author_id = 2 - ORDER BY 1; -DEBUG: Router planner cannot handle multi-shard select queries - id | min ---------------------------------------------------------------------- - 1 | 1 - 2 | 1 - 11 | 11 - 12 | 1 - 21 | 11 - 22 | 11 - 31 | 11 - 32 | 1 - 41 | 1 - 42 | 1 -(10 rows) - -SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count - FROM articles_hash_mx - WHERE author_id = 5 or author_id = 2 - ORDER BY 2; -DEBUG: Router planner cannot handle multi-shard select queries - prev | title | word_count ---------------------------------------------------------------------- - aruru | abducing | 13642 - antipope | adversa | 3164 - | afrasia | 864 - aminate | amazon | 11342 - antehall | aminate | 9089 - adversa | antehall | 7707 - afrasia | antipope | 2728 - ausable | archiblast | 18185 - amazon | aruru | 11389 - abducing | ausable | 15885 -(10 rows) - --- complex query hitting a single shard -SELECT - count(DISTINCT CASE - WHEN - word_count > 100 - THEN - id - ELSE - NULL - END) as c - FROM - articles_hash_mx - WHERE - author_id = 5; -DEBUG: Creating router plan -DEBUG: query has a single distribution column value: 5 - c ---------------------------------------------------------------------- - 5 -(1 row) - --- same query is not router plannable if hits multiple shards -SELECT - count(DISTINCT CASE - WHEN - word_count > 100 - THEN - id - ELSE - NULL - END) as c - FROM - 
articles_hash_mx
- GROUP BY
- author_id
- ORDER BY c;
-DEBUG: Router planner cannot handle multi-shard select queries
- c
----------------------------------------------------------------------
- 4
- 5
- 5
- 5
- 5
- 5
- 5
- 5
- 5
- 5
-(10 rows)
-
--- queries inside transactions can be router plannable
-BEGIN;
-SELECT *
- FROM articles_hash_mx
- WHERE author_id = 1
- ORDER BY id;
-DEBUG: Creating router plan
-DEBUG: query has a single distribution column value: 1
- id | author_id | title | word_count
----------------------------------------------------------------------
- 1 | 1 | arsenous | 9572
- 11 | 1 | alamo | 1347
- 21 | 1 | arcading | 5890
- 31 | 1 | athwartships | 7271
- 41 | 1 | aznavour | 11814
-(5 rows)
-
-END;
--- cursor queries are router plannable
-BEGIN;
-DECLARE test_cursor CURSOR FOR
- SELECT *
- FROM articles_hash_mx
- WHERE author_id = 1
- ORDER BY id;
-DEBUG: Creating router plan
-DEBUG: query has a single distribution column value: 1
-FETCH test_cursor;
- id | author_id | title | word_count
----------------------------------------------------------------------
- 1 | 1 | arsenous | 9572
-(1 row)
-
-FETCH test_cursor;
- id | author_id | title | word_count
----------------------------------------------------------------------
- 11 | 1 | alamo | 1347
-(1 row)
-
-FETCH BACKWARD test_cursor;
- id | author_id | title | word_count
----------------------------------------------------------------------
- 1 | 1 | arsenous | 9572
-(1 row)
-
-END;
--- queries inside copy can be router plannable
-COPY (
- SELECT *
- FROM articles_hash_mx
- WHERE author_id = 1
- ORDER BY id) TO STDOUT;
-DEBUG: Creating router plan
-DEBUG: query has a single distribution column value: 1
-1 1 arsenous 9572
-11 1 alamo 1347
-21 1 arcading 5890
-31 1 athwartships 7271
-41 1 aznavour 11814
--- table creation queries inside can be router plannable
-CREATE TEMP TABLE temp_articles_hash_mx as
- SELECT *
- FROM articles_hash_mx
- WHERE author_id = 1
- ORDER BY id;
-DEBUG: Creating router plan
-DEBUG: query has a single distribution column value: 1
--- router plannable queries may include filter for aggregates
-SELECT count(*), count(*) FILTER (WHERE id < 3)
- FROM articles_hash_mx
- WHERE author_id = 1;
-DEBUG: Creating router plan
-DEBUG: query has a single distribution column value: 1
- count | count
----------------------------------------------------------------------
- 5 | 1
-(1 row)
-
--- non-router plannable queries support filters as well
-SELECT count(*), count(*) FILTER (WHERE id < 3)
- FROM articles_hash_mx
- WHERE author_id = 1 or author_id = 2;
-DEBUG: Router planner cannot handle multi-shard select queries
- count | count
----------------------------------------------------------------------
- 10 | 2
-(1 row)
-
--- prepare queries can be router plannable
-PREPARE author_1_articles as
- SELECT *
- FROM articles_hash_mx
- WHERE author_id = 1;
-EXECUTE author_1_articles;
-DEBUG: Creating router plan
-DEBUG: query has a single distribution column value: 1
- id | author_id | title | word_count
----------------------------------------------------------------------
- 1 | 1 | arsenous | 9572
- 11 | 1 | alamo | 1347
- 21 | 1 | arcading | 5890
- 31 | 1 | athwartships | 7271
- 41 | 1 | aznavour | 11814
-(5 rows)
-
--- parametric prepare queries can be router plannable
-PREPARE author_articles(int) as
- SELECT *
- FROM articles_hash_mx
- WHERE author_id = $1;
-EXECUTE author_articles(1);
-DEBUG: Creating router plan
-DEBUG: query has a single distribution column value: 1
- id | author_id | title | word_count
----------------------------------------------------------------------
- 1 | 1 | arsenous | 9572
- 11 | 1 | alamo | 1347
- 21 | 1 | arcading | 5890
- 31 | 1 | athwartships | 7271
- 41 | 1 | aznavour | 11814
-(5 rows)
-
--- queries inside plpgsql functions could be router plannable
-SET citus.enable_metadata_sync TO OFF;
-CREATE OR REPLACE FUNCTION author_articles_max_id() RETURNS int AS $$
-DECLARE
- max_id integer;
-BEGIN
- SELECT MAX(id) FROM articles_hash_mx ah
- WHERE author_id = 1
- into max_id;
- return max_id;
-END;
-$$ LANGUAGE plpgsql;
-SELECT author_articles_max_id();
-DEBUG: Creating router plan
-CONTEXT: SQL statement "SELECT MAX(id) FROM articles_hash_mx ah
- WHERE author_id = 1"
-PL/pgSQL function author_articles_max_id() line XX at SQL statement
-DEBUG: query has a single distribution column value: 1
-CONTEXT: SQL statement "SELECT MAX(id) FROM articles_hash_mx ah
- WHERE author_id = 1"
-PL/pgSQL function author_articles_max_id() line XX at SQL statement
- author_articles_max_id
----------------------------------------------------------------------
- 41
-(1 row)
-
--- plpgsql function that return query results are not router plannable
-CREATE OR REPLACE FUNCTION author_articles_id_word_count() RETURNS TABLE(id bigint, word_count int) AS $$
-DECLARE
-BEGIN
- RETURN QUERY
- SELECT ah.id, ah.word_count
- FROM articles_hash_mx ah
- WHERE author_id = 1;
-
-END;
-$$ LANGUAGE plpgsql;
-SELECT * FROM author_articles_id_word_count();
-DEBUG: Creating router plan
-CONTEXT: SQL statement "SELECT ah.id, ah.word_count
- FROM articles_hash_mx ah
- WHERE author_id = 1"
-PL/pgSQL function author_articles_id_word_count() line XX at RETURN QUERY
-DEBUG: query has a single distribution column value: 1
-CONTEXT: SQL statement "SELECT ah.id, ah.word_count
- FROM articles_hash_mx ah
- WHERE author_id = 1"
-PL/pgSQL function author_articles_id_word_count() line XX at RETURN QUERY
- id | word_count
----------------------------------------------------------------------
- 1 | 9572
- 11 | 1347
- 21 | 5890
- 31 | 7271
- 41 | 11814
-(5 rows)
-
-RESET citus.enable_metadata_sync;
--- materialized views can be created for router plannable queries
-CREATE MATERIALIZED VIEW mv_articles_hash_mx AS
- SELECT * FROM articles_hash_mx WHERE author_id = 1;
-DEBUG: Creating router plan
-DEBUG: query has a single distribution column value: 1
-DEBUG: drop auto-cascades to type public.pg_temp_xxxxx
-DEBUG: drop auto-cascades to type public.pg_temp_xxxxx[]
-SELECT * FROM mv_articles_hash_mx;
- id | author_id | title | word_count
----------------------------------------------------------------------
- 1 | 1 | arsenous | 9572
- 11 | 1 | alamo | 1347
- 21 | 1 | arcading | 5890
- 31 | 1 | athwartships | 7271
- 41 | 1 | aznavour | 11814
-(5 rows)
-
-SET client_min_messages to 'INFO';
-DROP MATERIALIZED VIEW mv_articles_hash_mx;
-SET client_min_messages to 'DEBUG2';
-CREATE MATERIALIZED VIEW mv_articles_hash_mx_error AS
- SELECT * FROM articles_hash_mx WHERE author_id in (1,2);
-DEBUG: Router planner cannot handle multi-shard select queries
-DEBUG: drop auto-cascades to type public.pg_temp_xxxxx
-DEBUG: drop auto-cascades to type public.pg_temp_xxxxx[]
-INSERT INTO articles_hash_mx VALUES (51, 1, 'amateus', 1814);
-DEBUG: Creating router plan
-DEBUG: query has a single distribution column value: 1
--- verify insert is successfull (not router plannable and executable)
-SELECT id
- FROM articles_hash_mx
- WHERE author_id = 1;
-DEBUG: Creating router plan
-DEBUG: query has a single distribution column value: 1
- id
----------------------------------------------------------------------
- 1
- 11
- 21
- 31
- 41
- 51
-(6 rows)
-
-SET client_min_messages to WARNING;
-TRUNCATE articles_hash_mx, company_employees_mx, articles_single_shard_hash_mx;
-DROP MATERIALIZED VIEW mv_articles_hash_mx_error;
-DROP TABLE authors_hash_mx;
diff --git a/src/test/regress/sql/ch_bench_having_mx.sql b/src/test/regress/sql/ch_bench_having_mx.sql
index 902e4767884..798591a5f87 100644
--- a/src/test/regress/sql/ch_bench_having_mx.sql
+++ b/src/test/regress/sql/ch_bench_having_mx.sql
@@ -1,9 +1,3 @@
--- Two alternative test outputs:
--- ch_bench_having_mx.out for PG16 and before
--- ch_bench_having_mx_0.out for PG17
--- related commit
--- https://github.com/postgres/postgres/commit/fd0398fc
-
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1640000;
 SET citus.shard_replication_factor to 1;
 SET citus.shard_count to 4;
diff --git a/src/test/regress/sql/multi_extension.sql b/src/test/regress/sql/multi_extension.sql
index 8025051441b..1726c260f37 100644
--- a/src/test/regress/sql/multi_extension.sql
+++ b/src/test/regress/sql/multi_extension.sql
@@ -7,12 +7,6 @@
 -- not done yet.
 --
 -- Upgrade tests verify the objects are added in citus_finish_pg_upgrade()
---
--- Two alternative test outputs:
--- multi_extension.out for PG16 and before
--- multi_extension_0.out for PG17
--- related commit
--- https://git.postgresql.org/gitweb/?p=postgresql.git;a=commit;h=e5bc9454e527b1cba97553531d8d4992892fdeef
 
 SET citus.next_shard_id TO 580000;
 CREATE SCHEMA multi_extension;
diff --git a/src/test/regress/sql/multi_mx_router_planner.sql b/src/test/regress/sql/multi_mx_router_planner.sql
index 2b8f27d6901..3593c2ac8d0 100644
--- a/src/test/regress/sql/multi_mx_router_planner.sql
+++ b/src/test/regress/sql/multi_mx_router_planner.sql
@@ -1,7 +1,4 @@
--- Two alternative test outputs:
--- multi_mx_router_planner.out for PG16 and before
--- multi_mx_router_planner_0.out for PG17
 -- ===================================================================
 -- test router planner functionality for single shard select queries