-- ===================================================================
-- create test functions
-- ===================================================================
CREATE FUNCTION load_shard_id_array(regclass, bool)
	RETURNS bigint[]
	AS 'pg_shard'
	LANGUAGE C STRICT;
CREATE FUNCTION load_shard_interval_array(bigint)
	RETURNS integer[]
	AS 'pg_shard'
	LANGUAGE C STRICT;
CREATE FUNCTION load_shard_placement_array(bigint, bool)
	RETURNS text[]
	AS 'pg_shard'
	LANGUAGE C STRICT;
CREATE FUNCTION partition_column_id(regclass)
	RETURNS smallint
	AS 'pg_shard'
	LANGUAGE C STRICT;
CREATE FUNCTION insert_hash_partition_row(regclass, text)
	RETURNS void
	AS 'pg_shard'
	LANGUAGE C STRICT;
CREATE FUNCTION insert_monolithic_shard_row(regclass, bigint)
	RETURNS void
	AS 'pg_shard'
	LANGUAGE C STRICT;
CREATE FUNCTION insert_healthy_local_shard_placement_row(bigint, bigint)
	RETURNS void
	AS 'pg_shard'
	LANGUAGE C STRICT;
CREATE FUNCTION delete_shard_placement_row(bigint)
	RETURNS void
	AS 'pg_shard'
	LANGUAGE C STRICT;
CREATE FUNCTION acquire_shared_shard_lock(bigint)
	RETURNS void
	AS 'pg_shard'
	LANGUAGE C STRICT;
CREATE FUNCTION next_shard_id()
	RETURNS bigint
	AS 'pg_shard'
	LANGUAGE C STRICT;
-- ===================================================================
-- test distribution metadata functionality
-- ===================================================================
-- create table to be distributed
CREATE TABLE events (
	id bigint,
	name text
);
-- before distribution, should return, but not cache, an empty list
SELECT load_shard_id_array('events', true);
 load_shard_id_array 
---------------------
 {}
(1 row)

-- for this table we'll "distribute" manually but verify using function calls
INSERT INTO pgs_distribution_metadata.shard
	(id, relation_id, storage, min_value, max_value)
VALUES
	(1, 'events'::regclass, 't', '0', '10'),
	(2, 'events'::regclass, 't', '10', '20'),
	(3, 'events'::regclass, 't', '20', '30'),
	(4, 'events'::regclass, 't', '30', '40');
INSERT INTO pgs_distribution_metadata.shard_placement
	(id, node_name, node_port, shard_id, shard_state)
VALUES
	(101, 'cluster-worker-01', 5432, 1, 0),
	(102, 'cluster-worker-01', 5432, 2, 0),
	(103, 'cluster-worker-02', 5433, 3, 0),
	(104, 'cluster-worker-02', 5433, 4, 0),
	(105, 'cluster-worker-03', 5434, 1, 1),
	(106, 'cluster-worker-03', 5434, 2, 1),
	(107, 'cluster-worker-04', 5435, 3, 1),
	(108, 'cluster-worker-04', 5435, 4, 1);
INSERT INTO pgs_distribution_metadata.partition
	(relation_id, partition_method, key)
VALUES
	('events'::regclass, 'h', 'name');
-- should see above shard identifiers
SELECT load_shard_id_array('events', false);
 load_shard_id_array 
---------------------
 {4,3,2,1}
(1 row)

-- cache them for later use
SELECT load_shard_id_array('events', true);
 load_shard_id_array 
---------------------
 {4,3,2,1}
(1 row)

-- should see empty array (catalog is not distributed)
SELECT load_shard_id_array('pg_type', false);
 load_shard_id_array 
---------------------
 {}
(1 row)

-- should see array with first shard range
SELECT load_shard_interval_array(1);
 load_shard_interval_array 
----------------------------
 {0,10}
(1 row)

-- should see error for non-existent shard
SELECT load_shard_interval_array(5);
ERROR:  shard with ID 5 does not exist
-- should see two placements
SELECT load_shard_placement_array(2, false);
           load_shard_placement_array            
-------------------------------------------------
 {cluster-worker-03:5434,cluster-worker-01:5432}
(1 row)

-- only one of which is finalized
SELECT load_shard_placement_array(2, true);
 load_shard_placement_array 
----------------------------
 {cluster-worker-03:5434}
(1 row)

-- should see error for non-existent shard
SELECT load_shard_placement_array(6, false);
ERROR:  no placements exist for shard with ID 6
-- should see column id of 'name'
SELECT partition_column_id('events');
 partition_column_id 
---------------------
                   2
(1 row)

-- should see error (catalog is not distributed)
SELECT partition_column_id('pg_type');
ERROR:  no partition column is defined for relation "pg_type"
-- drop shard rows (must drop placements first)
DELETE FROM pgs_distribution_metadata.shard_placement WHERE shard_id BETWEEN 1 AND 4;
DELETE FROM pgs_distribution_metadata.shard WHERE relation_id = 'events'::regclass;
-- verify that an eager load shows them missing
SELECT load_shard_id_array('events', false);
 load_shard_id_array 
---------------------
 {}
(1 row)

-- but they live on in the cache
SELECT load_shard_id_array('events', true);
 load_shard_id_array 
---------------------
 {4,3,2,1}
(1 row)

-- create second table to distribute
CREATE TABLE customers (
	id bigint,
	name text
);
-- now we'll distribute using function calls but verify metadata manually...
-- partition on id and manually inspect partition row
SELECT insert_hash_partition_row('customers', 'id');
 insert_hash_partition_row 
---------------------------
 
(1 row)

SELECT partition_method, key FROM pgs_distribution_metadata.partition
	WHERE relation_id = 'customers'::regclass;
 partition_method | key 
------------------+-----
 h                | id
(1 row)

-- make one huge shard and manually inspect shard row
SELECT insert_monolithic_shard_row('customers', 5);
 insert_monolithic_shard_row 
-----------------------------
 
(1 row)

SELECT storage, min_value, max_value FROM pgs_distribution_metadata.shard WHERE id = 5;
 storage |  min_value  | max_value  
---------+-------------+------------
 t       | -2147483648 | 2147483647
(1 row)

-- add a placement and manually inspect row
SELECT insert_healthy_local_shard_placement_row(109, 5);
 insert_healthy_local_shard_placement_row 
------------------------------------------
 
(1 row)

SELECT * FROM pgs_distribution_metadata.shard_placement WHERE id = 109;
 id  | shard_id | shard_state | node_name | node_port 
-----+----------+-------------+-----------+-----------
 109 |        5 |           1 | localhost |      5432
(1 row)

-- remove it and verify it is gone
SELECT delete_shard_placement_row(109);
 delete_shard_placement_row 
----------------------------
 
(1 row)

SELECT COUNT(*) FROM pgs_distribution_metadata.shard_placement WHERE id = 109;
 count 
-------
     0
(1 row)

-- ask for next shard id
SELECT next_shard_id();
 next_shard_id 
---------------
         10000
(1 row)

-- now we'll even test our lock methods...
-- use transaction to bound how long we hold the lock
BEGIN;
-- pick up a shard lock and look for it in pg_locks
SELECT acquire_shared_shard_lock(5);
 acquire_shared_shard_lock 
---------------------------
 
(1 row)

SELECT objid, mode FROM pg_locks WHERE locktype = 'advisory' AND objid = 5;
 objid |   mode    
-------+-----------
     5 | ShareLock
(1 row)

-- commit should drop the lock
COMMIT;
-- lock should be gone now
SELECT COUNT(*) FROM pg_locks WHERE locktype = 'advisory' AND objid = 5;
 count 
-------
     0
(1 row)

-- finally, check that having distributed tables prevents dropping the extension
DROP EXTENSION pg_shard;
ERROR:  cannot drop extension pg_shard because other objects depend on it
DETAIL:  Existing distributed tables depend on extension pg_shard
HINT:  Use DROP ... CASCADE to drop the dependent objects too.
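-- As a point of reference, the wrapper functions exercised above read from the
-- same metadata tables populated in this test. Below is a minimal sketch of a
-- direct inspection query, assuming only the pgs_distribution_metadata schema
-- and the columns used earlier in this file; it is commented out because it is
-- not executed here and no expected output is recorded for it.
--
-- SELECT s.relation_id::regclass AS relation,
--        s.id AS shard_id,
--        s.min_value,
--        s.max_value,
--        p.node_name,
--        p.node_port,
--        p.shard_state
-- FROM pgs_distribution_metadata.shard s
-- JOIN pgs_distribution_metadata.shard_placement p ON p.shard_id = s.id
-- ORDER BY s.id, p.id;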