From 81bdb1832b7a5b6f46c54d112e03dd1d6d8413ff Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Wed, 6 Nov 2024 12:23:48 -0600 Subject: [PATCH 01/18] Squashed 'pgxntool/' changes from c0af00f..bed3604 bed3604 Fix pg_regress on versions > 12 (#5) (#6) git-subtree-dir: pgxntool git-subtree-split: bed36044679d6b53ad7cd2875272552a4ad6508a --- HISTORY.asc | 3 +++ base.mk | 6 ++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/HISTORY.asc b/HISTORY.asc index b134482..9cb793b 100644 --- a/HISTORY.asc +++ b/HISTORY.asc @@ -1,5 +1,8 @@ STABLE ------ +== Support 13+ +The `--load-language` option was removed from `pg_regress` in 13. + == Reduce verbosity from test setup As part of this change, you will want to review the changes to test/deps.sql. diff --git a/base.mk b/base.mk index 0634f2e..a976ebb 100644 --- a/base.mk +++ b/base.mk @@ -36,7 +36,7 @@ TEST_SQL_FILES += $(wildcard $(TESTDIR)/sql/*.sql) TEST_RESULT_FILES = $(patsubst $(TESTDIR)/sql/%.sql,$(TESTDIR)/expected/%.out,$(TEST_SQL_FILES)) TEST_FILES = $(TEST_SOURCE_FILES) $(TEST_SQL_FILES) REGRESS = $(sort $(notdir $(subst .source,,$(TEST_FILES:.sql=)))) # Sort is to get unique list -REGRESS_OPTS = --inputdir=$(TESTDIR) --outputdir=$(TESTOUT) --load-language=plpgsql +REGRESS_OPTS = --inputdir=$(TESTDIR) --outputdir=$(TESTOUT) # See additional setup below MODULES = $(patsubst %.c,%,$(wildcard src/*.c)) ifeq ($(strip $(MODULES)),) MODULES =# Set to NUL so PGXS doesn't puke @@ -57,8 +57,10 @@ GE91 = $(call test, $(MAJORVER), -ge, 91) ifeq ($(GE91),yes) all: $(EXTENSION_VERSION_FILES) +endif -#DATA = $(wildcard sql/*--*.sql) +ifeq ($(call test, $(MAJORVER), -lt, 13), yes) + REGRESS_OPTS += --load-language=plpgsql endif PGXS := $(shell $(PG_CONFIG) --pgxs) From d231a5896337f869b6327874166ed0f3a965505d Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Thu, 7 Nov 2024 14:53:01 -0600 Subject: [PATCH 02/18] Add support for Postgres 13 Biggest change is that starting with Postgres 12 OID columns in catalog 
tables were no longer hidden, which required adjusting a few views and tests. Also update pgxntool. --- sql/cat_tools.sql.in | 70 ++++++++++++++++++++++++++++++------- sql/omit_column.sql | 16 +++++++++ test/expected/general.out | 3 +- test/expected/zzz_build.out | 4 +++ test/setup.sql | 17 +++++++++ test/sql/attribute.sql | 15 ++++++-- test/sql/extension.sql | 2 +- 7 files changed, 110 insertions(+), 17 deletions(-) create mode 100644 sql/omit_column.sql diff --git a/sql/cat_tools.sql.in b/sql/cat_tools.sql.in index dda82d8..fba34a6 100644 --- a/sql/cat_tools.sql.in +++ b/sql/cat_tools.sql.in @@ -22,14 +22,6 @@ CREATE SCHEMA __cat_tools; GRANT USAGE ON SCHEMA cat_tools TO cat_tools__usage; CREATE SCHEMA _cat_tools; --- No permissions checks -CREATE OR REPLACE VIEW _cat_tools.pg_class_v AS - SELECT c.oid AS reloid, c.*, n.nspname AS relschema - FROM pg_class c - LEFT JOIN pg_namespace n ON( n.oid = c.relnamespace ) -; -REVOKE ALL ON _cat_tools.pg_class_v FROM public; - @generated@ CREATE FUNCTION __cat_tools.exec( @@ -41,8 +33,44 @@ BEGIN END $body$; +-- See also test/setup.sql +CREATE FUNCTION __cat_tools.omit_column( + rel text + , omit name[] DEFAULT array['oid'] +) RETURNS text LANGUAGE sql IMMUTABLE AS $body$ +SELECT array_to_string(array( + SELECT attname + FROM pg_attribute a + WHERE attrelid = rel::regclass + AND NOT attisdropped + AND attnum >= 0 + AND attname != ANY( omit ) + ORDER BY attnum + ) + , ', ' +) +$body$; + @generated@ +/* + * Starting in 12 oid columns in catalog tables are no longer hidden, so we + * need a way to include all the fields in a table *except* for the OID column. 
+ */ +SELECT __cat_tools.exec(format($fmt$ +CREATE OR REPLACE VIEW _cat_tools.pg_class_v AS + SELECT c.oid AS reloid + , %s + , n.nspname AS relschema + FROM pg_class c + LEFT JOIN pg_namespace n ON( n.oid = c.relnamespace ) +; +$fmt$ + , __cat_tools.omit_column('pg_catalog.pg_class') +)); +REVOKE ALL ON _cat_tools.pg_class_v FROM public; + + /* * Temporary stub function. We do this so we can use the nice create_function * function that we're about to create to create the real version of this @@ -717,15 +745,20 @@ GRANT SELECT ON cat_tools.pg_class_v TO cat_tools__usage; @generated@ +SELECT __cat_tools.exec(format($fmt$ CREATE OR REPLACE VIEW _cat_tools.pg_attribute_v AS - SELECT a.* + SELECT %s , c.* , t.oid AS typoid - , t.* + , %s FROM pg_attribute a LEFT JOIN _cat_tools.pg_class_v c ON ( c.reloid = a.attrelid ) LEFT JOIN pg_type t ON ( t.oid = a.atttypid ) ; +$fmt$ + , __cat_tools.omit_column('pg_catalog.pg_attribute', array['attmissingval']) + , __cat_tools.omit_column('pg_catalog.pg_type') +)); REVOKE ALL ON _cat_tools.pg_attribute_v FROM public; CREATE OR REPLACE VIEW _cat_tools.column AS @@ -756,8 +789,10 @@ REVOKE ALL ON _cat_tools.column FROM public; @generated@ -- No perms on extension visibility +SELECT __cat_tools.exec(format($fmt$ CREATE OR REPLACE VIEW cat_tools.pg_extension_v AS - SELECT e.oid, e.* + SELECT e.oid + , %s , extnamespace::regnamespace AS extschema -- SED: REQUIRES 9.5! , nspname AS extschema -- SED: PRIOR TO 9.5! 
@@ -766,6 +801,9 @@ CREATE OR REPLACE VIEW cat_tools.pg_extension_v AS FROM pg_catalog.pg_extension e LEFT JOIN pg_catalog.pg_namespace n ON n.oid = e.extnamespace ; +$fmt$ + , __cat_tools.omit_column('pg_catalog.pg_extension') +)); GRANT SELECT ON cat_tools.pg_extension_v TO cat_tools__usage; CREATE OR REPLACE VIEW cat_tools.column AS @@ -1318,7 +1356,11 @@ BEGIN RAISE DEBUG 'v_work "%"', v_work; -- Get function arguments - v_execute_clause := ' EXECUTE PROCEDURE ' || r_trigger.tgfoid::pg_catalog.regproc || E'\\('; + IF current_setting('server_version')::real >= 11.0 THEN + v_execute_clause := ' EXECUTE FUNCTION ' || r_trigger.tgfoid::pg_catalog.regproc || E'\\('; + ELSE + v_execute_clause := ' EXECUTE PROCEDURE ' || r_trigger.tgfoid::pg_catalog.regproc || E'\\('; + END IF; v_array := regexp_split_to_array( v_work, v_execute_clause ); EXECUTE format( 'SELECT array[ %s ]' @@ -1454,6 +1496,10 @@ CLUSTER _cat_tools.catalog_metadata USING catalog_metadata__pk_object_catalog; /* * Drop "temporary" objects */ +DROP FUNCTION __cat_tools.omit_column( + rel text + , omit name[] -- DEFAULT array['oid'] +); DROP FUNCTION __cat_tools.exec( sql text ); diff --git a/sql/omit_column.sql b/sql/omit_column.sql new file mode 100644 index 0000000..1d95832 --- /dev/null +++ b/sql/omit_column.sql @@ -0,0 +1,16 @@ +CREATE FUNCTION :s.omit_column( + rel text + , omit name[] DEFAULT array['oid'] +) RETURNS text LANGUAGE sql IMMUTABLE AS $body$ +SELECT array_to_string(array( + SELECT attname + FROM pg_attribute a + WHERE attrelid = rel::regclass + AND NOT attisdropped + AND attnum >= 0 + AND attname != ANY( omit ) + ORDER BY attnum + ) + , ', ' +) +$body$; diff --git a/test/expected/general.out b/test/expected/general.out index 008cc56..84e08e2 100644 --- a/test/expected/general.out +++ b/test/expected/general.out @@ -1,5 +1,4 @@ \set ECHO none -1..2 +1..1 ok 1 - Schema __cat_tools should not exist -ok 2 - verify pg_get_object_address # TRANSACTION INTENTIONALLY LEFT OPEN! 
diff --git a/test/expected/zzz_build.out b/test/expected/zzz_build.out index 754863c..dbe9615 100644 --- a/test/expected/zzz_build.out +++ b/test/expected/zzz_build.out @@ -94,6 +94,10 @@ + + + + diff --git a/test/setup.sql b/test/setup.sql index ae7b502..f6735ed 100644 --- a/test/setup.sql +++ b/test/setup.sql @@ -16,4 +16,21 @@ RETURNS int LANGUAGE sql IMMUTABLE AS $$ SELECT current_setting('server_version_num')::int/100 $$; +CREATE FUNCTION pg_temp.omit_column( + rel text + , omit name[] DEFAULT array['oid'] +) RETURNS text LANGUAGE sql IMMUTABLE AS $body$ +SELECT array_to_string(array( + SELECT attname + FROM pg_attribute a + WHERE attrelid = rel::regclass + AND NOT attisdropped + AND attnum >= 0 + AND attname != ANY( omit ) + ORDER BY attnum + ) + , ', ' +) +$body$; + -- vi: expandtab ts=2 sw=2 diff --git a/test/sql/attribute.sql b/test/sql/attribute.sql index afcfe13..59c9486 100644 --- a/test/sql/attribute.sql +++ b/test/sql/attribute.sql @@ -41,6 +41,7 @@ SET LOCAL ROLE :use_role; \set call 'SELECT * FROM %I.%I( %L, %L )' \set n pg_attribute__get + SELECT throws_ok( format( :'call', :'s', :'n' @@ -62,13 +63,23 @@ SELECT throws_ok( , 'Non-existent column throws error' ); +/* + * pg_attributes.attmissingval is type anyarray, which doesn't have an equality + * operator. That breaks results_eq(), so we have to omit it from the column + * list. 
+ */ +SELECT pg_temp.omit_column('pg_catalog.pg_attribute', array['attmissingval']) AS atts +\gset +\set get_attributes 'SELECT ' :atts ' FROM pg_attribute ' +\set call 'SELECT ' :atts ' FROM %I.%I( %L, %L )' + SELECT results_eq( format( :'call', :'s', :'n' , 'pg_catalog.pg_class' , 'relname' ) - , $$SELECT * FROM pg_attribute WHERE attrelid = 'pg_class'::regclass AND attname='relname'$$ + , :'get_attributes' || $$WHERE attrelid = 'pg_class'::regclass AND attname='relname'$$ , 'Verify details of pg_class.relname' ); SELECT results_eq( @@ -77,7 +88,7 @@ SELECT results_eq( , 'pg_catalog.pg_tables' , 'tablename' ) - , $$SELECT * FROM pg_attribute WHERE attrelid = 'pg_tables'::regclass AND attname='tablename'$$ + , :'get_attributes' || $$WHERE attrelid = 'pg_tables'::regclass AND attname='tablename'$$ , 'Verify details of pg_tables.tablename' ); diff --git a/test/sql/extension.sql b/test/sql/extension.sql index b79f160..39cf7f7 100644 --- a/test/sql/extension.sql +++ b/test/sql/extension.sql @@ -32,7 +32,7 @@ SELECT isnt_empty( SELECT bag_eq( $$SELECT * FROM cat_tools.pg_extension__get('cat_tools')$$ , format( - $$SELECT e.oid, e.*, %s, extconfig::regclass[] AS ext_config_table + $$SELECT e.*, %s, extconfig::regclass[] AS ext_config_table FROM pg_extension e JOIN pg_namespace n ON n.oid = extnamespace WHERE extname = 'cat_tools' From e8e2717a4c57c0da1acd85bdef15cd67a8fcc798 Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Thu, 7 Nov 2024 15:20:48 -0600 Subject: [PATCH 03/18] Add pgxn-tools CI --- .github/workflows/ci.yml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 .github/workflows/ci.yml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..b889baa --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,17 @@ +name: CI +on: [push, pull_request] +jobs: + test: + strategy: + matrix: + pg: [17, 16, 15, 14, 13, 12, 11, 10, 9.6, 9.5, 9.4, 9.3, 9.2, 9.1, 9.0, 8.4, 8.3, 8.2] + name: 🐘 PostgreSQL 
${{ matrix.pg }} + runs-on: ubuntu-latest + container: pgxn/pgxn-tools + steps: + - name: Start PostgreSQL ${{ matrix.pg }} + run: pg-start ${{ matrix.pg }} + - name: Check out the repo + uses: actions/checkout@v4 + - name: Test on PostgreSQL ${{ matrix.pg }} + run: pg-build-test From 0ff8a9a20ed028ad6994af2f4f30377543348e67 Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Thu, 7 Nov 2024 15:46:32 -0600 Subject: [PATCH 04/18] Fix CI version matrix --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b889baa..0946c32 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -4,7 +4,7 @@ jobs: test: strategy: matrix: - pg: [17, 16, 15, 14, 13, 12, 11, 10, 9.6, 9.5, 9.4, 9.3, 9.2, 9.1, 9.0, 8.4, 8.3, 8.2] + pg: [17, 16, 15, 14, 13, 12, 11, 10, 9.6, 9.5, 9.4, 9.3, 9.2] name: 🐘 PostgreSQL ${{ matrix.pg }} runs-on: ubuntu-latest container: pgxn/pgxn-tools From 5c59f66e492068b11aea5103117fe97a20bc39b4 Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Mon, 11 Nov 2024 13:08:41 -0600 Subject: [PATCH 05/18] Remove .travis.yml --- .travis.yml | 19 ------------------- 1 file changed, 19 deletions(-) delete mode 100644 .travis.yml diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index ddca483..0000000 --- a/.travis.yml +++ /dev/null @@ -1,19 +0,0 @@ -language: c -before_install: - - wget https://gist.github.com/petere/5893799/raw/apt.postgresql.org.sh - - sudo sh ./apt.postgresql.org.sh - - sudo sh -c "echo deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs 2>/dev/null)-pgdg main $PGVERSION >> /etc/apt/sources.list.d/pgdg.list" -env: - - PGVERSION=9.6 - - PGVERSION=9.5 - - PGVERSION=9.4 - - PGVERSION=9.3 - - PGVERSION=9.2 - - - PGVERSION=9.6 TARGET='set-test-upgrade test' - - PGVERSION=9.5 TARGET='set-test-upgrade test' - - PGVERSION=9.4 TARGET='set-test-upgrade test' - - PGVERSION=9.3 TARGET='set-test-upgrade test' - - PGVERSION=9.2 
TARGET='set-test-upgrade test' - -script: bash ./pg-travis-test.sh From ca7c8c1f2b8acf0e03bd3b748714a6fc6d81cfa5 Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Thu, 28 Aug 2025 17:08:55 -0500 Subject: [PATCH 06/18] Add new relkind entries --- META.in.json | 4 +- META.json | 4 +- README.asc | 5 +- cat_tools.control | 2 +- sql/cat_tools--0.2.1--0.3.0.sql | 155 +++++++++++++ sql/cat_tools.sql.in | 12 + test/expected/object_type.out | 378 ++++++++++++++++---------------- test/expected/relation_type.out | 40 ++-- test/sql/object_type.sql | 2 +- test/sql/relation_type.sql | 2 +- 10 files changed, 391 insertions(+), 213 deletions(-) create mode 100644 sql/cat_tools--0.2.1--0.3.0.sql diff --git a/META.in.json b/META.in.json index cb35668..7501152 100644 --- a/META.in.json +++ b/META.in.json @@ -14,7 +14,7 @@ "name": "cat_tools", "X_comment": "REQUIRED. Version of the distribution. http://pgxn.org/spec/#version", - "version": "0.2.1", + "version": "0.3.0", "X_comment": "REQUIRED. Short description of distribution.", "abstract": "Tools for interfacing with the Postgres catalog", @@ -37,7 +37,7 @@ "file": "sql/cat_tools.sql", "X_comment": "REQUIRED. Version the extension is at.", - "version": "0.2.1", + "version": "0.3.0", "X_comment": "Optional: \"abstract\": Description of the extension.", "abstract": "Tools for interfacing with the catalog", diff --git a/META.json b/META.json index ad50c0f..a4c7e7d 100644 --- a/META.json +++ b/META.json @@ -14,7 +14,7 @@ "name": "cat_tools", "X_comment": "REQUIRED. Version of the distribution. http://pgxn.org/spec/#version", - "version": "0.2.1", + "version": "0.3.0", "X_comment": "REQUIRED. Short description of distribution.", "abstract": "Tools for interfacing with the Postgres catalog", @@ -37,7 +37,7 @@ "file": "sql/cat_tools.sql", "X_comment": "REQUIRED. 
Version the extension is at.", - "version": "0.2.1", + "version": "0.3.0", "X_comment": "Optional: \"abstract\": Description of the extension.", "abstract": "Tools for interfacing with the catalog", diff --git a/README.asc b/README.asc index 9fa8761..bf35c9e 100644 --- a/README.asc +++ b/README.asc @@ -6,9 +6,6 @@ To make use of them, you need to grant `cat_tools__usage` to any roles that need == Current Status -image:https://badge.fury.io/pg/cat_tools.svg["PGXN version",link="https://badge.fury.io/pg/cat_tools"] -image:https://travis-ci.org/decibel/cat_tools.png["Build Status",link="https://travis-ci.org/decibel/cat_tools"] - This is very much a work in progress. If it doesn't do something you need, please https://github.com/decibel/cat_tools/issues[open an issue]! === Supported Versions @@ -58,4 +55,4 @@ Copyright and License Cat Tools is released under a https://github.com/decibel/cattools/blob/master/LICENSE[MIT license]. -Copyright (c) 2016 Jim Nasby . +Copyright (c) 2025 Jim Nasby . 
diff --git a/cat_tools.control b/cat_tools.control index 5248506..d798380 100644 --- a/cat_tools.control +++ b/cat_tools.control @@ -1,4 +1,4 @@ comment = 'Tools for intorfacing with the catalog' -default_version = '0.2.1' +default_version = '0.3.0' relocatable = false schema = 'cat_tools' diff --git a/sql/cat_tools--0.2.1--0.3.0.sql b/sql/cat_tools--0.2.1--0.3.0.sql new file mode 100644 index 0000000..54c086e --- /dev/null +++ b/sql/cat_tools--0.2.1--0.3.0.sql @@ -0,0 +1,155 @@ +CREATE SCHEMA __cat_tools; + +CREATE FUNCTION __cat_tools.exec( + sql text +) RETURNS void LANGUAGE plpgsql AS $body$ +BEGIN + RAISE DEBUG 'sql = %', sql; + EXECUTE sql; +END +$body$; + +CREATE FUNCTION __cat_tools.create_function( + function_name text + , args text + , options text + , body text + , grants text DEFAULT NULL + , comment text DEFAULT NULL +) RETURNS void LANGUAGE plpgsql AS $body$ +DECLARE + c_simple_args CONSTANT text := cat_tools.function__arg_types_text(args); + + create_template CONSTANT text := $template$ +CREATE OR REPLACE FUNCTION %s( +%s +) RETURNS %s AS +%L +$template$ + ; + + revoke_template CONSTANT text := $template$ +REVOKE ALL ON FUNCTION %s( +%s +) FROM public; +$template$ + ; + + grant_template CONSTANT text := $template$ +GRANT EXECUTE ON FUNCTION %s( +%s +) TO %s; +$template$ + ; + + comment_template CONSTANT text := $template$ +COMMENT ON FUNCTION %s( +%s +) IS %L; +$template$ + ; + +BEGIN + PERFORM __cat_tools.exec( format( + create_template + , function_name + , args + , options -- TODO: Force search_path if options ~* 'definer' + , body + ) ) + ; + PERFORM __cat_tools.exec( format( + revoke_template + , function_name + , c_simple_args + ) ) + ; + + IF grants IS NOT NULL THEN + PERFORM __cat_tools.exec( format( + grant_template + , function_name + , c_simple_args + , grants + ) ) + ; + END IF; + + IF comment IS NOT NULL THEN + PERFORM __cat_tools.exec( format( + comment_template + , function_name + , c_simple_args + , comment + ) ) + ; + END IF; +END 
+$body$; + +ALTER TYPE cat_tools.relation_type ADD VALUE 'partitioned table'; +ALTER TYPE cat_tools.relation_type ADD VALUE 'partitioned index'; + +ALTER TYPE cat_tools.relation_relkind ADD VALUE 'p'; +ALTER TYPE cat_tools.relation_relkind ADD VALUE 'I'; + +ALTER TYPE cat_tools.object_type ADD VALUE 'partitioned table' AFTER 'foreign table'; +ALTER TYPE cat_tools.object_type ADD VALUE 'partitioned index' AFTER 'partitioned table'; + + +SELECT __cat_tools.create_function( + 'cat_tools.relation__kind' + , 'relkind cat_tools.relation_relkind' + , 'cat_tools.relation_type LANGUAGE sql STRICT IMMUTABLE' + , $body$ +SELECT CASE relkind + WHEN 'r' THEN 'table' + WHEN 'i' THEN 'index' + WHEN 'S' THEN 'sequence' + WHEN 't' THEN 'toast table' + WHEN 'v' THEN 'view' + WHEN 'c' THEN 'materialized view' + WHEN 'f' THEN 'composite type' + WHEN 'm' THEN 'foreign table' + WHEN 'p' THEN 'partitioned table' + WHEN 'I' THEN 'partitioned index' +END::cat_tools.relation_type +$body$ + , 'cat_tools__usage' + , 'Mapping from to a ' +); + +SELECT __cat_tools.create_function( + 'cat_tools.relation__relkind' + , 'kind cat_tools.relation_type' + , 'cat_tools.relation_relkind LANGUAGE sql STRICT IMMUTABLE' + , $body$ +SELECT CASE kind + WHEN 'table' THEN 'r' + WHEN 'index' THEN 'i' + WHEN 'sequence' THEN 'S' + WHEN 'toast table' THEN 't' + WHEN 'view' THEN 'v' + WHEN 'materialized view' THEN 'c' + WHEN 'composite type' THEN 'f' + WHEN 'foreign table' THEN 'm' + WHEN 'partitioned table' THEN 'p' + WHEN 'partitioned index' THEN 'I' +END::cat_tools.relation_relkind +$body$ + , 'cat_tools__usage' + , 'Mapping from to a value' +); + +DROP FUNCTION __cat_tools.exec( + sql text +); +DROP FUNCTION __cat_tools.create_function( + function_name text + , args text + , options text + , body text + , grants text + , comment text +); +DROP SCHEMA __cat_tools; diff --git a/sql/cat_tools.sql.in b/sql/cat_tools.sql.in index fba34a6..273374b 100644 --- a/sql/cat_tools.sql.in +++ b/sql/cat_tools.sql.in @@ -283,6 
+283,8 @@ CREATE TYPE cat_tools.relation_type AS ENUM( , 'materialized view' , 'composite type' , 'foreign table' + , 'partitioned table' + , 'partitioned index' ); COMMENT ON TYPE cat_tools.relation_type IS $$Types of objects stored in `pg_class`$$; @@ -295,6 +297,8 @@ CREATE TYPE cat_tools.relation_relkind AS ENUM( , 'c' , 'f' , 'm' + , 'p' + , 'I' ); COMMENT ON TYPE cat_tools.relation_relkind IS $$Valid values for `pg_class.relkind`$$; @@ -310,6 +314,8 @@ CREATE TYPE cat_tools.object_type AS ENUM( , 'materialized view' , 'composite type' , 'foreign table' + , 'partitioned table' + , 'partitioned index' /* * NOTE! These are a bit weird because columns live in pg_attribute, but * address stuff recognizes columns as part of pg_class with a subobjid <> 0! @@ -471,6 +477,8 @@ SELECT ( , 'materialized view' , 'composite type' , 'foreign table' + , 'partitioned table' + , 'partitioned index' ]::cat_tools.object_type[] ) THEN 'pg_class' WHEN object_type = ANY( '{domain constraint,table constraint}'::cat_tools.object_type[] ) @@ -655,6 +663,8 @@ SELECT CASE relkind WHEN 'c' THEN 'materialized view' WHEN 'f' THEN 'composite type' WHEN 'm' THEN 'foreign table' + WHEN 'p' THEN 'partitioned table' + WHEN 'I' THEN 'partitioned index' END::cat_tools.relation_type $body$ , 'cat_tools__usage' @@ -675,6 +685,8 @@ SELECT CASE kind WHEN 'materialized view' THEN 'c' WHEN 'composite type' THEN 'f' WHEN 'foreign table' THEN 'm' + WHEN 'partitioned table' THEN 'p' + WHEN 'partitioned index' THEN 'I' END::cat_tools.relation_relkind $body$ , 'cat_tools__usage' diff --git a/test/expected/object_type.out b/test/expected/object_type.out index ea37620..1c9029b 100644 --- a/test/expected/object_type.out +++ b/test/expected/object_type.out @@ -1,5 +1,5 @@ \set ECHO none -1..222 +1..230 ok 1 - sanity check size of pg_temp.extra_types() ok 2 - sanity check size of pg_temp.obj_type ok 3 - Permission denied trying to use types @@ -38,188 +38,196 @@ ok 35 - check addressability for object type 
'operator' ok 36 - check addressability for object type 'operator class' ok 37 - check addressability for object type 'operator family' ok 38 - check addressability for object type 'operator of access method' -ok 39 - check addressability for object type 'policy' -ok 40 - check addressability for object type 'role' -ok 41 - check addressability for object type 'rule' -ok 42 - check addressability for object type 'schema' -ok 43 - check addressability for object type 'sequence' -ok 44 - check addressability for object type 'sequence column' -ok 45 - check addressability for object type 'server' -ok 46 - check addressability for object type 'table' -ok 47 - check addressability for object type 'table column' -ok 48 - check addressability for object type 'table constraint' -ok 49 - check addressability for object type 'tablespace' -ok 50 - check addressability for object type 'text search configuration' -ok 51 - check addressability for object type 'text search dictionary' -ok 52 - check addressability for object type 'text search parser' -ok 53 - check addressability for object type 'text search template' -ok 54 - check addressability for object type 'toast table' -ok 55 - check addressability for object type 'toast table column' -ok 56 - check addressability for object type 'transform' -ok 57 - check addressability for object type 'trigger' -ok 58 - check addressability for object type 'type' -ok 59 - check addressability for object type 'user mapping' -ok 60 - check addressability for object type 'view' -ok 61 - check addressability for object type 'view column' -ok 62 - lives_ok: SELECT * FROM cat_tools.object__catalog('access method') -ok 63 - lives_ok: SELECT * FROM cat_tools.object__catalog('aggregate') -ok 64 - lives_ok: SELECT * FROM cat_tools.object__catalog('cast') -ok 65 - lives_ok: SELECT * FROM cat_tools.object__catalog('collation') -ok 66 - lives_ok: SELECT * FROM cat_tools.object__catalog('composite type') -ok 67 - lives_ok: SELECT * FROM 
cat_tools.object__catalog('composite type column') -ok 68 - lives_ok: SELECT * FROM cat_tools.object__catalog('conversion') -ok 69 - lives_ok: SELECT * FROM cat_tools.object__catalog('database') -ok 70 - lives_ok: SELECT * FROM cat_tools.object__catalog('default acl') -ok 71 - lives_ok: SELECT * FROM cat_tools.object__catalog('default value') -ok 72 - lives_ok: SELECT * FROM cat_tools.object__catalog('domain constraint') -ok 73 - lives_ok: SELECT * FROM cat_tools.object__catalog('event trigger') -ok 74 - lives_ok: SELECT * FROM cat_tools.object__catalog('extension') -ok 75 - lives_ok: SELECT * FROM cat_tools.object__catalog('foreign table') -ok 76 - lives_ok: SELECT * FROM cat_tools.object__catalog('foreign table column') -ok 77 - lives_ok: SELECT * FROM cat_tools.object__catalog('foreign-data wrapper') -ok 78 - lives_ok: SELECT * FROM cat_tools.object__catalog('function') -ok 79 - lives_ok: SELECT * FROM cat_tools.object__catalog('function of access method') -ok 80 - lives_ok: SELECT * FROM cat_tools.object__catalog('index') -ok 81 - lives_ok: SELECT * FROM cat_tools.object__catalog('index column') -ok 82 - lives_ok: SELECT * FROM cat_tools.object__catalog('language') -ok 83 - lives_ok: SELECT * FROM cat_tools.object__catalog('large object') -ok 84 - lives_ok: SELECT * FROM cat_tools.object__catalog('materialized view') -ok 85 - lives_ok: SELECT * FROM cat_tools.object__catalog('materialized view column') -ok 86 - lives_ok: SELECT * FROM cat_tools.object__catalog('operator') -ok 87 - lives_ok: SELECT * FROM cat_tools.object__catalog('operator class') -ok 88 - lives_ok: SELECT * FROM cat_tools.object__catalog('operator family') -ok 89 - lives_ok: SELECT * FROM cat_tools.object__catalog('operator of access method') -ok 90 - lives_ok: SELECT * FROM cat_tools.object__catalog('policy') -ok 91 - lives_ok: SELECT * FROM cat_tools.object__catalog('role') -ok 92 - lives_ok: SELECT * FROM cat_tools.object__catalog('rule') -ok 93 - lives_ok: SELECT * FROM 
cat_tools.object__catalog('schema') -ok 94 - lives_ok: SELECT * FROM cat_tools.object__catalog('sequence') -ok 95 - lives_ok: SELECT * FROM cat_tools.object__catalog('sequence column') -ok 96 - lives_ok: SELECT * FROM cat_tools.object__catalog('server') -ok 97 - lives_ok: SELECT * FROM cat_tools.object__catalog('table') -ok 98 - lives_ok: SELECT * FROM cat_tools.object__catalog('table column') -ok 99 - lives_ok: SELECT * FROM cat_tools.object__catalog('table constraint') -ok 100 - lives_ok: SELECT * FROM cat_tools.object__catalog('tablespace') -ok 101 - lives_ok: SELECT * FROM cat_tools.object__catalog('text search configuration') -ok 102 - lives_ok: SELECT * FROM cat_tools.object__catalog('text search dictionary') -ok 103 - lives_ok: SELECT * FROM cat_tools.object__catalog('text search parser') -ok 104 - lives_ok: SELECT * FROM cat_tools.object__catalog('text search template') -ok 105 - lives_ok: SELECT * FROM cat_tools.object__catalog('toast table') -ok 106 - lives_ok: SELECT * FROM cat_tools.object__catalog('toast table column') -ok 107 - lives_ok: SELECT * FROM cat_tools.object__catalog('transform') -ok 108 - lives_ok: SELECT * FROM cat_tools.object__catalog('trigger') -ok 109 - lives_ok: SELECT * FROM cat_tools.object__catalog('type') -ok 110 - lives_ok: SELECT * FROM cat_tools.object__catalog('user mapping') -ok 111 - lives_ok: SELECT * FROM cat_tools.object__catalog('view') -ok 112 - lives_ok: SELECT * FROM cat_tools.object__catalog('view column') -ok 113 - lives_ok: SELECT * FROM cat_tools.object__reg_type('access method') -ok 114 - lives_ok: SELECT * FROM cat_tools.object__reg_type('aggregate') -ok 115 - lives_ok: SELECT * FROM cat_tools.object__reg_type('cast') -ok 116 - lives_ok: SELECT * FROM cat_tools.object__reg_type('collation') -ok 117 - lives_ok: SELECT * FROM cat_tools.object__reg_type('composite type') -ok 118 - lives_ok: SELECT * FROM cat_tools.object__reg_type('composite type column') -ok 119 - lives_ok: SELECT * FROM 
cat_tools.object__reg_type('conversion') -ok 120 - lives_ok: SELECT * FROM cat_tools.object__reg_type('database') -ok 121 - lives_ok: SELECT * FROM cat_tools.object__reg_type('default acl') -ok 122 - lives_ok: SELECT * FROM cat_tools.object__reg_type('default value') -ok 123 - lives_ok: SELECT * FROM cat_tools.object__reg_type('domain constraint') -ok 124 - lives_ok: SELECT * FROM cat_tools.object__reg_type('event trigger') -ok 125 - lives_ok: SELECT * FROM cat_tools.object__reg_type('extension') -ok 126 - lives_ok: SELECT * FROM cat_tools.object__reg_type('foreign table') -ok 127 - lives_ok: SELECT * FROM cat_tools.object__reg_type('foreign table column') -ok 128 - lives_ok: SELECT * FROM cat_tools.object__reg_type('foreign-data wrapper') -ok 129 - lives_ok: SELECT * FROM cat_tools.object__reg_type('function') -ok 130 - lives_ok: SELECT * FROM cat_tools.object__reg_type('function of access method') -ok 131 - lives_ok: SELECT * FROM cat_tools.object__reg_type('index') -ok 132 - lives_ok: SELECT * FROM cat_tools.object__reg_type('index column') -ok 133 - lives_ok: SELECT * FROM cat_tools.object__reg_type('language') -ok 134 - lives_ok: SELECT * FROM cat_tools.object__reg_type('large object') -ok 135 - lives_ok: SELECT * FROM cat_tools.object__reg_type('materialized view') -ok 136 - lives_ok: SELECT * FROM cat_tools.object__reg_type('materialized view column') -ok 137 - lives_ok: SELECT * FROM cat_tools.object__reg_type('operator') -ok 138 - lives_ok: SELECT * FROM cat_tools.object__reg_type('operator class') -ok 139 - lives_ok: SELECT * FROM cat_tools.object__reg_type('operator family') -ok 140 - lives_ok: SELECT * FROM cat_tools.object__reg_type('operator of access method') -ok 141 - lives_ok: SELECT * FROM cat_tools.object__reg_type('policy') -ok 142 - lives_ok: SELECT * FROM cat_tools.object__reg_type('role') -ok 143 - lives_ok: SELECT * FROM cat_tools.object__reg_type('rule') -ok 144 - lives_ok: SELECT * FROM cat_tools.object__reg_type('schema') -ok 145 - 
lives_ok: SELECT * FROM cat_tools.object__reg_type('sequence') -ok 146 - lives_ok: SELECT * FROM cat_tools.object__reg_type('sequence column') -ok 147 - lives_ok: SELECT * FROM cat_tools.object__reg_type('server') -ok 148 - lives_ok: SELECT * FROM cat_tools.object__reg_type('table') -ok 149 - lives_ok: SELECT * FROM cat_tools.object__reg_type('table column') -ok 150 - lives_ok: SELECT * FROM cat_tools.object__reg_type('table constraint') -ok 151 - lives_ok: SELECT * FROM cat_tools.object__reg_type('tablespace') -ok 152 - lives_ok: SELECT * FROM cat_tools.object__reg_type('text search configuration') -ok 153 - lives_ok: SELECT * FROM cat_tools.object__reg_type('text search dictionary') -ok 154 - lives_ok: SELECT * FROM cat_tools.object__reg_type('text search parser') -ok 155 - lives_ok: SELECT * FROM cat_tools.object__reg_type('text search template') -ok 156 - lives_ok: SELECT * FROM cat_tools.object__reg_type('toast table') -ok 157 - lives_ok: SELECT * FROM cat_tools.object__reg_type('toast table column') -ok 158 - lives_ok: SELECT * FROM cat_tools.object__reg_type('transform') -ok 159 - lives_ok: SELECT * FROM cat_tools.object__reg_type('trigger') -ok 160 - lives_ok: SELECT * FROM cat_tools.object__reg_type('type') -ok 161 - lives_ok: SELECT * FROM cat_tools.object__reg_type('user mapping') -ok 162 - lives_ok: SELECT * FROM cat_tools.object__reg_type('view') -ok 163 - lives_ok: SELECT * FROM cat_tools.object__reg_type('view column') -ok 164 - Verify cat_tools.object__address_classid('access method') -ok 165 - Verify cat_tools.object__address_classid('aggregate') -ok 166 - Verify cat_tools.object__address_classid('cast') -ok 167 - Verify cat_tools.object__address_classid('collation') -ok 168 - Verify cat_tools.object__address_classid('composite type') -ok 169 - Verify cat_tools.object__address_classid('composite type column') -ok 170 - Verify cat_tools.object__address_classid('conversion') -ok 171 - Verify cat_tools.object__address_classid('database') -ok 172 - 
Verify cat_tools.object__address_classid('default acl') -ok 173 - Verify cat_tools.object__address_classid('default value') -ok 174 - Verify cat_tools.object__address_classid('domain constraint') -ok 175 - Verify cat_tools.object__address_classid('event trigger') -ok 176 - Verify cat_tools.object__address_classid('extension') -ok 177 - Verify cat_tools.object__address_classid('foreign table') -ok 178 - Verify cat_tools.object__address_classid('foreign table column') -ok 179 - Verify cat_tools.object__address_classid('foreign-data wrapper') -ok 180 - Verify cat_tools.object__address_classid('function') -ok 181 - Verify cat_tools.object__address_classid('function of access method') -ok 182 - Verify cat_tools.object__address_classid('index') -ok 183 - Verify cat_tools.object__address_classid('index column') -ok 184 - Verify cat_tools.object__address_classid('language') -ok 185 - Verify cat_tools.object__address_classid('large object') -ok 186 - Verify cat_tools.object__address_classid('materialized view') -ok 187 - Verify cat_tools.object__address_classid('materialized view column') -ok 188 - Verify cat_tools.object__address_classid('operator') -ok 189 - Verify cat_tools.object__address_classid('operator class') -ok 190 - Verify cat_tools.object__address_classid('operator family') -ok 191 - Verify cat_tools.object__address_classid('operator of access method') -ok 192 - Verify cat_tools.object__address_classid('policy') -ok 193 - Verify cat_tools.object__address_classid('role') -ok 194 - Verify cat_tools.object__address_classid('rule') -ok 195 - Verify cat_tools.object__address_classid('schema') -ok 196 - Verify cat_tools.object__address_classid('sequence') -ok 197 - Verify cat_tools.object__address_classid('sequence column') -ok 198 - Verify cat_tools.object__address_classid('server') -ok 199 - Verify cat_tools.object__address_classid('table') -ok 200 - Verify cat_tools.object__address_classid('table column') -ok 201 - Verify cat_tools.object__address_classid('table 
constraint') -ok 202 - Verify cat_tools.object__address_classid('tablespace') -ok 203 - Verify cat_tools.object__address_classid('text search configuration') -ok 204 - Verify cat_tools.object__address_classid('text search dictionary') -ok 205 - Verify cat_tools.object__address_classid('text search parser') -ok 206 - Verify cat_tools.object__address_classid('text search template') -ok 207 - Verify cat_tools.object__address_classid('toast table') -ok 208 - Verify cat_tools.object__address_classid('toast table column') -ok 209 - Verify cat_tools.object__address_classid('transform') -ok 210 - Verify cat_tools.object__address_classid('trigger') -ok 211 - Verify cat_tools.object__address_classid('type') -ok 212 - Verify cat_tools.object__address_classid('user mapping') -ok 213 - Verify cat_tools.object__address_classid('view') -ok 214 - Verify cat_tools.object__address_classid('view column') -ok 215 - Change search_path -ok 216 - Create bogus pg_class table -ok 217 - Create bogus regclass type -ok 218 - Simple 'pg_class'::pg_catalog.regclass should not return pg_catalog.pg_class -ok 219 - Simple 'regclass'::regtype should not return pg_catalog.regtype -ok 220 - cat_tools.object__catalog('table') returns pg_catalog.pg_class -ok 221 - cat_tools.object__catalog('table') returns pg_catalog.pg_class -ok 222 - Verify objects__shared_src() returns correct values +ok 39 - check addressability for object type 'partitioned index' +ok 40 - check addressability for object type 'partitioned table' +ok 41 - check addressability for object type 'policy' +ok 42 - check addressability for object type 'role' +ok 43 - check addressability for object type 'rule' +ok 44 - check addressability for object type 'schema' +ok 45 - check addressability for object type 'sequence' +ok 46 - check addressability for object type 'sequence column' +ok 47 - check addressability for object type 'server' +ok 48 - check addressability for object type 'table' +ok 49 - check addressability for object type 
'table column' +ok 50 - check addressability for object type 'table constraint' +ok 51 - check addressability for object type 'tablespace' +ok 52 - check addressability for object type 'text search configuration' +ok 53 - check addressability for object type 'text search dictionary' +ok 54 - check addressability for object type 'text search parser' +ok 55 - check addressability for object type 'text search template' +ok 56 - check addressability for object type 'toast table' +ok 57 - check addressability for object type 'toast table column' +ok 58 - check addressability for object type 'transform' +ok 59 - check addressability for object type 'trigger' +ok 60 - check addressability for object type 'type' +ok 61 - check addressability for object type 'user mapping' +ok 62 - check addressability for object type 'view' +ok 63 - check addressability for object type 'view column' +ok 64 - lives_ok: SELECT * FROM cat_tools.object__catalog('access method') +ok 65 - lives_ok: SELECT * FROM cat_tools.object__catalog('aggregate') +ok 66 - lives_ok: SELECT * FROM cat_tools.object__catalog('cast') +ok 67 - lives_ok: SELECT * FROM cat_tools.object__catalog('collation') +ok 68 - lives_ok: SELECT * FROM cat_tools.object__catalog('composite type') +ok 69 - lives_ok: SELECT * FROM cat_tools.object__catalog('composite type column') +ok 70 - lives_ok: SELECT * FROM cat_tools.object__catalog('conversion') +ok 71 - lives_ok: SELECT * FROM cat_tools.object__catalog('database') +ok 72 - lives_ok: SELECT * FROM cat_tools.object__catalog('default acl') +ok 73 - lives_ok: SELECT * FROM cat_tools.object__catalog('default value') +ok 74 - lives_ok: SELECT * FROM cat_tools.object__catalog('domain constraint') +ok 75 - lives_ok: SELECT * FROM cat_tools.object__catalog('event trigger') +ok 76 - lives_ok: SELECT * FROM cat_tools.object__catalog('extension') +ok 77 - lives_ok: SELECT * FROM cat_tools.object__catalog('foreign table') +ok 78 - lives_ok: SELECT * FROM 
cat_tools.object__catalog('foreign table column') +ok 79 - lives_ok: SELECT * FROM cat_tools.object__catalog('foreign-data wrapper') +ok 80 - lives_ok: SELECT * FROM cat_tools.object__catalog('function') +ok 81 - lives_ok: SELECT * FROM cat_tools.object__catalog('function of access method') +ok 82 - lives_ok: SELECT * FROM cat_tools.object__catalog('index') +ok 83 - lives_ok: SELECT * FROM cat_tools.object__catalog('index column') +ok 84 - lives_ok: SELECT * FROM cat_tools.object__catalog('language') +ok 85 - lives_ok: SELECT * FROM cat_tools.object__catalog('large object') +ok 86 - lives_ok: SELECT * FROM cat_tools.object__catalog('materialized view') +ok 87 - lives_ok: SELECT * FROM cat_tools.object__catalog('materialized view column') +ok 88 - lives_ok: SELECT * FROM cat_tools.object__catalog('operator') +ok 89 - lives_ok: SELECT * FROM cat_tools.object__catalog('operator class') +ok 90 - lives_ok: SELECT * FROM cat_tools.object__catalog('operator family') +ok 91 - lives_ok: SELECT * FROM cat_tools.object__catalog('operator of access method') +ok 92 - lives_ok: SELECT * FROM cat_tools.object__catalog('partitioned index') +ok 93 - lives_ok: SELECT * FROM cat_tools.object__catalog('partitioned table') +ok 94 - lives_ok: SELECT * FROM cat_tools.object__catalog('policy') +ok 95 - lives_ok: SELECT * FROM cat_tools.object__catalog('role') +ok 96 - lives_ok: SELECT * FROM cat_tools.object__catalog('rule') +ok 97 - lives_ok: SELECT * FROM cat_tools.object__catalog('schema') +ok 98 - lives_ok: SELECT * FROM cat_tools.object__catalog('sequence') +ok 99 - lives_ok: SELECT * FROM cat_tools.object__catalog('sequence column') +ok 100 - lives_ok: SELECT * FROM cat_tools.object__catalog('server') +ok 101 - lives_ok: SELECT * FROM cat_tools.object__catalog('table') +ok 102 - lives_ok: SELECT * FROM cat_tools.object__catalog('table column') +ok 103 - lives_ok: SELECT * FROM cat_tools.object__catalog('table constraint') +ok 104 - lives_ok: SELECT * FROM 
cat_tools.object__catalog('tablespace') +ok 105 - lives_ok: SELECT * FROM cat_tools.object__catalog('text search configuration') +ok 106 - lives_ok: SELECT * FROM cat_tools.object__catalog('text search dictionary') +ok 107 - lives_ok: SELECT * FROM cat_tools.object__catalog('text search parser') +ok 108 - lives_ok: SELECT * FROM cat_tools.object__catalog('text search template') +ok 109 - lives_ok: SELECT * FROM cat_tools.object__catalog('toast table') +ok 110 - lives_ok: SELECT * FROM cat_tools.object__catalog('toast table column') +ok 111 - lives_ok: SELECT * FROM cat_tools.object__catalog('transform') +ok 112 - lives_ok: SELECT * FROM cat_tools.object__catalog('trigger') +ok 113 - lives_ok: SELECT * FROM cat_tools.object__catalog('type') +ok 114 - lives_ok: SELECT * FROM cat_tools.object__catalog('user mapping') +ok 115 - lives_ok: SELECT * FROM cat_tools.object__catalog('view') +ok 116 - lives_ok: SELECT * FROM cat_tools.object__catalog('view column') +ok 117 - lives_ok: SELECT * FROM cat_tools.object__reg_type('access method') +ok 118 - lives_ok: SELECT * FROM cat_tools.object__reg_type('aggregate') +ok 119 - lives_ok: SELECT * FROM cat_tools.object__reg_type('cast') +ok 120 - lives_ok: SELECT * FROM cat_tools.object__reg_type('collation') +ok 121 - lives_ok: SELECT * FROM cat_tools.object__reg_type('composite type') +ok 122 - lives_ok: SELECT * FROM cat_tools.object__reg_type('composite type column') +ok 123 - lives_ok: SELECT * FROM cat_tools.object__reg_type('conversion') +ok 124 - lives_ok: SELECT * FROM cat_tools.object__reg_type('database') +ok 125 - lives_ok: SELECT * FROM cat_tools.object__reg_type('default acl') +ok 126 - lives_ok: SELECT * FROM cat_tools.object__reg_type('default value') +ok 127 - lives_ok: SELECT * FROM cat_tools.object__reg_type('domain constraint') +ok 128 - lives_ok: SELECT * FROM cat_tools.object__reg_type('event trigger') +ok 129 - lives_ok: SELECT * FROM cat_tools.object__reg_type('extension') +ok 130 - lives_ok: SELECT * FROM 
cat_tools.object__reg_type('foreign table') +ok 131 - lives_ok: SELECT * FROM cat_tools.object__reg_type('foreign table column') +ok 132 - lives_ok: SELECT * FROM cat_tools.object__reg_type('foreign-data wrapper') +ok 133 - lives_ok: SELECT * FROM cat_tools.object__reg_type('function') +ok 134 - lives_ok: SELECT * FROM cat_tools.object__reg_type('function of access method') +ok 135 - lives_ok: SELECT * FROM cat_tools.object__reg_type('index') +ok 136 - lives_ok: SELECT * FROM cat_tools.object__reg_type('index column') +ok 137 - lives_ok: SELECT * FROM cat_tools.object__reg_type('language') +ok 138 - lives_ok: SELECT * FROM cat_tools.object__reg_type('large object') +ok 139 - lives_ok: SELECT * FROM cat_tools.object__reg_type('materialized view') +ok 140 - lives_ok: SELECT * FROM cat_tools.object__reg_type('materialized view column') +ok 141 - lives_ok: SELECT * FROM cat_tools.object__reg_type('operator') +ok 142 - lives_ok: SELECT * FROM cat_tools.object__reg_type('operator class') +ok 143 - lives_ok: SELECT * FROM cat_tools.object__reg_type('operator family') +ok 144 - lives_ok: SELECT * FROM cat_tools.object__reg_type('operator of access method') +ok 145 - lives_ok: SELECT * FROM cat_tools.object__reg_type('partitioned index') +ok 146 - lives_ok: SELECT * FROM cat_tools.object__reg_type('partitioned table') +ok 147 - lives_ok: SELECT * FROM cat_tools.object__reg_type('policy') +ok 148 - lives_ok: SELECT * FROM cat_tools.object__reg_type('role') +ok 149 - lives_ok: SELECT * FROM cat_tools.object__reg_type('rule') +ok 150 - lives_ok: SELECT * FROM cat_tools.object__reg_type('schema') +ok 151 - lives_ok: SELECT * FROM cat_tools.object__reg_type('sequence') +ok 152 - lives_ok: SELECT * FROM cat_tools.object__reg_type('sequence column') +ok 153 - lives_ok: SELECT * FROM cat_tools.object__reg_type('server') +ok 154 - lives_ok: SELECT * FROM cat_tools.object__reg_type('table') +ok 155 - lives_ok: SELECT * FROM cat_tools.object__reg_type('table column') +ok 156 - 
lives_ok: SELECT * FROM cat_tools.object__reg_type('table constraint') +ok 157 - lives_ok: SELECT * FROM cat_tools.object__reg_type('tablespace') +ok 158 - lives_ok: SELECT * FROM cat_tools.object__reg_type('text search configuration') +ok 159 - lives_ok: SELECT * FROM cat_tools.object__reg_type('text search dictionary') +ok 160 - lives_ok: SELECT * FROM cat_tools.object__reg_type('text search parser') +ok 161 - lives_ok: SELECT * FROM cat_tools.object__reg_type('text search template') +ok 162 - lives_ok: SELECT * FROM cat_tools.object__reg_type('toast table') +ok 163 - lives_ok: SELECT * FROM cat_tools.object__reg_type('toast table column') +ok 164 - lives_ok: SELECT * FROM cat_tools.object__reg_type('transform') +ok 165 - lives_ok: SELECT * FROM cat_tools.object__reg_type('trigger') +ok 166 - lives_ok: SELECT * FROM cat_tools.object__reg_type('type') +ok 167 - lives_ok: SELECT * FROM cat_tools.object__reg_type('user mapping') +ok 168 - lives_ok: SELECT * FROM cat_tools.object__reg_type('view') +ok 169 - lives_ok: SELECT * FROM cat_tools.object__reg_type('view column') +ok 170 - Verify cat_tools.object__address_classid('access method') +ok 171 - Verify cat_tools.object__address_classid('aggregate') +ok 172 - Verify cat_tools.object__address_classid('cast') +ok 173 - Verify cat_tools.object__address_classid('collation') +ok 174 - Verify cat_tools.object__address_classid('composite type') +ok 175 - Verify cat_tools.object__address_classid('composite type column') +ok 176 - Verify cat_tools.object__address_classid('conversion') +ok 177 - Verify cat_tools.object__address_classid('database') +ok 178 - Verify cat_tools.object__address_classid('default acl') +ok 179 - Verify cat_tools.object__address_classid('default value') +ok 180 - Verify cat_tools.object__address_classid('domain constraint') +ok 181 - Verify cat_tools.object__address_classid('event trigger') +ok 182 - Verify cat_tools.object__address_classid('extension') +ok 183 - Verify 
cat_tools.object__address_classid('foreign table') +ok 184 - Verify cat_tools.object__address_classid('foreign table column') +ok 185 - Verify cat_tools.object__address_classid('foreign-data wrapper') +ok 186 - Verify cat_tools.object__address_classid('function') +ok 187 - Verify cat_tools.object__address_classid('function of access method') +ok 188 - Verify cat_tools.object__address_classid('index') +ok 189 - Verify cat_tools.object__address_classid('index column') +ok 190 - Verify cat_tools.object__address_classid('language') +ok 191 - Verify cat_tools.object__address_classid('large object') +ok 192 - Verify cat_tools.object__address_classid('materialized view') +ok 193 - Verify cat_tools.object__address_classid('materialized view column') +ok 194 - Verify cat_tools.object__address_classid('operator') +ok 195 - Verify cat_tools.object__address_classid('operator class') +ok 196 - Verify cat_tools.object__address_classid('operator family') +ok 197 - Verify cat_tools.object__address_classid('operator of access method') +ok 198 - Verify cat_tools.object__address_classid('partitioned index') +ok 199 - Verify cat_tools.object__address_classid('partitioned table') +ok 200 - Verify cat_tools.object__address_classid('policy') +ok 201 - Verify cat_tools.object__address_classid('role') +ok 202 - Verify cat_tools.object__address_classid('rule') +ok 203 - Verify cat_tools.object__address_classid('schema') +ok 204 - Verify cat_tools.object__address_classid('sequence') +ok 205 - Verify cat_tools.object__address_classid('sequence column') +ok 206 - Verify cat_tools.object__address_classid('server') +ok 207 - Verify cat_tools.object__address_classid('table') +ok 208 - Verify cat_tools.object__address_classid('table column') +ok 209 - Verify cat_tools.object__address_classid('table constraint') +ok 210 - Verify cat_tools.object__address_classid('tablespace') +ok 211 - Verify cat_tools.object__address_classid('text search configuration') +ok 212 - Verify 
cat_tools.object__address_classid('text search dictionary') +ok 213 - Verify cat_tools.object__address_classid('text search parser') +ok 214 - Verify cat_tools.object__address_classid('text search template') +ok 215 - Verify cat_tools.object__address_classid('toast table') +ok 216 - Verify cat_tools.object__address_classid('toast table column') +ok 217 - Verify cat_tools.object__address_classid('transform') +ok 218 - Verify cat_tools.object__address_classid('trigger') +ok 219 - Verify cat_tools.object__address_classid('type') +ok 220 - Verify cat_tools.object__address_classid('user mapping') +ok 221 - Verify cat_tools.object__address_classid('view') +ok 222 - Verify cat_tools.object__address_classid('view column') +ok 223 - Change search_path +ok 224 - Create bogus pg_class table +ok 225 - Create bogus regclass type +ok 226 - Simple 'pg_class'::pg_catalog.regclass should not return pg_catalog.pg_class +ok 227 - Simple 'regclass'::regtype should not return pg_catalog.regtype +ok 228 - cat_tools.object__catalog('table') returns pg_catalog.pg_class +ok 229 - cat_tools.object__catalog('table') returns pg_catalog.pg_class +ok 230 - Verify objects__shared_src() returns correct values # TRANSACTION INTENTIONALLY LEFT OPEN! 
diff --git a/test/expected/relation_type.out b/test/expected/relation_type.out index c8d4172..f65ad92 100644 --- a/test/expected/relation_type.out +++ b/test/expected/relation_type.out @@ -1,5 +1,5 @@ \set ECHO none -1..31 +1..37 ok 1 - Verify count from kinds ok 2 - Simple sanity check of relation__kind() ok 3 - Simple sanity check of relation__relkind() @@ -15,20 +15,26 @@ ok 12 - SELECT cat_tools.relation_relkind('view') ok 13 - SELECT cat_tools.relation_relkind('materialized view') ok 14 - SELECT cat_tools.relation_relkind('composite type') ok 15 - SELECT cat_tools.relation_relkind('foreign table') -ok 16 - SELECT cat_tools.relation_type('r') -ok 17 - SELECT cat_tools.relation_type('i') -ok 18 - SELECT cat_tools.relation_type('S') -ok 19 - SELECT cat_tools.relation_type('t') -ok 20 - SELECT cat_tools.relation_type('v') -ok 21 - SELECT cat_tools.relation_type('c') -ok 22 - SELECT cat_tools.relation_type('f') -ok 23 - SELECT cat_tools.relation_type('m') -ok 24 - SELECT cat_tools.relation_type('r'::"char") -ok 25 - SELECT cat_tools.relation_type('i'::"char") -ok 26 - SELECT cat_tools.relation_type('S'::"char") -ok 27 - SELECT cat_tools.relation_type('t'::"char") -ok 28 - SELECT cat_tools.relation_type('v'::"char") -ok 29 - SELECT cat_tools.relation_type('c'::"char") -ok 30 - SELECT cat_tools.relation_type('f'::"char") -ok 31 - SELECT cat_tools.relation_type('m'::"char") +ok 16 - SELECT cat_tools.relation_relkind('partitioned table') +ok 17 - SELECT cat_tools.relation_relkind('partitioned index') +ok 18 - SELECT cat_tools.relation_type('r') +ok 19 - SELECT cat_tools.relation_type('i') +ok 20 - SELECT cat_tools.relation_type('S') +ok 21 - SELECT cat_tools.relation_type('t') +ok 22 - SELECT cat_tools.relation_type('v') +ok 23 - SELECT cat_tools.relation_type('c') +ok 24 - SELECT cat_tools.relation_type('f') +ok 25 - SELECT cat_tools.relation_type('m') +ok 26 - SELECT cat_tools.relation_type('p') +ok 27 - SELECT cat_tools.relation_type('I') +ok 28 - SELECT 
cat_tools.relation_type('r'::"char") +ok 29 - SELECT cat_tools.relation_type('i'::"char") +ok 30 - SELECT cat_tools.relation_type('S'::"char") +ok 31 - SELECT cat_tools.relation_type('t'::"char") +ok 32 - SELECT cat_tools.relation_type('v'::"char") +ok 33 - SELECT cat_tools.relation_type('c'::"char") +ok 34 - SELECT cat_tools.relation_type('f'::"char") +ok 35 - SELECT cat_tools.relation_type('m'::"char") +ok 36 - SELECT cat_tools.relation_type('p'::"char") +ok 37 - SELECT cat_tools.relation_type('I'::"char") # TRANSACTION INTENTIONALLY LEFT OPEN! diff --git a/test/sql/object_type.sql b/test/sql/object_type.sql index e230e9e..3123c4d 100644 --- a/test/sql/object_type.sql +++ b/test/sql/object_type.sql @@ -72,7 +72,7 @@ SELECT is( ); SELECT is( (SELECT count(*)::int FROM obj_type) - , 51 + , 53 , 'sanity check size of pg_temp.obj_type' ); diff --git a/test/sql/relation_type.sql b/test/sql/relation_type.sql index 69beaf1..02999cd 100644 --- a/test/sql/relation_type.sql +++ b/test/sql/relation_type.sql @@ -27,7 +27,7 @@ SELECT plan( SELECT is( (SELECT count(*)::int FROM kinds) - , 8 + , 10 , 'Verify count from kinds' ); From 700ac3b420f2a7eeba841098de1eeca48549547e Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Fri, 29 Aug 2025 15:24:42 -0500 Subject: [PATCH 07/18] Add relation__is_temp and relation__is_catalog functions --- README.asc | 2 ++ sql/cat_tools.sql.in | 28 +++++++++++++++ test/expected/relation__is_catalog.out | 8 +++++ test/expected/relation__is_temp.out | 8 +++++ test/expected/zzz_build.out | 4 +++ test/sql/relation__is_catalog.sql | 47 ++++++++++++++++++++++++++ test/sql/relation__is_temp.sql | 47 ++++++++++++++++++++++++++ 7 files changed, 144 insertions(+) create mode 100644 test/expected/relation__is_catalog.out create mode 100644 test/expected/relation__is_temp.out create mode 100644 test/sql/relation__is_catalog.sql create mode 100644 test/sql/relation__is_temp.sql diff --git a/README.asc b/README.asc index bf35c9e..31152f9 100644 --- 
a/README.asc +++ b/README.asc @@ -36,6 +36,8 @@ Works on Postgres 9.3 and above. * `cat_tools.regprocedure(function_name, arguments)` - Returns regprocedure for function_name and it's full set of arguments * `cat_tools.relation__kind(relkind)` - Mapping from `pg_class.relkind` to a `cat_tools.relation_type` * `cat_tools.relation__relkind(relation_type)` - Mapping from `cat_tools.relation_type` to a `pg_class.relkind` value +* `cat_tools.relation__is_catalog(relation regclass)` - Returns true if the relation is in the `pg_catalog` schema +* `cat_tools.relation__is_temp(relation regclass)` - Returns true if the relation is a temporary table (lives in a schema that starts with 'pg_temp') * `cat_tools.trigger__args_as_text(text)` - Converts the arguments for a trigger function (as returned by `trigger__parse()`) to text (for backwards compatibility). * `cat_tools.trigger__get_oid(trigger_table, trigger_name)` - oid of a trigger. Throws error if trigger doesn't exits. * `cat_tools.trigger__get_oid__loose(trigger_table, trigger_name)` - oid of a trigger. Does _not_ throw error if trigger doesn't exits. 
diff --git a/sql/cat_tools.sql.in b/sql/cat_tools.sql.in index 273374b..1212429 100644 --- a/sql/cat_tools.sql.in +++ b/sql/cat_tools.sql.in @@ -1216,6 +1216,34 @@ $body$ @generated@ +SELECT __cat_tools.create_function( + 'cat_tools.relation__is_temp' + , 'relation pg_catalog.regclass' + , $$boolean LANGUAGE sql STRICT STABLE$$ + , $body$ +SELECT relnamespace::pg_catalog.regnamespace::text ~ '^pg_temp' +FROM pg_catalog.pg_class +WHERE oid = $1 +$body$ + , 'cat_tools__usage' + , $$Returns true if the relation is a temporary table (lives in a schema that starts with 'pg_temp').$$ +); + +SELECT __cat_tools.create_function( + 'cat_tools.relation__is_catalog' + , 'relation pg_catalog.regclass' + , $$boolean LANGUAGE sql STRICT STABLE$$ + , $body$ +SELECT relnamespace::pg_catalog.regnamespace::text = 'pg_catalog' +FROM pg_catalog.pg_class +WHERE oid = $1 +$body$ + , 'cat_tools__usage' + , 'Returns true if the relation is in the pg_catalog schema.' +); + +@generated@ + SELECT __cat_tools.create_function( 'cat_tools.name__check' , 'name_to_check text' diff --git a/test/expected/relation__is_catalog.out b/test/expected/relation__is_catalog.out new file mode 100644 index 0000000..c7f0772 --- /dev/null +++ b/test/expected/relation__is_catalog.out @@ -0,0 +1,8 @@ +\set ECHO none +1..5 +ok 1 - Verify public has no perms +ok 2 - pg_catalog.pg_class is in pg_catalog schema +ok 3 - Create temp table for testing +ok 4 - temp relation is not in pg_catalog schema +ok 5 - NULL input returns NULL (STRICT function) +# TRANSACTION INTENTIONALLY LEFT OPEN! 
diff --git a/test/expected/relation__is_temp.out b/test/expected/relation__is_temp.out new file mode 100644 index 0000000..55b67f7 --- /dev/null +++ b/test/expected/relation__is_temp.out @@ -0,0 +1,8 @@ +\set ECHO none +1..5 +ok 1 - Verify public has no perms +ok 2 - pg_catalog.pg_class is not a temp relation +ok 3 - Create temp table for testing +ok 4 - temp relation is correctly identified as temp +ok 5 - NULL input returns NULL (STRICT function) +# TRANSACTION INTENTIONALLY LEFT OPEN! diff --git a/test/expected/zzz_build.out b/test/expected/zzz_build.out index dbe9615..215ca01 100644 --- a/test/expected/zzz_build.out +++ b/test/expected/zzz_build.out @@ -98,6 +98,10 @@ + + + + diff --git a/test/sql/relation__is_catalog.sql b/test/sql/relation__is_catalog.sql new file mode 100644 index 0000000..a402a4e --- /dev/null +++ b/test/sql/relation__is_catalog.sql @@ -0,0 +1,47 @@ +\set ECHO none + +\i test/setup.sql + +\set s cat_tools +\set f relation__is_catalog + +SELECT plan(5); + +SET LOCAL ROLE :no_use_role; + +SELECT throws_ok( + format( + $$SELECT %I.%I( %L )$$ + , :'s', :'f' + , 'pg_catalog.pg_class' + ) + , '42501' + , NULL + , 'Verify public has no perms' +); + +SET LOCAL ROLE :use_role; + +SELECT is( + cat_tools.relation__is_catalog('pg_catalog.pg_class'::regclass) + , true + , 'pg_catalog.pg_class is in pg_catalog schema' +); + +SELECT lives_ok($$CREATE TEMP TABLE test_temp_table()$$, 'Create temp table for testing'); + +SELECT is( + cat_tools.relation__is_catalog('test_temp_table'::regclass) + , false + , 'temp relation is not in pg_catalog schema' +); + +SELECT is( + cat_tools.relation__is_catalog(NULL) + , NULL + , 'NULL input returns NULL (STRICT function)' +); + +\i test/pgxntool/finish.sql + +-- vi: expandtab ts=2 sw=2 \ No newline at end of file diff --git a/test/sql/relation__is_temp.sql b/test/sql/relation__is_temp.sql new file mode 100644 index 0000000..d80c19e --- /dev/null +++ b/test/sql/relation__is_temp.sql @@ -0,0 +1,47 @@ +\set ECHO none + 
+\i test/setup.sql + +\set s cat_tools +\set f relation__is_temp + +SELECT plan(5); + +SET LOCAL ROLE :no_use_role; + +SELECT throws_ok( + format( + $$SELECT %I.%I( %L )$$ + , :'s', :'f' + , 'pg_catalog.pg_class' + ) + , '42501' + , NULL + , 'Verify public has no perms' +); + +SET LOCAL ROLE :use_role; + +SELECT is( + cat_tools.relation__is_temp('pg_catalog.pg_class'::regclass) + , false + , 'pg_catalog.pg_class is not a temp relation' +); + +SELECT lives_ok($$CREATE TEMP TABLE test_temp_table()$$, 'Create temp table for testing'); + +SELECT is( + cat_tools.relation__is_temp('test_temp_table'::regclass) + , true + , 'temp relation is correctly identified as temp' +); + +SELECT is( + cat_tools.relation__is_temp(NULL) + , NULL + , 'NULL input returns NULL (STRICT function)' +); + +\i test/pgxntool/finish.sql + +-- vi: expandtab ts=2 sw=2 \ No newline at end of file From 4addd8cd5f90df2cd2e484dbe34c7aeee4afd0a7 Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Wed, 3 Sep 2025 16:31:40 -0500 Subject: [PATCH 08/18] Fix relation__column_names test to use temporary table instead of pg_catalog.pg_class --- test/sql/relation__column_names.sql | 57 +++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 test/sql/relation__column_names.sql diff --git a/test/sql/relation__column_names.sql b/test/sql/relation__column_names.sql new file mode 100644 index 0000000..4d671c5 --- /dev/null +++ b/test/sql/relation__column_names.sql @@ -0,0 +1,57 @@ +\set ECHO none + +\i test/setup.sql + +\set s cat_tools +\set f relation__column_names + +SELECT plan(8); + +SET LOCAL ROLE :no_use_role; + +SELECT throws_ok( + format( + $$SELECT %I.%I( %L )$$ + , :'s', :'f' + , 'temp_test_table' + ) + , '42501' + , NULL + , 'Verify public has no perms' +); + +SET LOCAL ROLE :use_role; + +SELECT lives_ok($$CREATE TEMP TABLE temp_test_table(col1 int, col2 text, col3 boolean, col4 timestamp, col5 numeric)$$, 'Create temp table with multiple columns'); + +SELECT is( + 
cat_tools.relation__column_names('temp_test_table'::regclass) + , '{col1,col2,col3,col4,col5}'::text[] + , 'Temp table returns expected column names' +); + +SELECT lives_ok($$ALTER TABLE temp_test_table DROP COLUMN col3$$, 'Drop middle column from temp table'); + +SELECT is( + cat_tools.relation__column_names('temp_test_table'::regclass) + , '{col1,col2,col4,col5}'::text[] + , 'Temp table with dropped column returns expected column names' +); + +SELECT lives_ok($$CREATE TEMP TABLE test_table(id int, name text)$$, 'Create test table with columns'); + +SELECT is( + cat_tools.relation__column_names('test_table'::regclass) + , '{id,name}'::text[] + , 'Test table returns expected column names' +); + +SELECT is( + cat_tools.relation__column_names(NULL) + , NULL + , 'NULL input returns NULL (STRICT function)' +); + +\i test/pgxntool/finish.sql + +-- vi: expandtab ts=2 sw=2 \ No newline at end of file From b1afca2b77c616a51fd740c3a3bacd9847182709 Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Wed, 3 Sep 2025 16:33:10 -0500 Subject: [PATCH 09/18] Fix missing commits --- .gitignore | 1 + README.asc | 1 + sql/cat_tools.sql.in | 15 +++++++++++++++ test/expected/relation__column_names.out | 11 +++++++++++ test/expected/table__is_temp.out | 7 +++++++ test/expected/zzz_build.out | 2 ++ 6 files changed, 37 insertions(+) create mode 100644 test/expected/relation__column_names.out create mode 100644 test/expected/table__is_temp.out diff --git a/.gitignore b/.gitignore index 3b4aafa..adffc88 100644 --- a/.gitignore +++ b/.gitignore @@ -27,3 +27,4 @@ regression.out # Misc tmp/ .DS_Store +.claude/*.local.json diff --git a/README.asc b/README.asc index 31152f9..6791c11 100644 --- a/README.asc +++ b/README.asc @@ -36,6 +36,7 @@ Works on Postgres 9.3 and above. 
* `cat_tools.regprocedure(function_name, arguments)` - Returns regprocedure for function_name and it's full set of arguments * `cat_tools.relation__kind(relkind)` - Mapping from `pg_class.relkind` to a `cat_tools.relation_type` * `cat_tools.relation__relkind(relation_type)` - Mapping from `cat_tools.relation_type` to a `pg_class.relkind` value +* `cat_tools.relation__column_names(relation regclass)` - Returns an array of quoted column names for a relation in ordinal position order * `cat_tools.relation__is_catalog(relation regclass)` - Returns true if the relation is in the `pg_catalog` schema * `cat_tools.relation__is_temp(relation regclass)` - Returns true if the relation is a temporary table (lives in a schema that starts with 'pg_temp') * `cat_tools.trigger__args_as_text(text)` - Converts the arguments for a trigger function (as returned by `trigger__parse()`) to text (for backwards compatibility). diff --git a/sql/cat_tools.sql.in b/sql/cat_tools.sql.in index 1212429..6f3d79d 100644 --- a/sql/cat_tools.sql.in +++ b/sql/cat_tools.sql.in @@ -1242,6 +1242,21 @@ $body$ , 'Returns true if the relation is in the pg_catalog schema.' ); +SELECT __cat_tools.create_function( + 'cat_tools.relation__column_names' + , 'relation pg_catalog.regclass' + , $$text[] LANGUAGE sql STRICT STABLE$$ + , $body$ +SELECT array_agg(quote_ident(attname) ORDER BY attnum) +FROM pg_catalog.pg_attribute +WHERE attrelid = $1 + AND attnum > 0 + AND NOT attisdropped +$body$ + , 'cat_tools__usage' + , 'Returns an array of quoted column names for a relation in ordinal position order.' 
+); + @generated@ SELECT __cat_tools.create_function( diff --git a/test/expected/relation__column_names.out b/test/expected/relation__column_names.out new file mode 100644 index 0000000..3914b87 --- /dev/null +++ b/test/expected/relation__column_names.out @@ -0,0 +1,11 @@ +\set ECHO none +1..8 +ok 1 - Verify public has no perms +ok 2 - Create temp table with multiple columns +ok 3 - Temp table returns expected column names +ok 4 - Drop middle column from temp table +ok 5 - Temp table with dropped column returns expected column names +ok 6 - Create test table with columns +ok 7 - Test table returns expected column names +ok 8 - NULL input returns NULL (STRICT function) +# TRANSACTION INTENTIONALLY LEFT OPEN! diff --git a/test/expected/table__is_temp.out b/test/expected/table__is_temp.out new file mode 100644 index 0000000..c97490e --- /dev/null +++ b/test/expected/table__is_temp.out @@ -0,0 +1,7 @@ +\set ECHO none +1..4 +ok 1 - Verify public has no perms +ok 2 - pg_catalog.pg_class is not a temp table +ok 3 - temp table is correctly identified as temp +ok 4 - NULL input returns NULL (STRICT function) +# TRANSACTION INTENTIONALLY LEFT OPEN! diff --git a/test/expected/zzz_build.out b/test/expected/zzz_build.out index 215ca01..6655fea 100644 --- a/test/expected/zzz_build.out +++ b/test/expected/zzz_build.out @@ -104,6 +104,8 @@ + + From 3bde4a805e6a0e4a9544c462f09002e406381b8e Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Tue, 23 Sep 2025 11:57:15 -0500 Subject: [PATCH 10/18] Add function__arg_names() Also, refactor common code between it and function__arg_types() --- README.asc | 2 + sql/cat_tools.sql.in | 155 ++++++++++++++++++++++++++++++++---- test/expected/zzz_build.out | 7 +- test/setup.sql | 1 + test/sql/function.sql | 83 +++++++++++++++++++ 5 files changed, 228 insertions(+), 20 deletions(-) diff --git a/README.asc b/README.asc index 6791c11..1745299 100644 --- a/README.asc +++ b/README.asc @@ -31,6 +31,8 @@ Works on Postgres 9.3 and above. 
* `cat_tools.extension__schemas_unique(extension_names text/name[])` - Returns a unique array of schemas * `cat_tools.function__arg_types(arguments)` - Accepts full function argument string and returns regtype[] of IN/INOUT arguments * `cat_tools.function__arg_types_text(arguments)` - Version of `function__arg_types` that returns text +* `cat_tools.function__arg_names(arguments)` - Accepts full function argument string and returns text[] of IN/INOUT argument names +* `cat_tools.function__arg_names_text(arguments)` - Version of `function__arg_names` that returns text * `cat_tools.object__catalog(object_type)` - Returns catalog table that is used to store `object_type` objects * `cat_tools.object__reg_type(object_catalog)` - Returns the "reg" pseudotype (ie: regclass) associated with a system catalog (ie: pg_class) * `cat_tools.regprocedure(function_name, arguments)` - Returns regprocedure for function_name and it's full set of arguments diff --git a/sql/cat_tools.sql.in b/sql/cat_tools.sql.in index 6f3d79d..8088ab8 100644 --- a/sql/cat_tools.sql.in +++ b/sql/cat_tools.sql.in @@ -161,24 +161,40 @@ $body$; @generated@ -SELECT __cat_tools.create_function( - 'cat_tools.function__arg_types' - , $$arguments text$$ - , $$pg_catalog.regtype[] LANGUAGE plpgsql$$ - , $body$ +CREATE FUNCTION _cat_tools.function__arg_to_regprocedure( + arguments text + , function_suffix text + , api_function_name text +) RETURNS pg_catalog.regprocedure LANGUAGE plpgsql AS $body$ DECLARE - input_arg_types pg_catalog.regtype[]; - - c_template CONSTANT text := $fmt$CREATE FUNCTION pg_temp.cat_tools__function__arg_types__temp_function( + /* + * Template for creating a temporary function with the user-provided argument + * signature. This allows us to leverage PostgreSQL's parser to validate and + * extract argument information without permanently creating a function. + * Using plpgsql language for the temp function to handle any return type. 
+ */ + c_template CONSTANT text := $fmt$CREATE FUNCTION pg_temp.cat_tools__function__%s__temp_function( %s - ) RETURNS %s LANGUAGE plpgsql AS 'BEGIN NULL; END' + ) RETURNS %s LANGUAGE plpgsql AS 'BEGIN RETURN; END' $fmt$; temp_proc pg_catalog.regprocedure; sql text; BEGIN + /* + * Security check: Ensure current_user == session_user to detect SECURITY DEFINER context + * This prevents SQL injection attacks through elevated privileges. + */ + IF current_user != session_user THEN + RAISE EXCEPTION USING + ERRCODE = '28000' /* invalid_authorization_specification */ + , MESSAGE = 'potential use of SECURITY DEFINER detected' + , DETAIL = format('current_user is %s, session_user is %s', current_user, session_user) + , HINT = 'Helper functions must not be called from SECURITY DEFINER context.'; + END IF; sql := format( c_template + , function_suffix , arguments , 'void' ); @@ -191,6 +207,7 @@ BEGIN v_type := (regexp_matches( SQLERRM, 'function result type must be ([^ ]+) because of' ))[1]; sql := format( c_template + , function_suffix , arguments , v_type ); @@ -203,18 +220,59 @@ BEGIN * only one function with this name. The cast to regprocedure is for the sake * of the DROP down below. */ - EXECUTE $$SELECT 'pg_temp.cat_tools__function__arg_types__temp_function'::pg_catalog.regproc::pg_catalog.regprocedure$$ INTO temp_proc; + EXECUTE format( + $$SELECT 'pg_temp.cat_tools__function__%s__temp_function'::pg_catalog.regproc::pg_catalog.regprocedure$$ + , function_suffix + ) INTO temp_proc; + + RETURN temp_proc; +END +$body$; + +CREATE FUNCTION _cat_tools.function__drop_temp( + p_regprocedure pg_catalog.regprocedure + , api_function_name text +) RETURNS void LANGUAGE plpgsql AS $body$ +BEGIN + /* + * Security check: Ensure current_user == session_user to detect SECURITY DEFINER context + * This prevents SQL injection attacks through elevated privileges. 
+ */ + IF current_user != session_user THEN + RAISE EXCEPTION USING + ERRCODE = '28000' /* invalid_authorization_specification */ + , MESSAGE = 'potential use of SECURITY DEFINER detected' + , DETAIL = format('API function %s must not be called from a SECURITY DEFINER function', api_function_name) + , HINT = 'We detect SECURITY DEFINER context by comparing current_user and session_user, which can cause false positives if SET ROLE is used'; + END IF; + + EXECUTE 'DROP ROUTINE ' || p_regprocedure; +END +$body$; + +GRANT USAGE ON SCHEMA _cat_tools TO cat_tools__usage; +GRANT EXECUTE ON FUNCTION _cat_tools.function__arg_to_regprocedure(text, text, text) TO cat_tools__usage; +GRANT EXECUTE ON FUNCTION _cat_tools.function__drop_temp(pg_catalog.regprocedure, text) TO cat_tools__usage; + +@generated@ + +SELECT __cat_tools.create_function( + 'cat_tools.function__arg_types' + , $$arguments text$$ + , $$pg_catalog.regtype[] LANGUAGE plpgsql$$ + , $body$ +DECLARE + input_arg_types pg_catalog.regtype[]; + c_temp_proc CONSTANT pg_catalog.regprocedure := _cat_tools.function__arg_to_regprocedure(arguments, 'arg_types', 'cat_tools.function__arg_types'); +BEGIN SELECT INTO STRICT input_arg_types -- This is here to re-cast the array as 1-based instead of 0 based (better solutions welcome!) 
string_to_array(proargtypes::text,' ')::pg_catalog.regtype[] FROM pg_proc - WHERE oid = temp_proc - ; - -- NOTE: DROP may not accept all the argument options that CREATE does, so use temp_proc - EXECUTE format( - $fmt$DROP FUNCTION %s$fmt$ - , temp_proc - ); + WHERE oid = c_temp_proc::pg_catalog.regproc; + + -- Clean up the temporary function + PERFORM _cat_tools.function__drop_temp(c_temp_proc, 'cat_tools.function__arg_types'); RETURN input_arg_types; END @@ -227,6 +285,53 @@ $body$ @generated@ +SELECT __cat_tools.create_function( + 'cat_tools.function__arg_names' + , $$arguments text$$ + , $$text[] LANGUAGE plpgsql$$ + , $body$ +DECLARE + input_arg_names text[]; + c_temp_proc CONSTANT pg_catalog.regprocedure := _cat_tools.function__arg_to_regprocedure(arguments, 'arg_names', 'cat_tools.function__arg_names'); +BEGIN + -- Get argument names, filtering for IN/INOUT/VARIADIC only + SELECT INTO input_arg_names + CASE + WHEN proargmodes IS NULL THEN + -- All arguments are IN mode, return all names (converting empty strings to NULL) + array( + SELECT CASE WHEN proargnames[i] = '' THEN NULL ELSE proargnames[i] END + FROM generate_series(1, array_length(proargnames, 1)) AS s(i) + ) + ELSE + -- Filter names based on modes: 'i' (IN), 'b' (INOUT), 'v' (VARIADIC) + array( + SELECT + CASE + WHEN i <= array_length(proargnames, 1) AND proargnames[i] != '' THEN proargnames[i] + ELSE NULL + END + FROM generate_series(1, array_length(proargmodes, 1)) AS s(i) + WHERE proargmodes[i] IN ('i', 'b', 'v') + ) + END + FROM pg_proc + WHERE oid = c_temp_proc::pg_catalog.regproc; + + -- Clean up the temporary function + PERFORM _cat_tools.function__drop_temp(c_temp_proc, 'cat_tools.function__arg_names'); + + RETURN input_arg_names; +END +$body$ + , 'cat_tools__usage' + , 'Returns argument names for a function argument body as an array. Only + includes IN, INOUT, and VARIADIC arguments (matching function__arg_types + behavior). Unnamed arguments appear as NULL in the result array.' 
+); + +@generated@ + SELECT __cat_tools.create_function( 'cat_tools.function__arg_types_text' , $$arguments text$$ @@ -243,6 +348,22 @@ $body$ @generated@ +SELECT __cat_tools.create_function( + 'cat_tools.function__arg_names_text' + , $$arguments text$$ + , $$text LANGUAGE sql$$ + , $body$ +SELECT array_to_string(cat_tools.function__arg_names($1), ', ') +$body$ + , 'cat_tools__usage' + , 'Returns argument names for a function argument body as text. Only + includes IN, INOUT, and VARIADIC arguments (matching function__arg_types_text + behavior). Unnamed arguments appear as empty strings in the result.' + +); + +@generated@ + SELECT __cat_tools.create_function( 'cat_tools.regprocedure' , $$ diff --git a/test/expected/zzz_build.out b/test/expected/zzz_build.out index 6655fea..eb9aaed 100644 --- a/test/expected/zzz_build.out +++ b/test/expected/zzz_build.out @@ -103,10 +103,11 @@ - - - + + + + # TRANSACTION INTENTIONALLY LEFT OPEN! diff --git a/test/setup.sql b/test/setup.sql index f6735ed..104205d 100644 --- a/test/setup.sql +++ b/test/setup.sql @@ -33,4 +33,5 @@ SELECT array_to_string(array( ) $body$; + -- vi: expandtab ts=2 sw=2 diff --git a/test/sql/function.sql b/test/sql/function.sql index 198ae72..6828b09 100644 --- a/test/sql/function.sql +++ b/test/sql/function.sql @@ -6,6 +6,7 @@ CREATE TEMP VIEW func_calls AS SELECT * FROM (VALUES ('function__arg_types'::name, $$'x'$$::text) + , ('function__arg_names'::name, $$'x'$$::text) , ('regprocedure'::name, $$'x', 'x'$$) ) v(fname, args) ; @@ -16,8 +17,11 @@ SELECT plan( + (SELECT count(*)::int FROM func_calls) + 4 -- function__arg_types() + + 4 -- function__arg_names() + 2 -- regprocedure() + + 6 -- security definer checks (2 helpers + 4 callers) + + 1 -- current_user != session_user test ); SET LOCAL ROLE :no_use_role; @@ -35,7 +39,24 @@ SELECT throws_ok( FROM func_calls ; +/* + * Test that the security check works when current_user != session_user + * This tests what happens when functions are called from a 
different role context + */ SET LOCAL ROLE :use_role; +SELECT throws_ok( + $$SELECT cat_tools.function__arg_types('int')$$, + '28000', + 'potential use of SECURITY DEFINER detected', + 'Security check should prevent execution when current_user != session_user' +); + +/* + * The helper functions now have security checks that prevent execution when + * current_user != session_user (which happens with SET LOCAL ROLE). + * Reset to session_user for testing the actual functionality. + */ +SET SESSION AUTHORIZATION :use_role; SELECT is( :s.function__arg_types($$IN in_int int, INOUT inout_int_array int[], OUT out_char "char", anyelement, boolean DEFAULT false$$) @@ -61,6 +82,30 @@ SELECT is( , 'Verify function__arg_types() with only inputs' ); +SELECT is( + :s.function__arg_names($$IN in_int int, INOUT inout_int_array int[], OUT out_char "char", anyelement, boolean DEFAULT false$$) + , '{in_int,inout_int_array,NULL,NULL}'::text[] + , 'Verify function__arg_names() with INOUT and OUT' +); + +SELECT is( + :s.function__arg_names($$IN in_int int, INOUT inout_int_array int[], anyarray, anyelement, boolean DEFAULT false$$) + , '{in_int,inout_int_array,NULL,NULL,NULL}'::text[] + , 'Verify function__arg_names() with just INOUT' +); + +SELECT is( + :s.function__arg_names($$IN in_int int, OUT out_char "char", anyarray, anyelement, boolean DEFAULT false$$) + , '{in_int,NULL,NULL,NULL}'::text[] + , 'Verify function__arg_names() with just OUT' +); + +SELECT is( + :s.function__arg_names($$anyelement, "char", pg_class, VARIADIC boolean[]$$) + , '{NULL,NULL,NULL,NULL}'::text[] + , 'Verify function__arg_names() with only inputs' +); + \set args 'anyarray, OUT text, OUT "char", pg_class, int, VARIADIC boolean[]' SELECT lives_ok( format( @@ -76,6 +121,44 @@ SELECT is( , 'Verify regprocedure()' ); +/* + * CRITICAL SECURITY TESTS: Helper functions must NOT be SECURITY DEFINER + * If they were SECURITY DEFINER, they could be exploited for SQL injection attacks + * since they execute dynamic SQL 
with elevated privileges. + */ + +-- Test helper functions in _cat_tools schema +\set f function__arg_to_regprocedure +\set args_text 'text, text, text' +SELECT string_to_array(:'args_text', ', ') AS args \gset +SELECT isnt_definer('_cat_tools', :'f', :'args'::name[]); + +\set f function__drop_temp +\set args_text 'regprocedure, text' +SELECT string_to_array(:'args_text', ', ') AS args \gset +SELECT isnt_definer('_cat_tools', :'f', :'args'::name[]); + +-- Test public functions in cat_tools schema +\set f function__arg_types +\set args_text 'text' +SELECT string_to_array(:'args_text', ', ') AS args \gset +SELECT isnt_definer(:'s', :'f', :'args'::name[]); + +\set f function__arg_names +\set args_text 'text' +SELECT string_to_array(:'args_text', ', ') AS args \gset +SELECT isnt_definer(:'s', :'f', :'args'::name[]); + +\set f function__arg_types_text +\set args_text 'text' +SELECT string_to_array(:'args_text', ', ') AS args \gset +SELECT isnt_definer(:'s', :'f', :'args'::name[]); + +\set f function__arg_names_text +\set args_text 'text' +SELECT string_to_array(:'args_text', ', ') AS args \gset +SELECT isnt_definer(:'s', :'f', :'args'::name[]); + \i test/pgxntool/finish.sql -- vi: expandtab ts=2 sw=2 From c75eb40547be168e6852bdd88bd5610e13891fb1 Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Tue, 23 Sep 2025 13:22:02 -0500 Subject: [PATCH 11/18] Rename function__arg_* to routine__parse_arg_* --- README.asc | 13 ++++--- sql/cat_tools.sql.in | 69 +++++++++++++++++++++++++-------- test/expected/function.out | 38 +++++++++++++++---- test/expected/zzz_build.out | 7 ++++ test/sql/function.sql | 76 +++++++++++++++++++++++++------------ 5 files changed, 151 insertions(+), 52 deletions(-) diff --git a/README.asc b/README.asc index 1745299..afa7a00 100644 --- a/README.asc +++ b/README.asc @@ -29,13 +29,16 @@ Works on Postgres 9.3 and above. 
* `cat_tools.pg_extension__get(extension_name name)` - Returns cat_tools.pg_extension_v row for an extension * `cat_tools.extension__schemas(extension_names text/name[])` - Returns the schemas for the requested functions * `cat_tools.extension__schemas_unique(extension_names text/name[])` - Returns a unique array of schemas -* `cat_tools.function__arg_types(arguments)` - Accepts full function argument string and returns regtype[] of IN/INOUT arguments -* `cat_tools.function__arg_types_text(arguments)` - Version of `function__arg_types` that returns text -* `cat_tools.function__arg_names(arguments)` - Accepts full function argument string and returns text[] of IN/INOUT argument names -* `cat_tools.function__arg_names_text(arguments)` - Version of `function__arg_names` that returns text +* `cat_tools.routine__parse_arg_types(arguments)` - Accepts full function argument string and returns regtype[] of IN/INOUT arguments +* `cat_tools.routine__parse_arg_types_text(arguments)` - Version of `routine__parse_arg_types` that returns text +* `cat_tools.routine__parse_arg_names(arguments)` - Accepts full function argument string and returns text[] of IN/INOUT argument names +* `cat_tools.routine__parse_arg_names_text(arguments)` - Version of `routine__parse_arg_names` that returns text +* `cat_tools.function__arg_types(arguments)` - DEPRECATED: Use `routine__parse_arg_types` instead +* `cat_tools.function__arg_types_text(arguments)` - DEPRECATED: Use `routine__parse_arg_types_text` instead * `cat_tools.object__catalog(object_type)` - Returns catalog table that is used to store `object_type` objects * `cat_tools.object__reg_type(object_catalog)` - Returns the "reg" pseudotype (ie: regclass) associated with a system catalog (ie: pg_class) -* `cat_tools.regprocedure(function_name, arguments)` - Returns regprocedure for function_name and it's full set of arguments +* `cat_tools.regprocedure(routine_name, arguments)` - Returns regprocedure for routine_name and it's full set of 
arguments +* `cat_tools.regprocedure(routine_name)` - Returns regprocedure for routine_name. Throws an error if the routine is overloaded * `cat_tools.relation__kind(relkind)` - Mapping from `pg_class.relkind` to a `cat_tools.relation_type` * `cat_tools.relation__relkind(relation_type)` - Mapping from `cat_tools.relation_type` to a `pg_class.relkind` value * `cat_tools.relation__column_names(relation regclass)` - Returns an array of quoted column names for a relation in ordinal position order diff --git a/sql/cat_tools.sql.in b/sql/cat_tools.sql.in index 8088ab8..6c4e7ca 100644 --- a/sql/cat_tools.sql.in +++ b/sql/cat_tools.sql.in @@ -76,7 +76,7 @@ REVOKE ALL ON _cat_tools.pg_class_v FROM public; * function that we're about to create to create the real version of this * function. */ -CREATE FUNCTION cat_tools.function__arg_types_text(text +CREATE FUNCTION cat_tools.routine__parse_arg_types_text(text ) RETURNS text LANGUAGE sql AS 'SELECT $1'; CREATE FUNCTION __cat_tools.create_function( @@ -88,7 +88,7 @@ CREATE FUNCTION __cat_tools.create_function( , comment text DEFAULT NULL ) RETURNS void LANGUAGE plpgsql AS $body$ DECLARE - c_simple_args CONSTANT text := cat_tools.function__arg_types_text(args); + c_simple_args CONSTANT text := cat_tools.routine__parse_arg_types_text(args); create_template CONSTANT text := $template$ CREATE OR REPLACE FUNCTION %s( @@ -257,13 +257,13 @@ GRANT EXECUTE ON FUNCTION _cat_tools.function__drop_temp(pg_catalog.regprocedure @generated@ SELECT __cat_tools.create_function( - 'cat_tools.function__arg_types' + 'cat_tools.routine__parse_arg_types' , $$arguments text$$ , $$pg_catalog.regtype[] LANGUAGE plpgsql$$ , $body$ DECLARE input_arg_types pg_catalog.regtype[]; - c_temp_proc CONSTANT pg_catalog.regprocedure := _cat_tools.function__arg_to_regprocedure(arguments, 'arg_types', 'cat_tools.function__arg_types'); + c_temp_proc CONSTANT pg_catalog.regprocedure := _cat_tools.function__arg_to_regprocedure(arguments, 'arg_types', 
'cat_tools.routine__parse_arg_types'); BEGIN SELECT INTO STRICT input_arg_types -- This is here to re-cast the array as 1-based instead of 0 based (better solutions welcome!) @@ -272,7 +272,7 @@ BEGIN WHERE oid = c_temp_proc::pg_catalog.regproc; -- Clean up the temporary function - PERFORM _cat_tools.function__drop_temp(c_temp_proc, 'cat_tools.function__arg_types'); + PERFORM _cat_tools.function__drop_temp(c_temp_proc, 'cat_tools.routine__parse_arg_types'); RETURN input_arg_types; END @@ -286,13 +286,13 @@ $body$ @generated@ SELECT __cat_tools.create_function( - 'cat_tools.function__arg_names' + 'cat_tools.routine__parse_arg_names' , $$arguments text$$ , $$text[] LANGUAGE plpgsql$$ , $body$ DECLARE input_arg_names text[]; - c_temp_proc CONSTANT pg_catalog.regprocedure := _cat_tools.function__arg_to_regprocedure(arguments, 'arg_names', 'cat_tools.function__arg_names'); + c_temp_proc CONSTANT pg_catalog.regprocedure := _cat_tools.function__arg_to_regprocedure(arguments, 'arg_names', 'cat_tools.routine__parse_arg_names'); BEGIN -- Get argument names, filtering for IN/INOUT/VARIADIC only SELECT INTO input_arg_names @@ -319,25 +319,25 @@ BEGIN WHERE oid = c_temp_proc::pg_catalog.regproc; -- Clean up the temporary function - PERFORM _cat_tools.function__drop_temp(c_temp_proc, 'cat_tools.function__arg_names'); + PERFORM _cat_tools.function__drop_temp(c_temp_proc, 'cat_tools.routine__parse_arg_names'); RETURN input_arg_names; END $body$ , 'cat_tools__usage' , 'Returns argument names for a function argument body as an array. Only - includes IN, INOUT, and VARIADIC arguments (matching function__arg_types + includes IN, INOUT, and VARIADIC arguments (matching routine__parse_arg_types behavior). Unnamed arguments appear as NULL in the result array.' 
); @generated@ SELECT __cat_tools.create_function( - 'cat_tools.function__arg_types_text' + 'cat_tools.routine__parse_arg_types_text' , $$arguments text$$ , $$text LANGUAGE sql$$ , $body$ -SELECT array_to_string(cat_tools.function__arg_types($1), ', ') +SELECT array_to_string(cat_tools.routine__parse_arg_types($1), ', ') $body$ , 'cat_tools__usage' , 'Returns argument types for a function argument body as text. Unlike a @@ -349,21 +349,60 @@ $body$ @generated@ SELECT __cat_tools.create_function( - 'cat_tools.function__arg_names_text' + 'cat_tools.routine__parse_arg_names_text' , $$arguments text$$ , $$text LANGUAGE sql$$ , $body$ -SELECT array_to_string(cat_tools.function__arg_names($1), ', ') +SELECT array_to_string(cat_tools.routine__parse_arg_names($1), ', ') $body$ , 'cat_tools__usage' , 'Returns argument names for a function argument body as text. Only - includes IN, INOUT, and VARIADIC arguments (matching function__arg_types_text + includes IN, INOUT, and VARIADIC arguments (matching routine__parse_arg_types_text behavior). Unnamed arguments appear as empty strings in the result.' ); @generated@ +-- Deprecated wrapper functions for backwards compatibility +SELECT __cat_tools.create_function( + 'cat_tools.function__arg_types' + , $$arguments text$$ + , $$pg_catalog.regtype[] LANGUAGE plpgsql$$ + , $body$ +BEGIN + RAISE WARNING 'function__arg_types() is deprecated, use routine__parse_arg_types instead'; + + RETURN cat_tools.routine__parse_arg_types(arguments); +END +$body$ + , 'cat_tools__usage' + , 'DEPRECATED: Use routine__parse_arg_types instead. + Returns argument types for a function argument body as regtype[]. Only + includes IN, INOUT, and VARIADIC arguments.' 
+); + +@generated@ + +SELECT __cat_tools.create_function( + 'cat_tools.function__arg_types_text' + , $$arguments text$$ + , $$text LANGUAGE plpgsql$$ + , $body$ +BEGIN + RAISE WARNING 'function__arg_types_text() is deprecated, use routine__parse_arg_types_text instead'; + + RETURN cat_tools.routine__parse_arg_types_text(arguments); +END +$body$ + , 'cat_tools__usage' + , 'DEPRECATED: Use routine__parse_arg_types_text instead. + Returns argument types for a function argument body as text. Only + includes IN, INOUT, and VARIADIC arguments.' +); + +@generated@ + SELECT __cat_tools.create_function( 'cat_tools.regprocedure' , $$ @@ -374,7 +413,7 @@ SELECT __cat_tools.create_function( SELECT format( '%s(%s)' , $1 - , cat_tools.function__arg_types_text($2) + , cat_tools.routine__parse_arg_types_text($2) )::pg_catalog.regprocedure $body$ , 'cat_tools__usage' diff --git a/test/expected/function.out b/test/expected/function.out index 29dade1..afa4f3a 100644 --- a/test/expected/function.out +++ b/test/expected/function.out @@ -1,11 +1,35 @@ \set ECHO none -1..8 +1..24 ok 1 - Verify public has no perms ok 2 - Verify public has no perms -ok 3 - Verify function__arg_types() with INOUT and OUT -ok 4 - Verify function__arg_types() with just INOUT -ok 5 - Verify function__arg_types() with just OUT -ok 6 - Verify function__arg_types() with only inputs -ok 7 - Create pg_temp.test_function(anyarray, OUT text, OUT "char", pg_class, int, VARIADIC boolean[]) -ok 8 - Verify regprocedure() +ok 3 - Verify public has no perms +ok 4 - Security check should prevent execution when current_user != session_user +ok 5 - Verify routine__parse_arg_types() with INOUT and OUT +ok 6 - Verify routine__parse_arg_types() with just INOUT +ok 7 - Verify routine__parse_arg_types() with just OUT +ok 8 - Verify routine__parse_arg_types() with only inputs +ok 9 - Verify routine__parse_arg_names() with INOUT and OUT +ok 10 - Verify routine__parse_arg_names() with just INOUT +ok 11 - Verify 
routine__parse_arg_names() with just OUT +ok 12 - Verify routine__parse_arg_names() with only inputs +ok 13 - Create pg_temp.test_function(anyarray, OUT text, OUT "char", pg_class, int, VARIADIC boolean[]) +ok 14 - Verify regprocedure() +WARNING: 01000: function__arg_types() is deprecated, use routine__parse_arg_types instead +LOCATION: exec_stmt_raise, pl_exec.c:3879 +ok 15 - Verify function__arg_types() with INOUT and OUT +WARNING: 01000: function__arg_types() is deprecated, use routine__parse_arg_types instead +LOCATION: exec_stmt_raise, pl_exec.c:3879 +ok 16 - Verify function__arg_types() with simple args +WARNING: 01000: function__arg_types_text() is deprecated, use routine__parse_arg_types_text instead +LOCATION: exec_stmt_raise, pl_exec.c:3879 +ok 17 - Verify function__arg_types_text() with INOUT and OUT +WARNING: 01000: function__arg_types_text() is deprecated, use routine__parse_arg_types_text instead +LOCATION: exec_stmt_raise, pl_exec.c:3879 +ok 18 - Verify function__arg_types_text() with simple args +ok 19 - Function _cat_tools.function__arg_to_regprocedure(text, text, text) should not be security definer +ok 20 - Function _cat_tools.function__drop_temp(regprocedure, text) should not be security definer +ok 21 - Function cat_tools.routine__parse_arg_types(text) should not be security definer +ok 22 - Function cat_tools.routine__parse_arg_names(text) should not be security definer +ok 23 - Function cat_tools.routine__parse_arg_types_text(text) should not be security definer +ok 24 - Function cat_tools.routine__parse_arg_names_text(text) should not be security definer # TRANSACTION INTENTIONALLY LEFT OPEN! diff --git a/test/expected/zzz_build.out b/test/expected/zzz_build.out index eb9aaed..7cbc064 100644 --- a/test/expected/zzz_build.out +++ b/test/expected/zzz_build.out @@ -103,11 +103,18 @@ + + + + + + + # TRANSACTION INTENTIONALLY LEFT OPEN! 
diff --git a/test/sql/function.sql b/test/sql/function.sql index 6828b09..3627cfa 100644 --- a/test/sql/function.sql +++ b/test/sql/function.sql @@ -5,8 +5,8 @@ \set s cat_tools CREATE TEMP VIEW func_calls AS SELECT * FROM (VALUES - ('function__arg_types'::name, $$'x'$$::text) - , ('function__arg_names'::name, $$'x'$$::text) + ('routine__parse_arg_types'::name, $$'x'$$::text) + , ('routine__parse_arg_names'::name, $$'x'$$::text) , ('regprocedure'::name, $$'x', 'x'$$) ) v(fname, args) ; @@ -16,10 +16,11 @@ SELECT plan( 0 + (SELECT count(*)::int FROM func_calls) - + 4 -- function__arg_types() - + 4 -- function__arg_names() + + 4 -- routine__parse_arg_types() + + 4 -- routine__parse_arg_names() + 2 -- regprocedure() + + 4 -- deprecated function__arg_types() wrapper (2 more tests) + 6 -- security definer checks (2 helpers + 4 callers) + 1 -- current_user != session_user test ); @@ -45,7 +46,7 @@ SELECT throws_ok( */ SET LOCAL ROLE :use_role; SELECT throws_ok( - $$SELECT cat_tools.function__arg_types('int')$$, + $$SELECT cat_tools.routine__parse_arg_types('int')$$, '28000', 'potential use of SECURITY DEFINER detected', 'Security check should prevent execution when current_user != session_user' @@ -59,51 +60,51 @@ SELECT throws_ok( SET SESSION AUTHORIZATION :use_role; SELECT is( - :s.function__arg_types($$IN in_int int, INOUT inout_int_array int[], OUT out_char "char", anyelement, boolean DEFAULT false$$) + :s.routine__parse_arg_types($$IN in_int int, INOUT inout_int_array int[], OUT out_char "char", anyelement, boolean DEFAULT false$$) , '{int,int[],anyelement,boolean}'::regtype[] - , 'Verify function__arg_types() with INOUT and OUT' + , 'Verify routine__parse_arg_types() with INOUT and OUT' ); SELECT is( - :s.function__arg_types($$IN in_int int, INOUT inout_int_array int[], anyarray, anyelement, boolean DEFAULT false$$) + :s.routine__parse_arg_types($$IN in_int int, INOUT inout_int_array int[], anyarray, anyelement, boolean DEFAULT false$$) , 
'{int,int[],anyarray,anyelement,boolean}'::regtype[] - , 'Verify function__arg_types() with just INOUT' + , 'Verify routine__parse_arg_types() with just INOUT' ); SELECT is( - :s.function__arg_types($$IN in_int int, OUT out_char "char", anyarray, anyelement, boolean DEFAULT false$$) + :s.routine__parse_arg_types($$IN in_int int, OUT out_char "char", anyarray, anyelement, boolean DEFAULT false$$) , '{int,anyarray,anyelement,boolean}'::regtype[] - , 'Verify function__arg_types() with just OUT' + , 'Verify routine__parse_arg_types() with just OUT' ); SELECT is( - :s.function__arg_types($$anyelement, "char", pg_class, VARIADIC boolean[]$$) + :s.routine__parse_arg_types($$anyelement, "char", pg_class, VARIADIC boolean[]$$) , '{anyelement,"\"char\"",pg_class,boolean[]}'::regtype[] - , 'Verify function__arg_types() with only inputs' + , 'Verify routine__parse_arg_types() with only inputs' ); SELECT is( - :s.function__arg_names($$IN in_int int, INOUT inout_int_array int[], OUT out_char "char", anyelement, boolean DEFAULT false$$) + :s.routine__parse_arg_names($$IN in_int int, INOUT inout_int_array int[], OUT out_char "char", anyelement, boolean DEFAULT false$$) , '{in_int,inout_int_array,NULL,NULL}'::text[] - , 'Verify function__arg_names() with INOUT and OUT' + , 'Verify routine__parse_arg_names() with INOUT and OUT' ); SELECT is( - :s.function__arg_names($$IN in_int int, INOUT inout_int_array int[], anyarray, anyelement, boolean DEFAULT false$$) + :s.routine__parse_arg_names($$IN in_int int, INOUT inout_int_array int[], anyarray, anyelement, boolean DEFAULT false$$) , '{in_int,inout_int_array,NULL,NULL,NULL}'::text[] - , 'Verify function__arg_names() with just INOUT' + , 'Verify routine__parse_arg_names() with just INOUT' ); SELECT is( - :s.function__arg_names($$IN in_int int, OUT out_char "char", anyarray, anyelement, boolean DEFAULT false$$) + :s.routine__parse_arg_names($$IN in_int int, OUT out_char "char", anyarray, anyelement, boolean DEFAULT false$$) , 
'{in_int,NULL,NULL,NULL}'::text[] - , 'Verify function__arg_names() with just OUT' + , 'Verify routine__parse_arg_names() with just OUT' ); SELECT is( - :s.function__arg_names($$anyelement, "char", pg_class, VARIADIC boolean[]$$) + :s.routine__parse_arg_names($$anyelement, "char", pg_class, VARIADIC boolean[]$$) , '{NULL,NULL,NULL,NULL}'::text[] - , 'Verify function__arg_names() with only inputs' + , 'Verify routine__parse_arg_names() with only inputs' ); \set args 'anyarray, OUT text, OUT "char", pg_class, int, VARIADIC boolean[]' @@ -121,6 +122,31 @@ SELECT is( , 'Verify regprocedure()' ); +-- Test deprecated wrapper functions still work +SELECT is( + :s.function__arg_types($$IN in_int int, INOUT inout_int_array int[], OUT out_char "char", anyelement, boolean DEFAULT false$$) + , '{int,int[],anyelement,boolean}'::regtype[] + , 'Verify function__arg_types() with INOUT and OUT' +); + +SELECT is( + :s.function__arg_types($$int, text$$) + , '{int,text}'::regtype[] + , 'Verify function__arg_types() with simple args' +); + +SELECT is( + :s.function__arg_types_text($$IN in_int int, INOUT inout_int_array int[], OUT out_char "char", anyelement, boolean DEFAULT false$$) + , 'integer, integer[], anyelement, boolean' + , 'Verify function__arg_types_text() with INOUT and OUT' +); + +SELECT is( + :s.function__arg_types_text($$int, text$$) + , 'integer, text' + , 'Verify function__arg_types_text() with simple args' +); + /* * CRITICAL SECURITY TESTS: Helper functions must NOT be SECURITY DEFINER * If they were SECURITY DEFINER, they could be exploited for SQL injection attacks @@ -139,22 +165,22 @@ SELECT string_to_array(:'args_text', ', ') AS args \gset SELECT isnt_definer('_cat_tools', :'f', :'args'::name[]); -- Test public functions in cat_tools schema -\set f function__arg_types +\set f routine__parse_arg_types \set args_text 'text' SELECT string_to_array(:'args_text', ', ') AS args \gset SELECT isnt_definer(:'s', :'f', :'args'::name[]); -\set f function__arg_names +\set f 
routine__parse_arg_names \set args_text 'text' SELECT string_to_array(:'args_text', ', ') AS args \gset SELECT isnt_definer(:'s', :'f', :'args'::name[]); -\set f function__arg_types_text +\set f routine__parse_arg_types_text \set args_text 'text' SELECT string_to_array(:'args_text', ', ') AS args \gset SELECT isnt_definer(:'s', :'f', :'args'::name[]); -\set f function__arg_names_text +\set f routine__parse_arg_names_text \set args_text 'text' SELECT string_to_array(:'args_text', ', ') AS args \gset SELECT isnt_definer(:'s', :'f', :'args'::name[]); From 262434fd43148818962dbf4eb9460107c9d56ffd Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Tue, 23 Sep 2025 14:56:31 -0500 Subject: [PATCH 12/18] Remove errant function from README --- README.asc | 1 - 1 file changed, 1 deletion(-) diff --git a/README.asc b/README.asc index afa7a00..2ecb5ee 100644 --- a/README.asc +++ b/README.asc @@ -38,7 +38,6 @@ Works on Postgres 9.3 and above. * `cat_tools.object__catalog(object_type)` - Returns catalog table that is used to store `object_type` objects * `cat_tools.object__reg_type(object_catalog)` - Returns the "reg" pseudotype (ie: regclass) associated with a system catalog (ie: pg_class) * `cat_tools.regprocedure(routine_name, arguments)` - Returns regprocedure for routine_name and it's full set of arguments -* `cat_tools.regprocedure(routine_name)` - Returns regprocedure for routine_name. 
Throws an error if the routine is overloaded * `cat_tools.relation__kind(relkind)` - Mapping from `pg_class.relkind` to a `cat_tools.relation_type` * `cat_tools.relation__relkind(relation_type)` - Mapping from `cat_tools.relation_type` to a `pg_class.relkind` value * `cat_tools.relation__column_names(relation regclass)` - Returns an array of quoted column names for a relation in ordinal position order From 6ad3efb6ed9e6d94df161b7eae18c149c140164e Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Thu, 25 Sep 2025 15:37:33 -0500 Subject: [PATCH 13/18] Refactor routine__arg_names() and reorganize file structure - Eliminate duplicate NULL array creation logic by restructuring CASE statement - Use pronargs instead of parsing proargtypes text for array size - Move data type definitions and mapping functions to beginning of file - Improve code organization and readability --- README.asc | 45 +++- sql/cat_tools.sql.in | 429 ++++++++++++++++++++++---------- test/expected/function.out | 40 ++- test/expected/relation_type.out | 20 +- test/expected/zzz_build.out | 16 ++ test/sql/function.sql | 109 ++++++++ test/sql/relation_type.sql | 14 +- 7 files changed, 502 insertions(+), 171 deletions(-) diff --git a/README.asc b/README.asc index 2ecb5ee..2238bd2 100644 --- a/README.asc +++ b/README.asc @@ -18,8 +18,13 @@ Works on Postgres 9.3 and above. 
* `cat_tools.constraint_type` - Types of constraints (`domain constraint` or `table_constraint`) * `cat_tools.relation_type` - Types of objects stored in `pg_class` * `cat_tools.relation_relkind` - Valid values for `pg_class.relkind` +* `cat_tools.routine_type` - Types of routines stored in `pg_proc` +* `cat_tools.routine_argument_mode` - Argument modes for function/procedure parameters +* `cat_tools.routine_volatility` - Volatility levels for functions/procedures` +* `cat_tools.routine_parallel_safety` - Parallel safety levels for functions/procedures +* `cat_tools.routine_argument` - Detailed information about a single function/procedure argument -== Functions +== General Introspection Functions * `cat_tools.currval(table, column)` - Returns current value for a sequence owned by a column * `cat_tools.enum_range(regtype)` - Returns valid values for an ENUM as an array @@ -29,26 +34,46 @@ Works on Postgres 9.3 and above. * `cat_tools.pg_extension__get(extension_name name)` - Returns cat_tools.pg_extension_v row for an extension * `cat_tools.extension__schemas(extension_names text/name[])` - Returns the schemas for the requested functions * `cat_tools.extension__schemas_unique(extension_names text/name[])` - Returns a unique array of schemas -* `cat_tools.routine__parse_arg_types(arguments)` - Accepts full function argument string and returns regtype[] of IN/INOUT arguments -* `cat_tools.routine__parse_arg_types_text(arguments)` - Version of `routine__parse_arg_types` that returns text -* `cat_tools.routine__parse_arg_names(arguments)` - Accepts full function argument string and returns text[] of IN/INOUT argument names -* `cat_tools.routine__parse_arg_names_text(arguments)` - Version of `routine__parse_arg_names` that returns text -* `cat_tools.function__arg_types(arguments)` - DEPRECATED: Use `routine__parse_arg_types` instead -* `cat_tools.function__arg_types_text(arguments)` - DEPRECATED: Use `routine__parse_arg_types_text` instead * 
`cat_tools.object__catalog(object_type)` - Returns catalog table that is used to store `object_type` objects * `cat_tools.object__reg_type(object_catalog)` - Returns the "reg" pseudotype (ie: regclass) associated with a system catalog (ie: pg_class) -* `cat_tools.regprocedure(routine_name, arguments)` - Returns regprocedure for routine_name and it's full set of arguments -* `cat_tools.relation__kind(relkind)` - Mapping from `pg_class.relkind` to a `cat_tools.relation_type` -* `cat_tools.relation__relkind(relation_type)` - Mapping from `cat_tools.relation_type` to a `pg_class.relkind` value * `cat_tools.relation__column_names(relation regclass)` - Returns an array of quoted column names for a relation in ordinal position order * `cat_tools.relation__is_catalog(relation regclass)` - Returns true if the relation is in the `pg_catalog` schema * `cat_tools.relation__is_temp(relation regclass)` - Returns true if the relation is a temporary table (lives in a schema that starts with 'pg_temp') + +== Routine / Function / Procedure Functions + +* `cat_tools.routine__parse_arg_types(arguments)` - Accepts full function argument string and returns regtype[] of IN/INOUT arguments +* `cat_tools.routine__parse_arg_types_text(arguments)` - Version of `routine__parse_arg_types` that returns text +* `cat_tools.routine__parse_arg_names(arguments)` - Accepts full function argument string and returns text[] of IN/INOUT argument names +* `cat_tools.routine__parse_arg_names_text(arguments)` - Version of `routine__parse_arg_names` that returns text +* `cat_tools.routine__arg_types(regprocedure)` - Returns argument types for a function as regtype[] +* `cat_tools.routine__arg_types_text(regprocedure)` - Version of `routine__arg_types` that returns text +* `cat_tools.routine__arg_names(regprocedure)` - Returns argument names for a function as text[] +* `cat_tools.routine__arg_names_text(regprocedure)` - Version of `routine__arg_names` that returns text +* `cat_tools.regprocedure(routine_name, 
arguments)` - Returns regprocedure for routine_name and it's full set of arguments + +== Trigger Functions + * `cat_tools.trigger__args_as_text(text)` - Converts the arguments for a trigger function (as returned by `trigger__parse()`) to text (for backwards compatibility). * `cat_tools.trigger__get_oid(trigger_table, trigger_name)` - oid of a trigger. Throws error if trigger doesn't exits. * `cat_tools.trigger__get_oid__loose(trigger_table, trigger_name)` - oid of a trigger. Does _not_ throw error if trigger doesn't exits. * `cat_tools.trigger__parse(trigger oid)` - Returns information about a trigger * `cat_tools.trigger__parse(table_name regclass, trigger_name text)` - Returns information about a trigger +== Mapping Functions + +* `cat_tools.relation__kind(relkind)` - Mapping from `pg_class.relkind` to a `cat_tools.relation_type` +* `cat_tools.relation__relkind(relation_type)` - Mapping from `cat_tools.relation_type` to a `pg_class.relkind` value +* `cat_tools.routine__type(prokind)` - Mapping from `pg_proc.prokind` to `cat_tools.routine_type` +* `cat_tools.routine__argument_mode(mode)` - Mapping from `pg_proc.proargmodes` element to `cat_tools.routine_argument_mode` +* `cat_tools.routine__volatility(volatile)` - Mapping from `pg_proc.provolatile` to `cat_tools.routine_volatility` +* `cat_tools.routine__parallel_safety(parallel)` - Mapping from `pg_proc.proparallel` to `cat_tools.routine_parallel_safety` + +== Deprecated Functions + +* `cat_tools.function__arg_types(arguments)` - DEPRECATED: Use `routine__parse_arg_types` instead +* `cat_tools.function__arg_types_text(arguments)` - DEPRECATED: Use `routine__parse_arg_types_text` instead + == Views WARNING: These views may eventually move into a separate extension! 
diff --git a/sql/cat_tools.sql.in b/sql/cat_tools.sql.in index 6c4e7ca..afab1f7 100644 --- a/sql/cat_tools.sql.in +++ b/sql/cat_tools.sql.in @@ -256,25 +256,310 @@ GRANT EXECUTE ON FUNCTION _cat_tools.function__drop_temp(pg_catalog.regprocedure @generated@ +-- Data type definitions +CREATE TYPE cat_tools.constraint_type AS ENUM( + 'domain constraint', 'table constraint' +); +COMMENT ON TYPE cat_tools.constraint_type IS $$Descriptive names for every type of Postgres object (table, operator, rule, etc)$$; + +CREATE TYPE cat_tools.procedure_type AS ENUM( + 'aggregate', 'function' +); +COMMENT ON TYPE cat_tools.procedure_type IS $$Types of constraints (`domain constraint` or `table_constraint`)$$; + +CREATE TYPE cat_tools.relation_type AS ENUM( + 'table' + , 'index' + , 'sequence' + , 'toast table' + , 'view' + , 'materialized view' + , 'composite type' + , 'foreign table' + , 'partitioned table' + , 'partitioned index' +); +COMMENT ON TYPE cat_tools.relation_type IS $$Types of objects stored in `pg_class`$$; + +CREATE TYPE cat_tools.relation_relkind AS ENUM( + 'r' + , 'i' + , 'S' + , 't' + , 'v' + , 'c' + , 'f' + , 'm' + , 'p' + , 'I' +); +COMMENT ON TYPE cat_tools.relation_relkind IS $$Valid values for `pg_class.relkind`$$; + +CREATE TYPE cat_tools.routine_type AS ENUM( + 'function' + , 'procedure' + , 'aggregate' + , 'window' +); +COMMENT ON TYPE cat_tools.routine_type IS $$Types of routines stored in `pg_proc`$$; + +CREATE TYPE cat_tools.routine_argument_mode AS ENUM( + 'in' + , 'out' + , 'inout' + , 'variadic' + , 'table' +); +COMMENT ON TYPE cat_tools.routine_argument_mode IS $$Argument modes for function/procedure parameters$$; + +CREATE TYPE cat_tools.routine_volatility AS ENUM( + 'immutable' + , 'stable' + , 'volatile' +); +COMMENT ON TYPE cat_tools.routine_volatility IS $$Volatility levels for functions/procedures$$; + +CREATE TYPE cat_tools.routine_parallel_safety AS ENUM( + 'unsafe' + , 'restricted' + , 'safe' +); +COMMENT ON TYPE 
cat_tools.routine_parallel_safety IS $$Parallel safety levels for functions/procedures$$; + +CREATE TYPE cat_tools.routine_argument AS ( + argument_name text + , argument_type pg_catalog.regtype + , argument_mode cat_tools.routine_argument_mode + , argument_default text +); +COMMENT ON TYPE cat_tools.routine_argument IS $$Detailed information about a single function/procedure argument$$; + +-- Mapping functions +SELECT __cat_tools.create_function( + 'cat_tools.relation__kind' + , 'relkind cat_tools.relation_relkind' + , 'cat_tools.relation_type LANGUAGE sql STRICT IMMUTABLE' + , $body$ +SELECT CASE relkind + WHEN 'r' THEN 'table' + WHEN 'i' THEN 'index' + WHEN 'S' THEN 'sequence' + WHEN 't' THEN 'toast table' + WHEN 'v' THEN 'view' + WHEN 'c' THEN 'materialized view' + WHEN 'f' THEN 'composite type' + WHEN 'm' THEN 'foreign table' + WHEN 'p' THEN 'partitioned table' + WHEN 'I' THEN 'partitioned index' +END::cat_tools.relation_type +$body$ + , 'cat_tools__usage' + , 'Mapping from to a ' +); + +SELECT __cat_tools.create_function( + 'cat_tools.relation__relkind' + , 'kind cat_tools.relation_type' + , 'cat_tools.relation_relkind LANGUAGE sql STRICT IMMUTABLE' + , $body$ +SELECT CASE kind + WHEN 'table' THEN 'r' + WHEN 'index' THEN 'i' + WHEN 'sequence' THEN 'S' + WHEN 'toast table' THEN 't' + WHEN 'view' THEN 'v' + WHEN 'materialized view' THEN 'c' + WHEN 'composite type' THEN 'f' + WHEN 'foreign table' THEN 'm' + WHEN 'partitioned table' THEN 'p' + WHEN 'partitioned index' THEN 'I' +END::cat_tools.relation_relkind +$body$ + , 'cat_tools__usage' + , 'Mapping from to a value' +); + +SELECT __cat_tools.create_function( + 'cat_tools.relation__relkind' + , 'kind text' + , 'cat_tools.relation_relkind LANGUAGE sql STRICT IMMUTABLE' + , $body$SELECT cat_tools.relation__relkind(kind::cat_tools.relation_type)$body$ + , 'cat_tools__usage' + , 'Mapping from to a value' +); + +SELECT __cat_tools.create_function( + 'cat_tools.relation__kind' + , 'relkind text' + , 
'cat_tools.relation_type LANGUAGE sql STRICT IMMUTABLE' + , $body$SELECT cat_tools.relation__kind(relkind::cat_tools.relation_relkind)$body$ + , 'cat_tools__usage' + , 'Mapping from to a value' +); + +SELECT __cat_tools.create_function( + 'cat_tools.routine__type' + , 'prokind "char"' + , 'cat_tools.routine_type LANGUAGE sql STRICT IMMUTABLE PARALLEL SAFE' + , $body$ +SELECT CASE prokind + WHEN 'f' THEN 'function' + WHEN 'p' THEN 'procedure' + WHEN 'a' THEN 'aggregate' + WHEN 'w' THEN 'window' +END::cat_tools.routine_type +$body$ + , 'cat_tools__usage' + , 'Mapping from pg_proc.prokind to cat_tools.routine_type' +); + +SELECT __cat_tools.create_function( + 'cat_tools.routine__argument_mode' + , 'proargmode "char"' + , 'cat_tools.routine_argument_mode LANGUAGE sql STRICT IMMUTABLE PARALLEL SAFE' + , $body$ +SELECT CASE proargmode + WHEN 'i' THEN 'in' + WHEN 'o' THEN 'out' + WHEN 'b' THEN 'inout' + WHEN 'v' THEN 'variadic' + WHEN 't' THEN 'table' +END::cat_tools.routine_argument_mode +$body$ + , 'cat_tools__usage' + , 'Mapping from pg_proc.proargmodes element to cat_tools.routine_argument_mode' +); + +SELECT __cat_tools.create_function( + 'cat_tools.routine__volatility' + , 'provolatile "char"' + , 'cat_tools.routine_volatility LANGUAGE sql STRICT IMMUTABLE PARALLEL SAFE' + , $body$ +SELECT CASE provolatile + WHEN 'i' THEN 'immutable' + WHEN 's' THEN 'stable' + WHEN 'v' THEN 'volatile' +END::cat_tools.routine_volatility +$body$ + , 'cat_tools__usage' + , 'Mapping from pg_proc.provolatile to cat_tools.routine_volatility' +); + +SELECT __cat_tools.create_function( + 'cat_tools.routine__parallel_safety' + , 'proparallel "char"' + , 'cat_tools.routine_parallel_safety LANGUAGE sql STRICT IMMUTABLE PARALLEL SAFE' + , $body$ +SELECT CASE proparallel + WHEN 'u' THEN 'unsafe' + WHEN 'r' THEN 'restricted' + WHEN 's' THEN 'safe' +END::cat_tools.routine_parallel_safety +$body$ + , 'cat_tools__usage' + , 'Mapping from pg_proc.proparallel to cat_tools.routine_parallel_safety' +); 
+ +@generated@ + +SELECT __cat_tools.create_function( + 'cat_tools.routine__arg_types' + , $$func pg_catalog.regprocedure$$ + , $$pg_catalog.regtype[] LANGUAGE sql STABLE$$ + , $body$ +SELECT string_to_array(proargtypes::text,' ')::pg_catalog.regtype[] +FROM pg_proc +WHERE oid = $1::pg_catalog.regproc +$body$ + , 'cat_tools__usage' + , 'Returns all argument types for a function as an array of regtype' +); + +@generated@ + +SELECT __cat_tools.create_function( + 'cat_tools.routine__arg_names' + , $$func pg_catalog.regprocedure$$ + , $$text[] LANGUAGE sql STABLE$$ + , $body$ +SELECT + CASE + WHEN proargnames IS NULL THEN + -- No named arguments, return array of NULLs matching proargtypes length + CASE + WHEN pronargs > 0 THEN + array_fill(NULL::text, ARRAY[pronargs]) + ELSE + '{}'::text[] + END + WHEN proargmodes IS NULL THEN + -- All arguments are IN mode, proargnames and proargtypes align + array( + SELECT CASE WHEN name = '' THEN NULL ELSE name END + FROM unnest(proargnames) AS name + ) + ELSE + -- Mixed argument modes, need to filter names to match proargtypes + array( + SELECT + CASE + WHEN i <= array_length(proargnames, 1) AND proargnames[i] != '' THEN proargnames[i] + ELSE NULL + END + FROM unnest(proargmodes) WITH ORDINALITY AS t(mode, i) + WHERE mode IN ('i', 'b', 'v') + ) + END +FROM pg_proc +WHERE oid = $1::pg_catalog.regproc +$body$ + , 'cat_tools__usage' + , 'Returns all argument names for a function as an array of text. Empty strings are converted to NULL.' 
+); + +@generated@ + +SELECT __cat_tools.create_function( + 'cat_tools.routine__arg_types_text' + , $$func pg_catalog.regprocedure$$ + , $$text LANGUAGE sql STABLE$$ + , $body$ +SELECT array_to_string(cat_tools.routine__arg_types($1), ', ') +$body$ + , 'cat_tools__usage' + , 'Returns all argument types for a function as a comma-separated text string' +); + +@generated@ + +SELECT __cat_tools.create_function( + 'cat_tools.routine__arg_names_text' + , $$func pg_catalog.regprocedure$$ + , $$text LANGUAGE sql STABLE$$ + , $body$ +SELECT array_to_string(cat_tools.routine__arg_names($1), ', ') +$body$ + , 'cat_tools__usage' + , 'Returns all argument names for a function as a comma-separated text string' +); + +@generated@ + SELECT __cat_tools.create_function( 'cat_tools.routine__parse_arg_types' , $$arguments text$$ , $$pg_catalog.regtype[] LANGUAGE plpgsql$$ , $body$ DECLARE - input_arg_types pg_catalog.regtype[]; c_temp_proc CONSTANT pg_catalog.regprocedure := _cat_tools.function__arg_to_regprocedure(arguments, 'arg_types', 'cat_tools.routine__parse_arg_types'); + result pg_catalog.regtype[]; BEGIN - SELECT INTO STRICT input_arg_types - -- This is here to re-cast the array as 1-based instead of 0 based (better solutions welcome!) 
- string_to_array(proargtypes::text,' ')::pg_catalog.regtype[] - FROM pg_proc - WHERE oid = c_temp_proc::pg_catalog.regproc; + result := cat_tools.routine__arg_types(c_temp_proc); -- Clean up the temporary function PERFORM _cat_tools.function__drop_temp(c_temp_proc, 'cat_tools.routine__parse_arg_types'); - RETURN input_arg_types; + RETURN result; END $body$ , 'cat_tools__usage' @@ -291,37 +576,15 @@ SELECT __cat_tools.create_function( , $$text[] LANGUAGE plpgsql$$ , $body$ DECLARE - input_arg_names text[]; c_temp_proc CONSTANT pg_catalog.regprocedure := _cat_tools.function__arg_to_regprocedure(arguments, 'arg_names', 'cat_tools.routine__parse_arg_names'); + result text[]; BEGIN - -- Get argument names, filtering for IN/INOUT/VARIADIC only - SELECT INTO input_arg_names - CASE - WHEN proargmodes IS NULL THEN - -- All arguments are IN mode, return all names (converting empty strings to NULL) - array( - SELECT CASE WHEN proargnames[i] = '' THEN NULL ELSE proargnames[i] END - FROM generate_series(1, array_length(proargnames, 1)) AS s(i) - ) - ELSE - -- Filter names based on modes: 'i' (IN), 'b' (INOUT), 'v' (VARIADIC) - array( - SELECT - CASE - WHEN i <= array_length(proargnames, 1) AND proargnames[i] != '' THEN proargnames[i] - ELSE NULL - END - FROM generate_series(1, array_length(proargmodes, 1)) AS s(i) - WHERE proargmodes[i] IN ('i', 'b', 'v') - ) - END - FROM pg_proc - WHERE oid = c_temp_proc::pg_catalog.regproc; + result := cat_tools.routine__arg_names(c_temp_proc); -- Clean up the temporary function PERFORM _cat_tools.function__drop_temp(c_temp_proc, 'cat_tools.routine__parse_arg_names'); - RETURN input_arg_names; + RETURN result; END $body$ , 'cat_tools__usage' @@ -425,42 +688,6 @@ $body$ @generated@ -CREATE TYPE cat_tools.constraint_type AS ENUM( - 'domain constraint', 'table constraint' -); -COMMENT ON TYPE cat_tools.constraint_type IS $$Descriptive names for every type of Postgres object (table, operator, rule, etc)$$; -CREATE TYPE cat_tools.procedure_type 
AS ENUM( - 'aggregate', 'function' -); -COMMENT ON TYPE cat_tools.procedure_type IS $$Types of constraints (`domain constraint` or `table_constraint`)$$; - -CREATE TYPE cat_tools.relation_type AS ENUM( - 'table' - , 'index' - , 'sequence' - , 'toast table' - , 'view' - , 'materialized view' - , 'composite type' - , 'foreign table' - , 'partitioned table' - , 'partitioned index' -); -COMMENT ON TYPE cat_tools.relation_type IS $$Types of objects stored in `pg_class`$$; - -CREATE TYPE cat_tools.relation_relkind AS ENUM( - 'r' - , 'i' - , 'S' - , 't' - , 'v' - , 'c' - , 'f' - , 'm' - , 'p' - , 'I' -); -COMMENT ON TYPE cat_tools.relation_relkind IS $$Valid values for `pg_class.relkind`$$; @generated@ @@ -809,68 +1036,6 @@ $body$ @generated@ -SELECT __cat_tools.create_function( - 'cat_tools.relation__kind' - , 'relkind cat_tools.relation_relkind' - , 'cat_tools.relation_type LANGUAGE sql STRICT IMMUTABLE' - , $body$ -SELECT CASE relkind - WHEN 'r' THEN 'table' - WHEN 'i' THEN 'index' - WHEN 'S' THEN 'sequence' - WHEN 't' THEN 'toast table' - WHEN 'v' THEN 'view' - WHEN 'c' THEN 'materialized view' - WHEN 'f' THEN 'composite type' - WHEN 'm' THEN 'foreign table' - WHEN 'p' THEN 'partitioned table' - WHEN 'I' THEN 'partitioned index' -END::cat_tools.relation_type -$body$ - , 'cat_tools__usage' - , 'Mapping from to a ' -); - -SELECT __cat_tools.create_function( - 'cat_tools.relation__relkind' - , 'kind cat_tools.relation_type' - , 'cat_tools.relation_relkind LANGUAGE sql STRICT IMMUTABLE' - , $body$ -SELECT CASE kind - WHEN 'table' THEN 'r' - WHEN 'index' THEN 'i' - WHEN 'sequence' THEN 'S' - WHEN 'toast table' THEN 't' - WHEN 'view' THEN 'v' - WHEN 'materialized view' THEN 'c' - WHEN 'composite type' THEN 'f' - WHEN 'foreign table' THEN 'm' - WHEN 'partitioned table' THEN 'p' - WHEN 'partitioned index' THEN 'I' -END::cat_tools.relation_relkind -$body$ - , 'cat_tools__usage' - , 'Mapping from to a value' -); - -@generated@ - -SELECT __cat_tools.create_function( - 
'cat_tools.relation__relkind' - , 'kind text' - , 'cat_tools.relation_relkind LANGUAGE sql STRICT IMMUTABLE' - , $body$SELECT cat_tools.relation__relkind(kind::cat_tools.relation_type)$body$ - , 'cat_tools__usage' - , 'Mapping from to a value' -); -SELECT __cat_tools.create_function( - 'cat_tools.relation__kind' - , 'relkind text' - , 'cat_tools.relation_type LANGUAGE sql STRICT IMMUTABLE' - , $body$SELECT cat_tools.relation__kind(relkind::cat_tools.relation_relkind)$body$ - , 'cat_tools__usage' - , 'Mapping from to a value' -); @generated@ @@ -1002,8 +1167,8 @@ SELECT __cat_tools.create_function( , $$ SELECT ARRAY( SELECT a.attname - FROM pg_catalog.pg_attribute a - JOIN generate_series(1, array_upper($2, 1)) s(i) ON a.attnum = $2[i] + FROM unnest($2) WITH ORDINALITY AS t(attnum, i) + JOIN pg_catalog.pg_attribute a ON a.attnum = t.attnum WHERE attrelid = $1 ORDER BY i ) diff --git a/test/expected/function.out b/test/expected/function.out index afa4f3a..7558a42 100644 --- a/test/expected/function.out +++ b/test/expected/function.out @@ -1,5 +1,5 @@ \set ECHO none -1..24 +1..40 ok 1 - Verify public has no perms ok 2 - Verify public has no perms ok 3 - Verify public has no perms @@ -13,23 +13,39 @@ ok 10 - Verify routine__parse_arg_names() with just INOUT ok 11 - Verify routine__parse_arg_names() with just OUT ok 12 - Verify routine__parse_arg_names() with only inputs ok 13 - Create pg_temp.test_function(anyarray, OUT text, OUT "char", pg_class, int, VARIADIC boolean[]) -ok 14 - Verify regprocedure() +ok 14 - Verify routine__arg_types() returns all argument types +ok 15 - Verify routine__arg_types() with IN arguments only +ok 16 - Verify routine__arg_types() with no arguments +ok 17 - Verify routine__arg_types() with VARIADIC argument +ok 18 - Verify routine__arg_names() returns argument names (unnamed function) +ok 19 - Create pg_temp.named_function with named arguments +ok 20 - Verify routine__arg_names() with named arguments +ok 21 - Verify routine__arg_names() 
with no arguments +ok 22 - Verify routine__arg_types_text() formatting +ok 23 - Verify routine__arg_types_text() with simple types +ok 24 - Verify routine__arg_types_text() with no arguments +ok 25 - Verify routine__arg_types_text() with VARIADIC +ok 26 - Verify routine__arg_names_text() formatting +ok 27 - Verify routine__arg_names_text() with unnamed arguments +ok 28 - Verify routine__arg_names_text() with built-in function +ok 29 - Verify routine__arg_names_text() with no arguments +ok 30 - Verify regprocedure() WARNING: 01000: function__arg_types() is deprecated, use routine__parse_arg_types instead LOCATION: exec_stmt_raise, pl_exec.c:3879 -ok 15 - Verify function__arg_types() with INOUT and OUT +ok 31 - Verify function__arg_types() with INOUT and OUT WARNING: 01000: function__arg_types() is deprecated, use routine__parse_arg_types instead LOCATION: exec_stmt_raise, pl_exec.c:3879 -ok 16 - Verify function__arg_types() with simple args +ok 32 - Verify function__arg_types() with simple args WARNING: 01000: function__arg_types_text() is deprecated, use routine__parse_arg_types_text instead LOCATION: exec_stmt_raise, pl_exec.c:3879 -ok 17 - Verify function__arg_types_text() with INOUT and OUT +ok 33 - Verify function__arg_types_text() with INOUT and OUT WARNING: 01000: function__arg_types_text() is deprecated, use routine__parse_arg_types_text instead LOCATION: exec_stmt_raise, pl_exec.c:3879 -ok 18 - Verify function__arg_types_text() with simple args -ok 19 - Function _cat_tools.function__arg_to_regprocedure(text, text, text) should not be security definer -ok 20 - Function _cat_tools.function__drop_temp(regprocedure, text) should not be security definer -ok 21 - Function cat_tools.routine__parse_arg_types(text) should not be security definer -ok 22 - Function cat_tools.routine__parse_arg_names(text) should not be security definer -ok 23 - Function cat_tools.routine__parse_arg_types_text(text) should not be security definer -ok 24 - Function 
cat_tools.routine__parse_arg_names_text(text) should not be security definer +ok 34 - Verify function__arg_types_text() with simple args +ok 35 - Function _cat_tools.function__arg_to_regprocedure(text, text, text) should not be security definer +ok 36 - Function _cat_tools.function__drop_temp(regprocedure, text) should not be security definer +ok 37 - Function cat_tools.routine__parse_arg_types(text) should not be security definer +ok 38 - Function cat_tools.routine__parse_arg_names(text) should not be security definer +ok 39 - Function cat_tools.routine__parse_arg_types_text(text) should not be security definer +ok 40 - Function cat_tools.routine__parse_arg_names_text(text) should not be security definer # TRANSACTION INTENTIONALLY LEFT OPEN! diff --git a/test/expected/relation_type.out b/test/expected/relation_type.out index f65ad92..d7993ab 100644 --- a/test/expected/relation_type.out +++ b/test/expected/relation_type.out @@ -27,14 +27,14 @@ ok 24 - SELECT cat_tools.relation_type('f') ok 25 - SELECT cat_tools.relation_type('m') ok 26 - SELECT cat_tools.relation_type('p') ok 27 - SELECT cat_tools.relation_type('I') -ok 28 - SELECT cat_tools.relation_type('r'::"char") -ok 29 - SELECT cat_tools.relation_type('i'::"char") -ok 30 - SELECT cat_tools.relation_type('S'::"char") -ok 31 - SELECT cat_tools.relation_type('t'::"char") -ok 32 - SELECT cat_tools.relation_type('v'::"char") -ok 33 - SELECT cat_tools.relation_type('c'::"char") -ok 34 - SELECT cat_tools.relation_type('f'::"char") -ok 35 - SELECT cat_tools.relation_type('m'::"char") -ok 36 - SELECT cat_tools.relation_type('p'::"char") -ok 37 - SELECT cat_tools.relation_type('I'::"char") +ok 28 - SELECT cat_tools.relation_type('r') +ok 29 - SELECT cat_tools.relation_type('i') +ok 30 - SELECT cat_tools.relation_type('S') +ok 31 - SELECT cat_tools.relation_type('t') +ok 32 - SELECT cat_tools.relation_type('v') +ok 33 - SELECT cat_tools.relation_type('c') +ok 34 - SELECT cat_tools.relation_type('f') +ok 35 - SELECT 
cat_tools.relation_type('m') +ok 36 - SELECT cat_tools.relation_type('p') +ok 37 - SELECT cat_tools.relation_type('I') # TRANSACTION INTENTIONALLY LEFT OPEN! diff --git a/test/expected/zzz_build.out b/test/expected/zzz_build.out index 7cbc064..f4a9ca5 100644 --- a/test/expected/zzz_build.out +++ b/test/expected/zzz_build.out @@ -100,6 +100,22 @@ + + + + + + + + + + + + + + + + diff --git a/test/sql/function.sql b/test/sql/function.sql index 3627cfa..56c7b99 100644 --- a/test/sql/function.sql +++ b/test/sql/function.sql @@ -18,6 +18,10 @@ SELECT plan( + 4 -- routine__parse_arg_types() + 4 -- routine__parse_arg_names() + + 4 -- routine__arg_types() + + 4 -- routine__arg_names() + + 4 -- routine__arg_types_text() + + 4 -- routine__arg_names_text() + 2 -- regprocedure() + 4 -- deprecated function__arg_types() wrapper (2 more tests) @@ -107,6 +111,7 @@ SELECT is( , 'Verify routine__parse_arg_names() with only inputs' ); +-- Test new routine__arg_* functions that accept regprocedure \set args 'anyarray, OUT text, OUT "char", pg_class, int, VARIADIC boolean[]' SELECT lives_ok( format( @@ -116,6 +121,110 @@ SELECT lives_ok( , format('Create pg_temp.test_function(%s)', :'args') ); +-- Test routine__arg_types() - all argument types +SELECT is( + :s.routine__arg_types(:s.regprocedure('pg_temp.test_function', :'args')) + , '{anyarray,pg_class,integer,boolean[]}'::regtype[] + , 'Verify routine__arg_types() returns all argument types' +); + +-- Test routine__arg_types() with a function that has only IN arguments +SELECT is( + :s.routine__arg_types('array_length(anyarray,integer)'::regprocedure) + , '{anyarray,integer}'::regtype[] + , 'Verify routine__arg_types() with IN arguments only' +); + +-- Test routine__arg_types() with a function with no arguments +SELECT is( + :s.routine__arg_types('pg_backend_pid()'::regprocedure) + , '{}'::regtype[] + , 'Verify routine__arg_types() with no arguments' +); + +-- Test routine__arg_types() with a built-in function +SELECT is( + 
:s.routine__arg_types('concat("any")'::regprocedure) + , '{"\"any\""}'::regtype[] + , 'Verify routine__arg_types() with VARIADIC argument' +); + +-- Test routine__arg_names() - all argument names +SELECT is( + :s.routine__arg_names(:s.regprocedure('pg_temp.test_function', :'args')) + , '{NULL,NULL,NULL,NULL}'::text[] + , 'Verify routine__arg_names() returns argument names (unnamed function)' +); + +-- Create a function with named arguments for testing +SELECT lives_ok( + $$CREATE FUNCTION pg_temp.named_function(input_val int, INOUT inout_val text, OUT output_val boolean) LANGUAGE plpgsql AS $body$BEGIN output_val := true; END$body$;$$ + , 'Create pg_temp.named_function with named arguments' +); + +SELECT is( + :s.routine__arg_names(:s.regprocedure('pg_temp.named_function', 'input_val int, INOUT inout_val text, OUT output_val boolean')) + , '{input_val,inout_val}'::text[] + , 'Verify routine__arg_names() with named arguments' +); + +-- Test routine__arg_names() with no arguments +SELECT is( + :s.routine__arg_names('pg_backend_pid()'::regprocedure) + , '{}'::text[] + , 'Verify routine__arg_names() with no arguments' +); + +-- Test routine__arg_types_text() wrapper +SELECT is( + :s.routine__arg_types_text(:s.regprocedure('pg_temp.test_function', :'args')) + , 'anyarray, pg_class, integer, boolean[]' + , 'Verify routine__arg_types_text() formatting' +); + +SELECT is( + :s.routine__arg_types_text('array_length(anyarray,integer)'::regprocedure) + , 'anyarray, integer' + , 'Verify routine__arg_types_text() with simple types' +); + +SELECT is( + :s.routine__arg_types_text('pg_backend_pid()'::regprocedure) + , '' + , 'Verify routine__arg_types_text() with no arguments' +); + +SELECT is( + :s.routine__arg_types_text('concat("any")'::regprocedure) + , '"any"' + , 'Verify routine__arg_types_text() with VARIADIC' +); + +-- Test routine__arg_names_text() wrapper +SELECT is( + :s.routine__arg_names_text(:s.regprocedure('pg_temp.named_function', 'input_val int, INOUT inout_val 
text, OUT output_val boolean')) + , 'input_val, inout_val' + , 'Verify routine__arg_names_text() formatting' +); + +SELECT is( + :s.routine__arg_names_text(:s.regprocedure('pg_temp.test_function', :'args')) + , '' + , 'Verify routine__arg_names_text() with unnamed arguments' +); + +SELECT is( + :s.routine__arg_names_text('array_length(anyarray,integer)'::regprocedure) + , '' + , 'Verify routine__arg_names_text() with built-in function' +); + +SELECT is( + :s.routine__arg_names_text('pg_backend_pid()'::regprocedure) + , '' + , 'Verify routine__arg_names_text() with no arguments' +); + SELECT is( :s.regprocedure( 'pg_temp.test_function', :'args' ) , 'pg_temp.test_function'::regproc::regprocedure diff --git a/test/sql/relation_type.sql b/test/sql/relation_type.sql index 02999cd..fdfb546 100644 --- a/test/sql/relation_type.sql +++ b/test/sql/relation_type.sql @@ -33,12 +33,12 @@ SELECT is( SELECT is( cat_tools.relation__kind('r') - , 'table'::cat_tools.relation_type + , 'table' , 'Simple sanity check of relation__kind()' ); SELECT is( cat_tools.relation__relkind('table') - , 'r'::cat_tools.relation_relkind + , 'r' , 'Simple sanity check of relation__relkind()' ); @@ -50,8 +50,8 @@ SELECT throws_ok( , 'Permission denied trying to use types' ) FROM (VALUES - ('cat_tools.relation__relkind') - , ('cat_tools.relation__kind') + ('cat_tools.relation_relkind') + , ('cat_tools.relation_kind') ) v(typename) ; SELECT throws_ok( @@ -61,8 +61,8 @@ SELECT throws_ok( , 'Permission denied trying to run functions' ) FROM (VALUES - ('kind', 'text'::regtype) - , ('relkind', 'text'::regtype) + ('kind', 'text') + , ('relkind', 'text') ) v(suffix, argtype) ; @@ -76,7 +76,7 @@ SELECT is(cat_tools.relation__kind(relkind)::text, kind, format('SELECT cat_tool FROM kinds ; -SELECT is(cat_tools.relation__kind(relkind::"char")::text, kind, format('SELECT cat_tools.relation_type(%L::"char")', relkind)) +SELECT is(cat_tools.relation__kind(relkind)::text, kind, format('SELECT 
cat_tools.relation_type(%L)', relkind)) FROM kinds ; From 82be5acb8d442abdfa10f3ea8a1916e52d614675 Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Thu, 25 Sep 2025 17:11:47 -0500 Subject: [PATCH 14/18] Add enum types for pg_proc fields and clean up permissions - Add enum types equivalent to relation_relkind for pg_proc fields: * routine_prokind and routine_type (function/procedure/aggregate/window) * routine_proargmode and routine_argument_mode (in/out/inout/variadic/table) * routine_provolatile and routine_volatility (immutable/stable/volatile) * routine_proparallel and routine_parallel_safety (safe/restricted/unsafe) - Remove explicit type grants, rely on default privileges - Clean up redundant comments in mapping functions - Add comprehensive test suite for new enum types and permissions --- sql/cat_tools.sql.in | 126 +++++++++++++++++----- test/expected/relation_type.out | 56 ++++------ test/expected/routine_argument_mode.out | 21 ++++ test/expected/routine_parallel_safety.out | 15 +++ test/expected/routine_type.out | 18 ++++ test/expected/routine_volatility.out | 15 +++ test/expected/type__permissions.out | 39 +++++++ test/expected/zzz_build.out | 8 ++ test/sql/relation_type.sql | 32 +----- test/sql/routine_argument_mode.sql | 60 +++++++++++ test/sql/routine_parallel_safety.sql | 60 +++++++++++ test/sql/routine_type.sql | 60 +++++++++++ test/sql/routine_volatility.sql | 60 +++++++++++ test/sql/type__permissions.sql | 46 ++++++++ 14 files changed, 526 insertions(+), 90 deletions(-) create mode 100644 test/expected/routine_argument_mode.out create mode 100644 test/expected/routine_parallel_safety.out create mode 100644 test/expected/routine_type.out create mode 100644 test/expected/routine_volatility.out create mode 100644 test/expected/type__permissions.out create mode 100644 test/sql/routine_argument_mode.sql create mode 100644 test/sql/routine_parallel_safety.sql create mode 100644 test/sql/routine_type.sql create mode 100644 
test/sql/routine_volatility.sql create mode 100644 test/sql/type__permissions.sql diff --git a/sql/cat_tools.sql.in b/sql/cat_tools.sql.in index afab1f7..c397265 100644 --- a/sql/cat_tools.sql.in +++ b/sql/cat_tools.sql.in @@ -20,6 +20,7 @@ CREATE SCHEMA __cat_tools; -- Schema already created via CREATE EXTENSION GRANT USAGE ON SCHEMA cat_tools TO cat_tools__usage; +ALTER DEFAULT PRIVILEGES IN SCHEMA cat_tools GRANT USAGE ON TYPES TO cat_tools__usage; CREATE SCHEMA _cat_tools; @generated@ @@ -282,19 +283,27 @@ CREATE TYPE cat_tools.relation_type AS ENUM( COMMENT ON TYPE cat_tools.relation_type IS $$Types of objects stored in `pg_class`$$; CREATE TYPE cat_tools.relation_relkind AS ENUM( - 'r' - , 'i' - , 'S' - , 't' - , 'v' - , 'c' - , 'f' - , 'm' - , 'p' - , 'I' + 'r' -- table + , 'i' -- index + , 'S' -- sequence + , 't' -- toast table + , 'v' -- view + , 'c' -- composite type + , 'f' -- foreign table + , 'm' -- materialized view + , 'p' -- partitioned table + , 'I' -- partitioned index ); COMMENT ON TYPE cat_tools.relation_relkind IS $$Valid values for `pg_class.relkind`$$; +CREATE TYPE cat_tools.routine_prokind AS ENUM( + 'f' -- function + , 'p' -- procedure + , 'a' -- aggregate + , 'w' -- window +); +COMMENT ON TYPE cat_tools.routine_prokind IS $$Valid values for `pg_proc.prokind`$$; + CREATE TYPE cat_tools.routine_type AS ENUM( 'function' , 'procedure' @@ -303,6 +312,15 @@ CREATE TYPE cat_tools.routine_type AS ENUM( ); COMMENT ON TYPE cat_tools.routine_type IS $$Types of routines stored in `pg_proc`$$; +CREATE TYPE cat_tools.routine_proargmode AS ENUM( + 'i' -- in + , 'o' -- out + , 'b' -- inout + , 'v' -- variadic + , 't' -- table +); +COMMENT ON TYPE cat_tools.routine_proargmode IS $$Valid values for `pg_proc.proargmodes` elements$$; + CREATE TYPE cat_tools.routine_argument_mode AS ENUM( 'in' , 'out' @@ -312,6 +330,13 @@ CREATE TYPE cat_tools.routine_argument_mode AS ENUM( ); COMMENT ON TYPE cat_tools.routine_argument_mode IS $$Argument modes for 
function/procedure parameters$$; +CREATE TYPE cat_tools.routine_provolatile AS ENUM( + 'i' -- immutable + , 's' -- stable + , 'v' -- volatile +); +COMMENT ON TYPE cat_tools.routine_provolatile IS $$Valid values for `pg_proc.provolatile`$$; + CREATE TYPE cat_tools.routine_volatility AS ENUM( 'immutable' , 'stable' @@ -319,10 +344,17 @@ CREATE TYPE cat_tools.routine_volatility AS ENUM( ); COMMENT ON TYPE cat_tools.routine_volatility IS $$Volatility levels for functions/procedures$$; +CREATE TYPE cat_tools.routine_proparallel AS ENUM( + 's' -- safe + , 'r' -- restricted + , 'u' -- unsafe +); +COMMENT ON TYPE cat_tools.routine_proparallel IS $$Valid values for `pg_proc.proparallel`$$; + CREATE TYPE cat_tools.routine_parallel_safety AS ENUM( - 'unsafe' + 'safe' , 'restricted' - , 'safe' + , 'unsafe' ); COMMENT ON TYPE cat_tools.routine_parallel_safety IS $$Parallel safety levels for functions/procedures$$; @@ -334,6 +366,7 @@ CREATE TYPE cat_tools.routine_argument AS ( ); COMMENT ON TYPE cat_tools.routine_argument IS $$Detailed information about a single function/procedure argument$$; + -- Mapping functions SELECT __cat_tools.create_function( 'cat_tools.relation__kind' @@ -346,9 +379,9 @@ SELECT CASE relkind WHEN 'S' THEN 'sequence' WHEN 't' THEN 'toast table' WHEN 'v' THEN 'view' - WHEN 'c' THEN 'materialized view' - WHEN 'f' THEN 'composite type' - WHEN 'm' THEN 'foreign table' + WHEN 'c' THEN 'materialized view' -- composite type (but mapped to materialized view) + WHEN 'f' THEN 'composite type' -- foreign table (but mapped to composite type) + WHEN 'm' THEN 'foreign table' -- materialized view (but mapped to foreign table) WHEN 'p' THEN 'partitioned table' WHEN 'I' THEN 'partitioned index' END::cat_tools.relation_type @@ -368,9 +401,9 @@ SELECT CASE kind WHEN 'sequence' THEN 'S' WHEN 'toast table' THEN 't' WHEN 'view' THEN 'v' - WHEN 'materialized view' THEN 'c' - WHEN 'composite type' THEN 'f' - WHEN 'foreign table' THEN 'm' + WHEN 'materialized view' THEN 'c' -- 
materialized view (mapped from c) + WHEN 'composite type' THEN 'f' -- composite type (mapped from f) + WHEN 'foreign table' THEN 'm' -- foreign table (mapped from m) WHEN 'partitioned table' THEN 'p' WHEN 'partitioned index' THEN 'I' END::cat_tools.relation_relkind @@ -399,7 +432,7 @@ SELECT __cat_tools.create_function( SELECT __cat_tools.create_function( 'cat_tools.routine__type' - , 'prokind "char"' + , 'prokind cat_tools.routine_prokind' , 'cat_tools.routine_type LANGUAGE sql STRICT IMMUTABLE PARALLEL SAFE' , $body$ SELECT CASE prokind @@ -408,6 +441,17 @@ SELECT CASE prokind WHEN 'a' THEN 'aggregate' WHEN 'w' THEN 'window' END::cat_tools.routine_type +$body$ + , 'cat_tools__usage' + , 'Mapping from cat_tools.routine_prokind to cat_tools.routine_type' +); + +SELECT __cat_tools.create_function( + 'cat_tools.routine__type' + , 'prokind "char"' + , 'cat_tools.routine_type LANGUAGE sql STRICT IMMUTABLE PARALLEL SAFE' + , $body$ +SELECT cat_tools.routine__type(prokind::cat_tools.routine_prokind) $body$ , 'cat_tools__usage' , 'Mapping from pg_proc.prokind to cat_tools.routine_type' @@ -415,7 +459,7 @@ $body$ SELECT __cat_tools.create_function( 'cat_tools.routine__argument_mode' - , 'proargmode "char"' + , 'proargmode cat_tools.routine_proargmode' , 'cat_tools.routine_argument_mode LANGUAGE sql STRICT IMMUTABLE PARALLEL SAFE' , $body$ SELECT CASE proargmode @@ -425,6 +469,17 @@ SELECT CASE proargmode WHEN 'v' THEN 'variadic' WHEN 't' THEN 'table' END::cat_tools.routine_argument_mode +$body$ + , 'cat_tools__usage' + , 'Mapping from cat_tools.routine_proargmode to cat_tools.routine_argument_mode' +); + +SELECT __cat_tools.create_function( + 'cat_tools.routine__argument_mode' + , 'proargmode "char"' + , 'cat_tools.routine_argument_mode LANGUAGE sql STRICT IMMUTABLE PARALLEL SAFE' + , $body$ +SELECT cat_tools.routine__argument_mode(proargmode::cat_tools.routine_proargmode) $body$ , 'cat_tools__usage' , 'Mapping from pg_proc.proargmodes element to 
cat_tools.routine_argument_mode' @@ -432,7 +487,7 @@ $body$ SELECT __cat_tools.create_function( 'cat_tools.routine__volatility' - , 'provolatile "char"' + , 'provolatile cat_tools.routine_provolatile' , 'cat_tools.routine_volatility LANGUAGE sql STRICT IMMUTABLE PARALLEL SAFE' , $body$ SELECT CASE provolatile @@ -440,6 +495,17 @@ SELECT CASE provolatile WHEN 's' THEN 'stable' WHEN 'v' THEN 'volatile' END::cat_tools.routine_volatility +$body$ + , 'cat_tools__usage' + , 'Mapping from cat_tools.routine_provolatile to cat_tools.routine_volatility' +); + +SELECT __cat_tools.create_function( + 'cat_tools.routine__volatility' + , 'provolatile "char"' + , 'cat_tools.routine_volatility LANGUAGE sql STRICT IMMUTABLE PARALLEL SAFE' + , $body$ +SELECT cat_tools.routine__volatility(provolatile::cat_tools.routine_provolatile) $body$ , 'cat_tools__usage' , 'Mapping from pg_proc.provolatile to cat_tools.routine_volatility' @@ -447,14 +513,25 @@ $body$ SELECT __cat_tools.create_function( 'cat_tools.routine__parallel_safety' - , 'proparallel "char"' + , 'proparallel cat_tools.routine_proparallel' , 'cat_tools.routine_parallel_safety LANGUAGE sql STRICT IMMUTABLE PARALLEL SAFE' , $body$ SELECT CASE proparallel - WHEN 'u' THEN 'unsafe' - WHEN 'r' THEN 'restricted' WHEN 's' THEN 'safe' + WHEN 'r' THEN 'restricted' + WHEN 'u' THEN 'unsafe' END::cat_tools.routine_parallel_safety +$body$ + , 'cat_tools__usage' + , 'Mapping from cat_tools.routine_proparallel to cat_tools.routine_parallel_safety' +); + +SELECT __cat_tools.create_function( + 'cat_tools.routine__parallel_safety' + , 'proparallel "char"' + , 'cat_tools.routine_parallel_safety LANGUAGE sql STRICT IMMUTABLE PARALLEL SAFE' + , $body$ +SELECT cat_tools.routine__parallel_safety(proparallel::cat_tools.routine_proparallel) $body$ , 'cat_tools__usage' , 'Mapping from pg_proc.proparallel to cat_tools.routine_parallel_safety' @@ -754,6 +831,7 @@ CREATE TYPE cat_tools.object_type AS ENUM( , 'access method' -- pg_am ); + @generated@ 
SELECT __cat_tools.create_function( diff --git a/test/expected/relation_type.out b/test/expected/relation_type.out index d7993ab..ad0e5e8 100644 --- a/test/expected/relation_type.out +++ b/test/expected/relation_type.out @@ -1,40 +1,26 @@ \set ECHO none -1..37 +1..23 ok 1 - Verify count from kinds ok 2 - Simple sanity check of relation__kind() ok 3 - Simple sanity check of relation__relkind() -ok 4 - Permission denied trying to use types -ok 5 - Permission denied trying to use types -ok 6 - Permission denied trying to run functions -ok 7 - Permission denied trying to run functions -ok 8 - SELECT cat_tools.relation_relkind('table') -ok 9 - SELECT cat_tools.relation_relkind('index') -ok 10 - SELECT cat_tools.relation_relkind('sequence') -ok 11 - SELECT cat_tools.relation_relkind('toast table') -ok 12 - SELECT cat_tools.relation_relkind('view') -ok 13 - SELECT cat_tools.relation_relkind('materialized view') -ok 14 - SELECT cat_tools.relation_relkind('composite type') -ok 15 - SELECT cat_tools.relation_relkind('foreign table') -ok 16 - SELECT cat_tools.relation_relkind('partitioned table') -ok 17 - SELECT cat_tools.relation_relkind('partitioned index') -ok 18 - SELECT cat_tools.relation_type('r') -ok 19 - SELECT cat_tools.relation_type('i') -ok 20 - SELECT cat_tools.relation_type('S') -ok 21 - SELECT cat_tools.relation_type('t') -ok 22 - SELECT cat_tools.relation_type('v') -ok 23 - SELECT cat_tools.relation_type('c') -ok 24 - SELECT cat_tools.relation_type('f') -ok 25 - SELECT cat_tools.relation_type('m') -ok 26 - SELECT cat_tools.relation_type('p') -ok 27 - SELECT cat_tools.relation_type('I') -ok 28 - SELECT cat_tools.relation_type('r') -ok 29 - SELECT cat_tools.relation_type('i') -ok 30 - SELECT cat_tools.relation_type('S') -ok 31 - SELECT cat_tools.relation_type('t') -ok 32 - SELECT cat_tools.relation_type('v') -ok 33 - SELECT cat_tools.relation_type('c') -ok 34 - SELECT cat_tools.relation_type('f') -ok 35 - SELECT cat_tools.relation_type('m') -ok 36 - SELECT 
cat_tools.relation_type('p') -ok 37 - SELECT cat_tools.relation_type('I') +ok 4 - SELECT cat_tools.relation_relkind('table') +ok 5 - SELECT cat_tools.relation_relkind('index') +ok 6 - SELECT cat_tools.relation_relkind('sequence') +ok 7 - SELECT cat_tools.relation_relkind('toast table') +ok 8 - SELECT cat_tools.relation_relkind('view') +ok 9 - SELECT cat_tools.relation_relkind('materialized view') +ok 10 - SELECT cat_tools.relation_relkind('composite type') +ok 11 - SELECT cat_tools.relation_relkind('foreign table') +ok 12 - SELECT cat_tools.relation_relkind('partitioned table') +ok 13 - SELECT cat_tools.relation_relkind('partitioned index') +ok 14 - SELECT cat_tools.relation_type('r') +ok 15 - SELECT cat_tools.relation_type('i') +ok 16 - SELECT cat_tools.relation_type('S') +ok 17 - SELECT cat_tools.relation_type('t') +ok 18 - SELECT cat_tools.relation_type('v') +ok 19 - SELECT cat_tools.relation_type('c') +ok 20 - SELECT cat_tools.relation_type('f') +ok 21 - SELECT cat_tools.relation_type('m') +ok 22 - SELECT cat_tools.relation_type('p') +ok 23 - SELECT cat_tools.relation_type('I') # TRANSACTION INTENTIONALLY LEFT OPEN! 
diff --git a/test/expected/routine_argument_mode.out b/test/expected/routine_argument_mode.out new file mode 100644 index 0000000..3864a75 --- /dev/null +++ b/test/expected/routine_argument_mode.out @@ -0,0 +1,21 @@ +\set ECHO none +1..18 +ok 1 - Verify count from argument_modes +ok 2 - Simple sanity check of routine__argument_mode() +ok 3 - Simple sanity check of routine__argument_mode() with enum +ok 4 - SELECT cat_tools.routine__argument_mode('i'::cat_tools.routine_proargmode) +ok 5 - SELECT cat_tools.routine__argument_mode('o'::cat_tools.routine_proargmode) +ok 6 - SELECT cat_tools.routine__argument_mode('b'::cat_tools.routine_proargmode) +ok 7 - SELECT cat_tools.routine__argument_mode('v'::cat_tools.routine_proargmode) +ok 8 - SELECT cat_tools.routine__argument_mode('t'::cat_tools.routine_proargmode) +ok 9 - SELECT cat_tools.routine__argument_mode('i'::"char") +ok 10 - SELECT cat_tools.routine__argument_mode('o'::"char") +ok 11 - SELECT cat_tools.routine__argument_mode('b'::"char") +ok 12 - SELECT cat_tools.routine__argument_mode('v'::"char") +ok 13 - SELECT cat_tools.routine__argument_mode('t'::"char") +ok 14 - SELECT cat_tools.routine__argument_mode('i') +ok 15 - SELECT cat_tools.routine__argument_mode('o') +ok 16 - SELECT cat_tools.routine__argument_mode('b') +ok 17 - SELECT cat_tools.routine__argument_mode('v') +ok 18 - SELECT cat_tools.routine__argument_mode('t') +# TRANSACTION INTENTIONALLY LEFT OPEN! 
diff --git a/test/expected/routine_parallel_safety.out b/test/expected/routine_parallel_safety.out new file mode 100644 index 0000000..76cf3cd --- /dev/null +++ b/test/expected/routine_parallel_safety.out @@ -0,0 +1,15 @@ +\set ECHO none +1..12 +ok 1 - Verify count from parallel_safeties +ok 2 - Simple sanity check of routine__parallel_safety() +ok 3 - Simple sanity check of routine__parallel_safety() with enum +ok 4 - SELECT cat_tools.routine__parallel_safety('s'::cat_tools.routine_proparallel) +ok 5 - SELECT cat_tools.routine__parallel_safety('r'::cat_tools.routine_proparallel) +ok 6 - SELECT cat_tools.routine__parallel_safety('u'::cat_tools.routine_proparallel) +ok 7 - SELECT cat_tools.routine__parallel_safety('s'::"char") +ok 8 - SELECT cat_tools.routine__parallel_safety('r'::"char") +ok 9 - SELECT cat_tools.routine__parallel_safety('u'::"char") +ok 10 - SELECT cat_tools.routine__parallel_safety('s') +ok 11 - SELECT cat_tools.routine__parallel_safety('r') +ok 12 - SELECT cat_tools.routine__parallel_safety('u') +# TRANSACTION INTENTIONALLY LEFT OPEN! 
diff --git a/test/expected/routine_type.out b/test/expected/routine_type.out new file mode 100644 index 0000000..daa74b9 --- /dev/null +++ b/test/expected/routine_type.out @@ -0,0 +1,18 @@ +\set ECHO none +1..15 +ok 1 - Verify count from routine_kinds +ok 2 - Simple sanity check of routine__type() +ok 3 - Simple sanity check of routine__type() with enum +ok 4 - SELECT cat_tools.routine__type('f'::cat_tools.routine_prokind) +ok 5 - SELECT cat_tools.routine__type('p'::cat_tools.routine_prokind) +ok 6 - SELECT cat_tools.routine__type('a'::cat_tools.routine_prokind) +ok 7 - SELECT cat_tools.routine__type('w'::cat_tools.routine_prokind) +ok 8 - SELECT cat_tools.routine__type('f'::"char") +ok 9 - SELECT cat_tools.routine__type('p'::"char") +ok 10 - SELECT cat_tools.routine__type('a'::"char") +ok 11 - SELECT cat_tools.routine__type('w'::"char") +ok 12 - SELECT cat_tools.routine__type('f') +ok 13 - SELECT cat_tools.routine__type('p') +ok 14 - SELECT cat_tools.routine__type('a') +ok 15 - SELECT cat_tools.routine__type('w') +# TRANSACTION INTENTIONALLY LEFT OPEN! 
diff --git a/test/expected/routine_volatility.out b/test/expected/routine_volatility.out new file mode 100644 index 0000000..133de5e --- /dev/null +++ b/test/expected/routine_volatility.out @@ -0,0 +1,15 @@ +\set ECHO none +1..12 +ok 1 - Verify count from volatilities +ok 2 - Simple sanity check of routine__volatility() +ok 3 - Simple sanity check of routine__volatility() with enum +ok 4 - SELECT cat_tools.routine__volatility('i'::cat_tools.routine_provolatile) +ok 5 - SELECT cat_tools.routine__volatility('s'::cat_tools.routine_provolatile) +ok 6 - SELECT cat_tools.routine__volatility('v'::cat_tools.routine_provolatile) +ok 7 - SELECT cat_tools.routine__volatility('i'::"char") +ok 8 - SELECT cat_tools.routine__volatility('s'::"char") +ok 9 - SELECT cat_tools.routine__volatility('v'::"char") +ok 10 - SELECT cat_tools.routine__volatility('i') +ok 11 - SELECT cat_tools.routine__volatility('s') +ok 12 - SELECT cat_tools.routine__volatility('v') +# TRANSACTION INTENTIONALLY LEFT OPEN! diff --git a/test/expected/type__permissions.out b/test/expected/type__permissions.out new file mode 100644 index 0000000..e4d3656 --- /dev/null +++ b/test/expected/type__permissions.out @@ -0,0 +1,39 @@ +\set ECHO none +1..36 +ok 1 - Permission denied trying to use type cat_tools.column +ok 2 - Permission denied trying to use type cat_tools.constraint_type +ok 3 - Permission denied trying to use type cat_tools.object_type +ok 4 - Permission denied trying to use type cat_tools.pg_all_foreign_keys +ok 5 - Permission denied trying to use type cat_tools.pg_class_v +ok 6 - Permission denied trying to use type cat_tools.pg_extension_v +ok 7 - Permission denied trying to use type cat_tools.procedure_type +ok 8 - Permission denied trying to use type cat_tools.relation_relkind +ok 9 - Permission denied trying to use type cat_tools.relation_type +ok 10 - Permission denied trying to use type cat_tools.routine_argument +ok 11 - Permission denied trying to use type cat_tools.routine_argument_mode +ok 
12 - Permission denied trying to use type cat_tools.routine_parallel_safety +ok 13 - Permission denied trying to use type cat_tools.routine_proargmode +ok 14 - Permission denied trying to use type cat_tools.routine_prokind +ok 15 - Permission denied trying to use type cat_tools.routine_proparallel +ok 16 - Permission denied trying to use type cat_tools.routine_provolatile +ok 17 - Permission denied trying to use type cat_tools.routine_type +ok 18 - Permission denied trying to use type cat_tools.routine_volatility +ok 19 - Permission granted to use type cat_tools.column +ok 20 - Permission granted to use type cat_tools.constraint_type +ok 21 - Permission granted to use type cat_tools.object_type +ok 22 - Permission granted to use type cat_tools.pg_all_foreign_keys +ok 23 - Permission granted to use type cat_tools.pg_class_v +ok 24 - Permission granted to use type cat_tools.pg_extension_v +ok 25 - Permission granted to use type cat_tools.procedure_type +ok 26 - Permission granted to use type cat_tools.relation_relkind +ok 27 - Permission granted to use type cat_tools.relation_type +ok 28 - Permission granted to use type cat_tools.routine_argument +ok 29 - Permission granted to use type cat_tools.routine_argument_mode +ok 30 - Permission granted to use type cat_tools.routine_parallel_safety +ok 31 - Permission granted to use type cat_tools.routine_proargmode +ok 32 - Permission granted to use type cat_tools.routine_prokind +ok 33 - Permission granted to use type cat_tools.routine_proparallel +ok 34 - Permission granted to use type cat_tools.routine_provolatile +ok 35 - Permission granted to use type cat_tools.routine_type +ok 36 - Permission granted to use type cat_tools.routine_volatility +# TRANSACTION INTENTIONALLY LEFT OPEN! 
diff --git a/test/expected/zzz_build.out b/test/expected/zzz_build.out index f4a9ca5..eaef812 100644 --- a/test/expected/zzz_build.out +++ b/test/expected/zzz_build.out @@ -124,6 +124,14 @@ + + + + + + + + diff --git a/test/sql/relation_type.sql b/test/sql/relation_type.sql index fdfb546..2acf4dd 100644 --- a/test/sql/relation_type.sql +++ b/test/sql/relation_type.sql @@ -21,8 +21,7 @@ CREATE TEMP VIEW kinds AS SELECT plan( 1 + 2 -- Simple is() tests - + 4 -- no_use tests - + 3 * (SELECT count(*)::int FROM kinds) + + 2 * (SELECT count(*)::int FROM kinds) ); SELECT is( @@ -42,31 +41,6 @@ SELECT is( , 'Simple sanity check of relation__relkind()' ); -SET LOCAL ROLE :no_use_role; -SELECT throws_ok( - format( 'SELECT NULL::%I', typename ) - , '42704' -- undefined_object; not exactly correct, but close enough - , NULL - , 'Permission denied trying to use types' -) - FROM (VALUES - ('cat_tools.relation_relkind') - , ('cat_tools.relation_kind') - ) v(typename) -; -SELECT throws_ok( - format( 'SELECT cat_tools.relation__%s( NULL::%I )', suffix, argtype ) - , '42501' -- insufficient_privilege - , NULL - , 'Permission denied trying to run functions' -) - FROM (VALUES - ('kind', 'text') - , ('relkind', 'text') - ) v(suffix, argtype) -; - -SET LOCAL ROLE :use_role; SELECT is(cat_tools.relation__relkind(kind)::text, relkind, format('SELECT cat_tools.relation_relkind(%L)', kind)) FROM kinds @@ -76,10 +50,6 @@ SELECT is(cat_tools.relation__kind(relkind)::text, kind, format('SELECT cat_tool FROM kinds ; -SELECT is(cat_tools.relation__kind(relkind)::text, kind, format('SELECT cat_tools.relation_type(%L)', relkind)) - FROM kinds -; - \i test/pgxntool/finish.sql -- vi: expandtab ts=2 sw=2 diff --git a/test/sql/routine_argument_mode.sql b/test/sql/routine_argument_mode.sql new file mode 100644 index 0000000..e09bc4e --- /dev/null +++ b/test/sql/routine_argument_mode.sql @@ -0,0 +1,60 @@ +\set ECHO none + +\i test/setup.sql + +-- test_role is set in test/deps.sql + +SET LOCAL ROLE 
:use_role; +CREATE TEMP VIEW argument_modes AS + SELECT + (cat_tools.enum_range('cat_tools.routine_argument_mode'))[gs] AS argument_mode + , (cat_tools.enum_range('cat_tools.routine_proargmode'))[gs] AS proargmode + FROM generate_series( + 1 + , greatest( + array_upper(cat_tools.enum_range('cat_tools.routine_argument_mode'), 1) + , array_upper(cat_tools.enum_range('cat_tools.routine_proargmode'), 1) + ) + ) gs +; + +SELECT plan( + 1 + + 2 -- Simple is() tests + + 3 * (SELECT count(*)::int FROM argument_modes) +); + +SELECT is( + (SELECT count(*)::int FROM argument_modes) + , 5 + , 'Verify count from argument_modes' +); + +SELECT is( + cat_tools.routine__argument_mode('i') + , 'in' + , 'Simple sanity check of routine__argument_mode()' +); + +SELECT is( + cat_tools.routine__argument_mode('i'::cat_tools.routine_proargmode) + , 'in' + , 'Simple sanity check of routine__argument_mode() with enum' +); + + +SELECT is(cat_tools.routine__argument_mode(proargmode::cat_tools.routine_proargmode)::text, argument_mode, format('SELECT cat_tools.routine__argument_mode(%L::cat_tools.routine_proargmode)', proargmode)) + FROM argument_modes +; + +SELECT is(cat_tools.routine__argument_mode(proargmode::"char")::text, argument_mode, format('SELECT cat_tools.routine__argument_mode(%L::"char")', proargmode)) + FROM argument_modes +; + +SELECT is(cat_tools.routine__argument_mode(proargmode::"char")::text, argument_mode, format('SELECT cat_tools.routine__argument_mode(%L)', proargmode)) + FROM argument_modes +; + +\i test/pgxntool/finish.sql + +-- vi: expandtab ts=2 sw=2 \ No newline at end of file diff --git a/test/sql/routine_parallel_safety.sql b/test/sql/routine_parallel_safety.sql new file mode 100644 index 0000000..4b71481 --- /dev/null +++ b/test/sql/routine_parallel_safety.sql @@ -0,0 +1,60 @@ +\set ECHO none + +\i test/setup.sql + +-- test_role is set in test/deps.sql + +SET LOCAL ROLE :use_role; +CREATE TEMP VIEW parallel_safeties AS + SELECT + 
(cat_tools.enum_range('cat_tools.routine_parallel_safety'))[gs] AS parallel_safety + , (cat_tools.enum_range('cat_tools.routine_proparallel'))[gs] AS proparallel + FROM generate_series( + 1 + , greatest( + array_upper(cat_tools.enum_range('cat_tools.routine_parallel_safety'), 1) + , array_upper(cat_tools.enum_range('cat_tools.routine_proparallel'), 1) + ) + ) gs +; + +SELECT plan( + 1 + + 2 -- Simple is() tests + + 3 * (SELECT count(*)::int FROM parallel_safeties) +); + +SELECT is( + (SELECT count(*)::int FROM parallel_safeties) + , 3 + , 'Verify count from parallel_safeties' +); + +SELECT is( + cat_tools.routine__parallel_safety('s') + , 'safe' + , 'Simple sanity check of routine__parallel_safety()' +); + +SELECT is( + cat_tools.routine__parallel_safety('s'::cat_tools.routine_proparallel) + , 'safe' + , 'Simple sanity check of routine__parallel_safety() with enum' +); + + +SELECT is(cat_tools.routine__parallel_safety(proparallel::cat_tools.routine_proparallel)::text, parallel_safety, format('SELECT cat_tools.routine__parallel_safety(%L::cat_tools.routine_proparallel)', proparallel)) + FROM parallel_safeties +; + +SELECT is(cat_tools.routine__parallel_safety(proparallel::"char")::text, parallel_safety, format('SELECT cat_tools.routine__parallel_safety(%L::"char")', proparallel)) + FROM parallel_safeties +; + +SELECT is(cat_tools.routine__parallel_safety(proparallel::"char")::text, parallel_safety, format('SELECT cat_tools.routine__parallel_safety(%L)', proparallel)) + FROM parallel_safeties +; + +\i test/pgxntool/finish.sql + +-- vi: expandtab ts=2 sw=2 \ No newline at end of file diff --git a/test/sql/routine_type.sql b/test/sql/routine_type.sql new file mode 100644 index 0000000..0ac1cf3 --- /dev/null +++ b/test/sql/routine_type.sql @@ -0,0 +1,60 @@ +\set ECHO none + +\i test/setup.sql + +-- test_role is set in test/deps.sql + +SET LOCAL ROLE :use_role; +CREATE TEMP VIEW routine_kinds AS + SELECT + (cat_tools.enum_range('cat_tools.routine_type'))[gs] AS 
routine_type + , (cat_tools.enum_range('cat_tools.routine_prokind'))[gs] AS prokind + FROM generate_series( + 1 + , greatest( + array_upper(cat_tools.enum_range('cat_tools.routine_type'), 1) + , array_upper(cat_tools.enum_range('cat_tools.routine_prokind'), 1) + ) + ) gs +; + +SELECT plan( + 1 + + 2 -- Simple is() tests + + 3 * (SELECT count(*)::int FROM routine_kinds) +); + +SELECT is( + (SELECT count(*)::int FROM routine_kinds) + , 4 + , 'Verify count from routine_kinds' +); + +SELECT is( + cat_tools.routine__type('f') + , 'function' + , 'Simple sanity check of routine__type()' +); + +SELECT is( + cat_tools.routine__type('f'::cat_tools.routine_prokind) + , 'function' + , 'Simple sanity check of routine__type() with enum' +); + + +SELECT is(cat_tools.routine__type(prokind::cat_tools.routine_prokind)::text, routine_type, format('SELECT cat_tools.routine__type(%L::cat_tools.routine_prokind)', prokind)) + FROM routine_kinds +; + +SELECT is(cat_tools.routine__type(prokind::"char")::text, routine_type, format('SELECT cat_tools.routine__type(%L::"char")', prokind)) + FROM routine_kinds +; + +SELECT is(cat_tools.routine__type(prokind::"char")::text, routine_type, format('SELECT cat_tools.routine__type(%L)', prokind)) + FROM routine_kinds +; + +\i test/pgxntool/finish.sql + +-- vi: expandtab ts=2 sw=2 \ No newline at end of file diff --git a/test/sql/routine_volatility.sql b/test/sql/routine_volatility.sql new file mode 100644 index 0000000..2aa978e --- /dev/null +++ b/test/sql/routine_volatility.sql @@ -0,0 +1,60 @@ +\set ECHO none + +\i test/setup.sql + +-- test_role is set in test/deps.sql + +SET LOCAL ROLE :use_role; +CREATE TEMP VIEW volatilities AS + SELECT + (cat_tools.enum_range('cat_tools.routine_volatility'))[gs] AS volatility + , (cat_tools.enum_range('cat_tools.routine_provolatile'))[gs] AS provolatile + FROM generate_series( + 1 + , greatest( + array_upper(cat_tools.enum_range('cat_tools.routine_volatility'), 1) + , 
array_upper(cat_tools.enum_range('cat_tools.routine_provolatile'), 1) + ) + ) gs +; + +SELECT plan( + 1 + + 2 -- Simple is() tests + + 3 * (SELECT count(*)::int FROM volatilities) +); + +SELECT is( + (SELECT count(*)::int FROM volatilities) + , 3 + , 'Verify count from volatilities' +); + +SELECT is( + cat_tools.routine__volatility('i') + , 'immutable' + , 'Simple sanity check of routine__volatility()' +); + +SELECT is( + cat_tools.routine__volatility('i'::cat_tools.routine_provolatile) + , 'immutable' + , 'Simple sanity check of routine__volatility() with enum' +); + + +SELECT is(cat_tools.routine__volatility(provolatile::cat_tools.routine_provolatile)::text, volatility, format('SELECT cat_tools.routine__volatility(%L::cat_tools.routine_provolatile)', provolatile)) + FROM volatilities +; + +SELECT is(cat_tools.routine__volatility(provolatile::"char")::text, volatility, format('SELECT cat_tools.routine__volatility(%L::"char")', provolatile)) + FROM volatilities +; + +SELECT is(cat_tools.routine__volatility(provolatile::"char")::text, volatility, format('SELECT cat_tools.routine__volatility(%L)', provolatile)) + FROM volatilities +; + +\i test/pgxntool/finish.sql + +-- vi: expandtab ts=2 sw=2 \ No newline at end of file diff --git a/test/sql/type__permissions.sql b/test/sql/type__permissions.sql new file mode 100644 index 0000000..6a2e704 --- /dev/null +++ b/test/sql/type__permissions.sql @@ -0,0 +1,46 @@ +\set ECHO none + +\i test/setup.sql + +-- test_role is set in test/deps.sql + +-- Create temp view with all types for testing and grant access to both test roles +-- Note: Must create as superuser before switching roles +CREATE TEMP VIEW type_tests AS +SELECT 'cat_tools.' 
|| typname AS type_name + FROM pg_type t + WHERE t.typnamespace = 'cat_tools'::regnamespace + AND t.typtype IN ('e', 'c') -- enums and composite types + ORDER BY typname +; + +-- Grant access to temp view for both roles +GRANT SELECT ON type_tests TO :use_role, :no_use_role; + +SELECT plan( + (SELECT count(*)::int FROM type_tests) * 2 -- test both failure and success +); + +SET LOCAL ROLE :no_use_role; + +-- Test type permissions should fail with no_use_role - expect 42501 (insufficient_privilege) +SELECT throws_ok( + format('SELECT NULL::%s', type_name) + , '42501' -- insufficient_privilege + , NULL + , format('Permission denied trying to use type %s', type_name) +) +FROM type_tests; + +SET LOCAL ROLE :use_role; + +-- Test type permissions should succeed with use_role +SELECT lives_ok( + format('SELECT NULL::%s', type_name) + , format('Permission granted to use type %s', type_name) +) +FROM type_tests; + +\i test/pgxntool/finish.sql + +-- vi: expandtab ts=2 sw=2 \ No newline at end of file From 7a5fe71893022359d04681d075f59cc61e47830e Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Tue, 11 Nov 2025 15:46:44 -0600 Subject: [PATCH 15/18] Add missing test file --- test/expected/permissions.out | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 test/expected/permissions.out diff --git a/test/expected/permissions.out b/test/expected/permissions.out new file mode 100644 index 0000000..f2f23df --- /dev/null +++ b/test/expected/permissions.out @@ -0,0 +1,23 @@ +\set ECHO none +1..20 +ok 1 - Permission denied trying to use type cat_tools.column +ok 2 - Permission denied trying to use type cat_tools.constraint_type +ok 3 - Permission denied trying to use type cat_tools.object_type +ok 4 - Permission denied trying to use type cat_tools.pg_all_foreign_keys +ok 5 - Permission denied trying to use type cat_tools.pg_class_v +ok 6 - Permission denied trying to use type cat_tools.pg_extension_v +ok 7 - Permission denied trying to use type 
cat_tools.procedure_type +ok 8 - Permission denied trying to use type cat_tools.relation_relkind +ok 9 - Permission denied trying to use type cat_tools.relation_type +ok 10 - Permission denied trying to use type cat_tools.routine_argument +ok 11 - Permission denied trying to use type cat_tools.routine_argument_mode +ok 12 - Permission denied trying to use type cat_tools.routine_parallel_safety +ok 13 - Permission denied trying to use type cat_tools.routine_proargmode +ok 14 - Permission denied trying to use type cat_tools.routine_prokind +ok 15 - Permission denied trying to use type cat_tools.routine_proparallel +ok 16 - Permission denied trying to use type cat_tools.routine_provolatile +ok 17 - Permission denied trying to use type cat_tools.routine_type +ok 18 - Permission denied trying to use type cat_tools.routine_volatility +ok 19 - Permission denied trying to run function relation__relkind +ok 20 - Permission denied trying to run function relation_type +# TRANSACTION INTENTIONALLY LEFT OPEN! 
From 21ec71b66bb07f69e53b5a55d59b1678fa3d9f7f Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Tue, 11 Nov 2025 15:47:09 -0600 Subject: [PATCH 16/18] Add automated test workflow --- .github/workflows/ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0946c32..c89c1b2 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -4,7 +4,7 @@ jobs: test: strategy: matrix: - pg: [17, 16, 15, 14, 13, 12, 11, 10, 9.6, 9.5, 9.4, 9.3, 9.2] + pg: [18, 17, 16, 15, 14, 13, 12, 11, 10] name: 🐘 PostgreSQL ${{ matrix.pg }} runs-on: ubuntu-latest container: pgxn/pgxn-tools @@ -14,4 +14,4 @@ jobs: - name: Check out the repo uses: actions/checkout@v4 - name: Test on PostgreSQL ${{ matrix.pg }} - run: pg-build-test + run: make test PGUSER=postgres From 5332bc5f139ed2574e1b62317772cee17cbcba6d Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Thu, 22 Jan 2026 17:04:31 -0600 Subject: [PATCH 17/18] Squashed 'pgxntool/' changes from bed3604..54793a3 54793a3 Merge branch 'master' into upstream/stable ab7f6e2 Stamp 1.0.0 3a571ba Add pg_tle support and modernize test infrastructure (#11) b96ea6d Add support for Claude code; build and doc improvements (#9) e9c24de Fix pg_regress on versions > 12 (#5) git-subtree-dir: pgxntool git-subtree-split: 54793a39251290657767816d23b45d6297f3a671 --- .claude/commands/commit.md | 78 ++++ .claude/settings.json | 19 + .gitattributes | 3 + .gitignore | 1 + CLAUDE.md | 255 +++++++++++ HISTORY.asc | 13 +- LICENSE | 2 +- README.asc | 225 ++++++++- README.html | 912 ++++++++++++++++++++++++++++--------- _.gitignore | 10 +- base.mk | 137 +++++- build_meta.sh | 30 +- control.mk.sh | 90 ++++ lib.sh | 40 ++ make_results.sh | 28 ++ meta.mk.sh | 96 ++-- pgtle.sh | 849 ++++++++++++++++++++++++++++++++++ pgtle_versions.md | 47 ++ setup.sh | 4 + 19 files changed, 2519 insertions(+), 320 deletions(-) create mode 100644 .claude/commands/commit.md create mode 100644 
.claude/settings.json create mode 100644 CLAUDE.md create mode 100755 control.mk.sh create mode 100644 lib.sh create mode 100755 make_results.sh create mode 100755 pgtle.sh create mode 100644 pgtle_versions.md diff --git a/.claude/commands/commit.md b/.claude/commands/commit.md new file mode 100644 index 0000000..5ddbd74 --- /dev/null +++ b/.claude/commands/commit.md @@ -0,0 +1,78 @@ +--- +description: Create a git commit following project standards and safety protocols +allowed-tools: Bash(git status:*), Bash(git log:*), Bash(git add:*), Bash(git diff:*), Bash(git commit:*), Bash(make test:*) +--- + +# commit + +Create a git commit following all project standards and safety protocols for pgxntool-test. + +**CRITICAL REQUIREMENTS:** + +1. **Git Safety**: Never update `git config`, never force push to `main`/`master`, never skip hooks unless explicitly requested + +2. **Commit Attribution**: Do NOT add "Generated with Claude Code" to commit message body. The standard Co-Authored-By trailer is acceptable per project CLAUDE.md. + +3. **Testing**: ALL tests must pass before committing: + - Run `make test` + - Check the output carefully for any "not ok" lines + - Count passing vs total tests + - **If ANY tests fail: STOP. Do NOT commit. Ask the user what to do.** + - There is NO such thing as an "acceptable" failing test + - Do NOT rationalize failures as "pre-existing" or "unrelated" + +**WORKFLOW:** + +1. Run in parallel: `git status`, `git diff --stat`, `git log -10 --oneline` + +2. Check test status - THIS IS MANDATORY: + - Run `make test 2>&1 | tee /tmp/test-output.txt` + - Check for failing tests: `grep "^not ok" /tmp/test-output.txt` + - If ANY tests fail: STOP immediately and inform the user + - Only proceed if ALL tests pass + +3. 
Analyze changes and draft concise commit message following this repo's style: + - Look at `git log -10 --oneline` to match existing style + - Be factual and direct (e.g., "Fix BATS dist test to create its own distribution") + - Focus on "why" when it adds value, otherwise just describe "what" + - List items in roughly decreasing order of impact + - Keep related items grouped together + - **In commit messages**: Wrap all code references in backticks - filenames, paths, commands, function names, variables, make targets, etc. + - Examples: `helpers.bash`, `make test-recursion`, `setup_sequential_test()`, `TEST_REPO`, `.envs/`, `01-meta.bats` + - Prevents markdown parsing issues and improves clarity + +4. **PRESENT the proposed commit message to the user and WAIT for approval before proceeding** + +5. After receiving approval, stage changes appropriately using `git add` + +6. **VERIFY staged files with `git status`**: + - If user did NOT specify a subset: Confirm ALL modified/untracked files are staged + - If user specified only certain files: Confirm ONLY those files are staged + - STOP and ask user if staging doesn't match intent + +7. After verification, commit using `HEREDOC` format: +```bash +git commit -m "$(cat <<'EOF' +Subject line (imperative mood, < 72 chars) + +Additional context if needed, wrapped at 72 characters. + +Co-Authored-By: Claude +EOF +)" +``` + +8. Run `git status` after commit to verify success + +9. If pre-commit hook modifies files: Check authorship (`git log -1 --format='%an %ae'`) and branch status, then amend if safe or create new commit + +**REPOSITORY CONTEXT:** + +This is pgxntool-test, a test harness for the pgxntool framework. Key facts: +- Tests live in `tests/` directory +- `.envs/` contains test environments (gitignored) + +**RESTRICTIONS:** +- DO NOT push unless explicitly asked +- DO NOT commit files with actual secrets (`.env`, `credentials.json`, etc.) +- Never use `-i` flags (`git commit -i`, `git rebase -i`, etc.) 
diff --git a/.claude/settings.json b/.claude/settings.json new file mode 100644 index 0000000..e7bf5a9 --- /dev/null +++ b/.claude/settings.json @@ -0,0 +1,19 @@ +{ + "permissions": { + "allow": [ + "Bash(cat:*)", + "Bash(make test:*)", + "Bash(tee:*)", + "Bash(echo:*)", + "Bash(git show:*)", + "Bash(git log:*)", + "Bash(ls:*)", + "Bash(find:*)", + "Bash(git checkout:*)", + "Bash(head:*)" + ], + "additionalDirectories": [ + "../pgxntool-test/" + ] + } +} diff --git a/.gitattributes b/.gitattributes index c602ea0..a94d824 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,4 +1,7 @@ .gitattributes export-ignore +.claude/ export-ignore +*.md export-ignore +.DS_Store export-ignore *.asc export-ignore *.adoc export-ignore *.html export-ignore diff --git a/.gitignore b/.gitignore index a01ee28..5ffb236 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,2 @@ .*.swp +.claude/*.local.json diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 0000000..d2ea214 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,255 @@ +# CLAUDE.md + +This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. + +## Git Commit Guidelines + +**IMPORTANT**: When creating commit messages, do not attribute commits to yourself (Claude). Commit messages should reflect the work being done without AI attribution in the message body. The standard Co-Authored-By trailer is acceptable. + +## Critical: What This Repo Actually Is + +**pgxntool is NOT a standalone project.** It is a meta-framework that exists ONLY to be embedded into PostgreSQL extension projects via `git subtree`. This repo cannot be built, tested, or run directly. + +**Think of it like this**: pgxntool is to PostgreSQL extensions what a Makefile template library is to C projects - it's infrastructure code that gets copied into other projects, not a project itself. 
+ +## Critical: Directory Purity - NO Temporary Files + +**This directory contains ONLY files that get embedded into extension projects.** When extension developers run `git subtree add`, they pull the entire pgxntool directory into their project. + +**ABSOLUTE RULE**: NO temporary files, scratch work, or development tools may be added to this directory. + +**Examples of what NEVER belongs here:** +- Temporary files (scratch notes, test output, debugging artifacts) +- Development scripts or tools (these go in pgxntool-test/) +- Planning documents (PLAN-*.md files go in pgxntool-test/) +- Any file you wouldn't want in every extension project that uses pgxntool + +**CLAUDE.md exception**: CLAUDE.md exists here for AI assistant guidance, but is excluded from distributions via `.gitattributes export-ignore`. Same with `.claude/` directory. + +**Why this matters**: Any file you add here will be pulled into hundreds of extension projects via git subtree. Keep this directory lean and clean. + +## Development Workflow: Work from pgxntool-test + +**CRITICAL**: All development work on pgxntool should be done from the pgxntool-test repository, NOT from this repository. + +**For complete development workflow documentation, see:** +https://github.com/Postgres-Extensions/pgxntool-test + +## Two-Repository Development Pattern + +This codebase uses a two-repository pattern: + +1. **pgxntool/** (this repo) - The framework code that gets embedded into extension projects +2. 
**pgxntool-test** - The test harness that validates pgxntool functionality + +**For development and testing workflow, see:** +https://github.com/Postgres-Extensions/pgxntool-test + +## How Extension Developers Use pgxntool + +Extension projects include pgxntool via git subtree: + +```bash +git subtree add -P pgxntool --squash git@github.com:decibel/pgxntool.git release +pgxntool/setup.sh +``` + +After setup, their Makefile typically contains just: +```makefile +include pgxntool/base.mk +``` + +## Architecture: Two-Phase Build System + +### Phase 1: Meta Generation (`build_meta.sh`) +- Processes `META.in.json` (template with placeholders/empty values) +- Strips out X_comment fields and empty values +- Produces clean `META.json` + +### Phase 2: Variable Extraction (`meta.mk.sh`) +- Parses `META.json` using `JSON.sh` (a bash-based JSON parser) +- Generates `meta.mk` with Make variables: + - `PGXN` - distribution name + - `PGXNVERSION` - version number + - `EXTENSIONS` - list of extensions provided + - `EXTENSION_*_VERSION` - per-extension versions + - `EXTENSION_VERSION_FILES` - auto-generated versioned SQL files +- `base.mk` includes `meta.mk` via `-include` + +### The Magic of base.mk + +`base.mk` provides a complete PGXS-based build system: +- Auto-detects extension SQL files in `sql/` +- Auto-detects C modules in `src/*.c` +- Auto-detects tests in `test/sql/*.sql` +- Auto-generates versioned extension files (`extension--version.sql`) +- Handles Asciidoc → HTML conversion +- Integrates with PGXN distribution format +- Manages git tagging and release packaging + +## File Structure for Consumer Projects + +Projects using pgxntool follow this layout: +``` +project/ +├── Makefile # include pgxntool/base.mk +├── META.in.json # Template metadata (customize for your extension) +├── META.json # Auto-generated from META.in.json +├── extension.control # Standard PostgreSQL control file +├── pgxntool/ # This repo, embedded via git subtree +├── sql/ +│ └── extension.sql # Base 
extension SQL +├── src/ # Optional C code (*.c files) +├── test/ +│ ├── deps.sql # Load extension and test dependencies +│ ├── sql/*.sql # Test SQL files +│ └── expected/*.out # Expected test outputs +└── doc/ # Optional docs (*.adoc, *.asciidoc) +``` + +## Commands for Extension Developers (End Users) + +These are the commands extension developers use (documented for context): + +```bash +make # Build extension (generates versioned SQL, docs) +make test # Full test: testdeps → install → installcheck → show diffs +make results # Run tests and update expected output files +make html # Generate HTML from Asciidoc sources +make tag # Create git branch for current META.json version +make dist # Create PGXN .zip (auto-tags, places in ../) +make pgtle # Generate pg_tle registration SQL (see pg_tle Support below) +make check-pgtle # Check pg_tle installation and report version +make install-pgtle # Install pg_tle registration SQL files into database +make pgxntool-sync # Update to latest pgxntool via git subtree pull +``` + +## Testing with pgxntool + +### Critical Testing Rules + +**NEVER use `make installcheck` directly**. Always use `make test` instead. The `make test` target ensures: +- Clean builds before testing +- Proper test isolation +- Correct test dependency installation +- Proper cleanup and result comparison + +**Database Connection Requirement**: PostgreSQL must be running before executing `make test`. If you get connection errors (e.g., "could not connect to server"), stop and ask the user to start PostgreSQL. + +**Claude Code MUST NEVER run `make results`**. This target updates test expected output files and requires manual human verification of test changes before execution. + +**Claude Code MUST NEVER modify files in `test/expected/`**. These are expected test outputs that define correct behavior and must only be updated through the `make results` workflow. + +The workflow is: +1. Human runs `make test` and examines diffs +2. 
Human manually verifies changes are correct +3. Human manually runs `make results` to update expected files + +### Test Output Mechanics + +pgxntool uses PostgreSQL's pg_regress test framework: +- **Actual test output**: Written to `test/results/` directory +- **Expected output**: Stored in `test/expected/` directory +- **Test comparison**: pg_regress compares actual vs expected and generates diffs; `make test` displays them +- **Updating expectations**: `make results` copies `test/results/` → `test/expected/` + +When tests fail, examine the diff output carefully. The actual test output in `test/results/` shows what your code produced, while `test/expected/` shows what was expected. + +## Key Implementation Details + +### PostgreSQL Version Handling +- `MAJORVER` = version × 10 (e.g., 9.6 → 96, 13 → 130) +- Tests use `--load-language=plpgsql` for versions < 13 +- Version detection via `pg_config --version` + +### Test System (pg_regress based) +- Tests in `test/sql/*.sql`, outputs compared to `test/expected/*.out` +- Setup via `test/pgxntool/setup.sql` (loads pgTap and deps.sql) +- `.IGNORE: installcheck` allows `make test` to handle errors (show diffs, then exit with error status) +- `make results` updates expected outputs after test runs + +### Document Generation +- Auto-detects `asciidoctor` or `asciidoc` +- Generates HTML from `*.adoc` and `*.asciidoc` in `$(DOC_DIRS)` +- HTML required for `make dist`, optional for `make install` +- Template-based rules via `ASCIIDOC_template` + +### Distribution Packaging +- `make dist` creates `../PGXN-VERSION.zip` +- Always creates git branch tag matching version +- Uses `git archive` to package +- Validates repo is clean before tagging + +### Subtree Sync Support +- `make pgxntool-sync` pulls latest release +- Multiple sync targets: release, stable, local variants +- Uses `git subtree pull --squash` +- Requires clean repo (no uncommitted changes) + +### pg_tle Support + +pgxntool can generate pg_tle (Trusted Language 
Extensions) registration SQL for deploying extensions in AWS RDS/Aurora without filesystem access. + +**Usage:** `make pgtle` or `make pgtle PGTLE_VERSION=1.5.0+` + +**Output:** `pg_tle/{version_range}/{extension}.sql` + +**For version range details and API compatibility boundaries, see:** `pgtle_versions.md` + +**Installation targets:** + +- `make check-pgtle` - Checks if pg_tle is installed and reports the version. Reports version from `pg_extension` if extension has been created, or newest available version from `pg_available_extension_versions` if available but not created. Errors if pg_tle not available in cluster. Assumes `PG*` environment variables are configured. + +- `make install-pgtle` - Auto-detects pg_tle version and installs appropriate registration SQL files. Updates or creates pg_tle extension as needed. Determines which version range files to install based on detected version. Runs all generated SQL files via `psql` to register extensions with pg_tle. Assumes `PG*` environment variables are configured. 
+ +**Version notation:** +- `X.Y.Z+` means >= X.Y.Z +- `X.Y.Z-A.B.C` means >= X.Y.Z and < A.B.C (note boundary) + +**Key implementation details:** +- Script: `pgxntool/pgtle-wrap.sh` (bash) +- Parses `.control` files for metadata (NOT META.json) +- Fixed delimiter: `$_pgtle_wrap_delimiter_$` (validated not in source) +- Each output file contains ALL versions and ALL upgrade paths +- Multi-extension support (multiple .control files) +- Output directory `pg_tle/` excluded from git +- Depends on `make all` to ensure versioned SQL files exist first +- Only processes versioned files (`sql/{ext}--{version}.sql`), not base files + +**SQL file handling:** +- **Version files** (`sql/{ext}--{version}.sql`): Generated automatically by `make all` from base `sql/{ext}.sql` file +- **Upgrade scripts** (`sql/{ext}--{v1}--{v2}.sql`): Created manually by users when adding new extension versions +- The script ensures the default_version file exists if the base file exists (creates it from base file if missing) +- All version files and upgrade scripts are discovered and included in the generated pg_tle registration SQL + +**Dependencies:** +Generated files depend on: +- Control file (metadata source) +- All SQL files (sql/{ext}--*.sql) - must run `make all` first +- Generator script itself + +**Limitations:** +- No C code support (pg_tle requires trusted languages only) +- PostgreSQL 14.5+ required (pg_tle not available on earlier versions) + +## Critical Gotchas + +1. **Empty Variables**: If `DOCS` or `MODULES` is empty, base.mk sets to empty to prevent PGXS errors +2. **testdeps Pattern**: Never add recipes to `testdeps` - create separate target and make it a prerequisite +3. **META.json is Generated**: Always edit `META.in.json`, never `META.json` directly +4. **Control File Versions**: No automatic validation that `.control` matches `META.json` version +5. **PGXNTOOL_NO_PGXS_INCLUDE**: Setting this skips PGXS inclusion (for special scenarios) +6. 
**Distribution Placement**: `.zip` files go in parent directory (`../`) to avoid repo clutter + +## Scripts + +- **setup.sh** - Initializes pgxntool in a new extension project (copies templates, creates directories) +- **build_meta.sh** - Strips empty fields from META.in.json to create META.json +- **meta.mk.sh** - Parses META.json via JSON.sh and generates meta.mk with Make variables +- **JSON.sh** - Third-party bash JSON parser (MIT licensed) +- **safesed** - Utility for safe sed operations + +## Related Repositories + +- **pgxntool-test** - Test harness for validating pgxntool functionality: https://github.com/Postgres-Extensions/pgxntool-test +- Never produce any kind of metrics or estimates unless you have data to back them up. If you do have data you MUST reference it. \ No newline at end of file diff --git a/HISTORY.asc b/HISTORY.asc index 9cb793b..bedc0b5 100644 --- a/HISTORY.asc +++ b/HISTORY.asc @@ -1,5 +1,14 @@ -STABLE ------- +1.0.0 +----- +== Fix broken multi-extension support +Prior to this fix, distributions with multiple extensions or extensions with versions different from the PGXN distribution version were completely broken. Extension versions are now correctly read from each `.control` file's `default_version` instead of using META.json's distribution version. + +== Add pg_tle support +New `make pgtle` target generates pg_tle registration SQL for extensions. Supports pg_tle version ranges (1.0.0-1.4.0, 1.4.0-1.5.0, 1.5.0+) with appropriate API calls for each range. See README for usage. + +== Use git tags for distribution versioning +The `tag` and `rmtag` targets now create/delete git tags instead of branches. + == Support 13+ The `--load-language` option was removed from `pg_regress` in 13. diff --git a/LICENSE b/LICENSE index 5a20925..4a507f7 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2015, Jim Nasby, Blue Treble Solutions +Copyright (c) 2015-2026, Jim Nasby, Blue Treble Solutions All rights reserved. 
Redistribution and use in source and binary forms, with or without diff --git a/README.asc b/README.asc index c2c6683..bf3559d 100644 --- a/README.asc +++ b/README.asc @@ -23,6 +23,10 @@ pgxntool/setup.sh TODO: Create a nice script that will init a new project for you. +== Development + +If you want to contribute to pgxntool development, work from the https://github.com/decibel/pgxntool-test[pgxntool-test] repository, not from this repository. That repository contains the test infrastructure and development tools needed to validate changes to pgxntool. This repository contains only the framework files that get embedded into extension projects via `git subtree`. + == Usage Typically, you can just create a simple Makefile that does nothing but include base.mk: @@ -41,6 +45,8 @@ This will build any .html files that can be created. See <<_Document_Handling>>. === test Runs unit tests via the PGXS `installcheck` target. Unlike a simple `make installcheck` though, the `test` rule has the following prerequisites: clean testdeps install installcheck. All of those are PGXS rules, except for `testdeps`. +NOTE: While you can still run `make installcheck` or any other valid PGXS make target directly, it's recommended to use `make test` when using pgxntool. The `test` target ensures clean builds, proper test isolation, and correct dependency installation. + === testdeps This rule allows you to ensure certain actions have taken place before running tests. By default it has a single prerequisite, `pgtap`, which will attempt to install http://pgtap.org[pgtap] from PGXN. This depends on having the pgxn client installed. @@ -60,10 +66,18 @@ If you want to over-ride the default dependency on `pgtap` you should be able to WARNING: It will probably cause problems if you try to create a `testdeps` rule that has a recipe. Instead of doing that, put the recipe in a separate rule and make that rule a prerequisite of `testdeps` as shown in the example. 
=== results -Because `make test` ultimately runs `installcheck`, it's using the Postgres test suite. Unfortunately, that suite is based on running `diff` between a raw output file and expected results. I *STRONGLY* recommend you use http://pgtap.org[pgTap] instead! The extra effort of learning pgTap will quickly pay for itself. https://github.com/decibel/trunklet-format/blob/master/test/sql/base.sql[This example] might help get you started. +Because `make test` ultimately runs `installcheck`, it's using the Postgres test suite. Unfortunately, that suite is based on running `diff` between a raw output file and expected results. I *STRONGLY* recommend you use http://pgtap.org[pgTap] instead! With pgTap, it's MUCH easier to determine whether a test is passing or not - tests explicitly pass or fail rather than requiring you to examine diff output. The extra effort of learning pgTap will quickly pay for itself. https://github.com/decibel/trunklet-format/blob/master/test/sql/base.sql[This example] might help get you started. No matter what method you use, once you know that all your tests are passing correctly, you need to create or update the test output expected files. `make results` does that for you. +IMPORTANT: *`make results` requires manual verification first*. The correct workflow is: + +1. Run `make test` and examine the diff output +2. Manually verify that the differences are correct and expected +3. Only then run `make results` to update the expected output files in `test/expected/` + +Never run `make results` without first verifying the test changes are correct. The `results` target copies files from `test/results/` to `test/expected/`, so running it blindly will make incorrect output become the new expected behavior. + === tag `make tag` will create a git branch for the current version of your extension, as determined by the META.json file. The reason to do this is so you can always refer to the exact code that went into a released version. 
@@ -83,6 +97,109 @@ NOTE: Your repository must be clean (no modified files) in order to run this. Ru TIP: There is also a `pgxntool-sync-%` rule if you need to do more advanced things. +=== pgtle +Generates pg_tle (Trusted Language Extensions) registration SQL files for deploying extensions in managed environments like AWS RDS/Aurora. See <<_pg_tle_Support>> for complete documentation. + +`make pgtle` generates SQL files in `pg_tle/` subdirectories organized by pg_tle version ranges. For version range details, see `pgtle_versions.md`. + +=== check-pgtle +Checks if pg_tle is installed and reports the version. This target: +- Reports the version from `pg_extension` if `CREATE EXTENSION pg_tle` has been run in the database +- Errors if pg_tle is not available in the cluster + +This target assumes `PG*` environment variables are configured for `psql` connectivity. + +---- +make check-pgtle +---- + +=== run-pgtle +Registers all extensions with pg_tle by executing the generated pg_tle registration SQL files in a PostgreSQL database. This target: +- Requires pg_tle extension to be installed (checked via `check-pgtle`) +- Uses `pgtle.sh` to determine which version range directory to use based on the installed pg_tle version +- Runs all generated SQL files via `psql` to register your extensions with pg_tle + +This target assumes that running `psql` without any arguments will connect to the desired database. You can control this by setting the various PG* environment variables (and possibly using the `.pgpass` file). See the PostgreSQL documentation for more details. + +NOTE: The `pgtle` target is a dependency, so `make run-pgtle` will automatically generate the SQL files if needed. + +---- +make run-pgtle +---- + +After running `make run-pgtle`, you can create your extension in the database: +---- +CREATE EXTENSION "your-extension-name"; +---- + +== Version-Specific SQL Files + +PGXNtool automatically generates version-specific SQL files from your base SQL file. 
These files follow the pattern `sql/{extension}--{version}.sql` and are used by PostgreSQL's extension system to install specific versions of your extension. + +=== How Version Files Are Generated + +When you run `make` (or `make all`), PGXNtool: + +1. Reads your `META.json` file to determine the extension version from `provides.{extension}.version` +2. Generates a Makefile rule that copies your base SQL file (`sql/{extension}.sql`) to the version-specific file (`sql/{extension}--{version}.sql`) +3. Executes this rule, creating the version-specific file with a header comment indicating it's auto-generated + +For example, if your `META.json` contains: +---- +"provides": { + "myext": { + "version": "1.2.3", + ... + } +} +---- + +Running `make` will create `sql/myext--1.2.3.sql` by copying `sql/myext.sql`. + +=== What Controls the Version Number + +The version number comes from `META.json` → `provides.{extension}.version`, *not* from your `.control` file's `default_version` field. The `.control` file's `default_version` is used by PostgreSQL to determine which version to install by default, but the actual version-specific file that gets generated is determined by what's in `META.json`. + +To change the version of your extension: +1. Update `provides.{extension}.version` in `META.json` +2. Run `make` to regenerate the version-specific file +3. Update `default_version` in your `.control` file to match (if needed) + +=== Committing Version Files + +Version-specific SQL files are now treated as permanent files that should be committed to your repository. This makes it much easier to test updates to extensions, as you can see exactly what SQL was included in each version. + +IMPORTANT: These files are auto-generated and include a header comment warning not to edit them. Any manual changes will be overwritten the next time you run `make`. To modify the extension, edit the base SQL file (`sql/{extension}.sql`) instead. 
+ +=== Alternative: Ignoring Version Files + +If you prefer not to commit version-specific SQL files, you must add them to your `.gitignore` to prevent `make dist` from failing due to untracked files. Add the following to your `.gitignore`: + +---- +# Auto-generated version-specific SQL files (if not committing them) +sql/*--*.sql +!sql/*--*--*.sql +---- + +The second line (`!sql/*--*--*.sql`) ensures that upgrade scripts (which contain two version numbers and should be manually written) are still tracked. + +WARNING: If you ignore version files instead of committing them, they will NOT be included in your PGXN distribution (`make dist` uses `git archive`, which only includes tracked files). This means users installing your extension from PGXN will need `make` and PGXS available to build the extension - they cannot simply copy the SQL files into their PostgreSQL installation. For maximum compatibility, we recommend committing version files. + +=== Distribution Inclusion + +Version-specific files are included in distributions created by `make dist` only if they are committed to git. Since `make dist` uses `git archive`, only tracked files are included in the distribution archive. + +=== Multiple Versions + +If you need to support multiple versions of your extension: + +1. Create additional version-specific files manually (e.g., `sql/myext--1.0.0.sql`, `sql/myext--1.1.0.sql`) +2. Create upgrade scripts for version transitions (e.g., `sql/myext--1.0.0--1.1.0.sql`) +3. Update `META.json` to reflect the current version you're working on +4. Commit all version files and upgrade scripts to your repository + +The version file for the current version (specified in `META.json`) will be automatically regenerated when you run `make`, but other version files you create manually will be preserved. + == Document Handling PGXNtool supports generation and installation of document files. There are several variables and rules that control this behavior. 
@@ -158,7 +275,111 @@ Because of this, `base.mk` will forcibly define it to be NULL if it's empty. PGXNtool appends *all* files found in all `$(DOC_DIRS)` to `DOCS`. +== pg_tle Support +[[_pg_tle_Support]] +pgxntool can generate link:https://github.com/aws/pg_tle[pg_tle (Trusted Language Extensions)] registration SQL for deploying PostgreSQL extensions in managed environments like AWS RDS and Aurora where filesystem access is not available. + +For make targets, see: <<_pgtle>>, <<_check_pgtle>>, <<_run_pgtle>>. + +=== What is pg_tle? + +pg_tle is an AWS open-source framework that enables developers to create and deploy PostgreSQL extensions without filesystem access. Traditional PostgreSQL extensions require `.control` and `.sql` files on the filesystem, which isn't possible in managed services like RDS and Aurora. + +pg_tle solves this by: +- Storing extension metadata and SQL in database tables +- Using the `pgtle_admin` role for administrative operations +- Enabling `CREATE EXTENSION` to work in managed environments + +=== Quick Start + +Generate pg_tle registration SQL for your extension: + +---- +make pgtle +---- + +This creates files in `pg_tle/` subdirectories organized by pg_tle version ranges. See `pgtle_versions.md` for complete version range details and API compatibility boundaries. + +=== Version Groupings + +pgxntool creates different sets of files for different pg_tle versions to handle backward-incompatible API changes. Each version boundary represents a change to pg_tle's API functions that we use. + +For details on version boundaries and API changes, see `pgtle_versions.md`. + +=== Installation Example + +IMPORTANT: This is only a basic example. Always refer to the link:https://github.com/aws/pg_tle[main pg_tle documentation] for complete installation instructions and best practices. + +Basic installation steps: + +. Ensure pg_tle is installed and grant the `pgtle_admin` role to your user +. 
Generate and run the pg_tle registration SQL files: ++ +---- +make run-pgtle +---- ++ +This automatically detects your pg_tle version and runs the appropriate SQL files. See `pgtle_versions.md` for version range details. +. Create your extension: `CREATE EXTENSION myextension;` + +=== Advanced Usage + +==== Multi-Extension Projects + +If your project has multiple extensions (multiple `.control` files), `make pgtle` generates files for all of them: + +---- +myproject/ +├── ext1.control +├── ext2.control +└── pg_tle/ + ├── 1.0.0-1.5.0/ + │ ├── ext1.sql + │ └── ext2.sql + └── 1.5.0+/ + ├── ext1.sql + └── ext2.sql +---- + +=== How It Works +`make pgtle` does the following: + +. Parses control file(s): Extracts `comment`, `default_version`, `requires`, and `schema` fields +. Discovers SQL files: Finds all versioned files (`sql/{ext}--{version}.sql`) and upgrade scripts (`sql/{ext}--{ver1}--{ver2}.sql`) +. Wraps SQL content: Uses a fixed dollar-quote delimiter (`$_pgtle_wrap_delimiter_$`) to wrap SQL for pg_tle functions +. Generates registration SQL: Creates `pgtle.install_extension()` calls for each version, `pgtle.install_update_path()` for upgrades, and `pgtle.set_default_version()` for the default +. Version-specific output: Generates separate files for different pg_tle capability levels + +Each generated SQL file is wrapped in a transaction (`BEGIN;` ... `COMMIT;`) to ensure atomic installation. + +=== Troubleshooting + +==== "No versioned SQL files found" + +*Problem*: The script can't find `sql/{ext}--{version}.sql` files. + +*Solution*: Run `make` first to generate versioned files from your base `sql/{ext}.sql` file. + +==== "Control file not found" + +*Problem*: The script can't find `{ext}.control` in the current directory. + +*Solution*: Run `make pgtle` from your extension's root directory (where the `.control` file is). 
+ +==== "SQL file contains reserved pg_tle delimiter" + +*Problem*: Your SQL files contain the string `$_pgtle_wrap_delimiter_$` (extremely unlikely). + +*Solution*: Don't use that dollar-quote delimiter in your code. + +==== Extension uses C code + +*Problem*: Your control file has `module_pathname`, indicating C code. + +*Solution*: pg_tle only supports trusted languages. You cannot use C extensions with pg_tle. The script will warn you but still generate files (which won't work). + +NOTE: there are several untrusted languages (such as plpython), and the only tests for C. == Copyright -Copyright (c) 2015 Jim Nasby +Copyright (c) 2026 Jim Nasby PGXNtool is released under a https://github.com/decibel/pgxntool/blob/master/LICENCE[BSD license]. Note that it includes https://github.com/dominictarr/JSON.sh[JSON.sh], which is released under a https://github.com/decibel/pgxntool/blob/master/JSON.sh.LICENCE[MIT license]. diff --git a/README.html b/README.html index ae4a597..41200aa 100644 --- a/README.html +++ b/README.html @@ -2,31 +2,26 @@ - + - + PGXNtool @@ -428,26 +445,50 @@

PGXNtool

Table of Contents
@@ -466,7 +507,7 @@

PGXNtool

-

1. Install

+

1. Install

This assumes that you’ve already initialized your extension in git.

@@ -495,7 +536,15 @@

1. Install

-

2. Usage

+

2. Development

+
+
+

If you want to contribute to pgxntool development, work from the pgxntool-test repository, not from this repository. That repository contains the test infrastructure and development tools needed to validate changes to pgxntool. This repository contains only the framework files that get embedded into extension projects via git subtree.

+
+
+
+
+

3. Usage

Typically, you can just create a simple Makefile that does nothing but include base.mk:

@@ -508,7 +557,7 @@

2. Usage

-

3. make targets

+

4. make targets

These are the make targets that are provided by base.mk

@@ -526,19 +575,31 @@

3. make targe

-

3.1. html

+

4.1. html

This will build any .html files that can be created. See [_Document_Handling].

-

3.2. test

+

4.2. test

Runs unit tests via the PGXS installcheck target. Unlike a simple make installcheck though, the test rule has the following prerequisites: clean testdeps install installcheck. All of those are PGXS rules, except for testdeps.

+
+ + + + + +
+
Note
+
+While you can still run make installcheck or any other valid PGXS make target directly, it’s recommended to use make test when using pgxntool. The test target ensures clean builds, proper test isolation, and correct dependency installation. +
+
-

3.3. testdeps

+

4.3. testdeps

This rule allows you to ensure certain actions have taken place before running tests. By default it has a single prerequisite, pgtap, which will attempt to install pgtap from PGXN. This depneds on having the pgxn client installed.

@@ -574,16 +635,44 @@

3.3. testdeps

-

3.4. results

+

4.4. results

-

Because make test ultimately runs installcheck, it’s using the Postgres test suite. Unfortunately, that suite is based on running diff between a raw output file and expected results. I STRONGLY recommend you use pgTap instead! The extra effort of learning pgTap will quickly pay for itself. This example might help get you started.

+

Because make test ultimately runs installcheck, it’s using the Postgres test suite. Unfortunately, that suite is based on running diff between a raw output file and expected results. I STRONGLY recommend you use pgTap instead! With pgTap, it’s MUCH easier to determine whether a test is passing or not - tests explicitly pass or fail rather than requiring you to examine diff output. The extra effort of learning pgTap will quickly pay for itself. This example might help get you started.

No matter what method you use, once you know that all your tests are passing correctly, you need to create or update the test output expected files. make results does that for you.

+
+ + + + + +
+
Important
+
+make results requires manual verification first. The correct workflow is: +
+
+
+
    +
  1. +

    Run make test and examine the diff output

    +
  2. +
  3. +

    Manually verify that the differences are correct and expected

    +
  4. +
  5. +

    Only then run make results to update the expected output files in test/expected/

    +
  6. +
+
+
+

Never run make results without first verifying the test changes are correct. The results target copies files from test/results/ to test/expected/, so running it blindly will make incorrect output become the new expected behavior.

+
-

3.5. tag

+

4.5. tag

make tag will create a git branch for the current version of your extension, as determined by the META.json file. The reason to do this is so you can always refer to the exact code that went into a released version.

@@ -604,7 +693,7 @@

3.5. tag

-

3.6. dist

+

4.6. dist

make dist will create a .zip file for your current version that you can upload to PGXN. The file is named after the PGXN name and version (the top-level "name" and "version" attributes in META.json). The .zip file is placed in the parent directory so as not to clutter up your git repo.

@@ -622,7 +711,7 @@

3.6. dist

-

3.7. pgxntool-sync

+

4.7. pgxntool-sync

This rule will pull down the latest released version of PGXNtool via git subtree pull.

@@ -651,10 +740,204 @@

3.7. pgxnto

+
+

4.8. pgtle

+
+

Generates pg_tle (Trusted Language Extensions) registration SQL files for deploying extensions in managed environments like AWS RDS/Aurora. See [_pg_tle_Support] for complete documentation.

+
+
+

make pgtle generates SQL files in pg_tle/ subdirectories organized by pg_tle version ranges. For version range details, see pgtle_versions.md.

+
+
+
+

4.9. check-pgtle

+
+

Checks if pg_tle is installed and reports the version. This target: +- Reports the version from pg_extension if CREATE EXTENSION pg_tle has been run in the database +- Errors if pg_tle is not available in the cluster

+
+
+

This target assumes PG* environment variables are configured for psql connectivity.

+
+
+
+
make check-pgtle
+
+
+
+
+

4.10. run-pgtle

+
+

Registers all extensions with pg_tle by executing the generated pg_tle registration SQL files in a PostgreSQL database. This target: +- Requires pg_tle extension to be installed (checked via check-pgtle) +- Uses pgtle.sh to determine which version range directory to use based on the installed pg_tle version +- Runs all generated SQL files via psql to register your extensions with pg_tle

+
+
+

This target assumes that running psql without any arguments will connect to the desired database. You can control this by setting the various PG* environment variables (and possibly using the .pgpassword file). See the PostgreSQL documentation for more details.

+
+
+ + + + + +
+
Note
+
+The pgtle target is a dependency, so make run-pgtle will automatically generate the SQL files if needed. +
+
+
+
+
make run-pgtle
+
+
+
+

After running make run-pgtle, you can create your extension in the database:

+
+
+
+
CREATE EXTENSION "your-extension-name";
+
+
+
-

4. Document Handling

+

5. Version-Specific SQL Files

+
+
+

PGXNtool automatically generates version-specific SQL files from your base SQL file. These files follow the pattern sql/{extension}--{version}.sql and are used by PostgreSQL’s extension system to install specific versions of your extension.

+
+
+

5.1. How Version Files Are Generated

+
+

When you run make (or make all), PGXNtool:

+
+
+
    +
  1. +

    Reads your META.json file to determine the extension version from provides.{extension}.version

    +
  2. +
  3. +

    Generates a Makefile rule that copies your base SQL file (sql/{extension}.sql) to the version-specific file (sql/{extension}--{version}.sql)

    +
  4. +
  5. +

    Executes this rule, creating the version-specific file with a header comment indicating it’s auto-generated

    +
  6. +
+
+
+

For example, if your META.json contains:

+
+
+
+
"provides": {
+  "myext": {
+    "version": "1.2.3",
+    ...
+  }
+}
+
+
+
+

Running make will create sql/myext--1.2.3.sql by copying sql/myext.sql.

+
+
+
+

5.2. What Controls the Version Number

+
+

The version number comes from META.json's provides.{extension}.version, not from your .control file’s default_version field. The .control file’s default_version is used by PostgreSQL to determine which version to install by default, but the actual version-specific file that gets generated is determined by what’s in META.json.

+
+
+

To change the version of your extension: +1. Update provides.{extension}.version in META.json +2. Run make to regenerate the version-specific file +3. Update default_version in your .control file to match (if needed)

+
+
+
+

5.3. Committing Version Files

+
+

Version-specific SQL files are now treated as permanent files that should be committed to your repository. This makes it much easier to test updates to extensions, as you can see exactly what SQL was included in each version.

+
+
+ + + + + +
+
Important
+
+These files are auto-generated and include a header comment warning not to edit them. Any manual changes will be overwritten the next time you run make. To modify the extension, edit the base SQL file (sql/{extension}.sql) instead. +
+
+
+
+

5.4. Alternative: Ignoring Version Files

+
+

If you prefer not to commit version-specific SQL files, you must add them to your .gitignore to prevent make dist from failing due to untracked files. Add the following to your .gitignore:

+
+
+
+
# Auto-generated version-specific SQL files (if not committing them)
+sql/*--*.sql
+!sql/*--*--*.sql
+
+
+
+

The second pattern line (!sql/*--*--*.sql) ensures that upgrade scripts (which contain two version numbers and should be manually written) are still tracked.

+
+
+ + + + + +
+
Warning
+
+If you ignore version files instead of committing them, they will NOT be included in your PGXN distribution (make dist uses git archive, which only includes tracked files). This means users installing your extension from PGXN will need make and PGXS available to build the extension - they cannot simply copy the SQL files into their PostgreSQL installation. For maximum compatibility, we recommend committing version files. +
+
+
+
+

5.5. Distribution Inclusion

+
+

Version-specific files are included in distributions created by make dist only if they are committed to git. Since make dist uses git archive, only tracked files are included in the distribution archive.

+
+
+
+

5.6. Multiple Versions

+
+

If you need to support multiple versions of your extension:

+
+
+
    +
  1. +

    Create additional version-specific files manually (e.g., sql/myext--1.0.0.sql, sql/myext--1.1.0.sql)

    +
  2. +
  3. +

    Create upgrade scripts for version transitions (e.g., sql/myext--1.0.0--1.1.0.sql)

    +
  4. +
  5. +

    Update META.json to reflect the current version you’re working on

    +
  6. +
  7. +

    Commit all version files and upgrade scripts to your repository

    +
  8. +
+
+
+

The version file for the current version (specified in META.json) will be automatically regenerated when you run make, but other version files you create manually will be preserved.

+
+
+
+
+
+

6. Document Handling

PGXNtool supports generation and installation of document files. There are several variables and rules that control this behavior.

@@ -665,7 +948,7 @@

4. If any generated files are missing (or out-of-date) during installation, PGXNtool will build them if Asciidoc is present on the system.

-

4.1. Document Variables

+

6.1. Document Variables

DOC_DIRS
@@ -712,7 +995,7 @@

4

-

4.2. Document Rules

+

6.2. Document Rules

If Asciidoc is found (or $(ASCIIDOC) is set), the html rule will be added as a prerequisite to the install and installcheck rules. That will ensure that docs are generated for install and test, but only if Asciidoc is available. @@ -730,7 +1013,7 @@

4.2. Docu
ASCIIDOC_template
define ASCIIDOC_template
-%.html: %.$(1) (1)
+%.html: %.$(1) # (1)
 ifndef ASCIIDOC
 	$$(warning Could not find "asciidoc" or "asciidoctor". Add one of them to your PATH,)
 	$$(warning or set ASCIIDOC to the correct location.)
@@ -754,7 +1037,7 @@ 

4.2. Docu

-

4.3. The DOCS variable

+

6.3. The DOCS variable

This variable has special meaning to PGXS. See the Postgres documentation for full details.

@@ -782,10 +1065,189 @@

4.3

- +

7. pg_tle Support

+
+

pgxntool can generate pg_tle (Trusted Language Extensions) registration SQL for deploying PostgreSQL extensions in managed environments like AWS RDS and Aurora where filesystem access is not available.

+
-

Copyright (c) 2015 Jim Nasby <Jim.Nasby@BlueTreble.com>

+

For make targets, see: pgtle, check-pgtle, run-pgtle.

+
+
+

7.1. What is pg_tle?

+
+

pg_tle is an AWS open-source framework that enables developers to create and deploy PostgreSQL extensions without filesystem access. Traditional PostgreSQL extensions require .control and .sql files on the filesystem, which isn’t possible in managed services like RDS and Aurora.

+
+
+

pg_tle solves this by: +- Storing extension metadata and SQL in database tables +- Using the pgtle_admin role for administrative operations +- Enabling CREATE EXTENSION to work in managed environments

+
+
+
+

7.2. Quick Start

+
+

Generate pg_tle registration SQL for your extension:

+
+
+
+
make pgtle
+
+
+
+

This creates files in pg_tle/ subdirectories organized by pg_tle version ranges. See pgtle_versions.md for complete version range details and API compatibility boundaries.

+
+
+
+

7.3. Version Groupings

+
+

pgxntool creates different sets of files for different pg_tle versions to handle backward-incompatible API changes. Each version boundary represents a change to pg_tle’s API functions that we use.

+
+
+

For details on version boundaries and API changes, see pgtle_versions.md.

+
+
+
+

7.4. Installation Example

+
+ + + + + +
+
Important
+
+This is only a basic example. Always refer to the main pg_tle documentation for complete installation instructions and best practices. +
+
+
+

Basic installation steps:

+
+
+
    +
  1. +

    Ensure pg_tle is installed and grant the pgtle_admin role to your user

    +
  2. +
  3. +

    Generate and run the pg_tle registration SQL files:

    +
    +
    +
    make run-pgtle
    +
    +
    +
    +

    This automatically detects your pg_tle version and runs the appropriate SQL files. See pgtle_versions.md for version range details.

    +
    +
  4. +
  5. +

    Create your extension: CREATE EXTENSION myextension;

    +
  6. +
+
+
+
+

7.5. Advanced Usage

+
+

7.5.1. Multi-Extension Projects

+
+

If your project has multiple extensions (multiple .control files), make pgtle generates files for all of them:

+
+
+
+
myproject/
+├── ext1.control
+├── ext2.control
+└── pg_tle/
+    ├── 1.0.0-1.5.0/
+    │   ├── ext1.sql
+    │   └── ext2.sql
+    └── 1.5.0+/
+        ├── ext1.sql
+        └── ext2.sql
+
+
+
+
+
+

7.6. How It Works

+
+

make pgtle does the following:

+
+
+
    +
  1. +

    Parses control file(s): Extracts comment, default_version, requires, and schema fields

    +
  2. +
  3. +

    Discovers SQL files: Finds all versioned files (sql/{ext}--{version}.sql) and upgrade scripts (sql/{ext}--{ver1}--{ver2}.sql)

    +
  4. +
  5. +

    Wraps SQL content: Uses a fixed dollar-quote delimiter ($pgtle_wrap_delimiter$) to wrap SQL for pg_tle functions

    +
  6. +
  7. +

    Generates registration SQL: Creates pgtle.install_extension() calls for each version, pgtle.install_update_path() for upgrades, and pgtle.set_default_version() for the default

    +
  8. +
  9. +

    Version-specific output: Generates separate files for different pg_tle capability levels

    +
  10. +
+
+
+

Each generated SQL file is wrapped in a transaction (BEGIN; …​ COMMIT;) to ensure atomic installation.

+
+
+
+

7.7. Troubleshooting

+
+

7.7.1. "No versioned SQL files found"

+
+

Problem: The script can’t find sql/{ext}--{version}.sql files.

+
+
+

Solution: Run make first to generate versioned files from your base sql/{ext}.sql file.

+
+
+
+

7.7.2. "Control file not found"

+
+

Problem: The script can’t find {ext}.control in the current directory.

+
+
+

Solution: Run make pgtle from your extension’s root directory (where the .control file is).

+
+
+
+

7.7.3. "SQL file contains reserved pg_tle delimiter"

+
+

Problem: Your SQL files contain the string $pgtle_wrap_delimiter$ (extremely unlikely).

+
+
+

Solution: Don’t use that dollar-quote delimiter in your code.

+
+
+
+

7.7.4. Extension uses C code

+
+

Problem: Your control file has module_pathname, indicating C code.

+
+
+

Solution: pg_tle only supports trusted languages. You cannot use C extensions with pg_tle. The script will warn you but still generate files (which won’t work).

+
+
+ + + + + +
+
Note
+
+there are several untrusted languages (such as plpython), and the only tests for C. +== Copyright +Copyright (c) 2026 Jim Nasby <Jim.Nasby@gmail.com> +

PGXNtool is released under a BSD license. Note that it includes JSON.sh, which is released under a MIT license.

@@ -793,9 +1255,11 @@
+
+
diff --git a/_.gitignore b/_.gitignore index 3eb345a..0c14928 100644 --- a/_.gitignore +++ b/_.gitignore @@ -1,11 +1,15 @@ # Editor files .*.swp +# Claude Code local settings +.claude/*.local.json + # Explicitly exclude META.json! !/META.json # Generated make files meta.mk +control.mk # Compiler output *.o @@ -13,8 +17,7 @@ meta.mk .deps/ # built targets -/sql/*--* -!/sql/*--*--*.sql +# Note: Version-specific files (sql/*--*.sql) are now tracked in git and should be committed # Test artifacts results/ @@ -24,3 +27,6 @@ regression.out # Misc tmp/ .DS_Store + +# pg_tle generated files +/pg_tle/ diff --git a/base.mk b/base.mk index a976ebb..b03a5cc 100644 --- a/base.mk +++ b/base.mk @@ -1,5 +1,8 @@ PGXNTOOL_DIR := pgxntool +# Ensure 'all' is the default target (not META.json which happens to be first) +.DEFAULT_GOAL := all + # # META.json # @@ -10,13 +13,30 @@ META.json: META.in.json $(PGXNTOOL_DIR)/build_meta.sh # # meta.mk # -# Buind meta.mk, which contains info from META.json, and include it +# Build meta.mk, which contains PGXN distribution info from META.json PGXNTOOL_distclean += meta.mk meta.mk: META.json Makefile $(PGXNTOOL_DIR)/base.mk $(PGXNTOOL_DIR)/meta.mk.sh @$(PGXNTOOL_DIR)/meta.mk.sh $< >$@ -include meta.mk +# +# control.mk +# +# Build control.mk, which contains extension info from .control files +# This is separate from meta.mk because: +# - META.json specifies PGXN distribution metadata +# - .control files specify what PostgreSQL actually uses (e.g., default_version) +# These can differ, and PostgreSQL cares about the control file version. 
+# +# Find all control files first (needed for dependencies) +PGXNTOOL_CONTROL_FILES := $(wildcard *.control) +PGXNTOOL_distclean += control.mk +control.mk: $(PGXNTOOL_CONTROL_FILES) Makefile $(PGXNTOOL_DIR)/base.mk $(PGXNTOOL_DIR)/control.mk.sh + @$(PGXNTOOL_DIR)/control.mk.sh $(PGXNTOOL_CONTROL_FILES) >$@ + +-include control.mk + DATA = $(EXTENSION_VERSION_FILES) $(wildcard sql/*--*--*.sql) DOC_DIRS += doc # NOTE: if this is empty it gets forcibly defined to NUL before including PGXS @@ -30,11 +50,18 @@ ASCIIDOC_FILES += $(foreach dir,$(DOC_DIRS),$(foreach ext,$(ASCIIDOC_EXTS),$(wil PG_CONFIG ?= pg_config TESTDIR ?= test TESTOUT ?= $(TESTDIR) -TEST_SOURCE_FILES += $(wildcard $(TESTDIR)/input/*.source) -TEST_OUT_FILES = $(subst input,output,$(TEST_SOURCE_FILES)) +# .source files are OPTIONAL - see "pg_regress workflow" comment below for details +TEST__SOURCE__INPUT_FILES += $(wildcard $(TESTDIR)/input/*.source) +TEST__SOURCE__OUTPUT_FILES += $(wildcard $(TESTDIR)/output/*.source) +TEST__SOURCE__INPUT_AS_OUTPUT = $(subst input,output,$(TEST__SOURCE__INPUT_FILES)) TEST_SQL_FILES += $(wildcard $(TESTDIR)/sql/*.sql) TEST_RESULT_FILES = $(patsubst $(TESTDIR)/sql/%.sql,$(TESTDIR)/expected/%.out,$(TEST_SQL_FILES)) -TEST_FILES = $(TEST_SOURCE_FILES) $(TEST_SQL_FILES) +TEST_FILES = $(TEST__SOURCE__INPUT_FILES) $(TEST_SQL_FILES) +# Ephemeral files generated from source files (should be cleaned) +# input/*.source → sql/*.sql (converted by pg_regress) +TEST__SOURCE__SQL_FILES = $(patsubst $(TESTDIR)/input/%.source,$(TESTDIR)/sql/%.sql,$(TEST__SOURCE__INPUT_FILES)) +# output/*.source → expected/*.out (converted by pg_regress) +TEST__SOURCE__EXPECTED_FILES = $(patsubst $(TESTDIR)/output/%.source,$(TESTDIR)/expected/%.out,$(TEST__SOURCE__OUTPUT_FILES)) REGRESS = $(sort $(notdir $(subst .source,,$(TEST_FILES:.sql=)))) # Sort is to get unique list REGRESS_OPTS = --inputdir=$(TESTDIR) --outputdir=$(TESTOUT) # See additional setup below MODULES = $(patsubst %.c,%,$(wildcard 
src/*.c)) @@ -42,7 +69,7 @@ ifeq ($(strip $(MODULES)),) MODULES =# Set to NUL so PGXS doesn't puke endif -EXTRA_CLEAN = $(wildcard ../$(PGXN)-*.zip) $(EXTENSION_VERSION_FILES) +EXTRA_CLEAN = $(wildcard ../$(PGXN)-*.zip) $(TEST__SOURCE__SQL_FILES) $(TEST__SOURCE__EXPECTED_FILES) pg_tle/ # Get Postgres version, as well as major (9.4, etc) version. # NOTE! In at least some versions, PGXS defines VERSION, so we intentionally don't use that variable @@ -70,7 +97,7 @@ DATA += $(wildcard *.control) # Don't have installcheck bomb on error .IGNORE: installcheck -installcheck: $(TEST_RESULT_FILES) $(TEST_OUT_FILES) $(TEST_SQL_FILES) $(TEST_SOURCE_FILES) +installcheck: $(TEST_RESULT_FILES) $(TEST_SQL_FILES) $(TEST__SOURCE__INPUT_FILES) | $(TESTDIR)/sql/ $(TESTDIR)/expected/ $(TESTOUT)/results/ # # TEST SUPPORT @@ -89,25 +116,81 @@ test: testdeps install installcheck # make results: runs `make test` and copy all result files to expected # DO NOT RUN THIS UNLESS YOU'RE CERTAIN ALL YOUR TESTS ARE PASSING! +# +# pg_regress workflow: +# 1. Converts input/*.source → sql/*.sql (with token substitution) +# 2. Converts output/*.source → expected/*.out (with token substitution) +# 3. Runs tests, saving actual output in results/ +# 4. Compares results/ with expected/ +# +# NOTE: Both input/*.source and output/*.source are COMPLETELY OPTIONAL and are +# very rarely needed. pg_regress does NOT create the input/ or output/ directories +# - these are optional INPUT directories that users create if they need them. +# Most extensions will never need these directories. +# +# CRITICAL: Do NOT copy files that have corresponding output/*.source files, because +# those are the source of truth and will be regenerated by pg_regress from the .source files. +# Only copy files from results/ that don't have output/*.source counterparts. 
.PHONY: results results: test - rsync -rlpgovP $(TESTOUT)/results/ $(TESTDIR)/expected + @# Copy .out files from results/ to expected/, excluding those with output/*.source counterparts + @# .out files with output/*.source counterparts are generated from .source files and should NOT be overwritten + @$(PGXNTOOL_DIR)/make_results.sh $(TESTDIR) $(TESTOUT) # testdeps is a generic dependency target that you can add targets to .PHONY: testdeps testdeps: pgtap +# +# pg_tle support - Generate pg_tle registration SQL +# + +# PGXNTOOL_CONTROL_FILES is defined above (for control.mk dependencies) +PGXNTOOL_EXTENSIONS = $(basename $(PGXNTOOL_CONTROL_FILES)) + +# Main target +# Depend on 'all' to ensure versioned SQL files are generated first +# Depend on control.mk (which defines EXTENSION_VERSION_FILES) +# Depend on control files explicitly so changes trigger rebuilds +# Generates all supported pg_tle versions for each extension +.PHONY: pgtle +pgtle: all control.mk $(PGXNTOOL_CONTROL_FILES) + @$(foreach ext,$(PGXNTOOL_EXTENSIONS),\ + $(PGXNTOOL_DIR)/pgtle.sh --extension $(ext);) + +# +# pg_tle installation support +# + +# Check if pg_tle is installed and report version +# Only reports version if CREATE EXTENSION pg_tle has been run +# Errors if pg_tle extension is not installed +# Uses pgtle.sh to get version (avoids code duplication) +.PHONY: check-pgtle +check-pgtle: + @echo "Checking pg_tle installation..." 
+ @PGTLE_VERSION=$$($(PGXNTOOL_DIR)/pgtle.sh --get-version 2>/dev/null); \ + if [ -n "$$PGTLE_VERSION" ]; then \ + echo "pg_tle extension version: $$PGTLE_VERSION"; \ + exit 0; \ + fi; \ + echo "ERROR: pg_tle extension is not installed" >&2; \ + echo " Run 'CREATE EXTENSION pg_tle;' first" >&2; \ + exit 1 + +# Run pg_tle registration SQL files +# Requires pg_tle extension to be installed (checked via check-pgtle) +# Uses pgtle.sh to determine which version range directory to use +# Assumes PG* environment variables are configured +.PHONY: run-pgtle +run-pgtle: pgtle + @$(PGXNTOOL_DIR)/pgtle.sh --run + # These targets ensure all the relevant directories exist -$(TESTDIR)/sql: - @mkdir -p $@ -$(TESTDIR)/expected/: +$(TESTDIR)/sql $(TESTDIR)/expected/ $(TESTOUT)/results/: @mkdir -p $@ $(TEST_RESULT_FILES): | $(TESTDIR)/expected/ @touch $@ -$(TESTDIR)/output/: - @mkdir -p $@ -$(TEST_OUT_FILES): | $(TESTDIR)/output/ $(TESTDIR)/expected/ $(TESTDIR)/sql/ - @touch $@ # @@ -155,14 +238,23 @@ docclean: # rmtag: git fetch origin # Update our remotes - @test -z "$$(git branch --list $(PGXNVERSION))" || git branch -d $(PGXNVERSION) - @test -z "$$(git branch --list -r origin/$(PGXNVERSION))" || git push --delete origin $(PGXNVERSION) + @test -z "$$(git tag --list $(PGXNVERSION))" || git tag -d $(PGXNVERSION) + @test -z "$$(git ls-remote --tags origin $(PGXNVERSION) | grep -v '{}')" || git push --delete origin $(PGXNVERSION) -# TODO: Don't puke if tag already exists *and is the same* tag: @test -z "$$(git status --porcelain)" || (echo 'Untracked changes!'; echo; git status; exit 1) - git branch $(PGXNVERSION) - git push --set-upstream origin $(PGXNVERSION) + @# Skip if tag already exists and points to HEAD + @if git rev-parse $(PGXNVERSION) >/dev/null 2>&1; then \ + if [ "$$(git rev-parse $(PGXNVERSION))" = "$$(git rev-parse HEAD)" ]; then \ + echo "Tag $(PGXNVERSION) already exists at HEAD, skipping"; \ + else \ + echo "ERROR: Tag $(PGXNVERSION) exists but points to different 
commit" >&2; \ + exit 1; \ + fi; \ + else \ + git tag $(PGXNVERSION); \ + fi + git push origin $(PGXNVERSION) .PHONY: forcetag forcetag: rmtag tag @@ -171,6 +263,13 @@ forcetag: rmtag tag dist: tag dist-only dist-only: + @# Check if .gitattributes exists but isn't committed + @if [ -f .gitattributes ] && ! git ls-files --error-unmatch .gitattributes >/dev/null 2>&1; then \ + echo "ERROR: .gitattributes exists but is not committed to git." >&2; \ + echo " git archive only respects export-ignore for committed files." >&2; \ + echo " Please commit .gitattributes for export-ignore to take effect." >&2; \ + exit 1; \ + fi git archive --prefix=$(PGXN)-$(PGXNVERSION)/ -o ../$(PGXN)-$(PGXNVERSION).zip $(PGXNVERSION) .PHONY: forcedist diff --git a/build_meta.sh b/build_meta.sh index 70d2273..e0fd6b2 100755 --- a/build_meta.sh +++ b/build_meta.sh @@ -1,16 +1,28 @@ #!/bin/bash +# Build META.json from META.in.json template +# +# WHY META.in.json EXISTS: +# META.in.json serves as a template that: +# 1. Shows all possible PGXN metadata fields (both required and optional) with comments +# 2. Can have empty placeholder fields like "key": "" or "key": [ "", "" ] +# 3. Users edit this to fill in their extension's metadata +# +# WHY WE GENERATE META.json: +# The reason we generate META.json from a template is to eliminate empty fields that +# are optional; PGXN.org gets upset about them. In the future it's possible we'll do +# more here (for example, if we added more info to the template we could use it to +# generate control files). +# +# WHY WE COMMIT META.json: +# PGXN.org requires META.json to be present in submitted distributions. We choose +# to commit it to git instead of manually adding it to distributions for simplicity +# (and since it generally only changes once for each new version). 
+ set -e -error () { - echo $@ >&2 -} -die () { - return=$1 - shift - error $@ - exit $return -} +BASEDIR=$(dirname "$0") +source "$BASEDIR/lib.sh" [ $# -eq 2 ] || die 2 Invalid number of arguments $# diff --git a/control.mk.sh b/control.mk.sh new file mode 100755 index 0000000..cc63cea --- /dev/null +++ b/control.mk.sh @@ -0,0 +1,90 @@ +#!/usr/bin/env bash +# +# control.mk.sh - Generate Makefile rules from PostgreSQL extension control files +# +# This script parses .control files to extract extension metadata (particularly +# default_version) and generates Make variables and rules for building versioned +# SQL files. +# +# Usage: control.mk.sh [ ...] +# +# Output (to stdout, meant to be redirected to control.mk): +# EXTENSIONS += +# EXTENSION_SQL_FILES += sql/.sql +# EXTENSION__VERSION := +# EXTENSION__VERSION_FILE = sql/--.sql +# EXTENSION_VERSION_FILES += $(EXTENSION__VERSION_FILE) +# +# +# Why control files instead of META.json? +# META.json's "provides" section specifies versions for PGXN distribution metadata. +# But PostgreSQL uses the control file's default_version to determine which +# versioned SQL file to load. These can differ (e.g., PGXN distribution version +# might be updated independently of extension version). Using the control file +# ensures the generated SQL files match what PostgreSQL expects. + +set -o errexit -o errtrace -o pipefail + +BASEDIR=$(dirname "$0") +source "$BASEDIR/lib.sh" + +# Extract default_version from a PostgreSQL extension control file +# Usage: get_control_default_version +# Errors if: +# - Control file doesn't exist +# - default_version is not specified (pgxntool requires it) +# - Multiple default_version lines exist +get_control_default_version() { + local control_file="$1" + + if [ ! 
-f "$control_file" ]; then + die 2 "Control file '$control_file' not found" + fi + + # Count default_version lines + local count + count=$(grep -cE "^[[:space:]]*default_version[[:space:]]*=" "$control_file" 2>/dev/null) || count=0 + + if [ "$count" -eq 0 ]; then + die 2 "default_version not specified in '$control_file'. PostgreSQL allows extensions without a default_version, but pgxntool requires it to generate versioned SQL files." + fi + + if [ "$count" -gt 1 ]; then + die 2 "Multiple default_version lines found in '$control_file'. Control files must have exactly one default_version." + fi + + # Extract the version value + # Handles: default_version = '1.0', default_version = "1.0", trailing comments + local version=$(grep -E "^[[:space:]]*default_version[[:space:]]*=" "$control_file" | \ + sed -e "s/^[^=]*=[[:space:]]*//" \ + -e "s/[[:space:]]*#.*//" \ + -e "s/^['\"]//;s/['\"]$//" ) + + if [ -z "$version" ]; then + die 2 "Could not parse default_version value from '$control_file'" + fi + + echo "$version" +} + +# Main: process each control file passed as argument +if [ $# -eq 0 ]; then + die 1 "Usage: control.mk.sh [ ...]" +fi + +for control_file in "$@"; do + ext=$(basename "$control_file" .control) + version=$(get_control_default_version "$control_file") + + echo "EXTENSIONS += $ext" + echo "EXTENSION_SQL_FILES += sql/${ext}.sql" + echo "EXTENSION_${ext}_VERSION := ${version}" + echo "EXTENSION_${ext}_VERSION_FILE = sql/${ext}--\$(EXTENSION_${ext}_VERSION).sql" + echo "EXTENSION_VERSION_FILES += \$(EXTENSION_${ext}_VERSION_FILE)" + echo "\$(EXTENSION_${ext}_VERSION_FILE): sql/${ext}.sql ${control_file}" + echo " @echo '/* DO NOT EDIT - AUTO-GENERATED FILE */' > \$(EXTENSION_${ext}_VERSION_FILE)" + echo " @cat sql/${ext}.sql >> \$(EXTENSION_${ext}_VERSION_FILE)" + echo +done + +# vi: expandtab ts=2 sw=2 diff --git a/lib.sh b/lib.sh new file mode 100644 index 0000000..c3eb88e --- /dev/null +++ b/lib.sh @@ -0,0 +1,40 @@ +#!/bin/bash +# lib.sh - Common utility 
functions for pgxntool scripts +# +# This file is meant to be sourced by other scripts, not executed directly. +# Usage: source "$(dirname "${BASH_SOURCE[0]}")/lib.sh" + +# Error function - outputs to stderr but doesn't exit +# Usage: error "message" +error() { + echo "ERROR: $*" >&2 +} + +# Die function - outputs error message and exits with specified code +# Usage: die EXIT_CODE "message" +die() { + local exit_code=$1 + shift + error "$@" + exit $exit_code +} + +# Debug function +# Usage: debug LEVEL "message" +# Outputs message to stderr if DEBUG >= LEVEL +# Debug levels use multiples of 10 (10, 20, 30, 40, etc.) to allow for easy expansion +# - 10: Critical errors, important warnings +# - 20: Warnings, significant state changes +# - 30: General debugging, function entry/exit, array operations +# - 40: Verbose details, loop iterations +# - 50+: Maximum verbosity +# Enable with: DEBUG=30 scriptname.sh +debug() { + local level=$1 + shift + local message="$*" + + if [ "${DEBUG:-0}" -ge "$level" ]; then + echo "DEBUG[$level]: $message" >&2 + fi +} diff --git a/make_results.sh b/make_results.sh new file mode 100755 index 0000000..066e372 --- /dev/null +++ b/make_results.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +# Helper script for make results target +# Copies .out files from results/ to expected/, excluding those with output/*.source counterparts + +set -e + +TESTDIR="${1:-test}" +TESTOUT="${2:-${TESTDIR}}" + +mkdir -p "${TESTDIR}/expected" + +# Use nullglob so globs that don't match return nothing instead of the literal pattern +shopt -s nullglob + +for result_file in "${TESTOUT}/results"/*.out; do + test_name=$(basename "$result_file" .out) + + # Check if this file has a corresponding output/*.source file + # Only consider non-empty source files (empty files are likely leftovers from pg_regress) + if [ -f "${TESTDIR}/output/${test_name}.source" ] && [ -s "${TESTDIR}/output/${test_name}.source" ]; then + echo "WARNING: ${TESTOUT}/results/${test_name}.out exists but 
will NOT be copied" >&2 + echo " (excluded because ${TESTDIR}/output/${test_name}.source exists)" >&2 + else + # Copy the file - it doesn't have an output/*.source counterpart + cp "$result_file" "${TESTDIR}/expected/${test_name}.out" + fi +done + diff --git a/meta.mk.sh b/meta.mk.sh index a5da2ec..e6cecc5 100755 --- a/meta.mk.sh +++ b/meta.mk.sh @@ -1,25 +1,38 @@ -#! /usr/bin/env bash +#!/usr/bin/env bash +# +# meta.mk.sh - Generate Makefile variables from META.json +# +# This script parses META.json (PGXN distribution metadata) and generates +# Make variables for the distribution name and version. +# +# Usage: meta.mk.sh +# +# Output (to stdout, meant to be redirected to meta.mk): +# PGXN := +# PGXNVERSION := +# +# Note: Extension-specific variables (like EXTENSION_*_VERSION) are generated +# by control.mk.sh from .control files, not from META.json. This is because +# META.json specifies PGXN distribution metadata, while .control files specify +# what PostgreSQL actually uses. set -o errexit -o errtrace -o pipefail -trap 'echo "Error on line ${LINENO}" >&2' ERR -META=$1 -BASEDIR=`dirname $0` +BASEDIR=$(dirname "$0") +source "$BASEDIR/lib.sh" + JSON_SH=$BASEDIR/JSON.sh -error () { - echo $@ >&2 -} trap 'error "Error on line ${LINENO}"' ERR -die () { - local retval=$1 - shift - error $@ - exit $retval -} +META=$1 +if [ -z "$META" ]; then + die 1 "Usage: meta.mk.sh " +fi -REQUIRED='abstract maintainer license provides name version' +if [ ! 
-f "$META" ]; then + die 2 "META.json file '$META' not found" +fi #function to get value of specified key #returns empty string if not found @@ -27,7 +40,7 @@ REQUIRED='abstract maintainer license provides name version' #usage: VAR=$(getkey foo.bar) #get value of "bar" contained within "foo" # VAR=$(getkey foo[4].bar) #get value of "bar" contained in the array "foo" on position 4 # VAR=$(getkey [4].foo) #get value of "foo" contained in the root unnamed array on position 4 -function _getkey { +_getkey() { #reformat key string (parameter) to what JSON.sh uses KEYSTRING=$(sed -e 's/\[/\"\,/g' -e 's/^\"\,/\[/g' -e 's/\]\./\,\"/g' -e 's/\./\"\,\"/g' -e '/^\[/! s/^/\[\"/g' -e '/\]$/! s/$/\"\]/g' <<< "$@") #extract the key value @@ -37,60 +50,21 @@ function _getkey { FOUT="${FOUT%\"*}" echo "$FOUT" } -function getkeys { - KEYSTRING=$(sed -e 's/\[/\"\,/g' -e 's/^\"\,/\[/g' -e 's/\]\./\,\"/g' -e 's/\./\"\,\"/g' -e '/^\[/! s/^/\[\"/g' -e '/\",\"$/! s/$/\",\"/g' <<< "$@") - #extract the key value - FOUT=$(grep -F "$KEYSTRING" <<< "$JSON_PARSED") - FOUT="${FOUT%$'\t'*}" - echo "$FOUT" -} - -#function returning length of array -#returns zero if key in parameter does not exist or is not an array -#usage: VAR=$(getarrlen foo.bar) #get length of array "bar" contained within "foo" -# VAR=$(getarrlen) #get length of the root unnamed array -# VAR=$(getarrlen [2].foo.bar) #get length of array "bar" contained within "foo", which is stored in the root unnamed array on position 2 -function getarrlen { - #reformat key string (parameter) to what JSON.sh uses - KEYSTRING=$(gsed -e '/^\[/! s/\[/\"\,/g' -e 's/\]\./\,\"/g' -e 's/\./\"\,\"/g' -e '/^$/! {/^\[/! s/^/\[\"/g}' -e '/^$/! 
s/$/\"\,/g' -e 's/\[/\\\[/g' -e 's/\]/\\\]/g' -e 's/\,/\\\,/g' -e '/^$/ s/^/\\\[/g' <<< "$@") - #extract the key array length - get last index - LEN=$(grep -o "${KEYSTRING}[0-9]*" <<< "$JSON_PARSED" | tail -n -1 | grep -o "[0-9]*$") - #increment to get length, if empty => zero - if [ -n "$LEN" ]; then - LEN=$(($LEN+1)) - else - LEN="0" - fi - echo "$LEN" -} -JSON_PARSED=$(cat $META | $JSON_SH -l) - -function getkey { +getkey() { out=$(_getkey "$@") [ -n "$out" ] || die 2 "key $@ not found in $META" echo $out } -# Handle meta-spec specially -spec_version=`getkey meta-spec.version` -[ "$spec_version" == "1.0.0" ] || die 2 "Unknown meta-spec/version: $PGXN_meta-spec_version" +JSON_PARSED=$(cat "$META" | $JSON_SH -l) + +# Validate meta-spec version +spec_version=$(getkey meta-spec.version) +[ "$spec_version" == "1.0.0" ] || die 2 "Unknown meta-spec/version: $spec_version" +# Output distribution name and version echo "PGXN := $(getkey name)" echo "PGXNVERSION := $(getkey version)" -echo - -provides=$(getkeys provides | sed -e 's/\["provides","//' -e 's/",".*//' | uniq) -for ext in $provides; do - version=$(getkey provides.${ext}.version) - [ -n "$version" ] || die 2 "provides/${ext} does not specify a version number" - echo "EXTENSIONS += $ext" - echo "EXTENSION_SQL_FILES += sql/${ext}.sql" - echo "EXTENSION_${ext}_VERSION := ${version}" - echo "EXTENSION_${ext}_VERSION_FILE = sql/${ext}--\$(EXTENSION_${ext}_VERSION).sql" - echo "EXTENSION_VERSION_FILES += \$(EXTENSION_${ext}_VERSION_FILE)" - echo "\$(EXTENSION_${ext}_VERSION_FILE): sql/${ext}.sql META.json meta.mk" - echo ' cp $< $@' -done # vi: expandtab ts=2 sw=2 diff --git a/pgtle.sh b/pgtle.sh new file mode 100755 index 0000000..8fd2d17 --- /dev/null +++ b/pgtle.sh @@ -0,0 +1,849 @@ +#!/bin/bash +# +# pgtle.sh - Generate pg_tle registration SQL for PostgreSQL extensions +# +# Part of pgxntool: https://github.com/decibel/pgxntool +# +# SYNOPSIS +# pgtle.sh --extension EXTNAME [--pgtle-version VERSION] +# pgtle.sh 
--get-dir VERSION +# pgtle.sh --get-version +# pgtle.sh --run +# +# DESCRIPTION +# Generates pg_tle (Trusted Language Extensions) registration SQL from +# a pgxntool-based PostgreSQL extension. Reads the extension's .control +# file and SQL files, wrapping them for pg_tle deployment in managed +# environments like AWS RDS and Aurora. +# +# pg_tle enables extension installation without filesystem access by +# storing extension code in database tables. This script converts +# traditional PostgreSQL extensions into pg_tle-compatible SQL. +# +# OPTIONS +# --extension NAME +# Extension name (required). Must match a .control file basename +# in the current directory. +# +# --pgtle-version VERSION +# Generate for specific pg_tle version only (optional). +# Format: 1.0.0-1.4.0, 1.4.0-1.5.0, or 1.5.0+ +# Default: Generate all supported versions +# +# --get-dir VERSION +# Returns the directory path for the given pg_tle version. +# Format: VERSION is a version string like "1.5.2" +# Output: Directory path like "pg_tle/1.5.0+", "pg_tle/1.4.0-1.5.0", or "pg_tle/1.0.0-1.4.0" +# This option is used by make to determine which directory to use +# +# --get-version +# Returns the installed pg_tle version from the database. +# Output: Version string like "1.5.2" or empty if not installed +# Exit status: 0 if pg_tle is installed, 1 if not installed +# +# --run +# Runs the generated pg_tle registration SQL files. 
This option: +# - Detects the installed pg_tle version from the database +# - Determines the appropriate directory using --get-dir logic +# - Executes all SQL files in that directory via psql +# - Assumes PG* environment variables are configured for psql +# +# VERSION NOTATION +# X.Y.Z+ Works on pg_tle >= X.Y.Z +# X.Y.Z-A.B.C Works on pg_tle >= X.Y.Z and < A.B.C +# +# Note the boundary conditions: +# 1.5.0+ means >= 1.5.0 (includes 1.5.0) +# 1.4.0-1.5.0 means >= 1.4.0 and < 1.5.0 (excludes 1.5.0) +# 1.0.0-1.4.0 means >= 1.0.0 and < 1.4.0 (excludes 1.4.0) +# +# SUPPORTED VERSIONS +# 1.0.0-1.4.0 pg_tle 1.0.0 through 1.3.x (no uninstall function, no schema parameter) +# 1.4.0-1.5.0 pg_tle 1.4.0 through 1.4.x (has uninstall function, no schema parameter) +# 1.5.0+ pg_tle 1.5.0 and later (has uninstall function, schema parameter support) +# +# EXAMPLES +# # Generate all versions (default) +# pgtle.sh --extension myext +# +# # Generate only for pg_tle 1.5+ +# pgtle.sh --extension myext --pgtle-version 1.5.0+ +# +# # Get directory for a specific pg_tle version +# pgtle.sh --get-dir 1.5.2 +# # Output: pg_tle/1.5.0+ +# +# pgtle.sh --get-dir 1.4.2 +# # Output: pg_tle/1.4.0-1.5.0 +# +# # Get installed pg_tle version from database +# pgtle.sh --get-version +# # Output: 1.5.2 (or empty if not installed) +# +# # Run generated pg_tle registration SQL files +# pgtle.sh --run +# +# OUTPUT +# Creates files in version-specific subdirectories: +# pg_tle/1.0.0-1.4.0/{extension}.sql +# pg_tle/1.4.0-1.5.0/{extension}.sql +# pg_tle/1.5.0+/{extension}.sql +# +# Each file contains: +# - All versions of the extension +# - All upgrade paths between versions +# - Default version configuration +# - Complete installation instructions +# +# For --get-dir: Outputs the directory path to stdout. +# +# For --get-version: Outputs the installed pg_tle version to stdout, or empty if not installed. +# +# For --run: Executes SQL files and outputs progress messages to stderr. 
+# +# REQUIREMENTS +# - Must run from extension directory (where .control files are) +# - Extension must use only trusted languages (PL/pgSQL, SQL, PL/Perl, etc.) +# - No C code (module_pathname not supported by pg_tle) +# - Versioned SQL files must exist: sql/{ext}--{version}.sql +# +# EXIT STATUS +# 0 Success +# 1 Error (missing files, validation failure, C code detected, etc.) +# +# SEE ALSO +# pgxntool/README-pgtle.md - Complete user guide +# https://github.com/aws/pg_tle - pg_tle documentation +# + +set -eo pipefail + +# Source common library functions (error, die, debug) +PGXNTOOL_DIR="$(dirname "${BASH_SOURCE[0]}")" +source "$PGXNTOOL_DIR/lib.sh" + +# Constants +PGTLE_DELIMITER='$_pgtle_wrap_delimiter_$' +PGTLE_VERSIONS=("1.0.0-1.4.0" "1.4.0-1.5.0" "1.5.0+") + +# Supported pg_tle version ranges and their capabilities +# Use a function instead of associative array for compatibility with bash < 4.0 +get_pgtle_capability() { + local version="$1" + case "$version" in + "1.0.0-1.4.0") + echo "no_uninstall_no_schema" + ;; + "1.4.0-1.5.0") + echo "has_uninstall_no_schema" + ;; + "1.5.0+") + echo "has_uninstall_has_schema" + ;; + *) + echo "unknown" + ;; + esac +} + +# Global variables (populated from control file) +EXTENSION="" +DEFAULT_VERSION="" +COMMENT="" +REQUIRES="" +SCHEMA="" +MODULE_PATHNAME="" +VERSION_FILES=() +UPGRADE_FILES=() + +debug 30 "Global arrays initialized: VERSION_FILES=${#VERSION_FILES[@]}, UPGRADE_FILES=${#UPGRADE_FILES[@]}" +PGTLE_VERSION="" # Empty = generate all +GET_DIR_VERSION="" # For --get-dir option + +# Arrays (populated from SQL discovery) +VERSION_FILES=() +UPGRADE_FILES=() + +# Parse and validate a version string +# Extracts numeric version (major.minor.patch) from version strings +# Handles versions with suffixes like "1.5.0alpha1", "2.0beta", "1.2.3dev" +# Returns: numeric version string (e.g., "1.5.0") or exits with error +parse_version() { + local version="$1" + + if [ -z "$version" ]; then + die 1 "Version string is empty" + 
fi + + # Extract numeric version part (major.minor.patch) + # Matches: 1.5.0, 1.5, 10.2.1alpha, 2.0beta1, etc. + # Pattern: start of string, then digits, dot, digits, optionally (dot digits), then anything + local numeric_version + if [[ "$version" =~ ^([0-9]+\.[0-9]+(\.[0-9]+)?) ]]; then + numeric_version="${BASH_REMATCH[1]}" + else + die 1 "Cannot parse version string: '$version' + Expected format: major.minor[.patch][suffix] + Examples: 1.5.0, 1.5, 2.0alpha1, 10.2.3dev" + fi + + # Ensure we have at least major.minor (add .0 if needed) + if [[ ! "$numeric_version" =~ \. ]]; then + die 1 "Invalid version format: '$version' (need at least major.minor)" + fi + + # If we only have major.minor, add .0 for patch + if [[ ! "$numeric_version" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then + numeric_version="${numeric_version}.0" + fi + + echo "$numeric_version" +} + +# Convert version string to comparable integer +# Takes a numeric version string (major.minor.patch) and converts to integer +# Example: "1.5.0" -> 1005000 +# Encoding scheme: major * 1000000 + minor * 1000 + patch +# This limits each component to 0-999 to prevent overflow +version_to_number() { + local version="$1" + + # Parse major.minor.patch + local major minor patch + if [[ "$version" =~ ^([0-9]+)\.([0-9]+)\.([0-9]+)$ ]]; then + major="${BASH_REMATCH[1]}" + minor="${BASH_REMATCH[2]}" + patch="${BASH_REMATCH[3]}" + else + die 1 "version_to_number: Invalid numeric version format: '$version'" + fi + + # Check for overflow in encoding scheme + # Each component must be < 1000 to fit in the allocated space + if [ "$major" -ge 1000 ]; then + die 1 "version_to_number: Major version too large: $major (max 999) + Version: $version" + fi + if [ "$minor" -ge 1000 ]; then + die 1 "version_to_number: Minor version too large: $minor (max 999) + Version: $version" + fi + if [ "$patch" -ge 1000 ]; then + die 1 "version_to_number: Patch version too large: $patch (max 999) + Version: $version" + fi + + # Convert to comparable 
number: major * 1000000 + minor * 1000 + patch + echo $(( major * 1000000 + minor * 1000 + patch )) +} + +# Get directory for a given pg_tle version +# Takes a version string like "1.5.2" and returns the directory path +# Handles versions with suffixes (e.g., "1.5.0alpha1") +# Returns: "pg_tle/1.0.0-1.4.0", "pg_tle/1.4.0-1.5.0", or "pg_tle/1.5.0+" +get_version_dir() { + local version="$1" + + if [ -z "$version" ]; then + die 1 "Version required for --get-dir (got empty string)" + fi + + # Parse and validate version + local numeric_version + numeric_version=$(parse_version "$version") + + # Check if the original version has a pre-release suffix + # Pre-release versions (alpha, beta, rc, dev) are considered BEFORE the release + # Example: 1.4.0alpha1 comes BEFORE 1.4.0, so it should use the 1.0.0-1.4.0 range + local has_prerelease=0 + if [[ "$version" =~ (alpha|beta|rc|dev) ]]; then + has_prerelease=1 + fi + + # Convert versions to comparable numbers + local version_num + local threshold_1_4_num + local threshold_1_5_num + version_num=$(version_to_number "$numeric_version") + threshold_1_4_num=$(version_to_number "1.4.0") + threshold_1_5_num=$(version_to_number "1.5.0") + + # Compare and return appropriate directory: + # < 1.4.0 -> 1.0.0-1.4.0 + # >= 1.4.0 and < 1.5.0 -> 1.4.0-1.5.0 + # >= 1.5.0 -> 1.5.0+ + # + # Special handling for pre-release versions: + # If version equals a threshold but has a pre-release suffix, treat it as less than that threshold + # Example: 1.4.0alpha1 is treated as < 1.4.0, so it uses 1.0.0-1.4.0 + if [ "$version_num" -lt "$threshold_1_4_num" ]; then + echo "pg_tle/1.0.0-1.4.0" + elif [ "$version_num" -eq "$threshold_1_4_num" ] && [ "$has_prerelease" -eq 1 ]; then + # Pre-release of 1.4.0 is considered < 1.4.0 + echo "pg_tle/1.0.0-1.4.0" + elif [ "$version_num" -lt "$threshold_1_5_num" ]; then + echo "pg_tle/1.4.0-1.5.0" + elif [ "$version_num" -eq "$threshold_1_5_num" ] && [ "$has_prerelease" -eq 1 ]; then + # Pre-release of 1.5.0 is 
considered < 1.5.0 + echo "pg_tle/1.4.0-1.5.0" + else + echo "pg_tle/1.5.0+" + fi +} + +# Get pg_tle version from installed extension +# Returns version string or empty if not installed +get_pgtle_version() { + psql --no-psqlrc --tuples-only --no-align --command "SELECT extversion FROM pg_extension WHERE extname = 'pg_tle';" 2>/dev/null | tr -d '[:space:]' || echo "" +} + +# Run pg_tle registration SQL files +# Detects installed pg_tle version and runs appropriate SQL files +run_pgtle_sql() { + echo "Running pg_tle registration SQL files..." >&2 + + # Get version from installed extension + local pgtle_version=$(get_pgtle_version) + if [ -z "$pgtle_version" ]; then + die 1 "pg_tle extension is not installed + Run 'CREATE EXTENSION pg_tle;' first, or use 'make check-pgtle' to verify" + fi + + # Get directory for this version + local pgtle_dir=$(get_version_dir "$pgtle_version") + if [ -z "$pgtle_dir" ]; then + die 1 "Failed to determine pg_tle directory for version $pgtle_version" + fi + + echo "Using pg_tle files for version $pgtle_version (directory: $pgtle_dir)" >&2 + + # Check if directory exists + if [ ! -d "$pgtle_dir" ]; then + die 1 "pg_tle directory $pgtle_dir does not exist + Run 'make pgtle' first to generate files" + fi + + # Run all SQL files in the directory + local sql_file + local found=0 + for sql_file in "$pgtle_dir"/*.sql; do + if [ -f "$sql_file" ]; then + found=1 + echo "Running $sql_file..." 
>&2 + psql --no-psqlrc --file="$sql_file" || exit 1 + fi + done + + if [ "$found" -eq 0 ]; then + die 1 "No SQL files found in $pgtle_dir + Run 'make pgtle' first to generate files" + fi + + echo "pg_tle registration complete" >&2 +} + +# Main logic +main() { + # Handle --get-dir, --get-version, --test-function, and --run options first (early exit, before other validation) + local args=("$@") + local i=0 + while [ $i -lt ${#args[@]} ]; do + if [ "${args[$i]}" = "--get-dir" ] && [ $((i+1)) -lt ${#args[@]} ]; then + get_version_dir "${args[$((i+1))]}" + exit 0 + elif [ "${args[$i]}" = "--get-version" ]; then + local version=$(get_pgtle_version) + if [ -n "$version" ]; then + echo "$version" + exit 0 + else + exit 1 + fi + elif [ "${args[$i]}" = "--test-function" ] && [ $((i+1)) -lt ${#args[@]} ]; then + # Hidden option for testing internal functions + # NOT a supported public interface - used only by the test suite + # Usage: pgtle.sh --test-function FUNC_NAME [ARGS...] + local func_name="${args[$((i+1))]}" + shift $((i+2)) # Remove script name and --test-function and func_name + + # Check if function exists + if ! declare -f "$func_name" >/dev/null 2>&1; then + die 1 "Function '$func_name' does not exist" + fi + + # Call the function with remaining arguments + "$func_name" "${args[@]:$((i+2))}" + exit $? 
+ elif [ "${args[$i]}" = "--run" ]; then + run_pgtle_sql + exit 0 + fi + i=$((i+1)) + done + + # Parse other arguments + parse_args "$@" + + validate_environment + parse_control_file + discover_sql_files + + if [ -z "$PGTLE_VERSION" ]; then + # Generate all versions + for version in "${PGTLE_VERSIONS[@]}"; do + generate_pgtle_sql "$version" + done + else + # Generate specific version + generate_pgtle_sql "$PGTLE_VERSION" + fi +} + +parse_args() { + while [[ $# -gt 0 ]]; do + case $1 in + --extension) + EXTENSION="$2" + shift 2 + ;; + --pgtle-version) + PGTLE_VERSION="$2" + shift 2 + ;; + --get-dir) # This case should ideally not be hit due to early exit + GET_DIR_VERSION="$2" + shift 2 + ;; + --get-version) # This case should ideally not be hit due to early exit + shift + ;; + --test-function) # Hidden option for testing - not documented, not supported + shift 2 # Skip function name and --test-function + ;; + --run) # This case should ideally not be hit due to early exit + shift + ;; + *) + echo "Unknown option: $1" >&2 + exit 1 + ;; + esac + done + + if [ -z "$EXTENSION" ] && [ -z "$GET_DIR_VERSION" ]; then + die 1 "--extension is required (unless using --get-dir, --get-version, --test-function, or --run)" + fi +} + +validate_environment() { + # Check if control file exists + if [ ! 
-f "${EXTENSION}.control" ]; then + die 1 "Control file not found: ${EXTENSION}.control + Must run from extension directory" + fi +} + +parse_control_file() { + local control_file="${EXTENSION}.control" + + echo "Parsing control file: $control_file" >&2 + + # Parse key = value or key = 'value' format + while IFS= read -r line; do + # Skip comments and empty lines + [[ "$line" =~ ^[[:space:]]*# ]] && continue + [[ "$line" =~ ^[[:space:]]*$ ]] && continue + + # Extract key = value + if [[ "$line" =~ ^[[:space:]]*([a-z_]+)[[:space:]]*=[[:space:]]*(.*)[[:space:]]*$ ]]; then + local key="${BASH_REMATCH[1]}" + local value="${BASH_REMATCH[2]}" + + # Strip quotes if present (both single and double) + value="${value#\'}" + value="${value%\'}" + value="${value#\"}" + value="${value%\"}" + + # Trim trailing whitespace/comments + value="${value%%#*}" # Remove trailing comments + value="${value%% }" # Trim trailing spaces + + # Store in global variables + case "$key" in + default_version) DEFAULT_VERSION="$value" ;; + comment) COMMENT="$value" ;; + requires) REQUIRES="$value" ;; + schema) SCHEMA="$value" ;; + module_pathname) MODULE_PATHNAME="$value" ;; + esac + fi + done < "$control_file" + + # Validate required fields + if [ -z "$DEFAULT_VERSION" ]; then + die 1 "Control file missing default_version" + fi + + if [ -z "$COMMENT" ]; then + echo "WARNING: Control file missing comment, using extension name" >&2 + COMMENT="$EXTENSION extension" + fi + + # Warn about C code + if [ -n "$MODULE_PATHNAME" ]; then + cat >&2 <<-EOF + WARNING: Extension uses module_pathname (C code) + pg_tle only supports trusted languages (PL/pgSQL, SQL, etc.) 
+ Generated SQL will likely not work + EOF + fi + + echo " default_version: $DEFAULT_VERSION" >&2 + echo " comment: $COMMENT" >&2 + if [ -n "$REQUIRES" ]; then + echo " requires: $REQUIRES" >&2 + fi + if [ -n "$SCHEMA" ]; then + echo " schema: $SCHEMA" >&2 + fi +} + +discover_sql_files() { + echo "Discovering SQL files for extension: $EXTENSION" >&2 + debug 30 "discover_sql_files: Starting discovery for extension: $EXTENSION" + + # Ensure default_version file exists and has content if base file exists + # This handles the case where make all hasn't generated it yet, or it exists but is empty + local default_version_file="sql/${EXTENSION}--${DEFAULT_VERSION}.sql" + local base_file="sql/${EXTENSION}.sql" + if [ -f "$base_file" ] && ([ ! -f "$default_version_file" ] || [ ! -s "$default_version_file" ]); then + debug 30 "discover_sql_files: Creating default_version file from base file" + cp "$base_file" "$default_version_file" + fi + + # Find versioned files: sql/{ext}--{version}.sql + # Use find to get proper null-delimited output, then filter out upgrade scripts + VERSION_FILES=() # Reset array + debug 30 "discover_sql_files: Reset VERSION_FILES array" + while IFS= read -r -d '' file; do + local basename=$(basename "$file" .sql) + local dash_count=$(echo "$basename" | grep -o -- "--" | wc -l | tr -d '[:space:]') + # Skip upgrade scripts (they have 2 dashes) + if [ "$dash_count" -ne 1 ]; then + continue + fi + # Error on empty version files + if [ ! -s "$file" ]; then + die 1 "Empty version file found: $file" + fi + VERSION_FILES+=("$file") + done < <(find sql/ -maxdepth 1 -name "${EXTENSION}--*.sql" -print0 2>/dev/null | sort -zV) + + # Find upgrade scripts: sql/{ext}--{ver1}--{ver2}.sql + # These have TWO occurrences of "--" in the filename + UPGRADE_FILES=() # Reset array + debug 30 "discover_sql_files: Reset UPGRADE_FILES array" + while IFS= read -r -d '' file; do + # Error on empty upgrade files + if [ ! 
-s "$file" ]; then + die 1 "Empty upgrade file found: $file" + fi + local basename=$(basename "$file" .sql) + local dash_count=$(echo "$basename" | grep -o -- "--" | wc -l | tr -d '[:space:]') + if [ "$dash_count" -eq 2 ]; then + UPGRADE_FILES+=("$file") + fi + done < <(find sql/ -maxdepth 1 -name "${EXTENSION}--*--*.sql" -print0 2>/dev/null | sort -zV) + + if [ ${#VERSION_FILES[@]} -eq 0 ]; then + die 1 "No versioned SQL files found for $EXTENSION + Expected pattern: sql/${EXTENSION}--{version}.sql + Run 'make' first to generate versioned files from sql/${EXTENSION}.sql" + fi + + echo " Found ${#VERSION_FILES[@]} version file(s):" >&2 + for f in "${VERSION_FILES[@]}"; do + echo " - $f" >&2 + done + + debug 30 "discover_sql_files: Checking UPGRADE_FILES array, count=${#UPGRADE_FILES[@]:-0}" + if [ ${#UPGRADE_FILES[@]:-0} -gt 0 ]; then + echo " Found ${#UPGRADE_FILES[@]} upgrade script(s):" >&2 + debug 30 "discover_sql_files: Iterating over ${#UPGRADE_FILES[@]} upgrade files" + for f in "${UPGRADE_FILES[@]}"; do + echo " - $f" >&2 + done + else + debug 30 "discover_sql_files: No upgrade files found" + fi +} + +extract_version_from_filename() { + local filename="$1" + local basename=$(basename "$filename" .sql) + + # Match patterns: + # - ext--1.0.0 → FROM_VERSION=1.0.0, TO_VERSION="" + # - ext--1.0.0--2.0.0 → FROM_VERSION=1.0.0, TO_VERSION=2.0.0 + + if [[ "$basename" =~ ^${EXTENSION}--([0-9][0-9.]*)(--([0-9][0-9.]*))?$ ]]; then + FROM_VERSION="${BASH_REMATCH[1]}" + TO_VERSION="${BASH_REMATCH[3]}" # Empty for non-upgrade files + return 0 + else + die 1 "Cannot parse version from filename: $filename + Expected format: ${EXTENSION}--{version}.sql or ${EXTENSION}--{ver1}--{ver2}.sql" + fi +} + +validate_delimiter() { + local sql_file="$1" + + if grep -qF "$PGTLE_DELIMITER" "$sql_file"; then + die 1 "SQL file contains reserved pg_tle delimiter: $sql_file + Found: $PGTLE_DELIMITER + This delimiter is used internally by pgtle.sh to wrap SQL content. 
+ You must modify your SQL to not contain this string. If this poses a + serious problem, please open an issue at https://github.com/decibel/pgxntool/issues" + fi +} + +wrap_sql_content() { + local sql_file="$1" + + validate_delimiter "$sql_file" + + # Output wrapped SQL with proper indentation + echo " ${PGTLE_DELIMITER}" + cat "$sql_file" + echo " ${PGTLE_DELIMITER}" +} + +build_requires_array() { + # Input: "plpgsql, other_ext, another" + # Output: 'plpgsql', 'other_ext', 'another' + + # Split on comma, trim whitespace, quote each element + REQUIRES_ARRAY=$(echo "$REQUIRES" | \ + sed 's/[[:space:]]*,[[:space:]]*/\n/g' | \ + sed "s/^[[:space:]]*//;s/[[:space:]]*$//" | \ + sed "s/^/'/;s/$/'/" | \ + paste -sd, -) +} + +generate_header() { + local pgtle_version="$1" + local output_file="$2" + local version_count=${#VERSION_FILES[@]:-0} + local upgrade_count=${#UPGRADE_FILES[@]:-0} + + # Determine version compatibility message + local compat_msg + if [[ "$pgtle_version" == *"+"* ]]; then + local base_version="${pgtle_version%+}" + compat_msg="-- Works on pg_tle >= ${base_version}" + else + local min_version="${pgtle_version%-*}" + local max_version="${pgtle_version#*-}" + compat_msg="-- Works on pg_tle >= ${min_version} and < ${max_version}" + fi + + cat < $to_ver" + echo "SELECT pgtle.install_update_path(" + echo " '${EXTENSION}'," + echo " '${from_ver}'," + echo " '${to_ver}'," + wrap_sql_content "$upgrade_file" + echo ");" + echo +} + +generate_pgtle_sql() { + local pgtle_version="$1" + debug 30 "generate_pgtle_sql: Starting for version $pgtle_version, extension $EXTENSION" + + # Get capability using function (compatible with bash < 4.0) + local capability=$(get_pgtle_capability "$pgtle_version") + local version_dir="pg_tle/${pgtle_version}" + local output_file="${version_dir}/${EXTENSION}.sql" + + # Ensure arrays are initialized (defensive programming) + # Arrays should already be initialized at top level, but ensure they exist + debug 30 "generate_pgtle_sql: 
Checking array initialization" + debug 30 "generate_pgtle_sql: VERSION_FILES is ${VERSION_FILES+set}, count=${#VERSION_FILES[@]:-0}" + debug 30 "generate_pgtle_sql: UPGRADE_FILES is ${UPGRADE_FILES+set}, count=${#UPGRADE_FILES[@]:-0}" + + if [ -z "${VERSION_FILES+set}" ]; then + echo "WARNING: VERSION_FILES not set, initializing" >&2 + VERSION_FILES=() + fi + if [ -z "${UPGRADE_FILES+set}" ]; then + echo "WARNING: UPGRADE_FILES not set, initializing" >&2 + UPGRADE_FILES=() + fi + + # Create version-specific output directory if needed + mkdir -p "$version_dir" + + echo "Generating: $output_file (pg_tle $pgtle_version)" >&2 + + # Generate SQL to file + { + generate_header "$pgtle_version" "$output_file" + + cat < "$output_file" + + echo " ✓ Generated: $output_file" >&2 +} + +main "$@" + diff --git a/pgtle_versions.md b/pgtle_versions.md new file mode 100644 index 0000000..d2c5c03 --- /dev/null +++ b/pgtle_versions.md @@ -0,0 +1,47 @@ +# pg_tle Version Support Matrix + +This file documents pg_tle version boundaries that affect pgxntool's pg_tle support code. Each boundary represents a backward-incompatible API change. 
+ +## Version Ranges (pgxntool notation) + +### 1.0.0-1.4.0 +- **pg_tle versions:** 1.0.0 through 1.3.x +- **PostgreSQL support:** 11-17 +- **API:** No `pgtle.uninstall_extension()` function, no schema parameter +- **Features:** Basic extension management, custom data types, authentication hooks + +### 1.4.0-1.5.0 +- **pg_tle versions:** 1.4.0 through 1.4.x +- **PostgreSQL support:** 11-17 +- **API:** Added `pgtle.uninstall_extension()` function, no schema parameter +- **Features:** Custom alignment/storage, enhanced warnings + +### 1.5.0+ +- **pg_tle versions:** 1.5.0 and later (tested through 1.5.2) +- **PostgreSQL support:** 12-18 (dropped PG 11) +- **API:** BREAKING CHANGE - `pgtle.install_extension()` now requires schema parameter +- **Features:** Schema parameter support in installation + +## Key API Changes by Version + +**1.4.0:** Added `pgtle.uninstall_extension()` +- Versions before 1.4.0 cannot uninstall extensions + +**1.5.0:** Changed `pgtle.install_extension()` signature +- Added required `schema` parameter +- Dropped PostgreSQL 11 support + +## Version Notation + +- `X.Y.Z+` - Works on pg_tle >= X.Y.Z +- `X.Y.Z-A.B.C` - Works on pg_tle >= X.Y.Z and < A.B.C + +**Boundary conditions:** +- `1.5.0+` means >= 1.5.0 (includes 1.5.0) +- `1.4.0-1.5.0` means >= 1.4.0 and < 1.5.0 (excludes 1.5.0) +- `1.0.0-1.4.0` means >= 1.0.0 and < 1.4.0 (excludes 1.4.0) + +## For Complete Details + +- `pgtle.sh` (comments at top) +- https://github.com/aws/pg_tle diff --git a/setup.sh b/setup.sh index 881ccaa..08751f1 100755 --- a/setup.sh +++ b/setup.sh @@ -3,6 +3,10 @@ set -o errexit -o errtrace -o pipefail trap 'echo "Error on line ${LINENO}"' ERR +# Source common library functions (error, die, debug) +PGXNTOOL_DIR="$(dirname "${BASH_SOURCE[0]}")" +source "$PGXNTOOL_DIR/lib.sh" + [ -d .git ] || git init if ! 
git diff --cached --exit-code; then From 4b86e4f641fd585242edb618a8ffb0ca05ba38de Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Thu, 22 Jan 2026 17:25:15 -0600 Subject: [PATCH 18/18] Update .gitignore from pgxntool template --- .gitignore | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/.gitignore b/.gitignore index adffc88..0c14928 100644 --- a/.gitignore +++ b/.gitignore @@ -1,11 +1,15 @@ # Editor files .*.swp +# Claude Code local settings +.claude/*.local.json + # Explicitly exclude META.json! !/META.json # Generated make files meta.mk +control.mk # Compiler output *.o @@ -13,11 +17,7 @@ meta.mk .deps/ # built targets -/sql/*--* -!/sql/*--*--*.sql - -# test targets -/test/.build/ +# Note: Version-specific files (sql/*--*.sql) are now tracked in git and should be committed # Test artifacts results/ @@ -27,4 +27,6 @@ regression.out # Misc tmp/ .DS_Store -.claude/*.local.json + +# pg_tle generated files +/pg_tle/