diff --git a/doc/pgtap.mmd b/doc/pgtap.mmd index f3ca45fc..9311beb0 100644 --- a/doc/pgtap.mmd +++ b/doc/pgtap.mmd @@ -8551,6 +8551,278 @@ table in a bunch of schemas: FROM (VALUES('schema1'), ('schema1')) AS stmp (sch) CROSS JOIN (VALUES('col_pk'), ('col2'), ('col3')) AS ctmp (col); +Mocking, faking and making your test independent +================================================ + +Examples of implementing mocking and faking functionality can be found in my fork [`on GitHub`](https://github.com/v-maliutin/pgtap/tree/examples_forking_and_mocking/examples). + +Faking +------ + +Let's say we have a main table and a subordinate table. + + CREATE TABLE master( + some_id int primary key + ); + + CREATE TABLE child( + master_id int, + some_data numeric, + CONSTRAINT child_fk FOREIGN KEY(master_id) REFERENCES master(some_id) + ); + + --Your function simply sums up the values of the some_data column, filtering them by the master_id field. + + SELECT SUM(some_data) as sm FROM child WHERE master_id = $1; + +The main point here is that the master table is intended exclusively for user input, +and you cannot make any assumptions about the values stored in the some_id field. +Consequently, you will not be able to insert any data into the child table until +the corresponding data exists in the master table. + +What you can do is drop the foreign key constraint, insert some data into the child table only, +and perform any necessary checks. + +Please, do not worry that you've lost the relationship between tables and thus compromised relational integrity. +It's always well known which data are invalid for your system. Simply don't insert them. That's it. 
+ +### `fake_table()` ### + + PERFORM fake_table( :_table_ident, :_make_table_empty, :_leave_primary_key, :_drop_not_null, :_drop_collation, :_drop_partitions ); + PERFORM fake_table( :_table_ident, :_make_table_empty, :_leave_primary_key, :_drop_not_null, :_drop_collation ); + PERFORM fake_table( :_table_ident, :_make_table_empty, :_leave_primary_key, :_drop_not_null ); + PERFORM fake_table( :_table_ident, :_make_table_empty, :_leave_primary_key ); + PERFORM fake_table( :_table_ident, :_make_table_empty ); + PERFORM fake_table( :_table_ident ); + +**Parameters** + +`:_table_ident` +: Text array of tables to be faked in the format 'my_schema.my_table'. Required. + +`:_make_table_empty` +: Boolean. If TRUE the desired tables will be truncated. Optional. FALSE by default. + +`:_leave_primary_key` +: Boolean. TRUE if you want the primary key to stay as is. Optional. FALSE by default. + +`:_drop_not_null` +: Boolean. TRUE if you want NOT NULL constraints to be dropped. Optional. FALSE by default. + +`:_drop_collation` +: Boolean. Not yet implemented. Optional. FALSE by default. + +`:_drop_partitions` +: Boolean. If TRUE any declarative partition of a table will be dropped. Works starting with PostgreSQL version 10. +Optional. FALSE by default. + +Sometimes a table contains a lot of junk data, especially in a development environment. +However, to ensure that your test runs on completely valid data, you clear the table before testing, +insert valid data, and then run the check. This is why the _make_table_empty parameter is useful. +If you follow the steps outlined above, you can be assured that your test does not rely on +junk data and can be reproduced at any time. Example. + + perform fake_table( + _table_ident => '{pgconf.account, pgconf.analytic, pgconf.osv, pgconf.transactions}'::text[], + _make_table_empty => true, + _leave_primary_key => false, + _drop_not_null => false, + _drop_collation => false + ); + + One note. 
The parameters _leave_primary_key and _drop_not_null are currently contradictory. + If you'd like to keep a primary key while making all columns nullable + (_leave_primary_key => true, _drop_not_null => true), you'll receive a runtime error. + This depends on whether the table truly has a primary key. I'm addressing this issue, but meanwhile, + the solution is to set _leave_primary_key => false, _drop_not_null => true, and then immediately + following the execution of 'fake_table', simply execute a command to create your primary key, + e.g., alter table my_table add constraint... + + In general, it's important to remember that there's no way to create a primary key consisting + of nullable columns. Furthermore, when creating a primary key across multiple columns, + PostgreSQL automatically enforces a NOT NULL constraint on each one. + +Mocking +------- + +Imagine a situation where you have one function that is called by another function. +The first function (the inner one) has complex logic and depends on multiple tables. +The second function (the outer one) simply performs a few actions using the result +returned by the first function. Should you populate tables required for the inner function +with test case data for every test scenario when testing the outer function? Good news: you shouldn't. +Simply create a mock for each test scenario and invoke it like a regular inner function. +The mock will bear the same name as your inner function, but will act instead of your real function. + + CREATE OR REPLACE FUNCTION pgconf.time_machine_now() + RETURNS time + LANGUAGE sql + AS $$ + SELECT now()::time; + $$; + + PERFORM mock_func('pgconf', 'time_machine_now', '()' + , _return_scalar_value => '13:00'::time); + +In the script above you can see how to create a mock for the 'time_machine_now()' function. +In production 'time_machine_now()' will return the current time. In a test, if the function has been mocked, +it will always return 13:00. This is a scalar mock. 
+ +Another mock you can create is a return-set mock with an SQL string. + + CREATE OR REPLACE FUNCTION pgconf.time_machine_now() + RETURNS TABLE ( t time ) + LANGUAGE sql + AS $$ + SELECT now()::time; + $$; + + PERFORM tap.mock_func('pgconf', 'time_machine_now', '()' + , _return_set_value => 'select ''13:00''::time as t'); + + And my favorite case is + + PREPARE mock_time_machine_now AS SELECT '14:00'::time AS t; + PERFORM tap.mock_func('pgconf', 'time_machine_now', '()' + , _return_set_value => 'mock_time_machine_now'); + +### `mock_func()` ### + + PERFORM mock_func( :_func_schema, :_func_name, :_func_args, _return_scalar_value ); + PERFORM mock_func( :_func_schema, :_func_name, :_func_args, _return_set_value ); + PERFORM mock_func( :_func_schema, :_func_name, :_func_args ); + +**Parameters** + +`:_func_schema` +: Text. This is the schema where your function is declared. Required. + +`:_func_name` +: Text. Function name. Required. + +`:_func_args` +: Text. PG supports something similar to polymorphism. That’s why you always have to provide +a specific signature. This way PG will be able to find the function you want to mock. +If there are no parameters, just give '()'. +But if your function has some parameters, you must specify them as follows: +(_int_param int, _text_param text = null, _ts_param = now()). +The simplest way to find the correct signature of your function is to call get_routine_signature. It is required. + +`:_return_scalar_value` +: Text. Some scalar value that your mock has to return in a test context. Optional. Default NULL. +Obviously, you have to provide either '_return_set_value' or '_return_scalar_value'. + +`:_return_set_value` +: Text. Some SQL code forming a dataset that your mock should return in a test context. +You may provide a name for a prepared statement. There is a convention to name a prepared statement +using the following pattern: 'mock_my_dataset'. Optional. Default NULL. 
+Obviously, you have to provide either '_return_set_value' or '_return_scalar_value'. + +### `get_routine_signature()` ### + + PERFORM get_routine_signature( :_routine_schema, :_routine_name ); + PERFORM get_routine_signature( :_routine_name ); + +**Parameters** + +`:_routine_schema` +: Text. This is the schema where your routine is declared. Required. + +`:_routine_name` +: Text. Routine name. Required. + +This function is a simple way to find out how PostgreSQL stores the signature of your routine. +Use this function to select at least the arguments with defaults ('args_with_defs') to pass this value to 'mock_func'. + +### `mock_view()` ### + + PERFORM mock_view( :_view_schema, :_view_name, :_return_set_sql ); + +**Parameters** + +`:_view_schema` +: Text. This is the schema where your view is declared. Required. + +`:_view_name` +: Text. View name. Required. + +`:_return_set_sql` +: Text. Some SQL code forming a dataset that your mock should return in a test context. Required. + +Creates a mock replacement for a real view. See more details about mocking above regarding the 'mock_func' function. + +Assert to control the count of calls +--------------------------------- + +### `call_count()` ### + + SELECT call_count( :_call_count, :_func_schema, :_func_name, :_func_args ); + +To be able to gather a count on function calls, please ensure that the track_functions setting is set to 'all'. + +**Parameters** + +`:_call_count` +: Int. How many calls you are expecting. Required. + +`:_func_schema` +: Text. This is the schema where your function is declared. Required. + +`:_func_name` +: Text. Function name. Required. + +`:_func_args` +: Text. PG supports something similar to polymorphism. That’s why you always have to provide +a specific signature. This way PG will be able to find the function you want to mock. +If there are no parameters, just give '()'. +But if your function has some parameters, you must specify them as follows: +(_int_param int, _text_param text = null, _ts_param = now()). 
It is required. + +Helper functions +----------------- + +### `print_table_as_json()` ### + +The greatest feature of pgTap is that every test run happens within its own transaction. That's awesome, +but it complicates tracking what's happening internally. Therefore, by calling the 'print_table_as_json' +function, you'll be able to see what data was present in a table (or query) at the time the test was running. + +**Parameters** + +`:_table_schema` +: Text. This is the schema where your table is declared. Required. + +`:_table_name` +: Text. Table name. Required. + +In the current version, all we need to do is create a table using some SQL statements in a certain schema +and pass the table and schema names to the function. The function will then issue a 'RAISE NOTICE' command +containing SQL code that you can copy-paste and execute. The result will appear as a regular table. +If you work in DBeaver, look for the result of the 'RAISE NOTICE' command in the Output window. + +### `print_query_as_json()` ### + +It performs precisely the same action as 'print_table_as_json', except you don't have to create a table. +Simply prepare a statement and supply its name to the function. +It's great that you can inspect any data you're working with during testing. +Simply construct a query returning your variables, settings, or any other relevant information, +and output them via the 'print_query_as_json' function. + +**Parameters** + +`:_prepared_statement_name` +: Text. This is the name of the prepared statement whose data you're looking to explore. Required. + +### `drop_prepared_statement()` ### + +Any prepared statement is a session-level object. Thus, if you reuse the same names across tests, +similar to my approach, you must remove previously created prepared statements before you can use them again. +It's advisable to leverage the 'setup' and 'teardown' features provided by pgTap. + +**Parameters** + +`:_statements` +: Array of text. Those are names of the prepared statements you want to drop. 
Required. + Compose Yourself ================ diff --git a/sql/pgtap--1.3.3--1.3.4.sql b/sql/pgtap--1.3.3--1.3.4.sql index 7607e95d..ab196d0a 100644 --- a/sql/pgtap--1.3.3--1.3.4.sql +++ b/sql/pgtap--1.3.3--1.3.4.sql @@ -16,6 +16,381 @@ RETURNS TEXT AS $$ ); $$ LANGUAGE SQL; +CREATE OR REPLACE FUNCTION get_routine_signature( + _routine_schema name + , _routine_name name) + RETURNS TABLE (routine_schema TEXT, routine_name TEXT, routine_params TEXT) + LANGUAGE SQL STABLE +AS $function$ + SELECT + "schema", "name", args_with_defs + FROM tap_funky + WHERE + "schema" = _routine_schema and + "name" = _routine_name; +$function$; + +CREATE OR REPLACE FUNCTION get_routine_signature( + _routine_name name) + RETURNS TABLE (routine_schema TEXT, routine_name TEXT, routine_params TEXT) + LANGUAGE SQL STABLE +AS $function$ + SELECT + "schema", "name", args_with_defs + FROM tap_funky + WHERE + "name" = _routine_name; +$function$; + +--this function creates a mock in place of a real function +create or replace function mock_func( + _func_schema text + , _func_name text + , _func_args text + , _return_set_value text default null + , _return_scalar_value anyelement default null::text +) +returns void +--creates a mock in place of a real function + LANGUAGE plpgsql +AS $function$ +declare + _mock_ddl text; + _func_result_type text; + _func_qualified_name text; + _func_language text; + _returns_set bool; + _variants text; + _ex_msg text; +begin + --First of all, we have to identify which function we must mock. If there is no such function, throw an error. 
+ begin + select "returns", langname, returns_set + into strict _func_result_type, _func_language, _returns_set + from tap_funky + where "schema" = _func_schema + and "name" = _func_name + and args_with_defs = _func_args; + exception when NO_DATA_FOUND or TOO_MANY_ROWS then + select string_agg(E'\t - ' || format('%I.%I %s', "schema", "name", args_with_defs), E'\n')::text + into _variants + from tap_funky + where "name" = _func_name; + _ex_msg = format('Routine %I.%I %s does not exist.', + _func_schema, _func_name, _func_args) || E'\n' || 'Possible variants are:' || E'\n' || + coalesce(_variants, 'There is no such function in any schema'); + raise exception '%', coalesce(_ex_msg, 'Нет описания'); + end; + --This is the case when we need to mock a function written in SQL. + --But in order to be able to execute the mocking functionality, we need to have a function written in plpgsql. + --That is why we create a hidden function which name starts with "__". + if _func_language = 'sql' and _returns_set then + _mock_ddl = format(' + create or replace function %1$I.__%2$I(_name text) + returns %3$s + language plpgsql + AS %4$sfunction%4$s + begin + return query execute _query(_name); + end; + %4$sfunction%4$s;', + _func_schema/*1*/, _func_name/*2*/, _func_result_type/*3*/, '$'/*4*/); + execute _mock_ddl; + _mock_ddl = format(' + create or replace function %1$I.%2$I %3$s + returns %4$s + language %5$s + AS %7$sfunction%7$s + select * from %1$I.__%2$I ( ''%6$s'' ); + %7$sfunction%7$s;', + _func_schema/*1*/, _func_name/*2*/, _func_args/*3*/, _func_result_type/*4*/, + _func_language/*5*/, _return_set_value/*6*/, '$'/*7*/); + execute _mock_ddl; + end if; + + if _func_language = 'plpgsql' and _returns_set then + _mock_ddl = format(' + create or replace function %1$I.%2$I %3$s + returns %4$s + language plpgsql + AS %6$sfunction%6$s + begin + return query execute _query( ''%5$s'' ); + end; + %6$sfunction%6$s;', + _func_schema/*1*/, _func_name/*2*/, _func_args/*3*/, 
_func_result_type/*4*/, + _return_set_value/*5*/, '$'/*6*/); + execute _mock_ddl; + end if; + + if not _returns_set then + _mock_ddl = format(' + create or replace function %1$I.%2$I %3$s + RETURNS %4$s + LANGUAGE %5$s + AS %8$sfunction%8$s + select %6$L::%7$s; + %8$sfunction%8$s;', + _func_schema/*1*/, _func_name/*2*/, _func_args/*3*/, _func_result_type/*4*/, + _func_language/*5*/, _return_scalar_value/*6*/, pg_typeof(_return_scalar_value)/*7*/, '$'/*8*/); + execute _mock_ddl; + end if; +end $function$; + +--This function creates a mock in place of a real view +create or replace function mock_view( + _view_schema text + , _view_name text + , _return_set_sql text default null +) +returns void +--Create a mock in place of a real view +language plpgsql +as $function$ +declare + _mock_ddl text; + _func_result_type text; + _func_qualified_name text; + _func_language text; + _returns_set bool; +begin + _mock_ddl = format('drop view %I.%I', _view_schema, _view_name); + execute _mock_ddl; + _mock_ddl = format('create view %I.%I as %s', _view_schema, _view_name, _return_set_sql); + execute _mock_ddl; +end; $function$; + +create or replace function fake_table( + _table_ident text[], + _make_table_empty boolean default false, + _leave_primary_key boolean default false, + _drop_not_null boolean DEFAULT false, + _drop_collation boolean DEFAULT false, + _drop_partitions boolean DEFAULT false +) +returns void +--It frees a table from any constraint (we call such a table as a fake) +--faked table is a full copy of _table_name, but has no any constraint +--without foreign and primary things you can do whatever you want in testing context + LANGUAGE plpgsql +AS $function$ +declare + _table record; + _fk_table record; + _part_table record; + _fake_ddl text; + _not_null_ddl text; +begin + for _table in + select + quote_ident(coalesce((parse_ident(table_ident))[1], '')) table_schema, + quote_ident(coalesce((parse_ident(table_ident))[2], '')) table_name, + 
coalesce((parse_ident(table_ident))[1], '') table_schema_l, + coalesce((parse_ident(table_ident))[2], '') table_name_l + from + unnest(_table_ident) as t(table_ident) + loop + for _fk_table in + -- collect all table's relations including primary key and unique constraint + select distinct * + from ( + select + fk_schema_name table_schema, fk_table_name table_name + , fk_constraint_name constraint_name, false as is_pk, 1 as ord + from + pg_all_foreign_keys + where + fk_schema_name = _table.table_schema_l and fk_table_name = _table.table_name_l + union all + select + fk_schema_name table_schema, fk_table_name table_name + , fk_constraint_name constraint_name, false as is_pk, 1 as ord + from + pg_all_foreign_keys + where + pk_schema_name = _table.table_schema_l and pk_table_name = _table.table_name_l + union all + select + table_schema, table_name + , constraint_name + , case when constraint_type = 'PRIMARY KEY' then true else false end as is_pk, 2 as ord + from + information_schema.table_constraints + where + table_schema = _table.table_schema_l + and table_name = _table.table_name_l + and constraint_type in ('PRIMARY KEY', 'UNIQUE') + ) as t + order by ord + loop + if not(_leave_primary_key and _fk_table.is_pk) then + _fake_ddl = format('alter table %1$I.%2$I drop constraint %3$I;', + _fk_table.table_schema/*1*/, _fk_table.table_name/*2*/, _fk_table.constraint_name/*3*/ + ); + execute _fake_ddl; + end if; + end loop; + + if _make_table_empty then + _fake_ddl = format('truncate table %1$s.%2$s;', _table.table_schema, _table.table_name); + execute _fake_ddl; + end if; + + --Free table from not null constraints + _fake_ddl = format('alter table %1$s.%2$s ', _table.table_schema, _table.table_name); + if _drop_not_null then + select + string_agg(format('alter column %1$I drop not null', t.attname), ', ') + into + _not_null_ddl + from + pg_catalog.pg_attribute t + where t.attrelid = (_table.table_schema || '.' 
|| _table.table_name)::regclass + and t.attnum > 0 and attnotnull + and ( + ( + --and the column is not part of PK when we want to leave PK as is + t.attname::text != all((select _keys(_table.table_schema, _table.table_name, 'p'))::text[]) and + _leave_primary_key + ) or + --we can drop a NOT NULL constraint as there is no PK already + not _leave_primary_key + ); + + _fake_ddl = _fake_ddl || _not_null_ddl || ';'; + else + _fake_ddl = null; + end if; + + if _fake_ddl is not null then + execute _fake_ddl; + end if; + + if _drop_partitions then + if pg_version_num() < 100000 then + raise exception 'Sorry, but declarative partitioning was introduced only starting with PostgreSQL version 10.'; + end if; + for _part_table in select _parts from _parts(_table.table_schema, _table.table_name) loop + _fake_ddl = 'drop table if exists ' || _part_table._parts || ';'; + execute _fake_ddl; + end loop; + end if; + end loop; +end $function$; + +create or replace function call_count( + _call_count int + , _func_schema name + , _func_name name + , _func_args name[]) + RETURNS text + LANGUAGE plpgsql +AS $function$ +declare + _actual_call_count int; + _track_functions_setting text; +begin + select current_setting('track_functions') into _track_functions_setting; + + if _track_functions_setting != 'all' then + return fail('track_functions setting is not set. 
Must be all'); + end if; + + select calls into _actual_call_count + from pg_stat_xact_user_functions + where funcid = _get_func_oid(_func_schema, _func_name, _func_args); + + return ok( + _actual_call_count = _call_count + , format('routine %I.%I must has been called %L times, actual call count is %L' + , _func_schema, _func_name, _call_count, _actual_call_count) + ); +end $function$; + +create or replace function drop_prepared_statement(_statements text[]) +returns setof bool as $$ +declare + _statement record; +begin + for _statement in select _name from unnest(_statements) as t(_name) loop + if exists(select * from pg_prepared_statements where "name" = _statement._name) then + EXECUTE format('deallocate %I;', _statement._name); + return next true; + else + return next false; + end if; + end loop; +end +$$ +language plpgsql; + + +create or replace function print_table_as_json(in _table_schema text, in _table_name text) +returns void + language plpgsql +AS $function$ +declare + _ddl text; + _json text; + _columns text; +--returns a query which you can execute and see your table as normal dataset +--you can find the returned query in the output window in DBeaver, where we see raise notice command output +--note! the returned dataset is limited to 1000 records. 
that's why you didn't get any jdbc error in dbeaver in case of huge amount of rows +begin + _ddl = format(' + select json_agg( + array(select %1$I from %2$I.%1$I limit 1000 + )) as j;', _table_name, _table_schema); + execute _ddl into _json; + _json = '[' || ltrim(rtrim(_json::text, ']'), '[') || ']'; + + select string_agg(concat(quote_ident(c.column_name), ' ', case when lower(c.data_type) = 'array' then e.data_type || '[]' else c.data_type end), ', ') + into _columns + from information_schema."columns" c + left join information_schema.element_types e + on ((c.table_catalog, c.table_schema, c.table_name, 'TABLE', c.dtd_identifier) + = (e.object_catalog, e.object_schema, e.object_name, e.object_type, e.collection_type_identifier)) + where c.table_schema = _table_schema + and c.table_name = _table_name; + + _json = format('select * from /*%1$I.%2$I*/ json_to_recordset(%3$L) as t(%4$s)', + _table_schema/*1*/, _table_name/*2*/, _json/*3*/, _columns/*4*/); + raise notice '%', _json; +end $function$; + +create or replace function print_query_as_json(in _prepared_statement_name text) +returns void + language plpgsql +as $function$ +declare + _ddl text; + _table_name text; +--returns a query which you can execute and see your table as normal dataset +--you can find the returned query in the output window in DBeaver, where we see raise notice command output +--note! the returned dataset is limited to 1000 records. 
that's why you didn't get any jdbc error in dbeaver in case of huge amount of rows +begin + _table_name = _prepared_statement_name || '_' || gen_random_uuid(); + _ddl = format('create table public.%1$I as execute %2$s', _table_name, _prepared_statement_name); + execute _ddl; + perform print_table_as_json('public', _table_name::text); +end; +$function$; + + +CREATE OR REPLACE FUNCTION _get_func_oid(name, name, name[]) + RETURNS oid + LANGUAGE sql +AS $function$ + SELECT oid + FROM tap_funky + WHERE "schema" = $1 + and "name" = $2 + AND args = _funkargs($3) + AND is_visible +$function$ +; + + -- index_is_partial( schema, table, index, description ) CREATE OR REPLACE FUNCTION index_is_partial ( NAME, NAME, NAME, text ) RETURNS TEXT AS $$ @@ -86,4 +461,4 @@ BEGIN 'Index ' || quote_ident($1) || ' should be partial' ); END; -$$ LANGUAGE plpgsql; +$$ LANGUAGE plpgsql; \ No newline at end of file diff --git a/sql/pgtap.sql.in b/sql/pgtap.sql.in index c1c7ed92..5839a294 100644 --- a/sql/pgtap.sql.in +++ b/sql/pgtap.sql.in @@ -2603,24 +2603,83 @@ BEGIN END; $$ LANGUAGE plpgsql STABLE; -CREATE OR REPLACE VIEW tap_funky - AS SELECT p.oid AS oid, - n.nspname AS schema, - p.proname AS name, - pg_catalog.pg_get_userbyid(p.proowner) AS owner, - array_to_string(p.proargtypes::regtype[], ',') AS args, - CASE p.proretset WHEN TRUE THEN 'setof ' ELSE '' END - || p.prorettype::regtype AS returns, - p.prolang AS langoid, - p.proisstrict AS is_strict, - _prokind(p.oid) AS kind, - p.prosecdef AS is_definer, - p.proretset AS returns_set, - p.provolatile::char AS volatility, - pg_catalog.pg_function_is_visible(p.oid) AS is_visible - FROM pg_catalog.pg_proc p - JOIN pg_catalog.pg_namespace n ON p.pronamespace = n.oid -; +CREATE OR REPLACE FUNCTION _routineresult ( _routine_signature text ) +RETURNS TEXT AS $$ +BEGIN + /* + We need this function because without handling errors we will run into an error like + SQL Error [42883]: ERROR: function + 
"pg_toast.mock_func(text,text,text,text,anyelement)" dose not exist + It happens when we write filter 'routine_return.returns is not null' in tap_funky view. + Handling 'undefined_object' does not work + */ + RETURN pg_catalog.pg_get_function_result((_routine_signature::regprocedure)::oid); +EXCEPTION WHEN others THEN + RETURN NULL; +END; +$$ LANGUAGE PLPGSQL STABLE; + +CREATE OR REPLACE FUNCTION _routineargsdefs ( _routine_signature text ) +RETURNS TEXT AS $$ +BEGIN + /* + We need this function because without handling errors we will run into an error like + SQL Error [42883]: ERROR: function + "pg_toast.mock_func(text,text,text,text,anyelement)" dose not exist + It happens when we write filter 'routine_args.with_defs is not null' in tap_funky view. + Handling 'undefined_object' does not work + */ + RETURN pg_catalog.pg_get_function_arguments((_routine_signature::regprocedure)::oid); +EXCEPTION WHEN others THEN + RETURN NULL; +END; +$$ LANGUAGE PLPGSQL STABLE; + + +--added columns "langname", "returns" and args_with_defs to be used in mock_func function +CREATE OR REPLACE VIEW tap_funky AS +SELECT + p.oid AS oid, + n.nspname AS schema, + p.proname AS name, + pg_catalog.pg_get_userbyid(p.proowner) AS owner, + routine_name.args AS args, + lower(coalesce( + routine_info."returns", + routine_info.sys_returns)) AS "returns", + p.prolang AS langoid, + p.proisstrict AS is_strict, + _prokind(p.oid) AS kind, + p.prosecdef AS is_definer, + p.proretset AS returns_set, + p.provolatile::char AS volatility, + pg_catalog.pg_function_is_visible(p.oid) AS is_visible, + l.lanname AS langname, + format('(%s)', + routine_info.args_with_defs + ) AS args_with_defs +FROM pg_catalog.pg_proc p +JOIN pg_catalog.pg_namespace n ON p.pronamespace = n.oid +LEFT JOIN pg_catalog.pg_language l ON l.oid = p.prolang +LEFT JOIN LATERAL ( + select + format('%1I.%2I', n.nspname, p.proname) as qualified, + array_to_string(p.proargtypes::regtype[], ',') AS args +) routine_name ON true +LEFT JOIN LATERAL 
( + SELECT + CASE + WHEN lower(n.nspname) != 'pg_catalog' + THEN _routineresult(concat(routine_name.qualified, '(', routine_name.args, ')')) + ELSE NULL + END collate "default" AS "returns", + CASE + WHEN lower(n.nspname) != 'pg_catalog' + THEN _routineargsdefs(concat(routine_name.qualified, '(', routine_name.args, ')')) + ELSE NULL + end collate "default" AS "args_with_defs", + CASE p.proretset WHEN TRUE THEN 'setof ' ELSE '' END || p.prorettype::regtype AS sys_returns +) AS routine_info ON TRUE; CREATE OR REPLACE FUNCTION _funkargs ( NAME[] ) RETURNS TEXT AS $$ @@ -11518,3 +11577,376 @@ RETURNS TEXT AS $$ 'Function ' || quote_ident($1) || '() should not be a procedure' ); $$ LANGUAGE sql; + +CREATE OR REPLACE FUNCTION get_routine_signature( + _routine_schema name + , _routine_name name) + RETURNS TABLE (routine_schema TEXT, routine_name TEXT, routine_params TEXT) + LANGUAGE SQL STABLE +AS $function$ + SELECT + "schema", "name", args_with_defs + FROM tap_funky + WHERE + "schema" = _routine_schema and + "name" = _routine_name; +$function$; + +CREATE OR REPLACE FUNCTION get_routine_signature( + _routine_name name) + RETURNS TABLE (routine_schema TEXT, routine_name TEXT, routine_params TEXT) + LANGUAGE SQL STABLE +AS $function$ + SELECT + "schema", "name", args_with_defs + FROM tap_funky + WHERE + "name" = _routine_name; +$function$; + +--this function creates a mock in place of a real function +create or replace function mock_func( + _func_schema text + , _func_name text + , _func_args text + , _return_set_value text default null + , _return_scalar_value anyelement default null::text +) +returns void +--creates a mock in place of a real function + LANGUAGE plpgsql +AS $function$ +declare + _mock_ddl text; + _func_result_type text; + _func_qualified_name text; + _func_language text; + _returns_set bool; + _variants text; + _ex_msg text; +begin + --First of all, we have to identify which function we must mock. If there is no such function, throw an error. 
+ begin + select "returns", langname, returns_set + into strict _func_result_type, _func_language, _returns_set + from tap_funky + where "schema" = _func_schema + and "name" = _func_name + and args_with_defs = _func_args; + exception when NO_DATA_FOUND or TOO_MANY_ROWS then + select string_agg(E'\t - ' || format('%I.%I %s', "schema", "name", args_with_defs), E'\n')::text + into _variants + from tap_funky + where "name" = _func_name; + _ex_msg = format('Routine %I.%I %s does not exist.', + _func_schema, _func_name, _func_args) || E'\n' || 'Possible variants are:' || E'\n' || + coalesce(_variants, 'There is no such function in any schema'); + raise exception '%', coalesce(_ex_msg, 'Нет описания'); + end; + --This is the case when we need to mock a function written in SQL. + --But in order to be able to execute the mocking functionality, we need to have a function written in plpgsql. + --That is why we create a hidden function which name starts with "__". + if _func_language = 'sql' and _returns_set then + _mock_ddl = format(' + create or replace function %1$I.__%2$I(_name text) + returns %3$s + language plpgsql + AS %4$sfunction%4$s + begin + return query execute _query(_name); + end; + %4$sfunction%4$s;', + _func_schema/*1*/, _func_name/*2*/, _func_result_type/*3*/, '$'/*4*/); + execute _mock_ddl; + _mock_ddl = format(' + create or replace function %1$I.%2$I %3$s + returns %4$s + language %5$s + AS %7$sfunction%7$s + select * from %1$I.__%2$I ( ''%6$s'' ); + %7$sfunction%7$s;', + _func_schema/*1*/, _func_name/*2*/, _func_args/*3*/, _func_result_type/*4*/, + _func_language/*5*/, _return_set_value/*6*/, '$'/*7*/); + execute _mock_ddl; + end if; + + if _func_language = 'plpgsql' and _returns_set then + _mock_ddl = format(' + create or replace function %1$I.%2$I %3$s + returns %4$s + language plpgsql + AS %6$sfunction%6$s + begin + return query execute _query( ''%5$s'' ); + end; + %6$sfunction%6$s;', + _func_schema/*1*/, _func_name/*2*/, _func_args/*3*/, 
_func_result_type/*4*/, + _return_set_value/*5*/, '$'/*6*/); + execute _mock_ddl; + end if; + + if not _returns_set then + _mock_ddl = format(' + create or replace function %1$I.%2$I %3$s + RETURNS %4$s + LANGUAGE %5$s + AS %8$sfunction%8$s + select %6$L::%7$s; + %8$sfunction%8$s;', + _func_schema/*1*/, _func_name/*2*/, _func_args/*3*/, _func_result_type/*4*/, + _func_language/*5*/, _return_scalar_value/*6*/, pg_typeof(_return_scalar_value)/*7*/, '$'/*8*/); + execute _mock_ddl; + end if; +end $function$; + +--This function creates a mock in place of a real view +create or replace function mock_view( + _view_schema text + , _view_name text + , _return_set_sql text default null +) +returns void +--Create a mock in place of a real view +language plpgsql +as $function$ +declare + _mock_ddl text; + _func_result_type text; + _func_qualified_name text; + _func_language text; + _returns_set bool; +begin + _mock_ddl = format('drop view %I.%I', _view_schema, _view_name); + execute _mock_ddl; + _mock_ddl = format('create view %I.%I as %s', _view_schema, _view_name, _return_set_sql); + execute _mock_ddl; +end; $function$; + +create or replace function fake_table( + _table_ident text[], + _make_table_empty boolean default false, + _leave_primary_key boolean default false, + _drop_not_null boolean DEFAULT false, + _drop_collation boolean DEFAULT false, + _drop_partitions boolean DEFAULT false +) +returns void +--It frees a table from any constraint (we call such a table as a fake) +--faked table is a full copy of _table_name, but has no any constraint +--without foreign and primary things you can do whatever you want in testing context + LANGUAGE plpgsql +AS $function$ +declare + _table record; + _fk_table record; + _part_table record; + _fake_ddl text; + _not_null_ddl text; +begin + for _table in + select + quote_ident(coalesce((parse_ident(table_ident))[1], '')) table_schema, + quote_ident(coalesce((parse_ident(table_ident))[2], '')) table_name, + 
coalesce((parse_ident(table_ident))[1], '') table_schema_l, + coalesce((parse_ident(table_ident))[2], '') table_name_l + from + unnest(_table_ident) as t(table_ident) + loop + for _fk_table in + -- collect all table's relations including primary key and unique constraint + select distinct * + from ( + select + fk_schema_name table_schema, fk_table_name table_name + , fk_constraint_name constraint_name, false as is_pk, 1 as ord + from + pg_all_foreign_keys + where + fk_schema_name = _table.table_schema_l and fk_table_name = _table.table_name_l + union all + select + fk_schema_name table_schema, fk_table_name table_name + , fk_constraint_name constraint_name, false as is_pk, 1 as ord + from + pg_all_foreign_keys + where + pk_schema_name = _table.table_schema_l and pk_table_name = _table.table_name_l + union all + select + table_schema, table_name + , constraint_name + , case when constraint_type = 'PRIMARY KEY' then true else false end as is_pk, 2 as ord + from + information_schema.table_constraints + where + table_schema = _table.table_schema_l + and table_name = _table.table_name_l + and constraint_type in ('PRIMARY KEY', 'UNIQUE') + ) as t + order by ord + loop + if not(_leave_primary_key and _fk_table.is_pk) then + _fake_ddl = format('alter table %1$I.%2$I drop constraint %3$I;', + _fk_table.table_schema/*1*/, _fk_table.table_name/*2*/, _fk_table.constraint_name/*3*/ + ); + execute _fake_ddl; + end if; + end loop; + + if _make_table_empty then + _fake_ddl = format('truncate table %1$s.%2$s;', _table.table_schema, _table.table_name); + execute _fake_ddl; + end if; + + --Free table from not null constraints + _fake_ddl = format('alter table %1$s.%2$s ', _table.table_schema, _table.table_name); + if _drop_not_null then + select + string_agg(format('alter column %1$I drop not null', t.attname), ', ') + into + _not_null_ddl + from + pg_catalog.pg_attribute t + where t.attrelid = (_table.table_schema || '.' 
|| _table.table_name)::regclass + and t.attnum > 0 and attnotnull + and ( + ( + --and the column is not part of PK when we want to leave PK as is + t.attname::text != all((select _keys(_table.table_schema, _table.table_name, 'p'))::text[]) and + _leave_primary_key + ) or + --we can drop a NOT NULL constraint as there is no PK already + not _leave_primary_key + ); + + _fake_ddl = _fake_ddl || _not_null_ddl || ';'; + else + _fake_ddl = null; + end if; + + if _fake_ddl is not null then + execute _fake_ddl; + end if; + + if _drop_partitions then + if pg_version_num() < 100000 then + raise exception 'Sorry, but declarative partitioning was introduced only starting with PostgreSQL version 10.'; + end if; + for _part_table in select _parts from _parts(_table.table_schema, _table.table_name) loop + _fake_ddl = 'drop table if exists ' || _part_table._parts || ';'; + execute _fake_ddl; + end loop; + end if; + end loop; +end $function$; + +create or replace function call_count( + _call_count int + , _func_schema name + , _func_name name + , _func_args name[]) + RETURNS text + LANGUAGE plpgsql +AS $function$ +declare + _actual_call_count int; + _track_functions_setting text; +begin + select current_setting('track_functions') into _track_functions_setting; + + if _track_functions_setting != 'all' then + return fail('track_functions setting is not set. 
Must be all'); + end if; + + select calls into _actual_call_count + from pg_stat_xact_user_functions + where funcid = _get_func_oid(_func_schema, _func_name, _func_args); + + return ok( + _actual_call_count = _call_count + , format('routine %I.%I must have been called %L times, actual call count is %L' + , _func_schema, _func_name, _call_count, _actual_call_count) + ); +end $function$; + +create or replace function drop_prepared_statement(_statements text[]) +returns setof bool as $$ +declare + _statement record; +begin + for _statement in select _name from unnest(_statements) as t(_name) loop + if exists(select * from pg_prepared_statements where "name" = _statement._name) then + EXECUTE format('deallocate %I;', _statement._name); + return next true; + else + return next false; + end if; + end loop; +end +$$ +language plpgsql; + + +create or replace function print_table_as_json(in _table_schema text, in _table_name text) +returns void + language plpgsql +AS $function$ +declare + _ddl text; + _json text; + _columns text; +--returns a query which you can execute and see your table as normal dataset +--you can find the returned query in the output window in DBeaver, where we see raise notice command output +--note! the returned dataset is limited to 1000 records. 
that's why you won't get any JDBC error in DBeaver in case of a huge number of rows +begin + _ddl = format(' + select json_agg( + array(select %1$I from %2$I.%1$I limit 1000 + )) as j;', _table_name, _table_schema); + execute _ddl into _json; + _json = '[' || ltrim(rtrim(_json::text, ']'), '[') || ']'; + + select string_agg(concat(quote_ident(c.column_name), ' ', case when lower(c.data_type) = 'array' then e.data_type || '[]' else c.data_type end), ', ') + into _columns + from information_schema."columns" c + left join information_schema.element_types e + on ((c.table_catalog, c.table_schema, c.table_name, 'TABLE', c.dtd_identifier) + = (e.object_catalog, e.object_schema, e.object_name, e.object_type, e.collection_type_identifier)) + where c.table_schema = _table_schema + and c.table_name = _table_name; + + _json = format('select * from /*%1$I.%2$I*/ json_to_recordset(%3$L) as t(%4$s)', + _table_schema/*1*/, _table_name/*2*/, _json/*3*/, _columns/*4*/); + raise notice '%', _json; +end $function$; + +create or replace function print_query_as_json(in _prepared_statement_name text) +returns void + language plpgsql +as $function$ +declare + _ddl text; + _table_name text; +--returns a query which you can execute and see your table as normal dataset +--you can find the returned query in the output window in DBeaver, where we see raise notice command output +--note! the returned dataset is limited to 1000 records. 
that's why you didn't get any jdbc error in dbeaver in case of huge amount of rows +begin + _table_name = _prepared_statement_name || '_' || gen_random_uuid(); + _ddl = format('create table public.%1$I as execute %2$s', _table_name, _prepared_statement_name); + execute _ddl; + perform print_table_as_json('public', _table_name::text); +end; +$function$; + +CREATE OR REPLACE FUNCTION _get_func_oid(name, name, name[]) + RETURNS oid + LANGUAGE sql +AS $function$ + SELECT oid + FROM tap_funky + WHERE "schema" = $1 + and "name" = $2 + AND args = _funkargs($3) + AND is_visible +$function$ +; \ No newline at end of file diff --git a/test/expected/faketable.out b/test/expected/faketable.out new file mode 100644 index 00000000..96dd789a --- /dev/null +++ b/test/expected/faketable.out @@ -0,0 +1,23 @@ +\unset ECHO +1..21 +ok 1 - public.parent.id is primary key should pass +ok 2 - public.child.id is not primary key should pass +ok 3 - public.child.parent_id is not foreign key should pass +ok 4 - public.parent.id is not null should pass +ok 5 - public.parent.col is null should pass +ok 6 - public.child.id is null should pass +ok 7 - public.child.parent_id is null should pass +ok 8 - public.child.col is null should pass +ok 9 - table public.parent is empty should pass +ok 10 - table public.child is empty should pass +ok 11 - We can do insert into foreign key column should pass +ok 12 - public.pk_plus_not_null primary key is (id, num) should pass +ok 13 - Columns tale and comments of public.pk_plus_not_null are not part of primary key should pass +ok 14 - public.pk_plus_not_null.id is not null should pass +ok 15 - public.pk_plus_not_null.num is not null should pass +ok 16 - public.pk_plus_not_null.tale is null should pass +ok 17 - public.pk_plus_not_null.comments is null should pass +ok 18 - public.parts should have no any partition should pass +ok 19 - public.scalar_function called once should pass +ok 20 - public.set_sql_function called twice should pass +ok 21 - 
public.set_sql_function(text) called once should pass diff --git a/test/expected/funcmock.out b/test/expected/funcmock.out new file mode 100644 index 00000000..2d1ef0f7 --- /dev/null +++ b/test/expected/funcmock.out @@ -0,0 +1,5 @@ +\unset ECHO +1..3 +ok 1 - mock scalar_function should pass +ok 2 - mock sql function returning a set should pass +ok 3 - mock plpgsql function returning a set should pass diff --git a/test/expected/viewmock.out b/test/expected/viewmock.out new file mode 100644 index 00000000..aafd9813 --- /dev/null +++ b/test/expected/viewmock.out @@ -0,0 +1,3 @@ +\unset ECHO +1..1 +ok 1 - mock of some_view should return expected result should pass diff --git a/test/sql/faketable.sql b/test/sql/faketable.sql new file mode 100644 index 00000000..a042e61f --- /dev/null +++ b/test/sql/faketable.sql @@ -0,0 +1,236 @@ +\unset ECHO +\i test/setup.sql +-- \i sql/pgtap.sql + +SELECT plan(21); +--SELECT * FROM no_plan(); + +-- This will be rolled back. :-) +SET track_functions = 'all'; +SET client_min_messages = warning; + +create or replace function public.scalar_function() +returns time +language plpgsql +as $$ +begin + return now()::time; +end; +$$; + +create or replace function public.set_sql_function() +returns table(id int, f text) +language sql +as $$ + select * from (values(1, 'a'), (2, 'b'), (3, 'c')) as t(id, f); +$$; + +create or replace function public.set_sql_function(_whatever text) +returns table(id int, f text) +language sql +as $$ + select * from (values(1, 'a' || _whatever), (2, 'b' || _whatever)) as t(id, f); +$$; + +CREATE TABLE public.parent( + id int NOT NULL, col text NOT NULL, + CONSTRAINT parent_pk PRIMARY KEY (id) +); + +CREATE TABLE public.child( + id int NOT NULL, + parent_id int NOT NULL, + col text NOT NULL, + CONSTRAINT child_pk PRIMARY KEY (id), + CONSTRAINT child_fk FOREIGN KEY (parent_id) REFERENCES parent(id) +); + +INSERT INTO public.parent(id, col) values(1, 'a'); + +INSERT INTO public.child(id, parent_id, col) values(1, 1, 
'b'); + +RESET client_min_messages; + +CREATE FUNCTION test_faking_functionality() RETURNS SETOF TEXT AS $$ +BEGIN + perform fake_table( + '{public.parent}'::text[], + _make_table_empty => TRUE, + _leave_primary_key => TRUE, + _drop_not_null => FALSE); + + perform fake_table( + '{public.child}'::text[], + _make_table_empty => TRUE, + _leave_primary_key => FALSE, + _drop_not_null => TRUE); + + RETURN query SELECT * FROM check_test( + col_is_pk('public', 'parent', '{id}'::name[]), + TRUE, + 'public.parent.id is primary key'); + + RETURN query SELECT * FROM check_test( + col_isnt_pk('public', 'child', 'id'), + TRUE, + 'public.child.id is not primary key'); + + RETURN query SELECT * FROM check_test( + col_isnt_fk('public', 'child', 'parent_id'), + TRUE, + 'public.child.parent_id is not foreign key'); + + RETURN query SELECT * FROM check_test( + col_not_null('public', 'parent', 'id', ''), + TRUE, + 'public.parent.id is not null'); + + RETURN query SELECT * FROM check_test( + col_not_null('public', 'parent', 'col', ''), + TRUE, + 'public.parent.col is null'); + + RETURN query SELECT * FROM check_test( + col_is_null('public', 'child', 'id', ''), + TRUE, + 'public.child.id is null'); + + RETURN query SELECT * FROM check_test( + col_is_null('public', 'child', 'parent_id', ''), + TRUE, + 'public.child.parent_id is null'); + + RETURN query SELECT * FROM check_test( + col_is_null('public', 'child', 'col', ''), + TRUE, + 'public.child.col is null'); + + PREPARE parent_all AS SELECT * FROM public.parent; + PREPARE child_all AS SELECT * FROM public.child; + + RETURN query SELECT * FROM check_test( + is_empty('parent_all'), + TRUE, + 'table public.parent is empty'); + + RETURN query SELECT * FROM check_test( + is_empty('child_all'), + TRUE, + 'table public.child is empty'); + + RETURN query SELECT * FROM check_test( + lives_ok('INSERT INTO child(id, parent_id, col) values(1, 108, ''z'')'), + TRUE, + 'We can do insert into foreign key column'); +END; +$$ LANGUAGE plpgsql; + +CREATE 
TABLE public.pk_plus_not_null( + id int NOT NULL, + num varchar(10) not null, + tale text not null, + comments text, + CONSTRAINT pk_plus_not_null_pk PRIMARY KEY (id, num) +); + +CREATE FUNCTION test_faking_functionality_pk_not_null() RETURNS SETOF TEXT AS $$ +BEGIN + perform fake_table( + '{public.pk_plus_not_null}'::text[], + _leave_primary_key => TRUE, + _make_table_empty => TRUE, + _drop_not_null => TRUE); + + RETURN query SELECT * FROM check_test( + col_is_pk('public', 'pk_plus_not_null', '{id, num}'::name[]), + TRUE, + 'public.pk_plus_not_null primary key is (id, num)'); + + RETURN query SELECT * FROM check_test( + col_isnt_pk('public', 'pk_plus_not_null', '{tale, comments}'::name[], 'no pk'), + TRUE, + 'Columns tale and comments of public.pk_plus_not_null are not part of primary key'); + + RETURN query SELECT * FROM check_test( + col_not_null('public', 'pk_plus_not_null', 'id', ''), + TRUE, + 'public.pk_plus_not_null.id is not null'); + + RETURN query SELECT * FROM check_test( + col_not_null('public', 'pk_plus_not_null', 'num', ''), + TRUE, + 'public.pk_plus_not_null.num is not null'); + + RETURN query SELECT * FROM check_test( + col_is_null('public', 'pk_plus_not_null', 'tale', ''), + TRUE, + 'public.pk_plus_not_null.tale is null'); + + RETURN query SELECT * FROM check_test( + col_is_null('public', 'pk_plus_not_null', 'comments', ''), + TRUE, + 'public.pk_plus_not_null.comments is null'); +END; +$$ LANGUAGE plpgsql; + +create table public.parts( + id int, + tale text, + date_key date +) +partition by range(date_key); + +create table public.part_202509 partition of public.parts + for values from ('2025-09-01') to ('2025-10-01'); + +create table public.part_202510 partition of public.parts + for values from ('2025-10-01') to ('2025-11-01'); + +create or replace FUNCTION test_faking_functionality_no_partitions() RETURNS SETOF TEXT AS $$ +BEGIN + perform fake_table( + '{public.parts}'::text[], + _drop_partitions => TRUE); + + RETURN query SELECT * FROM 
check_test( + partitions_are('public', 'parts', '{}'::name[], 'No partitions we expect'), + TRUE, + 'public.parts should have no any partition'); +END; +$$ LANGUAGE plpgsql; + +SELECT * FROM test_faking_functionality(); +SELECT * FROM test_faking_functionality_pk_not_null(); +SELECT * FROM test_faking_functionality_no_partitions(); + +CREATE FUNCTION test_call_count_functionality() RETURNS SETOF TEXT AS $$ +BEGIN + perform public.scalar_function(); + perform public.set_sql_function(); + perform public.set_sql_function(); + perform public.set_sql_function('whatever'); + + RETURN query SELECT * FROM check_test( + call_count(1, 'public', 'scalar_function', '{}'::name[]), + TRUE, + 'public.scalar_function called once'); + + RETURN query SELECT * FROM check_test( + call_count(2, 'public', 'set_sql_function', '{}'::name[]), + TRUE, + 'public.set_sql_function called twice'); + + RETURN query SELECT * FROM check_test( + call_count(1, 'public', 'set_sql_function', '{text}'::name[]), + TRUE, + 'public.set_sql_function(text) called once'); +END; +$$ LANGUAGE plpgsql; + +SELECT * FROM test_call_count_functionality(); + +RESET track_functions; + +-- Finish the tests and clean up. +SELECT * FROM finish(); +ROLLBACK; diff --git a/test/sql/funcmock.sql b/test/sql/funcmock.sql new file mode 100644 index 00000000..f06e7579 --- /dev/null +++ b/test/sql/funcmock.sql @@ -0,0 +1,80 @@ +\unset ECHO +\i test/setup.sql +-- \i sql/pgtap.sql + +SELECT plan(3); +--SELECT * FROM no_plan(); + +-- This will be rolled back. :-) +--SET track_functions = 'all'; + +-- This will be rolled back. 
:-) +SET client_min_messages = warning; + +create or replace function public.scalar_function() +returns time +language sql +as $$ + select now()::time; +$$; + +create or replace function public.set_sql_function() +returns table(id int, col text) +language sql +as $$ + select * FROM (VALUES(1, 'a'), (2, 'b')) AS t(id, col); +$$; + +create or replace function public.set_plpgsql_function() +returns table(id int, col text) +language plpgsql +as $$ +begin + RETURN query select * FROM (VALUES(1, 'a'), (2, 'b')) AS t(id, col); +END; +$$; + +RESET client_min_messages; + +CREATE FUNCTION test_mocking_functionality() RETURNS SETOF TEXT AS $$ +DECLARE + _hour_before time; + _mock_result time; +BEGIN + _hour_before = now() - INTERVAL '01:00'; + perform mock_func('public', 'scalar_function', '()' + , _return_scalar_value => _hour_before::time); + _mock_result = scalar_function(); + + RETURN query SELECT * FROM check_test( + is(_mock_result, _hour_before), + TRUE, + 'mock scalar_function'); + + PREPARE mock_set_sql_function AS SELECT * FROM (VALUES(1, 'x'), (2, 'z')) AS t(id, col) ORDER BY id; + perform mock_func('public', 'set_sql_function', '()' + , _return_set_value => 'mock_set_sql_function'); + PREPARE returned_set_sql_function AS SELECT * FROM set_sql_function() ORDER BY id; + + RETURN query SELECT * FROM check_test( + results_eq('returned_set_sql_function', 'mock_set_sql_function'), + TRUE, + 'mock sql function returning a set'); + + PREPARE mock_set_plpgsql_function AS SELECT * FROM (VALUES(1, 'w'), (2, 'q')) AS t(id, col) ORDER BY id; + perform mock_func('public', 'set_plpgsql_function', '()' + , _return_set_value => 'mock_set_plpgsql_function'); + PREPARE returned_set_plpgsql_function AS SELECT * FROM set_plpgsql_function() ORDER BY id; + + RETURN query SELECT * FROM check_test( + results_eq('returned_set_plpgsql_function', 'mock_set_plpgsql_function'), + TRUE, + 'mock plpgsql function returning a set'); +END; +$$ LANGUAGE plpgsql; + +SELECT * FROM 
test_mocking_functionality(); + +-- Finish the tests and clean up. +SELECT * FROM finish(); +ROLLBACK; diff --git a/test/sql/viewmock.sql b/test/sql/viewmock.sql new file mode 100644 index 00000000..cf4b7fc7 --- /dev/null +++ b/test/sql/viewmock.sql @@ -0,0 +1,29 @@ +\unset ECHO +\i test/setup.sql +-- \i sql/pgtap.sql + +SELECT plan(1); + +create or replace view public.some_view +as +select * from (values(1, 'a'), (2, 'b'), (3, 'c')) as t(id, f); + +create or replace function test_view_mocking() RETURNS SETOF TEXT AS $$ +BEGIN + PREPARE some_view_should_be AS select * from (values(1, 'x'), (2, 'y'), (3, 'z')) as t(id, f) ORDER BY id; + perform mock_view('public', 'some_view', + _return_set_sql => 'select * from (values(1, ''x''), (2, ''y''), (3, ''z'')) as t(id, f)'); + PREPARE some_view_returned AS SELECT * FROM public.some_view ORDER BY id; + + RETURN query SELECT * FROM check_test( + results_eq('some_view_returned', 'some_view_should_be'), + TRUE, + 'mock of some_view should return expected result'); +END; +$$ LANGUAGE plpgsql; + +SELECT * FROM test_view_mocking(); + +-- Finish the tests and clean up. +SELECT * FROM finish(); +ROLLBACK;