From 1b59fe7d9db0314e3e76e6fed3a2cd665ab77461 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabr=C3=ADzio=20de=20Royes=20Mello?= Date: Thu, 8 Feb 2024 15:11:17 -0300 Subject: [PATCH] Remove metadata when dropping chunks Historically we have preserved chunk metadata because the old Continuous Aggregate format stores the `chunk_id` column in the materialization hypertable, so to avoid leaving dangling chunk ids there we only marked the chunk as dropped when dropping chunks. In #4269 we introduced a new Continuous Aggregate format that no longer stores the `chunk_id` in the materialization hypertable, so it is safe to also remove the metadata when dropping a chunk, provided all associated Continuous Aggregates are in the new format. Also added a post-update SQL script to clean up unnecessary dropped chunk metadata in our catalog. Fixes #6570 --- .unreleased/fix_6621 | 3 + sql/updates/post-update.sql | 62 +++ src/chunk.c | 3 +- src/ts_catalog/continuous_agg.c | 27 ++ src/ts_catalog/continuous_agg.h | 1 + test/sql/updates/post.catalog.sql | 14 +- tsl/test/expected/cagg_ddl-13.out | 71 ++- tsl/test/expected/cagg_ddl-14.out | 71 ++- tsl/test/expected/cagg_ddl-15.out | 71 ++- tsl/test/expected/cagg_ddl-16.out | 71 ++- tsl/test/expected/cagg_usage-13.out | 115 +++++ tsl/test/expected/cagg_usage-14.out | 115 +++++ tsl/test/expected/cagg_usage-15.out | 115 +++++ tsl/test/expected/cagg_usage-16.out | 115 +++++ tsl/test/expected/compression.out | 548 +++++++++++------------ tsl/test/expected/compression_bgw-13.out | 10 - tsl/test/expected/compression_bgw-14.out | 10 - tsl/test/expected/compression_bgw-15.out | 10 - tsl/test/expected/compression_bgw-16.out | 10 - tsl/test/sql/cagg_ddl.sql.in | 4 +- tsl/test/sql/cagg_usage.sql.in | 63 +++ tsl/test/sql/compression_bgw.sql.in | 6 - 22 files changed, 1035 insertions(+), 480 deletions(-) create mode 100644 .unreleased/fix_6621 diff --git a/.unreleased/fix_6621 b/.unreleased/fix_6621 new file mode 100644 index 00000000000..000af798ea6 --- /dev/null +++ b/.unreleased/fix_6621 @@ -0,0 +1,3 @@ +Fixes: #6621 Remove metadata when dropping chunks + +Thanks: @ndjzurawsk for reporting an error when dropping chunks diff --git a/sql/updates/post-update.sql b/sql/updates/post-update.sql index 0dd2d8a8fc7..db29e175ba6 100644 --- a/sql/updates/post-update.sql +++ b/sql/updates/post-update.sql @@ -172,3 +172,65 @@ $$; -- Repair relations that have relacl entries for users that do not -- exist in pg_authid CALL _timescaledb_functions.repair_relation_acls(); + +-- Cleanup metadata for deleted chunks +DO $$ +DECLARE + ts_major INTEGER; + ts_minor INTEGER; +BEGIN + SELECT ((string_to_array(extversion,'.'))[1])::int, ((string_to_array(extversion,'.'))[2])::int + INTO ts_major, ts_minor + FROM pg_extension WHERE extname = 'timescaledb'; + + IF ts_major >= 2 AND ts_minor >= 15 THEN + CREATE UNLOGGED TABLE _timescaledb_catalog._chunks_remove AS + SELECT id FROM _timescaledb_catalog.chunk + WHERE dropped IS TRUE + AND NOT EXISTS ( + SELECT FROM information_schema.tables + WHERE tables.table_schema = chunk.schema_name + AND tables.table_name = chunk.table_name + ) + AND NOT EXISTS ( + SELECT FROM _timescaledb_catalog.hypertable + JOIN _timescaledb_catalog.continuous_agg ON continuous_agg.raw_hypertable_id = hypertable.id + WHERE hypertable.id = chunk.hypertable_id + -- for the old caggs format we need to keep chunk metadata for dropped chunks + AND continuous_agg.finalized IS FALSE + ); + + WITH _dimension_slice_remove AS ( + DELETE FROM _timescaledb_catalog.dimension_slice + USING 
_timescaledb_catalog.chunk_constraint, _timescaledb_catalog._chunks_remove + WHERE dimension_slice.id = chunk_constraint.dimension_slice_id + AND chunk_constraint.chunk_id = _chunks_remove.id + RETURNING _timescaledb_catalog.dimension_slice.id + ) + DELETE FROM _timescaledb_catalog.chunk_constraint + USING _dimension_slice_remove + WHERE chunk_constraint.dimension_slice_id = _dimension_slice_remove.id; + + DELETE FROM _timescaledb_internal.bgw_policy_chunk_stats + USING _timescaledb_catalog._chunks_remove + WHERE bgw_policy_chunk_stats.chunk_id = _chunks_remove.id; + + DELETE FROM _timescaledb_catalog.chunk_index + USING _timescaledb_catalog._chunks_remove + WHERE chunk_index.chunk_id = _chunks_remove.id; + + DELETE FROM _timescaledb_catalog.compression_chunk_size + USING _timescaledb_catalog._chunks_remove + WHERE compression_chunk_size.chunk_id = _chunks_remove.id + OR compression_chunk_size.compressed_chunk_id = _chunks_remove.id; + + DELETE FROM _timescaledb_catalog.chunk + USING _timescaledb_catalog._chunks_remove + WHERE chunk.id = _chunks_remove.id + OR chunk.compressed_chunk_id = _chunks_remove.id; + + ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog._chunks_remove; + DROP TABLE _timescaledb_catalog._chunks_remove; + END IF; +END; +$$; \ No newline at end of file diff --git a/src/chunk.c b/src/chunk.c index 237b0f5d22a..6b41e703901 100644 --- a/src/chunk.c +++ b/src/chunk.c @@ -3946,6 +3946,7 @@ ts_chunk_do_drop_chunks(Hypertable *ht, int64 older_than, int64 newer_than, int3 } } + bool all_caggs_finalized = ts_continuous_agg_hypertable_all_finalized(hypertable_id); List *dropped_chunk_names = NIL; for (uint64 i = 0; i < num_chunks; i++) { @@ -3968,7 +3969,7 @@ ts_chunk_do_drop_chunks(Hypertable *ht, int64 older_than, int64 newer_than, int3 chunk_name = psprintf("%s.%s", schema_name, table_name); dropped_chunk_names = lappend(dropped_chunk_names, chunk_name); - if (has_continuous_aggs) + if (has_continuous_aggs && !all_caggs_finalized) ts_chunk_drop_preserve_catalog_row(chunks + i, DROP_RESTRICT, log_level); else ts_chunk_drop(chunks + i, DROP_RESTRICT, log_level); diff --git a/src/ts_catalog/continuous_agg.c b/src/ts_catalog/continuous_agg.c index 46eaca03a8a..1b6fc173132 100644 --- a/src/ts_catalog/continuous_agg.c +++ b/src/ts_catalog/continuous_agg.c @@ -566,6 +566,33 @@ ts_continuous_agg_hypertable_status(int32 hypertable_id) return status; } +TSDLLEXPORT bool +ts_continuous_agg_hypertable_all_finalized(int32 raw_hypertable_id) +{ + ScanIterator iterator = + ts_scan_iterator_create(CONTINUOUS_AGG, AccessShareLock, CurrentMemoryContext); + bool all_finalized = true; + + init_scan_by_raw_hypertable_id(&iterator, raw_hypertable_id); + ts_scanner_foreach(&iterator) + { + FormData_continuous_agg data; + TupleInfo *ti = ts_scan_iterator_tuple_info(&iterator); + + continuous_agg_formdata_fill(&data, ti); + + if (!data.finalized) + { + all_finalized = false; + break; + } + } + + ts_scan_iterator_close(&iterator); + + return all_finalized; +} + TSDLLEXPORT List * ts_continuous_aggs_find_by_raw_table_id(int32 raw_hypertable_id) { diff --git a/src/ts_catalog/continuous_agg.h b/src/ts_catalog/continuous_agg.h index 6680f9d8a6b..cb4964df2f5 100644 --- a/src/ts_catalog/continuous_agg.h +++ b/src/ts_catalog/continuous_agg.h @@ -165,6 +165,7 @@ extern TSDLLEXPORT void ts_materialization_invalidation_log_delete_inner(int32 m extern TSDLLEXPORT ContinuousAggHypertableStatus ts_continuous_agg_hypertable_status(int32 hypertable_id); +extern TSDLLEXPORT bool 
ts_continuous_agg_hypertable_all_finalized(int32 raw_hypertable_id); extern TSDLLEXPORT List *ts_continuous_aggs_find_by_raw_table_id(int32 raw_hypertable_id); extern TSDLLEXPORT ContinuousAgg *ts_continuous_agg_find_by_view_name(const char *schema, const char *name, diff --git a/test/sql/updates/post.catalog.sql b/test/sql/updates/post.catalog.sql index a7ac579498e..1af5e5e1232 100644 --- a/test/sql/updates/post.catalog.sql +++ b/test/sql/updates/post.catalog.sql @@ -45,20 +45,18 @@ SELECT count(*) -- The list of tables configured to be dumped. SELECT unnest(extconfig)::regclass::text, unnest(extcondition) FROM pg_extension WHERE extname = 'timescaledb' ORDER BY 1; --- Show dropped chunks -SELECT id, hypertable_id, schema_name, table_name, dropped -FROM _timescaledb_catalog.chunk c -WHERE c.dropped -ORDER BY c.id, c.hypertable_id; - -- Show chunks that are not dropped and include owner in the output SELECT c.id, c.hypertable_id, c.schema_name, c.table_name, c.dropped, cl.relowner::regrole FROM _timescaledb_catalog.chunk c INNER JOIN pg_class cl ON (cl.oid=format('%I.%I', schema_name, table_name)::regclass) -WHERE NOT c.dropped +WHERE c.dropped IS FALSE ORDER BY c.id, c.hypertable_id; -SELECT * FROM _timescaledb_catalog.chunk_constraint ORDER BY chunk_id, dimension_slice_id, constraint_name; +SELECT chunk_constraint.* FROM _timescaledb_catalog.chunk_constraint +JOIN _timescaledb_catalog.chunk ON chunk.id = chunk_constraint.chunk_id +WHERE chunk.dropped IS FALSE +ORDER BY chunk_constraint.chunk_id, chunk_constraint.dimension_slice_id, chunk_constraint.constraint_name; + SELECT index_name FROM _timescaledb_catalog.chunk_index ORDER BY index_name; -- Show attnum of all regclass objects belonging to our extension diff --git a/tsl/test/expected/cagg_ddl-13.out b/tsl/test/expected/cagg_ddl-13.out index 2026641e354..c127cdaf5ca 100644 --- a/tsl/test/expected/cagg_ddl-13.out +++ b/tsl/test/expected/cagg_ddl-13.out @@ -573,14 +573,11 @@ SELECT * FROM drop_chunks_table ORDER BY time ASC limit 1; 30 | 30 (1 row) ---we see the chunks row with the dropped flags set; -SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk where dropped; - id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk -----+---------------+-----------------------+--------------------+---------------------+---------+--------+----------- - 13 | 10 | _timescaledb_internal | _hyper_10_13_chunk | | t | 0 | f - 14 | 10 | _timescaledb_internal | _hyper_10_14_chunk | | t | 0 | f - 15 | 10 | _timescaledb_internal | _hyper_10_15_chunk | | t | 0 | f -(3 rows) +--chunks are removed +SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk WHERE dropped; + id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk +----+---------------+-------------+------------+---------------------+---------+--------+----------- +(0 rows) --still see data in the view SELECT * FROM drop_chunks_view WHERE time_bucket < (integer_now_test2()-9) ORDER BY time_bucket DESC; @@ -634,9 +631,9 @@ WHERE hypertable_name = 'drop_chunks_table' ORDER BY range_start_integer; chunk_name | range_start_integer | range_end_integer --------------------+---------------------+------------------- - _hyper_10_13_chunk | 0 | 10 - _hyper_10_14_chunk | 10 | 20 - _hyper_10_15_chunk | 20 | 30 + _hyper_10_18_chunk | 0 | 10 + _hyper_10_19_chunk | 10 | 20 + 
_hyper_10_20_chunk | 20 | 30 _hyper_10_16_chunk | 30 | 40 (4 rows) @@ -681,7 +678,7 @@ FROM timescaledb_information.chunks WHERE hypertable_name = :'drop_chunks_mat_table_name' ORDER BY range_start_integer; chunk_name | range_start_integer | range_end_integer --------------------+---------------------+------------------- - _hyper_11_20_chunk | 0 | 100 + _hyper_11_23_chunk | 0 | 100 (1 row) \set ON_ERROR_STOP 0 @@ -702,12 +699,12 @@ WHERE hypertable_name = 'drop_chunks_table' ORDER BY 2,3; chunk_name | range_start_integer | range_end_integer --------------------+---------------------+------------------- - _hyper_10_13_chunk | 0 | 10 - _hyper_10_14_chunk | 10 | 20 - _hyper_10_15_chunk | 20 | 30 + _hyper_10_18_chunk | 0 | 10 + _hyper_10_19_chunk | 10 | 20 + _hyper_10_20_chunk | 20 | 30 _hyper_10_16_chunk | 30 | 40 - _hyper_10_18_chunk | 40 | 50 - _hyper_10_19_chunk | 50 | 60 + _hyper_10_21_chunk | 40 | 50 + _hyper_10_22_chunk | 50 | 60 (6 rows) -- Pick the second chunk as the one to drop @@ -766,11 +763,11 @@ WHERE hypertable_name = 'drop_chunks_table' ORDER BY 2,3; chunk_name | range_start_integer | range_end_integer --------------------+---------------------+------------------- - _hyper_10_13_chunk | 0 | 10 - _hyper_10_15_chunk | 20 | 30 + _hyper_10_18_chunk | 0 | 10 + _hyper_10_20_chunk | 20 | 30 _hyper_10_16_chunk | 30 | 40 - _hyper_10_18_chunk | 40 | 50 - _hyper_10_19_chunk | 50 | 60 + _hyper_10_21_chunk | 40 | 50 + _hyper_10_22_chunk | 50 | 60 (5 rows) -- Data is no longer in the table but still in the view @@ -799,8 +796,8 @@ CALL refresh_continuous_aggregate('drop_chunks_view', NULL, 30); SELECT drop_chunks('drop_chunks_table', older_than=>30); drop_chunks ------------------------------------------ - _timescaledb_internal._hyper_10_13_chunk - _timescaledb_internal._hyper_10_15_chunk + _timescaledb_internal._hyper_10_18_chunk + _timescaledb_internal._hyper_10_20_chunk (2 rows) -- Verify that the chunks are dropped @@ -811,8 +808,8 @@ ORDER BY 2,3; chunk_name | range_start_integer | range_end_integer --------------------+---------------------+------------------- _hyper_10_16_chunk | 30 | 40 - _hyper_10_18_chunk | 40 | 50 - _hyper_10_19_chunk | 50 | 60 + _hyper_10_21_chunk | 40 | 50 + _hyper_10_22_chunk | 50 | 60 (3 rows) -- The continuous aggregate should be refreshed in the regions covered @@ -906,8 +903,8 @@ SELECT user_view, AND user_view::text LIKE 'whatever_view%'; user_view | mat_table | mat_tablespace | chunk_name | chunk_tablespace -----------------+-----------------------------+----------------+--------------------+------------------ - whatever_view_1 | _materialized_hypertable_13 | | _hyper_13_24_chunk | - whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_25_chunk | tablespace1 + whatever_view_1 | _materialized_hypertable_13 | | _hyper_13_27_chunk | + whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_28_chunk | tablespace1 (2 rows) ALTER MATERIALIZED VIEW whatever_view_1 SET TABLESPACE tablespace2; @@ -921,14 +918,14 @@ SELECT user_view, AND user_view::text LIKE 'whatever_view%'; user_view | mat_table | mat_tablespace | chunk_name | chunk_tablespace -----------------+-----------------------------+----------------+--------------------+------------------ - whatever_view_1 | _materialized_hypertable_13 | tablespace2 | _hyper_13_24_chunk | tablespace2 - whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_25_chunk | tablespace1 + whatever_view_1 | _materialized_hypertable_13 | tablespace2 | _hyper_13_27_chunk | 
tablespace2 + whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_28_chunk | tablespace1 (2 rows) DROP MATERIALIZED VIEW whatever_view_1; -NOTICE: drop cascades to table _timescaledb_internal._hyper_13_24_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_13_27_chunk DROP MATERIALIZED VIEW whatever_view_2; -NOTICE: drop cascades to table _timescaledb_internal._hyper_14_25_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_14_28_chunk -- test bucket width expressions on integer hypertables CREATE TABLE metrics_int2 ( time int2 NOT NULL, @@ -1128,7 +1125,7 @@ SUM(value), COUNT(value) FROM conditionsnm GROUP BY bucket WITH DATA; NOTICE: refreshing continuous aggregate "conditionsnm_4" DROP materialized view conditionsnm_4; -NOTICE: drop cascades to table _timescaledb_internal._hyper_26_37_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_26_40_chunk -- Case 2: DROP CASCADE should have similar behaviour as DROP CREATE MATERIALIZED VIEW conditionsnm_4 WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) @@ -1138,7 +1135,7 @@ SUM(value), COUNT(value) FROM conditionsnm GROUP BY bucket WITH DATA; NOTICE: refreshing continuous aggregate "conditionsnm_4" DROP materialized view conditionsnm_4 CASCADE; -NOTICE: drop cascades to table _timescaledb_internal._hyper_27_38_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_27_41_chunk -- Case 3: require CASCADE in case of dependent object CREATE MATERIALIZED VIEW conditionsnm_4 WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) @@ -1155,7 +1152,7 @@ ERROR: cannot drop view conditionsnm_4 because other objects depend on it -- Case 4: DROP CASCADE with dependency DROP MATERIALIZED VIEW conditionsnm_4 CASCADE; NOTICE: drop cascades to view see_cagg -NOTICE: drop cascades to table _timescaledb_internal._hyper_28_39_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_28_42_chunk -- Test DROP SCHEMA CASCADE with continuous aggregates -- -- Issue: #2350 @@ -1580,7 +1577,7 @@ DELETE FROM test_setting WHERE val = 20; --TEST test with multiple settings on continuous aggregates with real time aggregates turned off initially -- -- test for materialized_only + compress combinations (real time aggs enabled initially) DROP MATERIALIZED VIEW test_setting_cagg; -NOTICE: drop cascades to table _timescaledb_internal._hyper_40_47_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_40_50_chunk CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only = true) AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1; NOTICE: refreshing continuous aggregate "test_setting_cagg" @@ -1770,8 +1767,8 @@ Indexes: "_materialized_hypertable_45_bucket_idx" btree (bucket DESC) Triggers: ts_insert_blocker BEFORE INSERT ON _timescaledb_internal._materialized_hypertable_45 FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker() -Child tables: _timescaledb_internal._hyper_45_52_chunk, - _timescaledb_internal._hyper_45_53_chunk +Child tables: _timescaledb_internal._hyper_45_55_chunk, + _timescaledb_internal._hyper_45_56_chunk \d+ 'cashflows' View "public.cashflows" diff --git a/tsl/test/expected/cagg_ddl-14.out b/tsl/test/expected/cagg_ddl-14.out index 2026641e354..c127cdaf5ca 100644 --- a/tsl/test/expected/cagg_ddl-14.out +++ b/tsl/test/expected/cagg_ddl-14.out @@ -573,14 +573,11 @@ SELECT * FROM drop_chunks_table ORDER BY time ASC limit 1; 30 | 30 (1 row) ---we see 
the chunks row with the dropped flags set; -SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk where dropped; - id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk -----+---------------+-----------------------+--------------------+---------------------+---------+--------+----------- - 13 | 10 | _timescaledb_internal | _hyper_10_13_chunk | | t | 0 | f - 14 | 10 | _timescaledb_internal | _hyper_10_14_chunk | | t | 0 | f - 15 | 10 | _timescaledb_internal | _hyper_10_15_chunk | | t | 0 | f -(3 rows) +--chunks are removed +SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk WHERE dropped; + id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk +----+---------------+-------------+------------+---------------------+---------+--------+----------- +(0 rows) --still see data in the view SELECT * FROM drop_chunks_view WHERE time_bucket < (integer_now_test2()-9) ORDER BY time_bucket DESC; @@ -634,9 +631,9 @@ WHERE hypertable_name = 'drop_chunks_table' ORDER BY range_start_integer; chunk_name | range_start_integer | range_end_integer --------------------+---------------------+------------------- - _hyper_10_13_chunk | 0 | 10 - _hyper_10_14_chunk | 10 | 20 - _hyper_10_15_chunk | 20 | 30 + _hyper_10_18_chunk | 0 | 10 + _hyper_10_19_chunk | 10 | 20 + _hyper_10_20_chunk | 20 | 30 _hyper_10_16_chunk | 30 | 40 (4 rows) @@ -681,7 +678,7 @@ FROM timescaledb_information.chunks WHERE hypertable_name = :'drop_chunks_mat_table_name' ORDER BY range_start_integer; chunk_name | range_start_integer | range_end_integer --------------------+---------------------+------------------- - _hyper_11_20_chunk | 0 | 100 + _hyper_11_23_chunk | 0 | 100 (1 row) \set ON_ERROR_STOP 0 @@ -702,12 +699,12 @@ WHERE hypertable_name = 'drop_chunks_table' ORDER BY 2,3; chunk_name | range_start_integer | range_end_integer --------------------+---------------------+------------------- - _hyper_10_13_chunk | 0 | 10 - _hyper_10_14_chunk | 10 | 20 - _hyper_10_15_chunk | 20 | 30 + _hyper_10_18_chunk | 0 | 10 + _hyper_10_19_chunk | 10 | 20 + _hyper_10_20_chunk | 20 | 30 _hyper_10_16_chunk | 30 | 40 - _hyper_10_18_chunk | 40 | 50 - _hyper_10_19_chunk | 50 | 60 + _hyper_10_21_chunk | 40 | 50 + _hyper_10_22_chunk | 50 | 60 (6 rows) -- Pick the second chunk as the one to drop @@ -766,11 +763,11 @@ WHERE hypertable_name = 'drop_chunks_table' ORDER BY 2,3; chunk_name | range_start_integer | range_end_integer --------------------+---------------------+------------------- - _hyper_10_13_chunk | 0 | 10 - _hyper_10_15_chunk | 20 | 30 + _hyper_10_18_chunk | 0 | 10 + _hyper_10_20_chunk | 20 | 30 _hyper_10_16_chunk | 30 | 40 - _hyper_10_18_chunk | 40 | 50 - _hyper_10_19_chunk | 50 | 60 + _hyper_10_21_chunk | 40 | 50 + _hyper_10_22_chunk | 50 | 60 (5 rows) -- Data is no longer in the table but still in the view @@ -799,8 +796,8 @@ CALL refresh_continuous_aggregate('drop_chunks_view', NULL, 30); SELECT drop_chunks('drop_chunks_table', older_than=>30); drop_chunks ------------------------------------------ - _timescaledb_internal._hyper_10_13_chunk - _timescaledb_internal._hyper_10_15_chunk + _timescaledb_internal._hyper_10_18_chunk + _timescaledb_internal._hyper_10_20_chunk (2 rows) -- Verify that the chunks are dropped @@ -811,8 +808,8 @@ ORDER BY 2,3; chunk_name | range_start_integer | range_end_integer 
--------------------+---------------------+------------------- _hyper_10_16_chunk | 30 | 40 - _hyper_10_18_chunk | 40 | 50 - _hyper_10_19_chunk | 50 | 60 + _hyper_10_21_chunk | 40 | 50 + _hyper_10_22_chunk | 50 | 60 (3 rows) -- The continuous aggregate should be refreshed in the regions covered @@ -906,8 +903,8 @@ SELECT user_view, AND user_view::text LIKE 'whatever_view%'; user_view | mat_table | mat_tablespace | chunk_name | chunk_tablespace -----------------+-----------------------------+----------------+--------------------+------------------ - whatever_view_1 | _materialized_hypertable_13 | | _hyper_13_24_chunk | - whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_25_chunk | tablespace1 + whatever_view_1 | _materialized_hypertable_13 | | _hyper_13_27_chunk | + whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_28_chunk | tablespace1 (2 rows) ALTER MATERIALIZED VIEW whatever_view_1 SET TABLESPACE tablespace2; @@ -921,14 +918,14 @@ SELECT user_view, AND user_view::text LIKE 'whatever_view%'; user_view | mat_table | mat_tablespace | chunk_name | chunk_tablespace -----------------+-----------------------------+----------------+--------------------+------------------ - whatever_view_1 | _materialized_hypertable_13 | tablespace2 | _hyper_13_24_chunk | tablespace2 - whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_25_chunk | tablespace1 + whatever_view_1 | _materialized_hypertable_13 | tablespace2 | _hyper_13_27_chunk | tablespace2 + whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_28_chunk | tablespace1 (2 rows) DROP MATERIALIZED VIEW whatever_view_1; -NOTICE: drop cascades to table _timescaledb_internal._hyper_13_24_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_13_27_chunk DROP MATERIALIZED VIEW whatever_view_2; -NOTICE: drop cascades to table _timescaledb_internal._hyper_14_25_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_14_28_chunk -- test bucket width expressions on integer hypertables CREATE TABLE metrics_int2 ( time int2 NOT NULL, @@ -1128,7 +1125,7 @@ SUM(value), COUNT(value) FROM conditionsnm GROUP BY bucket WITH DATA; NOTICE: refreshing continuous aggregate "conditionsnm_4" DROP materialized view conditionsnm_4; -NOTICE: drop cascades to table _timescaledb_internal._hyper_26_37_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_26_40_chunk -- Case 2: DROP CASCADE should have similar behaviour as DROP CREATE MATERIALIZED VIEW conditionsnm_4 WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) @@ -1138,7 +1135,7 @@ SUM(value), COUNT(value) FROM conditionsnm GROUP BY bucket WITH DATA; NOTICE: refreshing continuous aggregate "conditionsnm_4" DROP materialized view conditionsnm_4 CASCADE; -NOTICE: drop cascades to table _timescaledb_internal._hyper_27_38_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_27_41_chunk -- Case 3: require CASCADE in case of dependent object CREATE MATERIALIZED VIEW conditionsnm_4 WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) @@ -1155,7 +1152,7 @@ ERROR: cannot drop view conditionsnm_4 because other objects depend on it -- Case 4: DROP CASCADE with dependency DROP MATERIALIZED VIEW conditionsnm_4 CASCADE; NOTICE: drop cascades to view see_cagg -NOTICE: drop cascades to table _timescaledb_internal._hyper_28_39_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_28_42_chunk -- Test DROP SCHEMA CASCADE with continuous aggregates -- -- Issue: #2350 
@@ -1580,7 +1577,7 @@ DELETE FROM test_setting WHERE val = 20; --TEST test with multiple settings on continuous aggregates with real time aggregates turned off initially -- -- test for materialized_only + compress combinations (real time aggs enabled initially) DROP MATERIALIZED VIEW test_setting_cagg; -NOTICE: drop cascades to table _timescaledb_internal._hyper_40_47_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_40_50_chunk CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only = true) AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1; NOTICE: refreshing continuous aggregate "test_setting_cagg" @@ -1770,8 +1767,8 @@ Indexes: "_materialized_hypertable_45_bucket_idx" btree (bucket DESC) Triggers: ts_insert_blocker BEFORE INSERT ON _timescaledb_internal._materialized_hypertable_45 FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker() -Child tables: _timescaledb_internal._hyper_45_52_chunk, - _timescaledb_internal._hyper_45_53_chunk +Child tables: _timescaledb_internal._hyper_45_55_chunk, + _timescaledb_internal._hyper_45_56_chunk \d+ 'cashflows' View "public.cashflows" diff --git a/tsl/test/expected/cagg_ddl-15.out b/tsl/test/expected/cagg_ddl-15.out index 2026641e354..c127cdaf5ca 100644 --- a/tsl/test/expected/cagg_ddl-15.out +++ b/tsl/test/expected/cagg_ddl-15.out @@ -573,14 +573,11 @@ SELECT * FROM drop_chunks_table ORDER BY time ASC limit 1; 30 | 30 (1 row) ---we see the chunks row with the dropped flags set; -SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk where dropped; - id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk -----+---------------+-----------------------+--------------------+---------------------+---------+--------+----------- - 13 | 10 | _timescaledb_internal | _hyper_10_13_chunk | | t | 0 | f - 14 | 10 | _timescaledb_internal | _hyper_10_14_chunk | | t | 0 | f - 15 | 10 | _timescaledb_internal | _hyper_10_15_chunk | | t | 0 | f -(3 rows) +--chunks are removed +SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk WHERE dropped; + id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk +----+---------------+-------------+------------+---------------------+---------+--------+----------- +(0 rows) --still see data in the view SELECT * FROM drop_chunks_view WHERE time_bucket < (integer_now_test2()-9) ORDER BY time_bucket DESC; @@ -634,9 +631,9 @@ WHERE hypertable_name = 'drop_chunks_table' ORDER BY range_start_integer; chunk_name | range_start_integer | range_end_integer --------------------+---------------------+------------------- - _hyper_10_13_chunk | 0 | 10 - _hyper_10_14_chunk | 10 | 20 - _hyper_10_15_chunk | 20 | 30 + _hyper_10_18_chunk | 0 | 10 + _hyper_10_19_chunk | 10 | 20 + _hyper_10_20_chunk | 20 | 30 _hyper_10_16_chunk | 30 | 40 (4 rows) @@ -681,7 +678,7 @@ FROM timescaledb_information.chunks WHERE hypertable_name = :'drop_chunks_mat_table_name' ORDER BY range_start_integer; chunk_name | range_start_integer | range_end_integer --------------------+---------------------+------------------- - _hyper_11_20_chunk | 0 | 100 + _hyper_11_23_chunk | 0 | 100 (1 row) \set ON_ERROR_STOP 0 @@ -702,12 +699,12 @@ WHERE hypertable_name = 'drop_chunks_table' ORDER BY 2,3; chunk_name | range_start_integer | 
range_end_integer --------------------+---------------------+------------------- - _hyper_10_13_chunk | 0 | 10 - _hyper_10_14_chunk | 10 | 20 - _hyper_10_15_chunk | 20 | 30 + _hyper_10_18_chunk | 0 | 10 + _hyper_10_19_chunk | 10 | 20 + _hyper_10_20_chunk | 20 | 30 _hyper_10_16_chunk | 30 | 40 - _hyper_10_18_chunk | 40 | 50 - _hyper_10_19_chunk | 50 | 60 + _hyper_10_21_chunk | 40 | 50 + _hyper_10_22_chunk | 50 | 60 (6 rows) -- Pick the second chunk as the one to drop @@ -766,11 +763,11 @@ WHERE hypertable_name = 'drop_chunks_table' ORDER BY 2,3; chunk_name | range_start_integer | range_end_integer --------------------+---------------------+------------------- - _hyper_10_13_chunk | 0 | 10 - _hyper_10_15_chunk | 20 | 30 + _hyper_10_18_chunk | 0 | 10 + _hyper_10_20_chunk | 20 | 30 _hyper_10_16_chunk | 30 | 40 - _hyper_10_18_chunk | 40 | 50 - _hyper_10_19_chunk | 50 | 60 + _hyper_10_21_chunk | 40 | 50 + _hyper_10_22_chunk | 50 | 60 (5 rows) -- Data is no longer in the table but still in the view @@ -799,8 +796,8 @@ CALL refresh_continuous_aggregate('drop_chunks_view', NULL, 30); SELECT drop_chunks('drop_chunks_table', older_than=>30); drop_chunks ------------------------------------------ - _timescaledb_internal._hyper_10_13_chunk - _timescaledb_internal._hyper_10_15_chunk + _timescaledb_internal._hyper_10_18_chunk + _timescaledb_internal._hyper_10_20_chunk (2 rows) -- Verify that the chunks are dropped @@ -811,8 +808,8 @@ ORDER BY 2,3; chunk_name | range_start_integer | range_end_integer --------------------+---------------------+------------------- _hyper_10_16_chunk | 30 | 40 - _hyper_10_18_chunk | 40 | 50 - _hyper_10_19_chunk | 50 | 60 + _hyper_10_21_chunk | 40 | 50 + _hyper_10_22_chunk | 50 | 60 (3 rows) -- The continuous aggregate should be refreshed in the regions covered @@ -906,8 +903,8 @@ SELECT user_view, AND user_view::text LIKE 'whatever_view%'; user_view | mat_table | mat_tablespace | chunk_name | chunk_tablespace -----------------+-----------------------------+----------------+--------------------+------------------ - whatever_view_1 | _materialized_hypertable_13 | | _hyper_13_24_chunk | - whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_25_chunk | tablespace1 + whatever_view_1 | _materialized_hypertable_13 | | _hyper_13_27_chunk | + whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_28_chunk | tablespace1 (2 rows) ALTER MATERIALIZED VIEW whatever_view_1 SET TABLESPACE tablespace2; @@ -921,14 +918,14 @@ SELECT user_view, AND user_view::text LIKE 'whatever_view%'; user_view | mat_table | mat_tablespace | chunk_name | chunk_tablespace -----------------+-----------------------------+----------------+--------------------+------------------ - whatever_view_1 | _materialized_hypertable_13 | tablespace2 | _hyper_13_24_chunk | tablespace2 - whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_25_chunk | tablespace1 + whatever_view_1 | _materialized_hypertable_13 | tablespace2 | _hyper_13_27_chunk | tablespace2 + whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_28_chunk | tablespace1 (2 rows) DROP MATERIALIZED VIEW whatever_view_1; -NOTICE: drop cascades to table _timescaledb_internal._hyper_13_24_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_13_27_chunk DROP MATERIALIZED VIEW whatever_view_2; -NOTICE: drop cascades to table _timescaledb_internal._hyper_14_25_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_14_28_chunk -- test bucket width expressions on integer 
hypertables CREATE TABLE metrics_int2 ( time int2 NOT NULL, @@ -1128,7 +1125,7 @@ SUM(value), COUNT(value) FROM conditionsnm GROUP BY bucket WITH DATA; NOTICE: refreshing continuous aggregate "conditionsnm_4" DROP materialized view conditionsnm_4; -NOTICE: drop cascades to table _timescaledb_internal._hyper_26_37_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_26_40_chunk -- Case 2: DROP CASCADE should have similar behaviour as DROP CREATE MATERIALIZED VIEW conditionsnm_4 WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) @@ -1138,7 +1135,7 @@ SUM(value), COUNT(value) FROM conditionsnm GROUP BY bucket WITH DATA; NOTICE: refreshing continuous aggregate "conditionsnm_4" DROP materialized view conditionsnm_4 CASCADE; -NOTICE: drop cascades to table _timescaledb_internal._hyper_27_38_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_27_41_chunk -- Case 3: require CASCADE in case of dependent object CREATE MATERIALIZED VIEW conditionsnm_4 WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) @@ -1155,7 +1152,7 @@ ERROR: cannot drop view conditionsnm_4 because other objects depend on it -- Case 4: DROP CASCADE with dependency DROP MATERIALIZED VIEW conditionsnm_4 CASCADE; NOTICE: drop cascades to view see_cagg -NOTICE: drop cascades to table _timescaledb_internal._hyper_28_39_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_28_42_chunk -- Test DROP SCHEMA CASCADE with continuous aggregates -- -- Issue: #2350 @@ -1580,7 +1577,7 @@ DELETE FROM test_setting WHERE val = 20; --TEST test with multiple settings on continuous aggregates with real time aggregates turned off initially -- -- test for materialized_only + compress combinations (real time aggs enabled initially) DROP MATERIALIZED VIEW test_setting_cagg; -NOTICE: drop cascades to table _timescaledb_internal._hyper_40_47_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_40_50_chunk CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only = true) AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1; NOTICE: refreshing continuous aggregate "test_setting_cagg" @@ -1770,8 +1767,8 @@ Indexes: "_materialized_hypertable_45_bucket_idx" btree (bucket DESC) Triggers: ts_insert_blocker BEFORE INSERT ON _timescaledb_internal._materialized_hypertable_45 FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker() -Child tables: _timescaledb_internal._hyper_45_52_chunk, - _timescaledb_internal._hyper_45_53_chunk +Child tables: _timescaledb_internal._hyper_45_55_chunk, + _timescaledb_internal._hyper_45_56_chunk \d+ 'cashflows' View "public.cashflows" diff --git a/tsl/test/expected/cagg_ddl-16.out b/tsl/test/expected/cagg_ddl-16.out index cae509141be..56bf85cc6e5 100644 --- a/tsl/test/expected/cagg_ddl-16.out +++ b/tsl/test/expected/cagg_ddl-16.out @@ -573,14 +573,11 @@ SELECT * FROM drop_chunks_table ORDER BY time ASC limit 1; 30 | 30 (1 row) ---we see the chunks row with the dropped flags set; -SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk where dropped; - id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk -----+---------------+-----------------------+--------------------+---------------------+---------+--------+----------- - 13 | 10 | _timescaledb_internal | _hyper_10_13_chunk | | t | 0 | f - 14 | 10 | _timescaledb_internal | _hyper_10_14_chunk 
| | t | 0 | f - 15 | 10 | _timescaledb_internal | _hyper_10_15_chunk | | t | 0 | f -(3 rows) +--chunks are removed +SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk WHERE dropped; + id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk +----+---------------+-------------+------------+---------------------+---------+--------+----------- +(0 rows) --still see data in the view SELECT * FROM drop_chunks_view WHERE time_bucket < (integer_now_test2()-9) ORDER BY time_bucket DESC; @@ -634,9 +631,9 @@ WHERE hypertable_name = 'drop_chunks_table' ORDER BY range_start_integer; chunk_name | range_start_integer | range_end_integer --------------------+---------------------+------------------- - _hyper_10_13_chunk | 0 | 10 - _hyper_10_14_chunk | 10 | 20 - _hyper_10_15_chunk | 20 | 30 + _hyper_10_18_chunk | 0 | 10 + _hyper_10_19_chunk | 10 | 20 + _hyper_10_20_chunk | 20 | 30 _hyper_10_16_chunk | 30 | 40 (4 rows) @@ -681,7 +678,7 @@ FROM timescaledb_information.chunks WHERE hypertable_name = :'drop_chunks_mat_table_name' ORDER BY range_start_integer; chunk_name | range_start_integer | range_end_integer --------------------+---------------------+------------------- - _hyper_11_20_chunk | 0 | 100 + _hyper_11_23_chunk | 0 | 100 (1 row) \set ON_ERROR_STOP 0 @@ -702,12 +699,12 @@ WHERE hypertable_name = 'drop_chunks_table' ORDER BY 2,3; chunk_name | range_start_integer | range_end_integer --------------------+---------------------+------------------- - _hyper_10_13_chunk | 0 | 10 - _hyper_10_14_chunk | 10 | 20 - _hyper_10_15_chunk | 20 | 30 + _hyper_10_18_chunk | 0 | 10 + _hyper_10_19_chunk | 10 | 20 + _hyper_10_20_chunk | 20 | 30 _hyper_10_16_chunk | 30 | 40 - _hyper_10_18_chunk | 40 | 50 - _hyper_10_19_chunk | 50 | 60 + _hyper_10_21_chunk | 40 | 50 + _hyper_10_22_chunk | 50 | 60 (6 rows) -- Pick the second chunk as the one to drop @@ -766,11 +763,11 @@ WHERE hypertable_name = 'drop_chunks_table' ORDER BY 2,3; chunk_name | range_start_integer | range_end_integer --------------------+---------------------+------------------- - _hyper_10_13_chunk | 0 | 10 - _hyper_10_15_chunk | 20 | 30 + _hyper_10_18_chunk | 0 | 10 + _hyper_10_20_chunk | 20 | 30 _hyper_10_16_chunk | 30 | 40 - _hyper_10_18_chunk | 40 | 50 - _hyper_10_19_chunk | 50 | 60 + _hyper_10_21_chunk | 40 | 50 + _hyper_10_22_chunk | 50 | 60 (5 rows) -- Data is no longer in the table but still in the view @@ -799,8 +796,8 @@ CALL refresh_continuous_aggregate('drop_chunks_view', NULL, 30); SELECT drop_chunks('drop_chunks_table', older_than=>30); drop_chunks ------------------------------------------ - _timescaledb_internal._hyper_10_13_chunk - _timescaledb_internal._hyper_10_15_chunk + _timescaledb_internal._hyper_10_18_chunk + _timescaledb_internal._hyper_10_20_chunk (2 rows) -- Verify that the chunks are dropped @@ -811,8 +808,8 @@ ORDER BY 2,3; chunk_name | range_start_integer | range_end_integer --------------------+---------------------+------------------- _hyper_10_16_chunk | 30 | 40 - _hyper_10_18_chunk | 40 | 50 - _hyper_10_19_chunk | 50 | 60 + _hyper_10_21_chunk | 40 | 50 + _hyper_10_22_chunk | 50 | 60 (3 rows) -- The continuous aggregate should be refreshed in the regions covered @@ -906,8 +903,8 @@ SELECT user_view, AND user_view::text LIKE 'whatever_view%'; user_view | mat_table | mat_tablespace | chunk_name | chunk_tablespace 
-----------------+-----------------------------+----------------+--------------------+------------------ - whatever_view_1 | _materialized_hypertable_13 | | _hyper_13_24_chunk | - whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_25_chunk | tablespace1 + whatever_view_1 | _materialized_hypertable_13 | | _hyper_13_27_chunk | + whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_28_chunk | tablespace1 (2 rows) ALTER MATERIALIZED VIEW whatever_view_1 SET TABLESPACE tablespace2; @@ -921,14 +918,14 @@ SELECT user_view, AND user_view::text LIKE 'whatever_view%'; user_view | mat_table | mat_tablespace | chunk_name | chunk_tablespace -----------------+-----------------------------+----------------+--------------------+------------------ - whatever_view_1 | _materialized_hypertable_13 | tablespace2 | _hyper_13_24_chunk | tablespace2 - whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_25_chunk | tablespace1 + whatever_view_1 | _materialized_hypertable_13 | tablespace2 | _hyper_13_27_chunk | tablespace2 + whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_28_chunk | tablespace1 (2 rows) DROP MATERIALIZED VIEW whatever_view_1; -NOTICE: drop cascades to table _timescaledb_internal._hyper_13_24_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_13_27_chunk DROP MATERIALIZED VIEW whatever_view_2; -NOTICE: drop cascades to table _timescaledb_internal._hyper_14_25_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_14_28_chunk -- test bucket width expressions on integer hypertables CREATE TABLE metrics_int2 ( time int2 NOT NULL, @@ -1128,7 +1125,7 @@ SUM(value), COUNT(value) FROM conditionsnm GROUP BY bucket WITH DATA; NOTICE: refreshing continuous aggregate "conditionsnm_4" DROP materialized view conditionsnm_4; -NOTICE: drop cascades to table _timescaledb_internal._hyper_26_37_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_26_40_chunk -- Case 2: DROP CASCADE should have similar behaviour as DROP CREATE MATERIALIZED VIEW conditionsnm_4 WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) @@ -1138,7 +1135,7 @@ SUM(value), COUNT(value) FROM conditionsnm GROUP BY bucket WITH DATA; NOTICE: refreshing continuous aggregate "conditionsnm_4" DROP materialized view conditionsnm_4 CASCADE; -NOTICE: drop cascades to table _timescaledb_internal._hyper_27_38_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_27_41_chunk -- Case 3: require CASCADE in case of dependent object CREATE MATERIALIZED VIEW conditionsnm_4 WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) @@ -1155,7 +1152,7 @@ ERROR: cannot drop view conditionsnm_4 because other objects depend on it -- Case 4: DROP CASCADE with dependency DROP MATERIALIZED VIEW conditionsnm_4 CASCADE; NOTICE: drop cascades to view see_cagg -NOTICE: drop cascades to table _timescaledb_internal._hyper_28_39_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_28_42_chunk -- Test DROP SCHEMA CASCADE with continuous aggregates -- -- Issue: #2350 @@ -1580,7 +1577,7 @@ DELETE FROM test_setting WHERE val = 20; --TEST test with multiple settings on continuous aggregates with real time aggregates turned off initially -- -- test for materialized_only + compress combinations (real time aggs enabled initially) DROP MATERIALIZED VIEW test_setting_cagg; -NOTICE: drop cascades to table _timescaledb_internal._hyper_40_47_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_40_50_chunk 
CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only = true) AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1; NOTICE: refreshing continuous aggregate "test_setting_cagg" @@ -1770,8 +1767,8 @@ Indexes: "_materialized_hypertable_45_bucket_idx" btree (bucket DESC) Triggers: ts_insert_blocker BEFORE INSERT ON _timescaledb_internal._materialized_hypertable_45 FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker() -Child tables: _timescaledb_internal._hyper_45_52_chunk, - _timescaledb_internal._hyper_45_53_chunk +Child tables: _timescaledb_internal._hyper_45_55_chunk, + _timescaledb_internal._hyper_45_56_chunk \d+ 'cashflows' View "public.cashflows" diff --git a/tsl/test/expected/cagg_usage-13.out b/tsl/test/expected/cagg_usage-13.out index 4b265c0c794..0f00dde24e1 100644 --- a/tsl/test/expected/cagg_usage-13.out +++ b/tsl/test/expected/cagg_usage-13.out @@ -482,3 +482,118 @@ SELECT * FROM cagg3; CREATE MATERIALIZED VIEW cagg4 WITH (timescaledb.continuous,timescaledb.materialized_only=true) AS SELECT time_bucket('1 month', time, 'PST8PDT', "offset":= INTERVAL '15 day') FROM metrics GROUP BY 1; ERROR: continuous aggregate view must include a valid time bucket function \set ON_ERROR_STOP 1 +-- +-- drop chunks tests +-- +-- should return 4 chunks +SELECT + c.table_name as chunk_name, + c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id +FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c +WHERE h.id = c.hypertable_id and h.table_name = 'metrics' +ORDER BY 1; + chunk_name | chunk_status | dropped | comp_id +--------------------+--------------+---------+--------- + _hyper_11_17_chunk | 0 | f | + _hyper_11_18_chunk | 0 | f | + _hyper_11_19_chunk | 0 | f | + _hyper_11_20_chunk | 0 | f | +(4 rows) + +-- all caggs in the new format (finalized=true) +SELECT user_view_name, finalized FROM _timescaledb_catalog.continuous_agg WHERE user_view_name in ('cagg1', 'cagg2', 'cagg3') ORDER BY 1; + user_view_name | finalized +----------------+----------- + cagg1 | t + cagg2 | t + cagg3 | t +(3 rows) + +-- dropping chunk should also remove the catalog data +SELECT drop_chunks('metrics', older_than => '2000-01-01 00:00:00-02'::timestamptz); + drop_chunks +------------------------------------------ + _timescaledb_internal._hyper_11_17_chunk +(1 row) + +-- should return 3 chunks +SELECT + c.table_name as chunk_name, + c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id +FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c +WHERE h.id = c.hypertable_id AND h.table_name = 'metrics' +ORDER BY 1; + chunk_name | chunk_status | dropped | comp_id +--------------------+--------------+---------+--------- + _hyper_11_18_chunk | 0 | f | + _hyper_11_19_chunk | 0 | f | + _hyper_11_20_chunk | 0 | f | +(3 rows) + +-- let's update the catalog to fake an old format cagg (finalized=false) +\c :TEST_DBNAME :ROLE_SUPERUSER +UPDATE _timescaledb_catalog.continuous_agg SET finalized=FALSE WHERE user_view_name = 'cagg1'; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +-- cagg1 now is a fake old format (finalized=false) +SELECT user_view_name, finalized FROM _timescaledb_catalog.continuous_agg WHERE user_view_name in ('cagg1', 'cagg2', 'cagg3') ORDER BY 1; + user_view_name | finalized +----------------+----------- + cagg1 | f + cagg2 | t + cagg3 | t +(3 rows) + +-- cagg1 now is in the old format (finalized=false) +-- dropping chunk should NOT remove the catalog data +SELECT 
drop_chunks('metrics', older_than => '2000-01-13 00:00:00-02'::timestamptz); + drop_chunks +------------------------------------------ + _timescaledb_internal._hyper_11_18_chunk +(1 row) + +-- should return 3 chunks and one of them should be marked as dropped +SELECT + c.table_name as chunk_name, + c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id +FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c +WHERE h.id = c.hypertable_id and h.table_name = 'metrics' +ORDER BY 1; + chunk_name | chunk_status | dropped | comp_id +--------------------+--------------+---------+--------- + _hyper_11_18_chunk | 0 | t | + _hyper_11_19_chunk | 0 | f | + _hyper_11_20_chunk | 0 | f | +(3 rows) + +-- remove the fake old format cagg +DROP MATERIALIZED VIEW cagg1; +NOTICE: drop cascades to table _timescaledb_internal._hyper_12_21_chunk +-- no more old format caggs (finalized=false) +SELECT user_view_name, finalized FROM _timescaledb_catalog.continuous_agg WHERE user_view_name in ('cagg1', 'cagg2', 'cagg3') ORDER BY 1; + user_view_name | finalized +----------------+----------- + cagg2 | t + cagg3 | t +(2 rows) + +-- dropping chunk should remove the catalog data +SELECT drop_chunks('metrics', older_than => '2000-01-25 00:00:00-02'::timestamptz); + drop_chunks +------------------------------------------ + _timescaledb_internal._hyper_11_19_chunk +(1 row) + +-- should return 2 chunks and one of them should be marked as dropped +-- because we dropped chunk before when an old format cagg exists +SELECT + c.table_name as chunk_name, + c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id +FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c +WHERE h.id = c.hypertable_id and h.table_name = 'metrics' +ORDER BY 1; + chunk_name | chunk_status | dropped | comp_id +--------------------+--------------+---------+--------- + _hyper_11_18_chunk | 0 | t | + _hyper_11_20_chunk | 0 | f | +(2 rows) + diff --git a/tsl/test/expected/cagg_usage-14.out b/tsl/test/expected/cagg_usage-14.out index 4b265c0c794..0f00dde24e1 100644 --- a/tsl/test/expected/cagg_usage-14.out +++ b/tsl/test/expected/cagg_usage-14.out @@ -482,3 +482,118 @@ SELECT * FROM cagg3; CREATE MATERIALIZED VIEW cagg4 WITH (timescaledb.continuous,timescaledb.materialized_only=true) AS SELECT time_bucket('1 month', time, 'PST8PDT', "offset":= INTERVAL '15 day') FROM metrics GROUP BY 1; ERROR: continuous aggregate view must include a valid time bucket function \set ON_ERROR_STOP 1 +-- +-- drop chunks tests +-- +-- should return 4 chunks +SELECT + c.table_name as chunk_name, + c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id +FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c +WHERE h.id = c.hypertable_id and h.table_name = 'metrics' +ORDER BY 1; + chunk_name | chunk_status | dropped | comp_id +--------------------+--------------+---------+--------- + _hyper_11_17_chunk | 0 | f | + _hyper_11_18_chunk | 0 | f | + _hyper_11_19_chunk | 0 | f | + _hyper_11_20_chunk | 0 | f | +(4 rows) + +-- all caggs in the new format (finalized=true) +SELECT user_view_name, finalized FROM _timescaledb_catalog.continuous_agg WHERE user_view_name in ('cagg1', 'cagg2', 'cagg3') ORDER BY 1; + user_view_name | finalized +----------------+----------- + cagg1 | t + cagg2 | t + cagg3 | t +(3 rows) + +-- dropping chunk should also remove the catalog data +SELECT drop_chunks('metrics', older_than => '2000-01-01 00:00:00-02'::timestamptz); + drop_chunks +------------------------------------------ + 
_timescaledb_internal._hyper_11_17_chunk +(1 row) + +-- should return 3 chunks +SELECT + c.table_name as chunk_name, + c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id +FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c +WHERE h.id = c.hypertable_id AND h.table_name = 'metrics' +ORDER BY 1; + chunk_name | chunk_status | dropped | comp_id +--------------------+--------------+---------+--------- + _hyper_11_18_chunk | 0 | f | + _hyper_11_19_chunk | 0 | f | + _hyper_11_20_chunk | 0 | f | +(3 rows) + +-- let's update the catalog to fake an old format cagg (finalized=false) +\c :TEST_DBNAME :ROLE_SUPERUSER +UPDATE _timescaledb_catalog.continuous_agg SET finalized=FALSE WHERE user_view_name = 'cagg1'; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +-- cagg1 now is a fake old format (finalized=false) +SELECT user_view_name, finalized FROM _timescaledb_catalog.continuous_agg WHERE user_view_name in ('cagg1', 'cagg2', 'cagg3') ORDER BY 1; + user_view_name | finalized +----------------+----------- + cagg1 | f + cagg2 | t + cagg3 | t +(3 rows) + +-- cagg1 now is in the old format (finalized=false) +-- dropping chunk should NOT remove the catalog data +SELECT drop_chunks('metrics', older_than => '2000-01-13 00:00:00-02'::timestamptz); + drop_chunks +------------------------------------------ + _timescaledb_internal._hyper_11_18_chunk +(1 row) + +-- should return 3 chunks and one of them should be marked as dropped +SELECT + c.table_name as chunk_name, + c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id +FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c +WHERE h.id = c.hypertable_id and h.table_name = 'metrics' +ORDER BY 1; + chunk_name | chunk_status | dropped | comp_id +--------------------+--------------+---------+--------- + _hyper_11_18_chunk | 0 | t | + _hyper_11_19_chunk | 0 | f | + _hyper_11_20_chunk | 0 | f | +(3 rows) + +-- remove the fake old format cagg +DROP MATERIALIZED VIEW cagg1; +NOTICE: drop cascades to table _timescaledb_internal._hyper_12_21_chunk +-- no more old format caggs (finalized=false) +SELECT user_view_name, finalized FROM _timescaledb_catalog.continuous_agg WHERE user_view_name in ('cagg1', 'cagg2', 'cagg3') ORDER BY 1; + user_view_name | finalized +----------------+----------- + cagg2 | t + cagg3 | t +(2 rows) + +-- dropping chunk should remove the catalog data +SELECT drop_chunks('metrics', older_than => '2000-01-25 00:00:00-02'::timestamptz); + drop_chunks +------------------------------------------ + _timescaledb_internal._hyper_11_19_chunk +(1 row) + +-- should return 2 chunks and one of them should be marked as dropped +-- because we dropped chunk before when an old format cagg exists +SELECT + c.table_name as chunk_name, + c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id +FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c +WHERE h.id = c.hypertable_id and h.table_name = 'metrics' +ORDER BY 1; + chunk_name | chunk_status | dropped | comp_id +--------------------+--------------+---------+--------- + _hyper_11_18_chunk | 0 | t | + _hyper_11_20_chunk | 0 | f | +(2 rows) + diff --git a/tsl/test/expected/cagg_usage-15.out b/tsl/test/expected/cagg_usage-15.out index 4b265c0c794..0f00dde24e1 100644 --- a/tsl/test/expected/cagg_usage-15.out +++ b/tsl/test/expected/cagg_usage-15.out @@ -482,3 +482,118 @@ SELECT * FROM cagg3; CREATE MATERIALIZED VIEW cagg4 WITH (timescaledb.continuous,timescaledb.materialized_only=true) AS SELECT time_bucket('1 month', time, 
'PST8PDT', "offset":= INTERVAL '15 day') FROM metrics GROUP BY 1; ERROR: continuous aggregate view must include a valid time bucket function \set ON_ERROR_STOP 1 +-- +-- drop chunks tests +-- +-- should return 4 chunks +SELECT + c.table_name as chunk_name, + c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id +FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c +WHERE h.id = c.hypertable_id and h.table_name = 'metrics' +ORDER BY 1; + chunk_name | chunk_status | dropped | comp_id +--------------------+--------------+---------+--------- + _hyper_11_17_chunk | 0 | f | + _hyper_11_18_chunk | 0 | f | + _hyper_11_19_chunk | 0 | f | + _hyper_11_20_chunk | 0 | f | +(4 rows) + +-- all caggs in the new format (finalized=true) +SELECT user_view_name, finalized FROM _timescaledb_catalog.continuous_agg WHERE user_view_name in ('cagg1', 'cagg2', 'cagg3') ORDER BY 1; + user_view_name | finalized +----------------+----------- + cagg1 | t + cagg2 | t + cagg3 | t +(3 rows) + +-- dropping chunk should also remove the catalog data +SELECT drop_chunks('metrics', older_than => '2000-01-01 00:00:00-02'::timestamptz); + drop_chunks +------------------------------------------ + _timescaledb_internal._hyper_11_17_chunk +(1 row) + +-- should return 3 chunks +SELECT + c.table_name as chunk_name, + c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id +FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c +WHERE h.id = c.hypertable_id AND h.table_name = 'metrics' +ORDER BY 1; + chunk_name | chunk_status | dropped | comp_id +--------------------+--------------+---------+--------- + _hyper_11_18_chunk | 0 | f | + _hyper_11_19_chunk | 0 | f | + _hyper_11_20_chunk | 0 | f | +(3 rows) + +-- let's update the catalog to fake an old format cagg (finalized=false) +\c :TEST_DBNAME :ROLE_SUPERUSER +UPDATE _timescaledb_catalog.continuous_agg SET finalized=FALSE WHERE user_view_name = 'cagg1'; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +-- cagg1 now is a fake old format (finalized=false) +SELECT user_view_name, finalized FROM _timescaledb_catalog.continuous_agg WHERE user_view_name in ('cagg1', 'cagg2', 'cagg3') ORDER BY 1; + user_view_name | finalized +----------------+----------- + cagg1 | f + cagg2 | t + cagg3 | t +(3 rows) + +-- cagg1 now is in the old format (finalized=false) +-- dropping chunk should NOT remove the catalog data +SELECT drop_chunks('metrics', older_than => '2000-01-13 00:00:00-02'::timestamptz); + drop_chunks +------------------------------------------ + _timescaledb_internal._hyper_11_18_chunk +(1 row) + +-- should return 3 chunks and one of them should be marked as dropped +SELECT + c.table_name as chunk_name, + c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id +FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c +WHERE h.id = c.hypertable_id and h.table_name = 'metrics' +ORDER BY 1; + chunk_name | chunk_status | dropped | comp_id +--------------------+--------------+---------+--------- + _hyper_11_18_chunk | 0 | t | + _hyper_11_19_chunk | 0 | f | + _hyper_11_20_chunk | 0 | f | +(3 rows) + +-- remove the fake old format cagg +DROP MATERIALIZED VIEW cagg1; +NOTICE: drop cascades to table _timescaledb_internal._hyper_12_21_chunk +-- no more old format caggs (finalized=false) +SELECT user_view_name, finalized FROM _timescaledb_catalog.continuous_agg WHERE user_view_name in ('cagg1', 'cagg2', 'cagg3') ORDER BY 1; + user_view_name | finalized +----------------+----------- + cagg2 | t + cagg3 | t +(2 rows) + +-- 
dropping chunk should remove the catalog data +SELECT drop_chunks('metrics', older_than => '2000-01-25 00:00:00-02'::timestamptz); + drop_chunks +------------------------------------------ + _timescaledb_internal._hyper_11_19_chunk +(1 row) + +-- should return 2 chunks and one of them should be marked as dropped +-- because we dropped chunk before when an old format cagg exists +SELECT + c.table_name as chunk_name, + c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id +FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c +WHERE h.id = c.hypertable_id and h.table_name = 'metrics' +ORDER BY 1; + chunk_name | chunk_status | dropped | comp_id +--------------------+--------------+---------+--------- + _hyper_11_18_chunk | 0 | t | + _hyper_11_20_chunk | 0 | f | +(2 rows) + diff --git a/tsl/test/expected/cagg_usage-16.out b/tsl/test/expected/cagg_usage-16.out index defe5296c13..28dfe602da9 100644 --- a/tsl/test/expected/cagg_usage-16.out +++ b/tsl/test/expected/cagg_usage-16.out @@ -482,3 +482,118 @@ SELECT * FROM cagg3; CREATE MATERIALIZED VIEW cagg4 WITH (timescaledb.continuous,timescaledb.materialized_only=true) AS SELECT time_bucket('1 month', time, 'PST8PDT', "offset":= INTERVAL '15 day') FROM metrics GROUP BY 1; ERROR: continuous aggregate view must include a valid time bucket function \set ON_ERROR_STOP 1 +-- +-- drop chunks tests +-- +-- should return 4 chunks +SELECT + c.table_name as chunk_name, + c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id +FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c +WHERE h.id = c.hypertable_id and h.table_name = 'metrics' +ORDER BY 1; + chunk_name | chunk_status | dropped | comp_id +--------------------+--------------+---------+--------- + _hyper_11_17_chunk | 0 | f | + _hyper_11_18_chunk | 0 | f | + _hyper_11_19_chunk | 0 | f | + _hyper_11_20_chunk | 0 | f | +(4 rows) + +-- all caggs in the new format (finalized=true) +SELECT user_view_name, finalized FROM _timescaledb_catalog.continuous_agg WHERE user_view_name in ('cagg1', 'cagg2', 'cagg3') ORDER BY 1; + user_view_name | finalized +----------------+----------- + cagg1 | t + cagg2 | t + cagg3 | t +(3 rows) + +-- dropping chunk should also remove the catalog data +SELECT drop_chunks('metrics', older_than => '2000-01-01 00:00:00-02'::timestamptz); + drop_chunks +------------------------------------------ + _timescaledb_internal._hyper_11_17_chunk +(1 row) + +-- should return 3 chunks +SELECT + c.table_name as chunk_name, + c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id +FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c +WHERE h.id = c.hypertable_id AND h.table_name = 'metrics' +ORDER BY 1; + chunk_name | chunk_status | dropped | comp_id +--------------------+--------------+---------+--------- + _hyper_11_18_chunk | 0 | f | + _hyper_11_19_chunk | 0 | f | + _hyper_11_20_chunk | 0 | f | +(3 rows) + +-- let's update the catalog to fake an old format cagg (finalized=false) +\c :TEST_DBNAME :ROLE_SUPERUSER +UPDATE _timescaledb_catalog.continuous_agg SET finalized=FALSE WHERE user_view_name = 'cagg1'; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +-- cagg1 now is a fake old format (finalized=false) +SELECT user_view_name, finalized FROM _timescaledb_catalog.continuous_agg WHERE user_view_name in ('cagg1', 'cagg2', 'cagg3') ORDER BY 1; + user_view_name | finalized +----------------+----------- + cagg1 | f + cagg2 | t + cagg3 | t +(3 rows) + +-- cagg1 now is in the old format (finalized=false) +-- 
dropping chunk should NOT remove the catalog data +SELECT drop_chunks('metrics', older_than => '2000-01-13 00:00:00-02'::timestamptz); + drop_chunks +------------------------------------------ + _timescaledb_internal._hyper_11_18_chunk +(1 row) + +-- should return 3 chunks and one of them should be marked as dropped +SELECT + c.table_name as chunk_name, + c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id +FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c +WHERE h.id = c.hypertable_id and h.table_name = 'metrics' +ORDER BY 1; + chunk_name | chunk_status | dropped | comp_id +--------------------+--------------+---------+--------- + _hyper_11_18_chunk | 0 | t | + _hyper_11_19_chunk | 0 | f | + _hyper_11_20_chunk | 0 | f | +(3 rows) + +-- remove the fake old format cagg +DROP MATERIALIZED VIEW cagg1; +NOTICE: drop cascades to table _timescaledb_internal._hyper_12_21_chunk +-- no more old format caggs (finalized=false) +SELECT user_view_name, finalized FROM _timescaledb_catalog.continuous_agg WHERE user_view_name in ('cagg1', 'cagg2', 'cagg3') ORDER BY 1; + user_view_name | finalized +----------------+----------- + cagg2 | t + cagg3 | t +(2 rows) + +-- dropping chunk should remove the catalog data +SELECT drop_chunks('metrics', older_than => '2000-01-25 00:00:00-02'::timestamptz); + drop_chunks +------------------------------------------ + _timescaledb_internal._hyper_11_19_chunk +(1 row) + +-- should return 2 chunks and one of them should be marked as dropped +-- because we dropped chunk before when an old format cagg exists +SELECT + c.table_name as chunk_name, + c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id +FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c +WHERE h.id = c.hypertable_id and h.table_name = 'metrics' +ORDER BY 1; + chunk_name | chunk_status | dropped | comp_id +--------------------+--------------+---------+--------- + _hyper_11_18_chunk | 0 | t | + _hyper_11_20_chunk | 0 | f | +(2 rows) + diff --git a/tsl/test/expected/compression.out b/tsl/test/expected/compression.out index 9edf81ed716..002f8442949 100644 --- a/tsl/test/expected/compression.out +++ b/tsl/test/expected/compression.out @@ -1592,11 +1592,9 @@ SELECT FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c WHERE h.id = c.hypertable_id and h.table_name = 'metrics' ORDER BY 1; - chunk_name | chunk_status | dropped | comp_id ---------------------+--------------+---------+--------- - _hyper_13_33_chunk | 0 | t | - _hyper_13_34_chunk | 0 | t | -(2 rows) + chunk_name | chunk_status | dropped | comp_id +------------+--------------+---------+--------- +(0 rows) SELECT "time", cnt FROM cagg_expr ORDER BY time LIMIT 5; time | cnt @@ -1625,8 +1623,8 @@ WHERE h.id = c.hypertable_id and h.table_name = 'metrics' ORDER BY 1; chunk_name | chunk_status | dropped | comp_id --------------------+--------------+---------+--------- - _hyper_13_33_chunk | 1 | f | 64 - _hyper_13_34_chunk | 1 | f | 65 + _hyper_13_64_chunk | 1 | f | 66 + _hyper_13_65_chunk | 1 | f | 67 (2 rows) SELECT count(*) FROM metrics; @@ -1653,7 +1651,7 @@ INSERT INTO local_seq SELECT '2000-01-01', generate_series(5,8); SELECT compress_chunk(c) FROM show_chunks('local_seq') c; compress_chunk ------------------------------------------ - _timescaledb_internal._hyper_33_66_chunk + _timescaledb_internal._hyper_33_68_chunk (1 row) SELECT @@ -1745,7 +1743,7 @@ SELECT set_chunk_time_interval('f_sensor_data', INTERVAL '1 year'); SELECT * FROM 
_timescaledb_functions.create_chunk('f_sensor_data',' {"time": [181900977000000, 515024000000000]}'); chunk_id | hypertable_id | schema_name | table_name | relkind | slices | created ----------+---------------+-----------------------+--------------------+---------+----------------------------------------------+--------- - 71 | 37 | _timescaledb_internal | _hyper_37_71_chunk | r | {"time": [181900977000000, 515024000000000]} | t + 73 | 37 | _timescaledb_internal | _hyper_37_73_chunk | r | {"time": [181900977000000, 515024000000000]} | t (1 row) INSERT INTO f_sensor_data @@ -1763,7 +1761,7 @@ ALTER TABLE f_sensor_data SET (timescaledb.compress, timescaledb.compress_segmen SELECT compress_chunk(i) FROM show_chunks('f_sensor_data') i; compress_chunk ------------------------------------------ - _timescaledb_internal._hyper_37_71_chunk + _timescaledb_internal._hyper_37_73_chunk (1 row) CALL reindex_compressed_hypertable('f_sensor_data'); @@ -1805,16 +1803,16 @@ SELECT sum(cpu) FROM f_sensor_data; QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_37_71_chunk.cpu) + Output: sum(_hyper_37_73_chunk.cpu) -> Gather - Output: (PARTIAL sum(_hyper_37_71_chunk.cpu)) + Output: (PARTIAL sum(_hyper_37_73_chunk.cpu)) Workers Planned: 4 -> Partial Aggregate - Output: PARTIAL sum(_hyper_37_71_chunk.cpu) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_37_71_chunk - Output: _hyper_37_71_chunk.cpu - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_38_72_chunk - Output: compress_hyper_38_72_chunk."time", compress_hyper_38_72_chunk.sensor_id, compress_hyper_38_72_chunk.cpu, compress_hyper_38_72_chunk.temperature, compress_hyper_38_72_chunk._ts_meta_count, compress_hyper_38_72_chunk._ts_meta_sequence_num, compress_hyper_38_72_chunk._ts_meta_min_1, compress_hyper_38_72_chunk._ts_meta_max_1 + Output: PARTIAL sum(_hyper_37_73_chunk.cpu) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_37_73_chunk + Output: _hyper_37_73_chunk.cpu + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_38_74_chunk + Output: compress_hyper_38_74_chunk."time", compress_hyper_38_74_chunk.sensor_id, compress_hyper_38_74_chunk.cpu, compress_hyper_38_74_chunk.temperature, compress_hyper_38_74_chunk._ts_meta_count, compress_hyper_38_74_chunk._ts_meta_sequence_num, compress_hyper_38_74_chunk._ts_meta_min_1, compress_hyper_38_74_chunk._ts_meta_max_1 (11 rows) -- Encourage use of Index Scan @@ -1828,13 +1826,13 @@ SELECT * FROM f_sensor_data WHERE sensor_id > 100; QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Gather - Output: _hyper_37_71_chunk."time", _hyper_37_71_chunk.sensor_id, _hyper_37_71_chunk.cpu, _hyper_37_71_chunk.temperature + Output: _hyper_37_73_chunk."time", _hyper_37_73_chunk.sensor_id, _hyper_37_73_chunk.cpu, _hyper_37_73_chunk.temperature Workers Planned: 2 - -> Custom Scan (DecompressChunk) on 
_timescaledb_internal._hyper_37_71_chunk - Output: _hyper_37_71_chunk."time", _hyper_37_71_chunk.sensor_id, _hyper_37_71_chunk.cpu, _hyper_37_71_chunk.temperature - -> Parallel Index Scan using compress_hyper_38_72_chunk_sensor_id__ts_meta_sequence_num_idx on _timescaledb_internal.compress_hyper_38_72_chunk - Output: compress_hyper_38_72_chunk."time", compress_hyper_38_72_chunk.sensor_id, compress_hyper_38_72_chunk.cpu, compress_hyper_38_72_chunk.temperature, compress_hyper_38_72_chunk._ts_meta_count, compress_hyper_38_72_chunk._ts_meta_sequence_num, compress_hyper_38_72_chunk._ts_meta_min_1, compress_hyper_38_72_chunk._ts_meta_max_1 - Index Cond: (compress_hyper_38_72_chunk.sensor_id > 100) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_37_73_chunk + Output: _hyper_37_73_chunk."time", _hyper_37_73_chunk.sensor_id, _hyper_37_73_chunk.cpu, _hyper_37_73_chunk.temperature + -> Parallel Index Scan using compress_hyper_38_74_chunk_sensor_id__ts_meta_sequence_num_idx on _timescaledb_internal.compress_hyper_38_74_chunk + Output: compress_hyper_38_74_chunk."time", compress_hyper_38_74_chunk.sensor_id, compress_hyper_38_74_chunk.cpu, compress_hyper_38_74_chunk.temperature, compress_hyper_38_74_chunk._ts_meta_count, compress_hyper_38_74_chunk._ts_meta_sequence_num, compress_hyper_38_74_chunk._ts_meta_min_1, compress_hyper_38_74_chunk._ts_meta_max_1 + Index Cond: (compress_hyper_38_74_chunk.sensor_id > 100) (8 rows) RESET enable_parallel_append; @@ -1855,21 +1853,21 @@ SELECT sum(cpu) FROM f_sensor_data; QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_37_71_chunk.cpu) + Output: sum(_hyper_37_73_chunk.cpu) -> Gather - Output: (PARTIAL sum(_hyper_37_71_chunk.cpu)) + Output: (PARTIAL sum(_hyper_37_73_chunk.cpu)) Workers Planned: 4 -> Parallel Append -> Partial Aggregate - Output: PARTIAL sum(_hyper_37_71_chunk.cpu) - -> Parallel Seq Scan on _timescaledb_internal._hyper_37_71_chunk - Output: _hyper_37_71_chunk.cpu + Output: PARTIAL sum(_hyper_37_73_chunk.cpu) + -> Parallel Seq Scan on _timescaledb_internal._hyper_37_73_chunk + Output: _hyper_37_73_chunk.cpu -> Partial Aggregate - Output: PARTIAL sum(_hyper_37_71_chunk.cpu) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_37_71_chunk - Output: _hyper_37_71_chunk.cpu - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_38_72_chunk - Output: compress_hyper_38_72_chunk."time", compress_hyper_38_72_chunk.sensor_id, compress_hyper_38_72_chunk.cpu, compress_hyper_38_72_chunk.temperature, compress_hyper_38_72_chunk._ts_meta_count, compress_hyper_38_72_chunk._ts_meta_sequence_num, compress_hyper_38_72_chunk._ts_meta_min_1, compress_hyper_38_72_chunk._ts_meta_max_1 + Output: PARTIAL sum(_hyper_37_73_chunk.cpu) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_37_73_chunk + Output: _hyper_37_73_chunk.cpu + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_38_74_chunk + Output: compress_hyper_38_74_chunk."time", compress_hyper_38_74_chunk.sensor_id, compress_hyper_38_74_chunk.cpu, compress_hyper_38_74_chunk.temperature, compress_hyper_38_74_chunk._ts_meta_count, compress_hyper_38_74_chunk._ts_meta_sequence_num, 
compress_hyper_38_74_chunk._ts_meta_min_1, compress_hyper_38_74_chunk._ts_meta_max_1 (16 rows) :explain @@ -1877,18 +1875,18 @@ SELECT * FROM f_sensor_data WHERE sensor_id > 100; QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Gather - Output: _hyper_37_71_chunk."time", _hyper_37_71_chunk.sensor_id, _hyper_37_71_chunk.cpu, _hyper_37_71_chunk.temperature + Output: _hyper_37_73_chunk."time", _hyper_37_73_chunk.sensor_id, _hyper_37_73_chunk.cpu, _hyper_37_73_chunk.temperature Workers Planned: 3 -> Parallel Append - -> Parallel Index Scan using _hyper_37_71_chunk_f_sensor_data_time_sensor_id_idx on _timescaledb_internal._hyper_37_71_chunk - Output: _hyper_37_71_chunk."time", _hyper_37_71_chunk.sensor_id, _hyper_37_71_chunk.cpu, _hyper_37_71_chunk.temperature - Index Cond: (_hyper_37_71_chunk.sensor_id > 100) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_37_71_chunk - Output: _hyper_37_71_chunk."time", _hyper_37_71_chunk.sensor_id, _hyper_37_71_chunk.cpu, _hyper_37_71_chunk.temperature - Filter: (_hyper_37_71_chunk.sensor_id > 100) - -> Parallel Index Scan using compress_hyper_38_72_chunk_sensor_id__ts_meta_sequence_num_idx on _timescaledb_internal.compress_hyper_38_72_chunk - Output: compress_hyper_38_72_chunk."time", compress_hyper_38_72_chunk.sensor_id, compress_hyper_38_72_chunk.cpu, compress_hyper_38_72_chunk.temperature, compress_hyper_38_72_chunk._ts_meta_count, compress_hyper_38_72_chunk._ts_meta_sequence_num, compress_hyper_38_72_chunk._ts_meta_min_1, compress_hyper_38_72_chunk._ts_meta_max_1 - Index Cond: (compress_hyper_38_72_chunk.sensor_id > 100) + -> Parallel Index Scan using _hyper_37_73_chunk_f_sensor_data_time_sensor_id_idx on _timescaledb_internal._hyper_37_73_chunk + Output: _hyper_37_73_chunk."time", _hyper_37_73_chunk.sensor_id, _hyper_37_73_chunk.cpu, _hyper_37_73_chunk.temperature + Index Cond: (_hyper_37_73_chunk.sensor_id > 100) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_37_73_chunk + Output: _hyper_37_73_chunk."time", _hyper_37_73_chunk.sensor_id, _hyper_37_73_chunk.cpu, _hyper_37_73_chunk.temperature + Filter: (_hyper_37_73_chunk.sensor_id > 100) + -> Parallel Index Scan using compress_hyper_38_74_chunk_sensor_id__ts_meta_sequence_num_idx on _timescaledb_internal.compress_hyper_38_74_chunk + Output: compress_hyper_38_74_chunk."time", compress_hyper_38_74_chunk.sensor_id, compress_hyper_38_74_chunk.cpu, compress_hyper_38_74_chunk.temperature, compress_hyper_38_74_chunk._ts_meta_count, compress_hyper_38_74_chunk._ts_meta_sequence_num, compress_hyper_38_74_chunk._ts_meta_min_1, compress_hyper_38_74_chunk._ts_meta_max_1 + Index Cond: (compress_hyper_38_74_chunk.sensor_id > 100) (13 rows) -- Test non-partial paths below append are not executed multiple times @@ -1961,8 +1959,8 @@ SELECT time, device, device * 0.1 FROM SELECT compress_chunk(c) FROM show_chunks('ht_metrics_partially_compressed') c; compress_chunk ------------------------------------------ - _timescaledb_internal._hyper_41_75_chunk - _timescaledb_internal._hyper_41_76_chunk + _timescaledb_internal._hyper_41_77_chunk + _timescaledb_internal._hyper_41_78_chunk (2 rows) INSERT INTO ht_metrics_partially_compressed VALUES ('2020-01-01'::timestamptz, 1, 0.1); 
@@ -1978,26 +1976,26 @@ SELECT * FROM ht_metrics_partially_compressed ORDER BY time DESC, device LIMIT 1 Startup Exclusion: false Runtime Exclusion: false -> Sort - Output: _hyper_41_76_chunk."time", _hyper_41_76_chunk.device, _hyper_41_76_chunk.value - Sort Key: _hyper_41_76_chunk."time" DESC, _hyper_41_76_chunk.device - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_41_76_chunk - Output: _hyper_41_76_chunk."time", _hyper_41_76_chunk.device, _hyper_41_76_chunk.value - -> Seq Scan on _timescaledb_internal.compress_hyper_42_78_chunk - Output: compress_hyper_42_78_chunk."time", compress_hyper_42_78_chunk.device, compress_hyper_42_78_chunk.value, compress_hyper_42_78_chunk._ts_meta_count, compress_hyper_42_78_chunk._ts_meta_sequence_num, compress_hyper_42_78_chunk._ts_meta_min_1, compress_hyper_42_78_chunk._ts_meta_max_1 + Output: _hyper_41_78_chunk."time", _hyper_41_78_chunk.device, _hyper_41_78_chunk.value + Sort Key: _hyper_41_78_chunk."time" DESC, _hyper_41_78_chunk.device + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_41_78_chunk + Output: _hyper_41_78_chunk."time", _hyper_41_78_chunk.device, _hyper_41_78_chunk.value + -> Seq Scan on _timescaledb_internal.compress_hyper_42_80_chunk + Output: compress_hyper_42_80_chunk."time", compress_hyper_42_80_chunk.device, compress_hyper_42_80_chunk.value, compress_hyper_42_80_chunk._ts_meta_count, compress_hyper_42_80_chunk._ts_meta_sequence_num, compress_hyper_42_80_chunk._ts_meta_min_1, compress_hyper_42_80_chunk._ts_meta_max_1 -> Merge Append - Sort Key: _hyper_41_75_chunk."time" DESC, _hyper_41_75_chunk.device + Sort Key: _hyper_41_77_chunk."time" DESC, _hyper_41_77_chunk.device -> Sort - Output: _hyper_41_75_chunk."time", _hyper_41_75_chunk.device, _hyper_41_75_chunk.value - Sort Key: _hyper_41_75_chunk."time" DESC, _hyper_41_75_chunk.device - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_41_75_chunk - Output: _hyper_41_75_chunk."time", _hyper_41_75_chunk.device, _hyper_41_75_chunk.value - -> Seq Scan on _timescaledb_internal.compress_hyper_42_77_chunk - Output: compress_hyper_42_77_chunk."time", compress_hyper_42_77_chunk.device, compress_hyper_42_77_chunk.value, compress_hyper_42_77_chunk._ts_meta_count, compress_hyper_42_77_chunk._ts_meta_sequence_num, compress_hyper_42_77_chunk._ts_meta_min_1, compress_hyper_42_77_chunk._ts_meta_max_1 + Output: _hyper_41_77_chunk."time", _hyper_41_77_chunk.device, _hyper_41_77_chunk.value + Sort Key: _hyper_41_77_chunk."time" DESC, _hyper_41_77_chunk.device + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_41_77_chunk + Output: _hyper_41_77_chunk."time", _hyper_41_77_chunk.device, _hyper_41_77_chunk.value + -> Seq Scan on _timescaledb_internal.compress_hyper_42_79_chunk + Output: compress_hyper_42_79_chunk."time", compress_hyper_42_79_chunk.device, compress_hyper_42_79_chunk.value, compress_hyper_42_79_chunk._ts_meta_count, compress_hyper_42_79_chunk._ts_meta_sequence_num, compress_hyper_42_79_chunk._ts_meta_min_1, compress_hyper_42_79_chunk._ts_meta_max_1 -> Sort - Output: _hyper_41_75_chunk."time", _hyper_41_75_chunk.device, _hyper_41_75_chunk.value - Sort Key: _hyper_41_75_chunk."time" DESC, _hyper_41_75_chunk.device - -> Seq Scan on _timescaledb_internal._hyper_41_75_chunk - Output: _hyper_41_75_chunk."time", _hyper_41_75_chunk.device, _hyper_41_75_chunk.value + Output: _hyper_41_77_chunk."time", _hyper_41_77_chunk.device, _hyper_41_77_chunk.value + Sort Key: _hyper_41_77_chunk."time" DESC, _hyper_41_77_chunk.device + -> Seq Scan 
on _timescaledb_internal._hyper_41_77_chunk + Output: _hyper_41_77_chunk."time", _hyper_41_77_chunk.device, _hyper_41_77_chunk.value (28 rows) -- Test parameter change on rescan @@ -2064,9 +2062,9 @@ INSERT INTO i6069 VALUES('2023-07-01', 1, 1),('2023-07-03', 2, 1),('2023-07-05', SELECT compress_chunk(i, if_not_compressed => true) FROM show_chunks('i6069') i; compress_chunk ------------------------------------------ - _timescaledb_internal._hyper_43_79_chunk - _timescaledb_internal._hyper_43_80_chunk _timescaledb_internal._hyper_43_81_chunk + _timescaledb_internal._hyper_43_82_chunk + _timescaledb_internal._hyper_43_83_chunk (3 rows) SET enable_indexscan = ON; @@ -2091,6 +2089,28 @@ ORDER BY timestamp desc LIMIT 1 ) a ON true; Order: i6069."timestamp" DESC Startup Exclusion: false Runtime Exclusion: true + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_43_83_chunk + Output: _hyper_43_83_chunk."timestamp", _hyper_43_83_chunk.attr_id, _hyper_43_83_chunk.number_val + Filter: ((_hyper_43_83_chunk."timestamp" > 'Fri Jun 30 00:00:00 2023'::timestamp without time zone) AND (_hyper_43_83_chunk."timestamp" < 'Thu Jul 06 00:00:00 2023'::timestamp without time zone)) + Batch Sorted Merge: true + -> Sort + Output: compress_hyper_44_86_chunk."timestamp", compress_hyper_44_86_chunk.attr_id, compress_hyper_44_86_chunk.number_val, compress_hyper_44_86_chunk._ts_meta_count, compress_hyper_44_86_chunk._ts_meta_sequence_num, compress_hyper_44_86_chunk._ts_meta_min_1, compress_hyper_44_86_chunk._ts_meta_max_1 + Sort Key: compress_hyper_44_86_chunk._ts_meta_max_1 DESC + -> Index Scan using compress_hyper_44_86_chunk_attr_id__ts_meta_sequence_num_idx on _timescaledb_internal.compress_hyper_44_86_chunk + Output: compress_hyper_44_86_chunk."timestamp", compress_hyper_44_86_chunk.attr_id, compress_hyper_44_86_chunk.number_val, compress_hyper_44_86_chunk._ts_meta_count, compress_hyper_44_86_chunk._ts_meta_sequence_num, compress_hyper_44_86_chunk._ts_meta_min_1, compress_hyper_44_86_chunk._ts_meta_max_1 + Index Cond: (compress_hyper_44_86_chunk.attr_id = "*VALUES*".column1) + Filter: ((compress_hyper_44_86_chunk._ts_meta_max_1 > 'Fri Jun 30 00:00:00 2023'::timestamp without time zone) AND (compress_hyper_44_86_chunk._ts_meta_min_1 < 'Thu Jul 06 00:00:00 2023'::timestamp without time zone)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_43_82_chunk + Output: _hyper_43_82_chunk."timestamp", _hyper_43_82_chunk.attr_id, _hyper_43_82_chunk.number_val + Filter: ((_hyper_43_82_chunk."timestamp" > 'Fri Jun 30 00:00:00 2023'::timestamp without time zone) AND (_hyper_43_82_chunk."timestamp" < 'Thu Jul 06 00:00:00 2023'::timestamp without time zone)) + Batch Sorted Merge: true + -> Sort + Output: compress_hyper_44_85_chunk."timestamp", compress_hyper_44_85_chunk.attr_id, compress_hyper_44_85_chunk.number_val, compress_hyper_44_85_chunk._ts_meta_count, compress_hyper_44_85_chunk._ts_meta_sequence_num, compress_hyper_44_85_chunk._ts_meta_min_1, compress_hyper_44_85_chunk._ts_meta_max_1 + Sort Key: compress_hyper_44_85_chunk._ts_meta_max_1 DESC + -> Index Scan using compress_hyper_44_85_chunk_attr_id__ts_meta_sequence_num_idx on _timescaledb_internal.compress_hyper_44_85_chunk + Output: compress_hyper_44_85_chunk."timestamp", compress_hyper_44_85_chunk.attr_id, compress_hyper_44_85_chunk.number_val, compress_hyper_44_85_chunk._ts_meta_count, compress_hyper_44_85_chunk._ts_meta_sequence_num, compress_hyper_44_85_chunk._ts_meta_min_1, compress_hyper_44_85_chunk._ts_meta_max_1 + Index Cond: 
(compress_hyper_44_85_chunk.attr_id = "*VALUES*".column1) + Filter: ((compress_hyper_44_85_chunk._ts_meta_max_1 > 'Fri Jun 30 00:00:00 2023'::timestamp without time zone) AND (compress_hyper_44_85_chunk._ts_meta_min_1 < 'Thu Jul 06 00:00:00 2023'::timestamp without time zone)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_43_81_chunk Output: _hyper_43_81_chunk."timestamp", _hyper_43_81_chunk.attr_id, _hyper_43_81_chunk.number_val Filter: ((_hyper_43_81_chunk."timestamp" > 'Fri Jun 30 00:00:00 2023'::timestamp without time zone) AND (_hyper_43_81_chunk."timestamp" < 'Thu Jul 06 00:00:00 2023'::timestamp without time zone)) @@ -2102,28 +2122,6 @@ ORDER BY timestamp desc LIMIT 1 ) a ON true; Output: compress_hyper_44_84_chunk."timestamp", compress_hyper_44_84_chunk.attr_id, compress_hyper_44_84_chunk.number_val, compress_hyper_44_84_chunk._ts_meta_count, compress_hyper_44_84_chunk._ts_meta_sequence_num, compress_hyper_44_84_chunk._ts_meta_min_1, compress_hyper_44_84_chunk._ts_meta_max_1 Index Cond: (compress_hyper_44_84_chunk.attr_id = "*VALUES*".column1) Filter: ((compress_hyper_44_84_chunk._ts_meta_max_1 > 'Fri Jun 30 00:00:00 2023'::timestamp without time zone) AND (compress_hyper_44_84_chunk._ts_meta_min_1 < 'Thu Jul 06 00:00:00 2023'::timestamp without time zone)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_43_80_chunk - Output: _hyper_43_80_chunk."timestamp", _hyper_43_80_chunk.attr_id, _hyper_43_80_chunk.number_val - Filter: ((_hyper_43_80_chunk."timestamp" > 'Fri Jun 30 00:00:00 2023'::timestamp without time zone) AND (_hyper_43_80_chunk."timestamp" < 'Thu Jul 06 00:00:00 2023'::timestamp without time zone)) - Batch Sorted Merge: true - -> Sort - Output: compress_hyper_44_83_chunk."timestamp", compress_hyper_44_83_chunk.attr_id, compress_hyper_44_83_chunk.number_val, compress_hyper_44_83_chunk._ts_meta_count, compress_hyper_44_83_chunk._ts_meta_sequence_num, compress_hyper_44_83_chunk._ts_meta_min_1, compress_hyper_44_83_chunk._ts_meta_max_1 - Sort Key: compress_hyper_44_83_chunk._ts_meta_max_1 DESC - -> Index Scan using compress_hyper_44_83_chunk_attr_id__ts_meta_sequence_num_idx on _timescaledb_internal.compress_hyper_44_83_chunk - Output: compress_hyper_44_83_chunk."timestamp", compress_hyper_44_83_chunk.attr_id, compress_hyper_44_83_chunk.number_val, compress_hyper_44_83_chunk._ts_meta_count, compress_hyper_44_83_chunk._ts_meta_sequence_num, compress_hyper_44_83_chunk._ts_meta_min_1, compress_hyper_44_83_chunk._ts_meta_max_1 - Index Cond: (compress_hyper_44_83_chunk.attr_id = "*VALUES*".column1) - Filter: ((compress_hyper_44_83_chunk._ts_meta_max_1 > 'Fri Jun 30 00:00:00 2023'::timestamp without time zone) AND (compress_hyper_44_83_chunk._ts_meta_min_1 < 'Thu Jul 06 00:00:00 2023'::timestamp without time zone)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_43_79_chunk - Output: _hyper_43_79_chunk."timestamp", _hyper_43_79_chunk.attr_id, _hyper_43_79_chunk.number_val - Filter: ((_hyper_43_79_chunk."timestamp" > 'Fri Jun 30 00:00:00 2023'::timestamp without time zone) AND (_hyper_43_79_chunk."timestamp" < 'Thu Jul 06 00:00:00 2023'::timestamp without time zone)) - Batch Sorted Merge: true - -> Sort - Output: compress_hyper_44_82_chunk."timestamp", compress_hyper_44_82_chunk.attr_id, compress_hyper_44_82_chunk.number_val, compress_hyper_44_82_chunk._ts_meta_count, compress_hyper_44_82_chunk._ts_meta_sequence_num, compress_hyper_44_82_chunk._ts_meta_min_1, compress_hyper_44_82_chunk._ts_meta_max_1 - Sort Key: 
compress_hyper_44_82_chunk._ts_meta_max_1 DESC - -> Index Scan using compress_hyper_44_82_chunk_attr_id__ts_meta_sequence_num_idx on _timescaledb_internal.compress_hyper_44_82_chunk - Output: compress_hyper_44_82_chunk."timestamp", compress_hyper_44_82_chunk.attr_id, compress_hyper_44_82_chunk.number_val, compress_hyper_44_82_chunk._ts_meta_count, compress_hyper_44_82_chunk._ts_meta_sequence_num, compress_hyper_44_82_chunk._ts_meta_min_1, compress_hyper_44_82_chunk._ts_meta_max_1 - Index Cond: (compress_hyper_44_82_chunk.attr_id = "*VALUES*".column1) - Filter: ((compress_hyper_44_82_chunk._ts_meta_max_1 > 'Fri Jun 30 00:00:00 2023'::timestamp without time zone) AND (compress_hyper_44_82_chunk._ts_meta_min_1 < 'Thu Jul 06 00:00:00 2023'::timestamp without time zone)) (44 rows) SELECT * FROM ( VALUES(1),(2),(3),(4),(5),(6),(7),(8),(9),(10) ) AS attr_ids(attr_id) @@ -2180,9 +2178,9 @@ SET work_mem = '16MB'; SELECT compress_chunk(ch) FROM show_chunks('sensor_data_compressed') ch LIMIT 3; compress_chunk ------------------------------------------ - _timescaledb_internal._hyper_45_85_chunk - _timescaledb_internal._hyper_45_86_chunk _timescaledb_internal._hyper_45_87_chunk + _timescaledb_internal._hyper_45_88_chunk + _timescaledb_internal._hyper_45_89_chunk (3 rows) ANALYZE sensor_data_compressed; @@ -2208,14 +2206,32 @@ SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5; Order: sensor_data_compressed."time" DESC Startup Exclusion: false Runtime Exclusion: false - -> Index Scan using _hyper_45_91_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_91_chunk (actual rows=2 loops=1) + -> Index Scan using _hyper_45_93_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_93_chunk (actual rows=2 loops=1) + Output: _hyper_45_93_chunk."time", _hyper_45_93_chunk.sensor_id, _hyper_45_93_chunk.cpu, _hyper_45_93_chunk.temperature + -> Index Scan using _hyper_45_92_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_92_chunk (actual rows=2 loops=1) + Output: _hyper_45_92_chunk."time", _hyper_45_92_chunk.sensor_id, _hyper_45_92_chunk.cpu, _hyper_45_92_chunk.temperature + -> Index Scan using _hyper_45_91_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_91_chunk (actual rows=1 loops=1) Output: _hyper_45_91_chunk."time", _hyper_45_91_chunk.sensor_id, _hyper_45_91_chunk.cpu, _hyper_45_91_chunk.temperature - -> Index Scan using _hyper_45_90_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_90_chunk (actual rows=2 loops=1) + -> Index Scan using _hyper_45_90_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_90_chunk (never executed) Output: _hyper_45_90_chunk."time", _hyper_45_90_chunk.sensor_id, _hyper_45_90_chunk.cpu, _hyper_45_90_chunk.temperature - -> Index Scan using _hyper_45_89_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_89_chunk (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_89_chunk (never executed) Output: _hyper_45_89_chunk."time", _hyper_45_89_chunk.sensor_id, _hyper_45_89_chunk.cpu, _hyper_45_89_chunk.temperature - -> Index Scan using _hyper_45_88_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_88_chunk (never executed) + Batch Sorted Merge: true + Bulk Decompression: false + -> Sort (never executed) + Output: compress_hyper_46_96_chunk."time", compress_hyper_46_96_chunk.sensor_id, compress_hyper_46_96_chunk.cpu, compress_hyper_46_96_chunk.temperature, 
compress_hyper_46_96_chunk._ts_meta_count, compress_hyper_46_96_chunk._ts_meta_sequence_num, compress_hyper_46_96_chunk._ts_meta_min_1, compress_hyper_46_96_chunk._ts_meta_max_1 + Sort Key: compress_hyper_46_96_chunk._ts_meta_max_1 DESC + -> Seq Scan on _timescaledb_internal.compress_hyper_46_96_chunk (never executed) + Output: compress_hyper_46_96_chunk."time", compress_hyper_46_96_chunk.sensor_id, compress_hyper_46_96_chunk.cpu, compress_hyper_46_96_chunk.temperature, compress_hyper_46_96_chunk._ts_meta_count, compress_hyper_46_96_chunk._ts_meta_sequence_num, compress_hyper_46_96_chunk._ts_meta_min_1, compress_hyper_46_96_chunk._ts_meta_max_1 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_88_chunk (never executed) Output: _hyper_45_88_chunk."time", _hyper_45_88_chunk.sensor_id, _hyper_45_88_chunk.cpu, _hyper_45_88_chunk.temperature + Batch Sorted Merge: true + Bulk Decompression: false + -> Sort (never executed) + Output: compress_hyper_46_95_chunk."time", compress_hyper_46_95_chunk.sensor_id, compress_hyper_46_95_chunk.cpu, compress_hyper_46_95_chunk.temperature, compress_hyper_46_95_chunk._ts_meta_count, compress_hyper_46_95_chunk._ts_meta_sequence_num, compress_hyper_46_95_chunk._ts_meta_min_1, compress_hyper_46_95_chunk._ts_meta_max_1 + Sort Key: compress_hyper_46_95_chunk._ts_meta_max_1 DESC + -> Seq Scan on _timescaledb_internal.compress_hyper_46_95_chunk (never executed) + Output: compress_hyper_46_95_chunk."time", compress_hyper_46_95_chunk.sensor_id, compress_hyper_46_95_chunk.cpu, compress_hyper_46_95_chunk.temperature, compress_hyper_46_95_chunk._ts_meta_count, compress_hyper_46_95_chunk._ts_meta_sequence_num, compress_hyper_46_95_chunk._ts_meta_min_1, compress_hyper_46_95_chunk._ts_meta_max_1 -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_87_chunk (never executed) Output: _hyper_45_87_chunk."time", _hyper_45_87_chunk.sensor_id, _hyper_45_87_chunk.cpu, _hyper_45_87_chunk.temperature Batch Sorted Merge: true @@ -2225,24 +2241,6 @@ SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5; Sort Key: compress_hyper_46_94_chunk._ts_meta_max_1 DESC -> Seq Scan on _timescaledb_internal.compress_hyper_46_94_chunk (never executed) Output: compress_hyper_46_94_chunk."time", compress_hyper_46_94_chunk.sensor_id, compress_hyper_46_94_chunk.cpu, compress_hyper_46_94_chunk.temperature, compress_hyper_46_94_chunk._ts_meta_count, compress_hyper_46_94_chunk._ts_meta_sequence_num, compress_hyper_46_94_chunk._ts_meta_min_1, compress_hyper_46_94_chunk._ts_meta_max_1 - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_86_chunk (never executed) - Output: _hyper_45_86_chunk."time", _hyper_45_86_chunk.sensor_id, _hyper_45_86_chunk.cpu, _hyper_45_86_chunk.temperature - Batch Sorted Merge: true - Bulk Decompression: false - -> Sort (never executed) - Output: compress_hyper_46_93_chunk."time", compress_hyper_46_93_chunk.sensor_id, compress_hyper_46_93_chunk.cpu, compress_hyper_46_93_chunk.temperature, compress_hyper_46_93_chunk._ts_meta_count, compress_hyper_46_93_chunk._ts_meta_sequence_num, compress_hyper_46_93_chunk._ts_meta_min_1, compress_hyper_46_93_chunk._ts_meta_max_1 - Sort Key: compress_hyper_46_93_chunk._ts_meta_max_1 DESC - -> Seq Scan on _timescaledb_internal.compress_hyper_46_93_chunk (never executed) - Output: compress_hyper_46_93_chunk."time", compress_hyper_46_93_chunk.sensor_id, compress_hyper_46_93_chunk.cpu, compress_hyper_46_93_chunk.temperature, compress_hyper_46_93_chunk._ts_meta_count, 
compress_hyper_46_93_chunk._ts_meta_sequence_num, compress_hyper_46_93_chunk._ts_meta_min_1, compress_hyper_46_93_chunk._ts_meta_max_1 - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_85_chunk (never executed) - Output: _hyper_45_85_chunk."time", _hyper_45_85_chunk.sensor_id, _hyper_45_85_chunk.cpu, _hyper_45_85_chunk.temperature - Batch Sorted Merge: true - Bulk Decompression: false - -> Sort (never executed) - Output: compress_hyper_46_92_chunk."time", compress_hyper_46_92_chunk.sensor_id, compress_hyper_46_92_chunk.cpu, compress_hyper_46_92_chunk.temperature, compress_hyper_46_92_chunk._ts_meta_count, compress_hyper_46_92_chunk._ts_meta_sequence_num, compress_hyper_46_92_chunk._ts_meta_min_1, compress_hyper_46_92_chunk._ts_meta_max_1 - Sort Key: compress_hyper_46_92_chunk._ts_meta_max_1 DESC - -> Seq Scan on _timescaledb_internal.compress_hyper_46_92_chunk (never executed) - Output: compress_hyper_46_92_chunk."time", compress_hyper_46_92_chunk.sensor_id, compress_hyper_46_92_chunk.cpu, compress_hyper_46_92_chunk.temperature, compress_hyper_46_92_chunk._ts_meta_count, compress_hyper_46_92_chunk._ts_meta_sequence_num, compress_hyper_46_92_chunk._ts_meta_min_1, compress_hyper_46_92_chunk._ts_meta_max_1 (42 rows) -- Only the first chunks should be accessed (batch sorted merge is disabled) @@ -2258,14 +2256,30 @@ SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5; Order: sensor_data_compressed."time" DESC Startup Exclusion: false Runtime Exclusion: false - -> Index Scan using _hyper_45_91_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_91_chunk (actual rows=2 loops=1) + -> Index Scan using _hyper_45_93_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_93_chunk (actual rows=2 loops=1) + Output: _hyper_45_93_chunk."time", _hyper_45_93_chunk.sensor_id, _hyper_45_93_chunk.cpu, _hyper_45_93_chunk.temperature + -> Index Scan using _hyper_45_92_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_92_chunk (actual rows=2 loops=1) + Output: _hyper_45_92_chunk."time", _hyper_45_92_chunk.sensor_id, _hyper_45_92_chunk.cpu, _hyper_45_92_chunk.temperature + -> Index Scan using _hyper_45_91_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_91_chunk (actual rows=1 loops=1) Output: _hyper_45_91_chunk."time", _hyper_45_91_chunk.sensor_id, _hyper_45_91_chunk.cpu, _hyper_45_91_chunk.temperature - -> Index Scan using _hyper_45_90_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_90_chunk (actual rows=2 loops=1) + -> Index Scan using _hyper_45_90_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_90_chunk (never executed) Output: _hyper_45_90_chunk."time", _hyper_45_90_chunk.sensor_id, _hyper_45_90_chunk.cpu, _hyper_45_90_chunk.temperature - -> Index Scan using _hyper_45_89_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_89_chunk (actual rows=1 loops=1) + -> Sort (never executed) Output: _hyper_45_89_chunk."time", _hyper_45_89_chunk.sensor_id, _hyper_45_89_chunk.cpu, _hyper_45_89_chunk.temperature - -> Index Scan using _hyper_45_88_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_88_chunk (never executed) + Sort Key: _hyper_45_89_chunk."time" DESC + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_89_chunk (never executed) + Output: _hyper_45_89_chunk."time", _hyper_45_89_chunk.sensor_id, _hyper_45_89_chunk.cpu, _hyper_45_89_chunk.temperature + Bulk Decompression: true + -> Seq 
Scan on _timescaledb_internal.compress_hyper_46_96_chunk (never executed) + Output: compress_hyper_46_96_chunk."time", compress_hyper_46_96_chunk.sensor_id, compress_hyper_46_96_chunk.cpu, compress_hyper_46_96_chunk.temperature, compress_hyper_46_96_chunk._ts_meta_count, compress_hyper_46_96_chunk._ts_meta_sequence_num, compress_hyper_46_96_chunk._ts_meta_min_1, compress_hyper_46_96_chunk._ts_meta_max_1 + -> Sort (never executed) Output: _hyper_45_88_chunk."time", _hyper_45_88_chunk.sensor_id, _hyper_45_88_chunk.cpu, _hyper_45_88_chunk.temperature + Sort Key: _hyper_45_88_chunk."time" DESC + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_88_chunk (never executed) + Output: _hyper_45_88_chunk."time", _hyper_45_88_chunk.sensor_id, _hyper_45_88_chunk.cpu, _hyper_45_88_chunk.temperature + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_46_95_chunk (never executed) + Output: compress_hyper_46_95_chunk."time", compress_hyper_46_95_chunk.sensor_id, compress_hyper_46_95_chunk.cpu, compress_hyper_46_95_chunk.temperature, compress_hyper_46_95_chunk._ts_meta_count, compress_hyper_46_95_chunk._ts_meta_sequence_num, compress_hyper_46_95_chunk._ts_meta_min_1, compress_hyper_46_95_chunk._ts_meta_max_1 -> Sort (never executed) Output: _hyper_45_87_chunk."time", _hyper_45_87_chunk.sensor_id, _hyper_45_87_chunk.cpu, _hyper_45_87_chunk.temperature Sort Key: _hyper_45_87_chunk."time" DESC @@ -2274,39 +2288,23 @@ SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5; Bulk Decompression: true -> Seq Scan on _timescaledb_internal.compress_hyper_46_94_chunk (never executed) Output: compress_hyper_46_94_chunk."time", compress_hyper_46_94_chunk.sensor_id, compress_hyper_46_94_chunk.cpu, compress_hyper_46_94_chunk.temperature, compress_hyper_46_94_chunk._ts_meta_count, compress_hyper_46_94_chunk._ts_meta_sequence_num, compress_hyper_46_94_chunk._ts_meta_min_1, compress_hyper_46_94_chunk._ts_meta_max_1 - -> Sort (never executed) - Output: _hyper_45_86_chunk."time", _hyper_45_86_chunk.sensor_id, _hyper_45_86_chunk.cpu, _hyper_45_86_chunk.temperature - Sort Key: _hyper_45_86_chunk."time" DESC - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_86_chunk (never executed) - Output: _hyper_45_86_chunk."time", _hyper_45_86_chunk.sensor_id, _hyper_45_86_chunk.cpu, _hyper_45_86_chunk.temperature - Bulk Decompression: true - -> Seq Scan on _timescaledb_internal.compress_hyper_46_93_chunk (never executed) - Output: compress_hyper_46_93_chunk."time", compress_hyper_46_93_chunk.sensor_id, compress_hyper_46_93_chunk.cpu, compress_hyper_46_93_chunk.temperature, compress_hyper_46_93_chunk._ts_meta_count, compress_hyper_46_93_chunk._ts_meta_sequence_num, compress_hyper_46_93_chunk._ts_meta_min_1, compress_hyper_46_93_chunk._ts_meta_max_1 - -> Sort (never executed) - Output: _hyper_45_85_chunk."time", _hyper_45_85_chunk.sensor_id, _hyper_45_85_chunk.cpu, _hyper_45_85_chunk.temperature - Sort Key: _hyper_45_85_chunk."time" DESC - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_85_chunk (never executed) - Output: _hyper_45_85_chunk."time", _hyper_45_85_chunk.sensor_id, _hyper_45_85_chunk.cpu, _hyper_45_85_chunk.temperature - Bulk Decompression: true - -> Seq Scan on _timescaledb_internal.compress_hyper_46_92_chunk (never executed) - Output: compress_hyper_46_92_chunk."time", compress_hyper_46_92_chunk.sensor_id, compress_hyper_46_92_chunk.cpu, compress_hyper_46_92_chunk.temperature, compress_hyper_46_92_chunk._ts_meta_count, 
compress_hyper_46_92_chunk._ts_meta_sequence_num, compress_hyper_46_92_chunk._ts_meta_min_1, compress_hyper_46_92_chunk._ts_meta_max_1 (39 rows) RESET timescaledb.enable_decompression_sorted_merge; -- Compress the remaining chunks SELECT compress_chunk(ch, if_not_compressed => true) FROM show_chunks('sensor_data_compressed') ch; -NOTICE: chunk "_hyper_45_85_chunk" is already compressed -NOTICE: chunk "_hyper_45_86_chunk" is already compressed NOTICE: chunk "_hyper_45_87_chunk" is already compressed +NOTICE: chunk "_hyper_45_88_chunk" is already compressed +NOTICE: chunk "_hyper_45_89_chunk" is already compressed compress_chunk ------------------------------------------ - _timescaledb_internal._hyper_45_85_chunk - _timescaledb_internal._hyper_45_86_chunk _timescaledb_internal._hyper_45_87_chunk _timescaledb_internal._hyper_45_88_chunk _timescaledb_internal._hyper_45_89_chunk _timescaledb_internal._hyper_45_90_chunk _timescaledb_internal._hyper_45_91_chunk + _timescaledb_internal._hyper_45_92_chunk + _timescaledb_internal._hyper_45_93_chunk (7 rows) SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5; @@ -2322,8 +2320,8 @@ SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5; -- Only the first chunks should be accessed (batch sorted merge is enabled) :PREFIX SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ Limit (actual rows=5 loops=1) Output: sensor_data_compressed."time", sensor_data_compressed.sensor_id, sensor_data_compressed.cpu, sensor_data_compressed.temperature -> Custom Scan (ChunkAppend) on public.sensor_data_compressed (actual rows=5 loops=1) @@ -2331,7 +2329,27 @@ SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5; Order: sensor_data_compressed."time" DESC Startup Exclusion: false Runtime Exclusion: false - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_91_chunk (actual rows=2 loops=1) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_93_chunk (actual rows=2 loops=1) + Output: _hyper_45_93_chunk."time", _hyper_45_93_chunk.sensor_id, _hyper_45_93_chunk.cpu, _hyper_45_93_chunk.temperature + Batch Sorted Merge: true + Bulk Decompression: false + -> Sort (actual rows=2 loops=1) + Output: compress_hyper_46_100_chunk."time", compress_hyper_46_100_chunk.sensor_id, compress_hyper_46_100_chunk.cpu, compress_hyper_46_100_chunk.temperature, compress_hyper_46_100_chunk._ts_meta_count, compress_hyper_46_100_chunk._ts_meta_sequence_num, compress_hyper_46_100_chunk._ts_meta_min_1, compress_hyper_46_100_chunk._ts_meta_max_1 + Sort Key: compress_hyper_46_100_chunk._ts_meta_max_1 DESC + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_46_100_chunk (actual rows=2 loops=1) + Output: compress_hyper_46_100_chunk."time", 
compress_hyper_46_100_chunk.sensor_id, compress_hyper_46_100_chunk.cpu, compress_hyper_46_100_chunk.temperature, compress_hyper_46_100_chunk._ts_meta_count, compress_hyper_46_100_chunk._ts_meta_sequence_num, compress_hyper_46_100_chunk._ts_meta_min_1, compress_hyper_46_100_chunk._ts_meta_max_1 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_92_chunk (actual rows=2 loops=1) + Output: _hyper_45_92_chunk."time", _hyper_45_92_chunk.sensor_id, _hyper_45_92_chunk.cpu, _hyper_45_92_chunk.temperature + Batch Sorted Merge: true + Bulk Decompression: false + -> Sort (actual rows=2 loops=1) + Output: compress_hyper_46_99_chunk."time", compress_hyper_46_99_chunk.sensor_id, compress_hyper_46_99_chunk.cpu, compress_hyper_46_99_chunk.temperature, compress_hyper_46_99_chunk._ts_meta_count, compress_hyper_46_99_chunk._ts_meta_sequence_num, compress_hyper_46_99_chunk._ts_meta_min_1, compress_hyper_46_99_chunk._ts_meta_max_1 + Sort Key: compress_hyper_46_99_chunk._ts_meta_max_1 DESC + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_46_99_chunk (actual rows=2 loops=1) + Output: compress_hyper_46_99_chunk."time", compress_hyper_46_99_chunk.sensor_id, compress_hyper_46_99_chunk.cpu, compress_hyper_46_99_chunk.temperature, compress_hyper_46_99_chunk._ts_meta_count, compress_hyper_46_99_chunk._ts_meta_sequence_num, compress_hyper_46_99_chunk._ts_meta_min_1, compress_hyper_46_99_chunk._ts_meta_max_1 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_91_chunk (actual rows=1 loops=1) Output: _hyper_45_91_chunk."time", _hyper_45_91_chunk.sensor_id, _hyper_45_91_chunk.cpu, _hyper_45_91_chunk.temperature Batch Sorted Merge: true Bulk Decompression: false @@ -2341,25 +2359,23 @@ SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5; Sort Method: quicksort -> Seq Scan on _timescaledb_internal.compress_hyper_46_98_chunk (actual rows=2 loops=1) Output: compress_hyper_46_98_chunk."time", compress_hyper_46_98_chunk.sensor_id, compress_hyper_46_98_chunk.cpu, compress_hyper_46_98_chunk.temperature, compress_hyper_46_98_chunk._ts_meta_count, compress_hyper_46_98_chunk._ts_meta_sequence_num, compress_hyper_46_98_chunk._ts_meta_min_1, compress_hyper_46_98_chunk._ts_meta_max_1 - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_90_chunk (actual rows=2 loops=1) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_90_chunk (never executed) Output: _hyper_45_90_chunk."time", _hyper_45_90_chunk.sensor_id, _hyper_45_90_chunk.cpu, _hyper_45_90_chunk.temperature Batch Sorted Merge: true Bulk Decompression: false - -> Sort (actual rows=2 loops=1) + -> Sort (never executed) Output: compress_hyper_46_97_chunk."time", compress_hyper_46_97_chunk.sensor_id, compress_hyper_46_97_chunk.cpu, compress_hyper_46_97_chunk.temperature, compress_hyper_46_97_chunk._ts_meta_count, compress_hyper_46_97_chunk._ts_meta_sequence_num, compress_hyper_46_97_chunk._ts_meta_min_1, compress_hyper_46_97_chunk._ts_meta_max_1 Sort Key: compress_hyper_46_97_chunk._ts_meta_max_1 DESC - Sort Method: quicksort - -> Seq Scan on _timescaledb_internal.compress_hyper_46_97_chunk (actual rows=2 loops=1) + -> Seq Scan on _timescaledb_internal.compress_hyper_46_97_chunk (never executed) Output: compress_hyper_46_97_chunk."time", compress_hyper_46_97_chunk.sensor_id, compress_hyper_46_97_chunk.cpu, compress_hyper_46_97_chunk.temperature, compress_hyper_46_97_chunk._ts_meta_count, compress_hyper_46_97_chunk._ts_meta_sequence_num, 
compress_hyper_46_97_chunk._ts_meta_min_1, compress_hyper_46_97_chunk._ts_meta_max_1 - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_89_chunk (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_89_chunk (never executed) Output: _hyper_45_89_chunk."time", _hyper_45_89_chunk.sensor_id, _hyper_45_89_chunk.cpu, _hyper_45_89_chunk.temperature Batch Sorted Merge: true Bulk Decompression: false - -> Sort (actual rows=2 loops=1) + -> Sort (never executed) Output: compress_hyper_46_96_chunk."time", compress_hyper_46_96_chunk.sensor_id, compress_hyper_46_96_chunk.cpu, compress_hyper_46_96_chunk.temperature, compress_hyper_46_96_chunk._ts_meta_count, compress_hyper_46_96_chunk._ts_meta_sequence_num, compress_hyper_46_96_chunk._ts_meta_min_1, compress_hyper_46_96_chunk._ts_meta_max_1 Sort Key: compress_hyper_46_96_chunk._ts_meta_max_1 DESC - Sort Method: quicksort - -> Seq Scan on _timescaledb_internal.compress_hyper_46_96_chunk (actual rows=2 loops=1) + -> Seq Scan on _timescaledb_internal.compress_hyper_46_96_chunk (never executed) Output: compress_hyper_46_96_chunk."time", compress_hyper_46_96_chunk.sensor_id, compress_hyper_46_96_chunk.cpu, compress_hyper_46_96_chunk.temperature, compress_hyper_46_96_chunk._ts_meta_count, compress_hyper_46_96_chunk._ts_meta_sequence_num, compress_hyper_46_96_chunk._ts_meta_min_1, compress_hyper_46_96_chunk._ts_meta_max_1 -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_88_chunk (never executed) Output: _hyper_45_88_chunk."time", _hyper_45_88_chunk.sensor_id, _hyper_45_88_chunk.cpu, _hyper_45_88_chunk.temperature @@ -2379,32 +2395,14 @@ SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5; Sort Key: compress_hyper_46_94_chunk._ts_meta_max_1 DESC -> Seq Scan on _timescaledb_internal.compress_hyper_46_94_chunk (never executed) Output: compress_hyper_46_94_chunk."time", compress_hyper_46_94_chunk.sensor_id, compress_hyper_46_94_chunk.cpu, compress_hyper_46_94_chunk.temperature, compress_hyper_46_94_chunk._ts_meta_count, compress_hyper_46_94_chunk._ts_meta_sequence_num, compress_hyper_46_94_chunk._ts_meta_min_1, compress_hyper_46_94_chunk._ts_meta_max_1 - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_86_chunk (never executed) - Output: _hyper_45_86_chunk."time", _hyper_45_86_chunk.sensor_id, _hyper_45_86_chunk.cpu, _hyper_45_86_chunk.temperature - Batch Sorted Merge: true - Bulk Decompression: false - -> Sort (never executed) - Output: compress_hyper_46_93_chunk."time", compress_hyper_46_93_chunk.sensor_id, compress_hyper_46_93_chunk.cpu, compress_hyper_46_93_chunk.temperature, compress_hyper_46_93_chunk._ts_meta_count, compress_hyper_46_93_chunk._ts_meta_sequence_num, compress_hyper_46_93_chunk._ts_meta_min_1, compress_hyper_46_93_chunk._ts_meta_max_1 - Sort Key: compress_hyper_46_93_chunk._ts_meta_max_1 DESC - -> Seq Scan on _timescaledb_internal.compress_hyper_46_93_chunk (never executed) - Output: compress_hyper_46_93_chunk."time", compress_hyper_46_93_chunk.sensor_id, compress_hyper_46_93_chunk.cpu, compress_hyper_46_93_chunk.temperature, compress_hyper_46_93_chunk._ts_meta_count, compress_hyper_46_93_chunk._ts_meta_sequence_num, compress_hyper_46_93_chunk._ts_meta_min_1, compress_hyper_46_93_chunk._ts_meta_max_1 - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_85_chunk (never executed) - Output: _hyper_45_85_chunk."time", _hyper_45_85_chunk.sensor_id, _hyper_45_85_chunk.cpu, _hyper_45_85_chunk.temperature - Batch Sorted Merge: 
true - Bulk Decompression: false - -> Sort (never executed) - Output: compress_hyper_46_92_chunk."time", compress_hyper_46_92_chunk.sensor_id, compress_hyper_46_92_chunk.cpu, compress_hyper_46_92_chunk.temperature, compress_hyper_46_92_chunk._ts_meta_count, compress_hyper_46_92_chunk._ts_meta_sequence_num, compress_hyper_46_92_chunk._ts_meta_min_1, compress_hyper_46_92_chunk._ts_meta_max_1 - Sort Key: compress_hyper_46_92_chunk._ts_meta_max_1 DESC - -> Seq Scan on _timescaledb_internal.compress_hyper_46_92_chunk (never executed) - Output: compress_hyper_46_92_chunk."time", compress_hyper_46_92_chunk.sensor_id, compress_hyper_46_92_chunk.cpu, compress_hyper_46_92_chunk.temperature, compress_hyper_46_92_chunk._ts_meta_count, compress_hyper_46_92_chunk._ts_meta_sequence_num, compress_hyper_46_92_chunk._ts_meta_min_1, compress_hyper_46_92_chunk._ts_meta_max_1 (73 rows) -- Only the first chunks should be accessed (batch sorted merge is disabled) SET timescaledb.enable_decompression_sorted_merge = FALSE; :PREFIX SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ Limit (actual rows=5 loops=1) Output: sensor_data_compressed."time", sensor_data_compressed.sensor_id, sensor_data_compressed.cpu, sensor_data_compressed.temperature -> Custom Scan (ChunkAppend) on public.sensor_data_compressed (actual rows=5 loops=1) @@ -2413,6 +2411,24 @@ SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5; Startup Exclusion: false Runtime Exclusion: false -> Sort (actual rows=2 loops=1) + Output: _hyper_45_93_chunk."time", _hyper_45_93_chunk.sensor_id, _hyper_45_93_chunk.cpu, _hyper_45_93_chunk.temperature + Sort Key: _hyper_45_93_chunk."time" DESC + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_93_chunk (actual rows=2 loops=1) + Output: _hyper_45_93_chunk."time", _hyper_45_93_chunk.sensor_id, _hyper_45_93_chunk.cpu, _hyper_45_93_chunk.temperature + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_46_100_chunk (actual rows=2 loops=1) + Output: compress_hyper_46_100_chunk."time", compress_hyper_46_100_chunk.sensor_id, compress_hyper_46_100_chunk.cpu, compress_hyper_46_100_chunk.temperature, compress_hyper_46_100_chunk._ts_meta_count, compress_hyper_46_100_chunk._ts_meta_sequence_num, compress_hyper_46_100_chunk._ts_meta_min_1, compress_hyper_46_100_chunk._ts_meta_max_1 + -> Sort (actual rows=2 loops=1) + Output: _hyper_45_92_chunk."time", _hyper_45_92_chunk.sensor_id, _hyper_45_92_chunk.cpu, _hyper_45_92_chunk.temperature + Sort Key: _hyper_45_92_chunk."time" DESC + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_92_chunk (actual rows=2 loops=1) + Output: _hyper_45_92_chunk."time", _hyper_45_92_chunk.sensor_id, _hyper_45_92_chunk.cpu, 
_hyper_45_92_chunk.temperature + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_46_99_chunk (actual rows=2 loops=1) + Output: compress_hyper_46_99_chunk."time", compress_hyper_46_99_chunk.sensor_id, compress_hyper_46_99_chunk.cpu, compress_hyper_46_99_chunk.temperature, compress_hyper_46_99_chunk._ts_meta_count, compress_hyper_46_99_chunk._ts_meta_sequence_num, compress_hyper_46_99_chunk._ts_meta_min_1, compress_hyper_46_99_chunk._ts_meta_max_1 + -> Sort (actual rows=1 loops=1) Output: _hyper_45_91_chunk."time", _hyper_45_91_chunk.sensor_id, _hyper_45_91_chunk.cpu, _hyper_45_91_chunk.temperature Sort Key: _hyper_45_91_chunk."time" DESC Sort Method: quicksort @@ -2421,23 +2437,21 @@ SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5; Bulk Decompression: true -> Seq Scan on _timescaledb_internal.compress_hyper_46_98_chunk (actual rows=2 loops=1) Output: compress_hyper_46_98_chunk."time", compress_hyper_46_98_chunk.sensor_id, compress_hyper_46_98_chunk.cpu, compress_hyper_46_98_chunk.temperature, compress_hyper_46_98_chunk._ts_meta_count, compress_hyper_46_98_chunk._ts_meta_sequence_num, compress_hyper_46_98_chunk._ts_meta_min_1, compress_hyper_46_98_chunk._ts_meta_max_1 - -> Sort (actual rows=2 loops=1) + -> Sort (never executed) Output: _hyper_45_90_chunk."time", _hyper_45_90_chunk.sensor_id, _hyper_45_90_chunk.cpu, _hyper_45_90_chunk.temperature Sort Key: _hyper_45_90_chunk."time" DESC - Sort Method: quicksort - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_90_chunk (actual rows=2 loops=1) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_90_chunk (never executed) Output: _hyper_45_90_chunk."time", _hyper_45_90_chunk.sensor_id, _hyper_45_90_chunk.cpu, _hyper_45_90_chunk.temperature Bulk Decompression: true - -> Seq Scan on _timescaledb_internal.compress_hyper_46_97_chunk (actual rows=2 loops=1) + -> Seq Scan on _timescaledb_internal.compress_hyper_46_97_chunk (never executed) Output: compress_hyper_46_97_chunk."time", compress_hyper_46_97_chunk.sensor_id, compress_hyper_46_97_chunk.cpu, compress_hyper_46_97_chunk.temperature, compress_hyper_46_97_chunk._ts_meta_count, compress_hyper_46_97_chunk._ts_meta_sequence_num, compress_hyper_46_97_chunk._ts_meta_min_1, compress_hyper_46_97_chunk._ts_meta_max_1 - -> Sort (actual rows=1 loops=1) + -> Sort (never executed) Output: _hyper_45_89_chunk."time", _hyper_45_89_chunk.sensor_id, _hyper_45_89_chunk.cpu, _hyper_45_89_chunk.temperature Sort Key: _hyper_45_89_chunk."time" DESC - Sort Method: quicksort - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_89_chunk (actual rows=2 loops=1) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_89_chunk (never executed) Output: _hyper_45_89_chunk."time", _hyper_45_89_chunk.sensor_id, _hyper_45_89_chunk.cpu, _hyper_45_89_chunk.temperature Bulk Decompression: true - -> Seq Scan on _timescaledb_internal.compress_hyper_46_96_chunk (actual rows=2 loops=1) + -> Seq Scan on _timescaledb_internal.compress_hyper_46_96_chunk (never executed) Output: compress_hyper_46_96_chunk."time", compress_hyper_46_96_chunk.sensor_id, compress_hyper_46_96_chunk.cpu, compress_hyper_46_96_chunk.temperature, compress_hyper_46_96_chunk._ts_meta_count, compress_hyper_46_96_chunk._ts_meta_sequence_num, compress_hyper_46_96_chunk._ts_meta_min_1, compress_hyper_46_96_chunk._ts_meta_max_1 -> Sort (never executed) Output: _hyper_45_88_chunk."time", _hyper_45_88_chunk.sensor_id, _hyper_45_88_chunk.cpu, 
_hyper_45_88_chunk.temperature @@ -2455,22 +2469,6 @@ SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5; Bulk Decompression: true -> Seq Scan on _timescaledb_internal.compress_hyper_46_94_chunk (never executed) Output: compress_hyper_46_94_chunk."time", compress_hyper_46_94_chunk.sensor_id, compress_hyper_46_94_chunk.cpu, compress_hyper_46_94_chunk.temperature, compress_hyper_46_94_chunk._ts_meta_count, compress_hyper_46_94_chunk._ts_meta_sequence_num, compress_hyper_46_94_chunk._ts_meta_min_1, compress_hyper_46_94_chunk._ts_meta_max_1 - -> Sort (never executed) - Output: _hyper_45_86_chunk."time", _hyper_45_86_chunk.sensor_id, _hyper_45_86_chunk.cpu, _hyper_45_86_chunk.temperature - Sort Key: _hyper_45_86_chunk."time" DESC - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_86_chunk (never executed) - Output: _hyper_45_86_chunk."time", _hyper_45_86_chunk.sensor_id, _hyper_45_86_chunk.cpu, _hyper_45_86_chunk.temperature - Bulk Decompression: true - -> Seq Scan on _timescaledb_internal.compress_hyper_46_93_chunk (never executed) - Output: compress_hyper_46_93_chunk."time", compress_hyper_46_93_chunk.sensor_id, compress_hyper_46_93_chunk.cpu, compress_hyper_46_93_chunk.temperature, compress_hyper_46_93_chunk._ts_meta_count, compress_hyper_46_93_chunk._ts_meta_sequence_num, compress_hyper_46_93_chunk._ts_meta_min_1, compress_hyper_46_93_chunk._ts_meta_max_1 - -> Sort (never executed) - Output: _hyper_45_85_chunk."time", _hyper_45_85_chunk.sensor_id, _hyper_45_85_chunk.cpu, _hyper_45_85_chunk.temperature - Sort Key: _hyper_45_85_chunk."time" DESC - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_85_chunk (never executed) - Output: _hyper_45_85_chunk."time", _hyper_45_85_chunk.sensor_id, _hyper_45_85_chunk.cpu, _hyper_45_85_chunk.temperature - Bulk Decompression: true - -> Seq Scan on _timescaledb_internal.compress_hyper_46_92_chunk (never executed) - Output: compress_hyper_46_92_chunk."time", compress_hyper_46_92_chunk.sensor_id, compress_hyper_46_92_chunk.cpu, compress_hyper_46_92_chunk.temperature, compress_hyper_46_92_chunk._ts_meta_count, compress_hyper_46_92_chunk._ts_meta_sequence_num, compress_hyper_46_92_chunk._ts_meta_min_1, compress_hyper_46_92_chunk._ts_meta_max_1 (66 rows) RESET timescaledb.enable_decompression_sorted_merge; @@ -2480,8 +2478,8 @@ INSERT INTO sensor_data_compressed (time, sensor_id, cpu, temperature) -- Only the first chunks should be accessed (batch sorted merge is enabled) :PREFIX SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ Limit (actual rows=5 loops=1) Output: sensor_data_compressed."time", sensor_data_compressed.sensor_id, sensor_data_compressed.cpu, sensor_data_compressed.temperature -> Custom Scan (ChunkAppend) on public.sensor_data_compressed 
(actual rows=5 loops=1) @@ -2489,7 +2487,27 @@ SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5; Order: sensor_data_compressed."time" DESC Startup Exclusion: false Runtime Exclusion: false - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_91_chunk (actual rows=2 loops=1) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_93_chunk (actual rows=2 loops=1) + Output: _hyper_45_93_chunk."time", _hyper_45_93_chunk.sensor_id, _hyper_45_93_chunk.cpu, _hyper_45_93_chunk.temperature + Batch Sorted Merge: true + Bulk Decompression: false + -> Sort (actual rows=2 loops=1) + Output: compress_hyper_46_100_chunk."time", compress_hyper_46_100_chunk.sensor_id, compress_hyper_46_100_chunk.cpu, compress_hyper_46_100_chunk.temperature, compress_hyper_46_100_chunk._ts_meta_count, compress_hyper_46_100_chunk._ts_meta_sequence_num, compress_hyper_46_100_chunk._ts_meta_min_1, compress_hyper_46_100_chunk._ts_meta_max_1 + Sort Key: compress_hyper_46_100_chunk._ts_meta_max_1 DESC + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_46_100_chunk (actual rows=2 loops=1) + Output: compress_hyper_46_100_chunk."time", compress_hyper_46_100_chunk.sensor_id, compress_hyper_46_100_chunk.cpu, compress_hyper_46_100_chunk.temperature, compress_hyper_46_100_chunk._ts_meta_count, compress_hyper_46_100_chunk._ts_meta_sequence_num, compress_hyper_46_100_chunk._ts_meta_min_1, compress_hyper_46_100_chunk._ts_meta_max_1 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_92_chunk (actual rows=2 loops=1) + Output: _hyper_45_92_chunk."time", _hyper_45_92_chunk.sensor_id, _hyper_45_92_chunk.cpu, _hyper_45_92_chunk.temperature + Batch Sorted Merge: true + Bulk Decompression: false + -> Sort (actual rows=2 loops=1) + Output: compress_hyper_46_99_chunk."time", compress_hyper_46_99_chunk.sensor_id, compress_hyper_46_99_chunk.cpu, compress_hyper_46_99_chunk.temperature, compress_hyper_46_99_chunk._ts_meta_count, compress_hyper_46_99_chunk._ts_meta_sequence_num, compress_hyper_46_99_chunk._ts_meta_min_1, compress_hyper_46_99_chunk._ts_meta_max_1 + Sort Key: compress_hyper_46_99_chunk._ts_meta_max_1 DESC + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_46_99_chunk (actual rows=2 loops=1) + Output: compress_hyper_46_99_chunk."time", compress_hyper_46_99_chunk.sensor_id, compress_hyper_46_99_chunk.cpu, compress_hyper_46_99_chunk.temperature, compress_hyper_46_99_chunk._ts_meta_count, compress_hyper_46_99_chunk._ts_meta_sequence_num, compress_hyper_46_99_chunk._ts_meta_min_1, compress_hyper_46_99_chunk._ts_meta_max_1 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_91_chunk (actual rows=1 loops=1) Output: _hyper_45_91_chunk."time", _hyper_45_91_chunk.sensor_id, _hyper_45_91_chunk.cpu, _hyper_45_91_chunk.temperature Batch Sorted Merge: true Bulk Decompression: false @@ -2499,25 +2517,23 @@ SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5; Sort Method: quicksort -> Seq Scan on _timescaledb_internal.compress_hyper_46_98_chunk (actual rows=2 loops=1) Output: compress_hyper_46_98_chunk."time", compress_hyper_46_98_chunk.sensor_id, compress_hyper_46_98_chunk.cpu, compress_hyper_46_98_chunk.temperature, compress_hyper_46_98_chunk._ts_meta_count, compress_hyper_46_98_chunk._ts_meta_sequence_num, compress_hyper_46_98_chunk._ts_meta_min_1, compress_hyper_46_98_chunk._ts_meta_max_1 - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_90_chunk (actual rows=2 loops=1) + -> Custom 
Scan (DecompressChunk) on _timescaledb_internal._hyper_45_90_chunk (never executed) Output: _hyper_45_90_chunk."time", _hyper_45_90_chunk.sensor_id, _hyper_45_90_chunk.cpu, _hyper_45_90_chunk.temperature Batch Sorted Merge: true Bulk Decompression: false - -> Sort (actual rows=2 loops=1) + -> Sort (never executed) Output: compress_hyper_46_97_chunk."time", compress_hyper_46_97_chunk.sensor_id, compress_hyper_46_97_chunk.cpu, compress_hyper_46_97_chunk.temperature, compress_hyper_46_97_chunk._ts_meta_count, compress_hyper_46_97_chunk._ts_meta_sequence_num, compress_hyper_46_97_chunk._ts_meta_min_1, compress_hyper_46_97_chunk._ts_meta_max_1 Sort Key: compress_hyper_46_97_chunk._ts_meta_max_1 DESC - Sort Method: quicksort - -> Seq Scan on _timescaledb_internal.compress_hyper_46_97_chunk (actual rows=2 loops=1) + -> Seq Scan on _timescaledb_internal.compress_hyper_46_97_chunk (never executed) Output: compress_hyper_46_97_chunk."time", compress_hyper_46_97_chunk.sensor_id, compress_hyper_46_97_chunk.cpu, compress_hyper_46_97_chunk.temperature, compress_hyper_46_97_chunk._ts_meta_count, compress_hyper_46_97_chunk._ts_meta_sequence_num, compress_hyper_46_97_chunk._ts_meta_min_1, compress_hyper_46_97_chunk._ts_meta_max_1 - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_89_chunk (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_89_chunk (never executed) Output: _hyper_45_89_chunk."time", _hyper_45_89_chunk.sensor_id, _hyper_45_89_chunk.cpu, _hyper_45_89_chunk.temperature Batch Sorted Merge: true Bulk Decompression: false - -> Sort (actual rows=2 loops=1) + -> Sort (never executed) Output: compress_hyper_46_96_chunk."time", compress_hyper_46_96_chunk.sensor_id, compress_hyper_46_96_chunk.cpu, compress_hyper_46_96_chunk.temperature, compress_hyper_46_96_chunk._ts_meta_count, compress_hyper_46_96_chunk._ts_meta_sequence_num, compress_hyper_46_96_chunk._ts_meta_min_1, compress_hyper_46_96_chunk._ts_meta_max_1 Sort Key: compress_hyper_46_96_chunk._ts_meta_max_1 DESC - Sort Method: quicksort - -> Seq Scan on _timescaledb_internal.compress_hyper_46_96_chunk (actual rows=2 loops=1) + -> Seq Scan on _timescaledb_internal.compress_hyper_46_96_chunk (never executed) Output: compress_hyper_46_96_chunk."time", compress_hyper_46_96_chunk.sensor_id, compress_hyper_46_96_chunk.cpu, compress_hyper_46_96_chunk.temperature, compress_hyper_46_96_chunk._ts_meta_count, compress_hyper_46_96_chunk._ts_meta_sequence_num, compress_hyper_46_96_chunk._ts_meta_min_1, compress_hyper_46_96_chunk._ts_meta_max_1 -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_88_chunk (never executed) Output: _hyper_45_88_chunk."time", _hyper_45_88_chunk.sensor_id, _hyper_45_88_chunk.cpu, _hyper_45_88_chunk.temperature @@ -2528,45 +2544,27 @@ SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5; Sort Key: compress_hyper_46_95_chunk._ts_meta_max_1 DESC -> Seq Scan on _timescaledb_internal.compress_hyper_46_95_chunk (never executed) Output: compress_hyper_46_95_chunk."time", compress_hyper_46_95_chunk.sensor_id, compress_hyper_46_95_chunk.cpu, compress_hyper_46_95_chunk.temperature, compress_hyper_46_95_chunk._ts_meta_count, compress_hyper_46_95_chunk._ts_meta_sequence_num, compress_hyper_46_95_chunk._ts_meta_min_1, compress_hyper_46_95_chunk._ts_meta_max_1 - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_87_chunk (never executed) - Output: _hyper_45_87_chunk."time", _hyper_45_87_chunk.sensor_id, _hyper_45_87_chunk.cpu, 
_hyper_45_87_chunk.temperature - Batch Sorted Merge: true - Bulk Decompression: false - -> Sort (never executed) - Output: compress_hyper_46_94_chunk."time", compress_hyper_46_94_chunk.sensor_id, compress_hyper_46_94_chunk.cpu, compress_hyper_46_94_chunk.temperature, compress_hyper_46_94_chunk._ts_meta_count, compress_hyper_46_94_chunk._ts_meta_sequence_num, compress_hyper_46_94_chunk._ts_meta_min_1, compress_hyper_46_94_chunk._ts_meta_max_1 - Sort Key: compress_hyper_46_94_chunk._ts_meta_max_1 DESC - -> Seq Scan on _timescaledb_internal.compress_hyper_46_94_chunk (never executed) - Output: compress_hyper_46_94_chunk."time", compress_hyper_46_94_chunk.sensor_id, compress_hyper_46_94_chunk.cpu, compress_hyper_46_94_chunk.temperature, compress_hyper_46_94_chunk._ts_meta_count, compress_hyper_46_94_chunk._ts_meta_sequence_num, compress_hyper_46_94_chunk._ts_meta_min_1, compress_hyper_46_94_chunk._ts_meta_max_1 - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_86_chunk (never executed) - Output: _hyper_45_86_chunk."time", _hyper_45_86_chunk.sensor_id, _hyper_45_86_chunk.cpu, _hyper_45_86_chunk.temperature - Batch Sorted Merge: true - Bulk Decompression: false - -> Sort (never executed) - Output: compress_hyper_46_93_chunk."time", compress_hyper_46_93_chunk.sensor_id, compress_hyper_46_93_chunk.cpu, compress_hyper_46_93_chunk.temperature, compress_hyper_46_93_chunk._ts_meta_count, compress_hyper_46_93_chunk._ts_meta_sequence_num, compress_hyper_46_93_chunk._ts_meta_min_1, compress_hyper_46_93_chunk._ts_meta_max_1 - Sort Key: compress_hyper_46_93_chunk._ts_meta_max_1 DESC - -> Seq Scan on _timescaledb_internal.compress_hyper_46_93_chunk (never executed) - Output: compress_hyper_46_93_chunk."time", compress_hyper_46_93_chunk.sensor_id, compress_hyper_46_93_chunk.cpu, compress_hyper_46_93_chunk.temperature, compress_hyper_46_93_chunk._ts_meta_count, compress_hyper_46_93_chunk._ts_meta_sequence_num, compress_hyper_46_93_chunk._ts_meta_min_1, compress_hyper_46_93_chunk._ts_meta_max_1 -> Merge Append (never executed) - Sort Key: _hyper_45_85_chunk."time" DESC - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_85_chunk (never executed) - Output: _hyper_45_85_chunk."time", _hyper_45_85_chunk.sensor_id, _hyper_45_85_chunk.cpu, _hyper_45_85_chunk.temperature + Sort Key: _hyper_45_87_chunk."time" DESC + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_87_chunk (never executed) + Output: _hyper_45_87_chunk."time", _hyper_45_87_chunk.sensor_id, _hyper_45_87_chunk.cpu, _hyper_45_87_chunk.temperature Batch Sorted Merge: true Bulk Decompression: false -> Sort (never executed) - Output: compress_hyper_46_92_chunk."time", compress_hyper_46_92_chunk.sensor_id, compress_hyper_46_92_chunk.cpu, compress_hyper_46_92_chunk.temperature, compress_hyper_46_92_chunk._ts_meta_count, compress_hyper_46_92_chunk._ts_meta_sequence_num, compress_hyper_46_92_chunk._ts_meta_min_1, compress_hyper_46_92_chunk._ts_meta_max_1 - Sort Key: compress_hyper_46_92_chunk._ts_meta_max_1 DESC - -> Seq Scan on _timescaledb_internal.compress_hyper_46_92_chunk (never executed) - Output: compress_hyper_46_92_chunk."time", compress_hyper_46_92_chunk.sensor_id, compress_hyper_46_92_chunk.cpu, compress_hyper_46_92_chunk.temperature, compress_hyper_46_92_chunk._ts_meta_count, compress_hyper_46_92_chunk._ts_meta_sequence_num, compress_hyper_46_92_chunk._ts_meta_min_1, compress_hyper_46_92_chunk._ts_meta_max_1 - -> Index Scan using _hyper_45_85_chunk_sensor_data_compressed_time_idx on 
_timescaledb_internal._hyper_45_85_chunk (never executed) - Output: _hyper_45_85_chunk."time", _hyper_45_85_chunk.sensor_id, _hyper_45_85_chunk.cpu, _hyper_45_85_chunk.temperature + Output: compress_hyper_46_94_chunk."time", compress_hyper_46_94_chunk.sensor_id, compress_hyper_46_94_chunk.cpu, compress_hyper_46_94_chunk.temperature, compress_hyper_46_94_chunk._ts_meta_count, compress_hyper_46_94_chunk._ts_meta_sequence_num, compress_hyper_46_94_chunk._ts_meta_min_1, compress_hyper_46_94_chunk._ts_meta_max_1 + Sort Key: compress_hyper_46_94_chunk._ts_meta_max_1 DESC + -> Seq Scan on _timescaledb_internal.compress_hyper_46_94_chunk (never executed) + Output: compress_hyper_46_94_chunk."time", compress_hyper_46_94_chunk.sensor_id, compress_hyper_46_94_chunk.cpu, compress_hyper_46_94_chunk.temperature, compress_hyper_46_94_chunk._ts_meta_count, compress_hyper_46_94_chunk._ts_meta_sequence_num, compress_hyper_46_94_chunk._ts_meta_min_1, compress_hyper_46_94_chunk._ts_meta_max_1 + -> Index Scan using _hyper_45_87_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_87_chunk (never executed) + Output: _hyper_45_87_chunk."time", _hyper_45_87_chunk.sensor_id, _hyper_45_87_chunk.cpu, _hyper_45_87_chunk.temperature (77 rows) -- Only the first chunks should be accessed (batch sorted merge is disabled) SET timescaledb.enable_decompression_sorted_merge = FALSE; :PREFIX SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ Limit (actual rows=5 loops=1) Output: sensor_data_compressed."time", sensor_data_compressed.sensor_id, sensor_data_compressed.cpu, sensor_data_compressed.temperature -> Custom Scan (ChunkAppend) on public.sensor_data_compressed (actual rows=5 loops=1) @@ -2575,6 +2573,24 @@ SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5; Startup Exclusion: false Runtime Exclusion: false -> Sort (actual rows=2 loops=1) + Output: _hyper_45_93_chunk."time", _hyper_45_93_chunk.sensor_id, _hyper_45_93_chunk.cpu, _hyper_45_93_chunk.temperature + Sort Key: _hyper_45_93_chunk."time" DESC + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_93_chunk (actual rows=2 loops=1) + Output: _hyper_45_93_chunk."time", _hyper_45_93_chunk.sensor_id, _hyper_45_93_chunk.cpu, _hyper_45_93_chunk.temperature + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_46_100_chunk (actual rows=2 loops=1) + Output: compress_hyper_46_100_chunk."time", compress_hyper_46_100_chunk.sensor_id, compress_hyper_46_100_chunk.cpu, compress_hyper_46_100_chunk.temperature, compress_hyper_46_100_chunk._ts_meta_count, compress_hyper_46_100_chunk._ts_meta_sequence_num, compress_hyper_46_100_chunk._ts_meta_min_1, compress_hyper_46_100_chunk._ts_meta_max_1 + -> Sort (actual rows=2 loops=1) + 
Output: _hyper_45_92_chunk."time", _hyper_45_92_chunk.sensor_id, _hyper_45_92_chunk.cpu, _hyper_45_92_chunk.temperature + Sort Key: _hyper_45_92_chunk."time" DESC + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_92_chunk (actual rows=2 loops=1) + Output: _hyper_45_92_chunk."time", _hyper_45_92_chunk.sensor_id, _hyper_45_92_chunk.cpu, _hyper_45_92_chunk.temperature + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_46_99_chunk (actual rows=2 loops=1) + Output: compress_hyper_46_99_chunk."time", compress_hyper_46_99_chunk.sensor_id, compress_hyper_46_99_chunk.cpu, compress_hyper_46_99_chunk.temperature, compress_hyper_46_99_chunk._ts_meta_count, compress_hyper_46_99_chunk._ts_meta_sequence_num, compress_hyper_46_99_chunk._ts_meta_min_1, compress_hyper_46_99_chunk._ts_meta_max_1 + -> Sort (actual rows=1 loops=1) Output: _hyper_45_91_chunk."time", _hyper_45_91_chunk.sensor_id, _hyper_45_91_chunk.cpu, _hyper_45_91_chunk.temperature Sort Key: _hyper_45_91_chunk."time" DESC Sort Method: quicksort @@ -2583,23 +2599,21 @@ SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5; Bulk Decompression: true -> Seq Scan on _timescaledb_internal.compress_hyper_46_98_chunk (actual rows=2 loops=1) Output: compress_hyper_46_98_chunk."time", compress_hyper_46_98_chunk.sensor_id, compress_hyper_46_98_chunk.cpu, compress_hyper_46_98_chunk.temperature, compress_hyper_46_98_chunk._ts_meta_count, compress_hyper_46_98_chunk._ts_meta_sequence_num, compress_hyper_46_98_chunk._ts_meta_min_1, compress_hyper_46_98_chunk._ts_meta_max_1 - -> Sort (actual rows=2 loops=1) + -> Sort (never executed) Output: _hyper_45_90_chunk."time", _hyper_45_90_chunk.sensor_id, _hyper_45_90_chunk.cpu, _hyper_45_90_chunk.temperature Sort Key: _hyper_45_90_chunk."time" DESC - Sort Method: quicksort - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_90_chunk (actual rows=2 loops=1) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_90_chunk (never executed) Output: _hyper_45_90_chunk."time", _hyper_45_90_chunk.sensor_id, _hyper_45_90_chunk.cpu, _hyper_45_90_chunk.temperature Bulk Decompression: true - -> Seq Scan on _timescaledb_internal.compress_hyper_46_97_chunk (actual rows=2 loops=1) + -> Seq Scan on _timescaledb_internal.compress_hyper_46_97_chunk (never executed) Output: compress_hyper_46_97_chunk."time", compress_hyper_46_97_chunk.sensor_id, compress_hyper_46_97_chunk.cpu, compress_hyper_46_97_chunk.temperature, compress_hyper_46_97_chunk._ts_meta_count, compress_hyper_46_97_chunk._ts_meta_sequence_num, compress_hyper_46_97_chunk._ts_meta_min_1, compress_hyper_46_97_chunk._ts_meta_max_1 - -> Sort (actual rows=1 loops=1) + -> Sort (never executed) Output: _hyper_45_89_chunk."time", _hyper_45_89_chunk.sensor_id, _hyper_45_89_chunk.cpu, _hyper_45_89_chunk.temperature Sort Key: _hyper_45_89_chunk."time" DESC - Sort Method: quicksort - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_89_chunk (actual rows=2 loops=1) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_89_chunk (never executed) Output: _hyper_45_89_chunk."time", _hyper_45_89_chunk.sensor_id, _hyper_45_89_chunk.cpu, _hyper_45_89_chunk.temperature Bulk Decompression: true - -> Seq Scan on _timescaledb_internal.compress_hyper_46_96_chunk (actual rows=2 loops=1) + -> Seq Scan on _timescaledb_internal.compress_hyper_46_96_chunk (never executed) Output: compress_hyper_46_96_chunk."time", compress_hyper_46_96_chunk.sensor_id, 
compress_hyper_46_96_chunk.cpu, compress_hyper_46_96_chunk.temperature, compress_hyper_46_96_chunk._ts_meta_count, compress_hyper_46_96_chunk._ts_meta_sequence_num, compress_hyper_46_96_chunk._ts_meta_min_1, compress_hyper_46_96_chunk._ts_meta_max_1 -> Sort (never executed) Output: _hyper_45_88_chunk."time", _hyper_45_88_chunk.sensor_id, _hyper_45_88_chunk.cpu, _hyper_45_88_chunk.temperature @@ -2609,34 +2623,18 @@ SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5; Bulk Decompression: true -> Seq Scan on _timescaledb_internal.compress_hyper_46_95_chunk (never executed) Output: compress_hyper_46_95_chunk."time", compress_hyper_46_95_chunk.sensor_id, compress_hyper_46_95_chunk.cpu, compress_hyper_46_95_chunk.temperature, compress_hyper_46_95_chunk._ts_meta_count, compress_hyper_46_95_chunk._ts_meta_sequence_num, compress_hyper_46_95_chunk._ts_meta_min_1, compress_hyper_46_95_chunk._ts_meta_max_1 - -> Sort (never executed) - Output: _hyper_45_87_chunk."time", _hyper_45_87_chunk.sensor_id, _hyper_45_87_chunk.cpu, _hyper_45_87_chunk.temperature - Sort Key: _hyper_45_87_chunk."time" DESC - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_87_chunk (never executed) - Output: _hyper_45_87_chunk."time", _hyper_45_87_chunk.sensor_id, _hyper_45_87_chunk.cpu, _hyper_45_87_chunk.temperature - Bulk Decompression: true - -> Seq Scan on _timescaledb_internal.compress_hyper_46_94_chunk (never executed) - Output: compress_hyper_46_94_chunk."time", compress_hyper_46_94_chunk.sensor_id, compress_hyper_46_94_chunk.cpu, compress_hyper_46_94_chunk.temperature, compress_hyper_46_94_chunk._ts_meta_count, compress_hyper_46_94_chunk._ts_meta_sequence_num, compress_hyper_46_94_chunk._ts_meta_min_1, compress_hyper_46_94_chunk._ts_meta_max_1 - -> Sort (never executed) - Output: _hyper_45_86_chunk."time", _hyper_45_86_chunk.sensor_id, _hyper_45_86_chunk.cpu, _hyper_45_86_chunk.temperature - Sort Key: _hyper_45_86_chunk."time" DESC - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_86_chunk (never executed) - Output: _hyper_45_86_chunk."time", _hyper_45_86_chunk.sensor_id, _hyper_45_86_chunk.cpu, _hyper_45_86_chunk.temperature - Bulk Decompression: true - -> Seq Scan on _timescaledb_internal.compress_hyper_46_93_chunk (never executed) - Output: compress_hyper_46_93_chunk."time", compress_hyper_46_93_chunk.sensor_id, compress_hyper_46_93_chunk.cpu, compress_hyper_46_93_chunk.temperature, compress_hyper_46_93_chunk._ts_meta_count, compress_hyper_46_93_chunk._ts_meta_sequence_num, compress_hyper_46_93_chunk._ts_meta_min_1, compress_hyper_46_93_chunk._ts_meta_max_1 -> Merge Append (never executed) - Sort Key: _hyper_45_85_chunk."time" DESC + Sort Key: _hyper_45_87_chunk."time" DESC -> Sort (never executed) - Output: _hyper_45_85_chunk."time", _hyper_45_85_chunk.sensor_id, _hyper_45_85_chunk.cpu, _hyper_45_85_chunk.temperature - Sort Key: _hyper_45_85_chunk."time" DESC - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_85_chunk (never executed) - Output: _hyper_45_85_chunk."time", _hyper_45_85_chunk.sensor_id, _hyper_45_85_chunk.cpu, _hyper_45_85_chunk.temperature + Output: _hyper_45_87_chunk."time", _hyper_45_87_chunk.sensor_id, _hyper_45_87_chunk.cpu, _hyper_45_87_chunk.temperature + Sort Key: _hyper_45_87_chunk."time" DESC + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_87_chunk (never executed) + Output: _hyper_45_87_chunk."time", _hyper_45_87_chunk.sensor_id, _hyper_45_87_chunk.cpu, _hyper_45_87_chunk.temperature Bulk 
Decompression: true - -> Seq Scan on _timescaledb_internal.compress_hyper_46_92_chunk (never executed) - Output: compress_hyper_46_92_chunk."time", compress_hyper_46_92_chunk.sensor_id, compress_hyper_46_92_chunk.cpu, compress_hyper_46_92_chunk.temperature, compress_hyper_46_92_chunk._ts_meta_count, compress_hyper_46_92_chunk._ts_meta_sequence_num, compress_hyper_46_92_chunk._ts_meta_min_1, compress_hyper_46_92_chunk._ts_meta_max_1 - -> Index Scan using _hyper_45_85_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_85_chunk (never executed) - Output: _hyper_45_85_chunk."time", _hyper_45_85_chunk.sensor_id, _hyper_45_85_chunk.cpu, _hyper_45_85_chunk.temperature + -> Seq Scan on _timescaledb_internal.compress_hyper_46_94_chunk (never executed) + Output: compress_hyper_46_94_chunk."time", compress_hyper_46_94_chunk.sensor_id, compress_hyper_46_94_chunk.cpu, compress_hyper_46_94_chunk.temperature, compress_hyper_46_94_chunk._ts_meta_count, compress_hyper_46_94_chunk._ts_meta_sequence_num, compress_hyper_46_94_chunk._ts_meta_min_1, compress_hyper_46_94_chunk._ts_meta_max_1 + -> Index Scan using _hyper_45_87_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_87_chunk (never executed) + Output: _hyper_45_87_chunk."time", _hyper_45_87_chunk.sensor_id, _hyper_45_87_chunk.cpu, _hyper_45_87_chunk.temperature (70 rows) RESET timescaledb.enable_decompression_sorted_merge; @@ -2682,49 +2680,49 @@ SELECT show_chunks('compress_chunk_test') AS "CHUNK" \gset SELECT compress_chunk(:'CHUNK'); compress_chunk ------------------------------------------- - _timescaledb_internal._hyper_47_100_chunk + _timescaledb_internal._hyper_47_102_chunk (1 row) -- subsequent calls will be noop SELECT compress_chunk(:'CHUNK'); -NOTICE: chunk "_hyper_47_100_chunk" is already compressed +NOTICE: chunk "_hyper_47_102_chunk" is already compressed compress_chunk ------------------------------------------- - _timescaledb_internal._hyper_47_100_chunk + _timescaledb_internal._hyper_47_102_chunk (1 row) -- unless if_not_compressed is set to false \set ON_ERROR_STOP 0 SELECT compress_chunk(:'CHUNK', false); -ERROR: chunk "_hyper_47_100_chunk" is already compressed +ERROR: chunk "_hyper_47_102_chunk" is already compressed \set ON_ERROR_STOP 1 ALTER TABLE compress_chunk_test SET (timescaledb.compress_segmentby='device'); SELECT compressed_chunk_id from _timescaledb_catalog.chunk ch INNER JOIN _timescaledb_catalog.hypertable ht ON ht.id = ch.hypertable_id AND ht.table_name='compress_chunk_test'; compressed_chunk_id --------------------- - 101 + 103 (1 row) -- changing compression settings will not recompress the chunk by default SELECT compress_chunk(:'CHUNK'); -NOTICE: chunk "_hyper_47_100_chunk" is already compressed +NOTICE: chunk "_hyper_47_102_chunk" is already compressed compress_chunk ------------------------------------------- - _timescaledb_internal._hyper_47_100_chunk + _timescaledb_internal._hyper_47_102_chunk (1 row) -- unless we specify recompress := true SELECT compress_chunk(:'CHUNK', recompress := true); compress_chunk ------------------------------------------- - _timescaledb_internal._hyper_47_100_chunk + _timescaledb_internal._hyper_47_102_chunk (1 row) -- compressed_chunk_id should be different now SELECT compressed_chunk_id from _timescaledb_catalog.chunk ch INNER JOIN _timescaledb_catalog.hypertable ht ON ht.id = ch.hypertable_id AND ht.table_name='compress_chunk_test'; compressed_chunk_id --------------------- - 102 + 104 (1 row) --test partial handling @@ -2733,14 
+2731,14 @@ INSERT INTO compress_chunk_test SELECT '2020-01-01', 'c3po', 3.14; SELECT compress_chunk(:'CHUNK'); compress_chunk ------------------------------------------- - _timescaledb_internal._hyper_47_100_chunk + _timescaledb_internal._hyper_47_102_chunk (1 row) -- compressed_chunk_id should not have changed SELECT compressed_chunk_id from _timescaledb_catalog.chunk ch INNER JOIN _timescaledb_catalog.hypertable ht ON ht.id = ch.hypertable_id AND ht.table_name='compress_chunk_test'; compressed_chunk_id --------------------- - 102 + 104 (1 row) -- should return no rows @@ -2756,7 +2754,7 @@ SELECT show_chunks('compress_chunk_test') AS "CHUNK2" LIMIT 1 OFFSET 1 \gset SELECT compress_chunk(:'CHUNK2'); compress_chunk ------------------------------------------- - _timescaledb_internal._hyper_47_103_chunk + _timescaledb_internal._hyper_47_105_chunk (1 row) -- make it partial and compress again @@ -2764,7 +2762,7 @@ INSERT INTO compress_chunk_test SELECT '2021-01-01', 'r2d2', 3.14; SELECT compress_chunk(:'CHUNK2'); compress_chunk ------------------------------------------- - _timescaledb_internal._hyper_47_103_chunk + _timescaledb_internal._hyper_47_105_chunk (1 row) -- should return no rows diff --git a/tsl/test/expected/compression_bgw-13.out b/tsl/test/expected/compression_bgw-13.out index 574c2a1d1fd..5e8d8259d92 100644 --- a/tsl/test/expected/compression_bgw-13.out +++ b/tsl/test/expected/compression_bgw-13.out @@ -307,16 +307,6 @@ SELECT COUNT(*) AS dropped_chunks_count 14 (1 row) --- We need to have some chunks that are marked as dropped, otherwise --- we will not have a problem below. -SELECT COUNT(*) AS dropped_chunks_count - FROM _timescaledb_catalog.chunk - WHERE dropped = TRUE; - dropped_chunks_count ----------------------- - 14 -(1 row) - SELECT count(*) FROM timescaledb_information.chunks WHERE hypertable_name = 'conditions' and is_compressed = true; count diff --git a/tsl/test/expected/compression_bgw-14.out b/tsl/test/expected/compression_bgw-14.out index b4435ebec3f..cda01bde1c4 100644 --- a/tsl/test/expected/compression_bgw-14.out +++ b/tsl/test/expected/compression_bgw-14.out @@ -307,16 +307,6 @@ SELECT COUNT(*) AS dropped_chunks_count 14 (1 row) --- We need to have some chunks that are marked as dropped, otherwise --- we will not have a problem below. -SELECT COUNT(*) AS dropped_chunks_count - FROM _timescaledb_catalog.chunk - WHERE dropped = TRUE; - dropped_chunks_count ----------------------- - 14 -(1 row) - SELECT count(*) FROM timescaledb_information.chunks WHERE hypertable_name = 'conditions' and is_compressed = true; count diff --git a/tsl/test/expected/compression_bgw-15.out b/tsl/test/expected/compression_bgw-15.out index b4435ebec3f..cda01bde1c4 100644 --- a/tsl/test/expected/compression_bgw-15.out +++ b/tsl/test/expected/compression_bgw-15.out @@ -307,16 +307,6 @@ SELECT COUNT(*) AS dropped_chunks_count 14 (1 row) --- We need to have some chunks that are marked as dropped, otherwise --- we will not have a problem below. 
-SELECT COUNT(*) AS dropped_chunks_count - FROM _timescaledb_catalog.chunk - WHERE dropped = TRUE; - dropped_chunks_count ----------------------- - 14 -(1 row) - SELECT count(*) FROM timescaledb_information.chunks WHERE hypertable_name = 'conditions' and is_compressed = true; count diff --git a/tsl/test/expected/compression_bgw-16.out b/tsl/test/expected/compression_bgw-16.out index b4435ebec3f..cda01bde1c4 100644 --- a/tsl/test/expected/compression_bgw-16.out +++ b/tsl/test/expected/compression_bgw-16.out @@ -307,16 +307,6 @@ SELECT COUNT(*) AS dropped_chunks_count 14 (1 row) --- We need to have some chunks that are marked as dropped, otherwise --- we will not have a problem below. -SELECT COUNT(*) AS dropped_chunks_count - FROM _timescaledb_catalog.chunk - WHERE dropped = TRUE; - dropped_chunks_count ----------------------- - 14 -(1 row) - SELECT count(*) FROM timescaledb_information.chunks WHERE hypertable_name = 'conditions' and is_compressed = true; count diff --git a/tsl/test/sql/cagg_ddl.sql.in b/tsl/test/sql/cagg_ddl.sql.in index fccd1cc6ed1..531e725b0e3 100644 --- a/tsl/test/sql/cagg_ddl.sql.in +++ b/tsl/test/sql/cagg_ddl.sql.in @@ -378,8 +378,8 @@ SELECT drop_chunks('drop_chunks_table', older_than => (integer_now_test2()-9)); SELECT * FROM drop_chunks_view ORDER BY time_bucket DESC; --earliest datapoint now in table SELECT * FROM drop_chunks_table ORDER BY time ASC limit 1; ---we see the chunks row with the dropped flags set; -SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk where dropped; +--chunks are removed +SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk WHERE dropped; --still see data in the view SELECT * FROM drop_chunks_view WHERE time_bucket < (integer_now_test2()-9) ORDER BY time_bucket DESC; --no data but covers dropped chunks diff --git a/tsl/test/sql/cagg_usage.sql.in b/tsl/test/sql/cagg_usage.sql.in index 0e87b5bee56..dd6c9f1f1cf 100644 --- a/tsl/test/sql/cagg_usage.sql.in +++ b/tsl/test/sql/cagg_usage.sql.in @@ -310,3 +310,66 @@ SELECT * FROM cagg3; CREATE MATERIALIZED VIEW cagg4 WITH (timescaledb.continuous,timescaledb.materialized_only=true) AS SELECT time_bucket('1 month', time, 'PST8PDT', "offset":= INTERVAL '15 day') FROM metrics GROUP BY 1; \set ON_ERROR_STOP 1 +-- +-- drop chunks tests +-- + +-- should return 4 chunks +SELECT + c.table_name as chunk_name, + c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id +FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c +WHERE h.id = c.hypertable_id and h.table_name = 'metrics' +ORDER BY 1; + +-- all caggs in the new format (finalized=true) +SELECT user_view_name, finalized FROM _timescaledb_catalog.continuous_agg WHERE user_view_name in ('cagg1', 'cagg2', 'cagg3') ORDER BY 1; + +-- dropping chunk should also remove the catalog data +SELECT drop_chunks('metrics', older_than => '2000-01-01 00:00:00-02'::timestamptz); + +-- should return 3 chunks +SELECT + c.table_name as chunk_name, + c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id +FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c +WHERE h.id = c.hypertable_id AND h.table_name = 'metrics' +ORDER BY 1; + +-- let's update the catalog to fake an old format cagg (finalized=false) +\c :TEST_DBNAME :ROLE_SUPERUSER +UPDATE _timescaledb_catalog.continuous_agg SET finalized=FALSE WHERE user_view_name = 'cagg1'; +\c :TEST_DBNAME 
:ROLE_DEFAULT_PERM_USER + +-- cagg1 now is a fake old format (finalized=false) +SELECT user_view_name, finalized FROM _timescaledb_catalog.continuous_agg WHERE user_view_name in ('cagg1', 'cagg2', 'cagg3') ORDER BY 1; + +-- cagg1 now is in the old format (finalized=false) +-- dropping chunk should NOT remove the catalog data +SELECT drop_chunks('metrics', older_than => '2000-01-13 00:00:00-02'::timestamptz); + +-- should return 3 chunks and one of them should be marked as dropped +SELECT + c.table_name as chunk_name, + c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id +FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c +WHERE h.id = c.hypertable_id and h.table_name = 'metrics' +ORDER BY 1; + +-- remove the fake old format cagg +DROP MATERIALIZED VIEW cagg1; + +-- no more old format caggs (finalized=false) +SELECT user_view_name, finalized FROM _timescaledb_catalog.continuous_agg WHERE user_view_name in ('cagg1', 'cagg2', 'cagg3') ORDER BY 1; + +-- dropping chunk should remove the catalog data +SELECT drop_chunks('metrics', older_than => '2000-01-25 00:00:00-02'::timestamptz); + +-- should return 2 chunks and one of them should be marked as dropped +-- because we dropped chunk before when an old format cagg exists +SELECT + c.table_name as chunk_name, + c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id +FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c +WHERE h.id = c.hypertable_id and h.table_name = 'metrics' +ORDER BY 1; diff --git a/tsl/test/sql/compression_bgw.sql.in b/tsl/test/sql/compression_bgw.sql.in index 0c57505dc8c..a4f8986991a 100644 --- a/tsl/test/sql/compression_bgw.sql.in +++ b/tsl/test/sql/compression_bgw.sql.in @@ -193,12 +193,6 @@ ALTER TABLE conditions SET (timescaledb.compress); SELECT COUNT(*) AS dropped_chunks_count FROM drop_chunks('conditions', TIMESTAMPTZ '2018-12-15 00:00'); --- We need to have some chunks that are marked as dropped, otherwise --- we will not have a problem below. -SELECT COUNT(*) AS dropped_chunks_count - FROM _timescaledb_catalog.chunk - WHERE dropped = TRUE; - SELECT count(*) FROM timescaledb_information.chunks WHERE hypertable_name = 'conditions' and is_compressed = true;