README: restructure — function reference first, simple to complex usage #166
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# CI workflow: runs the pg_ash SQL test suite against multiple PostgreSQL
# major versions using the official postgres service container.
name: Test pg_ash
on:
  push:
    branches: [main]
  pull_request:
    branches: [main]
jobs:
  test:
    runs-on: ubuntu-latest
    strategy:
      # Keep testing the remaining PostgreSQL versions even if one fails.
      fail-fast: false
      matrix:
        postgres: [14, 15, 16, 17]
    services:
      postgres:
        image: postgres:${{ matrix.postgres }}
        env:
          POSTGRES_PASSWORD: postgres
          POSTGRES_DB: postgres
        # Gate job start on the server actually accepting connections.
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
        ports:
          - 5432:5432
    steps:
      - uses: actions/checkout@v4
| - name: Install pg_cron | |
| run: | | |
| sudo apt-get update | |
| sudo apt-get install -y postgresql-${{ matrix.postgres }}-cron || echo "pg_cron package not available" | |
      - name: Configure pg_cron in container
        # Append pg_cron settings to postgresql.conf inside the service
        # container, then restart it: shared_preload_libraries only takes
        # effect after a full server restart.
        # NOTE(review): the config is only written if pg_cron.control exists
        # in the container image — confirm the install step actually places
        # it there; otherwise this whole step is a no-op plus a restart.
        run: |
          docker exec ${{ job.services.postgres.id }} bash -c "
            if [ -f /usr/share/postgresql/${{ matrix.postgres }}/extension/pg_cron.control ]; then
              echo \"shared_preload_libraries = 'pg_cron'\" >> /var/lib/postgresql/data/postgresql.conf
              echo \"cron.database_name = 'postgres'\" >> /var/lib/postgresql/data/postgresql.conf
            fi
          " || true
          # Restart is tolerated to fail (|| true): pg_cron is optional here.
          docker restart ${{ job.services.postgres.id }} || true
          sleep 5
| - name: Wait for PostgreSQL | |
| run: | | |
| for i in {1..30}; do | |
| if pg_isready -h localhost -p 5432 -U postgres; then break; fi | |
| sleep 1 | |
| done | |
      - name: Create pg_cron extension
        # Best-effort: succeeds only if the preceding install/configure
        # steps actually made pg_cron loadable in the container.
        env:
          PGPASSWORD: postgres
        run: |
          psql -h localhost -U postgres -d postgres -c "CREATE EXTENSION IF NOT EXISTS pg_cron;" || echo "pg_cron not available"
      - name: Install pg_ash (fresh install)
        # ON_ERROR_STOP=1 makes psql exit non-zero on the first SQL error,
        # failing the job instead of silently continuing.
        env:
          PGPASSWORD: postgres
        run: |
          psql -h localhost -U postgres -d postgres -v ON_ERROR_STOP=1 -f sql/ash-install.sql
      - name: Test schema and infrastructure
        # Smoke-tests the fresh install: schema, config row, dictionary and
        # sample tables, their three rotation partitions, the union view,
        # and the core function surface. Pure catalog assertions — no data.
        env:
          PGPASSWORD: postgres
        run: |
          psql -h localhost -U postgres -d postgres -v ON_ERROR_STOP=1 << 'EOF'
          DO $$
          BEGIN
            -- Schema exists
            ASSERT EXISTS (SELECT FROM pg_namespace WHERE nspname = 'ash'),
              'ash schema missing';
            -- Config table initialized with version (single row guarded by "singleton")
            ASSERT (SELECT count(*) FROM ash.config) = 1,
              'config not initialized';
            ASSERT (SELECT version FROM ash.config WHERE singleton) = '1.2',
              'version should be 1.2 for fresh install';
            -- Core tables exist
            ASSERT EXISTS (SELECT FROM pg_tables WHERE schemaname = 'ash' AND tablename = 'wait_event_map'),
              'wait_event_map missing';
            ASSERT EXISTS (SELECT FROM pg_tables WHERE schemaname = 'ash' AND tablename = 'sample'),
              'sample missing';
            -- Partitioned query_map tables (one per rotation slot 0..2)
            ASSERT EXISTS (SELECT FROM pg_tables WHERE schemaname = 'ash' AND tablename = 'query_map_0'),
              'query_map_0 missing';
            ASSERT EXISTS (SELECT FROM pg_tables WHERE schemaname = 'ash' AND tablename = 'query_map_1'),
              'query_map_1 missing';
            ASSERT EXISTS (SELECT FROM pg_tables WHERE schemaname = 'ash' AND tablename = 'query_map_2'),
              'query_map_2 missing';
            -- Sample partitions (one per rotation slot 0..2)
            ASSERT EXISTS (SELECT FROM pg_tables WHERE schemaname = 'ash' AND tablename = 'sample_0'),
              'sample_0 missing';
            ASSERT EXISTS (SELECT FROM pg_tables WHERE schemaname = 'ash' AND tablename = 'sample_1'),
              'sample_1 missing';
            ASSERT EXISTS (SELECT FROM pg_tables WHERE schemaname = 'ash' AND tablename = 'sample_2'),
              'sample_2 missing';
            -- query_map_all view (union over the three partitions)
            ASSERT EXISTS (SELECT FROM pg_views WHERE schemaname = 'ash' AND viewname = 'query_map_all'),
              'query_map_all view missing';
            -- Core functions exist
            ASSERT EXISTS (SELECT FROM pg_proc WHERE proname = 'epoch' AND pronamespace = 'ash'::regnamespace),
              'epoch() missing';
            ASSERT EXISTS (SELECT FROM pg_proc WHERE proname = 'current_slot' AND pronamespace = 'ash'::regnamespace),
              'current_slot() missing';
            ASSERT EXISTS (SELECT FROM pg_proc WHERE proname = 'take_sample' AND pronamespace = 'ash'::regnamespace),
              'take_sample() missing';
            ASSERT EXISTS (SELECT FROM pg_proc WHERE proname = 'rotate' AND pronamespace = 'ash'::regnamespace),
              'rotate() missing';
            ASSERT EXISTS (SELECT FROM pg_proc WHERE proname = 'decode_sample' AND pronamespace = 'ash'::regnamespace),
              'decode_sample() missing';
            ASSERT EXISTS (SELECT FROM pg_proc WHERE proname = '_wait_color' AND pronamespace = 'ash'::regnamespace),
              '_wait_color() missing';
            RAISE NOTICE 'Schema and infrastructure tests PASSED';
          END;
          $$;
          EOF
      - name: Test sampler and decoder
        # Exercises take_sample(), the _validate_data() format checker, and a
        # decode_sample() round-trip using hand-built dictionary entries.
        # Data format (inferred from the asserts below): a flat int array of
        # groups [-wait_id, count, qid_1..qid_count, -wait_id2, ...].
        env:
          PGPASSWORD: postgres
        run: |
          psql -h localhost -U postgres -d postgres -v ON_ERROR_STOP=1 << 'EOF'
          -- Test take_sample runs without error
          SELECT ash.take_sample();
          -- Test _validate_data
          DO $$
          BEGIN
            ASSERT ash._validate_data('{-1,2,5,6}'::integer[]) = true,
              'valid data rejected';
            ASSERT ash._validate_data('{-1,3,5,6}'::integer[]) = false,
              'invalid count not detected';
            ASSERT ash._validate_data('{1,-1,2,5,6}'::integer[]) = false,
              'positive first element not detected';
            ASSERT ash._validate_data(null) = false,
              'null data not detected';
            ASSERT ash._validate_data('{}'::integer[]) = false,
              'empty array not detected';
            RAISE NOTICE 'Validation tests PASSED';
          END;
          $$;
          -- Test decode_sample round-trip
          DO $$
          DECLARE
            v_wait_id smallint;
            v_qid_id int4;
            v_result record;
            v_count int;
          BEGIN
            -- Insert test dictionary entries (idempotent via ON CONFLICT)
            INSERT INTO ash.wait_event_map (state, type, event)
            VALUES ('active', 'CPU*', 'CPU*') ON CONFLICT DO NOTHING;
            INSERT INTO ash.wait_event_map (state, type, event)
            VALUES ('active', 'IO', 'DataFileRead') ON CONFLICT DO NOTHING;
            SELECT id INTO v_wait_id FROM ash.wait_event_map
            WHERE state = 'active' AND type = 'CPU*' AND event = 'CPU*';
            INSERT INTO ash.query_map_0 (query_id) VALUES (12345) ON CONFLICT DO NOTHING;
            SELECT id INTO v_qid_id FROM ash.query_map_0 WHERE query_id = 12345;
            -- Decode: 2 backends on CPU*, with query_ids
            SELECT count(*) INTO v_count
            FROM ash.decode_sample(ARRAY[-v_wait_id, 2, v_qid_id, 0]::integer[]);
            ASSERT v_count = 2, 'decode_sample wrong count: ' || v_count;
            -- Verify decoded fields: 2 backends = 2 rows
            SELECT * INTO v_result
            FROM ash.decode_sample(ARRAY[-v_wait_id, 2, v_qid_id, 0]::integer[], 0::smallint)
            LIMIT 1;
            ASSERT v_result.wait_event = 'CPU*', 'wrong wait_event: ' || v_result.wait_event;
            ASSERT v_result.query_id = 12345, 'wrong query_id: ' || v_result.query_id;
            RAISE NOTICE 'Decoder tests PASSED';
          END;
          $$;
          EOF
      - name: Test rotation
        # Verifies ash.rotate(): the current slot advances (mod 3), the
        # slot-after-next is truncated in both sample_* and query_map_*,
        # and the truncated query_map identity sequence restarts at 1.
        # Rotation is forced by backdating config.rotated_at.
        env:
          PGPASSWORD: postgres
        run: |
          psql -h localhost -U postgres -d postgres -v ON_ERROR_STOP=1 << 'EOF'
          DO $$
          DECLARE
            v_slot_before smallint;
            v_slot_after smallint;
            v_target_slot smallint;
            v_count int;
            v_next_id int;
          BEGIN
            SELECT current_slot INTO v_slot_before FROM ash.config;
            -- Insert data into current partition
            INSERT INTO ash.sample (sample_ts, datid, active_count, data)
            VALUES (1000, 1, 2, '{-1,2,1,1}');
            -- new_slot = (old + 1) % 3, truncate_slot = (new_slot + 1) % 3
            v_target_slot := ((v_slot_before + 1) % 3)::smallint;
            -- Nested block: truncate-slot is only known at runtime, so all
            -- partition access below goes through dynamic SQL (format/EXECUTE).
            DECLARE
              v_truncate_slot smallint := ((v_target_slot + 1) % 3)::smallint;
            BEGIN
              -- Seed the partition that will be TRUNCATED
              EXECUTE format(
                'INSERT INTO ash.sample_%s (sample_ts, datid, active_count, data, slot) VALUES (999, 1, 1, ''{-1,1,1}'', %s)',
                v_truncate_slot, v_truncate_slot
              );
              EXECUTE format(
                'INSERT INTO ash.query_map_%s (query_id) VALUES (99999)',
                v_truncate_slot
              );
              -- Verify data exists before rotation
              EXECUTE format('SELECT count(*) FROM ash.sample_%s', v_truncate_slot)
              INTO v_count;
              ASSERT v_count > 0,
                'sample_' || v_truncate_slot || ' should have data before rotation';
              -- Force rotation (config is a single-row table; WHERE-less UPDATE intended)
              UPDATE ash.config SET rotated_at = now() - interval '2 days';
              PERFORM ash.rotate();
              SELECT current_slot INTO v_slot_after FROM ash.config;
              ASSERT v_slot_after = v_target_slot,
                'slot should advance to ' || v_target_slot || ', got ' || v_slot_after;
              -- Verify TRUNCATION: truncated sample partition should be empty
              EXECUTE format('SELECT count(*) FROM ash.sample_%s', v_truncate_slot)
              INTO v_count;
              ASSERT v_count = 0,
                'sample_' || v_truncate_slot || ' should be empty after rotation, has ' || v_count || ' rows';
              -- Verify TRUNCATION: truncated query_map partition should be empty
              EXECUTE format('SELECT count(*) FROM ash.query_map_%s', v_truncate_slot)
              INTO v_count;
              ASSERT v_count = 0,
                'query_map_' || v_truncate_slot || ' should be empty after rotation, has ' || v_count || ' rows';
              -- Verify identity sequence was reset: next insert should get a low id
              EXECUTE format(
                'INSERT INTO ash.query_map_%s (query_id) VALUES (88888) RETURNING id',
                v_truncate_slot
              ) INTO v_next_id;
              ASSERT v_next_id = 1,
                'query_map_' || v_truncate_slot || ' identity should restart at 1, got ' || v_next_id;
            END;
            RAISE NOTICE 'Rotation tests PASSED (truncation + sequence reset verified)';
          END;
          $$;
          EOF
      - name: Test start/stop
        # Runs start()/stop() end-to-end (must not error even without
        # pg_cron) and asserts the lifecycle functions exist in the catalog.
        env:
          PGPASSWORD: postgres
        run: |
          psql -h localhost -U postgres -d postgres -v ON_ERROR_STOP=1 << 'EOF'
          SELECT * FROM ash.start();
          SELECT * FROM ash.stop();
          DO $$
          BEGIN
            ASSERT EXISTS (SELECT FROM pg_proc WHERE proname = 'start' AND pronamespace = 'ash'::regnamespace);
            ASSERT EXISTS (SELECT FROM pg_proc WHERE proname = 'stop' AND pronamespace = 'ash'::regnamespace);
            ASSERT EXISTS (SELECT FROM pg_proc WHERE proname = 'uninstall' AND pronamespace = 'ash'::regnamespace);
            RAISE NOTICE 'Start/stop tests PASSED';
          END;
          $$;
          EOF
| - name: Seed test data | |
| env: | |
| PGPASSWORD: postgres | |
| run: | | |
| psql -h localhost -U postgres -d postgres -v ON_ERROR_STOP=1 << 'EOF' | |
| -- Reset to known state | |
| UPDATE ash.config SET current_slot = 0; | |
| TRUNCATE ash.sample_0, ash.sample_1, ash.sample_2; | |
| TRUNCATE ash.query_map_0, ash.query_map_1, ash.query_map_2; | |
| ALTER TABLE ash.query_map_0 ALTER COLUMN id RESTART; | |
| ALTER TABLE ash.query_map_1 ALTER COLUMN id RESTART; | |
| ALTER TABLE ash.query_map_2 ALTER COLUMN id RESTART; | |
| -- Seed dictionaries | |
| INSERT INTO ash.wait_event_map (state, type, event) VALUES | |
| ('active', 'CPU*', 'CPU*'), | |
| ('active', 'IO', 'DataFileRead'), | |
| ('active', 'Lock', 'tuple'), | |
| ('active', 'LWLock', 'WALWrite'), | |
| ('active', 'Client', 'ClientRead'), | |
| ('active', 'Extension', 'DblinkGetResult'), | |
| ('idle in transaction', 'IdleTx', 'IdleTx') | |
| ON CONFLICT DO NOTHING; | |
| INSERT INTO ash.query_map_0 (query_id) VALUES (111111), (222222), (333333) | |
| ON CONFLICT DO NOTHING; | |
| -- Seed realistic sample data across multiple timestamps | |
| -- Now - 30min, 20min, 10min, 5min, 1min — each with multiple wait events | |
| DO $$ | |
| DECLARE | |
| v_cpu_id smallint; | |
| v_io_id smallint; | |
| v_lock_id smallint; | |
| v_lwlock_id smallint; | |
| v_client_id smallint; | |
| v_ext_id smallint; | |
| v_q1 int4; | |
| v_q2 int4; | |
| v_q3 int4; | |
| v_now_ts int4; | |
| v_ts int4; | |
| BEGIN | |
| SELECT id INTO v_cpu_id FROM ash.wait_event_map WHERE type = 'CPU*' AND event = 'CPU*'; | |
| SELECT id INTO v_io_id FROM ash.wait_event_map WHERE type = 'IO' AND event = 'DataFileRead'; | |
| SELECT id INTO v_lock_id FROM ash.wait_event_map WHERE type = 'Lock' AND event = 'tuple'; | |
| SELECT id INTO v_lwlock_id FROM ash.wait_event_map WHERE type = 'LWLock' AND event = 'WALWrite'; | |
| SELECT id INTO v_client_id FROM ash.wait_event_map WHERE type = 'Client' AND event = 'ClientRead'; | |
| SELECT id INTO v_ext_id FROM ash.wait_event_map WHERE type = 'Extension' AND event = 'DblinkGetResult'; | |
| SELECT id INTO v_q1 FROM ash.query_map_0 WHERE query_id = 111111; | |
| SELECT id INTO v_q2 FROM ash.query_map_0 WHERE query_id = 222222; | |
| SELECT id INTO v_q3 FROM ash.query_map_0 WHERE query_id = 333333; | |
| v_now_ts := extract(epoch FROM now() - ash.epoch())::int4; | |
| -- Generate 60 samples across 30 minutes (every 30 seconds) | |
| FOR v_ts IN | |
| SELECT generate_series(v_now_ts - 1800, v_now_ts - 30, 30) | |
| LOOP | |
| -- Mix of CPU, IO, Lock — varies by position in timeline | |
| -- First half: mostly CPU + IO | |
| IF v_ts < v_now_ts - 900 THEN | |
| INSERT INTO ash.sample (sample_ts, datid, active_count, data, slot) | |
| VALUES (v_ts, 1, 5, | |
| ARRAY[-v_cpu_id, 3, v_q1, v_q2, v_q3, | |
| -v_io_id, 2, v_q1, v_q2]::integer[], 0); | |
| -- Second half: add Lock spike | |
| ELSE | |
| INSERT INTO ash.sample (sample_ts, datid, active_count, data, slot) | |
| VALUES (v_ts, 1, 8, | |
| ARRAY[-v_cpu_id, 2, v_q1, v_q2, | |
| -v_lock_id, 4, v_q1, v_q1, v_q2, v_q3, | |
| -v_io_id, 1, v_q3, | |
| -v_client_id, 1, v_q2]::integer[], 0); | |
| END IF; | |
| END LOOP; | |
| RAISE NOTICE 'Seeded 60 test samples across 30 minutes'; | |
| END; | |
| $$; | |
| EOF | |
      - name: Test reader functions (relative time)
        # Smoke-tests every interval-based reader against the seeded data;
        # asserts only non-emptiness, not exact contents.
        env:
          PGPASSWORD: postgres
        run: |
          psql -h localhost -U postgres -d postgres -v ON_ERROR_STOP=1 << 'EOF'
          DO $$
          DECLARE
            v_count int;
            v_rec record;
          BEGIN
            -- top_waits: should return rows with bar column
            SELECT count(*) INTO v_count FROM ash.top_waits('1 hour');
            ASSERT v_count > 0, 'top_waits returned 0 rows';
            -- Verify bar column exists in top_waits
            SELECT * INTO v_rec FROM ash.top_waits('1 hour') LIMIT 1;
            ASSERT v_rec.bar IS NOT NULL, 'top_waits bar column is null';
            -- top_queries: should find our seeded queries
            SELECT count(*) INTO v_count FROM ash.top_queries('1 hour');
            ASSERT v_count > 0, 'top_queries returned 0 rows';
            -- top_by_type: should have CPU* and IO at minimum
            SELECT count(*) INTO v_count FROM ash.top_by_type('1 hour');
            ASSERT v_count >= 2, 'top_by_type returned fewer than 2 types';
            -- wait_timeline
            SELECT count(*) INTO v_count FROM ash.wait_timeline('1 hour', '5 minutes');
            ASSERT v_count > 0, 'wait_timeline returned 0 rows';
            -- samples_by_database
            SELECT count(*) INTO v_count FROM ash.samples_by_database('1 hour');
            ASSERT v_count > 0, 'samples_by_database returned 0 rows';
            -- samples: decoded raw data
            SELECT count(*) INTO v_count FROM ash.samples('1 hour', 20);
            ASSERT v_count > 0, 'samples returned 0 rows';
            -- activity_summary
            SELECT count(*) INTO v_count FROM ash.activity_summary('1 hour');
            ASSERT v_count > 0, 'activity_summary returned 0 rows';
            -- status
            SELECT count(*) INTO v_count FROM ash.status();
            ASSERT v_count > 0, 'status returned 0 rows';
            RAISE NOTICE 'Relative-time reader tests PASSED';
          END;
          $$;
          EOF
      - name: Test reader functions (absolute time)
        # Same readers via their *_at(start, end) timestamptz variants.
        env:
          PGPASSWORD: postgres
        run: |
          psql -h localhost -U postgres -d postgres -v ON_ERROR_STOP=1 << 'EOF'
          DO $$
          DECLARE
            v_count int;
            v_start timestamptz := now() - interval '1 hour';
            v_end timestamptz := now();
          BEGIN
            SELECT count(*) INTO v_count FROM ash.top_waits_at(v_start, v_end);
            ASSERT v_count > 0, 'top_waits_at returned 0 rows';
            SELECT count(*) INTO v_count FROM ash.top_queries_at(v_start, v_end);
            ASSERT v_count > 0, 'top_queries_at returned 0 rows';
            SELECT count(*) INTO v_count FROM ash.top_by_type_at(v_start, v_end);
            ASSERT v_count >= 2, 'top_by_type_at returned fewer than 2 types';
            SELECT count(*) INTO v_count FROM ash.wait_timeline_at(v_start, v_end, '5 minutes');
            ASSERT v_count > 0, 'wait_timeline_at returned 0 rows';
            SELECT count(*) INTO v_count FROM ash.samples_at(v_start, v_end, 20);
            ASSERT v_count > 0, 'samples_at returned 0 rows';
            RAISE NOTICE 'Absolute-time reader tests PASSED';
          END;
          $$;
          EOF
      - name: Test query-specific functions
        # Per-query drilldowns; 111111 is one of the seeded query_ids.
        env:
          PGPASSWORD: postgres
        run: |
          psql -h localhost -U postgres -d postgres -v ON_ERROR_STOP=1 << 'EOF'
          DO $$
          DECLARE
            v_count int;
            v_start timestamptz := now() - interval '1 hour';
            v_end timestamptz := now();
          BEGIN
            -- query_waits for a known query
            SELECT count(*) INTO v_count FROM ash.query_waits(111111, '1 hour');
            ASSERT v_count > 0, 'query_waits returned 0 rows for query 111111';
            -- query_waits_at
            SELECT count(*) INTO v_count FROM ash.query_waits_at(111111, v_start, v_end);
            ASSERT v_count > 0, 'query_waits_at returned 0 rows';
            -- top_queries_with_text (may return fewer if pg_stat_statements not loaded)
            SELECT count(*) INTO v_count FROM ash.top_queries_with_text('1 hour');
            -- Don't assert > 0 because pgss may not be available
            RAISE NOTICE 'Query-specific function tests PASSED';
          END;
          $$;
          EOF
      - name: Test timeline_chart functions
        # timeline_chart(interval, bucket, top_n, width) renders an ASCII
        # chart: row 1 is a legend (NULL bucket_start), the rest are data
        # rows with chart/detail columns. Both relative and _at variants.
        env:
          PGPASSWORD: postgres
        run: |
          psql -h localhost -U postgres -d postgres -v ON_ERROR_STOP=1 << 'EOF'
          DO $$
          DECLARE
            v_count int;
            v_rec record;
            v_start timestamptz := now() - interval '1 hour';
            v_end timestamptz := now();
          BEGIN
            -- timeline_chart: relative time
            SELECT count(*) INTO v_count FROM ash.timeline_chart('1 hour', '5 minutes', 3, 40);
            ASSERT v_count > 1, 'timeline_chart returned too few rows: ' || v_count;
            -- First row should be legend (null bucket_start)
            SELECT * INTO v_rec FROM ash.timeline_chart('1 hour', '5 minutes', 3, 40) LIMIT 1;
            ASSERT v_rec.bucket_start IS NULL, 'first row should be legend (null bucket_start)';
            ASSERT v_rec.chart IS NOT NULL AND v_rec.chart <> '', 'legend chart is empty';
            -- Data rows should have bucket_start and chart; find one with samples
            SELECT * INTO v_rec FROM ash.timeline_chart('1 hour', '5 minutes', 3, 40)
            WHERE bucket_start IS NOT NULL AND active > 0 LIMIT 1;
            ASSERT v_rec.bucket_start IS NOT NULL, 'no data row with active > 0';
            ASSERT v_rec.chart IS NOT NULL, 'data row has null chart';
            -- timeline_chart_at: absolute time
            SELECT count(*) INTO v_count FROM ash.timeline_chart_at(v_start, v_end, '5 minutes', 3, 40);
            ASSERT v_count > 1, 'timeline_chart_at returned too few rows: ' || v_count;
            -- First row should be legend
            SELECT * INTO v_rec FROM ash.timeline_chart_at(v_start, v_end, '5 minutes', 3, 40) LIMIT 1;
            ASSERT v_rec.bucket_start IS NULL, 'at: first row should be legend';
            ASSERT v_rec.chart IS NOT NULL AND v_rec.chart <> '', 'at: legend chart is empty';
            -- detail column should exist and have values on rows with samples
            SELECT * INTO v_rec FROM ash.timeline_chart_at(v_start, v_end, '5 minutes', 3, 40)
            WHERE bucket_start IS NOT NULL AND active > 0 LIMIT 1;
            ASSERT v_rec.detail IS NOT NULL, 'detail column is null on data row';
            RAISE NOTICE 'Timeline chart tests PASSED';
          END;
          $$;
          EOF
      - name: Test _wait_color helper
        # Pins the exact 24-bit ANSI color escape per wait-event class.
        # Expected values are truecolor SGR sequences: ESC[38;2;R;G;Bm.
        env:
          PGPASSWORD: postgres
        run: |
          psql -h localhost -U postgres -d postgres -v ON_ERROR_STOP=1 << 'EOF'
          DO $$
          DECLARE
            v_green text := E'\033[38;2;080;250;123m';
            v_light_yellow text := E'\033[38;2;241;250;140m';
            v_blue text := E'\033[38;2;030;100;255m';
            v_red text := E'\033[38;2;255;085;085m';
            v_pink text := E'\033[38;2;255;121;198m';
            v_cyan text := E'\033[38;2;000;200;255m';
            v_yellow text := E'\033[38;2;255;220;100m';
            v_orange text := E'\033[38;2;255;165;000m';
            v_teal text := E'\033[38;2;000;210;180m';
            v_purple text := E'\033[38;2;150;100;255m';
            v_light_purple text := E'\033[38;2;190;150;255m';
            v_gray text := E'\033[38;2;180;180;180m';
          BEGIN
            ASSERT ash._wait_color('CPU*', true) = v_green, 'CPU* should be green';
            ASSERT ash._wait_color('IdleTx', true) = v_light_yellow, 'IdleTx should be light yellow';
            ASSERT ash._wait_color('IO:DataFileRead', true) = v_blue, 'IO should be blue';
            ASSERT ash._wait_color('IO:WALWrite', true) = v_blue, 'IO:WALWrite should be blue';
            ASSERT ash._wait_color('Lock:tuple', true) = v_red, 'Lock should be red';
            ASSERT ash._wait_color('Lock:transactionid', true) = v_red, 'Lock:transactionid should be red';
            ASSERT ash._wait_color('LWLock:WALWrite', true) = v_pink, 'LWLock should be pink';
            ASSERT ash._wait_color('IPC:BgWorkerStartup', true) = v_cyan, 'IPC should be cyan';
            ASSERT ash._wait_color('Client:ClientRead', true) = v_yellow, 'Client should be yellow';
            ASSERT ash._wait_color('Timeout:PgSleep', true) = v_orange, 'Timeout should be orange';
            ASSERT ash._wait_color('BufferPin:BufferPin', true) = v_teal, 'BufferPin should be teal';
            ASSERT ash._wait_color('Activity:ArchiverMain', true) = v_purple, 'Activity should be purple';
            ASSERT ash._wait_color('Extension:DblinkGetResult', true) = v_light_purple, 'Extension should be light purple';
            ASSERT ash._wait_color('UnknownType:Whatever', true) = v_gray, 'unknown should be gray';
            -- color=false returns empty string
            ASSERT ash._wait_color('CPU*', false) = '', 'color=false should return empty';
            ASSERT ash._wait_color('CPU*') = '', 'default color should be off';
            RAISE NOTICE '_wait_color tests PASSED';
          END;
          $$;
          EOF
      - name: Test timeline_chart with different parameters
        # Parameter sweep: top_n, bucket size, and chart width must all
        # produce output without error (exact contents not asserted).
        env:
          PGPASSWORD: postgres
        run: |
          psql -h localhost -U postgres -d postgres -v ON_ERROR_STOP=1 << 'EOF'
          DO $$
          DECLARE
            v_count int;
          BEGIN
            -- top=1 (only top event + Other)
            SELECT count(*) INTO v_count FROM ash.timeline_chart('1 hour', '5 minutes', 1, 30);
            ASSERT v_count > 1, 'top=1 should still return rows';
            -- top=5
            SELECT count(*) INTO v_count FROM ash.timeline_chart('1 hour', '5 minutes', 5, 60);
            ASSERT v_count > 1, 'top=5 should still return rows';
            -- Small bucket (1 minute)
            SELECT count(*) INTO v_count FROM ash.timeline_chart('30 minutes', '1 minute', 3, 40);
            ASSERT v_count > 1, 'small bucket should return rows';
            -- Large bucket (10 minutes)
            SELECT count(*) INTO v_count FROM ash.timeline_chart('30 minutes', '10 minutes', 3, 40);
            ASSERT v_count > 1, 'large bucket should return rows';
            -- Width=10 (narrow)
            SELECT count(*) INTO v_count FROM ash.timeline_chart('30 minutes', '5 minutes', 3, 10);
            ASSERT v_count > 1, 'narrow width should return rows';
            RAISE NOTICE 'Parameter variation tests PASSED';
          END;
          $$;
          EOF
      - name: Test top_waits with bar (histogram merged)
        # The bar column renders either block characters or percentages.
        env:
          PGPASSWORD: postgres
        run: |
          psql -h localhost -U postgres -d postgres -v ON_ERROR_STOP=1 << 'EOF'
          DO $$
          DECLARE
            v_rec record;
          BEGIN
            -- top_waits should have a bar column with █ characters
            SELECT * INTO v_rec FROM ash.top_waits('1 hour') LIMIT 1;
            ASSERT v_rec.bar IS NOT NULL, 'bar column missing from top_waits';
            ASSERT position('█' in v_rec.bar) > 0 OR position('%' in v_rec.bar) > 0,
              'bar should contain block chars or percentages';
            -- "Other" rollup row should exist when there are enough events
            PERFORM 1 FROM ash.top_waits('1 hour', 2)
            WHERE wait_event = 'Other';
            -- Note: may not always exist if fewer than limit+1 events
            RAISE NOTICE 'top_waits bar tests PASSED';
          END;
          $$;
          EOF
      - name: Test internal helper functions
        # Covers slot arithmetic (current/_active_slots, mod-3 cycle),
        # timestamp compression (_to_sample_ts as seconds since ash.epoch()),
        # idempotent wait-event registration, and pg_cron detection.
        env:
          PGPASSWORD: postgres
        run: |
          psql -h localhost -U postgres -d postgres -v ON_ERROR_STOP=1 << 'EOF'
          DO $$
          DECLARE
            v_slot smallint;
            v_slots smallint[];
            v_ts int4;
            v_wait_id smallint;
            v_cron_avail boolean;
          BEGIN
            -- current_slot: returns current partition slot
            SELECT ash.current_slot() INTO v_slot;
            ASSERT v_slot >= 0 AND v_slot <= 2,
              'current_slot out of range: ' || v_slot;
            -- _active_slots: returns current + previous slot
            SELECT ash._active_slots() INTO v_slots;
            ASSERT array_length(v_slots, 1) = 2,
              '_active_slots should return 2 elements, got: ' || array_length(v_slots, 1);
            ASSERT v_slots[1] = v_slot,
              '_active_slots[1] should be current slot';
            ASSERT v_slots[2] = ((v_slot - 1 + 3) % 3)::smallint,
              '_active_slots[2] should be previous slot';
            -- _to_sample_ts: converts timestamptz to int4 offset from epoch
            SELECT ash._to_sample_ts(ash.epoch()) INTO v_ts;
            ASSERT v_ts = 0, '_to_sample_ts(epoch) should be 0, got: ' || v_ts;
            SELECT ash._to_sample_ts(ash.epoch() + interval '1 hour') INTO v_ts;
            ASSERT v_ts = 3600, '_to_sample_ts(epoch+1h) should be 3600, got: ' || v_ts;
            -- _to_sample_ts: future timestamps should be positive
            SELECT ash._to_sample_ts(now()) INTO v_ts;
            ASSERT v_ts > 0, '_to_sample_ts(now) should be positive';
            -- _register_wait: register a new wait event
            SELECT ash._register_wait('active', 'Test', 'TestEvent') INTO v_wait_id;
            ASSERT v_wait_id IS NOT NULL, '_register_wait returned null';
            -- _register_wait: same call should return same id (idempotent)
            DECLARE
              v_wait_id2 smallint;
            BEGIN
              SELECT ash._register_wait('active', 'Test', 'TestEvent') INTO v_wait_id2;
              ASSERT v_wait_id2 = v_wait_id,
                '_register_wait not idempotent: ' || v_wait_id || ' vs ' || v_wait_id2;
            END;
            -- _register_wait: different event should get different id
            DECLARE
              v_wait_id3 smallint;
            BEGIN
              SELECT ash._register_wait('active', 'Test', 'OtherEvent') INTO v_wait_id3;
              ASSERT v_wait_id3 <> v_wait_id,
                '_register_wait should return different id for different event';
            END;
            -- _pg_cron_available: should return boolean without error
            SELECT ash._pg_cron_available() INTO v_cron_avail;
            ASSERT v_cron_avail IS NOT NULL, '_pg_cron_available returned null';
            -- epoch: should be 2026-01-01 00:00:00 UTC
            ASSERT ash.epoch() = '2026-01-01 00:00:00+00'::timestamptz,
              'epoch should be 2026-01-01, got: ' || ash.epoch();
            RAISE NOTICE 'Internal helper tests PASSED';
          END;
          $$;
          EOF
| - name: Test edge cases and corner cases | |
| env: | |
| PGPASSWORD: postgres | |
| run: | | |
| psql -h localhost -U postgres -d postgres -v ON_ERROR_STOP=1 << 'EOF' | |
| DO $$ | |
| DECLARE | |
| v_count int; | |
| v_rec record; | |
| BEGIN | |
| -- === Empty / future time ranges === | |
| -- Future range: no data | |
| SELECT count(*) INTO v_count | |
| FROM ash.timeline_chart_at(now() + interval '1 hour', now() + interval '2 hours'); | |
| ASSERT v_count = 0, 'future range should return 0 rows'; | |
| SELECT count(*) INTO v_count | |
| FROM ash.top_waits_at(now() + interval '1 hour', now() + interval '2 hours'); | |
| ASSERT v_count = 0, 'future top_waits_at should return 0 rows'; | |
| SELECT count(*) INTO v_count | |
| FROM ash.top_queries_at(now() + interval '1 hour', now() + interval '2 hours'); | |
| ASSERT v_count = 0, 'future top_queries_at should return 0 rows'; | |
| SELECT count(*) INTO v_count | |
| FROM ash.samples_at(now() + interval '1 hour', now() + interval '2 hours', 10); | |
| ASSERT v_count = 0, 'future samples_at should return 0 rows'; | |
| SELECT count(*) INTO v_count | |
| FROM ash.top_by_type_at(now() + interval '1 hour', now() + interval '2 hours'); | |
| ASSERT v_count = 0, 'future top_by_type_at should return 0 rows'; | |
| SELECT count(*) INTO v_count | |
| FROM ash.wait_timeline_at(now() + interval '1 hour', now() + interval '2 hours'); | |
| ASSERT v_count = 0, 'future wait_timeline_at should return 0 rows'; | |
| SELECT count(*) INTO v_count | |
| FROM ash.query_waits_at(111111, now() + interval '1 hour', now() + interval '2 hours'); | |
| ASSERT v_count = 0, 'future query_waits_at should return 0 rows'; | |
| -- === Very short intervals === | |
| SELECT count(*) INTO v_count FROM ash.top_waits('1 second'); | |
| -- May return 0, that's fine — should not error | |
| SELECT count(*) INTO v_count FROM ash.top_queries('1 second'); | |
| SELECT count(*) INTO v_count FROM ash.top_by_type('1 second'); | |
| SELECT count(*) INTO v_count FROM ash.wait_timeline('1 second', '1 second'); | |
| -- === Non-existent queries === | |
| SELECT count(*) INTO v_count FROM ash.query_waits(999999999, '1 hour'); | |
| ASSERT v_count = 0, 'non-existent query should return 0 rows'; | |
| SELECT count(*) INTO v_count | |
| FROM ash.query_waits_at(999999999, now() - interval '1 hour', now()); | |
| ASSERT v_count = 0, 'non-existent query_at should return 0 rows'; | |
| -- === _validate_data corner cases === | |
| ASSERT ash._validate_data(null) = false, 'null should be invalid'; | |
| ASSERT ash._validate_data('{}'::integer[]) = false, 'empty array should be invalid'; | |
| ASSERT ash._validate_data('{0}'::integer[]) = false, 'single zero should be invalid'; | |
| ASSERT ash._validate_data('{-1}'::integer[]) = false, 'single neg should be invalid (no count)'; | |
| ASSERT ash._validate_data('{-1,0}'::integer[]) = false, 'count=0 should be invalid'; | |
| ASSERT ash._validate_data('{-1,1,5}'::integer[]) = true, 'single backend should be valid'; | |
| ASSERT ash._validate_data('{-1,2,5,6,-2,1,7}'::integer[]) = true, 'multi-group should be valid'; | |
| ASSERT ash._validate_data('{-1,2,5}'::integer[]) = false, 'count=2 but only 1 qid should be invalid'; | |
| ASSERT ash._validate_data('{5,2,1,1}'::integer[]) = false, 'positive first element should be invalid'; | |
| -- === decode_sample corner cases === | |
| -- Empty/null data | |
| SELECT count(*) INTO v_count FROM ash.decode_sample(null); | |
| ASSERT v_count = 0, 'null decode should return 0 rows'; | |
| SELECT count(*) INTO v_count FROM ash.decode_sample('{}'::integer[]); | |
| ASSERT v_count = 0, 'empty decode should return 0 rows'; | |
| -- Data with count=0 (no backends for this wait, but marker present) | |
| SELECT count(*) INTO v_count FROM ash.decode_sample('{-1,0}'::integer[]); | |
| ASSERT v_count = 0, 'count=0 should return 0 rows'; | |
| -- === Limit edge cases === | |
| -- limit=1: should get exactly 1 event + maybe Other | |
| SELECT count(*) INTO v_count FROM ash.top_waits('1 hour', 1); | |
| ASSERT v_count >= 1 AND v_count <= 2, | |
| 'limit=1 should return 1-2 rows, got: ' || v_count; | |
| -- limit=100: should not error even with few events | |
| SELECT count(*) INTO v_count FROM ash.top_waits('1 hour', 100); | |
| ASSERT v_count > 0, 'limit=100 should return rows'; | |
| -- limit=0: edge case | |
| SELECT count(*) INTO v_count FROM ash.top_queries('1 hour', 0); | |
| -- Should return 0 or just Other | |
| -- === timeline_chart edge cases === | |
| -- Width=1: minimal chart | |
| SELECT count(*) INTO v_count FROM ash.timeline_chart('30 minutes', '5 minutes', 3, 1); | |
| ASSERT v_count > 1, 'width=1 should still return rows'; | |
| -- top=1: only top event + Other | |
| SELECT * INTO v_rec FROM ash.timeline_chart('30 minutes', '5 minutes', 1, 40) LIMIT 1; | |
| ASSERT v_rec.chart IS NOT NULL, 'top=1 legend should exist'; | |
| -- Bucket larger than interval | |
| SELECT count(*) INTO v_count FROM ash.timeline_chart('5 minutes', '1 hour', 3, 40); | |
| -- Should return at most 1-2 rows | |
| -- === Rotation edge cases === | |
| -- Multiple rotations: slot should cycle through 0→1→2→0 | |
| DECLARE | |
| v_s0 smallint; | |
| v_s1 smallint; | |
| v_s2 smallint; | |
| v_s3 smallint; | |
| BEGIN | |
| SELECT ash.current_slot() INTO v_s0; | |
| UPDATE ash.config SET rotated_at = now() - interval '2 days'; | |
| PERFORM ash.rotate(); | |
| SELECT ash.current_slot() INTO v_s1; | |
| ASSERT v_s1 = ((v_s0 + 1) % 3)::smallint, | |
| 'first rotation: expected ' || ((v_s0 + 1) % 3) || ', got ' || v_s1; | |
| UPDATE ash.config SET rotated_at = now() - interval '2 days'; | |
| PERFORM ash.rotate(); | |
| SELECT ash.current_slot() INTO v_s2; | |
| ASSERT v_s2 = ((v_s0 + 2) % 3)::smallint, | |
| 'second rotation: expected ' || ((v_s0 + 2) % 3) || ', got ' || v_s2; | |
| UPDATE ash.config SET rotated_at = now() - interval '2 days'; | |
| PERFORM ash.rotate(); | |
| SELECT ash.current_slot() INTO v_s3; | |
| ASSERT v_s3 = v_s0, | |
| 'third rotation should return to start: expected ' || v_s0 || ', got ' || v_s3; | |
| END; | |
| -- === Partition isolation === | |
| -- Data in inactive partition should NOT show in readers | |
| DECLARE | |
| v_inactive smallint; | |
| v_unique_wait_id smallint; | |
| v_slot smallint; | |
| v_count_before int; | |
| v_count_after int; | |
| v_found boolean; | |
| BEGIN | |
| SELECT ash.current_slot() INTO v_slot; | |
| -- Inactive partition = neither current nor previous | |
| v_inactive := ((v_slot + 1) % 3)::smallint; | |
| -- Register a unique wait event that only goes into inactive partition | |
| SELECT ash._register_wait('active', 'TestIsolation', 'GhostEvent') INTO v_unique_wait_id; | |
| -- Record current top_waits count | |
| SELECT count(*) INTO v_count_before FROM ash.top_waits('1 hour'); | |
| -- Insert into INACTIVE partition with the unique wait event | |
| EXECUTE format( | |
| 'INSERT INTO ash.sample_%s (sample_ts, datid, active_count, data, slot) VALUES (%s, 1, 100, %L::integer[], %s)', | |
| v_inactive, | |
| extract(epoch FROM now() - ash.epoch())::int4, | |
| ARRAY[-v_unique_wait_id, 100, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]::integer[], | |
| v_inactive | |
| ); | |
| -- GhostEvent should NOT appear in readers (filtered by _active_slots) | |
| SELECT EXISTS ( | |
| SELECT 1 FROM ash.top_waits('1 hour', 100) | |
| WHERE wait_event = 'TestIsolation:GhostEvent' | |
| ) INTO v_found; | |
| ASSERT NOT v_found, | |
| 'GhostEvent from inactive partition should not appear in top_waits'; | |
| -- Also check timeline_chart doesn't show it | |
| SELECT EXISTS ( | |
| SELECT 1 FROM ash.timeline_chart('1 hour', '5 minutes', 100, 40) | |
| WHERE detail LIKE '%GhostEvent%' | |
| ) INTO v_found; | |
| ASSERT NOT v_found, | |
| 'GhostEvent should not appear in timeline_chart'; | |
| -- Cleanup | |
| EXECUTE format('DELETE FROM ash.sample_%s WHERE active_count = 100', v_inactive); | |
| END; | |
| -- === Rotation guard: "rotated too recently" === | |
| DECLARE | |
| v_result text; | |
| BEGIN | |
| -- Don't touch rotated_at — should still be recent from earlier rotation | |
| SELECT ash.rotate() INTO v_result; | |
| ASSERT v_result LIKE 'skipped%', | |
| 'rotate() should skip when called too soon, got: ' || coalesce(v_result, 'NULL'); | |
| END; | |
| -- === Multi-database data === | |
| DECLARE | |
| v_cpu_id2 smallint; | |
| v_db_count int; | |
| v_cur_slot smallint; | |
| v_now_ts int4; | |
| BEGIN | |
| SELECT id INTO v_cpu_id2 FROM ash.wait_event_map WHERE type = 'CPU*' AND event = 'CPU*'; | |
| v_cur_slot := ash.current_slot(); | |
| v_now_ts := extract(epoch FROM now() - ash.epoch())::int4; | |
| -- Insert data for TWO different datids into current slot | |
| INSERT INTO ash.sample (sample_ts, datid, active_count, data, slot) | |
| VALUES | |
| (v_now_ts - 60, 1, 1, ARRAY[-v_cpu_id2, 1, 1]::integer[], v_cur_slot), | |
| (v_now_ts - 60, 99999, 1, ARRAY[-v_cpu_id2, 1, 1]::integer[], v_cur_slot); | |
| SELECT count(DISTINCT datid) INTO v_db_count | |
| FROM ash.samples_by_database('1 hour'); | |
| ASSERT v_db_count >= 2, | |
| 'samples_by_database should show at least 2 databases, got: ' || v_db_count; | |
| END; | |
| RAISE NOTICE 'Edge and corner case tests PASSED'; | |
| END; | |
| $$; | |
| -- === _wait_color edge cases (separate DO block) === | |
| DO $$ | |
| BEGIN | |
| -- Verify the ANSI truecolor escape string produced per wait-event class. | |
| -- RGB components are rendered as fixed three-digit fields (e.g. 080, 000); | |
| -- see the trailing note on the 'unknown gray' case below. | |
| -- Exact boundary matches | |
| ASSERT ash._wait_color('CPU*', true) = E'\033[38;2;080;250;123m', 'CPU* green'; | |
| ASSERT ash._wait_color('CPUsomething', true) = E'\033[38;2;080;250;123m', 'CPU prefix green'; | |
| ASSERT ash._wait_color('IdleTx', true) = E'\033[38;2;241;250;140m', 'IdleTx light yellow'; | |
| ASSERT ash._wait_color('IO:DataFileRead', true) = E'\033[38;2;030;100;255m', 'IO blue'; | |
| ASSERT ash._wait_color('IO:WALSync', true) = E'\033[38;2;030;100;255m', 'IO:WALSync blue'; | |
| ASSERT ash._wait_color('Lock:tuple', true) = E'\033[38;2;255;085;085m', 'Lock red'; | |
| ASSERT ash._wait_color('Lock:transactionid', true) = E'\033[38;2;255;085;085m', 'Lock:txid red'; | |
| ASSERT ash._wait_color('Lock:relation', true) = E'\033[38;2;255;085;085m', 'Lock:relation red'; | |
| ASSERT ash._wait_color('LWLock:WALWrite', true) = E'\033[38;2;255;121;198m', 'LWLock pink'; | |
| ASSERT ash._wait_color('LWLock:BufferContent', true) = E'\033[38;2;255;121;198m', 'LWLock:BufferContent pink'; | |
| ASSERT ash._wait_color('IPC:BgWorkerStartup', true) = E'\033[38;2;000;200;255m', 'IPC cyan'; | |
| ASSERT ash._wait_color('Client:ClientRead', true) = E'\033[38;2;255;220;100m', 'Client yellow'; | |
| ASSERT ash._wait_color('Client:ClientWrite', true) = E'\033[38;2;255;220;100m', 'Client:Write yellow'; | |
| ASSERT ash._wait_color('Timeout:PgSleep', true) = E'\033[38;2;255;165;000m', 'Timeout orange'; | |
| ASSERT ash._wait_color('BufferPin:BufferPin', true) = E'\033[38;2;000;210;180m', 'BufferPin teal'; | |
| ASSERT ash._wait_color('Activity:ArchiverMain', true) = E'\033[38;2;150;100;255m', 'Activity purple'; | |
| ASSERT ash._wait_color('Extension:DblinkGetResult', true) = E'\033[38;2;190;150;255m', 'Extension light purple'; | |
| -- Fallback (gray) and the no-color path (second arg omitted => empty string) | |
| ASSERT ash._wait_color('', true) = E'\033[38;2;180;180;180m', 'empty string gray (fallback)'; | |
| ASSERT ash._wait_color('') = '', 'empty string no-color returns empty'; | |
| ASSERT ash._wait_color('UnknownType:Whatever', true) = E'\033[38;2;180;180;180m', 'unknown gray'; -- 180 already 3 digits | |
| RAISE NOTICE '_wait_color comprehensive tests PASSED'; | |
| END; | |
| $$; | |
| EOF | |
| # Verify that ash.uninstall() removes the extension's entire footprint. | |
| - name: Test uninstall | |
| env: | |
| PGPASSWORD: postgres | |
| run: | | |
| psql -h localhost -U postgres -d postgres -v ON_ERROR_STOP=1 << 'EOF' | |
| -- The uninstall contract is "drop the whole ash schema"; assert it is gone. | |
| SELECT ash.uninstall(); | |
| DO $$ | |
| BEGIN | |
| ASSERT NOT EXISTS (SELECT FROM pg_namespace WHERE nspname = 'ash'), | |
| 'schema not dropped after uninstall'; | |
| RAISE NOTICE 'Uninstall test PASSED'; | |
| END; | |
| $$; | |
| EOF | |
| # Full chained upgrade: fresh 1.0 install, then 1.0->1.1, then 1.1->1.2. | |
| - name: "Upgrade path: 1.0 → 1.1 → 1.2" | |
| env: | |
| PGPASSWORD: postgres | |
| run: | | |
| psql -h localhost -U postgres -d postgres -v ON_ERROR_STOP=1 << 'EOF' | |
| -- Start from 1.0 | |
| \i sql/ash-1.0.sql | |
| DO $$ | |
| BEGIN | |
| ASSERT EXISTS (SELECT FROM pg_namespace WHERE nspname = 'ash'), '1.0 install failed'; | |
| RAISE NOTICE '1.0 installed OK'; | |
| END; | |
| $$; | |
| -- Upgrade to 1.1 | |
| \i sql/ash-1.1.sql | |
| DO $$ | |
| BEGIN | |
| ASSERT (SELECT version FROM ash.config WHERE singleton) = '1.1', | |
| 'version should be 1.1 after upgrade'; | |
| -- timeline_chart must exist | |
| ASSERT EXISTS ( | |
| SELECT FROM pg_proc WHERE proname = 'timeline_chart' | |
| AND pronamespace = 'ash'::regnamespace | |
| ), 'timeline_chart missing after 1.1 upgrade'; | |
| RAISE NOTICE '1.0 -> 1.1 upgrade PASSED'; | |
| END; | |
| $$; | |
| -- Upgrade to 1.2 | |
| \i sql/ash-1.1-to-1.2.sql | |
| DO $$ | |
| DECLARE | |
| v_count int; | |
| BEGIN | |
| ASSERT (SELECT version FROM ash.config WHERE singleton) = '1.2', | |
| 'version should be 1.2 after upgrade'; | |
| -- timeline_chart must exist with new column order | |
| -- NOTE: "v_count >= 0" is always true; the real check is that the | |
| -- SELECT itself raises if the function/signature is missing. | |
| SELECT count(*) INTO v_count | |
| FROM ash.timeline_chart('1 hour', '5 minutes', 3, 40); | |
| ASSERT v_count >= 0, 'timeline_chart missing after 1.2 upgrade'; | |
| SELECT count(*) INTO v_count | |
| FROM ash.timeline_chart('1 hour', '5 minutes', 3, 40, true); | |
| ASSERT v_count >= 0, 'timeline_chart with p_color missing after 1.2'; | |
| RAISE NOTICE '1.1 -> 1.2 upgrade PASSED'; | |
| END; | |
| $$; | |
| -- Uninstall after upgrade test | |
| SELECT ash.uninstall(); | |
| EOF | |
| # Direct upgrade: fresh 1.1 install, then a single 1.1->1.2 migration. | |
| - name: "Upgrade path: 1.1 → 1.2 (direct)" | |
| env: | |
| PGPASSWORD: postgres | |
| run: | | |
| psql -h localhost -U postgres -d postgres -v ON_ERROR_STOP=1 << 'EOF' | |
| \i sql/ash-1.1.sql | |
| DO $$ | |
| BEGIN | |
| ASSERT (SELECT version FROM ash.config WHERE singleton) = '1.1', | |
| 'version should be 1.1'; | |
| RAISE NOTICE '1.1 fresh install OK'; | |
| END; | |
| $$; | |
| \i sql/ash-1.1-to-1.2.sql | |
| DO $$ | |
| DECLARE | |
| v_count int; | |
| BEGIN | |
| ASSERT (SELECT version FROM ash.config WHERE singleton) = '1.2', | |
| 'version should be 1.2 after upgrade'; | |
| -- smoke check: the SELECT itself raises if timeline_chart is missing | |
| SELECT count(*) INTO v_count | |
| FROM ash.timeline_chart('1 hour', '5 minutes', 3, 40); | |
| ASSERT v_count >= 0, 'timeline_chart missing'; | |
| RAISE NOTICE '1.1 -> 1.2 direct upgrade PASSED'; | |
| END; | |
| $$; | |
| -- Uninstall | |
| SELECT ash.uninstall(); | |
| EOF | |
| # Dev workflow: the 1.1->1.2 migration must be safe to re-apply on top of | |
| # a current install (idempotent), since developers iterate on that file. | |
| - name: "Dev workflow: re-apply 1.2 migration (idempotent)" | |
| env: | |
| PGPASSWORD: postgres | |
| run: | | |
| psql -h localhost -U postgres -d postgres -v ON_ERROR_STOP=1 << 'EOF' | |
| \i sql/ash-install.sql | |
| -- Apply 1.1--1.2 migration on top of install (should be idempotent) | |
| \i sql/ash-1.1-to-1.2.sql | |
| DO $$ | |
| BEGIN | |
| ASSERT (SELECT version FROM ash.config WHERE singleton) = '1.2', | |
| 'version should still be 1.2'; | |
| RAISE NOTICE 'Idempotent re-apply PASSED'; | |
| END; | |
| $$; | |
| -- Apply again | |
| \i sql/ash-1.1-to-1.2.sql | |
| DO $$ | |
| DECLARE | |
| v_count int; | |
| BEGIN | |
| ASSERT (SELECT version FROM ash.config WHERE singleton) = '1.2', | |
| 'version should still be 1.2 after second apply'; | |
| -- smoke check: the SELECT raises if timeline_chart was broken by re-apply | |
| SELECT count(*) INTO v_count | |
| FROM ash.timeline_chart('1 hour', '5 minutes', 3, 40); | |
| ASSERT v_count >= 0, 'timeline_chart missing after re-apply'; | |
| RAISE NOTICE 'Second re-apply PASSED'; | |
| END; | |
| $$; | |
| SELECT ash.uninstall(); | |
| EOF | |
| # Marker step so a green matrix cell is self-explanatory in the UI. | |
| # No database access happens here, so the PGPASSWORD env the other | |
| # steps carry is not needed (it was declared but unused). | |
| - name: Final summary | |
| run: | | |
| echo "All tests passed for PostgreSQL ${{ matrix.postgres }}" | |
| # Separate job for PostgreSQL 18 using the pgxn-tools container image, | |
| # since the services-based matrix above only covers 14-17. | |
| test-pg18: | |
| runs-on: ubuntu-latest | |
| container: | |
| image: pgxn/pgxn-tools | |
| steps: | |
| - uses: actions/checkout@v4 | |
| - name: Start PostgreSQL 18 | |
| run: | | |
| # NOTE(review): falls back to PG 17 when 18 is unavailable in the image, | |
| # so this job may silently test 17 under the "18" label — confirm intent. | |
| pg-start 18 || pg-start 17 | |
| - name: Install pg_ash (fresh install) | |
| run: | | |
| psql -d postgres -v ON_ERROR_STOP=1 -f sql/ash-install.sql | |
| - name: Seed test data and run tests | |
| run: | | |
| psql -d postgres -v ON_ERROR_STOP=1 << 'EOF' | |
| -- Seed dictionaries | |
| INSERT INTO ash.wait_event_map (state, type, event) VALUES | |
| ('active', 'CPU*', 'CPU*'), | |
| ('active', 'IO', 'DataFileRead'), | |
| ('active', 'Lock', 'tuple') | |
| ON CONFLICT DO NOTHING; | |
| INSERT INTO ash.query_map_0 (query_id) VALUES (111111), (222222) | |
| ON CONFLICT DO NOTHING; | |
| -- Seed samples | |
| -- Encoded sample layout: repeated groups of | |
| --   (-wait_event_id, backend_count, <backend_count query ids>) | |
| DO $$ | |
| DECLARE | |
| v_cpu_id smallint; | |
| v_io_id smallint; | |
| v_lock_id smallint; | |
| v_q1 int4; | |
| v_q2 int4; | |
| v_now_ts int4; | |
| v_ts int4; | |
| BEGIN | |
| SELECT id INTO v_cpu_id FROM ash.wait_event_map WHERE type = 'CPU*'; | |
| SELECT id INTO v_io_id FROM ash.wait_event_map WHERE type = 'IO'; | |
| SELECT id INTO v_lock_id FROM ash.wait_event_map WHERE type = 'Lock'; | |
| SELECT id INTO v_q1 FROM ash.query_map_0 WHERE query_id = 111111; | |
| SELECT id INTO v_q2 FROM ash.query_map_0 WHERE query_id = 222222; | |
| -- sample_ts is stored as whole seconds relative to ash.epoch() | |
| v_now_ts := extract(epoch FROM now() - ash.epoch())::int4; | |
| -- one sample every 30 s covering the last 30 minutes, all in slot 0 | |
| FOR v_ts IN SELECT generate_series(v_now_ts - 1800, v_now_ts - 30, 30) | |
| LOOP | |
| INSERT INTO ash.sample (sample_ts, datid, active_count, data, slot) | |
| VALUES (v_ts, 1, 4, | |
| ARRAY[-v_cpu_id, 2, v_q1, v_q2, | |
| -v_io_id, 1, v_q1, | |
| -v_lock_id, 1, v_q2]::integer[], 0); | |
| END LOOP; | |
| END; | |
| $$; | |
| -- Test all core functions | |
| DO $$ | |
| DECLARE | |
| v_count int; | |
| BEGIN | |
| -- Readers are smoke-checked against the seeded 30-minute window, so | |
| -- every '1 hour' query below must return at least one row. | |
| SELECT count(*) INTO v_count FROM ash.status(); | |
| ASSERT v_count > 0, 'status failed'; | |
| PERFORM ash.take_sample(); | |
| SELECT count(*) INTO v_count FROM ash.top_waits('1 hour'); | |
| ASSERT v_count > 0, 'top_waits failed'; | |
| SELECT count(*) INTO v_count FROM ash.top_queries('1 hour'); | |
| ASSERT v_count > 0, 'top_queries failed'; | |
| SELECT count(*) INTO v_count FROM ash.top_by_type('1 hour'); | |
| ASSERT v_count > 0, 'top_by_type failed'; | |
| SELECT count(*) INTO v_count FROM ash.wait_timeline('1 hour', '5 minutes'); | |
| ASSERT v_count > 0, 'wait_timeline failed'; | |
| SELECT count(*) INTO v_count FROM ash.samples('1 hour', 10); | |
| ASSERT v_count > 0, 'samples failed'; | |
| SELECT count(*) INTO v_count FROM ash.activity_summary('1 hour'); | |
| ASSERT v_count > 0, 'activity_summary failed'; | |
| -- 111111 is one of the query_ids seeded into ash.query_map_0 above | |
| SELECT count(*) INTO v_count FROM ash.query_waits(111111, '1 hour'); | |
| ASSERT v_count > 0, 'query_waits failed'; | |
| -- timeline_chart | |
| SELECT count(*) INTO v_count FROM ash.timeline_chart('1 hour', '5 minutes', 3, 40); | |
| ASSERT v_count > 1, 'timeline_chart failed'; | |
| SELECT count(*) INTO v_count | |
| FROM ash.timeline_chart_at(now() - interval '1 hour', now(), '5 minutes', 3, 40); | |
| ASSERT v_count > 1, 'timeline_chart_at failed'; | |
| -- _wait_color | |
| ASSERT ash._wait_color('CPU*', true) = E'\033[38;2;080;250;123m', 'CPU* color wrong'; | |
| ASSERT ash._wait_color('IO:DataFileRead', true) = E'\033[38;2;030;100;255m', 'IO color check'; | |
| ASSERT ash._wait_color('Lock:tuple', true) = E'\033[38;2;255;085;085m', 'Lock color wrong'; | |
| -- Absolute-time variants | |
| SELECT count(*) INTO v_count | |
| FROM ash.top_waits_at(now() - interval '1 hour', now()); | |
| ASSERT v_count > 0, 'top_waits_at failed'; | |
| SELECT count(*) INTO v_count | |
| FROM ash.top_queries_at(now() - interval '1 hour', now()); | |
| ASSERT v_count > 0, 'top_queries_at failed'; | |
| RAISE NOTICE 'All PG18 tests PASSED'; | |
| END; | |
| $$; | |
| EOF | |
| # Same uninstall + upgrade + idempotency coverage as the matrix job, on PG 18. | |
| - name: Test uninstall and upgrade paths | |
| run: | | |
| psql -d postgres -v ON_ERROR_STOP=1 << 'EOF' | |
| -- Uninstall | |
| SELECT ash.uninstall(); | |
| DO $$ | |
| BEGIN | |
| ASSERT NOT EXISTS (SELECT FROM pg_namespace WHERE nspname = 'ash'), | |
| 'schema not dropped'; | |
| RAISE NOTICE 'Uninstall PASSED'; | |
| END; | |
| $$; | |
| -- Test upgrade: 1.1 → 1.2 | |
| \i sql/ash-1.1.sql | |
| DO $$ | |
| BEGIN | |
| ASSERT (SELECT version FROM ash.config WHERE singleton) = '1.1', | |
| 'version should be 1.1'; | |
| RAISE NOTICE '1.1 install OK'; | |
| END; | |
| $$; | |
| \i sql/ash-1.1-to-1.2.sql | |
| DO $$ | |
| DECLARE | |
| v_count int; | |
| BEGIN | |
| ASSERT (SELECT version FROM ash.config WHERE singleton) = '1.2', | |
| 'version should be 1.2 after upgrade'; | |
| -- smoke check: the SELECT itself raises if timeline_chart is missing | |
| SELECT count(*) INTO v_count | |
| FROM ash.timeline_chart('1 hour', '5 minutes', 3, 40); | |
| ASSERT v_count >= 0, 'timeline_chart missing after 1.2'; | |
| RAISE NOTICE '1.1 -> 1.2 upgrade PASSED'; | |
| END; | |
| $$; | |
| -- Re-apply (idempotent) | |
| \i sql/ash-1.1-to-1.2.sql | |
| DO $$ | |
| BEGIN | |
| ASSERT (SELECT version FROM ash.config WHERE singleton) = '1.2', | |
| 'version should still be 1.2'; | |
| RAISE NOTICE 'Idempotent re-apply PASSED'; | |
| END; | |
| $$; | |
| SELECT ash.uninstall(); | |
| EOF | |