Skip to content

Instantly share code, notes, and snippets.

@cevian
Last active November 23, 2022 21:09
Show Gist options
  • Save cevian/6af2ded28f74290da1a46223f6e2847e to your computer and use it in GitHub Desktop.
info_disk_space_queries.sql
-- psql session setup: disable the pager and echo every statement so the whole
-- session can be captured to a log; keep going past errors (ON_ERROR_STOP 0)
-- since later diagnostics are still useful if one query fails.
\pset pager off
\set ECHO all
\set ON_ERROR_STOP 0
-- Record server version, installed extensions, and the capture timestamp
-- so the diagnostic output is self-describing.
select version();
\dx
select now();
--sanity_check: avg_not_yet_compressed_chunks is < 2
\x on
-- Per-database compression overview aggregated over all Promscale metrics:
-- how much data is not yet compressed, compression throughput estimates,
-- and total sizes, from the prom_info.metric view.
SELECT
    avg(chunk_interval) AS avg_chunk_interval_new_chunk,
    sum(total_size_bytes - coalesce(after_compression_bytes, 0))
        AS not_yet_compressed_bytes,
    avg(total_interval - compressed_interval)
        AS avg_not_yet_compressed_interval,
    avg(total_chunks - compressed_chunks) AS avg_not_yet_compressed_chunks,
    sum(total_chunks - compressed_chunks) AS total_not_yet_compressed_chunks,
    count(*) AS total_metrics,
    sum(coalesce(after_compression_bytes, 0)) AS compressed_bytes,
    avg(compressed_interval) AS avg_compressed_interval,
    -- Guard the divisor with greatest(..., 1) so a fully compressed system
    -- (zero not-yet-compressed interval) does not raise division_by_zero;
    -- this mirrors the guard already used for compressed_bytes_per_sec below.
    sum(total_size_bytes - coalesce(after_compression_bytes, 0)) /
        greatest(sum(extract(epoch from (total_interval - compressed_interval))), 1)
        AS not_yet_compressed_bytes_per_sec,
    sum(coalesce(after_compression_bytes, 0)) /
        greatest(sum(extract(epoch from (compressed_interval))), 1)
        AS compressed_bytes_per_sec,
    sum(compressed_chunks) AS total_compressed_chunks,
    sum(total_size_bytes) AS total_prom_data_size,
    pg_database_size(current_database()) AS total_db_size
FROM prom_info.metric;
--were compression jobs delayed?
\x on
-- Count metrics whose compression was ever explicitly delayed
-- (delay_compression_until set) and those whose delay is still in the future.
SELECT
    count(*) AS total_metrics,
    count(*) FILTER (WHERE delay_compression_until IS NOT NULL) AS were_delayed,
    count(*) FILTER (WHERE delay_compression_until > now()) AS still_delayed
FROM _prom_catalog.metric;
--what's the status of the jobs
\x on
-- Show runtime stats joined with job definitions for Promscale's
-- maintenance jobs (SELECT * is intentional here: this is a diagnostic
-- dump, so all columns of both views are wanted).
-- NOTE(review): now() - next_start is positive when next_start is in the
-- past (i.e. the job is overdue); confirm that is the intended sign for
-- a column named "till_start".
SELECT
    *,
    now() - j.next_start AS till_start
FROM timescaledb_information.job_stats AS js
INNER JOIN timescaledb_information.jobs AS j
    ON js.job_id = j.job_id
WHERE j.proc_name = 'execute_maintenance_job';
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment