Created
October 27, 2021 06:31
-
-
Save emidoots/a55c0f6d715a8ab10e77b9b565e5ccc3 to your computer and use it in GitHub Desktop.
pg partman notes
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
-- Install pg_partman into its own schema.
CREATE SCHEMA partman;
CREATE EXTENSION pg_partman SCHEMA partman;

-- Dedicated role that runs partition maintenance.
CREATE ROLE partman WITH LOGIN;
GRANT ALL ON SCHEMA partman TO partman;
GRANT ALL ON ALL TABLES IN SCHEMA partman TO partman;
GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA partman TO partman;
GRANT EXECUTE ON ALL PROCEDURES IN SCHEMA partman TO partman; -- PG11+ only
GRANT ALL ON SCHEMA my_partition_schema TO partman;
-- Allow creation of temp tables, used to move data out of the default partition.
GRANT TEMPORARY ON DATABASE sg TO partman;
-- Offline partitioning procedure, per:
-- https://github.com/pgpartman/pg_partman/blob/master/doc/pg_partman_howto_native.md#offline-partitioning
ALTER TABLE lsif_data_docs_search_public RENAME TO old_lsif_data_docs_search_public;

-- Create the new parent table, cloned from the old one, partitioned by RANGE (id).
-- The actual search index over API docs: one entry per symbol/section of API docs.
--
-- NOTE(review): INCLUDING ALL also copies index and constraint definitions from
-- the old table; if the intent is to create the parent with NO indexes (and add
-- them after the data move), use `INCLUDING ALL EXCLUDING INDEXES` instead.
CREATE TABLE lsif_data_docs_search_public (
    LIKE old_lsif_data_docs_search_public INCLUDING ALL
) PARTITION BY RANGE (id);
-- Drop triggers on the old table; we do not want them updating our aggregate
-- count tables as we move data.
-- (The stray leading '-' on the first DROP was a paste artifact; removed so the
-- statement actually parses.)
DROP TRIGGER lsif_data_docs_search_public_delete ON old_lsif_data_docs_search_public;
DROP TRIGGER lsif_data_docs_search_public_insert ON old_lsif_data_docs_search_public;
-- Register the table with pg_partman using native (declarative) partitioning:
-- id-range partitions of 30 million rows each, about enough to store 1,200 Go
-- repos worth of results.
SELECT partman.create_parent('public.lsif_data_docs_search_public', 'id', 'native', '30000000');
-- Actually perform the partitioning: move rows from the old (renamed) table
-- into the new partitions, committing between batches.
--
-- p_interval:     copy in batches of 500,000 rows at a time
-- p_batch:        total number of batches to copy (large sentinel = "all rows")
-- p_source_table: the renamed original table the data is moved out of
CALL partman.partition_data_proc(
    'public.lsif_data_docs_search_public',
    p_interval := '500000',
    p_batch := 9999999,
    p_source_table := 'public.old_lsif_data_docs_search_public'
);

-- Refresh planner statistics (and visibility map) on the freshly loaded table.
VACUUM ANALYZE public.lsif_data_docs_search_public;
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment