db-refcard
--==========================================================================================
-- Customers with an order in each of the last three months
SELECT * FROM customers AS c
WHERE EXISTS (
    SELECT 1 FROM orders AS o
    WHERE o.customer_id = c.id -- assuming orders carries a customer_id FK
      AND o.date >= (CURRENT_DATE - '3 MONTH'::INTERVAL) -- MySQL: DATE_SUB(CURDATE(), INTERVAL 3 MONTH)
    HAVING COUNT(DISTINCT MONTH(o.date)) = 3
);
--==========================================================================================
SELECT NOW() - '30 MINUTES'::INTERVAL;
SELECT NOW() - INTERVAL '30' MINUTE;
SELECT NOW() - INTERVAL '30 MINUTES';
SELECT CURRENT_DATE - '1 MONTH'::INTERVAL;
SELECT CURRENT_DATE - INTERVAL '1' MONTH;
SELECT CURRENT_DATE - INTERVAL '1 MONTH';
SELECT CURRENT_TIME - '30 MINUTES'::INTERVAL;
SELECT CURRENT_TIME - INTERVAL '30' MINUTE;
SELECT CURRENT_TIME - INTERVAL '30 MINUTES';
--==========================================================================================
SELECT DISTINCT empname FROM employees WHERE DOB BETWEEN '1965-02-01' AND '1970-11-30';
--==========================================================================================
SELECT * FROM replies WHERE author != 'test' ORDER BY date DESC LIMIT 5, 30;
--==========================================================================================
SELECT job_id FROM jobs WHERE progress LIKE '3!%' ESCAPE '!';
--==========================================================================================
SELECT concat(left(login, 5), '...') AS log FROM workers;
--==========================================================================================
create table emp_copy as select * from emp where 1=3; -- structure only, no rows
create table emp_backup as select * from emp; -- structure plus data
-- third-highest salary, several ways:
select max(salary) from emp where salary < (select max(salary) from emp where salary < (select max(salary) from emp));
select salary from emp order by salary desc limit 2,1; -- MySQL offset,count form
select salary from emp order by salary desc limit 1 offset 2; -- standard offset form
select salary from emp e1 where 2 = (select count(distinct e2.salary) from emp e2 where e2.salary > e1.salary);
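-- Sketch (assumes window-function support, e.g. PostgreSQL or MySQL 8+): third-highest salary via DENSE_RANK
select salary from (
    select salary, dense_rank() over (order by salary desc) as rnk
    from emp
) ranked
where rnk = 3;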
--==========================================================================================
--select current_date;
--select current_time;
--select current_timestamp;
--select now();
--select date(now());
--select date(current_timestamp);
--==========================================================================================
select * from product where productId % 2 = 1;
select * from product where productId % 2 = 0;
--==========================================================================================
(select * from emp order by id desc limit 2) order by id asc;
select * from (select * from Product order by ProductID desc limit 2) as temp order by ProductId asc;
--==========================================================================================
select distinct dept_id from emp;
select salary from emp group by salary;
select salary from emp union select salary from emp;
select dept_id from emp e1 where e1.id >= all(select e2.id from emp e2 where e1.dept_id = e2.dept_id) order by dept_id;
select Name from Product e1 where e1.ProductID >= all(select e2.ProductID from Product e2 where e1.name = e2.name) order by ProductID;
select Name from Product e1 where e1.ProductID <= all(select e2.ProductID from Product e2 where e1.name = e2.name) order by ProductID;
--==========================================================================================
select min(productId), max(productId) from product group by name having count(1) > 1;
--==========================================================================================
select extract(year from date) as year, extract(month from date) as month, extract(day from date) as day from workers;
--==========================================================================================
-- 1. Employees paid more than their immediate manager
SELECT *
FROM Employee AS employees, Employee AS chiefs
WHERE chiefs.id = employees.chief_id AND employees.salary > chiefs.salary;
-- 2. Employees with the maximum salary in their department
SELECT *
FROM Employee AS employees
WHERE employees.salary = (SELECT MAX(salary) FROM Employee AS max WHERE max.department_id = employees.department_id);
-- 3. IDs of departments with no more than 3 employees
SELECT department_id
FROM Employee
GROUP BY department_id
HAVING COUNT(*) <= 3;
-- 4. Employees with no assigned manager working in the same department
SELECT *
FROM Employee AS employees
LEFT JOIN Employee AS chiefs ON (employees.chief_id = chiefs.id AND employees.department_id = chiefs.department_id)
WHERE chiefs.id IS NULL;
-- 5. IDs of departments with the maximum total salary
WITH dep_salary AS
(SELECT department_id, sum(salary) AS salary
FROM employee
GROUP BY department_id)
SELECT department_id
FROM dep_salary
WHERE dep_salary.salary = (SELECT max(salary) FROM dep_salary);
select department_id, sum(salary) as salary from employee
group by department_id having sum(salary) >= all(select sum(salary) as salary from employee group by department_id);
select department_id, sum(salary) as salary from employee
group by department_id having sum(salary) = (select max(salary) from (select sum(salary) as salary from employee group by department_id) as temp);
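-- Sketch (assumes window-function support): rank the per-department sums instead of re-aggregating
select department_id from (
    select department_id, rank() over (order by sum(salary) desc) as rnk
    from employee
    group by department_id
) ranked
where rnk = 1;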
--==========================================================================================
--=======================================================
create table users (
id bigint primary key generated always as identity,
username text not null unique,
email text not null unique,
password_hash text not null,
created_at timestamptz default now()
);
create table channels (
id bigint primary key generated always as identity,
name text not null unique,
description text,
created_at timestamptz default now()
);
create table messages (
id bigint primary key generated always as identity,
user_id bigint not null references users (id),
channel_id bigint references channels (id),
content text not null,
created_at timestamptz default now(),
is_direct_message boolean default false
);
create table channel_memberships (
id bigint primary key generated always as identity,
user_id bigint not null references users (id),
channel_id bigint not null references channels (id),
joined_at timestamptz default now(),
unique (user_id, channel_id)
);
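-- Sketch against the schema above (the channel id is a placeholder): latest 20 messages in a channel with author names
select m.created_at, u.username, m.content
from messages m
join users u on u.id = m.user_id
where m.channel_id = 1
order by m.created_at desc
limit 20;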
--=======================================================
create table documents (
id bigint primary key generated always as identity,
title text not null,
parent_id bigint references documents (id) on delete cascade,
created_at timestamp with time zone default now() not null,
updated_at timestamp with time zone default now() not null
);
create table document_versions (
id bigint primary key generated always as identity,
document_id bigint references documents (id) on delete cascade,
version_number int not null,
content text not null,
created_at timestamp with time zone default now() not null,
unique (document_id, version_number)
);
create table tags (
id bigint primary key generated always as identity,
name text not null unique
);
create table document_tags (
document_id bigint references documents (id) on delete cascade,
tag_id bigint references tags (id) on delete cascade,
primary key (document_id, tag_id)
);
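-- Sketch (PostgreSQL): latest version of each document via DISTINCT ON
select distinct on (document_id) document_id, version_number, content
from document_versions
order by document_id, version_number desc;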
--=======================================================
create table users (
id bigint primary key generated always as identity,
username text not null unique,
email text not null unique,
password_hash text not null
);
create table categories (
id bigint primary key generated always as identity,
name text not null unique
);
create table lists (
id bigint primary key generated always as identity,
user_id bigint not null references users (id),
name text not null
);
create table tasks (
id bigint primary key generated always as identity,
list_id bigint not null references lists (id),
category_id bigint references categories (id),
title text not null,
description text,
due_date date,
priority int,
completed boolean default false
);
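-- Sketch against the schema above: open tasks past their due date, highest priority first
select t.title, t.due_date, l.name as list_name
from tasks t
join lists l on l.id = t.list_id
where not t.completed and t.due_date < current_date
order by t.priority desc nulls last, t.due_date;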
--=======================================================
sudo docker run -p 5432:5432 --name demo1 --detach -it postgresai/seamless:v3
sudo docker exec -it demo1 bash
psql demo1 -c 'select now()'
psql demo1 -c 'select * from pg_stat_statements limit 1'
psql demo1 -c 'select * from pg_stat_activity'
pgcenter top demo1
select *
from pg_stat_statements
order by total_exec_time desc
limit 5;
explain
select count(*)
from pgbench_history
where mtime::date = '2020-05-26';
explain (analyze, buffers)
select count(*)
from pgbench_history
where mtime::date = '2020-05-26';
create index concurrently i1 on pgbench_history((mtime::date));
analyze pgbench_history;
select pg_stat_statements_reset();
select * from pg_stat_statements order by total_exec_time desc limit 5;
truncate pgbench_history;
select pg_stat_statements_reset();
select * from pg_stat_statements order by total_exec_time desc limit 5;
\o | plan-exporter
explain (analyze, buffers)
select *
from pgbench_accounts
join pgbench_branches using (bid)
where aid < 1000
order by bbalance desc
limit 20;
\o | plan-exporter --target=dalibo
explain (analyze, buffers)
select *
from pgbench_accounts
join pgbench_branches using (bid)
where aid < 1000
order by bbalance desc
limit 20;
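-- Sketch: confirm the expression index created above is actually being scanned
select indexrelname, idx_scan, idx_tup_read
from pg_stat_user_indexes
where relname = 'pgbench_history';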
--===========================================================================================
set search_path to post;
drop extension pg_trgm;
create extension pg_trgm with schema pg_catalog; -- \dx
select count(*) from post_index;
delete from post_index where 1=1;
select * from post_index where idx like '10%' order by idx;
select pid as process_id,
usename as username,
datname as database_name,
client_addr as client_address,
application_name,
backend_start,
state,
state_change
from pg_stat_activity;
--===========================================================================================
-- Autogenerated: do not edit this file
CREATE TABLE BATCH_JOB_INSTANCE
(
JOB_INSTANCE_ID BIGINT NOT NULL PRIMARY KEY,
VERSION BIGINT,
JOB_NAME VARCHAR(100) NOT NULL,
JOB_KEY VARCHAR(32) NOT NULL,
CONSTRAINT JOB_INST_UN UNIQUE (JOB_NAME, JOB_KEY)
);
CREATE TABLE BATCH_JOB_EXECUTION
(
JOB_EXECUTION_ID BIGINT NOT NULL PRIMARY KEY,
VERSION BIGINT,
JOB_INSTANCE_ID BIGINT NOT NULL,
CREATE_TIME TIMESTAMP NOT NULL,
START_TIME TIMESTAMP DEFAULT NULL,
END_TIME TIMESTAMP DEFAULT NULL,
STATUS VARCHAR(10),
EXIT_CODE VARCHAR(2500),
EXIT_MESSAGE VARCHAR(2500),
LAST_UPDATED TIMESTAMP,
CONSTRAINT JOB_INST_EXEC_FK FOREIGN KEY (JOB_INSTANCE_ID)
REFERENCES BATCH_JOB_INSTANCE (JOB_INSTANCE_ID)
);
CREATE TABLE BATCH_JOB_EXECUTION_PARAMS
(
JOB_EXECUTION_ID BIGINT NOT NULL,
PARAMETER_NAME VARCHAR(100) NOT NULL,
PARAMETER_TYPE VARCHAR(100) NOT NULL,
PARAMETER_VALUE VARCHAR(2500),
IDENTIFYING CHAR(1) NOT NULL,
CONSTRAINT JOB_EXEC_PARAMS_FK FOREIGN KEY (JOB_EXECUTION_ID)
REFERENCES BATCH_JOB_EXECUTION (JOB_EXECUTION_ID)
);
CREATE TABLE BATCH_STEP_EXECUTION
(
STEP_EXECUTION_ID BIGINT NOT NULL PRIMARY KEY,
VERSION BIGINT NOT NULL,
STEP_NAME VARCHAR(100) NOT NULL,
JOB_EXECUTION_ID BIGINT NOT NULL,
CREATE_TIME TIMESTAMP NOT NULL,
START_TIME TIMESTAMP DEFAULT NULL,
END_TIME TIMESTAMP DEFAULT NULL,
STATUS VARCHAR(10),
COMMIT_COUNT BIGINT,
READ_COUNT BIGINT,
FILTER_COUNT BIGINT,
WRITE_COUNT BIGINT,
READ_SKIP_COUNT BIGINT,
WRITE_SKIP_COUNT BIGINT,
PROCESS_SKIP_COUNT BIGINT,
ROLLBACK_COUNT BIGINT,
EXIT_CODE VARCHAR(2500),
EXIT_MESSAGE VARCHAR(2500),
LAST_UPDATED TIMESTAMP,
CONSTRAINT JOB_EXEC_STEP_FK FOREIGN KEY (JOB_EXECUTION_ID)
REFERENCES BATCH_JOB_EXECUTION (JOB_EXECUTION_ID)
);
CREATE TABLE BATCH_STEP_EXECUTION_CONTEXT
(
STEP_EXECUTION_ID BIGINT NOT NULL PRIMARY KEY,
SHORT_CONTEXT VARCHAR(2500) NOT NULL,
SERIALIZED_CONTEXT TEXT,
CONSTRAINT STEP_EXEC_CTX_FK FOREIGN KEY (STEP_EXECUTION_ID)
REFERENCES BATCH_STEP_EXECUTION (STEP_EXECUTION_ID)
);
CREATE TABLE BATCH_JOB_EXECUTION_CONTEXT
(
JOB_EXECUTION_ID BIGINT NOT NULL PRIMARY KEY,
SHORT_CONTEXT VARCHAR(2500) NOT NULL,
SERIALIZED_CONTEXT TEXT,
CONSTRAINT JOB_EXEC_CTX_FK FOREIGN KEY (JOB_EXECUTION_ID)
REFERENCES BATCH_JOB_EXECUTION (JOB_EXECUTION_ID)
);
CREATE SEQUENCE BATCH_STEP_EXECUTION_SEQ MAXVALUE 9223372036854775807 NO CYCLE;
CREATE SEQUENCE BATCH_JOB_EXECUTION_SEQ MAXVALUE 9223372036854775807 NO CYCLE;
CREATE SEQUENCE BATCH_JOB_SEQ MAXVALUE 9223372036854775807 NO CYCLE;
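-- Sketch against the Spring Batch schema above (LIMIT assumes a PostgreSQL/H2-style dialect): ten most recent job executions
SELECT ji.JOB_NAME, je.STATUS, je.EXIT_CODE, je.START_TIME, je.END_TIME
FROM BATCH_JOB_EXECUTION je
JOIN BATCH_JOB_INSTANCE ji ON ji.JOB_INSTANCE_ID = je.JOB_INSTANCE_ID
ORDER BY je.CREATE_TIME DESC
LIMIT 10;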
--==========================================================================================
CREATE KEYSPACE keyspace1 WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };
use keyspace1;
CREATE TABLE configuration (
name ascii,
value text,
PRIMARY KEY ( name )
) WITH compression = { 'sstable_compression' : 'LZ4Compressor' };
INSERT INTO configuration (name,value) VALUES('config.name2', 'qqqqqq;wwwwwww');
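-- Sketch: point lookup by the partition key defined above
SELECT value FROM configuration WHERE name = 'config.name2';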
--==========================================================================================
CREATE KEYSPACE IF NOT EXISTS myKeySpace WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3};
CREATE COLUMNFAMILY IF NOT EXISTS myKeySpace.books (id bigint PRIMARY KEY, name text);
--==========================================================================================
CREATE KEYSPACE IF NOT EXISTS baeldung WITH replication = {'class':'SimpleStrategy', 'replication_factor':1};
USE baeldung;
CREATE TABLE IF NOT EXISTS user_profile
(id int, username text, user_age int, writetime bigint, PRIMARY KEY (id, user_age))
WITH CLUSTERING ORDER BY (user_age DESC);
CREATE TABLE IF NOT EXISTS admin_profile
(id int, username text, user_age int, role text, writetime bigint, department text,
PRIMARY KEY (id, user_age))
WITH CLUSTERING ORDER BY (user_age DESC);
CREATE TABLE IF NOT EXISTS counter
(id text, count counter, PRIMARY KEY (id));
--==========================================================================================
CREATE KEYSPACE Kofu WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':1};
CREATE TABLE Kofu.Users (login VARCHAR PRIMARY KEY, firstname VARCHAR, lastname VARCHAR);
USE Kofu;
--==========================================================================================
CREATE ROLE IF NOT EXISTS 'read_only_user' WITH PASSWORD = 'read_only_user' AND LOGIN = TRUE;
CREATE ROLE IF NOT EXISTS 'not_even_reads_user' WITH PASSWORD = 'tiger' AND LOGIN = TRUE;
CREATE KEYSPACE IF NOT EXISTS bridge_table_token_test WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'1'};
CREATE TABLE IF NOT EXISTS bridge_table_token_test.tbl_test (key text PRIMARY KEY, value text);
INSERT INTO bridge_table_token_test.tbl_test (key, value) VALUES ('a', 'alpha');
GRANT SELECT ON KEYSPACE bridge_table_token_test TO read_only_user;
--==========================================================================================
CREATE KEYSPACE IF NOT EXISTS CONSISTENT_BUILDER_DEMO_CLUSTER
WITH replication = {
'class': 'SimpleStrategy',
'replication_factor': 1
};
--==========================================================================================
CREATE TABLE IF NOT EXISTS session_keys
(
key UInt32,
value String,
start_time DateTime
)
ENGINE = EmbeddedRocksDB(5) -- 5-second TTL via the engine argument; column-level TTL is a MergeTree-family feature
PRIMARY KEY key;
INSERT INTO session_keys
SELECT
toUInt32(rand() % 100) as key,
toString(rand()) as value,
toDateTime(now()) + (rand() % (60*60*24)) as start_time
FROM numbers_mt(10);
SELECT 'Issued keys (session_keys)';
SELECT * FROM session_keys LIMIT 5;
CREATE TABLE IF NOT EXISTS user_accounts
(
u_id UInt32,
login String
)
ENGINE = Log;
INSERT INTO user_accounts
SELECT
toUInt32(rand() % 100) as u_id, -- modulo 100 guarantees u_id values that repeat and overlap with session_keys
concat(randomPrintableASCII(2), '@', randomPrintableASCII(2), '.com') as login
FROM numbers_mt(10);
SELECT '';
SELECT 'User logins (user_accounts)';
SELECT * FROM user_accounts LIMIT 5;
SET join_algorithm = 'direct';
SELECT '';
SELECT 'Keys per user (join results)';
SELECT session_keys.value, user_accounts.login, session_keys.start_time
FROM user_accounts
JOIN session_keys ON session_keys.key = user_accounts.u_id
ORDER BY start_time DESC;
--==========================================================================================
[WITH expr_list|(subquery)]
SELECT [DISTINCT [ON (column1, column2, ...)]] expr_list
[FROM [db.]table | (subquery) | table_function] [FINAL]
[SAMPLE sample_coeff]
[ARRAY JOIN ...]
[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN (subquery)|table (ON <expr_list>)|(USING <column_list>)
[PREWHERE expr]
[WHERE expr]
[GROUP BY expr_list] [WITH ROLLUP|WITH CUBE] [WITH TOTALS]
[HAVING expr]
[ORDER BY expr_list] [WITH FILL] [FROM expr] [TO expr] [STEP expr] [INTERPOLATE [(expr_list)]]
[LIMIT [offset_value, ]n BY columns]
[LIMIT [n, ]m] [WITH TIES]
[SETTINGS ...]
[UNION ALL ...]
[INTO OUTFILE filename [COMPRESSION type [LEVEL level]] ]
[FORMAT format]
--==========================================================================================
SELECT
count(),
arrayStringConcat(arrayMap(x -> concat(demangle(addressToSymbol(x)), '\n ', addressToLine(x)), trace), '\n') AS sym
FROM system.trace_log
WHERE (query_id = '234324') AND (event_date = today())
GROUP BY trace
ORDER BY count() DESC
LIMIT 10
--==========================================================================================
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
(
name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
...
) ENGINE = Distributed(cluster, database, table[, sharding_key[, policy_name]])
[SETTINGS name=value, ...]
--==========================================================================================
CREATE TABLE hits_UserID_URL
(
`UserID` UInt32,
`URL` String,
`EventTime` DateTime
)
ENGINE = MergeTree
PRIMARY KEY (UserID, URL)
ORDER BY (UserID, URL, EventTime)
SETTINGS index_granularity = 8192, index_granularity_bytes = 0;
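-- Sketch (the UserID value is a placeholder): a filter on the leading primary-key column, which the sparse index serves well
SELECT URL, count(*) AS cnt
FROM hits_UserID_URL
WHERE UserID = 749927693
GROUP BY URL
ORDER BY cnt DESC
LIMIT 10;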
--==========================================================================================
CREATE TABLE otel.server_text_log_0
(
`Timestamp` DateTime64(9) CODEC(Delta(8), ZSTD(1)),
`EventDate` Date,
`EventTime` DateTime,
`TraceId` String CODEC(ZSTD(1)),
`SpanId` String CODEC(ZSTD(1)),
`TraceFlags` UInt32 CODEC(ZSTD(1)),
`SeverityText` LowCardinality(String) CODEC(ZSTD(1)),
`SeverityNumber` Int32 CODEC(ZSTD(1)),
`ServiceName` LowCardinality(String) CODEC(ZSTD(1)),
`Body` String CODEC(ZSTD(1)),
`Namespace` LowCardinality(String),
`Cell` LowCardinality(String),
`CloudProvider` LowCardinality(String),
`Region` LowCardinality(String),
`ContainerName` LowCardinality(String),
`PodName` LowCardinality(String),
`query_id` String CODEC(ZSTD(1)),
`logger_name` LowCardinality(String),
`source_file` LowCardinality(String),
`source_line` LowCardinality(String),
`level` LowCardinality(String),
`thread_name` LowCardinality(String),
`thread_id` LowCardinality(String),
`ResourceSchemaUrl` String CODEC(ZSTD(1)),
`ScopeSchemaUrl` String CODEC(ZSTD(1)),
`ScopeName` String CODEC(ZSTD(1)),
`ScopeVersion` String CODEC(ZSTD(1)),
`ScopeAttributes` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
`ResourceAttributes` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
`LogAttributes` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
INDEX idx_trace_id TraceId TYPE bloom_filter(0.001) GRANULARITY 1,
INDEX idx_thread_id thread_id TYPE bloom_filter(0.001) GRANULARITY 1,
INDEX idx_thread_name thread_name TYPE bloom_filter(0.001) GRANULARITY 1,
INDEX idx_Namespace Namespace TYPE bloom_filter(0.001) GRANULARITY 1,
INDEX idx_source_file source_file TYPE bloom_filter(0.001) GRANULARITY 1,
INDEX idx_scope_attr_key mapKeys(ScopeAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
INDEX idx_scope_attr_value mapValues(ScopeAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
INDEX idx_res_attr_key mapKeys(ResourceAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
INDEX idx_res_attr_value mapValues(ResourceAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
INDEX idx_log_attr_key mapKeys(LogAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
INDEX idx_log_attr_value mapValues(LogAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
INDEX idx_body Body TYPE tokenbf_v1(32768, 3, 0) GRANULARITY 1
)
ENGINE = SharedMergeTree
PARTITION BY EventDate
ORDER BY (PodName, Timestamp)
TTL EventTime + toIntervalDay(180)
SETTINGS index_granularity = 8192, ttl_only_drop_parts = 1;
--==========================================================================================
CREATE TABLE sales
(
sale_datetime DateTime, -- sale date and time
amount Decimal(18,2), -- sale amount
quantity UInt32, -- number of items
client_id UInt32, -- client ID
seller_id UInt32, -- seller ID
channel_id UInt32, -- sales channel ID
version UInt64 -- version column
)
ENGINE = MergeTree
PARTITION BY toYYYYMM(sale_datetime)
ORDER BY (client_id, seller_id, sale_datetime);
INSERT INTO sales (sale_datetime, amount, quantity, client_id, seller_id, channel_id, version) VALUES
('2023-10-01 10:00:00', 150.00, 3, 101, 201, 301, 1),
('2023-10-01 10:00:00', 150.00, 3, 101, 201, 301, 2),
('2023-10-02 11:30:00', 200.50, 5, 102, 202, 302, 1),
('2023-10-02 11:30:00', 200.50, 5, 102, 202, 302, 1),
('2023-10-03 14:45:00', 99.99, 2, 103, 203, 303, 1);
OPTIMIZE TABLE sales FINAL DEDUPLICATE;
--==========================================================================================
CREATE TABLE sales
(
sale_datetime DateTime, -- sale date and time
amount Decimal(18,2), -- sale amount
quantity UInt32, -- number of items
client_id UInt32, -- client ID
seller_id UInt32, -- seller ID
channel_id UInt32, -- sales channel ID
version UInt64 -- version column
)
ENGINE = ReplacingMergeTree
PARTITION BY toYYYYMM(sale_datetime)
ORDER BY (client_id, seller_id, sale_datetime);
INSERT INTO sales (sale_datetime, amount, quantity, client_id, seller_id, channel_id, version) VALUES
('2023-10-01 10:00:00', 150.00, 3, 101, 201, 301, 1),
('2023-10-01 10:00:00', 150.00, 3, 101, 201, 301, 2),
('2023-10-02 11:30:00', 200.50, 5, 102, 202, 302, 1),
('2023-10-02 11:30:00', 200.50, 5, 102, 202, 302, 1),
('2023-10-03 14:45:00', 99.99, 2, 103, 203, 303, 1);
SELECT 'initial output with ReplacingMergeTree';
SELECT * FROM sales;
--==========================================================================================
CREATE TABLE sales
(
sale_datetime DateTime, -- sale date and time
amount Decimal(18,2), -- sale amount
quantity UInt32, -- number of items
client_id UInt32, -- client ID
seller_id UInt32, -- seller ID
channel_id UInt32, -- sales channel ID
version UInt64 -- version column
)
ENGINE = ReplacingMergeTree (version)
PARTITION BY toYYYYMM(sale_datetime)
ORDER BY (client_id, seller_id, sale_datetime);
INSERT INTO sales (sale_datetime, amount, quantity, client_id, seller_id, channel_id, version) VALUES
('2023-10-01 10:00:00', 150.00, 3, 101, 201, 301, 1),
('2023-10-01 10:00:00', 150.00, 3, 101, 201, 301, 2),
('2023-10-02 11:30:00', 200.50, 5, 102, 202, 302, 1),
('2023-10-02 11:30:00', 200.50, 5, 102, 202, 302, 1),
('2023-10-03 14:45:00', 99.99, 2, 103, 203, 303, 1),
('2023-10-03 14:45:00', 99.99, 2, 103, 203, 303, 2);
SELECT 'output with parameterized ReplacingMergeTree';
SELECT * FROM sales;
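-- Sketch: collapse remaining duplicates at read time instead of waiting for a background merge
SELECT * FROM sales FINAL;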
--==========================================================================================
-- at the query level, applied to all tables in the query
SELECT count(*) FROM posts SETTINGS final = 1;
-- at the session level
SET final = 1; SELECT count(*) FROM posts;
--==========================================================================================
-- apply the row policy to all users
CREATE ROW POLICY cdc_policy ON votes FOR SELECT USING _peerdb_is_deleted = 0 TO ALL;
--==========================================================================================
CREATE MATERIALIZED VIEW deduplicated_posts_mv REFRESH EVERY 1 HOUR TO deduplicated_posts AS
SELECT * FROM posts FINAL WHERE _peerdb_is_deleted = 0;
--==========================================================================================
SELECT
owned_user_id,
COUNT(*) AS active_goals_count,
MAX(ts) AS latest_goal_time
FROM
(
SELECT
id,
argMax(owned_user_id, _peerdb_version) AS owned_user_id,
argMax(goal_title, _peerdb_version) AS goal_title,
argMax(goal_data, _peerdb_version) AS goal_data,
argMax(enabled, _peerdb_version) AS enabled,
argMax(ts, _peerdb_version) AS ts,
argMax(_peerdb_synced_at, _peerdb_version) AS _peerdb_synced_at,
argMax(_peerdb_is_deleted, _peerdb_version) AS _peerdb_is_deleted,
max(_peerdb_version) AS _peerdb_version
FROM peerdb.public_goals
WHERE enabled = true
GROUP BY id
) AS deduplicated_goals
GROUP BY owned_user_id;
--==========================================================================================
SELECT
owned_user_id,
COUNT(*) AS active_goals_count,
MAX(ts) AS latest_goal_time
FROM
(
SELECT
*,
ROW_NUMBER() OVER (PARTITION BY id ORDER BY _peerdb_version DESC) AS rn
FROM peerdb.public_goals
WHERE enabled = true
) AS ranked_goals
WHERE rn = 1
GROUP BY owned_user_id;
--==========================================================================================
WITH
'{db_name}' AS db_name,
'{table_name}' AS table_name
SELECT
db_name,
table_name,
if(any(interface) = 1, 'native', if(any(interface) = 2, 'http', 'unknown')) AS used_interface,
if(used_interface = 'native', 'Native', any(used_formats)[1]) AS used_transport_format,
count() AS inserts,
avg(read_rows) AS rows_per_insert,
sum(read_rows) AS total_inserted_rows,
dateDiff('second', min(query_start_time_microseconds), max(event_time_microseconds)) AS total_time_including_client_loop_s,
sum(ProfileEvents['NetworkReceiveBytes']) AS total_received_bytes,
round(total_received_bytes / inserts) AS received_bytes_per_insert,
formatReadableSize(total_received_bytes) AS total_received_bytes_readable,
formatReadableSize(received_bytes_per_insert) AS received_bytes_per_insert_readable,
sum(ProfileEvents['MergeTreeDataWriterCompressedBytes']) AS total_written_bytes_compressed,
round(total_written_bytes_compressed / inserts) AS written_bytes_compressed_per_insert,
formatReadableSize(total_written_bytes_compressed) AS total_written_bytes_compressed_readable,
formatReadableSize(written_bytes_compressed_per_insert) AS written_bytes_compressed_per_insert_readable,
round(sum(query_duration_ms) / 1000, 2) AS total_insert_duration_s,
round(sum(ProfileEvents['NetworkReceiveElapsedMicroseconds']) / 1000 / 1000, 2) AS total_network_receive_duration_s,
round(sum(ProfileEvents['MergeTreeDataWriterSortingBlocksMicroseconds']) / 1000 / 1000, 2) AS total_block_sort_duration_s,
round(sum(ProfileEvents['WriteBufferFromS3Microseconds']) / 1000 / 1000, 2) AS total_storage_write_duration_s,
round(sum(ProfileEvents['OSCPUVirtualTimeMicroseconds']) / 1000 / 1000, 2) AS total_cpu_s,
sum(ProfileEvents['DelayedInserts']) AS num_delayed_inserts,
sum(ProfileEvents['DelayedInsertsMilliseconds']) AS total_delayed_inserts_ms,
round(quantiles(0.50)(query_duration_ms)[1], 2) AS insert_duration_ms_50th,
round(quantiles(0.95)(query_duration_ms)[1], 2) AS insert_duration_ms_95th,
round(quantiles(0.99)(query_duration_ms)[1], 2) AS insert_duration_ms_99th,
round(quantiles(0.50)(ProfileEvents['NetworkReceiveElapsedMicroseconds'])[1] / 1000, 2) AS network_receive_duration_ms_50th,
round(quantiles(0.95)(ProfileEvents['NetworkReceiveElapsedMicroseconds'])[1] / 1000, 2) AS network_receive_duration_ms_95th,
round(quantiles(0.99)(ProfileEvents['NetworkReceiveElapsedMicroseconds'])[1] / 1000, 2) AS network_receive_duration_ms_99th,
round(quantiles(0.50)(ProfileEvents['MergeTreeDataWriterSortingBlocksMicroseconds'])[1] / 1000, 2) AS block_sort_duration_ms_50th,
round(quantiles(0.95)(ProfileEvents['MergeTreeDataWriterSortingBlocksMicroseconds'])[1] / 1000, 2) AS block_sort_duration_ms_95th,
round(quantiles(0.99)(ProfileEvents['MergeTreeDataWriterSortingBlocksMicroseconds'])[1] / 1000, 2) AS block_sort_duration_ms_99th,
round(quantiles(0.50)(ProfileEvents['WriteBufferFromS3Microseconds'])[1] / 1000, 2) AS storage_write_duration_ms_50th,
round(quantiles(0.95)(ProfileEvents['WriteBufferFromS3Microseconds'])[1] / 1000, 2) AS storage_write_duration_ms_95th,
round(quantiles(0.99)(ProfileEvents['WriteBufferFromS3Microseconds'])[1] / 1000, 2) AS storage_write_duration_ms_99th,
round(quantiles(0.50)(ProfileEvents['NetworkReceiveBytes'])[1]) AS network_received_bytes_50th,
round(quantiles(0.95)(ProfileEvents['NetworkReceiveBytes'])[1]) AS network_received_bytes_95th,
round(quantiles(0.99)(ProfileEvents['NetworkReceiveBytes'])[1]) AS network_received_bytes_99th,
formatReadableSize(network_received_bytes_50th) AS network_received_bytes_50th_readable,
formatReadableSize(network_received_bytes_95th) AS network_received_bytes_95th_readable,
formatReadableSize(network_received_bytes_99th) AS network_received_bytes_99th_readable,
round(quantiles(0.50)(ProfileEvents['MergeTreeDataWriterUncompressedBytes'])[1]) AS mergetree_written_uncompressed_bytes_50th,
round(quantiles(0.95)(ProfileEvents['MergeTreeDataWriterUncompressedBytes'])[1]) AS mergetree_written_uncompressed_bytes_95th,
round(quantiles(0.99)(ProfileEvents['MergeTreeDataWriterUncompressedBytes'])[1]) AS mergetree_written_uncompressed_bytes_99th,
formatReadableSize(mergetree_written_uncompressed_bytes_50th) AS mergetree_written_uncompressed_bytes_50th_readable,
formatReadableSize(mergetree_written_uncompressed_bytes_95th) AS mergetree_written_uncompressed_bytes_95th_readable,
formatReadableSize(mergetree_written_uncompressed_bytes_99th) AS mergetree_written_uncompressed_bytes_99th_readable,
round(quantiles(0.50)(ProfileEvents['MergeTreeDataWriterCompressedBytes'])[1]) AS mergetree_written_compressed_bytes_50th,
round(quantiles(0.95)(ProfileEvents['MergeTreeDataWriterCompressedBytes'])[1]) AS mergetree_written_compressed_bytes_95th,
round(quantiles(0.99)(ProfileEvents['MergeTreeDataWriterCompressedBytes'])[1]) AS mergetree_written_compressed_bytes_99th,
formatReadableSize(mergetree_written_compressed_bytes_50th) AS mergetree_written_compressed_bytes_50th_readable,
formatReadableSize(mergetree_written_compressed_bytes_95th) AS mergetree_written_compressed_bytes_95th_readable,
formatReadableSize(mergetree_written_compressed_bytes_99th) AS mergetree_written_compressed_bytes_99th_readable,
round(quantiles(0.50)(memory_usage)[1]) AS memory_usage_bytes_50th,
round(quantiles(0.95)(memory_usage)[1]) AS memory_usage_bytes_95th,
round(quantiles(0.99)(memory_usage)[1]) AS memory_usage_bytes_99th,
formatReadableSize(memory_usage_bytes_50th) AS memory_usage_50th_readable,
formatReadableSize(memory_usage_bytes_95th) AS memory_usage_95th_readable,
formatReadableSize(memory_usage_bytes_99th) AS memory_usage_99th_readable,
round(quantiles(0.50)(ProfileEvents['OSCPUVirtualTimeMicroseconds'])[1] / 1000, 2) AS cpu_ms_50th,
round(quantiles(0.95)(ProfileEvents['OSCPUVirtualTimeMicroseconds'])[1] / 1000, 2) AS cpu_ms_95th,
round(quantiles(0.99)(ProfileEvents['OSCPUVirtualTimeMicroseconds'])[1] / 1000, 2) AS cpu_ms_99th,
quantiles(0.50)(length(thread_ids))[1] AS num_used_threads_50th,
quantiles(0.95)(length(thread_ids))[1] AS num_used_threads_95th,
quantiles(0.99)(length(thread_ids))[1] AS num_used_threads_99th,
quantiles(0.50)(peak_threads_usage)[1] AS num_used_threads_concurrently_50th,
quantiles(0.95)(peak_threads_usage)[1] AS num_used_threads_concurrently_95th,
quantiles(0.99)(peak_threads_usage)[1] AS num_used_threads_concurrently_99th,
any(ProfileEvents) AS profileevents_sample
FROM
clusterAllReplicas(default, system.query_log)
WHERE
has(tables, db_name || '.' || table_name)
AND length(tables) = 1
AND is_initial_query
AND query_kind = 'Insert'
AND type = 'QueryFinish'
FORMAT Vertical
SETTINGS
skip_unavailable_shards = 1,
output_format_pretty_single_large_number_tip_threshold = 0;
--==========================================================================================
CREATE TABLE hits
(
WatchID BIGINT NOT NULL,
JavaEnable SMALLINT NOT NULL,
Title TEXT NOT NULL,
GoodEvent SMALLINT NOT NULL,
EventTime TIMESTAMP NOT NULL,
EventDate Date NOT NULL,
CounterID INTEGER NOT NULL,
ClientIP INTEGER NOT NULL,
RegionID INTEGER NOT NULL,
UserID BIGINT NOT NULL,
CounterClass SMALLINT NOT NULL,
OS SMALLINT NOT NULL,
UserAgent SMALLINT NOT NULL,
URL TEXT NOT NULL,
Referer TEXT NOT NULL,
IsRefresh SMALLINT NOT NULL,
RefererCategoryID SMALLINT NOT NULL,
RefererRegionID INTEGER NOT NULL,
URLCategoryID SMALLINT NOT NULL,
URLRegionID INTEGER NOT NULL,
ResolutionWidth SMALLINT NOT NULL,
ResolutionHeight SMALLINT NOT NULL,
ResolutionDepth SMALLINT NOT NULL,
FlashMajor SMALLINT NOT NULL,
FlashMinor SMALLINT NOT NULL,
FlashMinor2 TEXT NOT NULL,
NetMajor SMALLINT NOT NULL,
NetMinor SMALLINT NOT NULL,
UserAgentMajor SMALLINT NOT NULL,
UserAgentMinor VARCHAR(255) NOT NULL,
CookieEnable SMALLINT NOT NULL,
JavascriptEnable SMALLINT NOT NULL,
IsMobile SMALLINT NOT NULL,
MobilePhone SMALLINT NOT NULL,
MobilePhoneModel TEXT NOT NULL,
Params TEXT NOT NULL,
IPNetworkID INTEGER NOT NULL,
TraficSourceID SMALLINT NOT NULL,
SearchEngineID SMALLINT NOT NULL,
SearchPhrase TEXT NOT NULL,
AdvEngineID SMALLINT NOT NULL,
IsArtifical SMALLINT NOT NULL,
WindowClientWidth SMALLINT NOT NULL,
WindowClientHeight SMALLINT NOT NULL,
ClientTimeZone SMALLINT NOT NULL,
ClientEventTime TIMESTAMP NOT NULL,
SilverlightVersion1 SMALLINT NOT NULL,
SilverlightVersion2 SMALLINT NOT NULL,
SilverlightVersion3 INTEGER NOT NULL,
SilverlightVersion4 SMALLINT NOT NULL,
PageCharset TEXT NOT NULL,
CodeVersion INTEGER NOT NULL,
IsLink SMALLINT NOT NULL,
IsDownload SMALLINT NOT NULL,
IsNotBounce SMALLINT NOT NULL,
FUniqID BIGINT NOT NULL,
OriginalURL TEXT NOT NULL,
HID INTEGER NOT NULL,
IsOldCounter SMALLINT NOT NULL,
IsEvent SMALLINT NOT NULL,
IsParameter SMALLINT NOT NULL,
DontCountHits SMALLINT NOT NULL,
WithHash SMALLINT NOT NULL,
HitColor CHAR NOT NULL,
LocalEventTime TIMESTAMP NOT NULL,
Age SMALLINT NOT NULL,
Sex SMALLINT NOT NULL,
Income SMALLINT NOT NULL,
Interests SMALLINT NOT NULL,
Robotness SMALLINT NOT NULL,
RemoteIP INTEGER NOT NULL,
WindowName INTEGER NOT NULL,
OpenerName INTEGER NOT NULL,
HistoryLength SMALLINT NOT NULL,
BrowserLanguage TEXT NOT NULL,
BrowserCountry TEXT NOT NULL,
SocialNetwork TEXT NOT NULL,
SocialAction TEXT NOT NULL,
HTTPError SMALLINT NOT NULL,
SendTiming INTEGER NOT NULL,
DNSTiming INTEGER NOT NULL,
ConnectTiming INTEGER NOT NULL,
ResponseStartTiming INTEGER NOT NULL,
ResponseEndTiming INTEGER NOT NULL,
FetchTiming INTEGER NOT NULL,
SocialSourceNetworkID SMALLINT NOT NULL,
SocialSourcePage TEXT NOT NULL,
ParamPrice BIGINT NOT NULL,
ParamOrderID TEXT NOT NULL,
ParamCurrency TEXT NOT NULL,
ParamCurrencyID SMALLINT NOT NULL,
OpenstatServiceName TEXT NOT NULL,
OpenstatCampaignID TEXT NOT NULL,
OpenstatAdID TEXT NOT NULL,
OpenstatSourceID TEXT NOT NULL,
UTMSource TEXT NOT NULL,
UTMMedium TEXT NOT NULL,
UTMCampaign TEXT NOT NULL,
UTMContent TEXT NOT NULL,
UTMTerm TEXT NOT NULL,
FromTag TEXT NOT NULL,
HasGCLID SMALLINT NOT NULL,
RefererHash BIGINT NOT NULL,
URLHash BIGINT NOT NULL,
CLID INTEGER NOT NULL,
PRIMARY KEY (CounterID, EventDate, UserID, EventTime, WatchID)
)
ENGINE = MergeTree
SETTINGS
-- old_parts_lifetime = 1,
parts_to_delay_insert = 10_000,
inactive_parts_to_delay_insert = 10_000,
parts_to_throw_insert = 10_000,
replicated_deduplication_window = 0,
non_replicated_deduplication_window = 0;
--==========================================================================================
WITH
'{db_name}' AS db_name,
'{table_name}' AS table_name,
(SELECT
min(event_time)
FROM
clusterAllReplicas(default, system.query_log)
WHERE
has(tables, db_name || '.' || table_name)
AND length(tables) = 1
AND is_initial_query
AND query_kind = 'Insert') AS insert_sequence_start_time,
(SELECT
max(event_time)
FROM
clusterAllReplicas(default, system.query_log)
WHERE
has(tables, db_name || '.' || table_name)
AND length(tables) = 1
AND is_initial_query
AND query_kind = 'Insert') AS insert_sequence_end_time
SELECT
round(quantiles(0.50)(ProfileEvent_OSCPUVirtualTimeMicroseconds)[1] / 1_000_000,4) AS cpu_usage_50th,
round(quantiles(0.95)(ProfileEvent_OSCPUVirtualTimeMicroseconds)[1] / 1_000_000,4) AS cpu_usage_95th,
round(quantiles(0.99)(ProfileEvent_OSCPUVirtualTimeMicroseconds)[1] / 1_000_000,4) AS cpu_usage_99th,
round(quantiles(0.50)(CurrentMetric_MemoryTracking)[1]) AS memory_usage_50th,
round(quantiles(0.95)(CurrentMetric_MemoryTracking)[1]) AS memory_usage_95th,
round(quantiles(0.99)(CurrentMetric_MemoryTracking)[1]) AS memory_usage_99th,
formatReadableSize(memory_usage_50th) AS memory_usage_50th_readable,
formatReadableSize(memory_usage_95th) AS memory_usage_95th_readable,
formatReadableSize(memory_usage_99th) AS memory_usage_99th_readable
FROM clusterAllReplicas(default, system.metric_log)
WHERE event_time >= insert_sequence_start_time AND event_time <= insert_sequence_end_time
FORMAT Vertical
SETTINGS
skip_unavailable_shards = 1,
output_format_pretty_single_large_number_tip_threshold = 0;
--==========================================================================================
SELECT
round(quantiles(0.50)(ProfileEvent_OSCPUVirtualTimeMicroseconds)[1] / 1_000_000,4) AS cpu_usage_50th,
round(quantiles(0.95)(ProfileEvent_OSCPUVirtualTimeMicroseconds)[1] / 1_000_000,4) AS cpu_usage_95th,
round(quantiles(0.99)(ProfileEvent_OSCPUVirtualTimeMicroseconds)[1] / 1_000_000,4) AS cpu_usage_99th,
round(quantiles(0.50)(CurrentMetric_MemoryTracking)[1]) AS memory_usage_50th,
round(quantiles(0.95)(CurrentMetric_MemoryTracking)[1]) AS memory_usage_95th,
round(quantiles(0.99)(CurrentMetric_MemoryTracking)[1]) AS memory_usage_99th,
formatReadableSize(memory_usage_50th) AS memory_usage_50th_readable,
formatReadableSize(memory_usage_95th) AS memory_usage_95th_readable,
formatReadableSize(memory_usage_99th) AS memory_usage_99th_readable
FROM clusterAllReplicas(default, system.metric_log)
WHERE event_time >= now() - INTERVAL 5 MINUTE
FORMAT Vertical
SETTINGS
skip_unavailable_shards = 1,
output_format_pretty_single_large_number_tip_threshold = 0;
--==========================================================================================
--====================================
with
reference as (
select
avg(f1) as f1,
avg(f2) as f2,
avg(f3) as f3,
avg(f4) as f4,
avg(f5) as f5
from composition
where
id = any($composition_ids)
)
select
match.id,
(
abs(match.f1 - reference.f1) +
abs(match.f2 - reference.f2) +
abs(match.f3 - reference.f3) +
abs(match.f4 - reference.f4) +
abs(match.f5 - reference.f5)
) / 5 as distance
from reference
right join composition as match on
match.id <> all($composition_ids) and
match.f1 >= reference.f1 - $margin and match.f1 <= reference.f1 + $margin and
match.f2 >= reference.f2 - $margin and match.f2 <= reference.f2 + $margin and
match.f3 >= reference.f3 - $margin and match.f3 <= reference.f3 + $margin and
match.f4 >= reference.f4 - $margin and match.f4 <= reference.f4 + $margin and
match.f5 >= reference.f5 - $margin and match.f5 <= reference.f5 + $margin
order by distance
limit 100
--====================================
create table composition (
id uuid not null primary key,
f1 float4 not null,
f2 float4 not null,
f3 float4 not null,
f4 float4 not null,
f5 float4 not null
);
--====================================
insert into composition (id, f1, f2, f3, f4, f5)
values ($id, $f1, $f2, $f3, $f4, $f5)
on conflict (id) do update
set
f1 = $f1,
f2 = $f2,
f3 = $f3,
f4 = $f4,
f5 = $f5
--====================================
create table composition (
id uuid not null primary key,
f1 float4 not null,
f2 float4 not null,
f3 float4 not null,
f4 float4 not null,
f5 float4 not null
);
create index composition_features on composition (
f1, f2, f3, f4, f5
);
--====================================
-- OLTP
ALTER SYSTEM SET autovacuum_max_workers = 4;
ALTER SYSTEM SET autovacuum_naptime = '10min';
ALTER SYSTEM SET autovacuum_vacuum_cost_limit = 4000;
ALTER SYSTEM SET autovacuum_vacuum_cost_delay = 10;
ALTER SYSTEM SET autovacuum_vacuum_scale_factor = 0.2;
ALTER SYSTEM SET autovacuum_analyze_scale_factor = 0.3;
-- OLAP
ALTER SYSTEM SET autovacuum_max_workers = 4;
ALTER SYSTEM SET autovacuum_naptime = '10min';
ALTER SYSTEM SET autovacuum_vacuum_cost_limit = 4000;
ALTER SYSTEM SET autovacuum_vacuum_cost_delay = 10;
ALTER SYSTEM SET autovacuum_vacuum_scale_factor = 0.2;
ALTER SYSTEM SET autovacuum_analyze_scale_factor = 0.3;
alter table <name>
add constraint either_email
check (email is not null or p2pmail is not null);
--==============================================================
alter table <name>
add constraint either_email
check ((email is null) <> (p2pmail is null));
alter table the_table
add constraint check_at_least_one_email
check (num_nonnulls(email, p2pmail) > 0);
alter table the_table
add constraint check_at_least_one_email
check (num_nonnulls(nullif(trim(email),''), nullif(trim(p2pmail),'')) > 0);
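-- Sketch: sanity-check the constraint (table and column names as above)
insert into the_table (email, p2pmail) values (null, null); -- rejected: both missing
insert into the_table (email, p2pmail) values ('a@example.com', null); -- accepted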
--==============================================================
ALTER TABLE mytable ADD COLUMN mycolumn character varying(50) NOT NULL DEFAULT 'foo';
-- ... some work (set real values as you want) ...
ALTER TABLE mytable ALTER COLUMN mycolumn DROP DEFAULT;
--==============================================================
SELECT exists(SELECT 1 from regexp_split_to_table('abcdéfg','') x where ascii(x) not between 1 and 127);
CHECK (my_column ~ '^[ -~]*$')
CHECK (my_column ~ '^[\x00-\x7F]*$')
alter table your_table
add constraint allow_ascii_only
check (your_column ~ '^[a-zA-Z0-9]+$');
--==============================================================
DROP TABLE IF EXISTS department, employee;
CREATE TABLE IF NOT EXISTS department(
id INT PRIMARY KEY,
name VARCHAR
);
create table IF NOT EXISTS employee(
id INT PRIMARY KEY,
name VARCHAR,
salary INT,
department_id INT,
CONSTRAINT fk_department_id
FOREIGN KEY(department_id)
REFERENCES department(id) ON DELETE CASCADE
);
INSERT INTO department(id, name) VALUES
(1, 'department-1'),
(2, 'department-2'),
(3, 'department-3'),
(4, 'department-4');
INSERT INTO employee(id, name, salary, department_id) VALUES
(1, 'employee-1', 100, 1),
(2, 'employee-2', 200, 2),
(3, 'employee-3', 300, 3),
(4, 'employee-1', 400, 1),
(5, 'employee-2', 400, 2),
(6, 'employee-3', 500, 3);
-- additional tables: organization / department / employee sample data
insert into organization (id, name) values (1, 'Test1');
insert into organization (id, name) values (2, 'Test2');
insert into organization (id, name) values (3, 'Test3');
insert into organization (id, name) values (4, 'Test4');
insert into organization (id, name) values (5, 'Test5');
insert into department (id, name, organization_id) values (1, 'Test1', 1);
insert into department (id, name, organization_id) values (2, 'Test2', 1);
insert into department (id, name, organization_id) values (3, 'Test3', 1);
insert into department (id, name, organization_id) values (4, 'Test4', 2);
insert into department (id, name, organization_id) values (5, 'Test5', 2);
insert into department (id, name, organization_id) values (6, 'Test6', 3);
insert into department (id, name, organization_id) values (7, 'Test7', 4);
insert into department (id, name, organization_id) values (8, 'Test8', 5);
insert into department (id, name, organization_id) values (9, 'Test9', 5);
insert into employee (id, first_name, last_name, position, salary, age, department_id, organization_id) values (1, 'John', 'Smith', 'Developer', 10000, 30, 1, 1);
insert into employee (id, first_name, last_name, position, salary, age, department_id, organization_id) values (2, 'Adam', 'Hamilton', 'Developer', 12000, 35, 1, 1);
insert into employee (id, first_name, last_name, position, salary, age, department_id, organization_id) values (3, 'Tracy', 'Smith', 'Architect', 15000, 40, 1, 1);
insert into employee (id, first_name, last_name, position, salary, age, department_id, organization_id) values (4, 'Lucy', 'Kim', 'Developer', 13000, 25, 2, 1);
insert into employee (id, first_name, last_name, position, salary, age, department_id, organization_id) values (5, 'Peter', 'Wright', 'Director', 50000, 50, 4, 2);
insert into employee (id, first_name, last_name, position, salary, age, department_id, organization_id) values (6, 'Alan', 'Murray', 'Developer', 20000, 37, 4, 2);
insert into employee (id, first_name, last_name, position, salary, age, department_id, organization_id) values (7, 'Pamela', 'Anderson', 'Analyst', 7000, 27, 4, 2);
CREATE TABLE IF NOT EXISTS vets (
id INT GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY,
first_name TEXT,
last_name TEXT
);
CREATE INDEX ON vets (last_name);
CREATE TABLE IF NOT EXISTS specialties (
id INT GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY,
name TEXT
);
CREATE INDEX ON specialties (name);
CREATE TABLE IF NOT EXISTS vet_specialties (
vet_id INT NOT NULL REFERENCES vets (id),
specialty_id INT NOT NULL REFERENCES specialties (id),
UNIQUE (vet_id, specialty_id)
);
CREATE TABLE IF NOT EXISTS types (
id INT GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY,
name TEXT
);
CREATE INDEX ON types (name);
CREATE TABLE IF NOT EXISTS owners (
id INT GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY,
first_name TEXT,
last_name TEXT,
address TEXT,
city TEXT,
telephone TEXT
);
CREATE INDEX ON owners (last_name);
CREATE TABLE IF NOT EXISTS pets (
id INT GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY,
name TEXT,
birth_date DATE,
type_id INT NOT NULL REFERENCES types (id),
owner_id INT REFERENCES owners (id)
);
CREATE INDEX ON pets (name);
CREATE INDEX ON pets (owner_id);
CREATE TABLE IF NOT EXISTS visits (
id INT GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY,
pet_id INT REFERENCES pets (id),
visit_date DATE,
description TEXT
);
CREATE INDEX ON visits (pet_id);
--================================================
INSERT INTO vets (first_name, last_name) SELECT 'James', 'Carter' WHERE NOT EXISTS (SELECT * FROM vets WHERE id=1);
INSERT INTO vets (first_name, last_name) SELECT 'Helen', 'Leary' WHERE NOT EXISTS (SELECT * FROM vets WHERE id=2);
INSERT INTO vets (first_name, last_name) SELECT 'Linda', 'Douglas' WHERE NOT EXISTS (SELECT * FROM vets WHERE id=3);
INSERT INTO vets (first_name, last_name) SELECT 'Rafael', 'Ortega' WHERE NOT EXISTS (SELECT * FROM vets WHERE id=4);
INSERT INTO vets (first_name, last_name) SELECT 'Henry', 'Stevens' WHERE NOT EXISTS (SELECT * FROM vets WHERE id=5);
INSERT INTO vets (first_name, last_name) SELECT 'Sharon', 'Jenkins' WHERE NOT EXISTS (SELECT * FROM vets WHERE id=6);
INSERT INTO specialties (name) SELECT 'radiology' WHERE NOT EXISTS (SELECT * FROM specialties WHERE name='radiology');
INSERT INTO specialties (name) SELECT 'surgery' WHERE NOT EXISTS (SELECT * FROM specialties WHERE name='surgery');
INSERT INTO specialties (name) SELECT 'dentistry' WHERE NOT EXISTS (SELECT * FROM specialties WHERE name='dentistry');
-- INSERT INTO vet_specialties VALUES (2, 1) ON CONFLICT (vet_id, specialty_id) DO NOTHING;
-- INSERT INTO vet_specialties VALUES (3, 2) ON CONFLICT (vet_id, specialty_id) DO NOTHING;
-- INSERT INTO vet_specialties VALUES (3, 3) ON CONFLICT (vet_id, specialty_id) DO NOTHING;
-- INSERT INTO vet_specialties VALUES (4, 2) ON CONFLICT (vet_id, specialty_id) DO NOTHING;
-- INSERT INTO vet_specialties VALUES (5, 1) ON CONFLICT (vet_id, specialty_id) DO NOTHING;
INSERT INTO types (name) SELECT 'cat' WHERE NOT EXISTS (SELECT * FROM types WHERE name='cat');
INSERT INTO types (name) SELECT 'dog' WHERE NOT EXISTS (SELECT * FROM types WHERE name='dog');
INSERT INTO types (name) SELECT 'lizard' WHERE NOT EXISTS (SELECT * FROM types WHERE name='lizard');
INSERT INTO types (name) SELECT 'snake' WHERE NOT EXISTS (SELECT * FROM types WHERE name='snake');
INSERT INTO types (name) SELECT 'bird' WHERE NOT EXISTS (SELECT * FROM types WHERE name='bird');
INSERT INTO types (name) SELECT 'hamster' WHERE NOT EXISTS (SELECT * FROM types WHERE name='hamster');
INSERT INTO owners (first_name, last_name, address, city, telephone) SELECT 'George', 'Franklin', '110 W. Liberty St.', 'Madison', '6085551023' WHERE NOT EXISTS (SELECT * FROM owners WHERE id=1);
INSERT INTO owners (first_name, last_name, address, city, telephone) SELECT 'Betty', 'Davis', '638 Cardinal Ave.', 'Sun Prairie', '6085551749' WHERE NOT EXISTS (SELECT * FROM owners WHERE id=2);
INSERT INTO owners (first_name, last_name, address, city, telephone) SELECT 'Eduardo', 'Rodriquez', '2693 Commerce St.', 'McFarland', '6085558763' WHERE NOT EXISTS (SELECT * FROM owners WHERE id=3);
INSERT INTO owners (first_name, last_name, address, city, telephone) SELECT 'Harold', 'Davis', '563 Friendly St.', 'Windsor', '6085553198' WHERE NOT EXISTS (SELECT * FROM owners WHERE id=4);
INSERT INTO owners (first_name, last_name, address, city, telephone) SELECT 'Peter', 'McTavish', '2387 S. Fair Way', 'Madison', '6085552765' WHERE NOT EXISTS (SELECT * FROM owners WHERE id=5);
INSERT INTO owners (first_name, last_name, address, city, telephone) SELECT 'Jean', 'Coleman', '105 N. Lake St.', 'Monona', '6085552654' WHERE NOT EXISTS (SELECT * FROM owners WHERE id=6);
INSERT INTO owners (first_name, last_name, address, city, telephone) SELECT 'Jeff', 'Black', '1450 Oak Blvd.', 'Monona', '6085555387' WHERE NOT EXISTS (SELECT * FROM owners WHERE id=7);
INSERT INTO owners (first_name, last_name, address, city, telephone) SELECT 'Maria', 'Escobito', '345 Maple St.', 'Madison', '6085557683' WHERE NOT EXISTS (SELECT * FROM owners WHERE id=8);
INSERT INTO owners (first_name, last_name, address, city, telephone) SELECT 'David', 'Schroeder', '2749 Blackhawk Trail', 'Madison', '6085559435' WHERE NOT EXISTS (SELECT * FROM owners WHERE id=9);
INSERT INTO owners (first_name, last_name, address, city, telephone) SELECT 'Carlos', 'Estaban', '2335 Independence La.', 'Waunakee', '6085555487' WHERE NOT EXISTS (SELECT * FROM owners WHERE id=10);
INSERT INTO pets (name, birth_date, type_id, owner_id) SELECT 'Leo', '2000-09-07', 1, 1 WHERE NOT EXISTS (SELECT * FROM pets WHERE id=1);
INSERT INTO pets (name, birth_date, type_id, owner_id) SELECT 'Basil', '2002-08-06', 6, 2 WHERE NOT EXISTS (SELECT * FROM pets WHERE id=2);
INSERT INTO pets (name, birth_date, type_id, owner_id) SELECT 'Rosy', '2001-04-17', 2, 3 WHERE NOT EXISTS (SELECT * FROM pets WHERE id=3);
INSERT INTO pets (name, birth_date, type_id, owner_id) SELECT 'Jewel', '2000-03-07', 2, 3 WHERE NOT EXISTS (SELECT * FROM pets WHERE id=4);
INSERT INTO pets (name, birth_date, type_id, owner_id) SELECT 'Iggy', '2000-11-30', 3, 4 WHERE NOT EXISTS (SELECT * FROM pets WHERE id=5);
INSERT INTO pets (name, birth_date, type_id, owner_id) SELECT 'George', '2000-01-20', 4, 5 WHERE NOT EXISTS (SELECT * FROM pets WHERE id=6);
INSERT INTO pets (name, birth_date, type_id, owner_id) SELECT 'Samantha', '1995-09-04', 1, 6 WHERE NOT EXISTS (SELECT * FROM pets WHERE id=7);
INSERT INTO pets (name, birth_date, type_id, owner_id) SELECT 'Max', '1995-09-04', 1, 6 WHERE NOT EXISTS (SELECT * FROM pets WHERE id=8);
INSERT INTO pets (name, birth_date, type_id, owner_id) SELECT 'Lucky', '1999-08-06', 5, 7 WHERE NOT EXISTS (SELECT * FROM pets WHERE id=9);
INSERT INTO pets (name, birth_date, type_id, owner_id) SELECT 'Mulligan', '1997-02-24', 2, 8 WHERE NOT EXISTS (SELECT * FROM pets WHERE id=10);
INSERT INTO pets (name, birth_date, type_id, owner_id) SELECT 'Freddy', '2000-03-09', 5, 9 WHERE NOT EXISTS (SELECT * FROM pets WHERE id=11);
INSERT INTO pets (name, birth_date, type_id, owner_id) SELECT 'Lucky', '2000-06-24', 2, 10 WHERE NOT EXISTS (SELECT * FROM pets WHERE id=12);
INSERT INTO pets (name, birth_date, type_id, owner_id) SELECT 'Sly', '2002-06-08', 1, 10 WHERE NOT EXISTS (SELECT * FROM pets WHERE id=13);
INSERT INTO visits (pet_id, visit_date, description) SELECT 7, '2010-03-04', 'rabies shot' WHERE NOT EXISTS (SELECT * FROM visits WHERE id=1);
INSERT INTO visits (pet_id, visit_date, description) SELECT 8, '2011-03-04', 'rabies shot' WHERE NOT EXISTS (SELECT * FROM visits WHERE id=2);
INSERT INTO visits (pet_id, visit_date, description) SELECT 8, '2009-06-04', 'neutered' WHERE NOT EXISTS (SELECT * FROM visits WHERE id=3);
INSERT INTO visits (pet_id, visit_date, description) SELECT 7, '2008-09-04', 'spayed' WHERE NOT EXISTS (SELECT * FROM visits WHERE id=4);
--================================================
-- Build a list of first and last names
WITH first_names AS (
SELECT 'James' AS name UNION ALL
SELECT 'Mary' UNION ALL
SELECT 'John'
),
last_names AS (
SELECT 'Smith' AS name UNION ALL
SELECT 'Johnson' UNION ALL
SELECT 'Williams'
),
random_names AS (
SELECT
first_names.name AS first_name,
last_names.name AS last_name
FROM
first_names
CROSS JOIN
last_names
ORDER BY
RANDOM()
LIMIT 250
)
--INSERT INTO vets (first_name, last_name)
SELECT first_name, last_name FROM random_names;
-- Assign specialties to 80% of the vets
WITH vet_ids AS (
SELECT id
FROM vets
ORDER BY RANDOM()
LIMIT 200 -- 80% of 250
),
specialties AS (
SELECT id
FROM specialties
),
random_specialties AS (
SELECT
vet_ids.id AS vet_id,
specialties.id AS specialty_id
FROM
vet_ids
CROSS JOIN
specialties
ORDER BY
RANDOM()
LIMIT 300 -- on average, 2 specialties per vet
)
INSERT INTO vet_specialties (vet_id, specialty_id)
SELECT
vet_id,
specialty_id
FROM (
SELECT
vet_id,
specialty_id,
ROW_NUMBER() OVER (PARTITION BY vet_id ORDER BY RANDOM()) AS rn
FROM
random_specialties
) tmp
WHERE
rn <= 2; -- assign at most 2 specialties per vet
--==========================================================================================
CREATE TABLE orders (
order_id INT,
order_date DATE,
...
) PARTITION BY RANGE COLUMNS (order_date) ( -- MySQL: plain RANGE needs an integer expression; RANGE COLUMNS handles DATE
PARTITION p0 VALUES LESS THAN ('2024-01-01'),
PARTITION p1 VALUES LESS THAN ('2025-01-01'),
...
);
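-- PostgreSQL uses declarative partitioning instead; a minimal sketch (table and partition names illustrative):
CREATE TABLE orders_pg (
    order_id INT,
    order_date DATE
) PARTITION BY RANGE (order_date);
CREATE TABLE orders_pg_2024 PARTITION OF orders_pg
    FOR VALUES FROM ('2024-01-01') TO ('2025-01-01');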
--==========================================================================================
CREATE TABLE users (
user_id INT,
username VARCHAR(255),
...
) PARTITION BY HASH(user_id) PARTITIONS 4;
--==========================================================================================
CREATE TABLE sales (
sale_id INT,
region VARCHAR(255),
...
) PARTITION BY LIST COLUMNS (region) ( -- MySQL: plain LIST needs an integer expression; LIST COLUMNS handles strings
PARTITION p0 VALUES IN ('North', 'South'),
PARTITION p1 VALUES IN ('East', 'West')
);
--==========================================================================================
CREATE MATERIALIZED VIEW sales_summary AS
SELECT region, SUM(sales_amount) AS total_sales
FROM sales
GROUP BY region;
REFRESH MATERIALIZED VIEW sales_summary;
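-- PostgreSQL can refresh without blocking readers, but only if the view has a unique index (index name illustrative):
CREATE UNIQUE INDEX sales_summary_region_idx ON sales_summary (region);
REFRESH MATERIALIZED VIEW CONCURRENTLY sales_summary;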
--==========================================================================================
CREATE TABLE MaterializedActiveCustomers AS
SELECT CustomerID, CustomerName, ContactName, Country
FROM Customers
WHERE Status = 'Active';
DELIMITER //
CREATE EVENT UpdateMaterializedActiveCustomers
ON SCHEDULE EVERY 1 HOUR
DO
BEGIN
DELETE FROM MaterializedActiveCustomers;
INSERT INTO MaterializedActiveCustomers
SELECT CustomerID, CustomerName, ContactName, Country
FROM Customers
WHERE Status = 'Active';
END //
DELIMITER ;
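-- Prerequisite, easy to forget: the MySQL event scheduler must be on, or the event never fires:
SET GLOBAL event_scheduler = ON;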
--==========================================================================================
CREATE TABLE MaterializedActiveCustomers AS
SELECT CustomerID, CustomerName, ContactName, Country
FROM Customers
WHERE Status = 'Active';
DELIMITER //
CREATE TRIGGER after_customer_insert
AFTER INSERT ON Customers
FOR EACH ROW
BEGIN
IF NEW.Status = 'Active' THEN
INSERT INTO MaterializedActiveCustomers (CustomerID, CustomerName, ContactName, Country)
VALUES (NEW.CustomerID, NEW.CustomerName, NEW.ContactName, NEW.Country);
END IF;
END //
CREATE TRIGGER after_customer_update
AFTER UPDATE ON Customers
FOR EACH ROW
BEGIN
IF OLD.Status = 'Active' AND NEW.Status != 'Active' THEN
DELETE FROM MaterializedActiveCustomers WHERE CustomerID = OLD.CustomerID;
ELSEIF NEW.Status = 'Active' THEN
REPLACE INTO MaterializedActiveCustomers (CustomerID, CustomerName, ContactName, Country)
VALUES (NEW.CustomerID, NEW.CustomerName, NEW.ContactName, NEW.Country);
END IF;
END //
CREATE TRIGGER after_customer_delete
AFTER DELETE ON Customers
FOR EACH ROW
BEGIN
DELETE FROM MaterializedActiveCustomers WHERE CustomerID = OLD.CustomerID;
END //
DELIMITER ;
--==========================================================================================
SELECT a.employee_id, a.name, b.name AS manager_name
FROM employees a
INNER JOIN employees b ON a.manager_id = b.employee_id;
--==========================================================================================
SELECT a.*, b.*
FROM table1 a
LEFT JOIN LATERAL (
SELECT *
FROM table2 b
WHERE b.column1 = a.column1
ORDER BY b.column2 DESC
LIMIT 1
) b ON TRUE;
--==========================================================================================
SELECT COALESCE(a.column1, b.column1) AS column1, a.column2, b.column2
FROM table1 a
FULL OUTER JOIN table2 b ON a.column1 = b.column1;
--==========================================================================================
SELECT a.column1, b.column2
FROM table1 a
INNER JOIN table2 b ON a.column1 = b.column1 AND a.date_column BETWEEN '2023-01-01' AND '2023-12-31';
--==========================================================================================
SELECT a.*
FROM table1 a
LEFT JOIN table2 b ON a.column1 = b.column1
WHERE b.column1 IS NULL;
--==========================================================================================
SELECT a.*
FROM table1 a
WHERE EXISTS (SELECT 1 FROM table2 b WHERE a.column1 = b.column1);
--==========================================================================================
SELECT column1, column2, ROW_NUMBER() OVER (PARTITION BY column1 ORDER BY column2) AS row_num
FROM table_name;
SELECT empId, name, dept, row_number() OVER (ORDER BY empId) AS row_num FROM EMPLOYEE;
SELECT
DISTINCT price,
ROW_NUMBER () OVER (
ORDER BY
price
)
FROM
products
ORDER BY
price;
WITH prices AS (
SELECT
DISTINCT price
FROM
products
)
SELECT
price,
ROW_NUMBER () OVER (
ORDER BY
price
)
FROM
prices;
SELECT
price,
ROW_NUMBER () OVER (
ORDER BY
price
)
FROM
(
SELECT
DISTINCT price
FROM
products
) prices;
SELECT
*
FROM
(
SELECT
product_id,
product_name,
price,
ROW_NUMBER () OVER (
ORDER BY
product_name
) AS row_num
FROM
products
) x
WHERE
row_num BETWEEN 6 AND 10;
SELECT
*
FROM
products
WHERE
price = (
SELECT
price
FROM
(
SELECT
price,
ROW_NUMBER () OVER (
ORDER BY
price DESC
) nth
FROM
(
SELECT
DISTINCT (price)
FROM
products
) prices
) sorted_prices
WHERE
nth = 3
);
SELECT column1, column2, SUM(column2) OVER (ORDER BY column1) AS running_total
FROM table_name;
SELECT column1, column2, RANK() OVER (PARTITION BY column1 ORDER BY column2) AS rank
FROM table_name;
SELECT column1, column2, AVG(column2) OVER (PARTITION BY column1 ORDER BY column2 ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS moving_avg
FROM table_name;
--==========================================================================================
WITH RECURSIVE cte AS (
SELECT column1, column2
FROM table_name
WHERE condition
UNION ALL
SELECT t.column1, t.column2
FROM table_name t
INNER JOIN cte ON t.column1 = cte.column1
)
SELECT * FROM cte;
--==========================================================================================
SELECT json_column->>'key' AS value
FROM table_name;
SELECT json_agg(row_to_json(t))
FROM (SELECT column1, column2 FROM table_name) t;
UPDATE table_name
SET json_column = jsonb_set(json_column, '{key}', '"new_value"', true)
WHERE condition;
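-- Companion jsonb operators, a quick illustration: - removes a key, || merges objects:
SELECT '{"a": 1, "b": 2}'::jsonb - 'a';        -- {"b": 2}
SELECT '{"a": 1}'::jsonb || '{"b": 2}'::jsonb; -- {"a": 1, "b": 2}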
--==========================================================================================
SELECT
category,
SUM(CASE WHEN year = 2021 THEN sales ELSE 0 END) AS sales_2021,
SUM(CASE WHEN year = 2022 THEN sales ELSE 0 END) AS sales_2022
FROM sales_data
GROUP BY category;
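-- The same pivot with PostgreSQL's FILTER clause; note FILTER yields NULL (not 0) when nothing matches:
SELECT
    category,
    SUM(sales) FILTER (WHERE year = 2021) AS sales_2021,
    SUM(sales) FILTER (WHERE year = 2022) AS sales_2022
FROM sales_data
GROUP BY category;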
--==========================================================================================
-- EXECUTE with string concatenation works only inside PL/pgSQL; concatenated identifiers risk SQL injection
EXECUTE 'SELECT * FROM ' || table_name || ' WHERE ' || condition;
PREPARE stmt AS SELECT * FROM table_name WHERE column1 = $1;
EXECUTE stmt('value');
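-- Prepared statements persist until the session ends; release one explicitly with:
DEALLOCATE stmt;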
--==========================================================================================
CREATE DATABASE IF NOT EXISTS petclinic;
ALTER DATABASE petclinic
DEFAULT CHARACTER SET utf8
DEFAULT COLLATE utf8_general_ci;
GRANT ALL PRIVILEGES ON petclinic.* TO 'petclinic'@'%' IDENTIFIED BY 'petclinic';
--==========================================================================================
--========================================
CREATE TABLE sales
(
"Order Number" INTEGER,
CustomerKey INTEGER,
"Cars Owned" INTEGER,
Quantity INTEGER
);
INSERT INTO sales("Order Number", CustomerKey, "Cars Owned", Quantity)
SELECT number + 100000000 AS "Order Number",
number % 20 AS CustomerKey,
number % 20 % 5 AS "Cars Owned",
number % 10 AS Quantity
FROM generate_series(1, 500) as number;
--========================================
SELECT "Cars Owned", SUM(Quantity * "Cars Owned") AS "Calculated Quantity"
FROM sales
WHERE Quantity > (SELECT AVG(Quantity) FROM sales)
AND "Cars Owned" > 1
AND "Cars Owned" < 4
GROUP BY "Cars Owned"
--========================================
WITH filtered_sales AS (
SELECT "Cars Owned", Quantity
FROM sales
WHERE Quantity > (
SELECT AVG(Quantity) FROM sales
)
)
SELECT "Cars Owned",
SUM(Quantity * "Cars Owned") AS "Calculated Quantity",
1 + 2 * SUM(Quantity * "Cars Owned") AS "Final Quantity"
FROM filtered_sales
WHERE "Cars Owned" > 1 AND "Cars Owned" < 4
GROUP BY "Cars Owned"
--========================================
--======================
create schema if not exists main_schema authorization main_user;
create schema if not exists additional_schema authorization pg_monitor;
create user custom_user with nosuperuser nocreatedb nocreaterole password 'customUserPassword' connection limit 10;
grant usage on schema main_schema to custom_user;
create table if not exists additional_schema.additional_table (
id bigserial primary key,
name text not null
);
--======================
--liquibase formatted sql
--changeset ivan.vakhrushev:2024.12.09:warehouse.table
create table if not exists warehouse
(
id bigserial primary key,
name text not null
);
comment on table warehouse is 'Information about the warehouses';
comment on column warehouse.id is 'Unique identifier of the warehouse';
comment on column warehouse.name is 'Human readable name of the warehouse';
--changeset ivan.vakhrushev:2024.12.09:warehouse.initial.data
insert into warehouse (name)
select string_agg(substr(md5(random()::text), 1, 8), '')
from generate_series(1, 400);
--======================
--===================================================
EXPLAIN (analyze, costs off, summary off, timing off, buffers off)
WITH RECURSIVE t(n) AS (
VALUES (1)
UNION ALL
SELECT n+1 FROM t WHERE n < 100
)
SELECT sum(n) FROM t;
--===================================================
EXPLAIN (analyze, costs off, summary off, timing off, buffers off)
SELECT book_ref, count(*) OVER (PARTITION BY book_ref)
FROM tickets;
--===================================================
EXPLAIN (analyze, costs off, summary off, timing off, buffers off)
SELECT * FROM JSON_TABLE('{"a":1}'::jsonb, '$[*]' COLUMNS (a int));
--===================================================
CREATE EXTENSION file_fdw;
CREATE SERVER file_server
FOREIGN DATA WRAPPER file_fdw;
CREATE FOREIGN TABLE ft (
id integer
)
SERVER file_server
OPTIONS (
filename '/tmp/t.txt',
on_error 'ignore',
log_verbosity 'verbose'
);
ALTER FOREIGN TABLE ft OPTIONS (SET log_verbosity 'silent');
SELECT * FROM ft;
--===================================================
SELECT pg_stat_statements_reset();
SET SEARCH_PATH = pg_catalog;
SET search_path = public;
set search_path=bookings,public;
SET CUSTOM.PARAMETER = 1;
SET CUSTOM.parameter = 2;
set custom.parameter=42;
SELECT queryid, query, calls
FROM pg_stat_statements
WHERE query ILIKE 'SET%';
--===================================================
SELECT crc32('42'::bytea), crc32c('42'::bytea);
--===================================================
\lo_import 'logfile'
-- psql output: lo_import 24578 (the OID of the new large object)
GRANT SELECT ON LARGE OBJECT 24578 TO public;
SELECT has_largeobject_privilege('alice', 24578, 'SELECT');
--===================================================
SELECT $2||$1 AS str \parse q
SELECT * FROM pg_prepared_statements \gx
\bind_named q 42 'Answer: ' \g
\bind_named q 'World!' 'Hello,' \g
--===================================================
$ pg_basebackup -D backup --format=tar --gzip
$ ls -l backup
$ pg_verifybackup backup --format=tar --no-parse-wal
--===================================================
--==========================================================================================
SELECT
name, email, COUNT(*)
FROM
users
GROUP BY
name, email
HAVING
COUNT(*) > 1
--==========================================================================================
SELECT u.id, u.name, u.email
FROM users u, users u2
WHERE u.name = u2.name AND u.email = u2.email AND u.id > u2.id
--==========================================================================================
DELETE FROM users
WHERE id IN (
SELECT u.id/*, u.name, u.email*/
FROM users u, users u2
WHERE u.name = u2.name AND u.email = u2.email AND u.id > u2.id
)
--==========================================================================================
declare @YourTable table (id int, name varchar(10), email varchar(50))
INSERT @YourTable VALUES (1,'John','John-email')
INSERT @YourTable VALUES (2,'John','John-email')
INSERT @YourTable VALUES (3,'fred','John-email')
INSERT @YourTable VALUES (4,'fred','fred-email')
INSERT @YourTable VALUES (5,'sam','sam-email')
INSERT @YourTable VALUES (6,'sam','sam-email')
SELECT
name,email, COUNT(*) AS CountOf
FROM @YourTable
GROUP BY name,email
HAVING COUNT(*)>1
--==========================================================================================
SELECT *
FROM (
SELECT a.*
, Row_Number() OVER (PARTITION BY Name, Age ORDER BY Name) AS r
FROM Customers AS a
) AS b
WHERE r > 1;
--==========================================================================================
SELECT name, email
FROM users
WHERE email in
(SELECT email FROM users
GROUP BY email
HAVING COUNT(*)>1)
--==========================================================================================
SELECT email, GROUP_CONCAT(id)
FROM users
GROUP BY email
HAVING COUNT(email) > 1;
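-- PostgreSQL equivalent of MySQL's GROUP_CONCAT (STRING_AGG needs a text argument):
SELECT email, STRING_AGG(id::text, ',')
FROM users
GROUP BY email
HAVING COUNT(email) > 1;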
--==========================================================================================
SELECT *
FROM <table>
WHERE
id NOT IN (
SELECT MIN(id)
FROM table
GROUP BY <column1>, <column2>
);
--==========================================================================================
select * from Users a
where exists (select * from Users b
where (a.name = b.name
or a.email = b.email)
and a.ID != b.id)
--==========================================================================================
SELECT * from
(SELECT name, email,
COUNT(name) OVER (PARTITION BY name, email) cnt
FROM users)
WHERE cnt > 1;
--==========================================================================================
;WITH CTE AS
(
SELECT ROW_NUMBER() OVER (PARTITION BY name ORDER BY name) AS T FROM @YourTable
)
DELETE FROM CTE WHERE T > 1
--==========================================================================================
WITH duplicates AS (SELECT id, ROW_NUMBER() OVER(PARTITION BY email ORDER BY email_confirmed DESC) as rownum
FROM contacts)
DELETE FROM contacts
USING duplicates
WHERE contacts.id = duplicates.id AND duplicates.rownum > 1;
--==========================================================================================
WITH duplicates AS (select id, row_number() over(partition by firstname, lastname, email order by age DESC) as rownum FROM contacts)
DELETE FROM contacts
USING duplicates
WHERE contacts.id = duplicates.id AND duplicates.rownum > 5;
--==========================================================================================
select * from product where productid not in (SELECT min(productid) FROM Product GROUP BY name);
--==========================================================================================
select * from emp e1 inner join emp e2 where e1.id < e2.id and e1.name = e2.name;
select e1.ProductId, e1.Name from Product e1 inner join Product e2 ON e1.ProductID < e2.ProductID WHERE e1.name = e2.name;
select DISTINCT e1.ProductId from Product e1 inner join Product e2 ON e1.ProductID < e2.ProductID WHERE e1.name = e2.name;
select DISTINCT e2.ProductId from Product e1 inner join Product e2 ON e1.ProductID < e2.ProductID WHERE e1.name = e2.name;
--==========================================================================================
select ProductID from Product e1 where e1.ProductID >= all(select e2.ProductID from Product e2 where e1.name = e2.name) order by ProductID;
select ProductID from Product e1 where e1.ProductID <= all(select e2.ProductID from Product e2 where e1.name = e2.name) order by ProductID;
--==========================================================================================
select empId, (select min(empId) from EMPLOYEE e2 where e.name = e2.name) as min_empId FROM EMPLOYEE e;
select e.empId FROM EMPLOYEE e where e.empId > (select min(e2.empId) FROM EMPLOYEE e2 WHERE e2.name = e.name);
select e.empId FROM EMPLOYEE e where e.empId in (select min(e2.empId) FROM EMPLOYEE e2 GROUP BY e2.name);
--==========================================================================================
WITH Sales_CTE AS (
SELECT ProductID, SUM(Quantity) AS TotalQuantity
FROM Sales
GROUP BY ProductID
)
SELECT ProductID, TotalQuantity
FROM Sales_CTE
WHERE TotalQuantity > 100;
--==========================================================================================
--==================================================================
-- pg_auto_embeddings v1.2.41
-- 1. Create schema for storing data of pg_auto_embeddings
CREATE SCHEMA IF NOT EXISTS pgae;
-- 2. Create table: credentials to store server url and appId/appSecret
CREATE TABLE IF NOT EXISTS pgae.credentials (
id int PRIMARY KEY DEFAULT 1,
server_host TEXT NOT NULL,
server_port TEXT NOT NULL,
user_login TEXT NOT NULL,
user_password TEXT NOT NULL,
user_database TEXT NOT NULL,
user_schema TEXT NOT NULL,
user_table TEXT NOT NULL,
model_name TEXT NOT NULL,
api_key TEXT NOT NULL
);
-- 3. Create table: list of registered auto_embeddings (source_schema, source_table, source_col, target_col, embedding_type)
CREATE TABLE IF NOT EXISTS pgae.auto_embeddings (
source_schema TEXT NOT NULL,
source_table TEXT NOT NULL,
source_col TEXT NOT NULL,
target_col TEXT NOT NULL,
PRIMARY KEY (source_schema, source_table, source_col, target_col)
);
-- functions:
-- bool pgae_create_auto_embedding(source_schema, source_table, source_col, destination_col);
-- bool pgae_delete_auto_embedding(source_schema, source_table, source_col, destination_col);
-- double precision[] pgae_embedding(text); vector pgae_embedding_vec(text);
-- --
-- bool pgae_init(text modelName, text apiKey)
-- bool pgae_init_onprem(text appServer, text appPort)
--------- INTERNAL PROCEDURES ---------
CREATE OR REPLACE PROCEDURE pgae.pgae_save_credentials_internal(
appServer TEXT,
appPort TEXT,
userLogin TEXT,
userPassword TEXT,
userDatabase TEXT,
userSchema TEXT,
userTable TEXT,
modelName TEXT,
apiKey TEXT
)
LANGUAGE plpgsql AS $$
BEGIN
INSERT INTO
pgae.credentials (
id,
server_host,
server_port,
user_login,
user_password,
user_database,
user_schema,
user_table,
model_name,
api_key
)
VALUES (
1,
appServer,
appPort,
userLogin,
userPassword,
userDatabase,
userSchema,
userTable,
modelName,
apiKey
)
ON CONFLICT (id) DO UPDATE SET
server_host = appServer,
server_port = appPort,
user_login = userLogin,
user_password = userPassword,
user_database = userDatabase,
user_schema = userSchema,
user_table = userTable,
model_name = modelName,
api_key = apiKey;
END;
$$;
CREATE OR REPLACE PROCEDURE pgae.pgae_init_credentials_internal(
appServer TEXT,
appPort TEXT,
modelName TEXT,
apiKey TEXT
)
LANGUAGE plpgsql AS $$
DECLARE
userLogin text;
userPassword text;
userDatabase text;
userSchema text;
userTable text;
BEGIN
CREATE EXTENSION IF NOT EXISTS postgres_fdw;
EXECUTE format('CREATE SERVER pgae_login_server
FOREIGN DATA WRAPPER postgres_fdw
OPTIONS (host %L, port %L, dbname %L, application_name %L)', appServer, appPort, 'pgae', 'pg_auto_embeddings');
CREATE USER MAPPING FOR CURRENT_USER
SERVER pgae_login_server
OPTIONS (user 'initial_user', password 'initial_password');
CREATE FOREIGN TABLE pgae.login (
user_login text,
user_password text,
user_database text,
user_schema text,
user_table text
)
SERVER pgae_login_server
OPTIONS (schema_name 'public', table_name 'login');
-- SELECT * FROM pgae.login;
-- select login and password from login table and save them into credentials table
SELECT
user_login,
user_password,
user_database,
user_schema,
user_table
FROM
pgae.login
INTO
userLogin,
userPassword,
userDatabase,
userSchema,
userTable;
CALL pgae.pgae_save_credentials_internal(appServer, appPort, userLogin, userPassword, userDatabase, userSchema, userTable, modelName, apiKey);
DROP FOREIGN TABLE pgae.login;
DROP SERVER pgae_login_server CASCADE;
END;
$$;
CREATE OR REPLACE PROCEDURE pgae.pgae_recreate_fdw_internal()
LANGUAGE plpgsql AS $$
DECLARE
serverHost TEXT;
serverPort TEXT;
userLogin TEXT;
userPassword TEXT;
userDatabase TEXT;
userSchema TEXT;
userTable TEXT;
BEGIN
SELECT
server_host,
server_port,
user_login,
user_password,
user_database,
user_schema,
user_table
FROM
pgae.credentials
INTO
serverHost,
serverPort,
userLogin,
userPassword,
userDatabase,
userSchema,
userTable;
-- Drop the server if it exists
IF EXISTS (SELECT 1 FROM pg_foreign_server WHERE srvname = 'pgae_server') THEN
EXECUTE 'DROP SERVER pgae_server CASCADE';
END IF;
-- Recreate the server
EXECUTE format('CREATE SERVER pgae_server
FOREIGN DATA WRAPPER postgres_fdw
OPTIONS (host %L, port %L, dbname %L, application_name %L)', serverHost, serverPort, userDatabase, 'pg_auto_embeddings');
EXECUTE format('CREATE USER MAPPING FOR PUBLIC
SERVER pgae_server
OPTIONS (user %L, password %L)', userLogin, userPassword);
EXECUTE format('CREATE FOREIGN TABLE pgae.embeddings (
text_val text,
model_name text,
api_key text,
embedding double precision[]
)
SERVER pgae_server
OPTIONS (schema_name %L, table_name %L)', userSchema, userTable);
END;
$$;
CREATE OR REPLACE FUNCTION pgae.pgae_embedding_internal(new_value TEXT)
RETURNS double precision[] AS $$
DECLARE
cred_model_name TEXT;
cred_api_key TEXT;
updated_embedding double precision[];
BEGIN
SELECT
c.model_name,
c.api_key
FROM
pgae.credentials AS c
INTO
cred_model_name,
cred_api_key;
-- Execute the update and capture the RETURNING value into the variable
UPDATE pgae.embeddings
SET
text_val = new_value,
model_name = cred_model_name,
api_key = cred_api_key
RETURNING embedding INTO updated_embedding;
-- Return the captured value
RETURN updated_embedding;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pgae.pgae_init_internal(appServer TEXT, appPort TEXT, modelName TEXT, apiKey TEXT)
LANGUAGE plpgsql AS $$
BEGIN
CALL pgae.pgae_init_credentials_internal(appServer, appPort, modelName, apiKey);
-- now full credentials are saved into credentials table
-- now we can create server and real user mapping
CALL pgae.pgae_recreate_fdw_internal();
END;
$$;
---------- PUBLIC PROCEDURES ---------
CREATE OR REPLACE PROCEDURE pgae_init(modelName TEXT, apiKey TEXT)
LANGUAGE plpgsql AS $$
BEGIN
CALL pgae.pgae_init_internal('pgae.elkornacio.com', '13070', modelName, apiKey);
END;
$$;
CREATE OR REPLACE PROCEDURE pgae_init_onprem(appServer TEXT, appPort TEXT, modelName TEXT, apiKey TEXT)
LANGUAGE plpgsql AS $$
BEGIN
CALL pgae.pgae_init_internal(appServer, appPort, modelName, apiKey);
END;
$$;
CREATE FUNCTION pgae_embedding(text_val TEXT)
RETURNS double precision[] AS $$
BEGIN
RETURN pgae.pgae_embedding_internal(text_val);
END;
$$ LANGUAGE plpgsql;
CREATE FUNCTION pgae_embedding_vec(text_val TEXT)
RETURNS vector AS $$
BEGIN
RETURN pgae.pgae_embedding_internal(text_val)::vector;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE FUNCTION pgae_create_auto_embedding(source_schema TEXT, source_table TEXT, source_col TEXT, destination_col TEXT)
RETURNS BOOLEAN AS $$
DECLARE
trigger_name TEXT;
BEGIN
-- 1. Check if the trigger already exists
IF EXISTS (
SELECT
1
FROM
pgae.auto_embeddings
WHERE
    auto_embeddings.source_schema = pgae_create_auto_embedding.source_schema
    AND auto_embeddings.source_table = pgae_create_auto_embedding.source_table
    AND auto_embeddings.source_col = pgae_create_auto_embedding.source_col
    AND auto_embeddings.target_col = pgae_create_auto_embedding.destination_col
) THEN
RETURN FALSE;
END IF;
-- 2. Create the trigger record
INSERT INTO
pgae.auto_embeddings
(
source_schema,
source_table,
source_col,
target_col
)
VALUES
(
source_schema,
source_table,
source_col,
destination_col
);
-- 3. Build the trigger name "<source_schema>_<source_table>_<source_col>_<destination_col>"
trigger_name := CONCAT(
    source_schema,
    '_', source_table,
    '_', source_col,
    '_', destination_col
);
-- 4. Create the unique trigger function:
EXECUTE format('CREATE OR REPLACE FUNCTION pgae_trigger_func_%I()
    RETURNS TRIGGER AS $trg$
    BEGIN
        NEW.%I := pgae.pgae_embedding_internal(NEW.%I)::vector;
        RETURN NEW;
    END;
    $trg$ LANGUAGE plpgsql', trigger_name, destination_col, source_col);
-- 5. Create the unique trigger (BEFORE, because assigning to NEW has no effect in AFTER row triggers):
EXECUTE format('CREATE TRIGGER pgae_trigger_%I
    BEFORE INSERT OR UPDATE ON %I.%I
    FOR EACH ROW EXECUTE PROCEDURE pgae_trigger_func_%I()',
    trigger_name, source_schema, source_table, trigger_name);
RETURN TRUE;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE FUNCTION pgae_delete_auto_embedding(source_schema TEXT, source_table TEXT, source_col TEXT, destination_col TEXT)
RETURNS BOOLEAN AS $$
DECLARE
trigger_name TEXT;
BEGIN
-- 1. Check if the trigger exists
IF NOT EXISTS (
SELECT
1
FROM
pgae.auto_embeddings
WHERE
    auto_embeddings.source_schema = pgae_delete_auto_embedding.source_schema
    AND auto_embeddings.source_table = pgae_delete_auto_embedding.source_table
    AND auto_embeddings.source_col = pgae_delete_auto_embedding.source_col
    AND auto_embeddings.target_col = pgae_delete_auto_embedding.destination_col
) THEN
RETURN FALSE;
END IF;
trigger_name := CONCAT(
source_schema,
'_', source_table,
'_', source_col,
'_', destination_col
);
DELETE FROM
pgae.auto_embeddings
WHERE
    auto_embeddings.source_schema = pgae_delete_auto_embedding.source_schema
    AND auto_embeddings.source_table = pgae_delete_auto_embedding.source_table
    AND auto_embeddings.source_col = pgae_delete_auto_embedding.source_col
    AND auto_embeddings.target_col = pgae_delete_auto_embedding.destination_col;
EXECUTE format('DROP TRIGGER IF EXISTS pgae_trigger_%I ON %I.%I', trigger_name, source_schema, source_table);
EXECUTE format('DROP FUNCTION IF EXISTS pgae_trigger_func_%I()', trigger_name);
RETURN TRUE;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE PROCEDURE pgae_self_destroy()
LANGUAGE plpgsql AS $$
DECLARE
trigger_name TEXT;
src_schema TEXT;
src_table TEXT;
src_col TEXT;
tgt_col TEXT;
BEGIN
-- Select all auto-embeddings
FOR src_schema, src_table, src_col, tgt_col IN
SELECT source_schema, source_table, source_col, target_col
FROM pgae.auto_embeddings
LOOP
-- Construct trigger name
trigger_name := CONCAT(src_schema, '_', src_table, '_', src_col, '_', tgt_col);
-- Drop corresponding trigger functions and triggers
EXECUTE format('DROP TRIGGER IF EXISTS pgae_trigger_%I ON %I.%I', trigger_name, src_schema, src_table);
EXECUTE format('DROP FUNCTION IF EXISTS pgae_trigger_func_%I()', trigger_name);
END LOOP;
-- Drop FDW server
IF EXISTS (SELECT 1 FROM pg_foreign_server WHERE srvname = 'pgae_server') THEN
EXECUTE 'DROP SERVER pgae_server CASCADE';
END IF;
-- Drop the whole pgae schema
DROP SCHEMA pgae CASCADE;
-- Drop all pgae public functions
DROP PROCEDURE IF EXISTS pgae_init(TEXT, TEXT);
DROP PROCEDURE IF EXISTS pgae_init_onprem(TEXT, TEXT, TEXT, TEXT);
DROP FUNCTION IF EXISTS pgae_embedding(TEXT);
DROP FUNCTION IF EXISTS pgae_embedding_vec(TEXT);
DROP FUNCTION IF EXISTS pgae_create_auto_embedding(TEXT, TEXT, TEXT, TEXT);
DROP FUNCTION IF EXISTS pgae_delete_auto_embedding(TEXT, TEXT, TEXT, TEXT);
DROP PROCEDURE IF EXISTS pgae_self_destroy();
END;
$$;
--==================================================================
CALL pgae_init('openai-text-embedding-3-small', 'YOUR_OPENAI_API_KEY');
CALL pgae_self_destroy(); -- a procedure, so CALL rather than SELECT
--==================================================================
--====================================================
SELECT DISTINCT a
FROM Author a
WHERE EXISTS (SELECT 1 FROM Book b WHERE b.author = a)
--====================================================
FROM Order o WHERE all (SELECT oi.quantity > 5 FROM o.orderItems oi) = true
--====================================================
FROM Order o WHERE not all (SELECT oi.quantity > 5 FROM o.orderItems oi) = true
--====================================================
FROM Author a WHERE :bookTitle = ANY (SELECT b.title FROM a.books b)
--====================================================
FROM Person p WHERE '555' = SOME ELEMENT(p.phoneNumbers)
--====================================================
FROM Person p WHERE SOME ELEMENT(p.phoneNumbers) > 10
--====================================================
FROM Person p WHERE '555-1234' IN ELEMENTS(p.phoneNumbers)
--====================================================
SELECT 'Name: ' || e.firstName || ' ' || e.lastName FROM Employee e
SELECT CONCAT(firstName, ' ', lastName) AS fullName FROM Employee
--====================================================
SELECT ROUND(p.price / 2) FROM Product p WHERE p.price > :minPrice
FROM Product p WHERE p.price < :maxPrice / 2
--====================================================
FROM Product p WHERE p.price BETWEEN :minPrice AND :maxPrice
FROM Product p WHERE p.price NOT BETWEEN :minPrice AND :maxPrice
--====================================================
FROM Product p WHERE p.name NOT LIKE :substring
--====================================================
CREATE EXTENSION pg_wait_sampling;
\d pg_wait_sampling_current
\d pg_wait_sampling_history
\d pg_wait_sampling_profile
DROP EXTENSION pg_wait_sampling;
--==========================================================================================
CREATE EXTENSION pg_wait_sampling;
WITH t as (SELECT sum(0) FROM pg_wait_sampling_current)
SELECT sum(0) FROM generate_series(1, 2), t;
WITH t as (SELECT sum(0) FROM pg_wait_sampling_history)
SELECT sum(0) FROM generate_series(1, 2), t;
WITH t as (SELECT sum(0) FROM pg_wait_sampling_profile)
SELECT sum(0) FROM generate_series(1, 2), t;
-- Some dummy checks just to be sure that all our functions work and return something.
SELECT count(*) = 1 as test FROM pg_wait_sampling_get_current(pg_backend_pid());
SELECT count(*) >= 0 as test FROM pg_wait_sampling_get_profile();
SELECT count(*) >= 0 as test FROM pg_wait_sampling_get_history();
SELECT pg_wait_sampling_reset_profile();
DROP EXTENSION pg_wait_sampling;
--==========================================================================================
/* contrib/pg_wait_sampling/setup.sql */
-- complain if script is sourced in psql, rather than via CREATE EXTENSION
\echo Use "CREATE EXTENSION pg_wait_sampling" to load this file. \quit
CREATE FUNCTION pg_wait_sampling_get_current (
pid int4,
OUT pid int4,
OUT event_type text,
OUT event text,
OUT queryid int8
)
RETURNS SETOF record
AS 'MODULE_PATHNAME'
LANGUAGE C VOLATILE CALLED ON NULL INPUT;
CREATE VIEW pg_wait_sampling_current AS
SELECT * FROM pg_wait_sampling_get_current(NULL::integer);
GRANT SELECT ON pg_wait_sampling_current TO PUBLIC;
CREATE FUNCTION pg_wait_sampling_get_history (
OUT pid int4,
OUT ts timestamptz,
OUT event_type text,
OUT event text,
OUT queryid int8
)
RETURNS SETOF record
AS 'MODULE_PATHNAME'
LANGUAGE C VOLATILE STRICT;
CREATE VIEW pg_wait_sampling_history AS
SELECT * FROM pg_wait_sampling_get_history();
GRANT SELECT ON pg_wait_sampling_history TO PUBLIC;
CREATE FUNCTION pg_wait_sampling_get_profile (
OUT pid int4,
OUT event_type text,
OUT event text,
OUT queryid int8,
OUT count int8
)
RETURNS SETOF record
AS 'MODULE_PATHNAME'
LANGUAGE C VOLATILE STRICT;
CREATE VIEW pg_wait_sampling_profile AS
SELECT * FROM pg_wait_sampling_get_profile();
GRANT SELECT ON pg_wait_sampling_profile TO PUBLIC;
CREATE FUNCTION pg_wait_sampling_reset_profile()
RETURNS void
AS 'MODULE_PATHNAME'
LANGUAGE C VOLATILE STRICT;
-- Don't want this to be available to non-superusers.
REVOKE ALL ON FUNCTION pg_wait_sampling_reset_profile() FROM PUBLIC;
--========================================================================================
DROP FUNCTION pg_wait_sampling_get_current (
pid int4,
OUT pid int4,
OUT event_type text,
OUT event text
) CASCADE;
DROP FUNCTION pg_wait_sampling_get_history (
OUT pid int4,
OUT ts timestamptz,
OUT event_type text,
OUT event text
) CASCADE;
DROP FUNCTION pg_wait_sampling_get_profile (
OUT pid int4,
OUT event_type text,
OUT event text,
OUT count bigint
) CASCADE;
CREATE FUNCTION pg_wait_sampling_get_current (
pid int4,
OUT pid int4,
OUT event_type text,
OUT event text,
OUT queryid int8
)
RETURNS SETOF record
AS 'MODULE_PATHNAME'
LANGUAGE C VOLATILE CALLED ON NULL INPUT;
CREATE VIEW pg_wait_sampling_current AS
SELECT * FROM pg_wait_sampling_get_current(NULL::integer);
GRANT SELECT ON pg_wait_sampling_current TO PUBLIC;
CREATE FUNCTION pg_wait_sampling_get_history (
OUT pid int4,
OUT ts timestamptz,
OUT event_type text,
OUT event text,
OUT queryid int8
)
RETURNS SETOF record
AS 'MODULE_PATHNAME'
LANGUAGE C VOLATILE STRICT;
CREATE VIEW pg_wait_sampling_history AS
SELECT * FROM pg_wait_sampling_get_history();
GRANT SELECT ON pg_wait_sampling_history TO PUBLIC;
CREATE FUNCTION pg_wait_sampling_get_profile (
OUT pid int4,
OUT event_type text,
OUT event text,
OUT queryid int8,
OUT count int8
)
RETURNS SETOF record
AS 'MODULE_PATHNAME'
LANGUAGE C VOLATILE STRICT;
CREATE VIEW pg_wait_sampling_profile AS
SELECT * FROM pg_wait_sampling_get_profile();
GRANT SELECT ON pg_wait_sampling_profile TO PUBLIC;
--==========================================================================
CREATE TABLE products (
id SERIAL PRIMARY KEY,
name VARCHAR(100),
tags TEXT[] -- array of text values for tags
);
INSERT INTO products (name, tags)
VALUES ('Product 1', ARRAY['new', 'sale']),
('Product 2', ARRAY['popular', 'discount']);
SELECT * FROM products
WHERE 'new' = ANY(tags);
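-- A GIN index speeds up array searches; prefer the indexable @> containment form (index name illustrative):
CREATE INDEX products_tags_idx ON products USING gin (tags);
SELECT * FROM products WHERE tags @> ARRAY['new'];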
--==========================================================================
CREATE TABLE users (
id SERIAL PRIMARY KEY,
name VARCHAR(100),
profile JSONB -- binary JSON representation
);
INSERT INTO users (name, profile)
VALUES ('Ivan Ivanov', '{"age": 30, "city": "Moscow", "interests": ["football", "reading"]}'),
('Maria Smirnova', '{"age": 25, "city": "Saint Petersburg", "interests": ["music", "travel"]}');
SELECT
name,
profile->>'age' AS age,
profile->>'city' AS city
FROM users;
-- ANY() does not work on jsonb values; use the ? operator to test array membership
SELECT * FROM users
WHERE profile->'interests' ? 'music';
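-- jsonb containment (@>) can also use a GIN index (index name illustrative):
CREATE INDEX users_profile_idx ON users USING gin (profile);
SELECT * FROM users WHERE profile @> '{"city": "Moscow"}';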
--==========================================================================
-- create the table
CREATE TABLE user_profiles (
id SERIAL PRIMARY KEY,
username VARCHAR(100),
attributes JSONB,
tags TEXT[]
);
-- insert data
INSERT INTO user_profiles (username, attributes, tags)
VALUES ('user1', '{"age": 28, "location": "NY", "preferences": {"newsletter": true}}', ARRAY['active', 'premium']),
('user2', '{"age": 34, "location": "LA", "preferences": {"newsletter": false}}', ARRAY['inactive']);
-- query the data
SELECT
username,
attributes->>'age' AS age,
attributes->>'location' AS location
FROM user_profiles
WHERE 'active' = ANY(tags);
--==========================================================================
-- install the extension
CREATE EXTENSION IF NOT EXISTS pg_trgm;
-- create the articles table
CREATE TABLE articles (
id SERIAL PRIMARY KEY,
title VARCHAR(255),
content TEXT
);
-- insert data
INSERT INTO articles (title, content)
VALUES ('Article 1', 'This is sample text for full-text search.'),
('Article 2', 'Searching for similar texts in a database.');
-- create the index
CREATE INDEX content_trgm_idx ON articles USING gin (content gin_trgm_ops);
-- find articles using trigram similarity search
SELECT * FROM articles
WHERE content % 'search';
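-- rank matches by trigram similarity instead of only filtering with %:
SELECT title, similarity(content, 'search') AS sml
FROM articles
WHERE content % 'search'
ORDER BY sml DESC;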
--==========================================================================
-- install PostGIS
CREATE EXTENSION IF NOT EXISTS postgis;
-- create a table with geographic data
CREATE TABLE locations (
id SERIAL PRIMARY KEY,
name VARCHAR(100),
coordinates GEOGRAPHY(POINT)
);
-- insert data
INSERT INTO locations (name, coordinates)
VALUES ('Place 1', ST_GeographyFromText('SRID=4326;POINT(-122.4194 37.7749)')),
('Place 2', ST_GeographyFromText('SRID=4326;POINT(-118.2437 34.0522)'));
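-- ST_Distance on GEOGRAPHY returns meters; e.g. pairwise distances between the stored places:
SELECT a.name AS from_place, b.name AS to_place,
       ST_Distance(a.coordinates, b.coordinates) AS distance_m
FROM locations a
JOIN locations b ON a.id < b.id;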
--==========================================================================
WITH top_products AS (
SELECT id, name, sales
FROM products
WHERE sales > 1000
),
top_customers AS (
SELECT id, name, purchases
FROM customers
WHERE purchases > 500
)
SELECT tp.name AS product_name, tc.name AS customer_name
FROM top_products tp
JOIN top_customers tc ON tp.id = tc.id;
--==========================================================================
WITH RECURSIVE category_hierarchy AS (
SELECT id, name, parent_id
FROM categories
WHERE parent_id IS NULL
UNION ALL
SELECT c.id, c.name, c.parent_id
FROM categories c
INNER JOIN category_hierarchy ch ON c.parent_id = ch.id
)
SELECT * FROM category_hierarchy;
--==========================================================================
-- create the employees table
CREATE TABLE employees (
id SERIAL PRIMARY KEY,
name VARCHAR(100),
manager_id INT REFERENCES employees(id)
);
-- insert data
INSERT INTO employees (name, manager_id)
VALUES ('CEO', NULL),
('Manager 1', 1),
('Manager 2', 1),
('Employee 1', 2),
('Employee 2', 2),
('Employee 3', 3);
-- recursive query for the employee hierarchy
WITH RECURSIVE employee_hierarchy AS (
SELECT id, name, manager_id
FROM employees
WHERE manager_id IS NULL
UNION ALL
SELECT e.id, e.name, e.manager_id
FROM employees e
INNER JOIN employee_hierarchy eh ON e.manager_id = eh.id
)
SELECT * FROM employee_hierarchy;
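-- a variant tracking depth and a readable path (column names illustrative):
WITH RECURSIVE employee_paths AS (
    SELECT id, name, 1 AS depth, name::text AS path
    FROM employees
    WHERE manager_id IS NULL
    UNION ALL
    SELECT e.id, e.name, ep.depth + 1, ep.path || ' > ' || e.name
    FROM employees e
    INNER JOIN employee_paths ep ON e.manager_id = ep.id
)
SELECT * FROM employee_paths ORDER BY path;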
--==========================================================================
--================================================================
WITH searches_expanded AS (
SELECT searches
FROM search_frequency
GROUP BY
searches,
GENERATE_SERIES(1, num_users))
SELECT
ROUND(PERCENTILE_CONT(0.50) WITHIN GROUP (
ORDER BY searches)::DECIMAL, 1) AS median
FROM searches_expanded;
--================================================================
WITH ranked_measurements AS (
SELECT
CAST(measurement_time AS DATE) AS measurement_day,
measurement_value,
ROW_NUMBER() OVER (
PARTITION BY CAST(measurement_time AS DATE)
ORDER BY measurement_time) AS measurement_num
FROM measurements
)
SELECT
measurement_day,
SUM(measurement_value) FILTER (WHERE measurement_num % 2 != 0) AS odd_sum,
SUM(measurement_value) FILTER (WHERE measurement_num % 2 = 0) AS even_sum
FROM ranked_measurements
GROUP BY measurement_day;
--================================================================
WITH reviews AS (
SELECT
place.place_category,
COUNT(ugc.content_id) AS content_count
FROM place_info place
JOIN maps_ugc_review ugc
ON place.place_id = ugc.place_id
WHERE content_tag = 'Off-topic'
GROUP BY place_category
)
SELECT
place_category,
content_count,
RANK() OVER (ORDER BY content_count DESC) AS top_place
FROM reviews;
--================================================================
SELECT
categories.category_name,
EXTRACT(MONTH FROM searches.search_date) AS month,
COUNT(*) OVER (PARTITION BY categories.category_name, EXTRACT(MONTH FROM searches.search_date)) AS total_searches
FROM
searches
LEFT JOIN
categories ON categories.category_id = searches.category_id
WHERE
EXTRACT(YEAR FROM searches.search_date) = 2024
ORDER BY
total_searches DESC
--================================================================
SELECT *
FROM ads
WHERE status = 'active'
AND impressions > 500000
AND YEAR(last_updated) = 2024;
--================================================================
CREATE FUNCTION get_avg_salary(department_name TEXT)
RETURNS NUMERIC AS
$BODY$
BEGIN
RETURN (SELECT AVG(salary) FROM google_employees WHERE department = department_name);
END;
$BODY$
LANGUAGE 'plpgsql';
SELECT get_avg_salary('Data Analytics');
--================================================================
SELECT
a.ad_id,
COUNT(DISTINCT a.user_id) AS total_clicks,
COUNT(DISTINCT c.user_id) AS total_conversions,
COUNT(DISTINCT c.user_id)*1.0 / COUNT(DISTINCT a.user_id) * 100.0 AS conversion_rate
FROM
ad_clicks a
LEFT JOIN
cart_addition c
ON
a.ad_id = c.ad_id AND a.user_id = c.user_id
GROUP BY
a.ad_id;
--================================================================
SELECT
campaign_id,
ad_group_id,
SUM(cost) / SUM(clicks) AS avg_CPC
FROM
ad_clicks
GROUP BY
campaign_id,
ad_group_id;
--================================================================
SELECT C.customer_id,
C.first_name,
C.last_name,
C.app,
MAX(P.date) AS latest_purchase_date
FROM Customers C
JOIN Purchases P
ON C.customer_id = P.customer_id
GROUP BY C.customer_id, C.first_name, C.last_name, C.app;
--================================================================
SELECT
transactions.store_id,
SUM(payouts.payout_amount) AS total_payout
FROM trade_in_transactions AS transactions
INNER JOIN trade_in_payouts AS payouts
ON transactions.model_id = payouts.model_id
GROUP BY transactions.store_id
ORDER BY total_payout DESC;
--================================================================
WITH lag_products AS (
SELECT
customer_id,
product_name,
LAG(product_name)
OVER(PARTITION BY customer_id
ORDER BY transaction_timestamp) AS prev_prod
FROM transactions
GROUP BY
customer_id,
product_name,
transaction_timestamp
),
interested_users AS (
SELECT customer_id AS airpod_iphone_buyers
FROM lag_products
WHERE LOWER(product_name) = 'airpods'
AND LOWER(prev_prod) = 'iphone'
GROUP BY customer_id
)
SELECT
ROUND(
COUNT(DISTINCT iu.airpod_iphone_buyers)::DECIMAL
/ COUNT(DISTINCT transactions.customer_id)::DECIMAL
* 100, 0)
FROM transactions
LEFT JOIN interested_users AS iu
ON iu.airpod_iphone_buyers = transactions.customer_id;
--================================================================
SELECT EXTRACT(MONTH FROM submit_date) as mth,
product_id as product,
AVG(stars) as avg_stars
FROM reviews
GROUP BY mth, product
ORDER BY mth, product;
--================================================================
SELECT MONTH(s.date_of_sale) as 'Month', p.product_name, AVG(s.quantity_sold) as 'Average_Sold'
FROM sales s
JOIN products p ON s.product_id = p.product_id
WHERE YEAR(s.date_of_sale) = 2021
GROUP BY Month, p.product_name
--================================================================
-- parameter renamed to avoid shadowing the events.event_name column; ::NUMERIC avoids integer division
CREATE FUNCTION get_conversion_rate(start_date DATE, end_date DATE, target_event TEXT)
RETURNS NUMERIC AS
$BODY$
BEGIN
RETURN (SELECT COUNT(*) FROM events WHERE event_date BETWEEN start_date AND end_date AND event_name = 'conversion')::NUMERIC
/ (SELECT COUNT(*) FROM events WHERE event_date BETWEEN start_date AND end_date AND event_name = 'impression');
END;
$BODY$
LANGUAGE plpgsql;
SELECT get_conversion_rate('2023-01-01', '2023-01-31', 'conversion');
--================================================================
SELECT
c.product_id,
sum(case when a.add_id is not null then 1 else 0 end) / count(c.click_id) as conversion_rate
FROM
clicks c
LEFT JOIN bag_adds a ON a.product_id = c.product_id AND a.user_id = c.user_id
GROUP BY c.product_id
--================================================================
WITH lag_products AS (
SELECT
customer_id,
product_name,
LAG(product_name)
OVER(PARTITION BY customer_id
ORDER BY transaction_timestamp) AS prev_prod
FROM transactions
GROUP BY
customer_id,
product_name,
transaction_timestamp
),
interested_users AS (
SELECT customer_id AS airpod_iphone_buyers
FROM lag_products
WHERE LOWER(product_name) = 'airpods'
AND LOWER(prev_prod) = 'iphone'
GROUP BY customer_id
)
SELECT
ROUND(
COUNT(DISTINCT iu.airpod_iphone_buyers)::DECIMAL
/ COUNT(DISTINCT transactions.customer_id)::DECIMAL
* 100, 0)
FROM transactions
LEFT JOIN interested_users AS iu
ON iu.airpod_iphone_buyers = transactions.customer_id;
--================================================================
SELECT
u.UserID,
u.UserName,
COUNT(DISTINCT d.DeviceType) AS TotalDevices,
SUM(s.StorageUsed) AS TotalStorageUsed
FROM
Users u
JOIN
Devices d ON u.UserID = d.UserID
JOIN
StorageUsage s ON d.DeviceID = s.DeviceID
GROUP BY
u.UserID,
u.UserName
HAVING
COUNT(DISTINCT d.DeviceType) > 1 AND SUM(s.StorageUsed) > 50
ORDER BY
TotalStorageUsed DESC;
--================================================================
with snaps_statistics as (
select age.age_bucket,
sum(case when activities.activity_type = 'send' then activities.time_spent else 0 end) as send_timespent,
sum(case when activities.activity_type = 'open' then activities.time_spent else 0 end) as open_timespent,
sum(activities.time_spent) as total_timespent
from activities
inner join age_breakdown as age
on age.user_id = activities.user_id
where activities.activity_type in ('send', 'open')
group by age.age_bucket
)
select age_bucket,
round(send_timespent::decimal / total_timespent, 2) as send_perc,
round(open_timespent::decimal / total_timespent, 2) as open_perc
from snaps_statistics;
--================================================================
SELECT
user_id,
MAX(post_date::DATE) - MIN(post_date::DATE) AS days_between
FROM posts
WHERE DATE_PART('year', post_date::DATE) = 2024
GROUP BY user_id
HAVING COUNT(post_id) > 1;
--================================================================
SELECT
up.user_id,
COUNT(DISTINCT up.post_id) AS no_of_posts,
AVG(pi.comments + pi.reactions) AS avg_interaction_per_post
FROM
user_post up
JOIN
post_interactions pi on up.post_id = pi.post_id
GROUP BY
up.user_id
HAVING
COUNT(DISTINCT up.post_id) >= 2 AND AVG(pi.comments + pi.reactions) >= 150
--================================================================
SELECT region, SUM(sales)
FROM facebook_ads
WHERE date > '2024-01-01'
GROUP BY region
HAVING SUM(sales) > 500000;
--================================================================
SELECT
EXTRACT(MONTH FROM curr_month.event_date) AS mth,
COUNT(DISTINCT curr_month.user_id) AS monthly_active_users
FROM user_actions AS curr_month
WHERE EXISTS (
SELECT last_month.user_id
FROM user_actions AS last_month
WHERE last_month.user_id = curr_month.user_id
AND EXTRACT(MONTH FROM last_month.event_date) =
EXTRACT(MONTH FROM curr_month.event_date - interval '1 month')
)
AND EXTRACT(MONTH FROM curr_month.event_date) = 7
AND EXTRACT(YEAR FROM curr_month.event_date) = 2022
GROUP BY EXTRACT(MONTH FROM curr_month.event_date);
--================================================================
WITH private_events AS (
SELECT user_id, event_id
FROM event_rsvp
WHERE attendance_status IN ('going', 'maybe')
AND event_type = 'private'
)
SELECT
friends.user_a_id,
friends.user_b_id
FROM private_events AS events_1
INNER JOIN private_events AS events_2
ON events_1.user_id != events_2.user_id
AND events_1.event_id = events_2.event_id
INNER JOIN friendship_status AS friends
ON events_1.user_id = friends.user_a_id
AND events_2.user_id = friends.user_b_id
WHERE friends.status = 'not_friends'
GROUP BY friends.user_a_id, friends.user_b_id
HAVING COUNT(*) >= 2;
--================================================================
SELECT
U.user_id,
COALESCE(AVG(S.shares), 0) as avg_shares_per_post
FROM
user_posts as U
LEFT JOIN (
SELECT
post_id,
COUNT(share_id) as shares
FROM
post_shares
GROUP BY
post_id
) as S on U.post_id = S.post_id
GROUP BY
U.user_id;
--================================================================
SELECT
app_id,
ROUND(100.0 *
SUM(CASE WHEN event_type = 'click' THEN 1 ELSE 0 END) /
SUM(CASE WHEN event_type = 'impression' THEN 1 ELSE 0 END), 2) AS ctr_rate
FROM events
WHERE timestamp >= '2022-01-01'
AND timestamp < '2023-01-01'
GROUP BY app_id;
--================================================================
SELECT
age.age_bucket,
SUM(activities.time_spent::DECIMAL) FILTER (WHERE activities.activity_type = 'send')/
SUM(activities.time_spent) AS send_perc,
SUM(activities.time_spent::DECIMAL) FILTER (WHERE activities.activity_type = 'open')/
SUM(activities.time_spent) AS open_perc
FROM activities
INNER JOIN age_breakdown AS age
ON activities.user_id = age.user_id
WHERE activities.activity_type IN ('send', 'open')
GROUP BY age.age_bucket;
--================================================================
SELECT user_id, COUNT(*) AS num_snaps
FROM snaps
WHERE snap_date >= DATEADD(week, -4, '2022-09-06')
GROUP BY user_id
HAVING COUNT(*) > 400;
--================================================================
SELECT
snap_id,
date,
SUM(views) OVER (PARTITION BY snap_id ORDER BY date) as total_views_to_date,
SUM(views) OVER (PARTITION BY snap_id ORDER BY date ROWS BETWEEN 6 PRECEDING AND CURRENT ROW) as weekly_views
FROM
daily_snap_analytics
ORDER BY
snap_id,
date;
--================================================================
SELECT first_name, last_name
FROM snap_employees
INTERSECT
SELECT first_name, last_name
FROM snap_contractors
--================================================================
SELECT user_id
FROM user_activity
WHERE last_active_date >= '2021-08-01'
AND friend_count > 5
AND message_count >= 20;
--================================================================
SELECT c.camp_name,
CAST(count(ac.click_id) AS FLOAT)/count(ai.impression_id) as CTR
FROM campaigns c
LEFT JOIN ad_impressions ai on c.camp_id = ai.camp_id
LEFT JOIN ad_clicks ac on ai.impression_id = ac.click_id AND c.camp_id = ac.camp_id
GROUP BY c.camp_name;
--================================================================
SELECT
EXTRACT(month FROM activity_date) AS mon,
activity_name,
SUM(hours_spent)
FROM
user_activity
GROUP BY
EXTRACT(month FROM activity_date),
activity_name
ORDER BY
mon,
SUM(hours_spent) DESC;
--================================================================
SELECT u.user_id, u.user_name, COUNT(f.user_id1) AS num_accepted_requests
FROM users u
JOIN friendships f
ON u.user_id = f.user_id1
WHERE u.sign_up_date >= DATE_SUB(CURRENT_DATE, INTERVAL 30 DAY)
AND f.status = 'Accepted'
GROUP BY u.user_id, u.user_name
HAVING num_accepted_requests >= 1;
--================================================================
SELECT
sessions_1.session_id,
COUNT(sessions_2.session_id) AS concurrent_sessions
FROM sessions AS sessions_1
INNER JOIN sessions AS sessions_2
ON sessions_1.session_id != sessions_2.session_id
AND (sessions_2.start_time BETWEEN sessions_1.start_time AND sessions_1.end_time
OR sessions_1.start_time BETWEEN sessions_2.start_time AND sessions_2.end_time)
GROUP BY sessions_1.session_id
ORDER BY concurrent_sessions DESC;
--================================================================
SELECT
EXTRACT(MONTH FROM pin_date) as month,
user_id,
COUNT(*) as monthly_pins,
COUNT(*) - LAG(COUNT(*)) OVER(PARTITION BY user_id ORDER BY EXTRACT(MONTH FROM pin_date)) as pin_change
FROM
pins
GROUP BY user_id, EXTRACT(MONTH FROM pin_date)
--================================================================
SELECT
u.username,
b.board_id,
b.followers_count
FROM
Boards b
INNER JOIN Users u ON b.user_id = u.user_id
WHERE
b.category = 'Home Decor'
ORDER BY
b.followers_count DESC
LIMIT 3;
--================================================================
SELECT *
FROM pinterest_employees
LEFT JOIN pinterest_managers
ON pinterest_employees.id = pinterest_managers.id
WHERE pinterest_managers.id IS NULL;
SELECT * FROM pinterest_employees
EXCEPT
SELECT * FROM pinterest_managers
--================================================================
SELECT u.user_id, u.username
FROM users AS u
JOIN (
SELECT p.user_id
FROM pins AS p
WHERE p.category = 'Cooking'
AND p.pinned_date > DATEADD(day, -30, GETDATE())
GROUP BY p.user_id
HAVING COUNT(p.pin_id) >= 5
) AS cooking_users ON u.user_id = cooking_users.user_id
WHERE NOT EXISTS (
SELECT 1
FROM pins AS p2
WHERE p2.user_id = u.user_id
AND p2.category = 'Gardening'
AND p2.pinned_date > DATEADD(day, -30, GETDATE())
);
--================================================================
SELECT
user_id,
AVG(pins_count) AS avg_pins
FROM (
SELECT
pins.user_id,
boards.board_id,
COUNT(pins.pin_id) AS pins_count
FROM
boards
INNER JOIN
pins ON boards.board_id = pins.board_id
WHERE
pins.user_id = 123
GROUP BY
user_id, board_id
) AS user_boards
GROUP BY
user_id;
--================================================================
SELECT
a.ad_id,
ROUND(100.0 * COUNT(c.click_id) / COUNT(v.view_id), 2) as CTR
FROM
ad_clicks c
JOIN
ad_views v ON c.user_id = v.user_id AND c.ad_id = v.ad_id
JOIN
ads a ON a.ad_id = c.ad_id
WHERE
EXTRACT(MONTH FROM c.click_date) = 7 AND EXTRACT(YEAR FROM c.click_date) = 2024
GROUP BY
a.ad_id
--================================================================
SELECT *
FROM user_interactions
WHERE board_name LIKE 'Food%' OR board_name LIKE 'Diy%';
--================================================================
SELECT
ROUND(COUNT(texts.email_id)::DECIMAL
/COUNT(DISTINCT emails.email_id),2) AS activation_rate
FROM emails
LEFT JOIN texts
ON emails.email_id = texts.email_id
AND texts.signup_action = 'Confirmed';
--================================================================
SELECT
u.username,
COUNT(v.video_id) as num_videos
FROM
Users u
JOIN
Videos v ON u.user_id = v.user_id
GROUP BY
u.username
HAVING
COUNT(v.video_id) > 1000
ORDER BY
num_videos DESC;
--================================================================
SELECT DISTINCT user_id
FROM emails
INNER JOIN texts
ON emails.email_id = texts.email_id
WHERE texts.action_date = emails.signup_date + INTERVAL '1 day'
AND texts.signup_action = 'Confirmed';
--================================================================
SELECT u.username, AVG(v.video_length_seconds) AS average_video_duration_seconds
FROM videos v
JOIN users u
ON v.user_id = u.user_id
GROUP BY u.username
--================================================================
SELECT
Users.user_id,
COUNT(Videos.video_id) AS total_videos,
SUM(Videos.video_likes) AS total_likes
FROM
Users
JOIN
Videos ON Users.user_id=Videos.user_id
GROUP BY
Users.user_id
ORDER BY
total_likes DESC
LIMIT 5;
--================================================================
SELECT ROUND(AVG(uwv.watched_duration_secs)) as avg_watched_duration,
MAX(uwv.watched_duration_secs) as max_watched_duration,
MIN(uwv.watched_duration_secs) as min_watched_duration,
SQRT(SUM(CASE WHEN ul.liked THEN 1 ELSE 0 END)) as totalLikesSQRT
FROM user_watched_videos uwv
JOIN user_likes ul ON uwv.user_id = ul.user_id AND uwv.video_id = ul.video_id
--================================================================
WITH Ranked_Videos As (
SELECT
User_Id,
Video_Id,
Date,
Likes,
ROW_NUMBER() OVER (PARTITION BY User_Id, Date ORDER BY Likes DESC, Video_Id ASC) as rn
FROM Video_Stats
)
SELECT
User_Id,
Video_Id,
Date,
Likes
FROM Ranked_Videos
WHERE rn = 1;
--================================================================
SELECT
EXTRACT(MONTH FROM submit_date) AS mth,
product_id,
ROUND(AVG(stars), 2) AS avg_stars
FROM reviews
GROUP BY EXTRACT(MONTH FROM submit_date), product_id
ORDER BY mth, product_id;
--================================================================
SELECT
category,
product,
SUM(spend) AS total_spend
FROM product_spend
WHERE transaction_date >= '2022-01-01'
AND transaction_date <= '2022-12-31'
GROUP BY category, product;
--================================================================
WITH product_category_spend AS (
SELECT
category,
product,
SUM(spend) AS total_spend
FROM product_spend
WHERE transaction_date >= '2022-01-01'
AND transaction_date <= '2022-12-31'
GROUP BY category, product
),
top_spend AS (
SELECT *,
RANK() OVER (
PARTITION BY category
ORDER BY total_spend DESC) AS ranking
FROM product_category_spend)
SELECT category, product, total_spend
FROM top_spend
WHERE ranking <= 2
ORDER BY category, ranking;
--================================================================
SELECT current_date - INTEGER '1' AS yesterday_date;
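-- equivalent spellings; note date - integer stays a date, while
-- date - interval yields a timestamp
SELECT current_date - 1 AS yesterday_date;
SELECT current_date - INTERVAL '1 day' AS yesterday_date;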
--================================================================
SELECT user_id, COUNT(*) as purchase_count
FROM Purchases
WHERE MONTH(purchase_date) = 8
GROUP BY user_id
HAVING COUNT(*) > 10
ORDER BY COUNT(*) DESC;
--================================================================
SELECT
ROUND(AVG(email_count)) as mean,
PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY email_count) AS median,
MODE() WITHIN GROUP (ORDER BY email_count) AS mode
FROM inbox_stats;
--================================================================
SELECT r.region_name, SUM(h.unit_cost * p.quantity) AS total_cost
FROM hardware h
JOIN production p ON h.hardware_id = p.hardware_id
JOIN region r ON p.production_id = r.production_id
WHERE MONTH(p.production_date) = 6 AND YEAR(p.production_date) = 2022
GROUP BY r.region_name;
--================================================================
SELECT *
FROM Sales
WHERE product_type = 'Software'
AND sales_year > 2015
AND NOT sales_region = 'Europe'
--================================================================
WITH Click_Rates AS (
SELECT product_category, COUNT(DISTINCT user_id) AS unique_clicks
FROM clicks
WHERE DATE(click_date) BETWEEN '2022-06-01' AND '2022-06-30'
GROUP BY product_category),
Conversion_Rates AS (
SELECT c.product_category, COUNT(DISTINCT v.user_id) AS unique_conversions
FROM conversions v
JOIN clicks c
ON c.product_id = v.product_id AND c.user_id = v.user_id
WHERE DATE(conversion_date) BETWEEN '2022-06-01' AND '2022-06-30'
GROUP BY c.product_category)
SELECT Click_Rates.product_category,
unique_clicks,
COALESCE(unique_conversions, 0) AS unique_conversions,
(unique_clicks * 1.0 / (SELECT COUNT(DISTINCT user_id) FROM clicks WHERE DATE(click_date) BETWEEN '2022-06-01' AND '2022-06-30')) AS click_through_rate,
(COALESCE(unique_conversions, 0) * 1.0 / unique_clicks) AS conversion_rate
FROM Click_Rates
LEFT JOIN Conversion_Rates ON Click_Rates.product_category = Conversion_Rates.product_category
--================================================================
SELECT *
FROM employees
WHERE job_description LIKE '%AI%';
--================================================================
SELECT
deals.employee_id,
CASE
WHEN SUM(deals.deal_size) > quotas.quota THEN 'yes'
ELSE 'no'
END AS made_quota
FROM deals
INNER JOIN sales_quotas AS quotas
ON deals.employee_id = quotas.employee_id
GROUP BY deals.employee_id, quotas.quota
ORDER BY deals.employee_id;
--================================================================
SELECT
deals.employee_id,
CASE
WHEN SUM(deals.deal_size) <= employee.quota
THEN employee.base + (employee.commission * SUM(deals.deal_size)) -- #1
ELSE employee.base + (employee.commission * employee.quota) +
((SUM(deals.deal_size) - employee.quota) * employee.commission * employee.accelerator) -- #2
END AS total_compensation
FROM deals
INNER JOIN employee_contract AS employee
ON deals.employee_id = employee.employee_id
GROUP BY deals.employee_id, employee.quota, employee.base, employee.commission, employee.accelerator
ORDER BY total_compensation DESC, deals.employee_id;
--================================================================
SELECT c.product_id,
COUNT(p.cart_id)::float/COUNT(c.click_id)::float as conversion_rate
FROM clicks c
LEFT JOIN products_in_cart p ON c.product_id = p.product_id AND c.user_id = p.user_id
GROUP BY c.product_id;
--================================================================
SELECT
EXTRACT(MONTH FROM sales_date) AS month,
category_id AS category,
AVG(revenue) AS avg_revenue
FROM
sales
GROUP BY
month,
category
ORDER BY
month,
category;
--================================================================
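-- correlated subquery: compares each row against that customer's own average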
SELECT t1.customer_id, t1.total_sales
FROM oracle_sales t1
WHERE t1.total_sales > (
SELECT AVG(t2.total_sales)
FROM oracle_sales t2
WHERE t2.customer_id = t1.customer_id
);
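-- uncorrelated subquery: compares each row against the overall average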
SELECT t1.customer_id, t1.total_sales
FROM oracle_sales t1
WHERE t1.total_sales > (
SELECT AVG(t2.total_sales)
FROM oracle_sales t2
);
--================================================================
SELECT *
FROM customers
WHERE LOWER(company_name) LIKE '%oracle%';
--================================================================
SELECT
CASE
WHEN AGE <= 30 THEN '0-30'
WHEN AGE > 30 AND AGE <= 50 THEN '30-50'
WHEN AGE > 50 THEN '> 50'
END AS age_group,
AVG(amount) as avg_amount
FROM
(
SELECT
p.*,
c.birthdate,
EXTRACT(year FROM AGE(p.purchase_date, c.birthdate)) AS AGE
FROM
purchases p
JOIN
customer c ON p.customer_id = c.customer_id
) sub
GROUP BY age_group
ORDER BY age_group;
--================================================================
WITH top_10_cte AS (
SELECT
artists.artist_name,
DENSE_RANK() OVER (
ORDER BY COUNT(songs.song_id) DESC) AS artist_rank
FROM artists
INNER JOIN songs
ON artists.artist_id = songs.artist_id
INNER JOIN global_song_rank AS ranking
ON songs.song_id = ranking.song_id
WHERE ranking.rank <= 10
GROUP BY artists.artist_name
)
SELECT artist_name, artist_rank
FROM top_10_cte
WHERE artist_rank <= 5;
--================================================================
SELECT *
FROM oracle_employees
LEFT JOIN oracle_managers
ON oracle_employees.id = oracle_managers.id
WHERE oracle_managers.id IS NULL;
--================================================================
SELECT * FROM oracle_employees
EXCEPT
SELECT * FROM oracle_managers
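-- same anti-join with NOT EXISTS, matching on id like the LEFT JOIN version above
-- (EXCEPT compares whole rows and also removes duplicates):
SELECT e.*
FROM oracle_employees e
WHERE NOT EXISTS (
    SELECT 1 FROM oracle_managers m WHERE m.id = e.id
);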
--================================================================
WITH employee_queries AS (
SELECT
e.employee_id,
COALESCE(COUNT(DISTINCT q.query_id), 0) AS unique_queries
FROM employees AS e
LEFT JOIN queries AS q
ON e.employee_id = q.employee_id
AND q.query_starttime >= '2023-07-01T00:00:00Z'
AND q.query_starttime < '2023-10-01T00:00:00Z'
GROUP BY e.employee_id
)
SELECT
unique_queries,
COUNT(employee_id) AS employee_count
FROM employee_queries
GROUP BY unique_queries
ORDER BY unique_queries;
--================================================================
SELECT
DATE_TRUNC('month', submit_date) AS mth,
p.product_name AS product,
ROUND(AVG(stars), 2) AS avg_stars
FROM
reviews r
JOIN
products p ON p.product_id = r.product_id
GROUP BY
mth,
product
ORDER BY
mth ASC,
product ASC;
--================================================================
SELECT products.name AS product, colors.name AS color
FROM products
CROSS JOIN colors;
SELECT *
FROM ibm_employees
LEFT JOIN ibm_managers
ON ibm_employees.id = ibm_managers.id
WHERE ibm_managers.id IS NULL;
--================================================================
SELECT
department,
AVG(
CASE
WHEN end_date IS NULL THEN (CURRENT_DATE - start_date)
ELSE (end_date - start_date)
END
) AS avg_service_duration
FROM
employee_service
GROUP BY
department;
--================================================================
WITH view_to_cart AS (
SELECT v.product_id, COUNT(DISTINCT v.user_id) as views, COUNT(DISTINCT c.user_id) as cart_adds
FROM product_views v
LEFT JOIN product_cart c ON v.user_id = c.user_id AND v.product_id = c.product_id
GROUP BY v.product_id
),
cart_to_purchase AS (
SELECT c.product_id, COUNT(DISTINCT c.user_id) as cart_adds, COUNT(DISTINCT p.user_id) as purchases
FROM product_cart c
LEFT JOIN product_purchases p ON c.user_id = p.user_id AND c.product_id = p.product_id
GROUP BY c.product_id
)
SELECT v.product_id, v.views, v.cart_adds, COALESCE(p.purchases, 0) as purchases, (COALESCE(p.purchases,0)::float / v.views::float) as conversion_rate
FROM view_to_cart v
LEFT JOIN cart_to_purchase p ON v.product_id = p.product_id
ORDER BY conversion_rate DESC;
--================================================================
SELECT EXTRACT(MONTH FROM sale_date) AS month,
product_id AS product,
AVG(quantity) AS avg_quantity
FROM sales
GROUP BY 1, 2
ORDER BY 1, 2;
--================================================================
SELECT c.first_name, c.last_name, COUNT(o.order_id) as total_orders, SUM(o.order_total) as total_amount_spent
FROM customers c
JOIN orders o
ON c.customer_id = o.customer_id
GROUP BY c.first_name, c.last_name;
--================================================================
SELECT
artist_id,
listen_date,
AVG(daily_listens) OVER (
PARTITION BY artist_id
ORDER BY listen_date
RANGE BETWEEN INTERVAL '6 days' PRECEDING AND CURRENT ROW
) AS rolling_avg_listens
FROM artist_listens
ORDER BY artist_id, listen_date;
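-- with exactly one row per artist per day, a ROWS frame gives the same result:
SELECT artist_id,
    listen_date,
    AVG(daily_listens) OVER (
        PARTITION BY artist_id
        ORDER BY listen_date
        ROWS BETWEEN 6 PRECEDING AND CURRENT ROW
    ) AS rolling_avg_listens
FROM artist_listens
ORDER BY artist_id, listen_date;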
--================================================================
WITH history AS (
SELECT
user_id,
song_id,
song_plays
FROM songs_history
UNION ALL
SELECT
user_id,
song_id,
COUNT(song_id) AS song_plays
FROM songs_weekly
WHERE listen_time <= '08/04/2022 23:59:59'
GROUP BY user_id, song_id
)
SELECT
user_id,
song_id,
SUM(song_plays) AS song_count
FROM history
GROUP BY
user_id,
song_id
ORDER BY song_count DESC;
--================================================================
SELECT u.username, a.artist_name FROM (
SELECT stream.user_id, songs.artist_id, count(*) as num_songs
FROM streaming AS stream
JOIN songs ON stream.song_id = songs.song_id
GROUP BY stream.user_id, songs.artist_id
    ) AS sub_query
JOIN users AS u ON u.user_id = sub_query.user_id
JOIN artists AS a ON a.artist_id = sub_query.artist_id
ORDER BY sub_query.num_songs DESC
LIMIT 1;
--================================================================
SELECT u.user_id FROM users u
INNER JOIN (
SELECT user_id, COUNT(DISTINCT artist_name) as cnt
FROM activity
WHERE month = 'August'
GROUP BY user_id
) a
ON u.user_id = a.user_id
WHERE u.subscription_status = 'Premium'
AND u.last_login >= current_date - interval '30 days'
AND a.cnt >= 15;
--================================================================
SELECT EXTRACT(MONTH FROM ua.timestamp) AS mth,
s.genre,
       AVG(ua.listening_duration_sec) AS avg_listening_duration
FROM user_activity ua
JOIN songs s ON ua.song_id = s.song_id
GROUP BY mth, s.genre;
--================================================================
SELECT u.user_id, u.user_name
FROM users u
WHERE NOT EXISTS (SELECT 1
FROM (SELECT DISTINCT album_name
FROM album_listens
WHERE artist_name = 'Adele') a
WHERE NOT EXISTS (SELECT 1
FROM album_listens al
WHERE al.user_id = u.user_id
AND al.album_name = a.album_name
AND al.artist_name = 'Adele'))
--=========================================================================================
SELECT station_id,
date_trunc('day', start_time) AS charge_day,
SUM(EXTRACT(EPOCH FROM (end_time - start_time))/3600) AS total_charge_hours,
(SUM(EXTRACT(EPOCH FROM (end_time - start_time))/3600)
- LAG(SUM(EXTRACT(EPOCH FROM (end_time - start_time))/3600), 1,0)
OVER ( PARTITION BY station_id ORDER BY date_trunc('day', start_time) )
) AS diff_prev_day_hours
FROM charging_data
GROUP BY station_id, charge_day
ORDER BY station_id, charge_day;
--=========================================================================================
WITH clicks AS (
SELECT ad_campaign, product_model, COUNT(*) as num_clicks
FROM ad_clicks
GROUP BY ad_campaign, product_model
),
adds AS (
SELECT product_model, COUNT(*) as num_adds
FROM add_to_carts
GROUP BY product_model
)
SELECT clicks.ad_campaign, clicks.product_model, clicks.num_clicks, adds.num_adds,
(adds.num_adds::DECIMAL / clicks.num_clicks) * 100 AS conversion_rate
FROM clicks
JOIN adds ON clicks.product_model = adds.product_model;
--=========================================================================================
SELECT page1.url AS page_url, page2.url AS referred_from
FROM google_analytics AS page1
JOIN google_analytics AS page2 ON page1.referrer_id = page2.id
WHERE page1.id <> page2.id;
--=========================================================================================
SELECT EXTRACT(YEAR FROM sale_date) as year, model_id as model, AVG(price) as average_price
FROM sales
GROUP BY year, model;
--=========================================================================================
SELECT
run_id
,battery_model
,ROUND(ABS(charge_energy - discharge_energy)/SQRT(end_date - start_date + 1), 2) AS performance_index
FROM
battery_runs;
--=========================================================================================
SELECT
v.model_name,
v.manufacture_year,
AVG(s.distance_driven) AS average_distance,
AVG(s.power_consumed) AS average_power
FROM
vehicles v
JOIN
service_data s
ON
v.vehicle_id = s.vehicle_id
GROUP BY
v.model_name,
v.manufacture_year
ORDER BY
v.model_name,
v.manufacture_year;
--=========================================================================================
SELECT
merchant_id,
SUM(CASE WHEN LOWER(payment_method) = 'apple pay' THEN transaction_amount
ELSE 0 END) AS total_transaction
FROM transactions
GROUP BY merchant_id
ORDER BY total_transaction DESC;
SELECT
merchant_id,
SUM(transaction_amount) AS total_transaction
FROM transactions
where LOWER(payment_method) = 'apple pay'
GROUP BY merchant_id
ORDER BY total_transaction DESC;
--=========================================================================================
WITH daily_balances AS (
SELECT
DATE_TRUNC('day', transaction_date) AS transaction_day,
DATE_TRUNC('month', transaction_date) AS transaction_month,
SUM(CASE WHEN type = 'deposit' THEN amount
WHEN type = 'withdrawal' THEN -amount END) AS balance
FROM transactions
GROUP BY
DATE_TRUNC('day', transaction_date),
DATE_TRUNC('month', transaction_date))
SELECT
transaction_day,
SUM(balance) OVER (
PARTITION BY transaction_month
ORDER BY transaction_day) AS balance
FROM daily_balances
ORDER BY transaction_day;
--=========================================================================================
WITH monthly_stats AS (
SELECT
EXTRACT(MONTH FROM transaction_date) AS mth,
country,
COUNT(transaction_id) AS total_transactions,
AVG(amount) AS average_amount
FROM transactions
GROUP BY mth, country
)
SELECT
mth,
country,
total_transactions,
average_amount,
RANK() OVER(PARTITION BY country ORDER BY total_transactions DESC) AS rank
FROM monthly_stats
ORDER BY mth, country;
--=========================================================================================
SELECT
c.cardholder_id,
c.name,
SUM(t.amount) AS total_transaction_amount,
COUNT(t.transaction_id) AS transaction_count
FROM cardholder c
JOIN transaction t ON c.cardholder_id = t.card_id
JOIN merchant m ON t.merchant_id = m.merchant_id
WHERE c.country = m.country
AND DATE(t.transaction_date) BETWEEN '2022-07-01' AND '2022-07-31'
GROUP BY c.cardholder_id, c.name
HAVING SUM(t.amount) > 1000
ORDER BY total_transaction_amount DESC;
--=========================================================================================
SELECT
merchant_id,
date_trunc('day', transaction_date) as day,
COUNT(*) as transactions_count
FROM
Transactions
WHERE
transaction_date >= (now() - interval '1 month')
GROUP BY
1, 2
ORDER BY
    transactions_count DESC;
--=========================================================================================
SELECT
c.campaign_id,
COUNT(DISTINCT c.click_id) AS num_clicks,
COUNT(DISTINCT v.conversion_id) AS num_conversions,
    (COUNT(DISTINCT v.conversion_id)::float / COUNT(DISTINCT c.click_id)::float) AS conversion_rate
FROM
clicks c
LEFT JOIN
conversions v
ON
c.user_id = v.user_id
AND
c.campaign_id = v.campaign_id
GROUP BY
c.campaign_id;
--=========================================================================================
WITH payments AS (
SELECT
merchant_id,
EXTRACT(EPOCH FROM transaction_timestamp -
LAG(transaction_timestamp) OVER(
PARTITION BY merchant_id, credit_card_id, amount
ORDER BY transaction_timestamp)
)/60 AS minute_difference
FROM transactions)
SELECT COUNT(merchant_id) AS payment_count
FROM payments
WHERE minute_difference <= 10;
--=========================================================================================
SELECT
emp.employee_id AS employee_id,
emp.name AS employee_name
FROM employee AS mgr
INNER JOIN employee AS emp
ON mgr.employee_id = emp.manager_id
WHERE emp.salary > mgr.salary;
--=========================================================================================
CREATE TABLE ad_campaigns (
ad_id INTEGER PRIMARY KEY,
ad_name VARCHAR(255) NOT NULL,
start_date DATE NOT NULL,
end_date DATE NOT NULL,
budget DECIMAL(10,2) NOT NULL CHECK (budget > 0),
cost_per_click DECIMAL(10,2) NOT NULL CHECK (cost_per_click > 0)
);
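-- a CHECK across columns can also guard the campaign window
-- (the constraint name chk_campaign_dates is arbitrary):
ALTER TABLE ad_campaigns
    ADD CONSTRAINT chk_campaign_dates CHECK (end_date >= start_date);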
--=========================================================================================
WITH Month_Sum AS (
SELECT customer_id,
EXTRACT(YEAR FROM transaction_date) AS year,
EXTRACT(MONTH FROM transaction_date) AS month,
SUM(amount) as total_amount
FROM Transactions
GROUP BY customer_id, year, month
)
SELECT m.year, m.month, c.customer_id, c.customer_name, m.total_amount
FROM Month_Sum AS m
JOIN Customers as c
ON m.customer_id = c.customer_id
WHERE (m.year, m.month, m.total_amount) IN (
SELECT year, month, MAX(total_amount)
FROM Month_Sum
GROUP BY year, month
)
ORDER BY m.year, m.month, m.total_amount DESC;
--=========================================================================================
SELECT customer_id, AVG(transaction_amount) as avg_transaction_amount
FROM transactions
WHERE DATE_PART('year', transaction_date) = 2021
GROUP BY customer_id;
--=========================================================================================
WITH page_view_counts AS (
SELECT
service_id,
COUNT(*) AS view_count
FROM
page_views
GROUP BY
service_id
),
add_to_cart_counts AS (
SELECT
service_id,
COUNT(*) AS cart_add_count
FROM
cart_adds
GROUP BY
service_id
)
SELECT
p.service_id,
c.cart_add_count::FLOAT / NULLIF(p.view_count, 0) AS conversion_rate
FROM
page_view_counts p
LEFT JOIN
add_to_cart_counts c ON p.service_id = c.service_id;
--=========================================================================================
SELECT
account_id,
    SUM(CASE
        WHEN transaction_type = 'Deposit' THEN amount
        ELSE -amount END) AS final_balance
FROM transactions
GROUP BY account_id
--=========================================================================================
WITH user_average AS (
SELECT
user_id,
AVG(amount) OVER (PARTITION BY user_id) as avg_transaction
FROM transactions)
SELECT
user_id,
avg_transaction,
RANK() OVER (ORDER BY avg_transaction DESC) as rank
FROM user_average
ORDER BY rank;
--=========================================================================================
SELECT COUNT(payer_id) / 2 AS unique_relationships
FROM (
SELECT payer_id, recipient_id
FROM payments
INTERSECT
SELECT recipient_id, payer_id
FROM payments) AS relationships;
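-- counting each pair once regardless of direction, via LEAST/GREATEST
-- (the INTERSECT version above keeps only two-way relationships):
SELECT COUNT(DISTINCT (LEAST(payer_id, recipient_id),
                       GREATEST(payer_id, recipient_id))) AS unique_pairs
FROM payments;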
--=========================================================================================
SELECT u.user_id, u.username
FROM Transactions t
JOIN User u ON t.user_id = u.user_id
WHERE t.transaction_date > (CURRENT_DATE - INTERVAL '1 month')
AND ((t.transaction_type = 'Sent' AND t.amount > 1000)
OR (t.transaction_type = 'Received' AND t.amount > 5000))
AND u.is_fraudulent = false
GROUP BY u.user_id, u.username;
--=========================================================================================
SELECT
DATE(ac.click_time) AS day,
COUNT(DISTINCT ac.user_id) AS total_clicks,
    COUNT(DISTINCT acs.user_id) AS total_setups,
    COUNT(DISTINCT acs.user_id)::float / COUNT(DISTINCT ac.user_id) AS click_through_conversion_rate
FROM
ad_clicks AS ac
LEFT JOIN
    account_setup AS acs ON ac.user_id = acs.user_id
WHERE
DATE(ac.click_time) BETWEEN '2022-09-01' AND '2022-09-07'
GROUP BY
DATE(ac.click_time)
ORDER BY
day;
--=========================================================================================
SELECT
EXTRACT(MONTH FROM transaction_date) AS month,
product_id AS product,
SUM(transaction_amount) AS total_revenue
FROM
transactions
GROUP BY
month,
product
ORDER BY
total_revenue DESC;
--=========================================================================================
SELECT
t.user_id,
SUM(t.transaction_amount) AS total_amount,
AVG(t.transaction_amount) AS average_amount
FROM
Transactions t
GROUP BY
t.user_id
HAVING
COUNT(t.transaction_id) >= 2;
--=========================================================================================
SELECT DISTINCT
EXTRACT(MONTH FROM transaction_date) AS mth,
EXTRACT(YEAR FROM transaction_date) AS year,
merchant_id,
SUM(amount) OVER (PARTITION BY merchant_id, EXTRACT(MONTH FROM transaction_date), EXTRACT(YEAR FROM transaction_date)) as total_revenue
FROM
transactions;
--=========================================================================================
SELECT
merchant_id,
AVG(transaction_amount) as average_amount
FROM
merchant_transactions
WHERE
transaction_date BETWEEN '04/01/2022 00:00:00' AND '06/30/2022 23:59:59'
GROUP BY
merchant_id
ORDER BY
AVG(transaction_amount)
DESC;
--=========================================================================================
SELECT
EXTRACT(MONTH FROM date) AS month,
merchant_id,
AVG(amount) AS avg_daily_volume
FROM
transactions
GROUP BY
month,
merchant_id;
--=========================================================================================
SELECT c.name, COUNT(t.transaction_id) as total_transactions, SUM(t.amount) as total_amount
FROM transactions t
JOIN customer c
ON t.customer_id = c.customer_id
GROUP BY c.name
ORDER BY total_transactions DESC, total_amount DESC;
--=========================================================================================
SELECT
EXTRACT(hour FROM timestamp) as hour_of_day,
ROUND(AVG(payment_volume), 2) as avg_payment_volume
FROM
transactions
GROUP BY
hour_of_day
ORDER BY
hour_of_day;
--=========================================================================================
SELECT
DATE_PART('month', submit_date) AS mth,
product_id,
    (AVG(stars) OVER (PARTITION BY product_id, DATE_PART('month', submit_date)))::decimal(10,2) AS avg_stars
FROM
reviews
ORDER BY
mth, product_id;
--=========================================================================================
SELECT b.block_type, SUM(b.block_cost_per_hour * EXTRACT(HOUR FROM r.rental_length::interval)) AS total_earnings
FROM rentals r
JOIN blocks b ON r.block_id = b.block_id
WHERE r.start_date >= '2022-06-01' AND r.start_date < '2022-09-01'
GROUP BY b.block_type;
--=========================================================================================
SELECT *
FROM customers
WHERE signup_date > '2021-01-01'
AND total_purchase_usd > 150
ORDER BY signup_date DESC;
--=========================================================================================
SELECT ai.ad_id,
CAST(count(distinct ac.click_id) AS float) / CAST(count(distinct ai.impression_id) AS float) AS CTR
FROM ad_impressions ai
LEFT OUTER JOIN ad_clicks ac ON ai.user_id = ac.user_id AND ai.ad_id = ac.ad_id
GROUP BY ai.ad_id;
SELECT tv.tutorial_id,
CAST(count(distinct p.purchase_id) AS float) / CAST(count(distinct tv.view_id) AS float) AS CR
FROM tutorial_views tv
LEFT OUTER JOIN purchases p ON tv.user_id = p.user_id and tv.tutorial_id = p.tutorial_id
GROUP BY tv.tutorial_id;
--=========================================================================================
CREATE CLUSTERED INDEX transaction_id_index
ON block_payments (transaction_id);
CREATE INDEX transaction_id_index
ON block_payments (transaction_id);
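-- CREATE CLUSTERED INDEX is SQL Server syntax; the closest Postgres analogue
-- is a one-time physical reorder of the table by an existing index:
CLUSTER block_payments USING transaction_id_index;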
--=========================================================================================
WITH monthly_transaction AS (
SELECT EXTRACT(MONTH FROM transaction_date) AS month,
customer_id,
SUM(amount) AS total_amount,
AVG(amount) AS average_amount
FROM transactions
GROUP BY month, customer_id
)
SELECT month,
customer_id,
total_amount,
average_amount,
RANK() OVER (PARTITION BY month ORDER BY total_amount DESC, average_amount DESC) AS rank
FROM monthly_transaction
ORDER BY month ASC, rank ASC;
--=========================================================================================
WITH transactions_per_customer AS(
SELECT date_part('month', transaction_date) as month,
customer_id,
COUNT(transaction_id) as transactions_count
FROM transactions
WHERE date_part('year', transaction_date) = 2023
GROUP BY month, customer_id)
SELECT month, customer_id, transactions_count
FROM transactions_per_customer
ORDER BY transactions_count DESC
LIMIT 5;
--=========================================================================================
SELECT c.name, f.account_balance
FROM customers c
JOIN financial_accounts f ON c.account_id = f.account_id
WHERE f.loan_status = 'No outstanding loan'
AND f.account_balance > 1000;
--=========================================================================================
CREATE TABLE fiserv_accounts (
account_id INTEGER PRIMARY KEY,
account_name VARCHAR(255) NOT NULL,
industry VARCHAR(255) NOT NULL
);
CREATE TABLE opportunities (
opportunity_id INTEGER PRIMARY KEY,
opportunity_name VARCHAR(255) NOT NULL,
account_id INTEGER NOT NULL,
FOREIGN KEY (account_id) REFERENCES fiserv_accounts(account_id)
);
--=========================================================================================
SELECT pv.product_id,
       (COUNT(pa.product_id)::decimal / COUNT(pv.product_id)) * 100 AS click_through_rate
FROM products_views AS pv
LEFT JOIN products_added AS pa ON pv.product_id = pa.product_id AND pv.user_id = pa.user_id
GROUP BY pv.product_id
ORDER BY click_through_rate DESC;
--=========================================================================================
SELECT
C.first_name || ' ' || C.last_name AS customer_name,
C.email_id,
SUM(T.amount) as total_transaction_amount
FROM
customers C
JOIN
transactions T ON C.customer_id = T.customer_id
WHERE
DATE_PART('month', T.transaction_date) = 4 AND
DATE_PART('year', T.transaction_date) = 2020
GROUP BY
C.customer_id
ORDER BY
total_transaction_amount DESC;
--=========================================================================================
SELECT
EXTRACT(MONTH FROM transaction_date) AS month,
AVG(amount) AS average_amount
FROM
transactions
GROUP BY
month
ORDER BY
month;
--=========================================================================================
SELECT customer_id, COUNT(*) as transaction_count
FROM transactions
WHERE transaction_date > CURRENT_DATE - INTERVAL '30 days'
GROUP BY customer_id
ORDER BY transaction_count DESC
LIMIT 5;
--=========================================================================================
SELECT
user_id,
EXTRACT(MONTH FROM transaction_date) AS transaction_month,
SUM(amount) OVER (PARTITION BY user_id ORDER BY transaction_date)
AS running_total
FROM transactions;
--=========================================================================================
SELECT customer_id,
       COALESCE(email_engagement, 'not_active') as email_engagement,
       COALESCE(sms_engagement, 'not_opted_in') as sms_engagement
FROM global_payments_customers;
--=========================================================================================
SELECT t.transaction_date, m.merchant_name, t.transaction_status, SUM(t.transaction_amount) AS total_amount
FROM transactions t
JOIN merchants m ON t.merchant_id = m.merchant_id
GROUP BY t.transaction_date, m.merchant_name, t.transaction_status
ORDER BY t.transaction_date, m.merchant_name;
--=========================================================================================
SELECT p.payment_id, p.customer_id, c.first_name, c.last_name, p.amount
FROM payments AS p
JOIN customers AS c ON p.customer_id = c.customer_id
WHERE p.amount > 1000
AND p.status = 'Completed'
AND c.category = 'Business'
ORDER BY p.amount DESC;
--=========================================================================================
CREATE FUNCTION get_conversion_rate(start_date DATE, end_date DATE)
RETURNS NUMERIC AS
$BODY$
BEGIN
    -- cast before dividing so integer division does not truncate to 0;
    -- NULLIF guards against a zero impression count
    RETURN (SELECT COUNT(*) FROM events WHERE event_date BETWEEN start_date AND end_date AND event_name = 'conversion')::numeric
        / NULLIF((SELECT COUNT(*) FROM events WHERE event_date BETWEEN start_date AND end_date AND event_name = 'impression'), 0);
END;
$BODY$
LANGUAGE plpgsql;
SELECT get_conversion_rate('2022-01-01', '2023-01-01');
--=========================================================================================
SELECT
c.client_name,
AVG(t.transaction_amount) as avg_transaction_amount
FROM
transactions t
JOIN
clients c
ON
t.client_id = c.client_id
GROUP BY
c.client_name;
--=========================================================================================
SELECT
customer_id,
TO_CHAR(transaction_date, 'Month') AS month,
SUM(amount) AS total_amount,
COUNT(transaction_id) AS total_transactions
FROM
transactions
GROUP BY
customer_id,
month
ORDER BY
customer_id,
month;
--=========================================================================================
SELECT
c.first_name,
c.last_name,
SUM(t.amount) AS total_amount
FROM
customers c
INNER JOIN
transactions t ON c.customer_id = t.customer_id
GROUP BY
c.customer_id, c.first_name, c.last_name;
--=========================================================================================
SELECT u.user_id, u.user_name, SUM(t.transaction_amount) as total_revenue
FROM users u
JOIN transactions t ON u.user_id = t.user_id
GROUP BY u.user_id, u.user_name
ORDER BY total_revenue DESC
LIMIT 10;
--=========================================================================================
SELECT
EXTRACT(MONTH FROM transaction_date)::INTEGER AS month,
customer_id,
SUM(amount) AS total_amt,
ROUND(AVG(amount), 2) AS avg_amt
FROM
transactions
GROUP BY
month, customer_id
ORDER BY
customer_id, month;
--=========================================================================================
SELECT c.client_name, AVG(t.amount) as avg_transaction_amount
FROM clients c
JOIN transactions t ON c.client_id = t.client_id
GROUP BY c.client_name;
--=========================================================================================
SELECT
x,
y,
COUNT(*) as occurrences
FROM fis_global_table
GROUP BY
x,
y
HAVING
    COUNT(*) > 1;
SELECT *
FROM fis_global_table t1
WHERE EXISTS
(
SELECT 1
FROM fis_global_table t2
WHERE t1.column_name = t2.column_name
AND t1.id <> t2.id
);
--=========================================================================================
SELECT *
FROM customer
WHERE account_status = 'active'
AND transaction_amount > 5000
AND transaction_date > NOW() - INTERVAL '1 year'
AND (account_type = 'Savings' OR account_type = 'Current');
--=========================================================================================
SELECT
DATE_PART('month', date) AS month,
product_category,
SUM(amount) as total_revenue
FROM
transactions
GROUP BY
DATE_PART('month', date),
product_category
ORDER BY
month,
total_revenue DESC;
--=========================================================================================
SELECT c.first_name, c.last_name, c.location, t.product_name, t.transaction_date, t.transaction_amount
FROM customers c
JOIN transactions t
ON c.customer_id = t.customer_id
ORDER BY c.location, c.last_name, c.first_name;
--=========================================================================================
SELECT
EXTRACT(MONTH FROM transaction_date) as month,
AVG(amount) as avg_amount
FROM
transactions
GROUP BY
month
ORDER BY
month;
--=========================================================================================
SELECT
sender_id,
AVG(amount_sent_usd) as average_amount_sent
FROM
transactions
WHERE
DATE(transaction_date) BETWEEN '2022-04-01' AND '2022-04-30'
GROUP BY
sender_id;
--=========================================================================================
SELECT
DATE_TRUNC('month', transaction_date) AS month,
sender_currency,
recipient_currency,
AVG(transaction_amount) AS average_amount
FROM
transactions
GROUP BY
DATE_TRUNC('month', transaction_date),
sender_currency,
recipient_currency
ORDER BY
month,
average_amount DESC;
--=========================================================================================
SELECT t.transaction_id, t.customer_id, t.amount, t.transaction_date
FROM transactions t
JOIN customers c ON t.customer_id = c.customer_id
WHERE c.first_name LIKE 'Ro%' AND t.transaction_date BETWEEN '2022-09-01' and '2022-09-30'
--=========================================================================================
SELECT
    EXTRACT(MONTH FROM report_date) AS month,
    product_id,
    AVG(credit_score) AS avg_credit_score
FROM
    credit_score_report
GROUP BY
    product_id,
    month
ORDER BY
    month,
    product_id;
--=========================================================================================
WITH cte AS
(
SELECT *,
LEAD(credit_score) OVER (PARTITION BY customer_id ORDER BY score_date) AS next_score
FROM credit_scores
)
SELECT c.customer_id, c.full_name,
SUM(cte.next_score - cte.credit_score) as total_score_change
FROM cte
INNER JOIN customers c on cte.customer_id = c.customer_id
WHERE cte.score_date BETWEEN '01/01/2021' AND '12/31/2021'
GROUP BY c.customer_id, c.full_name
ORDER BY total_score_change DESC;
--=========================================================================================
SELECT c.customer_id, c.birth_year, c.credit_score, COUNT(t.transaction_id) AS transaction_count
FROM customer AS c
INNER JOIN transactions AS t ON c.customer_id = t.customer_id
WHERE c.birth_year > 1980 AND c.credit_score > 750
GROUP BY c.customer_id, c.birth_year, c.credit_score
HAVING COUNT(t.transaction_id) > 3;
--=========================================================================================
SELECT
COUNT(DISTINCT(ec.email_id))::float / COUNT(DISTINCT(es.email_id))::float AS Click_Through_Rate
FROM
emails_sent es
LEFT JOIN emails_clicked ec ON es.email_id = ec.email_id
WHERE
EXTRACT(MONTH FROM es.sent_date) = EXTRACT(MONTH FROM timestamp '2022-06-01');
--=========================================================================================
SELECT
EXTRACT(QUARTER from pay_date) AS quarter,
EXTRACT(YEAR from pay_date) AS year,
dept_id,
SUM(amount) AS total_payroll
FROM
employee_earnings
WHERE
pay_date >= NOW() - INTERVAL '1 year'
GROUP BY
quarter,
year,
dept_id
ORDER BY
year DESC,
quarter DESC,
dept_id;
--=========================================================================================
SELECT MAX(salary) AS second_highest_salary
FROM employee
WHERE salary < (
SELECT MAX(salary)
FROM employee
);
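-- equivalent via DISTINCT + OFFSET (returns no row, rather than NULL, when
-- there is no second distinct salary):
SELECT DISTINCT salary AS second_highest_salary
FROM employee
ORDER BY salary DESC
LIMIT 1 OFFSET 1;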
--=========================================================================================
SELECT employees.department, payrolls.month_year, SUM(payrolls.base_salary + payrolls.bonus) as total_salary
FROM employees
INNER JOIN payrolls ON employees.employee_id = payrolls.employee_id
WHERE employees.department = 'HR' AND payrolls.month_year = '01/2022'
GROUP BY employees.department, payrolls.month_year;
--=========================================================================================
SELECT
client_id,
year,
AVG(salary) as avg_salary
FROM
salaries_paid
WHERE
year IN (2021, 2022)
GROUP BY
client_id, year
ORDER BY
client_id, year;
--=========================================================================================
SELECT u.user_id, u.user_name, COUNT(l.login_id) as login_frequency
FROM users u
JOIN logins l
ON u.user_id = l.user_id
WHERE l.login_timestamp >= CURRENT_DATE - INTERVAL '1 month'
GROUP BY u.user_id, u.user_name
ORDER BY login_frequency DESC
LIMIT 10;
--=========================================================================================
SELECT department_id,
       date,
       AVG(daily_avg) OVER (PARTITION BY department_id ORDER BY date ROWS BETWEEN 6 PRECEDING AND CURRENT ROW) as avg_salary
FROM (
    SELECT department_id, date, AVG(salary) AS daily_avg
    FROM employee_salaries
    WHERE date >= (SELECT MAX(date) FROM employee_salaries) - INTERVAL '6 DAY'
    GROUP BY department_id, date
) AS daily
ORDER BY department_id, date;
--=========================================================================================
SELECT client_id, AVG(date_processed - date_received) as average_processing_time
FROM payroll_data
GROUP BY client_id;
--=========================================================================================
SELECT i.ad_id,
(COUNT(c.user_id)* 100.0) / NULLIF(COUNT(i.user_id), 0) as ctr
FROM ad_impressions i
LEFT JOIN ad_clicks c
ON i.ad_id = c.ad_id AND i.user_id = c.user_id
WHERE DATE_PART('month', i."timestamp"::date) = 6 AND
DATE_PART('year', i."timestamp"::date) = 2022
GROUP BY i.ad_id;
--=========================================================================================
SELECT month_year,
ROUND(AVG(total_payroll), 2) AS avg_payroll_per_client
FROM client_monthly_payroll
GROUP BY month_year
ORDER BY month_year;
--=========================================================================================
SELECT department, SUM(salary)
FROM adp_employees
WHERE department LIKE '%Analytics%'
GROUP BY department
HAVING SUM(salary) > 1000000;
--=========================================================================================
SELECT c.customer_id, c.first_name, c.last_name, c.city,
co.product_id
FROM customers c
LEFT JOIN contracts co ON c.customer_id = co.customer_id;
--=========================================================================================
SELECT department,
AVG(salary) OVER (PARTITION BY department) as avg_salary
FROM employee;
--=========================================================================================
SELECT
concat(e.first_name, ' ', e.last_name) as employee_name,
to_char(t.work_date, 'Mon-YYYY') as month_year,
sum(case when t.leave_status = 'WORKED' then 1 else 0 end) as total_working_days,
sum(case when t.leave_status = 'VACATION' then 1 else 0 end) as vacation_days
FROM
employee e
JOIN timesheets t
ON e.employee_id = t.employee_id
GROUP BY
employee_name,
month_year
ORDER BY
employee_name,
month_year;
--=========================================================================================
SELECT e.employee_id, e.first_name, e.last_name, EXTRACT(YEAR FROM l.leave_start_date) AS "Year",
AVG((l.leave_end_date - l.leave_start_date + 1)) AS "Average Leave Days"
FROM employee e
JOIN leave l ON e.employee_id = l.employee_id
GROUP BY e.employee_id, e.first_name, e.last_name, "Year"
ORDER BY e.employee_id, "Year";
--=========================================================================================
SELECT
v.product_id,
COUNT(DISTINCT a.user_id)::float / COUNT(DISTINCT v.user_id) AS conversion_rate
FROM
view_logs v
LEFT JOIN
add_to_cart_logs a
ON
v.user_id = a.user_id AND v.product_id = a.product_id
GROUP BY
v.product_id
--=========================================================================================
SELECT d.dept_name,
SUM(e.salary) AS total_payroll,
AVG(e.salary) AS avg_salary
FROM employees AS e
JOIN departments AS d
ON e.department_id = d.dept_id
GROUP BY d.dept_name
--=========================================================================================
SELECT email, job_title, company_id
FROM workday_sfdc_leads
WHERE created_at > '2023-01-01'
UNION
SELECT email, job_title, company_id
FROM workday_hubspot_leads
WHERE created_at > '2023-01-01';
--=========================================================================================
SELECT eh.employee_id, eh.month, eh.year,
CASE
WHEN eh.hours_worked > 140 THEN ROUND((er.rate * eh.hours_worked) * 0.10)
WHEN eh.hours_worked > 120 THEN ROUND((er.rate * eh.hours_worked) * 0.05)
ELSE ROUND((er.rate * eh.hours_worked) * POWER(10, -2))
END AS bonus
FROM employee_hours eh
JOIN employee_rates er ON eh.employee_id = er.employee_id
--=========================================================================================
SELECT EXTRACT(MONTH FROM transaction_date) AS month,
EXTRACT(YEAR FROM transaction_date) AS year,
user_id,
COUNT(transaction_id) AS num_transactions
FROM transactions
WHERE EXTRACT(YEAR FROM transaction_date) = 2022
GROUP BY month, year, user_id
HAVING COUNT(transaction_id) > 50
ORDER BY num_transactions DESC;
--=========================================================================================
-- window functions cannot be nested inside LAG, so stage the quarterly
-- average in a CTE first
WITH quarterly_scores AS (
    SELECT DISTINCT
        EXTRACT(QUARTER FROM review_date) AS quarter,
        AVG(score) OVER (PARTITION BY EXTRACT(QUARTER FROM review_date)) AS avg_score
    FROM
        employee_performance
)
SELECT
    quarter,
    avg_score,
    (avg_score - LAG(avg_score) OVER (ORDER BY quarter))
        / NULLIF(LAG(avg_score) OVER (ORDER BY quarter), 0) * 100 AS pct_change
FROM
    quarterly_scores
ORDER BY
    quarter;
--=========================================================================================
SELECT
TO_CHAR(salary_date, 'YYYY-MM') AS year_month,
SUM(gross_salary) AS total_gross_salary,
AVG(gross_salary) AS avg_gross_salary,
COUNT(DISTINCT employee_id) AS employee_count
FROM
salary
GROUP BY
year_month
ORDER BY
year_month;
--=========================================================================================
SELECT
a.ad_id,
COUNT(c.click_id) as total_clicks,
    (COUNT(c.click_id)::decimal /
        (SELECT COUNT(*) FROM ads WHERE display_date >= '10/01/2022' AND display_date <= '10/31/2022')) AS click_through_rate
FROM
ads as A
LEFT JOIN
clicks as C
ON
a.ad_id = c.ad_id
WHERE
a.display_date >= '10/01/2022'
AND a.display_date <= '10/31/2022'
GROUP BY
a.ad_id
ORDER BY
click_through_rate DESC;
--=========================================================================================
SELECT
CASE
        WHEN EXTRACT(YEAR FROM AGE(c.DOB)) BETWEEN 18 AND 25 THEN '18-25'
        WHEN EXTRACT(YEAR FROM AGE(c.DOB)) BETWEEN 26 AND 35 THEN '26-35'
        WHEN EXTRACT(YEAR FROM AGE(c.DOB)) BETWEEN 36 AND 45 THEN '36-45'
        WHEN EXTRACT(YEAR FROM AGE(c.DOB)) BETWEEN 46 AND 55 THEN '46-55'
ELSE 'Above 55'
END as age_group,
AVG(t.Transaction_Amount) as average_transaction_amount
FROM
Customers c
JOIN
Transactions t
ON
c.ID = t.Customer_ID
GROUP BY
age_group
ORDER BY
age_group;
--=========================================================================================
WITH task_data AS (
SELECT employee_id,
COUNT(*) AS num_tasks,
AVG(completion_time_hours) AS avg_comp_time,
AVG(errors) AS avg_errors
FROM tasks
GROUP BY employee_id
),
max_min AS (
SELECT MAX(num_tasks) AS max_tasks, MIN(num_tasks) AS min_tasks,
MAX(avg_comp_time) AS max_time, MIN(avg_comp_time) AS min_time,
MAX(avg_errors) AS max_errors, MIN(avg_errors) AS min_errors
FROM task_data
)
SELECT e.first_name, e.last_name,
ROUND((((t.num_tasks - m.min_tasks)::numeric / (m.max_tasks - m.min_tasks))*100), 2) as num_tasks_score,
ROUND((((m.max_time - t.avg_comp_time) / (m.max_time - m.min_time))*100)::numeric, 2) as avg_comp_time_score,
ROUND((((m.max_errors - t.avg_errors) / (m.max_errors - m.min_errors))*100)::numeric, 2) as avg_errors_score
FROM task_data t
JOIN employees e ON t.employee_id = e.employee_id
CROSS JOIN max_min m
--=========================================================================================
SELECT
users.username,
COUNT(payroll.payroll_id) AS payroll_processed
FROM
users
JOIN
payroll ON users.user_id = payroll.user_id
WHERE
payroll.processing_date >= NOW() - INTERVAL '3 months'
GROUP BY
users.username
ORDER BY
payroll_processed DESC
LIMIT 3;
--=========================================================================================
SELECT
id,
name,
department,
salary,
AVG(salary) OVER (PARTITION BY department) as average_salary,
salary - AVG(salary) OVER (PARTITION BY department) as difference
FROM employees
--=========================================================================================
SELECT DATE_PART('month', process_date) AS mth,
client_id AS client,
AVG(total_compensation)::numeric(10,2) AS avg_payroll
FROM payrolls
GROUP BY mth, client
ORDER BY mth ASC;
--=========================================================================================
SELECT c.customer_id, c.name, SUM(p.purchase_amount) as total_amount, COUNT(DISTINCT p.item_id) as unique_items
FROM customers c
JOIN purchases p ON c.customer_id = p.customer_id
GROUP BY c.customer_id, c.name
ORDER BY total_amount DESC;
--=========================================================================================
SELECT EXTRACT(MONTH FROM process_start_date) AS month,
AVG(EXTRACT(DAY FROM (process_complete_date - process_start_date))) AS average_processing_time
FROM payrolls
GROUP BY month
ORDER BY month ASC;
--=========================================================================================
SELECT DISTINCT
D.dep_name AS Department,
ROUND(AVG(E.salary) OVER (PARTITION BY E.dep_id), 2) AS Average_Salary
FROM
Employee E
INNER JOIN
Department D ON E.dep_id = D.dep_id
ORDER BY
Average_Salary DESC;
--=========================================================================================
SELECT
roles.role_name,
COUNT(employees.role_id) as employee_count
FROM
employees
JOIN
roles ON employees.role_id = roles.role_id
JOIN
departments ON employees.department_id = departments.department_id
WHERE
departments.department_name = 'Engineering'
GROUP BY
roles.role_name;
--=========================================================================================
SELECT
customer_id,
signup_date,
last_purchase_date,
total_purchases,
is_active,
region
FROM
CUSTOMERS
WHERE
last_purchase_date > current_date - INTERVAL '6 months'
AND total_purchases >= 10
AND is_active = true
AND region IN ('North America', 'Europe');
--=========================================================================================
SELECT
V.product_id,
COUNT(DISTINCT A.session_id)::float / COUNT(DISTINCT V.session_id) AS conversion_rate
FROM
product_views V
LEFT JOIN
product_adds A
ON
A.session_id = V.session_id AND A.product_id = V.product_id
GROUP BY
V.product_id
--=========================================================================================
SELECT c.customer_name, COUNT(DISTINCT p.product_name)
FROM customers c
JOIN purchases p
ON c.customer_id = p.customer_id
GROUP BY c.customer_name
HAVING COUNT(p.product_name) > 0;
--=========================================================================================
SELECT product_id,
ROUND(AVG(rating), 2) AS avg_rating,
ABS(MAX(rating) - MIN(rating)) AS abs_diff_max_min,
ROUND(AVG(rating) * SUM(review_count), 2) AS weighted_rating
FROM reviews
GROUP BY product_id;
--=========================================================================================
SELECT DISTINCT
EXTRACT(MONTH FROM date) AS Month,
customer_id AS Customer,
AVG(credit_score) OVER(PARTITION BY customer_id, EXTRACT(MONTH FROM date)) as average_score
FROM credit_scores
ORDER BY Month, Customer;
--=========================================================================================
CREATE UNIQUE INDEX employee_id_index
ON moody_s_employees (employee_id);
--=========================================================================================
WITH entities_risk_drop AS (
SELECT rr1.entity_id, e.entity_name
FROM risk_ratings rr1
INNER JOIN risk_ratings rr2 ON rr1.entity_id = rr2.entity_id
AND rr2.date_updated = (SELECT MAX(date_updated) FROM risk_ratings WHERE entity_id = rr1.entity_id AND date_updated < current_date - integer '30')
INNER JOIN entities e ON rr1.entity_id = e.entity_id
WHERE rr1.ratings < rr2.ratings AND rr1.date_updated = (SELECT MAX(date_updated) FROM risk_ratings WHERE entity_id = rr1.entity_id)
)
SELECT erd.entity_name, cb.bond_name
FROM entities_risk_drop erd
INNER JOIN corporate_bonds cb ON erd.entity_id = cb.entity_id;
--=========================================================================================
SELECT
*
FROM
customers
WHERE
sentiment = 'bearish' AND
risk_rating < 3 AND
(portfolio_type = 'fixed income' OR portfolio_type = 'equity')
ORDER BY
last_name ASC;
--=========================================================================================
SELECT A.product_line,
(C.conversion * 1.0 / A.clicks) AS click_through_conversion_rate
FROM ads A
JOIN conversions C on A.ad_id = C.ad_id
ORDER BY click_through_conversion_rate DESC;
--=========================================================================================
SELECT d.month, br.bond_id AS bond, AVG(br.rating) AS avg_rating
FROM bond_ratings br JOIN dates d ON br.date_id = d.date_id
GROUP BY d.month, br.bond_id
ORDER BY d.month, br.bond_id
--=========================================================================================
SELECT c.customer_id, c.first_name, c.last_name, c.email, SUM(s.amount) AS total_spent
FROM customers AS c
JOIN sales AS s
ON c.customer_id = s.customer_id
WHERE s.transaction_date >= (CURRENT_DATE - INTERVAL '30 days')
GROUP BY c.customer_id
HAVING SUM(s.amount) > 10000;
--=========================================================================================
SELECT
date,
company,
closing_price,
avg(closing_price) OVER (
PARTITION BY company
ORDER BY date
ROWS BETWEEN 6 PRECEDING AND 1 PRECEDING
) as rolling_avg_price
FROM
stock_prices
ORDER BY
date ASC;
--=========================================================================================
SELECT stock_id,
CASE WHEN MOD(ROUND(price * adjustment_factor, 2), 1) > 0.5 THEN
CEIL(ROUND(price * adjustment_factor, 2))
ELSE
FLOOR(ROUND(price * adjustment_factor, 2))
END as adjusted_price
FROM stocks;
--=========================================================================================
SELECT
EXTRACT(MONTH FROM submit_date) AS mth,
product_id AS product,
ROUND(AVG(stars::numeric), 2) AS avg_stars
FROM
reviews
GROUP BY
mth,
product
ORDER BY
mth,
product;
--=========================================================================================
WITH ranked_salary AS (
SELECT
name,
salary,
department_id,
DENSE_RANK() OVER (
PARTITION BY department_id ORDER BY salary DESC) AS ranking
FROM employee
)
SELECT
d.department_name,
rs.name,
rs.salary
FROM ranked_salary AS rs
INNER JOIN department AS d
ON rs.department_id = d.department_id
WHERE rs.ranking <= 3
ORDER BY d.department_id, rs.salary DESC, rs.name ASC;
--=========================================================================================
SELECT t.client_id, sum(t.amount) as total_purchases
FROM transactions t
WHERE t.transaction_type = 'purchase' AND
t.transaction_date >= (CURRENT_DATE - interval '1 month')
GROUP BY t.client_id;
--=========================================================================================
SELECT transaction_date AS date, COUNT(transaction_id) AS transaction_count
FROM transactions
GROUP BY transaction_date
ORDER BY transaction_date;
--=========================================================================================
SELECT
c.first_name,
c.last_name,
COALESCE(SUM(t.transaction_amount), 0) AS total_transaction_amount
FROM
Customers c
LEFT JOIN
Transactions t ON c.customer_id = t.customer_id
GROUP BY
c.first_name, c.last_name
ORDER BY
c.first_name;
--=========================================================================================
SELECT customer_id, count(request_id) as request_count
FROM data_requests
WHERE timestamp > (current_date - interval '1 month')
GROUP BY customer_id
HAVING count(request_id) >= 50;
--=========================================================================================
WITH monthly_scores AS (
SELECT
company_id,
EXTRACT(MONTH FROM period) AS month,
AVG(credit_score) OVER (PARTITION BY company_id, EXTRACT(MONTH FROM period)) AS avg_credit_score
FROM
company_credit_score
)
SELECT DISTINCT
company_id,
month,
avg_credit_score
FROM
monthly_scores
ORDER BY
company_id, month;
--=========================================================================================
SELECT
customer_id,
company_name,
credit_score
FROM
customers
WHERE
credit_score > 650
AND is_active = 1
AND outstanding_payment = 1;
--=========================================================================================
SELECT
DATE(purchase_date) as date,
ROUND(SUM(course_price),2) as total_revenue,
ROUND(SUM(course_price * (discount_per_cent/100.0)), 2) as total_discount,
ROUND(SUM(course_price * (1 - discount_per_cent/100.0)), 2) as revenue_after_discount,
ROUND(AVG(course_price * (1- discount_per_cent/100.0)),2) as avg_revenue_per_sale
FROM
course_sales
GROUP BY
DATE(purchase_date)
ORDER BY
date;
--=========================================================================================
WITH monthly_scores AS (
SELECT customer_id,
EXTRACT(MONTH FROM score_date) AS month,
AVG(risk_score) OVER(PARTITION BY customer_id, EXTRACT(MONTH FROM score_date)) AS avg_score
FROM risk_scores
),
score_diffs AS (
SELECT customer_id, month, avg_score,
avg_score - LAG(avg_score, 1) OVER(PARTITION BY customer_id ORDER BY month) AS score_diff
FROM monthly_scores
)
SELECT * FROM score_diffs
ORDER BY customer_id, month;
--=========================================================================================
SELECT
defaulted,
AVG(credit_score) AS average_credit_score
FROM
credit_scores cs
JOIN
loan_defaults ld ON cs.user_id = ld.user_id
GROUP BY
defaulted;
--=========================================================================================
SELECT
pv.product_id,
COUNT(DISTINCT atc.user_id)::float / COUNT(DISTINCT pv.user_id) AS conversion_rate
FROM
page_views as pv
LEFT JOIN
add_to_carts as atc
ON
pv.user_id = atc.user_id AND pv.product_id = atc.product_id
WHERE
DATE(pv.view_date) BETWEEN '2022-06-01' AND '2022-06-30'
GROUP BY
pv.product_id;
--=========================================================================================
WITH quarterly AS (
    SELECT
        customer_id,
        EXTRACT(QUARTER FROM purchase_date) AS qtr,
        CONCAT('Q', EXTRACT(QUARTER FROM purchase_date), '-', EXTRACT(YEAR FROM purchase_date)) AS quarter,
        COUNT(*) as total_transactions,
        SUM(amount) as total_spent
    FROM
        transactions
    WHERE
        EXTRACT(YEAR FROM purchase_date) = 2021
    GROUP BY
        customer_id, qtr, quarter
)
SELECT
    quarter,
    customer_id,
    total_transactions,
    total_spent,
    (total_spent - LAG(total_spent) OVER (PARTITION BY customer_id ORDER BY qtr))
        / NULLIF(LAG(total_spent) OVER (PARTITION BY customer_id ORDER BY qtr), 0) * 100 AS quarterly_growth
FROM
    quarterly
ORDER BY
    customer_id,
    qtr;
--=========================================================================================
SELECT DISTINCT c.customer_id, c.customer_name
FROM customer c
JOIN card cr ON c.customer_id = cr.customer_id
JOIN transaction t ON cr.card_id = t.card_id
WHERE c.country = 'USA'
AND
(SELECT COUNT(DISTINCT cr.card_type) FROM card cr WHERE cr.customer_id = c.customer_id) >= 2
AND
(SELECT COUNT(t.transaction_id)
FROM transaction t WHERE t.card_id = cr.card_id
AND t.transaction_date > (CURRENT_DATE - INTERVAL '1 month')) >= 5;
--=========================================================================================
SELECT customer_id,
       COALESCE(email_engagement, 'not_active') as email_engagement,
       COALESCE(sms_engagement, 'not_opted_in') as sms_engagement
FROM mastercard_customers;
--=========================================================================================
SELECT
DATE(transaction_date AT TIME ZONE 'EST') AS date,
SUM(amount) AS total_amount
FROM
transactions
GROUP BY
date
ORDER BY
date;
--=========================================================================================
SELECT user_id, SUM(amount_usd) AS total_amount
FROM transactions
WHERE user_id LIKE '_MC\_GOLD\_%'
AND EXTRACT(MONTH FROM transaction_date) = 6
AND amount_usd > 500
GROUP BY user_id
ORDER BY total_amount DESC;
--=========================================================================================
SELECT c.first_name, c.last_name, t.payment_type, AVG(t.transaction_value) as avg_transaction_value
FROM customer c
JOIN transaction t
ON c.customer_id = t.customer_id
GROUP BY c.first_name, c.last_name, t.payment_type;
--=========================================================================================
--=========================================================================================
-- Map open data files back to their relations (find relfilenode via /proc):
--cd /proc/<PID>/fd
--ls -ltr
select relname,relnamespace,relkind from pg_class where relfilenode IN (1358830,1358498);
select min_val, max_val from pg_settings where name='max_connections';
SELECT * FROM pg_stat_activity;
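-- Commonly filtered to non-idle sessions (columns are standard pg_stat_activity fields):
SELECT pid, state, query_start, wait_event_type, query
FROM pg_stat_activity
WHERE state <> 'idle';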
--=========================================================================================
select relname, seq_scan, idx_scan, vacuum_count from pg_stat_user_tables;
--=========================================================================================
EXPLAIN (FORMAT JSON) SELECT * FROM users WHERE id = 20;
EXPLAIN (ANALYZE, BUFFERS) SELECT * FROM users WHERE id = 20;
--================================================================================
SELECT * FROM pg_stats WHERE tablename = 'your_table_name' AND attname = 'column_name';
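-- The planner statistics most often inspected (all are real pg_stats columns):
SELECT null_frac, n_distinct, most_common_vals, most_common_freqs, correlation
FROM pg_stats
WHERE tablename = 'your_table_name' AND attname = 'column_name';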
--================================================================================
-- Replace a uuid PK with a generated bigint id (temp_id added beforehand, e.g. via the Liquibase changesets below).
-- Backfill must come before SET NOT NULL, or the ALTER fails on existing NULLs:
CREATE SEQUENCE your_table_id_seq;
UPDATE your_table
SET temp_id = nextval('your_table_id_seq');
ALTER TABLE your_table
ALTER COLUMN temp_id SET DEFAULT nextval('your_table_id_seq');
ALTER TABLE your_table
ALTER COLUMN temp_id SET NOT NULL;
-- either drop the old uuid column, or keep it around as a backup:
ALTER TABLE your_table DROP COLUMN old_uuid_column;
-- ALTER TABLE your_table RENAME COLUMN old_uuid_column TO uuid_backup;
ALTER TABLE your_table
RENAME COLUMN temp_id TO id;
ALTER TABLE your_table
ADD CONSTRAINT your_table_pkey PRIMARY KEY (id);
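-- Quick sanity checks after the swap (a sketch; assumes the steps above ran in order):
SELECT count(*) FROM your_table WHERE id IS NULL; -- expect 0
SELECT last_value FROM your_table_id_seq;         -- should be >= max(id)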
--================================================================================
<changeSet id="1" author="dev">
<addColumn tableName="your_table">
<column name="temp_id" type="BIGINT" autoIncrement="true"/>
</addColumn>
</changeSet>
<changeSet id="2" author="dev">
<sql>UPDATE your_table SET temp_id = nextval('your_table_id_seq');</sql>
</changeSet>
<changeSet id="3" author="dev">
<dropPrimaryKey tableName="your_table" constraintName="your_table_pkey"/>
<dropColumn tableName="your_table" columnName="old_uuid_column"/>
<renameColumn tableName="your_table" oldColumnName="temp_id" newColumnName="id"/>
<addPrimaryKey tableName="your_table" columnNames="id" constraintName="your_table_pkey"/>
</changeSet>
--================================================================================
-- Two alternative partial indexes over unsent rows (create one, not both):
CREATE INDEX good_records_pending_idx ON good_records (record_status, id)
WHERE notification_sent = false;
CREATE INDEX good_records_pending_incl_idx ON good_records (record_status) INCLUDE (id)
WHERE notification_sent = false;
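-- The kind of query both variants serve (an index-only scan candidate):
SELECT id
FROM good_records
WHERE record_status = 'NEW'
AND notification_sent = false;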
--=========================================================================================
CREATE TYPE record_type AS ENUM (
'TRANSFER',
'TRADE',
'VOUCHER'
);
CREATE TYPE record_status AS ENUM (
'NEW',
'VALIDATED',
'EXPIRED'
);
CREATE TABLE good_records (
id uuid PRIMARY KEY,
user_id uuid NOT NULL,
type record_type NOT NULL,
status record_status NOT NULL,
amount numeric(36,18) NOT NULL DEFAULT 0,
expired_at timestamp WITH TIME ZONE NOT NULL,
    notification_sent boolean DEFAULT false
);
--=========================================================================================
--=============================================================
DO
$do$
BEGIN
IF EXISTS (SELECT FROM orders) THEN
--IF (SELECT count(*) FROM orders) > 0
--IF (SELECT count(*) > 0 FROM orders)
DELETE FROM orders;
ELSE
INSERT INTO orders VALUES (1,2,3);
END IF;
END
$do$;
--=============================================================
DO
$do$
BEGIN
DELETE FROM orders;
IF NOT FOUND THEN
INSERT INTO orders VALUES (1,2,3);
END IF;
END
$do$;
--=============================================================
select
case
when stage = 1 then 'running'
when stage = 2 then 'done'
when stage = 3 then 'stopped'
else
'not running'
end as run_status from processes;
--=============================================================
DO $$ BEGIN
CASE
WHEN boolean-expression THEN
statements;
WHEN boolean-expression THEN
statements;
...
ELSE
statements;
END CASE;
END $$;
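-- A concrete, runnable instance of the template above (v_stage stands in for a real value):
DO $$
DECLARE
    v_stage int := 2;
BEGIN
    CASE
        WHEN v_stage = 1 THEN
            RAISE NOTICE 'running';
        WHEN v_stage = 2 THEN
            RAISE NOTICE 'done';
        ELSE
            RAISE NOTICE 'not running';
    END CASE;
END $$;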
--=============================================================
-- Invalid as written outside PL/pgSQL; inside a DO block the subquery needs parentheses:
IF (SELECT count(*) FROM orders) > 0
THEN
    DELETE FROM orders;
ELSE
    INSERT INTO orders VALUES (1,2,3);
END IF;
--=============================================================
--================================================================================
CREATE TYPE sex AS ENUM ('мужчина', 'женщина', 'иное');
--================================================================================
CREATE DOMAIN sex_char AS "char" CHECK (VALUE in ('m','f','x'));
CREATE FUNCTION sex(txt varchar, OUT ch sex_char) LANGUAGE plpgsql IMMUTABLE STRICT PARALLEL SAFE AS
$sex$
BEGIN
ch:= case txt
when 'мужчина' then 'm'::sex_char
when 'женщина' then 'f'::sex_char
when 'иное' then 'x'::sex_char
else null
end;
if ch is null then
raise invalid_parameter_value;
end if;
END
$sex$;
CREATE FUNCTION sex(ch sex_char, OUT txt varchar) LANGUAGE plpgsql IMMUTABLE STRICT PARALLEL SAFE AS
$sex$
BEGIN
txt:= case ch
when 'm'::sex_char then 'мужчина'
when 'f'::sex_char then 'женщина'
when 'x'::sex_char then 'иное'
else null
end;
if txt is null then
raise invalid_parameter_value;
end if;
END
$sex$;
select sex(ch=>'f');
select sex(txt=>'женщина');
select sex(sex(txt=>'женщина'));
select id from your_table where sex='f';
select id from your_table where sex=sex(txt=>'женщина');
select id from your_table join sex_t using (sex_t_id) where sex='женщина';
--================================================================================
# Minimal WAL level, to cut the time spent creating tables
wal_level = minimal
max_wal_senders = 0
# The tables are fully cached, so "random" access carries no extra cost.
random_page_cost = 1
# disable parallel query
max_parallel_workers_per_gather=0
# PostgreSQL cache
shared_buffers = 14GB
--================================================================================
select pg_prewarm('sex1');
select pg_prewarm('sex1_btree');
select pg_prewarm('sex2');
select pg_prewarm('sex2_btree');
select pg_prewarm('sex3');
select pg_prewarm('sex3_btree');
select pg_prewarm('sex4');
select pg_prewarm('sex4_btree');
select pg_prewarm('sex5');
select pg_prewarm('sex5_btree');
select pg_prewarm('sex5h');
select pg_prewarm('sex5h_hash');
select pg_prewarm('sex6');
select pg_prewarm('sex6_gin');
select pg_prewarm('sex6h');
select pg_prewarm('sex6h_gin_hash');
--================================================================================
select count(id) from sex1 where sex='мужчина';
select count(id) from sex1 where sex='женщина';
select count(id) from sex1 where sex='иное';
select count(id) from sex2 where sex_char=sex(txt=>'мужчина');
select count(id) from sex2 where sex_char=sex(txt=>'женщина');
select count(id) from sex2 where sex_char=sex(txt=>'иное');
select count(id) from sex3 join sex_t using (sex_t_id) where sex='мужчина';
select count(id) from sex3 join sex_t using (sex_t_id) where sex='женщина';
select count(id) from sex3 join sex_t using (sex_t_id) where sex='иное';
select count(id) from sex3 where sex_t_id=(select t.sex_t_id from sex_t t where sex='мужчина');
select count(id) from sex3 where sex_t_id=(select t.sex_t_id from sex_t t where sex='женщина');
select count(id) from sex3 where sex_t_id=(select t.sex_t_id from sex_t t where sex='иное');
select count(id) from sex4 join sex_t4 using (sex_t4_id) where sex='мужчина';
select count(id) from sex4 join sex_t4 using (sex_t4_id) where sex='женщина';
select count(id) from sex4 join sex_t4 using (sex_t4_id) where sex='иное';
select count(id) from sex4 where sex_t4_id=(select t.sex_t4_id from sex_t4 t where sex='мужчина');
select count(id) from sex4 where sex_t4_id=(select t.sex_t4_id from sex_t4 t where sex='женщина');
select count(id) from sex4 where sex_t4_id=(select t.sex_t4_id from sex_t4 t where sex='иное');
select count(id) from sex5 where sex='мужчина';
select count(id) from sex5 where sex='женщина';
select count(id) from sex5 where sex='иное';
select count(id) from sex5h where sex='мужчина';
select count(id) from sex5h where sex='женщина';
select count(id) from sex5h where sex='иное';
select count(id) from sex6 where jdoc@>'{"sex":"мужчина"}';
select count(id) from sex6 where jdoc@>'{"sex":"женщина"}';
select count(id) from sex6 where jdoc@>'{"sex":"иное"}';
select count(id) from sex6h where jdoc@>'{"sex":"мужчина"}';
select count(id) from sex6h where jdoc@>'{"sex":"женщина"}';
select count(id) from sex6h where jdoc@>'{"sex":"иное"}';
--================================================================================
-- populate the tables; every table holds identical data
\set table_size 10000000
-- handy view for checking table sizes after loading
create or replace view disk as SELECT n.nspname AS schema,
c.relname,
pg_size_pretty(pg_relation_size(c.oid::regclass)) AS size,
pg_relation_size(c.oid::regclass)/1024 AS size_KiB
FROM pg_class c
LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
ORDER BY (pg_relation_size(c.oid::regclass)) DESC
LIMIT 20;
begin;
-- sex1: the official enum type
CREATE TYPE sex_enum AS ENUM ('мужчина', 'женщина', 'иное');
create table sex1 (id float, sex sex_enum not null);
-- sex2 "char"
CREATE DOMAIN sex_char AS "char" CHECK (VALUE in ('m','f','x'));
CREATE FUNCTION sex(txt varchar, OUT ch sex_char) LANGUAGE plpgsql IMMUTABLE STRICT PARALLEL SAFE AS
$sex$
BEGIN
ch:= case txt
when 'мужчина' then 'm'::sex_char
when 'женщина' then 'f'::sex_char
when 'иное' then 'x'::sex_char
else null
end;
if ch is null then
raise invalid_parameter_value;
end if;
END
$sex$;
CREATE FUNCTION sex(ch sex_char, OUT txt varchar) LANGUAGE plpgsql IMMUTABLE STRICT PARALLEL SAFE AS
$sex$
BEGIN
txt:= case ch
when 'm'::sex_char then 'мужчина'
when 'f'::sex_char then 'женщина'
when 'x'::sex_char then 'иное'
else null
end;
if txt is null then
raise invalid_parameter_value;
end if;
END
$sex$;
create table sex2 (id float, sex_char "char" not null);
-- sex3: lookup table with a smallint key
create table sex_t (
sex_t_id smallint primary key,
sex varchar not null unique
);
insert into sex_t (sex_t_id,sex) values (1,'мужчина'),(0,'женщина'),(-1,'иное');
create table sex3 (id float, sex_t_id smallint not null references sex_t);
-- sex4: serial key; odd as it looks, this repeats what I saw at one respectable company
create table sex_t4 (
sex_t4_id serial primary key,
sex varchar not null unique
);
insert into sex_t4 (sex_t4_id,sex) values (1,'мужчина'),(0,'женщина'),(-1,'иное');
create table sex4 (id float, sex_t4_id integer not null references sex_t4);
-- plain text column
create table sex_t5 (
sex varchar primary key
);
insert into sex_t5 (sex) values ('мужчина'),('женщина'),('иное');
-- for a btree index
create table sex5 (id float, sex varchar not null references sex_t5);
-- for a hash index
create table sex5h (id float, sex varchar not null references sex_t5);
-- jsonb
-- for a regular gin index
create table sex6 (id float, jdoc jsonb not null);
-- for a gin index with hashed keys and values
create table sex6h (id float, jdoc jsonb not null);
-- insert the data
insert into sex1 (id,sex) select random, case when random<0.75 then 'мужчина'::sex_enum when random<0.99 then 'женщина'::sex_enum else 'иное'::sex_enum end from (select random() as random, generate_series(1,:table_size)) as subselect;
insert into sex5 (id,sex) select id,sex::varchar from sex1;
insert into sex2 (id,sex_char) select id,sex(sex) from sex5;
insert into sex3 (id,sex_t_id) select id,sex_t_id from sex5 join sex_t using (sex);
insert into sex4 (id,sex_t4_id) select id,sex_t4_id from sex5 join sex_t4 using (sex);
insert into sex5h (id,sex) select id,sex from sex5;
insert into sex6 (id,jdoc) select id,('{"sex": "'||sex||'"}')::jsonb from sex5;
insert into sex6h (id,jdoc) select id,jdoc from sex6;
-- create the indexes
create index sex1_btree on sex1(sex);
create index sex2_btree on sex2(sex_char);
create index sex3_btree on sex3(sex_t_id);
create index sex4_btree on sex4(sex_t4_id);
create index sex5_btree on sex5(sex);
-- use hash for the text column
create index sex5h_hash on sex5h using hash(sex);
create index sex6_gin on sex6 using gin(jdoc);
-- this too is, in essence, a hash
create index sex6h_gin_hash on sex6h using gin(jdoc jsonb_path_ops);
commit;
set role postgres;
-- extension for prewarming (filling the PostgreSQL cache)
create extension if not exists pg_prewarm;
-- handy extension for monitoring cache usage
create extension if not exists pg_buffercache;
create or replace view cache as SELECT n.nspname AS schema,
c.relname,
pg_size_pretty(count(*) * 8192) AS buffered,
count(*) * 8 AS buffered_KiB,
round(100.0 * count(*)::numeric / ((( SELECT pg_settings.setting
FROM pg_settings
WHERE pg_settings.name = 'shared_buffers'::text))::integer)::numeric, 1) AS buffer_percent,
round(100.0 * count(*)::numeric * 8192::numeric / pg_table_size(c.oid::regclass)::numeric, 1) AS percent_of_relation
FROM pg_class c
JOIN pg_buffercache b ON b.relfilenode = c.relfilenode
JOIN pg_database d ON b.reldatabase = d.oid AND d.datname = current_database()
LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
GROUP BY c.oid, n.nspname, c.relname
ORDER BY buffered_kib DESC
LIMIT 20;
-- final vacuum
vacuum freeze analyze;
--================================================================================
#!/bin/sh
set -o errexit -o noclobber -o nounset -o pipefail
#set -o errexit -o noclobber -o nounset -o pipefail -o xtrace
# for pgbench
PATH="$PATH:/usr/pgsql-13/bin"
# config
# database connection parameters
readonly PGDATABASE='sex'
readonly PGPORT=5432
export PGDATABASE PGPORT
# output data file
readonly data_csv='data.csv'
# init data files
readonly header='sex:,male,female,other'
if [ ! -s "$data_csv" ]
then
echo "$header" >|"$data_csv"
fi
# prewarm to the cache
psql --quiet -f prewarm.sql >/dev/null
# more prewarm
pgbench --no-vacuum --transaction 100 --file test.sql >/dev/null
for i in $(seq 1 100)
do
echo -n "$i "
date --iso-8601=seconds
pgbench --no-vacuum --transaction 100 --report-latencies --file 'test.sql' | \
awk "
/from sex1 where sex='мужчина';\$/ {printf \"enum,%s,\", \$1 >>\"$data_csv\";}
/from sex1 where sex='женщина';\$/ {printf \"%s,\", \$1 >>\"$data_csv\";}
/from sex1 where sex='иное';\$/ {printf \"%s\\n\", \$1 >>\"$data_csv\";}
/from sex2 where sex_char=sex\(txt=>'мужчина'\);\$/ {printf \"\\\"char\\\",%s,\", \$1 >>\"$data_csv\";}
/from sex2 where sex_char=sex\(txt=>'женщина'\);\$/ {printf \"%s,\", \$1 >>\"$data_csv\";}
/from sex2 where sex_char=sex\(txt=>'иное'\);\$/ {printf \"%s\\n\", \$1 >>\"$data_csv\";}
/from sex3 join sex_t using \(sex_t_id\) where sex='мужчина';\$/ {printf \"smallint(join),%s,\", \$1 >>\"$data_csv\";}
/from sex3 join sex_t using \(sex_t_id\) where sex='женщина';\$/ {printf \"%s,\", \$1 >>\"$data_csv\";}
/from sex3 join sex_t using \(sex_t_id\) where sex='иное';\$/ {printf \"%s\\n\", \$1 >>\"$data_csv\";}
/from sex3 where sex_t_id=\(select t.sex_t_id from sex_t t where sex='мужчина'\);\$/ {printf \"smallint(subsel),%s,\", \$1 >>\"$data_csv\";}
/from sex3 where sex_t_id=\(select t.sex_t_id from sex_t t where sex='женщина'\);\$/ {printf \"%s,\", \$1 >>\"$data_csv\";}
/from sex3 where sex_t_id=\(select t.sex_t_id from sex_t t where sex='иное'\);\$/ {printf \"%s\\n\", \$1 >>\"$data_csv\";}
/from sex4 join sex_t4 using \(sex_t4_id\) where sex='мужчина';\$/ {printf \"integer(join),%s,\", \$1 >>\"$data_csv\";}
/from sex4 join sex_t4 using \(sex_t4_id\) where sex='женщина';\$/ {printf \"%s,\", \$1 >>\"$data_csv\";}
/from sex4 join sex_t4 using \(sex_t4_id\) where sex='иное';\$/ {printf \"%s\\n\", \$1 >>\"$data_csv\";}
/from sex4 where sex_t4_id=\(select t.sex_t4_id from sex_t4 t where sex='мужчина'\);\$/ {printf \"integer(subsel),%s,\", \$1 >>\"$data_csv\";}
/from sex4 where sex_t4_id=\(select t.sex_t4_id from sex_t4 t where sex='женщина'\);\$/ {printf \"%s,\", \$1 >>\"$data_csv\";}
/from sex4 where sex_t4_id=\(select t.sex_t4_id from sex_t4 t where sex='иное'\);\$/ {printf \"%s\\n\", \$1 >>\"$data_csv\";}
/from sex5 where sex='мужчина';\$/ {printf \"varchar(btree),%s,\", \$1 >>\"$data_csv\";}
/from sex5 where sex='женщина';\$/ {printf \"%s,\", \$1 >>\"$data_csv\";}
/from sex5 where sex='иное';\$/ {printf \"%s\\n\", \$1 >>\"$data_csv\";}
/from sex5h where sex='мужчина';\$/ {printf \"varchar(hash),%s,\", \$1 >>\"$data_csv\";}
/from sex5h where sex='женщина';\$/ {printf \"%s,\", \$1 >>\"$data_csv\";}
/from sex5h where sex='иное';\$/ {printf \"%s\\n\", \$1 >>\"$data_csv\";}
/from sex6 where jdoc@>'{\"sex\":\"мужчина\"}';\$/ {printf \"jsonb(gin),%s,\", \$1 >>\"$data_csv\";}
/from sex6 where jdoc@>'{\"sex\":\"женщина\"}';\$/ {printf \"%s,\", \$1 >>\"$data_csv\";}
/from sex6 where jdoc@>'{\"sex\":\"иное\"}';\$/ {printf \"%s\\n\", \$1 >>\"$data_csv\";}
/from sex6h where jdoc@>'{\"sex\":\"мужчина\"}';\$/ {printf \"jsonb(gin+hash),%s,\", \$1 >>\"$data_csv\";}
/from sex6h where jdoc@>'{\"sex\":\"женщина\"}';\$/ {printf \"%s,\", \$1 >>\"$data_csv\";}
/from sex6h where jdoc@>'{\"sex\":\"иное\"}';\$/ {printf \"%s\\n\", \$1 >>\"$data_csv\";}
"
done
echo 'Done'
--================================================================================
explain (costs false) select count(id) from sex1 where sex='женщина';
explain (costs false) select count(id) from sex3 join sex_t using (sex_t_id) where sex='мужчина';
explain (costs false) select count(id) from sex5 where sex='женщина';
explain (costs false) select count(id) from sex6 where jdoc@>'{"sex":"мужчина"}';
--================================================================================
CREATE EXTENSION IF NOT EXISTS pxf;
CREATE TABLESPACE warm LOCATION '/data1/warm';
SELECT *
FROM pg_catalog.pg_tablespace;
CREATE TABLE sales(
id int,
date date,
amt decimal(10,2)
)
DISTRIBUTED BY (id)
PARTITION BY RANGE (date)
(DEFAULT PARTITION other);
/*
* Stored on pg_default.
* All rows land in the default partition.
*/
INSERT INTO sales(id, "date", amt)
WITH test AS(
select
generate_series('2010-01-01'::date, '2022-01-01'::date, '1 day'::interval) AS date
)
SELECT
to_char(date, 'YYYYMMDD')::integer AS id
, date
, (
random() * 1000
)::int + 1 AS amt
FROM
test;
/*4384*/
WITH test AS(
select
generate_series('2010-01-01'::date, '2022-01-01'::date, '1 day'::interval) AS date
)
SELECT
count(*)
FROM
test;
/*4384*/
SELECT
COUNT(*)
FROM sales;
/*
Verify the table is partitioned and has a default partition.
*/
select
*
from pg_catalog.pg_partitions;
/*Split the default partition into two: default and sales_2010*/
alter table public.sales
split default partition
start ('2010-01-01'::date) inclusive end ('2011-01-01'::date) EXCLUSIVE
into (partition sales_2010, default partition);
/*Verify the split produced default and sales_2010, both on pg_default*/
select *
from pg_catalog.pg_partitions;
/*4384*/
SELECT
COUNT(*)
FROM sales;
/*The optimizer now sees it as two partitions*/
explain SELECT
COUNT(*)
FROM sales;
/*A partition can be queried like an ordinary table*/
select
count(*)
from public.sales_1_prt_sales_2010;
/*Carve out a few more partitions for the experiments*/
alter table public.sales
split default partition
start ('2011-01-01'::date) inclusive end ('2012-01-01'::date) EXCLUSIVE
into (partition sales_2011, default partition);
alter table public.sales
split default partition
start ('2012-01-01'::date) inclusive end ('2013-01-01'::date) EXCLUSIVE
into (partition sales_2012, default partition);
/*Swap partition sales_2012 for a row-oriented AO table on tablespace warm*/
--Migrate partition 2012 to warm, row-oriented AO
create table temp_sales_2012 (like sales)
WITH (appendonly = 'true', compresslevel = '1', orientation = 'row', compresstype = zstd)
TABLESPACE warm;
insert into temp_sales_2012
select *
from public.sales_1_prt_sales_2012;
ALTER TABLE sales EXCHANGE PARTITION sales_2012
with table temp_sales_2012;
drop table temp_sales_2012;
select *
from pg_catalog.pg_partitions;
/*4384*/
SELECT
COUNT(*)
FROM sales;
/*Migrate partition 2011 to warm, column-oriented AO*/
create table temp_sales_2011 (like sales)
WITH (appendonly = 'true', compresslevel = '1', orientation = 'column', compresstype = zstd)
TABLESPACE warm;
insert into temp_sales_2011
select *
from public.sales_1_prt_sales_2011;
ALTER TABLE sales EXCHANGE PARTITION sales_2011
with table temp_sales_2011;
drop table temp_sales_2011;
select *
from pg_catalog.pg_partitions;
/*4384*/
SELECT
COUNT(*)
FROM sales;
explain SELECT
COUNT(*)
FROM sales;
/*move partition to s3
chown gpadmin:gpadmin minio-site.xml
*/
CREATE WRITABLE EXTERNAL TABLE sale_ext_text_write
(LIKE sales)
LOCATION ('pxf://test-bucket/sale?PROFILE=s3:text&SERVER=default&COMPRESSION_CODEC=org.apache.hadoop.io.compress.GzipCodec' )
ON ALL FORMAT 'TEXT' ( delimiter=',' ) ENCODING 'UTF8';
INSERT INTO sale_ext_text_write
SELECT * FROM sales_1_prt_sales_2010;
-- drop EXTERNAL TABLE temp_sale_ext_text;
CREATE EXTERNAL TABLE temp_sale_ext_text (LIKE sales)
LOCATION ('pxf://test-bucket/sale?PROFILE=s3:text&SERVER=default&COMPRESSION_CODEC=org.apache.hadoop.io.compress.GzipCodec' )
ON ALL FORMAT 'TEXT' ( delimiter=',' ) ENCODING 'UTF8';
SELECT id, "date", amt
FROM temp_sale_ext_text;
ALTER TABLE sales EXCHANGE PARTITION sales_2010
with table temp_sale_ext_text
WITHOUT VALIDATION;
drop EXTERNAL TABLE sale_ext_text_write;
select *
from pg_catalog.pg_partitions;
select count(*)
from sales;
explain
select count(*)
from sales;
--==========================================================================================
/*
Rename Table
ALTER TABLE table_name RENAME TO new_table_name;
This statement lets you change the name of a table to a different name.
As of version 0.6, a rename on a managed table moves its HDFS location as well. (Older Hive versions just renamed the table in the metastore without moving the HDFS location.)
Alter Table Properties
ALTER TABLE table_name SET TBLPROPERTIES table_properties;
table_properties:
: (property_name = property_value, property_name = property_value, ... )
You can use this statement to add your own metadata to the tables. Currently last_modified_user, last_modified_time properties are automatically added and managed by Hive. Users can add their own properties to this list. You can do DESCRIBE EXTENDED TABLE to get this information.
Alter Table Comment
To change the comment of a table you have to change the comment property of the TBLPROPERTIES:
ALTER TABLE table_name SET TBLPROPERTIES ('comment' = new_comment);
Add SerDe Properties
ALTER TABLE table_name [PARTITION partition_spec] SET SERDE serde_class_name [WITH SERDEPROPERTIES serde_properties];
ALTER TABLE table_name [PARTITION partition_spec] SET SERDEPROPERTIES serde_properties;
serde_properties:
: (property_name = property_value, property_name = property_value, ... )
*/
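-- Concrete forms of the statements described above (table and property names are hypothetical):
ALTER TABLE page_view RENAME TO page_view_archive;
ALTER TABLE page_view_archive SET TBLPROPERTIES ('last_reviewed' = '2014-09-02');
ALTER TABLE page_view_archive SET TBLPROPERTIES ('comment' = 'archived page views');
ALTER TABLE page_view_archive SET SERDEPROPERTIES ('field.delim' = ',');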
/*
Add Partitions
ALTER TABLE table_name ADD [IF NOT EXISTS] PARTITION partition_spec
[LOCATION 'location1'] partition_spec [LOCATION 'location2'] ...;
partition_spec:
: (partition_column = partition_col_value, partition_column = partition_col_value, ...)
You can use ALTER TABLE ADD PARTITION to add partitions to a table. Partition values should be quoted only if they are strings. The location must be a directory inside of which data files reside. (ADD PARTITION changes the table metadata, but does not load data. If the data does not exist in the partition's location, queries will not return any results.) An error is thrown if the partition_spec for the table already exists. You can use IF NOT EXISTS to skip the error.
Version 0.7
Although it is proper syntax to have multiple partition_spec in a single ALTER TABLE, if you do this in version 0.7 your partitioning scheme will fail. That is, every query specifying a partition will always use only the first partition.
Specifically, the following example will FAIL silently and without error in Hive 0.7, and all queries will go only to dt='2008-08-08' partition, no matter which partition you specify.
Example:
ALTER TABLE page_view ADD PARTITION (dt='2008-08-08', country='us') location '/path/to/us/part080808'
PARTITION (dt='2008-08-09', country='us') location '/path/to/us/part080809';
*/
--==========================================================================================
/*
CREATE (DATABASE|SCHEMA) [IF NOT EXISTS] database_name
[COMMENT database_comment]
[LOCATION hdfs_path]
[WITH DBPROPERTIES (property_name=property_value, ...)];*/
create DATABASE IF NOT EXISTS dmpDB
COMMENT "dmp project hive db"
LOCATION '${HIVE_HOME}'
WITH DBPROPERTIES ("creator"="morty", "date"="2016-5-17");
--- describe database dmpDB;
-- DROP DATABASE
DROP DATABASE IF EXISTS dmpDB;
--- ALTER DATABASE
/*
ALTER (DATABASE|SCHEMA) database_name SET DBPROPERTIES (property_name=property_value, ...); -- (Note: SCHEMA added in Hive 0.14.0)
ALTER (DATABASE|SCHEMA) database_name SET OWNER [USER|ROLE] user_or_role;
*/
ALTER DATABASE dmpDB SET DBPROPERTIES("creator"="morty","edit_by"="morty");
ALTER DATABASE dmpDB SET OWNER USER morty;
USE dmpDB;
USE DEFAULT;
--==========================================================================================
/*
CREATE [TEMPORARY] [EXTERNAL] TABLE [IF NOT EXISTS] [db_name.]table_name -- (Note: TEMPORARY available in Hive 0.14.0 and later)
[(col_name data_type [COMMENT col_comment], ...)]
[COMMENT table_comment]
[PARTITIONED BY (col_name data_type [COMMENT col_comment], ...)]
[CLUSTERED BY (col_name, col_name, ...) [SORTED BY (col_name [ASC|DESC], ...)] INTO num_buckets BUCKETS]
[SKEWED BY (col_name, col_name, ...) -- (Note: Available in Hive 0.10.0 and later)]
ON ((col_value, col_value, ...), (col_value, col_value, ...), ...)
[STORED AS DIRECTORIES]
[
[ROW FORMAT row_format]
[STORED AS file_format]
| STORED BY 'storage.handler.class.name' [WITH SERDEPROPERTIES (...)] -- (Note: Available in Hive 0.6.0 and later)
]
[LOCATION hdfs_path]
[TBLPROPERTIES (property_name=property_value, ...)] -- (Note: Available in Hive 0.6.0 and later)
[AS select_statement]; -- (Note: Available in Hive 0.5.0 and later; not supported for external tables)
CREATE [TEMPORARY] [EXTERNAL] TABLE [IF NOT EXISTS] [db_name.]table_name
LIKE existing_table_or_view_name
[LOCATION hdfs_path];
data_type
: primitive_type
| array_type
| map_type
| struct_type
| union_type -- (Note: Available in Hive 0.7.0 and later)
primitive_type
: TINYINT
| SMALLINT
| INT
| BIGINT
| BOOLEAN
| FLOAT
| DOUBLE
| STRING
| BINARY -- (Note: Available in Hive 0.8.0 and later)
| TIMESTAMP -- (Note: Available in Hive 0.8.0 and later)
| DECIMAL -- (Note: Available in Hive 0.11.0 and later)
| DECIMAL(precision, scale) -- (Note: Available in Hive 0.13.0 and later)
| DATE -- (Note: Available in Hive 0.12.0 and later)
| VARCHAR -- (Note: Available in Hive 0.12.0 and later)
| CHAR -- (Note: Available in Hive 0.13.0 and later)
array_type
: ARRAY < data_type >
map_type
: MAP < primitive_type, data_type >
struct_type
: STRUCT < col_name : data_type [COMMENT col_comment], ...>
union_type
: UNIONTYPE < data_type, data_type, ... > -- (Note: Available in Hive 0.7.0 and later)
row_format
: DELIMITED [FIELDS TERMINATED BY char [ESCAPED BY char]] [COLLECTION ITEMS TERMINATED BY char]
[MAP KEYS TERMINATED BY char] [LINES TERMINATED BY char]
[NULL DEFINED AS char] -- (Note: Available in Hive 0.13 and later)
| SERDE serde_name [WITH SERDEPROPERTIES (property_name=property_value, property_name=property_value, ...)]
file_format:
: SEQUENCEFILE
| TEXTFILE -- (Default, depending on hive.default.fileformat configuration)
| RCFILE -- (Note: Available in Hive 0.6.0 and later)
| ORC -- (Note: Available in Hive 0.11.0 and later)
| PARQUET -- (Note: Available in Hive 0.13.0 and later)
| AVRO -- (Note: Available in Hive 0.14.0 and later)
| INPUTFORMAT input_format_classname OUTPUTFORMAT output_format_classname
*/
-- create a table
CREATE TABLE IF NOT EXISTS dmp_useraccount(
userid BIGINT COMMENT "userid",
userpassword STRING COMMENT "userpassword",
userpasswordmd5 STRING COMMENT "userpasswordencryption",
useremail STRING COMMENT "useremail"
) COMMENT "user account table"
PARTITIONED BY(rgtime STRING)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY '\001'
COLLECTION ITEMS TERMINATED BY '\002'
MAP KEYS TERMINATED BY '\003'
STORED AS TEXTFILE;
-- create an external table
CREATE EXTERNAL TABLE IF NOT EXISTS dmpbean_user(
userid INT COMMENT "user_id",
username STRING COMMENT "user_name",
userage SMALLINT COMMENT "user_age",
userbirth DATE COMMENT "userbirthday",
userfamily MAP<STRING,STRING> COMMENT "userfamilyinfo",
useremail ARRAY<STRING> COMMENT "useremail",
userincome STRING COMMENT "user_income",
userdescribe STRING COMMENT "user_describe",
userpostcode INT COMMENT "user_postcode",
userAddress STRUCT <province:STRING COMMENT "user_province",city:STRING COMMENT "user_city",street:STRING COMMENT "user_street"> COMMENT "user_address"
) COMMENT "USER BEAN TABLE DEMO"
PARTITIONED BY(province STRING,city STRING)
CLUSTERED BY(userage) INTO 10 BUCKETS
ROW FORMAT DELIMITED
FIELDS TERMINATED BY '\001'
COLLECTION ITEMS TERMINATED BY '\002'
MAP KEYS TERMINATED BY '\003' -- [ROW FORMAT DELIMITED] sets the load-time separators: columns split on '\001', collection (array, map) elements on '\002', map keys from values on '\003'
STORED AS TEXTFILE -- file storage format
LOCATION "hdfs://dev/data" -- data location
;
-- Create Table As Select(CTAS)
/*
The target table cannot be a partitioned table.
The target table cannot be an external table.
The target table cannot be a list bucketing table
*/
CREATE TABLE dmpbean_uservipaccount
ROW FORMAT SERDE "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe"
STORED AS RCFile
AS
SELECT userid vipid, userpasswordmd5 vippassword, useremail vipemail
FROM dmpbean_useraccount;
/*
A Common Table Expression (CTE) is a temporary result set derived from a simple query specified in a WITH clause,
which immediately precedes a SELECT or INSERT keyword. The CTE is defined only within the execution scope of a single statement.
One or more CTEs can be used in a Hive SELECT, INSERT, CREATE TABLE AS SELECT, or CREATE VIEW AS SELECT statement.
Common Table Expression Syntax
withClause: cteClause (, cteClause)*
cteClause: cte_name AS (select statement)
*/
--EXAMPLE CTE in SELECT Statements
WITH q1 AS (SELECT userid FROM dmpbean_user WHERE userid ='5')
SELECT *
FROM q1;
-- from style
WITH q1 AS (SELECT * FROM dmpbean_user WHERE userid='5')
FROM q1
SELECT
*;
--chain CTES
WITH q1 AS (SELECT userid from q2 WHERE userid='5'),
q2 AS (SELECT userid from dmpbean_user WHERE userid='5')
SELECT * FROM (SELECT userid FROM q1) a;
-- union example
WITH q1 AS (SELECT * FROM dmpbean_user WHERE userid= '5'),
q2 AS (SELECT * FROM dmpbean_user s2 WHERE userid = '4')
SELECT * FROM q1 union all SELECT * FROM q2;
--CTE in Views, CTAS, and Insert Statements
-- insert example
CREATE TABLE s1 LIKE dmpbean_user;
WITH q1 as (SELECT userid,username FROM dmpbean_user WHERE userid=5)
FROM q1
INSERT OVERWRITE TABLE s1
SELECT *;
-- ctas example
create table s2 as
with q1 as ( select key from src where key = '4')
select * from q1;
-- view example
CREATE VIEW v1 AS
WITH q1 AS ( SELECT userid FROM dmpbean_user WHERE userid = '5')
SELECT * FROM q1;
SELECT * FROM v1;
-- VIEW example, name collision
CREATE VIEW v1 AS
WITH q1 AS ( SELECT userid FROM dmpbean_user WHERE userid = '5')
SELECT * FROM q1;
WITH q1 AS ( SELECT userid FROM dmpbean_user WHERE userid = '4')
SELECT * FROM v1;
-- copy the table structure (schema only)
CREATE TABLE dmpbean_vipuser LIKE dmpbean_user;
--DROP TABLE
DROP TABLE IF EXISTS dmpbean_user [PURGE]
--TRUNCATE TABLE
TRUNCATE TABLE dmpbean_user [PARTITION partition_spec]
CREATE INDEX employees_index ON TABLE employees(country)
AS 'org.apache.hadoop.hive.ql.index.compact.CompactIndexHandler'
WITH DEFERRED REBUILD;
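-- With DEFERRED REBUILD the index starts empty and must be built explicitly
-- (Hive indexes were removed in Hive 3.0, so this applies to older versions):
ALTER INDEX employees_index ON employees REBUILD;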
--==========================================================================================
CREATE DATABASE IF NOT EXISTS ${DATABASE};
create table du_u_data(
userid INT COMMENT 'user id',
itemid INT COMMENT 'movie id',
ratings DOUBLE COMMENT 'rating',
dataTime STRING COMMENT 'timestamp'
)ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS TEXTFILE;
CREATE TABLE IF NOT EXISTS du_u_item(
movieid INT COMMENT 'movie id',
movietitle STRING COMMENT 'movie title',
releasedate String COMMENT 'release date',
videoreleasedate String COMMENT 'video release date',
IMDbURL STRING COMMENT 'IMDb_URL',
unknown_type STRING COMMENT 'unknown',
Action_type STRING COMMENT 'action',
Adventure STRING COMMENT 'adventure',
Animation STRING COMMENT 'animation',
Childrens STRING COMMENT '',
Comedy STRING COMMENT 'comedy',
Crime STRING COMMENT '',
Documentary STRING COMMENT '',
Drama STRING COMMENT 'drama',
Fantasy STRING COMMENT '',
Film_Noir STRING COMMENT '',
Horror STRING COMMENT '',
Musical STRING COMMENT 'musical',
Mystery STRING COMMENT 'mystery',
Romance STRING COMMENT 'romance',
Sci_Fi STRING COMMENT '',
Thriller STRING COMMENT '',
War STRING COMMENT 'war',
Western STRING COMMENT 'western'
) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE;
load data local inpath '/user/zeus/duliang/warehouse/hive_db/ml_data/u.item' overwrite into table du_u_item;
--==========================================================================================
--=======================================================================
private static final String SELECT_TABLE_INDEX = "SELECT tmp.INDISPRIMARY AS Index_primary, tmp.TABLE_SCHEM, tmp.TABLE_NAME, tmp.NON_UNIQUE, tmp.INDEX_QUALIFIER, tmp.INDEX_NAME AS Key_name, tmp.indisclustered, tmp.ORDINAL_POSITION AS Seq_in_index, TRIM ( BOTH '\"' FROM pg_get_indexdef ( tmp.CI_OID, tmp.ORDINAL_POSITION, FALSE ) ) AS Column_name,CASE tmp.AM_NAME WHEN 'btree' THEN CASE tmp.I_INDOPTION [ tmp.ORDINAL_POSITION - 1 ] & 1 :: SMALLINT WHEN 1 THEN 'D' ELSE'A' END ELSE NULL END AS Collation, tmp.CARDINALITY, tmp.PAGES, tmp.FILTER_CONDITION , tmp.AM_NAME AS Index_method, tmp.DESCRIPTION AS Index_comment FROM ( SELECT n.nspname AS TABLE_SCHEM, ct.relname AS TABLE_NAME, NOT i.indisunique AS NON_UNIQUE, NULL AS INDEX_QUALIFIER, ci.relname AS INDEX_NAME,i.INDISPRIMARY , i.indisclustered , ( information_schema._pg_expandarray ( i.indkey ) ).n AS ORDINAL_POSITION, ci.reltuples AS CARDINALITY, ci.relpages AS PAGES, pg_get_expr ( i.indpred, i.indrelid ) AS FILTER_CONDITION, ci.OID AS CI_OID, i.indoption AS I_INDOPTION, am.amname AS AM_NAME , d.description FROM pg_class ct JOIN pg_namespace n ON ( ct.relnamespace = n.OID ) JOIN pg_index i ON ( ct.OID = i.indrelid ) JOIN pg_class ci ON ( ci.OID = i.indexrelid ) JOIN pg_am am ON ( ci.relam = am.OID ) left outer join pg_description d on i.indexrelid = d.objoid WHERE n.nspname = '%s' AND ct.relname = '%s' ) AS tmp ;";
private static String ROUTINES_SQL = "SELECT p.proname, p.prokind, pg_catalog.pg_get_functiondef(p.oid) as \"code\" FROM pg_catalog.pg_proc p where p.prokind = '%s' and p.proname='%s'";
private static String TRIGGER_SQL
= "SELECT n.nspname AS \"schema\", c.relname AS \"table_name\", t.tgname AS \"trigger_name\", t.tgenabled AS "
+ "\"enabled\", pg_get_triggerdef(t.oid) AS \"trigger_body\" FROM pg_trigger t JOIN pg_class c ON c.oid = t"
+ ".tgrelid JOIN pg_namespace n ON n.oid = c.relnamespace WHERE n.nspname = '%s' AND t.tgname ='%s';";
private static String TRIGGER_SQL_LIST
= "SELECT n.nspname AS \"schema\", c.relname AS \"table_name\", t.tgname AS \"trigger_name\", t.tgenabled AS "
+ "\"enabled\", pg_get_triggerdef(t.oid) AS \"trigger_body\" FROM pg_trigger t JOIN pg_class c ON c.oid = t"
+ ".tgrelid JOIN pg_namespace n ON n.oid = c.relnamespace WHERE n.nspname = '%s';";
private static String VIEW_SQL
= "SELECT schemaname, viewname, definition FROM pg_views WHERE schemaname = '%s' AND viewname = '%s';";
--=======================================================================
private static final String SELECT_KEY_INDEX = "SELECT ccu.table_schema AS Foreign_schema_name, ccu.table_name AS Foreign_table_name, ccu.column_name AS Foreign_column_name, constraint_type AS Constraint_type, tc.CONSTRAINT_NAME AS Key_name, tc.TABLE_NAME, kcu.Column_name, tc.is_deferrable, tc.initially_deferred FROM information_schema.table_constraints AS tc JOIN information_schema.key_column_usage AS kcu ON tc.CONSTRAINT_NAME = kcu.CONSTRAINT_NAME JOIN information_schema.constraint_column_usage AS ccu ON ccu.constraint_name = tc.constraint_name WHERE tc.TABLE_SCHEMA = '%s' AND tc.TABLE_NAME = '%s';";
--=======================================================================
public class SQLConst {
public static String TABLE_DEF_FUNCTION_SQL =
"""
CREATE TYPE tabledefs AS ENUM ('PKEY_INTERNAL','PKEY_EXTERNAL','FKEYS_INTERNAL', 'FKEYS_EXTERNAL', 'COMMENTS', 'FKEYS_NONE', 'INCLUDE_TRIGGERS', 'NO_TRIGGERS');
CREATE OR REPLACE FUNCTION pg_get_coldef(
in_schema text,
in_table text,
in_column text,
oldway boolean default False
)
RETURNS text
LANGUAGE plpgsql VOLATILE
AS
$$
DECLARE
v_coldef text;
v_dt1 text;
v_dt2 text;
v_dt3 text;
v_nullable boolean;
v_position int;
v_identity text;
v_generated text;
v_hasdflt boolean;
v_dfltexpr text;
BEGIN
IF oldway THEN
SELECT pg_catalog.format_type(a.atttypid, a.atttypmod) INTO v_coldef FROM pg_namespace n, pg_class c, pg_attribute a, pg_type t
WHERE n.nspname = in_schema AND n.oid = c.relnamespace AND c.relname = in_table AND a.attname = in_column and a.attnum > 0 AND a.attrelid = c.oid AND a.atttypid = t.oid ORDER BY a.attnum;
-- RAISE NOTICE 'DEBUG: oldway=%',v_coldef;
ELSE
SELECT CASE WHEN a.atttypid = ANY ('{int,int8,int2}'::regtype[]) AND EXISTS (SELECT FROM pg_attrdef ad WHERE ad.adrelid = a.attrelid AND ad.adnum = a.attnum AND
pg_get_expr(ad.adbin, ad.adrelid) = 'nextval(''' || (pg_get_serial_sequence (a.attrelid::regclass::text, a.attname))::regclass || '''::regclass)') THEN CASE a.atttypid
WHEN 'int'::regtype THEN 'serial' WHEN 'int8'::regtype THEN 'bigserial' WHEN 'int2'::regtype THEN 'smallserial' END ELSE format_type(a.atttypid, a.atttypmod) END AS data_type
INTO v_coldef FROM pg_namespace n, pg_class c, pg_attribute a, pg_type t
WHERE n.nspname = in_schema AND n.oid = c.relnamespace AND c.relname = in_table AND a.attname = in_column and a.attnum > 0 AND a.attrelid = c.oid AND a.atttypid = t.oid ORDER BY a.attnum;
END IF;
RETURN v_coldef;
END;
$$;
-- SELECT * FROM pg_get_tabledef('sample', 'address', false);
DROP FUNCTION IF EXISTS pg_get_tabledef(character varying,character varying,boolean,tabledefs[]);
CREATE OR REPLACE FUNCTION pg_get_tabledef(
in_schema varchar,
in_table varchar,
_verbose boolean,
VARIADIC arr tabledefs[] DEFAULT '{}':: tabledefs[]
)
RETURNS text
LANGUAGE plpgsql VOLATILE
AS
$$
DECLARE
v_qualified text := '';
v_table_ddl text;
v_table_oid int;
v_colrec record;
v_constraintrec record;
v_trigrec record;
v_indexrec record;
v_rec record;
v_constraint_name text;
v_constraint_def text;
v_pkey_def text := '';
v_fkey_def text := '';
v_fkey_defs text := '';
v_trigger text := '';
v_partition_key text := '';
v_partbound text;
v_parent text;
v_parent_schema text;
v_persist text;
v_temp text := '';
v_temp2 text;
v_relopts text;
v_tablespace text;
v_pgversion int;
bSerial boolean;
bPartition boolean;
bInheritance boolean;
bRelispartition boolean;
constraintarr text[] := '{}';
constraintelement text;
bSkip boolean;
bVerbose boolean := False;
v_cnt1 integer;
v_cnt2 integer;
search_path_old text := '';
search_path_new text := '';
v_partial boolean;
v_pos integer;
pkcnt int := 0;
fkcnt int := 0;
trigcnt int := 0;
cmtcnt int := 0;
pktype tabledefs := 'PKEY_INTERNAL';
fktype tabledefs := 'FKEYS_INTERNAL';
trigtype tabledefs := 'NO_TRIGGERS';
arglen integer;
vargs text;
avarg tabledefs;
v_ret text;
v_diag1 text;
v_diag2 text;
v_diag3 text;
v_diag4 text;
v_diag5 text;
v_diag6 text;
BEGIN
SET client_min_messages = 'notice';
IF _verbose THEN bVerbose = True; END IF;
arglen := array_length($4, 1);
IF arglen IS NULL THEN
-- nothing to do, so assume defaults
NULL;
ELSE
IF bVerbose THEN RAISE NOTICE 'arguments=%', $4; END IF;
FOREACH avarg IN ARRAY $4 LOOP
IF bVerbose THEN RAISE NOTICE 'arg=%', avarg; END IF;
IF avarg = 'FKEYS_INTERNAL' OR avarg = 'FKEYS_EXTERNAL' OR avarg = 'FKEYS_NONE' THEN
fkcnt = fkcnt + 1;
fktype = avarg;
ELSEIF avarg = 'INCLUDE_TRIGGERS' OR avarg = 'NO_TRIGGERS' THEN
trigcnt = trigcnt + 1;
trigtype = avarg;
ELSEIF avarg = 'PKEY_EXTERNAL' THEN
pkcnt = pkcnt + 1;
pktype = avarg;
ELSEIF avarg = 'COMMENTS' THEN
cmtcnt = cmtcnt + 1;
END IF;
END LOOP;
IF fkcnt > 1 THEN
RAISE WARNING 'Only one foreign key option can be provided. You provided %', fkcnt;
RETURN '';
ELSEIF trigcnt > 1 THEN
RAISE WARNING 'Only one trigger option can be provided. You provided %', trigcnt;
RETURN '';
ELSEIF pkcnt > 1 THEN
RAISE WARNING 'Only one pkey option can be provided. You provided %', pkcnt;
RETURN '';
ELSEIF cmtcnt > 1 THEN
RAISE WARNING 'Only one comments option can be provided. You provided %', cmtcnt;
RETURN '';
END IF;
END IF;
SELECT c.oid, (select setting from pg_settings where name = 'server_version_num') INTO v_table_oid, v_pgversion FROM pg_catalog.pg_class c LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind in ('r','p') AND c.relname = in_table AND n.nspname = in_schema;
SELECT setting INTO search_path_old FROM pg_settings WHERE name = 'search_path';
SELECT REPLACE(REPLACE(setting, '"$user"', '$user'), '$user', '"$user"') INTO search_path_old
FROM pg_settings
WHERE name = 'search_path';
EXECUTE 'SET search_path = "public"';
SELECT setting INTO search_path_new FROM pg_settings WHERE name = 'search_path';
IF (v_table_oid IS NULL) THEN
RAISE EXCEPTION 'table does not exist';
END IF;
SELECT tablespace INTO v_temp FROM pg_tables WHERE schemaname = in_schema and tablename = in_table and tablespace IS NOT NULL;
IF v_temp IS NULL THEN
v_tablespace := 'TABLESPACE pg_default';
ELSE
v_tablespace := 'TABLESPACE ' || v_temp;
END IF;
WITH relopts AS (SELECT unnest(c.reloptions) relopts FROM pg_class c, pg_namespace n WHERE n.nspname = in_schema and n.oid = c.relnamespace and c.relname = in_table)
SELECT string_agg(r.relopts, ', ') as relopts INTO v_temp from relopts r;
IF v_temp IS NULL THEN
v_relopts := '';
ELSE
v_relopts := ' WITH (' || v_temp || ')';
END IF;
v_partbound := '';
bPartition := False;
bInheritance := False;
IF v_pgversion < 100000 THEN
SELECT c2.relname parent, c2.relnamespace::regnamespace INTO v_parent, v_parent_schema from pg_class c1, pg_namespace n, pg_inherits i, pg_class c2
WHERE n.nspname = in_schema and n.oid = c1.relnamespace and c1.relname = in_table and c1.oid = i.inhrelid and i.inhparent = c2.oid and c1.relkind = 'r';
IF (v_parent IS NOT NULL) THEN
bPartition := True;
bInheritance := True;
END IF;
ELSE
SELECT c2.relname parent, c1.relispartition, pg_get_expr(c1.relpartbound, c1.oid, true), c2.relnamespace::regnamespace INTO v_parent, bRelispartition, v_partbound, v_parent_schema from pg_class c1, pg_namespace n, pg_inherits i, pg_class c2
WHERE n.nspname = in_schema and n.oid = c1.relnamespace and c1.relname = in_table and c1.oid = i.inhrelid and i.inhparent = c2.oid and c1.relkind = 'r';
IF (v_parent IS NOT NULL) THEN
bPartition := True;
IF bRelispartition THEN
bInheritance := False;
ELSE
bInheritance := True;
END IF;
END IF;
END IF;
IF bPartition THEN
SELECT count(*) INTO v_cnt1 FROM information_schema.tables t WHERE EXISTS (SELECT REGEXP_MATCHES(s.table_name, '([A-Z]+)','g') FROM information_schema.tables s
WHERE t.table_schema=s.table_schema AND t.table_name=s.table_name AND t.table_schema = in_schema AND t.table_name = in_table AND t.table_type = 'BASE TABLE');
SELECT COUNT(*) INTO v_cnt2 FROM pg_get_keywords() WHERE word = in_table AND catcode = 'R';
IF bInheritance THEN
IF v_cnt1 > 0 OR v_cnt2 > 0 THEN
v_table_ddl := 'CREATE TABLE ' || in_schema || '."' || in_table || '"( '|| E'\\n';
ELSE
v_table_ddl := 'CREATE TABLE ' || in_schema || '.' || in_table || '( '|| E'\\n';
END IF;
ELSE
IF v_relopts <> '' THEN
IF v_cnt1 > 0 OR v_cnt2 > 0 THEN
v_table_ddl := 'CREATE TABLE ' || in_schema || '."' || in_table || '" PARTITION OF ' || in_schema || '.' || v_parent || ' ' || v_partbound || v_relopts || ' ' || v_tablespace || '; ' || E'\\n';
ELSE
v_table_ddl := 'CREATE TABLE ' || in_schema || '.' || in_table || ' PARTITION OF ' || in_schema || '.' || v_parent || ' ' || v_partbound || v_relopts || ' ' || v_tablespace || '; ' || E'\\n';
END IF;
ELSE
IF v_cnt1 > 0 OR v_cnt2 > 0 THEN
v_table_ddl := 'CREATE TABLE ' || in_schema || '."' || in_table || '" PARTITION OF ' || in_schema || '.' || v_parent || ' ' || v_partbound || ' ' || v_tablespace || '; ' || E'\\n';
ELSE
v_table_ddl := 'CREATE TABLE ' || in_schema || '.' || in_table || ' PARTITION OF ' || in_schema || '.' || v_parent || ' ' || v_partbound || ' ' || v_tablespace || '; ' || E'\\n';
END IF;
END IF;
END IF;
END IF;
IF bVerbose THEN RAISE NOTICE '(1)tabledef so far: %', v_table_ddl; END IF;
IF NOT bPartition THEN
select c.relpersistence into v_persist from pg_class c, pg_namespace n where n.nspname = in_schema and n.oid = c.relnamespace and c.relname = in_table and c.relkind = 'r';
IF v_persist = 'u' THEN
v_temp := 'UNLOGGED';
ELSIF v_persist = 't' THEN
v_temp := 'TEMPORARY';
ELSE
v_temp := '';
END IF;
END IF;
IF NOT bPartition THEN
SELECT count(*) INTO v_cnt1 FROM information_schema.tables t WHERE EXISTS (SELECT REGEXP_MATCHES(s.table_name, '([A-Z]+)','g') FROM information_schema.tables s
WHERE t.table_schema=s.table_schema AND t.table_name=s.table_name AND t.table_schema = in_schema AND t.table_name = in_table AND t.table_type = 'BASE TABLE');
IF v_cnt1 > 0 THEN
v_table_ddl := 'CREATE ' || v_temp || ' TABLE ' || in_schema || '."' || in_table || '" (' || E'\\n';
ELSE
v_table_ddl := 'CREATE ' || v_temp || ' TABLE ' || in_schema || '.' || in_table || ' (' || E'\\n';
END IF;
END IF;
IF NOT bPartition THEN
FOR v_colrec IN
SELECT c.column_name, c.data_type, c.udt_name, c.udt_schema, c.character_maximum_length, c.is_nullable, c.column_default, c.numeric_precision, c.numeric_scale, c.is_identity, c.identity_generation, c.is_generated, c.generation_expression
FROM information_schema.columns c WHERE (table_schema, table_name) = (in_schema, in_table) ORDER BY ordinal_position
LOOP
IF bVerbose THEN RAISE NOTICE '(col loop) name=% type=% udt_name=% default=% is_generated=% gen_expr=%', v_colrec.column_name, v_colrec.data_type, v_colrec.udt_name, v_colrec.column_default, v_colrec.is_generated, v_colrec.generation_expression; END IF;
SELECT CASE WHEN pg_get_serial_sequence(quote_ident(in_schema) || '.' || quote_ident(in_table), v_colrec.column_name) IS NOT NULL THEN True ELSE False END into bSerial;
IF bVerbose THEN
SELECT pg_get_serial_sequence(quote_ident(in_schema) || '.' || quote_ident(in_table), v_colrec.column_name) into v_temp;
IF v_temp IS NULL THEN v_temp = 'NA'; END IF;
SELECT pg_get_coldef(in_schema, in_table,v_colrec.column_name) INTO v_diag1;
RAISE NOTICE 'DEBUG table: % Column: % datatype: % Serial=% serialval=% coldef=%', v_qualified, v_colrec.column_name, v_colrec.data_type, bSerial, v_temp, v_diag1;
RAISE NOTICE 'DEBUG tabledef: %', v_table_ddl;
END IF;
SELECT COUNT(*) INTO v_cnt1 FROM information_schema.columns t WHERE EXISTS (SELECT REGEXP_MATCHES(s.column_name, '([A-Z]+)','g') FROM information_schema.columns s
WHERE t.table_schema=s.table_schema and t.table_name=s.table_name and t.column_name=s.column_name AND t.table_schema = quote_ident(in_schema) AND column_name = v_colrec.column_name);
SELECT COUNT(*) INTO v_cnt2 FROM pg_get_keywords() WHERE word = v_colrec.column_name AND catcode = 'R';
IF v_cnt1 > 0 OR v_cnt2 > 0 THEN
v_table_ddl := v_table_ddl || ' "' || v_colrec.column_name || '" ';
ELSE
v_table_ddl := v_table_ddl || ' ' || v_colrec.column_name || ' ';
END IF;
IF v_colrec.is_generated = 'ALWAYS' and v_colrec.generation_expression IS NOT NULL THEN
v_temp = v_colrec.data_type || ' GENERATED ALWAYS AS (' || v_colrec.generation_expression || ') STORED ';
ELSEIF v_colrec.udt_name in ('geometry', 'box2d', 'box2df', 'box3d', 'geography', 'geometry_dump', 'gidx', 'spheroid', 'valid_detail') THEN
v_temp = v_colrec.udt_name;
ELSEIF v_colrec.data_type = 'USER-DEFINED' THEN
v_temp = v_colrec.udt_schema || '.' || v_colrec.udt_name;
ELSEIF v_colrec.data_type = 'ARRAY' THEN
v_temp = pg_get_coldef(in_schema, in_table,v_colrec.column_name);
ELSEIF pg_get_serial_sequence(quote_ident(in_schema) || '.' || quote_ident(in_table), v_colrec.column_name) IS NOT NULL THEN
-- Issue#8 fix: handle serial. Note: NOT NULL is implied so no need to declare it explicitly
v_temp = pg_get_coldef(in_schema, in_table,v_colrec.column_name);
ELSE
v_temp = v_colrec.data_type;
END IF;
IF v_colrec.is_identity = 'YES' THEN
IF v_colrec.identity_generation = 'ALWAYS' THEN
v_temp = v_temp || ' GENERATED ALWAYS AS IDENTITY NOT NULL';
ELSE
v_temp = v_temp || ' GENERATED BY DEFAULT AS IDENTITY NOT NULL';
END IF;
ELSEIF v_colrec.character_maximum_length IS NOT NULL THEN
v_temp = v_temp || ('(' || v_colrec.character_maximum_length || ')');
ELSEIF v_colrec.numeric_precision > 0 AND v_colrec.numeric_scale > 0 THEN
v_temp = v_temp || '(' || v_colrec.numeric_precision || ',' || v_colrec.numeric_scale || ')';
END IF;
IF bSerial THEN
v_temp = v_temp || ' NOT NULL';
ELSEIF v_colrec.is_nullable = 'NO' THEN
v_temp = v_temp || ' NOT NULL';
ELSEIF v_colrec.is_nullable = 'YES' THEN
v_temp = v_temp || ' NULL';
END IF;
IF v_colrec.column_default IS NOT null AND NOT bSerial THEN
v_temp = v_temp || (' DEFAULT ' || v_colrec.column_default);
END IF;
v_temp = v_temp || ',' || E'\\n';
v_table_ddl := v_table_ddl || v_temp;
END LOOP;
END IF;
IF bVerbose THEN RAISE NOTICE '(2)tabledef so far: %', v_table_ddl; END IF;
IF v_pgversion < 110000 THEN
FOR v_constraintrec IN
SELECT con.conname as constraint_name, con.contype as constraint_type,
CASE
WHEN con.contype = 'p' THEN 1 -- primary key constraint
WHEN con.contype = 'u' THEN 2 -- unique constraint
WHEN con.contype = 'f' THEN 3 -- foreign key constraint
WHEN con.contype = 'c' THEN 4
ELSE 5
END as type_rank,
pg_get_constraintdef(con.oid) as constraint_definition
FROM pg_catalog.pg_constraint con JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace
WHERE nsp.nspname = in_schema AND rel.relname = in_table ORDER BY type_rank
LOOP
v_constraint_name := v_constraintrec.constraint_name;
v_constraint_def := v_constraintrec.constraint_definition;
IF v_constraintrec.type_rank = 1 THEN
IF pkcnt = 0 OR pktype = 'PKEY_INTERNAL' THEN
v_constraint_name := v_constraintrec.constraint_name;
v_constraint_def := v_constraintrec.constraint_definition;
v_table_ddl := v_table_ddl || ' ' -- note: two char spacer to start, to indent the column
|| 'CONSTRAINT' || ' '
|| v_constraint_name || ' '
|| v_constraint_def
|| ',' || E'\\n';
ELSE
SELECT 'ALTER TABLE ONLY ' || in_schema || '.' || c.relname || ' ADD CONSTRAINT ' || r.conname || ' ' || pg_catalog.pg_get_constraintdef(r.oid, true) || ';' INTO v_pkey_def
FROM pg_catalog.pg_constraint r, pg_class c, pg_namespace n where r.conrelid = c.oid and r.contype = 'p' and n.oid = r.connamespace and n.nspname = in_schema AND c.relname = in_table and r.conname = v_constraint_name;
END IF;
IF bPartition THEN
continue;
END IF;
ELSIF v_constraintrec.type_rank = 3 THEN
IF fktype = 'FKEYS_NONE' THEN
continue;
ELSIF fkcnt = 0 OR fktype = 'FKEYS_INTERNAL' THEN
v_table_ddl := v_table_ddl || ' ' -- note: two char spacer to start, to indent the column
|| 'CONSTRAINT' || ' '
|| v_constraint_name || ' '
|| v_constraint_def
|| ',' || E'\\n';
ELSE
SELECT 'ALTER TABLE ONLY ' || n.nspname || '.' || c2.relname || ' ADD CONSTRAINT ' || r.conname || ' ' || pg_catalog.pg_get_constraintdef(r.oid, true) || ';' INTO v_fkey_def
FROM pg_constraint r, pg_class c1, pg_namespace n, pg_class c2 where r.conrelid = c1.oid and r.contype = 'f' and n.nspname = in_schema and n.oid = r.connamespace and r.conrelid = c2.oid and c2.relname = in_table;
v_fkey_defs = v_fkey_defs || v_fkey_def || E'\\n';
END IF;
ELSE
v_table_ddl := v_table_ddl || ' ' -- note: two char spacer to start, to indent the column
|| 'CONSTRAINT' || ' '
|| v_constraint_name || ' '
|| v_constraint_def
|| ',' || E'\\n';
END IF;
if bVerbose THEN RAISE NOTICE 'DEBUG4: constraint name=% constraint_def=%', v_constraint_name,v_constraint_def; END IF;
constraintarr := constraintarr || v_constraintrec.constraint_name:: text;
END LOOP;
ELSE
FOR v_constraintrec IN
SELECT con.conname as constraint_name, con.contype as constraint_type,
CASE
WHEN con.contype = 'p' THEN 1 -- primary key constraint
WHEN con.contype = 'u' THEN 2 -- unique constraint
WHEN con.contype = 'f' THEN 3 -- foreign key constraint
WHEN con.contype = 'c' THEN 4
ELSE 5
END as type_rank,
pg_get_constraintdef(con.oid) as constraint_definition
FROM pg_catalog.pg_constraint con JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace
WHERE nsp.nspname = in_schema AND rel.relname = in_table
--Issue#13 added this condition:
AND con.conparentid = 0
ORDER BY type_rank
LOOP
v_constraint_name := v_constraintrec.constraint_name;
v_constraint_def := v_constraintrec.constraint_definition;
IF v_constraintrec.type_rank = 1 THEN
IF pkcnt = 0 OR pktype = 'PKEY_INTERNAL' THEN
-- internal def
v_constraint_name := v_constraintrec.constraint_name;
v_constraint_def := v_constraintrec.constraint_definition;
v_table_ddl := v_table_ddl || ' ' -- note: two char spacer to start, to indent the column
|| 'CONSTRAINT' || ' '
|| v_constraint_name || ' '
|| v_constraint_def
|| ',' || E'\\n';
ELSE
SELECT 'ALTER TABLE ONLY ' || in_schema || '.' || c.relname || ' ADD CONSTRAINT ' || r.conname || ' ' || pg_catalog.pg_get_constraintdef(r.oid, true) || ';' INTO v_pkey_def
FROM pg_catalog.pg_constraint r, pg_class c, pg_namespace n where r.conrelid = c.oid and r.contype = 'p' and n.oid = r.connamespace and n.nspname = in_schema AND c.relname = in_table;
END IF;
IF bPartition THEN
continue;
END IF;
ELSIF v_constraintrec.type_rank = 3 THEN
IF fktype = 'FKEYS_NONE' THEN
-- skip
continue;
ELSIF fkcnt = 0 OR fktype = 'FKEYS_INTERNAL' THEN
-- internal def
v_table_ddl := v_table_ddl || ' ' -- note: two char spacer to start, to indent the column
|| 'CONSTRAINT' || ' '
|| v_constraint_name || ' '
|| v_constraint_def
|| ',' || E'\\n';
ELSE
SELECT 'ALTER TABLE ONLY ' || n.nspname || '.' || c2.relname || ' ADD CONSTRAINT ' || r.conname || ' ' || pg_catalog.pg_get_constraintdef(r.oid, true) || ';' INTO v_fkey_def
FROM pg_constraint r, pg_class c1, pg_namespace n, pg_class c2 where r.conrelid = c1.oid and r.contype = 'f' and n.nspname = in_schema and n.oid = r.connamespace and r.conrelid = c2.oid and c2.relname = in_table and
r.conname = v_constraint_name and r.conparentid = 0;
v_fkey_defs = v_fkey_defs || v_fkey_def || E'\\n';
END IF;
ELSE
v_table_ddl := v_table_ddl || ' ' -- note: two char spacer to start, to indent the column
|| 'CONSTRAINT' || ' '
|| v_constraint_name || ' '
|| v_constraint_def
|| ',' || E'\\n';
END IF;
if bVerbose THEN RAISE NOTICE 'DEBUG4: constraint name=% constraint_def=%', v_constraint_name,v_constraint_def; END IF;
constraintarr := constraintarr || v_constraintrec.constraint_name:: text;
END LOOP;
END IF;
select substring(v_table_ddl, length(v_table_ddl) - 1, 1) INTO v_temp;
IF v_temp = ',' THEN
v_table_ddl = substr(v_table_ddl, 0, length(v_table_ddl) - 1) || E'\\n';
END IF;
IF bVerbose THEN RAISE NOTICE '(3)tabledef so far: %', trim(v_table_ddl); END IF;
IF bVerbose THEN RAISE NOTICE '(4)tabledef so far: %', v_table_ddl; END IF;
IF bPartition and bInheritance THEN
IF v_parent_schema = '' OR v_parent_schema IS NULL THEN v_parent_schema = in_schema; END IF;
v_table_ddl := v_table_ddl || ') INHERITS (' || v_parent_schema || '.' || v_parent || ') ' || E'\\n' || v_relopts || ' ' || v_tablespace || ';' || E'\\n';
END IF;
IF v_pgversion >= 100000 AND NOT bPartition and NOT bInheritance THEN
SELECT pg_get_partkeydef(c1.oid) as partition_key INTO v_partition_key FROM pg_class c1 JOIN pg_namespace n ON (n.oid = c1.relnamespace) LEFT JOIN pg_partitioned_table p ON (c1.oid = p.partrelid)
WHERE n.nspname = in_schema and n.oid = c1.relnamespace and c1.relname = in_table and c1.relkind = 'p';
IF v_partition_key IS NOT NULL AND v_partition_key <> '' THEN
v_table_ddl := v_table_ddl || ') PARTITION BY ' || v_partition_key || ';' || E'\\n';
ELSEIF v_relopts <> '' THEN
v_table_ddl := v_table_ddl || ') ' || v_relopts || ' ' || v_tablespace || ';' || E'\\n';
ELSE
v_table_ddl := v_table_ddl || ') ' || v_tablespace || ';' || E'\\n';
END IF;
END IF;
IF bVerbose THEN RAISE NOTICE '(5)tabledef so far: %', v_table_ddl; END IF;
IF v_pkey_def <> '' THEN
v_table_ddl := v_table_ddl || v_pkey_def || E'\\n';
END IF;
IF v_fkey_defs <> '' THEN
v_table_ddl := v_table_ddl || v_fkey_defs || E'\\n';
END IF;
IF bVerbose THEN RAISE NOTICE '(6)tabledef so far: %', v_table_ddl; END IF;
FOR v_indexrec IN
SELECT indexdef, COALESCE(tablespace, 'pg_default') as tablespace, indexname FROM pg_indexes WHERE (schemaname, tablename) = (in_schema, in_table)
LOOP
bSkip = False;
FOREACH constraintelement IN ARRAY constraintarr
LOOP
IF constraintelement = v_indexrec.indexname THEN
-- RAISE NOTICE 'DEBUG7: skipping index, %', v_indexrec.indexname;
bSkip = True;
EXIT;
END IF;
END LOOP;
if bSkip THEN CONTINUE; END IF;
v_indexrec.indexdef := REPLACE(v_indexrec.indexdef, 'CREATE INDEX', 'CREATE INDEX IF NOT EXISTS');
v_indexrec.indexdef := REPLACE(v_indexrec.indexdef, 'CREATE UNIQUE INDEX', 'CREATE UNIQUE INDEX IF NOT EXISTS');
IF v_partition_key IS NOT NULL AND v_partition_key <> '' THEN
v_table_ddl := v_table_ddl || v_indexrec.indexdef || ';' || E'\\n';
ELSE
select CASE WHEN i.indpred IS NOT NULL THEN True ELSE False END INTO v_partial
FROM pg_index i JOIN pg_class c1 ON (i.indexrelid = c1.oid) JOIN pg_class c2 ON (i.indrelid = c2.oid)
WHERE c1.relnamespace::regnamespace::text = in_schema AND c2.relnamespace::regnamespace::text = in_schema AND c2.relname = in_table AND c1.relname = v_indexrec.indexname;
IF v_partial THEN
-- Put tablespace def before WHERE CLAUSE
v_temp = v_indexrec.indexdef;
v_pos = POSITION(' WHERE ' IN v_temp);
v_temp2 = SUBSTRING(v_temp, v_pos);
v_temp = SUBSTRING(v_temp, 1, v_pos);
v_table_ddl := v_table_ddl || v_temp || ' TABLESPACE ' || v_indexrec.tablespace || v_temp2 || ';' || E'\\n';
ELSE
v_table_ddl := v_table_ddl || v_indexrec.indexdef || ' TABLESPACE ' || v_indexrec.tablespace || ';' || E'\\n';
END IF;
END IF;
END LOOP;
IF bVerbose THEN RAISE NOTICE '(7)tabledef so far: %', v_table_ddl; END IF;
-- Issue#20: added logic for table and column comments
IF cmtcnt > 0 THEN
FOR v_rec IN
SELECT c.relname, 'COMMENT ON ' || CASE WHEN c.relkind in ('r','p') AND a.attname IS NULL THEN 'TABLE ' WHEN c.relkind in ('r','p') AND a.attname IS NOT NULL THEN 'COLUMN ' WHEN c.relkind = 'f' THEN 'FOREIGN TABLE '
WHEN c.relkind = 'm' THEN 'MATERIALIZED VIEW ' WHEN c.relkind = 'v' THEN 'VIEW ' WHEN c.relkind = 'i' THEN 'INDEX ' WHEN c.relkind = 'S' THEN 'SEQUENCE ' ELSE 'XX' END || n.nspname || '.' ||
CASE WHEN c.relkind in ('r','p') AND a.attname IS NOT NULL THEN quote_ident(c.relname) || '.' || a.attname ELSE quote_ident(c.relname) END || ' IS ' || quote_literal(d.description) || ';' as ddl
FROM pg_class c JOIN pg_namespace n ON (n.oid = c.relnamespace) LEFT JOIN pg_description d ON (c.oid = d.objoid) LEFT JOIN pg_attribute a ON (c.oid = a.attrelid AND a.attnum > 0 and a.attnum = d.objsubid)
WHERE d.description IS NOT NULL AND n.nspname = in_schema AND c.relname = in_table ORDER BY 2 desc, ddl
LOOP
--RAISE NOTICE 'comments:%', v_rec.ddl;
v_table_ddl = v_table_ddl || v_rec.ddl || E'\\n';
END LOOP;
END IF;
IF bVerbose THEN RAISE NOTICE '(8)tabledef so far: %', v_table_ddl; END IF;
IF trigtype = 'INCLUDE_TRIGGERS' THEN
-- Issue#14: handle multiple triggers for a table
FOR v_trigrec IN
select pg_get_triggerdef(t.oid, True) || ';' as triggerdef FROM pg_trigger t, pg_class c, pg_namespace n
WHERE n.nspname = in_schema and n.oid = c.relnamespace and c.relname = in_table and c.relkind = 'r' and t.tgrelid = c.oid and NOT t.tgisinternal
LOOP
v_table_ddl := v_table_ddl || v_trigrec.triggerdef;
v_table_ddl := v_table_ddl || E'\\n';
IF bVerbose THEN RAISE NOTICE 'triggerdef = %', v_trigrec.triggerdef; END IF;
END LOOP;
END IF;
IF bVerbose THEN RAISE NOTICE '(9)tabledef so far: %', v_table_ddl; END IF;
v_table_ddl := v_table_ddl || E'\\n';
IF bVerbose THEN RAISE NOTICE '(10)tabledef so far: %', v_table_ddl; END IF;
IF search_path_old = '' THEN
SELECT set_config('search_path', '', false) into v_temp;
ELSE
EXECUTE 'SET search_path = ' || search_path_old;
END IF;
RETURN v_table_ddl;
EXCEPTION
WHEN others THEN
BEGIN
GET STACKED DIAGNOSTICS v_diag1 = MESSAGE_TEXT, v_diag2 = PG_EXCEPTION_DETAIL, v_diag3 = PG_EXCEPTION_HINT, v_diag4 = RETURNED_SQLSTATE, v_diag5 = PG_CONTEXT, v_diag6 = PG_EXCEPTION_CONTEXT;
v_ret := 'line=' || v_diag6 || '. '|| v_diag4 || '. ' || v_diag1;
RAISE EXCEPTION '%', v_ret;
RETURN '';
END;
END;
$$;""".indent(1);
public static final String DROP_TYPE_SQL = "DROP TYPE IF EXISTS %s.%s CASCADE;";
public static final String ENUM_TYPE_DDL_SQL = """
SELECT 'CREATE TYPE "' || n.nspname || '"."' || t.typname || '" AS ENUM (' ||
string_agg(quote_literal(e.enumlabel), ', ') || ');' AS ddl
FROM pg_type t
JOIN pg_enum e ON t.oid = e.enumtypid
JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
WHERE t.typtype = 'e'
GROUP BY n.nspname, t.typname;""";
}
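// Usage sketch (hypothetical enum type "public"."mood"): String.format(DROP_TYPE_SQL, "public", "mood")
// yields: DROP TYPE IF EXISTS public.mood CASCADE;
// and for a type created as CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy'),
// ENUM_TYPE_DDL_SQL returns one row with ddl = CREATE TYPE "public"."mood" AS ENUM ('sad', 'ok', 'happy');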
--==========================================================================================
with positions as (
select level id from dual connect by level <= 5),
permutations as (
select p1.id a, p2.id b, p3.id c, p4.id d, p5.id e
from positions p1, positions p2, positions p3, positions p4, positions p5
where p1.id <> p2.id and p1.id <> p3.id and p1.id <> p4.id and p1.id <> p5.id and
p2.id <> p3.id and p2.id <> p4.id and p2.id <> p5.id and
p3.id <> p4.id and p3.id <> p5.id and
p4.id <> p5.id),
Names as (
select a Winslow, b Marcolla, c Contee, d Natsiou, e Finch from permutations),
Colors as (
select a red, b white, c purple, d blue, e green from permutations),
Cities as (
select a Bailton, b Serkonos, c Freiport, d Morley, e Danuol from permutations),
Drinks as (
select a absent, b coctail, c rum, d cider, e whiskey from permutations),
Items as (
select a ring, b diamond, c medal, d cigarcase, e coulomb from permutations),
solution as (
select *
from Names, Colors, Cities, Drinks, Items
where Winslow = blue and Marcolla = 1 and abs(Marcolla - white) = 1 and
red + 1 = purple and red = whiskey and Morley = green and
abs(cigarcase - Morley) = 1 and Finch = coulomb and Freiport = ring and
abs(diamond - Danuol) = 1 and abs(Danuol - coctail) = 1 and Contee = absent and
Serkonos = cider and rum = 3 and Natsiou = Bailton)
--==========================================================================================
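-- final SELECT completing the WITH chain above (the divider is just a comment, so the statement parses across it); a simpler alternative, select * from solution, follows the next divider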
select id as position,
case id
when Winslow then 'Winslow'
when Marcolla then 'Marcolla'
when Contee then 'Contee'
when Natsiou then 'Natsiou'
when Finch then 'Finch'
end as Name,
case id
when red then 'red'
when white then 'white'
when purple then 'purple'
when blue then 'blue'
when green then 'green'
end as Color,
case id
when Bailton then 'Bailton'
when Serkonos then 'Serkonos'
when Freiport then 'Freiport'
when Morley then 'Morley'
when Danuol then 'Danuol'
end as City,
case id
when absent then 'absent'
when coctail then 'coctail'
when rum then 'rum'
when cider then 'cider'
when whiskey then 'whiskey'
end as Drink,
case id
when ring then 'ring'
when diamond then 'diamond'
when medal then 'medal'
when cigarcase then 'cigarcase'
when coulomb then 'coulomb'
end as Item
from positions, solution
order by id;
--==========================================================================================
select * from solution;
--==========================================================================================
-- initial attribute sets
WITH RECURSIVE attr AS (
SELECT '{
{Winslow,Marcolla,Contee,Natsiou,Finch}
, {red,white,purple,blue,green}
, {Bailton,Serkonos,Freiport,Morley,Danuol}
, {absent,coctail,rum,cider,whiskey}
, {ring,diamond,order,cigar-case,coulomb}
}'::text[][] attr
)
-- number of attributes (= number of persons)
, qty AS (
SELECT array_length((TABLE attr), 1) qty
)
-- all attribute combinations
, comb_attr AS (
SELECT
ARRAY(
SELECT
attr[j + 1][((i / (qty ^ j)::integer) % qty) + 1]
FROM
generate_series(0, qty - 1) j
) c
FROM
attr
, qty
, generate_series(0, (qty ^ qty)::integer - 1) i
)
-- combinations allowed by the single-position clues
, cond_single AS (
SELECT
*
FROM
comb_attr
, unnest(ARRAY[1,2,3,4,5]) pos -- generate candidate seat positions
WHERE
-- Winslow ... blue
('{Winslow,blue}'::text[] <@ c OR NOT ('{Winslow,blue}'::text[] && c)) AND
-- Marcolla is leftmost of all
(('Marcolla' = ANY(c) AND pos = 1) OR ('Marcolla' <> ALL(c) AND pos <> 1)) AND
-- red ... whiskey
('{red,whiskey}'::text[] <@ c OR NOT ('{red,whiskey}'::text[] && c)) AND
-- Morley ... green
('{Morley,green}'::text[] <@ c OR NOT ('{Morley,green}'::text[] && c)) AND
-- Finch ... pendant (coulomb)
('{Finch,coulomb}'::text[] <@ c OR NOT ('{Finch,coulomb}'::text[] && c)) AND
-- Freiport ... ring
('{Freiport,ring}'::text[] <@ c OR NOT ('{Freiport,ring}'::text[] && c)) AND
-- Contee ... absinthe
('{Contee,absent}'::text[] <@ c OR NOT ('{Contee,absent}'::text[] && c)) AND
-- Serkonos ... cider
('{Serkonos,cider}'::text[] <@ c OR NOT ('{Serkonos,cider}'::text[] && c)) AND
-- rum ... in the middle
(('rum' = ANY(c) AND pos = 3) OR ('rum' <> ALL(c) AND pos <> 3)) AND
-- Natsiou ... Bailton
('{Natsiou,Bailton}'::text[] <@ c OR NOT ('{Natsiou,Bailton}'::text[] && c))
)
-- recursively prune combinations that repeat values
, r AS (
SELECT
0 pos
, '{}'::text[] acc
UNION ALL
SELECT
cs.pos
, r.acc || cs.c -- append the combination to the accumulator
FROM
r
JOIN
cond_single cs
ON cs.pos = r.pos + 1 AND -- next position
NOT (cs.c && r.acc) -- no overlapping attributes
)
-- seating-arrangement combinations
, comb_person AS (
SELECT
grp
, i + 1 pos
, acc[i * qty + 1 : (i + 1) * qty] c -- array slice
FROM
qty
, LATERAL (
SELECT
row_number() OVER() grp
, *
FROM
r
WHERE
pos = qty -- complete placements only
) T
, generate_series(0, qty - 1) i
)
SELECT
pos
, c person
FROM
comb_person
WHERE
grp IN (
SELECT
grp
FROM
comb_person X
JOIN
comb_person Y
USING(grp)
GROUP BY
1
HAVING
-- Marcolla ... next to ... white
bool_or('Marcolla' = ANY(X.c) AND 'white' = ANY(Y.c) AND abs(X.pos - Y.pos) = 1) AND
-- red ... to the left of ... purple
bool_or('red' = ANY(X.c) AND 'purple' = ANY(Y.c) AND X.pos = Y.pos - 1) AND
-- cigar-case ... next to ... Morley
bool_or('cigar-case' = ANY(X.c) AND 'Morley' = ANY(Y.c) AND abs(X.pos - Y.pos) = 1) AND
-- diamond ... next to ... Danuol
bool_or('diamond' = ANY(X.c) AND 'Danuol' = ANY(Y.c) AND abs(X.pos - Y.pos) = 1) AND
-- Danuol ... cocktail ... are neighbors
bool_or('Danuol' = ANY(X.c) AND 'coctail' = ANY(Y.c) AND abs(X.pos - Y.pos) = 1)
)
ORDER BY
grp, pos;
--==========================================================================================
drop database if exists dis2;
create database dis2;
use dis2;
-- Ladies' names
create table Names (name char(10)) ENGINE=MEMORY;
insert into Names values ('Winslow'), ('Marcolla'), ('Contee'), ('Natsiou'), ('Finch');
-- Seating positions at the table
create table Positions (pos int) ENGINE=MEMORY;
insert into Positions values (1), (2), (3), (4), (5);
-- Dress colors
create table Colors (color char(10)) ENGINE=MEMORY;
insert into Colors values ('red'), ('white'), ('purple'), ('blue'), ('green');
-- Ladies' home cities
create table Cities (city char(10)) ENGINE=MEMORY;
insert into Cities values ('Bailton'), ('Serkonos'), ('Freiport'), ('Morley'), ('Danuol');
-- Ladies' drinks
create table Drinks (drink char(10)) ENGINE=MEMORY;
insert into Drinks values ('absent'), ('coctail'), ('rum'), ('cider'), ('whiskey');
-- Valuables
create table Items (item char(10)) ENGINE=MEMORY;
insert into Items values ('ring'), ('diamond'), ('order'), ('cigar-case'), ('coulomb');
-- Table of preliminary candidates to sift
create table s (pos int, name char(10), item char(10), color char(10), drink char(10), city char(10)) ENGINE=MEMORY;
INSERT INTO s
SELECT pos, name, item, color, drink, city FROM Positions, Names, Colors, Drinks, Cities, Items
WHERE
((name='Winslow' AND color = 'blue') OR (name!='Winslow' AND color != 'blue'))
AND
((name='Marcolla' AND pos = 1) OR (name!='Marcolla' AND pos != 1))
AND
((color='white' AND pos = 2) OR (color!='white' AND pos != 2))
AND
((color='red' AND drink = 'whiskey') OR (color!='red' AND drink != 'whiskey'))
AND
((city='Morley' AND color = 'green') OR (city!='Morley' AND color != 'green'))
AND
((name='Finch' AND item = 'coulomb') OR (name!='Finch' AND item != 'coulomb'))
AND
((city='Freiport' AND item = 'ring') OR (city!='Freiport' AND item != 'ring'))
AND
((name='Contee' AND drink = 'absent') OR (name!='Contee' AND drink != 'absent'))
AND
((city='Serkonos' AND drink = 'cider') OR (city!='Serkonos' AND drink != 'cider'))
AND
((pos=3 AND drink = 'rum') OR (pos!=3 AND drink != 'rum'))
AND
((name='Natsiou' AND city = 'Bailton') OR (name!='Natsiou' AND city != 'Bailton'));
SELECT t1.name, t1.item, t2.name, t2.item, t3.name, t3.item, t4.name, t4.item, t5.name, t5.item
FROM s t1
JOIN s t2 ON t2.name != t1.name AND t2.item != t1.item AND t2.color != t1.color AND t2.drink != t1.drink AND t2.city != t1.city
JOIN s t3 ON t3.name NOT IN (t1.name, t2.name)
AND t3.item NOT IN (t1.item, t2.item)
AND t3.color NOT IN (t1.color, t2.color)
AND t3.drink NOT IN (t1.drink, t2.drink)
AND t3.city NOT IN (t1.city, t2.city)
JOIN s t4 ON t4.name NOT IN (t1.name, t2.name, t3.name)
AND t4.item NOT IN (t1.item, t2.item, t3.item)
AND t4.color NOT IN (t1.color, t2.color, t3.color)
AND t4.drink NOT IN (t1.drink, t2.drink, t3.drink)
AND t4.city NOT IN (t1.city, t2.city, t3.city)
JOIN s t5 ON t5.name NOT IN (t1.name, t2.name, t3.name, t4.name)
AND t5.item NOT IN (t1.item, t2.item, t3.item, t4.item)
AND t5.color NOT IN (t1.color, t2.color, t3.color, t4.color)
AND t5.drink NOT IN (t1.drink, t2.drink, t3.drink, t4.drink)
AND t5.city NOT IN (t1.city, t2.city, t3.city, t4.city)
WHERE
-- Output the ladies in the order in which they were seated
t1.pos = 1 AND t2.pos=2 AND t3.pos = 3 AND t4.pos=4 AND t5.pos=5
-- The lady in red sits to the left of the lady in purple; seat 2 is taken by the lady in white, so only seats 3 and 4 remain
AND (
(t3.color='red' AND t4.color='purple' ) OR (t4.color='red' AND t5.color='purple')
)
-- The lady from Morley sits next to the lady with the cigar case
AND
(
(t1.item='cigar-case' AND t2.city='Morley') OR
(t2.item='cigar-case' AND 'Morley' IN (t1.city, t3.city)) OR
(t3.item='cigar-case' AND 'Morley' IN (t2.city, t4.city)) OR
(t4.item='cigar-case' AND 'Morley' IN (t3.city, t5.city)) OR
(t5.item='cigar-case' AND t4.city='Morley')
)
-- The lady from Danuol sits next to the lady with the diamond
AND
(
(t1.item='diamond' AND t2.city='Danuol') OR
(t2.item='diamond' AND 'Danuol' IN (t1.city, t3.city)) OR
(t3.item='diamond' AND 'Danuol' IN (t2.city, t4.city)) OR
(t4.item='diamond' AND 'Danuol' IN (t3.city, t5.city)) OR
(t5.item='diamond' AND t4.city='Danuol')
)
-- A lady sitting next to the lady from Danuol drank the cocktail
AND
(
(t1.drink='coctail' AND t2.city='Danuol') OR
(t2.drink='coctail' AND 'Danuol' IN (t1.city, t3.city)) OR
(t3.drink='coctail' AND 'Danuol' IN (t2.city, t4.city)) OR
(t4.drink='coctail' AND 'Danuol' IN (t3.city, t5.city)) OR
(t5.drink='coctail' AND t4.city='Danuol')
);
--==========================================================================================
WITH Positions(pos) AS (
VALUES ROW(1), ROW(2), ROW(3), ROW(4), ROW(5)
)
, Names(name) AS (
VALUES ROW('Winslow'), ROW('Marcolla'), ROW('Contee'), ROW('Natsiou'), ROW('Finch')
)
, Colors(color) AS (
VALUES ROW('red'), ROW('white'), ROW('purple'), ROW('blue'), ROW('green')
)
, Cities(city) AS (
VALUES ROW('Bailton'), ROW('Serkonos'), ROW('Freiport'), ROW('Morley'), ROW('Danuol')
)
, Drinks(drink) AS (
VALUES ROW('absent'), ROW('coctail'), ROW('rum'), ROW('cider'), ROW('whiskey')
)
, Items(item) AS (
VALUES ROW('ring'), ROW('diamond'), ROW('order'), ROW('cigar-case'), ROW('coulomb')
)
, s AS (
SELECT DISTINCT *
FROM Positions, Names, Items, Colors, Drinks, Cities
WHERE ((name='Winslow' AND color = 'blue') OR (name!='Winslow' AND color != 'blue'))
AND
((name='Marcolla' AND pos = 1) OR (name!='Marcolla' AND pos != 1))
AND
((color='white' AND pos = 2) OR (color!='white' AND pos != 2))
AND
((color='red' AND drink = 'whiskey') OR (color!='red' AND drink != 'whiskey'))
AND
((city='Morley' AND color = 'green') OR (city!='Morley' AND color != 'green'))
AND
((name='Finch' AND item = 'coulomb') OR (name!='Finch' AND item != 'coulomb'))
AND
((city='Freiport' AND item = 'ring') OR (city!='Freiport' AND item != 'ring'))
AND
((name='Contee' AND drink = 'absent') OR (name!='Contee' AND drink != 'absent'))
AND
((city='Serkonos' AND drink = 'cider') OR (city!='Serkonos' AND drink != 'cider'))
AND
((pos=3 AND drink = 'rum') OR (pos!=3 AND drink != 'rum'))
AND
((name='Natsiou' AND city = 'Bailton') OR (name!='Natsiou' AND city != 'Bailton'))
)
SELECT t1.name, t1.item, t2.name, t2.item, t3.name, t3.item, t4.name, t4.item, t5.name, t5.item
FROM s t1
JOIN s t2 ON t2.name != t1.name AND t2.item != t1.item AND t2.color != t1.color AND t2.drink != t1.drink AND t2.city != t1.city
JOIN s t3 ON t3.name NOT IN (t1.name, t2.name)
AND t3.item NOT IN (t1.item, t2.item)
AND t3.color NOT IN (t1.color, t2.color)
AND t3.drink NOT IN (t1.drink, t2.drink)
AND t3.city NOT IN (t1.city, t2.city)
JOIN s t4 ON t4.name NOT IN (t1.name, t2.name, t3.name)
AND t4.item NOT IN (t1.item, t2.item, t3.item)
AND t4.color NOT IN (t1.color, t2.color, t3.color)
AND t4.drink NOT IN (t1.drink, t2.drink, t3.drink)
AND t4.city NOT IN (t1.city, t2.city, t3.city)
JOIN s t5 ON t5.name NOT IN (t1.name, t2.name, t3.name, t4.name)
AND t5.item NOT IN (t1.item, t2.item, t3.item, t4.item)
AND t5.color NOT IN (t1.color, t2.color, t3.color, t4.color)
AND t5.drink NOT IN (t1.drink, t2.drink, t3.drink, t4.drink)
AND t5.city NOT IN (t1.city, t2.city, t3.city, t4.city)
WHERE
-- Output the ladies in the order in which they were seated
t1.pos = 1 AND t2.pos=2 AND t3.pos = 3 AND t4.pos=4 AND t5.pos=5
-- The lady in red sits to the left of the lady in purple; seat 2 is taken by the lady in white, so only seats 3 and 4 remain
AND (
(t3.color='red' AND t4.color='purple' ) OR (t4.color='red' AND t5.color='purple')
)
-- The lady from Morley sits next to the lady with the cigar case
AND
(
(t1.item='cigar-case' AND t2.city='Morley') OR
(t2.item='cigar-case' AND 'Morley' IN (t1.city, t3.city)) OR
(t3.item='cigar-case' AND 'Morley' IN (t2.city, t4.city)) OR
(t4.item='cigar-case' AND 'Morley' IN (t3.city, t5.city)) OR
(t5.item='cigar-case' AND t4.city='Morley')
)
-- The lady from Danuol sits next to the lady with the diamond
AND
(
(t1.item='diamond' AND t2.city='Danuol') OR
(t2.item='diamond' AND 'Danuol' IN (t1.city, t3.city)) OR
(t3.item='diamond' AND 'Danuol' IN (t2.city, t4.city)) OR
(t4.item='diamond' AND 'Danuol' IN (t3.city, t5.city)) OR
(t5.item='diamond' AND t4.city='Danuol')
)
-- A lady sitting next to the lady from Danuol drank the cocktail
AND
(
(t1.drink='coctail' AND t2.city='Danuol') OR
(t2.drink='coctail' AND 'Danuol' IN (t1.city, t3.city)) OR
(t3.drink='coctail' AND 'Danuol' IN (t2.city, t4.city)) OR
(t4.drink='coctail' AND 'Danuol' IN (t3.city, t5.city)) OR
(t5.drink='coctail' AND t4.city='Danuol')
);
--==========================================================================================
WITH s(pos, name, item, color, drink, city) AS
(
SELECT pos, name, item, color, drink, city FROM Positions, Names, Colors, Drinks, Cities, Items
WHERE
((name='Winslow' AND color = 'blue') OR (name!='Winslow' AND color != 'blue'))
AND
((name='Marcolla' AND pos = 1) OR (name!='Marcolla' AND pos != 1))
AND
((color='white' AND pos = 2) OR (color!='white' AND pos != 2))
AND
((color='red' AND drink = 'whiskey') OR (color!='red' AND drink != 'whiskey'))
AND
((city='Morley' AND color = 'green') OR (city!='Morley' AND color != 'green'))
AND
((name='Finch' AND item = 'coulomb') OR (name!='Finch' AND item != 'coulomb'))
AND
((city='Freiport' AND item = 'ring') OR (city!='Freiport' AND item != 'ring'))
AND
((name='Contee' AND drink = 'absent') OR (name!='Contee' AND drink != 'absent'))
AND
((city='Serkonos' AND drink = 'cider') OR (city!='Serkonos' AND drink != 'cider'))
AND
((pos=3 AND drink = 'rum') OR (pos!=3 AND drink != 'rum'))
AND
((name='Natsiou' AND city = 'Bailton') OR (name!='Natsiou' AND city != 'Bailton'))
)
SELECT t1.name, t1.item, t2.name, t2.item, t3.name, t3.item, t4.name, t4.item, t5.name, t5.item
FROM s t1
JOIN s t2 ON t2.name != t1.name AND t2.item != t1.item AND t2.color != t1.color AND t2.drink != t1.drink AND t2.city != t1.city
JOIN s t3 ON t3.name NOT IN (t1.name, t2.name)
AND t3.item NOT IN (t1.item, t2.item)
AND t3.color NOT IN (t1.color, t2.color)
AND t3.drink NOT IN (t1.drink, t2.drink)
AND t3.city NOT IN (t1.city, t2.city)
JOIN s t4 ON t4.name NOT IN (t1.name, t2.name, t3.name)
AND t4.item NOT IN (t1.item, t2.item, t3.item)
AND t4.color NOT IN (t1.color, t2.color, t3.color)
AND t4.drink NOT IN (t1.drink, t2.drink, t3.drink)
AND t4.city NOT IN (t1.city, t2.city, t3.city)
JOIN s t5 ON t5.name NOT IN (t1.name, t2.name, t3.name, t4.name)
AND t5.item NOT IN (t1.item, t2.item, t3.item, t4.item)
AND t5.color NOT IN (t1.color, t2.color, t3.color, t4.color)
AND t5.drink NOT IN (t1.drink, t2.drink, t3.drink, t4.drink)
AND t5.city NOT IN (t1.city, t2.city, t3.city, t4.city)
WHERE
-- Output the ladies in the order in which they were seated
t1.pos = 1 AND t2.pos=2 AND t3.pos = 3 AND t4.pos=4 AND t5.pos=5
-- The lady in red sits to the left of the lady in purple; seat 2 is taken by the lady in white, so only seats 3 and 4 remain
AND (
(t3.color='red' AND t4.color='purple' ) OR (t4.color='red' AND t5.color='purple')
)
-- The lady from Morley sits next to the lady with the cigar case
AND
(
(t1.item='cigar-case' AND t2.city='Morley') OR
(t2.item='cigar-case' AND 'Morley' IN (t1.city, t3.city)) OR
(t3.item='cigar-case' AND 'Morley' IN (t2.city, t4.city)) OR
(t4.item='cigar-case' AND 'Morley' IN (t3.city, t5.city)) OR
(t5.item='cigar-case' AND t4.city='Morley')
)
-- The lady from Danuol sits next to the lady with the diamond
AND
(
(t1.item='diamond' AND t2.city='Danuol') OR
(t2.item='diamond' AND 'Danuol' IN (t1.city, t3.city)) OR
(t3.item='diamond' AND 'Danuol' IN (t2.city, t4.city)) OR
(t4.item='diamond' AND 'Danuol' IN (t3.city, t5.city)) OR
(t5.item='diamond' AND t4.city='Danuol')
)
-- A lady sitting next to the lady from Danuol drank the cocktail
AND
(
(t1.drink='coctail' AND t2.city='Danuol') OR
(t2.drink='coctail' AND 'Danuol' IN (t1.city, t3.city)) OR
(t3.drink='coctail' AND 'Danuol' IN (t2.city, t4.city)) OR
(t4.drink='coctail' AND 'Danuol' IN (t3.city, t5.city)) OR
(t5.drink='coctail' AND t4.city='Danuol')
);
--==========================================================================================
--=============================
=> CREATE TABLE accounts(
acc_no integer PRIMARY KEY,
amount numeric
);
=> INSERT INTO accounts
VALUES (1, 100.00), (2, 200.00), (3, 300.00);
--=============================
=> CREATE EXTENSION pageinspect;
--=============================
=> CREATE VIEW accounts_v AS
SELECT '(0,'||lp||')' AS ctid,
t_xmax as xmax,
CASE WHEN (t_infomask & 128) > 0 THEN 't' END AS lock_only,
CASE WHEN (t_infomask & 4096) > 0 THEN 't' END AS is_multi,
CASE WHEN (t_infomask2 & 8192) > 0 THEN 't' END AS keys_upd,
CASE WHEN (t_infomask & 16) > 0 THEN 't' END AS keyshr_lock,
CASE WHEN (t_infomask & 16+64) = 16+64 THEN 't' END AS shr_lock
FROM heap_page_items(get_raw_page('accounts',0))
ORDER BY lp;
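-- The masks above decode heap-tuple hint bits from PostgreSQL's htup_details.h:
-- t_infomask: 128 = HEAP_XMAX_LOCK_ONLY, 4096 = HEAP_XMAX_IS_MULTI,
-- 16 = HEAP_XMAX_KEYSHR_LOCK, 16+64 = HEAP_XMAX_SHR_LOCK (key-share + exclusive bits);
-- t_infomask2: 8192 = HEAP_KEYS_UPDATED.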
--=============================
=> BEGIN;
=> UPDATE accounts SET amount = amount + 100.00 WHERE acc_no = 1;
=> UPDATE accounts SET acc_no = 20 WHERE acc_no = 2;
=> ROLLBACK;
=> BEGIN;
=> SELECT * FROM accounts WHERE acc_no = 1 FOR NO KEY UPDATE;
=> SELECT * FROM accounts WHERE acc_no = 2 FOR UPDATE;
--=============================
=> SELECT * FROM accounts_v LIMIT 2;
--=============================
=> BEGIN;
=> SELECT * FROM accounts WHERE acc_no = 1 FOR KEY SHARE;
=> SELECT * FROM accounts WHERE acc_no = 2 FOR SHARE;
--=============================
=> CREATE EXTENSION pgrowlocks;
=> SELECT * FROM pgrowlocks('accounts') \gx
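-- pgrowlocks returns one row per locked row: locked_row (ctid), locker (xid), multi (bool),
-- plus the xids, modes, and pids of the backends holding the row locks.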
--=============================
=> CREATE VIEW locks_v AS
SELECT pid,
locktype,
CASE locktype
WHEN 'relation' THEN relation::regclass::text
WHEN 'transactionid' THEN transactionid::text
WHEN 'tuple' THEN relation::regclass::text||':'||tuple::text
END AS lockid,
mode,
granted
FROM pg_locks
WHERE locktype in ('relation','transactionid','tuple')
AND (locktype != 'relation' OR relation = 'accounts'::regclass);
--=============================
=> BEGIN;
=> SELECT txid_current(), pg_backend_pid();
--=============================
=> SELECT pid, wait_event_type, wait_event, pg_blocking_pids(pid)
FROM pg_stat_activity
WHERE backend_type = 'client backend';
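-- pg_blocking_pids(pid) returns the PIDs of the sessions blocking the given backend's lock wait.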
--=============================
WITH updated_cars AS (
UPDATE cars
SET buyer = 2
WHERE id = 3
RETURNING id
), updated_car_invoices AS (
UPDATE car_invoices
SET buyer = 2
WHERE car_id = 1
RETURNING car_id
)
SELECT * FROM updated_cars, updated_car_invoices;
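-- Minimal sketch of the tables the data-modifying CTE above assumes (hypothetical definitions):
-- CREATE TABLE cars (id int PRIMARY KEY, buyer int);
-- CREATE TABLE car_invoices (car_id int REFERENCES cars(id), buyer int);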
--=============================
SET NOCOUNT OFF;
DECLARE @DB VARCHAR(250) = QUOTENAME(DB_NAME()),
@DBMaster VARCHAR(255) = 'MasterDB', -- name of the master (source) database
@ERROR VARCHAR(MAX);
DECLARE @HistoryLimited bit = 1,
@table_name nvarchar(255),
@is_identity int = 0,
@stm nvarchar(max) = '',
@cols nvarchar(max) = '',
@IsNOTInsert bit,
@schema_name nvarchar(255),
@col_name_identity nvarchar(255),
@referencing_object nvarchar(255),
@referenced_object nvarchar(255),
@constraint_name nvarchar(255),
@referencing_columns nvarchar(max),
@referenced_columns nvarchar(max),
@rules nvarchar(max),
@key_cols nvarchar(max),
@StartMoment DATETIME2,
@FinishMoment DATETIME2,
@delete_referential_action INT,
@update_referential_action INT,
@max_row_insert INT = 100000,
@isClearTableFKs BIT = 1,
@RowCount BIGINT = 1,
@WhileDelCount INT = 0;
DECLARE @cnt TABLE (cnt BIGINT NOT NULL);
DROP TABLE IF EXISTS #tbl_res;
CREATE TABLE #tbl_res (
SchName NVARCHAR(255) NOT NULL,
TblName NVARCHAR(255) NOT NULL,
StartMoment DATETIME2 NOT NULL,
FinishMoment DATETIME2 NOT NULL,
Cnt BIGINT NOT NULL,
ErrorMsg NVARCHAR(MAX) NULL
);
EXEC sys.sp_msforeachtable "ALTER TABLE ? NOCHECK CONSTRAINT ALL";
DECLARE r_cursor_trigg_off CURSOR LOCAL FAST_FORWARD READ_ONLY FOR
SELECT FORMATMESSAGE('ALTER TABLE [%s].[%s] DISABLE TRIGGER [%s];'
+ CHAR(13), SCHEMA_NAME(b.[schema_id]), OBJECT_NAME(t.parent_id)
, t.[Name]) AS stm
FROM sys.triggers t
LEFT JOIN sys.tables b ON b.object_id = t.parent_id
WHERE t.is_disabled = 0
AND t.type_desc = 'SQL_TRIGGER'
AND OBJECT_NAME(t.parent_id) IS NOT NULL
ORDER BY SCHEMA_NAME(b.[schema_id]) ASC,
OBJECT_NAME(t.parent_id) ASC;
OPEN r_cursor_trigg_off;
FETCH NEXT FROM r_cursor_trigg_off
INTO @stm;
WHILE @@FETCH_STATUS = 0
BEGIN
PRINT @stm;
BEGIN TRY
BEGIN TRANSACTION;
EXEC sys.sp_executesql @stmt = @stm;
COMMIT;
END TRY
BEGIN CATCH
SET @ERROR = ERROR_MESSAGE();
PRINT @ERROR;
IF (@@TRANCOUNT > 0) ROLLBACK TRANSACTION
END CATCH
FETCH NEXT FROM r_cursor_trigg_off
INTO @stm;
END
CLOSE r_cursor_trigg_off;
DEALLOCATE r_cursor_trigg_off;
SET @stm = '';
SELECT @stm += FORMATMESSAGE('DISABLE TRIGGER [%s] ON DATABASE;'
+ CHAR(13), t.[Name])
FROM sys.triggers t
LEFT JOIN sys.tables b ON b.[object_id] = t.parent_id
WHERE t.is_disabled = 0
AND t.[type_desc] = 'SQL_TRIGGER'
AND OBJECT_NAME(t.parent_id) IS NULL;
PRINT @stm;
BEGIN TRY
BEGIN TRANSACTION;
EXEC sys.sp_executesql @stmt = @stm;
COMMIT;
END TRY
BEGIN CATCH
SET @ERROR = ERROR_MESSAGE();
PRINT @ERROR;
IF (@@TRANCOUNT > 0) ROLLBACK TRANSACTION
END CATCH
DROP TABLE IF EXISTS #tbls;
CREATE TABLE #tbls (
[name] NVARCHAR(255) NOT NULL,
sch_name NVARCHAR(255) NOT NULL,
IsNOTInsert BIT NOT NULL
);
INSERT INTO #tbls (
[name],
sch_name,
IsNOTInsert
)
SELECT t.[name],
SCHEMA_NAME(t.[schema_id]) AS sch_name,
--rule that decides whether the table should be
--refilled with data after it has been truncated
--(default is to refill: 0 = yes, 1 = no)
0 AS IsNOTInsert
FROM sys.tables AS t
--the filter defines which tables to process
--(in our case, which ones to exclude)
WHERE t.[name] NOT LIKE 'unused%'
AND t.[name] NOT LIKE 'removed%'
AND t.[name] NOT LIKE 'migrated%'
AND t.[name] NOT LIKE 'migration%'
AND t.[name] NOT LIKE 'sysdiag%'
AND t.[name] NOT LIKE 'test%'
AND t.[name] NOT LIKE 'tmp%'
AND t.[name] NOT LIKE '%_cache'
AND t.[name] NOT IN ('FKs');
IF NOT EXISTS (SELECT 1 FROM sys.tables AS t
WHERE t.[name]= 'FKs' AND t.[schema_id] = SCHEMA_ID('dbo'))
BEGIN
CREATE TABLE dbo.FKs (
referencing_object NVARCHAR(255) NOT NULL,
constraint_column_id INT NOT NULL,
referencing_column_name NVARCHAR(255) NOT NULL,
referenced_object NVARCHAR(255) NOT NULL,
referenced_column_name NVARCHAR(255) NOT NULL,
constraint_name NVARCHAR(255) NOT NULL,
delete_referential_action INT NOT NULL,
update_referential_action INT NOT NULL
);
END
ELSE IF (@isClearTableFKs = 1)
BEGIN
TRUNCATE TABLE dbo.FKs;
END
INSERT INTO dbo.FKs (
referencing_object,
constraint_column_id,
referencing_column_name,
referenced_object,
referenced_column_name,
constraint_name,
delete_referential_action,
update_referential_action
)
SELECT CONCAT('[', SCHEMA_NAME(P.[schema_id]), '].['
, OBJECT_NAME(FK.parent_object_id), ']') AS referencing_object,
FK.constraint_column_id,
CONCAT('[', COL_NAME(FK.parent_object_id
, FK.parent_column_id), ']') AS referencing_column_name,
CONCAT('[', SCHEMA_NAME(R.[schema_id]), '].['
, OBJECT_NAME(FK.referenced_object_id), ']') AS referenced_object,
CONCAT('[', COL_NAME(FK.referenced_object_id
, FK.referenced_column_id), ']') AS referenced_column_name,
CONCAT('[', OBJECT_NAME(FK.constraint_object_id)
, ']') AS constraint_name,
FKK.delete_referential_action,
FKK.update_referential_action
FROM sys.foreign_key_columns AS FK
INNER JOIN sys.foreign_keys AS FKK ON FKK.[object_id]
= FK.constraint_object_id
INNER JOIN sys.tables AS P ON P.[object_id] = FK.parent_object_id
INNER JOIN sys.tables AS R ON R.[object_id] = FK.referenced_object_id
WHERE NOT EXISTS (SELECT 1 FROM dbo.FKs AS t0
WHERE t0.constraint_name = CONCAT('['
, OBJECT_NAME(FK.constraint_object_id), ']'));
DELETE FROM trg
FROM dbo.FKs AS trg
WHERE NOT EXISTS (
SELECT 1
FROM #tbls AS src
WHERE trg.referencing_object = CONCAT('[', src.sch_name
, '].[', src.[name], ']')
OR trg.referenced_object = CONCAT('[', src.sch_name
, '].[', src.[name], ']')
)
DECLARE r_cursor_fk_drop CURSOR LOCAL FAST_FORWARD READ_ONLY FOR
SELECT t.referencing_object,
t.referenced_object,
t.constraint_name
FROM dbo.FKs AS t
WHERE EXISTS (SELECT 1 FROM sys.foreign_key_columns AS FK
WHERE t.constraint_name = CONCAT('['
, OBJECT_NAME(FK.constraint_object_id), ']'))
GROUP BY t.referencing_object,
t.referenced_object,
t.constraint_name;
OPEN r_cursor_fk_drop;
FETCH NEXT FROM r_cursor_fk_drop
INTO @referencing_object,
@referenced_object,
@constraint_name
WHILE @@FETCH_STATUS = 0
BEGIN
SET @stm = CONCAT('ALTER TABLE ', @referencing_object
, ' DROP CONSTRAINT ', @constraint_name, ';');
PRINT @stm;
BEGIN TRY
BEGIN TRANSACTION;
EXEC sys.sp_executesql @stmt = @stm;
COMMIT;
END TRY
BEGIN CATCH
SET @ERROR = ERROR_MESSAGE();
PRINT @ERROR;
IF (@@TRANCOUNT > 0) ROLLBACK TRANSACTION
END CATCH
FETCH NEXT FROM r_cursor_fk_drop
INTO @referencing_object,
@referenced_object,
@constraint_name;
END
CLOSE r_cursor_fk_drop;
DEALLOCATE r_cursor_fk_drop;
DECLARE r_cursor CURSOR LOCAL FAST_FORWARD READ_ONLY FOR
SELECT t.[name],
t.sch_name,
t.IsNOTInsert
FROM #tbls AS t
ORDER BY t.[name] ASC;
OPEN r_cursor;
FETCH NEXT FROM r_cursor
INTO @table_name,
@schema_name,
@IsNOTInsert;
WHILE @@FETCH_STATUS = 0
BEGIN
SET @cols = '';
SET @is_identity = 0;
SET @col_name_identity = NULL;
SET @stm = CONCAT('TRUNCATE TABLE ', @DB
, '.[', @schema_name, '].[', @table_name, ']');
PRINT @stm;
BEGIN TRY
BEGIN TRANSACTION;
EXEC sys.sp_executesql @stmt = @stm;
COMMIT;
END TRY
BEGIN CATCH
SET @ERROR = ERROR_MESSAGE();
PRINT @ERROR;
IF (@@TRANCOUNT > 0) ROLLBACK TRANSACTION
END CATCH
IF (@IsNOTInsert = 0)
BEGIN
SELECT @cols = @cols + CASE WHEN @cols = ''
THEN c.[name] ELSE ',' + c.name END,
@is_identity = @is_identity + c.is_identity,
@col_name_identity = CASE WHEN (c.is_identity = 1)
THEN c.[name] ELSE @col_name_identity END
FROM sys.tables t,
sys.columns c
WHERE t.[object_id] = c.[object_id]
AND t.[name] = @table_name
AND c.is_computed = 0;
SET @stm = '';
IF @is_identity > 0 SET @stm = CONCAT('SET IDENTITY_INSERT '
, @DB, '.[', @schema_name, '].[', @table_name, '] ON');
SET @stm = CONCAT(@stm, ' INSERT INTO ', @DB, '.['
, @schema_name, '].[', @table_name
, '](', @cols, ') SELECT ', @cols
, ' FROM [',@DBMaster,'].['
, @schema_name, '].['
, @table_name, '] WITH(NOLOCK) ');
--an optional limit on the data being reloaded can be set here
IF @HistoryLimited = 1
BEGIN
IF @table_name LIKE '%History'
SET @stm = CONCAT(@stm
, ' WHERE ChangeDateTime > DATEADD (month, -1, SYSDATETIME()) ');
END
IF @is_identity > 0 SET @stm = CONCAT(@stm, ' SET IDENTITY_INSERT '
, @DB, '.[', @schema_name, '].[', @table_name, '] OFF');
IF @is_identity > 0 SET @stm = CONCAT(@stm, ' DBCC CHECKIDENT ("'
, @table_name, '")');
SET @StartMoment = SYSDATETIME();
SET @ERROR = NULL;
PRINT @stm;
BEGIN TRY
BEGIN TRANSACTION;
EXEC sys.sp_executesql @stmt = @stm;
COMMIT;
END TRY
BEGIN CATCH
SET @ERROR = ERROR_MESSAGE();
PRINT @ERROR;
IF (@@TRANCOUNT > 0) ROLLBACK TRANSACTION
END CATCH
SET @FinishMoment = SYSDATETIME();
SET @stm = CONCAT('SELECT COUNT_BIG(*) FROM ', '[', @schema_name
, '].[', @table_name, '] WITH (NOLOCK);');
DELETE FROM @cnt;
INSERT INTO @cnt (cnt)
EXEC sys.sp_executesql @stmt = @stm;
INSERT INTO #tbl_res (
SchName,
TblName,
StartMoment,
FinishMoment,
Cnt,
ErrorMsg
)
SELECT @schema_name,
@table_name,
@StartMoment,
@FinishMoment,
COALESCE((SELECT SUM(cnt) FROM @cnt), 0) AS Cnt,
@ERROR;
END
FETCH NEXT FROM r_cursor
INTO @table_name,
@schema_name,
@IsNOTInsert;
END
CLOSE r_cursor;
DEALLOCATE r_cursor;
WHILE (@RowCount > 0)
BEGIN
SET @RowCount = 0;
SET @WhileDelCount += 1;
DECLARE r_cursor_fk_corr CURSOR LOCAL FAST_FORWARD READ_ONLY FOR
SELECT t.referencing_object,
t.referenced_object,
t.constraint_name,
STRING_AGG (CONCAT('(trg.', t.referencing_column_name
, '=src.', t.referenced_column_name, ')'), ' AND ')
WITHIN GROUP (ORDER BY t.constraint_column_id ASC) AS rules,
STRING_AGG (CONCAT('(trg.', t.referencing_column_name
, ' IS NOT NULL)'), ' AND ')
WITHIN GROUP (ORDER BY t.constraint_column_id ASC) AS key_cols
FROM dbo.FKs AS t
GROUP BY t.referencing_object,
t.referenced_object,
t.constraint_name;
OPEN r_cursor_fk_corr;
FETCH NEXT FROM r_cursor_fk_corr
INTO @referencing_object
, @referenced_object
, @constraint_name
, @rules
, @key_cols;
WHILE @@FETCH_STATUS = 0
BEGIN
SET @stm = CONCAT('DELETE FROM trg FROM ', @referencing_object
,' AS trg WHERE ', @key_cols, ' AND NOT EXISTS (SELECT 1 FROM '
, @referenced_object,
' AS src WITH (NOLOCK) WHERE ', @rules, ');');
PRINT @stm;
BEGIN TRY
BEGIN TRANSACTION;
EXEC sys.sp_executesql @stmt = @stm;
SET @RowCount += @@ROWCOUNT;
COMMIT;
END TRY
BEGIN CATCH
SET @ERROR = ERROR_MESSAGE();
PRINT @ERROR;
IF (@@TRANCOUNT > 0) ROLLBACK TRANSACTION
END CATCH
FETCH NEXT FROM r_cursor_fk_corr
INTO @referencing_object
, @referenced_object
, @constraint_name
, @rules
, @key_cols;
END
CLOSE r_cursor_fk_corr;
DEALLOCATE r_cursor_fk_corr;
END
PRINT CONCAT('WHILE DELETE COUNT: ', @WhileDelCount);
DECLARE r_cursor_stat CURSOR LOCAL FAST_FORWARD READ_ONLY FOR
SELECT CONCAT('UPDATE STATISTICS ', @DB, '.[', t.sch_name, '].['
, t.[name], '] WITH FULLSCAN;') AS stm
FROM #tbls AS t;
OPEN r_cursor_stat;
FETCH NEXT FROM r_cursor_stat
INTO @stm;
WHILE @@FETCH_STATUS = 0
BEGIN
PRINT @stm;
BEGIN TRY
BEGIN TRANSACTION;
EXEC sys.sp_executesql @stmt = @stm;
COMMIT;
END TRY
BEGIN CATCH
SET @ERROR = ERROR_MESSAGE();
PRINT @ERROR;
IF (@@TRANCOUNT > 0) ROLLBACK TRANSACTION
END CATCH
FETCH NEXT FROM r_cursor_stat
INTO @stm
END
CLOSE r_cursor_stat;
DEALLOCATE r_cursor_stat;
DECLARE r_cursor_trigg_on CURSOR LOCAL FAST_FORWARD READ_ONLY FOR
SELECT FORMATMESSAGE('ALTER TABLE [%s].[%s] ENABLE TRIGGER [%s];'
+ CHAR(13), SCHEMA_NAME(b.[schema_id]), OBJECT_NAME(t.parent_id)
, t.[Name]) AS stm
FROM sys.triggers t
LEFT JOIN sys.tables b ON b.[object_id] = t.parent_id
WHERE t.is_disabled = 1
AND t.[type_desc] = 'SQL_TRIGGER'
AND OBJECT_NAME(t.parent_id) IS NOT NULL
OPEN r_cursor_trigg_on;
FETCH NEXT FROM r_cursor_trigg_on
INTO @stm;
WHILE @@FETCH_STATUS = 0
BEGIN
PRINT @stm;
BEGIN TRY
BEGIN TRANSACTION;
EXEC sys.sp_executesql @stmt = @stm;
COMMIT;
END TRY
BEGIN CATCH
SET @ERROR = ERROR_MESSAGE();
PRINT @ERROR;
IF (@@TRANCOUNT > 0) ROLLBACK TRANSACTION
END CATCH
FETCH NEXT FROM r_cursor_trigg_on
INTO @stm;
END
CLOSE r_cursor_trigg_on;
DEALLOCATE r_cursor_trigg_on;
SET @stm = '';
SELECT @stm += FORMATMESSAGE('ENABLE TRIGGER [%s] ON DATABASE;'
+ CHAR(13), t.[Name])
FROM sys.triggers t
WHERE t.is_disabled = 1
AND t.[type_desc] = 'SQL_TRIGGER'
AND OBJECT_NAME(t.parent_id) IS NULL;
PRINT @stm;
BEGIN TRY
BEGIN TRANSACTION;
EXEC sys.sp_executesql @stmt = @stm;
COMMIT;
END TRY
BEGIN CATCH
SET @ERROR = ERROR_MESSAGE();
PRINT @ERROR;
IF (@@TRANCOUNT > 0) ROLLBACK TRANSACTION
END CATCH
DECLARE r_cursor_fk_recover CURSOR LOCAL FAST_FORWARD READ_ONLY FOR
SELECT t.referencing_object,
t.referenced_object,
t.constraint_name,
STRING_AGG (t.referencing_column_name, ',')
WITHIN GROUP (ORDER BY t.constraint_column_id ASC)
AS referencing_columns,
STRING_AGG (t.referenced_column_name, ',')
WITHIN GROUP (ORDER BY t.constraint_column_id ASC)
AS referenced_columns,
t.delete_referential_action,
t.update_referential_action
FROM dbo.FKs AS t
WHERE NOT EXISTS (SELECT 1 FROM sys.foreign_key_columns AS FK
WHERE t.constraint_name = CONCAT('['
, OBJECT_NAME(FK.constraint_object_id), ']'))
GROUP BY t.referencing_object,
t.referenced_object,
t.constraint_name,
t.delete_referential_action,
t.update_referential_action;
OPEN r_cursor_fk_recover;
FETCH NEXT FROM r_cursor_fk_recover
INTO @referencing_object
, @referenced_object
, @constraint_name
, @referencing_columns
, @referenced_columns
, @delete_referential_action
, @update_referential_action;
WHILE @@FETCH_STATUS = 0
BEGIN
SET @stm = CONCAT('ALTER TABLE ', @referencing_object
,' WITH CHECK ADD CONSTRAINT ', @constraint_name,
' FOREIGN KEY(', @referencing_columns, ') REFERENCES '
, @referenced_object, ' (', @referenced_columns, ') '
, CASE
WHEN @delete_referential_action = 1
THEN 'ON DELETE CASCADE '
WHEN @delete_referential_action = 2
THEN 'ON DELETE SET NULL '
ELSE ''
END
, CASE
WHEN @update_referential_action = 1
THEN 'ON UPDATE CASCADE '
WHEN @update_referential_action = 2
THEN 'ON UPDATE SET NULL '
ELSE ''
END
, '; '
, 'ALTER TABLE ', @referencing_object, ' CHECK CONSTRAINT '
, @constraint_name, '; ');
PRINT @stm;
BEGIN TRY
BEGIN TRANSACTION;
EXEC sys.sp_executesql @stmt = @stm;
COMMIT;
END TRY
BEGIN CATCH
SET @ERROR = ERROR_MESSAGE();
PRINT @ERROR;
IF (@@TRANCOUNT > 0) ROLLBACK TRANSACTION
END CATCH
FETCH NEXT FROM r_cursor_fk_recover
INTO @referencing_object
, @referenced_object
, @constraint_name
, @referencing_columns
, @referenced_columns
, @delete_referential_action
, @update_referential_action;
END
CLOSE r_cursor_fk_recover;
DEALLOCATE r_cursor_fk_recover;
EXEC sys.sp_msforeachtable @command1="PRINT '?'"
, @command2="ALTER TABLE ? WITH CHECK CHECK CONSTRAINT ALL";
SELECT t.SchName,
t.TblName,
t.Cnt,
DATEDIFF(millisecond, t.StartMoment, t.FinishMoment) AS DiffMSec,
t.ErrorMsg
FROM #tbl_res AS t
ORDER BY t.SchName ASC, t.TblName ASC;
--ecm
select count(*) from ecm.t_order_main;
select count(*) from temp.ecm_aggregation_table_with_id;
select count(*) from temp.temp_ecm_id_integration_result;
select count(distinct(temp_id)) from temp.ecm_aggregation_table_with_id;
select count(distinct(user_id)) from temp.temp_ecm_id_integration_result;
--dbs
select count(*) from dbs.HSCHM_RETAILORDER_H_3YA;
select count(*) from temp.dbs_aggregation_table_with_ID;
select count(*) from temp.temp_dbs_id_integration_result;
select count(distinct(uuid)) from temp.dbs_aggregation_table_with_ID;
select count(distinct(user_id)) from temp.temp_dbs_id_integration_result;
--css
select count(*) from css.t_cs_customer;
select count(*) from temp.dw_ccs_merge_customer;
select count(*) from temp.temp_css_id_integration_result;
select count(distinct(temp_id)) from temp.dw_ccs_merge_customer;
select count(distinct(user_id)) from temp.temp_css_id_integration_result;
--External merged tables: row counts
select count(*) from dw_dmp.dw_user_base_info;
select count(distinct(user_id)) from dw_dmp.dw_user_base_info;
select count(*) from dw_dmp.dw_user_phone_info;
select count(distinct(user_id)) from dw_dmp.dw_user_phone_info;
select count(*) from dw_dmp.dw_user_addr_info;
select count(distinct(user_id)) from dw_dmp.dw_user_addr_info;
select count(*) from dw_dmp.dw_user_pay_info;
select count(distinct(user_id)) from dw_dmp.dw_user_pay_info;
select count(*) from dw_dmp.dw_user_email_info;
select count(distinct(user_id)) from dw_dmp.dw_user_email_info;
--One-to-many relationship checks:
--Internal merged tables: one user mapped to multiple phone numbers
select
count(*)
from(
select
count(*) as count
from(
select
temp_id, receiver_mobile, count(*) as count
from
temp.ecm_aggregation_table_with_id
group by
temp_id, receiver_mobile
) temp
group by
temp.temp_id
having
count(*) > 5
) temp1;
select
count(*)
from(
select
count(*) as count
from(
select
uuid, receivermobile, count(*) as count
from
temp.dbs_aggregation_table_with_id
group by
uuid, receivermobile
)temp
group by
temp.uuid
having
count(*) > 5
) temp1;
select
count(*)
from(
select
count(*) as count
from(
select
temp_id, stelephone3, count(*) as count
from
temp.dw_ccs_merge_customer
group by
temp_id, stelephone3
)temp
group by
temp.temp_id
having
count(*) > 5
) temp1;
--Internal merged tables: time span across a user's multiple phone numbers (2592000 s = 30 days, so the threshold below is ~3 months)
select
count(*)
from (
select
temp2.temp_id,max(unix_timestamp(temp2.update_time))-min(unix_timestamp(temp2.update_time)) difftime,max(temp2.update_time),min(temp2.update_time)
from
temp.ecm_aggregation_table_with_id temp2
where
temp2.temp_id in(
select
temp1.temp_id
from(
select
temp_id, receiver_mobile, count(*) as count
from
temp.ecm_aggregation_table_with_id
group by
temp_id, receiver_mobile
) temp1
group by
temp1.temp_id
having
count(*) > 5
)
group by
temp2.temp_id
)temp3
where
temp3.difftime > 2592000*3;
select
count(*)
from (
select
temp2.uuid,max(unix_timestamp(temp2.created))-min(unix_timestamp(temp2.created)) difftime,max(temp2.created),min(temp2.created)
from
temp.dbs_aggregation_table_with_id temp2
where
temp2.uuid in(
select
temp1.uuid
from(
select
uuid, receivermobile, count(*) as count
from
temp.dbs_aggregation_table_with_id
group by
uuid, receivermobile
) temp1
group by
temp1.uuid
having
count(*) > 5
)
group by
temp2.uuid
)temp3
where
temp3.difftime > 2592000*3;
select
count(*)
from (
select
temp2.temp_id,max(unix_timestamp(temp2.pub_create_date))-min(unix_timestamp(temp2.pub_create_date)) difftime,max(temp2.pub_create_date),min(temp2.pub_create_date)
from
temp.dw_ccs_merge_customer temp2
where
temp2.temp_id in(
select
temp1.temp_id
from(
select
temp_id, stelephone3, count(*) as count
from
temp.dw_ccs_merge_customer
group by
temp_id, stelephone3
) temp1
group by
temp1.temp_id
having
count(*) > 5
)
group by
temp2.temp_id
)temp3
where
temp3.difftime > 2592000*3;
--Internal merged tables: does any phone number map to multiple users?
select
count(*)
from(
select
count(*) as count
from(
select
temp_id, receiver_mobile, count(*) as count
from
temp.ecm_aggregation_table_with_id
group by
temp_id, receiver_mobile
)temp
group by
temp.receiver_mobile
having
count(*) > 1
) temp1;
select
count(*)
from(
select
count(*) as count
from(
select
uuid, receivermobile, count(*) as count
from
temp.dbs_aggregation_table_with_id
group by
uuid, receivermobile
)temp
group by
temp.receivermobile
having
count(*) > 1
) temp1;
select
count(*)
from(
select
count(*) as count
from(
select
temp_id, stelephone3, count(*) as count
from
temp.dw_ccs_merge_customer
group by
temp_id, stelephone3
)temp
group by
temp.stelephone3
having
count(*) > 1
) temp1;
--External merged tables: one user mapped to multiple phone numbers, addresses, payment accounts, and emails
select
count(*)
from(
select
count(*) as count
from
dw_dmp.dw_user_phone_info
group by
user_id
having
count(*) > 20
) temp;
select
count(*)
from(
select
count(*) as count
from
dw_dmp.dw_user_addr_info
group by
user_id
having
count(*) > 20
) temp;
select
count(*)
from(
select
count(*) as count
from
dw_dmp.dw_user_pay_info
group by
user_id
having
count(*) > 20
) temp;
select
count(*)
from(
select
count(*) as count
from
dw_dmp.dw_user_email_info
group by
user_id
having
count(*) > 20
) temp;
--External merged tables: does any phone number map to multiple users?
--No: the relation is one-to-many and user_id is the foreign key of the phone table, so this cannot occur.
--Quality checks
--Internal merged tables: column-level quality checks
select count(*) from temp.ecm_aggregation_table_with_id where temp_id is null;
select count(*) from temp.ecm_aggregation_table_with_id where receiver_mobile is null;
select count(*) from temp.dbs_aggregation_table_with_ID where uuid is null;
select count(*) from temp.dbs_aggregation_table_with_ID where receivermobile is null;
select count(*) from temp.dw_ccs_merge_customer where temp_id is null;
select count(*) from temp.dw_ccs_merge_customer where stelephone3 is null;
--External merged tables: column-level quality checks
select count(*) from dw_dmp.dw_user_base_info where createtime is null;
select count(*) from dw_dmp.dw_user_base_info where unix_timestamp(createtime,'yyyy-MM-dd HH:mm:ss.S') == 0;
select count(*) from dw_dmp.dw_user_base_info where updatetime is null;
select count(*) from dw_dmp.dw_user_base_info where unix_timestamp(updatetime,'yyyy-MM-dd HH:mm:ss.S') == 0;
select count(*) from dw_dmp.dw_user_phone_info where mobile is null;
select count(*) from dw_dmp.dw_user_phone_info where mobile not regexp "^1\\d{10}$";
select count(*) from dw_dmp.dw_user_phone_info where mobile_source is null;
select count(*) from dw_dmp.dw_user_phone_info where mobile_source not regexp "^[1-9]$";
select count(*) from dw_dmp.dw_user_phone_info where starttime is null;
select count(*) from dw_dmp.dw_user_phone_info where unix_timestamp(starttime,'yyyy-MM-dd HH:mm:ss.S') == 0;
select count(*) from dw_dmp.dw_user_addr_info where address is null;
select count(*) from dw_dmp.dw_user_addr_info where address_source is null;
select count(*) from dw_dmp.dw_user_addr_info where address_source not regexp "^[1-9]$";
select count(*) from dw_dmp.dw_user_addr_info where createtime is null;
select count(*) from dw_dmp.dw_user_addr_info where unix_timestamp(createtime,'yyyy-MM-dd HH:mm:ss.S') == 0;
select count(*) from dw_dmp.dw_user_pay_info where pay_no is null;
select count(*) from dw_dmp.dw_user_pay_info where pay_no_source is null;
select count(*) from dw_dmp.dw_user_pay_info where pay_no_source not regexp "^[1-9]$";
select count(*) from dw_dmp.dw_user_pay_info where starttime is null;
select count(*) from dw_dmp.dw_user_pay_info where unix_timestamp(starttime,'yyyy-MM-dd HH:mm:ss.S') == 0;
select count(*) from dw_dmp.dw_user_email_info where email is null;
select count(*) from dw_dmp.dw_user_email_info where email not regexp "^[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+)+$";
select count(*) from dw_dmp.dw_user_email_info where email_source is null;
select count(*) from dw_dmp.dw_user_email_info where email_source not regexp "^[1-9]$";
select count(*) from dw_dmp.dw_user_email_info where starttime is null;
select count(*) from dw_dmp.dw_user_email_info where unix_timestamp(starttime,'yyyy-MM-dd HH:mm:ss.S') == 0;
--External merged tables: cross-column consistency checks
select
count(*)
from
dw_dmp.dw_user_base_info
where
usrname is not null and usrname_sourece is null;
select
count(*)
from
dw_dmp.dw_user_base_info
where
cardid is not null and cardid_source is null;
select
count(*)
from
dw_dmp.dw_user_base_info
where
sex is not null and sex_source is null;
select
count(*)
from
dw_dmp.dw_user_base_info
where
birthdate is not null and birthdate_source is null;
select
count(*)
from
dw_dmp.dw_user_phone_info
where
mobile is not null and mobile_source is null;
select
count(*)
from
dw_dmp.dw_user_phone_info
where
telephone is not null and telephone_source is null;
select
count(*)
from
dw_dmp.dw_user_addr_info
where
address is not null and address_source is null;
select
count(*)
from
dw_dmp.dw_user_pay_info
where
pay_no is not null and pay_no_source is null;
select
count(*)
from
dw_dmp.dw_user_pay_info
where
buyernick is not null and buyernick_source is null;
select
count(*)
from
dw_dmp.dw_user_email_info
where
email is not null and email_source is null;
--Merge-result verification
select
temp_id, count(*) as count
from
temp.ecm_aggregation_table_with_id
group by
temp_id
having
count(*) > 10
limit 10;
select
receiver_name_ori, *
from
temp.ecm_aggregation_table_with_id
where
temp_id = ;
select
uuid, count(*) as count
from
temp.dbs_aggregation_table_with_id
group by
uuid
having
count(*) > 10
limit 10;
select
*
from
temp.dbs_aggregation_table_with_id
where
uuid = ;
select
temp_id, count(*) as count
from
temp.dw_ccs_merge_customer
group by
temp_id
having
count(*) > 10
limit 10;
select
*
from
temp.dw_ccs_merge_customer
where
temp_id = ;
--=====================================
CREATE SCHEMA adm AUTHORIZATION postgres;
CREATE OR REPLACE FUNCTION adm.create_day_partition(t_name character varying, s_name character varying)
RETURNS void AS
$$
DECLARE
sql_query TEXT;
P_NAME VARCHAR(255);
N INTEGER := 0;
t_owner VARCHAR(255);
begin
select t.tableowner into t_owner from pg_tables t where t.tablename = t_name and t.schemaname = s_name ;
while N <= 10
loop
P_NAME := T_NAME|| replace(cast(CURRENT_DATE + N as varchar ),'-','_');
IF NOT EXISTS (select 1 as f1 from pg_tables t
where t.tablename = P_NAME and t.schemaname = S_NAME ) then
sql_query := 'CREATE TABLE ' || S_NAME || '.' || P_NAME || ' PARTITION OF ' || S_NAME || '.' || T_NAME || ' FOR VALUES FROM ('''
|| CURRENT_DATE + N || ' 00:00:00'') TO (''' || CURRENT_DATE + N + 1 || ' 00:00:00'')' ;
EXECUTE sql_query ;
sql_query := 'ALTER TABLE ' || S_NAME || '.' || P_NAME || ' OWNER TO ' || t_owner;
EXECUTE sql_query ;
END IF;
N := N+1 ;
end loop;
-- commit;
END $$ LANGUAGE plpgsql;
CREATE OR REPLACE FUNCTION adm.drop_old_day_partition ( T_NAME VARCHAR(240) -- table name
,S_NAME VARCHAR(100) -- schema name
,days_ago integer -- delete partitions older than N days
)
RETURNS void AS
$$
DECLARE
v_parent_rec RECORD;
begin
FOR v_parent_rec IN (
select q.* , 'DROP TABLE ' || q.part_name as sql_query
from (
SELECT cast( inhrelid::regclass as varchar) AS part_name
,to_date ( replace( cast( inhrelid::regclass as varchar)
,case when ( S_NAME = 'public' or S_NAME = '' ) then ''
else S_NAME || '.'
end ||
T_NAME,'') ,'yyyy_mm_dd') as dt
FROM pg_catalog.pg_inherits i
WHERE inhparent = ( S_NAME || '.' || T_NAME )::regclass ) q
where q.dt < current_date - days_ago )
LOOP
EXECUTE v_parent_rec.sql_query ;
END LOOP;
END $$ LANGUAGE plpgsql;
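-- Usage sketch (the same calls are scheduled via cron.job below):
-- select adm.create_day_partition('pg_stat_activity_history', 'adm'); -- pre-creates partitions for today .. today+10
-- select adm.drop_old_day_partition('pg_stat_activity_history', 'adm', 14); -- drops partitions older than 14 days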
--=====================================
CREATE TABLE adm.pg_stat_activity_history (
sample_time timestamptz ,
datid oid NULL,
pid int4 NULL,
leader_pid int4 NULL,
usesysid oid NULL,
application_name text NULL,
client_addr inet NULL,
client_hostname text NULL,
client_port int4 NULL,
backend_start timestamptz NULL,
xact_start timestamptz NULL,
state_change timestamptz NULL,
wait_event_type text NULL,
wait_event text NULL,
state text NULL,
backend_xid xid NULL,
backend_xmin xid NULL,
query_id int8 NULL,
query text null,
query_start timestamptz NULL,
duration numeric NULL
) PARTITION BY RANGE (sample_time);
CREATE INDEX pg_stat_act_h_idx_stime ON adm.pg_stat_activity_history USING btree (sample_time);
INSERT INTO cron.job (schedule,command,nodename,nodeport,"database",username,active,jobname) VALUES
('20 0 * * *'
,'select adm.create_day_partition(''pg_stat_activity_history'', ''adm''); commit ;'
,'',5432
,'postgres','postgres',true,'part_awr_hist');
INSERT INTO cron.job (schedule,command,nodename,nodeport,"database",username,active,jobname) VALUES
('30 0 * * *'
,'select adm.drop_old_day_partition(''pg_stat_activity_history'', ''adm'', 14); commit ;'
,'',5432
,'postgres','postgres',true,'del_part_awr_hist');
select adm.create_day_partition('pg_stat_activity_history', 'adm');
CREATE OR REPLACE FUNCTION adm.pg_stat_activity_snapshot()
RETURNS void AS
$$
DECLARE
start_ts timestamp := (select clock_timestamp());
ldiff numeric := 0;
BEGIN
WHILE ldiff < 60
LOOP
insert
into adm.pg_stat_activity_history
select
clock_timestamp() as sample_time,
datid ,
pid ,
leader_pid ,
usesysid ,
application_name ,
client_addr ,
client_hostname ,
client_port ,
backend_start ,
xact_start ,
state_change ,
wait_event_type ,
wait_event ,
state ,
backend_xid ,
backend_xmin ,
query_id ,
query ,
query_start ,
1000 * extract(EPOCH from (clock_timestamp()-query_start)) as duration -- milliseconds (1/1000sec)
from pg_stat_activity
where state <> 'idle' and usename <> 'replicator'
and pid != pg_backend_pid();
perform pg_stat_clear_snapshot();
perform pg_sleep(10);
ldiff := EXTRACT (EPOCH FROM (clock_timestamp() - start_ts));
END LOOP;
END $$ LANGUAGE plpgsql;
INSERT INTO cron.job (schedule, command, nodename, nodeport, database, username) values
('* * * * *', 'SELECT adm.pg_stat_activity_snapshot();', '', 5432, 'postgres', 'postgres');
COMMIT;
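-- Note: cron.job is the pg_cron extension's job table. The '* * * * *' entry starts
-- adm.pg_stat_activity_snapshot() every minute; the function loops for ~60 seconds,
-- sampling pg_stat_activity every 10 seconds into the partitioned history table.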
--================================================================
The script's algorithm is as follows:
1. Disable all constraints.
2. Disable all triggers.
3. For the target tables, save all foreign keys, then drop them.
4. Fully clear the target tables with TRUNCATE and reload them with data.
5. Delete orphaned (broken) rows.
6. Update statistics on the reloaded tables.
7. Re-enable the triggers.
8. Restore the foreign keys.
9. Re-enable and recheck all constraints.
--================================================================
SET NOCOUNT OFF;
DECLARE @DB VARCHAR(250) = QUOTENAME(DB_NAME()),
@DBMaster VARCHAR(255) = 'MasterDB', -- name of the master (source) database
@ERROR VARCHAR(MAX);
DECLARE @HistoryLimited bit = 1,
@table_name nvarchar(255),
@is_identity int = 0,
@stm nvarchar(max) = '',
@cols nvarchar(max) = '',
@IsNOTInsert bit,
@schema_name nvarchar(255),
@col_name_identity nvarchar(255),
@referencing_object nvarchar(255),
@referenced_object nvarchar(255),
@constraint_name nvarchar(255),
@referencing_columns nvarchar(max),
@referenced_columns nvarchar(max),
@rules nvarchar(max),
@key_cols nvarchar(max),
@StartMoment DATETIME2,
@FinishMoment DATETIME2,
@delete_referential_action INT,
@update_referential_action INT,
@max_row_insert INT = 100000,
@isClearTableFKs BIT = 1,
@RowCount BIGINT = 1,
@WhileDelCount INT = 0;
DECLARE @cnt TABLE (cnt BIGINT NOT NULL);
DROP TABLE IF EXISTS #tbl_res;
CREATE TABLE #tbl_res (
SchName NVARCHAR(255) NOT NULL,
TblName NVARCHAR(255) NOT NULL,
StartMoment DATETIME2 NOT NULL,
FinishMoment DATETIME2 NOT NULL,
Cnt BIGINT NOT NULL,
ErrorMsg NVARCHAR(MAX) NULL
);
EXEC sys.sp_msforeachtable "ALTER TABLE ? NOCHECK CONSTRAINT ALL";
DECLARE r_cursor_trigg_off CURSOR LOCAL FAST_FORWARD READ_ONLY FOR
SELECT FORMATMESSAGE('ALTER TABLE [%s].[%s] DISABLE TRIGGER [%s];'
+ CHAR(13), SCHEMA_NAME(b.[schema_id]), OBJECT_NAME(t.parent_id)
, t.[Name]) AS stm
FROM sys.triggers t
LEFT JOIN sys.tables b ON b.object_id = t.parent_id
WHERE t.is_disabled = 0
AND t.type_desc = 'SQL_TRIGGER'
AND OBJECT_NAME(t.parent_id) IS NOT NULL
ORDER BY SCHEMA_NAME(b.[schema_id]) ASC,
OBJECT_NAME(t.parent_id) ASC;
OPEN r_cursor_trigg_off;
FETCH NEXT FROM r_cursor_trigg_off
INTO @stm;
WHILE @@FETCH_STATUS = 0
BEGIN
PRINT @stm;
BEGIN TRY
BEGIN TRANSACTION;
EXEC sys.sp_executesql @stmt = @stm;
COMMIT;
END TRY
BEGIN CATCH
SET @ERROR = ERROR_MESSAGE();
PRINT @ERROR;
IF (@@TRANCOUNT > 0) ROLLBACK TRANSACTION
END CATCH
FETCH NEXT FROM r_cursor_trigg_off
INTO @stm;
END
CLOSE r_cursor_trigg_off;
DEALLOCATE r_cursor_trigg_off;
SET @stm = '';
SELECT @stm += FORMATMESSAGE('DISABLE TRIGGER [%s] ON DATABASE;'
+ CHAR(13), t.[Name])
FROM sys.triggers t
LEFT JOIN sys.tables b ON b.[object_id] = t.parent_id
WHERE t.is_disabled = 0
AND t.[type_desc] = 'SQL_TRIGGER'
AND OBJECT_NAME(t.parent_id) IS NULL;
PRINT @stm;
BEGIN TRY
BEGIN TRANSACTION;
EXEC sys.sp_executesql @stmt = @stm;
COMMIT;
END TRY
BEGIN CATCH
SET @ERROR = ERROR_MESSAGE();
PRINT @ERROR;
IF (@@TRANCOUNT > 0) ROLLBACK TRANSACTION
END CATCH
DROP TABLE IF EXISTS #tbls;
CREATE TABLE #tbls (
[name] NVARCHAR(255) NOT NULL,
sch_name NVARCHAR(255) NOT NULL,
IsNOTInsert BIT NOT NULL
);
INSERT INTO #tbls (
[name],
sch_name,
IsNOTInsert
)
SELECT t.[name],
SCHEMA_NAME(t.[schema_id]) AS sch_name,
--rule that decides whether the table should be
--repopulated with data after truncation
--(default is yes: 0 = yes, 1 = no)
0 AS IsNOTInsert
FROM sys.tables AS t
--the filter defines which tables to include
--(in our case, which ones to exclude)
WHERE t.[name] NOT LIKE 'unused%'
AND t.[name] NOT LIKE 'removed%'
AND t.[name] NOT LIKE 'migrated%'
AND t.[name] NOT LIKE 'migration%'
AND t.[name] NOT LIKE 'sysdiag%'
AND t.[name] NOT LIKE 'test%'
AND t.[name] NOT LIKE 'tmp%'
AND t.[name] NOT LIKE '%_cache'
AND t.[name] NOT IN ('FKs');
IF NOT EXISTS (SELECT 1 FROM sys.tables AS t
WHERE t.[name]= 'FKs' AND t.[schema_id] = SCHEMA_ID('dbo'))
BEGIN
CREATE TABLE dbo.FKs (
referencing_object NVARCHAR(255) NOT NULL,
constraint_column_id INT NOT NULL,
referencing_column_name NVARCHAR(255) NOT NULL,
referenced_object NVARCHAR(255) NOT NULL,
referenced_column_name NVARCHAR(255) NOT NULL,
constraint_name NVARCHAR(255) NOT NULL,
delete_referential_action INT NOT NULL,
update_referential_action INT NOT NULL
);
END
ELSE IF (@isClearTableFKs = 1)
BEGIN
TRUNCATE TABLE dbo.FKs;
END
INSERT INTO dbo.FKs (
referencing_object,
constraint_column_id,
referencing_column_name,
referenced_object,
referenced_column_name,
constraint_name,
delete_referential_action,
update_referential_action
)
SELECT CONCAT('[', SCHEMA_NAME(P.[schema_id]), '].['
, OBJECT_NAME(FK.parent_object_id), ']') AS referencing_object,
FK.constraint_column_id,
CONCAT('[', COL_NAME(FK.parent_object_id
, FK.parent_column_id), ']') AS referencing_column_name,
CONCAT('[', SCHEMA_NAME(R.[schema_id]), '].['
, OBJECT_NAME(FK.referenced_object_id), ']') AS referenced_object,
CONCAT('[', COL_NAME(FK.referenced_object_id
, FK.referenced_column_id), ']') AS referenced_column_name,
CONCAT('[', OBJECT_NAME(FK.constraint_object_id)
, ']') AS constraint_name,
FKK.delete_referential_action,
FKK.update_referential_action
FROM sys.foreign_key_columns AS FK
INNER JOIN sys.foreign_keys AS FKK ON FKK.[object_id]
= FK.constraint_object_id
INNER JOIN sys.tables AS P ON P.[object_id] = FK.parent_object_id
INNER JOIN sys.tables AS R ON R.[object_id] = FK.referenced_object_id
WHERE NOT EXISTS (SELECT 1 FROM dbo.FKs AS t0
WHERE t0.constraint_name = CONCAT('['
, OBJECT_NAME(FK.constraint_object_id), ']'));
DELETE FROM trg
FROM dbo.FKs AS trg
WHERE NOT EXISTS (
SELECT 1
FROM #tbls AS src
WHERE trg.referencing_object = CONCAT('[', src.sch_name
, '].[', src.[name], ']')
OR trg.referenced_object = CONCAT('[', src.sch_name
, '].[', src.[name], ']')
)
DECLARE r_cursor_fk_drop CURSOR LOCAL FAST_FORWARD READ_ONLY FOR
SELECT t.referencing_object,
t.referenced_object,
t.constraint_name
FROM dbo.FKs AS t
WHERE EXISTS (SELECT 1 FROM sys.foreign_key_columns AS FK
WHERE t.constraint_name = CONCAT('['
, OBJECT_NAME(FK.constraint_object_id), ']'))
GROUP BY t.referencing_object,
t.referenced_object,
t.constraint_name;
OPEN r_cursor_fk_drop;
FETCH NEXT FROM r_cursor_fk_drop
INTO @referencing_object,
@referenced_object,
@constraint_name
WHILE @@FETCH_STATUS = 0
BEGIN
SET @stm = CONCAT('ALTER TABLE ', @referencing_object
, ' DROP CONSTRAINT ', @constraint_name, ';');
PRINT @stm;
BEGIN TRY
BEGIN TRANSACTION;
EXEC sys.sp_executesql @stmt = @stm;
COMMIT;
END TRY
BEGIN CATCH
SET @ERROR = ERROR_MESSAGE();
PRINT @ERROR;
IF (@@TRANCOUNT > 0) ROLLBACK TRANSACTION
END CATCH
FETCH NEXT FROM r_cursor_fk_drop
INTO @referencing_object,
@referenced_object,
@constraint_name;
END
CLOSE r_cursor_fk_drop;
DEALLOCATE r_cursor_fk_drop;
DECLARE r_cursor CURSOR LOCAL FAST_FORWARD READ_ONLY FOR
SELECT t.[name],
t.sch_name,
t.IsNOTInsert
FROM #tbls AS t
ORDER BY t.[name] ASC;
OPEN r_cursor;
FETCH NEXT FROM r_cursor
INTO @table_name,
@schema_name,
@IsNOTInsert;
WHILE @@FETCH_STATUS = 0
BEGIN
SET @cols = '';
SET @is_identity = 0;
SET @col_name_identity = NULL;
SET @stm = CONCAT('TRUNCATE TABLE ', @DB
, '.[', @schema_name, '].[', @table_name, ']');
PRINT @stm;
BEGIN TRY
BEGIN TRANSACTION;
EXEC sys.sp_executesql @stmt = @stm;
COMMIT;
END TRY
BEGIN CATCH
SET @ERROR = ERROR_MESSAGE();
PRINT @ERROR;
IF (@@TRANCOUNT > 0) ROLLBACK TRANSACTION
END CATCH
IF (@IsNOTInsert = 0)
BEGIN
SELECT @cols = @cols + CASE WHEN @cols = ''
THEN c.[name] ELSE ',' + c.name END,
@is_identity = @is_identity + c.is_identity,
@col_name_identity = CASE WHEN (c.is_identity = 1)
THEN c.[name] ELSE @col_name_identity END
FROM sys.tables t,
sys.columns c
WHERE t.[object_id] = c.[object_id]
AND t.[name] = @table_name
AND c.is_computed = 0;
SET @stm = '';
IF @is_identity > 0 SET @stm = CONCAT('SET IDENTITY_INSERT '
, @DB, '.[', @schema_name, '].[', @table_name, '] ON');
SET @stm = CONCAT(@stm, ' INSERT INTO ', @DB, '.['
, @schema_name, '].[', @table_name
, '](', @cols, ') SELECT ', @cols
, ' FROM [',@DBMaster,'].['
, @schema_name, '].['
, @table_name, '] WITH(NOLOCK)');
--an optional limit on how much data gets copied can be set here
IF @HistoryLimited = 1
BEGIN
IF @table_name LIKE '%History'
SET @stm = CONCAT(@stm
, ' WHERE ChangeDateTime > DATEADD (month, -1, SYSDATETIME()) ');
END
SET @stm = CONCAT(@stm, ' OPTION(RECOMPILE)');
IF @is_identity > 0 SET @stm = CONCAT(@stm, ' SET IDENTITY_INSERT '
, @DB, '.[', @schema_name, '].[', @table_name, '] OFF');
IF @is_identity > 0 SET @stm = CONCAT(@stm, ' DBCC CHECKIDENT ("'
, @table_name, '")');
SET @StartMoment = SYSDATETIME();
SET @ERROR = NULL;
PRINT @stm;
BEGIN TRY
BEGIN TRANSACTION;
EXEC sys.sp_executesql @stmt = @stm;
COMMIT;
END TRY
BEGIN CATCH
SET @ERROR = ERROR_MESSAGE();
PRINT @ERROR;
IF (@@TRANCOUNT > 0) ROLLBACK TRANSACTION
END CATCH
SET @FinishMoment = SYSDATETIME();
SET @stm = CONCAT('SELECT COUNT_BIG(*) FROM ', '[', @schema_name
, '].[', @table_name, '] WITH (NOLOCK);');
DELETE FROM @cnt;
INSERT INTO @cnt (cnt)
EXEC sys.sp_executesql @stmt = @stm;
INSERT INTO #tbl_res (
SchName,
TblName,
StartMoment,
FinishMoment,
Cnt,
ErrorMsg
)
SELECT @schema_name,
@table_name,
@StartMoment,
@FinishMoment,
COALESCE((SELECT SUM(cnt) FROM @cnt), 0) AS Cnt,
@ERROR;
END
FETCH NEXT FROM r_cursor
INTO @table_name,
@schema_name,
@IsNOTInsert;
END
CLOSE r_cursor;
DEALLOCATE r_cursor;
WHILE (@RowCount > 0)
BEGIN
SET @RowCount = 0;
SET @WhileDelCount += 1;
DECLARE r_cursor_fk_corr CURSOR LOCAL FAST_FORWARD READ_ONLY FOR
SELECT t.referencing_object,
t.referenced_object,
t.constraint_name,
STRING_AGG (CONCAT('(trg.', t.referencing_column_name
, '=src.', t.referenced_column_name, ')'), ' AND ')
WITHIN GROUP (ORDER BY t.constraint_column_id ASC) AS rules,
STRING_AGG (CONCAT('(trg.', t.referencing_column_name
, ' IS NOT NULL)'), ' AND ')
WITHIN GROUP (ORDER BY t.constraint_column_id ASC) AS key_cols
FROM dbo.FKs AS t
GROUP BY t.referencing_object,
t.referenced_object,
t.constraint_name;
OPEN r_cursor_fk_corr;
FETCH NEXT FROM r_cursor_fk_corr
INTO @referencing_object
, @referenced_object
, @constraint_name
, @rules
, @key_cols;
WHILE @@FETCH_STATUS = 0
BEGIN
SET @stm = CONCAT('DELETE FROM trg FROM ', @referencing_object
,' AS trg WHERE ', @key_cols, ' AND NOT EXISTS (SELECT 1 FROM '
, @referenced_object,
' AS src WITH (NOLOCK) WHERE ', @rules, ');');
PRINT @stm;
BEGIN TRY
BEGIN TRANSACTION;
EXEC sys.sp_executesql @stmt = @stm;
SET @RowCount += @@ROWCOUNT;
COMMIT;
END TRY
BEGIN CATCH
SET @ERROR = ERROR_MESSAGE();
PRINT @ERROR;
IF (@@TRANCOUNT > 0) ROLLBACK TRANSACTION
END CATCH
FETCH NEXT FROM r_cursor_fk_corr
INTO @referencing_object
, @referenced_object
, @constraint_name
, @rules
, @key_cols;
END
CLOSE r_cursor_fk_corr;
DEALLOCATE r_cursor_fk_corr;
END
PRINT CONCAT('WHILE DELETE COUNT: ', @WhileDelCount);
DECLARE r_cursor_stat CURSOR LOCAL FAST_FORWARD READ_ONLY FOR
SELECT CONCAT('UPDATE STATISTICS ', @DB, '.[', t.sch_name, '].['
, t.[name], '] WITH FULLSCAN;') AS stm
FROM #tbls AS t;
OPEN r_cursor_stat;
FETCH NEXT FROM r_cursor_stat
INTO @stm;
WHILE @@FETCH_STATUS = 0
BEGIN
PRINT @stm;
BEGIN TRY
BEGIN TRANSACTION;
EXEC sys.sp_executesql @stmt = @stm;
COMMIT;
END TRY
BEGIN CATCH
SET @ERROR = ERROR_MESSAGE();
PRINT @ERROR;
IF (@@TRANCOUNT > 0) ROLLBACK TRANSACTION
END CATCH
FETCH NEXT FROM r_cursor_stat
INTO @stm
END
CLOSE r_cursor_stat;
DEALLOCATE r_cursor_stat;
DECLARE r_cursor_trigg_on CURSOR LOCAL FAST_FORWARD READ_ONLY FOR
SELECT FORMATMESSAGE('ALTER TABLE [%s].[%s] ENABLE TRIGGER [%s];'
+ CHAR(13), SCHEMA_NAME(b.[schema_id]), OBJECT_NAME(t.parent_id)
, t.[Name]) AS stm
FROM sys.triggers t
LEFT JOIN sys.tables b ON b.[object_id] = t.parent_id
WHERE t.is_disabled = 1
AND t.[type_desc] = 'SQL_TRIGGER'
AND OBJECT_NAME(t.parent_id) IS NOT NULL
OPEN r_cursor_trigg_on;
FETCH NEXT FROM r_cursor_trigg_on
INTO @stm;
WHILE @@FETCH_STATUS = 0
BEGIN
PRINT @stm;
BEGIN TRY
BEGIN TRANSACTION;
EXEC sys.sp_executesql @stmt = @stm;
COMMIT;
END TRY
BEGIN CATCH
SET @ERROR = ERROR_MESSAGE();
PRINT @ERROR;
IF (@@TRANCOUNT > 0) ROLLBACK TRANSACTION
END CATCH
FETCH NEXT FROM r_cursor_trigg_on
INTO @stm;
END
CLOSE r_cursor_trigg_on;
DEALLOCATE r_cursor_trigg_on;
SET @stm = '';
SELECT @stm += FORMATMESSAGE('ENABLE TRIGGER [%s] ON DATABASE;'
+ CHAR(13), t.[Name])
FROM sys.triggers t
WHERE t.is_disabled = 1
AND t.[type_desc] = 'SQL_TRIGGER'
AND OBJECT_NAME(t.parent_id) IS NULL;
PRINT @stm;
BEGIN TRY
BEGIN TRANSACTION;
EXEC sys.sp_executesql @stmt = @stm;
COMMIT;
END TRY
BEGIN CATCH
SET @ERROR = ERROR_MESSAGE();
PRINT @ERROR;
IF (@@TRANCOUNT > 0) ROLLBACK TRANSACTION
END CATCH
DECLARE r_cursor_fk_recover CURSOR LOCAL FAST_FORWARD READ_ONLY FOR
SELECT t.referencing_object,
t.referenced_object,
t.constraint_name,
STRING_AGG (t.referencing_column_name, ',')
WITHIN GROUP (ORDER BY t.constraint_column_id ASC)
AS referencing_columns,
STRING_AGG (t.referenced_column_name, ',')
WITHIN GROUP (ORDER BY t.constraint_column_id ASC)
AS referenced_columns,
t.delete_referential_action,
t.update_referential_action
FROM dbo.FKs AS t
WHERE NOT EXISTS (SELECT 1 FROM sys.foreign_key_columns AS FK
WHERE t.constraint_name = CONCAT('['
, OBJECT_NAME(FK.constraint_object_id), ']'))
GROUP BY t.referencing_object,
t.referenced_object,
t.constraint_name,
t.delete_referential_action,
t.update_referential_action;
OPEN r_cursor_fk_recover;
FETCH NEXT FROM r_cursor_fk_recover
INTO @referencing_object
, @referenced_object
, @constraint_name
, @referencing_columns
, @referenced_columns
, @delete_referential_action
, @update_referential_action;
WHILE @@FETCH_STATUS = 0
BEGIN
SET @stm = CONCAT('ALTER TABLE ', @referencing_object
,' WITH CHECK ADD CONSTRAINT ', @constraint_name,
' FOREIGN KEY(', @referencing_columns, ') REFERENCES '
, @referenced_object, ' (', @referenced_columns, ') '
, CASE
WHEN @delete_referential_action = 1
THEN 'ON DELETE CASCADE '
WHEN @delete_referential_action = 2
THEN 'ON DELETE SET NULL '
ELSE ''
END
, CASE
WHEN @update_referential_action = 1
THEN 'ON UPDATE CASCADE '
WHEN @update_referential_action = 2
THEN 'ON UPDATE SET NULL '
ELSE ''
END
, '; '
, 'ALTER TABLE ', @referencing_object, ' CHECK CONSTRAINT '
, @constraint_name, '; ');
PRINT @stm;
BEGIN TRY
BEGIN TRANSACTION;
EXEC sys.sp_executesql @stmt = @stm;
COMMIT;
END TRY
BEGIN CATCH
SET @ERROR = ERROR_MESSAGE();
PRINT @ERROR;
IF (@@TRANCOUNT > 0) ROLLBACK TRANSACTION
END CATCH
FETCH NEXT FROM r_cursor_fk_recover
INTO @referencing_object
, @referenced_object
, @constraint_name
, @referencing_columns
, @referenced_columns
, @delete_referential_action
, @update_referential_action;
END
CLOSE r_cursor_fk_recover;
DEALLOCATE r_cursor_fk_recover;
EXEC sys.sp_msforeachtable @command1="PRINT '?'"
, @command2="ALTER TABLE ? WITH CHECK CHECK CONSTRAINT ALL";
SELECT t.SchName,
t.TblName,
t.Cnt,
DATEDIFF(millisecond, t.StartMoment, t.FinishMoment) AS DiffMSec,
t.ErrorMsg
FROM #tbl_res AS t
ORDER BY t.SchName ASC, t.TblName ASC;
--================================================================
sys.sp_msforeachtable — an undocumented SQL Server stored procedure that applies a T-SQL command to every table in the current database;
sys.triggers — system catalog view with information about triggers in the database;
sys.tables — system catalog view with information about tables in the database;
sys.sp_executesql — system stored procedure that executes a Transact-SQL statement or batch, including dynamically built ones;
sys.foreign_key_columns — system catalog view describing the column composition of foreign keys;
sys.foreign_keys — system catalog view with information about foreign keys;
sys.columns — system catalog view with information about columns.
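-- For example, these catalogs can list every foreign key with its parent and
-- referenced tables (a sketch):
SELECT fk.name AS fk_name,
OBJECT_NAME(fk.parent_object_id) AS referencing_table,
OBJECT_NAME(fk.referenced_object_id) AS referenced_table
FROM sys.foreign_keys AS fk;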
--================================================================
# alex <alexbujduveanu>
# To connect to a database
mysql -h localhost -u root -p
# To backup all databases
mysqldump --all-databases --all-routines -u root -p > ~/fulldump.sql
# To restore all databases
mysql -u root -p < ~/fulldump.sql
# To create a database in utf8 charset
CREATE DATABASE owa CHARACTER SET utf8 COLLATE utf8_general_ci;
# Types of user permissions:
# ALL PRIVILEGES - gives user full unrestricted access
# CREATE - allows user to create new tables or databases
# DROP - allows user to delete tables or databases
# DELETE - allows user to delete rows from tables
# INSERT - allows user to insert rows into tables
# SELECT - allows user to use the Select command to read through databases
# UPDATE - allows user to update table rows
# GRANT OPTION - allows user to grant or remove other users' privileges
# To grant specific permissions to a particular user
GRANT permission_type ON database_name.table_name TO 'username'@'hostname';
# To add a user and give rights on the given database
GRANT ALL PRIVILEGES ON database.* TO 'user'@'localhost' IDENTIFIED BY 'password' WITH GRANT OPTION;
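# Note: MySQL 8+ removed IDENTIFIED BY from GRANT; the two-step equivalent
# (user/password are placeholders):
CREATE USER 'user'@'localhost' IDENTIFIED BY 'password';
GRANT ALL PRIVILEGES ON database.* TO 'user'@'localhost' WITH GRANT OPTION;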
# To change the root password
SET PASSWORD FOR root@localhost=PASSWORD('new_password');
# To delete a database
DROP DATABASE database_name;
# To reload privileges from MySQL grant table
FLUSH PRIVILEGES;
# Show permissions for a particular user
SHOW GRANTS FOR 'username'@'hostname';
# Find out who the current user is
SELECT CURRENT_USER();
# To delete a table in the database
DROP TABLE table_name;
# To return all records from a particular table
SELECT * FROM table_name;
# To create a table (Users table used as example)
# Note: Since username is a primary key, it is NOT NULL by default. Email is optional in this example.
CREATE TABLE Users (
username VARCHAR(80),
password VARCHAR(80) NOT NULL,
email VARCHAR(80),
PRIMARY KEY (username)
);
# To disable general logging
set global general_log=0;
--=======================================================================================
CREATE TABLE `product` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'product ID',
`name` varchar(255) NOT NULL COMMENT 'product name',
`description` varchar(1000) NOT NULL COMMENT 'product description',
`price` decimal(10,2) NOT NULL COMMENT 'unit price',
`category_id` int(11) NOT NULL COMMENT 'category ID',
`brand_id` int(11) DEFAULT NULL COMMENT 'brand ID',
`origin` varchar(255) DEFAULT NULL COMMENT 'place of origin',
`weight` decimal(10,2) DEFAULT NULL COMMENT 'weight (kg)',
`length` decimal(10,2) DEFAULT NULL COMMENT 'length (cm)',
`width` decimal(10,2) DEFAULT NULL COMMENT 'width (cm)',
`height` decimal(10,2) DEFAULT NULL COMMENT 'height (cm)',
`thumbnail` varchar(255) DEFAULT NULL COMMENT 'thumbnail URL',
`image` varchar(1000) DEFAULT NULL COMMENT 'image URL',
`is_sale` tinyint(1) NOT NULL DEFAULT '1' COMMENT 'on sale: 0 = off shelf, 1 = on sale',
`stock` int(11) NOT NULL COMMENT 'stock quantity',
`created_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'creation time',
`updated_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'update time',
PRIMARY KEY (`id`),
KEY `category_id` (`category_id`),
KEY `brand_id` (`brand_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='product table';
INSERT INTO product (name, description, price, category_id, brand_id, origin, weight, length, width, height, thumbnail, image, is_sale, stock, created_at, updated_at)
SELECT
CONCAT('product', t1.n),
CONCAT('description of product ', t1.n),
ROUND(RAND() * 1000, 2),
FLOOR(RAND() * 10) + 1,
IF(RAND() < 0.5, NULL, FLOOR(RAND() * 10) + 1),
CONCAT('origin', t1.n),
ROUND(RAND() * 10, 2),
ROUND(RAND() * 100, 2),
ROUND(RAND() * 100, 2),
ROUND(RAND() * 100, 2),
CONCAT('http://example.com/thumbnail_', t1.n, '.jpg'),
CONCAT('http://example.com/image_', t1.n, '.jpg'),
IF(RAND() < 0.5, 0, 1),
FLOOR(RAND() * 1000),
NOW() - INTERVAL FLOOR(RAND() * 365) DAY,
NOW() - INTERVAL FLOOR(RAND() * 365) DAY
FROM
(SELECT @rownum:=0) t0,
(SELECT @rownum:=@rownum+1 AS n FROM information_schema.COLUMNS LIMIT 10000) t1 ;
CREATE TABLE `order` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'order ID',
`user_id` int(11) NOT NULL COMMENT 'user ID',
`total_price` decimal(10,2) NOT NULL COMMENT 'order total',
`created_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'creation time',
`updated_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'update time',
PRIMARY KEY (`id`),
KEY `user_id` (`user_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='order table';
INSERT INTO `order` (user_id, total_price, created_at, updated_at)
SELECT
FLOOR(RAND() * 1000) + 1,
ROUND(RAND() * 10000, 2),
NOW() - INTERVAL FLOOR(RAND() * 365) DAY,
NOW() - INTERVAL FLOOR(RAND() * 365) DAY
FROM
(SELECT @rownum:=0) t0,
(SELECT @rownum:=@rownum+1 AS n FROM information_schema.COLUMNS LIMIT 10000) t1 ;
CREATE TABLE `order_item` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'order item ID',
`order_id` int(11) NOT NULL COMMENT 'order ID',
`product_id` int(11) NOT NULL COMMENT 'product ID',
`quantity` int(11) NOT NULL COMMENT 'purchased quantity',
`price` decimal(10,2) NOT NULL COMMENT 'unit price',
`created_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'creation time',
`updated_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'update time',
PRIMARY KEY (`id`),
KEY `order_id` (`order_id`),
KEY `product_id` (`product_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='order item table';
INSERT INTO order_item (order_id, product_id, quantity, price, created_at, updated_at)
SELECT
FLOOR(RAND() * 10000) + 1,
FLOOR(RAND() * 1000) + 1,
FLOOR(RAND() * 10) + 1,
ROUND(RAND() * 1000, 2),
NOW() - INTERVAL FLOOR(RAND() * 365) DAY,
NOW() - INTERVAL FLOOR(RAND() * 365) DAY
FROM
(SELECT @rownum:=0) t0,
(SELECT @rownum:=@rownum+1 AS n FROM information_schema.COLUMNS LIMIT 10000) t1;
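-- A quick sanity query over the generated data (a sketch using the columns
-- defined above): top 10 products by revenue across all order items.
SELECT p.id, p.name, SUM(oi.quantity * oi.price) AS revenue
FROM order_item oi
JOIN product p ON p.id = oi.product_id
GROUP BY p.id, p.name
ORDER BY revenue DESC
LIMIT 10;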
mkdir -p /mnt/experimental/data
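# the directory must exist, be empty, and be owned by the postgres OS user
chown postgres:postgres /mnt/experimental/data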
CREATE TABLESPACE fias_data OWNER postgres LOCATION '/mnt/experimental/data';
CREATE DATABASE fias WITH OWNER postgres TABLESPACE fias_data;
--=====================================================================================================
--========================================================
EXPLAIN ANALYSE
SELECT *
FROM post
ORDER BY create_on
FETCH FIRST 50 ROWS ONLY;
EXPLAIN ANALYSE
SELECT *
FROM post
ORDER BY create_on
OFFSET 50 ROWS FETCH NEXT 50 ROWS ONLY;
EXPLAIN ANALYSE
SELECT *
FROM post
ORDER BY create_on
OFFSET 9950 ROWS FETCH NEXT 50 ROWS ONLY;
CREATE INDEX idx_post_created_on ON post (create_on DESC);
CREATE INDEX idx_post_created_on_id ON post (create_on DESC, id DESC); -- distinct name so it can coexist with the index above
EXPLAIN ANALYSE
SELECT *
FROM post
ORDER BY create_on DESC, id DESC
FETCH FIRST 50 ROWS ONLY;
EXPLAIN ANALYSE
SELECT *
FROM post
WHERE (create_on, id) < ('2022-10-30 00:11:43.224314', '8766d496-44c7-4e48-af29-b19178692cd9')
ORDER BY create_on DESC, id DESC
FETCH FIRST 50 ROWS ONLY;
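-- Keyset ("seek") pagination sketch: bind the (create_on, id) of the last row
-- of the previous page as the cursor, so cost stays flat at any page depth
-- (:last_create_on and :last_id are placeholders for those values).
SELECT *
FROM post
WHERE (create_on, id) < (:last_create_on, :last_id)
ORDER BY create_on DESC, id DESC
FETCH FIRST 50 ROWS ONLY;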
--========================================================
drop table if exists test_part.buh_operations cascade;
create table test_part.buh_operations
(
op_id INT8,
op_date DATE,
op_code VARCHAR(20),
op_money numeric
) PARTITION BY RANGE (op_date);
--- add 300 partitions
do $$
declare
r record;
begin
for r in select
format ('CREATE TABLE test_part.buh_operations_%s PARTITION OF test_part.buh_operations FOR VALUES FROM (DATE''%s'') TO (DATE''%s'')'
,to_char(bgn, 'YYYYMMDD'), to_char(bgn, 'YYYY-MM-DD'), to_char(fnsh, 'YYYY-MM-DD')) as stmt
from (
select '2024-08-16'::date - g as bgn, '2024-08-16'::date - g +1 as fnsh from pg_catalog.generate_series (1,300) g
) b loop
raise notice '%', r.stmt ;
execute r.stmt;
end loop;
end;
$$;
-- populate with test data
insert into test_part.buh_operations
select g, '2024-08-16'::date - (g/1000)::int , 'INC28', random()*100+50 from pg_catalog.generate_series(1000, 300000) g;
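-- sanity check (a sketch): a literal date predicate should prune to a single partition
explain select * from test_part.buh_operations where op_date = DATE '2024-08-10';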
--=========================================================================================
drop table if exists test_part.buh_operations_pth CASCADE;
create table test_part.buh_operations_pth
(
op_id INT8,
op_date DATE not NULL,
op_code VARCHAR(20),
op_money numeric
);
insert into test_part.buh_operations_pth values (0, '2024-01-01'::date, 'INC34', 0.0 ); -- the ALTER below fails on an empty table
-- interval-based auto-partitioning syntax (Postgres Pro Enterprise, not vanilla PostgreSQL)
ALTER TABLE test_part.buh_operations_pth PARTITION BY RANGE (op_date) START FROM ('2023-10-21'::date) INTERVAL (interval'1 day');
delete from test_part.buh_operations_pth; -- remove the seed row
-- populate with test data
insert into test_part.buh_operations_pth
select g, '2024-08-16'::date - (g/1000)::int , 'INC28', random()*100+50 from pg_catalog.generate_series(1000, 300000) g;
--=========================================================================================
set max_parallel_workers_per_gather = 0;
explain analyze select * from test_part.buh_operations where op_date in (select now()::date - 10);
drop table if exists test_part.simple_dict;
create table test_part.simple_dict(op_code VARCHAR(20),op_date DATE,op_descript VARCHAR(2000));
insert into test_part.simple_dict (op_code, op_date, op_descript)
select 'INC28' as op_code, '2024-08-16'::date - g::int as op_date, 'assadfsdsdgYYTRYTZXXZXXbmbbmb' as op_descript
from pg_catalog.generate_series(1,15) g;
set enable_partitionwise_join = true; -- just in case, though it will not help here
set max_parallel_workers_per_gather =0;
explain analyze
select a.*, b.op_descript from test_part.buh_operations a inner join test_part.simple_dict b
on a.op_date = b.op_date
where 1=1;
create schema samplesjpaid;
set search_path to samplesjpaid,public;
--- injection
select * from person where name = '';
select * from person;
select * from hacker;
update person set amount = null where id = 2;
--working
CREATE TABLE PERSON
(
ID BIGSERIAL PRIMARY KEY,
NAME TEXT NOT NULL,
CONSTRAINT name_length CHECK (length(name) <= 512)
);
alter table person add column amount bigint;
insert into person(name) values ('John Doe'), ('Jane Doe');
insert into person(name) values ('O''Reilly');
insert into person(name) values ('O"Reilly');
select * from person where name like '%''%';
select * from person where name = '%';
select now();
select clock_timestamp();
select * from account;
insert into ITEM (ID, NAME, DESCRIPTION)
select id, 'name' || id, 'stub description for ' || id from generate_series(1, 1000000) as id;
SELECT * FROM ITEM FETCH FIRST 3 ROWS ONLY OFFSET 10;
SELECT * FROM ITEM TABLESAMPLE bernoulli(10) FETCH FIRST 3 ROWS ONLY OFFSET 10;
insert into ITEM (ID, DESCRIPTION)
select id, 'stub description for 1' from generate_series(1, 1000000) as id;
select nextval('item_id_seq') as id from generate_series(1, 1000000);
create extension pg_stat_statements with schema public;
select now();
show timezone ;
select * from pg_stat_statements where query ilike '%ITEM%';
select dbid, count(queryid) from pg_stat_statements group by dbid;
DROP TABLE ITEM;
CREATE TABLE ITEM
(
ID BIGSERIAL PRIMARY KEY,
NAME TEXT NOT NULL,
DESCRIPTION TEXT,
LAST_UPDATED TIMESTAMP NOT NULL DEFAULT NOW()
);
CREATE INDEX ITEM_NAME_IDX ON ITEM (NAME);
ALTER SEQUENCE ITEM_ID_SEQ INCREMENT BY 100;
select nextval('ITEM_ID_SEQ');
select * from ITEM order by id;
select last_updated, count(id) from ITEM group by 1;
SELECT pg_stat_statements_reset();
truncate table item;
delete from item where 1=1;
SELECT pg_size_pretty(pg_relation_size('item'));
COPY item FROM '/usr/share/import/items.txt'; --1,000,000 rows affected in 8 s 693 ms
select count(*) from item;
select * from item;
---
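-- note: a GiST index on a plain integer column and the <-> distance operator
-- below require the btree_gist extension
CREATE EXTENSION IF NOT EXISTS btree_gist;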
CREATE INDEX idx_id_gist ON item USING gist(id);
drop index idx_id_gist;
explain analyze
SELECT *
FROM item
ORDER BY id <-> 15648
LIMIT 6;
--- locking
CREATE TABLE ACCOUNT (
id bigserial primary key,
amount bigint not null default 1000
);
select * from account;
select id, sum(amount) from account group by rollup (1);
update account set amount = 1000 where 1=1;
SELECT pg_advisory_lock(1);
SELECT pg_advisory_xact_lock(1);
select pg_advisory_unlock(1);
select pg_backend_pid();
SELECT * FROM pg_locks WHERE pid = pg_backend_pid() AND locktype = 'advisory';
SELECT * FROM pg_locks WHERE locktype = 'advisory';
insert into account (amount) values (1000), (1000), (1000);
ROLLBACK;
BEGIN;
update samplesjpaid.account set amount = amount - 1 where id = 1;
update samplesjpaid.account set amount = amount + 1 where id = 2;
COMMIT;
BEGIN;
update samplesjpaid.account set amount = amount - 1 where id = 2;
update samplesjpaid.account set amount = amount + 1 where id = 3;
COMMIT;
BEGIN;
update samplesjpaid.account set amount = amount - 1 where id = 3;
update samplesjpaid.account set amount = amount + 1 where id = 1;
COMMIT;
select * from samplesjpaid.account where id in (1,2,3) FOR NO KEY UPDATE;
select * from samplesjpaid.account;
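-- Sketch: serializing a critical section with a transaction-scoped advisory
-- lock (key 1 is arbitrary); the lock is released automatically on COMMIT/ROLLBACK.
BEGIN;
SELECT pg_advisory_xact_lock(1);
update samplesjpaid.account set amount = amount - 1 where id = 1;
COMMIT;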
--pgbench -r -U postgres -t 10000 -f /usr/share/import/bench-0.sql -c 8 -j 2 playground
--pgbench -r -U postgres -t 10000 -f /usr/share/import/bench-2.sql -c 8 -j 2 playground
--PGOPTIONS='-c default_transaction_isolation=serializable' pgbench -r -U postgres -t 1000 -f /usr/share/import/bench-1.sql -c 8 -j 2 playground
--PGOPTIONS='-c default_transaction_isolation=repeatable\ read' pgbench -r -U postgres -t 1000 -f /usr/share/import/bench-1.sql -c 8 -j 2 playground
--PGOPTIONS='-c default_transaction_isolation=repeatable\ read' pgbench -r -U postgres -t 1000 -f /usr/share/import/bench-1.sql -c 8 -j 2 playground
--PGOPTIONS='-c default_transaction_isolation=repeatable\ read' pgbench -r -U postgres -t 1000 -f /usr/share/import/bench-4.sql -c 8 -j 1 playground
--PGOPTIONS='-c default_transaction_isolation=read\ committed' pgbench -r -U postgres -t 1000 -f /usr/share/import/bench-4.sql -c 8 -j 2 playground
--PGOPTIONS='-c default_transaction_isolation=read\ committed' pgbench -r -U postgres -t 1000 -f /usr/share/import/bench-6.sql -c 8 -j 1 playground
--PGOPTIONS='-c default_transaction_isolation=repeatable\ read' pgbench -r -U postgres -t 1000 -f /usr/share/import/bench-6.sql -c 8 -j 1 playground
--PGOPTIONS='-c default_transaction_isolation=repeatable\ read' pgbench -r -U postgres -t 10 -f /usr/share/import/bench-6.sql -c 2 -j 1 playground --verbose-errors
--=====================================================================================
CREATE SCHEMA IF NOT EXISTS samplesjpaid;
CREATE TABLE ITEM
(
ID BIGSERIAL PRIMARY KEY,
NAME TEXT NOT NULL,
DESCRIPTION TEXT,
LAST_UPDATED TIMESTAMP NOT NULL DEFAULT NOW() --useful to identify items inserted in single transaction
);
ALTER SEQUENCE ITEM_ID_SEQ INCREMENT BY 100;
CREATE INDEX ITEM_NAME_IDX ON ITEM (NAME);
CREATE TABLE PERSON
(
ID BIGSERIAL PRIMARY KEY,
NAME TEXT NOT NULL,
CONSTRAINT name_length CHECK (length(name) <= 512)
);
--=====================================================================================
CREATE SCHEMA IF NOT EXISTS sqlinjection;
SET SEARCH_PATH TO sqlinjection,public;
insert into book (title)
values ('Head First Java'),
('Another Some Title 4'),
('You Don''t Know JS. Up & Going'),
('Some Title 1'),
('Some Title 3'),
('Some Title 2'),
('What if % or _ is present in the title?')
;
truncate book;
select * from book where title like '%term1%' or title like '%term2%' or title like '%term3%' ;
select * from book where title like 'What if \% or \_ is present in the title?' escape '\';
select * from book where title like 'What if |% or |_ is present in the title?' escape '|';
select * from book where title ~ '^S{1,3}.+\d+$';
select * from book where title = '';
select * from book;
select * from book where title like '%'||E'\u0027'||'%';
select * from book where title like 'What if \% or \_ is present in the title?%' escape '\';
SELECT pg_stat_statements_reset();
select * from pg_stat_statements where query like '%book%';
--=====================================================================================
CREATE SEQUENCE repo_id_seq START WITH 100 INCREMENT BY 100;
CREATE SEQUENCE repo_file_id_seq START WITH 100 INCREMENT BY 100;
CREATE TABLE REPO
(
ID BIGSERIAL PRIMARY KEY,
NAME TEXT NOT NULL,
RELATIVE_PATH TEXT NOT NULL,
CONSTRAINT r_name_length CHECK (length(name) > 0 AND length(name) <= 512)
);
ALTER SEQUENCE REPO_ID_SEQ INCREMENT BY 100 RESTART 100;
CREATE UNIQUE INDEX REPO_RELATIVE_PATH_UNIQUE_IDX ON REPO (RELATIVE_PATH);
CREATE TABLE REPO_FILE
(
ID BIGSERIAL PRIMARY KEY,
REPO_ID BIGINT NOT NULL REFERENCES REPO (ID) ON DELETE CASCADE,
NAME TEXT NOT NULL,
RELATIVE_PATH TEXT NOT NULL,
CONTENT_TYPE TEXT NOT NULL,
SIZE_BYTES BIGINT,
CONTENT TEXT NOT NULL,
CONSTRAINT rf_name_length CHECK (length(name) > 0 AND length(name) <= 512)
);
ALTER SEQUENCE REPO_FILE_ID_SEQ INCREMENT BY 100 RESTART 100;
CREATE INDEX RF_R_ID_IDX ON REPO_FILE (REPO_ID);
CREATE INDEX RF_NAME_IDX ON REPO_FILE (NAME);
CREATE UNIQUE INDEX R_F_NAME_ID_UNIQUE_IDX ON REPO_FILE (REPO_ID, RELATIVE_PATH);
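-- gin_trgm_ops below comes from the pg_trgm extension; install it first
CREATE EXTENSION IF NOT EXISTS pg_trgm;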
CREATE INDEX rf_content_idx ON repo_file USING gin(content gin_trgm_ops); --for queries of type "like '%term%'"
--=====================================================================================
CREATE TABLE account
(
account_id SERIAL PRIMARY KEY,
first_name TEXT NOT NULL,
last_name TEXT NOT NULL,
email TEXT NOT NULL UNIQUE,
password TEXT NOT NULL,
CHECK (first_name !~ '\s' AND last_name !~ '\s') ,
CHECK (email ~* '^\w+@\w+[.]\w+$'),
CHECK (char_length(password)>=8)
);
CREATE TABLE account_history (
account_history_id BIGSERIAL PRIMARY KEY,
account_id INT NOT NULL REFERENCES account (account_id),
search_key TEXT NOT NULL,
search_date DATE NOT NULL,
UNIQUE (account_id, search_key, search_date)
);
CREATE TABLE seller_account (
seller_account_id SERIAL PRIMARY KEY,
account_id INT NOT NULL REFERENCES account (account_id),
total_rank FLOAT,
number_of_advertisement INT,
street_name TEXT NOT NULL,
street_number TEXT NOT NULL,
zip_code TEXT NOT NULL,
city TEXT NOT NULL
);
CREATE TABLE car_model (
car_model_id SERIAL PRIMARY KEY,
make text,
model text,
UNIQUE (make, model)
);
CREATE TABLE car (
car_id SERIAL PRIMARY KEY,
number_of_owners INT NOT NULL,
registration_number TEXT UNIQUE NOT NULL,
manufacture_year INT NOT NULL,
number_of_doors INT DEFAULT 5 NOT NULL,
car_model_id INT NOT NULL REFERENCES car_model (car_model_id),
mileage INT
);
CREATE TABLE advertisement (
advertisement_id SERIAL PRIMARY KEY,
advertisement_date TIMESTAMP WITH TIME ZONE NOT NULL,
car_id INT NOT NULL REFERENCES car (car_id),
seller_account_id INT NOT NULL REFERENCES seller_account (seller_account_id)
);
CREATE TABLE advertisement_picture (
advertisement_picture_id SERIAL PRIMARY KEY,
advertisement_id INT REFERENCES advertisement (advertisement_id),
picture_location TEXT UNIQUE
);
CREATE TABLE advertisement_rating (
advertisement_rating_id SERIAL PRIMARY KEY,
advertisement_id INT NOT NULL REFERENCES advertisement (advertisement_id),
account_id INT NOT NULL REFERENCES account (account_id),
advertisement_rating_date DATE NOT NULL,
rank INT NOT NULL,
review TEXT NOT NULL,
CHECK (char_length(review) <= 200),
CHECK (rank IN (1, 2, 3, 4, 5))
);
CREATE TABLE favorite_ads (
account_id INT NOT NULL REFERENCES account (account_id),
advertisement_id INT NOT NULL REFERENCES advertisement (advertisement_id),
primary key (account_id, advertisement_id)
);
--=====================================================================================
CREATE SCHEMA IF NOT EXISTS sqlbook;
set search_path to sqlbook;
drop table jobhist;
drop table emp;
drop table dept;
CREATE TABLE dept
(
deptno smallint NOT NULL CONSTRAINT dept_pk PRIMARY KEY,
dname text NOT NULL CONSTRAINT dept_dname_uq UNIQUE, --VARCHAR2(14)
loc text , --VARCHAR2(13)
CONSTRAINT dname_length CHECK (length(dname) <= 14),
CONSTRAINT loc_length CHECK (length(loc) <= 13)
);
--
-- Create the 'emp' table
--
CREATE TABLE emp
(
empno smallint NOT NULL CONSTRAINT emp_pk PRIMARY KEY,
ename text,
job text,
mgr smallint,
hiredate DATE,
sal decimal(7,2) CONSTRAINT emp_sal_ck CHECK (sal > 0),
comm decimal(7,2),
deptno smallint CONSTRAINT emp_ref_dept_fk
REFERENCES dept(deptno)
);
--
-- Create the 'jobhist' table
--
CREATE TABLE jobhist
(
empno smallint NOT NULL,
startdate DATE NOT NULL,
enddate DATE,
job text,
sal decimal(7,2),
comm decimal(7,2),
deptno smallint,
chgdesc text,
CONSTRAINT jobhist_pk PRIMARY KEY (empno, startdate),
CONSTRAINT jobhist_ref_emp_fk FOREIGN KEY (empno)
REFERENCES emp (empno) ON DELETE CASCADE,
CONSTRAINT jobhist_ref_dept_fk FOREIGN KEY (deptno)
REFERENCES dept (deptno) ON DELETE SET NULL,
CONSTRAINT jobhist_date_chk CHECK (startdate <= enddate)
);
INSERT INTO dept VALUES (10,'ACCOUNTING','NEW YORK');
INSERT INTO dept VALUES (20,'RESEARCH','DALLAS');
INSERT INTO dept VALUES (30,'SALES','CHICAGO');
INSERT INTO dept VALUES (40,'OPERATIONS','BOSTON');
--
-- Load the 'emp' table
--
INSERT INTO emp VALUES (7369,'SMITH','CLERK',7902,'17-DEC-80',800,NULL,20);
INSERT INTO emp VALUES (7499,'ALLEN','SALESMAN',7698,'20-FEB-81',1600,300,30);
INSERT INTO emp VALUES (7521,'WARD','SALESMAN',7698,'22-FEB-81',1250,500,30);
INSERT INTO emp VALUES (7566,'JONES','MANAGER',7839,'02-APR-81',2975,NULL,20);
INSERT INTO emp VALUES (7654,'MARTIN','SALESMAN',7698,'28-SEP-81',1250,1400,30);
INSERT INTO emp VALUES (7698,'BLAKE','MANAGER',7839,'01-MAY-81',2850,NULL,30);
INSERT INTO emp VALUES (7782,'CLARK','MANAGER',7839,'09-JUN-81',2450,NULL,10);
INSERT INTO emp VALUES (7788,'SCOTT','ANALYST',7566,'19-APR-87',3000,NULL,20);
INSERT INTO emp VALUES (7839,'KING','PRESIDENT',NULL,'17-NOV-81',5000,NULL,10);
INSERT INTO emp VALUES (7844,'TURNER','SALESMAN',7698,'08-SEP-81',1500,0,30);
INSERT INTO emp VALUES (7876,'ADAMS','CLERK',7788,'23-MAY-87',1100,NULL,20);
INSERT INTO emp VALUES (7900,'JAMES','CLERK',7698,'03-DEC-81',950,NULL,30);
INSERT INTO emp VALUES (7902,'FORD','ANALYST',7566,'03-DEC-81',3000,NULL,20);
INSERT INTO emp VALUES (7934,'MILLER','CLERK',7782,'23-JAN-82',1300,NULL,10);
--
-- Load the 'jobhist' table
--
INSERT INTO jobhist VALUES (7369,'17-DEC-80',NULL,'CLERK',800,NULL,20,
'New Hire');
INSERT INTO jobhist VALUES (7499,'20-FEB-81',NULL,'SALESMAN',1600,300,30,
'New Hire');
INSERT INTO jobhist VALUES (7521,'22-FEB-81',NULL,'SALESMAN',1250,500,30,
'New Hire');
INSERT INTO jobhist VALUES (7566,'02-APR-81',NULL,'MANAGER',2975,NULL,20,
'New Hire');
INSERT INTO jobhist VALUES (7654,'28-SEP-81',NULL,'SALESMAN',1250,1400,30,
'New Hire');
INSERT INTO jobhist VALUES (7698,'01-MAY-81',NULL,'MANAGER',2850,NULL,30,
'New Hire');
INSERT INTO jobhist VALUES (7782,'09-JUN-81',NULL,'MANAGER',2450,NULL,10,
'New Hire');
INSERT INTO jobhist VALUES (7788,'19-APR-87','12-APR-88','CLERK',1000,NULL,20,
'New Hire');
INSERT INTO jobhist VALUES (7788,'13-APR-88','04-MAY-89','CLERK',1040,NULL,20,
'Raise');
INSERT INTO jobhist VALUES (7788,'05-MAY-90',NULL,'ANALYST',3000,NULL,20,
'Promoted to Analyst');
INSERT INTO jobhist VALUES (7839,'17-NOV-81',NULL,'PRESIDENT',5000,NULL,10,
'New Hire');
INSERT INTO jobhist VALUES (7844,'08-SEP-81',NULL,'SALESMAN',1500,0,30,
'New Hire');
INSERT INTO jobhist VALUES (7876,'23-MAY-87',NULL,'CLERK',1100,NULL,20,
'New Hire');
INSERT INTO jobhist VALUES (7900,'03-DEC-81','14-JAN-83','CLERK',950,NULL,10,
'New Hire');
INSERT INTO jobhist VALUES (7900,'15-JAN-83',NULL,'CLERK',950,NULL,30,
'Changed to Dept 30');
INSERT INTO jobhist VALUES (7902,'03-DEC-81',NULL,'ANALYST',3000,NULL,20,
'New Hire');
INSERT INTO jobhist VALUES (7934,'23-JAN-82',NULL,'CLERK',1300,NULL,10,
'New Hire');
--=====================================================================================
set search_path to sqlbook;
select * from emp;
select * from dept;
select * from (
select ename, job, sal as salary from emp) x where salary > 500
order by salary desc;
select ename || ' works as ' || job, row_number() over (order by sal desc) from emp order by empno;
select ename, sal,
case when sal < 1000 then 'under'
when sal > 2000 then 'over'
else 'OK' end
from emp;
select * from emp order by sal desc limit 5;
select * from emp order by random() limit 5;
select coalesce(comm, 0), * from emp;
select * from emp where deptno in (10, 20)
and (ename like '%I%' or job like '%ER%');
select * from emp order by 1,2,3,4;
select ename, job, substr(job, length(job) -1) from emp
order by substr(job, length(job) -1);
select e.ename, d.loc from emp e, dept d
where e.deptno = d.deptno and d.deptno = 10;
select e.ename, d.loc from emp e
inner join dept d on (e.deptno = d.deptno)
where d.deptno = 10;
select deptno from dept
intersect
select deptno from emp;
select deptno from dept
except
select deptno from emp;
select deptno from dept d
where not exists(
select 1 from emp e where e.deptno = d.deptno
);
select d.* from dept d
left outer join emp e on (d.deptno = e.deptno)
where e.ename is null;
select e.ename, d.loc from emp e
join dept d on e.deptno = d.deptno
where d.deptno = 10;
select e.ename, d.loc from emp e, dept d
where e.deptno=d.deptno and e.deptno = 10;
select * from emp e
full outer join dept d on e.deptno = d.deptno;
select * from emp e
where coalesce(e.comm,0) < (select comm from emp where ename = 'WARD');
select * from emp e
where comm is null or comm < (select comm from emp where ename = 'WARD');
--insert into
--create table t2 as select ...
update emp set sal = sal - 1 where deptno = 20;
--merge
--duplicates
create table dupes (id integer, name text);
insert into dupes values (1, 'NAPOLEON' );
insert into dupes values (2, 'DYNAMITE' );
insert into dupes values (3, 'DYNAMITE' );
insert into dupes values (4, 'SHE SELLS' );
insert into dupes values (5, 'SEA SHELLS' );
insert into dupes values (6, 'SEA SHELLS' );
insert into dupes values (7, 'SEA SHELLS' );
delete from dupes;
select * from dupes;
select name from dupes
group by name having count(id) > 1 ;
delete from dupes
where name in
(select name from dupes
group by name having count(id) > 1)
and (id, name) not in
(select min(id), name from dupes
group by name having count(id) > 1);
delete from dupes
where id not in (select min(id)
from dupes group by name) ;
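-- A shorter Postgres alternative (a sketch): self-join delete that keeps the
-- lowest id per name.
delete from dupes a
using dupes b
where a.name = b.name
and a.id > b.id;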
--- METADATA ---
select * from information_schema.tables;
--tables in schema postgres
select table_name
from information_schema.tables
where table_schema = 'sqlbook';
--table columns
select column_name, data_type, ordinal_position from information_schema.columns
where table_schema = 'sqlbook' and table_name = 'emp'
order by ordinal_position;
--indexes on table
select ind.indexname, ind.indexdef from pg_indexes ind
where ind.tablename = 'emp' and ind.schemaname = 'sqlbook';
--constraints
select a.table_name,
a.constraint_name,
b.column_name,
a.constraint_type
from information_schema.table_constraints a,
information_schema.key_column_usage b
where a.table_name = 'emp'
and a.table_schema = 'sqlbook'
and a.table_name = b.table_name
and a.table_schema = b.table_schema
and a.constraint_name = b.constraint_name;
select fkeys.table_name,
fkeys.constraint_name,
fkeys.column_name,
ind_cols.indexname
from (
select a.constraint_schema,
a.table_name,
a.constraint_name,
a.column_name
from information_schema.key_column_usage a,
information_schema.referential_constraints b
where a.constraint_name = b.constraint_name
and a.constraint_schema = b.constraint_schema
and a.constraint_schema = 'sqlbook'
and a.table_name = 'emp'
) fkeys
left join
(
select a.schemaname, a.tablename, a.indexname, b.column_name
from pg_catalog.pg_indexes a,
information_schema.columns b
where a.tablename = b.table_name
and a.schemaname = b.table_schema
) ind_cols
on (fkeys.constraint_schema = ind_cols.schemaname
and fkeys.table_name = ind_cols.tablename
and fkeys.column_name = ind_cols.column_name)
where ind_cols.indexname is null; --not sure it works
---
--count, sum, avg, min, max
select ename, sal,
sum(sal) over (order by sal, empno) as running_total
from emp
order by 2;
select ename, sal,
row_number() over (partition by sal order by sal),
rank() over (partition by sal order by sal),
dense_rank() over (partition by sal order by sal)
from emp
order by 2;
select ename, sal,
avg(sal) over (order by sal),
row_number() over (order by sal),
rank() over (order by sal),
dense_rank() over (order by sal)
from emp
order by 2;
select empno, ename, sal,
lag(sal) over (order by sal, empno) as lag1,
lag(sal, 2) over (order by sal, empno) as lag2,
lag(sal, 3) over (order by sal, empno) as lag3
from emp order by sal;
select empno, ename, sal, (lag1 + lag2 + lag3) * 0.33333 as avg3 from
(select empno, ename, sal,
lag(sal) over (order by sal, empno) as lag1,
lag(sal, 2) over (order by sal, empno) as lag2,
lag(sal, 3) over (order by sal, empno) as lag3
from emp order by sal) x;
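-- The same trailing three-row average can be written with a single window frame
-- instead of three lag() calls (a sketch):
select empno, ename, sal,
avg(sal) over (order by sal, empno
rows between 3 preceding and 1 preceding) as avg3
from emp
order by sal;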
--median
select percentile_cont(0.5)
within group(order by sal)
from emp
where deptno=20;
--dates
select ename, hiredate, hiredate + interval '1 year' as hireplus from emp;
-- generate
select generate_series as id from generate_series(1, 10);
--paging
select * from (
select *, row_number() over (order by sal) as rn
from emp) x where rn between 1 and 5;
select * from emp limit 5;
--min, max
select * from (
select *, min(sal) over() as minsal, max(sal) over() as maxsal
from emp) x
where sal in (minsal, maxsal);
--group and null - use *
select job, count(comm) from emp
group by job;
select job, count(*) from emp
group by job;
--window
select ename, deptno,
count(*) over() as cnt from emp
where deptno in (10,20)
order by 1;
select ename, deptno,
count(*) over(partition by deptno) as dcnt
from emp
order by deptno;
select ename,
deptno, count(*) over(partition by deptno) as dcnt,
job, count(*) over(partition by job) as jcnt,
count(*) over(partition by deptno,job) as djcnt
from emp
order by deptno, job;
-- window aggregates
select ename,
deptno, hiredate, sal,
sum(sal) over(partition by deptno) as pdnototal,
sum(sal) over() as overtotal,
sum(sal) over(order by hiredate) as running_total
from emp
where deptno = 10;
select ename,
deptno, hiredate, sal,
sum(sal) over(partition by deptno) as pdnototal,
sum(sal) over() as overtotal,
sum(sal) over(order by hiredate
range between unbounded preceding and current row) as running_total
from emp
where deptno = 10;
--with
with emp10 as (select * from emp where deptno = 10)
select * from emp10;
--SQL:2008
select * from emp
order by empno
OFFSET 5 rows
FETCH FIRST 10 ROWS ONLY;
select * from emp
order by empno;
--=====================================================================================
set search_path to masteringp13;
select now();
show timezone;
CREATE TABLE t_oil (
region text,
country text,
year int,
production int,
consumption int
);
COPY t_oil FROM PROGRAM 'curl https://www.cybertec-postgresql.com/secret/oil_ext.txt';
COPY t_oil FROM '/usr/share/import/oil_ext.txt';
select count(*) from t_oil;
select region, avg(production)
from t_oil group by region
order by 2 desc;
select region, avg(production)
from t_oil group by ROLLUP(region);
-- ROLLUP GROUP BY
SELECT region, country, avg(production)
FROM t_oil
WHERE country IN ('USA', 'Canada', 'Iran', 'Oman')
GROUP BY ROLLUP (region, country);
-- CUBE GROUP BY
SELECT region, country, avg(production)
FROM t_oil
WHERE country IN ('USA', 'Canada', 'Iran', 'Oman')
GROUP BY CUBE (region, country);
-- GROUPING SETS GROUP BY
SELECT region, country, avg(production)
FROM t_oil
WHERE country IN ('USA', 'Canada', 'Iran', 'Oman')
GROUP BY GROUPING SETS ( (), region, country);
-- with filter
SELECT region,
avg(production) AS all,
avg(production) FILTER (WHERE year < 1990) AS old,
avg(production) FILTER (WHERE year >= 1990) AS new
FROM t_oil
GROUP BY ROLLUP (region);
-- WITHIN GROUP
SELECT region,
percentile_disc(0.5) WITHIN GROUP (ORDER BY production)
FROM t_oil
GROUP BY 1;
SELECT region,
percentile_disc(0.5) WITHIN GROUP (ORDER BY production)
FROM t_oil
GROUP BY ROLLUP (1);
--
SELECT percentile_disc(0.62) WITHIN GROUP (ORDER BY id),
percentile_cont(0.62) WITHIN GROUP (ORDER BY id)
FROM generate_series(1, 5) AS id;
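-- here percentile_disc returns an existing value (4), while percentile_cont interpolates (3.48)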
-- mode for most frequent value
SELECT country, mode() WITHIN GROUP (ORDER BY production)
FROM t_oil
WHERE country = 'Other Middle East'
GROUP BY 1;
--
SELECT country, year, production, consumption,
avg(production) OVER (PARTITION BY country)
FROM t_oil;
--
SELECT year, production,
avg(production) OVER (PARTITION BY year < 1990)
FROM t_oil
WHERE country = 'Canada'
ORDER BY year;
--
SELECT country, year, production,
min(production) OVER (PARTITION BY country ORDER BY year)
FROM t_oil
WHERE year BETWEEN 1978 AND 1983
AND country IN ('Iran', 'Oman');
---
SELECT country, year, production,
min(production)
OVER (PARTITION BY country
ORDER BY year ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING)
FROM t_oil
WHERE year BETWEEN 1978 AND 1983
AND country IN ('Iran', 'Oman');
---
SELECT *, array_agg(id)
OVER (ORDER BY id ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING)
FROM generate_series(1, 5) AS id;
--
SELECT *,
array_agg(id) OVER (ORDER BY id ROWS BETWEEN
UNBOUNDED PRECEDING AND 0 FOLLOWING)
FROM generate_series(1, 5) AS id;
--
SELECT year,
production,
array_agg(production) OVER (ORDER BY year
ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING
EXCLUDE CURRENT ROW)
FROM t_oil
WHERE country = 'USA'
AND year < 1970;
--
SELECT country, year, production,
min(production) OVER (w),
max(production) OVER (w)
FROM t_oil
WHERE country = 'Canada'
AND year BETWEEN 1980
AND 1985
WINDOW w AS (ORDER BY year);
--
SELECT year, production,
rank() OVER (ORDER BY production)
FROM t_oil
WHERE country = 'Other Middle East'
ORDER BY rank
LIMIT 7;
SELECT year, production,
dense_rank() OVER (ORDER BY production)
FROM t_oil
WHERE country = 'Other Middle East'
ORDER BY dense_rank
LIMIT 7;
SELECT year, production,
ntile(4) OVER (ORDER BY production)
FROM t_oil
WHERE country = 'Iraq'
AND year BETWEEN 2000 AND 2006;
--duplicates
SELECT *
FROM (SELECT t_oil, lag(t_oil) OVER (ORDER BY year)
FROM t_oil
WHERE country = 'USA'
) AS x
WHERE t_oil = lag;
--=====================================================================================
CREATE SCHEMA masteringp13;
-- # psql -d playground -U postgres
set search_path to masteringp13,public;
--
SELECT count(*),
count(*) FILTER (WHERE id < 5),
count(*) FILTER (WHERE id > 2)
FROM generate_series(1, 10) AS id;
--
SELECT *
FROM generate_series(1, 4) AS x,
LATERAL (SELECT array_agg(y)
FROM generate_series(1, x) AS y
) AS z;
--
WITH x AS (SELECT avg(id)
FROM generate_series(1, 10) AS id)
SELECT *, y - (SELECT avg FROM x) AS diff
FROM generate_series(1, 10) AS y
WHERE y > (SELECT avg FROM x);
--missing indices
SELECT schemaname, relname, seq_scan, seq_tup_read,
idx_scan, seq_tup_read / seq_scan AS avg
FROM pg_stat_user_tables
WHERE seq_scan > 0
ORDER BY seq_tup_read DESC
LIMIT 20;
--
CREATE EXTENSION btree_gist with schema pg_catalog;
select * from pg_available_extensions where name like '%trg%';
select * from pg_available_extension_versions where name like '%gist%';
----
-- NOT NULL DEFAULT nextval('masteringp13.t_perf_insert_serial_id_seq'::regclass)
drop table t_uuid;
CREATE TABLE masteringp13.t_uuid
(
id UUID NOT NULL DEFAULT gen_random_uuid() PRIMARY KEY,
name text
);
insert into t_uuid (name) values ('name1'), ('name2');
select avg(id) from (
select gen_random_uuid() uuid, id from generate_series(1, 1000000) as id) x ;
insert into t_uuid (id, name) values ('5f998d32-b269-42b7-ad19-38ed08feb9fb', '1');
insert into t_uuid (id, name) values (gen_random_uuid(), 'name1'), (gen_random_uuid(), 'name2');
select * from t_uuid;
select gen_random_uuid();
----
show search_path;
---
SELECT pg_create_restore_point('my_daily_process_ended');
---
drop table t_perf_insert_assigned;
drop table t_perf_insert_serial;
---
create table t_perf_insert_assigned (
id bigint not null primary key,
name text
);
create table t_perf_insert_serial (
id bigserial not null primary key,
name text
);
select * from t_perf_insert_assigned limit 100;
select * from t_perf_insert_serial limit 100;
truncate t_perf_insert_assigned;
truncate t_perf_insert_serial;
insert into t_perf_insert_assigned
select id, 'something' from generate_series(1, 100000) id;
insert into t_perf_insert_serial (name)
select 'something' from generate_series(1, 100000);
select * from pg_stat_ssl;
--- cursors
select * from c(100);
CREATE OR REPLACE FUNCTION c(int)
RETURNS setof text AS
$$
DECLARE
v_rec record;
BEGIN
FOR v_rec IN SELECT tablename
FROM pg_tables
LIMIT $1
LOOP
RETURN NEXT v_rec.tablename;
END LOOP;
RETURN;
END;
$$ LANGUAGE 'plpgsql';
--- anonymous code blocks
DO
$$
BEGIN
RAISE NOTICE 'current time: %', now();
END;
$$ LANGUAGE 'plpgsql';
---
select error_test1(1,0);
---
CREATE FUNCTION error_test1(int, int) RETURNS int AS
$$
BEGIN
RAISE NOTICE 'debug message: % / %', $1, $2;
BEGIN
RETURN $1 / $2;
EXCEPTION
WHEN division_by_zero THEN
RAISE NOTICE 'division by zero detected: %', sqlerrm;
WHEN others THEN
RAISE NOTICE 'some other error: %', sqlerrm;
END;
RAISE NOTICE 'all errors handled';
RETURN 0;
END;
$$ LANGUAGE 'plpgsql';
----
show join_collapse_limit;
show work_mem;
EXPLAIN WITH x AS
(
SELECT *
FROM generate_series(1, 1000) AS id
)
SELECT *
FROM x AS a
JOIN x AS b ON (a.id = b.id)
JOIN x AS c ON (b.id = c.id)
JOIN x AS d ON (c.id = d.id)
JOIN x AS e ON (d.id = e.id)
JOIN x AS f ON (e.id = f.id);
----
SHOW config_file;
SELECT round((100 * total_exec_time / sum(total_exec_time)
OVER ())::numeric, 2) percent,
round(total_exec_time::numeric, 2) AS total,
calls,
round(mean_exec_time::numeric, 2) AS mean,
substring(query, 1, 40)
FROM pg_stat_statements
ORDER BY total_exec_time DESC
LIMIT 10;
select * from pg_stat_statements where query ilike '%ITEM%';
---- LOGS STATS
select * from pg_stat_activity;
select * from pg_stat_database;
select * from pg_stat_user_tables;
select * from pg_statio_user_tables;
-- which tables may need indexes
SELECT schemaname, relname, seq_scan, seq_tup_read,
seq_tup_read / seq_scan AS avg, idx_scan
FROM pg_stat_user_tables
WHERE seq_scan > 0
ORDER BY seq_tup_read DESC LIMIT 25;
--
select * from pg_stat_user_indexes;
--
SELECT schemaname, relname, indexrelname, idx_scan,
pg_size_pretty(pg_relation_size(indexrelid)) AS idx_size,
pg_size_pretty(sum(pg_relation_size(indexrelid))
OVER (ORDER BY idx_scan, indexrelid)) AS total
FROM pg_stat_user_indexes
ORDER BY 6 ;
--
select * from pg_stat_bgwriter;
select * from pg_stat_ssl;
-- transactions
select * from pg_stat_xact_user_tables;
-- vacuum
select * from pg_stat_progress_vacuum;
create extension pg_stat_statements;
select * from pg_stat_statements;
----
drop table t_test;
create table t_test(id serial, name text);
insert into t_test (name) select 'hans' from generate_series(1, 2000000) as id;
insert into t_test (name) select 'paul' from generate_series(1, 2000000) as id;
select name, count(*) from t_test group by 1;
EXPLAIN select * from t_test where id = 234569;
SELECT pg_size_pretty(pg_relation_size('t_test'));
SELECT pg_relation_size('t_test') / 8192; -- number of 8 kB pages (default block size)
SHOW cpu_tuple_cost;
SHOW cpu_operator_cost;
SHOW random_page_cost; -- consider setting to 1 for SSD
create index idx_id on t_test(id);
CREATE TABLE t_sva (sva text);
INSERT INTO t_sva VALUES ('1118090878');
INSERT INTO t_sva VALUES ('2345010477');
select sva, normalize_si(sva) from t_sva;
CREATE OR REPLACE FUNCTION normalize_si(text) RETURNS text AS $$
BEGIN
RETURN substring($1, 9, 2) ||
substring($1, 7, 2) ||
substring($1, 5, 2) ||
substring($1, 1, 4);
END; $$
LANGUAGE 'plpgsql' IMMUTABLE;
---
CREATE OR REPLACE FUNCTION si_lt(text, text) RETURNS boolean AS $$
BEGIN
RETURN normalize_si($1) < normalize_si($2);
END;
$$ LANGUAGE 'plpgsql' IMMUTABLE;
-- lower equals
CREATE OR REPLACE FUNCTION si_le(text, text)
RETURNS boolean AS
$$
BEGIN
RETURN normalize_si($1) <= normalize_si($2);
END;
$$
LANGUAGE 'plpgsql' IMMUTABLE;
-- greater equal
CREATE OR REPLACE FUNCTION si_ge(text, text)
RETURNS boolean AS
$$
BEGIN
RETURN normalize_si($1) >= normalize_si($2);
END;
$$
LANGUAGE 'plpgsql' IMMUTABLE;
-- greater
CREATE OR REPLACE FUNCTION si_gt(text, text)
RETURNS boolean AS
$$
BEGIN
RETURN normalize_si($1) > normalize_si($2);
END;
$$
LANGUAGE 'plpgsql' IMMUTABLE;
-- define operators
CREATE OPERATOR <# ( PROCEDURE=si_lt,
LEFTARG=text,
RIGHTARG=text);
CREATE OPERATOR <=# ( PROCEDURE=si_le,
LEFTARG=text,
RIGHTARG=text);
CREATE OPERATOR >=# ( PROCEDURE=si_ge,
LEFTARG=text,
RIGHTARG=text);
CREATE OPERATOR ># ( PROCEDURE=si_gt,
LEFTARG=text,
RIGHTARG=text);
CREATE OR REPLACE FUNCTION si_same(text, text) RETURNS int AS $$
BEGIN
IF normalize_si($1) < normalize_si($2)
THEN
RETURN -1;
ELSIF normalize_si($1) > normalize_si($2)
THEN
RETURN +1;
ELSE
RETURN 0;
END IF;
END;
$$ LANGUAGE 'plpgsql' IMMUTABLE;
CREATE OPERATOR CLASS sva_special_ops
FOR TYPE text USING btree
AS
OPERATOR 1 <# ,
OPERATOR 2 <=# ,
OPERATOR 3 = ,
OPERATOR 4 >=# ,
OPERATOR 5 ># ,
FUNCTION 1 si_same(text, text);
CREATE INDEX idx_special ON t_sva (sva sva_special_ops);
SET enable_seqscan TO off;
SET enable_seqscan TO on;
explain SELECT * FROM t_sva WHERE sva = '0000112273';
select * from pg_am;
select to_tsvector('a car car car cat cats sdfh hfgh dfgh русский русская');
SELECT cfgname FROM pg_ts_config;
select * from pg_stat_statements; -- ???
select * from pg_stat_user_tables;
call samplesjpaid.transfer_multiple();
select * from samplesjpaid.account where id in (1,2,3) FOR UPDATE;
select pg_advisory_lock(2);
select pg_advisory_unlock(2);
drop table if exists post_comment_details;
drop table if exists post_comment;
drop table if exists post_details;
drop table if exists post_tag;
drop table if exists post;
drop table if exists tag;
drop table if exists answer;
drop table if exists question;
drop table if exists cache_snapshot;
drop sequence if exists hibernate_sequence;
create table post (id int8 not null, title varchar(250), primary key (id));
create table post_comment (id int8 not null, review varchar(250), post_id int8, primary key (id));
create table post_details (id int8 not null, created_by varchar(250), created_on timestamp, updated_by varchar(250), updated_on timestamp, primary key (id));
create table post_tag (post_id int8 not null, tag_id int8 not null);
create table tag (id int8 not null, name varchar(50), primary key (id));
create table post_comment_details (id int8 not null, post_id int8 not null, user_id int8 not null, ip varchar(18) not null, fingerprint varchar(256), primary key (id));
create table question (id bigint not null, body text, created_on timestamp default now(), score integer not null default 0, title varchar(250), updated_on timestamp default now(), primary key (id));
create table answer (id bigint not null, accepted boolean not null default false, body text, created_on timestamp default now(), score integer not null default 0, updated_on timestamp default now(), question_id bigint, primary key (id));
create table cache_snapshot (region varchar(250), updated_on timestamp, primary key (region));
alter table post_comment add constraint post_comment_post_id foreign key (post_id) references post;
alter table post_details add constraint post_details_post_id foreign key (id) references post;
alter table post_tag add constraint post_tag_tag_id foreign key (tag_id) references tag;
alter table post_tag add constraint post_tag_post_id foreign key (post_id) references post;
alter table if exists answer add constraint answer_question_id foreign key (question_id) references question;
create sequence hibernate_sequence start with 1 increment by 1;
drop function if exists get_updated_questions_and_answers;
CREATE OR REPLACE FUNCTION get_updated_questions_and_answers()
RETURNS TABLE(
question_id bigint, question_title varchar(250), question_body text,
question_score integer, question_created_on timestamp, question_updated_on timestamp,
answer_id bigint, answer_body text, answer_accepted boolean,
answer_score integer, answer_created_on timestamp, answer_updated_on timestamp
)
LANGUAGE plpgsql
AS $$
DECLARE
previous_snapshot_timestamp timestamp;
max_snapshot_timestamp timestamp;
result_set_record record;
BEGIN
previous_snapshot_timestamp = (
SELECT
updated_on
FROM
cache_snapshot
WHERE
region = 'QA'
FOR NO KEY UPDATE
);
IF previous_snapshot_timestamp is null THEN
INSERT INTO cache_snapshot(
region,
updated_on
)
VALUES (
'QA',
to_timestamp(0)
);
previous_snapshot_timestamp = to_timestamp(0);
END IF;
max_snapshot_timestamp = to_timestamp(0);
FOR result_set_record IN(
SELECT
q1.id as question_id, q1.title as question_title,
q1.body as question_body, q1.score as question_score,
q1.created_on as question_created_on, q1.updated_on as question_updated_on,
a1.id as answer_id, a1.body as answer_body,
a1.accepted as answer_accepted, a1.score as answer_score,
a1.created_on as answer_created_on, a1.updated_on as answer_updated_on
FROM
question q1
LEFT JOIN
answer a1 on q1.id = a1.question_id
WHERE
q1.id IN (
SELECT q2.id
FROM question q2
WHERE
q2.updated_on > previous_snapshot_timestamp
) OR
q1.id IN (
SELECT a2.question_id
FROM answer a2
WHERE
a2.updated_on > previous_snapshot_timestamp
)
ORDER BY
question_created_on, answer_created_on
) loop
IF result_set_record.question_updated_on > max_snapshot_timestamp THEN
max_snapshot_timestamp = result_set_record.question_updated_on;
END IF;
IF result_set_record.answer_updated_on > max_snapshot_timestamp THEN
max_snapshot_timestamp = result_set_record.answer_updated_on;
END IF;
question_id = result_set_record.question_id;
question_title = result_set_record.question_title;
question_body = result_set_record.question_body;
question_score = result_set_record.question_score;
question_created_on = result_set_record.question_created_on;
question_updated_on = result_set_record.question_updated_on;
answer_id = result_set_record.answer_id;
answer_body = result_set_record.answer_body;
answer_accepted = result_set_record.answer_accepted;
answer_score = result_set_record.answer_score;
answer_created_on = result_set_record.answer_created_on;
answer_updated_on = result_set_record.answer_updated_on;
RETURN next;
END loop;
UPDATE
cache_snapshot
SET updated_on = max_snapshot_timestamp
WHERE
region = 'QA';
END
$$
;
drop function if exists set_updated_on_timestamp;
CREATE FUNCTION set_updated_on_timestamp()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_on = now();
RETURN NEW;
END
$$
language 'plpgsql'
;
drop trigger if exists question_set_updated_on_trigger on question;
drop trigger if exists answer_set_updated_on_trigger on answer;
-- fire on UPDATE only: in a BEFORE DELETE trigger NEW is not assigned, so
-- setting NEW.updated_on would error and the delete would not proceed
CREATE TRIGGER question_set_updated_on_trigger
BEFORE UPDATE ON question
FOR EACH ROW EXECUTE FUNCTION set_updated_on_timestamp();
CREATE TRIGGER answer_set_updated_on_trigger
BEFORE UPDATE ON answer
FOR EACH ROW EXECUTE FUNCTION set_updated_on_timestamp();
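-- quick check of the trigger (assumes a question row with id = 1 exists):
UPDATE question SET title = 'Updated title' WHERE id = 1;
SELECT updated_on FROM question WHERE id = 1;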
--=====================================================================
drop table if exists post cascade;
drop table if exists post_comment cascade;
drop sequence hibernate_sequence;
create sequence hibernate_sequence start 1 increment 1;
create table post (id int8 not null, title varchar(255), primary key (id));
create table post_comment (id int8 not null, created_on timestamp, review varchar(255), score int4 not null, parent_id int8, post_id int8, primary key (id));
alter table post_comment add constraint FKmqxhu8q0j94rcly3yxlv0u498 foreign key (parent_id) references post_comment;
alter table post_comment add constraint post_comment_post_id foreign key (post_id) references post;
drop function if exists get_post_comment_scores;
CREATE OR REPLACE FUNCTION get_post_comment_scores(postId bigint, rankId integer)
RETURNS REFCURSOR AS
$$
DECLARE
postComments REFCURSOR;
BEGIN
OPEN postComments FOR
SELECT id, parent_id, review, created_on, score
FROM (
SELECT
id, parent_id, review, created_on, score,
dense_rank() OVER (ORDER BY total_score DESC) rank
FROM (
SELECT
id, parent_id, review, created_on, score,
SUM(score) OVER (PARTITION BY root_id) total_score
FROM (
WITH RECURSIVE post_comment_score(id, root_id, post_id,
parent_id, review, created_on, score) AS (
SELECT
id, id, post_id, parent_id, review, created_on, score
FROM post_comment
WHERE post_id = postId AND parent_id IS NULL
UNION ALL
SELECT pc.id, pcs.root_id, pc.post_id, pc.parent_id,
pc.review, pc.created_on, pc.score
FROM post_comment pc
INNER JOIN post_comment_score pcs ON pc.parent_id = pcs.id
)
SELECT id, parent_id, root_id, review, created_on, score
FROM post_comment_score
) score_by_comment
) score_total
ORDER BY total_score DESC, id ASC
) total_score_group
WHERE rank <= rankId;
RETURN postComments;
END;
$$
language 'plpgsql'
;
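-- Usage sketch: a refcursor only lives inside a transaction, and the function
-- returns a generated portal name (the name below is illustrative):
BEGIN;
SELECT get_post_comment_scores(1, 3); -- returns e.g. "<unnamed portal 1>"
FETCH ALL IN "<unnamed portal 1>";
COMMIT;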
--=====================================================================
st.execute( "alter table post convert to character set sjis collate sjis_bin;" );
st.execute( "SET NAMES 'sjis';" );
/*st.execute( "alter table post convert to character set gbk collate gbk_bin;" );
st.execute( "SET character_set_client = 'gbk';" );
st.execute( "SET NAMES 'gbk';" );*/
/*st.execute( "alter table post convert to character set cp932 collate cp932_japanese_ci ;" );
st.execute( "SET character_set_client = 'cp932';" );
st.execute( "SET NAMES 'cp932';" );*/
//String sqlInjected = ((char)0xbf5c) + " or 1 >= ALL ( SELECT 1 FROM pg_locks, pg_sleep(10) ) --'";
String sqlInjected = ((char)0x815c) + " or 1 >= ALL ( SELECT 1 FROM pg_locks, pg_sleep(10) ) --'";
DROP SCHEMA public CASCADE;
CREATE SCHEMA public;
GRANT ALL ON SCHEMA public TO postgres;
GRANT ALL ON SCHEMA public TO public;
COMMENT ON SCHEMA public IS 'standard public schema';
return "SELECT tx.trx_id FROM information_schema.innodb_trx tx WHERE tx.trx_mysql_thread_id = connection_id()";
return "VALUES (TRANSACTION_ID())";
return """
SELECT RAWTOHEX(tx.xid)
FROM v$transaction tx
JOIN v$session s ON tx.addr=s.taddr
WHERE s.sid = sys_context('userenv','sid')
""" ;
return "SELECT CAST(pg_current_xact_id_if_assigned() AS text)";
--=====================================================================
SELECT SUBSTRING( LEFT( table_name, LENGTH( table_name )-1 ), 7 ) AS adso, -- ADSO technical name
mem_tot_gb, -- size in GB
rec_cnt -- record count
FROM ( SELECT table_name,
ROUND( SUM( (memory_size_in_total)/1024/1024/1024 ),2 ) AS mem_tot_gb,
SUM( record_count ) AS rec_cnt
FROM m_cs_tables
WHERE table_name LIKE '/BIC/A%2'
GROUP BY table_name
)
WHERE mem_tot_gb > <memory_limit_per_table>
ORDER BY mem_tot_gb DESC
--=====================================================================
SELECT
last_name,
first_name
FROM clients
WHERE client_id IN (
SELECT DISTINCT client_id
FROM drives
WHERE start_time >= CURRENT_DATE - INTERVAL '30 days'
);
SELECT DISTINCT
last_name,
first_name
FROM clients c
WHERE EXISTS (
SELECT 1
FROM drives d
WHERE d.client_id = c.client_id
AND d.start_time >= CURRENT_DATE - INTERVAL '30 days'
);
CREATE INDEX idx_drives_start_time ON drives (start_time);
--===========================================================
SELECT
c.client_id,
c.first_name,
c.last_name,
d.start_location,
d.end_location,
d.fare,
CASE
WHEN d.fare > 3000 THEN 'Дорогая'
ELSE 'Дешевая'
END AS fare_category
FROM clients AS c
JOIN drives AS d ON c.client_id = d.client_id
ORDER BY d.fare DESC;
--===========================================================
SELECT
c.first_name,
c.last_name,
SUM(d.fare) AS total_fare
FROM clients AS c
JOIN drives AS d ON c.client_id = d.client_id
GROUP BY c.first_name, c.last_name
HAVING SUM(d.fare) > 5000;
--===========================================================
SELECT
c.first_name,
c.last_name,
d.start_location,
d.end_location,
d.start_time,
ROW_NUMBER() OVER(PARTITION BY c.client_id ORDER BY d.start_time) AS trip_number
FROM clients AS c
JOIN drives AS d ON c.client_id = d.client_id;
--===========================================================
SELECT
d.drive_id,
client_id,
fare
FROM drives d
WHERE fare > (SELECT AVG(fare) FROM drives);
WITH avg_fare AS (
SELECT AVG(fare) AS avg_fare
FROM drives
)
SELECT
d.drive_id,
client_id,
fare
FROM drives d, avg_fare
WHERE d.fare > avg_fare.avg_fare;
--===========================================================
SELECT
c.first_name,
c.last_name,
COUNT(d.drive_id) AS num_of_trips
FROM clients AS c
JOIN drives AS d ON c.client_id = d.client_id
GROUP BY c.first_name, c.last_name
HAVING COUNT(d.drive_id) > (
SELECT AVG(num_trips)
FROM (SELECT client_id, COUNT(drive_id) AS num_trips FROM drives GROUP BY client_id) AS avg_trips
);
WITH avg_trips AS (
SELECT AVG(num_trips) AS avg_num_trips
FROM (SELECT client_id, COUNT(drive_id) AS num_trips FROM drives GROUP BY client_id) AS trips
)
SELECT
c.first_name,
c.last_name,
COUNT(d.drive_id) AS num_of_trips
FROM clients AS c
JOIN drives AS d ON c.client_id = d.client_id
GROUP BY c.first_name, c.last_name
HAVING COUNT(d.drive_id) > (SELECT avg_num_trips FROM avg_trips);
--===========================================================
SELECT
c.first_name,
c.last_name,
MAX(d.fare) AS max_fare,
SUM(d.fare) AS total_fare
FROM clients AS c
JOIN drives AS d ON c.client_id = d.client_id
GROUP BY c.first_name, c.last_name;
--===========================================================
SELECT
c.first_name,
c.last_name,
d.start_location,
d.end_location,
d.fare
FROM clients AS c
JOIN drives AS d ON c.client_id = d.client_id
WHERE 1 = 1
AND d.start_location = 'Москва'
AND c.last_name LIKE 'А%';
--===========================================================
CREATE SEQUENCE MTG_ID_SEQ
START WITH 200
INCREMENT BY 100;
CREATE TABLE MEETINGS (
ID NUMBER(19,0) NOT NULL PRIMARY KEY,
DESCRIPTION VARCHAR2(500) NOT NULL,
DATE_DATE TIMESTAMP NULL,
DATE_OFFSETDT TIMESTAMP NULL
);
INSERT INTO MEETINGS
(SELECT 1, 'H2 startup', sysdate, sysdate from dual);
INSERT INTO MEETINGS
(SELECT 2, 'H2 startup minus two days', sysdate-2, sysdate-2 from dual);
--===========================================================
Select S_NAME from STUDENT S where EXISTS
( select * from STUDENT_COURSE SC where S.S_ID=SC.S_ID and
SC.C_ID='C1');
--===========================================================
DROP SEQUENCE EMP_ID_SEQ;
CREATE SEQUENCE EMP_ID_SEQ
START WITH 2000
INCREMENT BY 1000;
CREATE TABLE EMPLOYEE (
ID NUMERIC(19,0) NOT NULL PRIMARY KEY,
EMAIL VARCHAR(50) NOT NULL,
FIRST_NAME VARCHAR(50) NOT NULL,
LAST_NAME VARCHAR(50) NOT NULL
);
CREATE TABLE EMPLOYEE_JSONB (
ID NUMERIC(19,0) NOT NULL PRIMARY KEY,
CONTENT JSONB
);
delete from EMPLOYEE_JSONB where 1=1;
insert into EMPLOYEE_JSONB (ID, CONTENT) VALUES (
1003001,
'{ "id": 1003001, "email": "[email protected]", "firstName": "Tyler", "lastName": "Lewis"}'::jsonb
);
select * from EMPLOYEE_JSONB;
CREATE INDEX idxgin ON EMPLOYEE_JSONB USING GIN (content);
select * from EMPLOYEE_JSONB where CONTENT @> '"ryan"'::jsonb;
select * from EMPLOYEE_JSONB where CONTENT::json->>'firstName' like '%yan%';
select * from EMPLOYEE_JSONB where CONTENT->>'firstName' like '%yan%';
select count(*) from EMPLOYEE_JSONB;
insert into EMPLOYEE (ID, EMAIL, FIRST_NAME, LAST_NAME) VALUES (123456789, 't@t1', '1', '11');
delete from EMPLOYEE where ID = 123456789;
drop table EMPLOYEE;
select * from EMPLOYEE order by ID ;
select count(*) from EMPLOYEE;
delete from EMPLOYEE where 1=1;
explain
select * from employee where email = '[email protected]';
create index EMAIL_IDX on EMPLOYEE(email);
explain
select * from employee where FIRST_NAME like '%tyler%';
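-- The btree EMAIL_IDX above cannot serve a leading-wildcard LIKE; a trigram
-- GIN index from the pg_trgm extension can (a sketch, index name is ours):
CREATE EXTENSION IF NOT EXISTS pg_trgm;
CREATE INDEX first_name_trgm_idx ON EMPLOYEE USING GIN (first_name gin_trgm_ops);
explain
select * from employee where FIRST_NAME like '%tyler%';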
--==========================================================================================
CREATE SEQUENCE EMP_ID_SEQ
START WITH 2000
INCREMENT BY 1000;
CREATE TABLE EMPLOYEE (
ID NUMERIC(19,0) NOT NULL PRIMARY KEY,
EMAIL VARCHAR(50) NOT NULL,
FIRST_NAME VARCHAR(50) NOT NULL,
LAST_NAME VARCHAR(50) NOT NULL
);
CREATE TABLE EMPLOYEE_JSONB (
ID NUMERIC(19,0) NOT NULL PRIMARY KEY,
CONTENT JSONB,
CONSTRAINT fk_employee
FOREIGN KEY(ID)
REFERENCES EMPLOYEE(ID)
);
CREATE INDEX idxgin ON EMPLOYEE_JSONB USING GIN (content);
create index EMAIL_IDX on EMPLOYEE(email);
--==========================================================================================
CREATE TABLE client
(
client_id BIGSERIAL PRIMARY KEY,
first_name TEXT NOT NULL,
last_name TEXT NOT NULL,
email TEXT NOT NULL UNIQUE,
CONSTRAINT name_not_empty CHECK (first_name !~ '\s' AND last_name !~ '\s'),
CONSTRAINT first_name_length CHECK (length(first_name) <= 512),
CONSTRAINT last_name_length CHECK (length(last_name) <= 512)
);
ALTER SEQUENCE client_client_id_seq INCREMENT BY 100 RESTART 100;
CREATE TABLE account
(
account_id BIGSERIAL PRIMARY KEY,
client_id BIGINT NOT NULL REFERENCES client(client_id),
available_amount NUMERIC(19,2) NOT NULL
);
ALTER SEQUENCE account_account_id_seq INCREMENT BY 100 RESTART 100;
CREATE INDEX account_client_id_idx ON account(client_id);
--=================================================================================================
SELECT cpe.entity_id, value AS name FROM catalog_product_entity cpe
INNER JOIN eav_attribute ea ON cpe.entity_type_id = ea.entity_type_id
INNER JOIN catalog_product_entity_varchar cpev ON ea.attribute_id = cpev.attribute_id AND cpe.entity_id = cpev.entity_id
WHERE ea.attribute_code = 'name'
--=================================================================================================
CREATE TABLE tst_eav AS
SELECT
(random() * 1e4)::integer e -- 10k entities
, (random() * 1e2)::integer a -- 100 attributes
, (random() * 1e2)::integer v -- 100 distinct values
FROM
generate_series(1, 1e6); -- 1M attribute-value rows
--=================================================================================================
SELECT
e
FROM
tst_eav r1
JOIN
tst_eav r2
USING(e)
WHERE
(r1.a, r1.v) = (1, 1) AND
(r2.a, r2.v) = (2, 2);
CREATE INDEX eav_idx1 ON tst_eav(a, v);
--=================================================================================================
SELECT
e
FROM
tst_eav
WHERE
(a, v) = (1, 1)
INTERSECT
SELECT
e
FROM
tst_eav
WHERE
(a, v) = (2, 2);
--=================================================================================================
SELECT
e
FROM
tst_eav
WHERE
(a, v) IN ((1, 1), (2, 2))
GROUP BY
e
HAVING
count(*) = 2; -- both conditions matched
--=================================================================================================
SELECT
e
FROM
tst_eav T
WHERE
(a, v) IN ((1, 1), (2, 2))
GROUP BY
e
HAVING
array_length(array_agg(DISTINCT T), 1) = 2; -- both distinct conditions matched
CREATE INDEX eav_idx2 ON tst_eav(a, v) INCLUDE(e);
--=================================================================================================
SELECT array_agg(column_name) FILTER (WHERE column_name IS NOT NULL) FROM table_name
--=================================================================================================
WITH orders AS (
SELECT 1 AS order_id, 101 AS product_id, 2 AS quantity
UNION ALL SELECT 1, 102, 1
UNION ALL SELECT 2, 103, 3
UNION ALL SELECT 2, 104, 1
UNION ALL SELECT 3, 101, 1
)
SELECT
order_id,
array_agg(product_id) AS products
FROM orders
GROUP BY order_id
ORDER BY order_id;
--=================================================================================================
WITH employees AS (
SELECT 1 AS emp_id, 'John' AS name, 'SQL' AS skill
UNION ALL SELECT 1, 'John', 'Python'
UNION ALL SELECT 1, 'John', 'Java'
UNION ALL SELECT 2, 'Jane', 'C++'
UNION ALL SELECT 2, 'Jane', 'Ruby'
)
SELECT
emp_id,
name,
array_agg(skill ORDER BY skill) AS skills
FROM employees
GROUP BY emp_id, name
ORDER BY emp_id;
--=================================================================================================
WITH sales(category, product, price, sale_date) AS (
VALUES
('Electronics', 'Laptop', 1200, '2023-01-15'::date),
('Electronics', 'Smartphone', 800, '2023-01-20'::date),
('Electronics', 'Tablet', 500, '2023-02-10'::date),
('Books', 'Novel', 20, '2023-02-05'::date),
('Books', 'Textbook', 100, '2023-02-15'::date),
('Books', 'Cookbook', 30, '2023-03-01'::date)
)
SELECT
category,
array_agg(
(SELECT product || ': ' || SUM(price)::text
FROM sales s2
WHERE s2.category = s1.category AND s2.product = s1.product
GROUP BY s2.product)
) AS product_sales
FROM sales s1
GROUP BY category;
--=================================================================================================
WITH sales(id, category, product, price, sale_date) AS (
VALUES
(1, 'Electronics', 'Laptop', 1200, '2023-01-15'::date),
(2, 'Electronics', 'Smartphone', 800, '2023-01-20'::date),
(3, 'Electronics', 'Tablet', 500, '2023-02-10'::date),
(4, 'Books', 'Novel', 20, '2023-02-05'::date),
(5, 'Books', 'Textbook', 100, '2023-02-15'::date),
(6, 'Books', 'Textbook', 200, '2023-02-15'::date),
(7, 'Books', 'Cookbook', 30, '2023-03-01'::date)
)
SELECT
category,
array_agg(
(SELECT product || ': ' || SUM(price)::text
FROM sales s2
WHERE s2.category = s1.category AND s2.id = s1.id
GROUP BY s2.product)
) AS product_sales
FROM sales s1
GROUP BY 1;
--=================================================================================================
WITH user_logins AS (
SELECT 1 AS user_id, 'Chrome' AS browser
UNION ALL SELECT 1, 'Firefox'
UNION ALL SELECT 1, 'Chrome'
UNION ALL SELECT 2, 'Safari'
UNION ALL SELECT 2, 'Chrome'
)
SELECT
user_id,
array_agg(DISTINCT browser ORDER BY browser) AS browsers_used
FROM user_logins
GROUP BY user_id;
--=================================================================================================
UPDATE customer c
SET customer_id = s.store_key
FROM dblink('port=5432 dbname=SERVER1 user=postgres password=309245'
, 'SELECT match_name, store_key FROM store')
AS s(match_name text, store_key integer)
WHERE c.match_name = s.match_name
AND c.customer_id IS DISTINCT FROM s.store_key;
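-- dblink is shipped as an extension and must be installed once per database:
CREATE EXTENSION IF NOT EXISTS dblink;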
--=================================================================================================
WITH accounts_to_delete AS
(
SELECT account_id
FROM accounts a
INNER JOIN customers c
ON a.customer_id = c.id
WHERE c.customer_name='Some Customer'
)
-- this fails if "Some Customer" has multiple accounts, but works if there's 1:
DELETE FROM accounts
WHERE accounts.guid =
(
SELECT account_id
FROM accounts_to_delete
);
-- this succeeds with any number of accounts:
DELETE FROM accounts
WHERE accounts.guid IN
(
SELECT account_id
FROM accounts_to_delete
);
--=================================================================================================
SELECT s.store_id,
s.name,
(SELECT e.employee_id
FROM employee e
WHERE e.store_id = s.store_id
AND e.rank_id = 'MANAGER'
ORDER BY e.last_name,
e.first_name,
e.middle_name
LIMIT 1
) AS employee_id
FROM store s
ORDER BY s.store_id
--=================================================================================================
SELECT s.store_id,
s.name,
(SELECT string_agg (
e.last_name || ' ' || e.first_name, '; '
ORDER BY e.last_name,
e.first_name
)
FROM employee e
WHERE e.store_id = s.store_id
AND e.rank_id = 'MANAGER'
) AS employees
FROM store s
ORDER BY s.store_id
--=================================================================================================
CREATE EXTENSION IF NOT EXISTS pxf;
CREATE TABLE sales_test (
id int
, date date
, amt decimal(10,2)
)
DISTRIBUTED BY (id);
INSERT INTO sales_test (id, "date", amt)
WITH test AS(
select
generate_series('2016-01-01'::date, '2022-01-01'::date, '1 day'::interval) AS date
)
SELECT
to_char(date, 'YYYYMMDD')::integer AS id
, date
, (
random() * 1000
)::int + 1 AS amt
FROM
test;
SELECT
*
FROM sales_test
LIMIT 100;
--2193
SELECT
COUNT(*)
FROM sales_test
LIMIT 100;
CREATE WRITABLE EXTERNAL TABLE sale_test_ext_text_write
(LIKE sales_test)
LOCATION ('pxf://test-bucket/sale-test?PROFILE=s3:text&SERVER=default&COMPRESSION_CODEC=org.apache.hadoop.io.compress.GzipCodec' )
ON ALL FORMAT 'TEXT' ( delimiter=',' ) ENCODING 'UTF8';
INSERT INTO sale_test_ext_text_write
SELECT * FROM sales_test;
CREATE EXTERNAL TABLE sale_test_ext_text_read (LIKE sales_test)
LOCATION ('pxf://test-bucket/sale-test?PROFILE=s3:text&SERVER=default&COMPRESSION_CODEC=org.apache.hadoop.io.compress.GzipCodec' )
ON ALL FORMAT 'TEXT' ( delimiter=',' ) ENCODING 'UTF8';
SELECT
COUNT(*)
FROM sale_test_ext_text_read;
SELECT
*
FROM sale_test_ext_text_read
LIMIT 100;
DROP EXTERNAL table sale_test_ext_text_write cascade;
DROP EXTERNAL table sale_test_ext_text_read cascade;
--=================================================================================================
create schema marts_check;
CREATE TABLE marts_check.dim_asset (
id int NOT NULL,
"domain" text NULL,
"key" text NULL,
group1 text NULL,
group2 text NULL,
group3 text NULL,
"name" text NULL,
"desc" text NULL,
schema_fields jsonb NULL,
asset_dicts jsonb NULL,
"_created_dttm" timestamptz NOT NULL,
CONSTRAINT pk__dim_asset PRIMARY KEY (id)
);
CREATE INDEX ix__dim_asset_asset_group_asset_key ON marts_check.dim_asset USING btree (domain, key);
CREATE TABLE marts_check.fact_asset_data (
id serial4 NOT NULL,
dim_asset_id int4 NULL,
measure_fields jsonb NULL,
dim_fields jsonb NULL,
is_actual bool NULL DEFAULT true,
date_from timestamptz NOT NULL,
date_to timestamptz NOT NULL,
dqp_engine_id int4 NULL,
"_created_dttm" timestamptz NOT NULL DEFAULT now(),
"_updated_dttm" timestamptz NOT NULL DEFAULT now(),
"_delete_after_dttm" timestamptz NULL,
dqp_engine_try_number int4 NULL,
CONSTRAINT pk__fact_asset_data PRIMARY KEY (id)
);
ALTER TABLE marts_check.fact_asset_data ADD CONSTRAINT fk__fact_asset_data__dim_asset_id__dim_asset FOREIGN KEY (dim_asset_id) REFERENCES marts_check.dim_asset(id);
INSERT INTO marts_check.dim_asset (id, "domain","key",group1,group2,group3,"name","desc",schema_fields,asset_dicts,"_created_dttm") VALUES
(1,'demo','afff2a23-22dc-46ae-bc7a-d9048fe2062c','leader','adb','asset gp','leader last week tunover and count','Week asset for leader project','{"dimDate": "opened_date", "measures": {"turnover_gp": {"desc": "Count of object in ADB for objects", "pgType": "Numeric"}, "records_count_gp": {"desc": "Count of object in ADB for objects", "pgType": "integer"}}, "dimensions": null}','null','2023-06-15 12:16:13.160'),
(2,'demo','5a216828-668c-407a-9a80-835242af5800','leader','click house','leader asset click','leader last week tunover and count','Week asset for leader project','{"dimDate": "opened_date", "measures": {"turnover_click": {"desc": "Count of object in clickhouse for objects", "pgType": "Numeric"}, "records_count_click": {"desc": "Count of object in clickhouse for objects", "pgType": "integer"}}, "dimensions": null}','null','2023-06-15 13:19:50.611'),
(3,'demo','01dee8a2-d0f8-4dbc-b34b-1c2c777c5020','leader','adb','hi42_conquest','Agg hi42_conquest','Aggregation hi42_conquest','{"dimDate": "dat_vte", "measures": {"sales_taxes_included": {"desc": "Count of object in ADB for objects", "pgType": "Numeric"}}, "dimensions": null}','null','2023-06-15 13:20:14.188');
INSERT INTO marts_check.fact_asset_data (dim_asset_id,measure_fields,dim_fields,is_actual,date_from,date_to,dqp_engine_id,"_created_dttm","_updated_dttm","_delete_after_dttm",dqp_engine_try_number) VALUES
(1,'{"turnover_gp": {"desc": "Count of object in ADB for objects", "value": 100, "pgType": "Numeric"}, "records_count_gp": {"desc": "Count of object in ADB for objects", "value": 1000, "pgType": "integer"}}','null',true,'2023-06-02 00:00:00.000','2023-06-03 00:00:00.000',1,'2023-06-15 13:19:21.571','2023-06-15 13:19:21.571','2024-06-14 13:19:22.648',2),
(1,'{"turnover_gp": {"de.sc": "Count of object in ADB for objects", "value": 101, "pgType": "Numeric"}, "records_count_gp": {"desc": "Count of object in ADB for objects", "value": 1001, "pgType": "integer"}}','null',true,'2023-06-01 00:00:00.000','2023-06-02 00:00:00.000',1,'2023-06-15 13:19:21.571','2023-06-15 13:19:21.571','2024-06-14 13:19:22.650',2),
(1,'{"turnover_gp": {"desc": "Count of object in ADB for objects", "value": 102, "pgType": "Numeric"}, "records_count_gp": {"desc": "Count of object in ADB for objects", "value": 1002, "pgType": "integer"}}','null',true,'2023-06-05 00:00:00.000','2023-06-06 00:00:00.000',1,'2023-06-15 13:19:21.571','2023-06-15 13:19:21.571','2024-06-14 13:19:22.650',2),
(1,'{"turnover_gp": {"desc": "Count of object in ADB for objects", "value": 103, "pgType": "Numeric"}, "records_count_gp": {"desc": "Count of object in ADB for objects", "value": 1003, "pgType": "integer"}}','null',true,'2023-06-03 00:00:00.000','2023-06-04 00:00:00.000',1,'2023-06-15 13:19:21.571','2023-06-15 13:19:21.571','2024-06-14 13:19:22.650',2),
(1,'{"turnover_gp": {"desc": "Count of object in ADB for objects", "value": 104, "pgType": "Numeric"}, "records_count_gp": {"desc": "Count of object in ADB for objects", "value": 1004, "pgType": "integer"}}','null',true,'2023-06-07 00:00:00.000','2023-06-08 00:00:00.000',1,'2023-06-15 13:19:21.571','2023-06-15 13:19:21.571','2024-06-14 13:19:22.650',2),
(1,'{"turnover_gp": {"desc": "Count of object in ADB for objects", "value": 105, "pgType": "Numeric"}, "records_count_gp": {"desc": "Count of object in ADB for objects", "value": 1005, "pgType": "integer"}}','null',true,'2023-06-06 00:00:00.000','2023-06-07 00:00:00.000',1,'2023-06-15 13:19:21.571','2023-06-15 13:19:21.571','2024-06-14 13:19:22.650',2),
(1,'{"turnover_gp": {"desc": "Count of object in ADB for objects", "value": 106, "pgType": "Numeric"}, "records_count_gp": {"desc": "Count of object in ADB for objects", "value": 1006, "pgType": "integer"}}','null',true,'2023-06-11 00:00:00.000','2023-06-12 00:00:00.000',1,'2023-06-15 13:19:21.571','2023-06-15 13:19:21.571','2024-06-14 13:19:22.652',2),
(1,'{"turnover_gp": {"desc": "Count of object in ADB for objects", "value": 107, "pgType": "Numeric"}, "records_count_gp": {"desc": "Count of object in ADB for objects", "value": 1007, "pgType": "integer"}}','null',true,'2023-06-09 00:00:00.000','2023-06-10 00:00:00.000',1,'2023-06-15 13:19:21.571','2023-06-15 13:19:21.571','2024-06-14 13:19:22.652',2),
(1,'{"turnover_gp": {"desc": "Count of object in ADB for objects", "value": 108, "pgType": "Numeric"}, "records_count_gp": {"desc": "Count of object in ADB for objects", "value": 1008, "pgType": "integer"}}','null',true,'2023-06-10 00:00:00.000','2023-06-11 00:00:00.000',1,'2023-06-15 13:19:21.571','2023-06-15 13:19:21.571','2024-06-14 13:19:22.652',2),
(1,'{"turnover_gp": {"desc": "Count of object in ADB for objects", "value": 109, "pgType": "Numeric"}, "records_count_gp": {"desc": "Count of object in ADB for objects", "value": 1009, "pgType": "integer"}}','null',true,'2023-06-12 00:00:00.000','2023-06-13 00:00:00.000',1,'2023-06-15 13:19:21.571','2023-06-15 13:19:21.571','2024-06-14 13:19:22.652',2);
INSERT INTO marts_check.fact_asset_data (dim_asset_id,measure_fields,dim_fields,is_actual,date_from,date_to,dqp_engine_id,"_created_dttm","_updated_dttm","_delete_after_dttm",dqp_engine_try_number) VALUES
(2,'{"turnover_click": {"desc": "Count of object in clickhouse for objects", "value": 100, "pgType": "Numeric"}, "records_count_click": {"desc": "Count of object in clickhouse for objects", "value": 1000, "pgType": "integer"}}','null',true,'2023-06-01 00:00:00.000','2023-06-02 00:00:00.000',2,'2023-06-15 13:19:50.661','2023-06-15 13:19:50.661','2024-06-14 13:19:53.531',1),
(2,'{"turnover_click": {"desc": "Count of object in clickhouse for objects", "value": 101, "pgType": "Numeric"}, "records_count_click": {"desc": "Count of object in clickhouse for objects", "value": 1001, "pgType": "integer"}}','null',true,'2023-06-02 00:00:00.000','2023-06-03 00:00:00.000',2,'2023-06-15 13:19:50.661','2023-06-15 13:19:50.661','2024-06-14 13:19:53.531',1),
(2,'{"turnover_click": {"desc": "Count of object in clickhouse for objects", "value": 101, "pgType": "Numeric"}, "records_count_click": {"desc": "Count of object in clickhouse for objects", "value": 1002, "pgType": "integer"}}','null',true,'2023-06-03 00:00:00.000','2023-06-04 00:00:00.000',2,'2023-06-15 13:19:50.661','2023-06-15 13:19:50.661','2024-06-14 13:19:53.531',1),
(2,'{"turnover_click": {"desc": "Count of object in clickhouse for objects", "value": 104, "pgType": "Numeric"}, "records_count_click": {"desc": "Count of object in clickhouse for objects", "value": 1003, "pgType": "integer"}}','null',true,'2023-06-04 00:00:00.000','2023-06-05 00:00:00.000',2,'2023-06-15 13:19:50.661','2023-06-15 13:19:50.661','2024-06-14 13:19:53.531',1),
(2,'{"turnover_click": {"desc": "Count of object in clickhouse for objects", "value": 104, "pgType": "Numeric"}, "records_count_click": {"desc": "Count of object in clickhouse for objects", "value": 1004, "pgType": "integer"}}','null',true,'2023-06-05 00:00:00.000','2023-06-06 00:00:00.000',2,'2023-06-15 13:19:50.661','2023-06-15 13:19:50.661','2024-06-14 13:19:53.531',1),
(2,'{"turnover_click": {"desc": "Count of object in clickhouse for objects", "value": 105, "pgType": "Numeric"}, "records_count_click": {"desc": "Count of object in clickhouse for objects", "value": 1005, "pgType": "integer"}}','null',true,'2023-06-06 00:00:00.000','2023-06-07 00:00:00.000',2,'2023-06-15 13:19:50.661','2023-06-15 13:19:50.661','2024-06-14 13:19:53.531',1),
(2,'{"turnover_click": {"desc": "Count of object in clickhouse for objects", "value": 106, "pgType": "Numeric"}, "records_count_click": {"desc": "Count of object in clickhouse for objects", "value": 1006, "pgType": "integer"}}','null',true,'2023-06-07 00:00:00.000','2023-06-08 00:00:00.000',2,'2023-06-15 13:19:50.661','2023-06-15 13:19:50.661','2024-06-14 13:19:53.531',1),
(2,'{"turnover_click": {"desc": "Count of object in clickhouse for objects", "value": 107, "pgType": "Numeric"}, "records_count_click": {"desc": "Count of object in clickhouse for objects", "value": 1007, "pgType": "integer"}}','null',true,'2023-06-08 00:00:00.000','2023-06-09 00:00:00.000',2,'2023-06-15 13:19:50.661','2023-06-15 13:19:50.661','2024-06-14 13:19:53.531',1),
(2,'{"turnover_click": {"desc": "Count of object in clickhouse for objects", "value": 108, "pgType": "Numeric"}, "records_count_click": {"desc": "Count of object in clickhouse for objects", "value": 9999, "pgType": "integer"}}','null',true,'2023-06-09 00:00:00.000','2023-06-10 00:00:00.000',2,'2023-06-15 13:19:50.661','2023-06-15 13:19:50.661','2024-06-14 13:19:53.531',1);
INSERT INTO marts_check.fact_asset_data (dim_asset_id,measure_fields,dim_fields,is_actual,date_from,date_to,dqp_engine_id,"_created_dttm","_updated_dttm","_delete_after_dttm",dqp_engine_try_number) VALUES
(3,'{"sales_taxes_included": {"desc": "Count of object in ADB for objects", "value": 87, "pgType": "Numeric"}}','null',true,'2023-05-31 00:00:00.000','2023-06-01 00:00:00.000',3,'2023-06-15 13:20:14.231','2023-06-15 13:20:14.231','2024-06-14 13:20:15.152',1),
(3,'{"sales_taxes_included": {"desc": "Count of object in ADB for objects", "value": 88, "pgType": "Numeric"}}','null',true,'2023-06-04 00:00:00.000','2023-06-05 00:00:00.000',3,'2023-06-15 13:20:14.231','2023-06-15 13:20:14.231','2024-06-14 13:20:15.154',1),
(3,'{"sales_taxes_included": {"desc": "Count of object in ADB for objects", "value": 89.19, "pgType": "Numeric"}}','null',true,'2023-06-09 00:00:00.000','2023-06-10 00:00:00.000',3,'2023-06-15 13:20:14.231','2023-06-15 13:20:14.231','2024-06-14 13:20:15.154',1),
(3,'{"sales_taxes_included": {"desc": "Count of object in ADB for objects", "value": 90, "pgType": "Numeric"}}','null',true,'2023-06-06 00:00:00.000','2023-06-07 00:00:00.000',3,'2023-06-15 13:20:14.231','2023-06-15 13:20:14.231','2024-06-14 13:20:15.154',1),
(3,'{"sales_taxes_included": {"desc": "Count of object in ADB for objects", "value": 91, "pgType": "Numeric"}}','null',true,'2023-06-13 00:00:00.000','2023-06-14 00:00:00.000',3,'2023-06-15 13:20:14.231','2023-06-15 13:20:14.231','2024-06-14 13:20:15.154',1),
(3,'{"sales_taxes_included": {"desc": "Count of object in ADB for objects", "value": 92, "pgType": "Numeric"}}','null',true,'2023-06-07 00:00:00.000','2023-06-08 00:00:00.000',3,'2023-06-15 13:20:14.231','2023-06-15 13:20:14.231','2024-06-14 13:20:15.154',1),
(3,'{"sales_taxes_included": {"desc": "Count of object in ADB for objects", "value": 93, "pgType": "Numeric"}}','null',true,'2023-06-01 00:00:00.000','2023-06-02 00:00:00.000',3,'2023-06-15 13:20:14.231','2023-06-15 13:20:14.231','2024-06-14 13:20:15.156',1),
(3,'{"sales_taxes_included": {"desc": "Count of object in ADB for objects", "value": 94, "pgType": "Numeric"}}','null',true,'2023-06-02 00:00:00.000','2023-06-03 00:00:00.000',3,'2023-06-15 13:20:14.231','2023-06-15 13:20:14.231','2024-06-14 13:20:15.156',1),
(3,'{"sales_taxes_included": {"desc": "Count of object in ADB for objects", "value": 95, "pgType": "Numeric"}}','null',true,'2023-06-11 00:00:00.000','2023-06-12 00:00:00.000',3,'2023-06-15 13:20:14.231','2023-06-15 13:20:14.231','2024-06-14 13:20:15.156',1),
(3,'{"sales_taxes_included": {"desc": "Count of object in ADB for objects", "value": 96, "pgType": "Numeric"}}','null',true,'2023-06-05 00:00:00.000','2023-06-06 00:00:00.000',3,'2023-06-15 13:20:14.231','2023-06-15 13:20:14.231','2024-06-14 13:20:15.156',1);
/* Compare assets 'afff2a23-22dc-46ae-bc7a-d9048fe2062c', '5a216828-668c-407a-9a80-835242af5800', '01dee8a2-d0f8-4dbc-b34b-1c2c777c5020' */
SELECT
fad.date_from AS date_actual,
max((fad.measure_fields->'turnover_gp'->>'value')::numeric(18,2)) filter(WHERE da."key" = 'afff2a23-22dc-46ae-bc7a-d9048fe2062c') AS turnover_gp,
max((fad.measure_fields->'turnover_click'->>'value')::numeric(18,2)) filter(WHERE da."key" = '5a216828-668c-407a-9a80-835242af5800') AS turnover_click,
max((fad.measure_fields->'sales_taxes_included'->>'value')::numeric(18,2)) filter(WHERE da."key" = '01dee8a2-d0f8-4dbc-b34b-1c2c777c5020') AS sales_taxes_included
FROM
marts_check.dim_asset da
INNER JOIN marts_check.fact_asset_data fad
ON da.id = fad.dim_asset_id
WHERE
da."key" IN ('afff2a23-22dc-46ae-bc7a-d9048fe2062c', '5a216828-668c-407a-9a80-835242af5800', '01dee8a2-d0f8-4dbc-b34b-1c2c777c5020')
AND fad.is_actual
GROUP BY date_actual
--=========================================================================================
CREATE TABLE test_table(
opened_date DATE NOT NULL,
barcode VARCHAR(100) NOT NULL,
created_dttm TIMESTAMP NOT NULL DEFAULT CLOCK_TIMESTAMP()
)
WITH (
appendonly=true,
compresslevel=1,
orientation=column,
compresstype=zstd
)
DISTRIBUTED BY (barcode)
PARTITION BY RANGE (opened_date) (DEFAULT PARTITION other);
--=========================================================================================
CREATE TABLE test_write (
id UInt64,
host_name String,
event_type String,
event_count Int64
) ENGINE = MergeTree()
ORDER BY (id);
SELECT count(*)
FROM default.test_write
--=========================================================================================
/*DROP TABLE queue;*/
CREATE TABLE queue (
host_name String,
host_role String,
id UInt64,
host_time String
) ENGINE = Kafka('kafka:29092', 'worker_live_status_clickhouse', 'group', 'JSONEachRow');
/*DROP TABLE stat_queue; */
CREATE TABLE stat_queue (
host_time String,
host_name String,
host_role String,
id UInt64
) ENGINE = MergeTree()
ORDER BY (id);
CREATE MATERIALIZED VIEW host_status TO stat_queue
AS SELECT host_time, host_name, host_role, id
FROM queue;
SELECT count(*)
FROM default.stat_queue
--=========================================================================================
SELECT
b.id as blog_id,
age_in_years,
date(
created_on + (age_in_years + 1) * interval '1 year'
) AS next_anniversary,
date(
created_on + (age_in_years + 1) * interval '1 year'
) - date(now()) AS days_to_next_anniversary
FROM blog b
CROSS JOIN LATERAL (
SELECT
cast(
extract(YEAR FROM age(now(), b.created_on)) AS int
) AS age_in_years
) AS t
ORDER BY blog_id
--=========================================================================================
SELECT
b.id as blog_id,
extract(
YEAR FROM age(now(), b.created_on)
) AS age_in_years,
date(
created_on + (
extract(YEAR FROM age(now(), b.created_on)) + 1
) * interval '1 year'
) AS next_anniversary,
date(
created_on + (
extract(YEAR FROM age(now(), b.created_on)) + 1
) * interval '1 year'
) - date(now()) AS days_to_next_anniversary
FROM blog b
ORDER BY blog_id
--=========================================================================================
SELECT
(NULL, NULL) IS NULL as "(NULL, NULL) IS NULL", --true
(NULL, NULL) IS NOT NULL as "(NULL, NULL) IS NOT NULL", --false
NOT (NULL, NULL) IS NULL as "NOT (NULL, NULL) IS NULL", --false
(1, NULL) IS NULL as "(1, NULL) IS NULL", --false
(1, NULL) IS NOT NULL as "(1, NULL) IS NOT NULL", --false --!!!
NOT (1, NULL) IS NULL as "NOT (1, NULL) IS NULL" --true --!!!
--=========================================================================================
Physical replication

Create a network and note its address

docker network create pgnet
docker network inspect pgnet | grep Subnet # remember the subnet mask

Start the master

docker run -dit -v "$PWD/volumes/pgmaster/:/var/lib/postgresql/data" -e POSTGRES_PASSWORD=pass -p "5432:5432" --restart=unless-stopped --network=pgnet --name=pgmaster postgres

Edit postgresql.conf on the master

ssl = off
wal_level = replica
max_wal_senders = 4 # expected number of replicas

Connect to the master and create a replication user

docker exec -it pgmaster su - postgres -c psql
create role replicator with login replication password 'pass';
exit

Add an entry to pgmaster/pg_hba.conf using the subnet from the first step

host    replication     replicator       __SUBNET__          md5

Restart the master

docker restart pgmaster

Take a base backup for the replicas

docker exec -it pgmaster bash
mkdir /pgslave
pg_basebackup -h pgmaster -D /pgslave -U replicator -v -P --wal-method=stream
exit

Copy the directory to the host

docker cp pgmaster:/pgslave volumes/pgslave/

Create the marker file that tells the replica it is a standby

touch volumes/pgslave/standby.signal

Edit postgresql.conf on replica pgslave

primary_conninfo = 'host=pgmaster port=5432 user=replicator password=pass application_name=pgslave'

Start replica pgslave

docker run -dit -v "$PWD/volumes/pgslave/:/var/lib/postgresql/data" -e POSTGRES_PASSWORD=pass -p "15432:5432" --network=pgnet --restart=unless-stopped --name=pgslave postgres

Start a second replica, pgasyncslave

Copy the backup

docker cp pgmaster:/pgslave volumes/pgasyncslave/

Edit pgasyncslave/postgresql.conf

primary_conninfo = 'host=pgmaster port=5432 user=replicator password=pass application_name=pgasyncslave'

Mark it as a standby

touch volumes/pgasyncslave/standby.signal

Start replica pgasyncslave

docker run -dit -v "$PWD/volumes/pgasyncslave/:/var/lib/postgresql/data" -e POSTGRES_PASSWORD=pass -p "25432:5432" --network=pgnet --restart=unless-stopped --name=pgasyncslave postgres

Verify on pgmaster that both replicas run in asynchronous mode

docker exec -it pgmaster su - postgres -c psql
select application_name, sync_state from pg_stat_replication;
exit;

Enable synchronous replication on pgmaster

Edit pgmaster/postgresql.conf

synchronous_commit = on
synchronous_standby_names = 'FIRST 1 (pgslave, pgasyncslave)'

Reload the config

docker exec -it pgmaster su - postgres -c psql
select pg_reload_conf();
exit;

Verify that the replica became synchronous

docker exec -it pgmaster su - postgres -c psql
select application_name, sync_state from pg_stat_replication;
exit;

Create a test table on pgmaster and check replication

docker exec -it pgmaster su - postgres -c psql
create table test(id bigint primary key not null);
insert into test(id) values(1);
select * from test;
exit;

Check that the data arrived on pgslave

docker exec -it pgslave su - postgres -c psql
select * from test;
exit;

Check that the data arrived on pgasyncslave

docker exec -it pgasyncslave su - postgres -c psql
select * from test;
exit;

Try an insert on pgslave (this fails: a standby is read-only)

docker exec -it pgslave su - postgres -c psql
insert into test(id) values(2);
exit;

Take replica pgasyncslave down and check that pgmaster and pgslave keep working

docker stop pgasyncslave
docker exec -it pgmaster su - postgres -c psql
select application_name, sync_state from pg_stat_replication;
insert into test(id) values(2);
select * from test;
exit;
docker exec -it pgslave su - postgres -c psql
select * from test;
exit;

Take replica pgslave down, check pgmaster, then bring pgslave back
(with no synchronous standby left, the insert blocks until pgslave is started again from the second terminal)

terminal 1

docker stop pgslave
docker exec -it pgmaster su - postgres -c psql
select application_name, sync_state from pg_stat_replication;
insert into test(id) values(3);
exit;

terminal 2

docker start pgslave

Bring back the second replica pgasyncslave

docker start pgasyncslave

Kill the master pgmaster

docker stop pgmaster

Promote replica pgslave

docker exec -it pgslave su - postgres -c psql
select pg_promote();
exit;

Try writing to the new master pgslave

docker exec -it pgslave su - postgres -c psql
insert into test(id) values(4);
exit;

Configure replication on pgslave (pgslave/postgresql.conf)

edit the config

synchronous_commit = on
synchronous_standby_names = 'ANY 1 (pgmaster, pgasyncslave)'

reload the config

docker exec -it pgslave su - postgres -c psql
select pg_reload_conf();
exit;

Point the second replica pgasyncslave at the new master pgslave

edit pgasyncslave/postgresql.conf

primary_conninfo = 'host=pgslave port=5432 user=replicator password=pass application_name=pgasyncslave'

reload the config

docker exec -it pgasyncslave su - postgres -c psql
select pg_reload_conf();
exit;

Verify that a replica is attached to the new master pgslave and replication works

docker exec -it pgslave su - postgres -c psql
select application_name, sync_state from pg_stat_replication;
insert into test(id) values (5);
select * from test;
exit;
docker exec -it pgasyncslave su - postgres -c psql
select * from test;
exit;

Re-attach the old master pgmaster as a replica

Mark it as a standby

touch volumes/pgmaster/standby.signal

Edit pgmaster/postgresql.conf

primary_conninfo = 'host=pgslave port=5432 user=replicator password=pass application_name=pgmaster'

Start pgmaster

docker start pgmaster

Verify that pgmaster attached to pgslave as a replica

docker exec -it pgslave su - postgres -c psql
select application_name, sync_state from pg_stat_replication;
exit;

Logical replication

Change wal_level on the current master pgslave

Edit pgslave/postgresql.conf

wal_level = logical

Restart pgslave

docker restart pgslave

Create a publication on pgslave

docker exec -it pgslave su - postgres -c psql
GRANT CONNECT ON DATABASE postgres TO replicator;
GRANT SELECT ON ALL TABLES IN SCHEMA public TO replicator;
create publication pg_pub for table test;
exit;

Create a new server pgstandalone for logical replication

docker run -dit -v "$PWD/volumes/pgstandalone/:/var/lib/postgresql/data" -e POSTGRES_PASSWORD=pass -p "35432:5432" --restart=unless-stopped --network=pgnet --name=pgstandalone postgres

Copy the role and schema dumps from pgslave to pgstandalone and restore them

docker exec -it pgslave su - postgres
pg_dumpall -U postgres -r -h pgslave -f /var/lib/postgresql/roles.dmp
pg_dump -U postgres -Fc -h pgslave -f /var/lib/postgresql/schema.dmp -s postgres
exit;

docker cp pgslave:/var/lib/postgresql/roles.dmp .
docker cp roles.dmp pgstandalone:/var/lib/postgresql/roles.dmp
docker cp pgslave:/var/lib/postgresql/schema.dmp .
docker cp schema.dmp pgstandalone:/var/lib/postgresql/schema.dmp

docker exec -it pgstandalone su - postgres
psql -f roles.dmp
pg_restore -d postgres -C schema.dmp
exit

Create a subscription on pgstandalone

docker exec -it pgstandalone su - postgres -c psql
CREATE SUBSCRIPTION pg_sub CONNECTION 'host=pgslave port=5432 user=replicator password=pass dbname=postgres' PUBLICATION pg_pub;
exit;

Verify that replication is running

docker exec -it pgstandalone su - postgres -c psql
select * from test;
exit;

Create a data conflict

Insert data on the subscriber pgstandalone

docker exec -it pgstandalone su - postgres -c psql
insert into test values(9);
exit;

Insert data on the publisher pgslave

docker exec -it pgslave su - postgres -c psql
insert into test values(9);
insert into test values(10);
exit;

Verify that the row with id 10 did not arrive on pgstandalone

docker exec -it pgstandalone su - postgres -c psql
select * from test;
exit;

Look at the pgstandalone logs and confirm that replication broke

docker logs pgstandalone

2023-03-27 16:15:02.753 UTC [258] ERROR:  duplicate key value violates unique constraint "test_pkey"
2023-03-27 16:15:02.753 UTC [258] DETAIL:  Key (id)=(9) already exists.
2023-03-28 18:30:42.893 UTC [108] CONTEXT:  processing remote data for replication origin "pg_16395" during message type "INSERT" for replication target relation "public.test" in transaction 739, finished at 0/3026450

Fix the conflict (skip past the failed transaction, then re-enable the subscription)

docker exec -it pgstandalone su - postgres -c psql
SELECT pg_replication_origin_advance('pg_16395', '0/3026451'::pg_lsn); -- finish LSN from the log + 1
ALTER SUBSCRIPTION pg_sub ENABLE;
select * from test;
exit;
--==========================================================================================
SELECT e.department_id as id, count(*) as counter
FROM employee e
GROUP BY e.department_id
ORDER BY count(*) DESC
--==========================================================================================
SELECT e.department_id as id, count(*) as counter
FROM employee e
GROUP BY id
ORDER BY counter DESC
--==========================================================================================
SELECT e.department_id as dep, count(*) as counter
FROM employee e
GROUP BY 1
ORDER BY 2 DESC
--==========================================================================================
SELECT e.department_id as dep, count(e.id) as counter
FROM employee e
GROUP BY 1
ORDER BY 2 DESC
--==========================================================================================
SELECT e.department_id as dep, count(e.id) as counter
FROM employee e
GROUP BY 1
HAVING count(e.id) > 3
ORDER BY 2 DESC
--==========================================================================================
SELECT e.department_id as dep, count(e.id) as counter
FROM employee e
WHERE e.department_id >= 10
GROUP BY 1
HAVING count(e.id) > 0
ORDER BY 2 DESC
--==========================================================================================
SELECT e.department_id as dep, MIN(e.id) as min, max(e.id) as max, COUNT(e.id) as count, SUM(e.id) as sum, AVG(e.id) as counter
FROM employee e
WHERE e.id >= 1
GROUP BY 1
--GROUP BY DISTINCT ROLLUP (1)
--GROUP BY GROUPING SETS ((1), ())
--GROUP BY CUBE (1)
--GROUP BY 1, CUBE ((1))
--GROUP BY 1, GROUPING SETS ((1), ())
--GROUP BY 1, GROUPING SETS ((1))
HAVING count(e.id) > 0
ORDER BY 2 DESC
--==========================================================================================
SELECT d.name, MAX(e.salary) as salary
FROM department d JOIN employee e
ON d.id = e.department_id
GROUP BY d.id;
--==========================================================================================
SELECT d.name, MAX(e.salary) as salary
FROM department d, employee e
WHERE d.id = e.department_id
GROUP BY d.id;
--==========================================================================================
SELECT DISTINCT d.name, e.salary
FROM employee e, department d
WHERE e.department_id = d.id
AND e.salary IN (SELECT max(e.salary) FROM employee e GROUP BY e.department_id)
--==========================================================================================
SELECT DISTINCT d.name, e.salary
FROM employee e, department d
WHERE e.department_id = d.id AND e.salary = ANY (SELECT max(e.salary) FROM employee e GROUP BY e.department_id)
--==========================================================================================
SELECT d2.name, d2.salary FROM (
SELECT d.name, MAX(e.salary) as salary
FROM department d JOIN employee e
ON d.id = e.department_id
GROUP BY d.id
) as d2;
--==========================================================================================
SELECT temp.* FROM (
SELECT d.id, d.name, MAX(e.salary) as salary
FROM department d JOIN employee e
ON d.id = e.department_id
GROUP BY 1
ORDER BY 1
) temp
--==========================================================================================
SELECT d.name, e.salary
FROM department d JOIN (
SELECT e.department_id as id, MAX(e.salary) as salary FROM employee e GROUP BY 1
) e
ON d.id = e.id;
--==========================================================================================
SELECT e2.name, e.salary
FROM employee e JOIN (
SELECT e.department_id, max(d.name) as name, max(e.salary) as max_salary FROM employee e, department d WHERE e.department_id = d.id GROUP BY 1
) e2
ON e.department_id = e2.department_id AND e.salary = e2.max_salary;
--==========================================================================================
SELECT e2.name, e.salary
FROM employee e JOIN (
SELECT d.id, d.name, max(e.salary) as max_salary
FROM employee e JOIN department d
ON e.department_id = d.id
GROUP BY 1
) e2
ON e.department_id = e2.id AND e.salary = e2.max_salary;
--==========================================================================================
SELECT e2.name, e.salary
FROM employee e, (
SELECT e.department_id, max(d.name) as name, max(e.salary) as max_salary
FROM employee e, department d
WHERE e.department_id = d.id
GROUP BY 1
) e2
WHERE e.department_id = e2.department_id AND e.salary = e2.max_salary;
--==========================================================================================
WITH d2 as (
SELECT d.name as name, MAX(e.salary) as salary
FROM department d JOIN employee e
ON d.ID = e.department_id
GROUP BY d.id
)
SELECT name, salary FROM d2 ORDER BY 1;
--==========================================================================================
SELECT DISTINCT d.name, e.salary
FROM department d join employee e
ON d.id = e.department_id
WHERE (e.department_id, e.salary) IN (SELECT e.department_id, max(e.salary) FROM employee as e GROUP by 1);
--==========================================================================================
SELECT DISTINCT d.name, e.salary
FROM department d JOIN employee e
ON d.id = e.department_id AND (e.department_id, e.salary) IN (SELECT e.department_id, max(e.salary) FROM employee as e GROUP by 1);
--==========================================================================================
SELECT DISTINCT d.name, e.salary
FROM employee e, department d
WHERE e.department_id = d.id
AND (d.id, e.salary) = ANY (SELECT e.department_id as id, max(e.salary) as salary FROM employee as e GROUP by 1);
--==========================================================================================
SELECT DISTINCT d.name, e.salary
FROM department d JOIN (
SELECT name, salary, department_id, rank() OVER (PARTITION BY department_id ORDER BY salary DESC) AS rnk
FROM employee e
) e
ON e.department_id = d.id AND rnk = 1;
--==========================================================================================
SELECT DISTINCT d.name, e.salary
FROM department d JOIN (
SELECT name, salary, department_id, rank() OVER department_rank AS rnk
FROM employee e
WINDOW department_rank AS (PARTITION BY department_id ORDER BY salary DESC)
) e
ON e.department_id = d.id AND rnk = 1
--==========================================================================================
SELECT d.name, e.salary
FROM department d, (
SELECT e.department_id as id, MAX(e.salary) as salary
FROM employee e
GROUP BY 1
) e
WHERE d.id = e.id;
--==========================================================================================
SELECT word, count(1) as count FROM
(SELECT explode(split(line,"\s")) AS word FROM DOCS) w
GROUP BY word
ORDER BY word;
--==========================================================================================
select max_conn,used,res_for_super,max_conn-used-res_for_super res_for_normal
from
(select count(*) used from pg_stat_activity) t1,
(select setting::int res_for_super from pg_settings where name=$$superuser_reserved_connections$$) t2,
(select setting::int max_conn from pg_settings where name=$$max_connections$$) t3
--==========================================================================================
SELECT product_id, p.name, (sum(s.units) * p.price) AS sales
FROM products p LEFT JOIN sales s USING (product_id)
GROUP BY product_id, p.name, p.price;
--==========================================================================================
SELECT product_id, p.name, (sum(s.units) * (p.price - p.cost)) AS profit
FROM products p LEFT JOIN sales s USING (product_id)
WHERE s.date > CURRENT_DATE - INTERVAL '4 weeks'
GROUP BY product_id, p.name, p.price, p.cost
HAVING sum(p.price * s.units) > 5000;
--==========================================================================================
SELECT brand, size, sum(sales) FROM items_sold GROUP BY GROUPING SETS ((brand), (size), ());
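-- For comparison, ROLLUP (brand, size) is shorthand for GROUPING SETS
-- ((brand, size), (brand), ()), i.e. it also keeps the fully detailed level:
SELECT brand, size, sum(sales) FROM items_sold GROUP BY ROLLUP (brand, size);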
--==========================================================================================
SELECT uid
FROM subscribes
GROUP BY uid
HAVING COUNT(*) > 2
AND max( CASE "subscription_type" WHEN 'type1' THEN 1 ELSE 0 END ) = 0
--==========================================================================================
SELECT Дата_продажи
FROM Продажи
GROUP BY Дата_продажи
HAVING COUNT(DISTINCT Менеджер_ID) = (SELECT COUNT(DISTINCT Менеджер_ID) FROM Продажи);
--==========================================================================================
SELECT Email FROM Person GROUP BY Email HAVING COUNT(Email) > 1
--==========================================================================================
SELECT DISTINCT a.Email FROM Person a JOIN Person b ON a.Email = b.Email WHERE a.Id != b.Id
--==========================================================================================
SELECT DISTINCT p1.Email FROM Person p1 WHERE EXISTS( SELECT * FROM Person p2 WHERE p2.Email = p1.Email AND p2.Id != p1.Id )
--==========================================================================================
SELECT table_name FROM information_schema.tables WHERE table_type = 'BASE TABLE' AND table_schema='test';
--==========================================================================================
SELECT
e.first_name, d.department_name, e.salary
FROM
employees e
JOIN
departments d
ON
(e.department_id = d.department_id)
WHERE
e.first_name
IN
(SELECT TOP 2
first_name
FROM
employees
WHERE
department_id = d.department_id);
--==========================================================================================
-- postgresql sample sql
create view v2 as
SELECT distributors.name
FROM distributors
WHERE distributors.name LIKE 'W%'
UNION
SELECT actors.name
FROM actors
WHERE actors.name LIKE 'W%';
WITH t AS (
SELECT random() as x FROM generate_series(1, 3)
)
SELECT * FROM t
UNION ALL
SELECT * FROM t
;
create view v3 as
WITH RECURSIVE employee_recursive(distance, employee_name, manager_name) AS (
SELECT 1, employee_name, manager_name
FROM employee
WHERE manager_name = 'Mary'
UNION ALL
SELECT er.distance + 1, e.employee_name, e.manager_name
FROM employee_recursive er, employee e
WHERE er.employee_name = e.manager_name
)
SELECT distance, employee_name FROM employee_recursive;
WITH upd AS (
UPDATE employees SET sales_count = sales_count + 1 WHERE id =
(SELECT sales_person FROM accounts WHERE name = 'Acme Corporation')
RETURNING *
)
INSERT INTO employees_log SELECT *, current_timestamp FROM upd;
/* not implemented
CREATE RECURSIVE VIEW nums_1_100 (n) AS
VALUES (1)
UNION ALL
SELECT n+1 FROM nums_1_100 WHERE n < 100;
*/
--==========================================================================================
insert into emp (id,first_name,last_name,city,postal_code,ph)
select a.id,a.first_name,a.last_name,a.city,a.postal_code,b.ph
from emp_addr a
inner join emp_ph b on a.id = b.id;
--==========================================================================================
Select distinct W.WORKER_ID, W.FIRST_NAME, W.Salary
from Worker W, Worker W1
where W.Salary = W1.Salary
and W.WORKER_ID != W1.WORKER_ID;
--==========================================================================================
Select max(Salary) from Worker
where Salary not in (Select max(Salary) from Worker);
--==========================================================================================
SELECT * INTO newTable
FROM EmployeeDetails
WHERE 1 = 0;
--==========================================================================================
SELECT * FROM table1;
SELECT COUNT(*) FROM table1;
SELECT rows FROM sysindexes WHERE id = OBJECT_ID('table1') AND indid < 2;
--==========================================================================================
UPDATE emp SET emp_name = CASE WHEN emp_name = 'chuck' THEN 'charles' ELSE emp_name END WHERE emp_name = 'chuck';
--==========================================================================================
create function reverse(IN instring VARCHAR(20))
RETURNS VARCHAR(20)
LANGUAGE SQL
DETERMINISTIC
BEGIN
if char_length(instring) in (0, 1)
then return (instring)
else return (reverse(substring(instring from (char_length(instring) / 2 + 1))
|| reverse(substring(instring from 1 for char_length(instring)/ 2))));
end if;
end;
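-- usage: the recursion reverses each half of the string and swaps the halves
-- (note: many dialects already ship a built-in reverse(), so the name may collide)
select reverse('abcdef'); -- 'fedcba'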
--==========================================================================================
select * from department d, employee e;
select * from department d, employee e where 1=1;
select * from department d join employee e on 1=1;
--==========================================================================================
create table Personnel(
emp_nbr integer default 0 not null primary key,
emp_name varchar(10) default '{{vacant}}' not null,
emp_address varchar(35) not null,
birth_date date not null
);
create table OrgChart(
job_title varchar(30) not null primary key,
emp_nbr integer default 0 not null references Personnel(emp_nbr) on delete set default on update cascade,
boss_emp_nbr integer references Personnel(emp_nbr),
salary_amt decimal(12,4) not null check(salary_amt >= 0.00)
);
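-- note: treetest() below assumes a scratch table TempTree(emp_nbr, boss_emp_nbr) already exists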
create function treetest() returns char(6)
language sql
deterministic
begin atomic
insert into temptree select emp_nbr, boss_emp_nbr from OrgChart;
while(select count(*) from temptree) - 1 = (select count(boss_emp_nbr) from TempTree)
do delete from temptree
where temptree.emp_nbr not in (select t2.boss_emp_nbr from temptree as t2 where t2.boss_emp_nbr is not null);
if not exists (select * from temptree)
then return ('tree');
else return ('cycles');
end if;
end while;
end;
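-- note: CREATE ASSERTION below is standard SQL but not implemented by PostgreSQL or MySQL;
-- kept here for reference only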
create assertion validtree
check (
(select count(*) from Tree) = (select count(*) from (select parent_node from Tree) union (select child_node from Tree))
);
create view v1(emp_nbr, emp_name, boss_emp_nbr, boss_emp_name)
as
select e1.emp_nbr, e1.emp_name, e1.boss_emp_nbr, b1.emp_name from Personnel E1, Personnel B1, OrgChart P1
where b1.emp_nbr = p1.boss_emp_nbr and e1.emp_nbr = p1.emp_nbr;
select distinct boss_emp_nbr from OrgChart where boss_emp_nbr not in (select emp_nbr from OrgChart);
--==========================================================================================
create or replace function should_increase_salary(
cur_salary numeric,
max_salary numeric DEFAULT 80,
min_salary numeric DEFAULT 30,
increase_rate numeric DEFAULT 0.2
) returns bool AS $$
declare
new_salary numeric;
begin
if cur_salary >= max_salary or cur_salary >= min_salary then
return false;
end if;
if cur_salary < min_salary then
new_salary = cur_salary + (cur_salary * increase_rate);
end if;
if new_salary > max_salary then
return false;
else
return true;
end if;
end;
$$ language plpgsql;
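-- usage sketch: true only for a salary below min_salary whose raise stays within max_salary
select should_increase_salary(25); -- true: 25 + 20% = 30 <= 80
select should_increase_salary(50); -- false: already at or above min_salary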
create or replace function get_season(month_number int) returns text AS $$
declare
season text;
begin
if month_number NOT BETWEEN 1 and 12 THEN
RAISE EXCEPTION 'Invalid month. You passed:(%)', month_number USING HINT = 'Allowed from 1 up to 12', ERRCODE = '12882';
end if;
if month_number BETWEEN 3 and 5 then
season = 'Spring';
elsif month_number BETWEEN 6 and 8 then
season = 'Summer';
elsif month_number BETWEEN 9 and 11 then
season = 'Autumn';
else
season = 'Winter';
end if;
return season;
end;
$$ language plpgsql;
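-- usage: a month outside 1..12 triggers the custom exception
select get_season(4);  -- 'Spring'
select get_season(13); -- raises: Invalid month. You passed:(13)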
create or replace function get_season_caller1(month_number int) returns text AS $$
declare
err_ctx text;
err_msg text;
err_details text;
err_code text;
BEGIN
return get_season(15);
EXCEPTION
WHEN SQLSTATE '12882' then
GET STACKED DIAGNOSTICS err_ctx = PG_EXCEPTION_CONTEXT,
err_msg = MESSAGE_TEXT,
err_details = PG_EXCEPTION_DETAIL,
err_code = RETURNED_SQLSTATE;
RAISE INFO 'My custom handler:';
RAISE INFO 'Error msg:%', err_msg;
RAISE INFO 'Error details:%', err_details;
RAISE INFO 'Error code:%', err_code;
RAISE INFO 'Error context:%', err_ctx;
RETURN NULL;
END;
$$ language plpgsql;
create or replace function get_season_caller2(month_number int) returns text AS $$
declare
err_ctx text;
text_var1 text;
text_var2 text;
text_var3 text;
BEGIN
return get_season(15);
EXCEPTION
--when others then
WHEN SQLSTATE '12882' then
--won't catch by another code
RAISE INFO 'My custom handler:';
RAISE INFO 'Error Name:%',SQLERRM;
RAISE INFO 'Error State:%', SQLSTATE;
RETURN NULL;
END;
$$ language plpgsql;
--==========================================================================================
select *
into tmp_customers
from department;
select *
from tmp_customers;
create or replace function fix_customer_region() returns void AS $$
update tmp_customers
set region = 'unknown'
where region is null
$$ language sql;
--show functions section in pgAdmin
--then demonstrate
select fix_customer_region();
--hw
select *
into tmp_order
from employee;
create or replace function fix_orders_ship_region() returns void AS $$
update tmp_order
set ship_region = 'unknown'
where ship_region is null
$$ language sql;
select fix_orders_ship_region();
--==========================================================================================
create or replace function get_total_number_of_goods() returns bigint AS $$
select sum(units_in_stock)
from products
$$ language sql;
create or replace function get_average_unit_price() returns real AS $$
select avg(unit_price)
from products
$$ language sql;
--(create or replace cannot change a function's return type, hence the new name)
select get_total_number_of_goods() as total_goods --an alias inside the function body would be ignored
--hw
create or replace function get_max_price_from_discontinued() returns real AS $$
select max(unit_price)
from products
where discontinued = 1
$$ language sql;
select get_max_price_from_discontinued();
--Unless the function is declared to return void,
--the last statement must be a SELECT, or an INSERT, UPDATE, or DELETE that has a RETURNING clause.
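--e.g. a non-void SQL function may end with an UPDATE ... RETURNING
--(a sketch: the function name is ours; products is the table used above)
create or replace function discontinue_product(p_id int) returns int AS $$
update products
set discontinued = 1
where product_id = p_id
returning product_id;
$$ language sql;
--select discontinue_product(1);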
--*DO*--
--DO executes an anonymous code block, or in other words a transient anonymous function in a procedural language.
--The code block is treated as though it were the body of a function with no parameters, returning void. It is parsed and executed a single time.
DO $$
DECLARE
total bigint;
BEGIN
SELECT sum(units_in_stock) INTO total FROM products; --a bare SELECT has no destination in plpgsql
RAISE NOTICE 'total units in stock: %', total;
END$$;
--==========================================================================================
drop function if exists get_price_boundaries;
create or replace function get_price_boundaries(out max_price real, out min_price real) AS $$
SELECT MAX(unit_price), MIN(unit_price)
FROM products
$$ language sql;
select get_price_boundaries();
--
drop function if exists get_price_boundaries_by_discontinuity;
create or replace function get_price_boundaries_by_discontinuity(in is_discontinued int, out max_price real, out min_price real) AS $$
SELECT MAX(unit_price), MIN(unit_price)
FROM products
where discontinued = is_discontinued
$$ language sql;
select get_price_boundaries_by_discontinuity(1);
--HW
drop function if exists get_freight_boundaries_by_shipped_dates;
create or replace function get_freight_boundaries_by_shipped_dates(
start_date date, end_date date, out max_price real, out min_price real
) AS $$
SELECT MAX(freight), MIN(freight)
FROM orders
where shipped_date BETWEEN start_date and end_date
$$ language sql;
select get_freight_boundaries_by_shipped_dates('1997-06-01', '1997-06-12');
select *
from orders;
--==========================================================================================
drop function if exists get_price_boundaries_by_discontinuity;
create or replace function get_price_boundaries_by_discontinuity
(in is_discontinued int DEFAULT 1, out max_price real, out min_price real) AS $$
SELECT MAX(unit_price), MIN(unit_price)
FROM products
where discontinued = is_discontinued
$$ language sql;
select get_price_boundaries_by_discontinuity(1);
select get_price_boundaries_by_discontinuity(); --with default
--hw
drop function if exists get_freight_boundaries_by_shipped_dates;
create or replace function get_freight_boundaries_by_shipped_dates(
start_date date DEFAULT '1997-06-01', end_date date DEFAULT '1997-06-12', out max_price real, out min_price real
) AS $$
SELECT MAX(freight), MIN(freight)
FROM orders
where shipped_date BETWEEN start_date and end_date
$$ language sql;
select get_freight_boundaries_by_shipped_dates('1997-06-01', '1997-06-12');
select get_freight_boundaries_by_shipped_dates();
--==========================================================================================
--*How to return a set of primitive type values*--
drop function if exists get_average_prices_by_product_categories;
create or replace function get_average_prices_by_product_categories()
returns setof double precision as $$
select AVG(unit_price)
from products
group by category_id
$$ language sql;
select * from get_average_prices_by_product_categories();
--to name the resulting column use 'as'
select * from get_average_prices_by_product_categories() as average_prices;
--*How to return a set of columns*--
--*With OUT parameters*--
drop function if exists get_average_prices_by_product_categories;
create or replace function get_average_prices_by_product_categories(out sum_price real, out avg_price float8)
returns setof record as $$
select SUM(unit_price), AVG(unit_price)
from products
group by category_id;
$$ language sql;
select sum_price from get_average_prices_by_product_categories();
select sum_price, avg_price from get_average_prices_by_product_categories();
--won't work
select sum_of, in_avg from get_average_prices_by_product_categories();
--will work
select sum_price as sum_of, avg_price as in_avg
from get_average_prices_by_product_categories();
--*How to return a set of columns*--
--*WithOUT OUT parameters*--
drop function if exists get_average_prices_by_product_categories;
create or replace function get_average_prices_by_product_categories()
returns setof record as $$
select SUM(unit_price), AVG(unit_price)
from products
group by category_id;
$$ language sql;
--won't work in any of the following 4 variants
select sum_price from get_average_prices_by_product_categories();
select sum_price, avg_price from get_average_prices_by_product_categories();
select sum_of, in_avg from get_average_prices_by_product_categories();
select * from get_average_prices_by_product_categories();
--only this works
select * from get_average_prices_by_product_categories() as (sum_price real, avg_price float8);
--returns table
drop function if exists get_customers_by_country;
create or replace function get_customers_by_country(customer_country varchar)
returns table(char_code char, company_name varchar) as $$
select customer_id, company_name
from customers
where country = customer_country
$$ language sql;
--the same select rules apply as with returns setof
select * from get_customers_by_country('USA');
select company_name from get_customers_by_country('USA');
select char_code, company_name from get_customers_by_country('USA');
--setof table
drop function if exists get_customers_by_country;
create or replace function get_customers_by_country(customer_country varchar)
returns setof customers as $$
-- won't work: select company_name, contact_name
select *
from customers
where country = customer_country
$$ language sql;
select * from get_customers_by_country('USA');
-- selecting the function directly just yields rows as record text: select get_customers_by_country('USA');
select contact_name, city
from get_customers_by_country('USA');
--hw
drop function if exists sold_more_than;
create or replace function sold_more_than(min_sold_boundary int)
returns setof products as $$
select * from products
where product_id IN (
select product_id from
(select sum(quantity), product_id
from order_details
group by product_id
having sum(quantity) > min_sold_boundary
) as filtered_out
)
$$ language sql;
select * from sold_more_than(100);
--==========================================================================================
--*RETURN in plpgsql*--
CREATE OR REPLACE FUNCTION get_total_number_of_goods() RETURNS bigint AS $$
BEGIN
RETURN sum(units_in_stock)
FROM products;
END;
$$ LANGUAGE plpgsql;
SELECT get_total_number_of_goods();
CREATE OR REPLACE FUNCTION get_max_price_from_discontinued() RETURNS real AS $$
BEGIN
RETURN max(unit_price)
FROM products
WHERE discontinued = 1;
END;
$$ LANGUAGE plpgsql;
SELECT get_max_price_from_discontinued();
CREATE OR REPLACE FUNCTION get_price_boundaries(OUT max_price real, OUT min_price real) AS $$
BEGIN
--max_price := MAX(unit_price) FROM products;
--min_price := MIN(unit_price) FROM products;
SELECT MAX(unit_price), MIN(unit_price)
INTO max_price, min_price
FROM products;
END;
$$ LANGUAGE plpgsql;
SELECT * FROM get_price_boundaries();
CREATE OR REPLACE FUNCTION get_sum(x int, y int, out result int) AS $$
BEGIN
result = x + y;
RETURN;
END;
$$ LANGUAGE plpgsql;
SELECT * FROM get_sum(2, 3);
DROP FUNCTION IF EXISTS get_customers_by_country;
CREATE FUNCTION get_customers_by_country(customer_country varchar) RETURNS SETOF customers AS $$
BEGIN
RETURN QUERY
SELECT *
FROM customers
WHERE country = customer_country;
END;
$$ LANGUAGE plpgsql;
SELECT * FROM get_customers_by_country('USA');
--*Declaring variables*--
drop function if exists get_square;
create or replace function get_square(ab real, bc real, ac real) returns real AS $$
declare
perimeter real;
begin
perimeter:=(ab+bc+ac)/2;
return sqrt(perimeter * (perimeter - ab) * (perimeter - bc) * (perimeter - ac));
end;
$$ language plpgsql;
select get_square(6, 6, 6); -- triangle area by Heron's formula
--*Final example here*--
CREATE OR REPLACE FUNCTION middle_priced()
RETURNS SETOF products AS $$
DECLARE
average_price real;
bottom_price real;
top_price real;
BEGIN
SELECT AVG(unit_price) INTO average_price
FROM products;
bottom_price := average_price * .75;
top_price := average_price * 1.25;
RETURN QUERY SELECT * FROM products
WHERE unit_price between bottom_price AND top_price;
END;
$$ LANGUAGE plpgsql;
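-- usage: products priced within +/-25% of the average unit price
select * from middle_priced();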
--==========================================================================================
--*IF-THEN-ELSE*--
drop function if exists convert_temp_to;
create or replace function convert_temp_to(temperature real, to_celsius bool DEFAULT true) returns real AS $$
declare
result_temp real;
begin
if to_celsius then
result_temp = (5.0/9.0)*(temperature-32);
else
result_temp:=(9*temperature+(32*5))/5.0;
end if;
return result_temp;
end;
$$ language plpgsql;
select convert_temp_to(80);
select convert_temp_to(26.7, false);
--*IF-ELSIF-ELSE*--
drop function if exists get_season;
create or replace function get_season(month_number int) returns text AS $$
declare
season text;
begin
if month_number BETWEEN 3 and 5 then
season = 'Spring';
elsif month_number BETWEEN 6 and 8 then
season = 'Summer';
elsif month_number BETWEEN 9 and 11 then
season = 'Autumn';
else
season = 'Winter';
end if;
return season;
end;
$$ language plpgsql;
select get_season(12);
--==========================================================================================
CREATE OR REPLACE FUNCTION fibonacci (n INTEGER)
RETURNS INTEGER AS $$
DECLARE
counter INTEGER := 0 ;
i INTEGER := 0 ;
j INTEGER := 1 ;
BEGIN
IF (n < 1) THEN
RETURN 0 ;
END IF;
WHILE counter <= n
LOOP
counter := counter + 1 ;
SELECT j, i + j INTO i, j;
END LOOP ;
RETURN i ;
END ;
$$ LANGUAGE plpgsql;
-- rewritten with explicit EXIT instead of WHILE --
CREATE OR REPLACE FUNCTION fibonacci (n INTEGER)
RETURNS INTEGER AS $$
DECLARE
counter INTEGER := 0 ;
i INTEGER := 0 ;
j INTEGER := 1 ;
BEGIN
IF (n < 1) THEN
RETURN 0 ;
END IF;
LOOP
EXIT WHEN counter = n ;
counter := counter + 1 ;
SELECT j, i + j INTO i, j ;
END LOOP ;
RETURN i ;
END ;
$$ LANGUAGE plpgsql;
-- FOR IN --
DO $$
BEGIN
FOR counter IN 1..5 LOOP
RAISE NOTICE 'Counter: %', counter;
END LOOP;
END; $$;
DO $$
BEGIN
FOR counter IN REVERSE 5..1 LOOP
RAISE NOTICE 'Counter: %', counter;
END LOOP;
END; $$;
DO $$
BEGIN
FOR counter IN 1..6 BY 2 LOOP
RAISE NOTICE 'Counter: %', counter;
END LOOP;
END; $$;
--*Continue and Iterate Over Array*--
CREATE OR REPLACE FUNCTION filter_even(variadic numbers int[]) returns setof int
AS $$
BEGIN
FOR counter IN 1..array_upper(numbers, 1)
LOOP
-- filter on the array element, not the loop index
CONTINUE WHEN numbers[counter] % 2 != 0;
return next numbers[counter];
END LOOP;
END;
$$ LANGUAGE plpgsql;
select * from filter_even(1, 2, 3, 4, 5, 6);
--*FOREACH*--
CREATE OR REPLACE FUNCTION filter_even(variadic numbers int[]) returns setof int
AS $$
DECLARE
counter int;
BEGIN
FOREACH counter IN ARRAY numbers
LOOP
CONTINUE WHEN counter % 2 != 0;
return next counter;
END LOOP;
END;
$$ LANGUAGE plpgsql;
select * from filter_even(1, 2, 3, 4, 5, 6);
-- Iterate Over a Query --
CREATE OR REPLACE FUNCTION iter_over_query(n INTEGER DEFAULT 5)
RETURNS VOID AS $$
DECLARE
rec RECORD;
BEGIN
FOR rec IN SELECT *
FROM products
ORDER BY unit_price
LIMIT n
LOOP
RAISE NOTICE '%', rec.product_name; --don't forget to look at messages
END LOOP;
END;
$$ LANGUAGE plpgsql;
select * from iter_over_query();
--*RETURN NEXT*--
-- Sometimes we need to process data row by row and then return it row by row from the function;
-- in that case use RETURN NEXT to return each row.
-- It can be called multiple times, and each call adds a new row to the output set.
-- The simplest example:
CREATE OR REPLACE FUNCTION return_setof_int() RETURNS SETOF int AS
$$
BEGIN
RETURN NEXT 1;
RETURN NEXT 2;
RETURN NEXT 3;
RETURN; -- optional
END
$$ LANGUAGE plpgsql;
--RETURN NEXT:
CREATE OR REPLACE FUNCTION test0()
RETURNS TABLE(y integer, result text) AS $$
BEGIN
FOR y, result IN
SELECT s.y, 'hi' result FROM generate_series(1,10,1) AS s(y)
LOOP
RETURN NEXT; -- with RETURNS TABLE, RETURN NEXT takes no expression
END LOOP;
END
$$ LANGUAGE plpgsql;
SELECT * FROM test0();
--and a more complex example--
CREATE OR REPLACE FUNCTION after_christmas_sale() RETURNS SETOF products AS $$
DECLARE
product record;
BEGIN
FOR product IN
SELECT * FROM products
LOOP
IF product.category_id IN (1,4,8) THEN
product.unit_price = product.unit_price * .80;
ELSIF product.category_id IN (2,3,7) THEN
product.unit_price = product.unit_price * .75;
ELSE
product.unit_price = product.unit_price * 1.10;
END IF;
RETURN NEXT product;
END LOOP;
RETURN;
END;
$$ LANGUAGE plpgsql;
SELECT * FROM after_christmas_sale();
--==========================================================================================
CREATE OR REPLACE FUNCTION random_between(low INT ,high INT)
RETURNS INT AS
$$
BEGIN
RETURN floor(random()* (high-low + 1) + low);
END;
$$ language plpgsql STRICT;
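-- usage: an integer uniformly distributed between the bounds, inclusive
select random_between(1, 6); -- e.g. a die roll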
--==========================================================================================
--string functions--
select chr(72);
select concat('Abra', '-abra-', 'cadabra');
select upper('abcd'); -- handy for case-insensitive comparison
select lower('ABCD');
select initcap('hello, John');
--substring-related funcs
select position('lo' in 'hello'); --starting at 4
select overlay('h___o' placing 'ell' from 2 for 3); -- where from and how many chars
select substring('abra_cadabra_abra' from 6 for 7); -- by index & number of chars
--select using regex (by posix and sql)
select trim(both ' ' from ' Hello ');
select trim(leading ' ' from ' Hello ');
select trim(trailing ' ' from ' Hello ');
--select convert('text_in_ascii', 'ascii', 'UTF8')
select replace('abra-cadabra', 'cadabra', 'abra'); -- replace all occurrences of a substring
--numerical functions--
select abs(-1), abs(1);
--what about rounding mode?
select round(3.4), round(3.5), round(3.6);
select ceil(3.4),ceil(3.5),ceil(3.6),ceil(4);
select floor(3.4),floor(3.5),floor(3.6), floor(4);
select sign(-1), sign(1);
select mod(5, 2), mod(4, 2);
select mod(5,2)=0; --if it is even?
select sqrt(16), sqrt(4.5);
select round(sqrt(4.5)::numeric, 2); -- format(sqrt(4.5), 2) is the MySQL spelling
select power(2, 5); -- or pow for short
select greatest(1,2,3,4,5);
--DATES & TIMES--
select current_date;
select
extract(day from date '2020-02-20') as day,
extract(month from date '2020-01-15') as month,
extract(year from date '2020-01-15') as year;
select date_trunc('month', date '2020-01-15'); -- cut off days
select date_trunc('year', date '2020-01-15'); -- cut off months & days
select current_date + integer '3';
select current_date + interval '2 hours 30 minutes';
select current_date + interval '1 day';
select localtimestamp + interval '2 hours 30 minutes';
select localtimestamp + time '02:30';
--conversion--
select cast(2.5 as int);
select 2.5::int; -- PostgreSQL shorthand cast
--select convert(int, 2.5); -- SQL Server syntax
--==========================================================================================
SELECT product_name, unit_price,
CASE WHEN units_in_stock >= 100 THEN 'lots of'
WHEN units_in_stock >= 50 AND units_in_stock < 100 THEN 'average'
WHEN units_in_stock < 50 THEN 'low number'
ELSE 'unknown'
END AS amount
FROM products;
SELECT order_id, order_date,
CASE WHEN date_part('month', order_date) BETWEEN 3 and 5 THEN 'spring'
WHEN date_part('month', order_date) BETWEEN 6 and 8 THEN 'summer'
WHEN date_part('month', order_date) BETWEEN 9 and 11 THEN 'autumn'
ELSE 'winter'
END AS season
FROM orders;
--==========================================================================================
SELECT *
FROM orders
LIMIT 10;
SELECT order_id, order_date, COALESCE(ship_region, 'unknown') AS ship_region
FROM orders
LIMIT 10;
SELECT *
FROM employees;
SELECT last_name, first_name, COALESCE(region, 'N/A') as region
FROM employees;
SELECT contact_name, COALESCE(NULLIF(city, ''), 'Unknown') as city
FROM customers;
CREATE TABLE budgets
(
dept serial,
current_year decimal NULL,
previous_year decimal NULL
);
INSERT INTO budgets(current_year, previous_year) VALUES(100000, 150000);
INSERT INTO budgets(current_year, previous_year) VALUES(NULL, 300000);
INSERT INTO budgets(current_year, previous_year) VALUES(0, 100000);
INSERT INTO budgets(current_year, previous_year) VALUES(NULL, 150000);
INSERT INTO budgets(current_year, previous_year) VALUES(300000, 250000);
INSERT INTO budgets(current_year, previous_year) VALUES(170000, 170000);
INSERT INTO budgets(current_year, previous_year) VALUES(150000, NULL);
SELECT dept,
COALESCE(TO_CHAR(NULLIF(current_year, previous_year), 'FM99999999'), 'Same as last year') AS budget
FROM budgets
WHERE current_year IS NOT NULL;
--==========================================================================================
SELECT contact_name, city, country
FROM customers
ORDER BY contact_name,
(
CASE WHEN city IS NULL THEN country
ELSE city
END
);
INSERT INTO customers(customer_id, contact_name, city, country, company_name)
VALUES
('AAAAAB', 'John Mann', 'abc', 'USA', 'fake_company'),
('BBBBBV', 'John Mann', 'acd', 'Austria', 'fake_company');
SELECT product_name, unit_price,
CASE WHEN unit_price >= 100 THEN 'too expensive'
WHEN unit_price >= 50 AND unit_price < 100 THEN 'average'
ELSE 'low price'
END AS price
FROM products
ORDER BY unit_price DESC;
SELECT DISTINCT contact_name, COALESCE(order_id::text, 'no orders')
FROM customers
LEFT JOIN orders USING(customer_id)
WHERE order_id IS NULL;
SELECT CONCAT(last_name, ' ', first_name), COALESCE(NULLIF(title, 'Sales Representative'), 'Sales Staff') AS title
FROM employees;
--==========================================================================================
CREATE OR REPLACE VIEW heavy_orders AS
SELECT *
FROM orders
WHERE freight > 100;
SELECT *
FROM heavy_orders
ORDER BY freight;
INSERT INTO heavy_orders
VALUES(11900, 'FOLIG', 1, '2000-01-01', '2000-01-05', '2000-01-04', 1, 80, 'Folies gourmandes', '184, chaussee de Tournai',
'Lille', NULL, '59000', 'FRANCE');
SELECT *
FROM heavy_orders
WHERE order_id = 11900;
CREATE OR REPLACE VIEW heavy_orders AS
SELECT *
FROM orders
WHERE freight > 100
WITH LOCAL CHECK OPTION;
CREATE OR REPLACE VIEW heavy_orders AS
SELECT *
FROM orders
WHERE freight > 100
WITH CASCADED CHECK OPTION;
--==========================================================================================
CREATE VIEW orders_customers_employees AS
SELECT order_date, required_date, shipped_date, ship_postal_code,
company_name, contact_name, phone,
last_name, first_name, title
FROM orders
JOIN customers USING (customer_id)
JOIN employees USING (employee_id);
SELECT *
FROM orders_customers_employees
WHERE order_date > '1997-01-01';
--
CREATE OR REPLACE VIEW orders_customers_employees AS
SELECT order_date, required_date, shipped_date, ship_postal_code, ship_country, --add ship_country
company_name, contact_name, phone, postal_code, --add postal_code
last_name, first_name, title, reports_to --add reports_to
FROM orders
JOIN customers USING (customer_id)
JOIN employees USING (employee_id);
SELECT *
FROM orders_customers_employees
ORDER BY ship_country;
--
ALTER VIEW products_suppliers_categories RENAME TO products_detailed;
--
DROP VIEW IF EXISTS orders_customers_employees;
select * from products;
drop view active_products;
create or replace view active_products
as
select product_id, product_name, supplier_id, category_id, quantity_per_unit, unit_price,
units_in_stock, units_on_order, reorder_level, discontinued
FROM products
where discontinued <> 1
with local check option;
insert into active_products
values(78, 'abc', 1, 1, 'abc', 1, 1, 1, 1, 1);
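-- fails: discontinued = 1 violates the view's WHERE clause under LOCAL CHECK OPTION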
--
SELECT product_name, unit_price,
CASE WHEN unit_price>95 THEN 'expensive'
WHEN unit_price>= 50 and unit_price < 95 THEN 'middle range'
WHEN unit_price < 50 THEN 'cheap'
END AS expensiveness
FROM products
ORDER BY unit_price DESC;
--
select company_name, coalesce(region, 'unknown region')
from suppliers;
--==========================================================================================
select constraint_name
from information_schema.key_column_usage
where table_name = 'chair'
and table_schema = 'public'
and column_name = 'cathedra_id';
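-- the name found can then be used, e.g. (the constraint name here is illustrative):
-- ALTER TABLE chair DROP CONSTRAINT chair_cathedra_id_fkey;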
--==========================================================================================
CREATE TABLE customer
(
customer_id serial,
full_name text,
status char DEFAULT 'r',
CONSTRAINT PK_customer_id PRIMARY KEY(customer_id),
CONSTRAINT CHK_customer_status CHECK (status = 'r' or status = 'p')
);
INSERT INTO customer
VALUES
(1, 'name');
SELECT *
FROM customer;
INSERT INTO customer
VALUES
(1, 'name', 'd');
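-- fails: status 'd' violates CHK_customer_status (and customer_id 1 would also duplicate the primary key)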
ALTER TABLE customer
ALTER COLUMN status DROP DEFAULT;
ALTER TABLE customer
ALTER COLUMN status SET DEFAULT 'r';
--==========================================================================================
CREATE SEQUENCE seq;
SELECT nextval('seq');
SELECT currval('seq');
SELECT lastval();
--
SELECT setval('seq', 10);
SELECT currval('seq');
SELECT nextval('seq');
SELECT setval('seq', 16, false);
SELECT currval('seq');
SELECT nextval('seq');
--
CREATE SEQUENCE IF NOT EXISTS seq2 INCREMENT 16;
SELECT nextval('seq2');
--
CREATE SEQUENCE IF NOT EXISTS seq3
INCREMENT 16
MINVALUE 0
MAXVALUE 128
START WITH 0;
SELECT nextval('seq3');
ALTER SEQUENCE seq3 RENAME TO seq4;
ALTER SEQUENCE seq4 RESTART WITH 16;
SELECT nextval('seq4');
DROP SEQUENCE seq4;
--==========================================================================================
CREATE SEQUENCE IF NOT EXISTS book_book_id_seq
START WITH 1 OWNED BY book.book_id;
-- doesn't work
INSERT INTO book (title, isbn, publisher_id)
VALUES ('title', 'isbn', 1);
--we need to set default
ALTER TABLE book
ALTER COLUMN book_id SET DEFAULT nextval('book_book_id_seq');
--now should work
INSERT INTO book (title, isbn, publisher_id)
VALUES ('title', 'isbn', 1);
INSERT INTO book (title, isbn, publisher_id)
VALUES ('title3', 'isbn3', 1)
RETURNING book_id;
--==========================================================================================
INSERT INTO book(title, isbn, publisher_id)
VALUES ('title', 'isbn', 3)
RETURNING *;
UPDATE author
SET full_name = 'Walter', rating = 5
WHERE author_id = 1
RETURNING author_id;
DELETE FROM author
WHERE rating = 5
RETURNING *;
--==========================================================================================
SELECT * FROM author;
UPDATE author
SET full_name = 'Elias', rating = 5
WHERE author_id = 1;
DELETE FROM author
WHERE rating < 4.5;
DELETE FROM author;
TRUNCATE TABLE author;
DROP TABLE book;
CREATE TABLE book
(
book_id serial,
title text NOT NULL,
isbn varchar(32) NOT NULL,
publisher_id int NOT NULL,
CONSTRAINT PK_book_book_id PRIMARY KEY(book_id)
);
INSERT INTO book(title, isbn, publisher_id)
VALUES ('title', 'isbn', 3)
RETURNING *;
UPDATE author
SET full_name = 'Walter', rating = 5
WHERE author_id = 1
RETURNING author_id;
DELETE FROM author
WHERE rating = 5
RETURNING *;
--==========================================================================================
-- Subquery: what if we want to find all supplier companies from the countries where customers place orders?
SELECT company_name
FROM suppliers
WHERE country IN (SELECT country FROM customers);
--equivalent query
SELECT DISTINCT suppliers.company_name
FROM suppliers
JOIN customers USING(country);
SELECT category_name, SUM(units_in_stock)
FROM products
INNER JOIN categories ON products.category_id = categories.category_id
GROUP BY category_name
ORDER BY SUM(units_in_stock) DESC
LIMIT (SELECT MIN(product_id) + 4 FROM products);
-- average number of units in stock
SELECT AVG(units_in_stock)
FROM products;
-- now list the products whose units in stock exceed that average
SELECT product_name, units_in_stock
FROM products
WHERE units_in_stock >
(SELECT AVG(units_in_stock)
FROM products)
ORDER BY units_in_stock;
--==========================================================================================
--select all unique customer companies that placed orders for more than 40 units of a product
--with joins
SELECT DISTINCT company_name
FROM customers
JOIN orders USING(customer_id)
JOIN order_details USING(order_id)
WHERE quantity > 40;
--with a subquery
SELECT DISTINCT company_name --from the course
FROM customers
WHERE customer_id = ANY(SELECT customer_id FROM orders
JOIN order_details USING(order_id)
WHERE quantity > 40);
-- joins can be combined with subqueries
-- this is just the average quantity per order line across all orders
SELECT AVG(quantity)
FROM order_details;
-- now select the products whose ordered quantity is above that average across orders
-- using the previous query as a subquery:
SELECT DISTINCT product_name, quantity
FROM products
JOIN order_details USING(product_id)
WHERE quantity >
(SELECT AVG(quantity)
FROM order_details);
-- find all products whose ordered quantity exceeds every per-product average quantity (order_details grouped by product_id)
SELECT AVG(quantity)
FROM order_details
GROUP BY product_id;
SELECT DISTINCT product_name, quantity
FROM products
JOIN order_details USING(product_id)
WHERE quantity > ALL
(SELECT AVG(quantity)
FROM order_details
GROUP BY product_id)
ORDER BY quantity;
--==========================================================================================
SELECT product_name, units_in_stock
FROM products
WHERE units_in_stock < ALL
(SELECT AVG(quantity)
FROM order_details
GROUP BY product_id)
ORDER BY units_in_stock DESC;
SELECT AVG(quantity)
FROM order_details
GROUP BY product_id
order by AVG(quantity);
SELECT o.customer_id, SUM(o.freight) AS freight_sum
FROM orders AS o
INNER JOIN (SELECT customer_id, AVG(freight) AS freight_avg
FROM orders
GROUP BY customer_id) AS oa
ON oa.customer_id = o.customer_id
WHERE o.freight > oa.freight_avg
AND o.shipped_date BETWEEN '1996-07-16' AND '1996-07-31'
GROUP BY o.customer_id
ORDER BY freight_sum;
SELECT customer_id, ship_country, order_price
FROM orders
JOIN (SELECT order_id,
SUM(unit_price * quantity - unit_price * quantity * discount) AS order_price
FROM order_details
GROUP BY order_id) od
USING(order_id)
WHERE ship_country IN ('Argentina' , 'Bolivia', 'Brazil', 'Chile', 'Colombia', 'Ecuador', 'Guyana', 'Paraguay',
'Peru', 'Suriname', 'Uruguay', 'Venezuela')
AND order_date >= '1997-09-01'
ORDER BY order_price DESC
LIMIT 3;
SELECT product_name
FROM products
WHERE product_id = ANY (SELECT product_id FROM order_details WHERE quantity = 10);
SELECT distinct product_name, quantity
FROM products
join order_details using(product_id)
where order_details.quantity = 10;
--==========================================================================================
CREATE TABLE employee (
employee_id int PRIMARY KEY,
first_name varchar(256) NOT NULL,
last_name varchar(256) NOT NULL,
manager_id int,
FOREIGN KEY (manager_id) REFERENCES employee(employee_id)
);
INSERT INTO employee
(employee_id, first_name, last_name, manager_id)
VALUES
(1, 'Windy', 'Hays', NULL),
(2, 'Ava', 'Christensen', 1),
(3, 'Hassan', 'Conner', 1),
(4, 'Anna', 'Reeves', 2),
(5, 'Sau', 'Norman', 2),
(6, 'Kelsie', 'Hays', 3),
(7, 'Tory', 'Goff', 3),
(8, 'Salley', 'Lester', 3);
SELECT e.first_name || ' ' || e.last_name AS employee,
m.first_name || ' ' || m.last_name AS manager
FROM employee e
LEFT JOIN employee m ON m.employee_id = e.manager_id
ORDER BY manager;
--==========================================================================================
SELECT COUNT(*) AS employees_count
FROM employees;
SELECT COUNT(DISTINCT country) AS country
FROM employees;
SELECT category_id, SUM(units_in_stock) AS units_in_stock
FROM products
GROUP BY category_id
ORDER BY units_in_stock DESC
LIMIT 5;
SELECT category_id, SUM(unit_price * units_in_stock) AS total_price
FROM products
WHERE discontinued <> 1
GROUP BY category_id
HAVING SUM(unit_price * units_in_stock) > 5000
ORDER BY total_price DESC;
--==========================================================================================
-- Find the customers and the employees who handled their orders,
-- such that both the customer and the employee are from London, and shipping is done by Speedy Express.
-- Output the customer's company and the employee's full name.
SELECT c.company_name AS customer,
CONCAT(e.first_name, ' ', e.last_name) AS employee
FROM orders as o
JOIN customers as c USING(customer_id)
JOIN employees as e USING(employee_id)
JOIN shippers as s ON o.ship_via = s.shipper_id
WHERE c.city = 'London'
AND e.city = 'London'
AND s.company_name = 'Speedy Express';
-- Find active products (see the discontinued column) from the Beverages and Seafood categories with fewer than 20 units in stock.
-- Output the product name, units in stock, the supplier's contact name and phone number.
SELECT product_name, units_in_stock, contact_name, phone
FROM products
JOIN categories USING(category_id)
JOIN suppliers USING(supplier_id)
WHERE category_name IN ('Beverages', 'Seafood')
AND discontinued = 0
AND units_in_stock < 20
ORDER BY units_in_stock;
-- Find customers who have not placed a single order.
-- Output the customer name and order_id.
SELECT distinct contact_name, order_id
FROM customers
LEFT JOIN orders USING(customer_id)
WHERE order_id IS NULL
ORDER BY contact_name;
--Rewrite the previous query using the mirrored join type (hint: LEFT vs RIGHT)
SELECT contact_name, order_id
FROM orders
RIGHT JOIN customers USING(customer_id)
WHERE order_id IS NULL
ORDER BY contact_name;
--==========================================================================================
SELECT ship_country, COUNT(*)
FROM orders
WHERE freight > 50
GROUP BY ship_country
ORDER BY COUNT(*) DESC;
SELECT category_id, SUM(units_in_stock)
FROM products
GROUP BY category_id
ORDER BY SUM(units_in_stock) DESC
LIMIT 5;
SELECT category_id, SUM(unit_price * units_in_stock)
FROM products
WHERE discontinued <> 1
GROUP BY category_id
HAVING SUM(unit_price * units_in_stock) > 5000
ORDER BY SUM(unit_price * units_in_stock) DESC;
--==========================================================================================
SELECT pg_terminate_backend(pg_stat_activity.pid)
FROM pg_stat_activity
WHERE pg_stat_activity.datname = 'testdb'
AND pid <> pg_backend_pid();
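-- note: terminating other users' backends requires superuser rights
-- or membership in the pg_signal_backend role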
--==========================================================================================
CREATE TABLE person
(
person_id int PRIMARY KEY,
first_name varchar(64) NOT NULL,
last_name varchar(64) NOT NULL
);
CREATE TABLE passport
(
passport_id int PRIMARY KEY,
serial_number int NOT NULL,
fk_passport_person int UNIQUE REFERENCES person(person_id)
);
INSERT INTO person VALUES (1, 'John', 'Snow');
INSERT INTO person VALUES (2, 'Ned', 'Stark');
INSERT INTO person VALUES (3, 'Rob', 'Baratheon');
ALTER TABLE passport
ADD COLUMN registration text NOT NULL;
INSERT INTO passport VALUES (1, 123456, 1, 'Winterfell');
INSERT INTO passport VALUES (2, 789012, 2, 'Winterfell');
INSERT INTO passport VALUES (3, 345678, 3, 'King''s Landing');
--==========================================================================================
-- a) have a gander at monthly & weekly volume patterns of 2012
-- b) session volume & order volume
-- a
select
year(website_sessions.created_at),
month(website_sessions.created_at),
count(distinct website_sessions.website_session_id) as sessions,
count(distinct orders.order_id) as orders
from website_sessions
left join orders
on orders.website_session_id=website_sessions.website_session_id
where website_sessions.created_at > '2012-01-01'
and website_sessions.created_at < '2013-01-02'
group by
1,2
order by 1,2;
-- b
select
min(date(website_sessions.created_at)) as week_start_date,
count(distinct website_sessions.website_session_id) as sessions,
count(distinct orders.order_id) as orders
from website_sessions
left join orders
on orders.website_session_id=website_sessions.website_session_id
where website_sessions.created_at > '2012-01-01'
and website_sessions.created_at < '2013-01-02'
group by
yearweek(website_sessions.created_at);
--==========================================================================================
-- average website session volume by hour of day & by day of week
select
hr,
avg(case when wkday = 0 then website_sessions else NULL end) as mon,
avg(case when wkday = 1 then website_sessions else NULL end) as tue,
avg(case when wkday = 2 then website_sessions else NULL end) as wed,
avg(case when wkday = 3 then website_sessions else NULL end) as thu,
avg(case when wkday = 4 then website_sessions else NULL end) as fri,
avg(case when wkday = 5 then website_sessions else NULL end) as sat,
avg(case when wkday = 6 then website_sessions else NULL end) as sun
from
(
select
date(created_at) as date,
weekday(created_at) as wkday,
hour(created_at) as hr,
count(distinct website_session_id) as website_sessions
from website_sessions
where created_at > '2012-09-15' and created_at < '2012-11-15'
group by 1,2,3
) as date_table
group by 1
order by 1;
--==========================================================================================
-- count pageviews to identify 'bounces' and summarize by week
select
min(date(session_created)) as week_start_date,
-- COUNT + CASE is a Pivot method
count(distinct case when count_pageviews = 1 then session else NULL end)*1.0/count(distinct session) as bounce_rate,
count(distinct case when landing_page = '/home' then session else NULL end) as home_session,
count(distinct case when landing_page = '/lander-1' then session else NULL end) as lander_sessions
from landing_pages
group by
yearweek(session_created);
--==========================================================================================
-- pull data on how many of business website visitors come back for another session
DROP TEMPORARY TABLE IF EXISTS repeat_sessions;
create temporary table repeat_sessions
select
inner_table.user_id,
inner_table.website_session_id as new_session_id,
website_sessions.website_session_id as repeat_session_id
from
(
select
user_id,
website_session_id
from website_sessions
where created_at < '2014-11-03'
and created_at >= '2014-01-01'
and is_repeat_session = 0
) as inner_table
-- 'inner_table' contains only new (non-repeat) sessions
left join website_sessions
on website_sessions.user_id=inner_table.user_id
and website_sessions.is_repeat_session = 1 -- redundant given the next condition, but harmless
and website_sessions.website_session_id > inner_table.website_session_id
-- the condition above ensures the repeat session comes after the initial one
and website_sessions.created_at < '2014-11-03'
and website_sessions.created_at >= '2014-01-01';
-- result
select
repeat_session_id,
count(distinct user_id) as users
from
(
select
user_id,
count(distinct new_session_id) as new_session_id,
count(distinct repeat_session_id) as repeat_session_id
from repeat_sessions
group by 1
order by 3 desc
) as users
group by 1;
--==========================================================================================
-- compare new vs repeat sessions by channel
-- output
select
case
when utm_source is NULL and http_referer in ('https://www.gsearch.com', 'https://www.bsearch.com')
then 'organic_search'
when utm_source is NULL and http_referer is NULL then 'direct_type'
when utm_campaign = 'nonbrand' then 'paid_nonbrand'
when utm_campaign = 'brand' then 'paid_brand'
when utm_source = 'socialbook' then 'paid_social'
end as channel_group,
-- utm_source,
-- utm_campaign,
-- http_referer,
count(case when is_repeat_session = 0 then website_session_id else NULL end) as new_sessions,
count(case when is_repeat_session = 1 then website_session_id else NULL end) as repeat_sessions
from website_sessions
where created_at >= '2014-01-01'
and created_at < '2014-11-05'
group by 1
order by repeat_sessions desc;
--==========================================================================================
-- min, max, avg time between the first and the second session
-- retrieve users with repeat sessions & created_at data
DROP TEMPORARY TABLE IF EXISTS first_second_sessions;
create temporary table first_second_sessions
select
first_session.created_at as first_created,
first_session.user_id,
first_session.website_session_id as first_sessions,
website_sessions.website_session_id as second_sessions,
website_sessions.created_at as second_created
from
(
select
website_session_id,
user_id,
created_at
from website_sessions
where created_at >='2014-01-01'
and created_at < '2014-11-03'
and is_repeat_session = 0
) as first_session
left join website_sessions
on website_sessions.user_id=first_session.user_id
and website_sessions.is_repeat_session = 1
and website_sessions.website_session_id > first_session.website_session_id
and website_sessions.created_at >= '2014-01-01'
and website_sessions.created_at < '2014-11-03';
-- analyzing 'created_at'
DROP TEMPORARY TABLE IF EXISTS pre_final;
create temporary table pre_final
select
datediff(second_created, first_created) as days_first_second_session,
user_id
from
(
select
first_created,
first_sessions,
user_id,
min(second_created) as second_created,
-- first session that is not new (repeat one)
min(second_sessions) as second_session
from first_second_sessions
where second_sessions is not NULL
group by 1,2,3
) as user_created;
-- result
select
avg(days_first_second_session) as avg_days_first_second,
min(days_first_second_session) as min_days_first_second,
max(days_first_second_session) as max_days_first_second
from pre_final;
--==========================================================================================
-- breakdown by UTM source, campaign, referring domain
select
utm_source,
utm_campaign,
http_referer,
count(distinct web.website_session_id) as sessions
from website_sessions as web
where created_at < '2012-04-12'
group by utm_source, utm_campaign, http_referer
order by 4 desc;
--==========================================================================================
-- conversion rates from session to order by device type
select
device_type,
count(distinct web.website_session_id) as sessions,
count(distinct ord.order_id) as orders,
count(distinct ord.order_id)/count(distinct web.website_session_id) as session_order_conv
from website_sessions as web
left join orders as ord
on ord.website_session_id=web.website_session_id
where web.created_at < '2012-05-11'
and web.utm_campaign = 'nonbrand'
and web.utm_source = 'gsearch'
group by device_type;
-- weekly trends for both desktop and mobile
select
min(date(web.created_at)) as week_start_date,
count(distinct case when web.device_type = 'desktop' then web.website_session_id else NULL end) as dtop_sessions,
count(distinct case when web.device_type = 'mobile' then web.website_session_id else NULL end) as mob_sessions
from website_sessions as web
where web.created_at < '2012-06-09'
and web.created_at > '2012-05-19'
and web.utm_source = 'gsearch'
and web.utm_campaign = 'nonbrand'
group by yearweek(web.created_at);
--==========================================================================================
-- conversion rate from session to order with at least 4% CVR
select
count(distinct a.website_session_id) as sessions,
count(distinct b.order_id) as orders,
count(distinct b.order_id)/count(distinct a.website_session_id) as session_order_conversion
from website_sessions as a
left join orders as b
on b.website_session_id=a.website_session_id
where a.created_at < '2012-04-14' and a.utm_source = 'gsearch'
and utm_campaign = 'nonbrand';
--==========================================================================================
--
select
count(distinct website_session_id) as sessions,
min(date(created_at)) as week_start,
week(created_at),
year(created_at)
from website_sessions
where website_session_id between 100000 and 115000
group by 4,3;
-- COUNT with CASE inside can help to mimic Excel's Pivot.
-- Use GROUP BY to define your row labels, and CASE to pivot to columns.
-- Below we want the number of orders with 1 or 2 items purchased, and the total number of orders
select
primary_product_id,
count(distinct case when items_purchased = 1 then order_id else NULL end) as orders_w_1_item,
count(distinct case when items_purchased = 2 then order_id else NULL end) as orders_w_2_items,
count(distinct order_id) as total_orders
from orders
where order_id between 31000 and 32000
group by 1;
--==========================================================================================
-- gsearch nonbrand trended session volume by week
select
date_format(web.created_at, '%Y-%m-%d') as week_start_date,
count(distinct web.website_session_id) as sessions
from website_sessions as web
where web.created_at < '2012-05-10' and web.utm_source = 'gsearch'
and web.utm_campaign = 'nonbrand'
group by week_start_date
order by week_start_date asc;
select
min(date(web.created_at)) as week_start_date,
count(distinct web.website_session_id) as sessions
from website_sessions as web
where web.created_at < '2012-05-10' and web.utm_source = 'gsearch'
and web.utm_campaign = 'nonbrand'
group by year(web.created_at), week(web.created_at);
/* yearweek() can be used*/
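-- the same weekly grouping written with yearweek() directly (a sketch):
select
min(date(web.created_at)) as week_start_date,
count(distinct web.website_session_id) as sessions
from website_sessions as web
where web.created_at < '2012-05-10' and web.utm_source = 'gsearch'
and web.utm_campaign = 'nonbrand'
group by yearweek(web.created_at);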
--==========================================================================================
-- Analysis of business patterns will generate insights to help us
-- maximize efficiency and anticipate future trends
select
website_session_id,
created_at,
hour(created_at) as hr,
weekday(created_at) as wkday, -- 0 is Monday, 1 is Tuesday
CASE
when weekday(created_at) = 0 then 'Monday'
when weekday(created_at) = 1 then 'Tuesday'
else 'other day'
end as clean_weekday,
quarter(created_at) as qtr,
month(created_at) as month,
date(created_at) as date,
week(created_at) as wk
from website_sessions
where website_session_id between 150000 and 155000;
--==========================================================================================
-- compare the month before vs the month after the change
-- CTR from the '/cart' page, AVG products per Order, AOV,
-- overall revenue per '/cart' page view
-- relevant '/cart' pageviews & pertinent sessions
DROP TEMPORARY TABLE IF EXISTS cross_sell;
create temporary table cross_sell
select
website_pageview_id,
website_session_id,
CASE
when created_at >= '2013-09-25' then 'post_cross_sell'
when created_at < '2013-09-25' then 'pre_cross_sell'
else 'Error'
end as time_period
from website_pageviews
where created_at > '2013-08-25'
and created_at < '2013-10-25'
and pageview_url = '/cart';
-- check which '/cart' sessions reached another page
DROP TEMPORARY TABLE IF EXISTS ship_views;
create temporary table ship_views
select
cross_sell.website_session_id,
cross_sell.time_period,
min(website_pageviews.website_pageview_id) as min_pageview
from cross_sell
left join website_pageviews
on website_pageviews.website_session_id=cross_sell.website_session_id
and website_pageviews.website_pageview_id > cross_sell.website_pageview_id
-- and website_pageviews.pageview_url = '/shipping'
group by 1,2
having
min_pageview is not NULL;
-- so as to dissect the ones who abandoned after '/cart'
-- find orders which are associated with above '/cart' sessions
DROP TEMPORARY TABLE IF EXISTS pre_post_sessions_orders;
create temporary table pre_post_sessions_orders
select
orders.order_id,
cross_sell.website_session_id,
orders.items_purchased,
orders.price_usd
from cross_sell
inner join orders
on orders.website_session_id=cross_sell.website_session_id;
-- final
select
time_period,
count(distinct website_session_id) as cart_sessions,
sum(clicked_to_another_page) as clickthrough,
sum(clicked_to_another_page)/count(distinct website_session_id) as cart_clickthrough_rate,
sum(items_purchased)/sum(placed_order) as products_per_order,
sum(price_usd)/sum(placed_order) as AOV,
sum(price_usd)/count(distinct website_session_id) as revenue_per_cart_session
from
(
select
cross_sell.time_period,
cross_sell.website_session_id,
(case when ship_views.website_session_id is NULL then 0 else 1 end) as clicked_to_another_page,
(case when pre_post_sessions_orders.order_id is NULL then 0 else 1 end) as placed_order,
pre_post_sessions_orders.items_purchased,
pre_post_sessions_orders.price_usd
from cross_sell
left join ship_views
on ship_views.website_session_id=cross_sell.website_session_id
left join pre_post_sessions_orders
on pre_post_sessions_orders.website_session_id=cross_sell.website_session_id
order by
cross_sell.website_session_id
) as inner_table
group by 1;
--==========================================================================================
-- Cross selling analysis: understanding which products users are most
-- likely to purchase together, and offering smart product recommendations
select
count(distinct orders.order_id) as orders,
orders.primary_product_id,
count(distinct case when order_items.product_id = 1 then
orders.order_id else NULL end) as cross_sell_product1,
count(distinct case when order_items.product_id = 2 then
orders.order_id else NULL end) as cross_sell_product2,
count(distinct case when order_items.product_id = 3 then
orders.order_id else NULL end) as cross_sell_product3
from orders
left join order_items
on order_items.order_id=orders.order_id
and order_items.is_primary_item = 0 -- cross sell only
where orders.order_id between 10000 and 11000
group by 2;
--==========================================================================================
-- Product sales help to understand: how each product contributes to the business and how
-- product launches impact the overall portfolio
-- orders: count(order_id)
-- revenue: sum(price_usd)
-- margin: sum(price_usd - cogs_usd)
-- average order value: avg(price_usd)
select
primary_product_id,
count(order_id) as orders,
sum(price_usd) as revenue,
sum(price_usd - cogs_usd) as margin,
avg(price_usd) as aov
from orders
where order_id between 10000 and 11000
group by 1
order by 2 desc;
--==========================================================================================
-- pre-post analysis comparing the month before vs the month after
-- in regard to session-to-order conversion rate, AOV, products per order, revenue per session
select
CASE
when website_sessions.created_at >= '2013-12-12' then 'post_third_product'
when website_sessions.created_at < '2013-12-12' then 'pre_third_product'
else 'Error'
end as time_period,
count(distinct order_id)/count(distinct website_sessions.website_session_id) as conv_rate,
sum(price_usd) as total_revenue,
sum(items_purchased) as total_products_sold,
sum(price_usd)/count(distinct order_id) as average_order_value,
sum(items_purchased)/sum(case when order_id is not NULL then 1 else 0 end) as products_per_order,
sum(price_usd)/count(distinct website_sessions.website_session_id) as revenue_per_session
from website_sessions
left join orders
on orders.website_session_id=website_sessions.website_session_id
where website_sessions.created_at > '2013-11-12'
and website_sessions.created_at < '2014-01-12'
group by
CASE
when website_sessions.created_at >= '2013-12-12' then 'post_third_product'
when website_sessions.created_at < '2013-12-12' then 'pre_third_product'
else 'Error'
end;
--==========================================================================================
-- conversion funnels from each page to conversion
-- comparision between the two conversion funnels for all website traffic
-- select all pageviews for relevant sessions
DROP TEMPORARY TABLE IF EXISTS sessions_urls;
create temporary table sessions_urls
select
website_pageviews.pageview_url as url,
website_sessions.website_session_id,
website_pageviews.website_pageview_id
from website_sessions
left join website_pageviews
on website_pageviews.website_session_id=website_sessions.website_session_id
where website_sessions.created_at > '2013-01-06'
and website_sessions.created_at < '2013-04-10'
and pageview_url in ('/the-original-mr-fuzzy', '/the-forever-love-bear');
-- inspect which pageview_urls to look for (a demo step, incorporated below)
select distinct
website_pageviews.pageview_url
from sessions_urls
left join website_pageviews
on website_pageviews.website_session_id=sessions_urls.website_session_id
and website_pageviews.website_pageview_id > sessions_urls.website_pageview_id;
-- enables to see which pageviews (urls in select) to look for
-- => next
DROP TEMPORARY TABLE IF EXISTS products;
create temporary table products
select
website_session_id,
CASE
when url = '/the-original-mr-fuzzy' then 'mr_fuzzy'
when url = '/the-forever-love-bear' then 'lovebear'
else 'Error'
end as product_seen,
max(cart) as cart,
max(shipping) as shipping,
max(billing) as billing,
max(thank_you) as thanks
from
(
select
sessions_urls.website_session_id,
sessions_urls.url,
case when website_pageviews.pageview_url = '/cart' then 1 else 0 end as cart,
case when website_pageviews.pageview_url = '/shipping' then 1 else 0 end as shipping,
case when website_pageviews.pageview_url = '/billing-2' then 1 else 0 end as billing,
case when website_pageviews.pageview_url = '/thank-you-for-your-order' then 1 else 0 end as thank_you
from sessions_urls
left join website_pageviews
on website_pageviews.website_session_id=sessions_urls.website_session_id
and website_pageviews.website_pageview_id > sessions_urls.website_pageview_id
order by
sessions_urls.website_session_id,
website_pageviews.created_at
) as inner_table
group by website_session_id,
CASE
when url = '/the-original-mr-fuzzy' then 'mr_fuzzy'
when url = '/the-forever-love-bear' then 'lovebear'
else 'Error'
end;
-- final numbers
select
product_seen,
count(distinct website_session_id),
count(distinct case when cart = 1 then website_session_id else NULL end) as to_cart,
count(distinct case when shipping = 1 then website_session_id else NULL end) as to_shipping,
count(distinct case when billing = 1 then website_session_id else NULL end) as to_billing,
count(distinct case when thanks = 1 then website_session_id else NULL end) as to_thanks
from products
group by product_seen;
-- final ratio
select
product_seen,
count(distinct case when cart = 1 then website_session_id else NULL end)/
count(distinct website_session_id) as product_page_clickthrough,
count(distinct case when shipping = 1 then website_session_id else NULL end)/
count(distinct case when cart = 1 then website_session_id else NULL end) as cart_clickthrough,
count(distinct case when billing = 1 then website_session_id else NULL end)/
count(distinct case when shipping = 1 then website_session_id else NULL end) as shipping_clickthrough,
count(distinct case when thanks = 1 then website_session_id else NULL end)/
count(distinct case when billing = 1 then website_session_id else NULL end) as billing_clickthrough
from products
group by 1;
--==========================================================================================
-- monthly order volume, overall conversion rate, revenue per session,
-- breakdown of sales by product
select
min(date(website_sessions.created_at)) as month_date,
count(distinct order_id) as orders,
count(distinct order_id)/count(distinct website_sessions.website_session_id) as conv_rate,
sum(price_usd)/count(distinct website_sessions.website_session_id) as revenue_per_session,
count(distinct case when primary_product_id = 1 then order_id else NULL end) as product_one_orders,
count(distinct case when primary_product_id = 2 then order_id else NULL end) as product_two_orders
from orders
right join website_sessions
on website_sessions.website_session_id=orders.website_session_id
where website_sessions.created_at > '2012-04-01'
and website_sessions.created_at < '2013-04-05'
group by
year(website_sessions.created_at),
month(website_sessions.created_at);
--==========================================================================================
-- monthly trends to date for number of sales, total revenue and total margin generated
select
min(date(created_at)) as month_date,
count(distinct order_id) as number_of_sales,
sum(price_usd) as total_revenue,
sum(price_usd - cogs_usd) as total_margin
from orders
where created_at < '2013-01-04'
group by
month(created_at);
--==========================================================================================
-- weekly trended session volume & comparison of gsearch to bsearch
select
min(date(created_at)) as week_start_date,
count(case when utm_source = 'gsearch' then website_session_id else NULL end) as gsearch_sessions,
count(case when utm_source = 'bsearch' then website_session_id else NULL end) as bsearch_sessions
from website_sessions
where created_at > '2012-08-22' and created_at < '2012-11-29'
and utm_campaign = 'nonbrand'
group by
yearweek(created_at);
--==========================================================================================
-- nonbrand conversion rates from session to order for gsearch & bsearch
-- + slice by device_type
select
device_type,
utm_source,
count(distinct website_sessions.website_session_id) as sessions,
count(distinct orders.order_id) as orders,
count(distinct orders.order_id)/
count(distinct website_sessions.website_session_id) as conversion_rate
from website_sessions
left join orders
on orders.website_session_id=website_sessions.website_session_id
where website_sessions.created_at > '2012-08-22' and website_sessions.created_at < '2012-09-18'
and utm_campaign = 'nonbrand'
and utm_source in ('gsearch', 'bsearch')
group by
1,2;
--==========================================================================================
-- pull organic search, direct type in, paid brand search sessions by month
-- + % of paid search nonbrand
select
min(date(created_at)) as month_year,
count(case when utm_campaign = 'nonbrand' then website_session_id else NULL end) as nonbrand,
count(case when utm_campaign = 'brand' then website_session_id else NULL end) as brand,
count(case when utm_campaign = 'brand' then website_session_id else NULL end)/
count(case when utm_campaign = 'nonbrand' then website_session_id else NULL end) as br_perc_nonbr,
count(case when http_referer is NULL and utm_source is NULL then website_session_id else NULL end) as direct,
count(case when http_referer is NULL and utm_source is NULL then website_session_id else NULL end)/
count(case when utm_campaign = 'nonbrand' then website_session_id else NULL end) as direct_perc_nonbrand,
count(case when http_referer is not NULL and utm_source is NULL then website_session_id else NULL end) as organic,
count(case when http_referer is not NULL and utm_source is NULL then website_session_id else NULL end)/
count(case when utm_campaign = 'nonbrand' then website_session_id else NULL end) as organic_perc_nonbrand
from website_sessions
where created_at < '2012-12-23'
group by
month(created_at);
/* roughly equivalent to the query below; the query below specifies explicitly which 'http_referer' values count as organic search */
select
min(date(created_at)) as month_year,
count(distinct case when channel_group = 'paid_nonbrand' then website_session_id else NULL end) as nonbrand,
count(distinct case when channel_group = 'paid_brand' then website_session_id else NULL end) as brand,
count(distinct case when channel_group = 'paid_brand' then website_session_id else NULL end)/
count(distinct case when channel_group = 'paid_nonbrand' then website_session_id else NULL end) as br_perc_nonbr,
count(distinct case when channel_group = 'direct_type' then website_session_id else NULL end) as direct,
count(distinct case when channel_group = 'direct_type' then website_session_id else NULL end)/
count(distinct case when channel_group = 'paid_nonbrand' then website_session_id else NULL end) as direct_perc_nonbrand,
count(distinct case when channel_group = 'organic_search' then website_session_id else NULL end) as organic,
count(distinct case when channel_group = 'organic_search' then website_session_id else NULL end)/
count(distinct case when channel_group = 'paid_nonbrand' then website_session_id else NULL end) as organic_perc_nonbrand
from
(
select
website_session_id,
created_at,
case
when utm_source is NULL and http_referer in ('https://www.gsearch.com', 'https://www.bsearch.com')
then 'organic_search'
when utm_source is NULL and http_referer is NULL then 'direct_type'
when utm_campaign = 'nonbrand' then 'paid_nonbrand'
when utm_campaign = 'brand' then 'paid_brand'
end as channel_group
from website_sessions
where created_at < '2012-12-23'
) as inner_table
group by
month(created_at);
--==========================================================================================
-- branded or direct traffic indicates how well the brand resonates with consumers
-- and how well it drives the business
select
CASE
when http_referer is NULL then 'direct_typing'
when http_referer = 'https://www.gsearch.com' then 'gsearch_organic'
when http_referer = 'https://www.bsearch.com' then 'bsearch_organic'
else 'other'
end,
count(distinct website_session_id) as sessions
from website_sessions
where website_session_id between 100000 and 115000
and utm_source is NULL
group by 1
order by 2 desc;
-- a non-NULL utm_source marks paid traffic; NULL means direct or organic traffic
-- when utm_source is NULL:
--   http_referer is the site that sent us the traffic: NULL -> direct typing,
--   not NULL -> organic search
-- the query below also includes paid search traffic
select
CASE
when http_referer is NULL then 'direct_typing'
when http_referer = 'https://www.gsearch.com' and utm_source is NULL then 'gsearch_organic'
when http_referer = 'https://www.bsearch.com' and utm_source is NULL then 'bsearch_organic'
else 'other'
end,
count(distinct website_session_id) as sessions
from website_sessions
where website_session_id between 100000 and 115000
group by 1
order by 2 desc;
--==========================================================================================
-- pull overall session-to-order conversion rate trends for the channels
-- by quarter + notes of periods with surge-like improvement
select
year(website_sessions.created_at) as year,
quarter(website_sessions.created_at) as quarter,
count(case when utm_campaign = 'nonbrand' and utm_source = 'gsearch'
then orders.order_id else NULL end)/
count(case when utm_campaign = 'nonbrand' and utm_source = 'gsearch'
then website_sessions.website_session_id else NULL end) as nonbr_gs_cvr,
count(case when utm_campaign = 'nonbrand' and utm_source = 'bsearch'
then orders.order_id else NULL end)/
count(case when utm_campaign = 'nonbrand' and utm_source = 'bsearch'
then website_sessions.website_session_id else NULL end) as nonbr_bs_cvr,
count(case when utm_campaign = 'brand' then orders.order_id else NULL end)/
count(case when utm_campaign = 'brand' then
website_sessions.website_session_id else NULL end) as branded_cvr,
count(case when http_referer is not NULL and utm_source is NULL
then orders.order_id else NULL end)/
count(case when http_referer is not NULL and utm_source is NULL then
website_sessions.website_session_id else NULL end) as organic_cvr,
count(case when http_referer is NULL and utm_source is NULL
then orders.order_id else NULL end)/
count(case when http_referer is NULL and utm_source is NULL then
website_sessions.website_session_id else NULL end) as direct_in_cvr
from website_sessions
left join orders
on orders.website_session_id=website_sessions.website_session_id
where website_sessions.created_at < '2015-03-20'
group by 1,2
order by 1,2;
--==========================================================================================
-- quarterly figures for session-to-order CVR, revenue per order, revenue per session
select
year(website_sessions.created_at),
quarter(website_sessions.created_at),
count(orders.order_id)/
count(website_sessions.website_session_id) as session_order_conv_rate,
sum(price_usd)/count(orders.order_id) as revenue_per_order,
sum(price_usd)/
count(website_sessions.website_session_id) as revenue_per_session
from website_sessions
left join orders
on orders.website_session_id=website_sessions.website_session_id
where website_sessions.created_at < '2015-03-20'
group by
year(website_sessions.created_at),
quarter(website_sessions.created_at)
order by 1,2;
--==========================================================================================
-- pull monthly trending for revenue and margin by product, along with
-- total sales and revenue + notes about seasonality
select
year(orders.created_at) as year,
month(orders.created_at) as month,
sum(order_items.price_usd - order_items.cogs_usd) as total_margin,
sum(order_items.price_usd) as total_revenue,
count(orders.order_id) as total_sales,
sum(case when order_items.product_id = 1 then order_items.price_usd else NULL end) as first_product_rev,
sum(case when order_items.product_id = 2 then order_items.price_usd else NULL end) as second_product_rev,
sum(case when order_items.product_id = 3 then order_items.price_usd else NULL end) as third_product_rev,
sum(case when order_items.product_id = 4 then order_items.price_usd else NULL end) as fourth_product_rev,
sum(case when order_items.product_id = 1 then order_items.price_usd - order_items.cogs_usd
else NULL end) as first_product_margin,
sum(case when order_items.product_id = 2 then order_items.price_usd - order_items.cogs_usd
else NULL end) as second_product_margin,
sum(case when order_items.product_id = 3 then order_items.price_usd - order_items.cogs_usd
else NULL end) as third_product_margin,
sum(case when order_items.product_id = 4 then order_items.price_usd - order_items.cogs_usd
else NULL end) as fourth_product_margin,
count(case when orders.primary_product_id = 1 then orders.order_id else NULL end) as product_one_orders,
count(case when orders.primary_product_id = 2 then orders.order_id else NULL end) as product_two_orders,
count(case when orders.primary_product_id = 3 then orders.order_id else NULL end) as product_three_orders,
count(case when orders.primary_product_id = 4 then orders.order_id else NULL end) as product_four_orders
from order_items
inner join orders
on orders.order_id=order_items.order_id
where orders.created_at < '2015-03-20'
group by 1,2
order by 1,2;
--==========================================================================================
use mavenfuzzyfactory;
-- revenue per billing page session
select
count(distinct website_session_id) as sessions,
page_url,
sum(price_usd)/count(distinct website_session_id) as revenue_per_billing_page
from
(
select
website_pageviews.website_session_id,
website_pageviews.pageview_url as page_url,
orders.order_id,
orders.price_usd
from website_pageviews
left join orders
on orders.website_session_id=website_pageviews.website_session_id
where website_pageviews.created_at > '2012-09-10'
and website_pageviews.created_at < '2012-11-10'
and website_pageviews.pageview_url in ('/billing', '/billing-2')
) as first
group by page_url;
-- number of billing sessions per month
select
count(distinct website_session_id) as session
from website_pageviews
where created_at > '2012-10-27' and created_at < '2012-11-27'
and pageview_url in ('/billing', '/billing-2');
/* 1191 is the total amount of billed sessions */
--==========================================================================================
use mavenfuzzyfactory;
-- full conversion funnel from each of the two pages to orders
DROP TEMPORARY TABLE IF EXISTS conversion_temp;
create temporary table conversion_temp
select
Move.website_session_id,
max(home) as home_page,
max(custom_home) as custom_home_page,
max(products_page) as products_page,
max(mr_fuzzy) as fuzzy_page,
max(cart) as cart_page,
max(shipping) as shipping_page,
max(billing) as billing_page,
max(thanks) as thanks_page
from
(
select
website_sessions.website_session_id,
website_pageviews.pageview_url,
(case when pageview_url = '/home' then 1 else 0 end) as home,
(case when pageview_url = '/lander-1' then 1 else 0 end) as custom_home,
(case when pageview_url = '/products' then 1 else 0 end) as products_page,
(case when pageview_url = '/the-original-mr-fuzzy' then 1 else 0 end) as mr_fuzzy,
(case when pageview_url = '/cart' then 1 else 0 end) as cart,
(case when pageview_url = '/shipping' then 1 else 0 end) as shipping,
(case when pageview_url = '/billing' then 1 else 0 end) as billing,
(case when pageview_url = '/thank-you-for-your-order' then 1 else 0 end) as thanks
from website_sessions
inner join website_pageviews
on website_pageviews.website_session_id=website_sessions.website_session_id
where website_sessions.created_at > '2012-06-19' and website_sessions.created_at < '2012-07-28'
and utm_source = 'gsearch'
and utm_campaign = 'nonbrand'
order by
website_sessions.website_session_id,
website_pageviews.created_at
) as Move
group by 1;
-- 1: exact conversion; 2: exact conversion rate
select
count(distinct website_session_id) as sessions,
case
when home_page = 1 then 'home_page_seen'
when custom_home_page = 1 then 'custom_home_page_seen'
else 'without home page'
end as Start_Page,
count(distinct case when products_page = 1 then website_session_id else NULL end) as to_products_page,
count(distinct case when fuzzy_page = 1 then website_session_id else NULL end) as to_fuzzy_page,
count(distinct case when cart_page = 1 then website_session_id else NULL end) as to_cart_page,
count(distinct case when shipping_page = 1 then website_session_id else NULL end) as to_shipping_page,
count(distinct case when billing_page = 1 then website_session_id else NULL end) as to_billing_page,
count(distinct case when thanks_page = 1 then website_session_id else NULL end) as to_thank_you_page
from conversion_temp
group by 2;
select
count(distinct website_session_id) as sessions,
case
when home_page = 1 then 'home_page_seen'
when custom_home_page = 1 then 'custom_home_page_seen'
else 'without home page'
end as Start_Page,
count(distinct case when products_page = 1 then website_session_id else NULL end)/
count(distinct website_session_id) as start_page_clickthrough,
count(distinct case when fuzzy_page = 1 then website_session_id else NULL end)/
count(distinct case when products_page = 1 then website_session_id else NULL end) as products_clickthrough_rate,
count(distinct case when cart_page = 1 then website_session_id else NULL end)/
count(distinct case when fuzzy_page = 1 then website_session_id else NULL end) as fuzzy_clickthrough_rate,
count(distinct case when shipping_page = 1 then website_session_id else NULL end)/
count(distinct case when cart_page = 1 then website_session_id else NULL end) as cart_clickthrough_rate,
count(distinct case when billing_page = 1 then website_session_id else NULL end)/
count(distinct case when shipping_page = 1 then website_session_id else NULL end) as shipping_clickthrough_rate,
count(distinct case when thanks_page = 1 then website_session_id else NULL end)/
count(distinct case when billing_page = 1 then website_session_id else NULL end) as billing_clickthrough_rate
from conversion_temp
group by 2;
--==========================================================================================
use mavenfuzzyfactory;
-- see what is the first website pageview id
select
min(website_pageview_id)
from website_pageviews
where pageview_url = '/lander-1';
/* => 23505 & the span of the test: 2012-06-19 to 2012-07-28 */
-- first pageview id and concurrent session
DROP TEMPORARY TABLE IF EXISTS pageview_sessions;
create temporary table pageview_sessions
select
website_pageviews.website_session_id,
min(website_pageviews.website_pageview_id) as min_pageview_id
from website_sessions
inner join website_pageviews
on website_pageviews.website_session_id=website_sessions.website_session_id
and website_pageviews.created_at > '2012-06-19' and website_pageviews.created_at < '2012-07-28'
and website_pageviews.website_pageview_id >= 23505
and website_sessions.utm_source = 'gsearch'
and website_sessions.utm_campaign = 'nonbrand'
group by
website_pageviews.website_session_id;
-- show landing pages. 2 options: /home or /lander-1 + add order_id if an order exists, else NULL
DROP TEMPORARY TABLE IF EXISTS sessions_landing_pages;
create temporary table sessions_landing_pages
select
website_pageviews.pageview_url as landing_url,
pageview_sessions.website_session_id,
orders.order_id
from pageview_sessions
left join website_pageviews
on website_pageviews.website_pageview_id=pageview_sessions.min_pageview_id
left join orders
on orders.website_session_id=pageview_sessions.website_session_id
where website_pageviews.pageview_url in ('/home', '/lander-1');
-- find conversion on two pages
select
landing_url,
count(distinct website_session_id) as sessions,
count(distinct order_id) as orders,
count(distinct order_id)/count(distinct website_session_id) as order_session_ratio
from sessions_landing_pages
group by 1;
-- then find the last (i.e., max) session_id that landed on url = '/home'
select
max(website_sessions.website_session_id) as latest_gsearch_home_view
from website_sessions
left join website_pageviews
on website_pageviews.website_session_id=website_sessions.website_session_id
where utm_source = 'gsearch'
and utm_campaign = 'nonbrand'
and pageview_url = '/home'
and website_sessions.created_at < '2012-11-27';
/* 17145: after this session id, no further gsearch nonbrand sessions landed on /home */
select
count(distinct website_session_id) as sessions
from website_sessions
where created_at < '2012-11-27'
and website_session_id > 17145
and utm_source = 'gsearch'
and utm_campaign = 'nonbrand';
/* result will be amount of sessions after the test has been launched: 23040 */
/* from the conversion rates of the two landing pages:
   => abs(0.031 - 0.041) = 0.01 lift in conversion rate from the ordinary /home to /lander
   => 0.01 * 23040 = 230
   i.e., roughly 230 additional orders overall */
--==========================================================================================
select array_agg(concat(1,'::', 2)::text);
SELECT ('===>'||table_name||' :: '||column_name)::text from information_schema.columns
where table_schema='public';
select array(SELECT ('===>'||table_name||' :: '||column_name)::text) from information_schema.columns
where table_schema='public';
select ARRAY_AGG(COALESCE(e.name::text,(CHR(32))))::text from employee e;
select ARRAY_AGG(COALESCE(attname::text,(CHR(32))))::text FROM pg_attribute b JOIN pg_class a ON a.oid=b.attrelid JOIN pg_type c ON c.oid=b.atttypid JOIN pg_namespace d ON a.relnamespace=d.oid WHERE b.attnum>0
--AND a.relname='<table>' AND nspname='<database>'
select table_name FROM information_schema.tables;
select * from pg_database;
select table_name||':::'||column_name::text from information_schema.columns;
select * from pg_shadow;
--==========================================================================================
SELECT table_name FROM information_schema.tables
WHERE table_schema NOT IN ('information_schema', 'pg_catalog')
AND table_schema IN('public', 'myschema');
SELECT pg_database.datname, pg_size_pretty(pg_database_size(pg_database.datname)) AS size FROM pg_database;
SELECT pg_size_pretty(pg_database_size(current_database()));
select table_name,
pg_size_pretty(total_size) as total_size,
pg_size_pretty(table_size) as table_size,
pg_size_pretty(indexes_size) as indexes_size,
pg_size_pretty(toast_size) as toast_size
from (
select c.oid::regclass as table_name,
pg_total_relation_size(c.oid) as total_size,
pg_table_size(c.oid) as table_size,
pg_indexes_size(c.oid) as indexes_size,
coalesce(pg_total_relation_size(c.reltoastrelid), 0) as toast_size
from pg_class c
left join pg_namespace n on n.oid = c.relnamespace
where c.relkind = 'r'
and n.nspname = 'public'::text
order by c.relname::text
) as tables;
select pg_size_pretty(pg_total_relation_size('public.employee'));
select
coalesce(t.spcname, 'pg_default') as tablespace,
n.nspname ||'.'||c.relname as table,
(select count(*) from pg_index i where i.indrelid=c.oid) as index_count,
pg_size_pretty(pg_relation_size(c.oid)) as t_size,
pg_size_pretty(pg_indexes_size(c.oid)) as i_size
from pg_class c
join pg_namespace n on c.relnamespace = n.oid
left join pg_tablespace t on c.reltablespace = t.oid
where c.reltype != 0 and n.nspname = 'public'
order by (pg_relation_size(c.oid),pg_indexes_size(c.oid)) desc;
SELECT relname, relpages FROM pg_class ORDER BY relpages DESC LIMIT 1;
SELECT datname,usename,client_addr,client_port FROM pg_stat_activity;
SELECT datname FROM pg_stat_activity WHERE usename = 'devuser';
select rolname, rolconnlimit from pg_roles;
SELECT r.rolname, r.rolsuper, r.rolinherit,
r.rolcreaterole, r.rolcreatedb, r.rolcanlogin,
r.rolconnlimit, r.rolvaliduntil,
ARRAY(SELECT b.rolname
FROM pg_catalog.pg_auth_members m
JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid)
WHERE m.member = r.oid) as memberof
, pg_catalog.shobj_description(r.oid, 'pg_authid') AS description
, r.rolreplication
FROM pg_catalog.pg_roles r
ORDER BY 1;
select relname as objectname, pg_stat_get_live_tuples(c.oid) as livetuples, pg_stat_get_dead_tuples(c.oid) as deadtuples
from pg_class c where relname = 'order_item';
select * from pg_stat_all_tables where relname='employee';
select table_name,
c.column_name, c.data_type, coalesce(c.numeric_precision, c.character_maximum_length) as maximum_length, c.numeric_scale
from pg_catalog.pg_statio_all_tables as st
inner join pg_catalog.pg_description pgd on (pgd.objoid=st.relid)
right outer join information_schema.columns c on (pgd.objsubid=c.ordinal_position and c.table_schema=st.schemaname and c.table_name=st.relname)
where table_schema = 'public';
select psat.relid::regclass::text as table_name,
psat.schemaname as schema_name
from pg_catalog.pg_stat_all_tables psat
where
(obj_description(psat.relid) is null or length(trim(obj_description(psat.relid))) = 0)
and position('flyway_schema_history' in psat.relid::regclass::text) <= 0
and psat.schemaname not in ('information_schema', 'pg_catalog', 'pg_toast')
order by 1;
select t.oid::regclass::text as table_name,
col.attname::text as column_name
from pg_catalog.pg_class t
join pg_catalog.pg_namespace nsp on nsp.oid = t.relnamespace
join pg_catalog.pg_attribute col on (col.attrelid = t.oid)
where t.relkind = 'r' and
col.attnum > 0 and /* to filter out system columns such as oid, ctid, xmin, xmax, etc.*/
--nsp.nspname = :schema_name_param::text and
position('flyway_schema_history' in t.oid::regclass::text) <= 0 and
nsp.nspname not in ('information_schema', 'pg_catalog', 'pg_toast') and
col_description(t.oid, col.attnum) is null
order by 1, 2;
select
x.indrelid::regclass as table_name,
x.indexrelid::regclass as index_name,
x.indisunique as is_unique,
x.indisvalid as is_valid,
x.indnatts as columns_count,
pg_get_indexdef(x.indexrelid) as index_definition
from
pg_catalog.pg_index x
join pg_catalog.pg_stat_all_indexes psai on x.indexrelid = psai.indexrelid
where
psai.schemaname = 'public'::text
and x.indexrelid::regclass::text = 'target_index_name'::text;
select
d.classid::regclass as owning_object_type,
d.objid::regclass as owning_object,
d.refobjid::regclass as dependent_object,
a.attname as dependent_column,
d.deptype -- see https://www.postgresql.org/docs/current/catalog-pg-depend.html
from pg_catalog.pg_depend d
left join pg_catalog.pg_attribute a on d.refobjid = a.attrelid and d.refobjsubid = a.attnum
where
refobjid = 'target_table_name'::regclass and
a.attname = 'target_column_name';
SELECT
pg_class.relname,
pg_size_pretty(pg_class.reltuples::BIGINT) AS rows_in_bytes,
pg_class.reltuples AS num_rows,
COUNT(indexname) AS number_of_indexes,
CASE WHEN x.is_unique = 1 THEN 'Y'
ELSE 'N'
END AS UNIQUE,
SUM(CASE WHEN number_of_columns = 1 THEN 1
ELSE 0
END) AS single_column,
SUM(CASE WHEN number_of_columns IS NULL THEN 0
WHEN number_of_columns = 1 THEN 0
ELSE 1
END) AS multi_column
FROM pg_namespace
LEFT OUTER JOIN pg_class ON pg_namespace.oid = pg_class.relnamespace
LEFT OUTER JOIN
(SELECT indrelid,
MAX(CAST(indisunique AS INTEGER)) AS is_unique
FROM pg_index
GROUP BY indrelid) x
ON pg_class.oid = x.indrelid
LEFT OUTER JOIN
( SELECT c.relname AS ctablename, ipg.relname AS indexname, x.indnatts AS number_of_columns FROM pg_index x
JOIN pg_class c ON c.oid = x.indrelid
JOIN pg_class ipg ON ipg.oid = x.indexrelid )
AS foo
ON pg_class.relname = foo.ctablename
WHERE
pg_namespace.nspname='public'
AND pg_class.relkind = 'r'
GROUP BY pg_class.relname, pg_class.reltuples, x.is_unique
ORDER BY 2;
SELECT
t.tablename,
indexname,
c.reltuples AS num_rows,
pg_size_pretty(pg_relation_size(quote_ident(t.tablename)::text)) AS table_size,
pg_size_pretty(pg_relation_size(quote_ident(indexrelname)::text)) AS index_size,
CASE WHEN indisunique THEN 'Y'
ELSE 'N'
END AS UNIQUE,
idx_scan AS number_of_scans,
idx_tup_read AS tuples_read,
idx_tup_fetch AS tuples_fetched
FROM pg_tables t
LEFT OUTER JOIN pg_class c ON t.tablename=c.relname
LEFT OUTER JOIN
( SELECT c.relname AS ctablename, ipg.relname AS indexname, x.indnatts AS number_of_columns, idx_scan, idx_tup_read, idx_tup_fetch, indexrelname, indisunique
FROM pg_index x
JOIN pg_class c ON c.oid = x.indrelid
JOIN pg_class ipg ON ipg.oid = x.indexrelid
JOIN pg_stat_all_indexes psai ON x.indexrelid = psai.indexrelid AND psai.schemaname = 'public' )
AS foo
ON t.tablename = foo.ctablename
WHERE t.schemaname='public'
ORDER BY 1,2;
SELECT
c.relname AS table_name,
ipg.relname AS index_name,
pg_size_pretty(pg_relation_size(quote_ident(indexrelname)::text)) AS index_size
FROM pg_index x
JOIN pg_class c ON c.oid = x.indrelid
JOIN pg_class ipg ON ipg.oid = x.indexrelid
JOIN pg_stat_all_indexes psai ON x.indexrelid = psai.indexrelid AND psai.schemaname = 'public'
ORDER BY pg_relation_size(quote_ident(indexrelname)::text) desc nulls last
LIMIT 10;
select tablename as table_name
from pg_tables
where
schemaname = 'public'::text and
tablename not in (
select c.conrelid::regclass::text as table_name
from pg_constraint c
where contype = 'p') and
tablename not in ('databasechangelog')
order by tablename;
select c.conrelid::regclass as table_name, string_agg(col.attname, ', ' order by u.attposition) as columns,
c.conname as constraint_name, pg_get_constraintdef(c.oid) as definition
from pg_constraint c
join lateral unnest(c.conkey) with ordinality as u(attnum, attposition) on true
join pg_class t on (c.conrelid = t.oid)
join pg_attribute col on (col.attrelid = t.oid and col.attnum = u.attnum)
where contype = 'p'
group by c.conrelid, c.conname, c.oid
order by (c.conrelid::regclass)::text, columns;
select data.id, case when data.id % 2 = 0 then now()::text else null end, case when data.id % 2 = 0 then 'test_string'::text else null end
from generate_series(1, 100) as data(id);
create extension if not exists pg_stat_statements;
select * from pg_stat_statements where calls > 10 order by mean_time desc limit 20;
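-- note: on PostgreSQL 13+ the pg_stat_statements timing columns were renamed,
-- so the equivalent query there orders by mean_exec_time instead of mean_time:
select * from pg_stat_statements where calls > 10 order by mean_exec_time desc limit 20;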
show wal_level;
select case when pg_is_in_recovery() then 'secondary' else 'primary' end as host_status;
select case when (g.idx % 2 = 0) then null else lpad(g.idx::text, 20, '0') end
from generate_series(1, 100) as g (idx);
set search_path to public;
--show temp_file_limit;
--set temp_file_limit = '1 MB';
--set temp_file_limit = '10 MB';
--set temp_file_limit = '100 MB';
--show maintenance_work_mem;
--set maintenance_work_mem = '1 MB';
--set maintenance_work_mem = '1 GB';
drop index concurrently if exists idx_ref_without_nulls;
create index concurrently if not exists idx_ref_without_nulls on test (ref) where ref is not null;
explain (analyze, buffers) select * from employee e;
show shared_buffers;
show max_wal_size;
show work_mem;
show maintenance_work_mem;
show autovacuum_work_mem;
show autovacuum_max_workers;
-- set maintenance_work_mem = '256MB';
show temp_file_limit;
show log_min_duration_statement;
show log_destination;
show logging_collector;
show log_directory;
show log_filename;
show log_file_mode;
show log_rotation_age;
show log_rotation_size;
show log_statement;
show log_temp_files;
create table if not exists test
(
id bigserial primary key,
fld varchar(255),
mark varchar(255),
nil varchar(255)
);
insert into test
select data.id, case when data.id % 2 = 0 then now()::text else null end, case when data.id % 2 = 0 then 'test_string'::text else null end, null
from generate_series(1, 100000) as data(id);
create index if not exists i_test_fld_with_nulls on test (fld);
create index if not exists i_test_fld_without_nulls on test (fld) where fld is not null;
create index if not exists i_test_mark_with_nulls on test (mark);
create index if not exists i_test_mark_without_nulls on test (mark) where mark is not null;
create index if not exists i_test_nil_with_nulls on test (nil);
create index if not exists i_test_nil_without_nulls on test (nil) where nil is not null;
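-- follow-up sketch: compare the on-disk sizes of the full vs. partial indexes created above;
-- the partial variants on the sparse columns (mark, nil) should come out much smaller
select relname, pg_size_pretty(pg_relation_size(oid))
from pg_class
where relname like 'i_test_%'
order by relname;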
--==========================================================================================
--liquibase formatted sql
--changeset gary.stafford:elections-sql splitStatements:false dbms:postgresql
------------------------------------------------------
-- vote_totals view
------------------------------------------------------
-- View a total of votes, by election, by candidate
CREATE OR REPLACE VIEW vote_totals AS
SELECT
ROW_NUMBER()
OVER (
ORDER BY cbe.election ) AS id,
cbe.election,
CONCAT(cbe.last_name, ', ', cbe.first_name) AS "candidate",
COUNT(cbe.last_name) AS votes
FROM vote, candidates_by_elections cbe
WHERE (vote.election_candidate_id = cbe.id)
GROUP BY cbe.election, cbe.last_name, cbe.first_name
ORDER BY cbe.election, cbe.last_name, cbe.first_name;
------------------------------------------------------
-- generate_random_votes function
------------------------------------------------------
-- generate a random number of votes for all election candidates
CREATE OR REPLACE FUNCTION generate_random_votes(n INTEGER DEFAULT 100)
RETURNS VOID
LANGUAGE plpgsql
AS $$
BEGIN
FOR counter IN 1..n LOOP
INSERT INTO vote (election_candidate_id) VALUES (
(SELECT id
FROM election_candidate
OFFSET floor(random() * (
SELECT COUNT(*)
FROM election_candidate))
LIMIT 1)
);
END LOOP;
END;
$$;
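-- usage sketch for the function above: insert 500 random votes
SELECT generate_random_votes(500);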
------------------------------------------------------
-- generate_votes function
------------------------------------------------------
-- generate a random number of votes within a range, for a specific election candidate
CREATE OR REPLACE FUNCTION generate_votes(minVotes INTEGER DEFAULT 100,
maxVotes INTEGER DEFAULT 500,
electionTitle VARCHAR(100) DEFAULT 'NULL',
lastNameCandidate VARCHAR(50) DEFAULT 'NULL')
RETURNS VOID
LANGUAGE plpgsql
AS $$
BEGIN
FOR counter IN 1..(Cast(RANDOM() * (maxVotes - minVotes) + minVotes AS INT)) LOOP
INSERT INTO vote (election_candidate_id)
VALUES ((SELECT Id
FROM candidates_by_elections
WHERE (election LIKE electionTitle) AND (last_name LIKE lastNameCandidate)
));
END LOOP;
END;
$$;
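-- usage sketch for the function above; the election title and candidate last name are
-- illustrative placeholders that must match rows in candidates_by_elections
SELECT generate_votes(100, 500, 'some election title', 'some last name');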
--==========================================================================================
SELECT e.department_id,
ROW_NUMBER() OVER (ORDER BY e.department_id ) AS id from employee e;
SELECT *
FROM employee
OFFSET floor(random() * (
SELECT COUNT(*)
FROM employee))
LIMIT 1;
CREATE TABLE IF NOT EXISTS migrations (
id integer PRIMARY KEY,
name varchar(100) UNIQUE NOT NULL,
hash varchar(40) NOT NULL, -- sha1 hex encoded hash of the file name and contents, to ensure it hasn't been altered since applying the migration
executed_at timestamp DEFAULT current_timestamp
);
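-- hypothetical usage sketch: record an applied migration; the name and hash values are
-- illustrative only (the hash is the sha1 of the file name and contents, per the comment above)
INSERT INTO migrations (id, name, hash)
VALUES (1, '0001_init.sql', 'da39a3ee5e6b4b0d3255bfef95601890afd80709');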
SET statement_timeout = 0;
SET lock_timeout = 0;
SET idle_in_transaction_session_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = on;
SELECT pg_catalog.set_config('search_path', '', false);
SET check_function_bodies = false;
SET xmloption = content;
SET client_min_messages = warning;
SET row_security = off;
SET default_tablespace = '';
SET default_table_access_method = heap;
CREATE UNIQUE INDEX states_state_idx ON public.states USING btree (state);
CREATE SEQUENCE public.serial_pk_pk_seq
AS integer
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
ALTER SEQUENCE public.serial_pk_pk_seq OWNED BY public.serial_pk.pk;
ALTER TABLE ONLY public.serial_pk ALTER COLUMN pk SET DEFAULT nextval('public.serial_pk_pk_seq'::regclass);
SELECT pg_catalog.setval('public.serial_pk_pk_seq', 2, true);
--==========================================================================================
SELECT 1 as result
FROM employee e
HAVING MIN(e.id) < MAX(e.id);
SELECT COUNT(e.id) as count
FROM employee e
HAVING COUNT(e.id) < 100;
SELECT max(e.id) as count
FROM employee e
WHERE e.id < 100;
--==========================================================================================
-- transitive-closure sketch in SQL/PSM (HSQLDB-style) syntax; the temporary table is
-- created outside the procedure body, and the parameter is named p_nbr so it does not
-- shadow the nbr column. Assumes test2 is an adjacency list with columns (nbr, parent).
create local temporary table workingtree
    (like test2)
    on commit delete rows;
create procedure test(in p_nbr integer)
language sql
deterministic
begin atomic
    declare prior_size integer;
    declare curr_size integer;
    delete from workingtree;
    insert into workingtree select * from test2 where nbr = p_nbr;
    set curr_size = (select count(*) from workingtree);
    set prior_size = 0;
    while prior_size < curr_size do
        set prior_size = curr_size;
        -- pull in children of nodes already in the working set
        insert into workingtree
            select * from test2
            where parent in (select nbr from workingtree)
              and nbr not in (select nbr from workingtree);
        set curr_size = (select count(*) from workingtree);
    end while;
end;
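-- usage sketch: within one transaction (the temp table clears its rows on commit),
-- collect the closure rooted at node 1 and inspect it
call test(1);
select * from workingtree;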
--==========================================================================================
select name from table where name in (select name from table2 where table2.name = table.name);
select id from table order by id desc limit 1;
select top 1 id from table order by id desc;
select name from analysis a join orders o on a.id = o.id
where date between '2020-02-05' and '2020-02-05'::TIMESTAMP + INTERVAL '1 week';
create unlogged table name();
select pg_database_size(current_database());
while (select avg(price) from table) < 200
begin
    update table set price = price * 2;
    if (select max(price) from table) > 500
        break;
end
select * from table1 where a = x union all select * from table2 where b = y and a != x;
select * from table1 t1 inner join table2 t2 on 1 = t2.id;
alter table name auto_increment = 1;
drop table name; create table name();
select * from table where id % 2 = 0;
select * from table where id % 2 != 0;
select name from customers c join (select id from orders group by id having count(orderid) > 2) o
on c.id = o.id
where c.city = 'test';
select value = any('{1, 2, 3}'::int[]);
--==========================================================================================
select code,
    lag(code) over(order by code) prev_code,
    lead(code) over(order by code) next_code
from table;
select * from worker where worker_id <= (select count(worker_id) / 2 from worker);
select id, name, row_number() over (order by id desc) from products;
select dense_rank() over w as rank,
name, department, salary
from employees
window w as (order by salary desc)
order by rank, id;
with salaries_cte as (
    select salary, row_number() over (order by salary desc) as row_num from employees
)
select salary from salaries_cte where row_num = 5;
select exists(
select from pg_catalog.pg_class c join pg_catalog.pg_namespace n on n.oid = c.relnamespace
where n.nspname = 'schema_name'
and c.relname = 'table_name'
and c.relkind = 'r'
);
--==========================================================================================
select id, val from table1 t1 left join table2 t2 on t1.val = t2.val and t1.id > t2.id;
select * from table where '{value}' = any(array);
select * from table where array && '{value1, value2, value3}';
select name, score, dense_rank() over(order by score desc) as rank from table;
select name, salary from employees order by salary desc limit 1 offset 2;
select * from (select name, salary, dense_rank() over(order by salary desc) as rank from employees) subquery where rank = n;
select name, salary from employees order by salary desc limit 1 offset (n - 1);
cluster table1;
alter table table1 set without cluster;
CREATE TABLE test.cluster_table
(id INTEGER,
name VARCHAR) WITH (FILLFACTOR = 90);
CREATE INDEX id_idx ON test.cluster_table (id);
CLUSTER VERBOSE test.cluster_table USING id_idx;
CREATE TABLE test.cluster_table
(id INTEGER,
name VARCHAR) WITH (FILLFACTOR = 90);
CREATE INDEX id_idx ON test.cluster_table (id);
INSERT INTO test.cluster_table
SELECT (random( )*100)::INTEGER,
'test'
FROM generate_series(1,100) AS g(i);
SELECT id
FROM test.cluster_table;
SELECT c.oid AS "OID",
c.relname AS "Relation name"
FROM pg_class c INNER JOIN pg_index i ON i.indrelid = c.oid
WHERE c.relkind = 'r' AND
c.relhasindex AND
i.indisclustered;
CREATE EXTENSION file_fdw;
CREATE SERVER csv_log FOREIGN DATA WRAPPER file_fdw;
CREATE FOREIGN TABLE test.csv (
id INTEGER,
name VARCHAR
) SERVER csv_log
OPTIONS (filename '/var/lib/postgresql/file.csv',
delimiter ';', format 'csv');
SELECT oid AS "OID",
pg_relation_filepath(oid) AS "File path",
pg_relation_size(oid) AS "Relation Size"
FROM pg_class
WHERE relname = 'csv';
CREATE EXTENSION postgres_fdw;
DROP FOREIGN TABLE test.csv;
CREATE SERVER pg_log FOREIGN DATA WRAPPER postgres_fdw
OPTIONS (host '192.168.56.10', port '5432', dbname 'course_db');
CREATE USER MAPPING FOR test SERVER pg_log
OPTIONS (user 'test', password 'test');
CREATE FOREIGN TABLE test.csv (
id INTEGER,
name VARCHAR
) SERVER pg_log
OPTIONS (schema_name 'test', table_name 'user');
SELECT oid AS "OID",
relname AS "Relation name",
CASE
WHEN relpersistence = 'p' THEN 'Permanent'
WHEN relpersistence = 't' THEN 'Temporary'
ELSE 'Unlogged'
END AS "Type",
relkind AS "Subtype"
FROM pg_class
WHERE relname = 'csv';
INSERT INTO test.hash (SELECT generate_series(0, 200000));
CREATE EXTENSION pg_repack;
-- note: repacking is driven by the pg_repack command-line client rather than a SQL call, e.g.:
-- pg_repack --table=public.{table_name} dbname
--==========================================================================================
select
    sum(case when allergies = 'Penicillin' and city = 'Burlington' then 1 else 0 end) as allergies_burl
    , sum(case when allergies = 'Penicillin' and city = 'Oakville' then 1 else 0 end) as allergies_oak
from patients;
select
    *
FROM patients
WHERE TRUE
    and 1 = (case when allergies = 'Penicillin' and city = 'Burlington' then 1 else 0 end);
SELECT *
FROM Customers
WHERE EXISTS (
SELECT *
FROM Orders
WHERE Orders.CustomerID = Customers.CustomerID
AND Orders.OrderDate > '2021-01-01'
);
--==========================================================================================
copy(select * from table1)
to 'table1.csv'
with (FORMAT csv, header, delimiter ';');
copy kino
from 'table1.csv'
with (format csv, header, delimiter ';', encoding 'win1251');
create extension mvcc_tuples;
create extension pageinspect;
select lower, upper, special, pagesize from page_header(get_raw_page('pg_class', 0));
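-- pageinspect can also expose per-tuple MVCC fields; a sketch over the same page:
select lp, t_xmin, t_xmax, t_ctid from heap_page_items(get_raw_page('pg_class', 0));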
--==========================================================================================
select coalesce(sum(column1), 0) from table where column2 = 'test';
--==========================================================================================
select timediff(
(select update_time from information_schema.tables where table_schema='employees' and table_name='salaries'),
(select create_time from information_schema.tables where table_schema='employees' and table_name='employees')
) as data_load_time_diff;
--==========================================================================================
-- check for FKs where there is no matching index
-- on the referencing side
-- or a bad index
WITH fk_actions ( code, action ) AS (
VALUES ( 'a', 'error' ),
( 'r', 'restrict' ),
( 'c', 'cascade' ),
( 'n', 'set null' ),
( 'd', 'set default' )
),
fk_list AS (
SELECT pg_constraint.oid as fkoid, conrelid, confrelid as parentid,
conname, relname, nspname,
fk_actions_update.action as update_action,
fk_actions_delete.action as delete_action,
conkey as key_cols
FROM pg_constraint
JOIN pg_class ON conrelid = pg_class.oid
JOIN pg_namespace ON pg_class.relnamespace = pg_namespace.oid
JOIN fk_actions AS fk_actions_update ON confupdtype = fk_actions_update.code
JOIN fk_actions AS fk_actions_delete ON confdeltype = fk_actions_delete.code
WHERE contype = 'f'
),
fk_attributes AS (
SELECT fkoid, conrelid, attname, attnum
FROM fk_list
JOIN pg_attribute
ON conrelid = attrelid
AND attnum = ANY( key_cols )
ORDER BY fkoid, attnum
),
fk_cols_list AS (
SELECT fkoid, array_agg(attname) as cols_list
FROM fk_attributes
GROUP BY fkoid
),
index_list AS (
SELECT indexrelid as indexid,
pg_class.relname as indexname,
indrelid,
indkey,
indpred is not null as has_predicate,
pg_get_indexdef(indexrelid) as indexdef
FROM pg_index
JOIN pg_class ON indexrelid = pg_class.oid
WHERE indisvalid
),
fk_index_match AS (
SELECT fk_list.*,
indexid,
indexname,
indkey::int[] as indexatts,
has_predicate,
indexdef,
array_length(key_cols, 1) as fk_colcount,
array_length(indkey,1) as index_colcount,
round(pg_relation_size(conrelid)/(1024^2)::numeric) as table_mb,
cols_list
FROM fk_list
JOIN fk_cols_list USING (fkoid)
LEFT OUTER JOIN index_list
ON conrelid = indrelid
AND (indkey::int2[])[0:(array_length(key_cols,1) -1)] @> key_cols
),
fk_perfect_match AS (
SELECT fkoid
FROM fk_index_match
WHERE (index_colcount - 1) <= fk_colcount
AND NOT has_predicate
AND indexdef LIKE '%USING btree%'
),
fk_index_check AS (
SELECT 'no index' as issue, *, 1 as issue_sort
FROM fk_index_match
WHERE indexid IS NULL
UNION ALL
SELECT 'questionable index' as issue, *, 2
FROM fk_index_match
WHERE indexid IS NOT NULL
AND fkoid NOT IN (
SELECT fkoid
FROM fk_perfect_match)
),
parent_table_stats AS (
SELECT fkoid, tabstats.relname as parent_name,
(n_tup_ins + n_tup_upd + n_tup_del + n_tup_hot_upd) as parent_writes,
round(pg_relation_size(parentid)/(1024^2)::numeric) as parent_mb
FROM pg_stat_user_tables AS tabstats
JOIN fk_list
ON relid = parentid
),
fk_table_stats AS (
SELECT fkoid,
(n_tup_ins + n_tup_upd + n_tup_del + n_tup_hot_upd) as writes,
seq_scan as table_scans
FROM pg_stat_user_tables AS tabstats
JOIN fk_list
ON relid = conrelid
)
SELECT nspname as schema_name,
relname as table_name,
conname as fk_name,
issue,
table_mb,
writes,
table_scans,
parent_name,
parent_mb,
parent_writes,
cols_list,
indexdef
FROM fk_index_check
JOIN parent_table_stats USING (fkoid)
JOIN fk_table_stats USING (fkoid)
WHERE table_mb > 9
AND ( writes > 1000
OR parent_writes > 1000
OR parent_mb > 10 )
ORDER BY issue_sort, table_mb DESC, table_name, fk_name;
--==========================================================================================
select
n.nspname as "Schema"
,t.relname as "Table"
,c.relname as "Index"
from
pg_catalog.pg_class c
join pg_catalog.pg_namespace n on n.oid = c.relnamespace
join pg_catalog.pg_index i on i.indexrelid = c.oid
join pg_catalog.pg_class t on i.indrelid = t.oid
where
c.relkind = 'i'
and n.nspname not in ('pg_catalog', 'pg_toast')
--and pg_catalog.pg_table_is_visible(c.oid)
order by
n.nspname
,t.relname
 ,c.relname;
--==========================================================================================
--
-- function: missing_fk_indexes
-- purpose: List all foreign keys in the database without an index on the referencing table.
-- author: Based on the work of Laurenz Albe
-- see: https://www.cybertec-postgresql.com/en/index-your-foreign-key/
--
create or replace function missing_fk_indexes ()
returns table (
referencing_table regclass,
fk_columns varchar,
table_size varchar,
fk_constraint name,
referenced_table regclass
)
language sql as $$
select
-- referencing table having a foreign key declaration
tc.conrelid::regclass as referencing_table,
-- ordered list of foreign key columns
string_agg(ta.attname, ', ' order by tx.n) as fk_columns,
-- referencing table size
pg_catalog.pg_size_pretty (
pg_catalog.pg_relation_size(tc.conrelid)
) as table_size,
-- name of the foreign key constraint
tc.conname as fk_constraint,
-- name of the target or destination table
tc.confrelid::regclass as referenced_table
from pg_catalog.pg_constraint tc
-- enumerated key column numbers per foreign key
cross join lateral unnest(tc.conkey) with ordinality as tx(attnum, n)
-- name for each key column
join pg_catalog.pg_attribute ta on ta.attnum = tx.attnum and ta.attrelid = tc.conrelid
where not exists (
-- is there a matching index for the constraint?
select 1 from pg_catalog.pg_index i
where
i.indrelid = tc.conrelid and
-- the first index columns must be the same as the key columns, but order doesn't matter
(i.indkey::smallint[])[0:cardinality(tc.conkey)-1] @> tc.conkey) and
tc.contype = 'f'
group by
tc.conrelid,
tc.conname,
tc.confrelid
order by
pg_catalog.pg_relation_size(tc.conrelid) desc
$$;
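-- usage sketch for the function above
select * from missing_fk_indexes();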
--==========================================================================================
--
-- function: missing_fk_indexes2
-- purpose: List all foreign keys in the database without an index on the referencing table.
--          The listing contains CREATE INDEX statements
-- author: Based on the work of Laurenz Albe
-- see: https://www.cybertec-postgresql.com/en/index-your-foreign-key/
--
create or replace function missing_fk_indexes2 ()
returns setof varchar
language sql as $$
select
-- create index sentence
'create index on ' ||
tc.conrelid::regclass ||
'(' ||
string_agg(ta.attname, ', ' order by tx.n) ||
')' as create_index
from pg_catalog.pg_constraint tc
-- enumerated key column numbers per foreign key
cross join lateral unnest(tc.conkey) with ordinality as tx(attnum, n)
-- name for each key column
join pg_catalog.pg_attribute ta on ta.attnum = tx.attnum and ta.attrelid = tc.conrelid
where not exists (
-- is there a matching index for the constraint?
select 1 from pg_catalog.pg_index i
where
i.indrelid = tc.conrelid and
-- the first index columns must be the same as the key columns, but order doesn't matter
(i.indkey::smallint[])[0:cardinality(tc.conkey)-1] @> tc.conkey) and
tc.contype = 'f'
group by
tc.conrelid,
tc.conname,
tc.confrelid
order by
pg_catalog.pg_relation_size(tc.conrelid) desc
$$;
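-- usage sketch: emits one CREATE INDEX statement per unindexed foreign key
select * from missing_fk_indexes2();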
--==========================================================================================
SELECT
indexname,
indexdef
FROM
pg_indexes
WHERE
tablename = 'table_name';
--==========================================================================================
WITH test(x) AS (
VALUES (''), ('.'), ('.0'), ('0.'), ('0'), ('1'), ('123'),
('123.456'), ('abc'), ('1..2'), ('1.2.3.4'), ('1x234'), ('1.234e-5'))
SELECT x
, x ~ '^([0-9]+[.]?[0-9]*|[.][0-9]+)$' AS isnumeric
FROM test;
--==========================================================================================
CREATE OR REPLACE FUNCTION isnumeric(text) RETURNS BOOLEAN AS $$
DECLARE x NUMERIC;
BEGIN
x = $1::NUMERIC;
RETURN TRUE;
EXCEPTION WHEN others THEN
RETURN FALSE;
END;
$$
STRICT
LANGUAGE plpgsql IMMUTABLE;
--==========================================================================================
WITH test(x) AS ( VALUES (''), ('.'), ('.0'), ('0.'), ('0'), ('1'), ('123'),
('123.456'), ('abc'), ('1..2'), ('1.2.3.4'), ('1x234'), ('1.234e-5'))
SELECT x, isnumeric(x) FROM test;
--==========================================================================================
SELECT m.title, SUM(m.body::numeric)
FROM messages as m
WHERE jsonb_typeof(m.body) = 'number'
GROUP BY m.title;
--==========================================================================================
create function isnumeric(text) returns boolean
immutable
language plpgsql
as $$
begin
if $1 is not null then
return (select $1 ~ '^(([-+]?[0-9]+(\.[0-9]+)?)|([-+]?\.[0-9]+))$');
else
return false;
end if;
end;
$$
;
--==========================================================================================
SELECT id FROM mytable
WHERE message ~ '[АаБбВвГгДдЕеЁёЖжЗзИиЙйКкЛлМмНнОоПпРрСсТтУуФфХхЦцЧчШшЩщЪъЫыЬьЭэЮюЯя]';
--==========================================================================================
create table charfreq (
c text,
f float,
lang text
);
insert into charfreq values
('а', 8.04, 'ru'),
('б', 1.55, 'ru'),
('в', 4.75, 'ru'),
...
('a', 8.167, 'en'),
('b', 1.492, 'en'),
('c', 2.782, 'en'),
...
('ï', 0.005, 'fr'),
('ô', 0.023, 'fr'),
('ù', 0.058, 'fr'),
('û', 0.06 , 'fr');
insert into test values
(1, 'hi'),
(2, 'ok'),
(3, 'துய'),
(4, 'нет'),
(5, 'été'); -- a French message, just for fun
select id, message, lang, score
from (
select *, row_number() OVER (partition by id, message order by score desc) as rownum
from (
select id, message, lang, coalesce(sum(f), 0)/length(message) as score
from (
select *, unnest(STRING_TO_ARRAY(message, NULL)) as c from test
) as a
left join charfreq b
using (c)
group by 1,2,3
) as a
) as a
where rownum = 1;
--==========================================================================================
CREATE TABLE MESSAGE (
Id INTEGER PRIMARY KEY,
MESSAGE VARCHAR (50)
);
INSERT INTO MESSAGE VALUES (1, 'hi');                 -- False
INSERT INTO MESSAGE VALUES (2, 'ok');                 -- False
INSERT INTO MESSAGE VALUES (3, 'துய');                -- False
INSERT INTO MESSAGE VALUES (4, 'нет');                -- True
INSERT INTO MESSAGE VALUES (5, 'нет-_*/?/()=.,123 '); -- True
INSERT INTO MESSAGE VALUES (6, 'нет 123');            -- True
INSERT INTO MESSAGE VALUES (7, 'нет 123AAAA');        -- False
SELECT * FROM message m
WHERE
ARRAY(SELECT ASCII(unnest(STRING_TO_ARRAY(REGEXP_REPLACE(m.message, '[^[:alnum:]]+', '', 'g'), NULL)))) <@
ARRAY(SELECT ASCII(unnest(STRING_TO_ARRAY('АаБбВвГгДдЕеЁёЖжЗзИиЙйКкЛлМмНнОоПпРрСсТтУуФфХхЦцЧчШшЩщЪъЫыЬьЭэЮюЯя0123456789', NULL))));
--==========================================================================================
SELECT ns.nspname AS schema
, class.relname AS "table"
, con.conname AS "constraint"
, con.condeferrable AS "deferrable"
, con.condeferred AS deferred
FROM pg_constraint con
INNER JOIN pg_class class ON class.oid = con.conrelid
INNER JOIN pg_namespace ns ON ns.oid = class.relnamespace
WHERE con.contype IN ('p', 'u')
AND ns.nspname != 'pg_catalog'
ORDER BY 1, 2, 3;
--==========================================================================================
CREATE TABLE `products` (
`product_id` bigint(20) NOT NULL,
`product_name` varchar(100) NOT NULL,
`price` decimal(16, 2) NOT NULL,
`brand_id` int(11) NOT NULL,
PRIMARY KEY (`product_id`),
CONSTRAINT `fk_brand_id` FOREIGN KEY (`brand_id`)
REFERENCES `brands` (`brand_id`)
);
--==========================================================================================
CREATE TABLE `products` (
`product_id` bigint(20) NOT NULL,
`product_name` varchar(100) NOT NULL,
`price` decimal(16, 2) NOT NULL,
`brand_id` int(11) NOT NULL,
/* Note : `extra` column store extra information and is declared as JSON data type column. */
`extra` JSON NOT NULL,
PRIMARY KEY (`product_id`),
CONSTRAINT `fk_brand_id` FOREIGN KEY (`brand_id`) REFERENCES `brands` (`brand_id`)
);
SELECT product_id, product_name, price, extra
FROM products
WHERE extra ->> '$.size' = 'L'
LIMIT 5;
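-- optional follow-up sketch (MySQL 5.7+): the JSON lookup above forces a table scan; it can
-- be served by an index on a stored generated column (column and index names are illustrative)
ALTER TABLE `products`
    ADD COLUMN `size` VARCHAR(10) GENERATED ALWAYS AS (`extra` ->> '$.size') STORED,
    ADD INDEX `idx_size` (`size`);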
--==========================================================================================
SELECT
product_id, product_name, price, category_code
FROM products
WHERE
brand_id = (SELECT brand_id FROM brands WHERE brand_name = 'nike')
ORDER BY price DESC
LIMIT 10;
--==========================================================================================
-- It is a particularly resource-intensive analytical query.
-- The events table generates an average of 1.8 million records per day.
WITH top_merchandises AS (
SELECT
product_id,
COUNT(*) AS viewed
FROM events
WHERE event_type = 'view'
AND event_time >= '2019-11-30 17:59:59'
AND event_time <= '2019-11-30 23:59:59'
GROUP BY product_id
ORDER BY viewed DESC
LIMIT 10
)
SELECT
products.product_id,
products.product_name,
top_merchandises.viewed
FROM top_merchandises
JOIN products ON top_merchandises.product_id = products.product_id
ORDER BY viewed DESC;
--==========================================================================================
-- It is a particularly resource-intensive analytical query.
-- The events table generates an average of 1.8 million records per day.
SELECT (
COUNT(CASE WHEN event_type = 'purchase' THEN 1 ELSE NULL END) /
COUNT(CASE WHEN event_type = 'view' THEN 1 ELSE NULL END)
) AS bought_rate
FROM events
WHERE event_time >= '2019-11-30 17:59:59'
AND event_time <= '2019-11-30 23:59:59';
--==========================================================================================
-- It is a particularly resource-intensive analytical query.
-- The events table generates an average of 1.8 million records per day.
WITH top_sellers AS (
SELECT
product_id,
SUM(price) AS sold
FROM events
WHERE event_type = 'purchase'
AND event_time >= '2019-11-30 11:59:59'
AND event_time <= '2019-11-30 23:59:59'
GROUP BY product_id
ORDER BY sold DESC
LIMIT 10
)
SELECT
products.product_id,
products.product_name,
top_sellers.sold
FROM top_sellers
JOIN products ON top_sellers.product_id = products.product_id
ORDER BY sold DESC;
--==========================================================================================
-- SET search_path TO TPCC;
-- Condition 1: W_YTD = sum(D_YTD)
SELECT * FROM (SELECT w.w_id, w.w_ytd, d.sum_d_ytd
FROM bmsql_warehouse w,
(SELECT d_w_id, SUM(d_ytd) sum_d_ytd
FROM bmsql_district
GROUP BY d_w_id) d
WHERE w.w_id = d.d_w_id) as x
WHERE w_ytd != sum_d_ytd;
-- Condition 2: D_NEXT_O_ID - 1 = max(O_ID) = max(NO_O_ID)
SELECT * FROM (SELECT d.d_w_id, d.d_id, d.d_next_o_id, o.max_o_id, no.max_no_o_id
FROM bmsql_district d,
(SELECT o_w_id, o_d_id, MAX(o_id) max_o_id
FROM bmsql_oorder
GROUP BY o_w_id, o_d_id) o,
(SELECT no_w_id, no_d_id, MAX(no_o_id) max_no_o_id
FROM bmsql_new_order
GROUP BY no_w_id, no_d_id) no
WHERE d.d_w_id = o.o_w_id AND d.d_w_id = no.no_w_id AND
d.d_id = o.o_d_id AND d.d_id = no.no_d_id) as x
WHERE d_next_o_id - 1 != max_o_id OR d_next_o_id - 1 != max_no_o_id;
-- Condition 3: max(NO_O_ID) - min(NO_O_ID) + 1
-- = [number of rows in the NEW-ORDER table for this bmsql_district]
SELECT * FROM (SELECT no_w_id, no_d_id, MAX(no_o_id) max_no_o_id,
MIN(no_o_id) min_no_o_id, COUNT(*) count_no
FROM bmsql_new_order
GROUP BY no_w_id, no_d_Id) as x
WHERE max_no_o_id - min_no_o_id + 1 != count_no;
-- Condition 4: sum(O_OL_CNT)
-- = [number of rows in the ORDER-LINE table for this bmsql_district]
SELECT * FROM (SELECT o.o_w_id, o.o_d_id, o.sum_o_ol_cnt, ol.count_ol
FROM (SELECT o_w_id, o_d_id, SUM(o_ol_cnt) sum_o_ol_cnt
FROM bmsql_oorder
GROUP BY o_w_id, o_d_id) o,
(SELECT ol_w_id, ol_d_id, COUNT(*) count_ol
FROM bmsql_order_line
GROUP BY ol_w_id, ol_d_id) ol
WHERE o.o_w_id = ol.ol_w_id AND
o.o_d_id = ol.ol_d_id) as x
WHERE sum_o_ol_cnt != count_ol;
-- Condition 5: For any row in the ORDER table, O_CARRIER_ID is set to a null
-- value if and only if there is a corresponding row in the
-- NEW-ORDER table
SELECT * FROM (SELECT o.o_w_id, o.o_d_id, o.o_id, o.o_carrier_id, no.count_no
FROM bmsql_oorder o,
(SELECT no_w_id, no_d_id, no_o_id, COUNT(*) count_no
FROM bmsql_new_order
GROUP BY no_w_id, no_d_id, no_o_id) no
WHERE o.o_w_id = no.no_w_id AND
o.o_d_id = no.no_d_id AND
o.o_id = no.no_o_id) as x
WHERE (o_carrier_id IS NULL AND count_no = 0) OR
(o_carrier_id IS NOT NULL AND count_no != 0);
-- Condition 6: For any row in the ORDER table, O_OL_CNT must equal the number
-- of rows in the ORDER-LINE table for the corresponding order
SELECT * FROM (SELECT o.o_w_id, o.o_d_id, o.o_id, o.o_ol_cnt, ol.count_ol
FROM bmsql_oorder o,
(SELECT ol_w_id, ol_d_id, ol_o_id, COUNT(*) count_ol
FROM bmsql_order_line
GROUP BY ol_w_id, ol_d_id, ol_o_id) ol
WHERE o.o_w_id = ol.ol_w_id AND
o.o_d_id = ol.ol_d_id AND
o.o_id = ol.ol_o_id) as x
WHERE o_ol_cnt != count_ol;
-- Condition 7: For any row in the ORDER-LINE table, OL_DELIVERY_D is set to
-- a null date/time if and only if the corresponding row in the
-- ORDER table has O_CARRIER_ID set to a null value
SELECT * FROM (SELECT ol.ol_w_id, ol.ol_d_id, ol.ol_o_id, ol.ol_delivery_d,
o.o_carrier_id
FROM bmsql_order_line ol,
bmsql_oorder o
WHERE ol.ol_w_id = o.o_w_id AND
ol.ol_d_id = o.o_d_id AND
ol.ol_o_id = o.o_id) as x
WHERE (ol_delivery_d IS NULL AND o_carrier_id IS NOT NULL) OR
(ol_delivery_d IS NOT NULL AND o_carrier_id IS NULL);
-- Condition 8: W_YTD = sum(H_AMOUNT)
SELECT *
FROM (SELECT w.w_id, w.w_ytd, h.sum_h_amount
FROM bmsql_warehouse w,
(SELECT h_w_id, SUM(h_amount) sum_h_amount FROM bmsql_history GROUP BY h_w_id) h
WHERE w.w_id = h.h_w_id) as x
WHERE w_ytd != sum_h_amount;
-- Condition 9: D_YTD = sum(H_AMOUNT)
SELECT *
FROM (SELECT d.d_w_id, d.d_id, d.d_ytd, h.sum_h_amount
FROM bmsql_district d,
(SELECT h_w_id, h_d_id, SUM(h_amount) sum_h_amount
FROM bmsql_history
GROUP BY h_w_id, h_d_id) h
WHERE d.d_w_id = h.h_w_id
AND d.d_id = h.h_d_id) as x
WHERE d_ytd != sum_h_amount;
--==========================================================================================
-- ----
-- Extra Schema objects/definitions for history.hist_id in PostgreSQL
-- ----
-- ----
-- This is an extra column not present in the TPC-C
-- specs. It is useful for replication systems like
-- Bucardo and Slony-I, which like to have a primary
-- key on a table. It is an auto-increment or serial
-- column type. The statements below implement it for
-- PostgreSQL, using a sequence as the column default.
-- ----
-- Adjust the sequence above the current max(hist_id)
select setval('bmsql_hist_id_seq', (select max(hist_id) from bmsql_history));
-- Make nextval(seq) the default value of the hist_id column.
alter table bmsql_history
alter column hist_id set default nextval('bmsql_hist_id_seq');
-- Add a primary key history(hist_id)
alter table bmsql_history add primary key (hist_id);
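-- Assumed prerequisite (a sketch; the original script defines these earlier):
-- the hist_id column and its sequence must already exist for the steps above.
-- alter table bmsql_history add column hist_id integer;
-- create sequence bmsql_hist_id_seq;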
--==========================================================================================
copy bmsql_config
(cfg_name, cfg_value)
from '/tmp/csv/bmsql_config.csv' WITH CSV;
copy bmsql_warehouse
(w_id, w_ytd, w_tax, w_name, w_street_1, w_street_2, w_city, w_state, w_zip)
from '/tmp/csv/bmsql_warehouse.csv' WITH CSV;
copy bmsql_item
(i_id, i_name, i_price, i_data, i_im_id)
from '/tmp/csv/bmsql_item.csv' WITH CSV;
copy bmsql_stock
(s_i_id, s_w_id, s_quantity, s_ytd, s_order_cnt, s_remote_cnt, s_data,
s_dist_01, s_dist_02, s_dist_03, s_dist_04, s_dist_05,
s_dist_06, s_dist_07, s_dist_08, s_dist_09, s_dist_10)
from '/tmp/csv/bmsql_stock.csv' WITH CSV;
copy bmsql_district
(d_id, d_w_id, d_ytd, d_tax, d_next_o_id, d_name, d_street_1,
d_street_2, d_city, d_state, d_zip)
from '/tmp/csv/bmsql_district.csv' WITH CSV;
copy bmsql_customer
(c_id, c_d_id, c_w_id, c_discount, c_credit, c_last, c_first, c_credit_lim,
c_balance, c_ytd_payment, c_payment_cnt, c_delivery_cnt, c_street_1,
c_street_2, c_city, c_state, c_zip, c_phone, c_since, c_middle, c_data)
from '/tmp/csv/bmsql_customer.csv' WITH CSV;
copy bmsql_history
(hist_id, h_c_id, h_c_d_id, h_c_w_id, h_d_id, h_w_id, h_date, h_amount, h_data)
from '/tmp/csv/bmsql_history.csv' WITH CSV;
copy bmsql_oorder
(o_id, o_w_id, o_d_id, o_c_id, o_carrier_id, o_ol_cnt, o_all_local, o_entry_d)
from '/tmp/csv/bmsql_oorder.csv' WITH CSV NULL AS 'NULL';
copy bmsql_order_line
(ol_w_id, ol_d_id, ol_o_id, ol_number, ol_i_id, ol_delivery_d,
ol_amount, ol_supply_w_id, ol_quantity, ol_dist_info)
from '/tmp/csv/bmsql_order_line.csv' WITH CSV NULL AS 'NULL';
copy bmsql_new_order
(no_w_id, no_d_id, no_o_id)
from '/tmp/csv/bmsql_new_order.csv' WITH CSV;
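-- Note: server-side COPY reads files on the database host and requires superuser
-- or pg_read_server_files rights; from a client, psql's \copy takes the same shape:
-- \copy bmsql_config (cfg_name, cfg_value) from '/tmp/csv/bmsql_config.csv' with csv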
--==========================================================================================
# Can be used with
# mysql < check-placement.sql | grep -v us-east-1
# -or-
# mysql < check-placement.sql | grep us-east-1
use information_schema;
WITH store_index AS (SELECT store_id, substring(address, -1) as node_number, label->>"$[0].value" as aws_region from tikv_store_status)
SELECT
node_number as node, aws_region, is_leader, count(*) as c
FROM tikv_region_peers
INNER JOIN TIKV_REGION_STATUS USING (region_id)
INNER JOIN store_index USING (store_id)
WHERE db_name = 'test'
GROUP BY
  node_number, aws_region, is_leader
ORDER BY node_number;
--==========================================================================================
update users, prospect_users
set users.about = prospect_users.about
where prospect_users.username = users.username;
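-- The same cross-table update in PostgreSQL syntax (the form above is MySQL's
-- multi-table UPDATE):
update users
set about = prospect_users.about
from prospect_users
where prospect_users.username = users.username;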
--==========================================================================================
delete users
from users, prospect_users
where users.username = prospect_users.username
and NOT prospect_users.active;
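-- PostgreSQL equivalent of the MySQL multi-table DELETE above:
delete from users
using prospect_users
where users.username = prospect_users.username
and NOT prospect_users.active;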
--==========================================================================================
SELECT users.*, posts.*
FROM users
LEFT JOIN posts
ON posts.user_id = users.id
WHERE posts.title LIKE '%SQL%';
--==========================================================================================
SELECT users.*, posts.*
FROM users
LEFT JOIN posts
ON posts.user_id = users.id
AND posts.title LIKE '%SQL%';
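-- Note the difference from the previous query: a filter in WHERE discards users
-- without a matching post (effectively an INNER JOIN), while a filter in ON keeps
-- every user, with NULL post columns. Building on that, an anti-join finds users
-- who have no 'SQL' post at all:
SELECT users.*
FROM users
LEFT JOIN posts
ON posts.user_id = users.id
AND posts.title LIKE '%SQL%'
WHERE posts.user_id IS NULL;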
--==========================================================================================
SELECT users.*, posts.*
FROM posts
LEFT JOIN users
ON posts.user_id = users.id;
--==========================================================================================
SELECT users.*, posts.*
FROM users
RIGHT JOIN posts
ON posts.user_id = users.id;
--==========================================================================================
UPDATE CUSTOMERS
SET SALARY = SALARY * 0.25
WHERE AGE IN (
SELECT AGE
FROM CUSTOMERS_BKP
WHERE AGE >= 27
);
--==========================================================================================
DELETE FROM CUSTOMERS
WHERE AGE IN (
SELECT AGE
FROM CUSTOMERS_BKP
WHERE AGE >= 27
);
--==========================================================================================
SELECT id, name, amount, date
FROM customers
LEFT JOIN orders
ON customers.id = orders.customer_id
UNION
SELECT id, name, amount, date
FROM customers
RIGHT JOIN orders
ON customers.id = orders.customer_id;
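-- On engines that support it (e.g. PostgreSQL), the UNION emulation above is
-- simply a FULL OUTER JOIN:
SELECT id, name, amount, date
FROM customers
FULL OUTER JOIN orders
ON customers.id = orders.customer_id;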
--==========================================================================================
SELECT customer_name, SUM(price) AS Total_Purchase
FROM purchase
WHERE customer_name LIKE 'S%'
GROUP BY customer_name
HAVING SUM(price) > 1000;
--==========================================================================================
SELECT customer_name, AVG(price) AS Average_Purchase
FROM purchase
GROUP BY customer_name
HAVING AVG(price) > 550
ORDER BY customer_name DESC;
--==========================================================================================
SELECT
mz_catalog.mz_sources.name AS source_name,
source_id AS source_id,
count(*) AS error_count
FROM mz_internal.mz_source_status_history h
JOIN mz_catalog.mz_sources ON h.source_id = mz_catalog.mz_sources.id
WHERE h.error IS NOT NULL
GROUP BY 1, 2;
--==========================================================================================
select Description from Table_Name group by Description having max(len(Description)) > 27;
select tt.name, max(tt.length) from (select t.name, sum(len(t.content)) as length from table_name t group by t.name) as tt group by tt.name;
SELECT TOP 1 column_name, LEN(column_name) AS Length FROM table_name ORDER BY LEN(column_name) DESC;
select ID, [description], len([description]) as descriptionlength
FROM [database1].[dbo].[table1]
where len([description]) =
    (select max(len([description]))
    FROM [database1].[dbo].[table1]);
--==========================================================================================
CREATE POLICY post_owner_policy ON post
USING (owner = current_user);
CREATE POLICY post_read_policy ON post FOR SELECT
USING (published = true);
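-- Policies have no effect until row-level security is enabled on the table:
ALTER TABLE post ENABLE ROW LEVEL SECURITY;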
--==========================================================================================
CREATE FUNCTION add_them(a integer, b integer)
RETURNS integer AS $$
SELECT a + b;
$$ LANGUAGE SQL IMMUTABLE;
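-- Usage:
SELECT add_them(2, 3); -- 5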
--==========================================================================================
CREATE TABLE COMPANY7(
ID INT PRIMARY KEY NOT NULL,
NAME TEXT,
AGE INT ,
ADDRESS CHAR(50),
SALARY REAL,
EXCLUDE USING gist
(NAME WITH =,
AGE WITH <>)
);
INSERT INTO COMPANY7 VALUES(1, 'Paul', 32, 'California', 20000.00 );
INSERT INTO COMPANY7 VALUES(2, 'Paul', 32, 'Texas', 20000.00 );
INSERT INTO COMPANY7 VALUES(3, 'Paul', 42, 'California', 20000.00 );
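-- Note: a gist exclusion constraint over plain scalar = / <> comparisons requires
-- the btree_gist extension to be installed before creating the table:
--   CREATE EXTENSION btree_gist;
-- Rows 1 and 2 are accepted (same NAME, same AGE); row 3 is rejected, since it has
-- the same NAME with a different AGE.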
--==========================================================================================
CREATE TABLE COMPANY5(
ID INT PRIMARY KEY NOT NULL,
NAME TEXT NOT NULL,
AGE INT NOT NULL,
ADDRESS CHAR(50),
SALARY REAL CHECK(SALARY > 0)
);
--==========================================================================================
create schema rabbitmq;
create or replace function rabbitmq.send_message(channel text, routing_key text, message text) returns void as $$
select pg_notify(channel, routing_key || '|' || message);
$$ stable language sql;
create or replace function rabbitmq.on_row_change() returns trigger as $$
declare
routing_key text;
row record;
begin
  routing_key := 'row_change' ||
                 '.table-'::text || TG_TABLE_NAME::text ||
                 '.event-'::text || TG_OP::text;
if (TG_OP = 'DELETE') then
row := old;
elsif (TG_OP = 'UPDATE') then
row := new;
elsif (TG_OP = 'INSERT') then
row := new;
end if;
-- change 'events' to the desired channel/exchange name
perform rabbitmq.send_message('events', routing_key, row_to_json(row)::text);
return null;
end;
$$ stable language plpgsql;
--==========================================================================================
SELECT
CONCAT(e1.FirstName, ' ', e1.LastName) AS Employee,
CONCAT(e2.FirstName, ' ', e2.LastName) AS Manager
FROM Employees e1
LEFT JOIN Employees e2
ON e1.ReportsTo = e2.EmployeeId;
--==========================================================================================
-- models/customer_revenue_incremental.sql
{{ config(materialized='incremental', unique_key='customer_id') }}
WITH latest_orders AS (
SELECT * FROM orders WHERE updated_at > (SELECT MAX(updated_at) FROM {{ this }})
),
updated_totals AS (
SELECT customer_id, SUM(order_total) AS total_sales FROM latest_orders GROUP BY customer_id
),
existing_totals AS (
SELECT customer_id, total_sales FROM {{ this }} WHERE customer_id NOT IN (SELECT customer_id FROM updated_totals)
)
SELECT * FROM updated_totals
UNION ALL
SELECT * FROM existing_totals;
-- models/customer_revenue.sql
{{ config(materialized='view', indexes=[{'columns': ['customer_id']}]) }}
SELECT
customer_id,
SUM(order_total) AS total_sales
FROM orders
GROUP BY customer_id;
--==========================================================================================
CREATE VIEW dynamic_pricing AS
WITH
recent_prices AS (
SELECT
grp.product_id,
avg(sub.price) AS avg_price
FROM (SELECT DISTINCT product_id FROM public.sales) AS grp,
LATERAL (
SELECT
sales.product_id,
sales.price
FROM public.sales
WHERE sales.product_id = grp.product_id
ORDER BY sales.sale_date DESC LIMIT 10
) AS sub
GROUP BY grp.product_id
),
promotion_effect AS (
SELECT
p.product_id,
min(pr.promotion_discount) AS promotion_discount
FROM public.promotions AS pr
INNER JOIN public.products AS p ON pr.product_id = p.product_id
WHERE pr.active = TRUE
GROUP BY p.product_id
),
popularity_score AS (
SELECT
s.product_id,
rank() OVER (PARTITION BY p.category_id ORDER BY count(s.sale_id) DESC) AS popularity_rank,
count(s.sale_id) AS sale_count
FROM public.sales AS s
INNER JOIN public.products AS p ON s.product_id = p.product_id
GROUP BY s.product_id, p.category_id
),
inventory_status AS (
SELECT
i.product_id,
sum(i.stock) AS total_stock,
rank() OVER (ORDER BY sum(i.stock) DESC) AS stock_rank
FROM public.inventory AS i
GROUP BY i.product_id
),
high_demand_products AS (
SELECT
p.product_id,
avg(s.sale_price) AS avg_sale_price,
count(s.sale_id) AS total_sales
FROM public.products AS p
INNER JOIN public.sales AS s ON p.product_id = s.product_id
GROUP BY p.product_id
HAVING count(s.sale_id) > (SELECT avg(total_sales) FROM (SELECT count(*) AS total_sales FROM public.sales GROUP BY product_id) AS subquery)
),
dynamic_pricing AS (
SELECT
p.product_id,
p.base_price,
CASE
WHEN pop.popularity_rank <= 3 THEN 1.2
WHEN pop.popularity_rank BETWEEN 4 AND 10 THEN 1.1
ELSE 0.9
END AS popularity_adjustment,
rp.avg_price,
coalesce(1.0 - (pe.promotion_discount / 100), 1) AS promotion_discount,
CASE
WHEN inv.stock_rank <= 3 THEN 1.1
WHEN inv.stock_rank BETWEEN 4 AND 10 THEN 1.05
ELSE 1
END AS stock_adjustment,
CASE
WHEN p.base_price > rp.avg_price THEN 1 + (p.base_price - rp.avg_price) / rp.avg_price
ELSE 1 - (rp.avg_price - p.base_price) / rp.avg_price
END AS demand_multiplier,
hd.avg_sale_price,
CASE
WHEN p.product_name ILIKE '%cheap%' THEN 0.8
ELSE 1.0
END AS additional_discount
FROM public.products AS p
LEFT JOIN recent_prices AS rp ON p.product_id = rp.product_id
LEFT JOIN promotion_effect AS pe ON p.product_id = pe.product_id
INNER JOIN popularity_score AS pop ON p.product_id = pop.product_id
LEFT JOIN inventory_status AS inv ON p.product_id = inv.product_id
LEFT JOIN high_demand_products AS hd ON p.product_id = hd.product_id
)
SELECT
dp.product_id,
round(dp.base_price * dp.popularity_adjustment * dp.stock_adjustment * dp.demand_multiplier, 2) AS adjusted_price,
round(dp.base_price * dp.popularity_adjustment * dp.stock_adjustment * dp.demand_multiplier * dp.promotion_discount * dp.additional_discount, 2) AS discounted_price
FROM dynamic_pricing AS dp;
ALTER TABLE public.inventory ADD CONSTRAINT inventory_product_id_fkey FOREIGN KEY (product_id) REFERENCES public.products (product_id);
ALTER TABLE public.promotions ADD CONSTRAINT promotions_product_id_fkey FOREIGN KEY (product_id) REFERENCES public.products (product_id);
ALTER TABLE public.sales ADD CONSTRAINT sales_product_id_fkey FOREIGN KEY (product_id) REFERENCES public.products (product_id);
CREATE INDEX idx_products_product_name ON products (product_name);
CREATE INDEX idx_sales_product_id ON sales (product_id);
CREATE INDEX idx_sales_sale_date ON sales (sale_date);
CREATE INDEX idx_sales_product_id_sale_date ON sales (product_id, sale_date);
CREATE INDEX idx_promotions_product_id ON promotions (product_id);
CREATE INDEX idx_promotions_active ON promotions (active);
CREATE INDEX idx_promotions_product_id_active ON promotions (product_id, active);
CREATE INDEX idx_inventory_product_id ON inventory (product_id);
--==========================================================================================
SELECT c.oid::regclass as table_name,
greatest(age(c.relfrozenxid),age(t.relfrozenxid)) as age
FROM pg_class c
LEFT JOIN pg_class t ON c.reltoastrelid = t.oid
WHERE c.relkind IN ('r', 'm');
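-- For context (a sketch): compare these ages with the freeze threshold to see how
-- close each table is to a forced anti-wraparound autovacuum.
SHOW autovacuum_freeze_max_age;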
--==========================================================================================
-- Step 1: Drop the default value on the id column
ALTER TABLE ci_stages ALTER COLUMN id DROP DEFAULT;
-- Step 2: Drop the foreign key from the p_ci_builds table
ALTER TABLE p_ci_builds DROP CONSTRAINT fk_3a9eaa254d;
-- Step 3: Drop the sequence
DROP SEQUENCE IF EXISTS ci_stages_id_seq;
-- Step 4: Create a new sequence
CREATE SEQUENCE ci_stages_id_seq
START WITH 14876 -- or any other value greater than the current max id
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
-- Step 5: Attach the sequence to the id column as its default
ALTER TABLE ci_stages ALTER COLUMN id SET DEFAULT nextval('ci_stages_id_seq');
-- Step 6: Restore the foreign key
ALTER TABLE p_ci_builds
ADD CONSTRAINT fk_3a9eaa254d FOREIGN KEY (stage_id) REFERENCES ci_stages(id) ON DELETE CASCADE;
-- Step 7: Reset the sequence value to the maximum id
SELECT setval('ci_stages_id_seq', (SELECT MAX(id) FROM ci_stages));
ALTER SEQUENCE ci_stages_id_seq OWNER TO gitlab;
ALTER SEQUENCE ci_stages_id_seq OWNED BY ci_stages.id;
--==========================================================================================
EXPLAIN ANALYZE
UPDATE
tbl
SET
val = val + 1
FROM
(
SELECT
ctid
FROM
tbl
WHERE
id IN (1, 2, 3)
ORDER BY
id
      FOR UPDATE -- lock the selected rows
) lc
WHERE
  tbl.ctid = lc.ctid; -- match rows by their physical position (ctid)
--==========================================================================================
select * from foo where not exists (select 1 from bar where foo.col = bar.x);
SELECT * FROM blah WHERE timestampcol >= '2018-06-01' AND timestampcol < '2018-06-08'
SELECT column1, column2, ... FROM table1 WHERE EXISTS (SELECT 1 FROM table2 WHERE table1.column = table2.column);
SELECT column1, column2, ... FROM table1 WHERE column IN (SELECT column FROM table2);
SELECT column1, column2, ... FROM table1 LEFT JOIN table2 ON table1.column_name = table2.column_name WHERE table2.column_name IS NULL;
Use text, or a domain over text, with CHECK(length(VALUE)=3) or CHECK(VALUE ~ '^[[:alpha:]]{3}$') or similar.
--==========================================================================================
select row_number() over() id, student FROM seats order by if(mod(id, 2) = 0, id-1, id+1);
--==========================================================================================
select c.name FROM customers c JOIN (select customerId from orders group by customerId having count(orderId) > 2) o
on c.customerId = o.customerId
where c.city = 'New York';
--==========================================================================================
CREATE VIEW car AS (SELECT * FROM vehicle WHERE type='car');
CREATE VIEW universal_comedies AS
SELECT *
FROM comedies
WHERE classification = 'U'
WITH LOCAL CHECK OPTION;
CREATE VIEW pg_comedies AS
SELECT *
FROM comedies
WHERE classification = 'PG'
WITH CASCADED CHECK OPTION;
CREATE VIEW comedies AS
SELECT f.*,
country_code_to_name(f.country_code) AS country,
(SELECT avg(r.rating)
FROM user_ratings r
WHERE r.film_id = f.id) AS avg_rating
FROM films f
WHERE f.kind = 'Comedy';
CREATE RECURSIVE VIEW public.nums_1_100 (n) AS
VALUES (1)
UNION ALL
SELECT n+1 FROM nums_1_100 WHERE n < 100;
create rule car_insert as on insert to car do instead
insert into vehicle(type, top_speed, license_plate)
values('car', new.top_speed, new.license_plate);
insert into car(top_speed, license_plate) values(160,'v4n1ty');
table car;
CREATE FUNCTION insertCar() RETURNS trigger AS $$
BEGIN
INSERT INTO vehicle
(type, top_speed, license_plate)
VALUES
  ('car', new.top_speed, new.license_plate);
  RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER insertCarTrigger INSTEAD OF INSERT ON car
FOR EACH ROW EXECUTE PROCEDURE insertCar();
The view must have exactly one entry in its FROM list, which must be a table or another updatable view.
The view definition must not contain WITH, DISTINCT, GROUP BY, HAVING, LIMIT, or OFFSET clauses at the top level.
The view definition must not contain set operations (UNION, INTERSECT or EXCEPT) at the top level.
The view's select list must not contain any aggregates, window functions or set-returning functions.
--==========================================================================================
update salary set sex = case sex when 'm' then 'f' else 'm' end;
--==========================================================================================
select * from (select name, salary, dense_rank() over (order by salary desc) as rank from employees) as sub where rank = 1;
select name, salary from employees order by salary desc limit 1 offset 2;
--==========================================================================================
select * from employee e where day(e.created_at) in (1, 2, 3, 4, 5, 6, 7, 8);
select customer_id from customer group by customer_id having count(distinct product_key) = (select count(*) from product);
--==========================================================================================
SELECT
ROUND((LENGTH(desc) - LENGTH(REPLACE(desc, 'val', ''))) / LENGTH('val')) AS c
FROM items;
--==========================================================================================
select a.name as emp_name, b.name as man_name from employees a
join employees b on a.manager_id = b.id;
--==========================================================================================
SELECT pg_export_snapshot();
BEGIN ISOLATION LEVEL REPEATABLE READ;
SET TRANSACTION SNAPSHOT '00000004-0000006E-1';
--==========================================================================================
DO $$
DECLARE
cur CURSOR FOR SELECT id, name FROM users;
rec RECORD;
BEGIN
OPEN cur;
LOOP
FETCH cur INTO rec;
EXIT WHEN NOT FOUND;
RAISE NOTICE 'User: %, Name: %', rec.id, rec.name;
END LOOP;
CLOSE cur;
END $$;
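-- The explicit cursor can usually be replaced by PL/pgSQL's FOR-IN-query loop,
-- which opens, fetches from and closes the cursor implicitly:
DO $$
DECLARE
    rec RECORD;
BEGIN
    FOR rec IN SELECT id, name FROM users LOOP
        RAISE NOTICE 'User: %, Name: %', rec.id, rec.name;
    END LOOP;
END $$;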
--==========================================================================================
select employee_id, mode() within group (ORDER BY customer_id)
FROM orders
GROUP BY employee_id;
--==========================================================================================
1) Transfer the PostgreSQL globals and the schema of the target database:
pg_dumpall --database=postgres --host=x.x.x.x --no-password --globals-only --no-privileges | psql
pg_dump --dbname name_db --host=x.x.x.x --no-password --create --schema-only | psql
2) Create the publication for the database on the master:
CREATE PUBLICATION name_pub FOR ALL TABLES;
3) Create the subscription on the replica:
CREATE SUBSCRIPTION name_sub CONNECTION 'host=x.x.x.x dbname=name_db' PUBLICATION name_pub;
4) Check the publication on the master:
select * from pg_catalog.pg_publication;
5) Check the subscription on the replica:
SELECT * FROM pg_stat_subscription;
--==========================================================================================
1) MASTER:
\c name_db
select usename,client_addr,state,replay_lag from pg_stat_replication;
select slot_name,slot_type,active from pg_replication_slots;
2) MASTER:
\c name_db
select pg_create_logical_replication_slot('logical_replica_slot', 'pgoutput');
select * from pg_replication_slots;
!!!check slot for database?
3) MASTER:
\c name_db
CREATE PUBLICATION name_db_pub FOR ALL TABLES;
4) REPLICA:
\c postgres
SELECT pg_promote();
5) REPLICA:
Take the LSN from the psql 13 log and advance the slot via pg_replication_slot_advance:
cat /var/log/postgresql/postgresql-13-main.log | grep "redo done at"
6) MASTER:
\c name_db
select pg_replication_slot_advance('logical_replica_slot', '2BB97/CF307EA0');
7) MASTER:
Remove the old streaming-replica slot:
select pg_drop_replication_slot('repmgr_slot_5');
8) REPLICA:
Install version 16:
apt-get install -y postgresql-16 postgresql-16-repack postgresql-16-repmgr postgresql-client-16
9) REPLICA:
Check:
pg_lsclusters
10) REPLICA
Prepare the old instance:
psql -U postgres -p 5432 << EOF
drop database pghero;
\connect name_db
drop extension pg_repack cascade;
drop extension pg_stat_statements cascade;
drop schema pghero cascade;
drop schema repack cascade;
drop schema okmeter cascade;
drop schema repmgr cascade;
\connect postgres
drop extension pg_repack cascade;
drop extension pg_stat_statements cascade;
drop schema pghero cascade;
drop schema repack cascade;
drop schema okmeter cascade;
drop schema repmgr cascade;
EOF
Otherwise, migrating the data to the new instance will raise errors when recreating views and other objects, e.g.:
pg_restore: creating VIEW "pghero.pg_stat_activity"
pg_restore: while PROCESSING TOC:
pg_restore: from TOC entry 203; 1259 19999367 VIEW pg_stat_activity postgres
pg_restore: error: could not execute query: ERROR: column reference "backend_type" is ambiguous
LINE 34: "pg_stat_activity"."backend_type"
11) REPLICA:
Stop PostgreSQL:
systemctl stop postgresql
12) REPLICA:
Log in as the postgres user and perform the upgrade:
su postgres
13) REPLICA:
Check that the upgrade is possible:
/usr/lib/postgresql/16/bin/pg_upgrade \
--old-datadir=/var/lib/postgresql/13/main \
--new-datadir=/var/lib/postgresql/16/main \
--old-bindir=/usr/lib/postgresql/13/bin \
--new-bindir=/usr/lib/postgresql/16/bin \
--old-options '-c config_file=/etc/postgresql/13/main/postgresql.conf' \
--new-options '-c config_file=/etc/postgresql/16/main/postgresql.conf' \
--check
14) REPLICA:
Upgrade by creating hard links to the file inodes:
/usr/lib/postgresql/16/bin/pg_upgrade \
--old-datadir=/var/lib/postgresql/13/main \
--new-datadir=/var/lib/postgresql/16/main \
--old-bindir=/usr/lib/postgresql/13/bin \
--new-bindir=/usr/lib/postgresql/16/bin \
--old-options '-c config_file=/etc/postgresql/13/main/postgresql.conf' \
--new-options '-c config_file=/etc/postgresql/16/main/postgresql.conf' \
--link
15) REPLICA:
Exit the postgres user shell:
exit
16) REPLICA:
Fix the version 16 config and swap the ports:
rsync -av /etc/postgresql/13/main/ /etc/postgresql/16/main/
sed -i '/stats_temp_directory/d' /etc/postgresql/16/main/postgresql.conf
sed -i '/vacuum_defer_cleanup_age/d' /etc/postgresql/16/main/postgresql.conf
sed -i 's/pg_stat_statements,pg_repack/pg_stat_statements/' /etc/postgresql/16/main/postgresql.conf
sed -i 's/\/13\//\/16\//' /etc/postgresql/16/main/postgresql.conf
sed -i 's/5433/5432/' /etc/postgresql/16/main/postgresql.conf
sed -i 's/13-main/16-main/' /etc/postgresql/16/main/postgresql.conf
sed -i 's/13\/main/16\/main/' /etc/postgresql/16/main/postgresql.conf
sed -i 's/5432/5433/' /etc/postgresql/13/main/postgresql.conf
17) REPLICA:
Start the PostgreSQL service:
systemctl start postgresql
18) REPLICA:
Log in as the postgres user:
su postgres
19) REPLICA:
Refresh the optimizer statistics, which are not carried over from the old instance:
/usr/lib/postgresql/16/bin/vacuumdb --all --analyze-in-stages
20) REPLICA:
Remove the old instance:
./delete_old_cluster.sh
rm -rf /etc/postgresql/13/main
21) REPLICA:
Exit the postgres user shell:
exit
22) REPLICA:
\c name_db
CREATE SUBSCRIPTION name_db_sub CONNECTION 'host=x.x.x.x dbname=name_db' PUBLICATION name_db_pub WITH (copy_data=false, slot_name='logical_replica_slot', create_slot=false);
--==========================================================================================
select
rc1_0.id,
rc1_0.created_at,
rc1_0.key,
rc1_0.value
from
( SELECT
ss.id,
ss.key,
ss.value,
ss.created_at
FROM
system_settings AS ss
INNER JOIN
( SELECT
ss2.key as k2, MAX(ss2.created_at) as ca2
FROM
system_settings ss2
GROUP BY
ss2.key ) AS t
ON t.k2 = ss.key
AND t.ca2 = ss.created_at
WHERE
ss.type = 'SYSTEM'
AND ss.active IS TRUE ) rc1_0
where
rc1_0.key=?
--==========================================================================================
select
dense_rank() over w as rank,
first_name, department.name as dep_name,
salary
from employee
join department using(dep_id)
window w as (order by salary desc)
order by rank, emp_id;
--==========================================================================================
select * from pg_stats where tablename = 'pgconf' and attname = 'fk_id';
create index fk_not_null on pgconf(fk_id) where fk_id is not null;
--==========================================================================================
create index simple on pgconf((state = 'state'));
create index normal on pgconf(created_at, state);
create index complex on pgconf(created_at) where state != 'state';
--==========================================================================================
delete from person p1 using person p2 where p1.email = p2.email and p1.id > p2.id;
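-- Variant (PostgreSQL) for tables without a usable surrogate key: deduplicate by
-- physical row position instead of id.
delete from person p1 using person p2 where p1.email = p2.email and p1.ctid > p2.ctid;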
--==========================================================================================
with highest_salary as (
select max(salary) as salary
from employee
)
select max(salary)
from employee
where salary < (select salary from highest_salary);
--==========================================================================================
USE <database>
SELECT * FROM fn_my_permissions(NULL, 'DATABASE');
sp_configure 'show advanced options', '1'
RECONFIGURE
-- This enables xp_cmdshell
sp_configure 'xp_cmdshell', '1'
RECONFIGURE
--==========================================================================================
SELECT *
FROM (
SELECT
post_id,
post_title,
comment_id,
comment_review,
DENSE_RANK() OVER (ORDER BY p_pc.comment_count DESC) AS ranking
FROM (
SELECT
p.id AS post_id,
p.title AS post_title,
pc.id AS comment_id,
pc.review AS comment_review,
COUNT(post_id) OVER(PARTITION BY post_id) AS comment_count
FROM post p
LEFT JOIN post_comment pc ON p.id = pc.post_id
WHERE p.title LIKE 'SQL%'
) p_pc
) p_pc_r
WHERE p_pc_r.ranking <= 2
ORDER BY post_id, comment_id;
--==========================================================================================
--================================================================
INSERT INTO users(id, name, department)
SELECT
us[1]::integer
, us[2]::text
, us[3]::text
FROM
(
SELECT
us::text[]
FROM
unnest($1::text[]) us
) T
ON CONFLICT
DO NOTHING;
$1 = '{"{1,Vasya,Developers}","{2,Petya,Developers}","{3,Kolya,Developers}","{4,Masha,Support}","{5,Sasha,Support}"}'
--================================================================
INSERT INTO users(id, name, department)
SELECT
  (val->>0)::integer -- take the needed element of the JSON array
, val->>1
, dep
FROM
(
SELECT
    json_array_elements(value) val -- unroll the arrays of people
, key dep
FROM
    json_each($1::json) -- unroll the department keys
) T
ON CONFLICT
DO NOTHING;
$1 = '{"Developers":[[1,"Vasya"],[2,"Petya"],[3,"Kolya"]],"Support":[[4,"Masha"],[5,"Sasha"]]}'
--================================================================
COPY users(id, name) FROM stdin;
1\tVasya\n2\tPetya\n3\tKolya\n
\.
--================================================================
... id = ANY($1::integer[]) -- $1 : '{2,3,5,8,13}'
--================================================================
INSERT INTO tbl
SELECT
unnest[1]::text k
, unnest[2]::integer v
FROM (
SELECT
unnest($1::text[])::text[] -- $1 : '{"{a,1}","{b,2}","{c,3}","{d,4}"}'
) T;
--================================================================
SELECT
T.*
FROM
unnest('{1,2,3}'::integer[]) _id
, LATERAL ( -- executed separately for each ID
SELECT
*
FROM
tbl
WHERE
id = _id
ORDER BY
ts DESC
LIMIT 1
) T;
--================================================================
SELECT
ARRAY["2021-04", "2021-05", "2021-06"] -- это мы столбцы складываем в массив
FROM
(
SELECT
sum(
CASE
WHEN dt >= '2021-04-01' AND dt < '2021-05-01'
THEN qty
END
) "2021-04"
, sum(
CASE
WHEN dt >= '2021-05-01' AND dt < '2021-06-01'
THEN qty
END
) "2021-05"
, sum(
CASE
WHEN dt >= '2021-06-01' AND dt < '2021-07-01'
THEN qty
END
) "2021-06" -- это имена столбцов
FROM
sales
WHERE
dt >= '2021-04-01' AND dt < '2021-07-01'
) T;
--================================================================
SELECT
array_agg(sum ORDER BY id)
FROM
(
SELECT
date_trunc('month', dt) id
, sum(qty)
FROM
sales
WHERE
dt >= '2021-04-01' AND dt < '2021-07-01'
GROUP BY
1
) T;
--================================================================
SELECT
*
FROM
docs
WHERE
user IN (
SELECT id FROM users WHERE department = $1
);
--================================================================
WITH T1 AS (
SELECT
row_number() OVER() rn
, unnest v1
FROM
unnest('{1,2,3,4}'::integer[])
)
, T2 AS (
SELECT
row_number() OVER() rn
, unnest v2
FROM
unnest('{5,6}'::integer[])
)
SELECT
T1.v1
, T2.v2
FROM
T1
LEFT JOIN
T2
USING(rn);
--================================================================
WITH T1 AS (
SELECT
*
FROM
unnest('{1,2,3,4}'::integer[])
WITH ORDINALITY T(v1, rn)
)
, T2 AS (
SELECT
*
FROM
unnest('{5,6}'::integer[])
WITH ORDINALITY T(v2, rn)
)
SELECT
T1.v1
, T2.v2
FROM
T1
LEFT JOIN
T2
USING(rn);
--================================================================
SELECT
v1,
COALESCE(v2, 0)
FROM
unnest(
'{1,2,3,4}'::integer[]
, '{5,6}'::integer[]
) T(v1, v2);
--================================================================
SELECT
key k
, value v
FROM
json_each($1::json); -- '{"a":1,"b":2,"c":3,"d":4}'
--================================================================
SELECT
*
FROM
json_populate_recordset(
NULL::pg_class
, $1::json -- $1 : '[{"relname":"pg_class","oid":1262},{"relname":"pg_namespace","oid":2615}]'
);
--================================================================
SELECT
*
FROM
json_to_recordset($1::json) T(k text, v integer);
--================================================================
SET my.val = '{1,2,3}';
DO $$
DECLARE
id integer;
BEGIN
FOR id IN (SELECT unnest(current_setting('my.val')::integer[])) LOOP
RAISE NOTICE 'id : %', id;
END LOOP;
END;
$$ LANGUAGE plpgsql;
--================================================================
SELECT
json_object_agg(js ->> 'language', coalesce(js ->> 'value', ''))
FILTER(WHERE js ->> 'language' = 'ru') ->> 'ru' ru
, json_object_agg(js ->> 'language', coalesce(js ->> 'value', ''))
FILTER(WHERE js ->> 'language' = 'en') ->> 'en' en
, json_object_agg(js ->> 'language', coalesce(js ->> 'value', ''))
FILTER(WHERE js ->> 'language' = 'de') ->> 'de' de
, json_object_agg(js ->> 'language', coalesce(js ->> 'value', ''))
FILTER(WHERE js ->> 'language' = 'fr') ->> 'fr' fr
FROM
(
VALUES
('{"language" : "ru", "value" : "Бухгалтерия"}'::json)
, ('{"language" : "en", "value" : "Accounting"}')
, ('{"language" : "de", "value" : "Buchhaltung"}')
, ('{"language" : "fr", "value" : "Comptabilité"}')
) T(js);
--================================================================
json_object_agg(js ->> 'language', coalesce(js ->> 'value', ''))
FILTER(WHERE js ->> 'language' = 'ru' AND coalesce(js ->> 'value', '') <> '') ->> 'ru' ru
--================================================================
SELECT
coalesce((array_agg(js ->> 'value') FILTER(WHERE js ->> 'language' = 'ru'))[1], '') ru
, coalesce((array_agg(js ->> 'value') FILTER(WHERE js ->> 'language' = 'en'))[1], '') en
, coalesce((array_agg(js ->> 'value') FILTER(WHERE js ->> 'language' = 'de'))[1], '') de
, coalesce((array_agg(js ->> 'value') FILTER(WHERE js ->> 'language' = 'fr'))[1], '') fr
FROM
(
VALUES
('{"language" : "ru", "value" : "Бухгалтерия"}'::json)
, ('{"language" : "en", "value" : "Accounting"}')
, ('{"language" : "de", "value" : "Buchhaltung"}')
, ('{"language" : "fr", "value" : "Comptabilité"}')
) T(js);
--================================================================
SELECT
coalesce((array_agg(value) FILTER(WHERE language = 'ru'))[1], '') ru
, coalesce((array_agg(value) FILTER(WHERE language = 'en'))[1], '') en
, coalesce((array_agg(value) FILTER(WHERE language = 'de'))[1], '') de
, coalesce((array_agg(value) FILTER(WHERE language = 'fr'))[1], '') fr
FROM
(
VALUES
('{"language" : "ru", "value" : "Бухгалтерия"}'::json)
, ('{"language" : "en", "value" : "Accounting"}')
, ('{"language" : "de", "value" : "Buchhaltung"}')
, ('{"language" : "fr", "value" : "Comptabilité"}')
) T(js)
, json_to_record(js) X(language text, value text);
--================================================================
SELECT
coalesce(
(
      array_agg(value ORDER BY value DESC NULLS LAST) -- non-empty values first
        FILTER(WHERE language = 'ru') -- filter on the key value
    )[1] -- emulate first_value
, ''
) ru
FROM
(
VALUES
('{"language" : "ru", "value" : "Бухгалтерия"}'::json)
, ('{"language" : "ru"}')
) T(js)
, json_to_record(js) X(language text, value text);
--================================================================
SELECT
coalesce(max(value) FILTER(WHERE language = 'ru'), '') ru
FROM
(
VALUES
('{"language" : "ru", "value" : "Бухгалтерия"}'::json)
, ('{"language" : "ru"}')
) T(js)
, json_to_record(js) X(language text, value text);
--================================================================
-- By a set of keys
WITH jsd AS (
SELECT $${
"1" : {"Номер" : 101, "Дата" : "2023-11-01", "Сумма" : 123.45, "Флаги" : [true,false,null]}
, "2" : {"Номер" : 202, "Дата" : "2023-11-02", "Сумма" : 321.54, "Флаги" : [false,null,true]}
, "3" : {"Номер" : 303, "Дата" : "2023-11-03", "Сумма" : 100.00, "Флаги" : [null,true,false]}
}$$::jsonb
)
SELECT
*
FROM
  unnest(ARRAY[1, 2]) id -- the incoming set of keys
, jsonb_to_record( -- the function can be called without LATERAL
    (TABLE jsd) -> id::text
  ) T( -- define the names and types of the keys to extract
"Номер"
integer
, "Дата"
date
, "Сумма"
numeric(32,2)
, "Флаги"
boolean[]
);
--================================================================
-- By all keys of the object
WITH jsd AS (
SELECT $${
"1" : {"Номер" : 101, "Дата" : "2023-11-01", "Сумма" : 123.45, "Флаги" : [true,false,null]}
, "2" : {"Номер" : 202, "Дата" : "2023-11-02", "Сумма" : 321.54, "Флаги" : [false,null,true]}
, "3" : {"Номер" : 303, "Дата" : "2023-11-03", "Сумма" : 100.00, "Флаги" : [null,true,false]}
}$$::jsonb
)
SELECT
jskey::integer
, T.*
FROM
  jsonb_each((TABLE jsd)) js(jskey, jsval) -- all key-value pairs
, jsonb_to_record(jsval) T(
"Номер"
integer
, "Дата"
date
, "Сумма"
numeric(32,2)
, "Флаги"
boolean[]
);
--================================================================
-- Over the entire array
WITH jsd AS (
SELECT $$[
{"id" : 1, "Номер" : 101, "Дата" : "2023-11-01", "Сумма" : 123.45, "Флаги" : [true,false,null]}
, {"id" : 2, "Номер" : 202, "Дата" : "2023-11-02", "Сумма" : 321.54, "Флаги" : [false,null,true]}
, {"id" : 3, "Номер" : 303, "Дата" : "2023-11-03", "Сумма" : 100.00, "Флаги" : [null,true,false]}
]$$::jsonb
)
SELECT
*
FROM
jsonb_to_recordset((TABLE jsd)) T(
id
integer
, "Номер"
integer
, "Дата"
date
, "Сумма"
numeric(32,2)
, "Флаги"
boolean[]
);
--================================================================
CREATE TABLE doc(
doc_id
serial
PRIMARY KEY
, customer_id
integer
, dt
date
, sum
numeric(32,2)
);
CREATE INDEX ON doc(customer_id, dt DESC);
INSERT INTO doc(
customer_id
, dt
, sum
)
SELECT
(random() * 1e5)::integer
, now() - random() * '1 year'::interval
, random() * 1e6
FROM
generate_series(1, 1e5) id;
--================================================================
SELECT
id customer_id
, (SELECT doc_id FROM doc WHERE customer_id = id ORDER BY dt DESC LIMIT 1) doc_id
, (SELECT dt FROM doc WHERE customer_id = id ORDER BY dt DESC LIMIT 1) dt
, (SELECT sum FROM doc WHERE customer_id = id ORDER BY dt DESC LIMIT 1) sum
FROM
unnest(ARRAY[1,2,4,8,16,32,64]) id;
--================================================================
SELECT
id
, (
SELECT
      doc -- the whole-row record of the table
FROM
doc
WHERE
customer_id = id
ORDER BY
dt DESC
LIMIT 1
  ).* -- expand the record into individual fields
FROM
unnest(ARRAY[1,2,4,8,16,32,64]) id;
--================================================================
WITH dc AS (
SELECT
id
, (
SELECT
doc
FROM
doc
WHERE
customer_id = id
ORDER BY
dt DESC
LIMIT 1
    ) doc -- a single record-typed column
FROM
unnest(ARRAY[1,2,4,8,16,32,64]) id
)
SELECT
id
, (doc).* -- expand into separate fields
FROM
dc;
--================================================================
SELECT
*
FROM
unnest(ARRAY[1,2,4,8,16,32,64]) id
LEFT JOIN
LATERAL(
SELECT
*
FROM
doc
WHERE
customer_id = id
ORDER BY
dt DESC
LIMIT 1
) doc
    ON TRUE; -- a LEFT JOIN always needs an ON condition
--================================================================
ALTER TABLE doc
ADD COLUMN emp_author integer
, ADD COLUMN emp_executor integer;
-- assign authors/executors
UPDATE
doc
SET
emp_author = (random() * 1e3)::integer
, emp_executor = (random() * 1e3)::integer;
CREATE TABLE employee(
emp_id
serial
PRIMARY KEY
, emp_name
varchar
);
-- generate "employees"
INSERT INTO employee(
emp_name
)
SELECT
(
SELECT
string_agg(chr(((random() * 94) + 32)::integer), '')
FROM
generate_series(1, (random() * 16 + i % 16)::integer)
)
FROM
generate_series(1, 1e3) i;
--================================================================
SELECT
*
FROM
unnest(ARRAY[1,2,4,8,16,32,64]) id
LEFT JOIN
LATERAL(
SELECT
*
FROM
doc
WHERE
customer_id = id
ORDER BY
dt DESC
LIMIT 1
) doc
ON TRUE
LEFT JOIN
  LATERAL( -- fetch the author
SELECT
emp_name emp_a
FROM
employee
WHERE
emp_id = doc.emp_author
LIMIT 1
) emp_a
ON TRUE
LEFT JOIN
  LATERAL( -- fetch the executor
SELECT
emp_name emp_e
FROM
employee
WHERE
emp_id = doc.emp_executor
LIMIT 1
) emp_e
ON TRUE;
--================================================================
SELECT
*
FROM
unnest(ARRAY[1,2,4,8,16,32,64]) id
LEFT JOIN
LATERAL(
SELECT
*
FROM
doc
WHERE
customer_id = id
ORDER BY
dt DESC
LIMIT 1
) doc
ON TRUE
LEFT JOIN
LATERAL(
SELECT -- min + FILTER = PIVOT
min(emp_name) FILTER(WHERE emp_id = doc.emp_author) emp_a
, min(emp_name) FILTER(WHERE emp_id = doc.emp_executor) emp_e
FROM
employee
WHERE
      emp_id IN (doc.emp_author, doc.emp_executor) -- filter by both keys at once
) emp
ON TRUE;
--================================================================
SELECT
(
SELECT
string_agg(chr(((random() * 94) + 32)::integer), '')
FROM
generate_series(1, (random() * 16 + i % 16)::integer)
)
FROM
generate_series(1, 1e3) i;
--================================================================
SELECT
  sum(abs(v2 - v1)) -- sum the differences of the values
FROM
(
SELECT
      array_agg(line[1] ORDER BY line[1]) lst1 -- order the elements of each list
, array_agg(line[2] ORDER BY line[2]) lst2
FROM
(
SELECT
        line::numeric[] -- convert the string representations to numeric values
FROM
regexp_matches($$
3 4
4 3
2 5
1 3
3 9
3 3
$$
        , '(\d+)\s+(\d+)' -- each pair of numbers, regardless of line breaks
, 'g'
) line
) T
) T
, unnest(lst1, lst2) u(v1, v2); -- "unroll" both arrays in parallel
--================================================================
SELECT
  sum(sym) -- the total "similarity"
FROM
(
SELECT
      v * count(*) FILTER(WHERE lst = 0) * count(*) FILTER(WHERE lst = 1) sym -- the "similarity" for a specific value
FROM
(
SELECT
array_agg(line[1] ORDER BY line[1]) lst1
, array_agg(line[2] ORDER BY line[2]) lst2
FROM
(
SELECT
line::numeric[]
FROM
regexp_matches($$
3 4
4 3
2 5
1 3
3 9
3 3
$$
, '(\d+)\s+(\d+)'
, 'g'
) line
) T
) T
, unnest(lst1, lst2) u(v1, v2)
    , unnest(ARRAY[v1, v2], ARRAY[0, 1]) g(v, lst) -- "multiply" the rows
GROUP BY
v
) T;
--================================================================
SELECT
  count(*) FILTER(WHERE cond) -- the number of rows meeting the condition
FROM
(
SELECT
      string_to_array(line, ' ')::numeric[] rpt -- convert the line into an array of numbers
FROM
regexp_split_to_table($$
7 6 4 2 1
1 2 7 8 9
9 7 6 2 1
1 3 2 4 5
8 6 4 4 1
1 3 6 7 9
      $$, '[\r\n]+') line -- split into lines
WHERE
      btrim(line) <> '' -- filter out empty lines
) T
, LATERAL (
SELECT
(
        bool_and(rpt[i] < rpt[i + 1]) OR -- all increasing
        bool_and(rpt[i] > rpt[i + 1]) -- all decreasing
      ) AND
      bool_and(abs(rpt[i] - rpt[i + 1]) BETWEEN 1 AND 3) cond -- every adjacent difference within 1..3
    FROM
      generate_series(1, array_length(rpt, 1) - 1) i -- iterate over all but the last element
) cond;
--================================================================
SELECT
count(*) FILTER(WHERE cond)
FROM
(
SELECT
rptno
    , bool_or(cond) cond -- does at least one variant satisfy the condition?
FROM
(
SELECT
        row_number() OVER() rptno -- number the "source" reports
, string_to_array(line, ' ')::numeric[] rpts
FROM
regexp_split_to_table($$
7 6 4 2 1
1 2 7 8 9
9 7 6 2 1
1 3 2 4 5
8 6 4 4 1
1 3 6 7 9
$$, '[\r\n]+') line
WHERE
btrim(line) <> ''
) T
    , LATERAL ( -- generate all possible sub-reports
SELECT
        rpts[:i-1] || rpts[i+1:] rpt -- "glue" the array before/after the index
FROM
        generate_subscripts(rpts, 1) i -- iterate over all indexes
) rptn
, LATERAL (
SELECT
(
bool_and(rpt[i] < rpt[i + 1]) OR
bool_and(rpt[i] > rpt[i + 1])
) AND
bool_and(abs(rpt[i] - rpt[i + 1]) BETWEEN 1 AND 3) cond
FROM
generate_series(1, array_length(rpt, 1) - 1) i
) cond
GROUP BY
1
) T;
--================================================================
SELECT
  row_number() OVER() rptno -- number the "source" reports
, string_to_array(line, ' ')::numeric[] rpts
FROM
regexp_split_to_table($$
7 6 4 2 1
1 2 7 8 9
9 7 6 2 1
1 3 2 4 5
8 6 4 4 1
1 3 6 7 9
$$, '[\r\n]+') line
WHERE
btrim(line) <> ''
--================================================================
SELECT
  sum(exp[1]::numeric * exp[2]::numeric) -- sum the values
FROM
regexp_matches(
$source$xmul(2,4)%&mul[3,7]!@^do_not_mul(5,5)+mul(32,64]then(mul(11,8)mul(8,5))$source$
  , 'mul\((\d{1,3}),(\d{1,3})\)' -- parentheses escaped; numbers are 1-3 digits
  , 'g' -- across the whole string
) exp;
--================================================================
SELECT
sum(exp[1]::numeric * exp[2]::numeric)
FROM
regexp_matches(
regexp_replace(
$source$xmul(2,4)&mul[3,7]!^don't()_mul(5,5)+mul(32,64](mul(11,8)undo()?mul(8,5))$source$
      , 'don''t\(\).*?(?:do\(\)|$)' -- cut out every don't..do block, or don't..end-of-string
      , '_' -- a non-empty replacement string
      , 'g' -- across the whole string
)
, 'mul\((\d{1,3}),(\d{1,3})\)'
, 'g'
) exp;
--================================================================
WITH matrix AS(
SELECT
array_agg(regexp_split_to_array(line, '')) m
FROM
regexp_split_to_table($$
MMMSXXMASM
MSAMXMSMSA
AMXSXMAAMM
MSAMASMSMX
XMASAMXAMM
XXAMMXXAMA
SMSMSASXSS
SAXAMASAAA
MAMMMXMMMM
MXMXAXMASX
$$, '[\r\n]+') line
WHERE
btrim(line) <> ''
)
SELECT
  count(*) FILTER(WHERE word = 'XMAS') -- count the occurrences of the word 'XMAS'
FROM
matrix
, generate_subscripts(m, 1) x -- the first dimension of the array
, generate_subscripts(m, 2) y -- the second dimension
, LATERAL (
SELECT
      unnest(ARRAY[ -- build words in all 8 possible directions at once
string_agg(m[x + i][y], '')
, string_agg(m[x - i][y], '')
, string_agg(m[x][y + i], '')
, string_agg(m[x][y - i], '')
, string_agg(m[x + i][y + i], '')
, string_agg(m[x + i][y - i], '')
, string_agg(m[x - i][y + i], '')
, string_agg(m[x - i][y - i], '')
]) word
FROM
      generate_series(0, length('XMAS') - 1) i -- generate offsets, one per letter of the target word
) T;
--================================================================
SELECT
regexp_split_to_array(line, '') m
FROM
regexp_split_to_table($$
MMMSXXMASM
MSAMXMSMSA
AMXSXMAAMM
MSAMASMSMX
XMASAMXAMM
XXAMMXXAMA
SMSMSASXSS
SAXAMASAAA
MAMMMXMMMM
MXMXAXMASX
$$, '[\r\n]+') line
WHERE
btrim(line) <> ''
--================================================================
WITH matrix AS(
SELECT
array_agg(regexp_split_to_array(line, '')) m
FROM
regexp_split_to_table($$
MMMSXXMASM
MSAMXMSMSA
AMXSXMAAMM
MSAMASMSMX
XMASAMXAMM
XXAMMXXAMA
SMSMSASXSS
SAXAMASAAA
MAMMMXMMMM
MXMXAXMASX
$$, '[\r\n]+') line
WHERE
btrim(line) <> ''
)
SELECT
count(*)
FROM
(
SELECT
x
, y
FROM
matrix
, generate_series(2, array_length(m, 1)) x
, generate_series(2, array_length(m, 2)) y
, LATERAL (
SELECT
          unnest(ARRAY[ -- diagonals only
string_agg(m[x + i][y + i], '')
, string_agg(m[x + i][y - i], '')
, string_agg(m[x - i][y + i], '')
, string_agg(m[x - i][y - i], '')
]) word
FROM
generate_series(-1, 1) i -- [-1, 0, 1]
) T
WHERE
word = 'MAS'
GROUP BY
1, 2
HAVING
      count(*) = 2 -- exactly 2 'MAS' words through this center
) T;
--================================================================
select i from generate_series(-1, 1) i;
--================================================================
WITH src AS (
SELECT $$
47|53
97|13
97|61
97|47
75|29
61|13
75|53
29|13
97|29
53|29
61|53
97|53
61|29
47|13
75|47
97|75
47|61
75|61
47|29
75|13
53|13
75,47,61,53,29
97,61,53,29,13
75,29,13
75,97,47,61,53
61,13,29
97,13,75,29,47
$$
)
, rul AS ( -- the rules as a jsonb dictionary
SELECT
jsonb_object(
      array_agg(pagea) -- dictionary key: the "after" page number
      , array_agg(pageb::text) -- value: the array of "before" page numbers
)
FROM
(
SELECT
        line[2] pagea -- the "after" page number
        , array_agg(line[1]) pageb -- the page numbers that must come "before" it
FROM
regexp_matches(
(TABLE src)
, '(\d+)\|(\d+)'
, 'g'
) line
GROUP BY
1
) T
)
, upd AS ( -- the updates as arrays of page numbers
SELECT
string_to_array(line, ',')::numeric[] upd
FROM
regexp_split_to_table(
(TABLE src)
, '[\r\n]+'
) line
WHERE
line ~ '^(\d+)(,\d+)*$'
)
SELECT
  sum(upd[array_length(upd, 1) / 2 + 1]) -- sum the middle values of the arrays
FROM
upd
, LATERAL (
SELECT
bool_and(
NOT(
upd[i + 1:] && coalesce(
          ((TABLE rul) ->> upd[i]::text)::numeric[] -- the array value for this dictionary key
          , '{}' -- if the key is absent from the dictionary, substitute an empty array
        )
      )
    ) cond -- the condition is not violated at any position
FROM
generate_series(1, array_length(upd, 1) - 1) i -- [1..n-1]
) T
WHERE
cond;
--================================================================
WITH src AS (
SELECT $$
47|53
97|13
97|61
97|47
75|29
61|13
75|53
29|13
97|29
53|29
61|53
97|53
61|29
47|13
75|47
97|75
47|61
75|61
47|29
75|13
53|13
75,47,61,53,29
97,61,53,29,13
75,29,13
75,97,47,61,53
61,13,29
97,13,75,29,47
$$
)
SELECT
  line[2] pagea -- the "after" page number
  , array_agg(line[1]) pageb -- the page numbers that must come "before" it
FROM
regexp_matches(
(TABLE src)
, '(\d+)\|(\d+)'
, 'g'
) line
GROUP BY
1
--================================================================
WITH RECURSIVE src AS (
SELECT $$
47|53
97|13
97|61
97|47
75|29
61|13
75|53
29|13
97|29
53|29
61|53
97|53
61|29
47|13
75|47
97|75
47|61
75|61
47|29
75|13
53|13
75,47,61,53,29
97,61,53,29,13
75,29,13
75,97,47,61,53
61,13,29
97,13,75,29,47
$$
)
, rul AS ( -- the rules dictionary
SELECT
jsonb_object(
array_agg(pagea)
, array_agg(pageb::text)
)
FROM
(
SELECT
line[2] pagea
, array_agg(line[1]) pageb
FROM
regexp_matches(
(TABLE src)
, '(\d+)\|(\d+)'
, 'g'
) line
GROUP BY
1
) T
)
, upd AS ( -- the update arrays
SELECT
string_to_array(line, ',')::numeric[] upd
FROM
regexp_split_to_table(
(TABLE src)
, '[\r\n]+'
) line
WHERE
line ~ '^(\d+)(,\d+)*$'
)
, wrong AS ( -- only the incorrectly ordered updates
SELECT
    row_number() OVER() id -- number them
, upd
FROM
upd
, LATERAL (
SELECT
bool_and(NOT(upd[i + 1:] && coalesce(((TABLE rul) ->> upd[i]::text)::numeric[], '{}'))) cond
FROM
generate_series(1, array_length(upd, 1) - 1) i
) T
WHERE
NOT cond
)
, r AS ( -- recursive "bubble" sort
SELECT
id
, upd
    , array_length(upd, 1) ln -- the length of this particular update array
    , 0 n -- its step counter
FROM
wrong
UNION ALL
SELECT
id
, CASE
        WHEN j <= i THEN upd -- skip unsuitable pointer positions
WHEN upd[j] = ANY(coalesce(((TABLE rul) ->> upd[i]::text)::numeric[], '{}'))
THEN upd[:i - 1] || upd[j] || upd[i + 1:j - 1] || upd[i] || upd[j + 1:] -- swap(upd, i, j)
ELSE upd
END
, ln
, n + 1
FROM
r
    , LATERAL ( -- convert the counter into a pair of "pointers"
SELECT
(n / ln) + 1 i
, (n % ln) + 1 j
) T
WHERE
n < ln ^ 2
)
SELECT
sum(upd[array_length(upd, 1) / 2 + 1])
FROM
(
    SELECT DISTINCT ON(id) -- for each update take ...
upd
FROM
r
ORDER BY
      id, n DESC -- ... only its last state by step counter
) T;
--================================================================
WITH RECURSIVE matrix AS ( -- the map as a matrix (a 2D array)
SELECT
array_agg(regexp_split_to_array(line, '')) m
FROM
regexp_split_to_table($$
....#.....
.........#
..........
..#.......
.......#..
..........
.#..^.....
........#.
#.........
......#...
$$, '[\r\n]+') line
WHERE
btrim(line) <> ''
)
, dir AS ( -- the set of "directions"
SELECT
    ARRAY[0, -1] du -- up
    , ARRAY[0, +1] dd -- down
    , ARRAY[-1, 0] dl -- left
    , ARRAY[+1, 0] dr -- right
)
, r AS ( -- simulate the "walk"
SELECT
x
, y
    , du d -- the initial direction of movement is "up"
FROM
matrix
  , generate_subscripts(m, 1) y -- scan all cells of the matrix
, generate_subscripts(m, 2) x
, dir
WHERE
    m[y][x] = '^' -- the guard's starting position in the matrix
UNION ALL
SELECT
    -- if we can move: take the new position and keep the direction
    -- if we cannot: keep the position and take the new direction
CASE WHEN cond THEN nx ELSE x END
, CASE WHEN cond THEN ny ELSE y END
, CASE WHEN cond THEN d ELSE nd END
FROM
matrix
, r
  , LATERAL ( -- precompute the planned values for the next step
SELECT
x + d[1] nx
, y + d[2] ny
, CASE d
        WHEN dl THEN du -- left -> up
        WHEN du THEN dr -- up -> right
        WHEN dr THEN dd -- right -> down
        WHEN dd THEN dl -- down -> left
END nd
FROM
dir
) n
, LATERAL (
SELECT
      coalesce(m[ny][nx], '') <> '#' cond -- can we step where we want to?
) T
WHERE
    m[ny][nx] IS NOT NULL -- keep stepping until we leave the grid
)
SELECT
  count(DISTINCT (x, y)) -- count the unique cells along the path
FROM
r;
--================================================================
WITH RECURSIVE matrix AS (
SELECT
array_agg(regexp_split_to_array(line, '')) m
FROM
regexp_split_to_table($$
....#.....
.........#
..........
..#.......
.......#..
..........
.#..^.....
........#.
#.........
......#...
$$, '[\r\n]+') line
WHERE
btrim(line) <> ''
)
, dir AS (
SELECT
ARRAY[0, -1] du
, ARRAY[0, +1] dd
, ARRAY[-1, 0] dl
, ARRAY[+1, 0] dr
)
, src AS ( -- find the starting position
SELECT
x
, y
, du d
FROM
matrix
, generate_subscripts(m, 1) y
, generate_subscripts(m, 2) x
, dir
WHERE
m[y][x] = '^'
)
, r AS ( -- trace the initial path, counting the steps
SELECT
0 i
, *
FROM
src
UNION ALL
SELECT
i + 1
, CASE WHEN cond THEN nx ELSE x END
, CASE WHEN cond THEN ny ELSE y END
, CASE WHEN cond THEN d ELSE nd END
FROM
matrix
, r
, LATERAL (
SELECT
x + d[1] nx
, y + d[2] ny
, CASE d
WHEN dl THEN du
WHEN du THEN dr
WHEN dr THEN dd
WHEN dd THEN dl
END nd
FROM
dir
) n
, LATERAL (
SELECT
coalesce(m[ny][nx], '') <> '#' cond
) T
WHERE
m[ny][nx] IS NOT NULL
)
SELECT
count(*) FILTER(WHERE is_cycle) -- number of positions that cause a loop
FROM
(
SELECT DISTINCT ON(x, y) -- all visited positions where we will try placing an obstacle
x
, y
-- state on the previous step
, lag(x) OVER(ORDER BY i) px
, lag(y) OVER(ORDER BY i) py
, lag(d) OVER(ORDER BY i) pd
FROM
r
ORDER BY
x, y, i
) T
, LATERAL (
WITH RECURSIVE r AS ( -- build the path with an obstacle at the current position
SELECT
T.px x -- start from the previous position
, T.py y
, T.pd d
WHERE
(T.x, T.y) <> (SELECT x, y FROM src)
UNION ALL
SELECT
CASE WHEN cond THEN nx ELSE x END
, CASE WHEN cond THEN ny ELSE y END
, CASE WHEN cond THEN d ELSE nd END
FROM
matrix
, r
, LATERAL (
SELECT
x + d[1] nx
, y + d[2] ny
, CASE d
WHEN dl THEN du
WHEN du THEN dr
WHEN dr THEN dd
WHEN dd THEN dl
END nd
FROM
dir
) n
, LATERAL (
SELECT
coalesce(m[ny][nx], '') <> '#' AND -- no pre-existing block where we want to move
(nx, ny) <> (T.x, T.y) cond -- and we did not just place one there
) T
WHERE
m[ny][nx] IS NOT NULL
) CYCLE x, y, d SET is_cycle USING path -- loop protection
SELECT
bool_or(is_cycle) is_cycle -- a cycle did occur at some point
FROM
r
) chk;
--================================================================
WITH RECURSIVE src AS (
SELECT
'125 17' nums -- the initial set of stones
, 6 blinks -- the number of blinks
)
, r AS (
SELECT
0 blink -- number of blinks elapsed
, regexp_split_to_table(nums, ' ')::numeric num -- expand the string into a set of numbers
FROM
src
UNION ALL
SELECT
blink + 1 -- increment the blink counter
, unnest( -- "unfold" the array elements
CASE
-- rule #1
WHEN num = 0 THEN
ARRAY[1]
-- rule #2
WHEN length(num::text) % 2 = 0 THEN
ARRAY[
left(num::text, length(num::text) >> 1)
, right(num::text, length(num::text) >> 1)
]::numeric[]
-- rule #3
ELSE
ARRAY[num * 2024]
END
)
FROM
r
, src
WHERE
blink < blinks -- stop after the given number of blinks
)
SELECT
num
FROM
r
, src
WHERE
blink = blinks; -- keep only the stones from the last step
--================================================================
WITH RECURSIVE src AS (
SELECT
'125 17' nums
, 6 blinks
)
, r AS (
SELECT
0 blink
, regexp_split_to_table(nums, ' ')::numeric num
, 1::numeric qty
FROM
src
UNION ALL
(
WITH tmp AS (
SELECT
blink + 1 blink
, unnest(
CASE
WHEN num = 0 THEN
ARRAY[1]
WHEN length(num::text) % 2 = 0 THEN
ARRAY[
left(num::text, length(num::text) >> 1)
, right(num::text, length(num::text) >> 1)
]::numeric[]
ELSE
ARRAY[num * 2024]
END
) num
, qty
FROM
r
, src
WHERE
blink < blinks
)
SELECT
blink
, num
, sum(qty) qty
FROM
tmp
GROUP BY
1, 2
)
)
SELECT
sum(qty) count
FROM
r
, src
WHERE
blink = blinks;
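--================================================================
-- A minimal sketch of rule #2 above: a stone with an even number of digits
-- splits into its left and right halves (2024 -> 20 and 24).
SELECT left('2024', length('2024') >> 1)::numeric AS l
, right('2024', length('2024') >> 1)::numeric AS r;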
--================================================================
select a1.i, a2.i FROM
(select i from generate_series(-1, 1) i) as a1,
(select i from generate_series(-1, 1) i) as a2
--================================================================
select value,count(*) from demo group by value order by 2;
explain (analyze, buffers, costs off, summary off)
select * from demo where value != 0;
explain (analyze, buffers, costs off, summary off)
select * from demo where value < 0 or value > 0;
create index demo_value on demo(value asc) where value != 0;
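--================================================================
-- Note: "value != 0" cannot drive a btree scan, but the equivalent
-- "value < 0 OR value > 0" can; a sketch of re-checking the plan against
-- the partial index above (assuming the same demo table):
explain (costs off)
select * from demo where value < 0 or value > 0;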
--========================================================================
-- Create a table with a vector column
CREATE TABLE books (id SERIAL PRIMARY KEY, book_embedding REAL[3]);
-- Insert some vectors
INSERT INTO books (book_embedding) VALUES ('{0,1,0}'), ('{3,2,4}');
-- Create an index for faster queries
CREATE INDEX book_index ON books USING lantern_hnsw(book_embedding dist_l2sq_ops)
WITH (M=2, ef_construction=10, ef=4, dim=3);
-- Query the nearest vector
SELECT id FROM books ORDER BY book_embedding <-> '{0,0,0}' LIMIT 1;
-- Query the nearest vector to a text embedding
SELECT id FROM books ORDER BY book_embedding <-> text_embedding('BAAI/bge-base-en', 'My text input') LIMIT 1;
--========================================================================
SELECT pg_type.oid FROM pg_type
JOIN pg_depend ON pg_type.oid = pg_depend.objid
JOIN pg_extension ON pg_depend.refobjid = pg_extension.oid
WHERE typname='vector' AND extname='vector'
LIMIT 1
--========================================================================
select name, rank() over(order by score desc) as rank
from (values ('alice', 90), ('Bob', 85), ('Charlie', 90)) as t(name, score);
--=================================================================================
SELECT
users.city,
COUNT(trades.order_id) AS total_orders
FROM trades
INNER JOIN users
ON trades.user_id = users.user_id
WHERE trades.status = 'Completed'
GROUP BY users.city
ORDER BY total_orders DESC
LIMIT 3;
--=================================================================================
SELECT
trade_date,
stock_symbol,
(volume - LAG(volume) OVER (PARTITION BY stock_symbol ORDER BY trade_date))
/ LAG(volume) OVER (PARTITION BY stock_symbol ORDER BY trade_date) * 100 as volume_change_pct
FROM trading_volume;
--=================================================================================
SELECT
user_id,
transaction_type,
COUNT(*) AS total_transactions,
SUM(quantity) AS total_quantity
FROM
transactions
GROUP BY
user_id,
transaction_type;
SELECT
stock_id,
AVG(price) AS average_price
FROM
transactions
GROUP BY
stock_id;
SELECT
t.*,
u.user_name,
u.user_country,
s.stock_symbol,
s.stock_name,
s.stock_sector
FROM
transactions t
JOIN
users u ON t.user_id = u.user_id
JOIN
stocks s ON t.stock_id = s.stock_id;
--=================================================================================
SELECT EXTRACT(MONTH FROM trade_date) as month, stock as most_traded_stock
FROM trades
GROUP BY month, stock
ORDER BY count(*) DESC
LIMIT 1;
--=================================================================================
SELECT u.name, SUM(t.volume)
FROM users AS u
JOIN transactions AS t ON u.user_id = t.user_id
WHERE (t.transaction_date >= '2022-08-01' AND t.transaction_date < '2022-09-01')
GROUP BY u.name
HAVING SUM(t.volume) > 100000
ORDER BY SUM(t.volume) DESC;
--=================================================================================
WITH ranked_salary AS (
SELECT
name,
salary,
department_id,
DENSE_RANK() OVER (
PARTITION BY department_id ORDER BY salary DESC) AS ranking
FROM employee
)
SELECT
d.department_name,
rs.name,
rs.salary
FROM ranked_salary AS rs
INNER JOIN department AS d
ON rs.department_id = d.department_id
WHERE rs.ranking <= 3
ORDER BY d.department_id, rs.salary DESC, rs.name ASC;
--=================================================================================
SELECT
EXTRACT(MONTH FROM trade_date) AS mth,
stock_symbol,
AVG(return_percentage) OVER (PARTITION BY EXTRACT(MONTH FROM trade_date), stock_symbol) AS avg_return,
ROW_NUMBER() OVER (PARTITION BY EXTRACT(MONTH FROM trade_date) ORDER BY AVG(return_percentage) OVER (PARTITION BY EXTRACT(MONTH FROM trade_date), stock_symbol) DESC) AS rank
FROM
trade
ORDER BY
mth ASC,
rank ASC;
--=================================================================================
SELECT DISTINCT c.customer_id, c.customer_name
FROM customers c
JOIN transactions t ON c.customer_id = t.customer_id
WHERE c.opening_balance > 50000 AND c.city = 'Houston' AND t.missed_transaction = 'no'
GROUP BY c.customer_id, c.customer_name, t.missed_transaction
HAVING COUNT(t.transaction_id) = (SELECT COUNT(transaction_id) FROM transactions WHERE customer_id = c.customer_id);
--=================================================================================
SELECT c.campaign_name,
(SUM (CASE WHEN cl.invested = True THEN 1 ELSE 0 END)::FLOAT / COUNT(cl.click_id)) AS click_through_rate
FROM campaigns c
JOIN clicks cl ON c.campaign_id = cl.campaign_id
WHERE DATE_TRUNC('month',cl.click_date) = DATE_TRUNC('month', DATE '2022-08-01')
GROUP BY c.campaign_name;
--=================================================================================
SELECT client_id, investment_type, AVG(transaction_amount) AS avg_transaction_amount
FROM transactions
GROUP BY client_id, investment_type
HAVING AVG(transaction_amount) IN (
SELECT MAX(AVG(transaction_amount))
FROM transactions
GROUP BY client_id
);
select max(t.avg), min(t.max) from (SELECT ProductID, AVG(value) as avg, MAX(value) as max
FROM Product
GROUP BY 1
ORDER BY 1) as t;
--=================================================================================
SELECT
customer_id,
DATE_PART('quarter', transaction_date) AS quarter,
SUM(amount) AS total_transaction_value
FROM
transactions
GROUP BY
customer_id, quarter
HAVING
SUM(amount) > 10000
ORDER BY
total_transaction_value DESC;
--=================================================================================
SELECT
DATE_TRUNC('quarter', transaction_date)::DATE AS quarter,
customer_id,
SUM(transaction_value) AS total_transaction_value,
RANK() OVER(PARTITION BY DATE_TRUNC('quarter', transaction_date)::DATE
ORDER BY SUM(transaction_value) DESC) AS customer_rank
FROM
transactions
GROUP BY
quarter,
customer_id;
--=================================================================================
SELECT c.customer_id
FROM customers c
JOIN transactions t
ON c.customer_id = t.customer_id
WHERE t.transaction_date > (CURRENT_DATE - INTERVAL '1 month')
AND t.transaction_amount > 500
AND c.account_balance > 0
AND c.account_start_date < (CURRENT_DATE - INTERVAL '1 month')
GROUP BY c.customer_id
HAVING COUNT(t.transaction_id) > 5;
--=================================================================================
SELECT
EXTRACT(MONTH FROM transaction_date) AS month,
SUM(amount) AS total_deposited
FROM
transactions
WHERE
transaction_type = 'Deposit'
AND
EXTRACT(YEAR FROM transaction_date) = 2022
GROUP BY
EXTRACT(MONTH FROM transaction_date)
ORDER BY
month;
--=================================================================================
SELECT u.user_id,
DATE_PART('month', i.investment_date) AS Month,
COUNT(i.investment_id) AS Total_Investments
FROM users u
JOIN investments i
ON u.user_id = i.user_id
WHERE i.investment_date BETWEEN CURRENT_DATE - INTERVAL '1 year' AND CURRENT_DATE
GROUP BY u.user_id, Month
HAVING COUNT(i.investment_id) > 100
ORDER BY Total_Investments DESC;
--=================================================================================
SELECT
date_part('month', date) AS month,
fund_id,
AVG(value) AS avg_value
FROM
(
SELECT
date,
fund_id,
value,
ROW_NUMBER() OVER (PARTITION BY date, fund_id ORDER BY value_id DESC) AS rn
FROM
portfolio_values
) subquery
WHERE
rn = 1
GROUP BY
month,
fund_id
ORDER BY
month,
fund_id;
--=================================================================================
SELECT c.name, SUM(i.investment_amount) as total_investment
FROM clients c
INNER JOIN investments i ON c.client_id = i.client_id
WHERE EXTRACT(YEAR FROM i.investment_date) = 2022
GROUP BY c.client_id
ORDER BY total_investment DESC
LIMIT 10;
--=================================================================================
SELECT
year,
month,
product_id,
AVG(return_percentage) OVER (
PARTITION BY product_id
ORDER BY year, month
ROWS BETWEEN 2 PRECEDING AND CURRENT ROW
) as avg_return_percentage
FROM
investment_returns
ORDER BY
year,
month,
product_id;
--=================================================================================
SELECT p.portfolio_name,
AVG(pb.balance) AS average_balance
FROM portfolios AS p
INNER JOIN portfolio_balances AS pb
ON p.portfolio_id = pb.portfolio_id
WHERE pb.date BETWEEN '2022-07-01' AND '2022-09-30'
GROUP BY p.portfolio_name;
--=================================================================================
WITH monthly_volumes AS (
SELECT
date_trunc('month', trade_date) as mth,
symbol,
sum(volume) as trade_volume
FROM
trades
GROUP BY
mth,
symbol
)
SELECT
to_char(mth, 'Month') as mth,
symbol,
trade_volume,
COALESCE(
(((trade_volume - lag(trade_volume) OVER (PARTITION BY symbol ORDER BY mth))::numeric / lag(trade_volume) OVER (PARTITION BY symbol ORDER BY mth)) * 100)::text, -- cast so the first month can fall back to '-'
'-'
) as difference
FROM
monthly_volumes
ORDER BY
mth,
symbol;
--=================================================================================
WITH data AS (
SELECT 'Akash' AS name, 50 AS deals_closed UNION ALL
SELECT 'Brittany', 50 UNION ALL
SELECT 'Carlos', 40 UNION ALL
SELECT 'Dave', 30 UNION ALL
SELECT 'Eve', 30 UNION ALL
SELECT 'Farhad', 10
)
SELECT name, deals_closed,
RANK() OVER (ORDER BY deals_closed DESC) as rank,
DENSE_RANK() OVER (ORDER BY deals_closed DESC) as dense_rank
FROM data;
--=================================================================================
SELECT t.full_name, count(r.trade_id) as total_trades, t.account_balance
FROM traders t
JOIN trades r
ON t.trader_id = r.trader_id
WHERE r.trade_date BETWEEN date_trunc('month', CURRENT_DATE - interval '1 month') AND CURRENT_DATE
GROUP BY t.trader_id
HAVING count(r.trade_id) > 10 AND t.account_balance > 50000;
--=================================================================================
SELECT e1.name AS employee1, e2.name AS employee2
FROM interactive_brokers_employees AS e1
JOIN interactive_brokers_employees AS e2 ON e1.department_id = e2.department_id
WHERE e1.id <> e2.id;
--=================================================================================
SELECT T.transaction_date, T.client_id, C.client_name, T.max_amount
FROM (
SELECT transaction_date, client_id, MAX(amount) AS max_amount
FROM Transactions
GROUP BY transaction_date, client_id
) AS T
JOIN Clients C
ON T.client_id = C.client_id
ORDER BY T.transaction_date, T.max_amount DESC;
--=================================================================================
SELECT c.customer_id, c.customer_name, c.account_open_date
FROM customers c
INNER JOIN investments i ON c.customer_id = i.customer_id
WHERE DATE_PART('year', AGE(current_date, c.account_open_date)) >= 1
AND c.customer_id IN (
SELECT customer_id
FROM investments
GROUP BY customer_id
HAVING COUNT(DISTINCT account_id) > 1 AND SUM(investment_amount) > 10000
)
--=================================================================================
select pg_database_size(current_database());
--===========================================
SELECT * from table_name WHERE boolean_column IS NOT TRUE;
--===========================================
SELECT * FROM table_name WHERE boolean_column IS NULL
UNION
SELECT * FROM table_name WHERE boolean_column = FALSE
--===========================================
SELECT * FROM table_name WHERE COALESCE(boolean_column, FALSE) = FALSE
--===========================================
SELECT *
FROM table_name
WHERE boolean_column IS DISTINCT FROM TRUE
--=========================================================================================
--REGEXP_LIKE — checks whether a string matches a given pattern.
--REGEXP_REPLACE — replaces the part of a string that matches a pattern.
--REGEXP_SUBSTR — extracts a substring matching a pattern.
--REGEXP_INSTR — finds the position of a pattern match.
--REGEXP_COUNT — counts the occurrences of a pattern.
--SIMILAR TO — checks whether a string matches an SQL regex pattern.
SELECT user_id, email
FROM users
WHERE NOT REGEXP_LIKE(email, '^[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}$');
UPDATE contacts
SET phone_number = '+7' || REGEXP_REPLACE(phone_number, '\D', '')
WHERE REGEXP_LIKE(phone_number, '^\+?7?\d{10}$');
SELECT log_id, log_entry
FROM system_logs
WHERE REGEXP_LIKE(log_entry, 'Access denied for user ''admin_\w*\d+''', 'i');
SELECT
payment_id,
REGEXP_REPLACE(card_number, '\d{12}(\d{4})', '************\1') AS masked_card_number
FROM payments;
WITH hashtags AS (
SELECT
post_id,
REGEXP_SUBSTR(content, '#\w+', 1, LEVEL) AS hashtag
FROM social_posts
CONNECT BY REGEXP_SUBSTR(content, '#\w+', 1, LEVEL) IS NOT NULL
AND PRIOR post_id = post_id
AND PRIOR SYS_GUID() IS NOT NULL
)
SELECT DISTINCT hashtag
FROM hashtags
WHERE hashtag IS NOT NULL;
SELECT
order_id,
TO_NUMBER(REGEXP_SUBSTR(product_ids, '[^,]+', 1, LEVEL)) AS product_id
FROM orders
CONNECT BY REGEXP_SUBSTR(product_ids, '[^,]+', 1, LEVEL) IS NOT NULL
AND PRIOR order_id = order_id
AND PRIOR SYS_GUID() IS NOT NULL;
SELECT regexp_replace('ABC123', '\d+', '', 'g');
SELECT column_name
FROM table_name
WHERE REGEXP_LIKE(column_name, '(.*)\1');
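-- Minimal sketches for the functions listed above that have no example yet
-- (regexp_instr/regexp_count assume PostgreSQL 15+ or Oracle):
SELECT regexp_instr('abc123def', '\d+'); -- 4: position of the first digit run
SELECT regexp_count('abc123def456', '\d+'); -- 2: number of digit runs
SELECT 'abc123' SIMILAR TO '[a-z]+[0-9]+'; -- true: the whole string must match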
--=========================================================================================
---
SELECT COUNT(id)
FROM stackoverflow.posts
WHERE post_type_id=1
AND (score>300 OR favorites_count >= 100)
GROUP BY post_type_id;
---
SELECT ROUND(AVG(t.count),0)
FROM (
SELECT COUNT(id),
creation_date::date
FROM stackoverflow.posts
WHERE post_type_id = 1
GROUP BY creation_date::date
HAVING creation_date::date BETWEEN '2008-11-01' AND '2008-11-18') AS t;
---
SELECT COUNT(DISTINCT u.id)
FROM stackoverflow.badges AS b
JOIN stackoverflow.users AS u ON b.user_id=u.id
WHERE b.creation_date::date = u.creation_date::date;
---
SELECT COUNT(t.id)
FROM (
SELECT p.id
FROM stackoverflow.posts AS p
JOIN stackoverflow.votes AS v ON p.id = v.post_id
JOIN stackoverflow.users AS u ON p.user_id = u.id
WHERE u.display_name LIKE 'Joel Coehoorn'
GROUP BY p.id
HAVING COUNT(v.id)>=1) as t;
---
SELECT *,
ROW_NUMBER() OVER(ORDER BY id DESC) AS rank
FROM stackoverflow.vote_types
ORDER BY id;
---
SELECT *
FROM (
SELECT v.user_id,
COUNT(vt.id) AS cnt
FROM stackoverflow.votes AS v
JOIN stackoverflow.vote_types as vt ON vt.id = v.vote_type_id
WHERE vt.name LIKE 'Close'
GROUP BY v.user_id
ORDER BY cnt DESC LIMIT 10
) AS t
ORDER BY t.cnt DESC, t.user_id DESC;
---
SELECT *,
DENSE_RANK() OVER (ORDER BY t.cnt DESC) AS n
FROM (SELECT COUNT(id) AS cnt,
user_id
FROM stackoverflow.badges
WHERE creation_date::date BETWEEN '2008-11-15' AND '2008-12-15'
GROUP BY 2
ORDER BY cnt DESC, user_id LIMIT 10) as t;
---
WITH t AS (
SELECT ROUND(AVG(score)) AS avg_score,
user_id
FROM stackoverflow.posts
WHERE title IS NOT NULL
AND score <> 0
GROUP BY user_id
)
SELECT p.title,
t.user_id,
p.score,
t.avg_score
FROM t
JOIN stackoverflow.posts AS p ON t.user_id=p.user_id
WHERE p.title IS NOT NULL
AND p.score <> 0;
---
SELECT title
FROM stackoverflow.posts
WHERE user_id IN (
SELECT user_id
FROM stackoverflow.badges
GROUP BY user_id
HAVING COUNT(id) >1000
)
AND title IS NOT NULL;
---
SELECT id,
views,
CASE
WHEN views>=350 THEN 1
WHEN views<100 THEN 3
ELSE 2
END AS group
FROM stackoverflow.users
WHERE location LIKE '%United States%'
AND views > 0;
---
WITH tab AS
(SELECT t.id,
t.views,
t.group,
MAX(t.views) OVER (PARTITION BY t.group) AS max
FROM (SELECT id,
views,
CASE
WHEN views>=350 THEN 1
WHEN views<100 THEN 3
ELSE 2
END AS group
FROM stackoverflow.users
WHERE location LIKE '%United States%'
AND views > 0
) as t
)
SELECT tab.id,
tab.views,
tab.group
FROM tab
WHERE tab.views = tab.max
ORDER BY tab.views DESC, tab.id;
---
SELECT *,
SUM(t.cnt_id) OVER (ORDER BY t.days) as nn
FROM (
SELECT EXTRACT(DAY FROM creation_date::date) AS days,
COUNT(id) AS cnt_id
FROM stackoverflow.users
WHERE creation_date::date BETWEEN '2008-11-01' AND '2008-11-30'
GROUP BY EXTRACT(DAY FROM creation_date::date)
) as t;
---
WITH p AS
(SELECT DISTINCT user_id,
MIN(creation_date) OVER (PARTITION BY user_id) AS min_dt
FROM stackoverflow.posts
)
SELECT p.user_id,
(p.min_dt - u.creation_date) AS diff
FROM stackoverflow.users AS u
JOIN p ON u.id = p.user_id;
---
SELECT SUM(views_count),
DATE_TRUNC('month', creation_date)::date AS mnth
FROM stackoverflow.posts
GROUP BY DATE_TRUNC('month', creation_date)::date
ORDER BY SUM(views_count) DESC;
---
SELECT u.display_name,
COUNT(DISTINCT p.user_id)
FROM stackoverflow.posts AS p
JOIN stackoverflow.users AS u ON p.user_id = u.id
JOIN stackoverflow.post_types AS pt ON pt.id = p.post_type_id
WHERE p.creation_date::date BETWEEN u.creation_date::date AND (u.creation_date::date + INTERVAL '1 month')
AND pt.type LIKE '%Answer%'
GROUP BY u.display_name
HAVING COUNT(p.id) > 100
ORDER BY u.display_name;
---
WITH t AS (
SELECT u.id
FROM stackoverflow.posts AS p
JOIN stackoverflow.users AS u ON p.user_id = u.id
WHERE DATE_TRUNC('month', u.creation_date)::date = '2008-09-01'
AND DATE_TRUNC('month', p.creation_date)::date = '2008-12-01'
GROUP BY u.id
HAVING COUNT(p.id)>0
)
SELECT COUNT(p.id),
DATE_TRUNC('month', p.creation_date)::date
FROM stackoverflow.posts AS p
WHERE p.user_id IN (SELECT * FROM t)
AND DATE_TRUNC('year', p.creation_date)::date = '2008-01-01'
GROUP BY DATE_TRUNC('month', p.creation_date)::date
ORDER BY DATE_TRUNC('month', p.creation_date)::date DESC;
---
SELECT user_id,
creation_date,
views_count,
SUM(views_count) OVER (PARTITION BY user_id ORDER BY creation_date)
FROM stackoverflow.posts;
---
SELECT ROUND(AVG(t.cnt))
FROM (
SELECT user_id,
COUNT(DISTINCT creation_date::date) AS cnt
FROM stackoverflow.posts
WHERE creation_date::date BETWEEN '2008-12-01' AND '2008-12-07'
GROUP BY user_id
) AS t;
---
WITH t AS (
SELECT EXTRACT(MONTH from creation_date::date) AS month,
COUNT(DISTINCT id)
FROM stackoverflow.posts
WHERE creation_date::date BETWEEN '2008-09-01' AND '2008-12-31'
GROUP BY month
)
SELECT *,
ROUND(((count::numeric / LAG(count) OVER (ORDER BY month)) - 1) * 100,2) AS user_growth
FROM t;
---
WITH t AS (
SELECT user_id,
COUNT(DISTINCT id) AS cnt
FROM stackoverflow.posts
GROUP BY user_id
ORDER BY cnt DESC
LIMIT 1),
t1 AS (
SELECT p.user_id,
p.creation_date,
extract('week' from p.creation_date) AS week_number
FROM stackoverflow.posts AS p
JOIN t ON t.user_id = p.user_id
WHERE DATE_TRUNC('month', p.creation_date)::date = '2008-10-01'
)
SELECT DISTINCT week_number::numeric,
MAX(creation_date) OVER (PARTITION BY week_number)
FROM t1
ORDER BY week_number;
---
select name, rank() over(order by score DESC) as rank
FROM (values ('Alice', 90), ('Bob', 85), ('Charlie', 90)) as t(name, score);
SELECT
c.ClientID,
SUM(o.TotalAmount) AS TotalAmount,
MAX(o.TotalAmount) AS MaxOrderAmount,
MAX(o.OrderDate) AS LastOrderDate,
RANK() OVER (ORDER BY SUM(o.TotalAmount) DESC) AS ClientRank
FROM
Clients c
JOIN
Orders o ON c.ClientID = o.ClientID
LEFT JOIN
Products p ON o.OrderID = p.OrderID
GROUP BY
c.ClientID, c.ClientName;
CREATE OR REPLACE PROCEDURE GetMostPopularProducts IS
v_client_id Clients.ClientID%TYPE;
v_product_id Orders.ProductID%TYPE;
v_max_quantity NUMBER := 0;
v_max_product_id Orders.ProductID%TYPE;
v_avg_rating Products.AverageRating%TYPE;
BEGIN
FOR client_record IN (SELECT ClientID FROM Clients) LOOP
-- Find the product with the largest ordered quantity for this client
FOR order_record IN (SELECT ProductID, SUM(Quantity) AS TotalQuantity
FROM Orders WHERE ClientID = client_record.ClientID
GROUP BY ProductID) LOOP
IF order_record.TotalQuantity > v_max_quantity THEN
v_max_quantity := order_record.TotalQuantity;
v_max_product_id := order_record.ProductID;
END IF;
END LOOP;
-- Find the average rating of the most popular product
SELECT AVG(Rating) INTO v_avg_rating FROM Reviews WHERE ProductID = v_max_product_id;
-- Print the results
DBMS_OUTPUT.PUT_LINE('Client: ' || client_record.ClientID || ', Product: ' || v_max_product_id ||
', Order count: ' || v_max_quantity || ', Average rating: ' || v_avg_rating);
-- Reset the counters for the next client
v_max_quantity := 0;
END LOOP;
END GetMostPopularProducts;
CREATE OR REPLACE PROCEDURE Transfer_Funds (p_from_account NUMBER, p_to_account NUMBER, p_amount NUMBER) AS
v_balance Accounts.Balance%TYPE;
BEGIN
-- a subquery cannot appear directly in an IF condition, so fetch the balance first
SELECT Balance INTO v_balance FROM Accounts WHERE AccountID = p_from_account;
IF v_balance < p_amount THEN
RAISE_APPLICATION_ERROR(-20001, 'Insufficient funds');
END IF;
UPDATE Accounts SET Balance = Balance - p_amount WHERE AccountID = p_from_account;
UPDATE Accounts SET Balance = Balance + p_amount WHERE AccountID = p_to_account;
INSERT INTO Transfers (FromAccountID, ToAccountID, Amount) VALUES (p_from_account, p_to_account, p_amount);
COMMIT;
END;
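-- Hypothetical invocation of the procedure above (account ids and amount are sample values):
BEGIN
Transfer_Funds(1001, 1002, 250);
END;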
SELECT e.name as name,
MIN(e.id) as min,
max(e.id) as max,
COUNT(e.id) as count,
SUM(e.id) as sum,
AVG(e.id) as avg
FROM demo e
WHERE e.id >= 1
GROUP BY 1
--GROUP BY DISTINCT ROLLUP (1)
--GROUP BY GROUPING SETS ((1), ())
--GROUP BY CUBE (1)
--GROUP BY 1, CUBE ((1))
--GROUP BY 1, GROUPING SETS ((1), ())
--GROUP BY 1, GROUPING SETS ((1))
HAVING count(e.id) > 0;
--ORDER BY 2 DESC
CREATE OR REPLACE function get_name() returns varchar volatile AS $$
select string_agg(chr(trunc(65+random()*26)::integer), '')
from generate_series(1,(trunc(random()*30)::integer));
$$ language sql;
CREATE OR REPLACE FUNCTION create_mytable()
RETURNS void
LANGUAGE plpgsql AS
$func$
BEGIN
IF EXISTS (SELECT FROM pg_catalog.pg_tables
WHERE schemaname = 'myschema'
AND tablename = 'mytable') THEN
RAISE NOTICE 'Table myschema.mytable already exists.';
ELSE
CREATE TABLE myschema.mytable (i integer);
END IF;
END
$func$;
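-- Usage: the function above only creates myschema.mytable if it does not exist yet
SELECT create_mytable();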
insert into customer_etalon(
cust_id, -- client id
first_name, -- surname
midle_name, -- given name
last_name, -- patronymic
-- registered address
is_reg_idx_verif, -- registered-address postal code verified?
reg_index, -- registered-address postal code
is_reg_town_verif, -- registered-address town verified?
reg_town_id, -- registered-address town
is_reg_street_verif, -- registered-address street verified?
reg_street_id, -- registered-address street
is_reg_house_verif, -- registered-address house verified?
reg_house, -- registered-address house
is_reg_flat_verif, -- registered-address flat verified?
reg_flat, -- registered-address flat
-- actual residential address
is_fact_idx_verif, -- residential-address postal code verified?
fact_index, -- residential-address postal code
is_fact_town_verif, -- residential-address town verified?
fact_town_id, -- residential-address town
is_fact_street_verif, -- residential-address street verified?
fact_street_id, -- residential-address street
is_fact_house_verif, -- residential-address house verified?
fact_house, -- residential-address house
is_fact_flat_verif, -- residential-address flat verified?
fact_flat -- residential-address flat
)
select
cust_id as cust_id,
get_name() as first_name,
get_name() as midle_name,
get_name() as last_name,
-- registered address
case when trunc(random()*10) = 1 then 1 else 0 end as is_reg_idx_verif,
trunc(random()*100000) as reg_index,
case when trunc(random()*10) = 1 then 1 else 0 end as is_reg_town_verif,
trunc(random()*10000) as reg_town_id,
case when trunc(random()*10) = 1 then 1 else 0 end as is_reg_street_verif,
trunc(random()*1000) as reg_street_id,
case when trunc(random()*10) = 1 then 1 else 0 end as is_reg_house_verif,
trunc(random()*1000) reg_house,
case when trunc(random()*10) = 1 then 1 else 0 end as is_reg_flat_verif,
trunc(random()*1000) as reg_flat,
-- actual residential address
case when trunc(random()*10) = 1 then 1 else 0 end as is_fact_idx_verif,
trunc(random()*100000) as fact_index,
case when trunc(random()*10) = 1 then 1 else 0 end as is_fact_town_verif,
trunc(random()*10000) as fact_town_id,
case when trunc(random()*10) = 1 then 1 else 0 end as is_fact_street_verif,
trunc(random()*1000) as fact_street_id,
case when trunc(random()*10) = 1 then 1 else 0 end as is_fact_house_verif,
trunc(random()*1000) fact_house,
case when trunc(random()*10) = 1 then 1 else 0 end as is_fact_flat_verif,
trunc(random()*1000) as fact_flat
FROM generate_series(1,1000000) cust_id;
SELECT pg_size_pretty(pg_table_size('customer_etalon')) AS table_size,
pg_size_pretty(pg_indexes_size('customer_etalon')) AS index_size;
select *
from customer_etalon
fetch first 10 rows only;
explain (analyze)
select count(*),
sum(case when is_reg_idx_verif then 1 else 0 end),
sum(case when is_reg_town_verif then 1 else 0 end),
sum(case when is_reg_street_verif then 1 else 0 end),
sum(case when is_reg_house_verif then 1 else 0 end),
sum(case when is_reg_flat_verif then 1 else 0 end),
sum(case when is_fact_idx_verif then 1 else 0 end),
sum(case when is_fact_town_verif then 1 else 0 end),
sum(case when is_fact_street_verif then 1 else 0 end),
sum(case when is_fact_house_verif then 1 else 0 end),
sum(case when is_fact_flat_verif then 1 else 0 end)
from customer_v1;
DO $$
DECLARE
table_name TEXT := 'users';
column_name TEXT := 'email';
BEGIN
EXECUTE FORMAT('SELECT %I FROM %I', column_name, table_name);
END $$;
--=====================================================================================
DO $$
DECLARE
table_name TEXT := 'users';
query TEXT;
BEGIN
query := FORMAT('SELECT * FROM %I', table_name);
RAISE NOTICE 'Executing query: %', query;
EXECUTE query;
END $$;
--=====================================================================================
PREPARE dynamic_query(TEXT) AS SELECT * FROM users WHERE email = $1;
EXECUTE dynamic_query('[email protected]');
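-- The prepared statement persists for the rest of the session; release it when done:
DEALLOCATE dynamic_query;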
--=====================================================================================
DO $$
DECLARE
query TEXT;
param TEXT := '[email protected]';
BEGIN
query := 'SELECT * FROM users WHERE email = $1';
EXECUTE query USING param;
END $$;
--=====================================================================================
CREATE OR REPLACE FUNCTION get_client_data(table_name TEXT)
RETURNS TABLE(id INT, value TEXT) AS $$
BEGIN
RETURN QUERY EXECUTE FORMAT('SELECT id, value FROM %I', table_name);
END $$ LANGUAGE plpgsql;
SELECT * FROM get_client_data('data_client_1');
--=====================================================================================
CREATE OR REPLACE FUNCTION get_filtered_data(start_date DATE, end_date DATE, status TEXT)
RETURNS TABLE(id INT, created_at DATE, status TEXT) AS $$
BEGIN
RETURN QUERY EXECUTE FORMAT(
'SELECT id, created_at, status
FROM orders
WHERE created_at BETWEEN %L AND %L
AND status = %L',
start_date, end_date, status
);
END $$ LANGUAGE plpgsql;
SELECT * FROM get_filtered_data('2024-01-01', '2024-12-31', 'active');
--=====================================================================================
CREATE OR REPLACE FUNCTION update_multiple_tables(tables TEXT[], new_value TEXT)
RETURNS VOID AS $$
DECLARE
table_name TEXT;
BEGIN
FOREACH table_name IN ARRAY tables LOOP
EXECUTE FORMAT('UPDATE %I SET value = %L WHERE value IS NULL', table_name, new_value);
END LOOP;
END $$ LANGUAGE plpgsql;
SELECT update_multiple_tables(ARRAY['table1', 'table2'], 'default_value');
--=====================================================================================
CREATE OR REPLACE FUNCTION create_indexes(tables TEXT[], column_name TEXT)
RETURNS VOID AS $$
DECLARE
table_name TEXT;
BEGIN
FOREACH table_name IN ARRAY tables LOOP
EXECUTE FORMAT('CREATE INDEX IF NOT EXISTS idx_%I_%I ON %I (%I)',
table_name, column_name, table_name, column_name);
END LOOP;
END $$ LANGUAGE plpgsql;
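-- Hypothetical usage of the helper above (table and column names are sample values):
SELECT create_indexes(ARRAY['table1', 'table2'], 'value');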
--=====================================================================================
WITH store_sales AS (
SELECT store, SUM(amount) AS total_sales
FROM orders
GROUP BY store
), top_stores AS (
SELECT store
FROM store_sales
WHERE total_sales > (SELECT SUM(total_sales)/10 FROM store_sales)
)
SELECT store,
book,
SUM(quantity) AS book_quantity,
SUM(amount) AS book_sales
FROM orders
WHERE store IN (SELECT store FROM top_stores)
GROUP BY store, book;
--=====================================================================================
CREATE TABLE employees (
employee_id serial PRIMARY KEY,
full_name VARCHAR NOT NULL,
manager_id INT
);
INSERT INTO employees (employee_id, full_name, manager_id)
VALUES
(1, 'James Wilson', NULL),
(2, 'Mary Burton', 1),
(3, 'Patricia Robinson', 1),
(4, 'Robert Gray', 1),
(5, 'Elizabeth Tucker', 2),
(6, 'Joseph Lewis', 2),
(7, 'William Ferguson', 2),
(8, 'Linda Black', 3),
(9, 'David Green', 3),
(10, 'Daniel Gray', 5),
(11, 'Mark Armstrong', 4),
(12, 'Donald Carter', 7),
(13, 'Elizabeth Collins', 7),
(14, 'Paul Brown', 8),
(15, 'Andrew Clarke', 8);
WITH RECURSIVE subordinates (employee_id, manager_id, full_name) AS (
SELECT employee_id, manager_id, full_name
FROM employees WHERE employee_id = 2
UNION
SELECT e.employee_id, e.manager_id, e.full_name
FROM employees e
INNER JOIN subordinates s ON s.employee_id = e.manager_id
)
SELECT * FROM subordinates;
--=====================================================================================
WITH RECURSIVE subordinates(employee_id, manager_id, full_name) AS (
SELECT employee_id, manager_id, full_name
FROM employees WHERE employee_id = 2
UNION
SELECT e.employee_id, e.manager_id, e.full_name
FROM employees e
INNER JOIN subordinates s ON s.employee_id = e.manager_id
) SEARCH DEPTH FIRST BY employee_id SET ordercol
SELECT * FROM subordinates ORDER BY ordercol;
--=====================================================================================
WITH RECURSIVE subordinates(employee_id, manager_id, full_name) AS (
SELECT employee_id, manager_id, full_name
FROM employees WHERE employee_id = 2
UNION
SELECT e.employee_id, e.manager_id, e.full_name
FROM employees e
INNER JOIN subordinates s ON s.employee_id = e.manager_id
) SEARCH BREADTH FIRST BY employee_id SET ordercol
SELECT * FROM subordinates ORDER BY ordercol;
--=====================================================================================
WITH RECURSIVE subordinates(employee_id, manager_id, full_name) AS (
SELECT employee_id, manager_id, full_name
FROM employees WHERE employee_id=2
UNION
SELECT e.employee_id, e.manager_id, e.full_name
FROM employees e
INNER JOIN subordinates s ON s.employee_id = e.manager_id
) CYCLE employee_id SET is_cycle USING path
SELECT * FROM subordinates;
--=====================================================================================
WITH employees_data AS MATERIALIZED (
SELECT * FROM employees
)
SELECT full_name FROM employees_data WHERE employee_id = 5;
--=====================================================================================
WITH employees_data AS NOT MATERIALIZED (
SELECT * FROM employees
)
SELECT (
SELECT full_name FROM employees_data WHERE employee_id = 5),
(SELECT full_name FROM employees_data WHERE employee_id = 6);
--=====================================================================================
WITH moved_rows AS (
DELETE FROM employees
WHERE
employee_id = 4 OR
employee_id = 5
RETURNING *
)
INSERT INTO retired_employee
SELECT * FROM moved_rows;
--=====================================================================================
WITH employees_data AS (
UPDATE employees SET manager_id = 1 WHERE employee_id =10
RETURNING *
)
SELECT * FROM employees_data;
--=====================================================================================
WITH RECURSIVE subordinates (employee_id, manager_id, full_name) AS (
SELECT employee_id, manager_id, full_name
FROM employees WHERE employee_id = 2
UNION
SELECT e.employee_id, e.manager_id, e.full_name
FROM employees e
INNER JOIN subordinates s ON s.employee_id = e.manager_id
)
DELETE FROM employees
WHERE employee_id IN (SELECT employee_id FROM subordinates);
--=====================================================================================
WITH RECURSIVE parent_table(space_id, space_name, parent_id) AS (
SELECT id as space_id, name as space_name, parent_id
FROM spaces WHERE id = {id}
UNION
SELECT s.id as space_id, s.name as space_name, s.parent_id
FROM spaces as s
INNER JOIN parent_table p ON p.space_id = s.parent_id
)
SELECT * FROM parent_table
--=====================================================================================
WITH RECURSIVE parent_table(id, name, parent_id, ids, cycle) AS (
SELECT id, name, parent_id, ARRAY[id], false
FROM spaces WHERE id = :val
UNION
SELECT s.id, s.name, s.parent_id, s.id || p.ids, s.id = ANY(p.ids)
FROM spaces as s
INNER JOIN parent_table p ON p.parent_id = s.id
WHERE not cycle
)
SELECT * FROM parent_table
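--=====================================================================================
-- On PostgreSQL 14+ the hand-rolled ids/cycle columns above can be replaced
-- with the built-in CYCLE clause; a sketch against the same spaces table:
WITH RECURSIVE parent_table(id, name, parent_id) AS (
SELECT id, name, parent_id
FROM spaces WHERE id = :val
UNION
SELECT s.id, s.name, s.parent_id
FROM spaces as s
INNER JOIN parent_table p ON p.parent_id = s.id
) CYCLE id SET is_cycle USING path
SELECT * FROM parent_table WHERE NOT is_cycle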
--========================================================================================================
SELECT yr,party, votes,
RANK() OVER (PARTITION BY yr ORDER BY votes DESC) as posn
FROM ge
WHERE constituency = 'S14000021'
ORDER BY party,yr
--========================================================================================================
SELECT party, votes,
RANK() OVER (ORDER BY votes DESC) as posn
FROM ge
WHERE constituency = 'E14000539' AND yr = 2017
ORDER BY votes
--========================================================================================================
SELECT constituency,party, votes
FROM ge
WHERE constituency BETWEEN 'S14000021' AND 'S14000026'
AND yr = 2017
ORDER BY constituency,votes DESC
--========================================================================================================
CREATE TABLE cd.members
(
memid integer NOT NULL,
surname character varying(200) NOT NULL,
firstname character varying(200) NOT NULL,
address character varying(300) NOT NULL,
zipcode integer NOT NULL,
telephone character varying(20) NOT NULL,
recommendedby integer,
joindate timestamp NOT NULL,
CONSTRAINT members_pk PRIMARY KEY (memid),
CONSTRAINT fk_members_recommendedby FOREIGN KEY (recommendedby)
REFERENCES cd.members(memid) ON DELETE SET NULL
);
CREATE TABLE cd.facilities
(
facid integer NOT NULL,
name character varying(100) NOT NULL,
membercost numeric NOT NULL,
guestcost numeric NOT NULL,
initialoutlay numeric NOT NULL,
monthlymaintenance numeric NOT NULL,
CONSTRAINT facilities_pk PRIMARY KEY (facid)
);
CREATE TABLE cd.bookings
(
bookid integer NOT NULL,
facid integer NOT NULL,
memid integer NOT NULL,
starttime timestamp NOT NULL,
slots integer NOT NULL,
CONSTRAINT bookings_pk PRIMARY KEY (bookid),
CONSTRAINT fk_bookings_facid FOREIGN KEY (facid) REFERENCES cd.facilities(facid),
CONSTRAINT fk_bookings_memid FOREIGN KEY (memid) REFERENCES cd.members(memid)
);
--========================================================================================================
WITH tiles (rn, x, y) AS
(
VALUES
(1, 0, 0),
(2, 1, 0),
(3, 2, 0),
(4, 3, 1),
(5, 4, 2),
(6, 4, 3),
(7, 4, 4),
(8, 3, 4),
(9, 2, 4),
(10, 1, 3),
(11, 0, 2),
(12, 0, 1),
(13, 1, 1),
(14, 2, 1),
(15, 3, 2),
(16, 3, 3),
(17, 2, 3),
(18, 1, 2),
(19, 2, 2)
)
SELECT *
FROM tiles
--========================================================================================================
WITH tiles (rn, x, y) AS
(
VALUES
(1, 0, 0),
(2, 1, 0),
(3, 2, 0),
(4, 3, 1),
(5, 4, 2),
(6, 4, 3),
(7, 4, 4),
(8, 3, 4),
(9, 2, 4),
(10, 1, 3),
(11, 0, 2),
(12, 0, 1),
(13, 1, 1),
(14, 2, 1),
(15, 3, 2),
(16, 3, 3),
(17, 2, 3),
(18, 1, 2),
(19, 2, 2)
),
resources AS
(
SELECT '////####^^^' || CHR(34) || CHR(34) || CHR(34) || CHR(34) || '... '::TEXT tile_resources
)
SELECT *
FROM tiles
JOIN (
SELECT ROW_NUMBER() OVER (ORDER BY RANDOM()) rn,
resource
FROM resources
CROSS JOIN
LATERAL
REGEXP_SPLIT_TO_TABLE(tile_resources, '') q (resource)
) tr
USING (rn)
--========================================================================================================
SELECT SETSEED(0.201703);
WITH RECURSIVE
resources AS
(
SELECT '////####^^^' || CHR(34) || CHR(34) || CHR(34) || CHR(34) || '... '::TEXT tile_resources
),
tiles (rn, x, y) AS
(
VALUES
(1, 0, 0),
(2, 1, 0),
(3, 2, 0),
(4, 3, 1),
(5, 4, 2),
(6, 4, 3),
(7, 4, 4),
(8, 3, 4),
(9, 2, 4),
(10, 1, 3),
(11, 0, 2),
(12, 0, 1),
(13, 1, 1),
(14, 2, 1),
(15, 3, 2),
(16, 3, 3),
(17, 2, 3),
(18, 1, 2),
(19, 2, 2)
),
layout AS
(
SELECT *,
CASE resource
WHEN ' ' THEN
NULL
ELSE
rn + SUM(CASE resource WHEN ' ' THEN -1 ELSE 0 END) OVER (ORDER BY rn)
END score_rn
FROM tiles
JOIN (
SELECT ROW_NUMBER() OVER (ORDER BY RANDOM()) rn,
resource
FROM resources
CROSS JOIN
LATERAL
REGEXP_SPLIT_TO_TABLE(tile_resources, '') q (resource)
) tr
USING (rn)
),
score AS
(
SELECT 1 attempt,
ARRAY_AGG(s ORDER BY RANDOM()) score_array,
NULL::BIGINT desert
FROM generate_series(2, 12) s
CROSS JOIN
generate_series(1, 2) r
WHERE s <> 7
AND NOT (r = 2 AND s IN (2, 12))
UNION ALL
SELECT attempt + 1 attempt,
sa.score_array,
(
SELECT rn
FROM layout
WHERE score_rn IS NULL
) desert
FROM (
SELECT *
FROM score
WHERE EXISTS
(
SELECT NULL
FROM (
SELECT *
FROM UNNEST(score_array) WITH ORDINALITY q(s1, score_rn)
JOIN layout
USING (score_rn)
) sc1
JOIN (
SELECT *
FROM UNNEST(score_array) WITH ORDINALITY q(s2, score_rn)
JOIN layout
USING (score_rn)
) sc2
ON s1 IN (6, 8)
AND s2 IN (6, 8)
AND ((sc1.x - sc2.x), (sc1.y - sc2.y)) IN ((-1, -1), (-1, 0), (0, -1), (0, 1), (1, 0), (1, 1))
)
) s
CROSS JOIN
LATERAL
(
SELECT ARRAY_AGG(score ORDER BY RANDOM()) score_array
FROM UNNEST(score_array) WITH ORDINALITY q(score, score_rn)
) sa
),
score_good AS
(
SELECT score, score_rn
FROM (
SELECT *
FROM score
ORDER BY
attempt DESC
LIMIT 1
) s
CROSS JOIN
LATERAL
UNNEST(score_array) WITH ORDINALITY q (score, score_rn)
)
SELECT *
FROM score_good
--========================================================================================================
SELECT SETSEED(0.201703);
WITH RECURSIVE
resources AS
(
SELECT '////####^^^' || CHR(34) || CHR(34) || CHR(34) || CHR(34) || '... '::TEXT tile_resources
),
tiles (rn, x, y) AS
(
VALUES
(1, 0, 0),
(2, 1, 0),
(3, 2, 0),
(4, 3, 1),
(5, 4, 2),
(6, 4, 3),
(7, 4, 4),
(8, 3, 4),
(9, 2, 4),
(10, 1, 3),
(11, 0, 2),
(12, 0, 1),
(13, 1, 1),
(14, 2, 1),
(15, 3, 2),
(16, 3, 3),
(17, 2, 3),
(18, 1, 2),
(19, 2, 2)
),
layout AS
(
SELECT *,
CASE resource
WHEN ' ' THEN
NULL
ELSE
rn + SUM(CASE resource WHEN ' ' THEN -1 ELSE 0 END) OVER (ORDER BY rn)
END score_rn
FROM tiles
JOIN (
SELECT ROW_NUMBER() OVER (ORDER BY RANDOM()) rn,
resource
FROM resources
CROSS JOIN
LATERAL
REGEXP_SPLIT_TO_TABLE(tile_resources, '') q (resource)
) tr
USING (rn)
),
score AS
(
SELECT 1 attempt,
ARRAY_AGG(s ORDER BY RANDOM()) score_array,
NULL::BIGINT desert
FROM generate_series(2, 12) s
CROSS JOIN
generate_series(1, 2) r
WHERE s <> 7
AND NOT (r = 2 AND s IN (2, 12))
UNION ALL
SELECT attempt + 1 attempt,
sa.score_array,
(
SELECT rn
FROM layout
WHERE score_rn IS NULL
) desert
FROM (
SELECT *
FROM score
WHERE EXISTS
(
SELECT NULL
FROM (
SELECT *
FROM UNNEST(score_array) WITH ORDINALITY q(s1, score_rn)
JOIN layout
USING (score_rn)
) sc1
JOIN (
SELECT *
FROM UNNEST(score_array) WITH ORDINALITY q(s2, score_rn)
JOIN layout
USING (score_rn)
) sc2
ON s1 IN (6, 8)
AND s2 IN (6, 8)
AND ((sc1.x - sc2.x), (sc1.y - sc2.y)) IN ((-1, -1), (-1, 0), (0, -1), (0, 1), (1, 0), (1, 1))
)
) s
CROSS JOIN
LATERAL
(
SELECT ARRAY_AGG(score ORDER BY RANDOM()) score_array
FROM UNNEST(s.score_array) WITH ORDINALITY q(score, score_rn)
) sa
),
score_good AS
(
SELECT score, score_rn, attempt
FROM (
SELECT *
FROM score
ORDER BY
attempt DESC
LIMIT 1
) s
CROSS JOIN
LATERAL
UNNEST(score_array) WITH ORDINALITY q (score, score_rn)
)
SELECT *
FROM layout
LEFT JOIN
score_good
USING (score_rn)
ORDER BY
rn;
--========================================================================================================
WITH RECURSIVE
resources AS
(
SELECT '////####^^^' || CHR(34) || CHR(34) || CHR(34) || CHR(34) || '... '::TEXT tile_resources,
'/#^' || CHR(34) || '.????'::TEXT harbor_resources
),
harbors (rn, x, y, pier1, pier2) AS
(
VALUES
(1, -1, -1, 0, 1),
(2, 1, -1, 1, 2),
(3, 3, 0, 1, 2),
(4, 5, 2, 2, 3),
(5, 5, 4, 3, 4),
(6, 4, 5, 3, 4),
(7, 2, 5, 4, 5),
(8, 0, 3, 5, 0),
(9, -1, 1, 5, 0)
),
harbor_resources AS
(
SELECT '/#>".????'::TEXT harbor_resources
)
SELECT resource, rn, x, y, pier1, pier2
FROM harbors
CROSS JOIN
resources
JOIN LATERAL
(
SELECT resource, ROW_NUMBER() OVER (ORDER BY RANDOM()) rn
FROM REGEXP_SPLIT_TO_TABLE(harbor_resources, '') q (resource)
) q
USING (rn)
ORDER BY
RANDOM()
--========================================================================================================
SELECT SETSEED(0.201704);
WITH RECURSIVE
resources AS
(
SELECT '////####^^^' || CHR(34) || CHR(34) || CHR(34) || CHR(34) || '... '::TEXT tile_resources,
'/#^' || CHR(34) || '.????'::TEXT harbor_resources
),
tiles (rn, x, y) AS
(
VALUES
(1, 0, 0),
(2, 1, 0),
(3, 2, 0),
(4, 3, 1),
(5, 4, 2),
(6, 4, 3),
(7, 4, 4),
(8, 3, 4),
(9, 2, 4),
(10, 1, 3),
(11, 0, 2),
(12, 0, 1),
(13, 1, 1),
(14, 2, 1),
(15, 3, 2),
(16, 3, 3),
(17, 2, 3),
(18, 1, 2),
(19, 2, 2)
),
harbors (rn, x, y, pier1, pier2) AS
(
VALUES
(1, -1, -1, 0, 1),
(2, 1, -1, 1, 2),
(3, 3, 0, 1, 2),
(4, 5, 2, 2, 3),
(5, 5, 4, 3, 4),
(6, 4, 5, 3, 4),
(7, 2, 5, 4, 5),
(8, 0, 3, 5, 0),
(9, -1, 1, 5, 0)
),
score AS
(
SELECT 1 attempt,
ARRAY_AGG(s ORDER BY RANDOM()) score_array
FROM generate_series(2, 12) s
CROSS JOIN
generate_series(1, 2) r
WHERE s <> 7
AND NOT (r = 2 AND s IN (2, 12))
UNION ALL
SELECT attempt + 1 attempt,
sa.score_array
FROM (
SELECT *
FROM score
WHERE EXISTS
(
SELECT NULL
FROM (
SELECT *
FROM UNNEST(score_array) WITH ORDINALITY q(s1, rn)
JOIN tiles
USING (rn)
) sc1
JOIN (
SELECT *
FROM UNNEST(score_array) WITH ORDINALITY q(s2, rn)
JOIN tiles t
USING (rn)
) sc2
ON s1 IN (6, 8)
AND s2 IN (6, 8)
AND ((sc1.x - sc2.x), (sc1.y - sc2.y)) IN ((-1, -1), (-1, 0), (0, -1), (0, 1), (1, 0), (1, 1))
)
) s
CROSS JOIN
LATERAL
(
SELECT ARRAY_AGG(score ORDER BY RANDOM()) score_array
FROM UNNEST(score_array) WITH ORDINALITY q(score, score_rn)
) sa
),
score_good AS
(
SELECT score, score_rn
FROM (
SELECT *
FROM score
ORDER BY
attempt DESC
LIMIT 1
) s
CROSS JOIN
LATERAL
UNNEST(score_array) WITH ORDINALITY q (score, score_rn)
),
layout AS
(
SELECT *
FROM (
SELECT *,
CASE resource
WHEN ' ' THEN
NULL
ELSE
rn + SUM(CASE resource WHEN ' ' THEN -1 ELSE 0 END) OVER (ORDER BY rn)
END score_rn
FROM tiles
JOIN (
SELECT ROW_NUMBER() OVER (ORDER BY RANDOM()) rn,
resource
FROM resources
CROSS JOIN
LATERAL
REGEXP_SPLIT_TO_TABLE(tile_resources, '') q (resource)
) tr
USING (rn)
) t
LEFT JOIN
score_good
USING (score_rn)
ORDER BY
rn
)
SELECT row
FROM (
SELECT r,
STRING_AGG(COALESCE(letter, ' '), '' ORDER BY c) AS row
FROM generate_series(0, 70) r
CROSS JOIN
generate_series(0, 89) c
LEFT JOIN
(
SELECT *
FROM (
SELECT *,
ROW_NUMBER() OVER (PARTITION BY r, c ORDER BY layer DESC) rn
FROM (
SELECT 10 height,
16 width
) d
CROSS JOIN
LATERAL
(
SELECT letter, r, c, layer
FROM layout
CROSS JOIN
LATERAL
(
SELECT height * x + 15 center_r,
width * y - (width / 2)::INT * x + 24 center_c
) c
CROSS JOIN
LATERAL
(
SELECT *
FROM (
SELECT 1 layer, resource letter, center_r + rs r, center_c + cs c
FROM (
SELECT height * 1.5 * 0.8 th, width * 0.9 tw
) t
CROSS JOIN
generate_series(-(th / 2)::INT, (th / 2)::INT) rs
CROSS JOIN
generate_series(-(tw / 2)::INT, (tw / 2)::INT ) cs
CROSS JOIN
LATERAL
(
SELECT rs::FLOAT / th rsf, cs::FLOAT / tw csf
) f
WHERE rsf BETWEEN -0.25 AND 0.25
OR
ABS(csf) BETWEEN 0 AND 1 - ABS(rsf * 2)
UNION ALL
SELECT 2 layer, ' ', center_r + rs r, center_c + cs c
FROM (
SELECT height * 1.5 * 0.35 th, width * 0.35 tw
) t
CROSS JOIN
generate_series(-(th / 2)::INT, (th / 2)::INT) rs
CROSS JOIN
generate_series(-(tw / 2)::INT, (tw / 2)::INT ) cs
CROSS JOIN
LATERAL
(
SELECT rs::FLOAT / th rsf, cs::FLOAT / tw csf
) f
WHERE rsf BETWEEN -0.25 AND 0.25
OR
ABS(csf) BETWEEN 0 AND 1 - ABS(rsf * 2)
UNION ALL
SELECT 3 layer, score_letter letter, center_r r, center_c + pos - 1 c
FROM REGEXP_SPLIT_TO_TABLE(score::TEXT, '') WITH ORDINALITY l(score_letter, pos)
) q
) q2
UNION ALL
SELECT letter, r, c, 4 layer
FROM harbors
JOIN LATERAL
(
SELECT resource, ROW_NUMBER() OVER (ORDER BY RANDOM()) rn
FROM resources
CROSS JOIN
LATERAL
REGEXP_SPLIT_TO_TABLE(harbor_resources, '') q (resource)
) q2
USING (rn)
CROSS JOIN
LATERAL
(
SELECT height * x + 15 center_r,
width * y - (width / 2)::INT * x + 25 center_c
) c
CROSS JOIN
LATERAL
(
SELECT resource letter, center_r r, center_c c
UNION ALL
SELECT letter, r, c
FROM (
SELECT pier1
UNION ALL
SELECT pier2
) p (pier)
CROSS JOIN
LATERAL
(
SELECT SUBSTRING('|\/|\/', (pier + 1), 1) letter,
center_r + ((ARRAY[0.4, 0.2, -0.2, -0.4, -0.2, 0.2])[pier + 1] * height * 1.5 * 0.8)::INT r,
center_c + ((ARRAY[0, 0.3, 0.3, 0, -0.3, -0.3])[pier + 1] * width * 0.9)::INT c
) pl
) p2
) q3
) l
WHERE rn = 1
) t
USING (r, c)
GROUP BY
r
) q
ORDER BY
r
--========================================================================================================
select employee_id,
case when employee_id % 2 = 1 and substring(name from 1 for 1) != 'M'
then salary
else 0
end as bonus
from employees
order by employee_id;
--===============================================================================
DROP TABLE IF EXISTS parted,plain CASCADE;
CREATE TEMP TABLE parted (x integer, y integer, payload text)
PARTITION BY HASH (payload);
CREATE TEMP TABLE parted_p1 PARTITION OF parted
FOR VALUES WITH (MODULUS 2, REMAINDER 0);
CREATE TEMP TABLE parted_p2 PARTITION OF parted
FOR VALUES WITH (MODULUS 2, REMAINDER 1);
INSERT INTO parted (x,y,payload)
SELECT (random()*600)::integer, (random()*600)::integer, md5((gs%500)::text)
FROM generate_series(1,1E5) AS gs;
CREATE TEMP TABLE plain (x numeric, y numeric, payload text);
INSERT INTO plain (x,y,payload) SELECT x,y,payload FROM parted;
CREATE INDEX ON parted(payload);
CREATE INDEX ON plain(payload);
VACUUM ANALYZE;
VACUUM ANALYZE parted;
--===============================================================================
EXPLAIN (COSTS OFF)
SELECT * FROM plain p1 JOIN plain p2 USING (payload) LIMIT 100;
EXPLAIN (COSTS OFF)
SELECT * FROM parted p1 JOIN parted p2 USING (payload) LIMIT 100;
--===============================================================================
SET enable_partitionwise_join = 'true';
EXPLAIN (COSTS OFF)
SELECT * FROM parted p1 JOIN parted p2 USING (payload) LIMIT 100;
--===============================================================================
EXPLAIN (COSTS OFF)
SELECT * FROM parted p1 JOIN parted p2 USING (payload,y)
ORDER BY payload,y LIMIT 100;
--===============================================================================
EXPLAIN (COSTS OFF)
SELECT * FROM parted p1 JOIN parted p2 USING (payload,y)
ORDER BY payload,y LIMIT 1;
--===============================================================================
--===============================================================================
--===============================================================================
--===============================================================================
--===============================================================================
--===============================================================================
--===============================================================================
--===============================================================================
--===============================================================================
--===============================================================================
--===============================================================================
--==============================================================================================
SELECT query, projections FROM system.query_log WHERE query_id='<query_id>'
--==============================================================================================
CREATE TABLE user_events (
timestamp DateTime,
user_session String,
    page Enum('/product' = 1, '/about' = 2, '/provider' = 3, '/question' = 4, '/' = 5),
event_type Enum('click' = 1, 'download' = 2, 'submit' = 3, 'scroll' = 4)
) ENGINE = MergeTree()
ORDER BY event_type;
--==============================================================================================
ALTER TABLE user_events ADD PROJECTION event_type_projection (
SELECT
*
ORDER BY event_type
);
ALTER TABLE user_events MATERIALIZE PROJECTION event_type_projection;
--==============================================================================================
ALTER TABLE user_events ADD PROJECTION page_projection (
SELECT
*
ORDER BY page
);
ALTER TABLE user_events MATERIALIZE PROJECTION page_projection;
--==============================================================================================
INSERT INTO user_events
SELECT
now() - INTERVAL rand() % 10000 SECOND AS timestamp,
concat('user_session_', toString(rand() % 10 + 1)) AS session,
arrayElement(['/product', '/about', '/provider', '/question', '/'], rand() % 5 + 1) AS page,
arrayElement(['click', 'download', 'submit', 'scroll'], rand() % 4 + 1) AS event_type
FROM numbers(100);
--==============================================================================================
SELECT
    event_type AS "Event type",
    count() AS "Event count"
FROM user_events
GROUP BY event_type
FORMAT Pretty;
SELECT
    page AS "Site page",
    count() AS "Event count"
FROM user_events
GROUP BY page
FORMAT Pretty;
--==============================================================================================
https://fiddle.clickhouse.com/d51161c2-d7be-4849-861a-06476ac29758
--==============================================================================================
--==============================================================================================
--==============================================================================================
CREATE INDEX idx_orders_date_brin ON orders
USING BRIN(order_date)
WITH (pages_per_range = 32);
--===================================================================
CREATE TABLE orders (
id BIGSERIAL PRIMARY KEY,
order_date DATE NOT NULL,
customer_id BIGINT NOT NULL,
total_amount NUMERIC(10, 2) NOT NULL
);
--===================================================================
INSERT INTO orders (order_date, customer_id, total_amount)
SELECT
(DATE '2023-01-01' + (RANDOM()*365)::INT),
(RANDOM()*1000000)::BIGINT,
(RANDOM()*1000)::NUMERIC(10,2)
FROM generate_series(1,10000000) g;
--===================================================================
CREATE INDEX idx_orders_date_brin ON orders
USING BRIN(order_date date_minmax_ops)
WITH (pages_per_range = 64);
--===================================================================
EXPLAIN ANALYZE
SELECT * FROM orders
WHERE order_date BETWEEN '2023-06-01' AND '2023-06-15';
--===================================================================
SELECT brin_summarize_new_values('idx_orders_date_brin');
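--===================================================================
-- Footprint check (standard catalog functions): a BRIN index on a
-- well-correlated column like order_date is typically far smaller than the
-- equivalent btree.
SELECT pg_size_pretty(pg_relation_size('idx_orders_date_brin'));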
--===================================================================
--===================================================================
--===================================================================
--===================================================================
--===================================================================
--===================================================================
--===================================================================
--===================================================================
--===================================================================
--===================================================================
--===================================================================
--===================================================================
--===================================================================
---
SELECT COUNT(status)
FROM company
GROUP BY status
HAVING status LIKE 'closed';
---
SELECT funding_total
FROM company
WHERE category_code LIKE 'news'
AND country_code LIKE 'USA'
ORDER BY funding_total DESC;
---
SELECT SUM(price_amount)
FROM acquisition
WHERE term_code LIKE 'cash'
AND EXTRACT(YEAR from acquired_at) BETWEEN 2011 AND 2013
GROUP BY term_code;
---
SELECT first_name,
last_name,
twitter_username
FROM people
WHERE twitter_username LIKE 'Silver%';
---
SELECT *
FROM people
WHERE twitter_username LIKE '%money%'
AND last_name LIKE 'K%';
---
SELECT country_code,
SUM(funding_total)
FROM company
GROUP BY country_code
ORDER BY SUM(funding_total) DESC;
---
SELECT funded_at,
MIN(raised_amount),
MAX(raised_amount)
FROM funding_round
GROUP BY funded_at
HAVING MIN(raised_amount) NOT IN (0, MAX(raised_amount));
---
SELECT *,
CASE
WHEN invested_companies >= 100 THEN 'high_activity'
WHEN invested_companies BETWEEN 20 AND 99 THEN 'middle_activity'
ELSE 'low_activity'
END
FROM fund;
---
SELECT CASE
WHEN invested_companies>=100 THEN 'high_activity'
WHEN invested_companies>=20 THEN 'middle_activity'
ELSE 'low_activity'
END AS activity,
ROUND(AVG(investment_rounds))
FROM fund
GROUP BY activity
ORDER BY ROUND(AVG(investment_rounds));
---
SELECT country_code,
MIN(invested_companies),
MAX(invested_companies),
AVG(invested_companies)
FROM (SELECT *
FROM fund
WHERE EXTRACT(YEAR FROM founded_at) BETWEEN 2010 AND 2012) AS f
GROUP BY country_code
HAVING MIN(invested_companies) > 0
ORDER BY AVG(invested_companies) DESC
LIMIT 10;
---
SELECT p.first_name,
p.last_name,
e.instituition
FROM people AS p
LEFT JOIN education AS e ON p.id = e.person_id;
---
SELECT c.name,
COUNT(DISTINCT tab2.instituition)
FROM company AS c
LEFT JOIN
(SELECT tab1.instituition,
p.company_id
FROM
(SELECT person_id,
instituition
FROM education
WHERE instituition IS NOT NULL
) AS tab1
INNER JOIN people AS p ON p.id = tab1.person_id
) AS tab2 ON tab2.company_id = c.id
GROUP BY c.name
ORDER BY COUNT(DISTINCT tab2.instituition) DESC
LIMIT 5;
---
SELECT DISTINCT name
FROM company
WHERE status LIKE 'closed'
AND id IN (SELECT company_id
FROM funding_round
WHERE is_first_round = 1
AND is_last_round = 1);
---
SELECT DISTINCT p.id
FROM company AS c
INNER JOIN people AS p ON c.id = p.company_id
WHERE c.status LIKE 'closed'
AND c.id IN (SELECT company_id
FROM funding_round
WHERE is_first_round = 1
AND is_last_round = 1);
---
SELECT DISTINCT p.id,
e.instituition
FROM company AS c
INNER JOIN people AS p ON c.id = p.company_id
LEFT JOIN education AS e ON p.id = e.person_id
WHERE c.status LIKE 'closed'
AND c.id IN (SELECT company_id
FROM funding_round
WHERE is_first_round = 1
AND is_last_round = 1)
AND e.instituition IS NOT NULL;
---
SELECT DISTINCT p.id,
COUNT(e.instituition)
FROM company AS c
INNER JOIN people AS p ON c.id = p.company_id
LEFT JOIN education AS e ON p.id = e.person_id
WHERE c.status LIKE 'closed'
AND c.id IN (SELECT company_id
FROM funding_round
WHERE is_first_round = 1
AND is_last_round = 1)
AND e.instituition IS NOT NULL
GROUP BY p.id;
---
SELECT AVG(tab1.count_in)
FROM (SELECT DISTINCT p.id,
COUNT(e.instituition) AS count_in
FROM company AS c
INNER JOIN people AS p ON c.id = p.company_id
LEFT JOIN education AS e ON p.id = e.person_id
WHERE c.status LIKE 'closed'
AND c.id IN (SELECT company_id
FROM funding_round
WHERE is_first_round = 1
AND is_last_round = 1)
AND e.instituition IS NOT NULL
GROUP BY p.id) AS tab1;
---
SELECT AVG(tab1.count_in)
FROM (SELECT DISTINCT p.id,
COUNT(e.instituition) AS count_in
FROM company AS c
INNER JOIN people AS p ON c.id = p.company_id
LEFT JOIN education AS e ON p.id = e.person_id
WHERE c.name LIKE 'Facebook'
AND e.instituition IS NOT NULL
GROUP BY p.id
) AS tab1;
---
SELECT f.name AS name_of_fund,
C.name AS name_of_company,
fr.raised_amount AS amount
FROM investment AS i
JOIN company AS c ON i.company_id=c.id
JOIN fund AS f ON i.fund_id=f.id
JOIN funding_round AS fr ON i.funding_round_id = fr.id
WHERE EXTRACT(YEAR FROM fr.funded_at) BETWEEN 2012 AND 2013
AND c.milestones > 6;
---
SELECT company.name AS acquiring_company,
tab2.price_amount,
tab2.acquired_company,
tab2.funding_total,
ROUND(tab2.price_amount / tab2.funding_total)
FROM
(
SELECT c.name AS acquired_company,
c.funding_total,
tab1.acquiring_company_id,
tab1.price_amount
FROM company AS c
RIGHT JOIN (
SELECT acquiring_company_id,
acquired_company_id,
price_amount
FROM acquisition
WHERE price_amount > 0
) AS tab1 ON c.id = tab1.acquired_company_id
) AS tab2 LEFT JOIN company ON company.id = tab2.acquiring_company_id
WHERE tab2.funding_total > 0
ORDER BY tab2.price_amount DESC, tab2.acquired_company
LIMIT 10;
---
SELECT c.name,
tab1.month
FROM company AS c
RIGHT JOIN (
SELECT company_id,
EXTRACT(MONTH FROM funded_at) AS month
FROM funding_round
WHERE EXTRACT(YEAR FROM funded_at) BETWEEN 2010 AND 2013
) AS tab1 ON c.id = tab1.company_id
WHERE c.category_code LIKE 'social';
---
WITH
-- select the month of funding rounds in 2010-2013
tab1 AS (SELECT EXTRACT(MONTH FROM funded_at) AS month,
id AS funding_round_id
FROM funding_round
WHERE EXTRACT(YEAR FROM funded_at) BETWEEN 2010 AND 2013
),
-- count acquisitions and the total deal amount for 2010-2013 by month
tab2 AS (SELECT EXTRACT(MONTH FROM acquired_at) AS month,
COUNT(acquired_company_id) AS count_acquired,
SUM(price_amount) AS total_amount
FROM acquisition
WHERE EXTRACT(YEAR FROM acquired_at) BETWEEN 2010 AND 2013
GROUP BY EXTRACT(MONTH FROM acquired_at)
),
-- find US funds
tab3 AS (SELECT i.funding_round_id,
f.name
FROM investment AS i
JOIN fund AS f ON f.id = i.fund_id
WHERE fund_id IN (SELECT id
FROM fund
WHERE country_code LIKE 'USA')
),
tab4 AS (SELECT month,
COUNT(DISTINCT name) AS count_USA
FROM tab1
LEFT JOIN tab3 ON tab1.funding_round_id = tab3.funding_round_id
GROUP BY month)
SELECT tab4.month,
tab4.count_USA,
tab2.count_acquired,
tab2.total_amount
FROM tab4
LEFT JOIN tab2 ON tab4.month = tab2.month;
---
WITH
total_11 AS (SELECT AVG(funding_total) AS total_2011,
country_code
FROM company
GROUP BY country_code,
EXTRACT(YEAR FROM founded_at)
HAVING EXTRACT(YEAR FROM founded_at) = 2011),
total_12 AS (SELECT AVG(funding_total) AS total_2012,
country_code
FROM company
GROUP BY country_code,
EXTRACT(YEAR FROM founded_at)
HAVING EXTRACT(YEAR FROM founded_at) = 2012),
total_13 AS (SELECT AVG(funding_total) AS total_2013,
country_code
FROM company
GROUP BY country_code,
EXTRACT(YEAR FROM founded_at)
HAVING EXTRACT(YEAR FROM founded_at) = 2013)
SELECT total_11.country_code,
total_11.total_2011,
total_12.total_2012,
total_13.total_2013
FROM total_11
INNER JOIN total_12 ON total_11.country_code = total_12.country_code
INNER JOIN total_13 ON total_11.country_code = total_13.country_code
ORDER BY total_11.total_2011 DESC;
--==========================================================================================
--mysql
SELECT
    c.table_name as "Table",
    t.table_comment as "Table comment",
    c.ordinal_position as "No.",
    c.column_name as "Column",
    c.column_comment as "Column comment",
    c.column_type as "Type",
    CASE WHEN c.column_key = 'PRI' THEN 'PK' END as "Key",
    c.is_nullable as "NULL"
FROM
information_schema.columns c
JOIN information_schema.tables t ON c.table_name = t.table_name
AND c.table_schema = t.table_schema
WHERE
t.table_type = 'BASE TABLE'
    AND t.table_schema = '<schema name, if needed>'
ORDER BY
c.table_name,
c.ordinal_position;
--==========================================================================================
-- postgresql
SELECT
    c.relname as "Table",
(
SELECT
td.description
FROM
pg_catalog.pg_description td
WHERE
td.objoid = a.attrelid
AND td.objsubid = 0
    ) as "Table comment",
    a.attnum as "No.",
    a.attname as "Column",
    ad.description as "Column comment",
    pt.typname as "Type",
    CASE WHEN a.atttypmod = -1 THEN NULL ELSE a.atttypmod END "Size",
    a.attnotnull as "NULL",
CASE WHEN a.attnum IN(
SELECT
UNNEST(cn.conkey)
FROM
pg_catalog.pg_constraint cn
WHERE
cn.conrelid = a.attrelid
AND cn.contype LIKE 'p'
    ) THEN 'PK' END as "Key"
FROM
pg_catalog.pg_attribute a
JOIN pg_catalog.pg_type pt ON a.atttypid = pt.oid
JOIN pg_catalog.pg_class c ON a.attrelid = c.oid
JOIN pg_catalog.pg_namespace n ON c.relnamespace = n.oid
LEFT JOIN pg_catalog.pg_description ad ON ad.objoid = a.attrelid
AND ad.objsubid = a.attnum
WHERE
a.attnum > 0 AND n.nspname = 'public' and c.reltype <> 0
ORDER BY
c.relname,
a.attnum;
--==========================================================================================
-- oracle
SELECT
    cl.owner as "Owner",
    cl.table_name as "Table",
    tc.comments as "Table comment",
    cl.internal_column_id as "No.",
    cl.column_name as "Column",
    cl.data_type as "Type",
    cl.data_length as "Size",
    cl.data_precision as "Precision",
    cl.nullable as "NULL",
    cm.comments as "Column comment"
FROM
sys.all_tab_cols cl
JOIN sys.all_col_comments cm ON cl.owner = cm.owner
AND cl.table_name = cm.table_name
AND cl.column_name = cm.column_name
JOIN sys.all_tab_comments tc ON cl.owner = tc.owner
AND cl.table_name = tc.table_name
WHERE
    cl.owner = '<schema name, if needed>'
ORDER BY cl.table_name,
cl.internal_column_id;
SELECT
    cl.table_name as "Table",
    tc.comments as "Table comment",
    cl.internal_column_id as "No.",
    cl.column_name as "Column",
    cl.data_type as "Type",
    cl.data_length as "Size",
    cl.data_precision as "Precision",
    cl.nullable as "NULL",
    cm.comments as "Column comment"
FROM
sys.user_tab_cols cl
JOIN sys.user_col_comments cm ON cl.table_name = cm.table_name
AND cl.column_name = cm.column_name
JOIN sys.user_tab_comments tc ON cl.table_name = tc.table_name
ORDER BY
cl.table_name,
    cl.internal_column_id;
--==========================================================================================
-- MS Access
SELECT
*
FROM
MSysObjects o
WHERE
o.Flags = 0 AND o.Type = 1 ORDER BY
o.NAME;
--==========================================================================================
-- SQLite
SELECT
*
FROM
sqlite_master m
WHERE
m.type = 'table';
--==========================================================================================
--==========================================================================================
--==========================================================================================
--==========================================================================================
--==========================================================================================
--==========================================================================================
--==========================================================================================
--==========================================================================================
--==========================================================================================
--==========================================================================================
--==========================================================================================
--==========================================================================================
--==========================================================================================
--============================================
LOAD 'auto_explain';
SET auto_explain.log_analyze = 'on';
SET auto_explain.log_buffers = 'on';
SET auto_explain.log_min_duration = 0;
SET auto_explain.log_nested_statements = 'on';
SET auto_explain.log_timing = 'on';
SET auto_explain.log_triggers = 'on';
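-- With log_min_duration = 0, every statement in this session is written to
-- the server log together with its executed plan (buffers and timing
-- included); e.g. after running:
SELECT count(*) FROM generate_series(1, 1e4);
-- the PostgreSQL log contains the corresponding auto_explain plan entry.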
--============================================
SELECT i FROM generate_series(1, 1e6) i;
SELECT i::integer FROM generate_series(1, 1e6) i;
SELECT sum(i) FROM generate_series(1, 1e6) i;
--============================================
SELECT sum(qty) FROM doc_item WHERE doc_id IN (
SELECT id FROM doc WHERE owner = 'Alice'
);
--============================================
--============================================
--============================================
--============================================
--============================================
--============================================
--============================================
--============================================
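-- pg_stat_statements must be preloaded (shared_preload_libraries =
-- 'pg_stat_statements' in postgresql.conf, server restart required) and
-- created in the target database:
CREATE EXTENSION IF NOT EXISTS pg_stat_statements;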
SELECT query, calls,
round(total_exec_time::numeric, 2) AS total_time,
round(mean_exec_time::numeric, 2) AS mean_time,
round((100 * total_exec_time / sum(total_exec_time) OVER ())::numeric, 2) AS percentage
FROM pg_stat_statements
ORDER BY total_exec_time DESC
LIMIT 10;
--============================================================
WITH jsd AS (
SELECT $$[
{"id" : 1, "Номер" : 101, "Дата" : "2023-11-01", "Сумма" : 123.45, "Флаги" : [true,false,null]}
, {"id" : 2, "Номер" : 202, "Дата" : "2023-11-02", "Сумма" : 321.54, "Флаги" : [false,null,true]}
, {"id" : 3, "Номер" : 303, "Дата" : "2023-11-03", "Сумма" : 100.00, "Флаги" : [null,true,false]}
]$$::jsonb
)
SELECT
*
FROM
    jsonb_to_recordset((TABLE jsd)) T(
        id integer
        , "Номер" integer
        , "Дата" date
        , "Сумма" numeric(32,2)
        , "Флаги" boolean[]
    );
--============================================================
--============================================================
--============================================================
--============================================================
--============================================================
--============================================================
--============================================================
--============================================================
--============================================================
CREATE TABLE Managers (
manager_id INT PRIMARY KEY,
name VARCHAR(255),
grade VARCHAR(50),
city VARCHAR(50)
);
INSERT INTO Managers (manager_id, name, grade, city)
VALUES
(1, 'John Doe', 'Senior', 'Париж'),
(2, 'Jane Smith', 'Middle', 'Лондон'),
(3, 'James Brown', 'Junior', 'Берлин'),
(4, 'Chris Wilson', 'Middle', 'Мадрид'),
(5, 'Russell White', 'Junior', 'Мадрид'),
(6, 'Caitlin Maxwell', 'Senior', 'Мадрид'),
(7, 'Robert Morales', 'Senior', 'Рим'),
(8, 'Michael Walter', 'Middle', 'Мадрид'),
(9, 'Travis Smith', 'Junior', 'Мадрид'),
(10, 'Abigail Collins', 'Middle', 'Лондон'),
(11, 'Victor Arnold', 'Junior', 'Берлин'),
(12, 'Emma Hayes', 'Senior', 'Париж'),
(13, 'Diana Long', 'Middle', 'Рим'),
(14, 'Marcus Robinson', 'Junior', 'Берлин'),
(15, 'Charlotte Murray', 'Senior', 'Париж'),
(16, 'Ryan Washington', 'Junior', 'Мадрид'),
(17, 'Megan Austin', 'Middle', 'Рим'),
(18, 'Evelyn Edwards', 'Middle', 'Берлин'),
(19, 'Katherine Ray', 'Senior', 'Лондон'),
(20, 'Sarah Ward', 'Junior', 'Париж'),
(21, 'Alexandra Spencer', 'Middle', 'Берлин'),
(22, 'Timothy Hoffman', 'Junior', 'Лондон'),
(23, 'Eric Soto', 'Senior', 'Мадрид'),
(24, 'Brenda Beck', 'Junior', 'Париж'),
(25, 'Sophia Schneider', 'Middle', 'Лондон'),
(26, 'Bobby Fox', 'Senior', 'Берлин'),
(27, 'Crystal Warren', 'Junior', 'Рим'),
(28, 'Adam Tucker', 'Middle', 'Мадрид'),
(29, 'Denise Lowe', 'Senior', 'Рим'),
(30, 'Matthew Russell', 'Junior', 'Лондон'),
(31, 'Samantha Murphy', 'Middle', 'Берлин'),
(32, 'Diana Hopkins', 'Junior', 'Париж'),
(33, 'Jack Anderson', 'Senior', 'Мадрид'),
(34, 'Heather Carroll', 'Junior', 'Берлин'),
(35, 'David Gutierrez', 'Middle', 'Рим'),
(36, 'Julie Graham', 'Senior', 'Париж'),
(37, 'Anthony Hernandez', 'Junior', 'Мадрид'),
(38, 'Melanie Webb', 'Middle', 'Лондон'),
(39, 'Christina Burton', 'Senior', 'Рим'),
(40, 'Harold Perry', 'Junior', 'Мадрид'),
(41, 'Jesse Robertson', 'Middle', 'Лондон'),
(42, 'Gloria Duncan', 'Junior', 'Берлин'),
(43, 'Bruce Daniels', 'Senior', 'Париж'),
(44, 'Rebecca Watson', 'Middle', 'Берлин'),
(45, 'Zachary Knight', 'Junior', 'Рим'),
(46, 'Nathan Grant', 'Senior', 'Париж'),
(47, 'Julia Jenkins', 'Middle', 'Мадрид'),
(48, 'Sara Lane', 'Senior', 'Берлин'),
(49, 'Christina Kelly', 'Junior', 'Париж'),
(50, 'Chris Wilson', 'Middle', 'Мадрид');
SELECT * FROM Managers;
CREATE TABLE Loyalty (
loyalty_id INT PRIMARY KEY,
discount DECIMAL(3, 2),
name VARCHAR(50)
);
INSERT INTO Loyalty (loyalty_id, discount, name)
VALUES
(1, 0.05, 'Bronze'),
(2, 0.10, 'Silver'),
(3, 0.15, 'Gold'),
(4, 0.20, 'Platinum'),
(5, 0.25, 'Diamond');
SELECT * FROM Loyalty;
CREATE TABLE Clients (
client_id INT PRIMARY KEY,
reg_date DATE,
age INT,
phone VARCHAR(50),
loyalty_id INT,
FOREIGN KEY (loyalty_id) REFERENCES Loyalty(loyalty_id)
);
INSERT INTO Clients (client_id, reg_date, age, phone, loyalty_id)
VALUES
(1, '2021-03-10', 28, '123-456-7890', 1),
(2, '2020-11-15', 35, '123-456-7891', 3),
(3, '2022-02-20', 22, '123-456-7892', 2),
(4, '2024-01-31', 40, '(954)626-4329x539', 3),
(5, '2023-07-01', 52, '+1-694-911-8641x4917', 2),
(6, '2023-11-25', 47, '+1-911-113-9550x32020', 3),
(7, '2021-11-16', 50, '+1-846-404-3386x00290', 3),
(8, '2023-01-01', 39, '383.278.8258', 3),
(9, '2023-05-17', 20, '+1-689-713-4760x46391', 5),
(10, '2024-05-29', 43, '942-640-0789', 5),
(11, '2024-05-01', 52, '(067)510-8482', 5),
(12, '2022-03-24', 26, '811.842.9970', 3),
(13, '2024-01-26', 63, '288.635.4193', 5),
(14, '2024-06-07', 44, '(476)406-1624x163', 5),
(15, '2021-09-21', 18, '(510)064-3482x064', 3),
(16, '2022-12-25', 54, '+1-601-785-2984x5326', 4),
(17, '2023-05-31', 67, '265.851.6353x05257', 2),
(18, '2024-05-27', 30, '(240)016-1892x2059', 1),
(19, '2023-12-01', 55, '001-852-229-1955x655', 4),
(20, '2023-07-04', 48, '(520)416-1890x38456', 4),
(21, '2021-10-05', 39, '797.212.5596x41264', 1),
(22, '2022-09-01', 55, '+1-108-707-2097x81440', 1),
(23, '2023-08-20', 30, '001-521-370-3769x4106', 1),
(24, '2022-12-25', 62, '362-138-6912', 4),
(25, '2024-08-24', 66, '+1-841-095-2103x865', 4),
(26, '2023-04-02', 38, '859.776.3852', 3),
(27, '2024-02-20', 59, '322.492.0144x665', 4),
(28, '2022-08-11', 31, '(722)881-6836', 4),
(29, '2022-07-04', 68, '+1-830-204-1619x90606', 3),
(30, '2024-01-02', 20, '8672579809', 1),
(31, '2022-09-20', 68, '611-928-3059x06292', 1),
(32, '2023-04-18', 27, '2740110882', 3),
(33, '2023-05-12', 57, '279.583.4715', 3),
(34, '2021-12-11', 44, '320-022-6982x173', 4),
(35, '2022-06-29', 32, '+1-513-394-5243', 4),
(36, '2022-06-24', 55, '(160)894-7519x714', 4),
(37, '2022-07-07', 50, '994.476.3928x768', 3),
(38, '2022-05-29', 53, '853-572-4723', 5),
(39, '2024-05-03', 56, '669.769.4391x46398', 5),
(40, '2024-02-09', 57, '120-525-1950', 1),
(41, '2024-01-28', 39, '833.734.1709x6146', 4),
(42, '2023-07-11', 19, '6363156683', 2),
(43, '2021-11-24', 55, '001-149-641-2882', 4),
(44, '2024-08-26', 40, '+1-584-513-3913x573', 1),
(45, '2021-11-22', 23, '001-158-499-0884x281', 4),
(46, '2022-05-06', 36, '8026271708', 3),
(47, '2022-02-13', 21, '032-375-4967x2171', 2),
(48, '2022-01-17', 27, '(046)433-5339', 4),
(49, '2023-09-26', 25, '236.359.9593', 2),
(50, '2023-01-17', 48, '123-456-7893', 4);
SELECT * FROM Clients;
CREATE TABLE Orders (
order_id INT PRIMARY KEY,
date DATE,
time TIME,
sum DECIMAL(10, 2),
quantity INT,
product_id INT,
client_id INT,
manager_id INT,
FOREIGN KEY (client_id) REFERENCES Clients(client_id),
FOREIGN KEY (manager_id) REFERENCES Managers(manager_id)
);
INSERT INTO Orders (order_id, date, time, sum, quantity, product_id, client_id, manager_id)
VALUES
(1, '2023-05-10', '12:00:00', 1000.50, 2, 101, 1, 2),
(2, '2022-07-15', '15:45:00', 500.00, 1, 102, 2, 3),
(3, '2024-01-20', '18:30:00', 1500.75, 3, 103, 3, 1),
(4, '2024-05-18', '07:42:50', 334.47, 8, 32, 15, 20),
(5, '2023-05-24', '19:47:40', 638.64, 7, 34, 43, 44),
(6, '2023-11-13', '00:16:21', 824.96, 7, 95, 36, 30),
(7, '2024-04-12', '12:12:39', 480.12, 10, 47, 4, 8),
(8, '2023-09-24', '08:55:20', 915.41, 4, 25, 38, 35),
(9, '2022-12-14', '11:39:50', 407.84, 3, 56, 36, 8),
(10, '2022-11-09', '13:55:03', 795.04, 4, 73, 6, 36),
(11, '2022-10-23', '12:22:44', 809.87, 1, 13, 12, 10),
(12, '2023-04-18', '11:30:33', 761.85, 8, 15, 3, 28),
(13, '2023-04-12', '07:19:55', 2252.93, 4, 3, 22, 20),
(14, '2023-03-30', '22:44:52', 39.25, 9, 93, 2, 9),
(15, '2023-04-16', '18:20:37', 168.50, 5, 21, 26, 37),
(16, '2023-10-07', '02:03:52', 823.47, 3, 43, 16, 45),
(17, '2023-12-09', '21:46:26', 358.19, 4, 12, 15, 38),
(18, '2022-10-19', '20:53:31', 118.23, 10, 66, 18, 13),
(19, '2024-05-07', '13:33:07', 655.96, 5, 24, 37, 49),
(20, '2022-10-11', '10:50:38', 454.41, 7, 53, 29, 11),
(21, '2024-04-14', '03:46:15', 935.08, 7, 30, 1, 31),
(22, '2024-09-02', '23:06:57', 315.26, 8, 49, 38, 48),
(23, '2023-04-12', '07:12:55', 337.49, 1, 71, 15, 26),
(24, '2024-06-27', '10:15:15', 453.28, 4, 1, 15, 48),
(25, '2024-08-30', '17:43:31', 990.63, 6, 92, 34, 19),
(26, '2023-04-28', '20:00:16', 469.60, 8, 6, 36, 26),
(27, '2023-12-21', '04:56:59', 210.82, 7, 11, 22, 19),
(28, '2024-03-25', '19:06:36', 950.56, 4, 24, 4, 41),
(29, '2022-10-26', '06:14:55', 460.71, 7, 65, 38, 28),
(30, '2022-12-04', '01:50:03', 743.50, 4, 86, 14, 19),
(31, '2024-02-21', '16:34:06', 293.36, 1, 66, 32, 26),
(32, '2023-05-05', '20:32:33', 806.34, 5, 81, 28, 46),
(33, '2024-03-06', '21:49:44', 460.08, 10, 93, 3, 18),
(34, '2023-07-10', '00:23:26', 524.57, 2, 13, 22, 12),
(35, '2023-11-25', '02:27:15', 561.76, 7, 15, 5, 22),
(36, '2022-11-17', '16:56:56', 75.83, 5, 51, 31, 43),
(37, '2024-03-10', '14:59:15', 97.69, 6, 20, 14, 14),
(38, '2024-07-20', '17:43:55', 846.69, 2, 37, 47, 8),
(39, '2023-12-26', '23:06:11', 751.65, 8, 87, 14, 13),
(40, '2023-02-17', '19:32:25', 392.24, 2, 26, 26, 27),
(41, '2023-03-30', '18:50:12', 664.12, 7, 82, 50, 41),
(42, '2024-07-11', '19:53:00', 183.33, 7, 9, 24, 3),
(43, '2024-07-09', '08:26:37', 210.74, 2, 55, 23, 10),
(44, '2024-05-12', '02:58:11', 640.16, 7, 83, 23, 18),
(45, '2022-10-02', '17:24:03', 328.50, 2, 80, 5, 32),
(46, '2023-04-27', '20:09:22', 407.60, 5, 99, 30, 26),
(47, '2023-09-04', '23:50:02', 957.65, 10, 27, 32, 13),
(48, '2024-05-13', '01:14:38', 946.13, 4, 69, 41, 39),
(49, '2024-05-29', '15:09:07', 926.29, 6, 86, 17, 12),
(50, '2023-09-30', '09:00:00', 750.20, 1, 110, 50, 1);
SELECT * FROM Orders;
-- Task 1: determine the discount level for clients based on their age.
-- Output client_id, age and the discount category:
-- • Youth (under 30).
-- • Adult (30 to 50).
-- • Senior (over 50).
SELECT
l.name AS loyalty_category,
l.discount AS discount,
ROUND(AVG(c.age), 2) AS average_age
FROM Clients c
LEFT JOIN Loyalty l
ON c.loyalty_id = l.loyalty_id
GROUP BY l.name, l.discount
ORDER BY l.discount DESC;
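-- The query above aggregates the average age per loyalty tier; a minimal
-- sketch that answers Task 1 as stated (per-client category; the exact
-- handling of the 30/50 boundaries is an assumption):
SELECT
    client_id,
    age,
    CASE
        WHEN age < 30 THEN 'Youth'
        WHEN age <= 50 THEN 'Adult'
        ELSE 'Senior'
    END AS discount_category
FROM Clients
ORDER BY client_id;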
CREATE OR REPLACE FUNCTION set_last_update()
RETURNS trigger AS $$
BEGIN
IF OLD.last_update != NEW.last_update THEN
RAISE EXCEPTION 'last_update field cannot be updated by SQL request';
END IF;
NEW.last_update := CURRENT_TIMESTAMP;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER test_last_update_trigger
BEFORE UPDATE
ON test
FOR EACH ROW
EXECUTE PROCEDURE set_last_update();
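-- The trigger above assumes a table with a last_update column; a hypothetical
-- sketch of such a table:
CREATE TABLE test (
    id integer PRIMARY KEY,
    payload text,
    last_update timestamptz NOT NULL DEFAULT CURRENT_TIMESTAMP
);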
SELECT
column1,
column2,
SUM(column3) OVER (PARTITION BY column1 ORDER BY column2 ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS running_total
FROM
    my_table;
SELECT LEN('Hello World') AS StringLength;
CREATE FUNCTION GetEmployees()
RETURNS TABLE
AS
RETURN (
    SELECT EmployeeID, EmployeeName, Salary
    FROM Employees
);
MERGE INTO target_table AS target
USING source_table AS source
ON target.id = source.id
WHEN MATCHED THEN
UPDATE SET target.column1 = source.column1, target.column2 = source.column2
WHEN NOT MATCHED THEN
INSERT (id, column1, column2) VALUES (source.id, source.column1, source.column2)
WHEN NOT MATCHED BY SOURCE THEN
DELETE;
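-- Note: WHEN NOT MATCHED BY SOURCE is T-SQL (SQL Server) syntax; PostgreSQL
-- supports it only from version 17.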
SELECT column1, column2
FROM table_name
WHERE to_tsvector('russian', column1 || ' ' || column2) @@ to_tsquery('russian', 'поисковый_запрос');
WITH RECURSIVE CategoryHierarchy AS (
SELECT category_id, category_name, parent_category_id
FROM categories
WHERE parent_category_id IS NULL
UNION ALL
SELECT c.category_id, c.category_name, c.parent_category_id
FROM categories c
JOIN CategoryHierarchy ch ON c.parent_category_id = ch.category_id
)
SELECT * FROM CategoryHierarchy;
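-- A common variant (sketch, same categories table): carry the depth along to
-- indent or limit the traversal.
WITH RECURSIVE CategoryHierarchy AS (
    SELECT category_id, category_name, parent_category_id, 0 AS depth
    FROM categories
    WHERE parent_category_id IS NULL
    UNION ALL
    SELECT c.category_id, c.category_name, c.parent_category_id, ch.depth + 1
    FROM categories c
    JOIN CategoryHierarchy ch ON c.parent_category_id = ch.category_id
)
SELECT * FROM CategoryHierarchy ORDER BY depth;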
with recursive
Mapping as (
select
id as node_id,
parent_directory_id as parent_node_id,
name as node_name
from Files
),
Levels as (
select
node_id,
parent_node_id,
node_name,
cast(parent_node_id as char(2000)) as parents,
cast(node_name as char(2000)) as full_path,
0 as node_level
from Mapping
where parent_node_id is null
union
select
Mapping.node_id,
Mapping.parent_node_id,
Mapping.node_name,
concat(coalesce(concat(prev.parents, '-'), ''), cast(Mapping.parent_node_id as char)),
concat_ws(' ', prev.full_path, Mapping.node_name),
prev.node_level + 1
from
Levels as prev
inner join Mapping on Mapping.parent_node_id = prev.node_id
),
Branches as (
select
node_id,
parent_node_id,
node_name,
parents,
full_path,
node_level,
case
when parent_node_id is not null then
case
when node_id = last_value(node_id) over WindowByParents then '└── '
else '├── '
end
else ''
end as node_branch,
case
when parent_node_id is not null then
case
when node_id = last_value(node_id) over WindowByParents then ' '
else '│ '
end
else ''
end as branch_through
from Levels
window WindowByParents as (
partition by parents
order by node_name
rows between current row and unbounded following
)
order by full_path
),
Tree as (
select
node_id,
parent_node_id,
node_name,
parents,
full_path,
node_level,
node_branch,
cast(branch_through as char(2000)) as all_through
from Branches
where parent_node_id is null
union
select
Branches.node_id,
Branches.parent_node_id,
Branches.node_name,
Branches.parents,
Branches.full_path,
Branches.node_level,
Branches.node_branch,
concat(prev.all_through, Branches.branch_through)
from
Tree as prev
inner join Branches on Branches.parent_node_id = prev.node_id
),
FineTree as (
select
tr.node_id,
tr.parent_node_id,
tr.node_name,
tr.parents,
tr.full_path,
tr.node_level,
concat(coalesce(parent.all_through, ''), tr.node_branch, tr.node_name) as fine_tree
from
Tree as tr
left join Tree as parent on
parent.node_id = tr.parent_node_id
order by tr.full_path
)
select fine_tree from FineTree
;
-- SQL Function Example
CREATE FUNCTION get_avg_salary (@dept_name VARCHAR(50))
RETURNS INT
AS
BEGIN
DECLARE @result INT
SELECT @result = AVG(salary)
FROM employee_info
WHERE department = @dept_name
RETURN @result
END
--=======================================================================
CREATE PROCEDURE insert_employee_info
@name VARCHAR(50),
@dob DATE,
@salary INT
AS
BEGIN
INSERT INTO employee_info (name, dob, salary)
VALUES (@name, @dob, @salary)
END
--=======================================================================
-- create a schema
CREATE SCHEMA hstore;
-- install the extension into its own schema
CREATE EXTENSION hstore WITH SCHEMA hstore;
-- remember to add the schema to search_path right away
ALTER SYSTEM SET search_path = '$user', 'public', 'hstore';
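-- ALTER SYSTEM only writes postgresql.auto.conf; reload the configuration so
-- the new search_path applies to new sessions:
SELECT pg_reload_conf();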
--=======================================================================
set local time zone $client_timezone;
select now()::timestamp <@ tsrange('2024-01-01 09:00:00', '2024-01-01 10:00:00');
--=======================================================================
SELECT first_name, last_name, salary
FROM employees
WHERE salary > (SELECT AVG(salary) FROM employees);
--=======================================================================
SELECT first_name, last_name,
(SELECT SUM(amount) FROM sales WHERE sales.employee_id = employees.employee_id) AS total_sales
FROM employees;
--=======================================================================
SELECT
date,
region,
product,
sales_amount,
AVG(sales_amount) OVER (
PARTITION BY region, product
ORDER BY date
ROWS BETWEEN 2 PRECEDING AND CURRENT ROW
) as rolling_avg_sales
FROM
sales_data
--=======================================================================
SELECT
vc.video_id
, CASE
WHEN meta.GENRE IN ('Drama', 'Comedy') THEN 'Entertainment'
ELSE meta.GENRE
END AS content_type
FROM video_content
INNER JOIN metadata
ON video_content.video_id = metadata.video_id
;
--=======================================================================
-- Using nested inline views.
SELECT
vhs.movie
, vhs.vhs_revenue
, cs.cinema_revenue
FROM
(
SELECT
movie_id
, SUM(ticket_sales) AS cinema_revenue
FROM tickets
GROUP BY movie_id
) AS cs
INNER JOIN
(
SELECT
movie
, movie_id
, SUM(revenue) AS vhs_revenue
FROM blockbuster
GROUP BY movie, movie_id
) AS vhs
ON cs.movie_id = vhs.movie_id
;
-- Using CTEs.
WITH cinema_sales AS
(
SELECT
movie_id
, SUM(ticket_sales) AS cinema_revenue
FROM tickets
GROUP BY movie_id
),
vhs_sales AS
(
SELECT
movie
, movie_id
, SUM(revenue) AS vhs_revenue
FROM blockbuster
GROUP BY movie, movie_id
)
SELECT
vhs.movie
, vhs.vhs_revenue
, cs.cinema_revenue
FROM cinema_sales AS cs
INNER JOIN vhs_sales AS vhs
ON cs.movie_id = vhs.movie_id
;
--=======================================================================
-- Anti-join.
SELECT
video_content.*
FROM video_content
LEFT JOIN archive
on video_content.series_id = archive.series_id
WHERE 1=1
AND archive.series_id IS NULL -- Any rows with no match will have a NULL value.
;
-- Subquery.
SELECT
*
FROM video_content
WHERE 1=1
AND series_id NOT IN (SELECT DISTINCT SERIES_ID FROM archive) -- Be mindful of NULL values.
;
-- Correlated subquery.
SELECT
*
FROM video_content vc
WHERE 1=1
AND NOT EXISTS (
SELECT 1
FROM archive a
WHERE a.series_id = vc.series_id
)
;
--=======================================================================
-- Using QUALIFY:
SELECT
product
, market
, SUM(revenue) AS market_revenue
FROM sales
GROUP BY product, market
QUALIFY DENSE_RANK() OVER (PARTITION BY product ORDER BY SUM(revenue) DESC) <= 10
ORDER BY product, market_revenue
;
-- Without QUALIFY:
SELECT
product
, market
, market_revenue
FROM
(
SELECT
product
, market
, SUM(revenue) AS market_revenue
, DENSE_RANK() OVER (PARTITION BY product ORDER BY SUM(revenue) DESC) AS market_rank
FROM sales
GROUP BY product, market
)
WHERE market_rank <= 10
ORDER BY product, market_revenue
;
--=======================================================================
SELECT
COALESCE(dept_no, 'Total') AS dept_no
, SUM(salary) AS dept_salary
FROM employees
GROUP BY ROLLUP(dept_no)
ORDER BY dept_salary -- Be sure to order by this column to ensure the Total appears last/at the bottom of the result set.
;
--=======================================================================
-- Miles Davis will be returned from this query.
SELECT Name
FROM artist
WHERE name = 'Miles Davis'
EXCEPT
SELECT Name
FROM artist
WHERE name = 'Nirvana'
;
-- Nothing will be returned from this query as 'Miles Davis' appears in both queries' result sets.
SELECT Name
FROM artist
WHERE name = 'Miles Davis'
EXCEPT
SELECT Name
FROM artist
WHERE name = 'Miles Davis'
;
--=======================================================================
INSERT INTO departments (id)
VALUES (1), (2), (NULL);
-- Doesn't work due to NULL being present.
SELECT *
FROM employees
WHERE department_id NOT IN (SELECT DISTINCT id from departments)
;
-- Solution.
SELECT *
FROM employees e
WHERE NOT EXISTS (
SELECT 1
FROM departments d
WHERE d.id = e.department_id
)
;
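-- Alternative: keep NOT IN but filter the NULLs out of the subquery explicitly.
SELECT *
FROM employees
WHERE department_id NOT IN (SELECT id FROM departments WHERE id IS NOT NULL)
;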
--=======================================================================
-- You can instead do this:
SELECT
product
, CASE product WHEN 'Robot' THEN 0 ELSE revenue END AS revenue
, RANK() OVER (ORDER BY CASE product WHEN 'Robot' THEN 0 ELSE revenue END DESC)
FROM products
;
--=======================================================================
-- If I'd read the documentation further I'd also have realised that my solution
--to the NULL problem with GREATEST()...
SELECT COALESCE(GREATEST(signup_date, consumption_date), signup_date, consumption_date);
-- ... could have been solved with the following function:
SELECT GREATEST_IGNORE_NULLS(signup_date, consumption_date);
insert into counters (id) values (ID) on conflict (id) do update set counter = counters.counter + 1 returning counter;
WITH updated AS (
UPDATE sequences SET sequence = sequence + ?
WHERE sequence_id = ? RETURNING sequence
)
SELECT * FROM updated;
--================================================
CREATE TYPE status AS ENUM
('UNCONFIRMED', 'REGISTERED', 'VALIDATED', 'PAID');
CREATE TABLE my_table (id serial primary key, status status) ;
CREATE TABLE status_counter (status status primary key, counter integer CHECK (counter < 6000)) ;
ALTER TABLE IF EXISTS status_counter DROP CONSTRAINT IF EXISTS counter_limit ;
ALTER TABLE IF EXISTS status_counter ADD CONSTRAINT counter_limit CHECK (counter < 20) ;
INSERT INTO status_counter
SELECT 'VALIDATED', count(*) FROM my_table WHERE status='VALIDATED' ;
CREATE OR REPLACE FUNCTION table_insert ()
RETURNS trigger LANGUAGE plpgsql AS
$$
BEGIN
IF NEW.status = 'VALIDATED'
THEN
UPDATE status_counter
SET counter = counter + 1
WHERE status='VALIDATED' ;
END IF ;
RETURN NEW ;
END ;
$$ ;
CREATE OR REPLACE TRIGGER my_table_insert BEFORE INSERT ON my_table
FOR EACH ROW EXECUTE FUNCTION table_insert() ;
CREATE OR REPLACE FUNCTION table_update ()
RETURNS trigger LANGUAGE plpgsql AS
$$
BEGIN
IF NEW.status IS NOT DISTINCT FROM 'VALIDATED'
AND OLD.status IS DISTINCT FROM 'VALIDATED'
THEN
UPDATE status_counter
SET counter = counter + 1
WHERE status='VALIDATED' ;
ELSEIF NEW.status IS DISTINCT FROM 'VALIDATED'
AND OLD.status IS NOT DISTINCT FROM 'VALIDATED'
THEN
UPDATE status_counter
SET counter = counter - 1
WHERE status='VALIDATED' ;
END IF ;
RETURN NEW ;
END ;
$$ ;
CREATE OR REPLACE TRIGGER my_table_update BEFORE UPDATE OF status ON my_table
FOR EACH ROW EXECUTE FUNCTION table_update() ;
--================================================
CREATE OR REPLACE FUNCTION a(int)
RETURNS int
LANGUAGE SQL
IMMUTABLE PARALLEL SAFE
BEGIN ATOMIC
SELECT $1 * 3;
END;
CREATE OR REPLACE FUNCTION b(int)
RETURNS int
LANGUAGE SQL
IMMUTABLE PARALLEL SAFE
BEGIN ATOMIC
SELECT a($1) * 2;
END;
CREATE OR REPLACE FUNCTION c(int)
RETURNS int
LANGUAGE SQL
IMMUTABLE PARALLEL SAFE
BEGIN ATOMIC
SELECT b($1);
END;
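-- Because BEGIN ATOMIC bodies are parsed at creation time, dependencies are
-- tracked: a() cannot be dropped while b() references it.
SELECT c(5); -- 30: c(5) -> b(5) -> a(5) * 2 = (5 * 3) * 2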
--================================================
CREATE TABLE `slotted_counters` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`record_type` int(11) NOT NULL,
`record_id` int(11) NOT NULL,
`slot` int(11) NOT NULL DEFAULT '0',
`count` int(11) DEFAULT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `records_and_slots` (`record_type`,`record_id`,`slot`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
INSERT INTO slotted_counters(record_type, record_id, slot, count)
VALUES (123, 456, RAND() * 100, 1)
ON DUPLICATE KEY UPDATE count = count + 1;
SELECT SUM(count) as count FROM slotted_counters
WHERE (record_type = 123 AND record_id = 456);
--================================================
CREATE TEXT SEARCH DICTIONARY synonym_russian (
TEMPLATE = synonym,
    FILE = synonyms_russian.sample, -- path to the synonyms file
    DICTIONARY = russian_stem -- use Russian stemming
);
ALTER TEXT SEARCH CONFIGURATION russian
ALTER MAPPING FOR asciiword, word WITH synonym_russian, russian_stem;
--================================================
CREATE TEXT SEARCH DICTIONARY thesaurus_russian (
TEMPLATE = thesaurus,
    FILE = thesaurus_russian.sample, -- path to the thesaurus file
    DICTIONARY = russian_stem -- use stemming
);
ALTER TEXT SEARCH CONFIGURATION russian
ALTER MAPPING FOR asciiword, word WITH thesaurus_russian, russian_stem;
--================================================
CREATE TEXT SEARCH DICTIONARY ispell_russian (
TEMPLATE = ispell,
DictFile = russian,
AffFile = russian,
StopWords = russian
);
ALTER TEXT SEARCH CONFIGURATION russian
ALTER MAPPING FOR word, asciiword WITH ispell_russian, russian_stem;
--================================================
CREATE TEXT SEARCH DICTIONARY stopwords_russian (
TEMPLATE = simple,
STOPWORDS = russian_stopwords.sample
);
ALTER TEXT SEARCH CONFIGURATION russian
ALTER MAPPING FOR asciiword, word WITH stopwords_russian, russian_stem;
--================================================
CREATE TEXT SEARCH CONFIGURATION my_russian ( COPY = russian );
-- Configure the synonym dictionary
CREATE TEXT SEARCH DICTIONARY synonym_russian (
TEMPLATE = synonym,
FILE = synonyms_russian.sample,
DICTIONARY = russian_stem
);
-- Configure the thesaurus
CREATE TEXT SEARCH DICTIONARY thesaurus_russian (
TEMPLATE = thesaurus,
FILE = thesaurus_russian.sample,
DICTIONARY = russian_stem
);
-- Configure Ispell for spelling correction
CREATE TEXT SEARCH DICTIONARY ispell_russian (
TEMPLATE = ispell,
DictFile = russian,
AffFile = russian,
StopWords = russian
);
-- Configure stop words
CREATE TEXT SEARCH DICTIONARY stopwords_russian (
TEMPLATE = simple,
STOPWORDS = russian_stopwords.sample
);
-- Apply all dictionaries to the configuration
ALTER TEXT SEARCH CONFIGURATION my_russian
ALTER MAPPING FOR word, asciiword WITH synonym_russian, thesaurus_russian, ispell_russian, stopwords_russian, russian_stem;
SELECT to_tsvector('my_russian', 'Это простой государственно-правовой текст для теста. This is a simple text example for testing.')
@@ to_tsquery('my_russian', 'просто & для & examples & test');
--================================================
CREATE INDEX idx_fulltext ON documents USING GIN(tsvector_col);
SELECT * FROM documents WHERE tsvector_col @@ to_tsquery('fox & dog');
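-- Populating the tsvector column from the raw text; a sketch assuming the
-- title/body columns of the documents table defined below:
UPDATE documents
SET tsvector_col = to_tsvector('english', coalesce(title, '') || ' ' || coalesce(body, ''));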
--================================================
SELECT body, ts_rank(tsvector_col, to_tsquery('AI & machine & learning')) AS rank
FROM documents
WHERE tsvector_col @@ to_tsquery('AI | machine | learning')
ORDER BY rank DESC;
SELECT body, ts_rank_cd(tsvector_col, to_tsquery('AI & machine & learning')) AS rank
FROM documents
WHERE tsvector_col @@ to_tsquery('AI | machine | learning')
ORDER BY rank DESC;
--================================================
CREATE TABLE aliases (t tsquery PRIMARY KEY, s tsquery);
INSERT INTO aliases VALUES(to_tsquery('AI'), to_tsquery('Artificial & intelligence | AI'));
INSERT INTO aliases VALUES(to_tsquery('ML'), to_tsquery('Machine & learning | ML'));
SELECT ts_rewrite(to_tsquery('AI & ML'), 'SELECT * FROM aliases');
SELECT * FROM documents
WHERE to_tsvector(body) @@ ts_rewrite(to_tsquery('AI | ML'), 'SELECT * FROM aliases');
SELECT to_tsvector('english', 'fat cat'), to_tsvector('english', 'fat cat') @@ to_tsquery('fat <-> cat');
SELECT to_tsvector('english', 'fat rat and cat'), to_tsvector('english', 'fat rat and cat') @@ to_tsquery('fat <3> cat');
SELECT to_tsvector('english', 'fat cat'), to_tsvector('english', 'fat cat') @@ tsquery_phrase(to_tsquery('fat'), to_tsquery('cat'));
--================================================
CREATE TABLE documents (
id SERIAL PRIMARY KEY,
title TEXT,
body TEXT,
tsvector_col TSVECTOR
);
SELECT * FROM documents WHERE tsvector_col @@ to_tsquery('machine & learning');
SELECT * FROM documents WHERE to_tsvector(body) @@ to_tsquery('sci:*');
SELECT to_tsvector('simple', 'Это простой государственно-правовой текст для теста. This is a simple text example for testing.')
@@ to_tsquery('simple', 'просто & для & examples & test');
-- result: f
SELECT to_tsvector('english', 'Это простой государственно-правовой текст для теста. This is a simple text example for testing.')
@@ to_tsquery('english', 'просто & для & examples & test');
-- result: f
SELECT to_tsvector('russian', 'Это простой государственно-правовой текст для теста. This is a simple text example for testing.')
@@ to_tsquery('russian', 'просто & для & examples & test');
-- result: t
CREATE TEMP TABLE mentor_performance AS
WITH cte_portfolio AS (
SELECT
members.first_name,
members.region,
transactions.ticker,
transactions.txn_type,
COUNT(*) AS transaction_count,
SUM(transactions.quantity) AS total_quantity,
SUM(transactions.quantity * prices.price) AS gross_values,
SUM(transactions.quantity * prices.price * transactions.percentage_fee / 100) AS fees
FROM trading.transactions
INNER JOIN trading.members
ON transactions.member_id = members.member_id
INNER JOIN trading.prices
ON transactions.ticker = prices.ticker
AND transactions.txn_date = prices.market_date
GROUP BY
members.first_name,
members.region,
transactions.ticker,
transactions.txn_type
),
cte_summary AS (
SELECT
first_name,
region,
ticker,
SUM(
CASE
WHEN txn_type = 'BUY' THEN total_quantity
WHEN txn_type = 'SELL' THEN -total_quantity
END
) AS final_quantity,
SUM(CASE WHEN txn_type = 'BUY' THEN gross_values ELSE 0 END) AS initial_investment,
SUM(CASE WHEN txn_type = 'SELL' THEN gross_values ELSE 0 END) AS sales_revenue,
SUM(CASE WHEN txn_type = 'BUY' THEN fees ELSE 0 END) AS purchase_fees,
SUM(CASE WHEN txn_type = 'SELL' THEN fees ELSE 0 END) AS sales_fees,
SUM(CASE WHEN txn_type = 'BUY' THEN total_quantity ELSE 0 END) AS purchase_quantity,
SUM(CASE WHEN txn_type = 'SELL' THEN total_quantity ELSE 0 END) AS sales_quantity,
SUM(CASE WHEN txn_type = 'BUY' THEN transaction_count ELSE 0 END) AS purchase_transactions,
SUM(CASE WHEN txn_type = 'SELL' THEN transaction_count ELSE 0 END) AS sales_transactions
FROM cte_portfolio
GROUP BY
first_name,
region,
ticker
),
cte_metrics AS (
SELECT
summary.first_name,
summary.region,
summary.ticker,
summary.final_quantity * final.price AS actual_final_value,
summary.purchase_quantity * final.price AS theoretical_final_value,
summary.sales_revenue,
summary.purchase_fees,
summary.sales_fees,
summary.initial_investment,
summary.purchase_quantity,
summary.sales_quantity,
summary.purchase_transactions,
summary.sales_transactions,
summary.initial_investment / purchase_quantity AS dollar_cost_average,
summary.sales_revenue / sales_quantity AS average_selling_price
FROM cte_summary AS summary
INNER JOIN trading.prices AS final
ON summary.ticker = final.ticker
WHERE final.market_date = '2021-08-29'
)
SELECT
first_name,
region,
ticker,
actual_final_value AS final_portfolio_value,
( actual_final_value + sales_revenue - purchase_fees - sales_fees ) / initial_investment AS actual_profitability,
( theoretical_final_value - purchase_fees ) / initial_investment AS theoretical_profitability,
dollar_cost_average,
average_selling_price,
sales_revenue,
purchase_fees,
sales_fees,
initial_investment,
purchase_quantity,
sales_quantity,
purchase_transactions,
sales_transactions
FROM cte_metrics;
/* Question Set 1 - Easy */
/* Q1: Who is the senior most employee based on job title? */
SELECT title, last_name, first_name
FROM employee
ORDER BY levels DESC
LIMIT 1;
/* Q2: Which countries have the most Invoices? */
SELECT COUNT(*) AS c, billing_country
FROM invoice
GROUP BY billing_country
ORDER BY c DESC;
/* Q3: What are top 3 values of total invoice? */
SELECT total
FROM invoice
ORDER BY total DESC
LIMIT 3;
/* Q4: Which city has the best customers? We would like to throw a promotional Music Festival in the city we made the most money.
Write a query that returns one city that has the highest sum of invoice totals.
Return both the city name & sum of all invoice totals */
SELECT billing_city,SUM(total) AS InvoiceTotal
FROM invoice
GROUP BY billing_city
ORDER BY InvoiceTotal DESC
LIMIT 1;
/* Q5: Who is the best customer? The customer who has spent the most money will be declared the best customer.
Write a query that returns the person who has spent the most money.*/
SELECT customer.customer_id, first_name, last_name, SUM(total) AS total_spending
FROM customer
JOIN invoice ON customer.customer_id = invoice.customer_id
GROUP BY customer.customer_id
ORDER BY total_spending DESC
LIMIT 1;
/* Question Set 2 - Moderate */
/* Q1: Write query to return the email, first name, last name, & Genre of all Rock Music listeners.
Return your list ordered alphabetically by email starting with A. */
/*Method 1 */
SELECT DISTINCT email,first_name, last_name
FROM customer
JOIN invoice ON customer.customer_id = invoice.customer_id
JOIN invoiceline ON invoice.invoice_id = invoiceline.invoice_id
WHERE track_id IN(
SELECT track_id FROM track
JOIN genre ON track.genre_id = genre.genre_id
WHERE genre.name LIKE 'Rock'
)
ORDER BY email;
/* Method 2 */
SELECT DISTINCT email AS Email,first_name AS FirstName, last_name AS LastName, genre.name AS Name
FROM customer
JOIN invoice ON invoice.customer_id = customer.customer_id
JOIN invoiceline ON invoiceline.invoice_id = invoice.invoice_id
JOIN track ON track.track_id = invoiceline.track_id
JOIN genre ON genre.genre_id = track.genre_id
WHERE genre.name LIKE 'Rock'
ORDER BY email;
/* Q2: Let's invite the artists who have written the most rock music in our dataset.
Write a query that returns the Artist name and total track count of the top 10 rock bands. */
SELECT artist.artist_id, artist.name,COUNT(artist.artist_id) AS number_of_songs
FROM track
JOIN album ON album.album_id = track.album_id
JOIN artist ON artist.artist_id = album.artist_id
JOIN genre ON genre.genre_id = track.genre_id
WHERE genre.name LIKE 'Rock'
GROUP BY artist.artist_id
ORDER BY number_of_songs DESC
LIMIT 10;
/* Q3: Return all the track names that have a song length longer than the average song length.
Return the Name and Milliseconds for each track. Order by the song length with the longest songs listed first. */
SELECT name, milliseconds
FROM track
WHERE milliseconds > (
    SELECT AVG(milliseconds) AS avg_track_length
    FROM track )
ORDER BY milliseconds DESC;
/* Question Set 3 - Advance */
/* Q1: Find how much amount spent by each customer on artists? Write a query to return customer name, artist name and total spent */
/* Steps to Solve: First, find which artist has earned the most according to the InvoiceLines. Now use this artist to find
which customer spent the most on this artist. For this query, you will need to use the Invoice, InvoiceLine, Track, Customer,
Album, and Artist tables. Note, this one is tricky because the Total spent in the Invoice table might not be on a single product,
so you need to use the InvoiceLine table to find out how many of each product was purchased, and then multiply this by the price
for each artist. */
WITH best_selling_artist AS (
SELECT artist.artist_id AS artist_id, artist.name AS artist_name, SUM(invoice_line.unit_price*invoice_line.quantity) AS total_sales
FROM invoice_line
JOIN track ON track.track_id = invoice_line.track_id
JOIN album ON album.album_id = track.album_id
JOIN artist ON artist.artist_id = album.artist_id
GROUP BY 1
ORDER BY 3 DESC
LIMIT 1
)
SELECT c.customer_id, c.first_name, c.last_name, bsa.artist_name, SUM(il.unit_price*il.quantity) AS amount_spent
FROM invoice i
JOIN customer c ON c.customer_id = i.customer_id
JOIN invoice_line il ON il.invoice_id = i.invoice_id
JOIN track t ON t.track_id = il.track_id
JOIN album alb ON alb.album_id = t.album_id
JOIN best_selling_artist bsa ON bsa.artist_id = alb.artist_id
GROUP BY 1,2,3,4
ORDER BY 5 DESC;
/* Q2: We want to find out the most popular music Genre for each country. We determine the most popular genre as the genre
with the highest amount of purchases. Write a query that returns each country along with the top Genre. For countries where
the maximum number of purchases is shared return all Genres. */
/* Steps to Solve: There are two parts in question- first most popular music genre and second need data at country level. */
/* Method 1: Using CTE */
WITH popular_genre AS
(
SELECT COUNT(invoice_line.quantity) AS purchases, customer.country, genre.name, genre.genre_id,
ROW_NUMBER() OVER(PARTITION BY customer.country ORDER BY COUNT(invoice_line.quantity) DESC) AS RowNo
FROM invoice_line
JOIN invoice ON invoice.invoice_id = invoice_line.invoice_id
JOIN customer ON customer.customer_id = invoice.customer_id
JOIN track ON track.track_id = invoice_line.track_id
JOIN genre ON genre.genre_id = track.genre_id
GROUP BY 2,3,4
ORDER BY 2 ASC, 1 DESC
)
SELECT * FROM popular_genre WHERE RowNo <= 1;
/* Method 2: Using Recursive */
WITH RECURSIVE
sales_per_country AS(
SELECT COUNT(*) AS purchases_per_genre, customer.country, genre.name, genre.genre_id
FROM invoice_line
JOIN invoice ON invoice.invoice_id = invoice_line.invoice_id
JOIN customer ON customer.customer_id = invoice.customer_id
JOIN track ON track.track_id = invoice_line.track_id
JOIN genre ON genre.genre_id = track.genre_id
GROUP BY 2,3,4
ORDER BY 2
),
max_genre_per_country AS (SELECT MAX(purchases_per_genre) AS max_genre_number, country
FROM sales_per_country
GROUP BY 2
ORDER BY 2)
SELECT sales_per_country.*
FROM sales_per_country
JOIN max_genre_per_country ON sales_per_country.country = max_genre_per_country.country
WHERE sales_per_country.purchases_per_genre = max_genre_per_country.max_genre_number;
/* Q3: Write a query that determines the customer that has spent the most on music for each country.
Write a query that returns the country along with the top customer and how much they spent.
For countries where the top amount spent is shared, provide all customers who spent this amount. */
/* Steps to Solve: Similar to the above question. There are two parts in question-
first find the most spent on music for each country and second filter the data for respective customers. */
/* Method 1: using CTE */
WITH Customer_with_country AS (
SELECT customer.customer_id,first_name,last_name,billing_country,SUM(total) AS total_spending,
ROW_NUMBER() OVER(PARTITION BY billing_country ORDER BY SUM(total) DESC) AS RowNo
FROM invoice
JOIN customer ON customer.customer_id = invoice.customer_id
GROUP BY 1,2,3,4
ORDER BY 4 ASC,5 DESC)
SELECT * FROM Customer_with_country WHERE RowNo <= 1;
/* Method 2: Using Recursive */
WITH RECURSIVE
customer_with_country AS (
SELECT customer.customer_id,first_name,last_name,billing_country,SUM(total) AS total_spending
FROM invoice
JOIN customer ON customer.customer_id = invoice.customer_id
GROUP BY 1,2,3,4
ORDER BY 2,3 DESC),
country_max_spending AS(
SELECT billing_country,MAX(total_spending) AS max_spending
FROM customer_with_country
GROUP BY billing_country)
SELECT cc.billing_country, cc.total_spending, cc.first_name, cc.last_name, cc.customer_id
FROM customer_with_country cc
JOIN country_max_spending ms
ON cc.billing_country = ms.billing_country
WHERE cc.total_spending = ms.max_spending
ORDER BY 1;
/* source: www.youtube.com/@RishabhMishraOfficial */
/* Thank You :) */
create table public.models (
id int8 generated by default as identity( increment by 1 minvalue 1 maxvalue 9223372036854775807 start 1 cache 1 no cycle) not null,
"name" varchar(50) not null,
manufacturer varchar(50) not null,
constraint models_pk primary key (id),
constraint models_unique unique (name, manufacturer)
);
create table public.price (
id int8 generated by default as identity( increment by 1 minvalue 1 maxvalue 9223372036854775807 start 1 cache 1 no cycle) not null,
price_value numeric(18,6) not null,
model_id int4 not null,
comment varchar(255) null,
constraint price_pk primary key (id),
constraint price_models_fk foreign key (model_id) references public.models(id),
constraint price_unique unique (price_value, model_id)
);
with
-- Configure the query: set the table name and the schema name
params(table_name, table_schema) as (
values (
'price',
'public'
)
),
-- Describe the expected table columns
required_columns(column_name, data_type, is_nullable, character_maximum_length) as (
values
('id', 'bigint', 'NO', null),
('price_value', 'numeric', 'NO', 18.6),
('model_id', 'integer', 'NO', null),
('comment', 'character varying', 'YES', 255)
),
-- Describe the expected constraints
required_constraints(column_name, constraint_type) as (
values
('id', 'PRIMARY KEY'),
('id', 'FOREIGN KEY'),
('model_id', 'FOREIGN KEY'),
('price_value', 'UNIQUE'),
('model_id', 'UNIQUE')
),
-- Fetch column info for the table under test and add null handling
columns_info as (
select
column_name, data_type, is_nullable,
coalesce(numeric_precision, 0) as numeric_precision,
coalesce(numeric_scale, 0) as numeric_scale,
coalesce(character_maximum_length, 0) as character_maximum_length
from information_schema.columns
where
table_name = (select table_name from params)
and table_schema = (select table_schema from params)
),
-- Check that the table exists and count its columns
check_table_exist as (
select
case when count_all_fields < 1 then false else true end table_exists,
case when count_all_fields < 1 then 1 else 0 end table_exists_error,
count_all_fields
from (
select count (*) as count_all_fields
from columns_info
) sq
),
-- Compare the expected and actual sets of table attributes
fields_comparison as (
select t.*
from columns_info t
inner join required_columns r
on t.column_name = r.column_name
and t.data_type = r.data_type
and t.is_nullable = r.is_nullable
and (
-- Compare the integer part of decimal values
case
when t.data_type = 'numeric'
then t.numeric_precision = trunc(r.character_maximum_length::numeric)
end
and
-- Compare the fractional part of decimal values
case
when t.data_type = 'numeric'
then t.numeric_scale = (r.character_maximum_length::numeric - trunc(r.character_maximum_length::numeric))
* power(10, length(split_part(r.character_maximum_length::text, '.', 2)))
end
or t.character_maximum_length = coalesce(r.character_maximum_length::numeric, 0)
)
),
-- Find unexpected columns and count them
check_unexpected_fields as (
select
count (column_name) as count_unexpected_fields,
string_agg(column_name, ', ') as unexpected_fields
from (
select column_name
from columns_info
except
select column_name
from required_columns
) sq
),
-- Find missing columns and count them
check_missing_fields as (
select
count (column_name) as count_missing_fields,
string_agg(column_name, ', ') as missing_fields
from (
select column_name
from required_columns
except
select column_name
from columns_info
) sq
),
-- Find invalid columns and count them
check_invalid_fields as (
select
count (column_name) as count_invalid_fields,
string_agg(column_name, ', ') as invalid_fields
from (
select column_name
from required_columns
except
select column_name
from fields_comparison
except
select string_to_table(missing_fields, ', ')
from check_missing_fields
) sq
),
-- Find all constraints defined on the table
constraints_query as(
select
t1.constraint_type,
t2.column_name,
t3.column_name as foreign_column
from information_schema.table_constraints t1
left join information_schema.constraint_column_usage t2
on t1.constraint_name = t2.constraint_name
left join information_schema.key_column_usage as t3
on t1.constraint_name = t3.constraint_name
and t1.table_schema = t3.table_schema
where
t1.table_name = (select table_name from params)
and t1.constraint_schema = (select table_schema from params)
and t2.column_name is not null
),
-- Include the referenced key columns (foreign_column) in the constraint list (column_name)
union_foreign_ref_columns as (
select column_name, constraint_type
from constraints_query
union all
select foreign_column as column_name, constraint_type
from constraints_query
),
-- Find unexpected constraints and count them
check_unexpected_constraints as (
select
count (column_name) as count_unexpected_constraints,
string_agg(column_name || ' (' || constraint_type || ')', ', ') as unexpected_constraints
from (
select *
from union_foreign_ref_columns
except
select column_name, constraint_type
from required_constraints
) sq
),
-- Find missing constraints and count them
check_missing_constraints as (
select
count (column_name) as count_missing_constraints,
string_agg(column_name || ' (' || constraint_type || ')', ', ') as missing_constraints
from (
select column_name, constraint_type
from required_constraints
except
select *
from union_foreign_ref_columns
) sq
),
-- Assemble the collected results
checks as (
select
-- Output all error details and sum their count
table_exists_error + count_unexpected_fields + count_missing_fields + count_invalid_fields +
count_unexpected_constraints + count_missing_constraints as errors,
*
from check_table_exist
cross join check_unexpected_fields
cross join check_missing_fields
cross join check_invalid_fields
cross join check_unexpected_constraints
cross join check_missing_constraints
)
select *
from checks;
-- Standalone version of the check_table_exist step: does the table exist at all?
with columns_info as (
select
column_name, data_type, is_nullable,
coalesce(numeric_precision, 0) as numeric_precision,
coalesce(numeric_scale, 0) as numeric_scale,
coalesce(character_maximum_length, 0) as character_maximum_length
from information_schema.columns
where
table_name = 'price'
and table_schema = 'public'
)
select
case when count_all_fields < 1 then false else true end table_exists,
case when count_all_fields < 1 then 1 else 0 end table_exists_error,
count_all_fields
from (
select count (*) as count_all_fields
from columns_info
) sq;
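/* A sketch of pointing the same checks at another table: swap the params values and
   restate the expected layout (the models expectations below follow its DDL above). */
-- params(table_name, table_schema) as (values ('models', 'public')),
-- required_columns(column_name, data_type, is_nullable, character_maximum_length) as (values
--   ('id', 'bigint', 'NO', null),
--   ('name', 'character varying', 'NO', 50),
--   ('manufacturer', 'character varying', 'NO', 50)),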
CREATE TABLE IF NOT EXISTS department(
id INT PRIMARY KEY,
name VARCHAR
);
create TABLE IF NOT EXISTS employee(
id INT PRIMARY KEY,
name VARCHAR,
salary INT,
department_id INT,
CONSTRAINT fk_department_id
FOREIGN KEY(department_id)
REFERENCES department(id) ON DELETE CASCADE
);
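-- Illustrative check of ON DELETE CASCADE (ids and values are arbitrary):
INSERT INTO department(id, name) VALUES (1, 'R&D');
INSERT INTO employee(id, name, salary, department_id) VALUES (1, 'Alice', 1000, 1);
DELETE FROM department WHERE id = 1;
SELECT COUNT(*) FROM employee WHERE department_id = 1; -- 0: the employee row was cascaded away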
/* START */
DROP TABLE IF EXISTS "payment" CASCADE;
DROP TABLE IF EXISTS "bank_transaction" CASCADE;
DROP TABLE IF EXISTS "orderdetail" CASCADE;
DROP TABLE IF EXISTS "order" CASCADE;
DROP TABLE IF EXISTS "product" CASCADE;
DROP TABLE IF EXISTS "productline" CASCADE;
DROP TABLE IF EXISTS "top3product" CASCADE;
DROP TABLE IF EXISTS "productlinedetail" CASCADE;
DROP TABLE IF EXISTS "office_has_manager" CASCADE;
DROP TABLE IF EXISTS "manager" CASCADE;
DROP TABLE IF EXISTS "customer" CASCADE;
DROP TABLE IF EXISTS "customerdetail" CASCADE;
DROP TABLE IF EXISTS "sale" CASCADE;
DROP TABLE IF EXISTS "daily_activity" CASCADE;
DROP TABLE IF EXISTS "token" CASCADE;
DROP TABLE IF EXISTS "employee" CASCADE;
DROP TABLE IF EXISTS "employee_status" CASCADE;
DROP TABLE IF EXISTS "department" CASCADE;
DROP TABLE IF EXISTS "office" CASCADE;
DROP TABLE IF EXISTS "office_flights" CASCADE;
DROP SEQUENCE IF EXISTS "manager_seq";
DROP SEQUENCE IF EXISTS "product_seq";
DROP SEQUENCE IF EXISTS "order_seq";
DROP SEQUENCE IF EXISTS "sale_seq";
DROP SEQUENCE IF EXISTS "customer_seq";
DROP SEQUENCE IF EXISTS "employee_seq";
DROP SEQUENCE IF EXISTS "token_seq";
DROP TYPE IF EXISTS "rate_type";
DROP TYPE IF EXISTS "vat_type";
DROP TYPE IF EXISTS "evaluation_criteria";
DROP DOMAIN IF EXISTS "postal_code";
DROP FUNCTION IF EXISTS "make_array";
DROP FUNCTION IF EXISTS "dup";
DROP FUNCTION IF EXISTS "get_avg_sale";
DROP FUNCTION IF EXISTS "get_salary_stat";
DROP FUNCTION IF EXISTS "swap";
DROP FUNCTION IF EXISTS "new_salary";
DROP FUNCTION IF EXISTS "get_customer";
DROP FUNCTION IF EXISTS "get_offices_multiple";
DROP FUNCTION IF EXISTS "employee_office_arr";
DROP FUNCTION IF EXISTS "sale_price";
DROP FUNCTION IF EXISTS "top_three_sales_per_employee";
DROP FUNCTION IF EXISTS "product_of_product_line";
DROP FUNCTION IF EXISTS "update_msrp";
DROP VIEW IF EXISTS "customer_master";
DROP VIEW IF EXISTS "office_master";
DROP VIEW IF EXISTS "product_master";
CREATE EXTENSION IF NOT EXISTS hstore;
CREATE EXTENSION IF NOT EXISTS pgcrypto;
-- TABLE OFFICE
CREATE DOMAIN "postal_code" AS VARCHAR(15)
CHECK(
VALUE ~ '^\d{5}$'
OR VALUE ~ '^[A-Z]{2}[0-9]{3}[A-Z]{2}$'
);
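-- The domain accepts a 5-digit code or the AA999AA pattern and rejects anything else:
SELECT '12345'::postal_code;   -- ok
SELECT 'AB123CD'::postal_code; -- ok
-- SELECT '1234'::postal_code; -- fails the CHECK constraint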
CREATE TABLE "office" (
"office_code" VARCHAR(10) NOT NULL,
"city" VARCHAR(50) DEFAULT NULL,
"phone" VARCHAR(50) NOT NULL,
"address_line_first" VARCHAR(50) NOT NULL,
"address_line_second" VARCHAR(50) DEFAULT NULL,
"state" VARCHAR(50) DEFAULT NULL,
"country" VARCHAR(50) DEFAULT NULL,
"postal_code" postal_code NOT NULL,
"territory" VARCHAR(10) NOT NULL,
"location" POINT DEFAULT NULL,
"internal_budget" INT NOT NULL,
CONSTRAINT "office_pk" PRIMARY KEY ("office_code"),
CONSTRAINT "office_postal_code_uk" UNIQUE ("postal_code")
);
-- TABLE DEPARTMENT
CREATE TABLE "department" (
"department_id" SERIAL NOT NULL,
"name" VARCHAR(50) NOT NULL,
"phone" VARCHAR(50) NOT NULL,
"code" INT NOT NULL,
"office_code" VARCHAR(10) NOT NULL,
"topic" TEXT[] DEFAULT NULL,
"dep_net_ipv4" INET DEFAULT NULL,
"local_budget" FLOAT DEFAULT NULL,
"profit" FLOAT DEFAULT NULL,
"forecast_profit" FLOAT DEFAULT NULL,
"cash" FLOAT DEFAULT NULL,
"accounts_receivable" FLOAT DEFAULT NULL,
"inventories" FLOAT DEFAULT NULL,
"accounts_payable" FLOAT DEFAULT NULL,
"st_borrowing" FLOAT DEFAULT NULL,
"accrued_liabilities" FLOAT DEFAULT NULL,
CONSTRAINT "department_pk" PRIMARY KEY ("department_id"),
CONSTRAINT "department_code_uk" UNIQUE ("code"),
CONSTRAINT "department_office_fk" FOREIGN KEY ("office_code") REFERENCES "office" ("office_code")
);
ALTER SEQUENCE "department_department_id_seq" RESTART WITH 20;
-- TABLE EMPLOYEE
CREATE TABLE "employee" (
"employee_number" BIGINT NOT NULL,
"last_name" VARCHAR(50) NOT NULL,
"first_name" VARCHAR(50) NOT NULL,
"extension" VARCHAR(10) NOT NULL,
"email" VARCHAR(100) NOT NULL,
"office_code" VARCHAR(10) NOT NULL,
"salary" INT NOT NULL,
"commission" INT DEFAULT NULL,
"reports_to" BIGINT DEFAULT NULL,
"job_title" VARCHAR(50) NOT NULL,
"employee_of_year" INT[] DEFAULT NULL,
"monthly_bonus" INT[] DEFAULT NULL,
CONSTRAINT "employee_pk" PRIMARY KEY ("employee_number"),
CONSTRAINT "employee_employee_fk" FOREIGN KEY ("reports_to") REFERENCES "employee" ("employee_number"),
CONSTRAINT "employees_office_fk" FOREIGN KEY ("office_code") REFERENCES "office" ("office_code")
);
-- this sequence is not attached to a column default, so it is not used automatically
CREATE SEQUENCE "employee_seq" START 100000 INCREMENT 10 MINVALUE 100000 MAXVALUE 10000000 OWNED BY "employee"."employee_number";
-- TABLE EMPLOYEE_STATUS
CREATE TABLE "employee_status" (
"id" SERIAL NOT NULL,
"employee_number" BIGINT NOT NULL,
"status" VARCHAR(50) NOT NULL,
"acquired_date" DATE NOT NULL,
CONSTRAINT "id_pk" PRIMARY KEY ("id"),
CONSTRAINT "employee_status_employee_fk" FOREIGN KEY ("employee_number") REFERENCES "employee" ("employee_number")
);
-- TABLE SALE
CREATE SEQUENCE "sale_seq" START 1000000;
CREATE TYPE "rate_type" AS enum('SILVER', 'GOLD', 'PLATINUM');
CREATE TYPE "vat_type" AS enum('NONE', 'MIN', 'MAX');
CREATE TABLE "sale" (
"sale_id" BIGINT NOT NULL DEFAULT NEXTVAL ('"sale_seq"'),
"fiscal_year" INT NOT NULL,
"sale" FLOAT NOT NULL,
"employee_number" BIGINT DEFAULT NULL,
"hot" BOOLEAN DEFAULT FALSE,
"rate" rate_type DEFAULT NULL,
"vat" vat_type DEFAULT NULL,
"fiscal_month" INT NOT NULL,
"revenue_growth" FLOAT NOT NULL,
"trend" VARCHAR(10) DEFAULT NULL,
CONSTRAINT "sale_pk" PRIMARY KEY ("sale_id"),
CONSTRAINT "sale_employee_fk" FOREIGN KEY ("employee_number") REFERENCES "employee" ("employee_number") ON UPDATE CASCADE
);
-- TABLE DAILY_ACTIVITY
CREATE TABLE "daily_activity" (
"day_id" SERIAL NOT NULL,
"day_date" DATE NOT NULL,
"sales" FLOAT NOT NULL,
"visitors" FLOAT NOT NULL,
"conversion" FLOAT NOT NULL,
CONSTRAINT "daily_activity_pk" PRIMARY KEY ("day_id")
);
-- TABLE TOKEN
CREATE SEQUENCE "token_seq" START 1000000;
CREATE TABLE "token" (
"token_id" BIGINT NOT NULL DEFAULT NEXTVAL ('"token_seq"'),
"sale_id" BIGINT NOT NULL,
"amount" FLOAT NOT NULL,
"updated_on" TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT "token_pk" PRIMARY KEY ("token_id"),
CONSTRAINT "token_sale_fk" FOREIGN KEY ("sale_id") REFERENCES "sale" ("sale_id") ON DELETE CASCADE ON UPDATE CASCADE
);
-- TABLE CUSTOMER
CREATE SEQUENCE "customer_seq" START 1000000;
CREATE TABLE "customer" (
"customer_number" BIGINT NOT NULL DEFAULT NEXTVAL ('"customer_seq"'),
"customer_name" VARCHAR(50) NOT NULL,
"contact_last_name" VARCHAR(50) NOT NULL,
"contact_first_name" VARCHAR(50) NOT NULL,
"phone" VARCHAR(50) NOT NULL,
"sales_rep_employee_number" BIGINT DEFAULT NULL,
"credit_limit" DECIMAL(10,2) DEFAULT NULL,
"first_buy_date" INT DEFAULT NULL,
CONSTRAINT "customer_pk" PRIMARY KEY ("customer_number"),
CONSTRAINT "customer_name_uk" UNIQUE ("customer_name"),
CONSTRAINT "customer_employee_fk" FOREIGN KEY ("sales_rep_employee_number") REFERENCES "employee" ("employee_number") ON UPDATE CASCADE
);
-- TABLE CUSTOMERDETAIL
CREATE TABLE "customerdetail" (
"customer_number" BIGINT NOT NULL,
"address_line_first" VARCHAR(50) NOT NULL,
"address_line_second" VARCHAR(50) DEFAULT NULL,
"city" VARCHAR(50) DEFAULT NULL,
"state" VARCHAR(50) DEFAULT NULL,
"postal_code" VARCHAR(15) DEFAULT NULL,
"country" VARCHAR(50) DEFAULT NULL,
CONSTRAINT "customerdetail_pk" PRIMARY KEY ("customer_number"),
CONSTRAINT "customer_address_line_first_uk" UNIQUE ("address_line_first"),
CONSTRAINT "customerdetail_customer_fk" FOREIGN KEY ("customer_number") REFERENCES "customer" ("customer_number")
);
-- TABLE MANAGER
CREATE TYPE "evaluation_criteria" AS ("communication_ability" INT, "ethics" INT, "performance" INT, "employee_input" INT);
CREATE SEQUENCE "manager_seq" START 1000000;
CREATE TABLE "manager" (
"manager_id" BIGINT NOT NULL DEFAULT NEXTVAL ('"manager_seq"'),
"manager_name" VARCHAR(50) NOT NULL DEFAULT '"anonymous"',
"manager_detail" JSON DEFAULT NULL,
"manager_evaluation" evaluation_criteria DEFAULT NULL,
CONSTRAINT "manager_pk" PRIMARY KEY ("manager_id")
);
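-- Composite-typed and JSON columns take literals like these (sample values):
INSERT INTO "manager" ("manager_name", "manager_detail", "manager_evaluation")
VALUES ('Jon Doe', '{"age": 42, "city": "NYC"}'::JSON, ROW(9, 8, 7, 10)::"evaluation_criteria");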
-- TABLE OFFICE_HAS_MANAGER
CREATE TABLE "office_has_manager" (
"offices_office_code" VARCHAR(10) NOT NULL,
"managers_manager_id" BIGINT NOT NULL,
CONSTRAINT "office_manager_uk" UNIQUE ("offices_office_code", "managers_manager_id"),
CONSTRAINT "office_fk" FOREIGN KEY ("offices_office_code") REFERENCES "office" ("office_code") ON UPDATE NO ACTION ON DELETE NO ACTION,
CONSTRAINT "manager_fk" FOREIGN KEY ("managers_manager_id") REFERENCES "manager" ("manager_id") ON UPDATE NO ACTION ON DELETE NO ACTION
);
-- TABLE PRODUCTLINE
CREATE TABLE "productline" (
"product_line" VARCHAR(50) NOT NULL,
"code" BIGINT NOT NULL,
"text_description" VARCHAR(4000) DEFAULT NULL,
"html_description" XML DEFAULT NULL,
"image" BYTEA DEFAULT NULL,
"created_on" DATE NOT NULL DEFAULT NOW(),
CONSTRAINT "productline_pk" PRIMARY KEY ("product_line", "code"),
CONSTRAINT "productline_uk" UNIQUE("product_line")
);
-- TABLE PRODUCTDETAIL
CREATE TABLE "productlinedetail" (
"product_line" VARCHAR(50) NOT NULL,
"code" BIGINT NOT NULL,
"line_capacity" VARCHAR(20) NOT NULL,
"line_type" INT DEFAULT 0,
CONSTRAINT "productlinedetail_pk" PRIMARY KEY ("product_line","code"),
CONSTRAINT "productlinedetail_uk" UNIQUE("product_line"),
CONSTRAINT "productlinedetail_productline_fk" FOREIGN KEY ("product_line","code") REFERENCES "productline" ("product_line","code")
);
-- TABLE PRODUCT
CREATE SEQUENCE "product_seq" START 1000000;
CREATE TABLE "product" (
"product_id" BIGINT NOT NULL DEFAULT NEXTVAL ('"product_seq"'),
"product_name" VARCHAR(70) DEFAULT NULL,
"product_line" VARCHAR(50) DEFAULT NULL,
"code" BIGINT NOT NULL,
"product_scale" VARCHAR(10) DEFAULT NULL,
"product_vendor" VARCHAR(50) DEFAULT NULL,
"product_description" TEXT DEFAULT NULL,
"quantity_in_stock" INT DEFAULT 0,
"buy_price" DECIMAL(10,2) NOT NULL DEFAULT 0.0,
"msrp" DECIMAL(10,2) NOT NULL DEFAULT 0.0,
"specs" HSTORE DEFAULT NULL,
"product_uid" BIGINT GENERATED BY DEFAULT AS IDENTITY (START WITH 10 INCREMENT BY 10),
CONSTRAINT "product_pk" PRIMARY KEY ("product_id"),
CONSTRAINT "product_productline_fk" FOREIGN KEY ("product_line","code") REFERENCES "productline" ("product_line","code")
);
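-- HSTORE literal example (illustrative values); product_line is left NULL here,
-- so the composite foreign key is not enforced for this row (MATCH SIMPLE semantics):
INSERT INTO "product" ("product_name", "code", "buy_price", "msrp", "specs")
VALUES ('Sample Car', 599302, 10.00, 19.99, 'color=>red, scale=>1:18'::HSTORE);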
-- TABLE ORDER
CREATE SEQUENCE "order_seq" START 1000000;
CREATE TABLE "order" (
"order_id" BIGINT NOT NULL DEFAULT NEXTVAL ('"order_seq"'),
"order_date" DATE NOT NULL,
"required_date" DATE NOT NULL,
"shipped_date" DATE DEFAULT NULL,
"status" VARCHAR(15) NOT NULL,
"comments" TEXT DEFAULT NULL,
"customer_number" BIGINT NOT NULL,
"amount" DECIMAL(10,2) NOT NULL,
CONSTRAINT "order_pk" PRIMARY KEY ("order_id"),
CONSTRAINT "order_customer_fk" FOREIGN KEY ("customer_number") REFERENCES "customer" ("customer_number")
);
-- TABLE ORDERDETAIL
CREATE TABLE "orderdetail" (
"orderdetail_id" SERIAL NOT NULL,
"order_id" BIGINT NOT NULL,
"product_id" BIGINT NOT NULL,
"quantity_ordered" INT NOT NULL,
"price_each" DECIMAL(10,2) NOT NULL,
"order_line_number" INT NOT NULL,
CONSTRAINT "orderdetail_pk" PRIMARY KEY ("orderdetail_id"),
CONSTRAINT "orderdetail_uk" UNIQUE ("order_id", "product_id"),
CONSTRAINT "orderdetail_order_fk" FOREIGN KEY ("order_id") REFERENCES "order" ("order_id"),
CONSTRAINT "orderdetail_product_fk" FOREIGN KEY ("product_id") REFERENCES "product" ("product_id")
);
-- TABLE TOP3PRODUCT
CREATE TABLE "top3product" (
"product_id" BIGINT NOT NULL,
"product_name" VARCHAR(70) DEFAULT NULL,
CONSTRAINT "top3product_pk" PRIMARY KEY ("product_id")
);
-- TABLE PAYMENT
CREATE TABLE "payment" (
"customer_number" BIGINT NOT NULL,
"check_number" VARCHAR(50) NOT NULL,
"payment_date" TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
"invoice_amount" DECIMAL(10,2) NOT NULL,
"caching_date" TIMESTAMP DEFAULT NULL,
"version" INT NOT NULL DEFAULT 0,
"modified" TIMESTAMP NOT NULL DEFAULT NOW(),
CONSTRAINT "payment_pk" PRIMARY KEY ("customer_number","check_number"),
CONSTRAINT "check_number_uk" UNIQUE("check_number"),
CONSTRAINT "payment_customer_fk" FOREIGN KEY ("customer_number") REFERENCES "customer" ("customer_number")
);
-- TABLE BANK_TRANSACTION
CREATE TABLE "bank_transaction" (
"transaction_id" SERIAL NOT NULL,
"bank_name" VARCHAR(50) NOT NULL,
"bank_iban" VARCHAR(50) NOT NULL,
"transfer_amount" DECIMAL(10,2) NOT NULL,
"caching_date" TIMESTAMP NOT NULL DEFAULT NOW(),
"customer_number" BIGINT NOT NULL,
"check_number" VARCHAR(50) NOT NULL,
"card_type" VARCHAR(50) NOT NULL,
"status" VARCHAR(50) NOT NULL DEFAULT 'SUCCESS',
CONSTRAINT "bank_transaction_pk" PRIMARY KEY ("transaction_id"),
CONSTRAINT "bank_transaction_customer_fk" FOREIGN KEY ("customer_number","check_number") REFERENCES "payment" ("customer_number","check_number")
);
ALTER SEQUENCE "bank_transaction_transaction_id_seq" RESTART WITH 100;
-- TABLE OFFICE_FLIGHTS
CREATE TABLE "office_flights" (
"depart_town" VARCHAR(32) NOT NULL,
"arrival_town" VARCHAR(32) NOT NULL,
"distance_km" INT NOT NULL,
CONSTRAINT "office_flights_pk" PRIMARY KEY ("depart_town", "arrival_town")
);
/* USER-DEFINED FUNCTIONS */
CREATE FUNCTION "make_array"(anyelement, anyelement) RETURNS anyarray
AS $$
SELECT ARRAY[$1, $2];
$$ LANGUAGE sql;
CREATE FUNCTION "dup" (IN "f1" anyelement, OUT "f2" anyelement, OUT "f3" anyarray)
AS 'select $1, array[$1,$1]'
LANGUAGE sql;
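-- Polymorphic arguments resolve per call (anyelement/anyarray):
SELECT "make_array"(1, 2);            -- {1,2} as integer[]
SELECT "make_array"('a'::text, 'b');  -- {a,b} as text[]
SELECT * FROM "dup"(42);              -- f2 = 42, f3 = {42,42}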
CREATE OR REPLACE FUNCTION "get_avg_sale"(IN "len_from" INT, IN "len_to" INT)
RETURNS INT
LANGUAGE plpgsql
AS $$
DECLARE "avg_count" INT;
BEGIN
SELECT avg("sale"."sale")
INTO "avg_count"
FROM "sale"
WHERE "sale"."sale" BETWEEN "len_from" AND "len_to";
RETURN "avg_count";
END;
$$;
CREATE OR REPLACE FUNCTION "get_salary_stat"(
OUT "min_sal" INT, OUT "max_sal" INT, OUT "avg_sal" NUMERIC)
LANGUAGE plpgsql
AS $$
BEGIN
SELECT MIN("public"."employee"."salary"),
MAX("public"."employee"."salary"),
AVG("public"."employee"."salary")::NUMERIC(7,2)
INTO "min_sal", "max_sal", "avg_sal"
FROM "public"."employee";
END;
$$;
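-- OUT parameters become the result columns:
SELECT * FROM "get_salary_stat"(); -- min_sal | max_sal | avg_sal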
CREATE OR REPLACE FUNCTION "swap"(
INOUT "x" INT, INOUT "y" INT) RETURNS RECORD
LANGUAGE plpgsql
AS $$
BEGIN
SELECT "x","y" INTO "y","x";
END;
$$;
CREATE OR REPLACE FUNCTION "new_salary"(IN "salary" INT, IN "bonus" INT DEFAULT 50, IN "penalty" INT DEFAULT 0)
RETURNS INT
LANGUAGE sql
AS $$
SELECT $1 + $2 - $3;
$$;
CREATE OR REPLACE FUNCTION "get_customer"(IN "cl" INT) RETURNS REFCURSOR
AS $$
DECLARE
"cur" REFCURSOR;
BEGIN
OPEN "cur" FOR SELECT * FROM "customer" WHERE "credit_limit" > "cl" ORDER BY "customer_name";
RETURN "cur";
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE FUNCTION "get_offices_multiple"() RETURNS SETOF REFCURSOR
AS $$
DECLARE
"ref1" REFCURSOR;
"ref2" REFCURSOR;
BEGIN
OPEN "ref1" FOR SELECT "public"."office"."city", "public"."office"."country"
FROM "public"."office" WHERE "public"."office"."internal_budget" < 100000;
RETURN NEXT "ref1";
OPEN "ref2" FOR SELECT "public"."office"."city", "public"."office"."country"
FROM "public"."office" WHERE "public"."office"."internal_budget" > 100000;
RETURN NEXT "ref2";
END;
$$ LANGUAGE plpgsql;
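/* Refcursors stay open only inside a transaction; the portal name below is a
   placeholder for whatever the SELECT actually returns. */
BEGIN;
SELECT "get_customer"(50000);
-- FETCH ALL IN "<unnamed portal 1>"; -- substitute the returned cursor name
COMMIT;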
CREATE OR REPLACE FUNCTION "employee_office_arr"(VARCHAR(10))
RETURNS BIGINT[]
AS $$
SELECT ARRAY(SELECT "public"."employee"."employee_number"
FROM "public"."employee" WHERE "public"."employee"."office_code" = $1)
$$ LANGUAGE sql;
CREATE OR REPLACE FUNCTION "department_topic_arr"(IN "id" BIGINT)
RETURNS text[]
AS $$
SELECT "public"."department"."topic"
FROM "public"."department" WHERE "public"."department"."department_id" = "id"
$$ LANGUAGE sql;
CREATE OR REPLACE FUNCTION "sale_price"(
"quantity" INT, "list_price" REAL, "fraction_of_price" REAL)
RETURNS REAL LANGUAGE plpgsql
AS $$
BEGIN
RETURN ("list_price" - ("list_price" * "fraction_of_price")) * "quantity";
END;
$$;
CREATE OR REPLACE FUNCTION "top_three_sales_per_employee"(IN "employee_nr" BIGINT)
RETURNS TABLE("sales" FLOAT) LANGUAGE plpgsql
AS $$
BEGIN
RETURN QUERY
SELECT
"public"."sale"."sale" AS "sales"
FROM
"public"."sale"
WHERE
employee_nr = "public"."sale"."employee_number"
ORDER BY
"public"."sale"."sale" DESC
LIMIT 3;
END;
$$;
CREATE OR REPLACE FUNCTION "product_of_product_line"(IN "p_line_in" VARCHAR)
RETURNS TABLE("p_id" BIGINT, "p_name" VARCHAR, "p_line" VARCHAR) LANGUAGE plpgsql
AS $$
BEGIN
RETURN QUERY
SELECT
"public"."product"."product_id" AS "p_id",
"public"."product"."product_name" AS "p_name",
"public"."product"."product_line" AS "p_line"
FROM
"public"."product"
WHERE
"p_line_in" = "public"."product"."product_line";
END;
$$;
CREATE OR REPLACE FUNCTION "update_msrp" (IN "id" BIGINT, IN "debit" INT)
RETURNS REAL
AS $$
UPDATE "public"."product"
SET "msrp" = "public"."product"."msrp" - "debit"
WHERE "public"."product"."product_id" = "id"
RETURNING "public"."product"."msrp";
$$ LANGUAGE sql;
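-- Usage sketch (assumes a product row with this id exists):
SELECT "update_msrp"(1000000, 5); -- subtracts 5 from msrp and returns the new value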
/* USER-DEFINED VIEWS */
CREATE OR REPLACE VIEW "customer_master" AS
SELECT "customer"."customer_name",
"customer"."credit_limit",
"customerdetail"."city",
"customerdetail"."country",
"customerdetail"."address_line_first",
"customerdetail"."postal_code",
"customerdetail"."state"
FROM "customer"
JOIN "customerdetail" ON "customerdetail"."customer_number" = "customer"."customer_number"
WHERE "customer"."first_buy_date" IS NOT NULL;
CREATE OR REPLACE VIEW "office_master" AS
SELECT "office"."office_code",
"office"."city",
"office"."country",
"office"."state",
"office"."phone",
"office"."postal_code"
FROM "office"
WHERE "office"."city" IS NOT NULL;
CREATE OR REPLACE VIEW "product_master" AS
SELECT "product"."product_line",
"product"."product_name",
"product"."product_scale"
FROM "product";
/* END */
/*
*********************************************************************
http://www.mysqltutorial.org
*********************************************************************
Name: MySQL Sample Database classicmodels
Link: http://www.mysqltutorial.org/mysql-sample-database.aspx
*********************************************************************
This is a modified version of the original schema for MySQL
*/
PRAGMA foreign_keys=OFF;
BEGIN TRANSACTION;
CREATE TABLE IF NOT EXISTS "schema_migrations" ("version" varchar NOT NULL PRIMARY KEY);
INSERT INTO schema_migrations VALUES('20200619185311');
INSERT INTO schema_migrations VALUES('20200619185427');
INSERT INTO schema_migrations VALUES('20200619185837');
INSERT INTO schema_migrations VALUES('20200619193022');
INSERT INTO schema_migrations VALUES('20200619193650');
INSERT INTO schema_migrations VALUES('20200619193721');
INSERT INTO schema_migrations VALUES('20200619193737');
INSERT INTO schema_migrations VALUES('20200620042743');
INSERT INTO schema_migrations VALUES('20200620045102');
CREATE TABLE IF NOT EXISTS "ar_internal_metadata" ("key" varchar NOT NULL PRIMARY KEY, "value" varchar, "created_at" datetime NOT NULL, "updated_at" datetime NOT NULL);
INSERT INTO ar_internal_metadata VALUES('environment','development','2020-07-23 01:59:21.368379','2020-07-23 01:59:21.368379');
CREATE TABLE IF NOT EXISTS "roles" ("id" integer PRIMARY KEY AUTOINCREMENT NOT NULL, "title" varchar, "created_at" datetime NOT NULL, "updated_at" datetime NOT NULL);
INSERT INTO roles VALUES(1,'admin','2020-07-23 01:59:24.656549','2020-07-23 01:59:24.656549');
INSERT INTO roles VALUES(2,'accountant','2020-07-23 01:59:24.683004','2020-07-23 01:59:24.683004');
INSERT INTO roles VALUES(3,'employee','2020-07-23 01:59:24.709098','2020-07-23 01:59:24.709098');
CREATE TABLE IF NOT EXISTS "users" ("id" integer PRIMARY KEY AUTOINCREMENT NOT NULL, "email" varchar, "title" varchar, "location_id" integer, "organization_id" integer, "manager_id" integer, "created_at" datetime NOT NULL, "updated_at" datetime NOT NULL);
INSERT INTO users VALUES(1,'[email protected]','CEO',1,1,NULL,'2020-07-23 01:59:24.803843','2020-07-23 01:59:24.803843');
INSERT INTO users VALUES(2,'[email protected]','CFO',3,1,1,'2020-07-23 01:59:24.849942','2020-07-23 01:59:24.849942');
INSERT INTO users VALUES(3,'[email protected]','Senior Accountant',1,1,2,'2020-07-23 01:59:24.893020','2020-07-23 01:59:24.893020');
INSERT INTO users VALUES(4,'[email protected]','Accountant',1,1,3,'2020-07-23 01:59:24.935930','2020-07-23 01:59:24.935930');
INSERT INTO users VALUES(5,'[email protected]','Director of Engineering',1,1,1,'2020-07-23 01:59:24.986995','2020-07-23 01:59:24.986995');
INSERT INTO users VALUES(6,'[email protected]','Engineering Manager',1,1,5,'2020-07-23 01:59:25.045160','2020-07-23 01:59:25.045160');
INSERT INTO users VALUES(7,'[email protected]','Software Engineer',1,1,6,'2020-07-23 01:59:25.082724','2020-07-23 01:59:25.082724');
INSERT INTO users VALUES(8,'[email protected]','Director of Sales',1,1,1,'2020-07-23 01:59:25.116264','2020-07-23 01:59:25.116264');
INSERT INTO users VALUES(9,'[email protected]','Regional Sales Manager',2,1,8,'2020-07-23 01:59:25.169940','2020-07-23 01:59:25.169940');
INSERT INTO users VALUES(10,'[email protected]','Sales Rep',2,1,9,'2020-07-23 01:59:25.213552','2020-07-23 01:59:25.213552');
INSERT INTO users VALUES(11,'[email protected]','Accountant',2,1,3,'2020-07-23 01:59:25.257120','2020-07-23 01:59:25.257120');
INSERT INTO users VALUES(12,'[email protected]','CTO',3,1,1,'2020-07-23 01:59:25.299858','2020-07-23 01:59:25.299858');
INSERT INTO users VALUES(13,'[email protected]','CEO',1,2,NULL,'2020-07-23 01:59:30.488827','2020-07-23 01:59:30.488827');
CREATE TABLE IF NOT EXISTS "roles_users" ("user_id" integer NOT NULL, "role_id" integer NOT NULL);
INSERT INTO roles_users VALUES(1,1);
INSERT INTO roles_users VALUES(2,1);
INSERT INTO roles_users VALUES(3,2);
INSERT INTO roles_users VALUES(4,2);
INSERT INTO roles_users VALUES(5,3);
INSERT INTO roles_users VALUES(6,3);
INSERT INTO roles_users VALUES(7,3);
INSERT INTO roles_users VALUES(8,3);
INSERT INTO roles_users VALUES(9,3);
INSERT INTO roles_users VALUES(10,3);
INSERT INTO roles_users VALUES(11,2);
INSERT INTO roles_users VALUES(12,1);
INSERT INTO roles_users VALUES(13,1);
CREATE TABLE IF NOT EXISTS "expenses" ("id" integer PRIMARY KEY AUTOINCREMENT NOT NULL, "user_id" integer, "amount" integer, "description" varchar, "project_id" integer, "created_at" datetime NOT NULL, "updated_at" datetime NOT NULL, CONSTRAINT "fk_rails_c3ee69df61"
FOREIGN KEY ("user_id")
REFERENCES "users" ("id")
, CONSTRAINT "fk_rails_f097e0a9ca"
FOREIGN KEY ("project_id")
REFERENCES "projects" ("id")
);
INSERT INTO expenses VALUES(1,4,64165,'Trust fund pour-over.',6,'2020-07-23 01:59:26.793029','2020-07-23 01:59:26.793029');
INSERT INTO expenses VALUES(2,1,17743,'Pug irony.',8,'2020-07-23 01:59:26.829541','2020-07-23 01:59:26.829541');
INSERT INTO expenses VALUES(3,12,8092,'Gastropub viral.',7,'2020-07-23 01:59:26.862951','2020-07-23 01:59:26.862951');
INSERT INTO expenses VALUES(4,3,55013,'Intelligentsia health.',2,'2020-07-23 01:59:26.902323','2020-07-23 01:59:26.902323');
INSERT INTO expenses VALUES(5,11,7703,'Paleo kickstarter.',3,'2020-07-23 01:59:26.944763','2020-07-23 01:59:26.944763');
INSERT INTO expenses VALUES(6,10,6022,'Plaid tote bag.',3,'2020-07-23 01:59:26.977392','2020-07-23 01:59:26.977392');
INSERT INTO expenses VALUES(7,4,1075,'Locavore migas.',5,'2020-07-23 01:59:27.006154','2020-07-23 01:59:27.006154');
INSERT INTO expenses VALUES(8,2,72414,'Diy semiotics.',7,'2020-07-23 01:59:27.055030','2020-07-23 01:59:27.055030');
INSERT INTO expenses VALUES(9,5,62919,'Retro pabst.',6,'2020-07-23 01:59:27.097562','2020-07-23 01:59:27.097562');
INSERT INTO expenses VALUES(10,12,82684,'Shabby chic brooklyn.',7,'2020-07-23 01:59:27.132183','2020-07-23 01:59:27.132183');
INSERT INTO expenses VALUES(11,9,20696,'Biodiesel chartreuse.',1,'2020-07-23 01:59:27.165553','2020-07-23 01:59:27.165553');
INSERT INTO expenses VALUES(12,5,29639,'Swag cornhole.',5,'2020-07-23 01:59:27.210909','2020-07-23 01:59:27.210909');
INSERT INTO expenses VALUES(13,10,92267,'Tilde ramps.',1,'2020-07-23 01:59:27.248681','2020-07-23 01:59:27.248681');
INSERT INTO expenses VALUES(14,9,75724,'Yolo butcher.',1,'2020-07-23 01:59:27.277687','2020-07-23 01:59:27.277687');
INSERT INTO expenses VALUES(15,10,40051,'Carry pinterest.',1,'2020-07-23 01:59:27.316445','2020-07-23 01:59:27.316445');
INSERT INTO expenses VALUES(16,12,60910,'Food truck ethical.',7,'2020-07-23 01:59:27.355072','2020-07-23 01:59:27.355072');
INSERT INTO expenses VALUES(17,5,35384,'Polaroid mlkshk.',8,'2020-07-23 01:59:27.394964','2020-07-23 01:59:27.394964');
INSERT INTO expenses VALUES(18,6,42905,'Sustainable humblebrag.',6,'2020-07-23 01:59:27.436173','2020-07-23 01:59:27.436173');
INSERT INTO expenses VALUES(19,7,53212,'Ugh master.',5,'2020-07-23 01:59:27.472859','2020-07-23 01:59:27.472859');
INSERT INTO expenses VALUES(20,1,55754,'Bitters hashtag.',8,'2020-07-23 01:59:27.507571','2020-07-23 01:59:27.507571');
INSERT INTO expenses VALUES(21,12,4376,'8-bit dreamcatcher.',7,'2020-07-23 01:59:27.548086','2020-07-23 01:59:27.548086');
INSERT INTO expenses VALUES(22,3,8772,'Blog vhs.',2,'2020-07-23 01:59:27.582775','2020-07-23 01:59:27.582775');
INSERT INTO expenses VALUES(23,5,93244,'Gentrify flannel.',5,'2020-07-23 01:59:27.611932','2020-07-23 01:59:27.611932');
INSERT INTO expenses VALUES(24,3,13393,'Blog distillery.',6,'2020-07-23 01:59:27.647973','2020-07-23 01:59:27.647973');
INSERT INTO expenses VALUES(25,2,70243,'Vinegar meh.',7,'2020-07-23 01:59:27.689016','2020-07-23 01:59:27.689016');
INSERT INTO expenses VALUES(26,9,4656,'Portland semiotics.',1,'2020-07-23 01:59:27.718580','2020-07-23 01:59:27.718580');
INSERT INTO expenses VALUES(27,2,28668,'Sartorial mlkshk.',7,'2020-07-23 01:59:27.748297','2020-07-23 01:59:27.748297');
INSERT INTO expenses VALUES(28,8,59060,'Cornhole meh.',8,'2020-07-23 01:59:27.789877','2020-07-23 01:59:27.789877');
INSERT INTO expenses VALUES(29,4,3914,'Beard narwhal.',5,'2020-07-23 01:59:27.831786','2020-07-23 01:59:27.831786');
INSERT INTO expenses VALUES(30,8,73241,'Flexitarian farm-to-table.',8,'2020-07-23 01:59:27.867501','2020-07-23 01:59:27.867501');
INSERT INTO expenses VALUES(31,10,84047,'Chambray vice.',1,'2020-07-23 01:59:27.899671','2020-07-23 01:59:27.899671');
INSERT INTO expenses VALUES(32,6,22716,'Occupy bespoke.',2,'2020-07-23 01:59:27.937669','2020-07-23 01:59:27.937669');
INSERT INTO expenses VALUES(33,11,46972,'You probably haven''t heard of them mustache.',3,'2020-07-23 01:59:27.969329','2020-07-23 01:59:27.969329');
INSERT INTO expenses VALUES(34,8,71749,'Church-key locavore.',8,'2020-07-23 01:59:28.015755','2020-07-23 01:59:28.015755');
INSERT INTO expenses VALUES(35,12,93152,'Sustainable portland.',7,'2020-07-23 01:59:28.054563','2020-07-23 01:59:28.054563');
INSERT INTO expenses VALUES(36,3,94527,'Diy chillwave.',6,'2020-07-23 01:59:28.088886','2020-07-23 01:59:28.088886');
INSERT INTO expenses VALUES(37,6,62378,'Vhs poutine.',2,'2020-07-23 01:59:28.129478','2020-07-23 01:59:28.129478');
INSERT INTO expenses VALUES(38,6,33062,'Etsy scenester.',5,'2020-07-23 01:59:28.163469','2020-07-23 01:59:28.163469');
INSERT INTO expenses VALUES(39,12,73320,'Vhs carry.',7,'2020-07-23 01:59:28.197837','2020-07-23 01:59:28.197837');
INSERT INTO expenses VALUES(40,9,67408,'Slow-carb art party.',3,'2020-07-23 01:59:28.245418','2020-07-23 01:59:28.245418');
INSERT INTO expenses VALUES(41,11,63633,'Banh mi cleanse.',3,'2020-07-23 01:59:28.282211','2020-07-23 01:59:28.282211');
INSERT INTO expenses VALUES(42,1,21737,'Selfies loko.',5,'2020-07-23 01:59:28.316213','2020-07-23 01:59:28.316213');
INSERT INTO expenses VALUES(43,1,45714,'Phlogiston hoodie.',8,'2020-07-23 01:59:28.355364','2020-07-23 01:59:28.355364');
INSERT INTO expenses VALUES(44,11,81268,'Slow-carb tote bag.',3,'2020-07-23 01:59:28.390195','2020-07-23 01:59:28.390195');
INSERT INTO expenses VALUES(45,1,79875,'Waistcoat lumbersexual.',6,'2020-07-23 01:59:28.429581','2020-07-23 01:59:28.429581');
INSERT INTO expenses VALUES(46,8,77942,'Echo polaroid.',9,'2020-07-23 01:59:28.470106','2020-07-23 01:59:28.470106');
INSERT INTO expenses VALUES(47,3,82163,'Deep v migas.',6,'2020-07-23 01:59:28.510865','2020-07-23 01:59:28.510865');
INSERT INTO expenses VALUES(48,12,54165,'Swag trust fund.',4,'2020-07-23 01:59:28.542343','2020-07-23 01:59:28.542343');
INSERT INTO expenses VALUES(49,2,10209,'Whatever viral.',7,'2020-07-23 01:59:28.578791','2020-07-23 01:59:28.578791');
INSERT INTO expenses VALUES(50,3,21358,'Farm-to-table lomo.',5,'2020-07-23 01:59:28.610228','2020-07-23 01:59:28.610228');
INSERT INTO expenses VALUES(51,7,90780,'Typewriter carry.',2,'2020-07-23 01:59:28.649679','2020-07-23 01:59:28.649679');
INSERT INTO expenses VALUES(52,11,22351,'Quinoa neutra.',1,'2020-07-23 01:59:28.672660','2020-07-23 01:59:28.672660');
INSERT INTO expenses VALUES(53,7,12513,'Squid iphone.',6,'2020-07-23 01:59:28.715897','2020-07-23 01:59:28.715897');
INSERT INTO expenses VALUES(54,3,74361,'Vhs mumblecore.',8,'2020-07-23 01:59:28.756867','2020-07-23 01:59:28.756867');
INSERT INTO expenses VALUES(55,11,56747,'Iphone salvia.',1,'2020-07-23 01:59:28.783876','2020-07-23 01:59:28.783876');
INSERT INTO expenses VALUES(56,11,97009,'Crucifix distillery.',1,'2020-07-23 01:59:28.824702','2020-07-23 01:59:28.824702');
INSERT INTO expenses VALUES(57,5,62993,'Bicycle rights quinoa.',9,'2020-07-23 01:59:28.864023','2020-07-23 01:59:28.864023');
INSERT INTO expenses VALUES(58,5,47652,'Pbr&b leggings.',2,'2020-07-23 01:59:28.908834','2020-07-23 01:59:28.908834');
INSERT INTO expenses VALUES(59,2,53979,'Xoxo celiac.',4,'2020-07-23 01:59:28.941396','2020-07-23 01:59:28.941396');
INSERT INTO expenses VALUES(60,12,47715,'Tousled ugh.',7,'2020-07-23 01:59:28.966652','2020-07-23 01:59:28.966652');
INSERT INTO expenses VALUES(61,3,40337,'Tofu viral.',5,'2020-07-23 01:59:28.994543','2020-07-23 01:59:28.994543');
INSERT INTO expenses VALUES(62,4,74212,'Forage master.',2,'2020-07-23 01:59:29.023798','2020-07-23 01:59:29.023798');
INSERT INTO expenses VALUES(63,2,2836,'Echo drinking.',10,'2020-07-23 01:59:29.061061','2020-07-23 01:59:29.061061');
INSERT INTO expenses VALUES(64,10,34003,'Irony portland.',3,'2020-07-23 01:59:29.101656','2020-07-23 01:59:29.101656');
INSERT INTO expenses VALUES(65,12,44382,'Post-ironic stumptown.',4,'2020-07-23 01:59:29.135824','2020-07-23 01:59:29.135824');
INSERT INTO expenses VALUES(66,2,89275,'Phlogiston church-key.',4,'2020-07-23 01:59:29.169962','2020-07-23 01:59:29.169962');
INSERT INTO expenses VALUES(67,12,52967,'Mlkshk flannel.',7,'2020-07-23 01:59:29.197831','2020-07-23 01:59:29.197831');
INSERT INTO expenses VALUES(68,12,18522,'Synth green juice.',10,'2020-07-23 01:59:29.238667','2020-07-23 01:59:29.238667');
INSERT INTO expenses VALUES(69,12,4736,'Fashion axe semiotics.',4,'2020-07-23 01:59:29.274958','2020-07-23 01:59:29.274958');
INSERT INTO expenses VALUES(70,8,46020,'Bitters ennui.',8,'2020-07-23 01:59:29.311330','2020-07-23 01:59:29.311330');
INSERT INTO expenses VALUES(71,12,7245,'Williamsburg butcher.',10,'2020-07-23 01:59:29.345242','2020-07-23 01:59:29.345242');
INSERT INTO expenses VALUES(72,9,46437,'Polaroid occupy.',3,'2020-07-23 01:59:29.381216','2020-07-23 01:59:29.381216');
INSERT INTO expenses VALUES(73,12,57370,'Pop-up flexitarian.',4,'2020-07-23 01:59:29.421942','2020-07-23 01:59:29.421942');
INSERT INTO expenses VALUES(74,8,92729,'Tattooed brunch.',8,'2020-07-23 01:59:29.444160','2020-07-23 01:59:29.444160');
INSERT INTO expenses VALUES(75,2,40273,'Vinegar polaroid.',7,'2020-07-23 01:59:29.471505','2020-07-23 01:59:29.471505');
INSERT INTO expenses VALUES(76,10,48961,'Truffaut wolf.',1,'2020-07-23 01:59:29.506802','2020-07-23 01:59:29.506802');
INSERT INTO expenses VALUES(77,12,19606,'Lumbersexual tofu.',7,'2020-07-23 01:59:29.530067','2020-07-23 01:59:29.530067');
INSERT INTO expenses VALUES(78,2,1413,'Wes anderson drinking.',10,'2020-07-23 01:59:29.556927','2020-07-23 01:59:29.556927');
INSERT INTO expenses VALUES(79,9,27631,'Intelligentsia iphone.',1,'2020-07-23 01:59:29.612244','2020-07-23 01:59:29.612244');
INSERT INTO expenses VALUES(80,12,8181,'Selfies kogi.',10,'2020-07-23 01:59:29.658125','2020-07-23 01:59:29.658125');
INSERT INTO expenses VALUES(81,6,35172,'Skateboard synth.',9,'2020-07-23 01:59:29.698597','2020-07-23 01:59:29.698597');
INSERT INTO expenses VALUES(82,3,50251,'Umami fingerstache.',6,'2020-07-23 01:59:29.735382','2020-07-23 01:59:29.735382');
INSERT INTO expenses VALUES(83,7,85491,'Mumblecore sriracha.',9,'2020-07-23 01:59:29.782195','2020-07-23 01:59:29.782195');
INSERT INTO expenses VALUES(84,5,23815,'Deep v mlkshk.',9,'2020-07-23 01:59:29.812659','2020-07-23 01:59:29.812659');
INSERT INTO expenses VALUES(85,3,67684,'Church-key wes anderson.',9,'2020-07-23 01:59:29.853565','2020-07-23 01:59:29.853565');
INSERT INTO expenses VALUES(86,5,91711,'Williamsburg vhs.',6,'2020-07-23 01:59:29.885666','2020-07-23 01:59:29.885666');
INSERT INTO expenses VALUES(87,6,11407,'Mustache heirloom.',5,'2020-07-23 01:59:29.929122','2020-07-23 01:59:29.929122');
INSERT INTO expenses VALUES(88,1,58385,'Street shabby chic.',8,'2020-07-23 01:59:29.958955','2020-07-23 01:59:29.958955');
INSERT INTO expenses VALUES(89,5,31278,'Neutra bitters.',9,'2020-07-23 01:59:29.999538','2020-07-23 01:59:29.999538');
INSERT INTO expenses VALUES(90,9,43679,'Microdosing polaroid.',3,'2020-07-23 01:59:30.031723','2020-07-23 01:59:30.031723');
INSERT INTO expenses VALUES(91,12,13581,'Listicle squid.',7,'2020-07-23 01:59:30.072248','2020-07-23 01:59:30.072248');
INSERT INTO expenses VALUES(92,4,72905,'Whatever truffaut.',6,'2020-07-23 01:59:30.102800','2020-07-23 01:59:30.102800');
INSERT INTO expenses VALUES(93,7,83728,'Neutra banh mi.',6,'2020-07-23 01:59:30.135553','2020-07-23 01:59:30.135553');
INSERT INTO expenses VALUES(94,2,40782,'Taxidermy retro.',10,'2020-07-23 01:59:30.164569','2020-07-23 01:59:30.164569');
INSERT INTO expenses VALUES(95,9,90313,'Literally sartorial.',1,'2020-07-23 01:59:30.199102','2020-07-23 01:59:30.199102');
INSERT INTO expenses VALUES(96,4,71548,'Stumptown portland.',5,'2020-07-23 01:59:30.234590','2020-07-23 01:59:30.234590');
INSERT INTO expenses VALUES(97,9,19812,'90''s helvetica.',3,'2020-07-23 01:59:30.274835','2020-07-23 01:59:30.274835');
INSERT INTO expenses VALUES(98,12,7354,'Dreamcatcher yolo.',4,'2020-07-23 01:59:30.304072','2020-07-23 01:59:30.304072');
INSERT INTO expenses VALUES(99,7,81731,'Yr literally.',9,'2020-07-23 01:59:30.348982','2020-07-23 01:59:30.348982');
INSERT INTO expenses VALUES(100,8,43546,'Blue bottle pug.',9,'2020-07-23 01:59:30.391978','2020-07-23 01:59:30.391978');
INSERT INTO expenses VALUES(101,13,45476,'Hire a copywriter to create some job postings.',11,'2020-07-23 01:59:30.560484','2020-07-23 01:59:30.560484');
CREATE TABLE IF NOT EXISTS "projects" ("id" integer PRIMARY KEY AUTOINCREMENT NOT NULL, "team_id" integer, "location_id" integer, "name" varchar, "created_at" datetime NOT NULL, "updated_at" datetime NOT NULL);
INSERT INTO projects VALUES(1,3,2,'Equity Investment Instruments','2020-07-23 01:59:26.410163','2020-07-23 01:59:26.410163');
INSERT INTO projects VALUES(2,1,1,'Clothing & Accessories','2020-07-23 01:59:26.453004','2020-07-23 01:59:26.453004');
INSERT INTO projects VALUES(3,2,2,'Medical Supplies','2020-07-23 01:59:26.477936','2020-07-23 01:59:26.477936');
INSERT INTO projects VALUES(4,3,3,'Apparel Retailers','2020-07-23 01:59:26.506309','2020-07-23 01:59:26.506309');
INSERT INTO projects VALUES(5,2,1,'Medical Supplies','2020-07-23 01:59:26.537835','2020-07-23 01:59:26.537835');
INSERT INTO projects VALUES(6,3,1,'Electronic Equipment','2020-07-23 01:59:26.587487','2020-07-23 01:59:26.587487');
INSERT INTO projects VALUES(7,1,3,'Iron & Steel','2020-07-23 01:59:26.625163','2020-07-23 01:59:26.625163');
INSERT INTO projects VALUES(8,3,1,'Reinsurance','2020-07-23 01:59:26.666440','2020-07-23 01:59:26.666440');
INSERT INTO projects VALUES(9,3,1,'Forestry','2020-07-23 01:59:26.696504','2020-07-23 01:59:26.696504');
INSERT INTO projects VALUES(10,2,3,'Brewers','2020-07-23 01:59:26.731228','2020-07-23 01:59:26.731228');
INSERT INTO projects VALUES(11,5,1,'Hire first employee','2020-07-23 01:59:30.528606','2020-07-23 01:59:30.528606');
CREATE TABLE IF NOT EXISTS "teams" ("id" integer PRIMARY KEY AUTOINCREMENT NOT NULL, "name" varchar, "organization_id" integer, "created_at" datetime NOT NULL, "updated_at" datetime NOT NULL, CONSTRAINT "fk_rails_f07f0bd66d"
FOREIGN KEY ("organization_id")
REFERENCES "organizations" ("id")
);
INSERT INTO teams VALUES(1,'Engineering',1,'2020-07-23 01:59:24.535388','2020-07-23 01:59:24.535388');
INSERT INTO teams VALUES(2,'Leadership',1,'2020-07-23 01:59:24.568823','2020-07-23 01:59:24.568823');
INSERT INTO teams VALUES(3,'Finance',1,'2020-07-23 01:59:24.593442','2020-07-23 01:59:24.593442');
INSERT INTO teams VALUES(4,'Sales',1,'2020-07-23 01:59:24.615934','2020-07-23 01:59:24.615934');
INSERT INTO teams VALUES(5,'Leadership',2,'2020-07-23 01:59:30.447400','2020-07-23 01:59:30.447400');
CREATE TABLE IF NOT EXISTS "organizations" ("id" integer PRIMARY KEY AUTOINCREMENT NOT NULL, "name" varchar, "created_at" datetime NOT NULL, "updated_at" datetime NOT NULL);
INSERT INTO organizations VALUES(1,'Foo Industries','2020-07-23 01:59:24.481020','2020-07-23 01:59:24.481020');
INSERT INTO organizations VALUES(2,'Bar, Inc.','2020-07-23 01:59:30.418613','2020-07-23 01:59:30.418613');
CREATE TABLE IF NOT EXISTS "locations" ("id" integer PRIMARY KEY AUTOINCREMENT NOT NULL, "name" varchar, "created_at" datetime NOT NULL, "updated_at" datetime NOT NULL);
INSERT INTO locations VALUES(1,'NYC','2020-07-23 01:59:24.383830','2020-07-23 01:59:24.383830');
INSERT INTO locations VALUES(2,'Milwaukee','2020-07-23 01:59:24.411004','2020-07-23 01:59:24.411004');
INSERT INTO locations VALUES(3,'Crestone','2020-07-23 01:59:24.444765','2020-07-23 01:59:24.444765');
CREATE TABLE IF NOT EXISTS "teams_users" ("user_id" integer NOT NULL, "team_id" integer NOT NULL);
INSERT INTO teams_users VALUES(1,2);
INSERT INTO teams_users VALUES(2,2);
INSERT INTO teams_users VALUES(3,3);
INSERT INTO teams_users VALUES(4,3);
INSERT INTO teams_users VALUES(5,1);
INSERT INTO teams_users VALUES(6,1);
INSERT INTO teams_users VALUES(7,1);
INSERT INTO teams_users VALUES(8,4);
INSERT INTO teams_users VALUES(9,4);
INSERT INTO teams_users VALUES(10,4);
INSERT INTO teams_users VALUES(11,3);
INSERT INTO teams_users VALUES(12,2);
INSERT INTO teams_users VALUES(13,5);
DELETE FROM sqlite_sequence;
INSERT INTO sqlite_sequence VALUES('locations',3);
INSERT INTO sqlite_sequence VALUES('organizations',2);
INSERT INTO sqlite_sequence VALUES('teams',5);
INSERT INTO sqlite_sequence VALUES('roles',3);
INSERT INTO sqlite_sequence VALUES('users',13);
INSERT INTO sqlite_sequence VALUES('projects',11);
INSERT INTO sqlite_sequence VALUES('expenses',101);
CREATE INDEX "index_users_on_location_id" ON "users" ("location_id");
CREATE INDEX "index_users_on_organization_id" ON "users" ("organization_id");
CREATE INDEX "index_users_on_manager_id" ON "users" ("manager_id");
CREATE INDEX "index_roles_users_on_user_id" ON "roles_users" ("user_id");
CREATE INDEX "index_roles_users_on_role_id" ON "roles_users" ("role_id");
CREATE INDEX "index_expenses_on_user_id" ON "expenses" ("user_id");
CREATE INDEX "index_expenses_on_project_id" ON "expenses" ("project_id");
CREATE INDEX "index_projects_on_team_id" ON "projects" ("team_id");
CREATE INDEX "index_projects_on_location_id" ON "projects" ("location_id");
CREATE INDEX "index_teams_on_organization_id" ON "teams" ("organization_id");
CREATE INDEX "index_teams_users_on_user_id" ON "teams_users" ("user_id");
CREATE INDEX "index_teams_users_on_team_id" ON "teams_users" ("team_id");
COMMIT;
-- Sakila Spatial Sample Database Schema
-- Version 0.9
-- Copyright (c) 2014, Oracle Corporation
-- All rights reserved.
-- Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-- * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
-- * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
-- * Neither the name of Oracle Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
-- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-- Modified in September 2015 by Giuseppe Maxia
-- The schema and data can now be loaded by any MySQL 5.x version.
SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0;
SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0;
SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='TRADITIONAL';
DROP SCHEMA IF EXISTS sakila;
CREATE SCHEMA sakila;
USE sakila;
--
-- Table structure for table `actor`
--
CREATE TABLE actor (
actor_id SMALLINT UNSIGNED NOT NULL AUTO_INCREMENT,
first_name VARCHAR(45) NOT NULL,
last_name VARCHAR(45) NOT NULL,
last_update TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (actor_id),
KEY idx_actor_last_name (last_name)
)ENGINE=InnoDB DEFAULT CHARSET=utf8;
--
-- Table structure for table `address`
--
CREATE TABLE address (
address_id SMALLINT UNSIGNED NOT NULL AUTO_INCREMENT,
address VARCHAR(50) NOT NULL,
address2 VARCHAR(50) DEFAULT NULL,
district VARCHAR(20) NOT NULL,
city_id SMALLINT UNSIGNED NOT NULL,
postal_code VARCHAR(10) DEFAULT NULL,
phone VARCHAR(20) NOT NULL,
/*!50705 location GEOMETRY NOT NULL,*/
last_update TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (address_id),
KEY idx_fk_city_id (city_id),
/*!50705 SPATIAL KEY `idx_location` (location),*/
CONSTRAINT `fk_address_city` FOREIGN KEY (city_id) REFERENCES city (city_id) ON DELETE RESTRICT ON UPDATE CASCADE
)ENGINE=InnoDB DEFAULT CHARSET=utf8;
--
-- Table structure for table `category`
--
CREATE TABLE category (
category_id TINYINT UNSIGNED NOT NULL AUTO_INCREMENT,
name VARCHAR(25) NOT NULL,
last_update TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (category_id)
)ENGINE=InnoDB DEFAULT CHARSET=utf8;
--
-- Table structure for table `city`
--
CREATE TABLE city (
city_id SMALLINT UNSIGNED NOT NULL AUTO_INCREMENT,
city VARCHAR(50) NOT NULL,
country_id SMALLINT UNSIGNED NOT NULL,
last_update TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (city_id),
KEY idx_fk_country_id (country_id),
CONSTRAINT `fk_city_country` FOREIGN KEY (country_id) REFERENCES country (country_id) ON DELETE RESTRICT ON UPDATE CASCADE
)ENGINE=InnoDB DEFAULT CHARSET=utf8;
--
-- Table structure for table `country`
--
CREATE TABLE country (
country_id SMALLINT UNSIGNED NOT NULL AUTO_INCREMENT,
country VARCHAR(50) NOT NULL,
last_update TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (country_id)
)ENGINE=InnoDB DEFAULT CHARSET=utf8;
--
-- Table structure for table `customer`
--
CREATE TABLE customer (
customer_id SMALLINT UNSIGNED NOT NULL AUTO_INCREMENT,
store_id TINYINT UNSIGNED NOT NULL,
first_name VARCHAR(45) NOT NULL,
last_name VARCHAR(45) NOT NULL,
email VARCHAR(50) DEFAULT NULL,
address_id SMALLINT UNSIGNED NOT NULL,
active BOOLEAN NOT NULL DEFAULT TRUE,
create_date DATETIME NOT NULL,
last_update TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (customer_id),
KEY idx_fk_store_id (store_id),
KEY idx_fk_address_id (address_id),
KEY idx_last_name (last_name),
CONSTRAINT fk_customer_address FOREIGN KEY (address_id) REFERENCES address (address_id) ON DELETE RESTRICT ON UPDATE CASCADE,
CONSTRAINT fk_customer_store FOREIGN KEY (store_id) REFERENCES store (store_id) ON DELETE RESTRICT ON UPDATE CASCADE
)ENGINE=InnoDB DEFAULT CHARSET=utf8;
--
-- Table structure for table `film`
--
CREATE TABLE film (
film_id SMALLINT UNSIGNED NOT NULL AUTO_INCREMENT,
title VARCHAR(255) NOT NULL,
description TEXT DEFAULT NULL,
release_year YEAR DEFAULT NULL,
language_id TINYINT UNSIGNED NOT NULL,
original_language_id TINYINT UNSIGNED DEFAULT NULL,
rental_duration TINYINT UNSIGNED NOT NULL DEFAULT 3,
rental_rate DECIMAL(4,2) NOT NULL DEFAULT 4.99,
length SMALLINT UNSIGNED DEFAULT NULL,
replacement_cost DECIMAL(5,2) NOT NULL DEFAULT 19.99,
rating ENUM('G','PG','PG-13','R','NC-17') DEFAULT 'G',
special_features SET('Trailers','Commentaries','Deleted Scenes','Behind the Scenes') DEFAULT NULL,
last_update TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (film_id),
KEY idx_title (title),
KEY idx_fk_language_id (language_id),
KEY idx_fk_original_language_id (original_language_id),
CONSTRAINT fk_film_language FOREIGN KEY (language_id) REFERENCES language (language_id) ON DELETE RESTRICT ON UPDATE CASCADE,
CONSTRAINT fk_film_language_original FOREIGN KEY (original_language_id) REFERENCES language (language_id) ON DELETE RESTRICT ON UPDATE CASCADE
)ENGINE=InnoDB DEFAULT CHARSET=utf8;
--
-- Table structure for table `film_actor`
--
CREATE TABLE film_actor (
actor_id SMALLINT UNSIGNED NOT NULL,
film_id SMALLINT UNSIGNED NOT NULL,
last_update TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (actor_id,film_id),
KEY idx_fk_film_id (`film_id`),
CONSTRAINT fk_film_actor_actor FOREIGN KEY (actor_id) REFERENCES actor (actor_id) ON DELETE RESTRICT ON UPDATE CASCADE,
CONSTRAINT fk_film_actor_film FOREIGN KEY (film_id) REFERENCES film (film_id) ON DELETE RESTRICT ON UPDATE CASCADE
)ENGINE=InnoDB DEFAULT CHARSET=utf8;
--
-- Table structure for table `film_category`
--
CREATE TABLE film_category (
film_id SMALLINT UNSIGNED NOT NULL,
category_id TINYINT UNSIGNED NOT NULL,
last_update TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (film_id, category_id),
CONSTRAINT fk_film_category_film FOREIGN KEY (film_id) REFERENCES film (film_id) ON DELETE RESTRICT ON UPDATE CASCADE,
CONSTRAINT fk_film_category_category FOREIGN KEY (category_id) REFERENCES category (category_id) ON DELETE RESTRICT ON UPDATE CASCADE
)ENGINE=InnoDB DEFAULT CHARSET=utf8;
--
-- Table structure for table `film_text`
--
CREATE TABLE film_text (
film_id SMALLINT NOT NULL,
title VARCHAR(255) NOT NULL,
description TEXT,
PRIMARY KEY (film_id),
FULLTEXT KEY idx_title_description (title,description)
)ENGINE=MyISAM DEFAULT CHARSET=utf8;
-- After MySQL 5.6.10, InnoDB supports fulltext indexes
/*!50610 ALTER TABLE film_text engine=InnoDB */;
--
-- Triggers for loading film_text from film
--
DELIMITER ;;
CREATE TRIGGER `ins_film` AFTER INSERT ON `film` FOR EACH ROW BEGIN
INSERT INTO film_text (film_id, title, description)
VALUES (new.film_id, new.title, new.description);
END;;
CREATE TRIGGER `upd_film` AFTER UPDATE ON `film` FOR EACH ROW BEGIN
IF (old.title != new.title) OR (old.description != new.description) OR (old.film_id != new.film_id)
THEN
UPDATE film_text
SET title=new.title,
description=new.description,
film_id=new.film_id
WHERE film_id=old.film_id;
END IF;
END;;
CREATE TRIGGER `del_film` AFTER DELETE ON `film` FOR EACH ROW BEGIN
DELETE FROM film_text WHERE film_id = old.film_id;
END;;
DELIMITER ;
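-- Quick check that the triggers mirror film into film_text (assumes a language row
-- with language_id = 1; values are illustrative):
INSERT INTO film (title, language_id) VALUES ('TRIGGER TEST', 1);
SELECT title, description FROM film_text WHERE title = 'TRIGGER TEST';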
--
-- Table structure for table `inventory`
--
CREATE TABLE inventory (
inventory_id MEDIUMINT UNSIGNED NOT NULL AUTO_INCREMENT,
film_id SMALLINT UNSIGNED NOT NULL,
store_id TINYINT UNSIGNED NOT NULL,
last_update TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (inventory_id),
KEY idx_fk_film_id (film_id),
KEY idx_store_id_film_id (store_id,film_id),
CONSTRAINT fk_inventory_store FOREIGN KEY (store_id) REFERENCES store (store_id) ON DELETE RESTRICT ON UPDATE CASCADE,
CONSTRAINT fk_inventory_film FOREIGN KEY (film_id) REFERENCES film (film_id) ON DELETE RESTRICT ON UPDATE CASCADE
)ENGINE=InnoDB DEFAULT CHARSET=utf8;
--
-- Table structure for table `language`
--
CREATE TABLE language (
language_id TINYINT UNSIGNED NOT NULL AUTO_INCREMENT,
name CHAR(20) NOT NULL,
last_update TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (language_id)
)ENGINE=InnoDB DEFAULT CHARSET=utf8;
--
-- Table structure for table `payment`
--
CREATE TABLE payment (
payment_id SMALLINT UNSIGNED NOT NULL AUTO_INCREMENT,
customer_id SMALLINT UNSIGNED NOT NULL,
staff_id TINYINT UNSIGNED NOT NULL,
rental_id INT DEFAULT NULL,
amount DECIMAL(5,2) NOT NULL,
payment_date DATETIME NOT NULL,
last_update TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (payment_id),
KEY idx_fk_staff_id (staff_id),
KEY idx_fk_customer_id (customer_id),
CONSTRAINT fk_payment_rental FOREIGN KEY (rental_id) REFERENCES rental (rental_id) ON DELETE SET NULL ON UPDATE CASCADE,
CONSTRAINT fk_payment_customer FOREIGN KEY (customer_id) REFERENCES customer (customer_id) ON DELETE RESTRICT ON UPDATE CASCADE,
CONSTRAINT fk_payment_staff FOREIGN KEY (staff_id) REFERENCES staff (staff_id) ON DELETE RESTRICT ON UPDATE CASCADE
)ENGINE=InnoDB DEFAULT CHARSET=utf8;
--
-- Table structure for table `rental`
--
CREATE TABLE rental (
rental_id INT NOT NULL AUTO_INCREMENT,
rental_date DATETIME NOT NULL,
inventory_id MEDIUMINT UNSIGNED NOT NULL,
customer_id SMALLINT UNSIGNED NOT NULL,
return_date DATETIME DEFAULT NULL,
staff_id TINYINT UNSIGNED NOT NULL,
last_update TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (rental_id),
UNIQUE KEY (rental_date,inventory_id,customer_id),
KEY idx_fk_inventory_id (inventory_id),
KEY idx_fk_customer_id (customer_id),
KEY idx_fk_staff_id (staff_id),
CONSTRAINT fk_rental_staff FOREIGN KEY (staff_id) REFERENCES staff (staff_id) ON DELETE RESTRICT ON UPDATE CASCADE,
CONSTRAINT fk_rental_inventory FOREIGN KEY (inventory_id) REFERENCES inventory (inventory_id) ON DELETE RESTRICT ON UPDATE CASCADE,
CONSTRAINT fk_rental_customer FOREIGN KEY (customer_id) REFERENCES customer (customer_id) ON DELETE RESTRICT ON UPDATE CASCADE
)ENGINE=InnoDB DEFAULT CHARSET=utf8;
--
-- Table structure for table `staff`
--
CREATE TABLE staff (
staff_id TINYINT UNSIGNED NOT NULL AUTO_INCREMENT,
first_name VARCHAR(45) NOT NULL,
last_name VARCHAR(45) NOT NULL,
address_id SMALLINT UNSIGNED NOT NULL,
picture BLOB DEFAULT NULL,
email VARCHAR(50) DEFAULT NULL,
store_id TINYINT UNSIGNED NOT NULL,
active BOOLEAN NOT NULL DEFAULT TRUE,
username VARCHAR(16) NOT NULL,
password VARCHAR(40) BINARY DEFAULT NULL,
last_update TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (staff_id),
KEY idx_fk_store_id (store_id),
KEY idx_fk_address_id (address_id),
CONSTRAINT fk_staff_store FOREIGN KEY (store_id) REFERENCES store (store_id) ON DELETE RESTRICT ON UPDATE CASCADE,
CONSTRAINT fk_staff_address FOREIGN KEY (address_id) REFERENCES address (address_id) ON DELETE RESTRICT ON UPDATE CASCADE
)ENGINE=InnoDB DEFAULT CHARSET=utf8;
--
-- Table structure for table `store`
--
CREATE TABLE store (
store_id TINYINT UNSIGNED NOT NULL AUTO_INCREMENT,
manager_staff_id TINYINT UNSIGNED NOT NULL,
address_id SMALLINT UNSIGNED NOT NULL,
last_update TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (store_id),
UNIQUE KEY idx_unique_manager (manager_staff_id),
KEY idx_fk_address_id (address_id),
CONSTRAINT fk_store_staff FOREIGN KEY (manager_staff_id) REFERENCES staff (staff_id) ON DELETE RESTRICT ON UPDATE CASCADE,
CONSTRAINT fk_store_address FOREIGN KEY (address_id) REFERENCES address (address_id) ON DELETE RESTRICT ON UPDATE CASCADE
)ENGINE=InnoDB DEFAULT CHARSET=utf8;
--
-- View structure for view `customer_list`
--
CREATE VIEW customer_list
AS
SELECT cu.customer_id AS ID, CONCAT(cu.first_name, _utf8' ', cu.last_name) AS name, a.address AS address, a.postal_code AS `zip code`,
a.phone AS phone, city.city AS city, country.country AS country, IF(cu.active, _utf8'active',_utf8'') AS notes, cu.store_id AS SID
FROM customer AS cu JOIN address AS a ON cu.address_id = a.address_id JOIN city ON a.city_id = city.city_id
JOIN country ON city.country_id = country.country_id;
--
-- View structure for view `film_list`
--
CREATE VIEW film_list
AS
SELECT film.film_id AS FID, film.title AS title, film.description AS description, category.name AS category, film.rental_rate AS price,
film.length AS length, film.rating AS rating, GROUP_CONCAT(CONCAT(actor.first_name, _utf8' ', actor.last_name) SEPARATOR ', ') AS actors
FROM category LEFT JOIN film_category ON category.category_id = film_category.category_id LEFT JOIN film ON film_category.film_id = film.film_id
JOIN film_actor ON film.film_id = film_actor.film_id
JOIN actor ON film_actor.actor_id = actor.actor_id
GROUP BY film.film_id, category.name;
--
-- View structure for view `nicer_but_slower_film_list`
--
CREATE VIEW nicer_but_slower_film_list
AS
SELECT film.film_id AS FID, film.title AS title, film.description AS description, category.name AS category, film.rental_rate AS price,
film.length AS length, film.rating AS rating, GROUP_CONCAT(CONCAT(CONCAT(UCASE(SUBSTR(actor.first_name,1,1)),
LCASE(SUBSTR(actor.first_name,2,LENGTH(actor.first_name))),_utf8' ',CONCAT(UCASE(SUBSTR(actor.last_name,1,1)),
LCASE(SUBSTR(actor.last_name,2,LENGTH(actor.last_name)))))) SEPARATOR ', ') AS actors
FROM category LEFT JOIN film_category ON category.category_id = film_category.category_id LEFT JOIN film ON film_category.film_id = film.film_id
JOIN film_actor ON film.film_id = film_actor.film_id
JOIN actor ON film_actor.actor_id = actor.actor_id
GROUP BY film.film_id, category.name;
--
-- View structure for view `staff_list`
--
CREATE VIEW staff_list
AS
SELECT s.staff_id AS ID, CONCAT(s.first_name, _utf8' ', s.last_name) AS name, a.address AS address, a.postal_code AS `zip code`, a.phone AS phone,
city.city AS city, country.country AS country, s.store_id AS SID
FROM staff AS s JOIN address AS a ON s.address_id = a.address_id JOIN city ON a.city_id = city.city_id
JOIN country ON city.country_id = country.country_id;
--
-- View structure for view `sales_by_store`
--
CREATE VIEW sales_by_store
AS
SELECT
CONCAT(c.city, _utf8',', cy.country) AS store
, CONCAT(m.first_name, _utf8' ', m.last_name) AS manager
, SUM(p.amount) AS total_sales
FROM payment AS p
INNER JOIN rental AS r ON p.rental_id = r.rental_id
INNER JOIN inventory AS i ON r.inventory_id = i.inventory_id
INNER JOIN store AS s ON i.store_id = s.store_id
INNER JOIN address AS a ON s.address_id = a.address_id
INNER JOIN city AS c ON a.city_id = c.city_id
INNER JOIN country AS cy ON c.country_id = cy.country_id
INNER JOIN staff AS m ON s.manager_staff_id = m.staff_id
GROUP BY s.store_id
ORDER BY cy.country, c.city;
--
-- View structure for view `sales_by_film_category`
--
-- Note that total sales will add up to >100% because
-- some titles belong to more than 1 category
--
CREATE VIEW sales_by_film_category
AS
SELECT
c.name AS category
, SUM(p.amount) AS total_sales
FROM payment AS p
INNER JOIN rental AS r ON p.rental_id = r.rental_id
INNER JOIN inventory AS i ON r.inventory_id = i.inventory_id
INNER JOIN film AS f ON i.film_id = f.film_id
INNER JOIN film_category AS fc ON f.film_id = fc.film_id
INNER JOIN category AS c ON fc.category_id = c.category_id
GROUP BY c.name
ORDER BY total_sales DESC;
--
-- View structure for view `actor_info`
--
CREATE DEFINER=CURRENT_USER SQL SECURITY INVOKER VIEW actor_info
AS
SELECT
a.actor_id,
a.first_name,
a.last_name,
GROUP_CONCAT(DISTINCT CONCAT(c.name, ': ',
(SELECT GROUP_CONCAT(f.title ORDER BY f.title SEPARATOR ', ')
FROM sakila.film f
INNER JOIN sakila.film_category fc
ON f.film_id = fc.film_id
INNER JOIN sakila.film_actor fa
ON f.film_id = fa.film_id
WHERE fc.category_id = c.category_id
AND fa.actor_id = a.actor_id
)
)
ORDER BY c.name SEPARATOR '; ')
AS film_info
FROM sakila.actor a
LEFT JOIN sakila.film_actor fa
ON a.actor_id = fa.actor_id
LEFT JOIN sakila.film_category fc
ON fa.film_id = fc.film_id
LEFT JOIN sakila.category c
ON fc.category_id = c.category_id
GROUP BY a.actor_id, a.first_name, a.last_name;
--
-- Procedure structure for procedure `rewards_report`
--
DELIMITER //
CREATE PROCEDURE rewards_report (
IN min_monthly_purchases TINYINT UNSIGNED
, IN min_dollar_amount_purchased DECIMAL(10,2) UNSIGNED
, OUT count_rewardees INT
)
LANGUAGE SQL
NOT DETERMINISTIC
READS SQL DATA
SQL SECURITY DEFINER
COMMENT 'Provides a customizable report on best customers'
proc: BEGIN
DECLARE last_month_start DATE;
DECLARE last_month_end DATE;
/* Some sanity checks... */
IF min_monthly_purchases = 0 THEN
SELECT 'Minimum monthly purchases parameter must be > 0';
LEAVE proc;
END IF;
IF min_dollar_amount_purchased = 0.00 THEN
SELECT 'Minimum monthly dollar amount purchased parameter must be > $0.00';
LEAVE proc;
END IF;
/* Determine start and end time periods */
SET last_month_start = DATE_SUB(CURRENT_DATE(), INTERVAL 1 MONTH);
SET last_month_start = STR_TO_DATE(CONCAT(YEAR(last_month_start),'-',MONTH(last_month_start),'-01'),'%Y-%m-%d');
SET last_month_end = LAST_DAY(last_month_start);
/*
Create a temporary storage area for
Customer IDs.
*/
CREATE TEMPORARY TABLE tmpCustomer (customer_id SMALLINT UNSIGNED NOT NULL PRIMARY KEY);
/*
Find all customers meeting the
monthly purchase requirements
*/
INSERT INTO tmpCustomer (customer_id)
SELECT p.customer_id
FROM payment AS p
WHERE DATE(p.payment_date) BETWEEN last_month_start AND last_month_end
GROUP BY customer_id
HAVING SUM(p.amount) > min_dollar_amount_purchased
AND COUNT(customer_id) > min_monthly_purchases;
/* Populate OUT parameter with count of found customers */
SELECT COUNT(*) FROM tmpCustomer INTO count_rewardees;
/*
Output ALL customer information of matching rewardees.
Customize output as needed.
*/
SELECT c.*
FROM tmpCustomer AS t
INNER JOIN customer AS c ON t.customer_id = c.customer_id;
/* Clean up */
DROP TABLE tmpCustomer;
END //
DELIMITER ;
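-- A usage sketch (the threshold values are illustrative, not part of the original dump):
-- customers with more than 7 purchases totalling more than $7.77 last month.
CALL rewards_report(7, 7.77, @count_rewardees);
SELECT @count_rewardees;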
DELIMITER $$
CREATE FUNCTION get_customer_balance(p_customer_id INT, p_effective_date DATETIME) RETURNS DECIMAL(5,2)
DETERMINISTIC
READS SQL DATA
BEGIN
#OK, WE NEED TO CALCULATE THE CURRENT BALANCE GIVEN A CUSTOMER_ID AND A DATE
#THAT WE WANT THE BALANCE TO BE EFFECTIVE FOR. THE BALANCE IS:
# 1) RENTAL FEES FOR ALL PREVIOUS RENTALS
# 2) ONE DOLLAR FOR EVERY DAY THE PREVIOUS RENTALS ARE OVERDUE
# 3) IF A FILM IS MORE THAN RENTAL_DURATION * 2 OVERDUE, CHARGE THE REPLACEMENT_COST
# 4) SUBTRACT ALL PAYMENTS MADE BEFORE THE DATE SPECIFIED
DECLARE v_rentfees DECIMAL(5,2); #FEES PAID TO RENT THE VIDEOS INITIALLY
DECLARE v_overfees INTEGER; #LATE FEES FOR PRIOR RENTALS
DECLARE v_payments DECIMAL(5,2); #SUM OF PAYMENTS MADE PREVIOUSLY
SELECT IFNULL(SUM(film.rental_rate),0) INTO v_rentfees
FROM film, inventory, rental
WHERE film.film_id = inventory.film_id
AND inventory.inventory_id = rental.inventory_id
AND rental.rental_date <= p_effective_date
AND rental.customer_id = p_customer_id;
SELECT IFNULL(SUM(IF((TO_DAYS(rental.return_date) - TO_DAYS(rental.rental_date)) > film.rental_duration,
((TO_DAYS(rental.return_date) - TO_DAYS(rental.rental_date)) - film.rental_duration),0)),0) INTO v_overfees
FROM rental, inventory, film
WHERE film.film_id = inventory.film_id
AND inventory.inventory_id = rental.inventory_id
AND rental.rental_date <= p_effective_date
AND rental.customer_id = p_customer_id;
SELECT IFNULL(SUM(payment.amount),0) INTO v_payments
FROM payment
WHERE payment.payment_date <= p_effective_date
AND payment.customer_id = p_customer_id;
RETURN v_rentfees + v_overfees - v_payments;
END $$
DELIMITER ;
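-- A usage sketch, assuming customer_id 1 exists in the sample data:
SELECT get_customer_balance(1, NOW()) AS balance_today;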
DELIMITER $$
CREATE PROCEDURE film_in_stock(IN p_film_id INT, IN p_store_id INT, OUT p_film_count INT)
READS SQL DATA
BEGIN
SELECT inventory_id
FROM inventory
WHERE film_id = p_film_id
AND store_id = p_store_id
AND inventory_in_stock(inventory_id);
SELECT FOUND_ROWS() INTO p_film_count;
END $$
DELIMITER ;
DELIMITER $$
CREATE PROCEDURE film_not_in_stock(IN p_film_id INT, IN p_store_id INT, OUT p_film_count INT)
READS SQL DATA
BEGIN
SELECT inventory_id
FROM inventory
WHERE film_id = p_film_id
AND store_id = p_store_id
AND NOT inventory_in_stock(inventory_id);
SELECT FOUND_ROWS() INTO p_film_count;
END $$
DELIMITER ;
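-- A usage sketch for both procedures (the film_id/store_id values are assumptions):
-- each lists the matching inventory_id rows and returns the count via the OUT parameter.
CALL film_in_stock(1, 1, @film_count);
SELECT @film_count AS copies_in_stock;
CALL film_not_in_stock(1, 1, @film_count);
SELECT @film_count AS copies_not_in_stock;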
DELIMITER $$
CREATE FUNCTION inventory_held_by_customer(p_inventory_id INT) RETURNS INT
READS SQL DATA
BEGIN
DECLARE v_customer_id INT;
DECLARE EXIT HANDLER FOR NOT FOUND RETURN NULL;
SELECT customer_id INTO v_customer_id
FROM rental
WHERE return_date IS NULL
AND inventory_id = p_inventory_id;
RETURN v_customer_id;
END $$
DELIMITER ;
DELIMITER $$
CREATE FUNCTION inventory_in_stock(p_inventory_id INT) RETURNS BOOLEAN
READS SQL DATA
BEGIN
DECLARE v_rentals INT;
DECLARE v_out INT;
#AN ITEM IS IN-STOCK IF THERE ARE EITHER NO ROWS IN THE rental TABLE
#FOR THE ITEM OR ALL ROWS HAVE return_date POPULATED
SELECT COUNT(*) INTO v_rentals
FROM rental
WHERE inventory_id = p_inventory_id;
IF v_rentals = 0 THEN
RETURN TRUE;
END IF;
SELECT COUNT(rental_id) INTO v_out
FROM inventory LEFT JOIN rental USING(inventory_id)
WHERE inventory.inventory_id = p_inventory_id
AND rental.return_date IS NULL;
IF v_out > 0 THEN
RETURN FALSE;
ELSE
RETURN TRUE;
END IF;
END $$
DELIMITER ;
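-- A quick check sketch (inventory_id 1 is an assumption):
SELECT inventory_in_stock(1) AS in_stock,
       inventory_held_by_customer(1) AS held_by; -- NULL when the item is not rented out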
SET SQL_MODE=@OLD_SQL_MODE;
SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS;
SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS;
-- Sample employee database
-- See changelog table for details
-- Copyright (C) 2007,2008, MySQL AB
--
-- Original data created by Fusheng Wang and Carlo Zaniolo
-- http://www.cs.aau.dk/TimeCenter/software.htm
-- http://www.cs.aau.dk/TimeCenter/Data/employeeTemporalDataSet.zip
--
-- Current schema by Giuseppe Maxia
-- Data conversion from XML to relational by Patrick Crews
--
-- This work is licensed under the
-- Creative Commons Attribution-Share Alike 3.0 Unported License.
-- To view a copy of this license, visit
-- http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to
-- Creative Commons, 171 Second Street, Suite 300, San Francisco,
-- California, 94105, USA.
--
-- DISCLAIMER
-- To the best of our knowledge, this data is fabricated, and
-- it does not correspond to real people.
-- Any similarity to existing people is purely coincidental.
--
DROP DATABASE IF EXISTS employees;
CREATE DATABASE IF NOT EXISTS employees;
USE employees;
SELECT 'CREATING DATABASE STRUCTURE' as 'INFO';
DROP TABLE IF EXISTS dept_emp,
dept_manager,
titles,
salaries,
employees,
departments;
/*!50503 set default_storage_engine = InnoDB */;
/*!50503 select CONCAT('storage engine: ', @@default_storage_engine) as INFO */;
CREATE TABLE employees (
emp_no INT NOT NULL,
birth_date DATE NOT NULL,
first_name VARCHAR(14) NOT NULL,
last_name VARCHAR(16) NOT NULL,
gender ENUM ('M','F') NOT NULL,
hire_date DATE NOT NULL,
PRIMARY KEY (emp_no)
);
CREATE TABLE departments (
dept_no CHAR(4) NOT NULL,
dept_name VARCHAR(40) NOT NULL,
PRIMARY KEY (dept_no),
UNIQUE KEY (dept_name)
);
CREATE TABLE dept_manager (
emp_no INT NOT NULL,
dept_no CHAR(4) NOT NULL,
from_date DATE NOT NULL,
to_date DATE NOT NULL,
FOREIGN KEY (emp_no) REFERENCES employees (emp_no) ON DELETE CASCADE,
FOREIGN KEY (dept_no) REFERENCES departments (dept_no) ON DELETE CASCADE,
PRIMARY KEY (emp_no,dept_no)
);
CREATE TABLE dept_emp (
emp_no INT NOT NULL,
dept_no CHAR(4) NOT NULL,
from_date DATE NOT NULL,
to_date DATE NOT NULL,
FOREIGN KEY (emp_no) REFERENCES employees (emp_no) ON DELETE CASCADE,
FOREIGN KEY (dept_no) REFERENCES departments (dept_no) ON DELETE CASCADE,
PRIMARY KEY (emp_no,dept_no)
);
CREATE TABLE titles (
emp_no INT NOT NULL,
title VARCHAR(50) NOT NULL,
from_date DATE NOT NULL,
to_date DATE,
# FOREIGN KEY (emp_no) REFERENCES employees (emp_no) ON DELETE CASCADE,
PRIMARY KEY (emp_no,title, from_date)
);
/*!50130
ALTER TABLE titles
partition by range (to_days(from_date))
(
partition p01 values less than (to_days('1985-12-31')),
partition p02 values less than (to_days('1986-12-31')),
partition p03 values less than (to_days('1987-12-31')),
partition p04 values less than (to_days('1988-12-31')),
partition p05 values less than (to_days('1989-12-31')),
partition p06 values less than (to_days('1990-12-31')),
partition p07 values less than (to_days('1991-12-31')),
partition p08 values less than (to_days('1992-12-31')),
partition p09 values less than (to_days('1993-12-31')),
partition p10 values less than (to_days('1994-12-31')),
partition p11 values less than (to_days('1995-12-31')),
partition p12 values less than (to_days('1996-12-31')),
partition p13 values less than (to_days('1997-12-31')),
partition p14 values less than (to_days('1998-12-31')),
partition p15 values less than (to_days('1999-12-31')),
partition p16 values less than (to_days('2000-12-31')),
partition p17 values less than (to_days('2001-12-31')),
partition p18 values less than (to_days('2002-12-31')),
partition p19 values less than (to_days('3000-12-31'))
) */;
CREATE TABLE salaries (
emp_no INT NOT NULL,
salary INT NOT NULL,
from_date DATE NOT NULL,
to_date DATE NOT NULL,
# FOREIGN KEY (emp_no) REFERENCES employees (emp_no) ON DELETE CASCADE,
PRIMARY KEY (emp_no, from_date)
);
/*!50130
ALTER TABLE salaries
partition by range (to_days(from_date))
(
partition p01 values less than (to_days('1985-01-01')),
partition p02 values less than (to_days('1986-01-01')),
partition p03 values less than (to_days('1987-01-01')),
partition p04 values less than (to_days('1988-01-01')),
partition p05 values less than (to_days('1989-01-01')),
partition p06 values less than (to_days('1990-01-01')),
partition p07 values less than (to_days('1991-01-01')),
partition p08 values less than (to_days('1992-01-01')),
partition p09 values less than (to_days('1993-01-01')),
partition p10 values less than (to_days('1994-01-01')),
partition p11 values less than (to_days('1995-01-01')),
partition p12 values less than (to_days('1996-01-01')),
partition p13 values less than (to_days('1997-01-01')),
partition p14 values less than (to_days('1998-01-01')),
partition p15 values less than (to_days('1999-01-01')),
partition p16 values less than (to_days('2000-01-01')),
partition p17 values less than (to_days('2001-01-01')),
partition p18 values less than (to_days('2001-02-01')),
partition p19 values less than (to_days('2001-03-01')),
partition p20 values less than (to_days('2001-04-01')),
partition p21 values less than (to_days('2001-05-01')),
partition p22 values less than (to_days('2001-06-01')),
partition p23 values less than (to_days('2001-07-01')),
partition p24 values less than (to_days('2001-08-01')),
partition p25 values less than (to_days('2001-09-01')),
partition p26 values less than (to_days('2001-10-01')),
partition p27 values less than (to_days('2001-11-01')),
partition p28 values less than (to_days('2001-12-01')),
partition p29 values less than (to_days('2002-01-01')),
partition p30 values less than (to_days('2002-02-01')),
partition p31 values less than (to_days('2002-03-01')),
partition p32 values less than (to_days('2002-04-01')),
partition p33 values less than (to_days('2002-05-01')),
partition p34 values less than (to_days('2002-06-01')),
partition p35 values less than (to_days('2002-07-01')),
partition p36 values less than (to_days('2002-08-01')),
partition p37 values less than (to_days('2002-09-01')),
partition p38 values less than (to_days('2002-10-01')),
partition p39 values less than (to_days('2002-11-01')),
partition p40 values less than (to_days('2002-12-01')),
partition p41 values less than (to_days('3000-01-01'))
)
*/;
CREATE OR REPLACE VIEW dept_emp_latest_date AS
SELECT emp_no, MAX(from_date) AS from_date, MAX(to_date) AS to_date
FROM dept_emp
GROUP BY emp_no;
# shows only the current department for each employee
CREATE OR REPLACE VIEW current_dept_emp AS
SELECT l.emp_no, dept_no, l.from_date, l.to_date
FROM dept_emp d
INNER JOIN dept_emp_latest_date l
ON d.emp_no=l.emp_no AND d.from_date=l.from_date AND l.to_date = d.to_date;
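# usage sketch (assumes the standard sample data, where emp_no starts at 10001):
# SELECT * FROM current_dept_emp WHERE emp_no = 10001;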
flush /*!50503 binary */ logs;
SELECT 'LOADING departments' as 'INFO';
source load_departments.dump ;
SELECT 'LOADING employees' as 'INFO';
source load_employees.dump ;
SELECT 'LOADING dept_emp' as 'INFO';
source load_dept_emp.dump ;
SELECT 'LOADING dept_manager' as 'INFO';
source load_dept_manager.dump ;
SELECT 'LOADING titles' as 'INFO';
source load_titles.dump ;
SELECT 'LOADING salaries' as 'INFO';
source load_salaries1.dump ;
source load_salaries2.dump ;
source load_salaries3.dump ;
source show_elapsed.sql ;
use employees;
delimiter //
drop function if exists emp_dept_id //
drop function if exists emp_dept_name //
drop function if exists emp_name //
drop function if exists current_manager //
drop procedure if exists show_departments //
--
-- returns the department id of a given employee
--
create function emp_dept_id( employee_id int )
returns char(4)
reads sql data
begin
declare max_date date;
set max_date = (
select
max(from_date)
from
dept_emp
where
emp_no = employee_id
);
set @max_date=max_date;
return (
select
dept_no
from
dept_emp
where
emp_no = employee_id
and
from_date = max_date
limit 1
);
end //
--
-- returns the department name of a given employee
--
create function emp_dept_name( employee_id int )
returns varchar(40)
reads sql data
begin
return (
select
dept_name
from
departments
where
dept_no = emp_dept_id(employee_id)
);
end//
--
-- returns the employee name of a given employee id
--
create function emp_name (employee_id int)
returns varchar(32)
reads SQL data
begin
return (
select
concat(first_name, ' ', last_name) as name
from
employees
where
emp_no = employee_id
);
end//
--
-- returns the manager of a department
-- choosing the most recent one
-- from the manager list
--
create function current_manager( dept_id char(4) )
returns varchar(32)
reads sql data
begin
declare max_date date;
set max_date = (
select
max(from_date)
from
dept_manager
where
dept_no = dept_id
);
set @max_date=max_date;
return (
select
emp_name(emp_no)
from
dept_manager
where
dept_no = dept_id
and
from_date = max_date
limit 1
);
end //
delimiter ;
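--
-- usage sketch for the helper functions above
-- (emp_no 10001 is an assumption based on the standard sample data)
--
-- SELECT emp_name(10001) AS employee,
--        emp_dept_id(10001) AS dept,
--        emp_dept_name(10001) AS dept_name,
--        current_manager(emp_dept_id(10001)) AS manager;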
--
-- selects the employee records with the
-- latest department
--
CREATE OR REPLACE VIEW v_full_employees
AS
SELECT
emp_no,
first_name , last_name ,
birth_date , gender,
hire_date,
emp_dept_name(emp_no) as department
from
employees;
--
-- selects the department list with manager names
--
CREATE OR REPLACE VIEW v_full_departments
AS
SELECT
dept_no, dept_name, current_manager(dept_no) as manager
FROM
departments;
delimiter //
--
-- shows the departments with the number of employees
-- per department
--
create procedure show_departments()
modifies sql data
begin
DROP TABLE IF EXISTS department_max_date;
DROP TABLE IF EXISTS department_people;
CREATE TEMPORARY TABLE department_max_date
(
emp_no int not null primary key,
dept_from_date date not null,
dept_to_date date not null, # bug#320513
KEY (dept_from_date, dept_to_date)
);
INSERT INTO department_max_date
SELECT
emp_no, max(from_date), max(to_date)
FROM
dept_emp
GROUP BY
emp_no;
CREATE TEMPORARY TABLE department_people
(
emp_no int not null,
dept_no char(4) not null,
primary key (emp_no, dept_no)
);
insert into department_people
select dmd.emp_no, dept_no
from
department_max_date dmd
inner join dept_emp de
on dmd.dept_from_date=de.from_date
and dmd.dept_to_date=de.to_date
and dmd.emp_no=de.emp_no;
SELECT
dept_no,dept_name,manager, count(*)
from v_full_departments
inner join department_people using (dept_no)
group by dept_no;
# with rollup;
DROP TABLE department_max_date;
DROP TABLE department_people;
end //
drop function if exists employees_usage //
drop procedure if exists employees_help //
CREATE FUNCTION employees_usage ()
RETURNS TEXT
DETERMINISTIC
BEGIN
RETURN
'
== USAGE ==
====================
PROCEDURE show_departments()
shows the departments with the manager and
number of employees per department
FUNCTION current_manager (dept_id)
Shows who is the manager of a given departmennt
FUNCTION emp_name (emp_id)
Shows name and surname of a given employee
FUNCTION emp_dept_id (emp_id)
Shows the current department of given employee
';
END //
create procedure employees_help()
deterministic
begin
select employees_usage() as info;
end//
delimiter ;
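-- usage sketch: print the built-in help, then the per-department summary
-- CALL employees_help();
-- CALL show_departments();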
--
-- Installation test for the employee sample database
--
USE employees;
SELECT 'TESTING INSTALLATION' as 'INFO';
DROP TABLE IF EXISTS expected_values, found_values;
CREATE TABLE expected_values (
table_name varchar(30) not null primary key,
recs int not null,
crc_sha varchar(100) not null,
crc_md5 varchar(100) not null
);
CREATE TABLE found_values LIKE expected_values;
INSERT INTO `expected_values` VALUES
('employees', 300024,'4d4aa689914d8fd41db7e45c2168e7dcb9697359',
'4ec56ab5ba37218d187cf6ab09ce1aa1'),
('departments', 9,'4b315afa0e35ca6649df897b958345bcb3d2b764',
'd1af5e170d2d1591d776d5638d71fc5f'),
('dept_manager', 24,'9687a7d6f93ca8847388a42a6d8d93982a841c6c',
'8720e2f0853ac9096b689c14664f847e'),
('dept_emp', 331603, 'd95ab9fe07df0865f592574b3b33b9c741d9fd1b',
'ccf6fe516f990bdaa49713fc478701b7'),
('titles', 443308,'d12d5f746b88f07e69b9e36675b6067abb01b60e',
'bfa016c472df68e70a03facafa1bc0a8'),
('salaries', 2844047,'b5a1785c27d75e33a4173aaa22ccf41ebd7d4a9f',
'fd220654e95aea1b169624ffe3fca934');
SELECT table_name, recs AS expected_records, crc_md5 AS expected_crc FROM expected_values;
DROP TABLE IF EXISTS tchecksum;
CREATE TABLE tchecksum (chk char(100));
SET @crc= '';
INSERT INTO tchecksum
SELECT @crc := MD5(CONCAT_WS('#',@crc,
emp_no,birth_date,first_name,last_name,gender,hire_date))
FROM employees ORDER BY emp_no;
INSERT INTO found_values VALUES ('employees', (SELECT COUNT(*) FROM employees), @crc,@crc);
SET @crc = '';
INSERT INTO tchecksum
SELECT @crc := MD5(CONCAT_WS('#',@crc, dept_no,dept_name))
FROM departments ORDER BY dept_no;
INSERT INTO found_values values ('departments', (SELECT COUNT(*) FROM departments), @crc,@crc);
SET @crc = '';
INSERT INTO tchecksum
SELECT @crc := MD5(CONCAT_WS('#',@crc, dept_no,emp_no, from_date,to_date))
FROM dept_manager ORDER BY dept_no,emp_no;
INSERT INTO found_values values ('dept_manager', (SELECT COUNT(*) FROM dept_manager), @crc,@crc);
SET @crc = '';
INSERT INTO tchecksum
SELECT @crc := MD5(CONCAT_WS('#',@crc, dept_no,emp_no, from_date,to_date))
FROM dept_emp ORDER BY dept_no,emp_no;
INSERT INTO found_values values ('dept_emp', (SELECT COUNT(*) FROM dept_emp), @crc,@crc);
SET @crc = '';
INSERT INTO tchecksum
SELECT @crc := MD5(CONCAT_WS('#',@crc, emp_no, title, from_date,to_date))
FROM titles order by emp_no,title,from_date;
INSERT INTO found_values values ('titles', (SELECT COUNT(*) FROM titles), @crc,@crc);
SET @crc = '';
INSERT INTO tchecksum
SELECT @crc := MD5(CONCAT_WS('#',@crc, emp_no, salary, from_date,to_date))
FROM salaries order by emp_no,from_date,to_date;
INSERT INTO found_values values ('salaries', (SELECT COUNT(*) FROM salaries), @crc,@crc);
DROP TABLE tchecksum;
SELECT table_name, recs as 'found_records', crc_md5 as found_crc from found_values;
SELECT
e.table_name,
IF(e.recs=f.recs,'OK', 'not ok') AS records_match,
IF(e.crc_md5=f.crc_md5,'ok','not ok') AS crc_match
from
expected_values e INNER JOIN found_values f USING (table_name);
set @crc_fail=(select count(*) from expected_values e inner join found_values f on (e.table_name=f.table_name) where f.crc_md5 != e.crc_md5);
set @count_fail=(select count(*) from expected_values e inner join found_values f on (e.table_name=f.table_name) where f.recs != e.recs);
select timediff(
now(),
(select create_time from information_schema.tables where table_schema='employees' and table_name='expected_values')
) as computation_time;
DROP TABLE expected_values,found_values;
select 'CRC' as summary, if(@crc_fail = 0, "OK", "FAIL" ) as 'result'
union all
select 'count', if(@count_fail = 0, "OK", "FAIL" );
--==================================================================================
psql -c "CREATE USER admin WITH PASSWORD 'test101';"
sudo -u postgres bash -c "psql -c \"CREATE USER vagrant WITH PASSWORD 'vagrant';\""
su -c "psql -c \"CREATE ROLE my_user WITH LOGIN PASSWORD 'my_password' \"" postgres
docker exec -it pgmaster su - postgres -c psql
--==================================================================================
# usage:
# mysql -u root -p < st_insert_user.sql
DELIMITER $$
USE desktop_cloud$$
DROP PROCEDURE IF EXISTS sp_insert_into_radcheck$$
CREATE PROCEDURE sp_insert_into_radcheck(
IN p_username VARCHAR(64),
IN p_attribute VARCHAR(64),
IN p_op VARCHAR(2),
IN p_value VARCHAR(256)
)
BEGIN
INSERT INTO radcheck
(
userName,
attribute,
op,
value
)
VALUES
(
p_username,
p_attribute,
p_op,
p_value
);
END$$
DELIMITER ;
--==================================================================================
# usage:
# mysql -u root -p < st_insert_user.sql
DELIMITER $$
USE desktop_cloud$$
DROP PROCEDURE IF EXISTS sp_insert_into_activeusers$$
CREATE PROCEDURE sp_insert_into_activeusers
(
IN p_username VARCHAR(16),
IN p_password VARCHAR(16)
)
BEGIN
INSERT INTO activeusers
(
username,
password
)
VALUES
(
p_username,
p_password
);
END$$
DELIMITER ;
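-- usage sketch for the two procedures above; the FreeRADIUS-style
-- attribute/op values shown here are assumptions, adapt to your schema:
CALL sp_insert_into_radcheck('jdoe', 'Cleartext-Password', ':=', 's3cret');
CALL sp_insert_into_activeusers('jdoe', 's3cret');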
--==================================================================================
SELECT task,date,priority,status,first_name,last_name
FROM tasks t
LEFT JOIN people p ON t.name=p.name
WHERE priority=2
ORDER BY date
LIMIT 1;
-- the same lookup with coreutils over ':'-delimited flat files (tasks, people):
join -t':' -1 5 -2 1 tasks people \
| awk -F':' '{print $2":"$3":"$4":"$5":"$6" "$7}' \
| grep ':2:' \
| sort -t ':' -k2 \
| head -1
create database userdb;
use userdb;
-- Drop table emp if exists
DROP TABLE IF EXISTS emp;
-- Drop table emp_add if exists
DROP TABLE IF EXISTS emp_add;
-- Drop table emp_contact if exists
DROP TABLE IF EXISTS emp_contact;
-- Creates table emp
CREATE TABLE emp (
 id INT,
 name VARCHAR(20),
 deg VARCHAR(20),
 salary DOUBLE,
 dept VARCHAR(10)
);
-- Creates table emp_add
CREATE TABLE emp_add (
 id INT,
 hno VARCHAR(10),
 street VARCHAR(20),
 city VARCHAR(20)
);
-- Creates table emp_contact to hold manipulated exported data
CREATE TABLE emp_contact (
 id INT,
 phone VARCHAR(20),
 email VARCHAR(50)
);
---------------------------- Import demo ----------------------------
Import syntax:
$ sqoop import (generic-args) (import-args)
$ sqoop-import (generic-args) (import-args)
The following command imports the emp table from the MySQL database server into HDFS.
$ sqoop import \
--connect jdbc:mysql://localhost/userdb \
--username root \
--table emp --m 1
---------------------------------------------------------
When importing table data into HDFS with the Sqoop import tool, a target directory can be specified.
The syntax of the target directory option is:
--target-dir <new or existing directory in HDFS>
The following command imports the emp_add table data into the '/queryresult' directory;
with a single map task (--m 1) the query is executed once and imported serially.
$ sqoop import \
--connect jdbc:mysql://localhost/userdb \
--username root \
--table emp_add \
--m 1 \
--target-dir /queryresult
A subset of rows can be imported with the "where" clause: Sqoop executes the corresponding SQL query on the database server and stores the result in the HDFS target directory.
The following command imports the subset of the emp_add table holding the IDs and addresses of employees living in the city Secunderabad.
$ sqoop import \
--connect jdbc:mysql://localhost/userdb \
--username root \
--table emp_add \
--m 1 \
--where "city = 'sec-bad'" \
--target-dir /wherequery
------------------------------------------------
Incremental import
Incremental import brings in only the newly added rows of a table. It requires the 'incremental', 'check-column', and 'last-value' options.
The syntax of the incremental options for the Sqoop import command is:
--incremental <mode>
--check-column <column name>
--last-value <last check column value>
Assume newly added data in the emp table such as:
1206, satish p, grp des, 20000, GR
$ sqoop import \
--connect jdbc:mysql://localhost/userdb \
--username root \
--table emp \
--m 1 \
--incremental append \
--check-column id \
--last-value 1205
----------------------------------------------------------------------------
The following syntax is used to import all tables:
$ sqoop import-all-tables (generic-args) (import-args)
$ sqoop-import-all-tables (generic-args) (import-args)
$ sqoop import-all-tables \
--connect jdbc:mysql://localhost/userdb \
--username root
-------------------------------------------------
Export command syntax:
$ sqoop export (generic-args) (export-args)
$ sqoop-export (generic-args) (export-args)
The following command exports the table data in the HDFS file /emp/emp_data to the employee table of the db database on the MySQL server.
$ sqoop export \
--connect jdbc:mysql://localhost/db \
--username root \
--table employee \
--export-dir /emp/emp_data
-----------------------------------------------------------------------------
A Sqoop job creates and saves import and export commands; saved jobs are identified and recalled by name. Re-executing a saved job is used with incremental imports to bring updated rows from an RDBMS table into HDFS.
Syntax for creating a Sqoop job:
$ sqoop job (generic-args) (job-args)
[-- [subtool-name] (subtool-args)]
$ sqoop-job (generic-args) (job-args)
[-- [subtool-name] (subtool-args)]
Creating a job (--create)
Here we create a job named myjob that imports data from an RDBMS table into HDFS.
The following command creates a job that imports the employee table of the db database into HDFS.
$ sqoop job --create myjob \
--import \
--connect jdbc:mysql://localhost/db \
--username root \
--table employee --m 1
Listing saved jobs (--list)
The --list argument lists the saved jobs:
$ sqoop job --list
Inspecting a job (--show)
The --show argument inspects a particular job and its details. The following command shows the job named myjob:
$ sqoop job --show myjob
Executing a job (--exec)
The --exec option executes a saved job. The following command executes the saved job named myjob:
$ sqoop job --exec myjob
--------------------------------------------------------------------------
From an object-oriented point of view, every database table corresponds to a DAO class with "setter" and "getter" methods to initialize its objects.
The codegen tool generates such a DAO class automatically: a Java class derived from the table schema, the same class definition produced as part of the import process.
Its main use is to check whether the generated Java code has been lost; if so, it creates a fresh version of the class with the default delimiters between fields.
Syntax of the Sqoop codegen command:
$ sqoop codegen (generic-args) (codegen-args)
$ sqoop-codegen (generic-args) (codegen-args)
As an example, let us generate Java code for the emp table in the userdb database.
The following command runs this example:
$ sqoop codegen \
--connect jdbc:mysql://localhost/userdb \
--username root \
--table emp
---------------------------------------------------------------------------
The Sqoop eval tool lets users run their own queries against a database server and preview the result in the console,
so they can inspect the table data they are about to import.
With eval we can evaluate any SQL query, DDL or DML.
Syntax of the Sqoop eval command:
$ sqoop eval (generic-args) (eval-args)
$ sqoop-eval (generic-args) (eval-args)
Evaluating a SELECT query
Using the eval tool we can evaluate any SQL query. Here we select three rows from the employee table of the db database:
$ sqoop eval \
--connect jdbc:mysql://localhost/db \
--username root \
--query "SELECT * FROM employee LIMIT 3"
The eval tool also handles DML, so it can run INSERT statements. The following command inserts a new row into the employee table of the db database:
$ sqoop eval \
--connect jdbc:mysql://localhost/db \
--username root \
-e "INSERT INTO employee VALUES(1207,'Raju','UI dev',15000,'TP')"
------------------------------------------------------------------------------------------
Syntax of the Sqoop list-databases command:
$ sqoop list-databases (generic-args) (list-databases-args)
$ sqoop-list-databases (generic-args) (list-databases-args)
The following command lists all databases on the MySQL database server:
$ sqoop list-databases \
--connect jdbc:mysql://localhost/ \
--username root
--------------------------------------------------------------------
The Sqoop list-tables tool parses and executes a "SHOW TABLES" query against a particular database, then lists the tables present in that database.
Syntax of the Sqoop list-tables command:
$ sqoop list-tables (generic-args) (list-tables-args)
$ sqoop-list-tables (generic-args) (list-tables-args)
Example: the following command lists all tables in the userdb database on the MySQL server:
$ sqoop list-tables \
--connect jdbc:mysql://localhost/userdb \
--username root
--================================================
SELECT a.attname, s.stadistinct, s.stanullfrac, s.stawidth
FROM pg_statistic s, pg_attribute a
WHERE
s.starelid = 16395 AND -- OID of the table of interest (e.g. 'my_table'::regclass)
s.starelid = a.attrelid AND
s.staattnum = a.attnum AND
a.attname IN ('x1', 'x2', 'x3', 'x4');
--================================================
SET max_parallel_workers = 32; -- set before the instance starts
SET max_parallel_workers_per_gather = 16;
SET parallel_setup_cost = 0.001;
SET parallel_tuple_cost = 0.0001;
SET min_parallel_table_scan_size = 0;
--================================================
CREATE TABLE t1 (x1 integer, x2 integer,x3 integer,x4 integer);
CREATE TABLE t2 (x1 integer, x2 integer,x3 integer,x4 integer);
INSERT INTO t1 (x1,x2,x3,x4) SELECT value%2,value%10,value%20,value%100
FROM generate_series(1,1000) AS value;
INSERT INTO t2 (x1,x2,x3,x4) SELECT value%2,value%10,value%20,value%100
FROM generate_series(1,1000) AS value;
CREATE INDEX t1_idx ON t1 (x1,x2,x3,x4);
CREATE INDEX t2_idx ON t2 (x1,x2,x3,x4);
VACUUM ANALYZE t1,t2;
EXPLAIN (ANALYZE, COSTS ON, BUFFERS OFF)
SELECT * FROM t1 WHERE x1=1 AND x2=1 AND x3=1 AND x4=1;
EXPLAIN (ANALYZE, COSTS ON, BUFFERS OFF)
SELECT * FROM t1 JOIN t2
ON (t1.x1=t2.x1 AND t1.x2=t2.x2 AND t1.x3=t2.x3 AND t1.x4=t2.x4);
--================================================
CREATE TEMP TABLE t1 (x numeric PRIMARY KEY, payload text);
CREATE TEMP TABLE t2 (x numeric, y numeric PRIMARY KEY);
CREATE TEMP TABLE t3 (x numeric, payload text);
INSERT INTO t1 (x, payload)
(SELECT gs, 'long line of text'
FROM generate_series(1,1E5) AS gs);
INSERT INTO t2 (x,y)
(SELECT gs%10, gs FROM generate_series(1,1E6) AS gs);
INSERT INTO t3 (x, payload)
(SELECT -(gs%10)+2, 'long line of text'
FROM generate_series(1,1E5) AS gs);
VACUUM ANALYZE t1,t2,t3;
EXPLAIN (COSTS OFF, ANALYZE, BUFFERS OFF, TIMING OFF)
SELECT * FROM t3 WHERE x IN (
SELECT y FROM t2 WHERE x IN (
SELECT x FROM t1)
);
--================================================
CREATE TABLE t1 (x numeric PRIMARY KEY, payload text);
CREATE TABLE t2 (x numeric, y numeric PRIMARY KEY);
CREATE TABLE t3 (x numeric, payload text);
INSERT INTO t1 (x, payload)
(SELECT value, 'long line of text'
FROM generate_series(1,100000));
INSERT INTO t2 (x,y)
(SELECT value % 10, value FROM generate_series(1,1000000));
INSERT INTO t3 (x, payload)
(SELECT -(value % 10) + 2, 'long line of text'
FROM generate_series(1,100000));
ALTER DATABASE SCOPED CONFIGURATION SET MAXDOP = 1;
SELECT * FROM t3 WHERE x IN (
SELECT y FROM t2 WHERE x IN (
SELECT t1.x FROM t1)
)
OPTION (USE HINT('DISABLE_BATCH_MODE_ADAPTIVE_JOINS'), LOOP JOIN);
--================================================
CREATE TABLE t1 (x1 integer, x2 text);
CREATE TABLE t2 (y1 integer, y2 text);
CREATE INDEX t1_idx0 ON t1 (x1);
CREATE INDEX t2_idx0 ON t2 (y1);
INSERT INTO t1 (x1,x2)
SELECT value % 100, 'long line of text number 1'
FROM GENERATE_SERIES(1, 10000) AS value;
INSERT INTO t2 (y1,y2)
SELECT value % 10000, 'long line of text number 2'
FROM GENERATE_SERIES(1, 30000) AS value;
VACUUM ANALYZE t1, t2;
EXPLAIN (COSTS OFF, ANALYZE, BUFFERS OFF, TIMING OFF)
SELECT * FROM
(SELECT x1 FROM t1 GROUP BY x1) AS q1(x1)
JOIN
(SELECT y1 FROM t2 GROUP BY y1) AS q2(y1)
ON q2.y1 = q1.x1;
SET enable_material = f;
SET enable_hashjoin = f;
SET enable_mergejoin = f;
EXPLAIN (COSTS OFF, ANALYZE, BUFFERS OFF, TIMING OFF)
SELECT * FROM
(SELECT x1 FROM t1 GROUP BY x1) AS q1(x1),
LATERAL (SELECT y1 FROM t2 WHERE t2.y1=q1.x1 GROUP BY y1) AS q2(y1);
--===========================================================================
DROP TABLE IF EXISTS public.fk1010_1 CASCADE;
CREATE TABLE public.fk1010_1
(
id integer NOT NULL,
i1 integer NOT NULL,
i2 integer NOT NULL,
CONSTRAINT fk1010_1_pk PRIMARY KEY (id),
CONSTRAINT fk1010_1_unique_id_i1 UNIQUE (id, i1),
CONSTRAINT fk1010_1_unique_id_i2 UNIQUE (id, i2),
CONSTRAINT fk1010_1_unique_i2 UNIQUE (i2)
);
DROP TABLE IF EXISTS public.fk1010_1_fk;
CREATE TABLE public.fk1010_1_fk
(
id integer GENERATED ALWAYS AS IDENTITY NOT NULL,
fk1010_1_id integer NOT NULL,
fk1010_1_i1 integer NOT NULL,
fk1010_1_i2 integer NOT NULL,
CONSTRAINT fk1010_1_fk_pk PRIMARY KEY (id),
CONSTRAINT fk1010_1_fk_fk1010_1_id FOREIGN KEY (fk1010_1_id) REFERENCES public.fk1010_1(id),
CONSTRAINT fk1010_1_fk_fk1010_1_id_i1 FOREIGN KEY (fk1010_1_id, fk1010_1_i1) REFERENCES public.fk1010_1(id, i1),
CONSTRAINT fk1010_1_fk_fk1010_1_id_i2 FOREIGN KEY (fk1010_1_id, fk1010_1_i2) REFERENCES public.fk1010_1(id, i2),
CONSTRAINT fk1010_1_fk_fk1010_1_id_i2_copy FOREIGN KEY (fk1010_1_id, fk1010_1_i2) REFERENCES public.fk1010_1(id, i2),
CONSTRAINT fk1010_1_fk_fk1010_1_i2 FOREIGN KEY (fk1010_1_i2) REFERENCES public.fk1010_1(i2)
);
-- FKs whose attribute sets are identical:
-- fk1010_1_fk_fk1010_1_id_i2 and fk1010_1_fk_fk1010_1_id_i2_copy
-- FKs whose attribute sets overlap:
-- fk1010_1_fk_fk1010_1_id and fk1010_1_fk_fk1010_1_id_i1
-- fk1010_1_fk_fk1010_1_id and fk1010_1_fk_fk1010_1_id_i2
-- fk1010_1_fk_fk1010_1_id and fk1010_1_fk_fk1010_1_id_i2_copy
-- fk1010_1_fk_fk1010_1_id_i1 and fk1010_1_fk_fk1010_1_id_i2
-- fk1010_1_fk_fk1010_1_id_i1 and fk1010_1_fk_fk1010_1_id_i2_copy
-- fk1010_1_fk_fk1010_1_i2 and fk1010_1_fk_fk1010_1_id_i2
-- fk1010_1_fk_fk1010_1_i2 and fk1010_1_fk_fk1010_1_id_i2_copy
--===========================================================================
WITH
-- join the FK constraints with the attributes involved in them
fk_with_attributes AS (
SELECT
c.conname as fk_name,
c.conrelid,
c.confrelid,
fk_conkey.conkey_order AS att_order,
fk_conkey.conkey_number,
fk_confkey.confkey_number,
rel_att.attname AS rel_att_name,
rel_att.atttypid AS rel_att_type_id,
rel_att.atttypmod AS rel_att_type_mod,
rel_att.attnotnull AS rel_att_notnull,
frel_att.attname AS frel_att_name,
frel_att.atttypid AS frel_att_type_id,
frel_att.atttypmod AS frel_att_type_mod,
frel_att.attnotnull AS frel_att_notnull
FROM pg_catalog.pg_constraint AS c
CROSS JOIN LATERAL UNNEST(c.conkey) WITH ORDINALITY AS fk_conkey(conkey_number, conkey_order)
LEFT JOIN LATERAL UNNEST(c.confkey) WITH ORDINALITY AS fk_confkey(confkey_number, confkey_order)
ON fk_conkey.conkey_order = fk_confkey.confkey_order
LEFT JOIN pg_catalog.pg_attribute AS rel_att
ON rel_att.attrelid = c.conrelid AND rel_att.attnum = fk_conkey.conkey_number
LEFT JOIN pg_catalog.pg_attribute AS frel_att
ON frel_att.attrelid = c.confrelid AND frel_att.attnum = fk_confkey.confkey_number
WHERE c.contype IN ('f')
),
--
fk_with_attributes_grouped AS (
SELECT
fk_name,
conrelid,
confrelid,
array_agg (rel_att_name order by att_order) as rel_att_names,
array_agg (frel_att_name order by att_order) as frel_att_names
FROM fk_with_attributes
GROUP BY 1, 2, 3
)
SELECT
    r_from.relname, -- referencing relation
    c1.fk_name, -- FK constraint name
    c2.fk_name -- FK constraint name (potential duplicate)
FROM fk_with_attributes_grouped AS c1
    INNER JOIN fk_with_attributes_grouped AS c2 ON c1.fk_name < c2.fk_name
        AND c1.conrelid = c2.conrelid AND c1.confrelid = c2.confrelid
        AND c1.rel_att_names = c2.rel_att_names
    INNER JOIN pg_catalog.pg_class AS r_from ON r_from.oid = c1.conrelid;
--===========================================================================
WITH
-- join the FK constraints with the attributes involved in them
fk_with_attributes AS (
SELECT
c.conname as fk_name,
c.conrelid,
c.confrelid,
fk_conkey.conkey_order AS att_order,
fk_conkey.conkey_number,
fk_confkey.confkey_number,
rel_att.attname AS rel_att_name,
rel_att.atttypid AS rel_att_type_id,
rel_att.atttypmod AS rel_att_type_mod,
rel_att.attnotnull AS rel_att_notnull,
frel_att.attname AS frel_att_name,
frel_att.atttypid AS frel_att_type_id,
frel_att.atttypmod AS frel_att_type_mod,
frel_att.attnotnull AS frel_att_notnull
FROM pg_catalog.pg_constraint AS c
CROSS JOIN LATERAL UNNEST(c.conkey) WITH ORDINALITY AS fk_conkey(conkey_number, conkey_order)
LEFT JOIN LATERAL UNNEST(c.confkey) WITH ORDINALITY AS fk_confkey(confkey_number, confkey_order)
ON fk_conkey.conkey_order = fk_confkey.confkey_order
LEFT JOIN pg_catalog.pg_attribute AS rel_att
ON rel_att.attrelid = c.conrelid AND rel_att.attnum = fk_conkey.conkey_number
LEFT JOIN pg_catalog.pg_attribute AS frel_att
ON frel_att.attrelid = c.confrelid AND frel_att.attnum = fk_confkey.confkey_number
WHERE c.contype IN ('f')
),
--
fk_with_attributes_grouped AS (
SELECT
fk_name,
conrelid,
confrelid,
array_agg (rel_att_name order by att_order) as rel_att_names,
array_agg (frel_att_name order by att_order) as frel_att_names
FROM fk_with_attributes
GROUP BY 1, 2, 3
)
SELECT
    r_from.relname, -- referencing relation
    c1.fk_name, -- FK constraint name
    c2.fk_name -- FK constraint name (attribute overlap)
FROM fk_with_attributes_grouped AS c1
    INNER JOIN fk_with_attributes_grouped AS c2 ON c1.fk_name < c2.fk_name
        AND c1.conrelid = c2.conrelid AND c1.confrelid = c2.confrelid
        AND (c1.rel_att_names && c2.rel_att_names)
    INNER JOIN pg_catalog.pg_class AS r_from ON r_from.oid = c1.conrelid;
--===========================================================================
SELECT
    n.nspname, -- schema
    t.relname -- relation
FROM pg_catalog.pg_class AS t
    INNER JOIN pg_catalog.pg_namespace AS n
        ON t.relnamespace = n.oid
WHERE
    relkind IN ('r', 'p')
    AND t.oid NOT IN (SELECT conrelid FROM pg_catalog.pg_constraint WHERE contype IN ('f'))
    AND t.oid NOT IN (SELECT confrelid FROM pg_catalog.pg_constraint WHERE contype IN ('f'))
    AND n.nspname NOT IN ('information_schema', 'pg_catalog')
    AND n.nspname NOT LIKE 'pg_toast%';
--===========================================================================
-- target relation (lookup table)
DROP TABLE IF EXISTS public.fk1002_2 CASCADE;
CREATE TABLE public.fk1002_2
(
id integer NOT NULL,
value varchar(10) NOT NULL,
CONSTRAINT fk1002_2_pk PRIMARY KEY (id, value)
);
-- populate the lookup table
INSERT INTO public.fk1002_2 (id, value) VALUES (10, '10');
INSERT INTO public.fk1002_2 (id, value) VALUES (20, '20');
-- referencing relation
DROP TABLE IF EXISTS public.fk1002_2_fk;
CREATE TABLE public.fk1002_2_fk
(
fk1002_2_id integer NOT NULL,
fk1002_2_value varchar(10),
CONSTRAINT fk1002_2_fk_fk1002_2
FOREIGN KEY (fk1002_2_id, fk1002_2_value)
REFERENCES public.fk1002_2 (id, value)
);
-- inserting data:
-- because the fk1002_2_value column may contain NULL,
-- both rows below will be added to the table;
-- had CONSTRAINT fk1002_2_fk_fk1002_2 been declared MATCH FULL,
-- the second row would not have been added
INSERT INTO public.fk1002_2_fk (fk1002_2_id, fk1002_2_value) VALUES (20, '20');
INSERT INTO public.fk1002_2_fk (fk1002_2_id, fk1002_2_value) VALUES (30, NULL);
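-- A MATCH FULL sketch: with MATCH FULL, a partially-NULL key is rejected,
-- so the (30, NULL) row above must be removed before the constraint can be recreated.
DELETE FROM public.fk1002_2_fk WHERE fk1002_2_value IS NULL;
ALTER TABLE public.fk1002_2_fk DROP CONSTRAINT fk1002_2_fk_fk1002_2;
ALTER TABLE public.fk1002_2_fk ADD CONSTRAINT fk1002_2_fk_fk1002_2
    FOREIGN KEY (fk1002_2_id, fk1002_2_value)
    REFERENCES public.fk1002_2 (id, value) MATCH FULL;
-- now INSERT ... VALUES (30, NULL) raises a foreign-key violation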
--===========================================================================
WITH
-- join the FK constraints with the attributes involved in them
fk_with_attributes AS (
SELECT
c.conname as fk_name,
c.conrelid,
c.confrelid,
c.confmatchtype,
fk_conkey.conkey_order AS att_order,
fk_conkey.conkey_number,
fk_confkey.confkey_number,
rel_att.attname AS rel_att_name,
rel_att.atttypid AS rel_att_type_id,
rel_att.atttypmod AS rel_att_type_mod,
rel_att.attnotnull AS rel_att_notnull,
frel_att.attname AS frel_att_name,
frel_att.atttypid AS frel_att_type_id,
frel_att.atttypmod AS frel_att_type_mod,
frel_att.attnotnull AS frel_att_notnull
FROM pg_catalog.pg_constraint AS c
CROSS JOIN LATERAL UNNEST(c.conkey) WITH ORDINALITY AS fk_conkey(conkey_number, conkey_order)
LEFT JOIN LATERAL UNNEST(c.confkey) WITH ORDINALITY AS fk_confkey(confkey_number, confkey_order)
ON fk_conkey.conkey_order = fk_confkey.confkey_order
LEFT JOIN pg_catalog.pg_attribute AS rel_att
ON rel_att.attrelid = c.conrelid AND rel_att.attnum = fk_conkey.conkey_number
LEFT JOIN pg_catalog.pg_attribute AS frel_att
ON frel_att.attrelid = c.confrelid AND frel_att.attnum = fk_confkey.confkey_number
WHERE c.contype IN ('f')
)
SELECT
    c.fk_name, -- FK constraint name
    r_from.relname, -- referencing relation
    c.rel_att_names -- nullable attributes in the referencing relation
FROM (
    -- select FKs whose referencing columns are nullable and whose match type is not FULL
SELECT
fk_name,
conrelid,
confrelid,
array_agg (rel_att_name order by att_order ) as rel_att_names
FROM fk_with_attributes
WHERE
(rel_att_notnull IS NOT TRUE)
AND confmatchtype NOT IN ('f')
GROUP BY 1, 2, 3
) AS c
INNER JOIN pg_catalog.pg_class AS r_from
ON r_from.oid = c.conrelid;
--===========================================================================
-- the id column in the target relation is integer; in the referencing relation,
-- the fk1001_2_id column is bigint
-- target relation (lookup table)
DROP TABLE IF EXISTS public.fk1001_2 CASCADE;
CREATE TABLE public.fk1001_2
(
id integer GENERATED ALWAYS AS IDENTITY NOT NULL,
value text NOT NULL,
CONSTRAINT fk1001_2_pk PRIMARY KEY (id, value)
);
-- referencing relation
DROP TABLE IF EXISTS public.fk1001_2_fk;
CREATE TABLE public.fk1001_2_fk
(
fk1001_2_id bigint NOT NULL,
value text NOT NULL,
CONSTRAINT fk1001_2_fk_fk1001_2 FOREIGN KEY (fk1001_2_id, value)
REFERENCES public.fk1001_2(id, value)
);
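-- One possible alignment sketch: narrow the referencing column back to integer
-- (fails if existing values exceed the integer range; widening the referenced
-- id to bigint is the alternative and requires rebuilding the primary key).
ALTER TABLE public.fk1001_2_fk
    ALTER COLUMN fk1001_2_id TYPE integer;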
--===========================================================================
WITH
-- join the FK constraints with the attributes involved in them
fk_with_attributes AS (
SELECT
c.conname as fk_name,
c.conrelid,
c.confrelid,
fk_conkey.conkey_order AS att_order,
fk_conkey.conkey_number,
fk_confkey.confkey_number,
rel_att.attname AS rel_att_name,
rel_att.atttypid AS rel_att_type_id,
rel_att.atttypmod AS rel_att_type_mod,
rel_att.attnotnull AS rel_att_notnull,
frel_att.attname AS frel_att_name,
frel_att.atttypid AS frel_att_type_id,
frel_att.atttypmod AS frel_att_type_mod,
frel_att.attnotnull AS frel_att_notnull
FROM pg_catalog.pg_constraint AS c
CROSS JOIN LATERAL UNNEST(c.conkey) WITH ORDINALITY AS fk_conkey(conkey_number, conkey_order)
LEFT JOIN LATERAL UNNEST(c.confkey) WITH ORDINALITY AS fk_confkey(confkey_number, confkey_order)
ON fk_conkey.conkey_order = fk_confkey.confkey_order
LEFT JOIN pg_catalog.pg_attribute AS rel_att
ON rel_att.attrelid = c.conrelid AND rel_att.attnum = fk_conkey.conkey_number
LEFT JOIN pg_catalog.pg_attribute AS frel_att
ON frel_att.attrelid = c.confrelid AND frel_att.attnum = fk_confkey.confkey_number
WHERE c.contype IN ('f')
)
SELECT
    c.fk_name, -- FK constraint name
    r_from.relname, -- referencing relation
    c.rel_att_names, -- attributes in the referencing relation
    r_to.relname, -- target relation
    c.frel_att_names -- attributes in the target relation
FROM (
    -- select FKs that have column type mismatches
SELECT
fk_name,
conrelid,
confrelid,
array_agg (rel_att_name order by att_order ) as rel_att_names,
array_agg (frel_att_name order by att_order ) as frel_att_names
FROM fk_with_attributes
WHERE
((rel_att_type_id <> frel_att_type_id) OR (rel_att_type_mod <> frel_att_type_mod))
GROUP BY 1, 2, 3
) AS c
INNER JOIN pg_catalog.pg_class AS r_from
ON r_from.oid = c.conrelid
INNER JOIN pg_catalog.pg_class AS r_to
ON r_to.oid = c.confrelid;
--===========================================================================
-- 8.08%
DROP SEQUENCE IF EXISTS public.s1010_2;
CREATE SEQUENCE public.s1010_2 AS smallint INCREMENT BY 1 MAXVALUE 100 START WITH 92;
-- 15.15%
DROP SEQUENCE IF EXISTS public.".s1010_3 ";
CREATE SEQUENCE public.".s1010_3 " AS smallint INCREMENT BY -1 MINVALUE -100 START WITH -85;
-- cycled sequence; no estimate is built for it
DROP SEQUENCE IF EXISTS public.s1010_5;
CREATE SEQUENCE public.s1010_5 AS smallint INCREMENT BY 1 MAXVALUE 100 START WITH 99 CYCLE;
--===========================================================================
SELECT
    schemaname, -- schema name
    sequencename, -- sequence name
    CASE -- account for the direction of sequence growth
        WHEN increment_by > 0 THEN 100.0*(max_value - COALESCE(last_value, start_value))/(max_value - min_value)
        ELSE 100.0*(COALESCE(last_value, start_value) - min_value)/(max_value - min_value)
    END::numeric(5, 2) -- remaining capacity, in %
FROM pg_catalog.pg_sequences
WHERE NOT cycle; -- exclude cycled sequences
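-- Worked check against the sequences above (ascending smallint defaults: min_value = 1;
-- descending defaults: max_value = -1):
-- s1010_2:    100.0 * (100 - 92) / (100 - 1)         = 8.08% remaining
-- ".s1010_3 ": 100.0 * (-85 - (-100)) / (-1 - (-100)) = 15.15% remaining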
--===========================================================================
DROP TABLE IF EXISTS public.i1010_1 CASCADE;
CREATE TABLE public.i1010_1
(
id integer GENERATED ALWAYS AS IDENTITY NOT NULL,
value integer[] NOT NULL,
CONSTRAINT i1010_1_pk PRIMARY KEY (id)
);
INSERT INTO public.i1010_1 (value) VALUES('{1, 3, 5}');
INSERT INTO public.i1010_1 (value) VALUES(ARRAY[2, 4, 6]);
-- create the indexes, explicitly specifying the btree and gin access methods
CREATE INDEX i_btree_value ON public.i1010_1 USING btree (value);
CREATE INDEX i_gin_value ON public.i1010_1 USING gin (value);
-- to demonstrate that the indexes are used,
-- disable sequential scans
SET enable_seqscan TO off;
-- inspect the query plans for the different operators
-- Bitmap Index Scan on i_gin_value
EXPLAIN
SELECT id FROM public.i1010_1 WHERE value @> ARRAY[1];
-- Index Scan using i_btree_value
EXPLAIN
SELECT id FROM public.i1010_1 WHERE value = ARRAY[2, 4, 6];
--===========================================================================
SELECT
    c.relname, -- relation name
    ic.relname, -- index name
    a.amname -- index access method
FROM pg_catalog.pg_index AS i
    INNER JOIN pg_catalog.pg_class AS ic ON i.indexrelid = ic.oid
    INNER JOIN pg_catalog.pg_am AS a ON ic.relam = a.oid AND a.amname = 'btree'
    INNER JOIN pg_catalog.pg_class AS c ON i.indrelid = c.oid
WHERE
    -- check whether the index includes a column of an array type
    EXISTS (SELECT * FROM pg_catalog.pg_attribute AS att
        INNER JOIN pg_catalog.pg_type AS typ ON typ.oid = att.atttypid
        WHERE att.attrelid = i.indrelid
        AND att.attnum = ANY ((string_to_array(indkey::text, ' ')::int2[])[1:indnkeyatts])
        AND typ.typcategory = 'A');
--===========================================================================
--
CREATE TABLE public.c1001_1
(
id integer GENERATED ALWAYS AS IDENTITY NOT NULL,
parent_id integer NOT NULL,
value integer NOT NULL,
CONSTRAINT c1001_1_pk PRIMARY KEY (id)
);
-- insert both valid and invalid data
INSERT INTO public.c1001_1 (parent_id, value) VALUES(1, 1);
INSERT INTO public.c1001_1 (parent_id, value) VALUES(-1, -1);
-- create the constraints with NOT VALID
-- so that historical data is not checked when the constraints are created
ALTER TABLE public.c1001_1 ADD CONSTRAINT c1001_1_fk FOREIGN KEY (parent_id) REFERENCES public.c1001_1(id) NOT VALID;
ALTER TABLE public.c1001_1 ADD CONSTRAINT c1001_1_chk CHECK ( value > 0 ) NOT VALID;
-- insert valid data;
-- the constraints are already in force, so invalid rows can no longer be added
INSERT INTO public.c1001_1 (parent_id, value) VALUES(2, 2);
-- until the historical data has been corrected,
-- the VALIDATE CONSTRAINT calls below fail with errors
ALTER TABLE public.c1001_1 VALIDATE CONSTRAINT c1001_1_fk;
ALTER TABLE public.c1001_1 VALIDATE CONSTRAINT c1001_1_chk;
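-- A fix-up sketch: correct the historical rows, after which the validations succeed.
UPDATE public.c1001_1 SET parent_id = 1, value = 1 WHERE value <= 0;
ALTER TABLE public.c1001_1 VALIDATE CONSTRAINT c1001_1_fk;
ALTER TABLE public.c1001_1 VALIDATE CONSTRAINT c1001_1_chk;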
--===========================================================================
SELECT
t.relname, -- relation name
c.conname, -- constraint name
c.contype -- constraint type
FROM pg_catalog.pg_constraint AS c
INNER JOIN pg_catalog.pg_class AS t
ON t.oid = c.conrelid AND c.contype IN ('c', 'f')
AND (NOT c.convalidated);
--===========================================================================
CREATE PUBLICATION my_publication FOR ALL TABLES;
CREATE TABLE my_table (id SERIAL PRIMARY KEY);
CREATE SUBSCRIPTION my_subscription CONNECTION 'host=127.0.0.1 port=5432 dbname=postgres user=postgres password=my-secret-pw' PUBLICATION my_publication;
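-- A quick check sketch: the subscriber normally lives on a separate cluster
-- (subscribing a cluster to itself, as the 127.0.0.1 example suggests, can hang
-- while the replication slot is created); on the subscriber, pg_stat_subscription
-- shows the apply worker's progress.
SELECT subname, received_lsn, latest_end_lsn FROM pg_stat_subscription;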
--===========================================================================
select date, region, product, sales_amount, avg(sales_amount) over (
partition by region, product order by date rows between 2 preceding and current row
) as rolling_avg_sales
from sales_data;
--===========================================================================
SELECT
c.relname, -- relation name
ic.relname, -- index name
i.indisvalid,
i.indisready,
(SELECT string_agg(format('%I', c.conname), ',') FROM pg_catalog.pg_constraint AS c WHERE c.conindid = ic.oid) -- constraints the index is used in
FROM pg_catalog.pg_index AS i
INNER JOIN pg_catalog.pg_class AS ic ON i.indexrelid = ic.oid
INNER JOIN pg_catalog.pg_class AS c ON i.indrelid = c.oid
WHERE
NOT i.indisvalid OR NOT i.indisready
ORDER BY 1, 2;
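-- A reproduction sketch: a failed CREATE INDEX CONCURRENTLY leaves an INVALID
-- index behind (the table and index names here are illustrative).
CREATE TABLE public.i_invalid_demo (x integer);
INSERT INTO public.i_invalid_demo VALUES (1), (1);
-- fails on the duplicate, but the catalog entry remains with indisvalid = false:
CREATE UNIQUE INDEX CONCURRENTLY i_invalid_demo_x ON public.i_invalid_demo (x);
-- clean up by dropping and re-creating the index (REINDEX also clears INVALID)
DROP INDEX IF EXISTS public.i_invalid_demo_x;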
--===========================================================================
DROP TABLE IF EXISTS public.i1001_1 CASCADE;
CREATE TABLE public.i1001_1
(
id integer GENERATED ALWAYS AS IDENTITY NOT NULL,
value integer NOT NULL,
CONSTRAINT i1001_1_pk PRIMARY KEY (id),
CONSTRAINT i1001_1_unique UNIQUE (value)
);
CREATE UNIQUE INDEX i_id_unique ON public.i1001_1 USING btree (id);
CREATE UNIQUE INDEX i_id_unique_desc ON public.i1001_1 USING btree (id DESC);
CREATE INDEX i_id ON public.i1001_1 USING btree (id);
CREATE INDEX i_id_partial ON public.i1001_1 USING btree (id) WHERE (id > 0);
CREATE UNIQUE INDEX i_id_unique_to_lower_text ON public.i1001_1 USING btree (lower(id::text));
CREATE UNIQUE INDEX i_value_unique ON public.i1001_1 USING btree (value);
CREATE UNIQUE INDEX i_value_unique_desc ON public.i1001_1 USING btree (value DESC);
CREATE INDEX i_value ON public.i1001_1 USING btree (value);
-- unique and regular index with identical columns
CREATE UNIQUE INDEX i_id_value_unique ON public.i1001_1 USING btree (id, value);
CREATE INDEX i_id_value ON public.i1001_1 USING btree (id, value);
-- with include
CREATE UNIQUE INDEX i_id_unique_include_value ON public.i1001_1 USING btree (id) INCLUDE (value);
CREATE INDEX i_id_include_value ON public.i1001_1 USING btree (id) INCLUDE (value);
--===========================================================================
WITH
idx AS (
SELECT
c.relname as table_name, -- relation name
ic.relname as index_name, -- index name
ic.oid,
i.indisunique,
i.indrelid,
pg_get_indexdef(ic.oid) AS object_definition,
regexp_replace( -- ' DESC,'
regexp_replace( -- ' DESC\)'
regexp_replace( -- ' NULLS LAST,'
regexp_replace( -- ' NULLS LAST\)'
regexp_replace( -- ' NULLS FIRST,'
regexp_replace( -- ' NULLS FIRST\)'
regexp_replace( -- ' INDEX .* ON '
pg_get_indexdef(ic.oid), ' INDEX .* ON ', ' INDEX ON '),
' NULLS FIRST\)', ')'),
' NULLS FIRST,', ','),
' NULLS LAST\)', ')'),
' NULLS LAST,', ','),
' DESC\)', ')'),
' DESC,', ',')
AS simplified_object_definition,
(SELECT string_agg(format('%I', c.conname), ',') FROM pg_catalog.pg_constraint AS c WHERE c.conindid = ic.oid)
AS used_in_constraint
FROM pg_catalog.pg_index AS i
INNER JOIN pg_catalog.pg_class AS ic ON i.indexrelid = ic.oid
INNER JOIN pg_catalog.pg_class AS c ON i.indrelid = c.oid
)
SELECT
i1.table_name, -- relation name
i1.index_name as i1_index_name, -- name of index i1
i2.index_name as i2_index_name, -- name of index i2
i1.simplified_object_definition as simplified_index_definition,
i1.object_definition as i1_index_definition,
i2.object_definition as i2_index_definition,
i1.used_in_constraint as i1_used_in_constraint,
i2.used_in_constraint as i2_used_in_constraint
FROM idx as i1
INNER JOIN idx AS i2 ON i1.oid < i2.oid AND i1.indrelid = i2.indrelid
AND i1.simplified_object_definition = i2.simplified_object_definition
ORDER BY 1, 2;
--===========================================================================
WITH
idx AS (
SELECT
c.relname as table_name, -- relation name
ic.relname as index_name, -- index name
ic.oid,
i.indisunique,
i.indrelid,
pg_get_indexdef(ic.oid) AS object_definition,
regexp_replace( -- ' DESC,'
regexp_replace( -- ' DESC\)'
regexp_replace( -- ' NULLS LAST,'
regexp_replace( -- ' NULLS LAST\)'
regexp_replace( -- ' NULLS FIRST,'
regexp_replace( -- ' NULLS FIRST\)'
regexp_replace( -- ' INDEX .* ON '
pg_get_indexdef(ic.oid), ' INDEX .* ON ', ' INDEX ON '),
' NULLS FIRST\)', ')'),
' NULLS FIRST,', ','),
' NULLS LAST\)', ')'),
' NULLS LAST,', ','),
' DESC\)', ')'),
' DESC,', ',')
AS simplified_object_definition,
(SELECT string_agg(format('%I', c.conname), ',') FROM pg_catalog.pg_constraint AS c WHERE c.conindid = ic.oid)
AS used_in_constraint
FROM pg_catalog.pg_index AS i
INNER JOIN pg_catalog.pg_class AS ic ON i.indexrelid = ic.oid
INNER JOIN pg_catalog.pg_class AS c ON i.indrelid = c.oid
)
SELECT
i1.table_name, -- relation (table) name
i1.index_name as i1_unique_index_name, -- name of unique index i1
i2.index_name as i2_index_name, -- name of index i2
i1.object_definition as i1_unique_index_definition,
i2.object_definition as i2_index_definition,
i1.used_in_constraint as i1_used_in_constraint,
i2.used_in_constraint as i2_used_in_constraint
FROM idx as i1
INNER JOIN idx AS i2 ON i1.indrelid = i2.indrelid
AND i1.indisunique AND NOT i2.indisunique
AND replace(i1.simplified_object_definition, ' UNIQUE ', ' ') = i2.simplified_object_definition
ORDER BY 1, 2;
--===========================================================================
-- find intersecting indexes: definitions match after stripping UNIQUE, WHERE and INCLUDE clauses
WITH
idx AS (
SELECT
c.relname as table_name, -- relation (table) name
ic.relname as index_name, -- index name
ic.oid,
i.indisunique,
i.indrelid,
pg_get_indexdef(ic.oid) AS object_definition,
replace( -- ' UNIQUE '
regexp_replace( -- ' INCLUDE'
regexp_replace( -- ' WHERE'
regexp_replace( -- ' DESC,'
regexp_replace( -- ' DESC\)'
regexp_replace( -- ' NULLS LAST,'
regexp_replace( -- ' NULLS LAST\)'
regexp_replace( -- ' NULLS FIRST,'
regexp_replace( -- ' NULLS FIRST\)'
regexp_replace( -- ' INDEX .* ON '
pg_get_indexdef(ic.oid), ' INDEX .* ON ', ' INDEX ON '),
' NULLS FIRST\)', ')'),
' NULLS FIRST,', ','),
' NULLS LAST\)', ')'),
' NULLS LAST,', ','),
' DESC\)', ')'),
' DESC,', ','),
' WHERE .*', ''),
' INCLUDE .*', ''),
' UNIQUE ', ' ')
AS simplified_object_definition,
(SELECT string_agg(format('%I', c.conname), ',') FROM pg_catalog.pg_constraint AS c WHERE c.conindid = ic.oid)
AS used_in_constraint
FROM pg_catalog.pg_index AS i
INNER JOIN pg_catalog.pg_class AS ic ON i.indexrelid = ic.oid
INNER JOIN pg_catalog.pg_class AS c ON i.indrelid = c.oid
)
SELECT
i1.table_name, -- relation (table) name
i1.index_name as i1_index_name, -- name of index i1
i2.index_name as i2_index_name, -- name of index i2
i1.simplified_object_definition as simplified_index_definition,
i1.object_definition as i1_index_definition,
i2.object_definition as i2_index_definition,
i1.used_in_constraint as i1_used_in_constraint,
i2.used_in_constraint as i2_used_in_constraint
FROM idx as i1
INNER JOIN idx AS i2 ON i1.oid < i2.oid AND i1.indrelid = i2.indrelid
AND i1.simplified_object_definition = i2.simplified_object_definition
ORDER BY 1, 2;
--===========================================================================
-- tables without a primary key in the given schema
select
pc.oid::regclass::text as table_name,
pg_table_size(pc.oid) as table_size
from
pg_catalog.pg_class pc
inner join pg_catalog.pg_namespace nsp on nsp.oid = pc.relnamespace
where
pc.relkind = 'r' and
pc.oid not in (
select c.conrelid as table_oid
from pg_catalog.pg_constraint c
where c.contype = 'p'
) and
nsp.nspname = :schema_name_param::text
order by table_name;
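-- Usage sketch (assuming the query above is run through psql): bind the named parameter first, e.g.
--   \set schema_name_param '''public'''
-- or rewrite the filter as nsp.nspname = :'schema_name_param' and use \set schema_name_param public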
--===========================================================================
-- per-index size for every index in the 'demo' schema
select
x.indrelid::regclass::text as table_name,
x.indexrelid::regclass::text as index_name,
pg_relation_size(x.indexrelid) as index_size_bytes
from pg_index x
join pg_stat_all_indexes psai on x.indexrelid = psai.indexrelid
where psai.schemaname = 'demo'::text;
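-- A companion sketch (assumption: usage statistics have had time to accumulate since the
-- last stats reset): indexes that were never scanned are candidates for review, though
-- unique indexes backing constraints must stay even with idx_scan = 0.
select psai.schemaname,
       psai.relname as table_name,
       psai.indexrelname as index_name,
       pg_relation_size(psai.indexrelid) as index_size_bytes
from pg_catalog.pg_stat_all_indexes psai
where psai.idx_scan = 0
  and psai.schemaname = 'demo'::text
order by index_size_bytes desc;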
--===========================================================================
create schema if not exists demo;
create table if not exists demo.orders
(
id bigint primary key generated always as identity,
user_id bigint not null,
shop_id bigint not null,
status int not null,
created_at timestamptz not null default current_timestamp
);
create table if not exists demo.order_item
(
id bigint primary key generated always as identity,
order_id bigint not null references demo.orders (id),
price decimal(22, 2) not null default 0,
amount int not null default 0,
sku varchar(255) not null,
warehouse_id int
);
insert into demo.orders (user_id, shop_id, status)
select
(ids.id % 10) + 1 as user_id,
(ids.id % 4) + 1 as shop_id,
1 as status -- new order
from generate_series(1, 100000) ids (id);
insert into demo.order_item (order_id, price, amount, sku)
select
id as order_id,
(random() + 1) * 1000.0 as price,
(random() * 10) + 1 as amount,
md5(random()::text) as sku
from demo.orders;
insert into demo.order_item (order_id, price, amount, sku)
select
id as order_id,
(random() + 1) * 2000.0 as price,
(random() * 5) + 1 as amount,
md5((random() + 1)::text) as sku
from demo.orders where id % 2 = 0;
-- gather planner statistics
vacuum analyze demo.orders, demo.order_item;
-- note: CREATE INDEX CONCURRENTLY cannot run inside a transaction block
create index concurrently if not exists idx_order_item_order_id
on demo.order_item (order_id);
explain (analyze, buffers)
select oi.id, oi.order_id, oi.price, oi.amount, oi.sku, o.user_id, o.shop_id, o.status
from demo.orders o
join demo.order_item oi on oi.order_id = o.id
where o.id = 100 -- an index (the primary key) exists on this column
order by oi.id;
create index concurrently if not exists idx_order_item_warehouse_id_without_nulls
on demo.order_item (warehouse_id) where warehouse_id is not null;
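-- A usage sketch: the predicate warehouse_id = 1 implies warehouse_id is not null,
-- so the planner can satisfy it from the partial index above.
explain (analyze, buffers)
select * from demo.order_item where warehouse_id = 1;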
--===========================================================================
select count(distinct salary) as distinct_count,
count(salary) as total_count
from (values(1000), (2000), (1000), (NULL)) as salaries(salary);
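-- For contrast: count(*) counts every row including the NULL salary, while count(salary)
-- and count(distinct salary) skip NULLs (3 and 2 above versus 4 here).
select count(*) as row_count
from (values(1000), (2000), (1000), (NULL)) as salaries(salary);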
--===========================================================================
SET enable_seqscan = ON;
SET random_page_cost = 1.1;
EXPLAIN ANALYZE SELECT * FROM schema_a.table_a f WHERE f.date BETWEEN '2024-01-01' AND '2024-06-01';
--===================================================
SET work_mem = '30MB';
EXPLAIN ANALYZE SELECT * FROM schema_a.table_a f JOIN schema_b.table_b b ON b.id = f.bpo_id WHERE column_x > 100;
--===================================================
SET work_mem = '800MB';
--SET seq_page_cost = 1;
--SET random_page_cost = 4;
EXPLAIN (ANALYZE) SELECT * FROM table_a ORDER BY column_x;
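-- Cleanup sketch: restore the planner settings touched above to their configured defaults.
RESET work_mem;
RESET random_page_cost;
RESET seq_page_cost;
RESET enable_seqscan;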
--===================================================
SELECT query, calls,
round(total_exec_time::numeric, 2) AS total_time,
round(mean_exec_time::numeric, 2) AS mean_time,
round((100 * total_exec_time / sum(total_exec_time) OVER ())::numeric, 2) AS percentage
FROM pg_stat_statements
ORDER BY total_exec_time DESC
LIMIT 10;
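-- Setup sketch for the query above (assumption: shared_preload_libraries already
-- includes 'pg_stat_statements' in postgresql.conf):
CREATE EXTENSION IF NOT EXISTS pg_stat_statements;
-- optionally clear accumulated stats before a fresh measurement window
SELECT pg_stat_statements_reset();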
--===================================================
SELECT event as issue_status, COUNT(*) as cnt FROM (
SELECT type, repo.name, actor.login,
JSON_EXTRACT(payload, '$.action') as event
FROM `githubarchive.day.20190101`
WHERE type = 'IssuesEvent'
)
GROUP BY issue_status;
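-- A variant sketch: JSON_EXTRACT_SCALAR returns the bare value ("opened" becomes opened),
-- avoiding the JSON quoting that JSON_EXTRACT keeps.
SELECT JSON_EXTRACT_SCALAR(payload, '$.action') AS issue_status, COUNT(*) AS cnt
FROM `githubarchive.day.20190101`
WHERE type = 'IssuesEvent'
GROUP BY issue_status;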
--===================================================
SELECT
COUNT(*)
FROM `githubarchive.day.2015*`
WHERE
type = 'PushEvent'
AND (_TABLE_SUFFIX BETWEEN '0101' AND '0105');
/* count number of watches between Jan~Oct 2014 */
SELECT COUNT(*)
FROM `githubarchive.month.2014*`
WHERE
type = 'WatchEvent'
AND (_TABLE_SUFFIX BETWEEN '01' AND '10');
/* count number of forks in 2012~2014 */
SELECT COUNT(*)
FROM `githubarchive.year.20*`
WHERE
type = 'ForkEvent'
AND (_TABLE_SUFFIX BETWEEN '12' AND '14');
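-- A sketch: _TABLE_SUFFIX can also be selected, e.g. pushes per day over the scanned range.
SELECT _TABLE_SUFFIX AS day, COUNT(*) AS pushes
FROM `githubarchive.day.2015*`
WHERE type = 'PushEvent'
AND _TABLE_SUFFIX BETWEEN '0101' AND '0105'
GROUP BY day
ORDER BY day;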
--===================================================