====== PostgreSQL ======
# connect with psql
psql -h 192.168.1.100 -p 5432 -U u1 mydatabase
# connect from Python with psycopg2
import psycopg2
conn = psycopg2.connect("host='10.59.1.50' dbname='DB' user='perm' password='perm'")
===== Interesting sites =====
**PostgreSQL**
* https://postgresqlco.nf/ - configuration
* https://pgpedia.info/ - encyclopedia of postgres
* https://www.interdb.jp/pg/ - The Internals of PostgreSQL
* https://pgstats.dev/ PostgreSQL Observability
* https://habr.com/ru/companies/tensor/articles/771406/ - Antipatterns
* https://dataegret.ru/2020/07/how-to-become-a-postgresql-dba/ - books
* https://pganalyze.com/
* https://wiki.postgresql.org/wiki/Sample_Databases
* https://pgconfig.rustprooflabs.com/ - compare config with default
* https://postgresqlco.nf/tuning-guide - configure
* https://kb.objectrocket.com/category/postgresql - read
* https://til.hashrocket.com/posts/da9ade5291-clear-the-screen-in-psql - psql clear console
**Draw DB diagram**
* draw a data diagram
**Kubernetes - postgres view**
* https://cloudnative-pg.io/documentation/1.19/
===== Postgres.Cmd =====
==== Postgres.System commands ====
Start/stop, initdb, and database creation.
* ''pg_config --configure'' - shows the options Postgres was compiled with
==== Postgres.PSQL ====
++++ psql - cheatsheet|
-- show tables in all schemas https://stackoverflow.com/questions/15644152/list-tables-in-a-postgresql-schema
\dt *.*
-- clear console
\! clear
-- show connection info
\conninfo
-- show timing
\timing on
-- ------------ Files --------------------
-- field separator for unaligned output
\f ','
-- toggle between unaligned and aligned output mode
\a
-- send query output to a file
\o '/tmp/output.csv'
-- ----------------------------------------
-- vertical show data
\x
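-- putting it together - a minimal sketch for exporting a query to CSV
-- (toggle unaligned mode, set the separator, redirect output; path is an example):
\a
\f ','
\o '/tmp/output.csv'
SELECT datname FROM pg_database;
\o
\a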
++++
===== Postgres.Sql =====
* https://habr.com/ru/articles/791260/ - courses
* https://gist.github.com/rgreenjr/3637525 - useful sqls
* https://habr.com/ru/post/280912/ - Pg tricks
* https://postgres.ai/blog/20220114-progress-bar-for-postgres-queries-lets-dive-deeper - progress bar for observing running queries
++++ Sql.interesting sql|
-- set the default schema
SET search_path TO adm,public;
-- ----- build a table from a list of values -----
SELECT number
FROM (VALUES ('555123'), ('555321'), ('555000') ) prefixes (prefix)
JOIN phonenumbers ON number >= prefix
AND number < prefix || 'a'
-- ctid system column: physical row location on disk (type tid)
SELECT ctid, * FROM adm.sqlcommands
-- Generate data
INSERT INTO temperature (ts, city, temperature)
SELECT ts + (INTERVAL '60 minutes' * random()), city, 30*random()
FROM generate_series('2010-01-01' :: TIMESTAMP,
'2020-12-31', '1 hour') AS ts,
unnest(array['Moscow', 'Berlin']) AS city;
-- hierarchy query: split a comma-separated field into rows
/*create table T1(id int, F1 varchar(100));
insert into T1 values(1, '10,56,657,34,767,71');
insert into T1 values(3, '1,5487,27,9');
insert into T1 values(2, '12,6,65,8,67,22,70,5');*/
with recursive t1r as
(select
0 as i
,ROW_NUMBER () OVER (ORDER BY id) AS ir
,id
,f1
,'' f
from T1
UNION
SELECT
i+1
, ROW_NUMBER () OVER (ORDER BY i) AS ir
, id
, f1
, split_part(f1, ',', i+1)
from t1r
where i<10
and split_part(f1, ',', i+1)!=''
)
select id,i as num, f as val
from t1r
where i>0
order by id,i;
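-- simpler alternative (PostgreSQL 9.4+): split the list with regexp_split_to_table
-- instead of a recursive CTE (string_to_table on 14+); a sketch against the same T1:
select id, ord as num, val
from T1, regexp_split_to_table(f1, ',') with ordinality as s(val, ord)
order by id, ord;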
++++
==== Sql.system ====
++++ SQL check version/size|
-- PostgreSQL version and database/table sizes
SELECT version();
SELECT pg_size_pretty(pg_database_size('UIS_MV'));
SELECT oid,table_schema,TABLE_NAME
, to_char(row_estimate::INT , 'FM999 999 999 999 999 999') as rows_estimate
, pg_size_pretty(total_bytes) AS total
, pg_size_pretty(index_bytes) AS INDEX
, pg_size_pretty(toast_bytes) AS toast
, pg_size_pretty(table_bytes) AS TABLE
FROM (
SELECT *, total_bytes-index_bytes-COALESCE(toast_bytes,0) AS table_bytes FROM (
SELECT c.oid,nspname AS table_schema, relname AS TABLE_NAME
, c.reltuples AS row_estimate
, pg_total_relation_size(c.oid) AS total_bytes
, pg_indexes_size(c.oid) AS index_bytes
, pg_total_relation_size(reltoastrelid) AS toast_bytes
FROM pg_class c
LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
WHERE relkind = 'r'
) a
) a ORDER BY total_bytes DESC
LIMIT 10;
SELECT *, pg_size_pretty(total_bytes) AS total
, pg_size_pretty(index_bytes) AS INDEX
, pg_size_pretty(toast_bytes) AS toast
, pg_size_pretty(table_bytes) AS TABLE
FROM (
SELECT *, total_bytes-index_bytes-COALESCE(toast_bytes,0) AS table_bytes FROM (
SELECT c.oid,nspname AS table_schema, relname AS TABLE_NAME
, to_char(c.reltuples , 'FM999 999 999 999 999 999') AS row_estimate
, pg_total_relation_size(c.oid) AS total_bytes
, pg_indexes_size(c.oid) AS index_bytes
, pg_total_relation_size(reltoastrelid) AS toast_bytes
FROM pg_class c
LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
WHERE relkind = 'r'
) a
) a ORDER BY total_bytes DESC;
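-- quick check of a single table, indexes and TOAST included (table name is an example):
SELECT pg_size_pretty(pg_total_relation_size('adm.sqlcommands'));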
++++
++++ Find objects and fields|
-- find fields
SELECT
pg_namespace.nspname,
pg_class.relname,
pg_attribute.attname AS column_name,
pg_catalog.format_type(pg_attribute.atttypid, pg_attribute.atttypmod) AS data_type
FROM
pg_catalog.pg_attribute
INNER JOIN
pg_catalog.pg_class ON pg_class.oid = pg_attribute.attrelid
INNER JOIN
pg_catalog.pg_namespace ON pg_namespace.oid = pg_class.relnamespace
WHERE
pg_attribute.attnum > 0
AND NOT pg_attribute.attisdropped
AND pg_namespace.nspname = 'public'
AND pg_class.relname = 'ЭтапИсходящегоЗапроса'
and ( 1=0 or pg_catalog.format_type(pg_attribute.atttypid, pg_attribute.atttypmod) = 'text')
ORDER BY
attnum ASC;
-- find fields and their average width via pg_stats
SELECT tablename,attname, avg_width FROM pg_stats
WHERE 1=1
and tablename in('СообщениеВИС', 'СообщениеВИСИЗ', 'ЗначениеПараметраИЗ', 'ЭтапИсходящегоЗапроса')
and ( 1=1 or
(tablename, attname) IN
( VALUES
('СообщениеВИС', 'ТипСообщения'),
('СообщениеВИС','Сообщение')
)
)
order by 1 desc;
++++
==== Sql.check ====
++++ SQL check database data|
#!/bin/bash
# verify that the contents can be dumped (integrity check)
if ! su - postgres -c 'pg_dumpall > /dev/null 2>/tmp/dumpall_log'
then
echo 'pg_dumpall failed'
exit 125
fi
++++
++++ SQL List index|
-- list the largest b-tree indexes
SELECT c.oid, i.indisunique
,n.nspname
,c.relname
,c.relpages
FROM pg_index i
JOIN pg_opclass op ON i.indclass[0] = op.oid
JOIN pg_am am ON op.opcmethod = am.oid
JOIN pg_class c ON i.indexrelid = c.oid
JOIN pg_namespace n ON c.relnamespace = n.oid
WHERE am.amname = 'btree'
AND (1=1 or n.nspname = 'pg_catalog') -- # set 1=0 if you need only system
AND c.relpersistence != 't'
AND c.relkind = 'i' AND i.indisready AND i.indisvalid
ORDER BY c.relpages DESC LIMIT 10;
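-- per-index sizes for user tables; a sketch via pg_stat_user_indexes:
SELECT schemaname, indexrelname, pg_size_pretty(pg_relation_size(indexrelid)) AS size
FROM pg_stat_user_indexes
ORDER BY pg_relation_size(indexrelid) DESC LIMIT 10;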
++++
++++ SQL check database index - amcheck verify tbl b-tree Indexes |
-- verify table b-tree indexes (uncomment the schema/table filters below to narrow the scan)
create extension if not exists amcheck;
set statement_timeout to 0;
do $$ declare r record;
sql text;
ts_pre timestamptz;
e_message text;
e_detail text;
e_context text;
e_hint text;
errcount int := 0;
begin raise info 'begin!...';
for r in
select
row_number() over(
order by
tc.reltuples
) as i,
count(*) over() as cnt,
c.oid,
i.indisunique,
c.relname,
c.relpages :: int8,
tc.reltuples :: int8 as tuples
from
pg_index i
join pg_opclass op on i.indclass[0] = op.oid
join pg_am am on op.opcmethod = am.oid
join pg_class c on i.indexrelid = c.oid
join pg_class tc on i.indrelid = tc.oid
join pg_namespace n on c.relnamespace = n.oid
where
am.amname = 'btree' --and n.nspname = 'public'
and c.relpersistence <> 't' -- don't check temp tables
and c.relkind = 'i'
and i.indisready
and i.indisvalid
--and tc.relname = 'tbl' -- comment this out to check the whole DB
--and c.relname in ('index_projects_on_name_and_id', 'index_projects_on_lower_name', 'index_projects_api_name_id_desc')
order by
tc.reltuples loop ts_pre := clock_timestamp();
raise info '[%] Processing %/%: index: % (index relpages: %; heap tuples: ~%)...',
ts_pre :: timestamptz(3),
r.i,
r.cnt,
r.relname,
r.relpages,
r.tuples;
begin perform bt_index_parent_check(
index => r.oid, heapallindexed => true
);
raise info '[%] SUCCESS %/% – index: %. Time taken: %',
clock_timestamp():: timestamptz(3),
r.i,
r.cnt,
r.relname,
(
clock_timestamp() - ts_pre
);
exception when others then get stacked diagnostics e_message = message_text,
e_detail = pg_exception_detail,
e_context = pg_exception_context,
e_hint = pg_exception_hint;
errcount := errcount + 1;
raise warning $err$[ % ] FAILED %/% – index : %.ERROR : % CONTEXT : % DETAIL : % HINT : % $err$,
clock_timestamp():: timestamptz(3),
r.i,
r.cnt,
r.relname,
e_message,
e_detail,
e_context,
e_hint;
end;
end loop;
if errcount = 0 then raise info 'Btree index scan with amcheck successfully finished. 0 errors.';
else raise exception 'Index corruption detected by amcheck, % errors, see details in the log.',
errcount;
end if;
end $$;
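-- lighter check of a single index, without the parent/child checks
-- (index name is an example):
-- select bt_index_check('adm.sqlcommands_pkey'::regclass, true);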
++++
==== Sql.Generate Data====
* generate data - https://www.tonic.ai/blog/how-to-create-realistic-test-data-for-postgresql
++++ SQL generate database data|
-- https://www.tonic.ai/blog/how-to-create-realistic-test-data-for-postgresql
-- generate data
select now();
SET search_path TO adm,public;
SHOW SEARCH_PATH;
CREATE TABLE users(
id SERIAL PRIMARY KEY,
email VARCHAR(40) NOT NULL UNIQUE
);
CREATE TABLE posts(
id SERIAL PRIMARY KEY,
user_id INTEGER NOT NULL REFERENCES users(id),
title VARCHAR(100) NOT NULL UNIQUE
);
CREATE TABLE comments(
id SERIAL PRIMARY KEY,
user_id INTEGER NOT NULL REFERENCES users(id),
post_id INTEGER NOT NULL REFERENCES posts(id),
body VARCHAR(500) NOT NULL
);
INSERT INTO adm.users(email)
SELECT
'user_' || seq || '@' || (
CASE (RANDOM() * 2)::INT
WHEN 0 THEN 'gmail'
WHEN 1 THEN 'hotmail'
WHEN 2 THEN 'yahoo'
END
) || '.com' AS email
FROM GENERATE_SERIES(1, 10) seq;
SELECT * FROM adm.users;
INSERT INTO adm.posts(user_id, title)
WITH expanded AS (
SELECT RANDOM(), seq, u.id AS user_id
FROM GENERATE_SERIES(1, 50) seq, users u
), shuffled AS (
SELECT e.*
FROM expanded e
INNER JOIN (
SELECT ei.seq, MIN(ei.random) FROM expanded ei GROUP BY ei.seq
) em ON (e.seq = em.seq AND e.random = em.min)
ORDER BY e.seq
)
SELECT
s.user_id,
'It is ' || s.seq || ' ' || (
CASE (RANDOM() * 2)::INT
WHEN 0 THEN 'sql'
WHEN 1 THEN 'elixir'
WHEN 2 THEN 'ruby'
END
) as title
FROM shuffled s;
SELECT * FROM adm.posts LIMIT 10;
INSERT INTO comments(user_id, post_id, body)
WITH expanded AS (
SELECT RANDOM(), seq, u.id AS user_id, p.id AS post_id
FROM GENERATE_SERIES(1, 2000) seq, users u, posts p
), shuffled AS (
SELECT e.*
FROM expanded e
INNER JOIN (
SELECT ei.seq, MIN(ei.random) FROM expanded ei GROUP BY ei.seq
) em ON (e.seq = em.seq AND e.random = em.min)
ORDER BY e.seq
)
SELECT
s.user_id,
s.post_id,
'no this test ' || s.seq AS body
FROM shuffled s;
SELECT * FROM comments where body like '%Test comment 927%' LIMIT 10;
SELECT * FROM comments where body like '%no this test %' LIMIT 10;
/*
drop TABLE comments;
drop TABLE posts;
drop TABLE users;
*/
select now();
++++
==== Sql.JSONB ====
* PG JSON functions - https://postgrespro.ru/docs/postgresql/14/functions-json
* Antipattern jsonb - https://github.com/jugatsu/onec-docker#support
++++ SQL json examples|
-- jsonb_to_recordset: unpack a jsonb array into a record set
select x.*
from "test"."jsonb_example_table" t
join lateral jsonb_to_recordset( t.data ) as
x( "id" int,
"name" text,
"email" text,
"roles" jsonb,
"esia_tag" text,
"position" text,
"created_at" text,
"department" text,
"updated_at" text,
"subdepartment" text,
"fo_and_to_organ" jsonb,
"email_verified_at" text,
"fo_and_to_organs_id" jsonb,
"digital_certificates" text,
"place_of_work_filled" bool
) on true
where t.id=16;
-- example TO_TIMESTAMP('2021-10-26T01:53:12.000000Z','YYYY-MM-DD"T"HH24:MI:SS"Z"')
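-- operator-based extraction; a sketch assuming t.data is a jsonb array of objects
-- (as jsonb_to_recordset above implies) - read fields of the first element:
select t.data -> 0 ->> 'name' as name, t.data -> 0 -> 'roles' as roles
from "test"."jsonb_example_table" t
where t.id = 16;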
++++
===== Postgres.DATA =====
Loading, managing, and processing data
++++ SQL.COPY load from DB |
COPY country TO STDOUT (DELIMITER '|');
COPY (select ir, "Район", "HouseNumber", "Name", "Полный_адрес" from dist)
to '/tmp/DistrictMap.csv'
DELIMITER ','
CSV HEADER;
COPY country TO PROGRAM 'gzip > /usr1/proj/bray/sql/country_data.gz';
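-- client-side alternative: \copy in psql writes the file on the client,
-- so no server-side file access is required (a sketch):
\copy (select * from dist) to '/tmp/DistrictMap.csv' with (format csv, header)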
++++
++++ SQL.COPY load data to DB|
SET search_path TO dist,public;
-- create the table
CREATE TABLE dist (
ir bigint,
"Район" character varying(450),
"HouseNumber" character varying(10),
"Name" character varying(450),
"Полный_адрес" character varying(1000)
);
-- load the data
COPY dist(ir, "Район", "HouseNumber", "Name", "Полный_адрес")
FROM '/tmp/DistrictMap.csv'
DELIMITER ','
CSV HEADER;
select upper("Name"),ds.*
from dist ds where upper("Name") like '%ИГНАТОВ%'
and "HouseNumber"='116'
order by ir desc;
select "HouseNumber","Name",count(*)
from dist
group by "HouseNumber","Name","Район"
having count(*)>1
order by 3 desc
;
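-- remove the duplicates found above, keeping one row per group; a sketch using ctid:
delete from dist
where ctid in (
  select ctid from (
    select ctid, row_number() over (partition by "HouseNumber","Name","Район" order by ir) as rn
    from dist
  ) s
  where rn > 1
);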
++++
===== Postgres.DBA =====
Description of admin tasks
* Check configuration files - ''select sourcefile, name, sourceline, error from pg_file_settings where error is not null;''
* Check the authorization config in postgresql and pg_hba - ''SHOW password_encryption; SELECT * FROM pg_hba_file_rules();''
++++ Bloat - MVCC - transaction - Get current xmin - PageInspector|
-- https://www.percona.com/blog/2018/08/06/basic-understanding-bloat-vacuum-postgresql-mvcc/
-- select attributes
percona=# SELECT attname, format_type (atttypid, atttypmod)
FROM pg_attribute
WHERE attrelid::regclass::text='scott.employee'
ORDER BY attnum;
percona=# select txid_current();
txid_current
--------------
646
(1 row)
percona=# INSERT into scott.employee VALUES (9,'avi',9);
INSERT 0 1
percona=# select xmin,xmax,cmin,cmax,* from scott.employee where emp_id = 9;
xmin | xmax | cmin | cmax | emp_id | emp_name | dept_id
------+------+------+------+--------+----------+---------
647 | 0 | 0 | 0 | 9 | avi | 9
(1 row)
--show bloat table
WITH constants AS (
-- define some constants for sizes of things
-- for reference down the query and easy maintenance
SELECT current_setting('block_size')::numeric AS bs, 23 AS hdr, 8 AS ma
),
no_stats AS (
-- screen out tables that have attributes
-- without stats, such as JSON
SELECT table_schema, table_name,
n_live_tup::numeric as est_rows,
pg_table_size(relid)::numeric as table_size
FROM information_schema.columns
JOIN pg_stat_user_tables as psut
ON table_schema = psut.schemaname
AND table_name = psut.relname
LEFT OUTER JOIN pg_stats
ON table_schema = pg_stats.schemaname
AND table_name = pg_stats.tablename
AND column_name = attname
WHERE attname IS NULL
AND table_schema NOT IN ('pg_catalog', 'information_schema')
GROUP BY table_schema, table_name, relid, n_live_tup
),
null_headers AS (
-- calculate null header sizes
-- omitting tables which don't have complete stats
-- and attributes which aren't visible
SELECT
hdr+1+(sum(case when null_frac <> 0 THEN 1 else 0 END)/8) as nullhdr,
SUM((1-null_frac)*avg_width) as datawidth,
MAX(null_frac) as maxfracsum,
schemaname,
tablename,
hdr, ma, bs
FROM pg_stats CROSS JOIN constants
LEFT OUTER JOIN no_stats
ON schemaname = no_stats.table_schema
AND tablename = no_stats.table_name
WHERE schemaname NOT IN ('pg_catalog', 'information_schema')
AND no_stats.table_name IS NULL
AND EXISTS ( SELECT 1
FROM information_schema.columns
WHERE schemaname = columns.table_schema
AND tablename = columns.table_name )
GROUP BY schemaname, tablename, hdr, ma, bs
),
data_headers AS (
-- estimate header and row size
SELECT
ma, bs, hdr, schemaname, tablename,
(datawidth+(hdr+ma-(case when hdr%ma=0 THEN ma ELSE hdr%ma END)))::numeric AS datahdr,
(maxfracsum*(nullhdr+ma-(case when nullhdr%ma=0 THEN ma ELSE nullhdr%ma END))) AS nullhdr2
FROM null_headers
),
table_estimates AS (
-- make estimates of how large the table should be
-- based on row and page size
SELECT schemaname, tablename, bs,
reltuples::numeric as est_rows, relpages * bs as table_bytes,
CEIL((reltuples*
(datahdr + nullhdr2 + 4 + ma -
(CASE WHEN datahdr%ma=0
THEN ma ELSE datahdr%ma END)
)/(bs-20))) * bs AS expected_bytes,
reltoastrelid
FROM data_headers
JOIN pg_class ON tablename = relname
JOIN pg_namespace ON relnamespace = pg_namespace.oid
AND schemaname = nspname
WHERE pg_class.relkind = 'r'
),
estimates_with_toast AS (
-- add in estimated TOAST table sizes
-- estimate based on 4 toast tuples per page because we don't have
-- anything better. also append the no_stats tables
SELECT schemaname, tablename,
TRUE as can_estimate,
est_rows,
table_bytes + ( coalesce(toast.relpages, 0) * bs ) as table_bytes,
expected_bytes + ( ceil( coalesce(toast.reltuples, 0) / 4 ) * bs ) as expected_bytes
FROM table_estimates LEFT OUTER JOIN pg_class as toast
ON table_estimates.reltoastrelid = toast.oid
AND toast.relkind = 't'
),
table_estimates_plus AS (
-- add some extra metadata to the table data
-- and calculations to be reused
-- including whether we can't estimate it
-- or whether we think it might be compressed
SELECT current_database() as databasename,
schemaname, tablename, can_estimate,
est_rows,
CASE WHEN table_bytes > 0
THEN table_bytes::NUMERIC
ELSE NULL::NUMERIC END
AS table_bytes,
CASE WHEN expected_bytes > 0
THEN expected_bytes::NUMERIC
ELSE NULL::NUMERIC END
AS expected_bytes,
CASE WHEN expected_bytes > 0 AND table_bytes > 0
AND expected_bytes <= table_bytes
THEN (table_bytes - expected_bytes)::NUMERIC
ELSE 0::NUMERIC END AS bloat_bytes
FROM estimates_with_toast
UNION ALL
SELECT current_database() as databasename,
table_schema, table_name, FALSE,
est_rows, table_size,
NULL::NUMERIC, NULL::NUMERIC
FROM no_stats
),
bloat_data AS (
-- do final math calculations and formatting
select current_database() as databasename,
schemaname, tablename, can_estimate,
table_bytes, round(table_bytes/(1024^2)::NUMERIC,3) as table_mb,
expected_bytes, round(expected_bytes/(1024^2)::NUMERIC,3) as expected_mb,
round(bloat_bytes*100/table_bytes) as pct_bloat,
round(bloat_bytes/(1024::NUMERIC^2),2) as mb_bloat,
table_bytes, expected_bytes, est_rows
FROM table_estimates_plus
)
-- filter output for bloated tables
SELECT databasename, schemaname, tablename,
can_estimate,
est_rows,
pct_bloat, mb_bloat,
table_mb
FROM bloat_data
-- this where clause defines which tables actually appear
-- in the bloat chart
-- example below filters for tables which are either 50%
-- bloated and more than 20mb in size, or more than 25%
-- bloated and more than 1GB in size
WHERE ( pct_bloat >= 50 AND mb_bloat >= 20 )
OR ( pct_bloat >= 25 AND mb_bloat >= 1000 )
ORDER BY pct_bloat DESC;
++++
++++ Get ddl dump for table|
# dump only the DDL (structure) of a single table
pg_dump UIS_MV -t 'public.stage' --schema-only
++++
++++ Situation with Invalid page header encountered: blockNo 0, path .... base/16384/923678|
# facts in log
WARNING: 2022/07/02 02:55:35.641221 Invalid page header encountered: blockNo 0, path /var/lib/postgresql/data/base/16384/923678
WARNING: 2022/07/02 02:55:35.646335 Invalid page header encountered: blockNo 0, path /var/lib/postgresql/data/base/16384/923753
WARNING: 2022/07/02 02:55:35.646392 Invalid page header encountered: blockNo 1, path /var/lib/postgresql/data/base/16384/923753
WARNING: 2022/07/02 02:55:35.648309 Invalid page header encountered: blockNo 0, path /var/lib/postgresql/data/base/16384/923628
WARNING: 2022/07/02 02:55:35.648593 Invalid page header encountered: blockNo 0, path /var/lib/postgresql/data/base/16384/923840
WARNING: 2022/07/02 02:55:35.648655 Invalid page header encountered: blockNo 1, path /var/lib/postgresql/data/base/16384/923628
WARNING: 2022/07/02 02:55:35.648722 Invalid page header encountered: blockNo 1, path /var/lib/postgresql/data/base/16384/923840
# Find
UIS_MV=# SELECT oid,relname,relnamespace,reltype,relfilenode FROM pg_class where relfilenode in (923678, 923753, 923753, 923628, 923840, 923628, 923840);
oid | relname | relnamespace | reltype | relfilenode
--------+-----------------------------+--------------+---------+-------------
418561 | sqlcommands | 418347 | 418563 | 923628
418566 | pg_toast_418561_index | 99 | 0 | 923678
923753 | idx_sqlcommands_currentstep | 418347 | 0 | 923753
923840 | sqlcommands_pkey | 418347 | 0 | 923840
(4 rows)
Time: 1.112 ms
UIS_MV=#show data_directory;
/var/lib/postgresql/data
$ find /var/lib/postgresql/data/ -type f -name 923628
/var/lib/postgresql/data/base/16384/923628
# FIX (warning: zero_damaged_pages zeroes out the damaged pages, losing their contents - take a file-level backup first)
UIS_MV=#SET zero_damaged_pages = ON;
UIS_MV=#VACUUM FULL adm.sqlcommands;
UIS_MV=#REINDEX TABLE adm.sqlcommands;
++++
* shared memory no space left https://medium.com/@tough_jonquil_dog_815/postgresql-no-space-left-on-device-a9c5c3b41cff
* docker shared memory https://www.instaclustr.com/blog/postgresql-docker-and-shared-memory/
==== Postgres.DBA locale collation LC_COLLATE setlocale====
* debian
localedef -i ru_RU -c -f UTF-8 -A /usr/share/locale/locale.alias ru_RU.utf8
export LANG=ru_RU.utf8
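To check which collation each database actually uses, a quick catalog query (a sketch):
SELECT datname, datcollate, datctype FROM pg_database;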
==== Postgres.DBA users privileges grant ====
++++ Users manipulation |
-- https://tableplus.com/blog/2018/04/postgresql-how-to-grant-access-to-users.html
--1. Grant CONNECT to the database:
GRANT CONNECT ON DATABASE database_name TO username;
--2. Grant USAGE on schema:
GRANT USAGE ON SCHEMA schema_name TO username;
--3. Grant on all tables for DML statements: SELECT, INSERT, UPDATE, DELETE:
GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA schema_name TO username;
--4. Grant all privileges on all tables in the schema:
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA schema_name TO username;
--5. Grant all privileges on all sequences in the schema:
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA schema_name TO username;
--6. Grant all privileges on the database:
GRANT ALL PRIVILEGES ON DATABASE database_name TO username;
--7. Grant permission to create database:
ALTER USER username CREATEDB;
--8. Make a user superuser
ALTER USER myuser WITH SUPERUSER;
--9. Remove superuser status:
ALTER USER username WITH NOSUPERUSER;
--Those statements above only affect the current existing tables. To apply to newly created tables, you need to use alter default. For example:
ALTER DEFAULT PRIVILEGES
FOR USER username
IN SCHEMA schema_name
GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO username;
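-- verify what a user was actually granted; a sketch via information_schema
-- (user name is an example):
SELECT grantee, table_schema, table_name, privilege_type
FROM information_schema.role_table_grants
WHERE grantee = 'username';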
++++
==== Postgres.DBA dependencies ====
++++ dependencies - pg_depend get info |
-- https://www.cybertec-postgresql.com/en/tracking-view-dependencies-in-postgresql/
SELECT
classid::regclass AS "depender object class",
CASE classid
WHEN 'pg_class'::regclass THEN objid::regclass::text
WHEN 'pg_type'::regclass THEN objid::regtype::text
WHEN 'pg_proc'::regclass THEN objid::regprocedure::text
ELSE objid::text
END AS "depender object identity",
objsubid,
refclassid::regclass AS "referenced object class",
CASE refclassid
WHEN 'pg_class'::regclass THEN refobjid::regclass::text
WHEN 'pg_type'::regclass THEN refobjid::regtype::text
WHEN 'pg_proc'::regclass THEN refobjid::regprocedure::text
ELSE refobjid::text
END AS "referenced object identity",
refobjsubid,
CASE deptype
WHEN 'p' THEN 'pinned'
WHEN 'i' THEN 'internal'
WHEN 'a' THEN 'automatic'
WHEN 'n' THEN 'normal'
END AS "dependency type"
, pc.conname
, case pc.contype::text
when 'c' then 'check constraint'
when 'f' then 'foreign key constraint'
when 'p' then 'primary key constraint'
when 'u' then 'unique constraint'
else pc.contype::text
end as constrainttype
,conrelid::regclass as "conn_rel_from"
,(select attname from pg_attribute where pg_attribute.attnum=pc.conkey[1] and attrelid=conrelid)
,confrelid::regclass as "conn_foreign_on"
,(select attname from pg_attribute where pg_attribute.attnum=pc.confkey[1] and attrelid=confrelid)
FROM pg_catalog.pg_depend d
left join pg_constraint pc on
pc.oid=d.objid
--pc.confrelid = d.refobjid
and classid::regclass='pg_constraint'::regclass
WHERE (objid >= 16384 OR refobjid >= 16384)
AND refobjid = 'public."Запрос"'::regclass
and classid::regclass='pg_constraint'::regclass
;
++++
++++ dependencies - pg_depend Hierarchies|
-- hierarchy starting from an object: everything that depends on it - example object "ВложениеЗапроса"
with recursive temp1 as (
SELECT refobjid as label
,1 AS i
,ROW_NUMBER () OVER () AS ir
,array[array[1,ROW_NUMBER () OVER () ]] as order_pth
,array[array[
conrelid::regclass ||'.'||(select attname from pg_attribute where pg_attribute.attnum=pc.conkey[1] and attrelid=conrelid limit 1)
,
confrelid::regclass||'.'||(select attname from pg_attribute where pg_attribute.attnum=pc.confkey[1] and attrelid=confrelid)
]] as path
,conrelid as cconrelid
,confrelid as cconfrelid
,/*case pc.contype::text
when 'c' then 'check constraint'
when 'f' then 'foreign key constraint'
when 'p' then 'primary key constraint'
when 'u' then 'unique constraint'
else pc.contype::text
end */
pc.contype::text as cconstrainttype
,conrelid::regclass as "cconn_rel_from"
,(select attname from pg_attribute where pg_attribute.attnum=pc.conkey[1] and attrelid=conrelid) as cattcol
,confrelid::regclass as "cconn_foreign_on"
,(select attname from pg_attribute where pg_attribute.attnum=pc.confkey[1] and attrelid=confrelid) as cforcol
FROM pg_catalog.pg_depend d
left join pg_constraint pc on
pc.oid=d.objid
--pc.confrelid = d.refobjid
and classid::regclass='pg_constraint'::regclass
WHERE (objid >= 16384 OR refobjid >= 16384)
AND refobjid = 'public."ВложениеЗапроса"'::regclass
and classid::regclass='pg_constraint'::regclass
AND pc.contype::text='f'
and conrelid=refobjid
union
select
label
, i+1
, ROW_NUMBER () OVER (ORDER BY i) AS ir
, order_pth || array [i+1,ROW_NUMBER () OVER (ORDER BY i) ]
, path ||array[
conrelid::regclass ||'.'||(select attname from pg_attribute where pg_attribute.attnum=pc.conkey[1] and attrelid=conrelid limit 1)
,
confrelid::regclass||'.'||(select attname from pg_attribute where pg_attribute.attnum=pc.confkey[1] and attrelid=confrelid)
]
, conrelid
, confrelid
, pc.contype::text
, conrelid::regclass
, (select attname from pg_attribute where pg_attribute.attnum=pc.conkey[1] and attrelid=conrelid)
, confrelid::regclass
, (select attname from pg_attribute where pg_attribute.attnum=pc.confkey[1] and attrelid=confrelid)
from temp1 t,
pg_catalog.pg_depend d
left join pg_constraint pc on pc.oid=d.objid
--pc.confrelid = d.refobjid
and classid::regclass='pg_constraint'::regclass
WHERE (objid >= 16384 OR refobjid >= 16384)
AND refobjid = t.cconn_foreign_on
AND confrelid <> t.cconfrelid
and classid::regclass='pg_constraint'::regclass
AND pc.contype::text='f'
and t.i<5 and t.label!=conrelid
)
select
label
, i
, ir
--, order_pth
,
format(
'+%s %s -> %s',
repeat('-', round((i-1) * 4)::int),
path[i][1],path[i][2]
)
--, cconrelid
--, cconfrelid
from temp1
-- where i<5
order by order_pth
++++
++++ dependencies - pg_depend Recursive Hierarchies - объекты ссылающиеся на объект исследования|
-- objects referencing the object under study - reverse hierarchy - example object "Адрес"
with recursive temp1 as (
SELECT refobjid as label
,1 AS i
,ROW_NUMBER () OVER () AS ir
,array[array[1,ROW_NUMBER () OVER () ]] as order_pth
,array[array[
conrelid::regclass ||'.'||(select attname from pg_attribute where pg_attribute.attnum=pc.conkey[1] and attrelid=conrelid limit 1)
,
confrelid::regclass||'.'||(select attname from pg_attribute where pg_attribute.attnum=pc.confkey[1] and attrelid=confrelid)
]] as path
,conrelid as cconrelid
,confrelid as cconfrelid
,/*case pc.contype::text
when 'c' then 'check constraint'
when 'f' then 'foreign key constraint'
when 'p' then 'primary key constraint'
when 'u' then 'unique constraint'
else pc.contype::text
end */
pc.contype::text as cconstrainttype
,conrelid::regclass as "cconn_rel_from"
,(select attname from pg_attribute where pg_attribute.attnum=pc.conkey[1] and attrelid=conrelid) as cattcol
,confrelid::regclass as "cconn_foreign_on"
,(select attname from pg_attribute where pg_attribute.attnum=pc.confkey[1] and attrelid=confrelid) as cforcol
FROM pg_catalog.pg_depend d
left join pg_constraint pc on pc.oid=d.objid
--pc.confrelid = d.refobjid
and classid::regclass='pg_constraint'::regclass
WHERE (objid >= 16384 OR refobjid >= 16384)
AND refobjid = 'public."Адрес"'::regclass
and classid::regclass='pg_constraint'::regclass
AND pc.contype::text='f'
and confrelid=refobjid
and confrelid!=conrelid
union
select
label
, i+1
, ROW_NUMBER () OVER (ORDER BY i) AS ir
, order_pth || array [i+1,ROW_NUMBER () OVER (ORDER BY i) ]
, path ||array[
conrelid::regclass ||'.'||(select attname from pg_attribute where pg_attribute.attnum=pc.conkey[1] and attrelid=conrelid limit 1)
,
confrelid::regclass||'.'||(select attname from pg_attribute where pg_attribute.attnum=pc.confkey[1] and attrelid=confrelid)
]
, conrelid
, confrelid
, pc.contype::text
, conrelid::regclass
, (select attname from pg_attribute where pg_attribute.attnum=pc.conkey[1] and attrelid=conrelid)
, confrelid::regclass
, (select attname from pg_attribute where pg_attribute.attnum=pc.confkey[1] and attrelid=confrelid)
from temp1 t,
pg_catalog.pg_depend d
left join pg_constraint pc on pc.oid=d.objid
--pc.confrelid = d.refobjid
and classid::regclass='pg_constraint'::regclass
WHERE (objid >= 16384 OR refobjid >= 16384)
AND refobjid = t.cconrelid
--AND confrelid <> t.cconfrelid
and classid::regclass='pg_constraint'::regclass
AND pc.contype::text='f'
and t.i<5 -- iteration limit
and t.label!=confrelid -- stop if we are back at the starting object
and confrelid = t.cconrelid -- keep only dependent objects
and confrelid!=conrelid -- drop self-references
)
select
label
, i
, ir
, order_pth
,format(
'+%s %s -> %s',
repeat('-', round((5-i) * 4)::int),
path[i][1],path[i][2]
)
, cconrelid
, cconfrelid
, cconrelid::regclass
, cconfrelid::regclass
--,path
--,t.*
from temp1 t
where 1=1
--and cconfrelid='"ЗаявлениеНаУслугу"'::regclass
--and cconfrelid=cconrelid
order by order_pth
++++
++++ PG_rewrite / PG_constraint details |
select ev_class::regclass,
r.*
from pg_rewrite r
where oid=17182;
select
case pc.contype::text
when 'c' then 'check constraint'
when 'f' then 'foreign key constraint'
when 'p' then 'primary key constraint'
when 'u' then 'unique constraint'
else pc.contype::text
end as constrainttype
,conrelid::regclass
,(select attname from pg_attribute where pg_attribute.attnum=pc.conkey[1] and attrelid=conrelid)
,confrelid::regclass
,(select attname from pg_attribute where pg_attribute.attnum=pc.confkey[1] and attrelid=confrelid)
from pg_constraint pc
where pc.oid=4849966;
++++
==== Postgres.DBA extensions / function ====
++++ extensions / function - get list of function |
SELECT e.extname, ne.nspname AS extschema, p.proname, np.nspname AS proschema
,pg_catalog.pg_get_function_identity_arguments(p.oid)
FROM pg_catalog.pg_extension AS e
INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid)
INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid)
INNER JOIN pg_catalog.pg_namespace AS ne ON (ne.oid = e.extnamespace)
INNER JOIN pg_catalog.pg_namespace AS np ON (np.oid = p.pronamespace)
WHERE d.deptype = 'e'
ORDER BY 1, 3;
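-- list installed extensions and their versions (or \dx in psql):
SELECT extname, extversion FROM pg_extension ORDER BY 1;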
++++
==== Postgres.DBA partition ====
++++ Partition - get info |
SELECT i.inhrelid::regclass
, partition_bound
, split_part(partition_bound, '''', 2) AS lower_bound
, split_part(partition_bound, '''', 4) AS upper_bound
FROM pg_inherits i
JOIN pg_class c ON c.oid = i.inhrelid
, pg_get_expr(c.relpartbound, i.inhrelid) AS partition_bound
WHERE inhparent = 'ЭтапАрхив'::regclass;
++++
++++ Partition - Example creation |
CREATE TABLE public."ЭтапАрхив" (
primarykey uuid NOT NULL,
"Статус" character varying(24),
"ТекстЗапроса" text,
"ТекстОтвета" text,
"ВремяОтправки" timestamp(3) without time zone,
"ВремяПолученияОтвета" timestamp(3) without time zone,
"ИмяЗапросаВоВложении" character varying(50),
createtime timestamp(3) without time zone,
creator character varying(255),
edittime timestamp(3) without time zone,
editor character varying(255),
"КонфигурацияЭтапа" uuid,
"Запрос" uuid,
"ИдСообщенияСМЭВ3" uuid,
namespaceuri text,
rootelement text,
"НомерИтерации" integer
) PARTITION BY RANGE ("ВремяОтправки");
-- note: PostgreSQL has no inline PARTITION clause in CREATE TABLE;
-- partitions are created separately with PARTITION OF, as below
create table if not exists public."ЭтапАрхив_before_2017"
partition of public."ЭтапАрхив"
for values from ('1990-01-01') to (TIMESTAMP' 2017-01-01 00:00:00');
create table if not exists public."ЭтапАрхив_2023"
partition of public."ЭтапАрхив" for values from
('2023-01-01') to ('2024-01-01');
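-- detach/attach an existing partition; a sketch with the tables above:
-- alter table public."ЭтапАрхив" detach partition public."ЭтапАрхив_2023";
-- alter table public."ЭтапАрхив" attach partition public."ЭтапАрхив_2023"
--   for values from ('2023-01-01') to ('2024-01-01');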
++++
++++ Partition - Auto create partition|
-- https://evilmartians.com/chronicles/a-slice-of-life-table-partitioning-in-postgresql-databases
CREATE TABLE measurement (
city_id int not null,
logdate date not null,
peaktemp int,
unitsales int
);
CREATE OR REPLACE FUNCTION new_partition_creator() RETURNS trigger AS
$BODY$
DECLARE
partition_date TEXT;
partition TEXT;
partition_day int;
startdate date;
enddate date;
BEGIN
partition_day := to_char(NEW.logdate,'DD');
partition_date := to_char(NEW.logdate,'YYYY_MM');
IF partition_day < 15 THEN
partition := TG_RELNAME || '_' || partition_date || '_p1';
startdate := to_char(NEW.logdate,'YYYY-MM-01');
enddate := date_trunc('MONTH', NEW.logdate) + INTERVAL '1 MONTH - 1 day';
ELSE
partition := TG_RELNAME || '_' || partition_date || '_p2';
startdate := to_char(NEW.logdate,'YYYY-MM-15');
enddate := date_trunc('MONTH', NEW.logdate) + INTERVAL '1 MONTH - 1 day';
END IF;
IF NOT EXISTS(SELECT relname FROM pg_class WHERE relname=partition) THEN
RAISE NOTICE 'A partition has been created %',partition;
EXECUTE 'CREATE TABLE ' || partition || ' ( CHECK ( logdate >= DATE ''' || startdate || ''' AND logdate <= DATE ''' || enddate || ''' )) INHERITS (' || TG_RELNAME || ');';
EXECUTE 'CREATE INDEX ' || partition || '_logdate ON ' || partition || '(logdate)';
EXECUTE 'ALTER TABLE ' || partition || ' add primary key(city_id);';
END IF;
EXECUTE 'INSERT INTO ' || partition || ' SELECT(' || TG_RELNAME || ' ' || quote_literal(NEW) || ').* RETURNING city_id;';
RETURN NULL;
END;
$BODY$
LANGUAGE plpgsql VOLATILE
COST 100;
CREATE TRIGGER testing_partition_insert_trigger BEFORE INSERT ON measurement FOR EACH ROW EXECUTE PROCEDURE new_partition_creator();
postgres=# insert into measurement values(1,'2017-10-11',10,10);
NOTICE: A partition has been created measurement_2017_10_p1
INSERT 0 0
-- INSERT 0 0 is expected: the BEFORE trigger returns NULL, so the row is stored
-- only in the partition, not in the parent table
++++
===== Postgres.Backup =====
**Backup with pg_dump and pg_restore**
++++ SQL pg_dump/pg_restore |
** Prepare **
# exec CMDS .1.196
/usr/local/bin/docker-compose -f /opt/docker-pg-wal-g/docker-compose.yml exec -T Db bash
/usr/local/bin/docker-compose -f /opt/docker-pg-wal-g/docker-compose.yml exec -u postgres Db bash
# Restore DB
createdb UIS_MV
psql UIS_MV -c "CREATE USER uis_mv WITH PASSWORD 'uis_mvuis_mv';"
**0. Dump DDL - only structure** \\
pg_dump --host localhost --port 5432 -d ISUI --username postgres --format plain --verbose --file "/tmp/table.sql" --table dbo.tmp_dist
\\
UIS_MV backup - run it only inside **screen**! \\
**1. directory-format dump with 10 parallel jobs**
$ time pg_dump -Z 2 -j 10 -Fd --clean --if-exists UIS_MV -f /tmp/backup/dmp/temp/
**2. restore**
$ time pg_restore -c -d UIS_MV -j 12 /opt/dmp/temp
# if plain text
$ zcat ./UIS_MV_20220713_192005.dump.gz | psql UIS_MV 2>&1 >./walg/log.txt
**3. test**
ash-5.0$ time pg_dumpall > /dev/null 2>/tmp/dump.log
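**4. inspect the archive** - a quick sanity check of the directory-format dump (path as in step 1)
$ pg_restore --list /tmp/backup/dmp/temp/ | head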
++++
==== Postgres.Backup.Wal-g wal-g ====
* Wal-g - https://habr.com/ru/post/506610/ - example
* manual dump with WAL example - https://pgdash.io/blog/postgres-incremental-backup-recovery.html
* pg_backup example
=== Wal-g.Backup articles ===
* reliable and fast backups [[https://highload.ru/spring/2021/abstracts/7276|link]]
* example - [[https://habr.com/ru/post/506610/|habr example]] [[https://medium.com/@philyuchkoff/wal-g-953490c74b98|medium]] [[https://github.com/camptocamp/wal-g-prometheus-exporter|wal-g-prometheus-exporter]]
=== Wal-g.Backup procedure ===
++++ Backup steps and instructions|
- estimate the space needed - see "SQL check version/size" above
- switch the WAL segment: ''SELECT pg_walfile_name(pg_switch_wal()), pg_walfile_name(pg_switch_wal());''
- run the wal-g backup - see wal-g
- verify correctness - see ''Sql.check - SQL check database data''
- verify the indexes - ''SQL check database index - amcheck''
++++
=== WAL-G.restore ===
++++ Wal-g restore|
# manage the DB containers
# WAL-G list repository
/usr/local/bin/docker-compose -f /opt/docker-pg-wal-g/docker-compose.yml exec -u postgres -T Db wal-g backup-list --pretty
/usr/local/bin/docker-compose -f /opt/docker-pg-wal-g/docker-compose.yml exec -u postgres -T Db wal-g wal-show
/usr/local/bin/docker-compose -f /opt/docker-pg-wal-g/docker-compose.yml exec -T Db /home/wal-g/cron_do_backup.sh
# PG run in compose
/usr/local/bin/docker-compose -f /opt/docker-pg-wal-g/docker-compose.yml exec -T Db bash
/usr/local/bin/docker-compose -f /opt/docker-pg-wal-g/docker-compose.yml exec -T Db -u postgres psql UIS_MV
# PG STOP
/usr/local/bin/docker-compose -f /opt/docker-pg-wal-g/docker-compose.yml down
1. start the restore container
docker container run --rm -it -u postgres --name pgrestore \
-v $(pwd)/wal-g/.walg.json:/var/lib/postgresql/.walg.json \
-v $(pwd)/wal-g/wal-g:/usr/bin/wal-g \
-v $(pwd)/wal-g/:/home/wal-g/ \
-v /media/add300gb/postgres/data:/var/lib/postgresql/data/ \
-v /etc/localtime:/etc/localtime \
-v /etc/sysconfig/clock:/etc/sysconfig/clock \
-v /media/synology-esb-smb/uismv-db-backups/full-backups:/opt/dmp \
postgres:14-alpine /bin/bash
2. clean out $PGDATA
cd $PGDATA
rm -rf -- $PGDATA/*
3. check the backup repository
# WAL-G list repository
wal-g wal-show && wal-g backup-list --pretty
# WAL-G restore data
wal-g backup-fetch $PGDATA base_0000000200000069000000F1_D_0000000200000069000000EC --reverse-unpack --skip-redundant-tars
# after the restore, switch the database into recovery mode
touch $PGDATA/recovery.signal
# if PITR is needed [Set PITR]
echo " " >> $PGDATA/postgresql.conf
echo "recovery_target_time = '2022-07-18 09:29:07.907202+05'" >> $PGDATA/postgresql.conf
# ready to start the database
6. start postgresql recovery
# run inside the container
/usr/local/bin/docker-entrypoint.sh postgres -c cluster_name='UIS_MV'
# or start the target container on the host
/usr/local/bin/docker-compose -f /opt/docker-pg-wal-g/docker-compose.yml up Db
-- check data in DB - example
[mea@root-esb-pg ~]$ /usr/local/bin/docker-compose -f /opt/docker-pg-wal-g/docker-compose.yml exec -u postgres Db psql UIS_MV
UIS_MV=# SET search_path TO adm,public;
UIS_MV=# SHOW SEARCH_PATH;
search_path
-------------
adm, public
(1 row)
UIS_MV=# SELECT * FROM comments where body like '%no this test %' LIMIT 10;
id | user_id | post_id | body
----+---------+---------+------
(0 rows)
UIS_MV=# SELECT * FROM comments where body like '%Test comment 927%' LIMIT 10;
id | user_id | post_id | body
-------+---------+---------+---------------------
18001 | 6 | 16 | Test comment 927 1
...
18010 | 10 | 46 | Test comment 927 10
(10 rows)
-- # finish check
select pg_wal_replay_resume();
7. amcheck - see the amcheck fold above - RECONNECT first
-- in public do
CREATE extension IF NOT EXISTS amcheck;
-- list index
SELECT c.oid, i.indisunique
,n.nspname
,c.relname
,c.relpages
FROM pg_index i
JOIN pg_opclass op ON i.indclass[0] = op.oid
JOIN pg_am am ON op.opcmethod = am.oid
JOIN pg_class c ON i.indexrelid = c.oid
JOIN pg_namespace n ON c.relnamespace = n.oid
WHERE am.amname = 'btree'
AND (1=1 or n.nspname = 'pg_catalog') -- # set 1=0 if you need only system
AND c.relpersistence != 't'
AND c.relkind = 'i' AND i.indisready AND i.indisvalid
ORDER BY c.relpages DESC LIMIT 10;
INFO: Btree index scan with amcheck successfully finished. 0 errors.
DO
-- example restore
9:36 ->
bash-5.1$ wal-g backup-fetch $PGDATA base_0000000200000069000000F1_D_0000000200000069000000EC --reverse-unpack --skip-redundant-tars
INFO: 2022/07/18 09:36:51.195271 Selecting the backup with name base_0000000200000069000000F1_D_0000000200000069000000EC...
INFO: 2022/07/18 09:36:51.227569 Delta base_0000000200000069000000F1_D_0000000200000069000000EC at LSN 69/F1000028
INFO: 2022/07/18 09:36:51.236294 AO files metadata was not found. Skipping the AO segments unpacking.
INFO: 2022/07/18 09:48:25.596068 Finished extraction of part_065.tar.br
INFO: 2022/07/18 09:48:25.625988 Finished extraction of pg_control.tar.br
INFO: 2022/07/18 09:48:25.630771
Backup extraction complete.
220718 09:35:19 - 18 09:48:33 73G /media/add300gb/postgres
++++
===== PostgresPro =====
* https://github.com/sgrinko/docker-postgres - good docker example
* https://help.1forma.ru/Maintenance/tech_req_1f_prepare_postgrepro.htm - database installation
===== Postgres Cluster =====
https://github.com/vitabaks/postgresql_cluster
===== Postgres Information , monitoring , performance , tools =====
* :!:[[https://tapoueh.org/conf/|talks by a PostgreSQL author]]
* :!:[[https://explain.tensor.ru/|explain.tensor.ru query analysis]] [[https://github.com/dalibo/pev2|pev2 PostgreSQL query plan analyzer]] ''psql -XqAt -f /opt/dmp/walg/explain.sql UIS_MV > /tmp/analyze.json''
* :!:[[https://habr.com/ru/company/oleg-bunin/blog/461935/|How to work with Go]]
* [[https://habr.com/ru/company/alfa/blog/539350/|Expanding a 5.7 TB PostgreSQL cluster and upgrading from 9.6 to 12.4 (Alfa)]]
==== Postgres.Monitoring ====
* pgwatch2 - monitoring Postgres
* [[https://severalnines.com/database-blog/performance-monitoring-auditing-postgresql-top-resources|Performance monitoring]]
* [[https://pgconf.ru/media/2017/04/03/20170317H2_Vagin.pdf|PG monitoring at Avito]]
* [[https://github.com/postgrespro/mamonsu|Zabbix PgPro mamonsu]]
* [[https://github.com/bucardo/check_postgres/|Nagios - check_postgres]]
* [[https://github.com/corybolar/Docker-NagiosXI/blob/master/Dockerfile| NagiosXI docker]]
* [[https://www.postgresql.eu/events/pgconfeu2018/sessions/session/2166/slides/147/monitoring.pdf| presentation]]
* [[https://pgmetrics.io/docs/install.html|pgmetrics]]
* [[https://github.com/powa-team/powa-web|PoWA - PostgreSQL workload analyzer]]
==== Postgres.performance ====
* [[https://infostart.ru/public/1023353/|How one PostgreSQL configuration change made slow queries 50 times faster]]
* [[https://www.slideshare.net/samokhvalov/postgres-docker-highload-2018|Docker Highload]]
* [[https://tapoueh.org/tags/concurrency/|Postgresql concurrency]]
* [[https://habr.com/ru/companies/tensor/articles/808931/|Unexpected consequences of running PostgreSQL in Docker: disabling JIT and dissecting queries / Habr]]
==== Postgres.Pooler ====
* [[https://brandur.org/postgres-connections|Why you need a connection pooler]]
==== Useful tools ====
* [[https://github.com/dataegret/pg-utils|Useful scripts]]
* [[https://github.com/dataegret/pgcompacttable|pgcompacttable - DB compaction tool]] [[https://drive.google.com/file/d/11tFzuCayc0Fru43pjRppPDgqMoWNOipt/view|presentation]]
==== Postgresql Partition ====
- [[https://www.2ndquadrant.com/en/blog/postgresql-12-partitioning/]]
- [[https://habr.com/ru/company/postgrespro/blog/456716/|Not-so-big data]] [[https://drive.google.com/file/d/1IOyajC9Q1w-NKYPJzvWHLLfKfsHsiSYQ/view|presentation]]
=== Postgres.Books ===
* https://postgrespro.ru/education/books/internals - PostgreSQL 14 Internals
* https://www.interdb.jp/pg/index.html - The Internals of PostgreSQL
https://postgrespro.ru/education/books/introbook \\
{{:pasted:postgresbook.avif?400}}
- https://habr.com/ru/company/mailru/blog/266811/
- https://www.youtube.com/watch?v=lMPYerAYEVs
- https://learn.hashicorp.com/tutorials/consul/deployment-guide
===== Postgres Install =====
* postgres install example in docker https://habr.com/ru/post/578744/
* https://habr.com/ru/articles/735274/comments/ - postgresql docker shared_buffers effective_cache_size work_mem shm_size random_page_cost ssd
===== Postgres Additional info =====
* https://shekhargulati.com/2022/07/08/my-notes-on-gitlabs-postgres-schema-design/