PostgreSQL Cheatsheet
Getting Started
Installation
sudo apt update
sudo apt install postgresql postgresql-contrib
brew install postgresql
sudo systemctl start postgresql
brew services start postgresql
sudo -u postgres psql
psql postgres
sudo -u postgres createuser --interactive
sudo -u postgres createdb mydatabase
psql Commands
\c database_name
\l
\dt
\d table_name
\du
SELECT current_user;
SELECT current_database();
\i /path/to/file.sql
\q
\?
Connection String
postgresql://username:password@host:port/database
postgresql://user:pass@localhost:5432/mydb
postgresql://postgres:secret@192.168.1.100:5432/production
export DATABASE_URL="postgresql://user:pass@localhost:5432/mydb"
Database Operations
Create and Drop Database
CREATE DATABASE company_db;
CREATE DATABASE company_db WITH OWNER = myuser;
CREATE DATABASE company_db
WITH OWNER = myuser
ENCODING = 'UTF8'
LC_COLLATE = 'en_US.UTF-8'
LC_CTYPE = 'en_US.UTF-8'
TEMPLATE = template0;
DROP DATABASE company_db;
DROP DATABASE IF EXISTS company_db;
ALTER DATABASE old_name RENAME TO new_name;
ALTER DATABASE company_db OWNER TO new_owner;
Users and Permissions
CREATE USER john WITH PASSWORD 'secret123';
CREATE ROLE admin WITH LOGIN PASSWORD 'admin123';
CREATE USER developer WITH
LOGIN
PASSWORD 'dev123'
CREATEDB
CREATEROLE;
GRANT ALL PRIVILEGES ON DATABASE company_db TO john;
GRANT CONNECT ON DATABASE company_db TO john;
GRANT USAGE ON SCHEMA public TO john;
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO john;
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO john;
GRANT SELECT, INSERT, UPDATE, DELETE ON employees TO john;
GRANT SELECT ON employees TO readonly_user;
CREATE ROLE hr_role;
GRANT hr_role TO john;
REVOKE ALL PRIVILEGES ON DATABASE company_db FROM john;
REVOKE INSERT, UPDATE, DELETE ON employees FROM john;
DROP USER john;
DROP ROLE IF EXISTS admin;
ALTER USER john PASSWORD 'newpassword';
\du
SELECT * FROM information_schema.role_table_grants WHERE grantee = 'john';
Table Operations
Create Tables
CREATE TABLE employees (
id SERIAL PRIMARY KEY,
first_name VARCHAR(50) NOT NULL,
last_name VARCHAR(50) NOT NULL,
email VARCHAR(100) UNIQUE NOT NULL,
hire_date DATE DEFAULT CURRENT_DATE,
salary DECIMAL(10,2),
department_id INTEGER REFERENCES departments(id)
);
CREATE TABLE products (
id SERIAL PRIMARY KEY,
name VARCHAR(100) NOT NULL,
price DECIMAL(10,2) CHECK (price > 0),
category VARCHAR(50) DEFAULT 'General',
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT unique_name_category UNIQUE (name, category)
);
CREATE TABLE orders (
id SERIAL PRIMARY KEY,
customer_id INTEGER NOT NULL,
order_date DATE DEFAULT CURRENT_DATE,
total_amount DECIMAL(10,2),
FOREIGN KEY (customer_id) REFERENCES customers(id) ON DELETE CASCADE
);
CREATE TEMPORARY TABLE temp_data (
id INTEGER,
value TEXT
);
CREATE TABLE high_earners AS
SELECT * FROM employees WHERE salary > 100000;
CREATE TABLE employees_backup (LIKE employees INCLUDING ALL);
Alter Tables
ALTER TABLE employees ADD COLUMN phone VARCHAR(20);
ALTER TABLE employees ADD COLUMN age INTEGER DEFAULT 0;
ALTER TABLE employees DROP COLUMN phone;
ALTER TABLE employees DROP COLUMN IF EXISTS age;
ALTER TABLE employees ALTER COLUMN salary TYPE DECIMAL(12,2);
ALTER TABLE employees ALTER COLUMN salary SET NOT NULL;
ALTER TABLE employees ALTER COLUMN salary DROP NOT NULL;
ALTER TABLE employees ALTER COLUMN salary SET DEFAULT 50000;
ALTER TABLE employees ALTER COLUMN salary DROP DEFAULT;
ALTER TABLE employees RENAME COLUMN first_name TO fname;
ALTER TABLE employees RENAME TO staff;
ALTER TABLE employees ADD CONSTRAINT salary_check CHECK (salary > 0);
ALTER TABLE employees ADD CONSTRAINT fk_department
FOREIGN KEY (department_id) REFERENCES departments(id);
ALTER TABLE employees DROP CONSTRAINT salary_check;
ALTER TABLE employees DROP CONSTRAINT IF EXISTS fk_department;
ALTER TABLE employees ADD PRIMARY KEY (id);
ALTER TABLE employees ADD CONSTRAINT unique_email UNIQUE (email);
ALTER TABLE employees OWNER TO new_owner;
Drop Tables
DROP TABLE employees;
DROP TABLE IF EXISTS employees;
DROP TABLE employees, departments, orders;
DROP TABLE employees CASCADE;
TRUNCATE TABLE employees;
TRUNCATE TABLE employees RESTART IDENTITY;
TRUNCATE TABLE employees CASCADE;
Data Types
Numeric Types
SMALLINT
INTEGER (INT)
BIGINT
SERIAL
BIGSERIAL
DECIMAL(precision, scale)
NUMERIC(precision, scale)
REAL
DOUBLE PRECISION
CREATE TABLE numeric_examples (
small_num SMALLINT,
regular_num INTEGER,
big_num BIGINT,
auto_id SERIAL,
price DECIMAL(10,2),
percentage REAL,
scientific DOUBLE PRECISION
);
String Types
CHAR(n)
VARCHAR(n)
TEXT
CREATE TABLE string_examples (
country_code CHAR(2),
name VARCHAR(100),
description TEXT,
status VARCHAR(20) DEFAULT 'active'
);
SELECT
UPPER('hello') as uppercase,
LOWER('WORLD') as lowercase,
LENGTH('PostgreSQL') as length,
SUBSTRING('PostgreSQL', 1, 4) as sub,
CONCAT('Hello', ' ', 'World') as concat,
TRIM(' spaced ') as trimmed,
POSITION('SQL' IN 'PostgreSQL') as pos;
Date and Time Types
DATE
TIME
TIMESTAMP
TIMESTAMPTZ
INTERVAL
CREATE TABLE datetime_examples (
event_date DATE,
event_time TIME,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
scheduled_at TIMESTAMPTZ,
duration INTERVAL
);
SELECT
CURRENT_DATE as today,
CURRENT_TIME as now_time,
CURRENT_TIMESTAMP as now_timestamp,
NOW() as now_function,
AGE('2023-01-01') as age_calc,
EXTRACT(YEAR FROM NOW()) as year_part,
DATE_TRUNC('month', NOW()) as month_start;
SELECT
NOW() + INTERVAL '1 day' as tomorrow,
NOW() - INTERVAL '1 month' as last_month,
DATE '2023-01-01' + INTEGER '7' as week_later;
Boolean and Other Types
CREATE TABLE boolean_example (
id SERIAL PRIMARY KEY,
is_active BOOLEAN DEFAULT TRUE,
is_verified BOOLEAN NOT NULL
);
CREATE TABLE array_example (
id SERIAL PRIMARY KEY,
tags TEXT[],
scores INTEGER[],
matrix INTEGER[][]
);
CREATE TABLE json_example (
id SERIAL PRIMARY KEY,
data JSON,
metadata JSONB
);
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
CREATE TABLE uuid_example (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
name VARCHAR(100)
);
CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy');
CREATE TABLE person (
name TEXT,
current_mood mood
);
CRUD Operations
INSERT
INSERT INTO employees (first_name, last_name, email, salary)
VALUES ('John', 'Doe', 'john.doe@email.com', 75000);
INSERT INTO employees (first_name, last_name, email, salary) VALUES
('Jane', 'Smith', 'jane.smith@email.com', 80000),
('Bob', 'Johnson', 'bob.johnson@email.com', 70000),
('Alice', 'Williams', 'alice.williams@email.com', 85000);
INSERT INTO employees_backup
SELECT * FROM employees WHERE salary > 75000;
INSERT INTO employees (email, first_name, last_name, salary)
VALUES ('john.doe@email.com', 'John', 'Doe', 80000)
ON CONFLICT (email) DO UPDATE SET
salary = EXCLUDED.salary,
updated_at = CURRENT_TIMESTAMP;
INSERT INTO employees (first_name, last_name, email, salary)
VALUES ('Charlie', 'Brown', 'charlie@email.com', 72000)
RETURNING id, first_name, last_name;
INSERT INTO employees (first_name, last_name, email)
VALUES ('Default', 'User', 'default@email.com');
SELECT
SELECT * FROM employees;
SELECT first_name, last_name, salary FROM employees;
SELECT * FROM employees WHERE salary > 75000;
SELECT * FROM employees WHERE department = 'IT' AND salary BETWEEN 60000 AND 80000;
SELECT * FROM employees WHERE first_name IN ('John', 'Jane', 'Bob');
SELECT * FROM employees WHERE email LIKE '%@gmail.com';
SELECT * FROM employees WHERE hire_date >= '2023-01-01';
SELECT * FROM employees ORDER BY salary DESC;
SELECT * FROM employees ORDER BY last_name, first_name;
SELECT * FROM employees ORDER BY salary DESC NULLS LAST;
SELECT * FROM employees LIMIT 10;
SELECT * FROM employees LIMIT 10 OFFSET 20;
SELECT department, COUNT(*) as employee_count, AVG(salary) as avg_salary
FROM employees
GROUP BY department
HAVING COUNT(*) > 5
ORDER BY avg_salary DESC;
SELECT DISTINCT department FROM employees;
SELECT DISTINCT department, location FROM employees;
SELECT
first_name,
last_name,
salary,
ROW_NUMBER() OVER (ORDER BY salary DESC) as salary_rank,
RANK() OVER (PARTITION BY department ORDER BY salary DESC) as dept_rank,
LAG(salary) OVER (ORDER BY hire_date) as previous_salary
FROM employees;
WITH high_earners AS (
SELECT * FROM employees WHERE salary > 80000
),
departments_stats AS (
SELECT department, COUNT(*) as count FROM high_earners GROUP BY department
)
SELECT * FROM departments_stats WHERE count > 2;
-- Recursive walk of the reporting chain: start at employees with no
-- manager (the roots) and add one management level per iteration.
-- UNION ALL avoids a needless sort/dedup pass on every iteration; the
-- manager hierarchy is acyclic, so duplicates cannot occur.
WITH RECURSIVE employee_hierarchy AS (
SELECT id, first_name, last_name, manager_id, 1 as level
FROM employees
WHERE manager_id IS NULL
UNION ALL
SELECT e.id, e.first_name, e.last_name, e.manager_id, eh.level + 1
FROM employees e
JOIN employee_hierarchy eh ON e.manager_id = eh.id
)
SELECT * FROM employee_hierarchy ORDER BY level, last_name;
JOIN Operations
SELECT e.first_name, e.last_name, d.department_name
FROM employees e
INNER JOIN departments d ON e.department_id = d.id;
SELECT e.first_name, e.last_name, d.department_name
FROM employees e
LEFT JOIN departments d ON e.department_id = d.id;
SELECT e.first_name, e.last_name, d.department_name
FROM employees e
RIGHT JOIN departments d ON e.department_id = d.id;
SELECT e.first_name, e.last_name, d.department_name
FROM employees e
FULL OUTER JOIN departments d ON e.department_id = d.id;
SELECT e1.first_name as employee, e2.first_name as manager
FROM employees e1
LEFT JOIN employees e2 ON e1.manager_id = e2.id;
SELECT
e.first_name,
e.last_name,
d.department_name,
l.city,
l.country
FROM employees e
JOIN departments d ON e.department_id = d.id
JOIN locations l ON d.location_id = l.id;
SELECT e.first_name, p.project_name
FROM employees e
CROSS JOIN projects p;
UPDATE
UPDATE employees SET salary = 80000 WHERE id = 1;
UPDATE employees
SET salary = salary * 1.1,
updated_at = CURRENT_TIMESTAMP
WHERE department = 'IT';
UPDATE employees
SET salary = (
SELECT AVG(salary) * 1.1
FROM employees e2
WHERE e2.department = employees.department
)
WHERE performance_rating = 'Excellent';
UPDATE employees
SET salary = salary_adjustments.new_salary
FROM salary_adjustments
WHERE employees.id = salary_adjustments.employee_id;
UPDATE employees
SET salary = salary * 1.05
WHERE department = 'Sales'
RETURNING id, first_name, last_name, salary;
UPDATE employees
SET bonus = CASE
WHEN salary > 100000 THEN salary * 0.1
WHEN salary > 75000 THEN salary * 0.05
ELSE 1000
END;
DELETE
DELETE FROM employees WHERE id = 1;
DELETE FROM employees WHERE salary < 50000;
DELETE FROM employees WHERE hire_date < '2020-01-01';
DELETE FROM employees
WHERE department_id IN (
SELECT id FROM departments WHERE budget < 100000
);
-- Remove duplicate rows (same email), keeping the row with the lowest id.
-- NOTE: "DELETE <alias> FROM ... JOIN ..." is MySQL syntax and is a syntax
-- error in PostgreSQL; the PostgreSQL idiom for a multi-table delete is
-- DELETE ... USING.
DELETE FROM employees e1
USING employees e2
WHERE e1.email = e2.email
AND e1.id > e2.id;
DELETE FROM employees
WHERE department = 'Marketing'
RETURNING id, first_name, last_name;
TRUNCATE TABLE employees;
TRUNCATE TABLE employees RESTART IDENTITY CASCADE;
Advanced Features
Indexes
CREATE INDEX idx_employees_email ON employees (email);
CREATE INDEX idx_employees_salary ON employees (salary);
CREATE INDEX idx_employees_name ON employees (last_name, first_name);
CREATE UNIQUE INDEX idx_employees_email_unique ON employees (email);
CREATE INDEX idx_active_employees ON employees (last_name)
WHERE status = 'active';
CREATE INDEX idx_employees_lower_email ON employees (LOWER(email));
CREATE INDEX idx_products_search ON products
USING gin(to_tsvector('english', name || ' ' || description));
\di
SELECT * FROM pg_indexes WHERE tablename = 'employees';
DROP INDEX idx_employees_email;
DROP INDEX IF EXISTS idx_employees_salary;
REINDEX INDEX idx_employees_email;
REINDEX TABLE employees;
Views
CREATE VIEW employee_summary AS
SELECT
e.id,
e.first_name || ' ' || e.last_name as full_name,
e.email,
d.department_name,
e.salary
FROM employees e
JOIN departments d ON e.department_id = d.id;
CREATE MATERIALIZED VIEW department_stats AS
SELECT
d.department_name,
COUNT(e.id) as employee_count,
AVG(e.salary) as avg_salary,
MIN(e.salary) as min_salary,
MAX(e.salary) as max_salary
FROM departments d
LEFT JOIN employees e ON d.id = e.department_id
GROUP BY d.id, d.department_name;
REFRESH MATERIALIZED VIEW department_stats;
CREATE VIEW high_earners AS
SELECT * FROM employees WHERE salary > 80000
WITH CHECK OPTION;
DROP VIEW employee_summary;
DROP MATERIALIZED VIEW department_stats;
Functions
-- Scalar function: returns the total number of rows in employees.
CREATE OR REPLACE FUNCTION get_employee_count()
RETURNS INTEGER AS $$
BEGIN
RETURN (SELECT COUNT(*) FROM employees);
END;
$$ LANGUAGE plpgsql;
-- Set-returning function: lists id, full name, and salary for every
-- employee in the named department. Yields an empty set (not an error)
-- when the department does not exist.
CREATE OR REPLACE FUNCTION get_employees_by_department(dept_name TEXT)
RETURNS TABLE(id INTEGER, full_name TEXT, salary DECIMAL) AS $$
BEGIN
-- RETURN QUERY appends the query's rows to the function's result set.
RETURN QUERY
SELECT
e.id,
e.first_name || ' ' || e.last_name,
e.salary
FROM employees e
JOIN departments d ON e.department_id = d.id
WHERE d.department_name = dept_name;
END;
$$ LANGUAGE plpgsql;
-- Bonus as a fraction of base salary, keyed on a performance rating.
-- performance_rating defaults to 'average'; any unknown rating yields 0.
CREATE OR REPLACE FUNCTION calculate_bonus(
base_salary DECIMAL,
performance_rating TEXT DEFAULT 'average'
)
RETURNS DECIMAL AS $$
BEGIN
-- PL/pgSQL CASE *statement* (not a CASE expression): each branch
-- RETURNs directly and the statement is closed with END CASE.
CASE performance_rating
WHEN 'excellent' THEN RETURN base_salary * 0.15;
WHEN 'good' THEN RETURN base_salary * 0.10;
WHEN 'average' THEN RETURN base_salary * 0.05;
ELSE RETURN 0;
END CASE;
END;
$$ LANGUAGE plpgsql;
SELECT get_employee_count();
SELECT * FROM get_employees_by_department('IT');
SELECT calculate_bonus(75000, 'excellent');
DROP FUNCTION get_employee_count();
Triggers
-- Generic BEFORE UPDATE row trigger function: stamps updated_at on the
-- incoming row. Returning NEW lets the modified row proceed to storage.
CREATE OR REPLACE FUNCTION update_modified_time()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = CURRENT_TIMESTAMP;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Fire update_modified_time() once per updated row, BEFORE the write,
-- so the refreshed updated_at value is stored with the row itself.
CREATE TRIGGER employees_update_trigger
BEFORE UPDATE ON employees
FOR EACH ROW
EXECUTE FUNCTION update_modified_time();
-- Audit log for employees: one row per INSERT/UPDATE/DELETE, with the
-- affected row captured as before/after JSONB images.
CREATE TABLE employee_audit (
audit_id SERIAL PRIMARY KEY,
employee_id INTEGER,
action TEXT,
old_values JSONB,
new_values JSONB,
-- Defaults record who made the change and when, with no app involvement.
changed_by TEXT DEFAULT current_user,
changed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
-- Row-level audit trigger function: logs every change to employees in
-- employee_audit. TG_OP identifies which operation fired the trigger.
CREATE OR REPLACE FUNCTION employee_audit_trigger()
RETURNS TRIGGER AS $$
BEGIN
IF TG_OP = 'INSERT' THEN
-- No OLD row exists on INSERT; store only the new image.
INSERT INTO employee_audit (employee_id, action, new_values)
VALUES (NEW.id, 'INSERT', row_to_json(NEW));
RETURN NEW;
ELSIF TG_OP = 'UPDATE' THEN
INSERT INTO employee_audit (employee_id, action, old_values, new_values)
VALUES (NEW.id, 'UPDATE', row_to_json(OLD), row_to_json(NEW));
RETURN NEW;
ELSIF TG_OP = 'DELETE' THEN
-- OLD is the only row image available on DELETE.
INSERT INTO employee_audit (employee_id, action, old_values)
VALUES (OLD.id, 'DELETE', row_to_json(OLD));
RETURN OLD;
END IF;
-- Unreachable for the three DML operations above; for an AFTER trigger
-- the return value is ignored anyway.
RETURN NULL;
END;
$$ LANGUAGE plpgsql;
-- AFTER trigger: logs the change only once the row operation succeeds.
-- Triggers and functions live in separate namespaces, so the trigger may
-- legally share the name of its function.
CREATE TRIGGER employee_audit_trigger
AFTER INSERT OR UPDATE OR DELETE ON employees
FOR EACH ROW
EXECUTE FUNCTION employee_audit_trigger();
DROP TRIGGER employees_update_trigger ON employees;
Transactions
BEGIN;
UPDATE employees SET salary = salary * 1.1 WHERE department = 'IT';
UPDATE departments SET budget = budget * 1.1 WHERE name = 'IT';
COMMIT;
BEGIN;
UPDATE employees SET salary = salary * 2;
ROLLBACK;
BEGIN;
UPDATE employees SET salary = salary * 1.05;
SAVEPOINT before_bonus;
UPDATE employees SET bonus = salary * 0.1;
ROLLBACK TO before_bonus;
COMMIT;
BEGIN ISOLATION LEVEL READ COMMITTED;
COMMIT;
BEGIN ISOLATION LEVEL REPEATABLE READ;
COMMIT;
BEGIN ISOLATION LEVEL SERIALIZABLE;
COMMIT;
JSON Operations
JSON and JSONB
CREATE TABLE products (
id SERIAL PRIMARY KEY,
name VARCHAR(100),
attributes JSON,
metadata JSONB
);
INSERT INTO products (name, attributes, metadata) VALUES
('Laptop', '{"brand": "Dell", "ram": "16GB", "storage": "512GB SSD"}',
'{"category": "electronics", "tags": ["computer", "portable"], "price": 999.99}'),
('Phone', '{"brand": "Apple", "model": "iPhone 14", "storage": "128GB"}',
'{"category": "electronics", "tags": ["mobile", "smartphone"], "price": 799.99}');
SELECT name, attributes->>'brand' as brand FROM products;
SELECT name, attributes->'ram' as ram FROM products;
SELECT * FROM products WHERE attributes->>'brand' = 'Dell';
SELECT * FROM products WHERE metadata @> '{"category": "electronics"}';
SELECT * FROM products WHERE metadata ? 'price';
SELECT * FROM products WHERE metadata->'tags' ? 'computer';
SELECT
name,
json_extract_path_text(attributes, 'brand') as brand,
jsonb_extract_path(metadata, 'price') as price,
jsonb_array_length(metadata->'tags') as tag_count
FROM products;
UPDATE products
SET metadata = metadata || '{"on_sale": true}'
WHERE metadata->>'category' = 'electronics';
UPDATE products
SET metadata = jsonb_set(metadata, '{price}', '899.99')
WHERE name = 'Laptop';
UPDATE products
SET metadata = metadata - 'on_sale'
WHERE name = 'Phone';
SELECT
metadata->>'category' as category,
json_agg(name) as products,
avg((metadata->>'price')::numeric) as avg_price
FROM products
GROUP BY metadata->>'category';
Arrays
CREATE TABLE posts (
id SERIAL PRIMARY KEY,
title VARCHAR(200),
tags TEXT[],
ratings INTEGER[]
);
INSERT INTO posts (title, tags, ratings) VALUES
('PostgreSQL Tutorial', ARRAY['database', 'sql', 'postgresql'], ARRAY[5, 4, 5, 4]),
('Web Development', ARRAY['html', 'css', 'javascript'], ARRAY[4, 5, 3, 4]);
SELECT * FROM posts WHERE 'postgresql' = ANY(tags);
SELECT * FROM posts WHERE tags @> ARRAY['database'];
SELECT * FROM posts WHERE array_length(tags, 1) > 2;
SELECT
title,
array_length(tags, 1) as tag_count,
array_to_string(tags, ', ') as tag_list,
array_agg(ratings) as all_ratings,
(SELECT avg(r) FROM unnest(ratings) as r) as avg_rating
FROM posts
GROUP BY id, title, tags, ratings;
UPDATE posts
SET tags = array_append(tags, 'tutorial')
WHERE title LIKE '%Tutorial%';
UPDATE posts
SET tags = array_remove(tags, 'css')
WHERE 'css' = ANY(tags);
Performance and Optimization
Query Optimization
EXPLAIN SELECT * FROM employees WHERE salary > 75000;
EXPLAIN ANALYZE SELECT * FROM employees WHERE salary > 75000;
EXPLAIN (ANALYZE, BUFFERS) SELECT * FROM employees WHERE salary > 75000;
EXPLAIN SELECT * FROM employees WHERE email = 'john@email.com';
EXPLAIN ANALYZE
SELECT e.first_name, d.department_name
FROM employees e
JOIN departments d ON e.department_id = d.id
WHERE e.salary > 75000;
EXPLAIN ANALYZE
SELECT * FROM employees
WHERE department_id IN (SELECT id FROM departments WHERE budget > 100000);
EXPLAIN ANALYZE
SELECT e.* FROM employees e
JOIN departments d ON e.department_id = d.id
WHERE d.budget > 100000;
Statistics and Vacuum
ANALYZE employees;
ANALYZE;
VACUUM employees;
VACUUM ANALYZE employees;
VACUUM FULL employees;
SHOW autovacuum;
SHOW autovacuum_naptime;
SELECT
schemaname,
tablename,
n_tup_ins,
n_tup_upd,
n_tup_del,
last_vacuum,
last_autovacuum,
last_analyze
FROM pg_stat_user_tables;
Configuration and Monitoring
SHOW all;
SHOW shared_buffers;
SHOW work_mem;
SHOW maintenance_work_mem;
SET work_mem = '256MB';
SET enable_seqscan = off;
SELECT
pid,
usename,
application_name,
client_addr,
state,
query_start,
query
FROM pg_stat_activity
WHERE state = 'active';
SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE pid = 12345;
SELECT
datname,
pg_size_pretty(pg_database_size(datname)) as size
FROM pg_database;
-- Per-table total size (heap + indexes + TOAST) in the public schema.
-- format('%I.%I', ...) schema-qualifies and quotes the identifier, so
-- mixed-case or otherwise quoted table names still resolve via ::regclass
-- (a bare tablename::regclass would fail on them and depends on search_path).
SELECT
tablename,
pg_size_pretty(pg_total_relation_size(format('%I.%I', schemaname, tablename)::regclass)) as size
FROM pg_tables
WHERE schemaname = 'public'
ORDER BY pg_total_relation_size(format('%I.%I', schemaname, tablename)::regclass) DESC;
SELECT
indexrelname,
idx_tup_read,
idx_tup_fetch,
idx_scan
FROM pg_stat_user_indexes
ORDER BY idx_scan DESC;
Backup and Recovery
pg_dump and pg_restore
pg_dump -h localhost -U username -d database_name > backup.sql
pg_dump -h localhost -U username -d database_name -f backup.sql
pg_dump -h localhost -U username -d database_name -Fc > backup.dump
pg_dump -h localhost -U username -d database_name -t employees -t departments > tables_backup.sql
pg_dump -h localhost -U username -d database_name | gzip > backup.sql.gz
pg_dumpall -h localhost -U username > all_databases.sql
psql -h localhost -U username -d database_name < backup.sql
pg_restore -h localhost -U username -d database_name backup.dump
pg_restore -h localhost -U username -d database_name -t employees backup.dump
pg_restore -h localhost -U username -d database_name -v backup.dump
pg_restore -h localhost -U username -d database_name -c backup.dump
Point-in-Time Recovery
archive_mode = on
archive_command = 'cp %p /path/to/archive/%f'
wal_level = replica
pg_basebackup -h localhost -U username -D /path/to/backup -Ft -z -P
restore_command = 'cp /path/to/archive/%f %p'
recovery_target_time = '2023-12-01 14:30:00'
recovery_target_action = 'promote'
Best Practices
Performance Tips
- Use appropriate indexes for frequently queried columns
- Analyze query plans with EXPLAIN ANALYZE
- Use LIMIT when you don't need all results
- Prefer EXISTS over IN for large subqueries
- Use connection pooling (PgBouncer, pgpool-II)
- Regular VACUUM and ANALYZE for table maintenance
- Partition large tables for better performance
- Use appropriate data types (don't use TEXT for everything)
- Normalize data but consider denormalization for read-heavy workloads
- Monitor slow queries and optimize them
Security Best Practices
- Use strong passwords and regular rotation
- Limit user privileges to minimum required
- Use SSL/TLS for connections
- Regular security updates for PostgreSQL
- Audit database access and monitor logs
- Use parameterized queries to prevent SQL injection
- Backup encryption for sensitive data
- Network security with firewalls and VPNs
- Regular security assessments
- Follow principle of least privilege