This guide covers the complete database workflow in nself, including schema design, migrations, seeding, backups, and team collaboration patterns. nself provides a comprehensive set of CLI commands to manage your PostgreSQL database throughout the development lifecycle.
```bash
# Database commands overview (v0.4.8)
nself db status            # Show database status
nself db shell             # Open psql shell
nself db schema import     # Import DBML schema
nself db schema export     # Export current schema
nself db schema diff       # Compare schemas
nself db migrate status    # Check migration status
nself db migrate up        # Apply pending migrations
nself db migrate down      # Rollback migrations
nself db migrate create    # Create custom migration
nself db seed              # Run seed files
nself db mock              # Generate mock data
nself db backup            # Create backup
nself db restore           # Restore from backup
nself db update            # Safe team migration (backup + migrate)
```

nself uses DBML (Database Markup Language) for schema design, which integrates with dbdiagram.io for visual editing.

```dbml
// schema.dbml - Place in nself/schema.dbml
Project my_app {
  database_type: 'PostgreSQL'
  Note: 'My Application Database Schema'
}

// Users table
Table users {
  id uuid [pk, default: `gen_random_uuid()`]
  email varchar(255) [not null, unique]
  password_hash text [not null]
  display_name varchar(100)
  avatar_url text
  role user_role [default: 'user']
  email_verified boolean [default: false]
  created_at timestamptz [default: `now()`]
  updated_at timestamptz [default: `now()`]

  indexes {
    email [unique]
    created_at
    role
  }
}

// Posts table
Table posts {
  id uuid [pk, default: `gen_random_uuid()`]
  user_id uuid [not null, ref: > users.id]
  title varchar(255) [not null]
  slug varchar(255) [not null]
  content text
  excerpt text
  status post_status [default: 'draft']
  published_at timestamptz
  created_at timestamptz [default: `now()`]
  updated_at timestamptz [default: `now()`]

  indexes {
    slug [unique]
    user_id
    status
    published_at
  }
}

// Enums
Enum user_role {
  admin
  moderator
  user
  guest
}

Enum post_status {
  draft
  published
  archived
}
```
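For reference, the `ref: > users.id` setting on `posts.user_id` becomes an ordinary foreign key when the schema is imported. The exact DDL nself generates may differ, but it is roughly equivalent to:

```sql
-- Illustrative only: the foreign key implied by the DBML ref above
ALTER TABLE posts
  ADD CONSTRAINT posts_user_id_fkey
  FOREIGN KEY (user_id) REFERENCES users (id);
```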
```bash
# Export schema for dbdiagram.io
nself db schema export --format dbml > schema.dbml
# After editing at dbdiagram.io, import changes
nself db schema import schema.dbml
# Or use dbdiagram sync feature
nself db schema sync --token YOUR_DBDIAGRAM_TOKEN
```

```bash
# Import DBML schema and generate migration
nself db schema import schema.dbml
# Import with a specific migration name
nself db schema import schema.dbml --name add_posts_table
# Preview SQL without creating migration
nself db schema import schema.dbml --dry-run
# Import and apply immediately (dev only)
nself db schema import schema.dbml --apply
```

```text
nself/
├── migrations/
│   ├── 001_create_users.sql
│   ├── 002_create_posts.sql
│   ├── 003_add_user_preferences.sql
│   └── 004_create_comments.sql
├── seeds/
│   ├── common/      # Shared seed data
│   ├── local/       # Development seeds
│   ├── staging/     # Staging seeds
│   └── production/  # Production seeds
└── schema.dbml      # Master schema file
```

```sql
-- Migration: 001_create_users
-- Created: 2026-01-24
-- Description: Create users table with authentication fields
-- UP
CREATE TYPE user_role AS ENUM ('admin', 'moderator', 'user', 'guest');
CREATE TABLE users (
  id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
  email VARCHAR(255) NOT NULL UNIQUE,
  password_hash TEXT NOT NULL,
  display_name VARCHAR(100),
  avatar_url TEXT,
  role user_role DEFAULT 'user',
  email_verified BOOLEAN DEFAULT FALSE,
  created_at TIMESTAMPTZ DEFAULT NOW(),
  updated_at TIMESTAMPTZ DEFAULT NOW()
);
-- email is already indexed via its UNIQUE constraint; index the other lookups
CREATE INDEX idx_users_role ON users(role);
CREATE INDEX idx_users_created_at ON users(created_at);
-- Create updated_at trigger
CREATE OR REPLACE FUNCTION trigger_set_timestamp()
RETURNS TRIGGER AS $$
BEGIN
  NEW.updated_at = NOW();
  RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER set_users_timestamp
BEFORE UPDATE ON users
FOR EACH ROW
EXECUTE FUNCTION trigger_set_timestamp();
-- DOWN
DROP TRIGGER IF EXISTS set_users_timestamp ON users;
DROP TABLE IF EXISTS users;
DROP TYPE IF EXISTS user_role;
```

```bash
# Check migration status
nself db migrate status
# Example output:
# Migration Status
# ────────────────────────────────────────────
# VERSION  NAME                  STATUS   APPLIED
# 001      create_users          Applied  2026-01-20 10:30
# 002      create_posts          Applied  2026-01-21 14:15
# 003      add_user_preferences  Pending  -
# 004      create_comments       Pending  -
# Apply all pending migrations
nself db migrate up
# Apply specific number of migrations
nself db migrate up 1
# Rollback last migration
nself db migrate down
# Rollback multiple migrations
nself db migrate down 2
# Create custom migration
nself db migrate create add_indexes
# Force refresh all migrations (dev only!)
nself db migrate fresh
```

Commands like `nself db migrate fresh` and `nself db reset` are blocked in production to prevent accidental data loss. Use `nself db migrate down` for controlled rollbacks.
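For the `nself db migrate create add_indexes` step above, the migration file is written by hand but can follow the same UP/DOWN layout as the generated ones. A sketch of what such a file might contain:

```sql
-- Migration: 005_add_indexes (illustrative)
-- UP
-- Composite index for "posts by author filtered by status" lookups
CREATE INDEX idx_posts_user_status ON posts(user_id, status);

-- DOWN
DROP INDEX IF EXISTS idx_posts_user_status;
```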
```text
nself/seeds/
├── common/
│   ├── 01_roles.sql             # Always run first
│   └── 02_default_settings.sql
├── local/
│   ├── 01_test_users.sql        # Development test data
│   └── 02_sample_posts.sql
├── staging/
│   └── 01_demo_data.sql         # Staging demo data
└── production/
    └── 01_initial_admin.sql     # Production bootstrap
```

```sql
-- nself/seeds/local/01_test_users.sql
-- Test users for local development
INSERT INTO users (id, email, password_hash, display_name, role, email_verified)
VALUES
  -- Password for all test users: 'password123' (bcrypt hash)
  ('11111111-1111-1111-1111-111111111111',
   'admin@demo.com',
   '$2b$12$LQv3c1yqBWVHxkd0LHAkCOYz6TtxMQJqhN8/X4c5E5gF6H7I8J9K0',
   'Admin User',
   'admin',
   true),
  ('22222222-2222-2222-2222-222222222222',
   'alice@demo.com',
   '$2b$12$LQv3c1yqBWVHxkd0LHAkCOYz6TtxMQJqhN8/X4c5E5gF6H7I8J9K0',
   'Alice Developer',
   'user',
   true),
  ('33333333-3333-3333-3333-333333333333',
   'bob@demo.com',
   '$2b$12$LQv3c1yqBWVHxkd0LHAkCOYz6TtxMQJqhN8/X4c5E5gF6H7I8J9K0',
   'Bob Tester',
   'user',
   true)
ON CONFLICT (email) DO UPDATE SET
  display_name = EXCLUDED.display_name,
  role = EXCLUDED.role;
```
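The hash above is a sample value. To produce a real bcrypt hash for your own seed users, one option is a Python one-liner (assuming the `bcrypt` package is installed, e.g. via `pip install bcrypt`):

```bash
# Print a bcrypt hash of 'password123' with cost factor 12
python3 -c "import bcrypt; print(bcrypt.hashpw(b'password123', bcrypt.gensalt(12)).decode())"
```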
```bash
# Run all seeds for current environment
nself db seed
# Run specific seed file
nself db seed 01_test_users.sql
# Run seeds for specific environment
nself db seed --env local
nself db seed --env staging
# Preview seed files that would run
nself db seed --dry-run
```

```bash
# Generate mock data for development
nself db mock
# Generate specific number of records
nself db mock --users 100 --posts 500
# Generate with specific seed (reproducible)
nself db mock --seed 12345
# Clear mock data
nself db mock --clear
```

```bash
# Create backup (auto-named with timestamp)
nself db backup
# Output:
# Creating backup...
# Backup created: backups/2026-01-24_143052_nhost.sql.gz
# Size: 2.3 MB
# Create backup with custom name
nself db backup --name before_migration
# Create backup without compression
nself db backup --no-compress
# Create backup to specific path
nself db backup --output /path/to/backup.sql
```

```bash
# Backup specific tables only
nself db backup --tables users,posts,comments
# Exclude specific tables
nself db backup --exclude sessions,logs
# Schema only (no data)
nself db backup --schema-only
# Data only (no schema)
nself db backup --data-only
# Include Hasura metadata
nself db backup --include-metadata
```

```bash
# List available backups
nself db backup list
# Restore from latest backup
nself db restore
# Restore from specific backup
nself db restore backups/2026-01-24_143052_nhost.sql.gz
# Restore with preview (show what will happen)
nself db restore backup.sql --dry-run
# Restore to clean database (drop all first)
nself db restore backup.sql --clean
```

Restoring in production requires typing `yes-restore-production` as confirmation. Always test restores in a non-production environment first.
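You can also rehearse a restore entirely outside nself against a scratch database with standard Postgres tools. A minimal sketch, assuming a local Postgres and a gzipped plain-SQL dump (adjust names and paths):

```bash
# Load the dump into a throwaway database and spot-check it
createdb restore_test
gunzip -c backups/2026-01-24_143052_nhost.sql.gz | psql restore_test
psql restore_test -c "SELECT count(*) FROM users"
dropdb restore_test
```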
```bash
# Configure automated backups in .env
BACKUP_ENABLED=true
BACKUP_SCHEDULE="0 2 * * *" # Daily at 2 AM
BACKUP_RETENTION_DAYS=30
BACKUP_COMPRESSION=true
# Remote backup storage
BACKUP_STORAGE=s3
AWS_BACKUP_BUCKET=my-app-backups
AWS_ACCESS_KEY_ID=your-key
AWS_SECRET_ACCESS_KEY=your-secret
```
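Before relying on the schedule, it is worth confirming that the bucket and credentials actually work. A quick check with the AWS CLI (assuming it is installed and configured with the same keys):

```bash
# List the backup bucket to verify access
aws s3 ls s3://my-app-backups/
```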
```bash
# Check backup status
nself db backup status
# Output:
# Backup Configuration
# ────────────────────────
# Schedule: Daily at 2:00 AM
# Retention: 30 days
# Storage: S3 (my-app-backups)
# Last Backup: 2026-01-24 02:00:15
# Next Backup: 2026-01-25 02:00:00
# Backups Count: 24
```

When working in a team, use `nself db update` instead of raw migration commands. This command safely applies migrations with automatic backups:

```bash
# Safe team update (recommended for all developers)
nself db update
# This command:
# 1. Creates automatic backup
# 2. Checks for pending migrations
# 3. Applies migrations in a transaction
# 4. Verifies database integrity
# 5. Provides rollback option if issues occur
# Output:
# Database Update
# ────────────────────────
# Creating backup... Done (2.3 MB)
# Checking migrations... 2 pending
# Applying 003_add_user_preferences... Done
# Applying 004_create_comments... Done
# Verifying integrity... Passed
#
# Update complete! 2 migrations applied.
```

```bash
# Compare local schema with staging
nself db schema diff staging
# Compare with production
nself db schema diff production
# Output:
# Schema Differences (local vs staging)
# ────────────────────────────────────────
# + Table: comments (missing in staging)
# ~ Column: users.preferences (different type)
# - Index: idx_posts_date (missing in local)
# Generate migration to sync
nself db schema diff staging --generate-migration
```

```bash
# 1. Lead developer makes schema changes
# Edit schema.dbml or use dbdiagram.io
# 2. Import changes and create migration
nself db schema import schema.dbml --name add_comments_feature
# 3. Review the generated migration
cat nself/migrations/005_add_comments_feature.sql
# 4. Apply locally and test
nself db migrate up
nself db seed
# 5. Commit migration files
git add nself/migrations/005_add_comments_feature.sql
git commit -m "feat(db): add comments table"
git push
# 6. Team members pull and apply
git pull
nself db update  # Safe update with backup
```

```bash
# Open psql shell
nself db shell
# Connect to specific database
nself db shell --database other_db
# Execute single query
nself db shell -c "SELECT count(*) FROM users"
# Execute query file
nself db shell -f query.sql
```

```sql
-- Check table sizes
SELECT
  tablename,
  pg_size_pretty(pg_total_relation_size(schemaname || '.' || tablename)) AS size
FROM pg_tables
WHERE schemaname = 'public'
ORDER BY pg_total_relation_size(schemaname || '.' || tablename) DESC;
-- Check index usage
SELECT
  indexrelname AS index_name,
  idx_scan AS times_used,
  idx_tup_read AS tuples_read
FROM pg_stat_user_indexes
ORDER BY idx_scan DESC;
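-- Note: the slow-query view below depends on the pg_stat_statements extension,
-- which requires shared_preload_libraries = 'pg_stat_statements' in
-- postgresql.conf (and a restart) before it can be used:
CREATE EXTENSION IF NOT EXISTS pg_stat_statements;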
-- Find slow queries
SELECT
  query,
  calls,
  round(total_exec_time::numeric, 2) AS total_time_ms,
  round(mean_exec_time::numeric, 2) AS avg_time_ms
FROM pg_stat_statements
ORDER BY total_exec_time DESC
LIMIT 10;
-- Check active connections
SELECT
  datname,
  usename,
  state,
  query
FROM pg_stat_activity
WHERE datname = current_database();
```

| Command | Local | Staging | Production |
|---|---|---|---|
| `migrate up` | Immediate | With confirmation | With confirmation |
| `migrate down` | Immediate | With confirmation | Requires `yes-destroy-production` |
| `migrate fresh` | Allowed | Blocked | Blocked |
| `mock` | Allowed | Blocked | Blocked |
| `seed` | Immediate | With confirmation | With confirmation |
| `restore` | Immediate | With confirmation | Requires `yes-restore-production` |
| `backup` | Allowed | Allowed | Allowed |
```sql
-- Migration with explicit transaction handling
-- UP
BEGIN;
-- Add new column
ALTER TABLE users ADD COLUMN preferences JSONB DEFAULT '{}';
-- Migrate existing data
UPDATE users SET preferences = jsonb_build_object(
  'theme', 'light',
  'notifications', true
) WHERE preferences = '{}';
-- Add constraint after data migration
ALTER TABLE users ADD CONSTRAINT check_preferences_format
  CHECK (preferences ? 'theme' AND preferences ? 'notifications');
COMMIT;
-- DOWN
BEGIN;
ALTER TABLE users DROP CONSTRAINT check_preferences_format;
ALTER TABLE users DROP COLUMN preferences;
COMMIT;
```

```sql
-- Adding a column with default (safe)
ALTER TABLE users ADD COLUMN status VARCHAR(20) DEFAULT 'active';
-- Adding NOT NULL constraint safely
-- Step 1: Add column as nullable
ALTER TABLE users ADD COLUMN new_field TEXT;
-- Step 2: Backfill data (in application or migration)
UPDATE users SET new_field = 'default_value' WHERE new_field IS NULL;
-- Step 3: Add constraint (after all data is filled)
ALTER TABLE users ALTER COLUMN new_field SET NOT NULL;
-- Renaming with alias (keeps old name working)
ALTER TABLE users RENAME COLUMN old_name TO new_name;
CREATE VIEW users_compat AS SELECT *, new_name AS old_name FROM users;
```

```sql
-- Batch processing for large tables
DO $$
DECLARE
  batch_size INTEGER := 10000;
  processed  INTEGER := 0;
  total_rows INTEGER;
BEGIN
  SELECT COUNT(*) INTO total_rows FROM large_table WHERE migrated = FALSE;
  WHILE processed < total_rows LOOP
    UPDATE large_table
    SET
      -- transform_function is a placeholder for your own conversion logic
      new_column = transform_function(old_column),
      migrated = TRUE
    WHERE id IN (
      SELECT id FROM large_table
      WHERE migrated = FALSE
      LIMIT batch_size
    );
    processed := processed + batch_size;
    RAISE NOTICE 'Processed % of % rows', LEAST(processed, total_rows), total_rows;
    -- Allow other queries to run (note: a DO block runs as one transaction, so
    -- row locks are held until it finishes; for very large tables, run batches
    -- from a procedure or client session that can COMMIT between them)
    PERFORM pg_sleep(0.5);
  END LOOP;
END $$;
```
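If the `WHERE migrated = FALSE` scans slow down as the table grows, a partial index keeps each batch lookup cheap (a sketch using the same hypothetical `large_table` columns as above):

```sql
-- Partial index covering only the rows still waiting to be migrated
CREATE INDEX idx_large_table_unmigrated
  ON large_table (id)
  WHERE migrated = FALSE;
```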
```bash
# Check what went wrong
nself logs postgres
# Check migration status
nself db migrate status
# Check for locks
nself db shell -c "SELECT * FROM pg_locks WHERE NOT granted"
# If migration partially applied, you may need to:
# 1. Fix the issue manually
# 2. Mark migration as applied
nself db migrate repair
```

```bash
# Check database is running
nself status
# Check connection
nself db shell -c "SELECT 1"
# Check connection count
nself db shell -c "SELECT count(*) FROM pg_stat_activity"
# Restart database if needed
nself restart postgres
```

```bash
# Analyze tables for query planning
nself db shell -c "ANALYZE"
# Check for missing indexes
nself db shell -c "
SELECT schemaname, relname, seq_scan, idx_scan
FROM pg_stat_user_tables
WHERE seq_scan > idx_scan AND seq_scan > 1000
ORDER BY seq_scan DESC"
# Vacuum to reclaim space
nself db shell -c "VACUUM ANALYZE"
```

Take a backup before any risky operation, or use `nself db update`, which does this automatically.

A solid database workflow is essential for maintaining data integrity and enabling team collaboration. Use nself's database commands to keep your schema in sync, your data safe, and your team productive.