feat: Implement v1.1.1 Phase 1 - Core Infrastructure
Phase 1 of the v1.1.1 "Polish" release, focusing on production readiness. Implements logging, connection pooling, validation, and error handling, following the specs in docs/design/v1.1.1/developer-qa.md and ADRs 052-055.

**Structured Logging** (Q3, ADR-054)
- RotatingFileHandler (10MB files, keep 10)
- Correlation IDs for request tracing
- All print statements replaced with logging
- Context-aware correlation IDs (init/request)
- Logs written to data/logs/starpunk.log

**Database Connection Pooling** (Q2, ADR-053)
- Connection pool with configurable size (default: 5)
- Request-scoped connections via Flask's g object
- Pool statistics for monitoring
- WAL mode enabled for concurrency
- Backward-compatible get_db() signature

**Configuration Validation** (Q14, ADR-052)
- Validates presence and type of all config values
- Fail-fast startup with clear error messages
- LOG_LEVEL enum validation
- Type checking for strings, integers, and paths
- Non-zero exit status on errors

**Centralized Error Handling** (Q4, ADR-055)
- Handlers moved to starpunk/errors.py
- Micropub spec-compliant JSON errors
- HTML templates for browser requests
- All errors logged with correlation IDs
- MicropubError exception class

**Database Module Reorganization**
- database.py moved to a database/ package
- Separated into init.py, pool.py, and schema.py
- Maintains backward compatibility
- Cleaner separation of concerns

**Testing**
- 580 tests passing
- 1 pre-existing flaky test noted
- No breaking changes to the public API

**Documentation**
- CHANGELOG.md updated with a v1.1.1 entry
- Version bumped to 1.1.1
- Implementation report in docs/reports/

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
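The configuration validation module described above (Q14, ADR-052) is not part of the files in this diff. The following is only a minimal sketch of the fail-fast pattern the message describes; the function name `validate_config` and the exact key list are assumptions, not the shipped StarPunk implementation.

```python
# Hypothetical sketch of fail-fast config validation (ADR-052 pattern).
# Names and key set are illustrative, not the actual StarPunk code.
import sys
from pathlib import Path

VALID_LOG_LEVELS = {"DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"}

def validate_config(config: dict) -> None:
    """Refuse to start with a bad config: print clear errors and exit non-zero."""
    errors = []

    if not isinstance(config.get("DATABASE_PATH"), (str, Path)):
        errors.append("DATABASE_PATH must be a filesystem path")
    if not isinstance(config.get("DB_POOL_SIZE", 5), int):
        errors.append("DB_POOL_SIZE must be an integer")
    if config.get("LOG_LEVEL", "INFO") not in VALID_LOG_LEVELS:
        errors.append(f"LOG_LEVEL must be one of {sorted(VALID_LOG_LEVELS)}")

    if errors:
        # stderr is used here because logging is not configured yet at startup
        for err in errors:
            print(f"Configuration error: {err}", file=sys.stderr)
        sys.exit(1)
```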
starpunk/database/__init__.py (new file, 16 lines)
@@ -0,0 +1,16 @@
"""
Database package for StarPunk

Provides database initialization and connection pooling

Per v1.1.1 Phase 1:
- Connection pooling for improved performance (ADR-053)
- Request-scoped connections via Flask's g object
- Pool statistics for monitoring
"""

from starpunk.database.init import init_db
from starpunk.database.pool import init_pool, get_db, get_pool_stats
from starpunk.database.schema import INITIAL_SCHEMA_SQL

__all__ = ['init_db', 'init_pool', 'get_db', 'get_pool_stats', 'INITIAL_SCHEMA_SQL']
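Because the package above re-exports the same names the old flat database.py module provided, existing call sites keep their imports unchanged. A minimal illustration (the calling code is hypothetical, not part of this commit):

```python
# Imports are unchanged by the database.py -> database/ reorganization
from starpunk.database import init_db, get_db, get_pool_stats

# init_db(app) creates the schema and runs migrations,
# get_db() returns the request-scoped pooled connection, and
# get_pool_stats() exposes the pool counters for monitoring.
```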
starpunk/database/init.py (new file, 44 lines)
@@ -0,0 +1,44 @@
"""
Database initialization for StarPunk
"""

import sqlite3
from pathlib import Path
from starpunk.database.schema import INITIAL_SCHEMA_SQL


def init_db(app=None):
    """
    Initialize database schema and run migrations

    Args:
        app: Flask application instance (optional, for config access)
    """
    if app:
        db_path = app.config["DATABASE_PATH"]
        logger = app.logger
    else:
        # Fallback to default path
        db_path = Path("./data/starpunk.db")
        logger = None

    # Ensure parent directory exists
    db_path.parent.mkdir(parents=True, exist_ok=True)

    # Create database and initial schema
    conn = sqlite3.connect(db_path)
    try:
        conn.executescript(INITIAL_SCHEMA_SQL)
        conn.commit()
        if logger:
            logger.info(f"Database initialized: {db_path}")
        else:
            # Fallback logging when logger not available (e.g., during testing)
            import logging
            logging.getLogger(__name__).info(f"Database initialized: {db_path}")
    finally:
        conn.close()

    # Run migrations
    from starpunk.migrations import run_migrations
    run_migrations(db_path, logger=logger)
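A hedged usage sketch for init_db: when an app is passed, DATABASE_PATH is read from its config, and since the function calls db_path.parent.mkdir(...) on that value, the config should hold a pathlib.Path (or the caller should convert it). The app-factory wiring below is an assumption for illustration, not StarPunk's actual create_app.

```python
# Illustrative app-factory wiring (assumed, not the shipped factory)
from pathlib import Path
from flask import Flask
from starpunk.database import init_db

def create_app():
    app = Flask(__name__)
    # Store a Path so init_db's db_path.parent.mkdir(...) works as written
    app.config["DATABASE_PATH"] = Path("./data/starpunk.db")
    init_db(app)  # creates the schema, then runs migrations
    return app
```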
starpunk/database/pool.py (new file, 196 lines)
@@ -0,0 +1,196 @@
"""
Database connection pool for StarPunk

Per ADR-053 and developer Q&A Q2:
- Provides connection pooling for improved performance
- Integrates with Flask's g object for request-scoped connections
- Maintains same interface as get_db() for transparency
- Pool statistics available for metrics

Note: Migrations use direct connections (not pooled) for isolation
"""

import sqlite3
from pathlib import Path
from threading import Lock
from collections import deque
from flask import g


class ConnectionPool:
    """
    Simple connection pool for SQLite

    SQLite doesn't benefit from traditional connection pooling like PostgreSQL,
    but this provides connection reuse and request-scoped connection management.
    """

    def __init__(self, db_path, pool_size=5, timeout=10.0):
        """
        Initialize connection pool

        Args:
            db_path: Path to SQLite database file
            pool_size: Maximum number of connections in pool
            timeout: Timeout for getting connection (seconds)
        """
        self.db_path = Path(db_path)
        self.pool_size = pool_size
        self.timeout = timeout
        self._pool = deque(maxlen=pool_size)
        self._lock = Lock()
        self._stats = {
            'connections_created': 0,
            'connections_reused': 0,
            'connections_closed': 0,
            'pool_hits': 0,
            'pool_misses': 0,
        }

    def _create_connection(self):
        """Create a new database connection"""
        conn = sqlite3.connect(
            self.db_path,
            timeout=self.timeout,
            check_same_thread=False  # Allow connection reuse across threads
        )
        conn.row_factory = sqlite3.Row  # Return rows as dictionaries

        # Enable WAL mode for better concurrency
        conn.execute("PRAGMA journal_mode=WAL")

        self._stats['connections_created'] += 1
        return conn

    def get_connection(self):
        """
        Get a connection from the pool

        Returns:
            sqlite3.Connection: Database connection
        """
        with self._lock:
            if self._pool:
                # Reuse existing connection
                conn = self._pool.pop()
                self._stats['pool_hits'] += 1
                self._stats['connections_reused'] += 1
                return conn
            else:
                # Create new connection
                self._stats['pool_misses'] += 1
                return self._create_connection()

    def return_connection(self, conn):
        """
        Return a connection to the pool

        Args:
            conn: Database connection to return
        """
        if not conn:
            return

        with self._lock:
            if len(self._pool) < self.pool_size:
                # Return to pool
                self._pool.append(conn)
            else:
                # Pool is full, close connection
                conn.close()
                self._stats['connections_closed'] += 1

    def close_connection(self, conn):
        """
        Close a connection without returning to pool

        Args:
            conn: Database connection to close
        """
        if conn:
            conn.close()
            self._stats['connections_closed'] += 1

    def get_stats(self):
        """
        Get pool statistics

        Returns:
            dict: Pool statistics for monitoring
        """
        with self._lock:
            return {
                **self._stats,
                'pool_size': len(self._pool),
                'max_pool_size': self.pool_size,
            }

    def close_all(self):
        """Close all connections in the pool"""
        with self._lock:
            while self._pool:
                conn = self._pool.pop()
                conn.close()
                self._stats['connections_closed'] += 1


# Global pool instance (initialized by app factory)
_pool = None


def init_pool(app):
    """
    Initialize the connection pool

    Args:
        app: Flask application instance
    """
    global _pool

    db_path = app.config['DATABASE_PATH']
    pool_size = app.config.get('DB_POOL_SIZE', 5)
    timeout = app.config.get('DB_TIMEOUT', 10.0)

    _pool = ConnectionPool(db_path, pool_size, timeout)
    app.logger.info(f"Database connection pool initialized (size={pool_size})")

    # Register teardown handler
    @app.teardown_appcontext
    def close_connection(error):
        """Return connection to pool when request context ends"""
        conn = g.pop('db', None)
        if conn:
            _pool.return_connection(conn)


def get_db(app=None):
    """
    Get database connection for current request

    Uses Flask's g object for request-scoped connection management.
    Connection is automatically returned to pool at end of request.

    Args:
        app: Flask application (optional, for backward compatibility with tests)
            When provided, this parameter is ignored as we use the pool

    Returns:
        sqlite3.Connection: Database connection
    """
    # Note: app parameter is kept for backward compatibility but ignored
    # The pool is request-scoped via Flask's g object
    if 'db' not in g:
        g.db = _pool.get_connection()
    return g.db


def get_pool_stats():
    """
    Get connection pool statistics

    Returns:
        dict: Pool statistics for monitoring
    """
    if _pool:
        return _pool.get_stats()
    return {}
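How the pieces above fit together at runtime, as a hedged sketch: init_pool(app) is called once in the app factory, request handlers call get_db() and never return connections themselves (the registered teardown_appcontext handler does that), and get_pool_stats() can back a monitoring endpoint. The /health route and the wiring below are illustrative assumptions, not a shipped StarPunk endpoint.

```python
# Illustrative wiring; the /health route is an assumption, not part of this commit.
from pathlib import Path
from flask import Flask, jsonify
from starpunk.database import init_db, init_pool, get_db, get_pool_stats

app = Flask(__name__)
app.config["DATABASE_PATH"] = Path("data/starpunk.db")
init_db(app)    # create schema and run migrations (direct connection, not pooled)
init_pool(app)  # create the ConnectionPool and register the teardown handler

@app.route("/health")
def health():
    db = get_db()  # request-scoped: cached on flask.g for the rest of this request
    note_count = db.execute("SELECT COUNT(*) AS n FROM notes").fetchone()["n"]
    # The teardown handler returns the connection to the pool after the response
    return jsonify({"notes": note_count, "pool": get_pool_stats()})
```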
starpunk/database/schema.py (new file, 84 lines)
@@ -0,0 +1,84 @@
"""
Database schema definition for StarPunk

Initial database schema (v1.0.0 baseline)
DO NOT MODIFY - This represents the v1.0.0 schema state
All schema changes after v1.0.0 must go in migration files
"""

INITIAL_SCHEMA_SQL = """
-- Notes metadata (content is in files)
CREATE TABLE IF NOT EXISTS notes (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    slug TEXT UNIQUE NOT NULL,
    file_path TEXT UNIQUE NOT NULL,
    published BOOLEAN DEFAULT 0,
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    deleted_at TIMESTAMP,
    content_hash TEXT
);

CREATE INDEX IF NOT EXISTS idx_notes_created_at ON notes(created_at DESC);
CREATE INDEX IF NOT EXISTS idx_notes_published ON notes(published);
CREATE INDEX IF NOT EXISTS idx_notes_slug ON notes(slug);
CREATE INDEX IF NOT EXISTS idx_notes_deleted_at ON notes(deleted_at);

-- Authentication sessions (IndieLogin)
CREATE TABLE IF NOT EXISTS sessions (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    session_token_hash TEXT UNIQUE NOT NULL,
    me TEXT NOT NULL,
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    expires_at TIMESTAMP NOT NULL,
    last_used_at TIMESTAMP,
    user_agent TEXT,
    ip_address TEXT
);

CREATE INDEX IF NOT EXISTS idx_sessions_token_hash ON sessions(session_token_hash);
CREATE INDEX IF NOT EXISTS idx_sessions_expires ON sessions(expires_at);
CREATE INDEX IF NOT EXISTS idx_sessions_me ON sessions(me);

-- Micropub access tokens (secure storage with hashed tokens)
CREATE TABLE IF NOT EXISTS tokens (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    token_hash TEXT UNIQUE NOT NULL,
    me TEXT NOT NULL,
    client_id TEXT,
    scope TEXT DEFAULT 'create',
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    expires_at TIMESTAMP NOT NULL,
    last_used_at TIMESTAMP,
    revoked_at TIMESTAMP
);

-- Authorization codes for IndieAuth token exchange
CREATE TABLE IF NOT EXISTS authorization_codes (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    code_hash TEXT UNIQUE NOT NULL,
    me TEXT NOT NULL,
    client_id TEXT NOT NULL,
    redirect_uri TEXT NOT NULL,
    scope TEXT,
    state TEXT,
    code_challenge TEXT,
    code_challenge_method TEXT,
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    expires_at TIMESTAMP NOT NULL,
    used_at TIMESTAMP
);

CREATE INDEX IF NOT EXISTS idx_auth_codes_hash ON authorization_codes(code_hash);
CREATE INDEX IF NOT EXISTS idx_auth_codes_expires ON authorization_codes(expires_at);

-- CSRF state tokens (for admin login flow)
CREATE TABLE IF NOT EXISTS auth_state (
    state TEXT PRIMARY KEY,
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    expires_at TIMESTAMP NOT NULL,
    redirect_uri TEXT
);

CREATE INDEX IF NOT EXISTS idx_auth_state_expires ON auth_state(expires_at);
"""
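A small illustration of the schema in use with the pooled connection: because pool.py sets sqlite3.Row as the row factory, columns are addressable by name. The helper below is an example sketch, not code from this commit, and assumes it runs inside a Flask request context where get_db() is valid.

```python
# Example query against the notes table defined above (illustrative only)
from starpunk.database import get_db

def recent_published_slugs(limit=10):
    db = get_db()  # pooled, request-scoped connection with sqlite3.Row rows
    rows = db.execute(
        "SELECT slug, created_at FROM notes "
        "WHERE published = 1 AND deleted_at IS NULL "
        "ORDER BY created_at DESC LIMIT ?",
        (limit,),
    ).fetchall()
    return [row["slug"] for row in rows]
```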