Files
dashboard/backend/src/config/database.ts
Matthias Hochmeister 02cf5138cf fix: dashboard layout, widget caching, and backend stability
Layout:
- Remove Container maxWidth cap so widgets scale fluidly on wide screens
- Fix ActivityFeed Card missing height:100% and overflow:hidden that caused
  the timeline connector pseudo-element to bleed outside the card boundary

Performance (frontend):
- Migrate VehicleDashboardCard, EquipmentDashboardCard, AtemschutzDashboardCard,
  UpcomingEventsWidget, and PersonalWarningsBanner from useEffect+useState to
  TanStack Query — cached for 5 min, so navigating back to the dashboard no
  longer re-fires all 9 API requests
- Add gcTime:10min and refetchOnWindowFocus:false to QueryClient defaults to
  prevent spurious refetches on tab-switch

Backend stability:
- Raise default RATE_LIMIT_MAX from 100 to 300 req/15min — the previous limit
  was easily exceeded by a single active user during normal dashboard navigation
- Increase DB connectionTimeoutMillis from 2s to 5s to handle burst-load
  scenarios where multiple requests compete for pool slots simultaneously

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-03 12:55:16 +01:00

150 lines
4.2 KiB
TypeScript

import { Pool, PoolConfig, types } from 'pg';
import * as fs from 'fs';
import * as path from 'path';
import environment from './environment';
import logger from '../utils/logger';
// pg OID 1082 is the DATE type. Returning the raw 'YYYY-MM-DD' string instead
// of a JS Date avoids the timezone shift a Date conversion would introduce.
const DATE_OID = 1082;
types.setTypeParser(DATE_OID, (value: string) => value);
// Connection settings for the shared pool; all credentials/endpoints come
// from the environment config module.
const config: PoolConfig = {
  host: environment.database.host,
  port: environment.database.port,
  database: environment.database.name,
  user: environment.database.user,
  password: environment.database.password,
  max: 20,                         // upper bound on concurrently checked-out clients
  idleTimeoutMillis: 30_000,       // recycle clients idle for 30 seconds
  connectionTimeoutMillis: 5_000,  // error out if no client frees up within 5 seconds
};

// Single shared pool for the whole backend process.
const pool = new Pool(config);

// An errored idle client (e.g. after a server restart) would otherwise crash
// the process with an unhandled 'error' event — log it and let pg recover.
pool.on('error', (err) => {
  logger.error('Unexpected error on idle database client', err);
});
// Test database connection
/**
 * Probe the database by running `SELECT NOW()` on a pooled client.
 *
 * @returns true when the query succeeds, false on any connection/query error
 *          (the error is logged, never thrown).
 */
export const testConnection = async (): Promise<boolean> => {
  let client;
  try {
    client = await pool.connect();
    const result = await client.query('SELECT NOW()');
    logger.info('Database connection successful', { timestamp: result.rows[0].now });
    return true;
  } catch (error) {
    logger.error('Failed to connect to database', { error });
    return false;
  } finally {
    // Fix: the original released the client only on the success path, so a
    // failing query leaked a pool slot. Release unconditionally.
    client?.release();
  }
};
// Graceful shutdown
/**
 * Drain and close the shared pool during graceful shutdown.
 * Errors are logged, never thrown.
 */
export const closePool = async (): Promise<void> => {
  try {
    await pool.end();
  } catch (error) {
    logger.error('Error closing database pool', { error });
    return;
  }
  logger.info('Database pool closed');
};
/**
* Check if a table exists in the database
*/
/**
 * Report whether a table with the given name exists in the `public` schema.
 *
 * @param tableName - unqualified table name to look up
 * @returns true if present; false if absent or if the lookup itself fails
 *          (failures are logged, not thrown)
 */
export const tableExists = async (tableName: string): Promise<boolean> => {
  const existsSql = `SELECT EXISTS (
    SELECT FROM information_schema.tables
    WHERE table_schema = 'public'
    AND table_name = $1
  )`;
  try {
    const { rows } = await pool.query(existsSql, [tableName]);
    return rows[0].exists;
  } catch (error) {
    logger.error(`Failed to check if table ${tableName} exists`, { error });
    return false;
  }
};
/**
* Run database migrations from SQL files
*/
/**
 * Apply pending SQL migrations from ../database/migrations in lexicographic
 * filename order. Each .sql file runs at most once; applied filenames are
 * recorded in a `migrations` bookkeeping table.
 *
 * Fix: each migration's BEGIN/COMMIT/ROLLBACK now runs on ONE dedicated
 * client checked out via `pool.connect()`. The original issued them through
 * `pool.query`, which may route every statement to a different client, so
 * the transaction pairing was not actually guaranteed.
 *
 * @throws the underlying error when a migration fails (after ROLLBACK) or
 *         when directory/bookkeeping access fails.
 */
export const runMigrations = async (): Promise<void> => {
  const migrationsDir = path.join(__dirname, '../database/migrations');
  try {
    // Nothing to do when the project ships no migrations directory.
    if (!fs.existsSync(migrationsDir)) {
      logger.warn('Migrations directory not found', { path: migrationsDir });
      return;
    }

    // Lexicographic sort defines migration order (by naming convention).
    const files = fs.readdirSync(migrationsDir)
      .filter(file => file.endsWith('.sql'))
      .sort();
    if (files.length === 0) {
      logger.info('No migration files found');
      return;
    }
    logger.info(`Found ${files.length} migration file(s)`);

    // Bookkeeping table that records which migration files have been applied.
    await pool.query(`
      CREATE TABLE IF NOT EXISTS migrations (
        id SERIAL PRIMARY KEY,
        filename VARCHAR(255) UNIQUE NOT NULL,
        executed_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
      )
    `);

    for (const file of files) {
      // Skip migrations that were already applied in a previous run.
      const already = await pool.query(
        'SELECT filename FROM migrations WHERE filename = $1',
        [file]
      );
      if (already.rows.length > 0) {
        logger.info(`Migration ${file} already executed, skipping`);
        continue;
      }

      const filePath = path.join(migrationsDir, file);
      const sql = fs.readFileSync(filePath, 'utf8');
      logger.info(`Running migration: ${file}`);

      // A transaction must be pinned to a single connection — check out a
      // dedicated client instead of going through pool.query.
      const client = await pool.connect();
      try {
        await client.query('BEGIN');
        await client.query(sql);
        await client.query(
          'INSERT INTO migrations (filename) VALUES ($1)',
          [file]
        );
        await client.query('COMMIT');
        logger.info(`Migration ${file} completed successfully`);
      } catch (error) {
        await client.query('ROLLBACK');
        logger.error(`Migration ${file} failed`, { error });
        throw error;
      } finally {
        client.release();
      }
    }
    logger.info('All migrations completed successfully');
  } catch (error) {
    logger.error('Failed to run migrations', { error });
    throw error;
  }
};
export default pool;