#!/usr/bin/env node
/**
 * TuneVault Oracle HTTP Proxy
 * ===========================
 * Runs on your Oracle server. Exposes an HTTP endpoint that TuneVault
 * calls through your HTTPS proxy to collect Oracle health metrics.
 *
 * Architecture:
 *   TuneVault (Render) → HTTPS → Outbound Proxy → localhost:3100 → Oracle :1521
 *
 * Quick Start:
 *   export TUNEVAULT_API_KEY="your-secret-key-here"
 *   node oracle-proxy.js
 *
 * With PM2 (recommended for production):
 *   npm install -g pm2
 *   TUNEVAULT_API_KEY="your-key" pm2 start oracle-proxy.js --name tunevault-proxy
 *   pm2 save && pm2 startup
 *
 * Requirements:
 *   - Node.js 18+
 *   - npm install oracledb (thin mode — no Oracle Instant Client needed)
 *   - TUNEVAULT_API_KEY environment variable
 *
 * The proxy listens on 127.0.0.1:3100 only.
 * Your HTTPS proxy should point to http://localhost:3100
 *
 * Update your proxy config to route:
 *   hostname: oracledb.yourdomain.com
 *   service:  http://localhost:3100   (was: tcp://localhost:1521)
 */

'use strict';

const http = require('http');
const crypto = require('crypto');
const { execFile } = require('child_process');
const oracledb = require('oracledb');

// ============================================================
// Config
// ============================================================

const PORT = parseInt(process.env.PORT || '3100', 10);
const HOST = process.env.BIND_HOST || '127.0.0.1'; // localhost only — outbound proxy handles external
const API_KEY = process.env.TUNEVAULT_API_KEY;

// Single source of truth for the reported proxy version.
// (Previously /health said '3.1.1' while the metrics payload said '3.2.0'.)
const PROXY_VERSION = '3.2.0';

// Reject request bodies larger than this to avoid unbounded buffering.
const MAX_BODY_BYTES = 1024 * 1024; // 1 MiB

if (!API_KEY) {
  console.error('FATAL: TUNEVAULT_API_KEY environment variable is required.');
  console.error('Set it with: export TUNEVAULT_API_KEY="your-secret-key-here"');
  process.exit(1);
}

// oracledb 6.x defaults to thin mode — no Instant Client needed
// Thin mode supports Oracle 12.1+

/**
 * Constant-time check of the presented API key against API_KEY.
 * Comparing SHA-256 digests sidesteps timingSafeEqual's equal-length
 * requirement while keeping the comparison timing-independent.
 */
function isAuthorized(req) {
  const presented = req.headers['x-api-key'] || req.headers['authorization']?.replace('Bearer ', '');
  if (typeof presented !== 'string') return false;
  const a = crypto.createHash('sha256').update(presented).digest();
  const b = crypto.createHash('sha256').update(API_KEY).digest();
  return crypto.timingSafeEqual(a, b);
}

/**
 * Read and JSON-parse a request body (empty body → {}).
 * Rejects with err.statusCode = 413 when the body exceeds MAX_BODY_BYTES,
 * or 400 when the body is not valid JSON.
 */
function readJsonBody(req) {
  return new Promise((resolve, reject) => {
    let body = '';
    let size = 0;
    req.on('data', (chunk) => {
      size += chunk.length;
      if (size > MAX_BODY_BYTES) {
        const err = new Error('Request body too large');
        err.statusCode = 413;
        req.destroy();
        return reject(err);
      }
      body += chunk.toString();
    });
    req.on('error', reject);
    req.on('end', () => {
      try {
        resolve(JSON.parse(body || '{}'));
      } catch (e) {
        const err = new Error('Invalid JSON body');
        err.statusCode = 400; // client error, not a server failure
        reject(err);
      }
    });
  });
}

// ============================================================
// HTTP Server
// ============================================================

const server = http.createServer(async (req, res) => {
  // CORS headers (not needed for reverse proxy, but safe)
  res.setHeader('Content-Type', 'application/json');
  res.setHeader('X-Powered-By', 'TuneVault-Proxy');

  // Health ping — no auth required
  if (req.method === 'GET' && req.url === '/health') {
    return send(res, 200, {
      status: 'healthy',
      proxy: 'TuneVault Oracle Proxy',
      version: PROXY_VERSION,
      proxy_version: PROXY_VERSION
    });
  }

  // All other endpoints require API key (timing-safe comparison)
  if (!isAuthorized(req)) {
    return send(res, 401, { error: 'Unauthorized — invalid or missing API key' });
  }

  // POST /api/healthcheck — run Oracle health queries
  if (req.method === 'POST' && req.url === '/api/healthcheck') {
    let params;
    try {
      params = await readJsonBody(req);
    } catch (err) {
      return send(res, err.statusCode || 400, { error: err.message });
    }
    const { service_name, username, password, host, port } = params;
    if (!service_name || !username || !password) {
      return send(res, 400, { error: 'service_name, username, and password are required' });
    }
    try {
      // Connect to Oracle locally
      const oracleHost = host || 'localhost';
      const oraclePort = parseInt(port || '1521', 10);
      console.log(`[${new Date().toISOString()}] Health check: ${username}@${oracleHost}:${oraclePort}/${service_name}`);
      const metrics = await collectMetrics({
        host: oracleHost,
        port: oraclePort,
        serviceName: service_name,
        username,
        password
      });
      return send(res, 200, { success: true, metrics });
    } catch (err) {
      console.error('Health check failed:', err.message);
      return send(res, 500, { success: false, error: formatOracleError(err) });
    }
  }

  // POST /api/test — quick connection test (same auth)
  if (req.method === 'POST' && req.url === '/api/test') {
    let params;
    try {
      params = await readJsonBody(req);
    } catch (err) {
      return send(res, err.statusCode || 400, { error: err.message });
    }
    const { service_name, username, password, host, port } = params;
    if (!service_name || !username || !password) {
      return send(res, 400, { error: 'service_name, username, and password are required' });
    }
    try {
      const oracleHost = host || 'localhost';
      const oraclePort = parseInt(port || '1521', 10);
      const result = await testConnection({
        host: oracleHost,
        port: oraclePort,
        serviceName: service_name,
        username,
        password
      });
      return send(res, result.success ? 200 : 400, result);
    } catch (err) {
      return send(res, 500, { success: false, error: formatOracleError(err) });
    }
  }

  return send(res, 404, { error: 'Not found' });
});

/** Serialize `data` as JSON with the given HTTP status and end the response. */
function send(res, status, data) {
  res.writeHead(status);
  res.end(JSON.stringify(data));
}

server.listen(PORT, HOST, () => {
  console.log(`TuneVault Oracle Proxy listening on ${HOST}:${PORT}`);
  console.log(`Health: http://localhost:${PORT}/health`);
  console.log(`Proxy is ready. Update your outbound HTTPS proxy to route to http://localhost:${PORT}`);
});

server.on('error', (err) => {
  if (err.code === 'EADDRINUSE') {
    console.error(`Port ${PORT} is already in use. Change with: PORT=3101 node oracle-proxy.js`);
  } else {
    console.error('Server error:', err.message);
  }
  process.exit(1);
});

// ============================================================
// Oracle Connection Test
// ============================================================

/**
 * Open a connection, fetch the Oracle version banner, and close.
 * Never throws — failures come back as { success: false, message }.
 */
async function testConnection({ host, port, serviceName, username, password }) {
  let connection;
  try {
    const connectString = `${host}:${port}/${serviceName}`;
    connection = await oracledb.getConnection({
      user: username,
      password: password,
      connectString,
      connectTimeout: 15
    });
    const result = await connection.execute(`SELECT banner FROM v$version WHERE ROWNUM = 1`);
    const version = result.rows?.[0]?.[0] || 'Connected';
    return { success: true, message: 'Connection successful', version };
  } catch (err) {
    return { success: false, message: formatOracleError(err) };
  } finally {
    if (connection) {
      try { await connection.close(); } catch (e) { /* ignore */ }
    }
  }
}

// ============================================================
// Oracle Metrics Collection
//
============================================================ async function collectMetrics({ host, port, serviceName, username, password }) { let connection; try { const connectString = `${host}:${port}/${serviceName}`; connection = await oracledb.getConnection({ user: username, password: password, connectString, connectTimeout: 30 }); const awrAvailable = await checkAwrAvailability(connection); const [instanceInfo, tablespaces, waitEvents, topSql, indexAnalysis, sgaStats, pgaStats, osStats, undoStats, tempStats, alertLog, resourceLimits, sgaPgaHistory, backupStats, appsHealth, dbObjects, sessionStats, securityStats, schemaStats] = await Promise.all([ queryInstanceInfo(connection), queryTablespaces(connection), queryWaitEvents(connection), queryTopSql(connection), queryIndexAnalysis(connection), querySgaStats(connection), queryPgaStats(connection), queryOsStats(connection), queryUndoStats(connection, awrAvailable), queryTempStats(connection, awrAvailable), queryAlertLog(connection), queryResourceLimits(connection, awrAvailable), querySgaPgaHistory(connection, awrAvailable), queryBackupStats(connection), queryAppsHealth(connection), queryDbObjects(connection), querySessionStats(connection), querySecurityStats(connection), querySchemaStats(connection) ]); return { instance: instanceInfo, tablespaces, wait_events: waitEvents, top_sql: topSql, index_analysis: indexAnalysis, sga_stats: sgaStats, pga_stats: pgaStats, redo_stats: { redo_size_mb_per_hour: 0, log_switches_per_hour: 0, log_file_size_mb: 0, log_groups: 0, avg_log_sync_ms: 0, max_log_sync_ms: 0 }, os_stats: osStats, undo_stats: undoStats, temp_stats: tempStats, alert_log: alertLog, resource_limits: resourceLimits, sga_pga_history: sgaPgaHistory, backup_stats: backupStats, apps_health: appsHealth, db_objects: dbObjects, session_stats: sessionStats, security_stats: securityStats, schema_stats: schemaStats, awr_available: awrAvailable, proxy_version: '3.2.0', snapshot_info: { begin_snap_id: 0, end_snap_id: 0, 
begin_time: new Date(Date.now() - 12 * 60 * 60 * 1000).toISOString(), end_time: new Date().toISOString(), elapsed_time_min: 720, db_time_min: 0 } }; } finally { if (connection) { try { await connection.close(); } catch (e) { /* ignore */ } } } } // ============================================================ // Query Functions // ============================================================ async function queryInstanceInfo(conn) { try { const result = await conn.execute(` SELECT d.NAME as db_name, i.INSTANCE_NAME, i.HOST_NAME, i.VERSION, d.PLATFORM_NAME, TO_CHAR(i.STARTUP_TIME, 'YYYY-MM-DD HH24:MI:SS') as startup_time, ROUND(SYSDATE - i.STARTUP_TIME) as uptime_days, (SELECT VALUE FROM v$parameter WHERE name = 'cpu_count') as cpus, ROUND((SELECT TO_NUMBER(VALUE)/1024/1024/1024 FROM v$parameter WHERE name = 'sga_target'), 1) as sga_target_gb, ROUND((SELECT TO_NUMBER(VALUE)/1024/1024/1024 FROM v$parameter WHERE name = 'pga_aggregate_target'), 1) as pga_target_gb, (SELECT TO_NUMBER(VALUE) FROM v$parameter WHERE name = 'db_block_size') as db_block_size FROM v$database d, v$instance i `); const row = result.rows?.[0]; if (!row) throw new Error('No instance data'); return { db_name: row[0] || 'UNKNOWN', instance_name: row[1] || 'unknown', host_name: row[2] || 'unknown', version: row[3] || 'Unknown', platform: row[4] || 'Unknown', startup_time: row[5] || '', uptime_days: row[6] || 0, rac: false, cpus: parseInt(row[7]) || 1, sga_target_gb: parseFloat(row[8]) || 0, pga_aggregate_target_gb: parseFloat(row[9]) || 0, db_block_size: parseInt(row[10]) || 8192 }; } catch (err) { console.error('Instance query failed:', err.message); try { const r1 = await conn.execute(`SELECT name FROM v$database`); const r2 = await conn.execute(`SELECT instance_name, host_name, version FROM v$instance`); return { db_name: r1.rows?.[0]?.[0] || 'UNKNOWN', instance_name: r2.rows?.[0]?.[0] || 'unknown', host_name: r2.rows?.[0]?.[1] || 'unknown', version: r2.rows?.[0]?.[2] || 'Unknown', platform: 
'Unknown', startup_time: '', uptime_days: 0, rac: false, cpus: 1, sga_target_gb: 0, pga_aggregate_target_gb: 0, db_block_size: 8192 }; } catch (e2) { return { db_name: 'UNKNOWN', instance_name: 'unknown', host_name: 'unknown', version: 'Unknown', platform: 'Unknown', startup_time: '', uptime_days: 0, rac: false, cpus: 1, sga_target_gb: 0, pga_aggregate_target_gb: 0, db_block_size: 8192 }; } } } async function queryTablespaces(conn) { try { const result = await conn.execute(` SELECT ts.TABLESPACE_NAME, ROUND(um.USED_SPACE * ts_block.BLOCK_SIZE / 1024 / 1024 / 1024, 1) as used_gb, ROUND(um.TABLESPACE_SIZE * ts_block.BLOCK_SIZE / 1024 / 1024 / 1024, 1) as total_gb, ROUND(um.USED_PERCENT, 1) as pct_used, CASE WHEN df.autoext > 0 THEN 1 ELSE 0 END as autoextend FROM DBA_TABLESPACE_USAGE_METRICS um JOIN DBA_TABLESPACES ts ON ts.TABLESPACE_NAME = um.TABLESPACE_NAME LEFT JOIN (SELECT TABLESPACE_NAME, BLOCK_SIZE FROM DBA_TABLESPACES) ts_block ON ts_block.TABLESPACE_NAME = um.TABLESPACE_NAME LEFT JOIN ( SELECT TABLESPACE_NAME, SUM(CASE WHEN AUTOEXTENSIBLE = 'YES' THEN 1 ELSE 0 END) as autoext FROM DBA_DATA_FILES GROUP BY TABLESPACE_NAME ) df ON df.TABLESPACE_NAME = um.TABLESPACE_NAME ORDER BY um.USED_PERCENT DESC `); return (result.rows || []).map(row => { const pct = parseFloat(row[3]) || 0; return { name: row[0], used_gb: parseFloat(row[1]) || 0, total_gb: parseFloat(row[2]) || 0, pct_used: pct, autoextend: row[4] > 0, status: pct > 90 ? 'critical' : pct > 80 ? 
'warning' : 'ok' }; }); } catch (err) { console.error('Tablespace primary query failed:', err.message); try { const result = await conn.execute(` SELECT df.TABLESPACE_NAME, ROUND(SUM(df.BYTES) / 1024 / 1024 / 1024, 1) as total_gb, ROUND((SUM(df.BYTES) - NVL(fs.free_bytes, 0)) / 1024 / 1024 / 1024, 1) as used_gb, ROUND((1 - NVL(fs.free_bytes, 0) / SUM(df.BYTES)) * 100, 1) as pct_used, MAX(CASE WHEN df.AUTOEXTENSIBLE = 'YES' THEN 1 ELSE 0 END) as autoextend FROM DBA_DATA_FILES df LEFT JOIN (SELECT TABLESPACE_NAME, SUM(BYTES) as free_bytes FROM DBA_FREE_SPACE GROUP BY TABLESPACE_NAME) fs ON fs.TABLESPACE_NAME = df.TABLESPACE_NAME GROUP BY df.TABLESPACE_NAME, fs.free_bytes ORDER BY pct_used DESC `); return (result.rows || []).map(row => { const pct = parseFloat(row[3]) || 0; return { name: row[0], used_gb: parseFloat(row[2]) || 0, total_gb: parseFloat(row[1]) || 0, pct_used: pct, autoextend: row[4] > 0, status: pct > 90 ? 'critical' : pct > 80 ? 'warning' : 'ok' }; }); } catch (e2) { return []; } } } async function queryWaitEvents(conn) { try { const result = await conn.execute(` SELECT EVENT, WAIT_CLASS, TOTAL_WAITS, ROUND(TIME_WAITED / 100, 1) as time_waited_s, CASE WHEN TOTAL_WAITS > 0 THEN ROUND((TIME_WAITED / 100 / TOTAL_WAITS) * 1000, 2) ELSE 0 END as avg_wait_ms FROM V$SYSTEM_EVENT WHERE WAIT_CLASS NOT IN ('Idle') AND TOTAL_WAITS > 0 ORDER BY TIME_WAITED DESC FETCH FIRST 15 ROWS ONLY `); const rows = result.rows || []; const totalTime = rows.reduce((s, r) => s + (parseFloat(r[3]) || 0), 0); return rows.map(row => ({ event: row[0], wait_class: row[1], total_waits: parseInt(row[2]) || 0, time_waited_s: parseFloat(row[3]) || 0, avg_wait_ms: parseFloat(row[4]) || 0, pct_db_time: totalTime > 0 ? 
Math.round((parseFloat(row[3]) / totalTime) * 1000) / 10 : 0 })); } catch (err) { console.error('Wait events query failed:', err.message); return []; } } async function queryTopSql(conn) { try { const result = await conn.execute(` SELECT SQL_ID, SUBSTR(SQL_TEXT, 1, 500), EXECUTIONS, ROUND(ELAPSED_TIME / 1000000, 1), ROUND(CPU_TIME / 1000000, 1), BUFFER_GETS, DISK_READS, ROWS_PROCESSED, CASE WHEN EXECUTIONS > 0 THEN ROUND(ELAPSED_TIME / EXECUTIONS / 1000, 2) ELSE 0 END as elapsed_per_exec_ms, CASE WHEN EXECUTIONS > 0 THEN ROUND(BUFFER_GETS / EXECUTIONS) ELSE 0 END as buffer_gets_per_exec, PLAN_HASH_VALUE FROM V$SQL WHERE EXECUTIONS > 0 AND ELAPSED_TIME > 0 AND PARSING_SCHEMA_NAME NOT IN ('SYS','SYSTEM','DBSNMP','SYSMAN','OUTLN','MDSYS','ORDSYS','EXFSYS','WMSYS','APPQOSSYS','DBSFWUSER') AND SQL_TEXT NOT LIKE '%v$%' AND SQL_TEXT NOT LIKE '%V$%' AND COMMAND_TYPE IN (2, 3, 6, 7, 189) ORDER BY ELAPSED_TIME DESC FETCH FIRST 10 ROWS ONLY `); return (result.rows || []).map(row => { const elapsedPerExec = parseFloat(row[8]) || 0; const bufferGetsPerExec = parseInt(row[9]) || 0; let issue = 'Normal operation'; if (elapsedPerExec > 20) issue = 'Very slow execution — check execution plan'; else if (elapsedPerExec > 5) issue = 'Slow execution — review query and indexes'; if (bufferGetsPerExec > 1000) issue = 'High buffer gets — possible full table scan or missing index'; if (parseInt(row[6]) > parseInt(row[5]) * 0.1 && parseInt(row[6]) > 10000) issue = 'High disk reads relative to buffer gets — data not in cache'; return { sql_id: row[0], sql_text: row[1] || '', executions: parseInt(row[2]) || 0, elapsed_time_s: parseFloat(row[3]) || 0, cpu_time_s: parseFloat(row[4]) || 0, buffer_gets: parseInt(row[5]) || 0, disk_reads: parseInt(row[6]) || 0, rows_processed: parseInt(row[7]) || 0, elapsed_per_exec_ms: elapsedPerExec, buffer_gets_per_exec: bufferGetsPerExec, plan_hash: String(row[10] || '0'), issue }; }); } catch (err) { console.error('Top SQL query failed:', err.message); return 
[]; } } async function queryIndexAnalysis(conn) { try { const result = await conn.execute(` SELECT i.OWNER, i.INDEX_NAME, i.TABLE_NAME, ROUND(s.LEAF_BLOCKS * (SELECT TO_NUMBER(VALUE) FROM v$parameter WHERE name = 'db_block_size') / 1024 / 1024) as size_mb, i.BLEVEL, s.LEAF_BLOCKS, i.CLUSTERING_FACTOR, NVL(s.PCT_DIRECT_ACCESS, 100) as pct_direct_access, i.STATUS, CASE WHEN i.STATUS != 'VALID' THEN 'unusable' WHEN i.BLEVEL > 4 THEN 'critical' WHEN NVL(s.PCT_DIRECT_ACCESS, 100) < 50 THEN 'critical' WHEN i.BLEVEL > 3 THEN 'fragmented' WHEN NVL(s.PCT_DIRECT_ACCESS, 100) < 70 THEN 'fragmented' ELSE 'ok' END as health_status FROM DBA_INDEXES i LEFT JOIN DBA_IND_STATISTICS s ON s.OWNER = i.OWNER AND s.INDEX_NAME = i.INDEX_NAME AND s.PARTITION_NAME IS NULL WHERE i.OWNER NOT IN ('SYS','SYSTEM','DBSNMP','SYSMAN','OUTLN','MDSYS','ORDSYS','EXFSYS','WMSYS','XDB','CTXSYS','APPQOSSYS','DBSFWUSER','APEX_040000','APEX_040200','APEX_050000','FLOWS_FILES') AND i.INDEX_TYPE = 'NORMAL' AND NVL(s.LEAF_BLOCKS, 0) > 100 ORDER BY CASE WHEN i.STATUS != 'VALID' THEN 1 WHEN i.BLEVEL > 4 THEN 2 WHEN i.BLEVEL > 3 THEN 3 ELSE 4 END, s.LEAF_BLOCKS DESC NULLS LAST FETCH FIRST 20 ROWS ONLY `); return (result.rows || []).map(row => { const blevel = parseInt(row[4]) || 0; const pctDirect = parseInt(row[7]) || 100; const estPctDeleted = Math.max(0, Math.min(100, Math.round(100 - pctDirect + (blevel > 3 ? 
(blevel - 3) * 15 : 0)))); return { owner: row[0], index_name: row[1], table_name: row[2], size_mb: parseInt(row[3]) || 0, blevel, leaf_blocks: parseInt(row[5]) || 0, clustering_factor: parseInt(row[6]) || 0, pct_deleted: estPctDeleted, status: row[9] || 'ok' }; }); } catch (err) { console.error('Index analysis query failed:', err.message); return []; } } async function querySgaStats(conn) { try { const [sgaR, hitR, libR, dictR, spR, parseR, compR] = await Promise.all([ conn.execute(`SELECT ROUND(SUM(VALUE) / 1024 / 1024 / 1024, 1) FROM V$SGA`), conn.execute(`SELECT ROUND((1 - (phys.VALUE / (db_gets.VALUE + con_gets.VALUE))) * 100, 1) FROM (SELECT VALUE FROM V$SYSSTAT WHERE NAME = 'physical reads') phys, (SELECT VALUE FROM V$SYSSTAT WHERE NAME = 'db block gets') db_gets, (SELECT VALUE FROM V$SYSSTAT WHERE NAME = 'consistent gets') con_gets`), conn.execute(`SELECT ROUND(SUM(PINS - RELOADS) / NULLIF(SUM(PINS), 0) * 100, 1) FROM V$LIBRARYCACHE`), conn.execute(`SELECT ROUND(SUM(GETS - GETMISSES) / NULLIF(SUM(GETS), 0) * 100, 1) FROM V$ROWCACHE`), conn.execute(`SELECT ROUND(free_bytes.val / total_bytes.val * 100, 1) FROM (SELECT SUM(BYTES) as val FROM V$SGASTAT WHERE POOL = 'shared pool' AND NAME = 'free memory') free_bytes, (SELECT SUM(BYTES) as val FROM V$SGASTAT WHERE POOL = 'shared pool') total_bytes`), conn.execute(`SELECT ROUND(hp.VALUE / NULLIF(GREATEST(uptime.VALUE, 1), 0), 1), ROUND((tp.VALUE - hp.VALUE) / NULLIF(GREATEST(uptime.VALUE, 1), 0), 1) FROM (SELECT VALUE FROM V$SYSSTAT WHERE NAME = 'parse count (hard)') hp, (SELECT VALUE FROM V$SYSSTAT WHERE NAME = 'parse count (total)') tp, (SELECT (SYSDATE - STARTUP_TIME) * 86400 as VALUE FROM V$INSTANCE) uptime`), conn.execute(`SELECT NAME, ROUND(VALUE / 1024 / 1024 / 1024, 1) FROM V$SGA`) ]); const components = {}; (compR.rows || []).forEach(r => { const name = (r[0] || '').toLowerCase(); if (name.includes('buffer')) components.buffer_cache_gb = parseFloat(r[1]) || 0; if (name.includes('shared')) 
components.shared_pool_gb = parseFloat(r[1]) || 0; if (name.includes('large')) components.large_pool_gb = parseFloat(r[1]) || 0; if (name.includes('java')) components.java_pool_gb = parseFloat(r[1]) || 0; if (name.includes('stream')) components.streams_pool_gb = parseFloat(r[1]) || 0; }); return { sga_size_gb: parseFloat(sgaR.rows?.[0]?.[0]) || 0, buffer_cache_gb: components.buffer_cache_gb || 0, shared_pool_gb: components.shared_pool_gb || 0, large_pool_gb: components.large_pool_gb || 0, java_pool_gb: components.java_pool_gb || 0, streams_pool_gb: components.streams_pool_gb || 0, buffer_cache_hit_ratio: parseFloat(hitR.rows?.[0]?.[0]) || 0, library_cache_hit_ratio: parseFloat(libR.rows?.[0]?.[0]) || 0, dictionary_cache_hit_ratio: parseFloat(dictR.rows?.[0]?.[0]) || 0, shared_pool_free_pct: parseFloat(spR.rows?.[0]?.[0]) || 0, hard_parses_per_sec: parseFloat(parseR.rows?.[0]?.[0]) || 0, soft_parses_per_sec: parseFloat(parseR.rows?.[0]?.[1]) || 0 }; } catch (err) { console.error('SGA stats query failed:', err.message); return { sga_size_gb: 0, buffer_cache_gb: 0, shared_pool_gb: 0, large_pool_gb: 0, java_pool_gb: 0, streams_pool_gb: 0, buffer_cache_hit_ratio: 0, library_cache_hit_ratio: 0, dictionary_cache_hit_ratio: 0, shared_pool_free_pct: 0, hard_parses_per_sec: 0, soft_parses_per_sec: 0 }; } } async function queryPgaStats(conn) { try { const r = await conn.execute(` SELECT ROUND((SELECT TO_NUMBER(VALUE)/1024/1024/1024 FROM v$parameter WHERE name = 'pga_aggregate_target'), 1), ROUND((SELECT VALUE/1024/1024/1024 FROM V$PGASTAT WHERE NAME = 'total PGA allocated'), 1), ROUND((SELECT VALUE/1024/1024/1024 FROM V$PGASTAT WHERE NAME = 'maximum PGA allocated'), 1), (SELECT VALUE FROM V$PGASTAT WHERE NAME = 'over allocation count'), ROUND((SELECT VALUE FROM V$PGASTAT WHERE NAME = 'cache hit percentage'), 1) FROM DUAL `); const wa = await conn.execute(` SELECT ROUND(optimal.cnt/NULLIF(total.cnt,0)*100,1), ROUND(onepass.cnt/NULLIF(total.cnt,0)*100,1), 
ROUND(multipass.cnt/NULLIF(total.cnt,0)*100,1) FROM (SELECT SUM(OPTIMAL_EXECUTIONS+ONEPASS_EXECUTIONS+MULTIPASSES_EXECUTIONS) as cnt FROM V$SQL_WORKAREA_HISTOGRAM WHERE LOW_OPTIMAL_SIZE>0) total, (SELECT SUM(OPTIMAL_EXECUTIONS) as cnt FROM V$SQL_WORKAREA_HISTOGRAM WHERE LOW_OPTIMAL_SIZE>0) optimal, (SELECT SUM(ONEPASS_EXECUTIONS) as cnt FROM V$SQL_WORKAREA_HISTOGRAM WHERE LOW_OPTIMAL_SIZE>0) onepass, (SELECT SUM(MULTIPASSES_EXECUTIONS) as cnt FROM V$SQL_WORKAREA_HISTOGRAM WHERE LOW_OPTIMAL_SIZE>0) multipass `); const row = r.rows?.[0] || []; const waRow = wa.rows?.[0] || []; return { pga_target_gb: parseFloat(row[0]) || 0, pga_allocated_gb: parseFloat(row[1]) || 0, pga_max_allocated_gb: parseFloat(row[2]) || 0, over_allocation_count: parseInt(row[3]) || 0, cache_hit_pct: parseFloat(row[4]) || 0, optimal_executions_pct: parseFloat(waRow[0]) || 0, onepass_executions_pct: parseFloat(waRow[1]) || 0, multipass_executions_pct: parseFloat(waRow[2]) || 0 }; } catch (err) { console.error('PGA stats query failed:', err.message); return { pga_target_gb: 0, pga_allocated_gb: 0, pga_max_allocated_gb: 0, over_allocation_count: 0, cache_hit_pct: 0, optimal_executions_pct: 0, onepass_executions_pct: 0, multipass_executions_pct: 0 }; } } async function queryOsStats(conn) { try { const result = await conn.execute(` SELECT STAT_NAME, VALUE FROM V$OSSTAT WHERE STAT_NAME IN ('NUM_CPUS','IDLE_TIME','BUSY_TIME','USER_TIME','SYS_TIME','IOWAIT_TIME','PHYSICAL_MEMORY_BYTES','FREE_MEMORY_BYTES') `); const stats = {}; (result.rows || []).forEach(r => { stats[r[0]] = parseFloat(r[1]) || 0; }); const totalCpuTime = (stats.IDLE_TIME || 0) + (stats.BUSY_TIME || 0); const cpuPct = totalCpuTime > 0 ? Math.round((stats.BUSY_TIME || 0) / totalCpuTime * 1000) / 10 : 0; const ioPct = totalCpuTime > 0 ? 
Math.round((stats.IOWAIT_TIME || 0) / totalCpuTime * 1000) / 10 : 0; return { cpu_count: parseInt(stats.NUM_CPUS) || 1, avg_cpu_utilization_pct: cpuPct, max_cpu_utilization_pct: Math.min(cpuPct * 1.3, 100), avg_io_wait_pct: ioPct, physical_memory_gb: Math.round((stats.PHYSICAL_MEMORY_BYTES || 0) / 1024 / 1024 / 1024 * 10) / 10, free_memory_gb: Math.round((stats.FREE_MEMORY_BYTES || 0) / 1024 / 1024 / 1024 * 10) / 10, swap_used_gb: 0, avg_disk_read_ms: 0, avg_disk_write_ms: 0 }; } catch (err) { console.error('OS stats query failed:', err.message); return { cpu_count: 1, avg_cpu_utilization_pct: 0, max_cpu_utilization_pct: 0, avg_io_wait_pct: 0, physical_memory_gb: 0, free_memory_gb: 0, swap_used_gb: 0, avg_disk_read_ms: 0, avg_disk_write_ms: 0 }; } } // ============================================================ // Wave A: Undo, Temp, Alert Log, Resource Limits, SGA/PGA History // ============================================================ async function checkAwrAvailability(conn) { try { await conn.execute(`SELECT COUNT(*) FROM DBA_HIST_UNDOSTAT WHERE ROWNUM = 1`); return true; } catch (err) { return false; } } async function queryUndoStats(conn, awrAvailable) { try { const currentResult = await conn.execute(` SELECT UNDOBLKS, TXNCOUNT, MAXQUERYLEN, MAXCONCURRENCY, TUNED_UNDORETENTION, EXPIREDBLKS, UNEXPIREDBLKS, ACTIVEBLKS FROM V$UNDOSTAT WHERE ROWNUM = 1 ORDER BY END_TIME DESC `); const tsResult = await conn.execute(` SELECT d.TABLESPACE_NAME, SUM(d.BYTES)/1073741824 AS TOTAL_GB, SUM(d.BYTES - NVL(f.FREE_BYTES,0))/1073741824 AS USED_GB, ROUND(SUM(d.BYTES - NVL(f.FREE_BYTES,0))/SUM(d.BYTES)*100,1) AS PCT_USED, t.RETENTION FROM DBA_DATA_FILES d JOIN DBA_TABLESPACES t ON t.TABLESPACE_NAME=d.TABLESPACE_NAME LEFT JOIN (SELECT FILE_ID, SUM(BYTES) AS FREE_BYTES FROM DBA_FREE_SPACE GROUP BY FILE_ID) f ON f.FILE_ID=d.FILE_ID WHERE t.CONTENTS='UNDO' GROUP BY d.TABLESPACE_NAME, t.RETENTION `); const row = (currentResult.rows||[[]])[0]||[]; const tsRow = 
(tsResult.rows||[[]])[0]||[]; const current = { undo_blocks: parseInt(row[0])||0, transaction_count: parseInt(row[1])||0, max_query_length_s: parseInt(row[2])||0, max_concurrency: parseInt(row[3])||0, tuned_undo_retention_s: parseInt(row[4])||900, expired_blocks: parseInt(row[5])||0, unexpired_blocks: parseInt(row[6])||0, active_blocks: parseInt(row[7])||0, tablespace_name: tsRow[0]||'UNDOTBS1', total_gb: parseFloat(tsRow[1])||0, used_gb: parseFloat(tsRow[2])||0, pct_used: parseFloat(tsRow[3])||0, retention_mode: tsRow[4]||'NOGUARANTEE' }; let historical = { peak_pct_used: null, peak_time: null, peak_query_length_s: null, lookback_days: 30 }; if (awrAvailable) { try { const hr = await conn.execute(` SELECT ROUND(MAX(u.UNDOBLKS)/NULLIF(d.TOTAL_BLOCKS,0)*100,1), TO_CHAR(MAX(u.END_TIME) KEEP (DENSE_RANK LAST ORDER BY u.UNDOBLKS),'YYYY-MM-DD HH24:MI'), MAX(u.MAXQUERYLEN), ROUND(MAX(u.TUNED_UNDORETENTION)/60,0) FROM DBA_HIST_UNDOSTAT u CROSS JOIN ( SELECT SUM(BLOCKS) AS TOTAL_BLOCKS FROM DBA_DATA_FILES df JOIN DBA_TABLESPACES t ON t.TABLESPACE_NAME=df.TABLESPACE_NAME WHERE t.CONTENTS='UNDO' ) d WHERE u.END_TIME > SYSDATE-30 `); const h = (hr.rows||[[]])[0]||[]; historical = { peak_pct_used: parseFloat(h[0])||null, peak_time: h[1]||null, peak_query_length_s: parseInt(h[2])||null, max_tuned_retention_min: parseInt(h[3])||null, lookback_days: 30 }; } catch(e) {} } return { current, historical, awr_available: awrAvailable }; } catch(err) { console.error('Undo stats query failed:', err.message); return { current: { tablespace_name:'UNDOTBS1', total_gb:0, used_gb:0, pct_used:0, tuned_undo_retention_s:900, max_query_length_s:0, retention_mode:'NOGUARANTEE' }, historical: { peak_pct_used:null, peak_time:null, lookback_days:30 }, awr_available: awrAvailable }; } } async function queryTempStats(conn, awrAvailable) { try { const freeResult = await conn.execute(` SELECT TABLESPACE_NAME, ROUND(TABLESPACE_SIZE/1073741824,2), ROUND(FREE_SPACE/1073741824,2), 
ROUND((TABLESPACE_SIZE-FREE_SPACE)/NULLIF(TABLESPACE_SIZE,0)*100,1) FROM DBA_TEMP_FREE_SPACE `); const sessionResult = await conn.execute(` SELECT s.SID, s.SERIAL#, s.USERNAME, ROUND(s.BLOCKS*8192/1048576,1), s.TABLESPACE FROM V$TEMPSEG_USAGE s ORDER BY s.BLOCKS DESC FETCH FIRST 10 ROWS ONLY `).catch(() => ({ rows: [] })); const freeRow = (freeResult.rows||[[]])[0]||[]; const totalGb = parseFloat(freeRow[1])||0, freeGb = parseFloat(freeRow[2])||0; const current = { tablespace_name: freeRow[0]||'TEMP', total_gb: totalGb, used_gb: Math.max(0, totalGb-freeGb), free_gb: freeGb, pct_used: parseFloat(freeRow[3])||0, top_sessions: (sessionResult.rows||[]).map(r=>({ sid:r[0], serial:r[1], username:r[2]||'UNKNOWN', temp_mb:parseFloat(r[3])||0, tablespace:r[4]||'' })) }; let historical = { peak_gb:null, peak_pct:null, peak_time:null, lookback_days:30 }; if (awrAvailable) { try { const hr = await conn.execute(`SELECT NULL, NULL, NULL FROM DUAL`); // Simplified: DBA_HIST_TBSPC_SPACE_USAGE join is temp-tablespace specific — returning null when not easy to isolate historical = { peak_gb: null, peak_pct: null, peak_time: null, lookback_days: 30 }; } catch(e) {} } return { current, historical, awr_available: awrAvailable }; } catch(err) { console.error('Temp stats query failed:', err.message); return { current:{ tablespace_name:'TEMP', total_gb:0, used_gb:0, free_gb:0, pct_used:0, top_sessions:[] }, historical:{ peak_gb:null, peak_pct:null, peak_time:null, lookback_days:30 }, awr_available: awrAvailable }; } } async function queryAlertLog(conn) { try { const result = await conn.execute(` SELECT TO_CHAR(ORIGINATING_TIMESTAMP,'YYYY-MM-DD HH24:MI:SS'), MESSAGE_TEXT FROM V$DIAG_ALERT_EXT WHERE ORIGINATING_TIMESTAMP > SYSDATE-1 AND (MESSAGE_TEXT LIKE 'ORA-%' OR MESSAGE_TEXT LIKE '%checkpoint%' OR MESSAGE_TEXT LIKE '%corruption%' OR MESSAGE_TEXT LIKE '%recovery%' OR MESSAGE_TEXT LIKE '%error%' OR MESSAGE_TEXT LIKE '%warning%' OR MESSAGE_TEXT LIKE '%TNS-%' OR MESSAGE_TEXT LIKE 
'%instance%' OR MESSAGE_TEXT LIKE 'Thread%') ORDER BY ORIGINATING_TIMESTAMP DESC FETCH FIRST 200 ROWS ONLY `); const classified = (result.rows||[]).map(r => { const msg = (r[1]||'').trim(); let severity = 'info'; if (/ORA-600|ORA-7445|ORA-1578|ORA-04031|ORA-01555/.test(msg)) severity = 'critical'; else if (/ORA-\d{4,5}/.test(msg)) severity = 'warning'; else if (/checkpoint not complete|cannot allocate new log|block corruption|instance termination/.test(msg.toLowerCase())) severity = 'critical'; else if (/checkpoint|redo log switch|archiv|TNS-1\d{4}/.test(msg.toLowerCase())) severity = 'warning'; else if (/TNS-12560|TNS-12537|opiodr aborting|Fatal NI/.test(msg)) severity = 'noise'; return { ts: r[0]||'', message: msg, severity }; }); const summary = { total: classified.length, critical: classified.filter(e=>e.severity==='critical').length, warning: classified.filter(e=>e.severity==='warning').length, info: classified.filter(e=>e.severity==='info').length, noise: classified.filter(e=>e.severity==='noise').length }; return { entries: classified.slice(0,100), summary }; } catch(err) { const msg = err.message || ''; // V$DIAG_ALERT_EXT requires an explicit grant even with SELECT_CATALOG_ROLE const missingGrant = msg.includes('ORA-00942') || msg.includes('ORA-01031'); const errorMsg = missingGrant ? 
'Alert log access requires GRANT SELECT ON V_$DIAG_ALERT_EXT TO your_user' : msg;
// A missing grant is an expected, user-fixable condition — only log unexpected failures.
if (!missingGrant) console.error('Alert log query failed:', msg);
return { entries:[], summary:{ total:0, critical:0, warning:0, info:0, noise:0 }, error: errorMsg }; } }

/**
 * Current utilization of key instance resource limits (sessions, processes,
 * locks, transactions) from V$RESOURCE_LIMIT, plus 30-day peaks from
 * DBA_HIST_RESOURCE_LIMIT when AWR is available.
 *
 * @param {object} conn - open oracledb connection
 * @param {boolean} awrAvailable - when true, also query AWR history views
 * @returns {Promise<{current: Array, historical: object, awr_available: boolean}>}
 *   Never throws — returns empty current/historical on failure.
 */
async function queryResourceLimits(conn, awrAvailable) {
  try {
    const currentResult = await conn.execute(`
      SELECT RESOURCE_NAME, CURRENT_UTILIZATION, MAX_UTILIZATION, INITIAL_ALLOCATION, LIMIT_VALUE
      FROM V$RESOURCE_LIMIT
      WHERE RESOURCE_NAME IN ('sessions','processes','enqueue_locks','enqueue_resources','dml_locks','temporary_table_locks','transactions','max_rollback_segments')
      ORDER BY CASE RESOURCE_NAME WHEN 'sessions' THEN 1 WHEN 'processes' THEN 2 WHEN 'transactions' THEN 3 WHEN 'enqueue_locks' THEN 4 ELSE 9 END
    `);
    const current = (currentResult.rows||[]).map(r => {
      // LIMIT_VALUE is the literal string 'UNLIMITED' for unbounded resources.
      const limitVal = r[4]==='UNLIMITED' ? null : (parseInt(r[4])||null);
      const maxUtil = parseInt(r[2])||0;
      // Percentage is measured against the since-startup high-water mark, not current usage.
      const pctUsed = limitVal ? Math.round(maxUtil/limitVal*100) : null;
      return { resource:r[0]||'', current_utilization:parseInt(r[1])||0, max_utilization:maxUtil, initial_allocation:r[3]||'0', limit_value:limitVal, limit_display:r[4]||'0', pct_max_used:pctUsed, status: pctUsed!==null?(pctUsed>=90?'critical':pctUsed>=80?'warning':'ok'):'ok' };
    });
    let historical = {};
    if (awrAvailable) {
      try {
        const hr = await conn.execute(`
          SELECT RESOURCE_NAME, MAX(CURRENT_UTILIZATION), MAX(MAX_UTILIZATION)
          FROM DBA_HIST_RESOURCE_LIMIT
          WHERE SNAP_ID IN (SELECT SNAP_ID FROM DBA_HIST_SNAPSHOT WHERE END_INTERVAL_TIME > SYSDATE-30)
          GROUP BY RESOURCE_NAME
        `);
        (hr.rows||[]).forEach(r => { historical[r[0]] = { hist_max:parseInt(r[1])||0, hist_peak:parseInt(r[2])||0 }; });
      } catch(e) {} // AWR views may be unlicensed/inaccessible — history is best-effort
    }
    return { current, historical, awr_available: awrAvailable };
  } catch(err) {
    console.error('Resource limits query failed:', err.message);
    return { current:[], historical:{}, awr_available: awrAvailable };
  }
}

/**
 * Current SGA/PGA memory targets, recent SGA resize operations, and (when AWR
 * is available) 30-day PGA peak and SGA component history.
 *
 * @param {object} conn - open oracledb connection
 * @param {boolean} awrAvailable - when true, also query DBA_HIST_* views
 * @returns {Promise<object>} current targets in GB plus history; never throws.
 */
async function querySgaPgaHistory(conn, awrAvailable) {
  try {
    const paramResult = await conn.execute(`
      SELECT NAME, VALUE FROM V$PARAMETER
      WHERE NAME IN ('sga_target','pga_aggregate_target','sga_max_size','memory_target','memory_max_target')
    `);
    const params = {};
    (paramResult.rows||[]).forEach(r => { params[r[0]] = parseInt(r[1])||0; });
    // 1073741824 = bytes per GB; *10/10 keeps one decimal place.
    const current = { sga_target_gb: Math.round((params['sga_target']||0)/1073741824*10)/10, pga_target_gb: Math.round((params['pga_aggregate_target']||0)/1073741824*10)/10, sga_max_gb: Math.round((params['sga_max_size']||0)/1073741824*10)/10, memory_target_gb: Math.round((params['memory_target']||0)/1073741824*10)/10 };
    const resizeResult = await conn.execute(`
      SELECT TO_CHAR(START_TIME,'YYYY-MM-DD HH24:MI'), COMPONENT, OPER_TYPE, ROUND(INITIAL_SIZE/1073741824,2), ROUND(FINAL_SIZE/1073741824,2), STATUS
      FROM V$SGA_RESIZE_OPS
      ORDER BY START_TIME DESC
      FETCH FIRST 20 ROWS ONLY
    `).catch(() => ({ rows:[] }));
    const resizeOps = (resizeResult.rows||[]).map(r => ({ op_time:r[0]||'', component:r[1]||'', oper_type:r[2]||'', from_gb:parseFloat(r[3])||0, to_gb:parseFloat(r[4])||0, status:r[5]||'' }));
    let pgaHistory = { peak_allocated_gb:null, peak_time:null };
    let sgaComponentHistory = [];
    if (awrAvailable) {
      try {
        // KEEP (DENSE_RANK LAST ORDER BY p.VALUE) pairs the peak value with its snapshot time.
        const pr = await conn.execute(`
          SELECT ROUND(MAX(VALUE)/1073741824,2),
                 TO_CHAR(MAX(s.END_INTERVAL_TIME) KEEP (DENSE_RANK LAST ORDER BY p.VALUE),'YYYY-MM-DD HH24:MI')
          FROM DBA_HIST_PGASTAT p JOIN DBA_HIST_SNAPSHOT s ON s.SNAP_ID=p.SNAP_ID
          WHERE p.NAME='maximum PGA allocated' AND s.END_INTERVAL_TIME>SYSDATE-30
        `);
        const phr = (pr.rows||[[]])[0]||[];
        pgaHistory = { peak_allocated_gb:parseFloat(phr[0])||null, peak_time:phr[1]||null, lookback_days:30 };
      } catch(e) {}
      try {
        const sr = await conn.execute(`
          SELECT NAME, ROUND(MAX(VALUE)/1073741824,2), ROUND(MIN(VALUE)/1073741824,2)
          FROM DBA_HIST_SGA
          WHERE SNAP_ID IN (SELECT SNAP_ID FROM DBA_HIST_SNAPSHOT WHERE END_INTERVAL_TIME>SYSDATE-30)
          AND NAME IN ('Database Buffers','Shared Pool Size','Large Pool Size','Java Pool Size')
          GROUP BY NAME
        `);
        sgaComponentHistory = (sr.rows||[]).map(r => ({ component:r[0]||'', peak_gb:parseFloat(r[1])||0, min_gb:parseFloat(r[2])||0 }));
      } catch(e) {}
    }
    return { current, resize_ops: resizeOps, pga_history: pgaHistory, sga_component_history: sgaComponentHistory, awr_available: awrAvailable };
  } catch(err) {
    console.error('SGA/PGA history query failed:', err.message);
    return { current:{ sga_target_gb:0, pga_target_gb:0, sga_max_gb:0, memory_target_gb:0 }, resize_ops:[], pga_history:{ peak_allocated_gb:null, peak_time:null }, sga_component_history:[], awr_available: awrAvailable };
  }
}

// ============================================================
// Wave B: Backup & Recovery Health Checks
// ============================================================

/**
 * Master function — runs all 4 backup checks and returns structured backup_stats.
 * Gracefully handles non-RMAN databases (all checks return null on failure).
 */
async function queryBackupStats(conn) {
  // The four checks are independent — run them in parallel.
  const [rmanBackup, fraUsage, archivelogRate, backupValidation] = await Promise.all([ queryRmanBackup(conn), queryFraUsage(conn), queryArchivelogRate(conn), queryBackupValidation(conn) ]);
  // Compute overall backup status (worst of the 4 checks)
  const statuses = [rmanBackup, fraUsage, archivelogRate, backupValidation] .map(c => (c && c.status) || 'unknown');
  const overallStatus = statuses.includes('critical') ? 'critical' : statuses.includes('warning') ? 'warning' : statuses.every(s => s === 'ok') ? 'ok' : 'unknown';
  return { rman_backup: rmanBackup, fra_usage: fraUsage, archivelog_rate: archivelogRate, backup_validation: backupValidation, overall_status: overallStatus };
}

/**
 * Check 1: RMAN Backup Freshness
 * No full backup in >48h = critical | >24h = warning | <24h = ok
 */
async function queryRmanBackup(conn) {
  try {
    // Last backup job by type from V$RMAN_BACKUP_JOB_DETAILS
    // (ROW_NUMBER picks the most recent COMPLETED job per INPUT_TYPE)
    const jobResult = await conn.execute(`
      SELECT INPUT_TYPE, STATUS,
             TO_CHAR(START_TIME, 'YYYY-MM-DD HH24:MI:SS') AS START_TIME,
             TO_CHAR(END_TIME, 'YYYY-MM-DD HH24:MI:SS') AS END_TIME,
             ROUND((SYSDATE - END_TIME) * 24, 1) AS HOURS_AGO,
             ROUND(OUTPUT_BYTES / 1073741824, 2) AS SIZE_GB,
             ELAPSED_SECONDS
      FROM (
        SELECT INPUT_TYPE, STATUS, START_TIME, END_TIME, OUTPUT_BYTES, ELAPSED_SECONDS,
               ROW_NUMBER() OVER (PARTITION BY INPUT_TYPE ORDER BY END_TIME DESC) AS RN
        FROM V$RMAN_BACKUP_JOB_DETAILS
        WHERE STATUS = 'COMPLETED'
      )
      WHERE RN = 1
      ORDER BY CASE INPUT_TYPE WHEN 'DB FULL' THEN 1 WHEN 'DB INCR' THEN 2 WHEN 'ARCHIVELOG' THEN 3 ELSE 4 END
    `).catch(() => ({ rows: [] }));
    // Recent backup jobs (last 10 regardless of type)
    const recentResult = await conn.execute(`
      SELECT INPUT_TYPE, STATUS,
             TO_CHAR(START_TIME, 'YYYY-MM-DD HH24:MI:SS') AS START_TIME,
             TO_CHAR(END_TIME, 'YYYY-MM-DD HH24:MI:SS') AS END_TIME,
             ROUND((SYSDATE - END_TIME) * 24, 1) AS HOURS_AGO,
             ROUND(OUTPUT_BYTES / 1073741824, 2) AS SIZE_GB,
             ELAPSED_SECONDS
      FROM V$RMAN_BACKUP_JOB_DETAILS
      ORDER BY START_TIME DESC
      FETCH FIRST 10 ROWS ONLY
    `).catch(() => ({ rows: [] }));
    const lastByType = (jobResult.rows || []).map(r => ({ input_type: r[0] || '', status: r[1] || '', start_time: r[2] || '', end_time: r[3] || '', hours_ago: parseFloat(r[4]) || 0, size_gb: parseFloat(r[5]) || 0, elapsed_seconds: parseInt(r[6]) || 0 }));
    const recentJobs = (recentResult.rows || []).map(r => ({ input_type: r[0] || '', status: r[1] || '', start_time: r[2] || '', end_time: r[3] || '', hours_ago: parseFloat(r[4]) || 0, size_gb: parseFloat(r[5]) || 0, elapsed_seconds: parseInt(r[6]) || 0 }));
    // Find last full backup age
    const fullBackup = lastByType.find(b => b.input_type === 'DB FULL');
    const incrBackup = lastByType.find(b => b.input_type === 'DB INCR');
    const archBackup = lastByType.find(b => b.input_type === 'ARCHIVELOG');
    const fullHoursAgo = fullBackup ? fullBackup.hours_ago : null;
    let status = 'unknown';
    if (recentJobs.length === 0 && lastByType.length === 0) {
      status = 'unknown'; // No RMAN usage detected
    } else if (fullHoursAgo === null) {
      status = 'critical'; // No full backup ever
    } else if (fullHoursAgo > 48) { status = 'critical'; }
    else if (fullHoursAgo > 24) { status = 'warning'; }
    else { status = 'ok'; }
    return { status, rman_available: recentJobs.length > 0 || lastByType.length > 0, last_by_type: lastByType, recent_jobs: recentJobs, full_backup_hours_ago: fullHoursAgo, last_full_backup: fullBackup || null, last_incremental_backup: incrBackup || null, last_archivelog_backup: archBackup || null };
  } catch (err) {
    console.error('RMAN backup query failed:', err.message);
    return { status: 'unknown', rman_available: false, last_by_type: [], recent_jobs: [], error: err.message };
  }
}

/**
 * Check 2: Fast Recovery Area (FRA) Usage
 * >90% used AND <10% reclaimable = critical | >80% = warning | <80% = ok
 */
async function queryFraUsage(conn) {
  try {
    // FRA overview from V$RECOVERY_FILE_DEST
    const destResult = await conn.execute(`
      SELECT NAME,
             ROUND(SPACE_LIMIT / 1073741824, 2) AS LIMIT_GB,
             ROUND(SPACE_USED / 1073741824, 2) AS USED_GB,
             ROUND(SPACE_RECLAIMABLE / 1073741824, 2) AS RECLAIMABLE_GB,
             NUMBER_OF_FILES
      FROM V$RECOVERY_FILE_DEST
    `).catch(() => ({ rows: [] }));
    // Breakdown by file type from V$FLASH_RECOVERY_AREA_USAGE
    const usageResult = await conn.execute(`
      SELECT FILE_TYPE,
             ROUND(PERCENT_SPACE_USED, 1) AS PCT_USED,
             ROUND(PERCENT_SPACE_RECLAIMABLE, 1) AS PCT_RECLAIMABLE,
             NUMBER_OF_FILES
      FROM V$FLASH_RECOVERY_AREA_USAGE
      ORDER BY PERCENT_SPACE_USED DESC
    `).catch(() => ({ rows: [] }));
    // Archivelog generation rate (last 24h) for "hours until full" prediction
    const genRateResult = await conn.execute(`
      SELECT ROUND(SUM(BLOCKS * BLOCK_SIZE) / 1073741824, 2) AS ARCHIVELOGS_24H_GB
      FROM V$ARCHIVED_LOG
      WHERE COMPLETION_TIME > SYSDATE - 1 AND STANDBY_DEST = 'NO'
    `).catch(() => ({ rows: [[0]] }));
    const destRow = (destResult.rows || [[]])[0] || [];
    const limitGb = parseFloat(destRow[1]) || 0;
    const usedGb = parseFloat(destRow[2]) || 0;
    const reclaimableGb = parseFloat(destRow[3]) || 0;
    const fraLocation = destRow[0] || '';
    // *1000/10 keeps one decimal place in the percentages.
    const pctUsed = limitGb > 0 ? Math.round((usedGb / limitGb) * 1000) / 10 : 0;
    const pctReclaimable = limitGb > 0 ? Math.round((reclaimableGb / limitGb) * 1000) / 10 : 0;
    const archivelogs24hGb = parseFloat((genRateResult.rows || [[0]])[0]?.[0]) || 0;
    // Hours until FRA full: (limitGb - usedGb + reclaimableGb) / hourly_rate
    const availableGb = limitGb - usedGb + reclaimableGb;
    const hourlyRateGb = archivelogs24hGb / 24;
    const hoursUntilFull = (hourlyRateGb > 0 && limitGb > 0) ? Math.round(availableGb / hourlyRateGb) : null;
    const fileTypeBreakdown = (usageResult.rows || []).map(r => ({ file_type: r[0] || '', pct_used: parseFloat(r[1]) || 0, pct_reclaimable: parseFloat(r[2]) || 0, number_of_files: parseInt(r[3]) || 0 }));
    let status = 'unknown';
    if (limitGb === 0) {
      status = 'unknown'; // FRA not configured
    } else if (pctUsed > 90 && pctReclaimable < 10) { status = 'critical'; }
    else if (pctUsed > 80) { status = 'warning'; }
    else { status = 'ok'; }
    return { status, fra_configured: limitGb > 0, location: fraLocation, limit_gb: limitGb, used_gb: usedGb, reclaimable_gb: reclaimableGb, pct_used: pctUsed, pct_reclaimable: pctReclaimable, archivelogs_24h_gb: archivelogs24hGb, hours_until_full: hoursUntilFull, file_type_breakdown: fileTypeBreakdown };
  } catch (err) {
    console.error('FRA usage query failed:', err.message);
    return { status: 'unknown', fra_configured: false, error: err.message };
  }
}

/**
 * Check 3: Archivelog Generation Rate
 * Not in archivelog mode = critical
| switch frequency >20/hour = warning | otherwise = ok */ async function queryArchivelogRate(conn) { try { // Archivelog mode + current sequence const modeResult = await conn.execute(` SELECT LOG_MODE, ROUND((SYSDATE - STARTUP_TIME) * 24) AS HOURS_UP FROM V$DATABASE, V$INSTANCE `).catch(() => ({ rows: [['ARCHIVELOG', 0]] })); // Recent archivelog generation (last 24h) const archResult = await conn.execute(` SELECT TO_CHAR(COMPLETION_TIME, 'YYYY-MM-DD HH24') AS HOUR, COUNT(*) AS LOG_COUNT, ROUND(SUM(BLOCKS * BLOCK_SIZE) / 1048576, 1) AS SIZE_MB FROM V$ARCHIVED_LOG WHERE COMPLETION_TIME > SYSDATE - 1 AND STANDBY_DEST = 'NO' GROUP BY TO_CHAR(COMPLETION_TIME, 'YYYY-MM-DD HH24') ORDER BY HOUR DESC `).catch(() => ({ rows: [] })); // Redo log groups and sizes const logResult = await conn.execute(` SELECT l.GROUP#, l.MEMBERS, ROUND(l.BYTES / 1048576, 0) AS SIZE_MB, l.STATUS, l.ARCHIVED FROM V$LOG l ORDER BY l.GROUP# `).catch(() => ({ rows: [] })); // Log switch frequency from V$LOG_HISTORY (last 24h) const switchResult = await conn.execute(` SELECT ROUND(COUNT(*) / 24.0, 1) AS SWITCHES_PER_HOUR, COUNT(*) AS SWITCHES_24H FROM V$LOG_HISTORY WHERE FIRST_TIME > SYSDATE - 1 `).catch(() => ({ rows: [[0, 0]] })); const modeRow = (modeResult.rows || [['ARCHIVELOG', 0]])[0] || ['ARCHIVELOG', 0]; const logMode = modeRow[0] || 'ARCHIVELOG'; const archHourly = (archResult.rows || []).map(r => ({ hour: r[0] || '', log_count: parseInt(r[1]) || 0, size_mb: parseFloat(r[2]) || 0 })); const logGroups = (logResult.rows || []).map(r => ({ group_num: parseInt(r[0]) || 0, members: parseInt(r[1]) || 0, size_mb: parseInt(r[2]) || 0, status: r[3] || '', archived: r[4] || '' })); const switchRow = (switchResult.rows || [[0, 0]])[0] || [0, 0]; const switchesPerHour = parseFloat(switchRow[0]) || 0; const switches24h = parseInt(switchRow[1]) || 0; const totalArchivelogs24h = archHourly.reduce((sum, h) => sum + h.log_count, 0); const totalSizeMb24h = archHourly.reduce((sum, h) => sum + h.size_mb, 0); 
let status = 'ok'; if (logMode !== 'ARCHIVELOG') { status = 'critical'; // Not in archivelog mode } else if (switchesPerHour > 20) { status = 'warning'; } else { status = 'ok'; } return { status, log_mode: logMode, archivelog_mode: logMode === 'ARCHIVELOG', switches_per_hour: switchesPerHour, switches_24h: switches24h, archivelogs_24h: totalArchivelogs24h, total_size_mb_24h: totalSizeMb24h, hourly_breakdown: archHourly.slice(0, 24), log_groups: logGroups }; } catch (err) { console.error('Archivelog rate query failed:', err.message); return { status: 'unknown', archivelog_mode: null, error: err.message }; } } /** * Check 4: Backup Validation * Corruption found or last 3 RMAN jobs failed = critical */ async function queryBackupValidation(conn) { try { // Recent RMAN operations from V$RMAN_STATUS const rmanStatusResult = await conn.execute(` SELECT OPERATION, STATUS, TO_CHAR(START_TIME, 'YYYY-MM-DD HH24:MI:SS') AS START_TIME, TO_CHAR(END_TIME, 'YYYY-MM-DD HH24:MI:SS') AS END_TIME, MBYTES_PROCESSED, OUTPUT FROM V$RMAN_STATUS WHERE OPERATION IN ('BACKUP', 'RESTORE', 'RECOVER', 'DELETE', 'VALIDATE') AND START_TIME > SYSDATE - 7 ORDER BY START_TIME DESC FETCH FIRST 20 ROWS ONLY `).catch(() => ({ rows: [] })); // Backup corruption from V$BACKUP_CORRUPTION const backupCorrResult = await conn.execute(` SELECT COUNT(*) AS CORRUPT_COUNT, SUM(BLOCKS) AS CORRUPT_BLOCKS FROM V$BACKUP_CORRUPTION `).catch(() => ({ rows: [[0, 0]] })); // Copy corruption from V$COPY_CORRUPTION const copyCorrResult = await conn.execute(` SELECT COUNT(*) AS CORRUPT_COUNT, SUM(BLOCKS) AS CORRUPT_BLOCKS FROM V$COPY_CORRUPTION `).catch(() => ({ rows: [[0, 0]] })); const rmanOps = (rmanStatusResult.rows || []).map(r => ({ operation: r[0] || '', status: r[1] || '', start_time: r[2] || '', end_time: r[3] || '', mbytes_processed: parseFloat(r[4]) || 0, output: (r[5] || '').substring(0, 300) })); const backupCorrupt = parseInt((backupCorrResult.rows || [[0]])[0]?.[0]) || 0; const backupCorruptBlocks = 
parseInt((backupCorrResult.rows || [[0, 0]])[0]?.[1]) || 0; const copyCorrupt = parseInt((copyCorrResult.rows || [[0]])[0]?.[0]) || 0; const copyCorruptBlocks = parseInt((copyCorrResult.rows || [[0, 0]])[0]?.[1]) || 0; const totalCorruptions = backupCorrupt + copyCorrupt; // Check last 3 RMAN backup jobs const recentBackups = rmanOps.filter(op => op.operation === 'BACKUP').slice(0, 3); const last3Failed = recentBackups.length > 0 && recentBackups.every(b => b.status === 'FAILED'); let status = 'ok'; if (totalCorruptions > 0) { status = 'critical'; } else if (last3Failed) { status = 'critical'; } else if (recentBackups.some(b => b.status === 'FAILED')) { status = 'warning'; } else { status = 'ok'; } return { status, backup_corruptions: backupCorrupt, backup_corrupt_blocks: backupCorruptBlocks, copy_corruptions: copyCorrupt, copy_corrupt_blocks: copyCorruptBlocks, total_corruptions: totalCorruptions, recent_operations: rmanOps, last_3_backups_failed: last3Failed }; } catch (err) { console.error('Backup validation query failed:', err.message); return { status: 'unknown', total_corruptions: 0, error: err.message }; } } // ============================================================ // Wave E: EBS Apps Health Checks // All EBS tables schema-qualified with APPS. 
for SYSTEM user // ============================================================ async function queryAppsHealth(conn) { try { // EBS detection const ebsDetect = await conn.execute( `SELECT COUNT(*) FROM APPS.FND_APPLICATION WHERE ROWNUM = 1` ).catch(() => null); if (!ebsDetect) return null; const [cmResult, requestResult, oppResult, wfComponentsResult, stuckNotifResult, wfErrorResult] = await Promise.all([ // ── Concurrent Manager queues (verified against EBS 12.2.12) ── conn.execute(` SELECT DISTINCT b.user_concurrent_queue_name AS manager_name, a.target_node AS node, a.running_processes AS actual_procs, a.max_processes AS target_procs, DECODE(b.control_code, 'D', 'Deactivating', 'E', 'Deactivated', 'N', 'Node unavai', 'A', 'Activating', 'X', 'Terminated', 'T', 'Terminating', 'V', 'Verifying', 'O', 'Suspending', 'P', 'Suspended', 'Q', 'Resuming', 'R', 'Restarting', 'Running') AS status_label FROM apps.fnd_concurrent_queues a, apps.fnd_concurrent_queues_vl b WHERE a.concurrent_queue_id = b.concurrent_queue_id AND a.max_processes != 0 ORDER BY a.max_processes DESC `).catch(() => ({ rows: [] })), // Pending / running request counts conn.execute(` SELECT PHASE_CODE, COUNT(*) FROM APPS.FND_CONCURRENT_REQUESTS WHERE PHASE_CODE IN ('P','R') AND HOLD_FLAG = 'N' GROUP BY PHASE_CODE `).catch(() => ({ rows: [] })), // ── OPP (dedicated query) ────────────────────────────────────── conn.execute(` SELECT b.user_concurrent_queue_name AS manager_name, a.running_processes AS actual, a.max_processes AS target FROM apps.fnd_concurrent_queues a, apps.fnd_concurrent_queues_vl b WHERE a.concurrent_queue_id = b.concurrent_queue_id AND a.concurrent_queue_name = 'FNDCPOPP' `).catch(() => ({ rows: [] })), // ── All Workflow components (WF% types, all 13 components) ── conn.execute(` SELECT component_type, component_name, component_status, startup_mode FROM apps.fnd_svc_components WHERE component_type LIKE 'WF%' ORDER BY 1, 2 `).catch(() => ({ rows: [] })), conn.execute(` SELECT COUNT(*) 
FROM APPS.WF_NOTIFICATIONS WHERE STATUS = 'OPEN' AND MAIL_STATUS = 'MAIL' AND BEGIN_DATE < SYSDATE - 1/24 `).catch(() => ({ rows: [[0]] })), conn.execute(`SELECT COUNT(*) FROM APPS.WF_ERROR`).catch(() => ({ rows: [[0]] })) ]); // Process managers (new query: [name, node, actual, target, status_label]) const managers = (cmResult.rows || []).map(r => ({ display_name: r[0] || '', node: r[1] || '', actual: parseInt(r[2]) || 0, target: parseInt(r[3]) || 0, status_label: r[4] || 'Running' })); const icm = managers.find(m => (m.display_name || '').toLowerCase().includes('internal concurrent manager') ); let pendingCount = 0, runningCount = 0; (requestResult.rows || []).forEach(r => { if (r[0] === 'P') pendingCount = parseInt(r[1]) || 0; if (r[0] === 'R') runningCount = parseInt(r[1]) || 0; }); const icmDown = !icm || (icm.actual === 0 && icm.target > 0); let cmStatus = icmDown ? 'critical' : 'ok'; if (!icmDown) { const underStaffed = managers.some(m => m.target > 0 && m.actual < m.target); if (underStaffed || pendingCount > 50) cmStatus = 'warning'; } // OPP (dedicated query result) const oppRows = oppResult.rows || []; const oppActual = oppRows.length > 0 ? parseInt(oppRows[0][1]) || 0 : 0; const oppTarget = oppRows.length > 0 ? parseInt(oppRows[0][2]) || 0 : 0; const oppData = { manager_name: oppRows.length > 0 ? (oppRows[0][0] || 'Output Post Processor') : 'Output Post Processor', actual: oppActual, target: oppTarget, status: oppActual === 0 && oppTarget > 0 ? 'critical' : oppActual < oppTarget ? 'warning' : 'ok', recommendation: oppActual === 0 && oppTarget > 0 ? 'OPP is not running — PDF/output generation will fail. Start OPP from the CM admin page.' : oppActual < oppTarget ? 
`OPP has ${oppActual}/${oppTarget} processes running — throughput may be degraded.` : `OPP healthy: ${oppActual}/${oppTarget} process(es) running.` }; // Workflow (all WF% components) const wfServices = (wfComponentsResult.rows || []).map(r => ({ type: r[0] || '', name: r[1] || '', status: r[2] || 'UNKNOWN', startup_mode: r[3] || '' })); const mailer = wfServices.find(s => s.type === 'WF_MAILER' || (s.name || '').toLowerCase().includes('mailer') ); const mailerRunning = !!(mailer && mailer.status === 'RUNNING'); const stuckNotif = parseInt((stuckNotifResult.rows || [[0]])[0]?.[0]) || 0; const wfErrCount = parseInt((wfErrorResult.rows || [[0]])[0]?.[0]) || 0; const criticalComps = wfServices.filter(s => s.status === 'DEACTIVATED_SYSTEM' || s.status === 'STOPPED'); const warningComps = wfServices.filter(s => s.status !== 'RUNNING' && s.status !== 'DEACTIVATED_SYSTEM' && s.status !== 'STOPPED'); let wfStatus = 'ok'; if (mailer && !mailerRunning) wfStatus = 'critical'; else if (criticalComps.length > 0) wfStatus = 'critical'; else if (stuckNotif > 50 || wfErrCount > 20) wfStatus = 'critical'; else if (warningComps.length > 0 || stuckNotif > 5 || wfErrCount > 0) wfStatus = 'warning'; // Overall const statuses = [cmStatus, wfStatus, oppData.status]; const overallStatus = statuses.includes('critical') ? 'critical' : statuses.includes('warning') ? 'warning' : 'ok'; // ── Wave F: APPS_ENV and ADOP_STATUS ──────────────────────── const appsEnvResult = await queryAppsEnv(conn); const adopResult = await queryAdopStatus(); // Fold new check statuses into overall const allStatuses = [cmStatus, wfStatus, oppData.status, appsEnvResult.status, adopResult.status]; const newOverall = allStatuses.includes('critical') ? 'critical' : allStatuses.includes('warning') ? 
'warning' : 'ok'; return { is_ebs: true, status: newOverall, concurrent_managers: { status: cmStatus, icm_down: icmDown, managers, pending_requests: pendingCount, running_requests: runningCount }, opp: oppData, workflow: { status: wfStatus, mailer_running: mailerRunning, mailer_component: mailer || null, services: wfServices, stuck_notifications: stuckNotif, error_count: wfErrCount }, apps_env: appsEnvResult, adop_status: adopResult }; } catch (err) { console.error('EBS apps health query failed:', err.message); return null; } } // ============================================================ // Wave F: APPS_ENV Check // Query EBS environment variables from the database and (when // the proxy runs on the app tier) from the OS environment. // ============================================================ /** * APPS_ENV — display key EBS Apps environment variables. * * Sources (highest to lowest priority): * 1. OS environment (proxy runs on app tier — variables set by * sourcing the EBS context file, e.g. CONTEXT_FILE, INST_TOP …) * 2. FND_PROFILE_OPTION_VALUES for DB-accessible profile options * (e.g. APPS_JDBC_URL stored in FND_OAM_METVAL) * * Returns check_id, status, severity, message, and a display object * with columns/rows for table rendering in the UI. 
*/ async function queryAppsEnv(conn) { const CRITICAL_VARS = [ 'CONTEXT_FILE', 'INST_TOP', 'COMMON_TOP', 'ADMIN_SCRIPTS_HOME' ]; const ALL_VARS = [ 'CONTEXT_FILE', 'ADMIN_SCRIPTS_HOME', 'INST_TOP', 'COMMON_TOP', 'FMW_HOME', 'EBS_DOMAIN_HOME', 'APPS_JDBC_URL', 'TWO_TASK', 'ORACLE_HOME', 'ORACLE_SID', 'TNS_ADMIN' ]; // Step 1: read from OS environment (available when proxy runs on EBS app tier) const envVars = {}; for (const v of ALL_VARS) { const val = process.env[v]; if (val !== undefined) envVars[v] = val; } // Step 2: supplement missing vars from Oracle DB where possible try { // APPS_JDBC_URL: stored in FND_OAM_METVAL (EBS 12.2+) under key 'APPS_JDBC_URL' const jdbcResult = await conn.execute( `SELECT METVAL_CLOB FROM APPS.FND_OAM_METVAL WHERE METNAME = 'APPS_JDBC_URL' AND ROWNUM = 1` ).catch(() => null); if (jdbcResult && jdbcResult.rows && jdbcResult.rows[0] && !envVars['APPS_JDBC_URL']) { envVars['APPS_JDBC_URL'] = String(jdbcResult.rows[0][0] || '').substring(0, 200); } } catch (e) { /* non-fatal */ } try { // TNS_ADMIN: read from v$parameter (LOCAL_LISTENER or similar) as a heuristic const tnsResult = await conn.execute( `SELECT VALUE FROM v$parameter WHERE name = 'tns_admin' AND ROWNUM = 1` ).catch(() => null); if (tnsResult && tnsResult.rows && tnsResult.rows[0] && !envVars['TNS_ADMIN']) { const val = String(tnsResult.rows[0][0] || ''); if (val) envVars['TNS_ADMIN'] = val; } } catch (e) { /* non-fatal */ } try { // ORACLE_SID: from v$instance const sidResult = await conn.execute(`SELECT INSTANCE_NAME FROM v$instance`).catch(() => null); if (sidResult && sidResult.rows && sidResult.rows[0] && !envVars['ORACLE_SID']) { envVars['ORACLE_SID'] = String(sidResult.rows[0][0] || ''); } } catch (e) { /* non-fatal */ } // Step 3: build rows const rows = ALL_VARS.map(v => { const val = envVars[v]; const present = val !== undefined && val !== ''; const isCritical = CRITICAL_VARS.includes(v); return { variable: v, value: present ? val : null, state: present ? 
'OK' : 'MISSING', critical: isCritical }; }); // Step 4: compute status const missingCritical = rows.filter(r => r.critical && r.state === 'MISSING'); const missingAny = rows.filter(r => r.state === 'MISSING'); let status, message; if (missingCritical.length > 0) { status = 'critical'; message = `${missingCritical.length} critical EBS env var(s) missing: ${missingCritical.map(r => r.variable).join(', ')}`; } else if (missingAny.length > 0) { status = 'warning'; message = `${missingAny.length} EBS env var(s) not detected (non-critical): ${missingAny.map(r => r.variable).join(', ')}`; } else { status = 'ok'; message = `All ${rows.length} EBS environment variables are present.`; } return { check_id: 'APPS_ENV', status, severity: missingCritical.length > 0 ? 'critical' : missingAny.length > 0 ? 'warning' : 'ok', message, os_env_available: Object.keys(envVars).some(k => process.env[k] !== undefined), display: { columns: ['Variable', 'State', 'Value'], rows: rows.map(r => [r.variable, r.state, r.value ? r.value.substring(0, 120) : '—']) }, variables: rows }; } // ============================================================ // Wave F: ADOP_STATUS Check // Run `adop -status -detail` on the OS (proxy runs on EBS server) // and parse Session ID, Node, Phase, Status. // SKIP if APPS_PWD not available in environment. // ============================================================ /** * ADOP_STATUS — run adop patching utility status check. * * Requirements: * - Proxy runs on EBS app tier * - APPS_PWD set in environment (used to pass credentials non-interactively) * - adop utility on PATH (or ADMIN_SCRIPTS_HOME in environment) * * Parsing targets: * Session ID, Node, Phase, Status * * Returns check_id, status, severity, message, display (columns + rows). */ async function queryAdopStatus() { // Locate adop binary const adminScriptsHome = process.env.ADMIN_SCRIPTS_HOME || ''; const adopBin = adminScriptsHome ? 
`${adminScriptsHome}/adop` : 'adop'; // If APPS_PWD not in environment, return an actionable warning instead of silently skipping. // This surfaces the missing config to the user in the dashboard rather than hiding it. const appsPwd = process.env.APPS_PWD || process.env.APPS_PASSWORD || ''; if (!appsPwd) { return { check_id: 'ADOP_STATUS', status: 'warning', severity: 'warning', message: 'Set APPS_PWD in proxy.env to enable ADOP patching session detection. See setup guide: https://tunevault.app/docs#proxy-setup', display: { columns: ['Field', 'Value'], rows: [['Action Required', 'Add APPS_PWD= to your proxy.env file and restart the proxy service to enable ADOP patch session monitoring.']] }, sessions: [] }; } return new Promise(resolve => { const TIMEOUT_MS = 30000; const env = Object.assign({}, process.env, { APPS_PWD: appsPwd }); const proc = execFile( adopBin, ['-status', '-detail'], { env, timeout: TIMEOUT_MS, maxBuffer: 512 * 1024 }, (err, stdout, stderr) => { if (err && err.killed) { resolve({ check_id: 'ADOP_STATUS', status: 'warning', severity: 'warning', message: `adop -status timed out after ${TIMEOUT_MS / 1000}s.`, display: { columns: ['Field', 'Value'], rows: [['Status', 'TIMEOUT']] }, sessions: [] }); return; } if (err && !stdout) { // adop not installed or not on PATH const msg = (err.message || '').substring(0, 200); resolve({ check_id: 'ADOP_STATUS', status: 'warning', severity: 'warning', message: `adop command failed: ${msg}`, display: { columns: ['Field', 'Value'], rows: [['Error', msg]] }, sessions: [] }); return; } // Parse the output const output = (stdout || '') + (stderr || ''); const sessions = parseAdopOutput(output); // Determine status const failedSessions = sessions.filter(s => (s.status || '').toUpperCase() === 'FAILED'); const activeSessions = sessions.filter(s => ['INPROGRESS', 'IN_PROGRESS', 'RUNNING', 'ACTIVE'].includes((s.status || '').toUpperCase())); let status, message; if (sessions.length === 0) { status = 'ok'; message = 'No 
ADOP sessions found — no active or recent patching activity.'; } else if (failedSessions.length > 0) { status = 'critical'; message = `${failedSessions.length} ADOP session(s) in FAILED state. Review and clean up before next patch cycle.`; } else if (activeSessions.length > 0) { status = 'warning'; message = `${activeSessions.length} ADOP patching session(s) currently active.`; } else { status = 'ok'; message = `ADOP: ${sessions.length} historical session(s) found, none in FAILED state.`; } resolve({ check_id: 'ADOP_STATUS', status, severity: failedSessions.length > 0 ? 'critical' : activeSessions.length > 0 ? 'warning' : 'ok', message, display: { columns: ['Session ID', 'Node', 'Phase', 'Status'], rows: sessions.map(s => [s.session_id || '—', s.node || '—', s.phase || '—', s.status || '—']) }, sessions, raw_output: output.substring(0, 2000) }); } ); // Safety net — execFile timeout param above should handle it, but belt-and-suspenders setTimeout(() => { try { proc.kill('SIGTERM'); } catch (e) { /* ignore */ } }, TIMEOUT_MS + 1000); }); } /** * Parse `adop -status -detail` output. * * Typical output block per session: * SESSION ID : 42 * NODE : ebsapp01 * PHASE : FINALIZE * STATUS : SUCCESS * * Returns array of { session_id, node, phase, status }. 
*/ function parseAdopOutput(text) { const sessions = []; // Split by blank lines to separate session blocks const blocks = text.split(/\n\s*\n/); for (const block of blocks) { const lines = block.split('\n').map(l => l.trim()).filter(Boolean); const session = {}; for (const line of lines) { const m = line.match(/^(SESSION\s*ID|NODE|PHASE|STATUS)\s*[:\-]\s*(.+)/i); if (m) { const key = m[1].trim().toLowerCase().replace(/\s+/, '_'); const value = m[2].trim(); if (key === 'session_id') session.session_id = value; else if (key === 'node') session.node = value; else if (key === 'phase') session.phase = value; else if (key === 'status') session.status = value; } } // Only include blocks that look like adop session entries if (session.session_id || session.phase || session.status) { sessions.push(session); } } return sessions; } // ============================================================ // Wave G: Database Objects & Session Stats (OB03, OB04, OB06, OB07, OB09, OB12) // ============================================================ /** * queryDbObjects — collects invalid objects, stale stats, SCN headroom, SPFILE status. * All queries are read-only against SYS-visible views. 
*/ async function queryDbObjects(conn) { try { const EXCLUDED_OWNERS = `'SYS','SYSTEM','DBSNMP','SYSMAN','OUTLN','MDSYS','ORDSYS','XDB','CTXSYS','WMSYS','EXFSYS','APPQOSSYS','DBSFWUSER','OJVMSYS','DVSYS','LBACSYS'`; const [invalidR, staleR, scnR, spfileR, recycleR, controlR] = await Promise.all([ // Invalid objects count conn.execute(` SELECT COUNT(*) AS invalid_count, COUNT(CASE WHEN OBJECT_TYPE IN ('PACKAGE BODY','PACKAGE') THEN 1 END) AS invalid_packages, COUNT(CASE WHEN OBJECT_TYPE = 'PROCEDURE' THEN 1 END) AS invalid_procedures, COUNT(CASE WHEN OBJECT_TYPE = 'VIEW' THEN 1 END) AS invalid_views, COUNT(CASE WHEN OBJECT_TYPE = 'TRIGGER' THEN 1 END) AS invalid_triggers FROM DBA_OBJECTS WHERE STATUS = 'INVALID' AND OWNER NOT IN (${EXCLUDED_OWNERS}) `).catch(() => ({ rows: [[0,0,0,0,0]] })), // Stale statistics count conn.execute(` SELECT COUNT(*) AS stale_count, COUNT(CASE WHEN LAST_ANALYZED IS NULL THEN 1 END) AS never_analyzed FROM DBA_TAB_STATISTICS WHERE STALE_STATS = 'YES' AND OWNER NOT IN (${EXCLUDED_OWNERS}) `).catch(() => ({ rows: [[0,0]] })), // SCN headroom (days remaining) conn.execute(` SELECT CURRENT_SCN, ROUND((TO_NUMBER(SYSDATE - TO_DATE('01-01-1988','DD-MM-YYYY')) * 24 * 3600 * 16384 * 1024 - CURRENT_SCN) / (24*3600*16384), 0) AS days_remaining FROM V$DATABASE `).catch(() => ({ rows: [[null, null]] })), // SPFILE in use conn.execute(` SELECT DECODE(COUNT(*),0,'PFILE','SPFILE') AS param_file_type FROM V$PARAMETER WHERE NAME = 'spfile' AND VALUE IS NOT NULL `).catch(() => ({ rows: [['UNKNOWN']] })), // Recycle bin size conn.execute(` SELECT COUNT(*) AS object_count, ROUND(SUM(SPACE)*8192/1073741824, 2) AS size_gb FROM DBA_RECYCLEBIN `).catch(() => ({ rows: [[0, 0]] })), // Control file count conn.execute(` SELECT COUNT(*) AS controlfile_count, SUM(CASE WHEN STATUS IS NOT NULL AND STATUS != '' THEN 1 ELSE 0 END) AS invalid_count FROM V$CONTROLFILE `).catch(() => ({ rows: [[0, 0]] })) ]); const invalidRow = (invalidR.rows || [[0,0,0,0,0]])[0] || 
[0,0,0,0,0]; const staleRow = (staleR.rows || [[0,0]])[0] || [0,0]; const scnRow = (scnR.rows || [[null,null]])[0] || [null,null]; const spfileRow = (spfileR.rows || [['UNKNOWN']])[0] || ['UNKNOWN']; const recycleRow = (recycleR.rows || [[0,0]])[0] || [0,0]; const ctrlRow = (controlR.rows || [[0,0]])[0] || [0,0]; return { invalid_objects: { count: parseInt(invalidRow[0]) || 0, packages: parseInt(invalidRow[1]) || 0, procedures: parseInt(invalidRow[2]) || 0, views: parseInt(invalidRow[3]) || 0, triggers: parseInt(invalidRow[4]) || 0 }, stale_stats: { stale_count: parseInt(staleRow[0]) || 0, never_analyzed: parseInt(staleRow[1]) || 0 }, scn_headroom: { current_scn: parseInt(scnRow[0]) || null, days_remaining: parseInt(scnRow[1]) || null }, spfile: { param_file_type: String(spfileRow[0] || 'UNKNOWN'), using_spfile: String(spfileRow[0] || '') === 'SPFILE' }, recyclebin: { object_count: parseInt(recycleRow[0]) || 0, size_gb: parseFloat(recycleRow[1]) || 0 }, controlfiles: { count: parseInt(ctrlRow[0]) || 0, invalid_count: parseInt(ctrlRow[1]) || 0 } }; } catch (err) { console.error('DB objects query failed:', err.message); return { invalid_objects: { count: 0, packages: 0, procedures: 0, views: 0, triggers: 0 }, stale_stats: { stale_count: 0, never_analyzed: 0 }, scn_headroom: { current_scn: null, days_remaining: null }, spfile: { param_file_type: 'UNKNOWN', using_spfile: false }, recyclebin: { object_count: 0, size_gb: 0 }, controlfiles: { count: 0, invalid_count: 0 } }; } } /** * querySessionStats — active/blocked session counts, long-running SQL. * Covers OB06_BLOCKING_LOCKS, OB07_LISTENER_SESSIONS, PF09_LONG_RUNNING_SQL. 
/**
 * querySessionStats — active/blocked session counts and long-running SQL.
 * Covers OB06_BLOCKING_LOCKS, OB07_LISTENER_SESSIONS, PF09_LONG_RUNNING_SQL.
 *
 * @param {object} conn - An open oracledb connection (only `execute` is used).
 * @returns {Promise<object>} Session metrics; zeroed defaults on any failure.
 */
async function querySessionStats(conn) {
  // Safe numeric coercion: NaN/undefined collapse to 0 (always pass radix).
  const toInt = (v) => Number.parseInt(v, 10) || 0;
  try {
    const [sessionR, blockedR, longSqlR] = await Promise.all([
      // Total / active / user session counts from V$SESSION.
      conn.execute(`
        SELECT COUNT(*) AS total_sessions,
               COUNT(CASE WHEN STATUS = 'ACTIVE' AND TYPE = 'USER' THEN 1 END) AS active_sessions,
               COUNT(CASE WHEN TYPE = 'USER' THEN 1 END) AS user_sessions
        FROM V$SESSION
      `).catch(() => ({ rows: [[0, 0, 0]] })),
      // Sessions actively blocked on another session's lock.
      conn.execute(`
        SELECT COUNT(*) AS blocked_count
        FROM V$SESSION
        WHERE BLOCKING_SESSION IS NOT NULL AND STATUS = 'ACTIVE'
      `).catch(() => ({ rows: [[0]] })),
      // User SQL that has been executing for more than 5 minutes.
      conn.execute(`
        SELECT COUNT(*) AS long_running_count,
               ROUND(MAX((SYSDATE - SQL_EXEC_START) * 1440), 1) AS max_runtime_min
        FROM V$SESSION
        WHERE STATUS = 'ACTIVE' AND TYPE = 'USER'
          AND SQL_EXEC_START IS NOT NULL
          AND (SYSDATE - SQL_EXEC_START) * 1440 > 5
      `).catch(() => ({ rows: [[0, 0]] }))
    ]);

    const sessRow = sessionR.rows?.[0] ?? [0, 0, 0];
    const blockRow = blockedR.rows?.[0] ?? [0];
    const longRow = longSqlR.rows?.[0] ?? [0, 0];

    return {
      total_sessions: toInt(sessRow[0]),
      active_sessions: toInt(sessRow[1]),
      user_sessions: toInt(sessRow[2]),
      blocked_sessions: toInt(blockRow[0]),
      long_running_sql_count: toInt(longRow[0]),
      max_runtime_min: Number.parseFloat(longRow[1]) || 0
    };
  } catch (err) {
    console.error('Session stats query failed:', err.message);
    return {
      total_sessions: 0,
      active_sessions: 0,
      user_sessions: 0,
      blocked_sessions: 0,
      long_running_sql_count: 0,
      max_runtime_min: 0
    };
  }
}

/**
 * querySecurityStats — covers SEC01-SEC07: default passwords, dangerous
 * PUBLIC system privileges, open schema-only accounts, password policy,
 * audit configuration, and DBA role membership.
 *
 * @param {object} conn - An open oracledb connection.
 * @returns {Promise<object>} Security posture metrics; safe defaults on failure.
 */
async function querySecurityStats(conn) {
  const toInt = (v) => Number.parseInt(v, 10) || 0;
  try {
    const [defaultPwdR, publicPrivR, openSchemaR, profileR, auditR, dbaUsersR] = await Promise.all([
      // OPEN accounts still using a default/well-known password.
      conn.execute(`
        SELECT COUNT(*) AS default_pwd_count
        FROM DBA_USERS_WITH_DEFPWD
        WHERE ACCOUNT_STATUS = 'OPEN'
      `).catch(() => ({ rows: [[0]] })),
      // High-risk system privileges granted to PUBLIC.
      conn.execute(`
        SELECT COUNT(*) AS dangerous_public_grants
        FROM DBA_SYS_PRIVS
        WHERE GRANTEE = 'PUBLIC'
          AND PRIVILEGE IN ('CREATE PROCEDURE','CREATE ANY PROCEDURE','CREATE ANY TRIGGER','ALTER SYSTEM','ALTER DATABASE','DROP ANY TABLE','EXECUTE ANY PROCEDURE')
      `).catch(() => ({ rows: [[0]] })),
      // Schema-only accounts left OPEN (should normally be locked).
      conn.execute(`
        SELECT COUNT(*) AS open_schema_accounts
        FROM DBA_USERS
        WHERE ACCOUNT_STATUS = 'OPEN'
          AND AUTHENTICATION_TYPE = 'NONE'
          AND USERNAME NOT IN ('SYS','SYSTEM')
      `).catch(() => ({ rows: [[0]] })),
      // Password verify function configured in the DEFAULT profile.
      conn.execute(`
        SELECT LIMIT AS password_verify_function
        FROM DBA_PROFILES
        WHERE PROFILE = 'DEFAULT' AND RESOURCE_NAME = 'PASSWORD_VERIFY_FUNCTION'
      `).catch(() => ({ rows: [['NULL']] })),
      // audit_trail init parameter (NONE means auditing disabled).
      conn.execute(`
        SELECT VALUE AS audit_trail
        FROM V$PARAMETER
        WHERE NAME = 'audit_trail'
      `).catch(() => ({ rows: [['NONE']] })),
      // FIX: the DBA role is a ROLE, visible in DBA_ROLE_PRIVS.GRANTED_ROLE —
      // the old query filtered DBA_SYS_PRIVS.PRIVILEGE = 'DBA', which never
      // matches (system privileges and roles live in different views), so the
      // count was always 0.
      conn.execute(`
        SELECT COUNT(DISTINCT GRANTEE) AS dba_user_count
        FROM DBA_ROLE_PRIVS
        WHERE GRANTED_ROLE = 'DBA'
          AND GRANTEE NOT IN ('SYS','SYSTEM','DBA','SYSMAN')
      `).catch(() => ({ rows: [[0]] }))
    ]);

    const pwdVerifyFn = String(profileR.rows?.[0]?.[0] ?? 'NULL');
    const auditTrail = String(auditR.rows?.[0]?.[0] ?? 'NONE');

    return {
      default_pwd_accounts: toInt(defaultPwdR.rows?.[0]?.[0]),
      dangerous_public_grants: toInt(publicPrivR.rows?.[0]?.[0]),
      open_schema_accounts: toInt(openSchemaR.rows?.[0]?.[0]),
      password_verify_function: pwdVerifyFn,
      password_policy_active: pwdVerifyFn !== 'NULL' && pwdVerifyFn !== 'null',
      audit_trail: auditTrail,
      audit_enabled: auditTrail !== 'NONE' && auditTrail !== 'none',
      dba_user_count: toInt(dbaUsersR.rows?.[0]?.[0])
    };
  } catch (err) {
    console.error('Security stats query failed:', err.message);
    return {
      default_pwd_accounts: 0,
      dangerous_public_grants: 0,
      open_schema_accounts: 0,
      password_verify_function: 'UNKNOWN',
      password_policy_active: false,
      audit_trail: 'UNKNOWN',
      audit_enabled: false,
      dba_user_count: 0
    };
  }
}

/**
 * querySchemaStats — top segments by size, datafile availability, disk-sort
 * ratio, and full-table-scan ratio.
 * Covers ST04_SEGMENT_GROWTH and ST05_DATAFILE_STATUS (recyclebin metrics
 * are collected elsewhere).
 *
 * @param {object} conn - An open oracledb connection.
 * @returns {Promise<object>} Storage/throughput metrics; safe defaults on failure.
 */
async function querySchemaStats(conn) {
  const toInt = (v) => Number.parseInt(v, 10) || 0;
  // Oracle-maintained schemas excluded from "application segment" sizing.
  const EXCLUDED_OWNERS = `'SYS','SYSTEM','DBSNMP','SYSMAN','OUTLN','MDSYS','ORDSYS','XDB','CTXSYS','WMSYS','EXFSYS'`;
  try {
    const [topSegR, datafileR, sortR, ftScanR] = await Promise.all([
      // Top 10 application segments by allocated size.
      conn.execute(`
        SELECT OWNER, SEGMENT_NAME, SEGMENT_TYPE,
               ROUND(SUM(BYTES)/1073741824, 2) AS size_gb
        FROM DBA_SEGMENTS
        WHERE OWNER NOT IN (${EXCLUDED_OWNERS})
        GROUP BY OWNER, SEGMENT_NAME, SEGMENT_TYPE
        ORDER BY size_gb DESC
        FETCH FIRST 10 ROWS ONLY
      `).catch(() => ({ rows: [] })),
      // FIX: DBA_DATA_FILES.STATUS only holds AVAILABLE/INVALID; the
      // online/offline state is in ONLINE_STATUS. The old check
      // (STATUS = 'OFFLINE') could never match, so offline_count was always 0.
      conn.execute(`
        SELECT COUNT(*) AS problem_count,
               COUNT(CASE WHEN ONLINE_STATUS = 'OFFLINE' THEN 1 END) AS offline_count
        FROM DBA_DATA_FILES
        WHERE STATUS <> 'AVAILABLE'
           OR ONLINE_STATUS NOT IN ('ONLINE','SYSTEM')
      `).catch(() => ({ rows: [[0, 0]] })),
      // Disk vs in-memory sort counters from V$SYSSTAT.
      conn.execute(`
        SELECT d.VALUE AS disk_sorts, m.VALUE AS mem_sorts
        FROM (SELECT VALUE FROM V$SYSSTAT WHERE NAME = 'sorts (disk)') d,
             (SELECT VALUE FROM V$SYSSTAT WHERE NAME = 'sorts (memory)') m
      `).catch(() => ({ rows: [[0, 0]] })),
      // Long-table scans vs rowid (index-driven) fetches.
      conn.execute(`
        SELECT s.VALUE AS long_scans, i.VALUE AS index_lookups
        FROM (SELECT VALUE FROM V$SYSSTAT WHERE NAME = 'table scans (long tables)') s,
             (SELECT VALUE FROM V$SYSSTAT WHERE NAME = 'table fetch by rowid') i
      `).catch(() => ({ rows: [[0, 0]] }))
    ]);

    const topSegments = (topSegR.rows || []).map((r) => ({
      owner: r[0] || '',
      segment_name: r[1] || '',
      segment_type: r[2] || '',
      size_gb: Number.parseFloat(r[3]) || 0
    }));

    const dfRow = datafileR.rows?.[0] ?? [0, 0];
    const sortRow = sortR.rows?.[0] ?? [0, 0];
    const scanRow = ftScanR.rows?.[0] ?? [0, 0];

    const diskSorts = toInt(sortRow[0]);
    const memSorts = toInt(sortRow[1]);
    const longScans = toInt(scanRow[0]);
    const indexLookups = toInt(scanRow[1]);
    // Ratio as a percentage rounded to 2 decimals; 0 when there is no activity.
    const pct = (part, whole) => (whole > 0 ? Math.round((part / whole) * 10000) / 100 : 0);

    return {
      top_segments: topSegments,
      problem_datafiles: toInt(dfRow[0]),
      offline_datafiles: toInt(dfRow[1]),
      disk_sort_pct: pct(diskSorts, diskSorts + memSorts),
      disk_sorts: diskSorts,
      mem_sorts: memSorts,
      full_table_scan_pct: pct(longScans, longScans + indexLookups),
      long_scans: longScans
    };
  } catch (err) {
    console.error('Schema stats query failed:', err.message);
    return {
      top_segments: [],
      problem_datafiles: 0,
      offline_datafiles: 0,
      disk_sort_pct: 0,
      disk_sorts: 0,
      mem_sorts: 0,
      full_table_scan_pct: 0,
      long_scans: 0
    };
  }
}

// ============================================================
// Error Formatting
// ============================================================

/**
 * formatOracleError — map raw Oracle/network error text to a friendly,
 * actionable message. Falls back to the original message when no known
 * error code is present.
 *
 * @param {Error|string} err - Error object (or anything stringifiable).
 * @returns {string} Human-readable explanation.
 */
function formatOracleError(err) {
  const msg = err?.message || String(err);
  // Ordered [substring, friendly message] pairs — first match wins.
  const hints = [
    ['ORA-12154', 'TNS name could not be resolved. Check service name.'],
    ['ORA-12541', 'No listener at the specified host and port. Check host and port.'],
    ['ORA-12514', 'Service name not found. Check the service name or SID.'],
    ['ORA-12170', 'Connection timed out. Host may be unreachable.'],
    ['ORA-01017', 'Invalid username or password.'],
    ['ORA-28000', 'Account is locked.'],
    ['ORA-28001', 'Password has expired.'],
    ['ORA-01031', 'Insufficient privileges. User needs SELECT_CATALOG_ROLE or explicit grants.'],
    ['ORA-00942', 'Table or view does not exist. User may need additional grants.'],
    ['ENOTFOUND', 'Hostname not found. Check the hostname or IP address.'],
    ['ECONNREFUSED', 'Connection refused. Check host, port, and that the Oracle listener is running.'],
    ['ETIMEDOUT', 'Connection timed out. Host may be unreachable.'],
    ['NJS-500', 'oracledb thin client error. Ensure Oracle DB is version 12.1+.']
  ];
  for (const [code, friendly] of hints) {
    if (msg.includes(code)) return friendly;
  }
  return msg;
}