From 331d8b819412b6cb016080e99765c94d27cbf167 Mon Sep 17 00:00:00 2001 From: Matt Date: Thu, 12 Jun 2025 21:54:47 +0200 Subject: [PATCH] REVERT Network Updates --- Dockerfile | 28 +--- docker-entrypoint.sh | 54 ------ docs/502-error-fixes-deployment.md | 212 ----------------------- ecosystem.config.js | 50 ------ nginx/client.portnimara.dev.conf | 197 ---------------------- nginx/error-pages/error-502.html | 137 --------------- nginx/upstream.conf | 11 -- server/api/health.ts | 259 ----------------------------- server/plugins/memory-monitor.ts | 67 -------- server/plugins/startup-health.ts | 30 ---- server/utils/cleanup-manager.ts | 160 ------------------ server/utils/documeso.ts | 238 +++++++++++++------------- server/utils/health-checker.ts | 144 ---------------- server/utils/nocodb.ts | 89 ++++------ server/utils/request-queue.ts | 190 --------------------- server/utils/resilient-http.ts | 229 ------------------------- server/utils/service-checks.ts | 162 ------------------ 17 files changed, 161 insertions(+), 2096 deletions(-) delete mode 100644 docker-entrypoint.sh delete mode 100644 docs/502-error-fixes-deployment.md delete mode 100644 ecosystem.config.js delete mode 100644 nginx/client.portnimara.dev.conf delete mode 100644 nginx/error-pages/error-502.html delete mode 100644 nginx/upstream.conf delete mode 100644 server/api/health.ts delete mode 100644 server/plugins/memory-monitor.ts delete mode 100644 server/plugins/startup-health.ts delete mode 100644 server/utils/cleanup-manager.ts delete mode 100644 server/utils/health-checker.ts delete mode 100644 server/utils/request-queue.ts delete mode 100644 server/utils/resilient-http.ts delete mode 100644 server/utils/service-checks.ts diff --git a/Dockerfile b/Dockerfile index b890e9f..567c074 100644 --- a/Dockerfile +++ b/Dockerfile @@ -16,31 +16,5 @@ RUN npm prune FROM base as production ENV PORT=$PORT - -# Install PM2 and required tools -RUN apt-get update && apt-get install -y \ - curl \ - tini \ - && rm -rf /var/lib/apt/lists/* \ - && npm install -g pm2 - -# Copy built application COPY --from=build /app/.output /app/.output - -# Copy PM2 config and entrypoint -COPY ecosystem.config.js /app/ -COPY docker-entrypoint.sh /app/ -RUN chmod +x /app/docker-entrypoint.sh - -# Create logs directory -RUN mkdir -p /app/logs - -# Health check -HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \ - CMD curl -f http://localhost:${PORT}/api/health || exit 1 - -# Use tini as init process -ENTRYPOINT ["/usr/bin/tini", "--"] - -# Start with our custom entrypoint -CMD ["/app/docker-entrypoint.sh"] +CMD ["node", ".output/server/index.mjs"] \ No newline at end of file diff --git a/docker-entrypoint.sh b/docker-entrypoint.sh deleted file mode 100644 index 120d92a..0000000 --- a/docker-entrypoint.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/sh -set -e - -echo "[Entrypoint] Starting Port Nimara Client Portal..." - -# Create logs directory if it doesn't exist -mkdir -p /app/logs - -# Install PM2 globally if not already installed -if ! command -v pm2 &> /dev/null; then - echo "[Entrypoint] Installing PM2..." - npm install -g pm2 -fi - -# Wait for dependent services to be ready -echo "[Entrypoint] Waiting for dependent services..." 
- -# Function to check service availability -check_service() { - local service_name=$1 - local service_url=$2 - local max_attempts=30 - local attempt=0 - - while [ $attempt -lt $max_attempts ]; do - if curl -s -o /dev/null -w "%{http_code}" "$service_url" | grep -E "^(2|3|4)" > /dev/null; then - echo "[Entrypoint] $service_name is ready" - return 0 - fi - - attempt=$((attempt + 1)) - echo "[Entrypoint] Waiting for $service_name... (attempt $attempt/$max_attempts)" - sleep 2 - done - - echo "[Entrypoint] Warning: $service_name might not be ready after $max_attempts attempts" - return 1 -} - -# Check NocoDB if URL is set -if [ ! -z "$NUXT_NOCODB_URL" ]; then - check_service "NocoDB" "$NUXT_NOCODB_URL/api/v1/health" || true -fi - -# Check Directus if URL is set -if [ ! -z "$NUXT_PUBLIC_DIRECTUS_URL" ]; then - check_service "Directus" "$NUXT_PUBLIC_DIRECTUS_URL/server/health" || true -fi - -# Start the application with PM2 -echo "[Entrypoint] Starting application with PM2..." - -# Start PM2 in no-daemon mode -exec pm2-runtime start ecosystem.config.js diff --git a/docs/502-error-fixes-deployment.md b/docs/502-error-fixes-deployment.md deleted file mode 100644 index ae04504..0000000 --- a/docs/502-error-fixes-deployment.md +++ /dev/null @@ -1,212 +0,0 @@ -# 502 Error Fixes - Deployment Guide - -This guide covers the comprehensive fixes implemented to resolve the 502 errors in the Port Nimara Client Portal. - -## Overview of Changes - -### 1. Health Check System -- **New Files:** - - `server/api/health.ts` - Health check endpoint - - `server/utils/health-checker.ts` - Application readiness management - - `server/utils/service-checks.ts` - Service availability checks - - `server/plugins/startup-health.ts` - Startup health monitoring - -### 2. Process Management (PM2) -- **New Files:** - - `ecosystem.config.js` - PM2 configuration with cluster mode - - `docker-entrypoint.sh` - Custom entrypoint with service checks -- **Modified Files:** - - `Dockerfile` - Added PM2, health checks, and tini init process - -### 3. Nginx Configuration -- **New Files:** - - `nginx/upstream.conf` - Upstream configuration with health checks - - `nginx/client.portnimara.dev.conf` - Enhanced Nginx configuration - - `nginx/error-pages/error-502.html` - Custom 502 error page with auto-retry - -### 4. External Service Resilience -- **New Files:** - - `server/utils/resilient-http.ts` - HTTP client with retry logic and circuit breaker - - `server/utils/documeso.ts` - Enhanced Documenso client -- **Modified Files:** - - `server/utils/nocodb.ts` - Updated to use resilient HTTP client - -### 5. Memory & Performance Optimization -- **New Files:** - - `server/plugins/memory-monitor.ts` - Memory usage monitoring - - `server/utils/cleanup-manager.ts` - Resource cleanup utilities - - `server/utils/request-queue.ts` - Request queue system - -## Deployment Steps - -### Step 1: Build Docker Image - -```bash -# Build the new Docker image with PM2 support -docker build -t port-nimara-client-portal:resilient . -``` - -### Step 2: Update Nginx Configuration - -1. 
Copy the new Nginx configuration files to your server:
-```bash
-# Copy upstream configuration
-sudo cp nginx/upstream.conf /etc/nginx/conf.d/
-
-# Copy site configuration
-sudo cp nginx/client.portnimara.dev.conf /etc/nginx/sites-available/client.portnimara.dev
-
-# Create error pages directory
-sudo mkdir -p /etc/nginx/error-pages
-sudo cp nginx/error-pages/error-502.html /etc/nginx/error-pages/
-
-# Test Nginx configuration
-sudo nginx -t
-
-# Reload Nginx
-sudo nginx -s reload
-```
-
-### Step 3: Update Environment Variables
-
-Ensure all required environment variables are set:
-
-```bash
-# Required for health checks
-NUXT_NOCODB_URL=your-nocodb-url
-NUXT_NOCODB_TOKEN=your-nocodb-token
-NUXT_MINIO_ACCESS_KEY=your-minio-key
-NUXT_MINIO_SECRET_KEY=your-minio-secret
-NUXT_DOCUMENSO_API_KEY=your-documenso-key
-NUXT_DOCUMENSO_BASE_URL=https://signatures.portnimara.dev
-
-# Optional - for enabling garbage collection monitoring
-NODE_OPTIONS="--max-old-space-size=8192 --expose-gc"
-```
-
-### Step 4: Deploy with Docker Compose
-
-Create or update your `docker-compose.yml`:
-
-```yaml
-version: '3.8'
-
-services:
-  client-portal:
-    image: port-nimara-client-portal:resilient
-    ports:
-      - "3028:3000"
-    environment:
-      - NODE_ENV=production
-      - NUXT_NOCODB_URL=${NUXT_NOCODB_URL}
-      - NUXT_NOCODB_TOKEN=${NUXT_NOCODB_TOKEN}
-      - NUXT_MINIO_ACCESS_KEY=${NUXT_MINIO_ACCESS_KEY}
-      - NUXT_MINIO_SECRET_KEY=${NUXT_MINIO_SECRET_KEY}
-      - NUXT_DOCUMENSO_API_KEY=${NUXT_DOCUMENSO_API_KEY}
-      - NUXT_DOCUMENSO_BASE_URL=${NUXT_DOCUMENSO_BASE_URL}
-    volumes:
-      - ./logs:/app/logs
-    healthcheck:
-      test: ["CMD", "curl", "-f", "http://localhost:3000/api/health"]
-      interval: 30s
-      timeout: 10s
-      retries: 3
-      start_period: 60s
-    restart: unless-stopped
-    networks:
-      - port-nimara-network
-
-networks:
-  port-nimara-network:
-    external: true
-```
-
-Deploy:
-```bash
-docker-compose up -d
-```
-
-### Step 5: Verify Deployment
-
-1. Check the health endpoint:
-```bash
-curl https://client.portnimara.dev/api/health
-```
-
-2. Monitor PM2 processes (replace `<container-name>` with the name of the running container):
-```bash
-docker exec -it <container-name> pm2 list
-docker exec -it <container-name> pm2 monit
-```
-
-3. Check logs:
-```bash
-# PM2 logs
-docker exec -it <container-name> pm2 logs
-
-# Container logs
-docker logs -f <container-name>
-```
-
-## Monitoring & Maintenance
-
-### Health Check Monitoring
-
-The health endpoint provides comprehensive information:
-- Service availability (NocoDB, Directus, MinIO, Documenso)
-- Memory usage
-- Response times
-- Circuit breaker status
-
-### PM2 Commands
-
-```bash
-# Inside the container
-pm2 list          # List all processes
-pm2 monit         # Real-time monitoring
-pm2 reload all    # Zero-downtime reload
-pm2 logs          # View logs
-pm2 flush         # Clear logs
-```
-
-### Troubleshooting
-
-1. **If 502 errors persist:**
-   - Check `/api/health` to identify failing services
-   - Review PM2 logs for crash information
-   - Verify all environment variables are set
-   - Check the Nginx error log: `sudo tail -f /var/log/nginx/error.log`
-
-2. **High memory usage:**
-   - Monitor memory via the health endpoint
-   - PM2 automatically restarts the process at the 7 GB limit
-   - Check for memory leaks in the logs
-
-3. **Slow responses:**
-   - Check circuit breaker status in the health endpoint
-   - Review queue status for backlogs
-   - Monitor external service response times
-
-## Benefits of These Changes
-
-1. **Automatic Recovery:** PM2 cluster mode ensures the app restarts on crashes
-2. **Graceful Degradation:** Circuit breakers prevent cascading failures
-3. **Better Error Handling:** Retry logic handles temporary failures
-4. 
**Improved Visibility:** Health checks and monitoring provide insights -5. **User Experience:** Custom 502 page with auto-retry improves UX -6. **Resource Management:** Memory monitoring and cleanup prevent leaks - -## Rollback Plan - -If issues arise: - -1. Keep the previous Docker image tagged -2. Revert Nginx configuration: - ```bash - sudo cp /etc/nginx/sites-available/client.portnimara.dev.backup /etc/nginx/sites-available/client.portnimara.dev - sudo nginx -s reload - ``` -3. Deploy previous Docker image: - ```bash - docker-compose down - docker-compose up -d diff --git a/ecosystem.config.js b/ecosystem.config.js deleted file mode 100644 index 1f4658c..0000000 --- a/ecosystem.config.js +++ /dev/null @@ -1,50 +0,0 @@ -module.exports = { - apps: [{ - name: 'port-nimara-client-portal', - script: '.output/server/index.mjs', - instances: 2, - exec_mode: 'cluster', - - // Memory management - max_memory_restart: '7G', - - // Restart policy - max_restarts: 10, - min_uptime: '10s', - - // Graceful shutdown - kill_timeout: 10000, - listen_timeout: 10000, - - // Environment variables - env: { - NODE_ENV: 'production', - NODE_OPTIONS: '--max-old-space-size=8192', - PORT: process.env.PORT || 3000 - }, - - // Logging - error_file: './logs/error.log', - out_file: './logs/out.log', - log_file: './logs/combined.log', - time: true, - - // Advanced options - autorestart: true, - watch: false, - ignore_watch: ['node_modules', 'logs', '.output'], - - // Health check - health_check: { - interval: 30, - url: 'http://localhost:3000/api/health', - max_consecutive_failures: 3 - }, - - // Graceful reload - wait_ready: true, - - // Crash handling - exp_backoff_restart_delay: 100 - }] -}; diff --git a/nginx/client.portnimara.dev.conf b/nginx/client.portnimara.dev.conf deleted file mode 100644 index 282450a..0000000 --- a/nginx/client.portnimara.dev.conf +++ /dev/null @@ -1,197 +0,0 @@ -# Include upstream configuration -include /etc/nginx/conf.d/upstream.conf; - -server { - if ($host = client.portnimara.dev) { - return 301 https://$host$request_uri; - } # managed by Certbot - - client_max_body_size 64M; - - listen 80; - server_name client.portnimara.dev; - - location / { - return 301 https://$host$request_uri; - } - - location ^~ /.well-known/acme-challenge/ { - alias /var/www/html/.well-known/acme-challenge/; - default_type "text/plain"; - allow all; - } -} - -server { - client_max_body_size 64M; - - # Timeout configurations to prevent 502 errors - proxy_connect_timeout 300s; - proxy_send_timeout 300s; - proxy_read_timeout 300s; - send_timeout 300s; - - # Client timeout settings - client_body_timeout 300s; - client_header_timeout 300s; - - # Buffer settings to handle larger responses - proxy_buffer_size 128k; - proxy_buffers 4 256k; - proxy_busy_buffers_size 256k; - proxy_temp_file_write_size 256k; - - # Keepalive settings - keepalive_timeout 65; - keepalive_requests 100; - - listen 443 ssl http2; - server_name client.portnimara.dev; - - ssl_certificate /etc/letsencrypt/live/client.portnimara.dev/fullchain.pem; # managed by Certbot - ssl_certificate_key /etc/letsencrypt/live/client.portnimara.dev/privkey.pem; # managed by Certbot - - # Error pages - error_page 502 503 504 /error-502.html; - location = /error-502.html { - root /etc/nginx/error-pages; - internal; - } - - # Health check endpoint (bypass upstream for monitoring) - location = /api/health { - proxy_pass http://port_nimara_backend; - proxy_http_version 1.1; - proxy_set_header Connection ""; - proxy_set_header Host $host; - proxy_set_header X-Real-IP 
$remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - - # Short timeout for health checks - proxy_connect_timeout 5s; - proxy_send_timeout 5s; - proxy_read_timeout 5s; - - # Don't retry health checks - proxy_next_upstream off; - } - - location / { - proxy_pass http://port_nimara_backend; - proxy_http_version 1.1; - - # Headers - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Real-IP $http_cf_connecting_ip; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "upgrade"; - - # Disable buffering for real-time responses - proxy_request_buffering off; - proxy_buffering off; - - # Apply timeout settings - proxy_connect_timeout 300s; - proxy_send_timeout 300s; - proxy_read_timeout 300s; - - # Retry logic for better resilience - proxy_next_upstream error timeout invalid_header http_500 http_502 http_503; - proxy_next_upstream_tries 3; - proxy_next_upstream_timeout 10s; - - # Add custom header to track retries - add_header X-Upstream-Status $upstream_status always; - } - - location /api/ { - proxy_pass http://port_nimara_backend; - proxy_http_version 1.1; - - # Headers - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection 'upgrade'; - proxy_set_header Host $host; - proxy_cache_bypass $http_upgrade; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - - # Extended timeouts for API routes (webhooks, IMAP operations) - proxy_connect_timeout 600s; - proxy_send_timeout 600s; - proxy_read_timeout 600s; - - # Disable buffering for API responses - proxy_request_buffering off; - proxy_buffering off; - - # Retry logic - proxy_next_upstream error timeout http_502 http_503; - proxy_next_upstream_tries 2; - proxy_next_upstream_timeout 30s; - } - - # Special handling for long-running email operations - location ~ ^/api/email/(send|fetch-thread|test-connection) { - proxy_pass http://port_nimara_backend; - proxy_http_version 1.1; - - # Headers - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection 'upgrade'; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - - # Extra long timeouts for email operations - proxy_connect_timeout 900s; - proxy_send_timeout 900s; - proxy_read_timeout 900s; - - # Disable buffering - proxy_request_buffering off; - proxy_buffering off; - - # No retry for email operations (to avoid duplicates) - proxy_next_upstream off; - } - - # Special handling for Documenso operations - location ~ ^/api/(email/generate-eoi-document|eoi/) { - proxy_pass http://port_nimara_backend; - proxy_http_version 1.1; - - # Headers - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - - # Extended timeouts for document operations - proxy_connect_timeout 300s; - proxy_send_timeout 300s; - proxy_read_timeout 300s; - - # Enable buffering for large responses - proxy_buffering on; - proxy_buffer_size 128k; - proxy_buffers 8 256k; - - # Retry logic for Documenso - proxy_next_upstream error timeout http_502 http_503; - proxy_next_upstream_tries 3; - proxy_next_upstream_timeout 20s; - } - - 
location ^~ /.well-known/acme-challenge/ {
-        alias /var/www/html/.well-known/acme-challenge/;
-        default_type "text/plain";
-        allow all;
-    }
-}
diff --git a/nginx/error-pages/error-502.html b/nginx/error-pages/error-502.html
deleted file mode 100644
index 7c3ba9e..0000000
--- a/nginx/error-pages/error-502.html
+++ /dev/null
@@ -1,137 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-<head>
-    <meta charset="UTF-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1.0">
-    <title>Port Nimara - Service Temporarily Unavailable</title>
-</head>
-<body>
-    <div class="container">
-        <h1>Service Temporarily Unavailable</h1>
-        <p>We're sorry, but the Port Nimara Client Portal is temporarily unavailable. This may be due to scheduled maintenance or a temporary issue.</p>
-        <div class="status">
-            <span class="spinner"></span>
-            The system is restarting. Please wait...
-        </div>
-        <p>The page will automatically refresh when the service is available.</p>
-    </div>
-    <script>
-        // Poll the health endpoint and reload once the service responds again
-        setInterval(function () {
-            fetch('/api/health')
-                .then(function (response) {
-                    if (response.ok) window.location.reload();
-                })
-                .catch(function () { /* still down, keep waiting */ });
-        }, 10000);
-    </script>
-</body>
-</html>
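The deleted error page relied on the `error_page 502 503 504` mapping in `client.portnimara.dev.conf` to be served whenever the upstream failed. A quick smoke test of that behaviour from the host, as a sketch; it assumes the portal container is named `client-portal` (substitute your actual container name):

```bash
# Take the upstream down so Nginx has nothing to proxy to
docker stop client-portal

# Expect HTTP 502 and the custom page markup in the response body
curl -sk -o /dev/null -w "%{http_code}\n" https://client.portnimara.dev/
curl -sk https://client.portnimara.dev/ | grep -i "Service Temporarily Unavailable"

# Bring the upstream back
docker start client-portal
```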
diff --git a/nginx/upstream.conf b/nginx/upstream.conf
deleted file mode 100644
index a4e8df4..0000000
--- a/nginx/upstream.conf
+++ /dev/null
@@ -1,11 +0,0 @@
-# Upstream configuration for Port Nimara Client Portal
-upstream port_nimara_backend {
-    # Define multiple backend servers (PM2 cluster instances)
-    server 127.0.0.1:3028 max_fails=2 fail_timeout=30s;
-
-    # Keepalive connections to backend
-    keepalive 32;
-
-    # Load balancing method
-    least_conn;
-}
diff --git a/server/api/health.ts b/server/api/health.ts
deleted file mode 100644
index cb6e708..0000000
--- a/server/api/health.ts
+++ /dev/null
@@ -1,259 +0,0 @@
-import { getNocoDbConfiguration } from '../utils/nocodb';
-import { getMinioClient } from '../utils/minio';
-import { useDirectus } from '#imports';
-
-interface HealthCheckResult {
-  status: 'healthy' | 'unhealthy' | 'degraded';
-  timestamp: string;
-  uptime: number;
-  services: {
-    [key: string]: {
-      status: 'up' | 'down' | 'slow';
-      responseTime?: number;
-      error?: string;
-    };
-  };
-  memory: {
-    used: number;
-    total: number;
-    percentage: number;
-  };
-  environment: {
-    nodeVersion: string;
-    configStatus: 'complete' | 'partial' | 'missing';
-    missingVars?: string[];
-  };
-}
-
-export default defineEventHandler(async (event): Promise<HealthCheckResult> => {
-  const startTime = Date.now();
-  const result: HealthCheckResult = {
-    status: 'healthy',
-    timestamp: new Date().toISOString(),
-    uptime: process.uptime(),
-    services: {},
-    memory: {
-      used: 0,
-      total: 0,
-      percentage: 0
-    },
-    environment: {
-      nodeVersion: process.version,
-      configStatus: 'complete'
-    }
-  };
-
-  // Memory check
-  const memUsage = process.memoryUsage();
-  result.memory = {
-    used: Math.round(memUsage.heapUsed / 1024 / 1024), // MB
-    total: Math.round(memUsage.heapTotal / 1024 / 1024), // MB
-    percentage: Math.round((memUsage.heapUsed / memUsage.heapTotal) * 100)
-  };
-
-  // Check if memory usage is too high
-  if (result.memory.percentage > 90) {
-    result.status = 'unhealthy';
-  } else if (result.memory.percentage > 80) {
-    result.status = 'degraded';
-  }
-
-  // Environment variables check
-  const requiredEnvVars = [
-    'NUXT_NOCODB_URL',
-    'NUXT_NOCODB_TOKEN',
-    'NUXT_MINIO_ACCESS_KEY',
-    'NUXT_MINIO_SECRET_KEY',
-    'NUXT_DOCUMENSO_API_KEY',
-    'NUXT_DOCUMENSO_BASE_URL'
-  ];
-
-  const missingVars = requiredEnvVars.filter(varName => !process.env[varName]);
-  if (missingVars.length > 0) {
-    result.environment.configStatus = missingVars.length > 3 ? 'missing' : 'partial';
-    result.environment.missingVars = missingVars;
-    result.status = 'unhealthy';
-  }
-
-  // Service health checks
-
-  // 1. NocoDB Check
-  try {
-    const nocoStart = Date.now();
-    const nocoConfig = getNocoDbConfiguration();
-
-    if (!nocoConfig.url || !nocoConfig.token) {
-      result.services.nocodb = {
-        status: 'down',
-        error: 'Missing configuration'
-      };
-      result.status = 'unhealthy';
-    } else {
-      const response = await fetch(`${nocoConfig.url}/api/v1/health`, {
-        headers: {
-          'xc-token': nocoConfig.token
-        },
-        signal: AbortSignal.timeout(5000) // 5 second timeout
-      });
-
-      const responseTime = Date.now() - nocoStart;
-
-      if (response.ok) {
-        result.services.nocodb = {
-          status: responseTime > 3000 ? 'slow' : 'up',
-          responseTime
-        };
-        if (responseTime > 3000) result.status = 'degraded';
-      } else {
-        result.services.nocodb = {
-          status: 'down',
-          responseTime,
-          error: `HTTP ${response.status}`
-        };
-        result.status = 'unhealthy';
-      }
-    }
-  } catch (error) {
-    result.services.nocodb = {
-      status: 'down',
-      error: error instanceof Error ? 
error.message : 'Unknown error' - }; - result.status = 'unhealthy'; - } - - // 2. Directus Check - try { - const directusStart = Date.now(); - const { $directus } = useNuxtApp(); - - // Check if Directus URL is configured - const directusUrl = useRuntimeConfig().public.directus?.url; - if (!directusUrl) { - result.services.directus = { - status: 'down', - error: 'Missing configuration' - }; - result.status = 'unhealthy'; - } else { - const response = await fetch(`${directusUrl}/server/health`, { - signal: AbortSignal.timeout(5000) - }); - - const responseTime = Date.now() - directusStart; - - if (response.ok) { - result.services.directus = { - status: responseTime > 3000 ? 'slow' : 'up', - responseTime - }; - if (responseTime > 3000) result.status = 'degraded'; - } else { - result.services.directus = { - status: 'down', - responseTime, - error: `HTTP ${response.status}` - }; - result.status = 'unhealthy'; - } - } - } catch (error) { - result.services.directus = { - status: 'down', - error: error instanceof Error ? error.message : 'Unknown error' - }; - result.status = 'unhealthy'; - } - - // 3. MinIO Check - try { - const minioStart = Date.now(); - const minioClient = getMinioClient(); - const bucketName = useRuntimeConfig().minio.bucketName; - - // Try to check if bucket exists - const bucketExists = await minioClient.bucketExists(bucketName); - const responseTime = Date.now() - minioStart; - - if (bucketExists) { - result.services.minio = { - status: responseTime > 3000 ? 'slow' : 'up', - responseTime - }; - if (responseTime > 3000) result.status = 'degraded'; - } else { - result.services.minio = { - status: 'down', - responseTime, - error: 'Bucket not found' - }; - result.status = 'unhealthy'; - } - } catch (error) { - result.services.minio = { - status: 'down', - error: error instanceof Error ? error.message : 'Unknown error' - }; - result.status = 'unhealthy'; - } - - // 4. Documenso Check - try { - const documensoStart = Date.now(); - const documensoUrl = process.env.NUXT_DOCUMENSO_BASE_URL; - const documensoKey = process.env.NUXT_DOCUMENSO_API_KEY; - - if (!documensoUrl || !documensoKey) { - result.services.documenso = { - status: 'down', - error: 'Missing configuration' - }; - result.status = 'unhealthy'; - } else { - const response = await fetch(`${documensoUrl}/api/health`, { - headers: { - 'Authorization': `Bearer ${documensoKey}` - }, - signal: AbortSignal.timeout(5000) - }); - - const responseTime = Date.now() - documensoStart; - - if (response.ok || response.status === 401) { // 401 means API is up but key might be wrong - result.services.documenso = { - status: responseTime > 3000 ? 'slow' : 'up', - responseTime - }; - if (responseTime > 3000) result.status = 'degraded'; - } else { - result.services.documenso = { - status: 'down', - responseTime, - error: `HTTP ${response.status}` - }; - result.status = 'unhealthy'; - } - } - } catch (error) { - result.services.documenso = { - status: 'down', - error: error instanceof Error ? 
error.message : 'Unknown error' - }; - // Documenso being down shouldn't make the whole app unhealthy - if (result.status === 'healthy') result.status = 'degraded'; - } - - // Overall response time - const totalTime = Date.now() - startTime; - if (totalTime > 10000 && result.status === 'healthy') { - result.status = 'degraded'; - } - - // Set appropriate HTTP status code - if (result.status === 'unhealthy') { - setResponseStatus(event, 503); - } else if (result.status === 'degraded') { - setResponseStatus(event, 200); // Still return 200 for degraded to not trigger Nginx failures - } - - return result; -}); diff --git a/server/plugins/memory-monitor.ts b/server/plugins/memory-monitor.ts deleted file mode 100644 index f31ecfe..0000000 --- a/server/plugins/memory-monitor.ts +++ /dev/null @@ -1,67 +0,0 @@ -export default defineNitroPlugin((nitroApp) => { - let lastHeapSnapshot: number = 0; - const heapSnapshotThreshold = 1024 * 1024 * 1024 * 6; // 6GB - - // Monitor memory usage every 30 seconds - setInterval(() => { - const memUsage = process.memoryUsage(); - const heapUsedMB = Math.round(memUsage.heapUsed / 1024 / 1024); - const heapTotalMB = Math.round(memUsage.heapTotal / 1024 / 1024); - const rssMB = Math.round(memUsage.rss / 1024 / 1024); - const externalMB = Math.round(memUsage.external / 1024 / 1024); - - console.log(`[Memory Monitor] Heap: ${heapUsedMB}MB / ${heapTotalMB}MB | RSS: ${rssMB}MB | External: ${externalMB}MB`); - - // Warning if memory usage is high - if (memUsage.heapUsed > heapSnapshotThreshold) { - console.warn(`[Memory Monitor] High memory usage detected: ${heapUsedMB}MB`); - - // Take heap snapshot if we haven't taken one in the last hour - const now = Date.now(); - if (now - lastHeapSnapshot > 3600000) { // 1 hour - lastHeapSnapshot = now; - console.warn('[Memory Monitor] Memory usage critical - consider taking heap snapshot'); - - // Force garbage collection if available - if (global.gc) { - console.log('[Memory Monitor] Running garbage collection...'); - global.gc(); - - // Log memory after GC - setTimeout(() => { - const afterGC = process.memoryUsage(); - const heapFreed = memUsage.heapUsed - afterGC.heapUsed; - console.log(`[Memory Monitor] GC freed ${Math.round(heapFreed / 1024 / 1024)}MB`); - }, 1000); - } - } - } - }, 30000); // Every 30 seconds - - // Monitor event loop lag - let lastCheck = Date.now(); - setInterval(() => { - const now = Date.now(); - const lag = now - lastCheck - 1000; - - if (lag > 100) { - console.warn(`[Memory Monitor] Event loop lag detected: ${lag}ms`); - } - - lastCheck = now; - }, 1000); - - // Add memory info to health endpoint - nitroApp.hooks.hook('request', async (event) => { - if (event.node.req.url === '/api/health') { - const memUsage = process.memoryUsage(); - event.context.memoryInfo = { - heapUsed: Math.round(memUsage.heapUsed / 1024 / 1024), - heapTotal: Math.round(memUsage.heapTotal / 1024 / 1024), - rss: Math.round(memUsage.rss / 1024 / 1024), - external: Math.round(memUsage.external / 1024 / 1024), - arrayBuffers: Math.round(memUsage.arrayBuffers / 1024 / 1024) - }; - } - }); -}); diff --git a/server/plugins/startup-health.ts b/server/plugins/startup-health.ts deleted file mode 100644 index 3443bb4..0000000 --- a/server/plugins/startup-health.ts +++ /dev/null @@ -1,30 +0,0 @@ -import { healthChecker } from '../utils/health-checker'; - -export default defineNitroPlugin(async (nitroApp) => { - console.log('[Startup Health] Initializing application health checks...'); - - // Perform initial health checks on startup - const 
startTime = Date.now();
-
-  try {
-    const isReady = await healthChecker.performStartupHealthChecks();
-
-    if (isReady) {
-      console.log(`[Startup Health] Application ready in ${Date.now() - startTime}ms`);
-    } else {
-      console.warn('[Startup Health] Application started but some services are unavailable');
-      console.warn('[Startup Health] Status:', healthChecker.getStartupStatus());
-    }
-  } catch (error) {
-    console.error('[Startup Health] Failed to perform startup checks:', error);
-  }
-
-  // Monitor service health periodically
-  setInterval(async () => {
-    try {
-      await healthChecker.performStartupHealthChecks();
-    } catch (error) {
-      console.error('[Startup Health] Periodic health check failed:', error);
-    }
-  }, 60000); // Check every minute
-});
diff --git a/server/utils/cleanup-manager.ts b/server/utils/cleanup-manager.ts
deleted file mode 100644
index ed2a9b7..0000000
--- a/server/utils/cleanup-manager.ts
+++ /dev/null
@@ -1,160 +0,0 @@
-interface CleanupTask {
-  name: string;
-  cleanup: () => void | Promise<void>;
-  interval?: number;
-}
-
-class CleanupManager {
-  private static instance: CleanupManager;
-  private tasks: Map<string, CleanupTask> = new Map();
-  private intervals: Map<string, NodeJS.Timeout> = new Map();
-
-  private constructor() {
-    // Register default cleanup tasks
-    this.registerDefaultTasks();
-  }
-
-  static getInstance(): CleanupManager {
-    if (!CleanupManager.instance) {
-      CleanupManager.instance = new CleanupManager();
-    }
-    return CleanupManager.instance;
-  }
-
-  private registerDefaultTasks() {
-    // Clean up old temporary files
-    this.registerTask({
-      name: 'temp-files-cleanup',
-      cleanup: async () => {
-        console.log('[Cleanup] Running temporary files cleanup...');
-        // Implementation would go here
-      },
-      interval: 3600000 // Every hour
-    });
-
-    // Clean up expired cache entries
-    this.registerTask({
-      name: 'cache-cleanup',
-      cleanup: async () => {
-        console.log('[Cleanup] Running cache cleanup...');
-        // Implementation would go here
-      },
-      interval: 1800000 // Every 30 minutes
-    });
-  }
-
-  registerTask(task: CleanupTask) {
-    this.tasks.set(task.name, task);
-
-    if (task.interval) {
-      // Clear existing interval if any
-      const existingInterval = this.intervals.get(task.name);
-      if (existingInterval) {
-        clearInterval(existingInterval);
-      }
-
-      // Set up new interval
-      const interval = setInterval(async () => {
-        try {
-          await task.cleanup();
-        } catch (error) {
-          console.error(`[Cleanup] Task '${task.name}' failed:`, error);
-        }
-      }, task.interval);
-
-      this.intervals.set(task.name, interval);
-    }
-  }
-
-  async runTask(taskName: string) {
-    const task = this.tasks.get(taskName);
-    if (!task) {
-      throw new Error(`Cleanup task '${taskName}' not found`);
-    }
-
-    try {
-      await task.cleanup();
-      console.log(`[Cleanup] Task '${taskName}' completed successfully`);
-    } catch (error) {
-      console.error(`[Cleanup] Task '${taskName}' failed:`, error);
-      throw error;
-    }
-  }
-
-  async runAllTasks() {
-    console.log('[Cleanup] Running all cleanup tasks...');
-    const results: { task: string; success: boolean; error?: any }[] = [];
-
-    for (const [name, task] of this.tasks) {
-      try {
-        await task.cleanup();
-        results.push({ task: name, success: true });
-      } catch (error) {
-        results.push({ task: name, success: false, error });
-      }
-    }
-
-    return results;
-  }
-
-  stopAllIntervals() {
-    for (const [name, interval] of this.intervals) {
-      clearInterval(interval);
-      console.log(`[Cleanup] Stopped interval for task '${name}'`);
-    }
-    this.intervals.clear();
-  }
-}
-
-export const cleanupManager = CleanupManager.getInstance();
-
-// Utility functions for common cleanup operations
-export const cleanupUtils = {
-  // Clean up old files in a directory
-  async cleanupOldFiles(directory: string, maxAgeMs: number) {
-    // Implementation would check file ages and remove old ones
-    console.log(`[Cleanup] Would clean files older than ${maxAgeMs}ms in ${directory}`);
-  },
-
-  // Clean up unused stream handles
-  cleanupStreams(streams: any[]) {
-    streams.forEach(stream => {
-      if (stream && typeof stream.destroy === 'function') {
-        stream.destroy();
-      }
-    });
-  },
-
-  // Clean up IMAP connections
-  cleanupIMAPConnections(connections: any[]) {
-    connections.forEach(conn => {
-      try {
-        if (conn && typeof conn.end === 'function') {
-          conn.end();
-        }
-      } catch (error) {
-        console.error('[Cleanup] Failed to close IMAP connection:', error);
-      }
-    });
-  },
-
-  // Memory-efficient array processing
-  async processInBatches<T, R>(
-    items: T[],
-    batchSize: number,
-    processor: (batch: T[]) => Promise<R[]>
-  ): Promise<R[]> {
-    const results: R[] = [];
-
-    for (let i = 0; i < items.length; i += batchSize) {
-      const batch = items.slice(i, i + batchSize);
-      const batchResults = await processor(batch);
-      results.push(...batchResults);
-
-      // Allow garbage collection between batches
-      await new Promise(resolve => setImmediate(resolve));
-    }
-
-    return results;
-  }
-};
diff --git a/server/utils/documeso.ts b/server/utils/documeso.ts
index 6940859..9af1af6 100644
--- a/server/utils/documeso.ts
+++ b/server/utils/documeso.ts
@@ -1,139 +1,153 @@
-import { resilientHttp } from './resilient-http';
-
-interface DocumensoConfig {
+// Documeso API client utilities
+interface DocumesoConfig {
+  apiUrl: string;
   apiKey: string;
-  baseUrl: string;
 }
 
-export const getDocumensoConfiguration = (): DocumensoConfig => {
-  const apiKey = process.env.NUXT_DOCUMENSO_API_KEY;
-  const baseUrl = process.env.NUXT_DOCUMENSO_BASE_URL;
-
-  if (!apiKey || !baseUrl) {
-    throw new Error('Documenso configuration missing');
-  }
-
-  return { apiKey, baseUrl };
+interface DocumesoRecipient {
+  id: number;
+  documentId: number;
+  email: string;
+  name: string;
+  role: 'SIGNER' | 'APPROVER' | 'VIEWER';
+  signingOrder: number;
+  token: string;
+  signedAt: string | null;
+  readStatus: 'NOT_OPENED' | 'OPENED';
+  signingStatus: 'NOT_SIGNED' | 'SIGNED';
+  sendStatus: 'NOT_SENT' | 'SENT';
+  signingUrl: string;
+}
+
+interface DocumesoDocument {
+  id: number;
+  externalId: string;
+  userId: number;
+  teamId: number;
+  title: string;
+  status: 'DRAFT' | 'PENDING' | 'COMPLETED' | 'CANCELLED';
+  documentDataId: string;
+  createdAt: string;
+  updatedAt: string;
+  completedAt: string | null;
+  recipients: DocumesoRecipient[];
+}
+
+interface DocumesoListResponse {
+  documents: DocumesoDocument[];
+  total: number;
+  page: number;
+  perPage: number;
+}
+
+// Get Documeso configuration
+const getDocumesoConfig = (): DocumesoConfig => {
+  return {
+    apiUrl: 'https://signatures.portnimara.dev/api/v1',
+    apiKey: 'Bearer api_malptg62zqyb0wrp'
+  };
 };
 
-// Helper function for resilient Documenso requests
-async function documensoFetch(endpoint: string, options: RequestInit = {}): Promise<any> {
-  const config = getDocumensoConfiguration();
-  const url = `${config.baseUrl}${endpoint}`;
+// Fetch a single document by ID
+export const getDocumesoDocument = async (documentId: number): Promise<DocumesoDocument> => {
+  const config = getDocumesoConfig();
 
-  const response = await resilientHttp.fetchWithRetry(
-    url,
-    {
-      ...options,
+  try {
+    const response = await $fetch<DocumesoDocument>(`${config.apiUrl}/documents/${documentId}`, {
       headers: {
-        'Authorization': `Bearer ${config.apiKey}`,
-        'Content-Type': 'application/json',
-        ...options.headers
+        'Authorization': config.apiKey,
+        'Content-Type': 'application/json'
+      }
+    });
+
+    return response;
+  } catch (error) {
+    console.error('Failed to fetch Documeso document:', error);
+    throw error;
+  }
+};
+
+// Search documents by external ID (e.g., 'loi-94')
+export const searchDocumesoDocuments = async (externalId?: string): Promise<DocumesoDocument[]> => {
+  const config = getDocumesoConfig();
+
+  try {
+    const response = await $fetch<DocumesoListResponse>(`${config.apiUrl}/documents`, {
+      headers: {
+        'Authorization': config.apiKey,
+        'Content-Type': 'application/json'
       },
-      serviceName: 'documenso'
-    },
-    {
-      maxRetries: options.method === 'POST' || options.method === 'PUT' ? 2 : 3,
-      timeout: 20000, // 20 seconds for document operations
-      retryableStatuses: [408, 429, 500, 502, 503, 504]
+      params: {
+        perPage: 100
+      }
+    });
+
+    // If externalId is provided, filter by it
+    if (externalId) {
+      return response.documents.filter(doc => doc.externalId === externalId);
     }
-  );
-
-  if (!response.ok) {
-    const errorText = await response.text();
-    throw new Error(`Documenso request failed: ${response.status} - ${errorText}`);
-  }
-
-  return response.json();
-}
-
-export const checkDocumentStatus = async (documentId: string): Promise<any> => {
-  console.log('[Documenso] Checking document status:', documentId);
-
-  try {
-    const result = await documensoFetch(`/api/v1/documents/${documentId}`, {
-      method: 'GET'
-    });
-    console.log('[Documenso] Document status retrieved:', result.status);
-    return result;
+    return response.documents;
   } catch (error) {
-    console.error('[Documenso] Failed to check document status:', error);
+    console.error('Failed to search Documeso documents:', error);
     throw error;
   }
 };
 
-export const createDocument = async (templateId: string, data: any): Promise<any> => {
-  console.log('[Documenso] Creating document from template:', templateId);
-
-  try {
-    const result = await documensoFetch('/api/v1/documents', {
-      method: 'POST',
-      body: JSON.stringify({
-        templateId,
-        ...data
-      })
-    });
-
-    console.log('[Documenso] Document created with ID:', result.id);
-    return result;
-  } catch (error) {
-    console.error('[Documenso] Failed to create document:', error);
-    throw error;
-  }
+// Get document by external ID (e.g., 'loi-94')
+export const getDocumesoDocumentByExternalId = async (externalId: string): Promise<DocumesoDocument | null> => {
+  const documents = await searchDocumesoDocuments(externalId);
+  return documents.length > 0 ? documents[0] : null;
+};
 
-export const sendDocument = async (documentId: string, signers: any[]): Promise<any> => {
-  console.log('[Documenso] Sending document:', documentId);
+// Check signature status for a document
+export const checkDocumentSignatureStatus = async (documentId: number): Promise<{
+  documentStatus: string;
+  unsignedRecipients: DocumesoRecipient[];
+  signedRecipients: DocumesoRecipient[];
+  clientSigned: boolean;
+  allSigned: boolean;
+}> => {
+  const document = await getDocumesoDocument(documentId);
 
-  try {
-    const result = await documensoFetch(`/api/v1/documents/${documentId}/send`, {
-      method: 'POST',
-      body: JSON.stringify({ signers })
-    });
-
-    console.log('[Documenso] Document sent successfully');
-    return result;
-  } catch (error) {
-    console.error('[Documenso] Failed to send document:', error);
-    throw error;
-  }
+  const unsignedRecipients = document.recipients.filter(r => r.signingStatus === 'NOT_SIGNED');
+  const signedRecipients = document.recipients.filter(r => r.signingStatus === 'SIGNED');
+
+  // Check if client (signingOrder = 1) has signed
+  const clientRecipient = document.recipients.find(r => r.signingOrder === 1);
+  const clientSigned = clientRecipient ? clientRecipient.signingStatus === 'SIGNED' : false;
+
+  const allSigned = unsignedRecipients.length === 0;
+
+  return {
+    documentStatus: document.status,
+    unsignedRecipients,
+    signedRecipients,
+    clientSigned,
+    allSigned
+  };
 };
 
-export const deleteDocument = async (documentId: string): Promise<any> => {
-  console.log('[Documenso] Deleting document:', documentId);
+// Get recipients who need to sign (excluding client)
+export const getRecipientsToRemind = async (documentId: number): Promise<DocumesoRecipient[]> => {
+  const status = await checkDocumentSignatureStatus(documentId);
 
-  try {
-    const result = await documensoFetch(`/api/v1/documents/${documentId}`, {
-      method: 'DELETE'
-    });
-
-    console.log('[Documenso] Document deleted successfully');
-    return result;
-  } catch (error) {
-    console.error('[Documenso] Failed to delete document:', error);
-    throw error;
+  // Only remind if client has signed
+  if (!status.clientSigned) {
+    return [];
   }
+
+  // Return unsigned recipients with signingOrder > 1
+  return status.unsignedRecipients.filter(r => r.signingOrder > 1);
 };
 
-export const verifyTemplate = async (templateId: string): Promise<boolean> => {
-  console.log('[Documenso] Verifying template:', templateId);
-
-  try {
-    await documensoFetch(`/api/v1/templates/${templateId}`, {
-      method: 'GET'
-    });
-
-    console.log('[Documenso] Template verified successfully');
-    return true;
-  } catch (error) {
-    console.error('[Documenso] Template verification failed:', error);
-    return false;
-  }
+// Format recipient name for emails
+export const formatRecipientName = (recipient: DocumesoRecipient): string => {
+  const firstName = recipient.name.split(' ')[0];
+  return firstName;
 };
 
-// Get circuit breaker status for monitoring
-export const getDocumensoHealthStatus = () => {
-  const status = resilientHttp.getCircuitBreakerStatus();
-  return status.documenso || { state: 'UNKNOWN', failures: 0 };
+// Get signing URL for a recipient
+export const getSigningUrl = (recipient: DocumesoRecipient): string => {
+  return recipient.signingUrl;
 };
diff --git a/server/utils/health-checker.ts b/server/utils/health-checker.ts
deleted file mode 100644
index fc9797b..0000000
--- a/server/utils/health-checker.ts
+++ /dev/null
@@ -1,144 +0,0 @@
-import type { H3Event } from 'h3';
-
-interface ServiceCheckResult {
-  status: 'up' | 'down' | 'slow';
-  responseTime?: number;
-  error?: string;
-}
-
-class HealthChecker {
-  private static instance: HealthChecker;
-  private isReady: boolean = false;
-  private startupChecks: Map<string, boolean> = new Map();
-  private requiredServices = ['nocodb', 'directus', 'minio'];
-
-  private constructor() {
-    this.initializeStartupChecks();
-  }
-
-  static getInstance(): HealthChecker {
-    if (!HealthChecker.instance) {
-      HealthChecker.instance = new HealthChecker();
-    }
-    return HealthChecker.instance;
-  }
-
-  private initializeStartupChecks() {
-    this.requiredServices.forEach(service => {
-      this.startupChecks.set(service, false);
-    });
-  }
-
-  async checkServiceWithRetry(
-    serviceName: string,
-    checkFunction: () => Promise<ServiceCheckResult>,
-    maxRetries: number = 3,
-    retryDelay: number = 1000
-  ): Promise<ServiceCheckResult> {
-    let lastError: string | undefined;
-
-    for (let attempt = 0; attempt < maxRetries; attempt++) {
-      try {
-        const result = await checkFunction();
-
-        if (result.status === 'up' || result.status === 'slow') {
-          this.startupChecks.set(serviceName, true);
-          return result;
-        }
-
-        lastError = result.error;
-      } catch (error) {
-        lastError = error instanceof Error ? error.message : 'Unknown error';
-      }
-
-      if (attempt < maxRetries - 1) {
-        await new Promise(resolve => setTimeout(resolve, retryDelay * Math.pow(2, attempt)));
-      }
-    }
-
-    return {
-      status: 'down',
-      error: lastError || 'Max retries exceeded'
-    };
-  }
-
-  markServiceReady(serviceName: string) {
-    this.startupChecks.set(serviceName, true);
-    this.checkOverallReadiness();
-  }
-
-  markServiceDown(serviceName: string) {
-    this.startupChecks.set(serviceName, false);
-    this.isReady = false;
-  }
-
-  private checkOverallReadiness() {
-    const allRequired = this.requiredServices.every(service =>
-      this.startupChecks.get(service) === true
-    );
-
-    if (allRequired) {
-      this.isReady = true;
-      console.log('[HealthChecker] All required services are ready');
-    }
-  }
-
-  isApplicationReady(): boolean {
-    return this.isReady;
-  }
-
-  getStartupStatus(): { [key: string]: boolean } {
-    const status: { [key: string]: boolean } = {};
-    this.startupChecks.forEach((value, key) => {
-      status[key] = value;
-    });
-    return status;
-  }
-
-  async performStartupHealthChecks(): Promise<boolean> {
-    console.log('[HealthChecker] Performing startup health checks...');
-
-    // Import check functions dynamically to avoid circular dependencies
-    const { checkNocoDB, checkDirectus, checkMinIO } = await import('./service-checks');
-
-    // Check all services in parallel
-    const checks = await Promise.all([
-      this.checkServiceWithRetry('nocodb', checkNocoDB, 5, 2000),
-      this.checkServiceWithRetry('directus', checkDirectus, 5, 2000),
-      this.checkServiceWithRetry('minio', checkMinIO, 5, 2000)
-    ]);
-
-    // Log results
-    checks.forEach((result, index) => {
-      const serviceName = ['nocodb', 'directus', 'minio'][index];
-      console.log(`[HealthChecker] ${serviceName}: ${result.status}${result.error ? 
` - ${result.error}` : ''}`); - }); - - this.checkOverallReadiness(); - return this.isReady; - } -} - -export const healthChecker = HealthChecker.getInstance(); - -// Middleware to check if app is ready -export function createReadinessMiddleware() { - return defineEventHandler(async (event: H3Event) => { - // Skip health check endpoint itself - if (event.node.req.url === '/api/health') { - return; - } - - // During startup, return 503 for all requests except health - if (!healthChecker.isApplicationReady()) { - const startupStatus = healthChecker.getStartupStatus(); - - setResponseStatus(event, 503); - return { - error: 'Service Unavailable', - message: 'Application is starting up. Please wait...', - startupStatus - }; - } - }); -} diff --git a/server/utils/nocodb.ts b/server/utils/nocodb.ts index 9cb9d59..f58ddd3 100644 --- a/server/utils/nocodb.ts +++ b/server/utils/nocodb.ts @@ -1,5 +1,3 @@ -import { resilientHttp } from './resilient-http'; - export interface PageInfo { pageSize: number; totalRows: number; @@ -29,43 +27,24 @@ export const createTableUrl = (table: Table) => { return url; }; -// Helper function for resilient NocoDB requests -async function nocoDbFetch(url: string, options: RequestInit = {}): Promise { - const response = await resilientHttp.fetchWithRetry( - url, - { - ...options, - headers: { - 'xc-token': getNocoDbConfiguration().token, - 'Content-Type': 'application/json', - ...options.headers - }, - serviceName: 'nocodb' +export const getInterests = async () => + $fetch(createTableUrl(Table.Interest), { + headers: { + "xc-token": getNocoDbConfiguration().token, }, - { - maxRetries: options.method === 'POST' || options.method === 'PATCH' || options.method === 'DELETE' ? 2 : 3, - timeout: 15000 - } - ); - - if (!response.ok) { - const errorText = await response.text(); - throw new Error(`NocoDB request failed: ${response.status} - ${errorText}`); - } - - return response.json(); -} - -export const getInterests = async () => { - const url = createTableUrl(Table.Interest); - return nocoDbFetch(url + '?limit=1000'); -}; + params: { + limit: 1000, + }, + }); export const getInterestById = async (id: string) => { console.log('[nocodb.getInterestById] Fetching interest ID:', id); - const url = `${createTableUrl(Table.Interest)}/${id}`; - const result = await nocoDbFetch(url); + const result = await $fetch(`${createTableUrl(Table.Interest)}/${id}`, { + headers: { + "xc-token": getNocoDbConfiguration().token, + }, + }); console.log('[nocodb.getInterestById] Raw result from NocoDB:', { id: result.Id, @@ -187,9 +166,13 @@ export const updateInterest = async (id: string, data: Partial, retryC console.log('[nocodb.updateInterest] Request body:', JSON.stringify(cleanData, null, 2)); // Try sending as a single object first (as shown in the API docs) - const result = await nocoDbFetch(url, { + const result = await $fetch(url, { method: "PATCH", - body: JSON.stringify(cleanData) + headers: { + "xc-token": getNocoDbConfiguration().token, + "Content-Type": "application/json" + }, + body: cleanData }); console.log('[nocodb.updateInterest] Update successful for ID:', id); return result; @@ -222,7 +205,7 @@ export const updateInterest = async (id: string, data: Partial, retryC } }; -export const createInterest = async (data: Partial) => { +export const createInterest = async (data: Partial) => { console.log('[nocodb.createInterest] Creating interest with fields:', Object.keys(data)); // Create a clean data object that matches the InterestsRequest schema @@ -270,9 +253,12 @@ export const 
createInterest = async (data: Partial) => { console.log('[nocodb.createInterest] URL:', url); try { - const result = await nocoDbFetch(url, { + const result = await $fetch(url, { method: "POST", - body: JSON.stringify(cleanData) + headers: { + "xc-token": getNocoDbConfiguration().token, + }, + body: cleanData, }); console.log('[nocodb.createInterest] Created interest with ID:', result.Id); return result; @@ -307,9 +293,13 @@ export const deleteInterest = async (id: string) => { try { // According to NocoDB API docs, DELETE requires ID in the body - const result = await nocoDbFetch(url, { + const result = await $fetch(url, { method: "DELETE", - body: JSON.stringify(requestBody) + headers: { + "xc-token": getNocoDbConfiguration().token, + "Content-Type": "application/json" + }, + body: requestBody }); console.log('[nocodb.deleteInterest] DELETE successful'); @@ -333,22 +323,11 @@ export const deleteInterest = async (id: string) => { } }; -export const triggerWebhook = async (url: string, payload: any) => { - // Webhooks might not need the same resilience as data operations - const response = await fetch(url, { +export const triggerWebhook = async (url: string, payload: any) => + $fetch(url, { method: "POST", - headers: { - 'Content-Type': 'application/json' - }, - body: JSON.stringify(payload) + body: payload, }); - - if (!response.ok) { - throw new Error(`Webhook failed: ${response.status}`); - } - - return response.json(); -}; export const updateInterestEOIDocument = async (id: string, documentData: any) => { console.log('[nocodb.updateInterestEOIDocument] Updating EOI document for interest:', id); diff --git a/server/utils/request-queue.ts b/server/utils/request-queue.ts deleted file mode 100644 index 133db54..0000000 --- a/server/utils/request-queue.ts +++ /dev/null @@ -1,190 +0,0 @@ -interface QueuedRequest { - id: string; - priority: 'high' | 'normal' | 'low'; - execute: () => Promise; - resolve: (value: T) => void; - reject: (error: any) => void; - timestamp: number; - retries: number; -} - -interface QueueOptions { - maxConcurrent: number; - maxQueueSize: number; - requestTimeout: number; - maxRetries: number; -} - -class RequestQueue { - private static instances: Map = new Map(); - private queue: QueuedRequest[] = []; - private activeRequests: number = 0; - private requestId: number = 0; - - constructor(private name: string, private options: QueueOptions) { - this.options = { - maxConcurrent: 5, - maxQueueSize: 100, - requestTimeout: 30000, - maxRetries: 2, - ...options - }; - } - - static getInstance(name: string, options?: Partial): RequestQueue { - if (!RequestQueue.instances.has(name)) { - RequestQueue.instances.set(name, new RequestQueue(name, options as QueueOptions)); - } - return RequestQueue.instances.get(name)!; - } - - async add( - execute: () => Promise, - priority: 'high' | 'normal' | 'low' = 'normal' - ): Promise { - if (this.queue.length >= this.options.maxQueueSize) { - throw new Error(`Queue ${this.name} is full (${this.queue.length} items)`); - } - - return new Promise((resolve, reject) => { - const request: QueuedRequest = { - id: `${this.name}-${++this.requestId}`, - priority, - execute, - resolve, - reject, - timestamp: Date.now(), - retries: 0 - }; - - // Insert based on priority - if (priority === 'high') { - // Find the first non-high priority item - const index = this.queue.findIndex(item => item.priority !== 'high'); - if (index === -1) { - this.queue.push(request); - } else { - this.queue.splice(index, 0, request); - } - } else if (priority === 'low') { - 
this.queue.push(request); - } else { - // Normal priority - insert before low priority items - const index = this.queue.findIndex(item => item.priority === 'low'); - if (index === -1) { - this.queue.push(request); - } else { - this.queue.splice(index, 0, request); - } - } - - console.log(`[Queue ${this.name}] Added request ${request.id} (priority: ${priority}, queue size: ${this.queue.length})`); - - // Process queue - this.processQueue(); - }); - } - - private async processQueue() { - while (this.queue.length > 0 && this.activeRequests < this.options.maxConcurrent) { - const request = this.queue.shift(); - if (!request) continue; - - this.activeRequests++; - - // Process request with timeout - const timeoutPromise = new Promise((_, reject) => { - setTimeout(() => reject(new Error('Request timeout')), this.options.requestTimeout); - }); - - try { - console.log(`[Queue ${this.name}] Processing request ${request.id} (active: ${this.activeRequests}/${this.options.maxConcurrent})`); - - const result = await Promise.race([ - request.execute(), - timeoutPromise - ]); - - request.resolve(result); - console.log(`[Queue ${this.name}] Request ${request.id} completed successfully`); - } catch (error) { - console.error(`[Queue ${this.name}] Request ${request.id} failed:`, error); - - // Retry logic - if (request.retries < this.options.maxRetries) { - request.retries++; - console.log(`[Queue ${this.name}] Retrying request ${request.id} (attempt ${request.retries + 1}/${this.options.maxRetries + 1})`); - - // Re-queue with same priority - this.queue.unshift(request); - } else { - request.reject(error); - } - } finally { - this.activeRequests--; - - // Continue processing - if (this.queue.length > 0) { - setImmediate(() => this.processQueue()); - } - } - } - } - - getStatus() { - return { - name: this.name, - queueLength: this.queue.length, - activeRequests: this.activeRequests, - maxConcurrent: this.options.maxConcurrent, - queueByPriority: { - high: this.queue.filter(r => r.priority === 'high').length, - normal: this.queue.filter(r => r.priority === 'normal').length, - low: this.queue.filter(r => r.priority === 'low').length - } - }; - } - - clear() { - const cleared = this.queue.length; - this.queue.forEach(request => { - request.reject(new Error('Queue cleared')); - }); - this.queue = []; - console.log(`[Queue ${this.name}] Cleared ${cleared} requests`); - return cleared; - } -} - -// Pre-configured queues for different operations -export const queues = { - documenso: RequestQueue.getInstance('documenso', { - maxConcurrent: 3, - maxQueueSize: 50, - requestTimeout: 45000, - maxRetries: 2 - }), - - email: RequestQueue.getInstance('email', { - maxConcurrent: 2, - maxQueueSize: 30, - requestTimeout: 60000, - maxRetries: 1 - }), - - nocodb: RequestQueue.getInstance('nocodb', { - maxConcurrent: 5, - maxQueueSize: 100, - requestTimeout: 20000, - maxRetries: 3 - }) -}; - -// Utility function to get all queue statuses -export function getAllQueueStatuses() { - return { - documenso: queues.documenso.getStatus(), - email: queues.email.getStatus(), - nocodb: queues.nocodb.getStatus() - }; -} diff --git a/server/utils/resilient-http.ts b/server/utils/resilient-http.ts deleted file mode 100644 index 98cdf08..0000000 --- a/server/utils/resilient-http.ts +++ /dev/null @@ -1,229 +0,0 @@ -interface RetryOptions { - maxRetries?: number; - initialDelay?: number; - maxDelay?: number; - backoffMultiplier?: number; - timeout?: number; - retryableStatuses?: number[]; - retryableErrors?: string[]; -} - -interface 
CircuitBreakerOptions {
-  failureThreshold?: number;
-  resetTimeout?: number;
-  halfOpenRequests?: number;
-}
-
-enum CircuitState {
-  CLOSED = 'CLOSED',
-  OPEN = 'OPEN',
-  HALF_OPEN = 'HALF_OPEN'
-}
-
-class CircuitBreaker {
-  private state: CircuitState = CircuitState.CLOSED;
-  private failures: number = 0;
-  private lastFailureTime: number = 0;
-  private halfOpenRequests: number = 0;
-
-  constructor(private options: CircuitBreakerOptions = {}) {
-    this.options = {
-      failureThreshold: 10,
-      resetTimeout: 30000, // 30 seconds
-      halfOpenRequests: 3,
-      ...options
-    };
-  }
-
-  async execute<T>(fn: () => Promise<T>): Promise<T> {
-    // Check circuit state
-    if (this.state === CircuitState.OPEN) {
-      const timeSinceLastFailure = Date.now() - this.lastFailureTime;
-      if (timeSinceLastFailure > this.options.resetTimeout!) {
-        this.state = CircuitState.HALF_OPEN;
-        this.halfOpenRequests = 0;
-      } else {
-        throw new Error('Circuit breaker is OPEN');
-      }
-    }
-
-    if (this.state === CircuitState.HALF_OPEN) {
-      if (this.halfOpenRequests >= this.options.halfOpenRequests!) {
-        throw new Error('Circuit breaker is HALF_OPEN - max requests reached');
-      }
-      this.halfOpenRequests++;
-    }
-
-    try {
-      const result = await fn();
-
-      // Success - reset failures
-      if (this.state === CircuitState.HALF_OPEN) {
-        this.state = CircuitState.CLOSED;
-        this.failures = 0;
-      }
-
-      return result;
-    } catch (error) {
-      this.failures++;
-      this.lastFailureTime = Date.now();
-
-      if (this.failures >= this.options.failureThreshold!) {
-        this.state = CircuitState.OPEN;
-        console.error(`[CircuitBreaker] Opening circuit after ${this.failures} failures`);
-      }
-
-      throw error;
-    }
-  }
-
-  getState(): { state: CircuitState; failures: number } {
-    return {
-      state: this.state,
-      failures: this.failures
-    };
-  }
-
-  reset(): void {
-    this.state = CircuitState.CLOSED;
-    this.failures = 0;
-    this.halfOpenRequests = 0;
-  }
-}
-
-export class ResilientHttpClient {
-  private circuitBreakers: Map<string, CircuitBreaker> = new Map();
-
-  constructor(
-    private defaultRetryOptions: RetryOptions = {},
-    private defaultCircuitBreakerOptions: CircuitBreakerOptions = {}
-  ) {
-    this.defaultRetryOptions = {
-      maxRetries: 3,
-      initialDelay: 1000,
-      maxDelay: 16000,
-      backoffMultiplier: 2,
-      timeout: 30000,
-      retryableStatuses: [408, 429, 500, 502, 503, 504],
-      retryableErrors: ['ECONNREFUSED', 'ETIMEDOUT', 'ENOTFOUND', 'ECONNRESET'],
-      ...defaultRetryOptions
-    };
-  }
-
-  private getCircuitBreaker(serviceName: string): CircuitBreaker {
-    if (!this.circuitBreakers.has(serviceName)) {
-      this.circuitBreakers.set(
-        serviceName,
-        new CircuitBreaker(this.defaultCircuitBreakerOptions)
-      );
-    }
-    return this.circuitBreakers.get(serviceName)!;
-  }
-
-  async fetchWithRetry(
-    url: string,
-    options: RequestInit & { serviceName?: string } = {},
-    retryOptions: RetryOptions = {}
-  ): Promise<Response> {
-    const mergedRetryOptions = { ...this.defaultRetryOptions, ...retryOptions };
-    const serviceName = options.serviceName || new URL(url).hostname;
-    const circuitBreaker = this.getCircuitBreaker(serviceName);
-
-    return circuitBreaker.execute(async () => {
-      return this.executeWithRetry(url, options, mergedRetryOptions);
-    });
-  }
-
-  private async executeWithRetry(
-    url: string,
-    options: RequestInit,
-    retryOptions: RetryOptions
-  ): Promise<Response> {
-    let lastError: Error | null = null;
-    let delay = retryOptions.initialDelay!;
-
-    for (let attempt = 0; attempt <= retryOptions.maxRetries!; attempt++) {
-      try {
-        // Add timeout to request
-        const controller = new AbortController();
-        const timeoutId = setTimeout(
-          () => controller.abort(),
-          retryOptions.timeout!
-        );
-
-        const response = await fetch(url, {
-          ...options,
-          signal: controller.signal
-        });
-
-        clearTimeout(timeoutId);
-
-        // Check if response is retryable
-        if (
-          !response.ok &&
-          retryOptions.retryableStatuses!.includes(response.status) &&
-          attempt < retryOptions.maxRetries!
-        ) {
-          console.warn(
-            `[ResilientHttp] Retryable status ${response.status} for ${url}, attempt ${attempt + 1}/${retryOptions.maxRetries}`
-          );
-          await this.delay(delay);
-          delay = Math.min(delay * retryOptions.backoffMultiplier!, retryOptions.maxDelay!);
-          continue;
-        }
-
-        return response;
-      } catch (error: any) {
-        lastError = error;
-
-        // Check if error is retryable
-        const isRetryable = retryOptions.retryableErrors!.some(
-          errType => error.message?.includes(errType) || error.code === errType
-        );
-
-        if (isRetryable && attempt < retryOptions.maxRetries!) {
-          console.warn(
-            `[ResilientHttp] Retryable error for ${url}: ${error.message}, attempt ${attempt + 1}/${retryOptions.maxRetries}`
-          );
-          await this.delay(delay);
-          delay = Math.min(delay * retryOptions.backoffMultiplier!, retryOptions.maxDelay!);
-          continue;
-        }
-
-        throw error;
-      }
-    }
-
-    throw lastError || new Error(`Failed after ${retryOptions.maxRetries} retries`);
-  }
-
-  private delay(ms: number): Promise<void> {
-    return new Promise(resolve => setTimeout(resolve, ms));
-  }
-
-  getCircuitBreakerStatus(): { [serviceName: string]: { state: string; failures: number } } {
-    const status: { [serviceName: string]: { state: string; failures: number } } = {};
-
-    this.circuitBreakers.forEach((breaker, serviceName) => {
-      status[serviceName] = breaker.getState();
-    });
-
-    return status;
-  }
-}
-
-// Create a singleton instance
-export const resilientHttp = new ResilientHttpClient(
-  {
-    maxRetries: 3,
-    initialDelay: 1000,
-    maxDelay: 8000,
-    backoffMultiplier: 2,
-    timeout: 30000
-  },
-  {
-    failureThreshold: 10,
-    resetTimeout: 30000,
-    halfOpenRequests: 3
-  }
-);
diff --git a/server/utils/service-checks.ts b/server/utils/service-checks.ts
deleted file mode 100644
index efbf724..0000000
--- a/server/utils/service-checks.ts
+++ /dev/null
@@ -1,162 +0,0 @@
-import { getNocoDbConfiguration } from './nocodb';
-import { getMinioClient } from './minio';
-
-interface ServiceCheckResult {
-  status: 'up' | 'down' | 'slow';
-  responseTime?: number;
-  error?: string;
-}
-
-export async function checkNocoDB(): Promise<ServiceCheckResult> {
-  const startTime = Date.now();
-
-  try {
-    const config = getNocoDbConfiguration();
-
-    if (!config.url || !config.token) {
-      return {
-        status: 'down',
-        error: 'Missing NocoDB configuration'
-      };
-    }
-
-    const response = await fetch(`${config.url}/api/v1/health`, {
-      headers: {
-        'xc-token': config.token
-      },
-      signal: AbortSignal.timeout(5000)
-    });
-
-    const responseTime = Date.now() - startTime;
-
-    if (response.ok) {
-      return {
-        status: responseTime > 3000 ? 'slow' : 'up',
-        responseTime
-      };
-    }
-
-    return {
-      status: 'down',
-      responseTime,
-      error: `HTTP ${response.status}`
-    };
-  } catch (error) {
-    return {
-      status: 'down',
-      error: error instanceof Error ? error.message : 'Unknown error'
-    };
-  }
-}
-
-export async function checkDirectus(): Promise<ServiceCheckResult> {
-  const startTime = Date.now();
-
-  try {
-    const directusUrl = useRuntimeConfig().public.directus?.url;
-
-    if (!directusUrl) {
-      return {
-        status: 'down',
-        error: 'Missing Directus configuration'
-      };
-    }
-
-    const response = await fetch(`${directusUrl}/server/health`, {
-      signal: AbortSignal.timeout(5000)
-    });
-
-    const responseTime = Date.now() - startTime;
-
-    if (response.ok) {
-      return {
-        status: responseTime > 3000 ? 'slow' : 'up',
-        responseTime
-      };
-    }
-
-    return {
-      status: 'down',
-      responseTime,
-      error: `HTTP ${response.status}`
-    };
-  } catch (error) {
-    return {
-      status: 'down',
-      error: error instanceof Error ? error.message : 'Unknown error'
-    };
-  }
-}
-
-export async function checkMinIO(): Promise<ServiceCheckResult> {
-  const startTime = Date.now();
-
-  try {
-    const minioClient = getMinioClient();
-    const bucketName = useRuntimeConfig().minio.bucketName;
-
-    const bucketExists = await minioClient.bucketExists(bucketName);
-    const responseTime = Date.now() - startTime;
-
-    if (bucketExists) {
-      return {
-        status: responseTime > 3000 ? 'slow' : 'up',
-        responseTime
-      };
-    }
-
-    return {
-      status: 'down',
-      responseTime,
-      error: 'Bucket not found'
-    };
-  } catch (error) {
-    return {
-      status: 'down',
-      error: error instanceof Error ? error.message : 'Unknown error'
-    };
-  }
-}
-
-export async function checkDocumenso(): Promise<ServiceCheckResult> {
-  const startTime = Date.now();
-
-  try {
-    const documensoUrl = process.env.NUXT_DOCUMENSO_BASE_URL;
-    const documensoKey = process.env.NUXT_DOCUMENSO_API_KEY;
-
-    if (!documensoUrl || !documensoKey) {
-      return {
-        status: 'down',
-        error: 'Missing Documenso configuration'
-      };
-    }
-
-    const response = await fetch(`${documensoUrl}/api/health`, {
-      headers: {
-        'Authorization': `Bearer ${documensoKey}`
-      },
-      signal: AbortSignal.timeout(5000)
-    });
-
-    const responseTime = Date.now() - startTime;
-
-    if (response.ok || response.status === 401) {
-      return {
-        status: responseTime > 3000 ? 'slow' : 'up',
-        responseTime
-      };
-    }
-
-    return {
-      status: 'down',
-      responseTime,
-      error: `HTTP ${response.status}`
-    };
-  } catch (error) {
-    return {
-      status: 'down',
-      error: error instanceof Error ? error.message : 'Unknown error'
-    };
-  }
-}
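While the resilience stack above was deployed, its per-service results were surfaced through `/api/health`. A minimal sketch of how that endpoint could be queried, assuming the server listens locally on port 3000 and `jq` is installed:

```bash
# Overall status plus the per-service breakdown (status, responseTime, error)
curl -s http://localhost:3000/api/health | jq '{status, services}'

# Exit non-zero when any service reports "down" (handy in deploy scripts)
curl -s http://localhost:3000/api/health | jq -e 'all(.services[]; .status != "down")'
```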