REVERT Network Updates

Matt 2025-06-12 21:54:47 +02:00
parent f6508aa435
commit 331d8b8194
17 changed files with 161 additions and 2096 deletions

@ -16,31 +16,5 @@ RUN npm prune
FROM base as production
ENV PORT=$PORT
# Install PM2 and required tools
RUN apt-get update && apt-get install -y \
curl \
tini \
&& rm -rf /var/lib/apt/lists/* \
&& npm install -g pm2
# Copy built application
COPY --from=build /app/.output /app/.output
# Copy PM2 config and entrypoint
COPY ecosystem.config.js /app/
COPY docker-entrypoint.sh /app/
RUN chmod +x /app/docker-entrypoint.sh
# Create logs directory
RUN mkdir -p /app/logs
# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
CMD curl -f http://localhost:${PORT}/api/health || exit 1
# Use tini as init process
ENTRYPOINT ["/usr/bin/tini", "--"]
# Start with our custom entrypoint
CMD ["/app/docker-entrypoint.sh"]
CMD ["node", ".output/server/index.mjs"]

@ -1,54 +0,0 @@
#!/bin/sh
set -e
echo "[Entrypoint] Starting Port Nimara Client Portal..."
# Create logs directory if it doesn't exist
mkdir -p /app/logs
# Install PM2 globally if not already installed
if ! command -v pm2 > /dev/null 2>&1; then
echo "[Entrypoint] Installing PM2..."
npm install -g pm2
fi
# Wait for dependent services to be ready
echo "[Entrypoint] Waiting for dependent services..."
# Function to check service availability
check_service() {
local service_name=$1
local service_url=$2
local max_attempts=30
local attempt=0
while [ $attempt -lt $max_attempts ]; do
if curl -s -o /dev/null -w "%{http_code}" "$service_url" | grep -E "^(2|3|4)" > /dev/null; then
echo "[Entrypoint] $service_name is ready"
return 0
fi
attempt=$((attempt + 1))
echo "[Entrypoint] Waiting for $service_name... (attempt $attempt/$max_attempts)"
sleep 2
done
echo "[Entrypoint] Warning: $service_name might not be ready after $max_attempts attempts"
return 1
}
# Check NocoDB if URL is set
if [ ! -z "$NUXT_NOCODB_URL" ]; then
check_service "NocoDB" "$NUXT_NOCODB_URL/api/v1/health" || true
fi
# Check Directus if URL is set
if [ ! -z "$NUXT_PUBLIC_DIRECTUS_URL" ]; then
check_service "Directus" "$NUXT_PUBLIC_DIRECTUS_URL/server/health" || true
fi
# Start the application with PM2
echo "[Entrypoint] Starting application with PM2..."
# Start PM2 in no-daemon mode
exec pm2-runtime start ecosystem.config.js

@ -1,212 +0,0 @@
# 502 Error Fixes - Deployment Guide
This guide covers the comprehensive fixes implemented to resolve the 502 errors in the Port Nimara Client Portal.
## Overview of Changes
### 1. Health Check System
- **New Files:**
- `server/api/health.ts` - Health check endpoint
- `server/utils/health-checker.ts` - Application readiness management
- `server/utils/service-checks.ts` - Service availability checks
- `server/plugins/startup-health.ts` - Startup health monitoring
### 2. Process Management (PM2)
- **New Files:**
- `ecosystem.config.js` - PM2 configuration with cluster mode
- `docker-entrypoint.sh` - Custom entrypoint with service checks
- **Modified Files:**
- `Dockerfile` - Added PM2, health checks, and tini init process
### 3. Nginx Configuration
- **New Files:**
- `nginx/upstream.conf` - Upstream configuration with health checks
- `nginx/client.portnimara.dev.conf` - Enhanced Nginx configuration
- `nginx/error-pages/error-502.html` - Custom 502 error page with auto-retry
### 4. External Service Resilience
- **New Files:**
- `server/utils/resilient-http.ts` - HTTP client with retry logic and circuit breaker (usage sketch after this overview)
- `server/utils/documeso.ts` - Enhanced Documenso client
- **Modified Files:**
- `server/utils/nocodb.ts` - Updated to use resilient HTTP client
### 5. Memory & Performance Optimization
- **New Files:**
- `server/plugins/memory-monitor.ts` - Memory usage monitoring
- `server/utils/cleanup-manager.ts` - Resource cleanup utilities
- `server/utils/request-queue.ts` - Request queue system
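To make the interaction between these layers concrete, here is a minimal usage sketch (not taken from this commit) of how a server utility could wrap a NocoDB call with both the request queue and the resilient HTTP client. The function name and import paths are illustrative; `queues.nocodb.add(...)` and `resilientHttp.fetchWithRetry(...)` match the utilities shown later in this diff.
```typescript
// Illustrative sketch only - the file location, function name and import paths are assumptions.
import { resilientHttp } from './resilient-http';
import { queues } from './request-queue';

export async function fetchNocoDbRows(url: string, token: string) {
  // The queue caps concurrent NocoDB requests (maxConcurrent: 5) ...
  return queues.nocodb.add(async () => {
    // ... while the resilient client adds retries, per-attempt timeouts and a per-service circuit breaker.
    const response = await resilientHttp.fetchWithRetry(
      url,
      { headers: { 'xc-token': token }, serviceName: 'nocodb' },
      { maxRetries: 3, timeout: 15000 }
    );
    if (!response.ok) {
      throw new Error(`NocoDB request failed: ${response.status}`);
    }
    return response.json();
  }, 'normal');
}
```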
## Deployment Steps
### Step 1: Build Docker Image
```bash
# Build the new Docker image with PM2 support
docker build -t port-nimara-client-portal:resilient .
```
### Step 2: Update Nginx Configuration
1. Copy the new Nginx configuration files to your server:
```bash
# Copy upstream configuration
sudo cp nginx/upstream.conf /etc/nginx/conf.d/
# Copy site configuration
sudo cp nginx/client.portnimara.dev.conf /etc/nginx/sites-available/client.portnimara.dev
# Create error pages directory
sudo mkdir -p /etc/nginx/error-pages
sudo cp nginx/error-pages/error-502.html /etc/nginx/error-pages/
# Test Nginx configuration
sudo nginx -t
# Reload Nginx
sudo nginx -s reload
```
### Step 3: Update Environment Variables
Ensure all required environment variables are set:
```bash
# Required for health checks
NUXT_NOCODB_URL=your-nocodb-url
NUXT_NOCODB_TOKEN=your-nocodb-token
NUXT_MINIO_ACCESS_KEY=your-minio-key
NUXT_MINIO_SECRET_KEY=your-minio-secret
NUXT_DOCUMENSO_API_KEY=your-documenso-key
NUXT_DOCUMENSO_BASE_URL=https://signatures.portnimara.dev
# Optional - raises the V8 heap limit and exposes manual GC for the memory monitor
NODE_OPTIONS="--max-old-space-size=8192 --expose-gc"
```
### Step 4: Deploy with Docker Compose
Create or update your `docker-compose.yml`:
```yaml
version: '3.8'
services:
client-portal:
image: port-nimara-client-portal:resilient
ports:
- "3028:3000"
environment:
- NODE_ENV=production
- NUXT_NOCODB_URL=${NUXT_NOCODB_URL}
- NUXT_NOCODB_TOKEN=${NUXT_NOCODB_TOKEN}
- NUXT_MINIO_ACCESS_KEY=${NUXT_MINIO_ACCESS_KEY}
- NUXT_MINIO_SECRET_KEY=${NUXT_MINIO_SECRET_KEY}
- NUXT_DOCUMENSO_API_KEY=${NUXT_DOCUMENSO_API_KEY}
- NUXT_DOCUMENSO_BASE_URL=${NUXT_DOCUMENSO_BASE_URL}
volumes:
- ./logs:/app/logs
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:3000/api/health"]
interval: 30s
timeout: 10s
retries: 3
start_period: 60s
restart: unless-stopped
networks:
- port-nimara-network
networks:
port-nimara-network:
external: true
```
Deploy:
```bash
docker-compose up -d
```
### Step 5: Verify Deployment
1. Check health endpoint:
```bash
curl https://client.portnimara.dev/api/health
```
2. Monitor PM2 processes:
```bash
docker exec -it <container-id> pm2 list
docker exec -it <container-id> pm2 monit
```
3. Check logs:
```bash
# PM2 logs
docker exec -it <container-id> pm2 logs
# Container logs
docker logs -f <container-id>
```
## Monitoring & Maintenance
### Health Check Monitoring
The health endpoint provides comprehensive information (a small polling sketch follows this list):
- Service availability (NocoDB, Directus, MinIO, Documenso)
- Memory usage
- Response times
- Circuit breaker status
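As a rough illustration, the sketch below polls the endpoint and logs any service that is not up. It assumes Node 18+ (global `fetch`); the field handling follows the `HealthCheckResult` interface in `server/api/health.ts`, and the base URL is illustrative.
```typescript
// Monitoring sketch - the response shape follows HealthCheckResult in server/api/health.ts.
interface HealthSummary {
  status: 'healthy' | 'unhealthy' | 'degraded';
  services: Record<string, { status: 'up' | 'down' | 'slow'; responseTime?: number; error?: string }>;
  memory: { used: number; total: number; percentage: number };
}

async function reportHealth(baseUrl = 'https://client.portnimara.dev') {
  // The endpoint returns 503 when unhealthy, so read the body regardless of res.ok.
  const res = await fetch(`${baseUrl}/api/health`);
  const health = (await res.json()) as HealthSummary;
  console.log(`status=${health.status} heap=${health.memory.percentage}%`);
  for (const [name, svc] of Object.entries(health.services)) {
    if (svc.status !== 'up') {
      console.warn(`${name}: ${svc.status}${svc.error ? ` (${svc.error})` : ''}`);
    }
  }
}

reportHealth().catch(console.error);
```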
### PM2 Commands
```bash
# Inside the container
pm2 list # List all processes
pm2 monit # Real-time monitoring
pm2 reload all # Zero-downtime reload
pm2 logs # View logs
pm2 flush # Clear logs
```
### Troubleshooting
1. **If 502 errors persist:**
- Check `/api/health` to identify failing services
- Review PM2 logs for crash information
- Verify all environment variables are set
- Check Nginx error logs: `sudo tail -f /var/log/nginx/error.log`
2. **High memory usage:**
- Monitor memory via health endpoint
- PM2 will automatically restart the process when it reaches the 7 GB `max_memory_restart` limit
- Check for memory leaks in logs
3. **Slow responses:** (see the debug-endpoint sketch after this list)
- Check circuit breaker status in health endpoint
- Review queue status for backlogs
- Monitor external service response times
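One way to surface that state during troubleshooting is a small debug route. The endpoint below is a hypothetical sketch (not part of this commit); the route path and file location are assumptions, but `getAllQueueStatuses()` and `resilientHttp.getCircuitBreakerStatus()` are defined in the utilities removed by this revert, and `defineEventHandler` is Nitro's auto-imported helper as used in `server/api/health.ts`.
```typescript
// Hypothetical debug endpoint, e.g. server/api/debug/resilience.ts - path is an assumption.
import { getAllQueueStatuses } from '../../utils/request-queue';
import { resilientHttp } from '../../utils/resilient-http';

export default defineEventHandler(() => ({
  // Per-queue backlog, active requests and priority breakdown
  queues: getAllQueueStatuses(),
  // Per-service circuit breaker state (CLOSED / OPEN / HALF_OPEN) and failure counts
  circuitBreakers: resilientHttp.getCircuitBreakerStatus(),
}));
```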
## Benefits of These Changes
1. **Automatic Recovery:** PM2 cluster mode ensures the app restarts on crashes
2. **Graceful Degradation:** Circuit breakers prevent cascading failures
3. **Better Error Handling:** Retry logic handles temporary failures
4. **Improved Visibility:** Health checks and monitoring provide insights
5. **User Experience:** The custom 502 page with auto-retry keeps users informed while the service restarts
6. **Resource Management:** Memory monitoring and cleanup prevent leaks
## Rollback Plan
If issues arise:
1. Keep the previous Docker image tagged
2. Revert Nginx configuration:
```bash
sudo cp /etc/nginx/sites-available/client.portnimara.dev.backup /etc/nginx/sites-available/client.portnimara.dev
sudo nginx -s reload
```
3. Deploy previous Docker image:
```bash
docker-compose down
docker-compose up -d
```

@ -1,50 +0,0 @@
module.exports = {
apps: [{
name: 'port-nimara-client-portal',
script: '.output/server/index.mjs',
instances: 2,
exec_mode: 'cluster',
// Memory management
max_memory_restart: '7G',
// Restart policy
max_restarts: 10,
min_uptime: '10s',
// Graceful shutdown
kill_timeout: 10000,
listen_timeout: 10000,
// Environment variables
env: {
NODE_ENV: 'production',
NODE_OPTIONS: '--max-old-space-size=8192',
PORT: process.env.PORT || 3000
},
// Logging
error_file: './logs/error.log',
out_file: './logs/out.log',
log_file: './logs/combined.log',
time: true,
// Advanced options
autorestart: true,
watch: false,
ignore_watch: ['node_modules', 'logs', '.output'],
// Health check
health_check: {
interval: 30,
url: 'http://localhost:3000/api/health',
max_consecutive_failures: 3
},
// Graceful reload
wait_ready: true,
// Crash handling
exp_backoff_restart_delay: 100
}]
};

@ -1,197 +0,0 @@
# Include upstream configuration
include /etc/nginx/conf.d/upstream.conf;
server {
if ($host = client.portnimara.dev) {
return 301 https://$host$request_uri;
} # managed by Certbot
client_max_body_size 64M;
listen 80;
server_name client.portnimara.dev;
location / {
return 301 https://$host$request_uri;
}
location ^~ /.well-known/acme-challenge/ {
alias /var/www/html/.well-known/acme-challenge/;
default_type "text/plain";
allow all;
}
}
server {
client_max_body_size 64M;
# Timeout configurations to prevent 502 errors
proxy_connect_timeout 300s;
proxy_send_timeout 300s;
proxy_read_timeout 300s;
send_timeout 300s;
# Client timeout settings
client_body_timeout 300s;
client_header_timeout 300s;
# Buffer settings to handle larger responses
proxy_buffer_size 128k;
proxy_buffers 4 256k;
proxy_busy_buffers_size 256k;
proxy_temp_file_write_size 256k;
# Keepalive settings
keepalive_timeout 65;
keepalive_requests 100;
listen 443 ssl http2;
server_name client.portnimara.dev;
ssl_certificate /etc/letsencrypt/live/client.portnimara.dev/fullchain.pem; # managed by Certbot
ssl_certificate_key /etc/letsencrypt/live/client.portnimara.dev/privkey.pem; # managed by Certbot
# Error pages
error_page 502 503 504 /error-502.html;
location = /error-502.html {
root /etc/nginx/error-pages;
internal;
}
# Health check endpoint (dedicated location with short timeouts, no retries)
location = /api/health {
proxy_pass http://port_nimara_backend;
proxy_http_version 1.1;
proxy_set_header Connection "";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# Short timeout for health checks
proxy_connect_timeout 5s;
proxy_send_timeout 5s;
proxy_read_timeout 5s;
# Don't retry health checks
proxy_next_upstream off;
}
location / {
proxy_pass http://port_nimara_backend;
proxy_http_version 1.1;
# Headers
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Real-IP $http_cf_connecting_ip;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
# Disable buffering for real-time responses
proxy_request_buffering off;
proxy_buffering off;
# Apply timeout settings
proxy_connect_timeout 300s;
proxy_send_timeout 300s;
proxy_read_timeout 300s;
# Retry logic for better resilience
proxy_next_upstream error timeout invalid_header http_500 http_502 http_503;
proxy_next_upstream_tries 3;
proxy_next_upstream_timeout 10s;
# Add custom header to track retries
add_header X-Upstream-Status $upstream_status always;
}
location /api/ {
proxy_pass http://port_nimara_backend;
proxy_http_version 1.1;
# Headers
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# Extended timeouts for API routes (webhooks, IMAP operations)
proxy_connect_timeout 600s;
proxy_send_timeout 600s;
proxy_read_timeout 600s;
# Disable buffering for API responses
proxy_request_buffering off;
proxy_buffering off;
# Retry logic
proxy_next_upstream error timeout http_502 http_503;
proxy_next_upstream_tries 2;
proxy_next_upstream_timeout 30s;
}
# Special handling for long-running email operations
location ~ ^/api/email/(send|fetch-thread|test-connection) {
proxy_pass http://port_nimara_backend;
proxy_http_version 1.1;
# Headers
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# Extra long timeouts for email operations
proxy_connect_timeout 900s;
proxy_send_timeout 900s;
proxy_read_timeout 900s;
# Disable buffering
proxy_request_buffering off;
proxy_buffering off;
# No retry for email operations (to avoid duplicates)
proxy_next_upstream off;
}
# Special handling for Documenso operations
location ~ ^/api/(email/generate-eoi-document|eoi/) {
proxy_pass http://port_nimara_backend;
proxy_http_version 1.1;
# Headers
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# Extended timeouts for document operations
proxy_connect_timeout 300s;
proxy_send_timeout 300s;
proxy_read_timeout 300s;
# Enable buffering for large responses
proxy_buffering on;
proxy_buffer_size 128k;
proxy_buffers 8 256k;
# Retry logic for Documenso
proxy_next_upstream error timeout http_502 http_503;
proxy_next_upstream_tries 3;
proxy_next_upstream_timeout 20s;
}
location ^~ /.well-known/acme-challenge/ {
alias /var/www/html/.well-known/acme-challenge/;
default_type "text/plain";
allow all;
}
}

@ -1,137 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Port Nimara - Service Temporarily Unavailable</title>
<style>
body {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, sans-serif;
background-color: #f5f5f5;
margin: 0;
padding: 0;
display: flex;
align-items: center;
justify-content: center;
min-height: 100vh;
}
.error-container {
background-color: white;
border-radius: 8px;
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
padding: 40px;
max-width: 500px;
text-align: center;
}
.logo {
width: 120px;
height: 120px;
margin: 0 auto 20px;
background-color: #387bca;
border-radius: 50%;
display: flex;
align-items: center;
justify-content: center;
color: white;
font-size: 48px;
font-weight: bold;
}
h1 {
color: #333;
font-size: 24px;
margin-bottom: 10px;
}
p {
color: #666;
line-height: 1.6;
margin-bottom: 20px;
}
.status {
background-color: #f0f0f0;
border-radius: 4px;
padding: 10px;
margin: 20px 0;
font-size: 14px;
color: #666;
}
.retry-button {
background-color: #387bca;
color: white;
border: none;
padding: 12px 24px;
border-radius: 4px;
font-size: 16px;
cursor: pointer;
transition: background-color 0.3s;
}
.retry-button:hover {
background-color: #2d6aa8;
}
.spinner {
display: inline-block;
width: 20px;
height: 20px;
border: 3px solid rgba(56, 123, 202, 0.3);
border-radius: 50%;
border-top-color: #387bca;
animation: spin 1s ease-in-out infinite;
margin-right: 10px;
vertical-align: middle;
}
@keyframes spin {
to { transform: rotate(360deg); }
}
</style>
</head>
<body>
<div class="error-container">
<div class="logo">PN</div>
<h1>Service Temporarily Unavailable</h1>
<p>We're sorry, but the Port Nimara Client Portal is temporarily unavailable. This may be due to scheduled maintenance or a temporary issue.</p>
<div class="status">
<span class="spinner"></span>
<span id="status-text">The system is restarting. Please wait...</span>
</div>
<p>The page will automatically refresh when the service is available.</p>
<button class="retry-button" onclick="location.reload()">Retry Now</button>
</div>
<script>
// Poll the health endpoint every 5 seconds and reload once the service is back
let retryCount = 0;
const maxRetries = 60; // ~5 minutes at 5-second intervals
function checkHealth() {
fetch('/api/health')
.then(response => {
if (response.ok) {
// Service is back up, reload the page
location.reload();
} else {
retryCount++;
if (retryCount > maxRetries) {
document.getElementById('status-text').textContent = 'Service is taking longer than expected. Please contact support if the issue persists.';
} else {
document.getElementById('status-text').textContent = `Checking service status... (Attempt ${retryCount})`;
}
}
})
.catch(() => {
retryCount++;
if (retryCount > maxRetries) {
document.getElementById('status-text').textContent = 'Service is taking longer than expected. Please contact support if the issue persists.';
}
});
}
// Check health every 5 seconds
setInterval(checkHealth, 5000);
// Initial check after 2 seconds
setTimeout(checkHealth, 2000);
</script>
</body>
</html>

@ -1,11 +0,0 @@
# Upstream configuration for Port Nimara Client Portal
upstream port_nimara_backend {
# Define multiple backend servers (PM2 cluster instances)
server 127.0.0.1:3028 max_fails=2 fail_timeout=30s;
# Load balancing method (must be declared before keepalive)
least_conn;
# Keepalive connections to backend
keepalive 32;
}

@ -1,259 +0,0 @@
import { getNocoDbConfiguration } from '../utils/nocodb';
import { getMinioClient } from '../utils/minio';
interface HealthCheckResult {
status: 'healthy' | 'unhealthy' | 'degraded';
timestamp: string;
uptime: number;
services: {
[key: string]: {
status: 'up' | 'down' | 'slow';
responseTime?: number;
error?: string;
};
};
memory: {
used: number;
total: number;
percentage: number;
};
environment: {
nodeVersion: string;
configStatus: 'complete' | 'partial' | 'missing';
missingVars?: string[];
};
}
export default defineEventHandler(async (event): Promise<HealthCheckResult> => {
const startTime = Date.now();
const result: HealthCheckResult = {
status: 'healthy',
timestamp: new Date().toISOString(),
uptime: process.uptime(),
services: {},
memory: {
used: 0,
total: 0,
percentage: 0
},
environment: {
nodeVersion: process.version,
configStatus: 'complete'
}
};
// Memory check
const memUsage = process.memoryUsage();
result.memory = {
used: Math.round(memUsage.heapUsed / 1024 / 1024), // MB
total: Math.round(memUsage.heapTotal / 1024 / 1024), // MB
percentage: Math.round((memUsage.heapUsed / memUsage.heapTotal) * 100)
};
// Check if memory usage is too high
if (result.memory.percentage > 90) {
result.status = 'unhealthy';
} else if (result.memory.percentage > 80) {
result.status = 'degraded';
}
// Environment variables check
const requiredEnvVars = [
'NUXT_NOCODB_URL',
'NUXT_NOCODB_TOKEN',
'NUXT_MINIO_ACCESS_KEY',
'NUXT_MINIO_SECRET_KEY',
'NUXT_DOCUMENSO_API_KEY',
'NUXT_DOCUMENSO_BASE_URL'
];
const missingVars = requiredEnvVars.filter(varName => !process.env[varName]);
if (missingVars.length > 0) {
result.environment.configStatus = missingVars.length > 3 ? 'missing' : 'partial';
result.environment.missingVars = missingVars;
result.status = 'unhealthy';
}
// Service health checks
// 1. NocoDB Check
try {
const nocoStart = Date.now();
const nocoConfig = getNocoDbConfiguration();
if (!nocoConfig.url || !nocoConfig.token) {
result.services.nocodb = {
status: 'down',
error: 'Missing configuration'
};
result.status = 'unhealthy';
} else {
const response = await fetch(`${nocoConfig.url}/api/v1/health`, {
headers: {
'xc-token': nocoConfig.token
},
signal: AbortSignal.timeout(5000) // 5 second timeout
});
const responseTime = Date.now() - nocoStart;
if (response.ok) {
result.services.nocodb = {
status: responseTime > 3000 ? 'slow' : 'up',
responseTime
};
if (responseTime > 3000) result.status = 'degraded';
} else {
result.services.nocodb = {
status: 'down',
responseTime,
error: `HTTP ${response.status}`
};
result.status = 'unhealthy';
}
}
} catch (error) {
result.services.nocodb = {
status: 'down',
error: error instanceof Error ? error.message : 'Unknown error'
};
result.status = 'unhealthy';
}
// 2. Directus Check
try {
const directusStart = Date.now();
// Check if Directus URL is configured
const directusUrl = useRuntimeConfig().public.directus?.url;
if (!directusUrl) {
result.services.directus = {
status: 'down',
error: 'Missing configuration'
};
result.status = 'unhealthy';
} else {
const response = await fetch(`${directusUrl}/server/health`, {
signal: AbortSignal.timeout(5000)
});
const responseTime = Date.now() - directusStart;
if (response.ok) {
result.services.directus = {
status: responseTime > 3000 ? 'slow' : 'up',
responseTime
};
if (responseTime > 3000) result.status = 'degraded';
} else {
result.services.directus = {
status: 'down',
responseTime,
error: `HTTP ${response.status}`
};
result.status = 'unhealthy';
}
}
} catch (error) {
result.services.directus = {
status: 'down',
error: error instanceof Error ? error.message : 'Unknown error'
};
result.status = 'unhealthy';
}
// 3. MinIO Check
try {
const minioStart = Date.now();
const minioClient = getMinioClient();
const bucketName = useRuntimeConfig().minio.bucketName;
// Try to check if bucket exists
const bucketExists = await minioClient.bucketExists(bucketName);
const responseTime = Date.now() - minioStart;
if (bucketExists) {
result.services.minio = {
status: responseTime > 3000 ? 'slow' : 'up',
responseTime
};
if (responseTime > 3000) result.status = 'degraded';
} else {
result.services.minio = {
status: 'down',
responseTime,
error: 'Bucket not found'
};
result.status = 'unhealthy';
}
} catch (error) {
result.services.minio = {
status: 'down',
error: error instanceof Error ? error.message : 'Unknown error'
};
result.status = 'unhealthy';
}
// 4. Documenso Check
try {
const documensoStart = Date.now();
const documensoUrl = process.env.NUXT_DOCUMENSO_BASE_URL;
const documensoKey = process.env.NUXT_DOCUMENSO_API_KEY;
if (!documensoUrl || !documensoKey) {
result.services.documenso = {
status: 'down',
error: 'Missing configuration'
};
result.status = 'unhealthy';
} else {
const response = await fetch(`${documensoUrl}/api/health`, {
headers: {
'Authorization': `Bearer ${documensoKey}`
},
signal: AbortSignal.timeout(5000)
});
const responseTime = Date.now() - documensoStart;
if (response.ok || response.status === 401) { // 401 means API is up but key might be wrong
result.services.documenso = {
status: responseTime > 3000 ? 'slow' : 'up',
responseTime
};
if (responseTime > 3000) result.status = 'degraded';
} else {
result.services.documenso = {
status: 'down',
responseTime,
error: `HTTP ${response.status}`
};
result.status = 'unhealthy';
}
}
} catch (error) {
result.services.documenso = {
status: 'down',
error: error instanceof Error ? error.message : 'Unknown error'
};
// Documenso being down shouldn't make the whole app unhealthy
if (result.status === 'healthy') result.status = 'degraded';
}
// Overall response time
const totalTime = Date.now() - startTime;
if (totalTime > 10000 && result.status === 'healthy') {
result.status = 'degraded';
}
// Set appropriate HTTP status code
if (result.status === 'unhealthy') {
setResponseStatus(event, 503);
} else if (result.status === 'degraded') {
setResponseStatus(event, 200); // Still return 200 for degraded to not trigger Nginx failures
}
return result;
});

@ -1,67 +0,0 @@
export default defineNitroPlugin((nitroApp) => {
let lastHeapSnapshot: number = 0;
const heapSnapshotThreshold = 1024 * 1024 * 1024 * 6; // 6GB
// Monitor memory usage every 30 seconds
setInterval(() => {
const memUsage = process.memoryUsage();
const heapUsedMB = Math.round(memUsage.heapUsed / 1024 / 1024);
const heapTotalMB = Math.round(memUsage.heapTotal / 1024 / 1024);
const rssMB = Math.round(memUsage.rss / 1024 / 1024);
const externalMB = Math.round(memUsage.external / 1024 / 1024);
console.log(`[Memory Monitor] Heap: ${heapUsedMB}MB / ${heapTotalMB}MB | RSS: ${rssMB}MB | External: ${externalMB}MB`);
// Warning if memory usage is high
if (memUsage.heapUsed > heapSnapshotThreshold) {
console.warn(`[Memory Monitor] High memory usage detected: ${heapUsedMB}MB`);
// Take heap snapshot if we haven't taken one in the last hour
const now = Date.now();
if (now - lastHeapSnapshot > 3600000) { // 1 hour
lastHeapSnapshot = now;
console.warn('[Memory Monitor] Memory usage critical - consider taking heap snapshot');
// Force garbage collection if available
if (global.gc) {
console.log('[Memory Monitor] Running garbage collection...');
global.gc();
// Log memory after GC
setTimeout(() => {
const afterGC = process.memoryUsage();
const heapFreed = memUsage.heapUsed - afterGC.heapUsed;
console.log(`[Memory Monitor] GC freed ${Math.round(heapFreed / 1024 / 1024)}MB`);
}, 1000);
}
}
}
}, 30000); // Every 30 seconds
// Monitor event loop lag
let lastCheck = Date.now();
setInterval(() => {
const now = Date.now();
const lag = now - lastCheck - 1000;
if (lag > 100) {
console.warn(`[Memory Monitor] Event loop lag detected: ${lag}ms`);
}
lastCheck = now;
}, 1000);
// Add memory info to health endpoint
nitroApp.hooks.hook('request', async (event) => {
if (event.node.req.url === '/api/health') {
const memUsage = process.memoryUsage();
event.context.memoryInfo = {
heapUsed: Math.round(memUsage.heapUsed / 1024 / 1024),
heapTotal: Math.round(memUsage.heapTotal / 1024 / 1024),
rss: Math.round(memUsage.rss / 1024 / 1024),
external: Math.round(memUsage.external / 1024 / 1024),
arrayBuffers: Math.round(memUsage.arrayBuffers / 1024 / 1024)
};
}
});
});

@ -1,30 +0,0 @@
import { healthChecker } from '../utils/health-checker';
export default defineNitroPlugin(async (nitroApp) => {
console.log('[Startup Health] Initializing application health checks...');
// Perform initial health checks on startup
const startTime = Date.now();
try {
const isReady = await healthChecker.performStartupHealthChecks();
if (isReady) {
console.log(`[Startup Health] Application ready in ${Date.now() - startTime}ms`);
} else {
console.warn('[Startup Health] Application started but some services are unavailable');
console.warn('[Startup Health] Status:', healthChecker.getStartupStatus());
}
} catch (error) {
console.error('[Startup Health] Failed to perform startup checks:', error);
}
// Monitor service health periodically
setInterval(async () => {
try {
await healthChecker.performStartupHealthChecks();
} catch (error) {
console.error('[Startup Health] Periodic health check failed:', error);
}
}, 60000); // Check every minute
});

@ -1,160 +0,0 @@
interface CleanupTask {
name: string;
cleanup: () => void | Promise<void>;
interval?: number;
}
class CleanupManager {
private static instance: CleanupManager;
private tasks: Map<string, CleanupTask> = new Map();
private intervals: Map<string, NodeJS.Timeout> = new Map();
private constructor() {
// Register default cleanup tasks
this.registerDefaultTasks();
}
static getInstance(): CleanupManager {
if (!CleanupManager.instance) {
CleanupManager.instance = new CleanupManager();
}
return CleanupManager.instance;
}
private registerDefaultTasks() {
// Clean up old temporary files
this.registerTask({
name: 'temp-files-cleanup',
cleanup: async () => {
console.log('[Cleanup] Running temporary files cleanup...');
// Implementation would go here
},
interval: 3600000 // Every hour
});
// Clean up expired cache entries
this.registerTask({
name: 'cache-cleanup',
cleanup: async () => {
console.log('[Cleanup] Running cache cleanup...');
// Implementation would go here
},
interval: 1800000 // Every 30 minutes
});
}
registerTask(task: CleanupTask) {
this.tasks.set(task.name, task);
if (task.interval) {
// Clear existing interval if any
const existingInterval = this.intervals.get(task.name);
if (existingInterval) {
clearInterval(existingInterval);
}
// Set up new interval
const interval = setInterval(async () => {
try {
await task.cleanup();
} catch (error) {
console.error(`[Cleanup] Task '${task.name}' failed:`, error);
}
}, task.interval);
this.intervals.set(task.name, interval);
}
}
async runTask(taskName: string) {
const task = this.tasks.get(taskName);
if (!task) {
throw new Error(`Cleanup task '${taskName}' not found`);
}
try {
await task.cleanup();
console.log(`[Cleanup] Task '${taskName}' completed successfully`);
} catch (error) {
console.error(`[Cleanup] Task '${taskName}' failed:`, error);
throw error;
}
}
async runAllTasks() {
console.log('[Cleanup] Running all cleanup tasks...');
const results: { task: string; success: boolean; error?: any }[] = [];
for (const [name, task] of this.tasks) {
try {
await task.cleanup();
results.push({ task: name, success: true });
} catch (error) {
results.push({ task: name, success: false, error });
}
}
return results;
}
stopAllIntervals() {
for (const [name, interval] of this.intervals) {
clearInterval(interval);
console.log(`[Cleanup] Stopped interval for task '${name}'`);
}
this.intervals.clear();
}
}
export const cleanupManager = CleanupManager.getInstance();
// Utility functions for common cleanup operations
export const cleanupUtils = {
// Clean up old files in a directory
async cleanupOldFiles(directory: string, maxAgeMs: number) {
// Implementation would check file ages and remove old ones
console.log(`[Cleanup] Would clean files older than ${maxAgeMs}ms in ${directory}`);
},
// Clean up unused stream handles
cleanupStreams(streams: any[]) {
streams.forEach(stream => {
if (stream && typeof stream.destroy === 'function') {
stream.destroy();
}
});
},
// Clean up IMAP connections
cleanupIMAPConnections(connections: any[]) {
connections.forEach(conn => {
try {
if (conn && typeof conn.end === 'function') {
conn.end();
}
} catch (error) {
console.error('[Cleanup] Failed to close IMAP connection:', error);
}
});
},
// Memory-efficient array processing
async processInBatches<T, R>(
items: T[],
batchSize: number,
processor: (batch: T[]) => Promise<R[]>
): Promise<R[]> {
const results: R[] = [];
for (let i = 0; i < items.length; i += batchSize) {
const batch = items.slice(i, i + batchSize);
const batchResults = await processor(batch);
results.push(...batchResults);
// Allow garbage collection between batches
await new Promise(resolve => setImmediate(resolve));
}
return results;
}
};

@ -1,139 +1,153 @@
import { resilientHttp } from './resilient-http';
interface DocumensoConfig {
// Documeso API client utilities
interface DocumesoConfig {
apiUrl: string;
apiKey: string;
baseUrl: string;
}
export const getDocumensoConfiguration = (): DocumensoConfig => {
const apiKey = process.env.NUXT_DOCUMENSO_API_KEY;
const baseUrl = process.env.NUXT_DOCUMENSO_BASE_URL;
if (!apiKey || !baseUrl) {
throw new Error('Documenso configuration missing');
interface DocumesoRecipient {
id: number;
documentId: number;
email: string;
name: string;
role: 'SIGNER' | 'APPROVER' | 'VIEWER';
signingOrder: number;
token: string;
signedAt: string | null;
readStatus: 'NOT_OPENED' | 'OPENED';
signingStatus: 'NOT_SIGNED' | 'SIGNED';
sendStatus: 'NOT_SENT' | 'SENT';
signingUrl: string;
}
return { apiKey, baseUrl };
interface DocumesoDocument {
id: number;
externalId: string;
userId: number;
teamId: number;
title: string;
status: 'DRAFT' | 'PENDING' | 'COMPLETED' | 'CANCELLED';
documentDataId: string;
createdAt: string;
updatedAt: string;
completedAt: string | null;
recipients: DocumesoRecipient[];
}
interface DocumesoListResponse {
documents: DocumesoDocument[];
total: number;
page: number;
perPage: number;
}
// Get Documeso configuration
const getDocumesoConfig = (): DocumesoConfig => {
return {
apiUrl: 'https://signatures.portnimara.dev/api/v1',
apiKey: 'Bearer api_malptg62zqyb0wrp'
};
};
// Helper function for resilient Documenso requests
async function documensoFetch<T>(endpoint: string, options: RequestInit = {}): Promise<T> {
const config = getDocumensoConfiguration();
const url = `${config.baseUrl}${endpoint}`;
// Fetch a single document by ID
export const getDocumesoDocument = async (documentId: number): Promise<DocumesoDocument> => {
const config = getDocumesoConfig();
const response = await resilientHttp.fetchWithRetry(
url,
{
...options,
try {
const response = await $fetch<DocumesoDocument>(`${config.apiUrl}/documents/${documentId}`, {
headers: {
'Authorization': `Bearer ${config.apiKey}`,
'Content-Type': 'application/json',
...options.headers
'Authorization': config.apiKey,
'Content-Type': 'application/json'
}
});
return response;
} catch (error) {
console.error('Failed to fetch Documeso document:', error);
throw error;
}
};
// Search documents by external ID (e.g., 'loi-94')
export const searchDocumesoDocuments = async (externalId?: string): Promise<DocumesoDocument[]> => {
const config = getDocumesoConfig();
try {
const response = await $fetch<DocumesoListResponse>(`${config.apiUrl}/documents`, {
headers: {
'Authorization': config.apiKey,
'Content-Type': 'application/json'
},
serviceName: 'documenso'
},
{
maxRetries: options.method === 'POST' || options.method === 'PUT' ? 2 : 3,
timeout: 20000, // 20 seconds for document operations
retryableStatuses: [408, 429, 500, 502, 503, 504]
params: {
perPage: 100
}
);
if (!response.ok) {
const errorText = await response.text();
throw new Error(`Documenso request failed: ${response.status} - ${errorText}`);
}
return response.json();
}
export const checkDocumentStatus = async (documentId: string): Promise<any> => {
console.log('[Documenso] Checking document status:', documentId);
try {
const result = await documensoFetch<any>(`/api/v1/documents/${documentId}`, {
method: 'GET'
});
console.log('[Documenso] Document status retrieved:', result.status);
return result;
// If externalId is provided, filter by it
if (externalId) {
return response.documents.filter(doc => doc.externalId === externalId);
}
return response.documents;
} catch (error) {
console.error('[Documenso] Failed to check document status:', error);
console.error('Failed to search Documeso documents:', error);
throw error;
}
};
export const createDocument = async (templateId: string, data: any): Promise<any> => {
console.log('[Documenso] Creating document from template:', templateId);
// Get document by external ID (e.g., 'loi-94')
export const getDocumesoDocumentByExternalId = async (externalId: string): Promise<DocumesoDocument | null> => {
const documents = await searchDocumesoDocuments(externalId);
return documents.length > 0 ? documents[0] : null;
};
try {
const result = await documensoFetch<any>('/api/v1/documents', {
method: 'POST',
body: JSON.stringify({
templateId,
...data
})
});
// Check signature status for a document
export const checkDocumentSignatureStatus = async (documentId: number): Promise<{
documentStatus: string;
unsignedRecipients: DocumesoRecipient[];
signedRecipients: DocumesoRecipient[];
clientSigned: boolean;
allSigned: boolean;
}> => {
const document = await getDocumesoDocument(documentId);
console.log('[Documenso] Document created with ID:', result.id);
return result;
} catch (error) {
console.error('[Documenso] Failed to create document:', error);
throw error;
const unsignedRecipients = document.recipients.filter(r => r.signingStatus === 'NOT_SIGNED');
const signedRecipients = document.recipients.filter(r => r.signingStatus === 'SIGNED');
// Check if client (signingOrder = 1) has signed
const clientRecipient = document.recipients.find(r => r.signingOrder === 1);
const clientSigned = clientRecipient ? clientRecipient.signingStatus === 'SIGNED' : false;
const allSigned = unsignedRecipients.length === 0;
return {
documentStatus: document.status,
unsignedRecipients,
signedRecipients,
clientSigned,
allSigned
};
};
// Get recipients who need to sign (excluding client)
export const getRecipientsToRemind = async (documentId: number): Promise<DocumesoRecipient[]> => {
const status = await checkDocumentSignatureStatus(documentId);
// Only remind if client has signed
if (!status.clientSigned) {
return [];
}
// Return unsigned recipients with signingOrder > 1
return status.unsignedRecipients.filter(r => r.signingOrder > 1);
};
export const sendDocument = async (documentId: string, signers: any[]): Promise<any> => {
console.log('[Documenso] Sending document:', documentId);
try {
const result = await documensoFetch<any>(`/api/v1/documents/${documentId}/send`, {
method: 'POST',
body: JSON.stringify({ signers })
});
console.log('[Documenso] Document sent successfully');
return result;
} catch (error) {
console.error('[Documenso] Failed to send document:', error);
throw error;
}
// Format recipient name for emails
export const formatRecipientName = (recipient: DocumesoRecipient): string => {
const firstName = recipient.name.split(' ')[0];
return firstName;
};
export const deleteDocument = async (documentId: string): Promise<any> => {
console.log('[Documenso] Deleting document:', documentId);
try {
const result = await documensoFetch<any>(`/api/v1/documents/${documentId}`, {
method: 'DELETE'
});
console.log('[Documenso] Document deleted successfully');
return result;
} catch (error) {
console.error('[Documenso] Failed to delete document:', error);
throw error;
}
};
export const verifyTemplate = async (templateId: string): Promise<boolean> => {
console.log('[Documenso] Verifying template:', templateId);
try {
await documensoFetch<any>(`/api/v1/templates/${templateId}`, {
method: 'GET'
});
console.log('[Documenso] Template verified successfully');
return true;
} catch (error) {
console.error('[Documenso] Template verification failed:', error);
return false;
}
};
// Get circuit breaker status for monitoring
export const getDocumensoHealthStatus = () => {
const status = resilientHttp.getCircuitBreakerStatus();
return status.documenso || { state: 'UNKNOWN', failures: 0 };
// Get signing URL for a recipient
export const getSigningUrl = (recipient: DocumesoRecipient): string => {
return recipient.signingUrl;
};

@ -1,144 +0,0 @@
import type { H3Event } from 'h3';
interface ServiceCheckResult {
status: 'up' | 'down' | 'slow';
responseTime?: number;
error?: string;
}
class HealthChecker {
private static instance: HealthChecker;
private isReady: boolean = false;
private startupChecks: Map<string, boolean> = new Map();
private requiredServices = ['nocodb', 'directus', 'minio'];
private constructor() {
this.initializeStartupChecks();
}
static getInstance(): HealthChecker {
if (!HealthChecker.instance) {
HealthChecker.instance = new HealthChecker();
}
return HealthChecker.instance;
}
private initializeStartupChecks() {
this.requiredServices.forEach(service => {
this.startupChecks.set(service, false);
});
}
async checkServiceWithRetry(
serviceName: string,
checkFunction: () => Promise<ServiceCheckResult>,
maxRetries: number = 3,
retryDelay: number = 1000
): Promise<ServiceCheckResult> {
let lastError: string | undefined;
for (let attempt = 0; attempt < maxRetries; attempt++) {
try {
const result = await checkFunction();
if (result.status === 'up' || result.status === 'slow') {
this.startupChecks.set(serviceName, true);
return result;
}
lastError = result.error;
} catch (error) {
lastError = error instanceof Error ? error.message : 'Unknown error';
}
if (attempt < maxRetries - 1) {
await new Promise(resolve => setTimeout(resolve, retryDelay * Math.pow(2, attempt)));
}
}
return {
status: 'down',
error: lastError || 'Max retries exceeded'
};
}
markServiceReady(serviceName: string) {
this.startupChecks.set(serviceName, true);
this.checkOverallReadiness();
}
markServiceDown(serviceName: string) {
this.startupChecks.set(serviceName, false);
this.isReady = false;
}
private checkOverallReadiness() {
const allRequired = this.requiredServices.every(service =>
this.startupChecks.get(service) === true
);
if (allRequired) {
this.isReady = true;
console.log('[HealthChecker] All required services are ready');
}
}
isApplicationReady(): boolean {
return this.isReady;
}
getStartupStatus(): { [key: string]: boolean } {
const status: { [key: string]: boolean } = {};
this.startupChecks.forEach((value, key) => {
status[key] = value;
});
return status;
}
async performStartupHealthChecks(): Promise<boolean> {
console.log('[HealthChecker] Performing startup health checks...');
// Import check functions dynamically to avoid circular dependencies
const { checkNocoDB, checkDirectus, checkMinIO } = await import('./service-checks');
// Check all services in parallel
const checks = await Promise.all([
this.checkServiceWithRetry('nocodb', checkNocoDB, 5, 2000),
this.checkServiceWithRetry('directus', checkDirectus, 5, 2000),
this.checkServiceWithRetry('minio', checkMinIO, 5, 2000)
]);
// Log results
checks.forEach((result, index) => {
const serviceName = ['nocodb', 'directus', 'minio'][index];
console.log(`[HealthChecker] ${serviceName}: ${result.status}${result.error ? ` - ${result.error}` : ''}`);
});
this.checkOverallReadiness();
return this.isReady;
}
}
export const healthChecker = HealthChecker.getInstance();
// Middleware to check if app is ready
export function createReadinessMiddleware() {
return defineEventHandler(async (event: H3Event) => {
// Skip health check endpoint itself
if (event.node.req.url === '/api/health') {
return;
}
// During startup, return 503 for all requests except health
if (!healthChecker.isApplicationReady()) {
const startupStatus = healthChecker.getStartupStatus();
setResponseStatus(event, 503);
return {
error: 'Service Unavailable',
message: 'Application is starting up. Please wait...',
startupStatus
};
}
});
}

@ -1,5 +1,3 @@
import { resilientHttp } from './resilient-http';
export interface PageInfo {
pageSize: number;
totalRows: number;
@ -29,43 +27,24 @@ export const createTableUrl = (table: Table) => {
return url;
};
// Helper function for resilient NocoDB requests
async function nocoDbFetch<T>(url: string, options: RequestInit = {}): Promise<T> {
const response = await resilientHttp.fetchWithRetry(
url,
{
...options,
export const getInterests = async () =>
$fetch<InterestsResponse>(createTableUrl(Table.Interest), {
headers: {
'xc-token': getNocoDbConfiguration().token,
'Content-Type': 'application/json',
...options.headers
"xc-token": getNocoDbConfiguration().token,
},
serviceName: 'nocodb'
params: {
limit: 1000,
},
{
maxRetries: options.method === 'POST' || options.method === 'PATCH' || options.method === 'DELETE' ? 2 : 3,
timeout: 15000
}
);
if (!response.ok) {
const errorText = await response.text();
throw new Error(`NocoDB request failed: ${response.status} - ${errorText}`);
}
return response.json();
}
export const getInterests = async () => {
const url = createTableUrl(Table.Interest);
return nocoDbFetch<InterestsResponse>(url + '?limit=1000');
};
});
export const getInterestById = async (id: string) => {
console.log('[nocodb.getInterestById] Fetching interest ID:', id);
const url = `${createTableUrl(Table.Interest)}/${id}`;
const result = await nocoDbFetch<any>(url);
const result = await $fetch<Interest>(`${createTableUrl(Table.Interest)}/${id}`, {
headers: {
"xc-token": getNocoDbConfiguration().token,
},
});
console.log('[nocodb.getInterestById] Raw result from NocoDB:', {
id: result.Id,
@ -187,9 +166,13 @@ export const updateInterest = async (id: string, data: Partial<Interest>, retryC
console.log('[nocodb.updateInterest] Request body:', JSON.stringify(cleanData, null, 2));
// Try sending as a single object first (as shown in the API docs)
const result = await nocoDbFetch<any>(url, {
const result = await $fetch<Interest>(url, {
method: "PATCH",
body: JSON.stringify(cleanData)
headers: {
"xc-token": getNocoDbConfiguration().token,
"Content-Type": "application/json"
},
body: cleanData
});
console.log('[nocodb.updateInterest] Update successful for ID:', id);
return result;
@ -222,7 +205,7 @@ export const updateInterest = async (id: string, data: Partial<Interest>, retryC
}
};
export const createInterest = async (data: Partial<any>) => {
export const createInterest = async (data: Partial<Interest>) => {
console.log('[nocodb.createInterest] Creating interest with fields:', Object.keys(data));
// Create a clean data object that matches the InterestsRequest schema
@ -270,9 +253,12 @@ export const createInterest = async (data: Partial<any>) => {
console.log('[nocodb.createInterest] URL:', url);
try {
const result = await nocoDbFetch<any>(url, {
const result = await $fetch<Interest>(url, {
method: "POST",
body: JSON.stringify(cleanData)
headers: {
"xc-token": getNocoDbConfiguration().token,
},
body: cleanData,
});
console.log('[nocodb.createInterest] Created interest with ID:', result.Id);
return result;
@ -307,9 +293,13 @@ export const deleteInterest = async (id: string) => {
try {
// According to NocoDB API docs, DELETE requires ID in the body
const result = await nocoDbFetch<any>(url, {
const result = await $fetch(url, {
method: "DELETE",
body: JSON.stringify(requestBody)
headers: {
"xc-token": getNocoDbConfiguration().token,
"Content-Type": "application/json"
},
body: requestBody
});
console.log('[nocodb.deleteInterest] DELETE successful');
@ -333,23 +323,12 @@ export const deleteInterest = async (id: string) => {
}
};
export const triggerWebhook = async (url: string, payload: any) => {
// Webhooks might not need the same resilience as data operations
const response = await fetch(url, {
export const triggerWebhook = async (url: string, payload: any) =>
$fetch(url, {
method: "POST",
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify(payload)
body: payload,
});
if (!response.ok) {
throw new Error(`Webhook failed: ${response.status}`);
}
return response.json();
};
export const updateInterestEOIDocument = async (id: string, documentData: any) => {
console.log('[nocodb.updateInterestEOIDocument] Updating EOI document for interest:', id);

@ -1,190 +0,0 @@
interface QueuedRequest<T> {
id: string;
priority: 'high' | 'normal' | 'low';
execute: () => Promise<T>;
resolve: (value: T) => void;
reject: (error: any) => void;
timestamp: number;
retries: number;
}
interface QueueOptions {
maxConcurrent: number;
maxQueueSize: number;
requestTimeout: number;
maxRetries: number;
}
class RequestQueue {
private static instances: Map<string, RequestQueue> = new Map();
private queue: QueuedRequest<any>[] = [];
private activeRequests: number = 0;
private requestId: number = 0;
constructor(private name: string, private options: QueueOptions) {
this.options = {
maxConcurrent: 5,
maxQueueSize: 100,
requestTimeout: 30000,
maxRetries: 2,
...options
};
}
static getInstance(name: string, options?: Partial<QueueOptions>): RequestQueue {
if (!RequestQueue.instances.has(name)) {
RequestQueue.instances.set(name, new RequestQueue(name, options as QueueOptions));
}
return RequestQueue.instances.get(name)!;
}
async add<T>(
execute: () => Promise<T>,
priority: 'high' | 'normal' | 'low' = 'normal'
): Promise<T> {
if (this.queue.length >= this.options.maxQueueSize) {
throw new Error(`Queue ${this.name} is full (${this.queue.length} items)`);
}
return new Promise<T>((resolve, reject) => {
const request: QueuedRequest<T> = {
id: `${this.name}-${++this.requestId}`,
priority,
execute,
resolve,
reject,
timestamp: Date.now(),
retries: 0
};
// Insert based on priority
if (priority === 'high') {
// Find the first non-high priority item
const index = this.queue.findIndex(item => item.priority !== 'high');
if (index === -1) {
this.queue.push(request);
} else {
this.queue.splice(index, 0, request);
}
} else if (priority === 'low') {
this.queue.push(request);
} else {
// Normal priority - insert before low priority items
const index = this.queue.findIndex(item => item.priority === 'low');
if (index === -1) {
this.queue.push(request);
} else {
this.queue.splice(index, 0, request);
}
}
console.log(`[Queue ${this.name}] Added request ${request.id} (priority: ${priority}, queue size: ${this.queue.length})`);
// Process queue
this.processQueue();
});
}
private async processQueue() {
while (this.queue.length > 0 && this.activeRequests < this.options.maxConcurrent) {
const request = this.queue.shift();
if (!request) continue;
this.activeRequests++;
// Process request with timeout
const timeoutPromise = new Promise<never>((_, reject) => {
setTimeout(() => reject(new Error('Request timeout')), this.options.requestTimeout);
});
try {
console.log(`[Queue ${this.name}] Processing request ${request.id} (active: ${this.activeRequests}/${this.options.maxConcurrent})`);
const result = await Promise.race([
request.execute(),
timeoutPromise
]);
request.resolve(result);
console.log(`[Queue ${this.name}] Request ${request.id} completed successfully`);
} catch (error) {
console.error(`[Queue ${this.name}] Request ${request.id} failed:`, error);
// Retry logic
if (request.retries < this.options.maxRetries) {
request.retries++;
console.log(`[Queue ${this.name}] Retrying request ${request.id} (attempt ${request.retries + 1}/${this.options.maxRetries + 1})`);
// Re-queue with same priority
this.queue.unshift(request);
} else {
request.reject(error);
}
} finally {
this.activeRequests--;
// Continue processing
if (this.queue.length > 0) {
setImmediate(() => this.processQueue());
}
}
}
}
getStatus() {
return {
name: this.name,
queueLength: this.queue.length,
activeRequests: this.activeRequests,
maxConcurrent: this.options.maxConcurrent,
queueByPriority: {
high: this.queue.filter(r => r.priority === 'high').length,
normal: this.queue.filter(r => r.priority === 'normal').length,
low: this.queue.filter(r => r.priority === 'low').length
}
};
}
clear() {
const cleared = this.queue.length;
this.queue.forEach(request => {
request.reject(new Error('Queue cleared'));
});
this.queue = [];
console.log(`[Queue ${this.name}] Cleared ${cleared} requests`);
return cleared;
}
}
// Pre-configured queues for different operations
export const queues = {
documenso: RequestQueue.getInstance('documenso', {
maxConcurrent: 3,
maxQueueSize: 50,
requestTimeout: 45000,
maxRetries: 2
}),
email: RequestQueue.getInstance('email', {
maxConcurrent: 2,
maxQueueSize: 30,
requestTimeout: 60000,
maxRetries: 1
}),
nocodb: RequestQueue.getInstance('nocodb', {
maxConcurrent: 5,
maxQueueSize: 100,
requestTimeout: 20000,
maxRetries: 3
})
};
// Utility function to get all queue statuses
export function getAllQueueStatuses() {
return {
documenso: queues.documenso.getStatus(),
email: queues.email.getStatus(),
nocodb: queues.nocodb.getStatus()
};
}

@ -1,229 +0,0 @@
interface RetryOptions {
maxRetries?: number;
initialDelay?: number;
maxDelay?: number;
backoffMultiplier?: number;
timeout?: number;
retryableStatuses?: number[];
retryableErrors?: string[];
}
interface CircuitBreakerOptions {
failureThreshold?: number;
resetTimeout?: number;
halfOpenRequests?: number;
}
enum CircuitState {
CLOSED = 'CLOSED',
OPEN = 'OPEN',
HALF_OPEN = 'HALF_OPEN'
}
class CircuitBreaker {
private state: CircuitState = CircuitState.CLOSED;
private failures: number = 0;
private lastFailureTime: number = 0;
private halfOpenRequests: number = 0;
constructor(private options: CircuitBreakerOptions = {}) {
this.options = {
failureThreshold: 10,
resetTimeout: 30000, // 30 seconds
halfOpenRequests: 3,
...options
};
}
async execute<T>(fn: () => Promise<T>): Promise<T> {
// Check circuit state
if (this.state === CircuitState.OPEN) {
const timeSinceLastFailure = Date.now() - this.lastFailureTime;
if (timeSinceLastFailure > this.options.resetTimeout!) {
this.state = CircuitState.HALF_OPEN;
this.halfOpenRequests = 0;
} else {
throw new Error('Circuit breaker is OPEN');
}
}
if (this.state === CircuitState.HALF_OPEN) {
if (this.halfOpenRequests >= this.options.halfOpenRequests!) {
throw new Error('Circuit breaker is HALF_OPEN - max requests reached');
}
this.halfOpenRequests++;
}
try {
const result = await fn();
// Success - reset failures
if (this.state === CircuitState.HALF_OPEN) {
this.state = CircuitState.CLOSED;
this.failures = 0;
}
return result;
} catch (error) {
this.failures++;
this.lastFailureTime = Date.now();
if (this.failures >= this.options.failureThreshold!) {
this.state = CircuitState.OPEN;
console.error(`[CircuitBreaker] Opening circuit after ${this.failures} failures`);
}
throw error;
}
}
getState(): { state: CircuitState; failures: number } {
return {
state: this.state,
failures: this.failures
};
}
reset(): void {
this.state = CircuitState.CLOSED;
this.failures = 0;
this.halfOpenRequests = 0;
}
}
export class ResilientHttpClient {
private circuitBreakers: Map<string, CircuitBreaker> = new Map();
constructor(
private defaultRetryOptions: RetryOptions = {},
private defaultCircuitBreakerOptions: CircuitBreakerOptions = {}
) {
this.defaultRetryOptions = {
maxRetries: 3,
initialDelay: 1000,
maxDelay: 16000,
backoffMultiplier: 2,
timeout: 30000,
retryableStatuses: [408, 429, 500, 502, 503, 504],
retryableErrors: ['ECONNREFUSED', 'ETIMEDOUT', 'ENOTFOUND', 'ECONNRESET'],
...defaultRetryOptions
};
}
private getCircuitBreaker(serviceName: string): CircuitBreaker {
if (!this.circuitBreakers.has(serviceName)) {
this.circuitBreakers.set(
serviceName,
new CircuitBreaker(this.defaultCircuitBreakerOptions)
);
}
return this.circuitBreakers.get(serviceName)!;
}
async fetchWithRetry(
url: string,
options: RequestInit & { serviceName?: string } = {},
retryOptions: RetryOptions = {}
): Promise<Response> {
const mergedRetryOptions = { ...this.defaultRetryOptions, ...retryOptions };
const serviceName = options.serviceName || new URL(url).hostname;
const circuitBreaker = this.getCircuitBreaker(serviceName);
return circuitBreaker.execute(async () => {
return this.executeWithRetry(url, options, mergedRetryOptions);
});
}
private async executeWithRetry(
url: string,
options: RequestInit,
retryOptions: RetryOptions
): Promise<Response> {
let lastError: Error | null = null;
let delay = retryOptions.initialDelay!;
for (let attempt = 0; attempt <= retryOptions.maxRetries!; attempt++) {
try {
// Add timeout to request
const controller = new AbortController();
const timeoutId = setTimeout(
() => controller.abort(),
retryOptions.timeout!
);
const response = await fetch(url, {
...options,
signal: controller.signal
});
clearTimeout(timeoutId);
// Check if response is retryable
if (
!response.ok &&
retryOptions.retryableStatuses!.includes(response.status) &&
attempt < retryOptions.maxRetries!
) {
console.warn(
`[ResilientHttp] Retryable status ${response.status} for ${url}, attempt ${attempt + 1}/${retryOptions.maxRetries}`
);
await this.delay(delay);
delay = Math.min(delay * retryOptions.backoffMultiplier!, retryOptions.maxDelay!);
continue;
}
return response;
} catch (error: any) {
lastError = error;
// Check if error is retryable
const isRetryable = retryOptions.retryableErrors!.some(
errType => error.message?.includes(errType) || error.code === errType
);
if (isRetryable && attempt < retryOptions.maxRetries!) {
console.warn(
`[ResilientHttp] Retryable error for ${url}: ${error.message}, attempt ${attempt + 1}/${retryOptions.maxRetries}`
);
await this.delay(delay);
delay = Math.min(delay * retryOptions.backoffMultiplier!, retryOptions.maxDelay!);
continue;
}
throw error;
}
}
throw lastError || new Error(`Failed after ${retryOptions.maxRetries} retries`);
}
private delay(ms: number): Promise<void> {
return new Promise(resolve => setTimeout(resolve, ms));
}
getCircuitBreakerStatus(): { [serviceName: string]: { state: string; failures: number } } {
const status: { [serviceName: string]: { state: string; failures: number } } = {};
this.circuitBreakers.forEach((breaker, serviceName) => {
status[serviceName] = breaker.getState();
});
return status;
}
}
// Create a singleton instance
export const resilientHttp = new ResilientHttpClient(
{
maxRetries: 3,
initialDelay: 1000,
maxDelay: 8000,
backoffMultiplier: 2,
timeout: 30000
},
{
failureThreshold: 10,
resetTimeout: 30000,
halfOpenRequests: 3
}
);

@ -1,162 +0,0 @@
import { getNocoDbConfiguration } from './nocodb';
import { getMinioClient } from './minio';
interface ServiceCheckResult {
status: 'up' | 'down' | 'slow';
responseTime?: number;
error?: string;
}
export async function checkNocoDB(): Promise<ServiceCheckResult> {
const startTime = Date.now();
try {
const config = getNocoDbConfiguration();
if (!config.url || !config.token) {
return {
status: 'down',
error: 'Missing NocoDB configuration'
};
}
const response = await fetch(`${config.url}/api/v1/health`, {
headers: {
'xc-token': config.token
},
signal: AbortSignal.timeout(5000)
});
const responseTime = Date.now() - startTime;
if (response.ok) {
return {
status: responseTime > 3000 ? 'slow' : 'up',
responseTime
};
}
return {
status: 'down',
responseTime,
error: `HTTP ${response.status}`
};
} catch (error) {
return {
status: 'down',
error: error instanceof Error ? error.message : 'Unknown error'
};
}
}
export async function checkDirectus(): Promise<ServiceCheckResult> {
const startTime = Date.now();
try {
const directusUrl = useRuntimeConfig().public.directus?.url;
if (!directusUrl) {
return {
status: 'down',
error: 'Missing Directus configuration'
};
}
const response = await fetch(`${directusUrl}/server/health`, {
signal: AbortSignal.timeout(5000)
});
const responseTime = Date.now() - startTime;
if (response.ok) {
return {
status: responseTime > 3000 ? 'slow' : 'up',
responseTime
};
}
return {
status: 'down',
responseTime,
error: `HTTP ${response.status}`
};
} catch (error) {
return {
status: 'down',
error: error instanceof Error ? error.message : 'Unknown error'
};
}
}
export async function checkMinIO(): Promise<ServiceCheckResult> {
const startTime = Date.now();
try {
const minioClient = getMinioClient();
const bucketName = useRuntimeConfig().minio.bucketName;
const bucketExists = await minioClient.bucketExists(bucketName);
const responseTime = Date.now() - startTime;
if (bucketExists) {
return {
status: responseTime > 3000 ? 'slow' : 'up',
responseTime
};
}
return {
status: 'down',
responseTime,
error: 'Bucket not found'
};
} catch (error) {
return {
status: 'down',
error: error instanceof Error ? error.message : 'Unknown error'
};
}
}
export async function checkDocumenso(): Promise<ServiceCheckResult> {
const startTime = Date.now();
try {
const documensoUrl = process.env.NUXT_DOCUMENSO_BASE_URL;
const documensoKey = process.env.NUXT_DOCUMENSO_API_KEY;
if (!documensoUrl || !documensoKey) {
return {
status: 'down',
error: 'Missing Documenso configuration'
};
}
const response = await fetch(`${documensoUrl}/api/health`, {
headers: {
'Authorization': `Bearer ${documensoKey}`
},
signal: AbortSignal.timeout(5000)
});
const responseTime = Date.now() - startTime;
if (response.ok || response.status === 401) {
return {
status: responseTime > 3000 ? 'slow' : 'up',
responseTime
};
}
return {
status: 'down',
responseTime,
error: `HTTP ${response.status}`
};
} catch (error) {
return {
status: 'down',
error: error instanceof Error ? error.message : 'Unknown error'
};
}
}