Include full contents of all nested repositories
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
Submodule letsbe-sysadmin-agent deleted from 65d529112d
letsbe-sysadmin-agent/.gitea/workflows/build.yml (new file, 55 lines)
@@ -0,0 +1,55 @@
name: Build and Push Docker Image

on:
  push:
    branches:
      - main
      - master
    tags:
      - 'v*'
  pull_request:
    branches:
      - main
      - master

env:
  REGISTRY: code.letsbe.solutions
  IMAGE_NAME: letsbe/sysadmin-agent

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Log in to Gitea Container Registry
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ gitea.actor }}
          password: ${{ secrets.REGISTRY_TOKEN }}

      - name: Extract metadata (tags, labels)
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=ref,event=branch
            type=ref,event=pr
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=raw,value=latest,enable={{is_default_branch}}

      - name: Build and push Docker image
        uses: docker/build-push-action@v5
        with:
          context: .
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
letsbe-sysadmin-agent/.gitignore (new file, vendored, 73 lines)
@@ -0,0 +1,73 @@
# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg

# Virtual environments
venv/
ENV/
env/
.venv/

# Environment variables
.env
.env.local
.env.*.local

# IDE
.idea/
.vscode/
*.swp
*.swo
*~
.project
.pydevproject
.settings/

# Docker
.docker/

# Testing
.pytest_cache/
.coverage
htmlcov/
.tox/
.nox/

# Mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Logs
*.log
logs/

# OS
.DS_Store
Thumbs.db

# Agent local data
.letsbe-agent/
pending_results.json

# Claude Code & MCP
.claude/
.serena/
letsbe-sysadmin-agent/CLAUDE.md (new file, 121 lines)
@@ -0,0 +1,121 @@
# CLAUDE.md — LetsBe SysAdmin Agent

## Purpose

You are the engineering assistant for the LetsBe SysAdmin Agent.
This is an autonomous automation worker installed on each tenant server.

It performs tasks received from the LetsBe Orchestrator, including:

- Heartbeats
- Task polling
- Shell command execution
- Editing environment files
- Managing Docker Compose
- Running Playwright flows (stubbed for MVP)
- Sending back task results + events

The agent communicates exclusively with the Orchestrator's REST API.

---

## Tech Stack

- Python 3.11
- Async I/O (asyncio + httpx)
- Playwright (installed via separate container or OS-level)
- Shell command execution via subprocess (safe wrappers)
- Docker Compose interaction (subprocess)
- File edits via Python

This repo is separate from the orchestrator.

---

## Target File Structure

letsbe-sysadmin-agent/
    app/
        __init__.py
        main.py
        config.py               # Settings: ORCHESTRATOR_URL, AGENT_TOKEN, etc.
        agent.py                # Agent lifecycle: register, heartbeat
        task_manager.py         # Task polling + dispatch logic
        executors/
            __init__.py
            shell_executor.py       # Run allowed OS commands
            file_executor.py        # Modify files/env vars
            docker_executor.py      # Interact with docker compose
            playwright_executor.py  # Stub for now
        clients/
            orchestrator_client.py  # All API calls
        utils/
            logger.py
            validation.py
        tasks/
            base.py
            echo.py             # MVP sample task: ECHO payload

    docker-compose.yml (optional for dev)
    requirements.txt

---

## MVP Task Types

1. ECHO task
   - Payload: {"message": "..."}
   - Agent returns the payload as the result.

2. SHELL task
   - Payload: {"cmd": "ls -la"}
   - Agent runs an allowed shell command.

3. FILE_WRITE task
   - Payload: {"path": "...", "content": "..."}
   - Agent writes the file.

4. DOCKER_RELOAD task
   - Payload: {"compose_path": "..."}
   - Agent runs `docker compose up -d`.

More complex tasks (Poste, DKIM, Keycloak, etc.) come later.

---

## API Flow

- Register:
  POST /agents/register
- Heartbeat:
  POST /agents/{id}/heartbeat
- Fetch next task:
  GET /tasks/next?agent_id=...
- Submit result:
  PATCH /tasks/{id}

All API calls use httpx with async I/O.
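A minimal sketch of that cycle (a sketch only: it assumes `ORCHESTRATOR_URL` and `AGENT_TOKEN` come from the environment and that the register response carries `agent_id`; the real implementation lives in `app/agent.py` and `app/clients/orchestrator_client.py`):

```python
# Sketch of the register -> heartbeat -> poll -> report cycle.
# Backoff, executor dispatch, and error handling are omitted.
import asyncio
import os

import httpx


async def run() -> None:
    headers = {"Authorization": f"Bearer {os.environ['AGENT_TOKEN']}"}
    async with httpx.AsyncClient(
        base_url=os.environ["ORCHESTRATOR_URL"], headers=headers
    ) as client:
        resp = await client.post("/agents/register", json={"hostname": os.uname().nodename})
        agent_id = resp.json()["agent_id"]

        while True:
            await client.post(f"/agents/{agent_id}/heartbeat")
            resp = await client.get("/tasks/next", params={"agent_id": agent_id})
            if resp.status_code == 200 and resp.content:
                task = resp.json()
                # ...dispatch to the matching executor here...
                await client.patch(
                    f"/tasks/{task['id']}",
                    json={"status": "completed", "result": task.get("payload")},
                )
            await asyncio.sleep(5)


if __name__ == "__main__":
    asyncio.run(run())
```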
---

## Coding Conventions

- Everything async
- Use small, testable executors
- Never run shell commands directly in business logic
- All exceptions must be caught and submitted as FAILED tasks (see the sketch after this list)
- Use structured logging
- The agent must never crash — only tasks can crash
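A minimal sketch of the exceptions-to-FAILED rule, reusing the `Task`, `TaskStatus`, and `update_task` names from `app/clients/orchestrator_client.py` (the executor registry lookup is illustrative):

```python
from app.clients.orchestrator_client import OrchestratorClient, Task, TaskStatus


async def run_task(client: OrchestratorClient, task: Task, executors: dict) -> None:
    """Dispatch wrapper: the only place exceptions are caught."""
    try:
        result = await executors[task.type].execute(task.payload)
        if result.success:
            await client.update_task(task.id, TaskStatus.COMPLETED, result=result.data)
        else:
            await client.update_task(task.id, TaskStatus.FAILED, error=result.error)
    except Exception as exc:  # deliberate catch-all: the agent itself never crashes
        await client.update_task(task.id, TaskStatus.FAILED, error=str(exc))
```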
---

## Your First Instructions for Claude Code

When asked, generate a complete scaffold for the agent as described above:
- app/main.py with startup loop
- Basic heartbeat cycle
- Orchestrator client
- Simple task manager
- Simple executors
- ECHO and SHELL tasks implemented
- requirements.txt + Dockerfile
letsbe-sysadmin-agent/Dockerfile (new file, 74 lines)
@@ -0,0 +1,74 @@
FROM python:3.11-slim

# Set working directory
WORKDIR /app

# Install system dependencies
# - Docker CLI for docker executor
# - curl for health checks
# - Playwright browser dependencies
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    docker-cli \
    curl \
    # Playwright Chromium dependencies
    libnss3 \
    libnspr4 \
    libatk1.0-0 \
    libatk-bridge2.0-0 \
    libcups2 \
    libdrm2 \
    libdbus-1-3 \
    libxkbcommon0 \
    libatspi2.0-0 \
    libxcomposite1 \
    libxdamage1 \
    libxfixes3 \
    libxrandr2 \
    libgbm1 \
    libasound2 \
    libpango-1.0-0 \
    libcairo2 \
    && rm -rf /var/lib/apt/lists/*

# Install Docker Compose plugin (not in Debian repos, download from Docker)
RUN mkdir -p /usr/local/lib/docker/cli-plugins && \
    curl -SL "https://github.com/docker/compose/releases/download/v2.32.1/docker-compose-linux-x86_64" \
    -o /usr/local/lib/docker/cli-plugins/docker-compose && \
    chmod +x /usr/local/lib/docker/cli-plugins/docker-compose

# Copy requirements first for layer caching
COPY requirements.txt .

# Install Python dependencies
RUN pip install --no-cache-dir -r requirements.txt

# Install Playwright browsers (Chromium only for smaller image)
# Skip install-deps as we manually install required libs above
# and the automatic deps installer uses outdated Ubuntu package names.
# Note: this installs browsers under the invoking user's cache (root here);
# when running as the non-root "agent" user, PLAYWRIGHT_BROWSERS_PATH may
# need to point at a location that user can read.
RUN playwright install chromium

# Copy application code
COPY app/ ./app/

# Create non-root user for security
RUN useradd -m -s /bin/bash agent && \
    mkdir -p /home/agent/.letsbe-agent && \
    mkdir -p /opt/letsbe/playwright-artifacts && \
    chown -R agent:agent /home/agent/.letsbe-agent && \
    chown -R agent:agent /opt/letsbe/playwright-artifacts

# Environment
ENV PYTHONUNBUFFERED=1
ENV PYTHONDONTWRITEBYTECODE=1

# Default to non-root user
# Note: May need root for Docker socket access; use docker group instead
USER agent

# Entry point
CMD ["python", "-m", "app.main"]

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD python -c "import sys; sys.exit(0)"
letsbe-sysadmin-agent/ROADMAP.md (new file, 186 lines)
@@ -0,0 +1,186 @@
# SysAdmin Agent Roadmap

This document tracks Agent-specific work for the AI SysAdmin system.

## Completed Work

### Core Infrastructure
- [x] Secure startup
- [x] Automatic registration with orchestrator
- [x] Polling loop (configurable interval)
- [x] Heartbeat loop
- [x] Executor registry system
- [x] BaseExecutor + ExecutionResult model
- [x] Logging with structlog
- [x] Sandboxing and path validation
- [x] Task timing, error propagation
- [x] Circuit breaker for resilience
- [x] Full test suite (140+ tests)

### Executors

| Executor | Purpose | Tests | Status |
|----------|---------|-------|--------|
| ECHO | Test connectivity | ✅ | Done |
| SHELL | Run allowed shell commands | ✅ | Done |
| ENV_UPDATE | Atomic env file edits | ✅ | Done |
| ENV_INSPECT | Read and parse env files | ✅ | Done |
| FILE_WRITE | Write files safely | ✅ | Done |
| FILE_INSPECT | Read files with size limits | 24 | Done |
| DOCKER_RELOAD | Pull + up -d compose stacks | 26 | Done |
| COMPOSITE | Chain multiple executors | ✅ | Done |
| NEXTCLOUD | Nextcloud-specific tasks | ✅ | Done |
| PLAYWRIGHT | Browser automation | ✅ | Done |

### Security
- [x] Path sandboxing to `/opt/letsbe/`
- [x] Allowed file root validation
- [x] Max file size limits
- [x] Shell command timeout
- [x] Non-root execution (configurable)

---

## Remaining Work

### Phase 1: Support for New Playbooks

No new executors needed - existing executors support all Phase 1 tool playbooks via COMPOSITE tasks.

---

### Phase 2: Introspection Executors

| Executor | Purpose | Status |
|----------|---------|--------|
| SERVICE_DISCOVER | List all running services/containers | ⬚ Todo |
| CONFIG_SCAN | Find misconfigurations across services | ⬚ Todo |
| NGINX_INSPECT | Parse nginx configs for domain info | ⬚ Todo |
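As a sketch only, SERVICE_DISCOVER could follow the pattern documented under "Executor Implementation Pattern" below; the `docker ps --format '{{json .}}'` invocation is one possible discovery mechanism, not settled design:

```python
import asyncio
import json

from app.executors.base import BaseExecutor, ExecutionResult


class ServiceDiscoverExecutor(BaseExecutor):
    """Sketch: list running containers as structured data."""

    async def execute(self, payload: dict) -> ExecutionResult:
        proc = await asyncio.create_subprocess_exec(
            "docker", "ps", "--format", "{{json .}}",
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )
        stdout, stderr = await proc.communicate()
        if proc.returncode != 0:
            return ExecutionResult(success=False, error=stderr.decode(errors="replace"))
        # One JSON object per line; skip blanks.
        services = [json.loads(line) for line in stdout.decode().splitlines() if line.strip()]
        return ExecutionResult(success=True, data={"services": services})
```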
---

### Phase 3: Server-Level Executors

| Executor | Purpose | Status |
|----------|---------|--------|
| NGINX_RELOAD | Validate and reload nginx | ⬚ Todo |
| HEALTHCHECK | Check docker status, ports, logs | ⬚ Todo |
| STACK_HEALTH | Verify docker compose stack integrity | ⬚ Todo |
| PACKAGE_UPGRADE | System package updates | ⬚ Todo |

**NGINX_RELOAD requirements** (sketched below):
- Validate config with `nginx -t`
- Reload with `nginx -s reload`
- Rollback on failure
- Path sandboxing for config files

**HEALTHCHECK requirements:**
- Check container status via Docker API
- Verify expected ports are listening
- Scan logs for error patterns
- Return structured health report
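A minimal sketch of the NGINX_RELOAD validate-then-reload order (rollback of config edits is left to the calling playbook; same BaseExecutor pattern as documented below):

```python
import asyncio

from app.executors.base import BaseExecutor, ExecutionResult


async def _run(*cmd: str) -> tuple[int, str]:
    """Run a command and return (returncode, combined output)."""
    proc = await asyncio.create_subprocess_exec(
        *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.STDOUT
    )
    out, _ = await proc.communicate()
    return proc.returncode or 0, out.decode(errors="replace")


class NginxReloadExecutor(BaseExecutor):
    """Sketch: never reload a config that fails validation."""

    async def execute(self, payload: dict) -> ExecutionResult:
        rc, out = await _run("nginx", "-t")
        if rc != 0:
            return ExecutionResult(success=False, error=f"nginx -t failed: {out}")
        rc, out = await _run("nginx", "-s", "reload")
        if rc != 0:
            return ExecutionResult(success=False, error=f"reload failed: {out}")
        return ExecutionResult(success=True, data={"validated": True, "reloaded": True})
```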
---

### Phase 4: Advanced Executors

| Executor | Purpose | Status |
|----------|---------|--------|
| BACKUP | Create and upload backups | ⬚ Todo |
| RESTORE | Restore from backup | ⬚ Todo |
| LOG_TAIL | Stream logs from containers | ⬚ Todo |
| CERT_CHECK | Verify SSL certificate status | ⬚ Todo |

---

### Phase 5: Playwright Browser Automation ✅

**Completed:**

- [x] Playwright installation in container
- [x] Scenario-based executor architecture
- [x] Domain allowlist security (mandatory)
- [x] Screenshot capture for success/failure
- [x] Artifact storage with per-task isolation
- [x] Route interception for domain blocking
- [x] Unit tests for validation logic

**Available Scenarios:**

| Scenario | Purpose | Status |
|----------|---------|--------|
| `echo` | Test connectivity and page load | ✅ Done |
| `nextcloud_initial_setup` | Automate Nextcloud admin setup wizard | ✅ Done |

**Usage Example:**
```json
{
  "type": "PLAYWRIGHT",
  "payload": {
    "scenario": "nextcloud_initial_setup",
    "inputs": {
      "base_url": "https://cloud.example.com",
      "admin_username": "admin",
      "admin_password": "secret123"
    },
    "options": {
      "allowed_domains": ["cloud.example.com"],
      "screenshot_on_success": true
    }
  }
}
```

**Remaining Work:**
- [ ] MCP sidecar service for exploratory browser control
- [ ] Additional tool setup scenarios (Keycloak, Poste, etc.)

---

## Executor Implementation Pattern

All executors follow the same pattern:

```python
from app.executors.base import BaseExecutor, ExecutionResult


class NewExecutor(BaseExecutor):
    """Description of what this executor does."""

    async def execute(self, payload: dict) -> ExecutionResult:
        # 1. Validate payload
        # 2. Validate paths (if file operations)
        # 3. Perform operation
        # 4. Return ExecutionResult(success=True/False, data={...}, error=...)
        ...
```

Register in `app/executors/__init__.py`:
```python
from .new_executor import NewExecutor

EXECUTOR_REGISTRY["NEW_TYPE"] = NewExecutor
```

---

## Testing

All executors must have comprehensive tests:

```bash
# Run all tests
pytest

# Run specific executor tests
pytest tests/test_executors/test_new_executor.py -v

# Run with coverage
pytest --cov=app/executors
```

---

## Next Steps

1. Existing executors support Phase 1 - no changes needed
2. When Phase 2 starts, implement SERVICE_DISCOVER executor
3. When Phase 3 starts, implement NGINX_RELOAD and HEALTHCHECK
letsbe-sysadmin-agent/app/__init__.py (new file, 3 lines)
@@ -0,0 +1,3 @@
"""LetsBe SysAdmin Agent - Autonomous automation worker for tenant servers."""

__version__ = "0.1.0"
letsbe-sysadmin-agent/app/agent.py (new file, 382 lines)
@@ -0,0 +1,382 @@
"""Agent lifecycle management: registration and heartbeat."""

import asyncio
import platform
import random
from typing import Optional

from app.clients.hub_client import get_hub_client
from app.clients.orchestrator_client import (
    CircuitBreakerOpen,
    EventLevel,
    HeartbeatResult,
    HeartbeatStatus,
    OrchestratorClient,
)
from app.config import Settings, get_settings
from app.utils.logger import get_logger

logger = get_logger("agent")


class Agent:
    """Agent lifecycle manager.

    Handles:
    - Registration with orchestrator
    - Periodic heartbeat
    - Graceful shutdown
    """

    def __init__(
        self,
        client: Optional[OrchestratorClient] = None,
        settings: Optional[Settings] = None,
    ):
        self.settings = settings or get_settings()
        self.client = client or OrchestratorClient(self.settings)
        self.hub_client = get_hub_client()
        self._shutdown_event = asyncio.Event()
        self._registered = False

    @property
    def is_registered(self) -> bool:
        """Check if agent is registered with orchestrator."""
        return self._registered and self.client.agent_id is not None

    def _get_metadata(self) -> dict:
        """Gather agent metadata for registration."""
        return {
            "platform": platform.system(),
            "platform_version": platform.version(),
            "python_version": platform.python_version(),
            "hostname": self.settings.hostname,
            "version": self.settings.agent_version,
        }

    async def register(self, max_retries: int = 5) -> bool:
        """Register agent with the orchestrator.

        Registration priority order:
        1. Load persisted credentials (fast path) - ALWAYS TRY FIRST
        2. LOCAL_MODE + LOCAL_AGENT_KEY → /register-local endpoint
        3. REGISTRATION_TOKEN → standard secure registration
        4. TENANT_ID → legacy registration (deprecated)

        Args:
            max_retries: Maximum registration attempts

        Returns:
            True if registration succeeded or credentials were loaded
        """
        if self._registered:
            logger.info("agent_already_registered", agent_id=self.client.agent_id)
            return True

        # ============================================================
        # Priority 1: Try to load persisted credentials first
        # ============================================================
        if self.client.load_credentials():
            self._registered = True
            logger.info(
                "credentials_restored",
                agent_id=self.client.agent_id,
                tenant_id=self.client.tenant_id,
            )

            # Verify credentials still work by sending heartbeat
            result = await self.client.heartbeat()

            if result.status == HeartbeatStatus.SUCCESS:
                logger.info("credentials_verified")
                # Retry any pending results from previous session
                await self.client.retry_pending_results()
                return True

            elif result.status == HeartbeatStatus.AUTH_FAILED:
                # Only clear credentials on explicit auth failure (401/403)
                logger.warning("credentials_invalid_clearing", reason=result.message)
                self.client.clear_credentials()
                self._registered = False
                # Fall through to registration

            elif result.status == HeartbeatStatus.NOT_REGISTERED:
                # Should not happen if load_credentials succeeded, but handle it
                logger.warning("credentials_not_registered_state")
                self._registered = False
                # Fall through to registration

            elif result.status in (HeartbeatStatus.SERVER_ERROR, HeartbeatStatus.NETWORK_ERROR):
                # Transient error - keep credentials, retry later
                # Do NOT retry_pending_results here - orchestrator is unhealthy
                # Main heartbeat loop will handle retries with backoff
                logger.warning(
                    "credentials_verification_transient_error",
                    status=result.status.value,
                    message=result.message,
                )
                return True

        # ============================================================
        # Priority 2: LOCAL_MODE registration via /register-local
        # ============================================================
        if self.settings.local_mode and self.settings.local_agent_key:
            return await self._register_local(max_retries)

        # ============================================================
        # Priority 3 & 4: Standard or legacy registration
        # ============================================================
        # Check if we have registration token or can do legacy registration
        if not self.settings.registration_token and not self.settings.tenant_id:
            # For backward compatibility, allow registration without token
            # (orchestrator will create shared agent)
            logger.warning(
                "registration_no_token",
                message="No REGISTRATION_TOKEN provided. Using legacy registration.",
            )

        metadata = self._get_metadata()

        for attempt in range(max_retries):
            try:
                # register() returns (agent_id, secret_or_token, tenant_id)
                agent_id, secret, tenant_id = await self.client.register(metadata)
                self._registered = True

                logger.info(
                    "agent_registered",
                    agent_id=agent_id,
                    tenant_id=tenant_id,
                    hostname=self.settings.hostname,
                    version=self.settings.agent_version,
                    auth_type="secure" if self.client.agent_secret else "legacy",
                )

                # Send registration event
                await self.client.send_event(
                    EventLevel.INFO,
                    f"Agent registered: {self.settings.hostname}",
                    metadata=metadata,
                )

                # Retry any pending results from previous session
                await self.client.retry_pending_results()

                return True

            except CircuitBreakerOpen:
                logger.warning(
                    "registration_circuit_breaker_open",
                    attempt=attempt + 1,
                )
                # Wait for cooldown
                await asyncio.sleep(self.settings.circuit_breaker_cooldown)

            except Exception as e:
                delay = self.settings.backoff_base * (2 ** attempt)
                delay = min(delay, self.settings.backoff_max)
                # Add jitter
                delay += random.uniform(0, delay * 0.25)

                logger.error(
                    "registration_failed",
                    attempt=attempt + 1,
                    max_retries=max_retries,
                    error=str(e),
                    retry_in=delay,
                )

                if attempt < max_retries - 1:
                    await asyncio.sleep(delay)

        logger.error("registration_exhausted", max_retries=max_retries)
        return False

    async def _register_local(self, max_retries: int = 5) -> bool:
        """Register agent using LOCAL_MODE endpoint.

        Uses LOCAL_AGENT_KEY to register with /api/v1/agents/register-local.
        If agent already exists but we have no credentials, automatically
        attempts credential rotation.

        Args:
            max_retries: Maximum registration attempts

        Returns:
            True if registration succeeded
        """
        logger.info(
            "local_mode_registration_starting",
            orchestrator_url=self.settings.orchestrator_url,
        )

        metadata = self._get_metadata()

        for attempt in range(max_retries):
            try:
                # register_local() returns (agent_id, secret or None, tenant_id, already_registered)
                agent_id, secret, tenant_id, already_registered = await self.client.register_local(
                    local_agent_key=self.settings.local_agent_key,
                    rotate=False,
                )

                # Handle case where agent exists but we have no credentials
                if already_registered and not secret:
                    logger.warning(
                        "local_agent_exists_no_credentials",
                        agent_id=agent_id,
                        message="Agent exists but no persisted credentials. Attempting rotation.",
                    )
                    # Retry with rotation to get new credentials
                    agent_id, secret, tenant_id, _ = await self.client.register_local(
                        local_agent_key=self.settings.local_agent_key,
                        rotate=True,
                    )
                    logger.info(
                        "local_agent_credentials_rotated",
                        agent_id=agent_id,
                    )

                self._registered = True

                logger.info(
                    "local_mode_agent_registered",
                    agent_id=agent_id,
                    tenant_id=tenant_id,
                    hostname=self.settings.hostname,
                    version=self.settings.agent_version,
                    already_registered=already_registered,
                )

                # Send registration event
                await self.client.send_event(
                    EventLevel.INFO,
                    f"Agent registered (LOCAL_MODE): {self.settings.hostname}",
                    metadata=metadata,
                )

                # Retry any pending results from previous session
                await self.client.retry_pending_results()

                return True

            except CircuitBreakerOpen:
                logger.warning(
                    "local_registration_circuit_breaker_open",
                    attempt=attempt + 1,
                )
                await asyncio.sleep(self.settings.circuit_breaker_cooldown)

            except Exception as e:
                delay = self.settings.backoff_base * (2 ** attempt)
                delay = min(delay, self.settings.backoff_max)
                delay += random.uniform(0, delay * 0.25)

                logger.error(
                    "local_registration_failed",
                    attempt=attempt + 1,
                    max_retries=max_retries,
                    error=str(e),
                    retry_in=delay,
                )

                if attempt < max_retries - 1:
                    await asyncio.sleep(delay)

        logger.error("local_registration_exhausted", max_retries=max_retries)
        return False

    async def heartbeat_loop(self) -> None:
        """Run the heartbeat loop until shutdown.

        Sends periodic heartbeats to the orchestrator.
        Uses exponential backoff on failures.
        """
        if not self.is_registered:
            logger.warning("heartbeat_loop_not_registered")
            return

        logger.info(
            "heartbeat_loop_started",
            interval=self.settings.heartbeat_interval,
        )

        consecutive_failures = 0
        backoff_multiplier = 1.0

        while not self._shutdown_event.is_set():
            result = await self.client.heartbeat()

            if result.status == HeartbeatStatus.SUCCESS:
                consecutive_failures = 0
                backoff_multiplier = 1.0
                logger.debug("heartbeat_sent", agent_id=self.client.agent_id)

                # Also send heartbeat to Hub if configured (with credentials)
                if self.hub_client.is_configured:
                    await self.hub_client.send_heartbeat(include_credentials=True)

            elif result.status == HeartbeatStatus.AUTH_FAILED:
                # Credentials truly invalid (e.g., agent deleted in orchestrator)
                logger.warning(
                    "heartbeat_auth_failed_clearing_credentials",
                    message=result.message,
                )
                self.client.clear_credentials()
                self._registered = False  # Outer loop will re-run register()
                consecutive_failures = 0
                backoff_multiplier = 1.0
                # Break out of heartbeat loop to trigger re-registration
                break

            else:
                # NETWORK_ERROR / SERVER_ERROR / NOT_REGISTERED
                # Transient issues - keep credentials, just backoff
                consecutive_failures += 1
                backoff_multiplier = min(backoff_multiplier * 1.5, 4.0)
                logger.warning(
                    "heartbeat_failed_transient",
                    status=result.status.value,
                    message=result.message,
                    consecutive_failures=consecutive_failures,
                )

            # Calculate next interval with backoff
            interval = self.settings.heartbeat_interval * backoff_multiplier
            # Add jitter (0-10% of interval)
            interval += random.uniform(0, interval * 0.1)

            # Wait for next heartbeat or shutdown
            try:
                await asyncio.wait_for(
                    self._shutdown_event.wait(),
                    timeout=interval,
                )
                break  # Shutdown requested
            except asyncio.TimeoutError:
                pass  # Normal timeout, continue loop

        logger.info("heartbeat_loop_stopped")

    async def shutdown(self) -> None:
        """Initiate graceful shutdown."""
        logger.info("agent_shutdown_initiated")

        # Signal shutdown
        self._shutdown_event.set()

        # Send shutdown event if we can
        if self.is_registered:
            try:
                await self.client.send_event(
                    EventLevel.INFO,
                    f"Agent shutting down: {self.settings.hostname}",
                )
            except Exception:
                pass  # Best effort

        # Close clients
        await self.client.close()
        await self.hub_client.close()

        logger.info("agent_shutdown_complete")
letsbe-sysadmin-agent/app/clients/__init__.py (new file, 11 lines)
@@ -0,0 +1,11 @@
"""API clients for external services."""

from .hub_client import HubClient, get_hub_client, send_hub_heartbeat
from .orchestrator_client import OrchestratorClient

__all__ = [
    "HubClient",
    "OrchestratorClient",
    "get_hub_client",
    "send_hub_heartbeat",
]
letsbe-sysadmin-agent/app/clients/hub_client.py (new file, 160 lines)
@@ -0,0 +1,160 @@
"""Async HTTP client for communicating with the LetsBe Hub."""

import asyncio
from typing import Any, Optional

import httpx

from app.config import Settings, get_settings
from app.utils.credential_reader import get_all_tool_credentials, get_credential_hash
from app.utils.logger import get_logger

logger = get_logger("hub_client")


class HubClient:
    """Async client for Hub REST API.

    Used for sending heartbeats with tool credentials directly to the Hub.
    This bypasses the orchestrator for credential synchronization.
    """

    def __init__(self, settings: Optional[Settings] = None):
        self.settings = settings or get_settings()
        self._client: Optional[httpx.AsyncClient] = None
        self._last_credentials_hash: str = ""

    @property
    def is_configured(self) -> bool:
        """Check if Hub connection is configured."""
        return bool(
            self.settings.hub_url
            and self.settings.hub_api_key
            and self.settings.hub_telemetry_enabled
        )

    def _get_headers(self) -> dict[str, str]:
        """Get headers for Hub API requests."""
        return {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.settings.hub_api_key}",
            "X-Agent-Version": self.settings.agent_version,
            "X-Agent-Hostname": self.settings.hostname,
        }

    async def _get_client(self) -> httpx.AsyncClient:
        """Get or create the HTTP client."""
        if self._client is None or self._client.is_closed:
            self._client = httpx.AsyncClient(
                base_url=self.settings.hub_url,
                headers=self._get_headers(),
                timeout=httpx.Timeout(30.0, connect=10.0),
            )
        return self._client

    async def send_heartbeat(
        self,
        include_credentials: bool = True,
        status: Optional[dict[str, Any]] = None,
    ) -> bool:
        """Send heartbeat to Hub with optional credentials.

        Args:
            include_credentials: Include tool credentials in heartbeat
            status: Optional system status metrics

        Returns:
            True if heartbeat was sent successfully
        """
        if not self.is_configured:
            logger.debug("hub_heartbeat_skipped", reason="not_configured")
            return False

        try:
            payload: dict[str, Any] = {
                "agentVersion": self.settings.agent_version,
            }

            # Include system status if provided
            if status:
                payload["status"] = status

            # Include tool credentials only when they've changed
            if include_credentials:
                current_hash = get_credential_hash()
                if current_hash and current_hash != self._last_credentials_hash:
                    credentials = get_all_tool_credentials()
                    if credentials:
                        payload["credentials"] = credentials
                        payload["credentialsHash"] = current_hash
                        self._last_credentials_hash = current_hash
                        logger.debug(
                            "hub_heartbeat_with_credentials",
                            tools=list(credentials.keys()),
                        )
                elif current_hash:
                    # Just send the hash so Hub knows credentials haven't changed
                    payload["credentialsHash"] = current_hash

            client = await self._get_client()
            response = await client.post(
                "/api/v1/orchestrator/heartbeat",
                json=payload,
            )

            if response.status_code == 200:
                data = response.json()
                logger.info(
                    "hub_heartbeat_sent",
                    server_id=data.get("serverId"),
                    commands_pending=len(data.get("commands", [])),
                )
                return True
            elif response.status_code == 401:
                logger.warning(
                    "hub_heartbeat_auth_failed",
                    status_code=response.status_code,
                )
                return False
            else:
                logger.warning(
                    "hub_heartbeat_failed",
                    status_code=response.status_code,
                    response=response.text[:200],
                )
                return False

        except (httpx.ConnectError, httpx.TimeoutException) as e:
            logger.warning("hub_heartbeat_network_error", error=str(e))
            return False
        except Exception as e:
            logger.error("hub_heartbeat_error", error=str(e))
            return False

    async def close(self) -> None:
        """Close the HTTP client."""
        if self._client and not self._client.is_closed:
            await self._client.aclose()
        self._client = None


# Singleton instance
_hub_client: Optional[HubClient] = None


def get_hub_client() -> HubClient:
    """Get the singleton Hub client instance."""
    global _hub_client
    if _hub_client is None:
        _hub_client = HubClient()
    return _hub_client


async def send_hub_heartbeat() -> bool:
    """Convenience function to send heartbeat to Hub.

    Returns:
        True if heartbeat was sent successfully, False if not configured or failed
    """
    client = get_hub_client()
    return await client.send_heartbeat()
letsbe-sysadmin-agent/app/clients/orchestrator_client.py (new file, 922 lines)
@@ -0,0 +1,922 @@
"""Async HTTP client for communicating with the LetsBe Orchestrator."""

import asyncio
import json
import random
import time
from dataclasses import dataclass
from enum import Enum
from pathlib import Path
from typing import Any, Optional

import httpx

from app.config import Settings, get_settings
from app.utils.logger import get_logger

logger = get_logger("orchestrator_client")


class TaskStatus(str, Enum):
    """Task execution status (matches orchestrator values)."""

    PENDING = "pending"
    RUNNING = "running"  # Was IN_PROGRESS
    COMPLETED = "completed"
    FAILED = "failed"


class EventLevel(str, Enum):
    """Event severity level."""

    DEBUG = "debug"
    INFO = "info"
    WARNING = "warning"
    ERROR = "error"


@dataclass
class Task:
    """Task received from orchestrator."""

    id: str
    type: str
    payload: dict[str, Any]
    tenant_id: Optional[str] = None
    created_at: Optional[str] = None


class CircuitBreakerOpen(Exception):
    """Raised when circuit breaker is open."""

    pass


class HeartbeatStatus(str, Enum):
    """Status of a heartbeat attempt."""

    SUCCESS = "success"
    AUTH_FAILED = "auth_failed"  # 401/403 - credentials invalid
    SERVER_ERROR = "server_error"  # 5xx - transient, retry
    NETWORK_ERROR = "network_error"  # Connection failed, timeout
    NOT_REGISTERED = "not_registered"  # No agent_id/secret set


@dataclass
class HeartbeatResult:
    """Result of a heartbeat attempt with status and optional message."""

    status: HeartbeatStatus
    message: str = ""


class OrchestratorClient:
    """Async client for Orchestrator REST API.

    Features:
    - Exponential backoff with jitter on failures
    - Circuit breaker to prevent hammering during outages
    - X-Agent-Id and X-Agent-Secret headers for new auth
    - Backward compatible with legacy Bearer token auth
    - Event logging to orchestrator
    - Local result persistence for retry
    - Credential persistence to survive restarts
    """

    # API version prefix for all endpoints
    API_PREFIX = "/api/v1"

    def __init__(self, settings: Optional[Settings] = None):
        self.settings = settings or get_settings()
        self._client: Optional[httpx.AsyncClient] = None
        self._agent_id: Optional[str] = None
        self._agent_secret: Optional[str] = None  # New auth scheme
        self._tenant_id: Optional[str] = None  # Set after registration
        self._token: Optional[str] = None  # Legacy token (deprecated)

        # Initialize from settings if provided
        if self.settings.agent_id:
            self._agent_id = self.settings.agent_id
        if self.settings.agent_secret:
            self._agent_secret = self.settings.agent_secret
        if self.settings.tenant_id:
            self._tenant_id = self.settings.tenant_id
        if self.settings.agent_token:
            self._token = self.settings.agent_token

        # Circuit breaker state
        self._consecutive_failures = 0
        self._circuit_open_until: Optional[float] = None

        # Persistence paths
        self._pending_path = Path(self.settings.pending_results_path).expanduser()
        self._credentials_path = Path(self.settings.credentials_path).expanduser()

    @property
    def agent_id(self) -> Optional[str]:
        """Get the current agent ID."""
        return self._agent_id

    @agent_id.setter
    def agent_id(self, value: str) -> None:
        """Set the agent ID after registration."""
        self._agent_id = value
        self._invalidate_client()

    @property
    def agent_secret(self) -> Optional[str]:
        """Get the current agent secret (new auth scheme)."""
        return self._agent_secret

    @agent_secret.setter
    def agent_secret(self, value: str) -> None:
        """Set the agent secret after registration."""
        self._agent_secret = value
        self._invalidate_client()

    @property
    def tenant_id(self) -> Optional[str]:
        """Get the tenant ID."""
        return self._tenant_id

    @tenant_id.setter
    def tenant_id(self, value: str) -> None:
        """Set the tenant ID."""
        self._tenant_id = value

    @property
    def token(self) -> Optional[str]:
        """Get the legacy authentication token (deprecated)."""
        return self._token

    @token.setter
    def token(self, value: str) -> None:
        """Set the legacy authentication token (deprecated)."""
        self._token = value
        self._invalidate_client()

    @property
    def is_registered(self) -> bool:
        """Check if agent has credentials (registered or loaded)."""
        return self._agent_id is not None and (
            self._agent_secret is not None or self._token is not None
        )

    def _invalidate_client(self) -> None:
        """Force client recreation to pick up new headers."""
        if self._client and not self._client.is_closed:
            asyncio.create_task(self._client.aclose())
        self._client = None

    def _get_headers(self) -> dict[str, str]:
        """Get headers for API requests including version and auth.

        Uses new X-Agent-Id/X-Agent-Secret scheme if available,
        falls back to legacy Bearer token for backward compatibility.
        """
        headers = {
            "Content-Type": "application/json",
            "X-Agent-Version": self.settings.agent_version,
            "X-Agent-Hostname": self.settings.hostname,
        }

        # Prefer new auth scheme
        if self._agent_id and self._agent_secret:
            headers["X-Agent-Id"] = self._agent_id
            headers["X-Agent-Secret"] = self._agent_secret
        # Fall back to legacy Bearer token
        elif self._token:
            headers["Authorization"] = f"Bearer {self._token}"

        return headers

    async def _get_client(self) -> httpx.AsyncClient:
        """Get or create the HTTP client."""
        if self._client is None or self._client.is_closed:
            self._client = httpx.AsyncClient(
                base_url=self.settings.orchestrator_url,
                headers=self._get_headers(),
                timeout=httpx.Timeout(30.0, connect=10.0),
            )
        return self._client

    def _check_circuit_breaker(self) -> None:
        """Check if circuit breaker is open."""
        if self._circuit_open_until is not None:
            if time.time() < self._circuit_open_until:
                raise CircuitBreakerOpen(
                    f"Circuit breaker open until {self._circuit_open_until}"
                )
            else:
                # Cooldown period has passed, reset
                logger.info("circuit_breaker_reset", cooldown_complete=True)
                self._circuit_open_until = None
                self._consecutive_failures = 0

    def _record_success(self) -> None:
        """Record a successful API call."""
        self._consecutive_failures = 0

    def _record_failure(self) -> None:
        """Record a failed API call and potentially trip circuit breaker."""
        self._consecutive_failures += 1
        if self._consecutive_failures >= self.settings.circuit_breaker_threshold:
            self._circuit_open_until = time.time() + self.settings.circuit_breaker_cooldown
            logger.warning(
                "circuit_breaker_tripped",
                consecutive_failures=self._consecutive_failures,
                cooldown_seconds=self.settings.circuit_breaker_cooldown,
            )

    def _calculate_backoff(self, attempt: int) -> float:
        """Calculate exponential backoff with jitter.

        Args:
            attempt: Current attempt number (0-indexed)

        Returns:
            Delay in seconds
        """
        # Exponential backoff: base * 2^attempt
        delay = self.settings.backoff_base * (2 ** attempt)
        # Cap at max
        delay = min(delay, self.settings.backoff_max)
        # Add jitter (0-25% of delay)
        jitter = random.uniform(0, delay * 0.25)
        return delay + jitter

    async def _request_with_retry(
        self,
        method: str,
        path: str,
        max_retries: int = 3,
        **kwargs,
    ) -> httpx.Response:
        """Make an HTTP request with retry logic.

        Args:
            method: HTTP method
            path: API path
            max_retries: Maximum retry attempts
            **kwargs: Additional arguments for httpx

        Returns:
            HTTP response

        Raises:
            CircuitBreakerOpen: If circuit breaker is tripped
            httpx.HTTPError: If all retries fail
        """
        self._check_circuit_breaker()
        client = await self._get_client()

        last_error: Optional[Exception] = None

        for attempt in range(max_retries + 1):
            try:
                response = await client.request(method, path, **kwargs)

                # Check for server errors (5xx)
                if response.status_code >= 500:
                    self._record_failure()
                    raise httpx.HTTPStatusError(
                        f"Server error: {response.status_code}",
                        request=response.request,
                        response=response,
                    )

                self._record_success()
                return response

            except (httpx.RequestError, httpx.HTTPStatusError) as e:
                last_error = e
                self._record_failure()

                if attempt < max_retries:
                    delay = self._calculate_backoff(attempt)
                    logger.warning(
                        "request_retry",
                        method=method,
                        path=path,
                        attempt=attempt + 1,
                        max_retries=max_retries,
                        delay=delay,
                        error=str(e),
                    )
                    await asyncio.sleep(delay)
                else:
                    logger.error(
                        "request_failed",
                        method=method,
                        path=path,
                        attempts=max_retries + 1,
                        error=str(e),
                    )

        raise last_error or Exception("Unknown error during request")

    async def register(self, metadata: Optional[dict] = None) -> tuple[str, str, Optional[str]]:
        """Register agent with the orchestrator.

        Supports two registration flows:
        1. New (secure): Uses REGISTRATION_TOKEN from settings
        2. Legacy (deprecated): Uses TENANT_ID directly

        Args:
            metadata: Optional metadata about the agent

        Returns:
            Tuple of (agent_id, secret_or_token, tenant_id)
        """
        payload = {
            "hostname": self.settings.hostname,
            "version": self.settings.agent_version,
            "metadata": metadata or {},
        }

        # Determine registration flow
        if self.settings.registration_token:
            # New secure registration flow
            payload["registration_token"] = self.settings.registration_token
            logger.info(
                "registering_agent_secure",
                hostname=self.settings.hostname,
            )
        else:
            # Legacy registration flow (deprecated)
            if self.settings.tenant_id:
                payload["tenant_id"] = self.settings.tenant_id
            logger.warning(
                "registering_agent_legacy",
                hostname=self.settings.hostname,
                tenant_id=self.settings.tenant_id,
                message="Using deprecated registration flow. Consider using REGISTRATION_TOKEN.",
            )

        response = await self._request_with_retry(
            "POST",
            f"{self.API_PREFIX}/agents/register",
            json=payload,
        )
        response.raise_for_status()

        data = response.json()

        # Handle response based on registration flow
        if "agent_secret" in data:
            # New secure registration response
            # Use setters to trigger client invalidation
            self.agent_id = data["agent_id"]
            self.agent_secret = data["agent_secret"]
            self._tenant_id = data.get("tenant_id")

            # Persist credentials for restart recovery
            await self._save_credentials()

            logger.info(
                "agent_registered_secure",
                agent_id=self._agent_id,
                tenant_id=self._tenant_id,
            )
            return self._agent_id, self._agent_secret, self._tenant_id
        else:
            # Legacy registration response
            # Use setters to trigger client invalidation
            self.agent_id = data["agent_id"]
            self.token = data.get("token")
            self._tenant_id = self.settings.tenant_id

            # Also persist legacy credentials
            await self._save_credentials()

            logger.info(
                "agent_registered_legacy",
                agent_id=self._agent_id,
            )
            return self._agent_id, self._token, self._tenant_id

    async def register_local(
        self, local_agent_key: str, rotate: bool = False
    ) -> tuple[str, Optional[str], str, bool]:
        """Register agent using LOCAL_MODE endpoint.

        This is used when LOCAL_MODE=true. The agent authenticates using
        LOCAL_AGENT_KEY (not a registration token).

        Args:
            local_agent_key: The LOCAL_AGENT_KEY for authentication
            rotate: If True, force credential rotation (deletes existing agent)

        Returns:
            Tuple of (agent_id, agent_secret, tenant_id, already_registered)
            - agent_secret is None if already_registered=True (use persisted creds)

        Raises:
            httpx.HTTPError: If registration fails
        """
        payload = {
            "hostname": self.settings.hostname,
            "version": self.settings.agent_version,
        }

        # Build URL with optional rotate query param
        url = f"{self.API_PREFIX}/agents/register-local"
        if rotate:
            url += "?rotate=true"

        logger.info(
            "registering_agent_local",
            hostname=self.settings.hostname,
            rotate=rotate,
        )

        try:
            client = await self._get_client()
            # Make direct request (no retry for registration)
            response = await client.request(
                "POST",
                url,
                json=payload,
                headers={"X-Local-Agent-Key": local_agent_key},
            )

            # Handle specific status codes
            if response.status_code == 404:
                raise httpx.HTTPStatusError(
                    "LOCAL_MODE not enabled on orchestrator",
                    request=response.request,
                    response=response,
                )
            elif response.status_code == 401:
                raise httpx.HTTPStatusError(
                    "Invalid LOCAL_AGENT_KEY",
                    request=response.request,
                    response=response,
                )
            elif response.status_code == 503:
                raise httpx.HTTPStatusError(
                    "Orchestrator not ready (tenant not bootstrapped)",
                    request=response.request,
                    response=response,
                )

            response.raise_for_status()
            data = response.json()

            agent_id = data["agent_id"]
            agent_secret = data.get("agent_secret")  # None if already_registered
            tenant_id = data["tenant_id"]
            already_registered = data.get("already_registered", False)

            # Only set credentials if we got a new secret
            if agent_secret:
                self.agent_id = agent_id
                self.agent_secret = agent_secret
                self._tenant_id = tenant_id

                # Persist credentials atomically
                await self._save_credentials_atomic()

                logger.info(
                    "local_agent_registered",
                    agent_id=agent_id,
                    tenant_id=tenant_id,
                    rotated=rotate,
                )
            else:
                logger.info(
                    "local_agent_already_registered",
                    agent_id=agent_id,
                    tenant_id=tenant_id,
                    message="No new secret - use persisted credentials",
                )

            return agent_id, agent_secret, tenant_id, already_registered

        except httpx.HTTPStatusError:
            raise
        except (httpx.ConnectError, httpx.TimeoutException) as e:
            logger.warning("register_local_network_error", error=str(e))
            raise

    async def _save_credentials_atomic(self) -> None:
        """Persist agent credentials atomically (temp → chmod → rename).

        This prevents credential file corruption if the process is killed
        during write.
        """
        try:
            # Ensure directory exists
            self._credentials_path.parent.mkdir(parents=True, exist_ok=True)

            credentials = {
                "agent_id": self._agent_id,
                "tenant_id": self._tenant_id,
            }

            # Include appropriate credential based on auth type
            if self._agent_secret:
                credentials["agent_secret"] = self._agent_secret
            elif self._token:
                credentials["token"] = self._token

            # Write to temp file first
            temp_path = self._credentials_path.with_suffix(".tmp")
            temp_path.write_text(json.dumps(credentials, indent=2))

            # Set secure permissions BEFORE rename (no window of insecure file)
            try:
                temp_path.chmod(0o600)
            except OSError:
                pass  # Ignore on Windows

            # Atomic rename
            temp_path.rename(self._credentials_path)

            logger.info(
                "credentials_saved_atomic",
                path=str(self._credentials_path),
                agent_id=self._agent_id,
            )

        except Exception as e:
            logger.error("credentials_save_failed", error=str(e))
            raise

    async def heartbeat(self) -> HeartbeatResult:
        """Send heartbeat to orchestrator.

        Returns:
            HeartbeatResult with status indicating success or failure type.
            - SUCCESS: Heartbeat acknowledged (200)
            - AUTH_FAILED: Credentials invalid (401/403)
            - SERVER_ERROR: Server issue (5xx), transient
            - NETWORK_ERROR: Connection failed, transient
            - NOT_REGISTERED: No agent_id set
        """
        if not self._agent_id:
            logger.warning("heartbeat_skipped", reason="not_registered")
            return HeartbeatResult(HeartbeatStatus.NOT_REGISTERED, "No agent_id set")

        try:
            response = await self._request_with_retry(
                "POST",
                f"{self.API_PREFIX}/agents/{self._agent_id}/heartbeat",
                max_retries=1,  # Don't retry too aggressively for heartbeats
            )

            if response.status_code == 200:
                return HeartbeatResult(HeartbeatStatus.SUCCESS)
            elif response.status_code in (401, 403):
                msg = f"HTTP {response.status_code}: {response.text[:200]}"
                logger.warning("heartbeat_auth_failed", status_code=response.status_code)
                return HeartbeatResult(HeartbeatStatus.AUTH_FAILED, msg)
            elif response.status_code >= 500:
                msg = f"HTTP {response.status_code}: {response.text[:200]}"
                logger.warning("heartbeat_server_error", status_code=response.status_code)
                return HeartbeatResult(HeartbeatStatus.SERVER_ERROR, msg)
            else:
                # 4xx other than 401/403 - treat as auth failure
                msg = f"HTTP {response.status_code}: {response.text[:200]}"
                logger.warning("heartbeat_client_error", status_code=response.status_code)
                return HeartbeatResult(HeartbeatStatus.AUTH_FAILED, msg)

        except (httpx.ConnectError, httpx.TimeoutException) as e:
            logger.warning("heartbeat_network_error", error=str(e))
            return HeartbeatResult(HeartbeatStatus.NETWORK_ERROR, str(e))
        except httpx.HTTPError as e:
            logger.warning("heartbeat_http_error", error=str(e))
            return HeartbeatResult(HeartbeatStatus.NETWORK_ERROR, str(e))
        except CircuitBreakerOpen:
            logger.warning("heartbeat_circuit_breaker_open")
            return HeartbeatResult(HeartbeatStatus.NETWORK_ERROR, "Circuit breaker open")

    async def fetch_next_task(self) -> Optional[Task]:
        """Fetch the next available task for this agent.

        Returns:
            Task if available, None otherwise
        """
        if not self.is_registered:
            logger.warning("fetch_task_skipped", reason="not_registered")
            return None

        try:
            # Note: agent_id is now in headers (X-Agent-Id), not query params
            response = await self._request_with_retry(
                "GET",
                f"{self.API_PREFIX}/tasks/next",
                max_retries=1,
            )

            if response.status_code == 204 or not response.content:
                return None

            data = response.json()
            if data is None:
                return None

            task = Task(
                id=data["id"],
                type=data["type"],
                payload=data.get("payload", {}),
                tenant_id=data.get("tenant_id"),
                created_at=data.get("created_at"),
            )

            logger.info("task_received", task_id=task.id, task_type=task.type)
            return task

        except (httpx.HTTPError, CircuitBreakerOpen) as e:
            logger.warning("fetch_task_failed", error=str(e))
            return None

    async def update_task(
        self,
        task_id: str,
        status: TaskStatus,
        result: Optional[dict] = None,
        error: Optional[str] = None,
    ) -> bool:
        """Update task status in orchestrator.

        Args:
            task_id: Task identifier
            status: New status
            result: Task result data (for COMPLETED)
            error: Error message (for FAILED)

        Returns:
            True if update was successful
        """
        payload: dict[str, Any] = {"status": status.value}
        if result is not None:
            payload["result"] = result
        if error is not None:
            payload["error"] = error

        try:
            response = await self._request_with_retry(
                "PATCH",
                f"{self.API_PREFIX}/tasks/{task_id}",
                json=payload,
            )
            success = response.status_code in (200, 204)

            if success:
                logger.info("task_updated", task_id=task_id, status=status.value)
            else:
                logger.warning(
                    "task_update_unexpected_status",
                    task_id=task_id,
                    status_code=response.status_code,
                )

            return success

        except (httpx.HTTPError, CircuitBreakerOpen) as e:
            logger.error("task_update_failed", task_id=task_id, error=str(e))
            # Save to pending results for retry
            await self._save_pending_result(task_id, status, result, error)
            return False

    async def send_event(
        self,
        level: EventLevel,
        message: str,
        task_id: Optional[str] = None,
        metadata: Optional[dict] = None,
    ) -> bool:
        """Send an event to the orchestrator for timeline/dashboard.

        Args:
            level: Event severity level
            message: Event description
            task_id: Related task ID (optional)
            metadata: Additional event data

        Returns:
            True if event was sent successfully
        """
        payload = {
            "level": level.value,
            "source": "agent",
            "agent_id": self._agent_id,
            "message": message,
            "metadata": metadata or {},
        }
        if task_id:
            payload["task_id"] = task_id

        try:
            response = await self._request_with_retry(
                "POST",
                f"{self.API_PREFIX}/events",
                json=payload,
                max_retries=1,  # Don't block on event logging
            )
            return response.status_code in (200, 201, 204)
        except Exception as e:
            # Don't fail operations due to event logging issues
            logger.debug("event_send_failed", error=str(e))
            return False

    async def _save_pending_result(
        self,
        task_id: str,
        status: TaskStatus,
        result: Optional[dict],
        error: Optional[str],
    ) -> None:
        """Save a task result locally for later retry.

        Args:
            task_id: Task identifier
            status: Task status
            result: Task result
            error: Error message
        """
        try:
            # Ensure directory exists
            self._pending_path.parent.mkdir(parents=True, exist_ok=True)

            # Load existing pending results
            pending: list[dict] = []
            if self._pending_path.exists():
                pending = json.loads(self._pending_path.read_text())

            # Add new result
            pending.append({
                "task_id": task_id,
                "status": status.value,
                "result": result,
                "error": error,
                "timestamp": time.time(),
            })

            # Save back
            self._pending_path.write_text(json.dumps(pending, indent=2))
            logger.info("pending_result_saved", task_id=task_id, path=str(self._pending_path))

        except Exception as e:
            logger.error("pending_result_save_failed", task_id=task_id, error=str(e))

    async def retry_pending_results(self) -> int:
        """Retry sending any pending results.

        Returns:
            Number of results successfully sent
        """
        if not self._pending_path.exists():
            return 0

        try:
            pending = json.loads(self._pending_path.read_text())
        except Exception as e:
            logger.error("pending_results_load_failed", error=str(e))
            return 0

        successful = 0
        remaining = []

        for item in pending:
            try:
                response = await self._request_with_retry(
                    "PATCH",
                    f"{self.API_PREFIX}/tasks/{item['task_id']}",
                    json={
                        "status": item["status"],
                        "result": item.get("result"),
                        "error": item.get("error"),
                    },
                    max_retries=1,
                )
                if response.status_code in (200, 204):
                    successful += 1
                    logger.info("pending_result_sent", task_id=item["task_id"])
                else:
                    remaining.append(item)
            except Exception:
                remaining.append(item)

        # Update pending file
        if remaining:
            self._pending_path.write_text(json.dumps(remaining, indent=2))
        else:
            self._pending_path.unlink(missing_ok=True)
|
||||
|
||||
if successful:
|
||||
logger.info("pending_results_retried", successful=successful, remaining=len(remaining))
|
||||
|
||||
return successful
|
||||
|
||||
async def _save_credentials(self) -> None:
|
||||
"""Persist agent credentials to disk for restart recovery.
|
||||
|
||||
Credentials are stored with secure file permissions (0600).
|
||||
"""
|
||||
try:
|
||||
# Ensure directory exists
|
||||
self._credentials_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
credentials = {
|
||||
"agent_id": self._agent_id,
|
||||
"tenant_id": self._tenant_id,
|
||||
}
|
||||
|
||||
# Include appropriate credential based on auth type
|
||||
if self._agent_secret:
|
||||
credentials["agent_secret"] = self._agent_secret
|
||||
elif self._token:
|
||||
credentials["token"] = self._token
|
||||
|
||||
# Write with secure permissions
|
||||
self._credentials_path.write_text(json.dumps(credentials, indent=2))
|
||||
|
||||
# Set secure permissions (owner read/write only)
|
||||
# Note: On Windows, this has limited effect
|
||||
try:
|
||||
self._credentials_path.chmod(0o600)
|
||||
except OSError:
|
||||
pass # Ignore on Windows
|
||||
|
||||
logger.info(
|
||||
"credentials_saved",
|
||||
path=str(self._credentials_path),
|
||||
agent_id=self._agent_id,
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error("credentials_save_failed", error=str(e))
|
||||
|
||||
def load_credentials(self) -> bool:
|
||||
"""Load persisted credentials from disk.
|
||||
|
||||
Returns:
|
||||
True if credentials were loaded successfully
|
||||
"""
|
||||
if not self._credentials_path.exists():
|
||||
return False
|
||||
|
||||
try:
|
||||
data = json.loads(self._credentials_path.read_text())
|
||||
|
||||
self._agent_id = data.get("agent_id")
|
||||
self._tenant_id = data.get("tenant_id")
|
||||
|
||||
# Load appropriate credential
|
||||
if "agent_secret" in data:
|
||||
self._agent_secret = data["agent_secret"]
|
||||
elif "token" in data:
|
||||
self._token = data["token"]
|
||||
|
||||
if self._agent_id:
|
||||
logger.info(
|
||||
"credentials_loaded",
|
||||
agent_id=self._agent_id,
|
||||
tenant_id=self._tenant_id,
|
||||
auth_type="secure" if self._agent_secret else "legacy",
|
||||
)
|
||||
return True
|
||||
return False
|
||||
|
||||
except Exception as e:
|
||||
logger.error("credentials_load_failed", error=str(e))
|
||||
return False
|
||||
|
||||
def clear_credentials(self) -> None:
|
||||
"""Clear persisted credentials (useful for re-registration)."""
|
||||
self._agent_id = None
|
||||
self._agent_secret = None
|
||||
self._token = None
|
||||
self._tenant_id = None
|
||||
|
||||
if self._credentials_path.exists():
|
||||
try:
|
||||
self._credentials_path.unlink()
|
||||
logger.info("credentials_cleared")
|
||||
except Exception as e:
|
||||
logger.error("credentials_clear_failed", error=str(e))
|
||||
|
||||
self._invalidate_client()
|
||||
|
||||
def reset_circuit_breaker(self) -> None:
|
||||
"""Manually reset the circuit breaker.
|
||||
|
||||
Useful when retrying registration after a long wait period,
|
||||
to give the orchestrator a fresh chance to respond.
|
||||
"""
|
||||
if self._circuit_open_until is not None or self._consecutive_failures > 0:
|
||||
logger.info(
|
||||
"circuit_breaker_manual_reset",
|
||||
was_open=self._circuit_open_until is not None,
|
||||
previous_failures=self._consecutive_failures,
|
||||
)
|
||||
self._circuit_open_until = None
|
||||
self._consecutive_failures = 0
|
||||
|
||||
async def close(self) -> None:
|
||||
"""Close the HTTP client."""
|
||||
if self._client and not self._client.is_closed:
|
||||
await self._client.aclose()
|
||||
self._client = None
|
||||
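The pending-result buffer above gives task updates at-least-once delivery across orchestrator outages. A minimal sketch of how a caller might tie these methods together on reconnect — the `client` object and the loop shape are assumptions for illustration; the agent's real loop is not part of this hunk:

import asyncio

async def poll_loop(client) -> None:
    while True:
        # Flush anything buffered while the orchestrator was unreachable,
        # then resume normal task polling.
        await client.retry_pending_results()
        task = await client.fetch_next_task()
        if task is not None:
            ...  # dispatch to an executor, then await client.update_task(...)
        await asyncio.sleep(5)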
161
letsbe-sysadmin-agent/app/config.py
Normal file
@@ -0,0 +1,161 @@
"""Agent configuration via environment variables."""

import socket
from functools import lru_cache
from typing import Optional

from pydantic import Field
from pydantic_settings import BaseSettings, SettingsConfigDict

from app import __version__


class Settings(BaseSettings):
    """Agent settings loaded from environment variables.

    All settings are frozen after initialization to prevent runtime mutation.
    """

    model_config = SettingsConfigDict(
        env_file=".env",
        env_file_encoding="utf-8",
        frozen=True,  # Prevent runtime mutation
    )

    # Agent identity
    agent_version: str = Field(default=__version__, description="Agent version for API headers")
    hostname: str = Field(default_factory=socket.gethostname, description="Agent hostname")
    agent_id: Optional[str] = Field(default=None, description="Assigned by orchestrator after registration")

    # ============================================================
    # LOCAL_MODE SETTINGS (Phase 2)
    # When LOCAL_MODE=true, agent uses /register-local endpoint
    # with LOCAL_AGENT_KEY for registration (no registration token needed)
    # ============================================================
    local_mode: bool = Field(
        default=False,
        description="Enable LOCAL_MODE for single-tenant registration via LOCAL_AGENT_KEY"
    )
    local_agent_key: Optional[str] = Field(
        default=None,
        description="Key for local registration. Required when LOCAL_MODE=true."
    )

    # New secure registration (recommended for multi-tenant)
    registration_token: Optional[str] = Field(
        default=None,
        description="Registration token from orchestrator. Required for first-time registration (multi-tenant)."
    )

    # Agent credentials (set after registration, persisted to disk)
    agent_secret: Optional[str] = Field(
        default=None,
        description="Agent secret for authentication. Set after registration."
    )

    # Tenant assignment (derived from registration token, or can be set directly for legacy)
    tenant_id: Optional[str] = Field(
        default=None,
        description="Tenant UUID this agent belongs to. Set after registration."
    )

    # Orchestrator connection
    # Default URL is for Docker-based dev where orchestrator runs on the host.
    # When running directly on a Linux tenant server, set ORCHESTRATOR_URL to
    # the orchestrator's public URL (e.g., "https://orchestrator.letsbe.io").
    orchestrator_url: str = Field(
        default="http://host.docker.internal:8000",
        description="Orchestrator API base URL"
    )

    # Hub connection (for direct credential sync)
    # When HUB_URL and HUB_API_KEY are set, agent sends heartbeats with
    # credentials directly to the Hub (bypassing orchestrator for this purpose)
    hub_url: Optional[str] = Field(
        default=None,
        description="Hub API base URL for credential sync (e.g., https://hub.letsbe.io)"
    )
    hub_api_key: Optional[str] = Field(
        default=None,
        description="Hub API key for authentication (from ServerConnection.hubApiKey)"
    )
    hub_telemetry_enabled: bool = Field(
        default=True,
        description="Enable sending heartbeats with credentials to Hub"
    )

    # Legacy auth (deprecated - use registration_token + agent_secret instead)
    agent_token: Optional[str] = Field(
        default=None,
        description="[DEPRECATED] Legacy authentication token. Use agent_secret instead."
    )

    # Timing intervals (seconds)
    heartbeat_interval: int = Field(default=30, ge=5, le=300, description="Heartbeat interval")
    poll_interval: int = Field(default=5, ge=1, le=60, description="Task polling interval")

    # Logging
    log_level: str = Field(default="INFO", description="Log level (DEBUG, INFO, WARNING, ERROR)")
    log_json: bool = Field(default=True, description="Output logs as JSON")

    # Resilience
    max_concurrent_tasks: int = Field(default=3, ge=1, le=10, description="Max concurrent task executions")
    backoff_base: float = Field(default=1.0, ge=0.1, le=10.0, description="Base backoff time in seconds")
    backoff_max: float = Field(default=60.0, ge=10.0, le=300.0, description="Max backoff time in seconds")
    circuit_breaker_threshold: int = Field(default=5, ge=1, le=20, description="Consecutive failures to trip breaker")
    circuit_breaker_cooldown: int = Field(default=30, ge=10, le=900, description="Cooldown period in seconds")

    # Security - File operations
    allowed_file_root: str = Field(default="/opt/letsbe", description="Root directory for file operations")
    allowed_env_root: str = Field(default="/opt/letsbe/env", description="Root directory for ENV file operations")
    max_file_size: int = Field(default=10 * 1024 * 1024, description="Max file size in bytes (default 10MB)")

    # Security - Shell operations
    shell_timeout: int = Field(default=60, ge=5, le=600, description="Default shell command timeout")

    # Security - Docker operations
    allowed_compose_paths: list[str] = Field(
        default=["/opt/letsbe", "/home/letsbe"],
        description="Allowed directories for compose files"
    )
    allowed_stacks_root: str = Field(
        default="/opt/letsbe/stacks",
        description="Root directory for Docker stack operations"
    )

    # Local persistence
    pending_results_path: str = Field(
        default="~/.letsbe-agent/pending_results.json",
        description="Path for buffering unsent task results"
    )
    credentials_path: str = Field(
        default="~/.letsbe-agent/credentials.json",
        description="Path for persisting agent credentials after registration"
    )

    # Playwright browser automation
    playwright_artifacts_dir: str = Field(
        default="/opt/letsbe/playwright-artifacts",
        description="Directory for screenshots, traces, and other browser artifacts"
    )
    playwright_default_timeout_ms: int = Field(
        default=60000, ge=5000, le=300000,
        description="Default timeout for Playwright actions in milliseconds"
    )
    playwright_navigation_timeout_ms: int = Field(
        default=120000, ge=10000, le=300000,
        description="Timeout for page navigation in milliseconds"
    )
    mcp_service_url: Optional[str] = Field(
        default=None,
        description="URL for Playwright MCP sidecar service (for exploratory mode)"
    )


@lru_cache
def get_settings() -> Settings:
    """Get cached settings instance.

    Settings are loaded once and cached for the lifetime of the process.
    """
    return Settings()
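Because Settings is a pydantic-settings model, each field can also be supplied through an environment variable of the same name (matching is case-insensitive by default). A small usage sketch, assuming the package is importable as app.config:

import os

os.environ["HEARTBEAT_INTERVAL"] = "60"    # must be set before first access

from app.config import get_settings

settings = get_settings()                  # built once, cached by @lru_cache
print(settings.heartbeat_interval)         # -> 60
# settings.heartbeat_interval = 10         # would raise: the model is frozen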
69
letsbe-sysadmin-agent/app/executors/__init__.py
Normal file
@@ -0,0 +1,69 @@
"""Task executors registry."""

from typing import Type

from app.executors.base import BaseExecutor, ExecutionResult
from app.executors.composite_executor import CompositeExecutor
from app.executors.docker_executor import DockerExecutor
from app.executors.echo_executor import EchoExecutor
from app.executors.env_inspect_executor import EnvInspectExecutor
from app.executors.env_update_executor import EnvUpdateExecutor
from app.executors.file_executor import FileExecutor
from app.executors.file_inspect_executor import FileInspectExecutor
from app.executors.nextcloud_executor import NextcloudSetDomainExecutor
from app.executors.playwright_executor import PlaywrightExecutor
from app.executors.shell_executor import ShellExecutor

# Registry mapping task types to executor classes
EXECUTOR_REGISTRY: dict[str, Type[BaseExecutor]] = {
    "ECHO": EchoExecutor,
    "SHELL": ShellExecutor,
    "FILE_WRITE": FileExecutor,
    "ENV_UPDATE": EnvUpdateExecutor,
    "ENV_INSPECT": EnvInspectExecutor,
    "FILE_INSPECT": FileInspectExecutor,
    "DOCKER_RELOAD": DockerExecutor,
    "COMPOSITE": CompositeExecutor,
    "PLAYWRIGHT": PlaywrightExecutor,
    "NEXTCLOUD_SET_DOMAIN": NextcloudSetDomainExecutor,
}


def get_executor(task_type: str) -> BaseExecutor:
    """Get an executor instance for a task type.

    Args:
        task_type: The type of task to execute

    Returns:
        Executor instance

    Raises:
        ValueError: If task type is not registered
    """
    if task_type not in EXECUTOR_REGISTRY:
        raise ValueError(
            f"Unknown task type: {task_type}. "
            f"Available: {list(EXECUTOR_REGISTRY.keys())}"
        )

    executor_class = EXECUTOR_REGISTRY[task_type]
    return executor_class()


__all__ = [
    "BaseExecutor",
    "ExecutionResult",
    "EchoExecutor",
    "ShellExecutor",
    "FileExecutor",
    "FileInspectExecutor",
    "EnvUpdateExecutor",
    "EnvInspectExecutor",
    "DockerExecutor",
    "CompositeExecutor",
    "PlaywrightExecutor",
    "NextcloudSetDomainExecutor",
    "EXECUTOR_REGISTRY",
    "get_executor",
]
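A quick connectivity check through the registry, assuming the agent package is on the import path:

import asyncio

from app.executors import get_executor

async def main() -> None:
    executor = get_executor("ECHO")        # raises ValueError for unknown types
    result = await executor.execute({"message": "ping"})
    print(result.success, result.data)     # -> True {'echoed': 'ping'}

asyncio.run(main())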
59
letsbe-sysadmin-agent/app/executors/base.py
Normal file
@@ -0,0 +1,59 @@
"""Base executor class for all task types."""

from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Any, Optional

from app.utils.logger import get_logger


@dataclass
class ExecutionResult:
    """Result of task execution."""

    success: bool
    data: dict[str, Any]
    error: Optional[str] = None
    duration_ms: Optional[float] = None


class BaseExecutor(ABC):
    """Abstract base class for task executors.

    All executors must implement the execute() method.
    """

    def __init__(self):
        self.logger = get_logger(self.__class__.__name__)

    @property
    @abstractmethod
    def task_type(self) -> str:
        """Return the task type this executor handles."""
        pass

    @abstractmethod
    async def execute(self, payload: dict[str, Any]) -> ExecutionResult:
        """Execute the task with the given payload.

        Args:
            payload: Task-specific payload data

        Returns:
            ExecutionResult with success status and result data
        """
        pass

    def validate_payload(self, payload: dict[str, Any], required_fields: list[str]) -> None:
        """Validate that required fields are present in payload.

        Args:
            payload: Task payload
            required_fields: List of required field names

        Raises:
            ValueError: If a required field is missing
        """
        missing = [f for f in required_fields if f not in payload]
        if missing:
            raise ValueError(f"Missing required fields: {', '.join(missing)}")
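For context, adding a new task type only needs a subclass plus a registry entry. A hypothetical executor (not part of this commit) might look like the sketch below; it would still have to be added to EXECUTOR_REGISTRY in app/executors/__init__.py to become reachable:

from typing import Any

from app.executors.base import BaseExecutor, ExecutionResult

class UptimeExecutor(BaseExecutor):        # hypothetical example, not in this diff
    @property
    def task_type(self) -> str:
        return "UPTIME"

    async def execute(self, payload: dict[str, Any]) -> ExecutionResult:
        # Linux-only: first field of /proc/uptime is seconds since boot
        with open("/proc/uptime") as f:
            seconds = float(f.read().split()[0])
        return ExecutionResult(success=True, data={"uptime_seconds": seconds})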
207
letsbe-sysadmin-agent/app/executors/composite_executor.py
Normal file
@@ -0,0 +1,207 @@
"""Composite executor for sequential task execution."""

import time
from typing import Any

from app.executors.base import BaseExecutor, ExecutionResult


class CompositeExecutor(BaseExecutor):
    """Execute a sequence of tasks in order.

    Executes each task in the sequence using the appropriate executor.
    Stops on first failure and returns partial results.

    Security measures:
    - Each sub-task uses the same validated executors
    - Sequential execution only (no parallelism)
    - Stops immediately on first failure

    Payload:
        {
            "steps": [
                {"type": "ENV_UPDATE", "payload": {...}},
                {"type": "DOCKER_RELOAD", "payload": {...}}
            ]
        }

    Result (success):
        {
            "steps": [
                {"index": 0, "type": "ENV_UPDATE", "status": "completed", "result": {...}},
                {"index": 1, "type": "DOCKER_RELOAD", "status": "completed", "result": {...}}
            ]
        }

    Result (failure at step 1):
        ExecutionResult.success = False
        ExecutionResult.error = "Step 1 (DOCKER_RELOAD) failed: <error message>"
        ExecutionResult.data = {
            "steps": [
                {"index": 0, "type": "ENV_UPDATE", "status": "completed", "result": {...}},
                {"index": 1, "type": "DOCKER_RELOAD", "status": "failed", "error": "..."}
            ]
        }
    """

    @property
    def task_type(self) -> str:
        return "COMPOSITE"

    async def execute(self, payload: dict[str, Any]) -> ExecutionResult:
        """Execute a sequence of tasks.

        Args:
            payload: Must contain "steps" list of step definitions

        Returns:
            ExecutionResult with execution summary
        """
        self.validate_payload(payload, ["steps"])

        steps = payload["steps"]

        # Validate steps is a non-empty list
        if not isinstance(steps, list):
            return ExecutionResult(
                success=False,
                data={"steps": []},
                error="'steps' must be a list of step definitions",
            )

        if not steps:
            return ExecutionResult(
                success=False,
                data={"steps": []},
                error="'steps' cannot be empty",
            )

        # Import registry here to avoid circular imports
        from app.executors import get_executor

        self.logger.info(
            "composite_starting",
            total_steps=len(steps),
            step_types=[step.get("type", "UNKNOWN") if isinstance(step, dict) else "INVALID" for step in steps],
        )

        start_time = time.time()
        results: list[dict[str, Any]] = []

        for i, step in enumerate(steps):
            # Validate step structure
            if not isinstance(step, dict):
                self.logger.error("composite_invalid_step", step_index=i)
                return ExecutionResult(
                    success=False,
                    data={"steps": results},
                    error=f"Step {i} is not a valid step definition (must be dict)",
                )

            step_type = step.get("type")
            step_payload = step.get("payload", {})

            if not step_type:
                self.logger.error("composite_missing_type", step_index=i)
                return ExecutionResult(
                    success=False,
                    data={"steps": results},
                    error=f"Step {i} missing 'type' field",
                )

            self.logger.info(
                "composite_step_starting",
                step_index=i,
                step_type=step_type,
            )

            # Get executor for this step type
            try:
                executor = get_executor(step_type)
            except ValueError as e:
                self.logger.error(
                    "composite_unknown_type",
                    step_index=i,
                    step_type=step_type,
                    error=str(e),
                )
                return ExecutionResult(
                    success=False,
                    data={"steps": results},
                    error=f"Step {i} ({step_type}) failed: {e}",
                )

            # Execute the step
            try:
                result = await executor.execute(step_payload)

                step_result: dict[str, Any] = {
                    "index": i,
                    "type": step_type,
                    "status": "completed" if result.success else "failed",
                    "result": result.data,
                }
                if result.error:
                    step_result["error"] = result.error

                results.append(step_result)

                self.logger.info(
                    "composite_step_completed",
                    step_index=i,
                    step_type=step_type,
                    success=result.success,
                )

                # Stop on first failure
                if not result.success:
                    duration_ms = (time.time() - start_time) * 1000
                    self.logger.warning(
                        "composite_step_failed",
                        step_index=i,
                        step_type=step_type,
                        error=result.error,
                    )
                    return ExecutionResult(
                        success=False,
                        data={"steps": results},
                        error=f"Step {i} ({step_type}) failed: {result.error}",
                        duration_ms=duration_ms,
                    )

            except Exception as e:
                duration_ms = (time.time() - start_time) * 1000
                self.logger.error(
                    "composite_step_exception",
                    step_index=i,
                    step_type=step_type,
                    error=str(e),
                )
                # Add failed step to results
                results.append({
                    "index": i,
                    "type": step_type,
                    "status": "failed",
                    "error": str(e),
                })
                return ExecutionResult(
                    success=False,
                    data={"steps": results},
                    error=f"Step {i} ({step_type}) failed: {e}",
                    duration_ms=duration_ms,
                )

        # All steps completed successfully
        duration_ms = (time.time() - start_time) * 1000

        self.logger.info(
            "composite_completed",
            steps_completed=len(results),
            duration_ms=duration_ms,
        )

        return ExecutionResult(
            success=True,
            data={"steps": results},
            duration_ms=duration_ms,
        )
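A concrete payload for the common "change config, then reload the stack" flow described in the docstring (paths and values are illustrative):

payload = {
    "steps": [
        {"type": "ENV_UPDATE", "payload": {
            "path": "/opt/letsbe/env/chatwoot.env",
            "updates": {"FRONTEND_URL": "https://chat.example.com"},
        }},
        {"type": "DOCKER_RELOAD", "payload": {
            "compose_dir": "/opt/letsbe/stacks/chatwoot",
            "pull": True,
        }},
    ]
}
# If DOCKER_RELOAD fails, the ExecutionResult still carries the completed
# ENV_UPDATE step, so the orchestrator can see exactly how far execution got.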
290
letsbe-sysadmin-agent/app/executors/docker_executor.py
Normal file
@@ -0,0 +1,290 @@
"""Docker Compose executor for container management."""

import asyncio
import subprocess
import time
from pathlib import Path
from typing import Any

from app.config import get_settings
from app.executors.base import BaseExecutor, ExecutionResult
from app.utils.validation import ValidationError, validate_file_path


class DockerExecutor(BaseExecutor):
    """Execute Docker Compose operations with security controls.

    Security measures:
    - Directory validation against allowed stacks root
    - Compose file existence verification
    - Path traversal prevention
    - Timeout enforcement on each subprocess
    - No shell=True, command list only

    Payload:
        {
            "compose_dir": "/opt/letsbe/stacks/myapp",
            "pull": true  # Optional, defaults to false
        }

    Result:
        {
            "compose_dir": "/opt/letsbe/stacks/myapp",
            "compose_file": "/opt/letsbe/stacks/myapp/docker-compose.yml",
            "pull_ran": true,
            "logs": {
                "pull": "<stdout+stderr>",
                "up": "<stdout+stderr>"
            }
        }
    """

    # Compose file search order
    COMPOSE_FILE_NAMES = ["docker-compose.yml", "compose.yml"]

    # Default timeout for each docker command (seconds)
    DEFAULT_COMMAND_TIMEOUT = 300

    @property
    def task_type(self) -> str:
        return "DOCKER_RELOAD"

    async def execute(self, payload: dict[str, Any]) -> ExecutionResult:
        """Execute Docker Compose pull (optional) and up -d --remove-orphans.

        Args:
            payload: Must contain "compose_dir", optionally "pull" (bool) and "timeout"

        Returns:
            ExecutionResult with reload confirmation and logs
        """
        self.validate_payload(payload, ["compose_dir"])
        settings = get_settings()

        compose_dir = payload["compose_dir"]
        pull = payload.get("pull", False)
        timeout = payload.get("timeout", self.DEFAULT_COMMAND_TIMEOUT)

        # Validate compose directory is under allowed stacks root
        try:
            validated_dir = validate_file_path(
                compose_dir,
                settings.allowed_stacks_root,
                must_exist=True,
            )
        except ValidationError as e:
            self.logger.warning("docker_dir_validation_failed", path=compose_dir, error=str(e))
            return ExecutionResult(
                success=False,
                data={},
                error=f"Directory validation failed: {e}",
            )

        # Verify it's actually a directory
        if not validated_dir.is_dir():
            self.logger.warning("docker_not_directory", path=compose_dir)
            return ExecutionResult(
                success=False,
                data={},
                error=f"Path is not a directory: {compose_dir}",
            )

        # Find compose file in order of preference
        compose_file = self._find_compose_file(validated_dir)
        if compose_file is None:
            self.logger.warning("docker_compose_not_found", dir=compose_dir)
            return ExecutionResult(
                success=False,
                data={},
                error=f"No compose file found in {compose_dir}. "
                      f"Looked for: {', '.join(self.COMPOSE_FILE_NAMES)}",
            )

        self.logger.info(
            "docker_reloading",
            compose_dir=str(validated_dir),
            compose_file=str(compose_file),
            pull=pull,
        )

        start_time = time.time()
        logs: dict[str, str] = {}
        pull_ran = False

        try:
            # Run pull if requested
            if pull:
                pull_ran = True
                exit_code, stdout, stderr = await self._run_compose_command(
                    compose_file,
                    validated_dir,
                    ["pull"],
                    timeout,
                )
                logs["pull"] = self._combine_output(stdout, stderr)

                if exit_code != 0:
                    duration_ms = (time.time() - start_time) * 1000
                    self.logger.warning(
                        "docker_pull_failed",
                        compose_dir=str(validated_dir),
                        exit_code=exit_code,
                        stderr=stderr[:500] if stderr else None,
                    )
                    return ExecutionResult(
                        success=False,
                        data={
                            "compose_dir": str(validated_dir),
                            "compose_file": str(compose_file),
                            "pull_ran": pull_ran,
                            "logs": logs,
                        },
                        error=f"Docker pull failed with exit code {exit_code}",
                        duration_ms=duration_ms,
                    )

            # Run up -d --remove-orphans
            exit_code, stdout, stderr = await self._run_compose_command(
                compose_file,
                validated_dir,
                ["up", "-d", "--remove-orphans"],
                timeout,
            )
            logs["up"] = self._combine_output(stdout, stderr)

            duration_ms = (time.time() - start_time) * 1000
            success = exit_code == 0

            if success:
                self.logger.info(
                    "docker_reloaded",
                    compose_dir=str(validated_dir),
                    exit_code=exit_code,
                    duration_ms=duration_ms,
                )
            else:
                self.logger.warning(
                    "docker_reload_failed",
                    compose_dir=str(validated_dir),
                    exit_code=exit_code,
                    stderr=stderr[:500] if stderr else None,
                )

            return ExecutionResult(
                success=success,
                data={
                    "compose_dir": str(validated_dir),
                    "compose_file": str(compose_file),
                    "pull_ran": pull_ran,
                    "logs": logs,
                },
                error=f"Docker up failed with exit code {exit_code}" if not success else None,
                duration_ms=duration_ms,
            )

        except asyncio.TimeoutError:
            duration_ms = (time.time() - start_time) * 1000
            self.logger.error("docker_timeout", compose_dir=str(validated_dir), timeout=timeout)
            return ExecutionResult(
                success=False,
                data={
                    "compose_dir": str(validated_dir),
                    "compose_file": str(compose_file),
                    "pull_ran": pull_ran,
                    "logs": logs,
                },
                error=f"Docker operation timed out after {timeout} seconds",
                duration_ms=duration_ms,
            )

        except Exception as e:
            duration_ms = (time.time() - start_time) * 1000
            self.logger.error("docker_error", compose_dir=str(validated_dir), error=str(e))
            return ExecutionResult(
                success=False,
                data={
                    "compose_dir": str(validated_dir),
                    "compose_file": str(compose_file),
                    "pull_ran": pull_ran,
                    "logs": logs,
                },
                error=str(e),
                duration_ms=duration_ms,
            )

    def _find_compose_file(self, compose_dir: Path) -> Path | None:
        """Find compose file in the directory.

        Searches in order: docker-compose.yml, compose.yml

        Args:
            compose_dir: Directory to search in

        Returns:
            Path to compose file, or None if not found
        """
        for filename in self.COMPOSE_FILE_NAMES:
            compose_file = compose_dir / filename
            if compose_file.exists():
                return compose_file
        return None

    def _combine_output(self, stdout: str, stderr: str) -> str:
        """Combine stdout and stderr into a single string.

        Args:
            stdout: Standard output
            stderr: Standard error

        Returns:
            Combined output string
        """
        parts = []
        if stdout:
            parts.append(stdout)
        if stderr:
            parts.append(stderr)
        return "\n".join(parts)

    async def _run_compose_command(
        self,
        compose_file: Path,
        compose_dir: Path,
        args: list[str],
        timeout: int,
    ) -> tuple[int, str, str]:
        """Run a docker compose command.

        Args:
            compose_file: Path to compose file
            compose_dir: Working directory
            args: Additional arguments after 'docker compose -f <file>'
            timeout: Operation timeout in seconds

        Returns:
            Tuple of (exit_code, stdout, stderr)
        """
        def _run() -> tuple[int, str, str]:
            # Build command: docker compose -f <file> <args>
            cmd = [
                "docker",
                "compose",
                "-f",
                str(compose_file),
            ] + args

            # Run command from compose directory, no shell=True
            result = subprocess.run(
                cmd,
                capture_output=True,
                text=True,
                timeout=timeout,
                cwd=str(compose_dir),
            )

            return result.returncode, result.stdout, result.stderr

        return await asyncio.wait_for(
            asyncio.to_thread(_run),
            timeout=timeout + 30,  # Watchdog with buffer
        )
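Invoking the executor directly, e.g. from a test — the stack directory is illustrative and must resolve under allowed_stacks_root on the host running this:

import asyncio

from app.executors.docker_executor import DockerExecutor

result = asyncio.run(DockerExecutor().execute({
    "compose_dir": "/opt/letsbe/stacks/myapp",
    "pull": True,      # run `docker compose pull` before `up -d --remove-orphans`
    "timeout": 300,    # per-command limit; asyncio adds a +30s watchdog on top
}))
print(result.data["logs"].get("up", ""))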
45
letsbe-sysadmin-agent/app/executors/echo_executor.py
Normal file
@@ -0,0 +1,45 @@
"""Echo executor for testing and debugging."""

from typing import Any

from app.executors.base import BaseExecutor, ExecutionResult


class EchoExecutor(BaseExecutor):
    """Simple echo executor that returns the payload as-is.

    Used for testing connectivity and task flow.

    Payload:
        {
            "message": "string to echo back"
        }

    Result:
        {
            "echoed": "string that was sent"
        }
    """

    @property
    def task_type(self) -> str:
        return "ECHO"

    async def execute(self, payload: dict[str, Any]) -> ExecutionResult:
        """Echo back the payload message.

        Args:
            payload: Must contain "message" field

        Returns:
            ExecutionResult with the echoed message
        """
        self.validate_payload(payload, ["message"])

        message = payload["message"]
        self.logger.info("echo_executing", message=message)

        return ExecutionResult(
            success=True,
            data={"echoed": message},
        )
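A one-line smoke test for the task pipeline:

import asyncio

from app.executors.echo_executor import EchoExecutor

result = asyncio.run(EchoExecutor().execute({"message": "hello"}))
assert result.success and result.data == {"echoed": "hello"}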
161
letsbe-sysadmin-agent/app/executors/env_inspect_executor.py
Normal file
@@ -0,0 +1,161 @@
"""ENV file inspection executor for reading current values."""

import time
from typing import Any

from app.config import get_settings
from app.executors.base import BaseExecutor, ExecutionResult
from app.utils.validation import ValidationError, validate_file_path


class EnvInspectExecutor(BaseExecutor):
    """Read ENV files to inspect current values.

    Security measures:
    - Path validation against allowed env root (/opt/letsbe/env)
    - Directory traversal prevention
    - File must exist (no blind path probing)
    - Read-only operation (no file modification)

    Payload:
        {
            "path": "/opt/letsbe/env/chatwoot.env",
            "keys": ["FRONTEND_URL", "BACKEND_URL"]  # optional, null returns all
        }

    Result (success):
        {
            "path": "/opt/letsbe/env/chatwoot.env",
            "keys": {
                "FRONTEND_URL": "https://...",
                "BACKEND_URL": "https://..."
            }
        }
    """

    @property
    def task_type(self) -> str:
        return "ENV_INSPECT"

    async def execute(self, payload: dict[str, Any]) -> ExecutionResult:
        """Read ENV file and return current key-value pairs.

        Args:
            payload: Must contain "path", optionally "keys" to filter

        Returns:
            ExecutionResult with dict of key-value pairs
        """
        # Path is always required
        if "path" not in payload:
            raise ValueError("Missing required field: path")

        settings = get_settings()

        file_path = payload["path"]
        requested_keys = payload.get("keys")

        # Validate keys is a list if provided
        if requested_keys is not None and not isinstance(requested_keys, list):
            return ExecutionResult(
                success=False,
                data={},
                error="'keys' must be a list of key names or null",
            )

        # Validate path is under allowed env root
        try:
            validated_path = validate_file_path(
                file_path,
                settings.allowed_env_root,
                must_exist=True,  # File MUST exist for inspect
            )
        except ValidationError as e:
            self.logger.warning("env_path_validation_failed", path=file_path, error=str(e))
            return ExecutionResult(
                success=False,
                data={},
                error=f"Path validation failed: {e}",
            )

        self.logger.info(
            "env_inspecting",
            path=str(validated_path),
            filter_keys=requested_keys,
        )

        start_time = time.time()

        try:
            # Read and parse the ENV file
            content = validated_path.read_text(encoding="utf-8")
            all_keys = self._parse_env_file(content)

            # Filter keys if requested
            if requested_keys is None:
                result_keys = all_keys
            else:
                # Return only requested keys that exist (ignore unknown)
                result_keys = {k: v for k, v in all_keys.items() if k in requested_keys}

            duration_ms = (time.time() - start_time) * 1000

            self.logger.info(
                "env_inspected",
                path=str(validated_path),
                keys_returned=len(result_keys),
                duration_ms=duration_ms,
            )

            return ExecutionResult(
                success=True,
                data={
                    "path": str(validated_path),
                    "keys": result_keys,
                },
                duration_ms=duration_ms,
            )

        except Exception as e:
            duration_ms = (time.time() - start_time) * 1000
            self.logger.error("env_inspect_error", path=str(validated_path), error=str(e))
            return ExecutionResult(
                success=False,
                data={},
                error=str(e),
                duration_ms=duration_ms,
            )

    def _parse_env_file(self, content: str) -> dict[str, str]:
        """Parse ENV file content into key-value dict.

        Handles:
        - KEY=value format
        - Lines starting with # (comments)
        - Empty lines
        - Whitespace trimming
        - Quoted values (single and double quotes)

        Args:
            content: Raw ENV file content

        Returns:
            Dict of key-value pairs
        """
        env_dict = {}
        for line in content.splitlines():
            line = line.strip()
            # Skip empty lines and comments
            if not line or line.startswith("#"):
                continue
            # Split on first = only
            if "=" in line:
                key, value = line.split("=", 1)
                key = key.strip()
                value = value.strip()
                # Remove surrounding quotes if present
                if (value.startswith('"') and value.endswith('"')) or \
                   (value.startswith("'") and value.endswith("'")):
                    value = value[1:-1]
                env_dict[key] = value
        return env_dict
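Example invocation, assuming a file exists under the configured env root:

import asyncio

from app.executors.env_inspect_executor import EnvInspectExecutor

result = asyncio.run(EnvInspectExecutor().execute({
    "path": "/opt/letsbe/env/chatwoot.env",
    "keys": ["FRONTEND_URL"],   # omit or pass None to return every key
}))
print(result.data["keys"])      # requested keys that don't exist are ignored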
285
letsbe-sysadmin-agent/app/executors/env_update_executor.py
Normal file
@@ -0,0 +1,285 @@
"""ENV file update executor with atomic writes and key validation."""

import asyncio
import os
import stat
import tempfile
import time
from pathlib import Path
from typing import Any

from app.config import get_settings
from app.executors.base import BaseExecutor, ExecutionResult
from app.utils.validation import ValidationError, validate_env_key, validate_file_path


class EnvUpdateExecutor(BaseExecutor):
    """Update ENV files with key-value merging and removal.

    Security measures:
    - Path validation against allowed env root (/opt/letsbe/env)
    - ENV key format validation (^[A-Z][A-Z0-9_]*$)
    - Atomic writes (temp file + fsync + rename)
    - Secure permissions (chmod 640)
    - Directory traversal prevention

    Payload:
        {
            "path": "/opt/letsbe/env/chatwoot.env",
            "updates": {
                "DATABASE_URL": "postgres://localhost/mydb",
                "API_KEY": "secret123"
            },
            "remove_keys": ["OLD_KEY", "DEPRECATED_VAR"]  # optional
        }

    Result:
        {
            "updated_keys": ["DATABASE_URL", "API_KEY"],
            "removed_keys": ["OLD_KEY"],
            "path": "/opt/letsbe/env/chatwoot.env"
        }
    """

    # Secure file permissions: owner rw, group r, others none (640)
    FILE_MODE = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP  # 0o640

    @property
    def task_type(self) -> str:
        return "ENV_UPDATE"

    async def execute(self, payload: dict[str, Any]) -> ExecutionResult:
        """Update ENV file with new key-value pairs and optional removals.

        Args:
            payload: Must contain "path" and at least one of "updates" or "remove_keys"

        Returns:
            ExecutionResult with lists of updated and removed keys
        """
        # Path is always required
        if "path" not in payload:
            raise ValueError("Missing required field: path")

        settings = get_settings()

        file_path = payload["path"]
        updates = payload.get("updates", {})
        remove_keys = payload.get("remove_keys", [])

        # Validate that at least one operation is provided
        if not updates and not remove_keys:
            return ExecutionResult(
                success=False,
                data={},
                error="At least one of 'updates' or 'remove_keys' must be provided",
            )

        # Validate updates is a dict if provided
        if updates and not isinstance(updates, dict):
            return ExecutionResult(
                success=False,
                data={},
                error="'updates' must be a dictionary of key-value pairs",
            )

        # Validate remove_keys is a list if provided
        if remove_keys and not isinstance(remove_keys, list):
            return ExecutionResult(
                success=False,
                data={},
                error="'remove_keys' must be a list of key names",
            )

        # Validate path is under allowed env root
        try:
            validated_path = validate_file_path(
                file_path,
                settings.allowed_env_root,
                must_exist=False,
            )
        except ValidationError as e:
            self.logger.warning("env_path_validation_failed", path=file_path, error=str(e))
            return ExecutionResult(
                success=False,
                data={},
                error=f"Path validation failed: {e}",
            )

        # Validate all update keys match pattern
        try:
            for key in updates.keys():
                validate_env_key(key)
        except ValidationError as e:
            self.logger.warning("env_key_validation_failed", error=str(e))
            return ExecutionResult(
                success=False,
                data={},
                error=str(e),
            )

        # Validate all remove_keys match pattern
        try:
            for key in remove_keys:
                if not isinstance(key, str):
                    raise ValidationError(f"remove_keys must contain strings, got: {type(key).__name__}")
                validate_env_key(key)
        except ValidationError as e:
            self.logger.warning("env_remove_key_validation_failed", error=str(e))
            return ExecutionResult(
                success=False,
                data={},
                error=str(e),
            )

        self.logger.info(
            "env_updating",
            path=str(validated_path),
            update_keys=list(updates.keys()) if updates else [],
            remove_keys=remove_keys,
        )

        start_time = time.time()

        try:
            # Read existing ENV file if it exists
            existing_env = {}
            if validated_path.exists():
                content = validated_path.read_text(encoding="utf-8")
                existing_env = self._parse_env_file(content)

            # Track which keys were actually removed (existed before)
            actually_removed = [k for k in remove_keys if k in existing_env]

            # Apply updates (new values overwrite existing)
            merged_env = {**existing_env, **updates}

            # Remove specified keys
            for key in remove_keys:
                merged_env.pop(key, None)

            # Serialize and write atomically with secure permissions
            new_content = self._serialize_env(merged_env)
            await self._atomic_write_secure(validated_path, new_content.encode("utf-8"))

            duration_ms = (time.time() - start_time) * 1000

            self.logger.info(
                "env_updated",
                path=str(validated_path),
                updated_keys=list(updates.keys()) if updates else [],
                removed_keys=actually_removed,
                duration_ms=duration_ms,
            )

            return ExecutionResult(
                success=True,
                data={
                    "updated_keys": list(updates.keys()) if updates else [],
                    "removed_keys": actually_removed,
                    "path": str(validated_path),
                },
                duration_ms=duration_ms,
            )

        except Exception as e:
            duration_ms = (time.time() - start_time) * 1000
            self.logger.error("env_update_error", path=str(validated_path), error=str(e))
            return ExecutionResult(
                success=False,
                data={},
                error=str(e),
                duration_ms=duration_ms,
            )

    def _parse_env_file(self, content: str) -> dict[str, str]:
        """Parse ENV file content into key-value dict.

        Handles:
        - KEY=value format
        - Lines starting with # (comments)
        - Empty lines
        - Whitespace trimming
        - Quoted values (single and double quotes)

        Args:
            content: Raw ENV file content

        Returns:
            Dict of key-value pairs
        """
        env_dict = {}
        for line in content.splitlines():
            line = line.strip()
            # Skip empty lines and comments
            if not line or line.startswith("#"):
                continue
            # Split on first = only
            if "=" in line:
                key, value = line.split("=", 1)
                key = key.strip()
                value = value.strip()
                # Remove surrounding quotes if present
                if (value.startswith('"') and value.endswith('"')) or \
                   (value.startswith("'") and value.endswith("'")):
                    value = value[1:-1]
                env_dict[key] = value
        return env_dict

    def _serialize_env(self, env_dict: dict[str, str]) -> str:
        """Serialize dict to ENV file format.

        Args:
            env_dict: Key-value pairs

        Returns:
            ENV file content string with sorted keys
        """
        lines = []
        for key, value in sorted(env_dict.items()):
            # Quote values that contain spaces, newlines, or equals signs
            if " " in str(value) or "\n" in str(value) or "=" in str(value):
                value = f'"{value}"'
            lines.append(f"{key}={value}")
        return "\n".join(lines) + "\n" if lines else ""

    async def _atomic_write_secure(self, path: Path, content: bytes) -> int:
        """Write file atomically with secure permissions.

        Uses temp file + fsync + rename pattern for atomicity.
        Sets chmod 640 (owner rw, group r, others none) for security.

        Args:
            path: Target file path
            content: Content to write

        Returns:
            Number of bytes written
        """
        def _write() -> int:
            # Ensure parent directory exists
            path.parent.mkdir(parents=True, exist_ok=True)

            # Write to temp file in same directory (for atomic rename)
            fd, temp_path = tempfile.mkstemp(
                dir=path.parent,
                prefix=".tmp_",
                suffix=".env",
            )
            temp_path_obj = Path(temp_path)

            try:
                os.write(fd, content)
                os.fsync(fd)  # Ensure data is on disk
            finally:
                os.close(fd)

            # Set secure permissions before rename (640)
            os.chmod(temp_path, self.FILE_MODE)

            # Atomic rename
            os.replace(temp_path_obj, path)

            return len(content)

        return await asyncio.to_thread(_write)
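Example invocation (illustrative path and values); note that update keys must match ^[A-Z][A-Z0-9_]*$ or the task fails before any write happens:

import asyncio

from app.executors.env_update_executor import EnvUpdateExecutor

result = asyncio.run(EnvUpdateExecutor().execute({
    "path": "/opt/letsbe/env/chatwoot.env",
    "updates": {"FRONTEND_URL": "https://chat.example.com"},
    "remove_keys": ["DEPRECATED_VAR"],
}))
# removed_keys lists only keys that actually existed before the write
print(result.data["updated_keys"], result.data["removed_keys"])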
223
letsbe-sysadmin-agent/app/executors/file_executor.py
Normal file
@@ -0,0 +1,223 @@
"""File write executor with security controls."""

import os
import tempfile
import time
from pathlib import Path
from typing import Any

from app.config import get_settings
from app.executors.base import BaseExecutor, ExecutionResult
from app.utils.validation import ValidationError, sanitize_input, validate_file_path


class FileExecutor(BaseExecutor):
    """Write files with strict security controls.

    Security measures:
    - Path validation against allowed root directories
    - Directory traversal prevention
    - Maximum file size enforcement
    - Atomic writes (temp file + rename)
    - Content sanitization

    Supported roots:
    - /opt/agent_data (general file operations)
    - /opt/letsbe/env (ENV file operations)

    Payload:
        {
            "path": "/opt/letsbe/env/app.env",
            "content": "KEY=value\\nKEY2=value2",
            "mode": "write"  # "write" (default) or "append"
        }

    Result:
        {
            "written": true,
            "path": "/opt/letsbe/env/app.env",
            "size": 123
        }
    """

    @property
    def task_type(self) -> str:
        return "FILE_WRITE"

    async def execute(self, payload: dict[str, Any]) -> ExecutionResult:
        """Write content to a file.

        Args:
            payload: Must contain "path" and "content", optionally "mode"

        Returns:
            ExecutionResult with write confirmation
        """
        self.validate_payload(payload, ["path", "content"])
        settings = get_settings()

        file_path = payload["path"]
        content = payload["content"]
        mode = payload.get("mode", "write")

        if mode not in ("write", "append"):
            return ExecutionResult(
                success=False,
                data={},
                error=f"Invalid mode: {mode}. Must be 'write' or 'append'",
            )

        # Validate path against allowed roots (env or general)
        # Try env root first if path starts with it, otherwise use general root
        try:
            allowed_root = self._determine_allowed_root(file_path, settings)
            validated_path = validate_file_path(
                file_path,
                allowed_root,
                must_exist=False,
            )
            sanitized_content = sanitize_input(content, max_length=settings.max_file_size)
        except ValidationError as e:
            self.logger.warning("file_validation_failed", path=file_path, error=str(e))
            return ExecutionResult(
                success=False,
                data={},
                error=f"Validation failed: {e}",
            )

        # Check content size
        content_bytes = sanitized_content.encode("utf-8")
        if len(content_bytes) > settings.max_file_size:
            return ExecutionResult(
                success=False,
                data={},
                error=f"Content size {len(content_bytes)} exceeds max {settings.max_file_size}",
            )

        self.logger.info(
            "file_writing",
            path=str(validated_path),
            mode=mode,
            size=len(content_bytes),
        )

        start_time = time.time()

        try:
            if mode == "write":
                bytes_written = await self._atomic_write(validated_path, content_bytes)
            else:
                bytes_written = await self._append(validated_path, content_bytes)

            duration_ms = (time.time() - start_time) * 1000

            self.logger.info(
                "file_written",
                path=str(validated_path),
                bytes_written=bytes_written,
                duration_ms=duration_ms,
            )

            return ExecutionResult(
                success=True,
                data={
                    "written": True,
                    "path": str(validated_path),
                    "size": bytes_written,
                },
                duration_ms=duration_ms,
            )

        except Exception as e:
            duration_ms = (time.time() - start_time) * 1000
            self.logger.error("file_write_error", path=str(validated_path), error=str(e))
            return ExecutionResult(
                success=False,
                data={},
                error=str(e),
                duration_ms=duration_ms,
            )

    def _determine_allowed_root(self, file_path: str, settings) -> str:
        """Determine which allowed root to use based on file path.

        Args:
            file_path: The requested file path
            settings: Application settings

        Returns:
            The appropriate allowed root directory
        """
        from pathlib import Path as P

        # Normalize the path for comparison
        normalized = str(P(file_path).expanduser())

        # Check if path is under env root
        env_root = str(P(settings.allowed_env_root).expanduser())
        if normalized.startswith(env_root):
            return settings.allowed_env_root

        # Default to general file root
        return settings.allowed_file_root

    async def _atomic_write(self, path: Path, content: bytes) -> int:
        """Write file atomically using temp file + rename.

        Args:
            path: Target file path
            content: Content to write

        Returns:
            Number of bytes written
        """
        import asyncio

        def _write() -> int:
            # Ensure parent directory exists
            path.parent.mkdir(parents=True, exist_ok=True)

            # Write to temp file in same directory (for atomic rename)
            fd, temp_path = tempfile.mkstemp(
                dir=path.parent,
                prefix=".tmp_",
                suffix=path.suffix,
            )

            try:
                os.write(fd, content)
                os.fsync(fd)  # Ensure data is on disk
            finally:
                os.close(fd)

            # Atomic rename
            os.rename(temp_path, path)

            return len(content)

        return await asyncio.to_thread(_write)

    async def _append(self, path: Path, content: bytes) -> int:
        """Append content to file.

        Args:
            path: Target file path
            content: Content to append

        Returns:
            Number of bytes written
        """
        import asyncio

        def _append() -> int:
            # Ensure parent directory exists
            path.parent.mkdir(parents=True, exist_ok=True)

            with open(path, "ab") as f:
                written = f.write(content)
                f.flush()
                os.fsync(f.fileno())

            return written

        return await asyncio.to_thread(_append)
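Example invocation (illustrative path). Paths under allowed_env_root are validated against that root; everything else falls back to allowed_file_root:

import asyncio

from app.executors.file_executor import FileExecutor

result = asyncio.run(FileExecutor().execute({
    "path": "/opt/letsbe/env/app.env",
    "content": "KEY=value\n",
    "mode": "append",            # "write" = atomic replace, "append" = add to end
}))
print(result.data)               # {'written': True, 'path': ..., 'size': 10}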
153
letsbe-sysadmin-agent/app/executors/file_inspect_executor.py
Normal file
@@ -0,0 +1,153 @@
"""File inspection executor for reading portions of text files."""

import time
from typing import Any

from app.config import get_settings
from app.executors.base import BaseExecutor, ExecutionResult
from app.utils.validation import ValidationError, validate_file_path


class FileInspectExecutor(BaseExecutor):
    """Read portions of files for inspection.

    Security measures:
    - Path validation against allowed file root (/opt/letsbe)
    - Directory traversal prevention
    - File must exist (no blind path probing)
    - Read-only operation (no file modification)
    - Byte limit enforced (max 1MB)

    Payload:
        {
            "path": "/opt/letsbe/env/chatwoot.env",
            "max_bytes": 4096  # optional, default 4096, max 1MB
        }

    Result (success):
        {
            "path": "/opt/letsbe/env/chatwoot.env",
            "bytes_read": 123,
            "truncated": false,
            "content": "..."
        }
    """

    # Default and maximum byte limits
    DEFAULT_MAX_BYTES = 4096
    ABSOLUTE_MAX_BYTES = 1_048_576  # 1 MB

    @property
    def task_type(self) -> str:
        return "FILE_INSPECT"

    async def execute(self, payload: dict[str, Any]) -> ExecutionResult:
        """Read file content up to max_bytes.

        Args:
            payload: Must contain "path", optionally "max_bytes"

        Returns:
            ExecutionResult with file content and metadata
        """
        # Path is always required
        if "path" not in payload:
            raise ValueError("Missing required field: path")

        settings = get_settings()

        raw_path = payload["path"]
        max_bytes = payload.get("max_bytes", self.DEFAULT_MAX_BYTES)

        # Validate max_bytes is a valid integer
        try:
            max_bytes_int = int(max_bytes)
        except (TypeError, ValueError):
            return ExecutionResult(
                success=False,
                data={},
                error=f"Invalid max_bytes value: {max_bytes!r}",
            )

        # Validate max_bytes is within allowed range
        if max_bytes_int <= 0 or max_bytes_int > self.ABSOLUTE_MAX_BYTES:
            return ExecutionResult(
                success=False,
                data={},
                error=f"max_bytes must be between 1 and {self.ABSOLUTE_MAX_BYTES}",
            )

        # Validate path is under allowed file root
        try:
            validated_path = validate_file_path(
                raw_path,
                settings.allowed_file_root,
                must_exist=True,
            )
        except ValidationError as e:
            self.logger.warning("file_path_validation_failed", path=raw_path, error=str(e))
            return ExecutionResult(
                success=False,
                data={},
                error=f"Path validation failed: {e}",
            )

        self.logger.info(
            "file_inspecting",
            path=str(validated_path),
            max_bytes=max_bytes_int,
        )

        start_time = time.time()

        try:
            # Read up to max_bytes + 1 to detect truncation
            with validated_path.open("rb") as f:
                content_bytes = f.read(max_bytes_int + 1)
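            # Reading one extra byte is a cheap truncation probe: if more than
            # max_bytes came back, the file continues past the limit, so we can
            # report truncated=true without a separate (and racy) stat() call.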
            truncated = len(content_bytes) > max_bytes_int
            if truncated:
                content_bytes = content_bytes[:max_bytes_int]

            # Decode as UTF-8 with errors replaced
            content_text = content_bytes.decode("utf-8", errors="replace")

            duration_ms = (time.time() - start_time) * 1000

            self.logger.info(
                "file_inspected",
                path=str(validated_path),
                bytes_read=len(content_bytes),
                truncated=truncated,
                duration_ms=duration_ms,
            )

            return ExecutionResult(
                success=True,
                data={
                    "path": str(validated_path),
                    "bytes_read": len(content_bytes),
                    "truncated": truncated,
                    "content": content_text,
                },
                duration_ms=duration_ms,
            )

        except OSError as e:
            duration_ms = (time.time() - start_time) * 1000
            self.logger.error("file_inspect_read_error", path=str(validated_path), error=str(e))
            return ExecutionResult(
                success=False,
                data={},
                error=f"Failed to read file: {e}",
                duration_ms=duration_ms,
            )
        except Exception as e:
            duration_ms = (time.time() - start_time) * 1000
            self.logger.error("file_inspect_error", path=str(validated_path), error=str(e))
            return ExecutionResult(
                success=False,
                data={},
                error=str(e),
                duration_ms=duration_ms,
            )
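# A minimal usage sketch (hypothetical values; how the executor instance is
# constructed depends on BaseExecutor, which is defined elsewhere):
#
#     payload = {"path": "/opt/letsbe/env/chatwoot.env", "max_bytes": 2048}
#     result = await executor.execute(payload)   # -> ExecutionResult
#     result.data["truncated"]                   # False if the file fit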
358
letsbe-sysadmin-agent/app/executors/nextcloud_executor.py
Normal file
@@ -0,0 +1,358 @@
"""Nextcloud domain configuration executor."""

import asyncio
import subprocess
import time
from pathlib import Path
from typing import Any
from urllib.parse import urlparse

from app.executors.base import BaseExecutor, ExecutionResult


class NextcloudSetDomainExecutor(BaseExecutor):
    """Execute Nextcloud domain configuration via occ commands.

    This executor configures Nextcloud's external domain settings by running
    occ config:system:set commands via docker compose exec. It keeps the
    Orchestrator unaware of container names, occ paths, and docker-compose syntax.

    Security measures:
    - URL parsing with validation
    - No shell=True, command list only
    - Timeout enforcement on each subprocess

    Payload:
        {
            "public_url": "https://cloud.example.com"
        }

    Result (success):
        {
            "public_url": "https://cloud.example.com",
            "host": "cloud.example.com",
            "scheme": "https",
            "commands_executed": 3,
            "logs": {
                "overwritehost": "<stdout+stderr>",
                "overwriteprotocol": "<stdout+stderr>",
                "overwrite.cli.url": "<stdout+stderr>"
            }
        }

    Result (failure):
        {
            "public_url": "https://cloud.example.com",
            "host": "cloud.example.com",
            "scheme": "https",
            "commands_executed": 2,
            "failed_command": "overwriteprotocol",
            "failed_args": ["config:system:set", "overwriteprotocol", "--value=https"],
            "logs": {...}
        }
    """

    # TODO: These constants may need adjustment based on actual Nextcloud stack setup
    NEXTCLOUD_STACK_DIR = "/opt/letsbe/stacks/nextcloud"
    NEXTCLOUD_SERVICE_NAME = "app"
    NEXTCLOUD_OCC_PATH = "/var/www/html/occ"
    NEXTCLOUD_USER = "www-data"

    # Compose file search order (matches DockerExecutor)
    COMPOSE_FILE_NAMES = ["docker-compose.yml", "compose.yml"]

    # Default timeout for each occ command (seconds)
    DEFAULT_COMMAND_TIMEOUT = 60

    @property
    def task_type(self) -> str:
        return "NEXTCLOUD_SET_DOMAIN"

    async def execute(self, payload: dict[str, Any]) -> ExecutionResult:
        """Execute Nextcloud domain configuration commands.

        Runs three occ config:system:set commands to configure:
        - overwritehost: The domain/host portion of the URL
        - overwriteprotocol: The scheme (http/https)
        - overwrite.cli.url: The full public URL

        Args:
            payload: Must contain "public_url", optionally "timeout"

        Returns:
            ExecutionResult with configuration confirmation and logs
        """
        self.validate_payload(payload, ["public_url"])

        public_url = payload["public_url"]
        timeout = payload.get("timeout", self.DEFAULT_COMMAND_TIMEOUT)

        # Parse URL into components
        try:
            scheme, host, normalized_url = self._parse_public_url(public_url)
        except ValueError as e:
            return ExecutionResult(
                success=False,
                data={"public_url": public_url},
                error=str(e),
            )

        # Find compose file in the Nextcloud stack directory
        stack_dir = Path(self.NEXTCLOUD_STACK_DIR)
        compose_file = self._find_compose_file(stack_dir)

        if compose_file is None:
            self.logger.warning("nextcloud_compose_not_found", dir=self.NEXTCLOUD_STACK_DIR)
            return ExecutionResult(
                success=False,
                data={"public_url": public_url, "host": host, "scheme": scheme},
                error=f"Nextcloud compose file not found in {self.NEXTCLOUD_STACK_DIR}. "
                f"Looked for: {', '.join(self.COMPOSE_FILE_NAMES)}",
            )

        self.logger.info(
            "nextcloud_setting_domain",
            public_url=normalized_url,
            host=host,
            scheme=scheme,
            compose_file=str(compose_file),
        )

        start_time = time.time()
        logs: dict[str, str] = {}
        commands_executed = 0

        # Define the three occ commands to run
        occ_commands = [
            ("overwritehost", ["config:system:set", "overwritehost", f"--value={host}"]),
            ("overwriteprotocol", ["config:system:set", "overwriteprotocol", f"--value={scheme}"]),
            ("overwrite.cli.url", ["config:system:set", "overwrite.cli.url", f"--value={normalized_url}"]),
        ]

        try:
            for cmd_name, occ_args in occ_commands:
                exit_code, stdout, stderr = await self._run_occ_command(
                    compose_file,
                    occ_args,
                    timeout,
                )
                logs[cmd_name] = self._combine_output(stdout, stderr)
                commands_executed += 1

                if exit_code != 0:
                    duration_ms = (time.time() - start_time) * 1000
                    self.logger.warning(
                        "nextcloud_occ_command_failed",
                        command=cmd_name,
                        occ_args=occ_args,
                        exit_code=exit_code,
                        stderr=stderr[:500] if stderr else None,
                    )
                    return ExecutionResult(
                        success=False,
                        data={
                            "public_url": normalized_url,
                            "host": host,
                            "scheme": scheme,
                            "commands_executed": commands_executed,
                            "failed_command": cmd_name,
                            "failed_args": occ_args,
                            "logs": logs,
                        },
                        error=f"occ {cmd_name} failed with exit code {exit_code}",
                        duration_ms=duration_ms,
                    )

            duration_ms = (time.time() - start_time) * 1000
            self.logger.info(
                "nextcloud_domain_set",
                public_url=normalized_url,
                host=host,
                scheme=scheme,
                commands_executed=commands_executed,
                duration_ms=duration_ms,
            )

            return ExecutionResult(
                success=True,
                data={
                    "public_url": normalized_url,
                    "host": host,
                    "scheme": scheme,
                    "commands_executed": commands_executed,
                    "logs": logs,
                },
                duration_ms=duration_ms,
            )

        except asyncio.TimeoutError:
            duration_ms = (time.time() - start_time) * 1000
            self.logger.error(
                "nextcloud_timeout",
                public_url=normalized_url,
                timeout=timeout,
                commands_executed=commands_executed,
            )
            return ExecutionResult(
                success=False,
                data={
                    "public_url": normalized_url,
                    "host": host,
                    "scheme": scheme,
                    "commands_executed": commands_executed,
                    "logs": logs,
                },
                error=f"Nextcloud occ operation timed out after {timeout} seconds",
                duration_ms=duration_ms,
            )

        except Exception as e:
            duration_ms = (time.time() - start_time) * 1000
            self.logger.error(
                "nextcloud_error",
                public_url=normalized_url,
                error=str(e),
                commands_executed=commands_executed,
            )
            return ExecutionResult(
                success=False,
                data={
                    "public_url": normalized_url,
                    "host": host,
                    "scheme": scheme,
                    "commands_executed": commands_executed,
                    "logs": logs,
                },
                error=str(e),
                duration_ms=duration_ms,
            )

    def _parse_public_url(self, public_url: str) -> tuple[str, str, str]:
        """Parse public URL into scheme, host, and normalized URL.

        Args:
            public_url: Full URL like "https://cloud.example.com" or just "cloud.example.com"

        Returns:
            Tuple of (scheme, host, normalized_url)
            - scheme: "http" or "https" (defaults to "https" if not provided)
            - host: Domain with optional port (e.g., "cloud.example.com:8443")
            - normalized_url: Full URL with trailing slash stripped

        Raises:
            ValueError: If URL is invalid or missing host
        """
        if not public_url or not public_url.strip():
            raise ValueError("public_url cannot be empty")

        url = public_url.strip()

        # Parse the URL
        parsed = urlparse(url)

        # Extract scheme, default to "https" if not provided
        scheme = parsed.scheme if parsed.scheme else "https"

        # Extract host (netloc includes port if present)
        host = parsed.netloc

        # Handle URLs without scheme (e.g., "cloud.example.com" or "cloud.example.com/path")
        # urlparse treats "cloud.example.com" as a path, not netloc
        if not host and not parsed.scheme:
            # The URL was provided without a scheme, so we need to re-parse with scheme
            url_with_scheme = f"https://{url}"
            parsed = urlparse(url_with_scheme)
            host = parsed.netloc
            scheme = "https"

        if not host:
            raise ValueError(f"Invalid URL - no host found: {public_url}")

        # Reconstruct normalized URL (with trailing slash stripped)
        normalized_url = f"{scheme}://{host}"
        if parsed.path and parsed.path != "/":
            normalized_url += parsed.path.rstrip("/")

        return scheme, host, normalized_url
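    # Worked examples of _parse_public_url, derived from the logic above:
    #   "https://cloud.example.com/"   -> ("https", "cloud.example.com", "https://cloud.example.com")
    #   "cloud.example.com"            -> ("https", "cloud.example.com", "https://cloud.example.com")
    #   "http://cloud.example.com/nc/" -> ("http", "cloud.example.com", "http://cloud.example.com/nc")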
    def _find_compose_file(self, compose_dir: Path) -> Path | None:
        """Find compose file in the directory.

        Searches in order: docker-compose.yml, compose.yml

        Args:
            compose_dir: Directory to search in

        Returns:
            Path to compose file, or None if not found
        """
        for filename in self.COMPOSE_FILE_NAMES:
            compose_file = compose_dir / filename
            if compose_file.exists():
                return compose_file
        return None

    def _combine_output(self, stdout: str, stderr: str) -> str:
        """Combine stdout and stderr into a single string.

        Args:
            stdout: Standard output
            stderr: Standard error

        Returns:
            Combined output string
        """
        parts = []
        if stdout:
            parts.append(stdout)
        if stderr:
            parts.append(stderr)
        return "\n".join(parts)

    async def _run_occ_command(
        self,
        compose_file: Path,
        occ_args: list[str],
        timeout: int,
    ) -> tuple[int, str, str]:
        """Run a Nextcloud occ command via docker compose exec.

        Args:
            compose_file: Path to the docker-compose file
            occ_args: Arguments to pass to occ (e.g., ["config:system:set", "overwritehost", "--value=..."])
            timeout: Operation timeout in seconds

        Returns:
            Tuple of (exit_code, stdout, stderr)
        """
        def _run() -> tuple[int, str, str]:
            # Build command: docker compose -f <file> exec -T --user <user> <service> php <occ_path> <args>
            cmd = [
                "docker",
                "compose",
                "-f",
                str(compose_file),
                "exec",
                "-T",  # Disable pseudo-TTY allocation
                "--user",
                self.NEXTCLOUD_USER,
                self.NEXTCLOUD_SERVICE_NAME,
                "php",
                self.NEXTCLOUD_OCC_PATH,
            ] + occ_args

            # Run command from stack directory, no shell=True
            result = subprocess.run(
                cmd,
                capture_output=True,
                text=True,
                timeout=timeout,
                cwd=self.NEXTCLOUD_STACK_DIR,
            )

            return result.returncode, result.stdout, result.stderr

        return await asyncio.wait_for(
            asyncio.to_thread(_run),
            timeout=timeout + 30,  # Watchdog with buffer
        )
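# For reference, the first occ invocation above expands to roughly this command
# line (constants come from the class; the domain value is illustrative):
#
#     docker compose -f /opt/letsbe/stacks/nextcloud/docker-compose.yml \
#         exec -T --user www-data app \
#         php /var/www/html/occ config:system:set overwritehost --value=cloud.example.com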
329
letsbe-sysadmin-agent/app/executors/playwright_executor.py
Normal file
@@ -0,0 +1,329 @@
"""Playwright browser automation executor.

Executes deterministic, scenario-based browser automation tasks.
Each scenario is a reusable workflow registered in the scenario registry.
"""

import time
import uuid
from pathlib import Path
from typing import Any

from playwright.async_api import async_playwright, Route, Request

from app.config import get_settings
from app.executors.base import BaseExecutor, ExecutionResult
from app.playwright_scenarios import get_scenario, get_scenario_names, ScenarioOptions
from app.utils.validation import is_domain_allowed, validate_allowed_domains, ValidationError


class PlaywrightExecutor(BaseExecutor):
    """Browser automation executor using Playwright scenarios.

    Executes pre-defined browser automation scenarios with strict security controls.
    Each execution creates an isolated browser context with domain restrictions.

    Payload:
        {
            "scenario": "nextcloud_initial_setup",  # Required: registered scenario name
            "inputs": {                             # Required: scenario-specific inputs
                "base_url": "https://cloud.example.com",
                "admin_username": "admin",
                "admin_password": "secret123"
            },
            "options": {                            # Optional configuration
                "timeout_ms": 60000,                # Action timeout (default: 60000)
                "screenshot_on_failure": true,      # Screenshot on fail (default: true)
                "screenshot_on_success": false,     # Screenshot on success (default: false)
                "save_trace": false,                # Save trace file (default: false)
                "allowed_domains": ["cloud.example.com"]  # REQUIRED: domain allowlist
            }
        }

    Security:
    - allowed_domains is REQUIRED - blocks all requests to non-listed domains
    - Browser runs in headless mode only (not configurable)
    - Each execution gets an isolated browser context
    - Artifacts are stored in per-task directories
    """

    @property
    def task_type(self) -> str:
        return "PLAYWRIGHT"

    async def execute(self, payload: dict[str, Any]) -> ExecutionResult:
        """Execute a Playwright scenario.

        Args:
            payload: Task payload with scenario, inputs, and options

        Returns:
            ExecutionResult with scenario output and artifact paths
        """
        start_time = time.time()
        settings = get_settings()

        try:
            # Validate required fields
            self.validate_payload(payload, ["scenario", "inputs"])

            scenario_name = payload["scenario"]
            inputs = payload["inputs"]
            options_dict = payload.get("options", {})

            # Validate allowed_domains is present
            allowed_domains = options_dict.get("allowed_domains")
            if not allowed_domains:
                return ExecutionResult(
                    success=False,
                    data={"scenario": scenario_name},
                    error="Security error: 'allowed_domains' is required in options",
                    duration_ms=(time.time() - start_time) * 1000,
                )

            # Validate domain patterns
            try:
                allowed_domains = validate_allowed_domains(allowed_domains)
            except ValidationError as e:
                return ExecutionResult(
                    success=False,
                    data={"scenario": scenario_name},
                    error=f"Invalid allowed_domains: {e}",
                    duration_ms=(time.time() - start_time) * 1000,
                )

            # Get scenario from registry
            scenario = get_scenario(scenario_name)
            if scenario is None:
                available = get_scenario_names()
                return ExecutionResult(
                    success=False,
                    data={
                        "scenario": scenario_name,
                        "available_scenarios": available,
                    },
                    error=f"Unknown scenario: '{scenario_name}'. Available: {available}",
                    duration_ms=(time.time() - start_time) * 1000,
                )

            # Validate scenario inputs
            missing_inputs = scenario.validate_inputs(inputs)
            if missing_inputs:
                return ExecutionResult(
                    success=False,
                    data={
                        "scenario": scenario_name,
                        "missing_inputs": missing_inputs,
                        "required_inputs": scenario.required_inputs,
                    },
                    error=f"Missing required inputs: {missing_inputs}",
                    duration_ms=(time.time() - start_time) * 1000,
                )

            # Create artifacts directory for this execution
            task_id = str(uuid.uuid4())[:8]
            artifacts_dir = Path(settings.playwright_artifacts_dir) / f"task-{task_id}"
            artifacts_dir.mkdir(parents=True, exist_ok=True)

            # Build scenario options
            scenario_options = ScenarioOptions(
                timeout_ms=options_dict.get("timeout_ms", settings.playwright_default_timeout_ms),
                screenshot_on_failure=options_dict.get("screenshot_on_failure", True),
                screenshot_on_success=options_dict.get("screenshot_on_success", False),
                save_trace=options_dict.get("save_trace", False),
                allowed_domains=allowed_domains,
                artifacts_dir=artifacts_dir,
            )

            self.logger.info(
                "playwright_scenario_starting",
                scenario=scenario_name,
                task_id=task_id,
                allowed_domains=allowed_domains,
            )

            # Execute scenario with browser
            result = await self._run_scenario(
                scenario=scenario,
                inputs=inputs,
                options=scenario_options,
                task_id=task_id,
            )

            duration_ms = (time.time() - start_time) * 1000

            self.logger.info(
                "playwright_scenario_completed",
                scenario=scenario_name,
                success=result.success,
                duration_ms=duration_ms,
            )

            return ExecutionResult(
                success=result.success,
                data={
                    "scenario": scenario_name,
                    "result": result.data,
                    "screenshots": result.screenshots,
                    "artifacts_dir": str(artifacts_dir),
                    "trace_path": result.trace_path,
                },
                error=result.error,
                duration_ms=duration_ms,
            )

        except ValueError as e:
            # Validation errors
            return ExecutionResult(
                success=False,
                data={},
                error=str(e),
                duration_ms=(time.time() - start_time) * 1000,
            )
        except Exception as e:
            self.logger.error(
                "playwright_executor_error",
                error=str(e),
                error_type=type(e).__name__,
            )
            return ExecutionResult(
                success=False,
                data={},
                error=f"Playwright executor error: {e}",
                duration_ms=(time.time() - start_time) * 1000,
            )

    async def _run_scenario(
        self,
        scenario,
        inputs: dict[str, Any],
        options: ScenarioOptions,
        task_id: str,
    ):
        """Run a scenario with browser and domain restrictions.

        Args:
            scenario: The scenario instance to execute
            inputs: Scenario inputs
            options: Scenario options
            task_id: Task identifier for logging

        Returns:
            ScenarioResult from the scenario execution
        """
        from app.playwright_scenarios import ScenarioResult

        settings = get_settings()
        blocked_requests: list[str] = []

        async def route_handler(route: Route, request: Request) -> None:
            """Block requests to non-allowed domains."""
            url = request.url

            if is_domain_allowed(url, options.allowed_domains):
                await route.continue_()
            else:
                blocked_requests.append(url)
                self.logger.warning(
                    "playwright_blocked_request",
                    url=url,
                    task_id=task_id,
                )
                await route.abort("blockedbyclient")

        async with async_playwright() as p:
            # Launch browser in headless mode (always)
            browser = await p.chromium.launch(
                headless=True,
                args=[
                    "--no-sandbox",
                    "--disable-setuid-sandbox",
                    "--disable-dev-shm-usage",
                    "--disable-gpu",
                ],
            )

            try:
                # Create isolated context
                context = await browser.new_context(
                    viewport={"width": 1280, "height": 720},
                    user_agent="LetsBe-SysAdmin-Agent/1.0 Playwright",
                )

                # Set default timeouts
                context.set_default_timeout(options.timeout_ms)
                context.set_default_navigation_timeout(
                    settings.playwright_navigation_timeout_ms
                )

                # Start tracing if enabled
                if options.save_trace and options.artifacts_dir:
                    await context.tracing.start(
                        screenshots=True,
                        snapshots=True,
                    )

                # Apply domain restrictions via route interception
                await context.route("**/*", route_handler)

                # Create page
                page = await context.new_page()

                try:
                    # Run scenario setup hook
                    await scenario.setup(page, options)

                    # Execute the scenario
                    result = await scenario.execute(page, inputs, options)

                    # Take success screenshot if enabled
                    if options.screenshot_on_success and options.artifacts_dir:
                        screenshot_path = options.artifacts_dir / "success.png"
                        await page.screenshot(path=str(screenshot_path))
                        result.screenshots.append(str(screenshot_path))

                except Exception as e:
                    # Capture failure screenshot
                    screenshots = []
                    if options.screenshot_on_failure and options.artifacts_dir:
                        try:
                            screenshot_path = options.artifacts_dir / "failure.png"
                            await page.screenshot(path=str(screenshot_path))
                            screenshots.append(str(screenshot_path))
                        except Exception as screenshot_error:
                            self.logger.warning(
                                "playwright_screenshot_failed",
                                error=str(screenshot_error),
                            )

                    result = ScenarioResult(
                        success=False,
                        data={"blocked_requests": blocked_requests},
                        screenshots=screenshots,
                        error=str(e),
                    )

                finally:
                    # Run scenario teardown hook
                    try:
                        await scenario.teardown(page, options)
                    except Exception as teardown_error:
                        self.logger.warning(
                            "playwright_teardown_error",
                            error=str(teardown_error),
                        )

                # Stop tracing and save
                if options.save_trace and options.artifacts_dir:
                    trace_path = options.artifacts_dir / "trace.zip"
                    await context.tracing.stop(path=str(trace_path))
                    result.trace_path = str(trace_path)

                # Add blocked requests info
                if blocked_requests:
                    result.data["blocked_requests"] = blocked_requests

                return result

            finally:
                await browser.close()
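# Note: route.abort("blockedbyclient") surfaces inside the page as
# net::ERR_BLOCKED_BY_CLIENT, so a scenario that depends on a blocked
# third-party asset sees a failed network request rather than a hang;
# the offending URLs are echoed back in result.data["blocked_requests"].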
163
letsbe-sysadmin-agent/app/executors/shell_executor.py
Normal file
@@ -0,0 +1,163 @@
"""Shell command executor with strict security controls."""

import asyncio
import time
from typing import Any

from app.config import get_settings
from app.executors.base import BaseExecutor, ExecutionResult
from app.utils.validation import ValidationError, validate_shell_command


class ShellExecutor(BaseExecutor):
    """Execute shell commands with strict security controls.

    Security measures:
    - Absolute path allowlist for commands
    - Per-command argument validation via regex
    - Forbidden shell metacharacter blocking
    - No shell=True (prevents shell injection)
    - Timeout enforcement with watchdog
    - Runs via asyncio.to_thread to avoid blocking

    Payload:
        {
            "cmd": "/usr/bin/ls",     # Must be absolute path
            "args": "-la /opt/data",  # Optional arguments
            "timeout": 60             # Optional timeout override
        }

    Result:
        {
            "exit_code": 0,
            "stdout": "...",
            "stderr": "...",
            "duration_ms": 123.45
        }
    """

    @property
    def task_type(self) -> str:
        return "SHELL"

    async def execute(self, payload: dict[str, Any]) -> ExecutionResult:
        """Execute a shell command.

        Args:
            payload: Must contain "cmd", optionally "args" and "timeout"

        Returns:
            ExecutionResult with command output
        """
        self.validate_payload(payload, ["cmd"])
        settings = get_settings()

        cmd = payload["cmd"]
        args_str = payload.get("args", "")
        timeout_override = payload.get("timeout")

        # Validate command and arguments
        try:
            validated_cmd, args_list, default_timeout = validate_shell_command(cmd, args_str)
        except ValidationError as e:
            self.logger.warning("shell_validation_failed", cmd=cmd, error=str(e))
            return ExecutionResult(
                success=False,
                data={"exit_code": -1, "stdout": "", "stderr": ""},
                error=f"Validation failed: {e}",
            )

        # Determine timeout
        timeout = timeout_override if timeout_override is not None else default_timeout
        timeout = min(timeout, settings.shell_timeout)  # Cap at global max

        self.logger.info(
            "shell_executing",
            cmd=validated_cmd,
            args=args_list,
            timeout=timeout,
        )

        start_time = time.time()

        try:
            # Run in thread pool to avoid blocking event loop
            result = await asyncio.wait_for(
                self._run_subprocess(validated_cmd, args_list),
                timeout=timeout * 2,  # Watchdog at 2x timeout
            )

            duration_ms = (time.time() - start_time) * 1000
            exit_code, stdout, stderr = result

            success = exit_code == 0

            self.logger.info(
                "shell_completed",
                cmd=validated_cmd,
                exit_code=exit_code,
                duration_ms=duration_ms,
            )

            return ExecutionResult(
                success=success,
                data={
                    "exit_code": exit_code,
                    "stdout": stdout,
                    "stderr": stderr,
                },
                error=stderr if not success else None,
                duration_ms=duration_ms,
            )

        except asyncio.TimeoutError:
            duration_ms = (time.time() - start_time) * 1000
            self.logger.error("shell_timeout", cmd=validated_cmd, timeout=timeout)
            return ExecutionResult(
                success=False,
                data={"exit_code": -1, "stdout": "", "stderr": ""},
                error=f"Command timed out after {timeout} seconds",
                duration_ms=duration_ms,
            )

        except Exception as e:
            duration_ms = (time.time() - start_time) * 1000
            self.logger.error("shell_error", cmd=validated_cmd, error=str(e))
            return ExecutionResult(
                success=False,
                data={"exit_code": -1, "stdout": "", "stderr": ""},
                error=str(e),
                duration_ms=duration_ms,
            )

    async def _run_subprocess(
        self,
        cmd: str,
        args: list[str],
    ) -> tuple[int, str, str]:
        """Run subprocess in thread pool.

        Args:
            cmd: Command to run (absolute path)
            args: Command arguments

        Returns:
            Tuple of (exit_code, stdout, stderr)
        """
        import subprocess

        def _run() -> tuple[int, str, str]:
            # Build full command list
            full_cmd = [cmd] + args

            # Run WITHOUT shell=True for security
            result = subprocess.run(
                full_cmd,
                capture_output=True,
                text=True,
                timeout=get_settings().shell_timeout,
            )

            return result.returncode, result.stdout, result.stderr

        return await asyncio.to_thread(_run)
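# A hypothetical SHELL task payload; which commands and argument patterns are
# actually accepted is decided by validate_shell_command's allowlist:
#
#     payload = {"cmd": "/usr/bin/df", "args": "-h /opt", "timeout": 10}
#     result = await executor.execute(payload)
#     result.data  # e.g. {"exit_code": 0, "stdout": "...", "stderr": ""}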
200
letsbe-sysadmin-agent/app/main.py
Normal file
@@ -0,0 +1,200 @@
"""Main entry point for the LetsBe SysAdmin Agent."""

import asyncio
import signal
import sys
from pathlib import Path

from app import __version__
from app.agent import Agent
from app.clients.orchestrator_client import OrchestratorClient
from app.config import get_settings
from app.task_manager import TaskManager
from app.utils.logger import configure_logging, get_logger


def print_banner() -> None:
    """Print startup banner."""
    settings = get_settings()
    banner = f"""
+==============================================================+
| LetsBe SysAdmin Agent v{__version__:<24}|
+==============================================================+
| Hostname: {settings.hostname:<45}|
| Orchestrator: {settings.orchestrator_url:<45}|
| Log Level: {settings.log_level:<45}|
+==============================================================+
"""
    print(banner)


def validate_mounted_directories() -> None:
    """Check that required host directories are mounted.

    Logs warnings if directories are missing but does not prevent startup.
    """
    logger = get_logger("main")

    required_dirs = [
        "/opt/letsbe/env",
        "/opt/letsbe/stacks",
        "/opt/letsbe/nginx",
    ]

    missing = []
    for dir_path in required_dirs:
        if not Path(dir_path).is_dir():
            missing.append(dir_path)

    if missing:
        logger.warning(
            "mounted_directories_missing",
            missing=missing,
            message="Some host directories are not mounted. Tasks requiring these paths will fail.",
        )
    else:
        logger.info("mounted_directories_ok", directories=required_dirs)


async def main() -> int:
    """Main async entry point.

    Returns:
        Exit code (0 for success, non-zero for failure)
    """
    settings = get_settings()

    # Configure logging
    configure_logging(settings.log_level, settings.log_json)
    logger = get_logger("main")

    print_banner()
    validate_mounted_directories()

    logger.info(
        "agent_starting",
        version=__version__,
        hostname=settings.hostname,
        orchestrator_url=settings.orchestrator_url,
    )

    # Create components
    client = OrchestratorClient(settings)
    agent = Agent(client, settings)
    task_manager = TaskManager(client, settings)

    # Shutdown handler
    shutdown_event = asyncio.Event()

    def handle_signal(sig: int) -> None:
        """Handle shutdown signals."""
        sig_name = signal.Signals(sig).name
        logger.info("signal_received", signal=sig_name)
        shutdown_event.set()

    # Register signal handlers (Unix)
    if sys.platform != "win32":
        loop = asyncio.get_running_loop()
        for sig in (signal.SIGTERM, signal.SIGINT):
            loop.add_signal_handler(sig, lambda s=sig: handle_signal(s))
    else:
        # Windows: Use default CTRL+C handling
        pass

    try:
        # Register with orchestrator - retry indefinitely until success or shutdown
        # This ensures the agent survives orchestrator restarts/updates
        registration_attempt = 0
        while not shutdown_event.is_set():
            registration_attempt += 1

            # Reset circuit breaker before each attempt to give orchestrator a fresh chance
            # This is important after waiting - orchestrator may have come back up
            client.reset_circuit_breaker()

            logger.info(
                "registration_attempt",
                attempt=registration_attempt,
                message="Attempting to register with orchestrator...",
            )

            if await agent.register():
                break  # Registration successful

            # Wait before retrying, with exponential backoff up to 60 seconds
            wait_time = min(30 * (1.5 ** min(registration_attempt - 1, 4)), 60)
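            # The schedule this yields: attempt 1 waits 30s, attempt 2 waits
            # 45s, attempt 3 would be 67.5s but is capped at 60s, and every
            # later attempt stays at 60s (the exponent is clamped at 4).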
            logger.warning(
                "registration_retry_wait",
                attempt=registration_attempt,
                wait_seconds=wait_time,
                message="Orchestrator unavailable, will retry...",
            )

            # Wait but allow shutdown to interrupt
            try:
                await asyncio.wait_for(shutdown_event.wait(), timeout=wait_time)
                # If we get here, shutdown was requested
                logger.info("shutdown_during_registration")
                return 0
            except asyncio.TimeoutError:
                # Normal timeout, continue to retry
                pass

        if shutdown_event.is_set():
            logger.info("shutdown_before_registration_complete")
            return 0

        # Start background tasks
        heartbeat_task = asyncio.create_task(
            agent.heartbeat_loop(),
            name="heartbeat",
        )
        poll_task = asyncio.create_task(
            task_manager.poll_loop(),
            name="poll",
        )

        logger.info("agent_running")

        # Wait for shutdown signal
        await shutdown_event.wait()

        logger.info("shutdown_initiated")

        # Graceful shutdown
        await task_manager.shutdown()
        await agent.shutdown()

        # Cancel background tasks
        heartbeat_task.cancel()
        poll_task.cancel()

        # Wait for tasks to finish
        await asyncio.gather(
            heartbeat_task,
            poll_task,
            return_exceptions=True,
        )

        logger.info("agent_stopped")
        return 0

    except Exception as e:
        logger.error("agent_fatal_error", error=str(e))
        await client.close()
        return 1


def run() -> None:
    """Entry point for CLI."""
    try:
        exit_code = asyncio.run(main())
        sys.exit(exit_code)
    except KeyboardInterrupt:
        print("\nAgent interrupted by user")
        sys.exit(130)


if __name__ == "__main__":
    run()
116
letsbe-sysadmin-agent/app/playwright_scenarios/__init__.py
Normal file
@@ -0,0 +1,116 @@
"""Playwright scenario registry.

This module provides the central registry for all available Playwright scenarios.
Scenarios are registered at import time and looked up by name during execution.

Usage:
    from app.playwright_scenarios import get_scenario, list_scenarios

    # Get a specific scenario
    scenario = get_scenario("nextcloud_initial_setup")

    # List all available scenarios
    available = list_scenarios()
"""

from typing import Optional

from app.playwright_scenarios.base import BaseScenario, ScenarioOptions, ScenarioResult

# Registry mapping scenario names to scenario classes
_SCENARIO_REGISTRY: dict[str, type[BaseScenario]] = {}


def register_scenario(scenario_class: type[BaseScenario]) -> type[BaseScenario]:
    """Decorator to register a scenario class.

    Usage:
        @register_scenario
        class MyScenario(BaseScenario):
            ...

    Args:
        scenario_class: The scenario class to register

    Returns:
        The scenario class (unchanged)

    Raises:
        ValueError: If a scenario with the same name is already registered
    """
    # Create instance to get the name
    instance = scenario_class()
    name = instance.name

    if name in _SCENARIO_REGISTRY:
        raise ValueError(
            f"Scenario '{name}' is already registered by {_SCENARIO_REGISTRY[name].__name__}"
        )

    _SCENARIO_REGISTRY[name] = scenario_class
    return scenario_class


def get_scenario(name: str) -> Optional[BaseScenario]:
    """Get a scenario instance by name.

    Args:
        name: The scenario name (e.g., 'nextcloud_initial_setup')

    Returns:
        Scenario instance if found, None otherwise
    """
    scenario_class = _SCENARIO_REGISTRY.get(name)
    if scenario_class is None:
        return None
    return scenario_class()


def list_scenarios() -> list[dict[str, str]]:
    """List all registered scenarios with their metadata.

    Returns:
        List of dictionaries with scenario name, description, and required inputs
    """
    result = []
    for name, scenario_class in sorted(_SCENARIO_REGISTRY.items()):
        instance = scenario_class()
        result.append({
            "name": name,
            "description": instance.description,
            "required_inputs": instance.required_inputs,
            "optional_inputs": instance.optional_inputs,
        })
    return result


def get_scenario_names() -> list[str]:
    """Get list of all registered scenario names.

    Returns:
        Sorted list of scenario names
    """
    return sorted(_SCENARIO_REGISTRY.keys())


# Import scenario modules to trigger registration
# Add imports here as new scenarios are created:
from app.playwright_scenarios import echo  # noqa: F401
from app.playwright_scenarios.nextcloud import initial_setup  # noqa: F401
from app.playwright_scenarios.poste import initial_setup as poste_initial_setup  # noqa: F401
from app.playwright_scenarios.chatwoot import initial_setup as chatwoot_initial_setup  # noqa: F401
from app.playwright_scenarios.keycloak import initial_setup as keycloak_initial_setup  # noqa: F401
from app.playwright_scenarios.n8n import initial_setup as n8n_initial_setup  # noqa: F401
from app.playwright_scenarios.calcom import initial_setup as calcom_initial_setup  # noqa: F401
from app.playwright_scenarios.umami import initial_setup as umami_initial_setup  # noqa: F401
from app.playwright_scenarios.uptime_kuma import initial_setup as uptime_kuma_initial_setup  # noqa: F401

__all__ = [
    "BaseScenario",
    "ScenarioOptions",
    "ScenarioResult",
    "register_scenario",
    "get_scenario",
    "list_scenarios",
    "get_scenario_names",
]
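# A minimal (hypothetical) scenario registration, following the pattern above:
#
#     @register_scenario
#     class PingScenario(BaseScenario):
#         @property
#         def name(self) -> str:
#             return "ping"
#
#         @property
#         def required_inputs(self) -> list[str]:
#             return ["base_url"]
#
#         async def execute(self, page, inputs, options) -> ScenarioResult:
#             await page.goto(inputs["base_url"])
#             return ScenarioResult(success=True, data={"title": await page.title()})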
162
letsbe-sysadmin-agent/app/playwright_scenarios/base.py
Normal file
@@ -0,0 +1,162 @@
"""Base classes for Playwright scenario execution.

Scenarios are deterministic, reusable browser automation sequences
that execute specific UI workflows against tenant applications.
"""

from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Optional

from playwright.async_api import Page


@dataclass
class ScenarioOptions:
    """Configuration options for scenario execution.

    Attributes:
        timeout_ms: Default timeout for actions in milliseconds
        screenshot_on_failure: Capture screenshot when scenario fails
        screenshot_on_success: Capture screenshot when scenario succeeds
        save_trace: Save Playwright trace for debugging
        allowed_domains: List of domains the scenario can access (REQUIRED for security)
        artifacts_dir: Directory to save screenshots and traces
    """
    timeout_ms: int = 60000
    screenshot_on_failure: bool = True
    screenshot_on_success: bool = False
    save_trace: bool = False
    allowed_domains: list[str] = field(default_factory=list)
    artifacts_dir: Optional[Path] = None

    def __post_init__(self) -> None:
        if self.artifacts_dir and isinstance(self.artifacts_dir, str):
            self.artifacts_dir = Path(self.artifacts_dir)


@dataclass
class ScenarioResult:
    """Result of a scenario execution.

    Attributes:
        success: Whether the scenario completed successfully
        data: Scenario-specific result data
        screenshots: List of paths to captured screenshots
        error: Error message if scenario failed
        trace_path: Path to trace file if tracing was enabled
    """
    success: bool
    data: dict[str, Any]
    screenshots: list[str] = field(default_factory=list)
    error: Optional[str] = None
    trace_path: Optional[str] = None


class BaseScenario(ABC):
    """Abstract base class for Playwright scenarios.

    Each scenario implements a specific UI automation workflow.
    Scenarios are registered by name and dispatched by the PlaywrightExecutor.

    Example implementation:
        class NextcloudInitialSetup(BaseScenario):
            @property
            def name(self) -> str:
                return "nextcloud_initial_setup"

            @property
            def required_inputs(self) -> list[str]:
                return ["base_url", "admin_username", "admin_password"]

            async def execute(self, page, inputs, options) -> ScenarioResult:
                # Perform setup steps...
                return ScenarioResult(success=True, data={"setup": "complete"})
    """

    @property
    @abstractmethod
    def name(self) -> str:
        """Unique name identifying this scenario.

        This name is used in task payloads to select the scenario.
        Convention: lowercase_with_underscores (e.g., 'nextcloud_initial_setup')
        """
        ...

    @property
    @abstractmethod
    def required_inputs(self) -> list[str]:
        """List of required input keys for this scenario.

        The executor validates that all required inputs are present
        before executing the scenario.
        """
        ...

    @property
    def optional_inputs(self) -> list[str]:
        """List of optional input keys for this scenario.

        Override this property to declare optional inputs with defaults.
        """
        return []

    @property
    def description(self) -> str:
        """Human-readable description of what this scenario does.

        Override this property to provide documentation.
        """
        return f"Scenario: {self.name}"

    @abstractmethod
    async def execute(
        self,
        page: Page,
        inputs: dict[str, Any],
        options: ScenarioOptions,
    ) -> ScenarioResult:
        """Execute the scenario against the provided page.

        Args:
            page: Playwright Page object with domain restrictions applied
            inputs: Dictionary of input values (validated by executor)
            options: Scenario options including timeout and artifact settings

        Returns:
            ScenarioResult with success status and any result data

        Note:
            - Domain restrictions are already enforced by the executor
            - Screenshots on failure are handled by the executor
            - Focus on the business logic of the UI workflow
        """
        ...

    async def setup(self, page: Page, options: ScenarioOptions) -> None:
        """Optional setup hook called before execute().

        Override to perform setup actions like setting viewport size,
        configuring page settings, etc.
        """
        pass

    async def teardown(self, page: Page, options: ScenarioOptions) -> None:
        """Optional teardown hook called after execute().

        Override to perform cleanup actions. Called even if execute() fails.
        """
        pass

    def validate_inputs(self, inputs: dict[str, Any]) -> list[str]:
        """Validate inputs and return list of missing required keys.

        Args:
            inputs: Dictionary of inputs to validate

        Returns:
            List of missing required input keys (empty if all present)
        """
        return [key for key in self.required_inputs if key not in inputs]
5
letsbe-sysadmin-agent/app/playwright_scenarios/calcom/__init__.py
Normal file
@@ -0,0 +1,5 @@
"""Cal.com browser automation scenarios."""

from app.playwright_scenarios.calcom.initial_setup import CalcomInitialSetup

__all__ = ["CalcomInitialSetup"]
254
letsbe-sysadmin-agent/app/playwright_scenarios/calcom/initial_setup.py
Normal file
@@ -0,0 +1,254 @@
"""Cal.com initial setup scenario.

Automates the first-time setup for a fresh Cal.com installation.
This scenario:
1. Navigates to the Cal.com setup page
2. Creates the admin account
3. Completes onboarding steps
"""

import secrets
import string
from typing import Any

from playwright.async_api import Page

from app.playwright_scenarios import register_scenario
from app.playwright_scenarios.base import BaseScenario, ScenarioOptions, ScenarioResult


def generate_secure_password(length: int = 24) -> str:
    """Generate a cryptographically secure password.

    Args:
        length: Password length (default: 24)

    Returns:
        A secure random password with mixed characters
    """
    alphabet = string.ascii_letters + string.digits + "!@#$%^&*"
    password = [
        secrets.choice(string.ascii_lowercase),
        secrets.choice(string.ascii_uppercase),
        secrets.choice(string.digits),
        secrets.choice("!@#$%^&*"),
    ]
    password.extend(secrets.choice(alphabet) for _ in range(length - 4))
    password_list = list(password)
    secrets.SystemRandom().shuffle(password_list)
    return "".join(password_list)


@register_scenario
class CalcomInitialSetup(BaseScenario):
    """Automate Cal.com first-time admin account setup.

    This scenario handles the initial account creation when
    Cal.com is freshly installed. It navigates to the signup page,
    fills in account details, and completes the onboarding wizard.

    Required inputs:
        base_url: The Cal.com instance URL (e.g., https://cal.example.com)
        admin_email: Email address for the admin account

    Optional inputs:
        admin_password: Password for admin account (auto-generated if not provided)
        admin_username: Username for the admin account (default: "admin")
        admin_name: Display name for the admin account (default: "Admin")

    Result data:
        setup_completed: Whether initial setup was completed
        admin_email: The configured admin email address
        admin_password: The password (generated or provided) - STORE SECURELY
        already_configured: True if Cal.com was already set up
    """

    @property
    def name(self) -> str:
        return "calcom_initial_setup"

    @property
    def required_inputs(self) -> list[str]:
        return ["base_url", "admin_email"]

    @property
    def optional_inputs(self) -> list[str]:
        return ["admin_password", "admin_username", "admin_name"]

    @property
    def description(self) -> str:
        return "Automate Cal.com first-time admin account setup"

    async def execute(
        self,
        page: Page,
        inputs: dict[str, Any],
        options: ScenarioOptions,
    ) -> ScenarioResult:
        """Execute the Cal.com initial setup.

        Args:
            page: Playwright Page object
            inputs: Scenario inputs (base_url, admin_email)
            options: Scenario options

        Returns:
            ScenarioResult with setup status and credentials
        """
        base_url = inputs["base_url"].rstrip("/")
        admin_email = inputs["admin_email"]
        admin_password = inputs.get("admin_password") or generate_secure_password()
        admin_username = inputs.get("admin_username", "admin")
        admin_name = inputs.get("admin_name", "Admin")

        screenshots = []
        result_data = {
            "setup_completed": False,
            "admin_email": admin_email,
            "admin_password": admin_password,
            "already_configured": False,
        }

        try:
            # Navigate to Cal.com
            await page.goto(base_url, wait_until="networkidle")

            current_url = page.url

            # Check if already configured (redirects to login)
            if "/auth/login" in current_url:
                result_data["already_configured"] = True
                result_data["setup_completed"] = True
                return ScenarioResult(
                    success=True,
                    data=result_data,
                    screenshots=screenshots,
                    error=None,
                )

            # Navigate to signup page
            signup_url = f"{base_url}/signup"
            await page.goto(signup_url, wait_until="networkidle")

            # If redirected to login, the instance may already be set up
            if "/auth/login" in page.url and "/signup" not in page.url:
                result_data["already_configured"] = True
                result_data["setup_completed"] = True
                return ScenarioResult(
                    success=True,
                    data=result_data,
                    screenshots=screenshots,
                    error=None,
                )

            # Fill in the signup form
            # Username
            username_input = page.locator(
                'input[name="username"], '
                'input[id="username"], '
                'input[placeholder*="username" i]'
            ).first
            if await username_input.count() > 0:
                await username_input.wait_for(state="visible", timeout=10000)
                await username_input.fill(admin_username)

            # Full name
            name_input = page.locator(
                'input[name="name"], '
                'input[name="full_name"], '
                'input[placeholder*="name" i]'
            ).first
            if await name_input.count() > 0:
                await name_input.fill(admin_name)

            # Email
            email_input = page.locator(
                'input[name="email"], '
                'input[type="email"], '
                'input[placeholder*="email" i]'
            ).first
            await email_input.wait_for(state="visible", timeout=10000)
            await email_input.fill(admin_email)

            # Password
            password_input = page.locator(
                'input[name="password"], '
                'input[type="password"]'
            ).first
            await password_input.fill(admin_password)

            # Take screenshot before submitting
            if options.screenshot_on_success and options.artifacts_dir:
                pre_submit_path = options.artifacts_dir / "calcom_pre_submit.png"
                await page.screenshot(path=str(pre_submit_path))
                screenshots.append(str(pre_submit_path))

            # Click Sign up / Create Account button
            submit_button = page.locator(
                'button:has-text("Sign up"), '
                'button:has-text("Create"), '
                'button:has-text("Register"), '
                'button[type="submit"]'
            ).first
            await submit_button.click()

            # Wait for onboarding or dashboard
            await page.wait_for_timeout(3000)

            # Cal.com has an onboarding wizard after signup
            # Skip through onboarding steps
            for _ in range(5):
                skip_button = page.locator(
                    'button:has-text("Skip"), '
                    'a:has-text("Skip"), '
                    'button:has-text("Next"), '
                    'button:has-text("Continue"), '
                    'button:has-text("Finish")'
                )
                if await skip_button.count() > 0:
                    await skip_button.first.click()
                    await page.wait_for_timeout(2000)
                else:
                    break

            # Check if we reached the dashboard or event types page
            await page.wait_for_timeout(2000)
            current_url = page.url

            if any(kw in current_url for kw in ["/event-types", "/dashboard", "/bookings", "/settings"]):
                result_data["setup_completed"] = True
            else:
                # Check for dashboard indicators
                dashboard_el = page.locator(
                    '[class*="event-type"], '
                    '[class*="dashboard"], '
                    ':has-text("Event Types")'
                )
                if await dashboard_el.count() > 0:
                    result_data["setup_completed"] = True

            # Take final screenshot
            if options.screenshot_on_success and options.artifacts_dir:
                final_path = options.artifacts_dir / "calcom_setup_complete.png"
                await page.screenshot(path=str(final_path))
                screenshots.append(str(final_path))

            return ScenarioResult(
                success=result_data["setup_completed"],
                data=result_data,
                screenshots=screenshots,
                error=None if result_data["setup_completed"] else "Setup may not have completed",
            )

        except Exception as e:
            if options.screenshot_on_failure and options.artifacts_dir:
                error_path = options.artifacts_dir / "calcom_setup_error.png"
                await page.screenshot(path=str(error_path))
                screenshots.append(str(error_path))

            return ScenarioResult(
                success=False,
                data=result_data,
                screenshots=screenshots,
                error=f"Cal.com setup failed: {str(e)}",
            )
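# Rough strength estimate for generate_secure_password: the alphabet above has
# 52 + 10 + 8 = 70 symbols, so 24 independent draws give about
# 24 * log2(70), roughly 147 bits of entropy (slightly less with the four
# forced character-class picks), comfortably beyond brute-force reach.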
5
letsbe-sysadmin-agent/app/playwright_scenarios/chatwoot/__init__.py
Normal file
@@ -0,0 +1,5 @@
"""Chatwoot browser automation scenarios."""

from app.playwright_scenarios.chatwoot.initial_setup import ChatwootInitialSetup

__all__ = ["ChatwootInitialSetup"]
291
letsbe-sysadmin-agent/app/playwright_scenarios/chatwoot/initial_setup.py
Normal file
@@ -0,0 +1,291 @@
"""Chatwoot initial setup scenario.

Automates the first-time setup for a fresh Chatwoot installation.
This scenario:
1. Navigates to the Chatwoot installation wizard
2. Fills in admin account details (name, company, email, password)
3. Unchecks the newsletter subscription
4. Completes the setup
"""

import secrets
import string
from typing import Any

from playwright.async_api import Page

from app.playwright_scenarios import register_scenario
from app.playwright_scenarios.base import BaseScenario, ScenarioOptions, ScenarioResult


def generate_secure_password(length: int = 24) -> str:
    """Generate a cryptographically secure password.

    Args:
        length: Password length (default: 24)

    Returns:
        A secure random password with mixed characters
    """
    alphabet = string.ascii_letters + string.digits + "!@#$%^&*"
    password = [
        secrets.choice(string.ascii_lowercase),
        secrets.choice(string.ascii_uppercase),
        secrets.choice(string.digits),
        secrets.choice("!@#$%^&*"),
    ]
    password.extend(secrets.choice(alphabet) for _ in range(length - 4))
    password_list = list(password)
    secrets.SystemRandom().shuffle(password_list)
    return "".join(password_list)


@register_scenario
class ChatwootInitialSetup(BaseScenario):
    """Automate Chatwoot first-time setup wizard.

    This scenario handles the initial super admin account creation when
    Chatwoot is freshly installed. It fills in the account details,
    unchecks the newsletter subscription, and completes the setup.

    Required inputs:
        base_url: The Chatwoot instance URL (e.g., https://chatwoot.example.com)
        admin_name: Full name for the admin account
        company_name: Company/organization name
        admin_email: Email address for the admin account

    Optional inputs:
        admin_password: Password for admin account (auto-generated if not provided)

    Result data:
        setup_completed: Whether initial setup was completed
        admin_email: The configured admin email address
        admin_password: The password (generated or provided) - STORE SECURELY
        already_configured: True if Chatwoot was already set up
    """

    @property
    def name(self) -> str:
        return "chatwoot_initial_setup"

    @property
    def required_inputs(self) -> list[str]:
        return ["base_url", "admin_name", "company_name", "admin_email"]

    @property
    def optional_inputs(self) -> list[str]:
        return ["admin_password"]

    @property
    def description(self) -> str:
        return "Automate Chatwoot first-time admin account setup"

    async def execute(
        self,
        page: Page,
        inputs: dict[str, Any],
        options: ScenarioOptions,
    ) -> ScenarioResult:
        """Execute the Chatwoot initial setup.

        Args:
            page: Playwright Page object
            inputs: Scenario inputs (base_url, admin_name, company_name, admin_email)
            options: Scenario options

        Returns:
            ScenarioResult with setup status and credentials
        """
        base_url = inputs["base_url"].rstrip("/")
        admin_name = inputs["admin_name"]
        company_name = inputs["company_name"]
        admin_email = inputs["admin_email"]

        # Generate password if not provided
        admin_password = inputs.get("admin_password") or generate_secure_password()

        screenshots = []
        result_data = {
            "setup_completed": False,
            "admin_name": admin_name,
            "company_name": company_name,
            "admin_email": admin_email,
            "admin_password": admin_password,  # Return for secure storage
            "already_configured": False,
        }

        try:
            # Navigate to Chatwoot
            await page.goto(base_url, wait_until="networkidle")

            current_url = page.url

            # Check if we're on the setup page or already configured
            # Chatwoot setup page typically at /app/login or /super_admin/setup
            if "/app/login" in current_url and "installation" not in current_url:
                # Already configured - login page without setup
                result_data["already_configured"] = True
                result_data["setup_completed"] = True
                return ScenarioResult(
                    success=True,
data=result_data,
|
||||
screenshots=screenshots,
|
||||
error=None,
|
||||
)
|
||||
|
||||
# Look for the super admin setup form
|
||||
# Try common setup URL patterns
|
||||
setup_urls = [
|
||||
f"{base_url}/super_admin/setup",
|
||||
f"{base_url}/installation/onboarding",
|
||||
base_url, # Sometimes the root redirects to setup
|
||||
]
|
||||
|
||||
setup_found = False
|
||||
for setup_url in setup_urls:
|
||||
await page.goto(setup_url, wait_until="networkidle")
|
||||
|
||||
# Check for setup form elements
|
||||
name_input = page.locator('input[name="name"], input[placeholder*="name" i]')
|
||||
if await name_input.count() > 0:
|
||||
setup_found = True
|
||||
break
|
||||
|
||||
if not setup_found:
|
||||
# Check if already configured
|
||||
if "/app" in page.url or "/dashboard" in page.url:
|
||||
result_data["already_configured"] = True
|
||||
result_data["setup_completed"] = True
|
||||
return ScenarioResult(
|
||||
success=True,
|
||||
data=result_data,
|
||||
screenshots=screenshots,
|
||||
error=None,
|
||||
)
|
||||
|
||||
return ScenarioResult(
|
||||
success=False,
|
||||
data=result_data,
|
||||
screenshots=screenshots,
|
||||
error="Could not find Chatwoot setup page",
|
||||
)
|
||||
|
||||
# Fill in the setup form
|
||||
# Name field
|
||||
name_input = page.locator(
|
||||
'input[name="name"], '
|
||||
'input[placeholder*="name" i], '
|
||||
'input[id*="name" i]'
|
||||
).first
|
||||
await name_input.wait_for(state="visible", timeout=10000)
|
||||
await name_input.fill(admin_name)
|
||||
|
||||
# Company name field
|
||||
company_input = page.locator(
|
||||
'input[name="company_name"], '
|
||||
'input[name="account_name"], '
|
||||
'input[placeholder*="company" i], '
|
||||
'input[placeholder*="account" i]'
|
||||
).first
|
||||
if await company_input.count() > 0:
|
||||
await company_input.fill(company_name)
|
||||
|
||||
# Email field
|
||||
email_input = page.locator(
|
||||
'input[name="email"], '
|
||||
'input[type="email"], '
|
||||
'input[placeholder*="email" i]'
|
||||
).first
|
||||
await email_input.fill(admin_email)
|
||||
|
||||
# Password field
|
||||
password_input = page.locator(
|
||||
'input[name="password"], '
|
||||
'input[type="password"]'
|
||||
).first
|
||||
await password_input.fill(admin_password)
|
||||
|
||||
# Uncheck newsletter subscription if present
|
||||
newsletter_checkbox = page.locator(
|
||||
'input[type="checkbox"][name*="subscribe" i], '
|
||||
'input[type="checkbox"][name*="newsletter" i], '
|
||||
'input[type="checkbox"][id*="subscribe" i], '
|
||||
'label:has-text("Subscribe") input[type="checkbox"], '
|
||||
'label:has-text("newsletter") input[type="checkbox"]'
|
||||
)
|
||||
if await newsletter_checkbox.count() > 0:
|
||||
checkbox = newsletter_checkbox.first
|
||||
is_checked = await checkbox.is_checked()
|
||||
if is_checked:
|
||||
await checkbox.uncheck()
|
||||
|
||||
# Take screenshot before submitting if requested
|
||||
if options.screenshot_on_success and options.artifacts_dir:
|
||||
pre_submit_path = options.artifacts_dir / "chatwoot_pre_submit.png"
|
||||
await page.screenshot(path=str(pre_submit_path))
|
||||
screenshots.append(str(pre_submit_path))
|
||||
|
||||
# Click Finish Setup / Submit button
|
||||
submit_button = page.locator(
|
||||
'button:has-text("Finish"), '
|
||||
'button:has-text("Setup"), '
|
||||
'button:has-text("Create"), '
|
||||
'button[type="submit"], '
|
||||
'input[type="submit"]'
|
||||
).first
|
||||
await submit_button.click()
|
||||
|
||||
# Wait for setup to complete - should redirect to login or dashboard
|
||||
try:
|
||||
await page.wait_for_url(
|
||||
lambda url: "/app" in url or "/dashboard" in url or "/login" in url,
|
||||
timeout=60000,
|
||||
)
|
||||
result_data["setup_completed"] = True
|
||||
except Exception:
|
||||
# Check if there's an error message
|
||||
error_el = page.locator('.error, .alert-danger, [class*="error"]')
|
||||
if await error_el.count() > 0:
|
||||
error_text = await error_el.first.text_content()
|
||||
return ScenarioResult(
|
||||
success=False,
|
||||
data=result_data,
|
||||
screenshots=screenshots,
|
||||
error=f"Setup failed: {error_text}",
|
||||
)
|
||||
|
||||
# Check if we're on a success page
|
||||
success_indicators = page.locator(
|
||||
':has-text("success"), '
|
||||
':has-text("Welcome"), '
|
||||
':has-text("Dashboard")'
|
||||
)
|
||||
if await success_indicators.count() > 0:
|
||||
result_data["setup_completed"] = True
|
||||
|
||||
# Take final screenshot
|
||||
if options.screenshot_on_success and options.artifacts_dir:
|
||||
final_path = options.artifacts_dir / "chatwoot_setup_complete.png"
|
||||
await page.screenshot(path=str(final_path))
|
||||
screenshots.append(str(final_path))
|
||||
|
||||
return ScenarioResult(
|
||||
success=result_data["setup_completed"],
|
||||
data=result_data,
|
||||
screenshots=screenshots,
|
||||
error=None if result_data["setup_completed"] else "Setup may not have completed",
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
# Take error screenshot
|
||||
if options.screenshot_on_failure and options.artifacts_dir:
|
||||
error_path = options.artifacts_dir / "chatwoot_setup_error.png"
|
||||
await page.screenshot(path=str(error_path))
|
||||
screenshots.append(str(error_path))
|
||||
|
||||
return ScenarioResult(
|
||||
success=False,
|
||||
data=result_data,
|
||||
screenshots=screenshots,
|
||||
error=f"Chatwoot setup failed: {str(e)}",
|
||||
)
|
||||
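
For orientation, a scenario like the one above would be driven roughly as follows. This is a hedged sketch: the ScenarioOptions keyword arguments mirror the fields the code reads (screenshot_on_success, screenshot_on_failure, artifacts_dir, timeout_ms), but the actual constructor lives in app.playwright_scenarios.base and is not shown in this diff, so its exact signature is an assumption.

import asyncio
from pathlib import Path

from playwright.async_api import async_playwright

from app.playwright_scenarios.base import ScenarioOptions
from app.playwright_scenarios.chatwoot.initial_setup import ChatwootInitialSetup

async def main() -> None:
    async with async_playwright() as pw:
        browser = await pw.chromium.launch()
        page = await browser.new_page()
        result = await ChatwootInitialSetup().execute(
            page,
            inputs={
                "base_url": "https://chatwoot.example.com",  # placeholder values
                "admin_name": "Jane Admin",
                "company_name": "Example Co",
                "admin_email": "admin@example.com",
            },
            options=ScenarioOptions(  # assumed keyword signature, see note above
                screenshot_on_success=True,
                screenshot_on_failure=True,
                artifacts_dir=Path("/tmp/artifacts"),
                timeout_ms=30000,
            ),
        )
        # The generated password comes back in result.data; store it securely.
        print(result.success, result.data["admin_password"])
        await browser.close()

asyncio.run(main())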
120
letsbe-sysadmin-agent/app/playwright_scenarios/echo.py
Normal file
@@ -0,0 +1,120 @@
"""Echo scenario for testing Playwright executor.

This simple scenario navigates to a URL and verifies the page loads.
Useful for testing the Playwright infrastructure without complex workflows.
"""

from typing import Any

from playwright.async_api import Page

from app.playwright_scenarios import register_scenario
from app.playwright_scenarios.base import BaseScenario, ScenarioOptions, ScenarioResult


@register_scenario
class EchoScenario(BaseScenario):
    """Simple echo scenario for testing Playwright executor.

    This scenario navigates to a URL and returns basic page information.
    Useful for verifying:
    - Playwright is installed and working
    - Domain restrictions are enforced
    - Screenshots are captured correctly

    Required inputs:
        url: The URL to navigate to

    Optional inputs:
        wait_for_selector: CSS selector to wait for (default: body)
        expected_title: Expected page title (optional validation)

    Result data:
        title: Page title after load
        url: Final URL after any redirects
        content_length: Approximate content length
    """

    @property
    def name(self) -> str:
        return "echo"

    @property
    def required_inputs(self) -> list[str]:
        return ["url"]

    @property
    def optional_inputs(self) -> list[str]:
        return ["wait_for_selector", "expected_title"]

    @property
    def description(self) -> str:
        return "Navigate to URL and return page info (test scenario)"

    async def execute(
        self,
        page: Page,
        inputs: dict[str, Any],
        options: ScenarioOptions,
    ) -> ScenarioResult:
        """Navigate to URL and capture page information.

        Args:
            page: Playwright Page object
            inputs: Scenario inputs (url, optional wait_for_selector)
            options: Scenario options

        Returns:
            ScenarioResult with page information
        """
        url = inputs["url"]
        wait_for_selector = inputs.get("wait_for_selector", "body")
        expected_title = inputs.get("expected_title")

        screenshots = []
        result_data = {}

        try:
            # Navigate to the URL
            response = await page.goto(url, wait_until="networkidle")

            # Wait for specified selector
            if wait_for_selector:
                await page.wait_for_selector(wait_for_selector, timeout=options.timeout_ms)

            # Collect page information
            result_data = {
                "title": await page.title(),
                "url": page.url,
                "status_code": response.status if response else None,
                "content_length": len(await page.content()),
            }

            # Validate title if expected
            if expected_title and result_data["title"] != expected_title:
                return ScenarioResult(
                    success=False,
                    data=result_data,
                    screenshots=screenshots,
                    error=f"Title mismatch: expected '{expected_title}', got '{result_data['title']}'",
                )

            # Take screenshot if requested
            if options.screenshot_on_success and options.artifacts_dir:
                screenshot_path = options.artifacts_dir / "echo_result.png"
                await page.screenshot(path=str(screenshot_path))
                screenshots.append(str(screenshot_path))

            return ScenarioResult(
                success=True,
                data=result_data,
                screenshots=screenshots,
            )

        except Exception as e:
            return ScenarioResult(
                success=False,
                data=result_data,
                screenshots=screenshots,
                error=f"Echo scenario failed: {str(e)}",
            )
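
A quick smoke test of the echo scenario might look like the sketch below. It instantiates the class directly; whatever lookup the register_scenario registry provides is not shown in this diff, so it is not used here, and the ScenarioOptions call assumes keyword defaults for the fields the scenario does not read.

import asyncio

from playwright.async_api import async_playwright

from app.playwright_scenarios.base import ScenarioOptions
from app.playwright_scenarios.echo import EchoScenario

async def smoke_test() -> None:
    async with async_playwright() as pw:
        browser = await pw.chromium.launch()
        page = await browser.new_page()
        result = await EchoScenario().execute(
            page,
            inputs={"url": "https://example.com", "expected_title": "Example Domain"},
            options=ScenarioOptions(timeout_ms=15000),  # assumed constructor
        )
        assert result.success, result.error
        print(result.data)  # title, final url, status_code, content_length
        await browser.close()

asyncio.run(smoke_test())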
@@ -0,0 +1,5 @@
"""Keycloak browser automation scenarios."""

from app.playwright_scenarios.keycloak.initial_setup import KeycloakInitialSetup

__all__ = ["KeycloakInitialSetup"]
@@ -0,0 +1,272 @@
"""Keycloak initial setup scenario.

Automates the first-time setup for a fresh Keycloak installation.
This scenario:
1. Navigates to the Keycloak admin console
2. Logs in with the admin credentials (set via env vars)
3. Creates a "letsbe" realm
4. Configures basic realm settings
"""

from typing import Any

from playwright.async_api import Page

from app.playwright_scenarios import register_scenario
from app.playwright_scenarios.base import BaseScenario, ScenarioOptions, ScenarioResult


@register_scenario
class KeycloakInitialSetup(BaseScenario):
    """Automate Keycloak initial realm setup.

    This scenario handles the initial configuration after Keycloak is deployed.
    It logs into the admin console and creates the "letsbe" realm with
    appropriate settings.

    Keycloak admin credentials are set via environment variables during
    deployment (KEYCLOAK_ADMIN / KEYCLOAK_ADMIN_PASSWORD), so this scenario
    only needs to create the realm.

    Required inputs:
        base_url: The Keycloak instance URL (e.g., https://auth.example.com)
        admin_user: Admin username (set during deployment)
        admin_password: Admin password (set during deployment)

    Optional inputs:
        realm_name: Name of the realm to create (default: "letsbe")

    Result data:
        login_successful: Whether admin login succeeded
        realm_created: Whether the realm was created
        realm_name: Name of the created realm
        already_configured: True if realm already exists
    """

    @property
    def name(self) -> str:
        return "keycloak_initial_setup"

    @property
    def required_inputs(self) -> list[str]:
        return ["base_url", "admin_user", "admin_password"]

    @property
    def optional_inputs(self) -> list[str]:
        return ["realm_name"]

    @property
    def description(self) -> str:
        return "Automate Keycloak admin login and realm creation"

    async def execute(
        self,
        page: Page,
        inputs: dict[str, Any],
        options: ScenarioOptions,
    ) -> ScenarioResult:
        """Execute the Keycloak initial setup.

        Args:
            page: Playwright Page object
            inputs: Scenario inputs (base_url, admin_user, admin_password)
            options: Scenario options

        Returns:
            ScenarioResult with setup status
        """
        base_url = inputs["base_url"].rstrip("/")
        admin_user = inputs["admin_user"]
        admin_password = inputs["admin_password"]
        realm_name = inputs.get("realm_name", "letsbe")

        screenshots = []
        result_data = {
            "login_successful": False,
            "realm_created": False,
            "realm_name": realm_name,
            "already_configured": False,
        }

        try:
            # Navigate to Keycloak admin console
            admin_url = f"{base_url}/admin/master/console/"
            await page.goto(admin_url, wait_until="networkidle")

            # Keycloak redirects to login page
            # Wait for the login form
            username_input = page.locator('input#username, input[name="username"]').first
            await username_input.wait_for(state="visible", timeout=15000)

            # Fill login form
            await username_input.fill(admin_user)

            password_input = page.locator('input#password, input[name="password"]').first
            await password_input.fill(admin_password)

            # Click login button (use .first: the combined selector can match
            # several candidates and a multi-element click would fail strict mode)
            login_button = page.locator(
                'button#kc-login, '
                'input#kc-login, '
                'button[type="submit"], '
                'input[type="submit"]'
            ).first
            await login_button.click()

            # Wait for admin console to load
            try:
                await page.wait_for_url(
                    lambda url: "/admin" in url and "login" not in url.lower(),
                    timeout=30000,
                )
                result_data["login_successful"] = True
            except Exception:
                # Check for error message
                error_el = page.locator('.alert-error, .kc-feedback-text, #input-error')
                if await error_el.count() > 0:
                    error_text = await error_el.first.text_content()
                    return ScenarioResult(
                        success=False,
                        data=result_data,
                        screenshots=screenshots,
                        error=f"Login failed: {error_text}",
                    )
                return ScenarioResult(
                    success=False,
                    data=result_data,
                    screenshots=screenshots,
                    error="Login failed - could not reach admin console",
                )

            # Check if realm already exists by navigating to realm selector
            # Look for the realm dropdown or realm list
            realm_selector = page.locator(
                '[data-testid="realmSelector"], '
                '.pf-c-dropdown__toggle, '
                '#realm-select'
            )

            if await realm_selector.count() > 0:
                await realm_selector.first.click()
                await page.wait_for_timeout(1000)

                # Check if our realm already exists in the dropdown
                existing_realm = page.locator(
                    f'a:has-text("{realm_name}"), '
                    f'button:has-text("{realm_name}"), '
                    f'[data-testid="realmSelector"] >> text="{realm_name}"'
                )
                if await existing_realm.count() > 0:
                    result_data["already_configured"] = True
                    result_data["realm_created"] = True

                    # Click away to close dropdown
                    await page.keyboard.press("Escape")

                    return ScenarioResult(
                        success=True,
                        data=result_data,
                        screenshots=screenshots,
                        error=None,
                    )

                # Close dropdown
                await page.keyboard.press("Escape")

            # Create new realm
            # Navigate to realm creation page
            create_realm_button = page.locator(
                'a:has-text("Create Realm"), '
                'button:has-text("Create Realm"), '
                'a:has-text("Create realm"), '
                'button:has-text("Create realm"), '
                '[data-testid="add-realm"]'
            )

            if await create_realm_button.count() > 0:
                await create_realm_button.first.click()
            else:
                # Try navigating directly
                await page.goto(
                    f"{base_url}/admin/master/console/#/create/realm",
                    wait_until="networkidle",
                )

            await page.wait_for_timeout(2000)

            # Fill in realm name
            realm_name_input = page.locator(
                'input#kc-realm, '
                'input[name="realm"], '
                'input[data-testid="realmName"], '
                'input#name'
            ).first
            await realm_name_input.wait_for(state="visible", timeout=10000)
            await realm_name_input.fill(realm_name)

            # Ensure realm is enabled
            enabled_toggle = page.locator(
                'input[name="enabled"], '
                '[data-testid="realmEnabled"]'
            )
            if await enabled_toggle.count() > 0:
                is_checked = await enabled_toggle.first.is_checked()
                if not is_checked:
                    await enabled_toggle.first.click()

            # Take screenshot before creating
            if options.screenshot_on_success and options.artifacts_dir:
                pre_create_path = options.artifacts_dir / "keycloak_pre_create.png"
                await page.screenshot(path=str(pre_create_path))
                screenshots.append(str(pre_create_path))

            # Click Create button
            create_button = page.locator(
                'button:has-text("Create"), '
                'button[type="submit"]'
            ).first
            await create_button.click()

            # Wait for realm to be created (redirects to realm settings)
            await page.wait_for_timeout(3000)

            # Verify realm was created by checking URL or page content
            current_url = page.url
            if realm_name in current_url or "realm-settings" in current_url:
                result_data["realm_created"] = True
            else:
                # Check for success notification
                success_el = page.locator(
                    '.pf-c-alert.pf-m-success, '
                    '[class*="success"], '
                    ':has-text("Realm created")'
                )
                if await success_el.count() > 0:
                    result_data["realm_created"] = True

            # Take final screenshot
            if options.screenshot_on_success and options.artifacts_dir:
                final_path = options.artifacts_dir / "keycloak_setup_complete.png"
                await page.screenshot(path=str(final_path))
                screenshots.append(str(final_path))

            return ScenarioResult(
                success=result_data["realm_created"],
                data=result_data,
                screenshots=screenshots,
                error=None if result_data["realm_created"] else "Realm creation may not have completed",
            )

        except Exception as e:
            # Take error screenshot
            if options.screenshot_on_failure and options.artifacts_dir:
                error_path = options.artifacts_dir / "keycloak_setup_error.png"
                await page.screenshot(path=str(error_path))
                screenshots.append(str(error_path))

            return ScenarioResult(
                success=False,
                data=result_data,
                screenshots=screenshots,
                error=f"Keycloak setup failed: {str(e)}",
            )
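
The input contract for the realm scenario is small; a typical task payload, with illustrative values, would be:

inputs = {
    "base_url": "https://auth.example.com",       # Keycloak root URL
    "admin_user": "admin",                        # from KEYCLOAK_ADMIN
    "admin_password": "<set during deployment>",  # from KEYCLOAK_ADMIN_PASSWORD
    "realm_name": "letsbe",                       # optional; this is the default
}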
@@ -0,0 +1,5 @@
"""n8n browser automation scenarios."""

from app.playwright_scenarios.n8n.initial_setup import N8nInitialSetup

__all__ = ["N8nInitialSetup"]
@@ -0,0 +1,264 @@
"""n8n initial setup scenario.

Automates the first-time setup for a fresh n8n installation.
This scenario:
1. Navigates to the n8n setup page
2. Creates the owner account with email and password
3. Skips optional setup steps
"""

import secrets
import string
from typing import Any

from playwright.async_api import Page

from app.playwright_scenarios import register_scenario
from app.playwright_scenarios.base import BaseScenario, ScenarioOptions, ScenarioResult


def generate_secure_password(length: int = 24) -> str:
    """Generate a cryptographically secure password.

    Args:
        length: Password length (default: 24)

    Returns:
        A secure random password with mixed characters
    """
    alphabet = string.ascii_letters + string.digits + "!@#$%^&*"
    password = [
        secrets.choice(string.ascii_lowercase),
        secrets.choice(string.ascii_uppercase),
        secrets.choice(string.digits),
        secrets.choice("!@#$%^&*"),
    ]
    password.extend(secrets.choice(alphabet) for _ in range(length - 4))
    password_list = list(password)
    secrets.SystemRandom().shuffle(password_list)
    return "".join(password_list)


@register_scenario
class N8nInitialSetup(BaseScenario):
    """Automate n8n first-time owner account setup.

    This scenario handles the initial owner account creation when
    n8n is freshly installed. It fills in the account details
    and completes the setup wizard.

    Required inputs:
        base_url: The n8n instance URL (e.g., https://n8n.example.com)
        admin_email: Email address for the owner account

    Optional inputs:
        admin_password: Password for owner account (auto-generated if not provided)
        admin_first_name: First name for the owner (default: "Admin")
        admin_last_name: Last name for the owner (default: "User")

    Result data:
        setup_completed: Whether initial setup was completed
        admin_email: The configured owner email address
        admin_password: The password (generated or provided) - STORE SECURELY
        already_configured: True if n8n was already set up
    """

    @property
    def name(self) -> str:
        return "n8n_initial_setup"

    @property
    def required_inputs(self) -> list[str]:
        return ["base_url", "admin_email"]

    @property
    def optional_inputs(self) -> list[str]:
        return ["admin_password", "admin_first_name", "admin_last_name"]

    @property
    def description(self) -> str:
        return "Automate n8n first-time owner account setup"

    async def execute(
        self,
        page: Page,
        inputs: dict[str, Any],
        options: ScenarioOptions,
    ) -> ScenarioResult:
        """Execute the n8n initial setup.

        Args:
            page: Playwright Page object
            inputs: Scenario inputs (base_url, admin_email)
            options: Scenario options

        Returns:
            ScenarioResult with setup status and credentials
        """
        base_url = inputs["base_url"].rstrip("/")
        admin_email = inputs["admin_email"]
        admin_password = inputs.get("admin_password") or generate_secure_password()
        admin_first_name = inputs.get("admin_first_name", "Admin")
        admin_last_name = inputs.get("admin_last_name", "User")

        screenshots = []
        result_data = {
            "setup_completed": False,
            "admin_email": admin_email,
            "admin_password": admin_password,
            "already_configured": False,
        }

        try:
            # Navigate to n8n
            await page.goto(base_url, wait_until="networkidle")

            current_url = page.url

            # Check if already configured (redirects to signin)
            if "/signin" in current_url:
                result_data["already_configured"] = True
                result_data["setup_completed"] = True
                return ScenarioResult(
                    success=True,
                    data=result_data,
                    screenshots=screenshots,
                    error=None,
                )

            # n8n setup page should show the owner setup form
            # Look for setup form elements
            email_input = page.locator(
                'input[name="email"], '
                'input[type="email"], '
                'input[placeholder*="email" i], '
                'input[autocomplete="email"]'
            )

            if await email_input.count() == 0:
                # Try navigating to setup URL
                await page.goto(f"{base_url}/setup", wait_until="networkidle")

                if "/signin" in page.url:
                    result_data["already_configured"] = True
                    result_data["setup_completed"] = True
                    return ScenarioResult(
                        success=True,
                        data=result_data,
                        screenshots=screenshots,
                        error=None,
                    )

            # Fill in the owner setup form
            # First name
            first_name_input = page.locator(
                'input[name="firstName"], '
                'input[name="first_name"], '
                'input[placeholder*="first" i], '
                'input[autocomplete="given-name"]'
            ).first
            if await first_name_input.count() > 0:
                await first_name_input.wait_for(state="visible", timeout=10000)
                await first_name_input.fill(admin_first_name)

            # Last name
            last_name_input = page.locator(
                'input[name="lastName"], '
                'input[name="last_name"], '
                'input[placeholder*="last" i], '
                'input[autocomplete="family-name"]'
            ).first
            if await last_name_input.count() > 0:
                await last_name_input.fill(admin_last_name)

            # Email
            email_input = page.locator(
                'input[name="email"], '
                'input[type="email"], '
                'input[placeholder*="email" i]'
            ).first
            await email_input.wait_for(state="visible", timeout=10000)
            await email_input.fill(admin_email)

            # Password
            password_input = page.locator(
                'input[name="password"], '
                'input[type="password"]'
            ).first
            await password_input.fill(admin_password)

            # Take screenshot before submitting
            if options.screenshot_on_success and options.artifacts_dir:
                pre_submit_path = options.artifacts_dir / "n8n_pre_submit.png"
                await page.screenshot(path=str(pre_submit_path))
                screenshots.append(str(pre_submit_path))

            # Click Next / Create Account button
            submit_button = page.locator(
                'button:has-text("Next"), '
                'button:has-text("Create"), '
                'button:has-text("Get started"), '
                'button[type="submit"]'
            ).first
            await submit_button.click()

            # Wait for next step or dashboard
            await page.wait_for_timeout(3000)

            # n8n may show additional setup steps (personalization, usage, etc.)
            # Skip through them
            for _ in range(3):
                skip_button = page.locator(
                    'button:has-text("Skip"), '
                    'a:has-text("Skip"), '
                    'button:has-text("Get started"), '
                    'button:has-text("Next")'
                )
                if await skip_button.count() > 0:
                    await skip_button.first.click()
                    await page.wait_for_timeout(2000)
                else:
                    break

            # Check if we reached the workflow editor or dashboard
            await page.wait_for_timeout(2000)
            current_url = page.url

            if any(kw in current_url for kw in ["/workflow", "/home", "/dashboard"]):
                result_data["setup_completed"] = True
            else:
                # Check for indicators of successful setup
                canvas = page.locator(
                    '.workflow-canvas, '
                    '[class*="workflow"], '
                    '[class*="canvas"], '
                    '#app'
                )
                if await canvas.count() > 0:
                    result_data["setup_completed"] = True

            # Take final screenshot
            if options.screenshot_on_success and options.artifacts_dir:
                final_path = options.artifacts_dir / "n8n_setup_complete.png"
                await page.screenshot(path=str(final_path))
                screenshots.append(str(final_path))

            return ScenarioResult(
                success=result_data["setup_completed"],
                data=result_data,
                screenshots=screenshots,
                error=None if result_data["setup_completed"] else "Setup may not have completed",
            )

        except Exception as e:
            if options.screenshot_on_failure and options.artifacts_dir:
                error_path = options.artifacts_dir / "n8n_setup_error.png"
                await page.screenshot(path=str(error_path))
                screenshots.append(str(error_path))

            return ScenarioResult(
                success=False,
                data=result_data,
                screenshots=screenshots,
                error=f"n8n setup failed: {str(e)}",
            )
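
The generate_secure_password helper, duplicated verbatim across these scenario modules, guarantees at least one lowercase letter, one uppercase letter, one digit, and one special character before shuffling. A short check of those invariants, as a sketch importing the copy from this module:

import string

from app.playwright_scenarios.n8n.initial_setup import generate_secure_password

pw = generate_secure_password(24)
assert len(pw) == 24
assert any(c in string.ascii_lowercase for c in pw)
assert any(c in string.ascii_uppercase for c in pw)
assert any(c in string.digits for c in pw)
assert any(c in "!@#$%^&*" for c in pw)

Since the helper is copy-pasted into five modules, hoisting it into a shared utility module would be the obvious follow-up refactor.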
@@ -0,0 +1,5 @@
"""Nextcloud browser automation scenarios."""

from app.playwright_scenarios.nextcloud.initial_setup import NextcloudInitialSetup

__all__ = ["NextcloudInitialSetup"]
@@ -0,0 +1,231 @@
"""Nextcloud initial setup scenario.

Automates the first-time setup wizard for a fresh Nextcloud installation.
This scenario:
1. Navigates to the Nextcloud instance
2. Creates the admin account
3. Optionally skips recommended apps installation
4. Verifies successful login to the dashboard
"""

from typing import Any

from playwright.async_api import Page

from app.playwright_scenarios import register_scenario
from app.playwright_scenarios.base import BaseScenario, ScenarioOptions, ScenarioResult


@register_scenario
class NextcloudInitialSetup(BaseScenario):
    """Automate Nextcloud first-time setup wizard.

    This scenario handles the initial admin account creation when
    Nextcloud is freshly installed. It's idempotent - if setup is
    already complete, it will detect this and succeed.

    Required inputs:
        base_url: The Nextcloud instance URL (e.g., https://cloud.example.com)
        admin_username: Username for the admin account
        admin_password: Password for the admin account

    Optional inputs:
        skip_recommended_apps: Skip the recommended apps step (default: True)

    Result data:
        admin_created: Whether a new admin was created (False if already setup)
        login_successful: Whether login to dashboard succeeded
        setup_skipped: True if Nextcloud was already configured
    """

    @property
    def name(self) -> str:
        return "nextcloud_initial_setup"

    @property
    def required_inputs(self) -> list[str]:
        return ["base_url", "admin_username", "admin_password"]

    @property
    def optional_inputs(self) -> list[str]:
        return ["skip_recommended_apps"]

    @property
    def description(self) -> str:
        return "Automate Nextcloud first-time admin setup wizard"

    async def execute(
        self,
        page: Page,
        inputs: dict[str, Any],
        options: ScenarioOptions,
    ) -> ScenarioResult:
        """Execute the Nextcloud initial setup.

        Args:
            page: Playwright Page object
            inputs: Scenario inputs (base_url, admin_username, admin_password)
            options: Scenario options

        Returns:
            ScenarioResult with setup status
        """
        base_url = inputs["base_url"].rstrip("/")
        admin_username = inputs["admin_username"]
        admin_password = inputs["admin_password"]
        skip_recommended_apps = inputs.get("skip_recommended_apps", True)

        screenshots = []
        result_data = {
            "admin_created": False,
            "login_successful": False,
            "setup_skipped": False,
        }

        try:
            # Navigate to Nextcloud
            await page.goto(base_url, wait_until="networkidle")

            # Check if we're on the setup page or login page
            current_url = page.url

            # Detect if setup is already complete (redirects to login)
            if "/login" in current_url or await page.locator('input[name="user"]').count() > 0:
                # Already configured, try to login
                result_data["setup_skipped"] = True
                login_success = await self._try_login(
                    page, admin_username, admin_password
                )
                result_data["login_successful"] = login_success

                return ScenarioResult(
                    success=login_success,
                    data=result_data,
                    screenshots=screenshots,
                    error=None if login_success else "Login failed - check credentials",
                )

            # We're on the setup page - create admin account
            # Wait for the setup form to be visible
            admin_user_input = page.locator('input[id="adminlogin"], input[name="adminlogin"]').first
            await admin_user_input.wait_for(state="visible", timeout=10000)

            # Fill in admin credentials
            await admin_user_input.fill(admin_username)

            admin_pass_input = page.locator('input[id="adminpass"], input[name="adminpass"]').first
            await admin_pass_input.fill(admin_password)

            # Check for data directory input (may or may not be present)
            data_dir_input = page.locator('input[id="directory"]')
            if await data_dir_input.count() > 0 and await data_dir_input.is_visible():
                # Keep default data directory
                pass

            # Click install/finish setup button (first match only - the
            # combined selector can resolve to multiple elements)
            # Nextcloud uses various button texts depending on version
            install_button = page.locator(
                'input[type="submit"][value*="Install"], '
                'input[type="submit"][value*="Finish"], '
                'button:has-text("Install"), '
                'button:has-text("Finish setup")'
            ).first
            await install_button.click()

            # Wait for installation to complete (this can take a while)
            # Look for either dashboard or recommended apps screen
            try:
                await page.wait_for_url(
                    lambda url: "/apps" in url or "/index.php" in url or "dashboard" in url.lower(),
                    timeout=120000,  # 2 minutes for installation
                )
            except Exception:
                # May be on recommended apps screen
                pass

            result_data["admin_created"] = True

            # Handle recommended apps screen if present
            if skip_recommended_apps:
                skip_button = page.locator(
                    'button:has-text("Skip"), '
                    'a:has-text("Skip"), '
                    '.skip-button'
                )
                if await skip_button.count() > 0:
                    await skip_button.first.click()
                    await page.wait_for_load_state("networkidle")

            # Verify we're logged in by checking for user menu or dashboard elements
            dashboard_indicators = page.locator(
                '#user-menu, '
                '.user-menu, '
                '[data-id="dashboard"], '
                '#nextcloud, '
                '.app-dashboard'
            )

            try:
                await dashboard_indicators.first.wait_for(state="visible", timeout=30000)
                result_data["login_successful"] = True
            except Exception:
                # Try one more check - look for any indication we're logged in
                if await page.locator('.header-menu').count() > 0:
                    result_data["login_successful"] = True

            # Take a screenshot of the final state if requested
            if options.screenshot_on_success and options.artifacts_dir:
                screenshot_path = options.artifacts_dir / "setup_complete.png"
                await page.screenshot(path=str(screenshot_path))
                screenshots.append(str(screenshot_path))

            success = result_data["admin_created"] and result_data["login_successful"]
            return ScenarioResult(
                success=success,
                data=result_data,
                screenshots=screenshots,
                error=None if success else "Setup completed but verification failed",
            )

        except Exception as e:
            return ScenarioResult(
                success=False,
                data=result_data,
                screenshots=screenshots,
                error=f"Nextcloud setup failed: {str(e)}",
            )

    async def _try_login(self, page: Page, username: str, password: str) -> bool:
        """Attempt to login to an already-configured Nextcloud.

        Args:
            page: Playwright Page object (should be on login page)
            username: Username to login with
            password: Password to login with

        Returns:
            True if login succeeded, False otherwise
        """
        try:
            # Fill login form
            await page.locator('input[name="user"]').fill(username)
            await page.locator('input[name="password"]').fill(password)

            # Submit login
            await page.locator('input[type="submit"], button[type="submit"]').first.click()

            # Wait for redirect to dashboard
            await page.wait_for_url(
                lambda url: "/login" not in url,
                timeout=30000,
            )

            # Check for login error message
            error_msg = page.locator('.warning, .error, [class*="error"]')
            if await error_msg.count() > 0 and await error_msg.first.is_visible():
                return False

            return True

        except Exception:
            return False
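
Several of these scenarios rely on page.wait_for_url with a callable predicate rather than a glob or regex; Playwright's Python API accepts all three forms. A minimal illustration of the callable form, as used in _try_login above:

from playwright.async_api import Page

async def wait_until_logged_in(page: Page) -> None:
    # The predicate receives the current URL as a string and should
    # return True once the expected navigation has happened.
    await page.wait_for_url(
        lambda url: "/login" not in url,
        timeout=30000,
    )

The callable form is the right fit here because the post-login destination varies by Nextcloud version; the only stable signal is leaving the login page.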
@@ -0,0 +1,5 @@
"""Poste.io browser automation scenarios."""

from app.playwright_scenarios.poste.initial_setup import PosteInitialSetup

__all__ = ["PosteInitialSetup"]
@@ -0,0 +1,233 @@
"""Poste.io initial setup scenario.

Automates the first-time setup for a fresh Poste.io mail server installation.
This scenario:
1. Navigates to the Poste.io admin setup page
2. Configures the mailserver hostname
3. Creates the admin email account with a generated password
4. Returns the generated credentials for secure storage
"""

import secrets
import string
from typing import Any
from urllib.parse import urlparse

from playwright.async_api import Page

from app.playwright_scenarios import register_scenario
from app.playwright_scenarios.base import BaseScenario, ScenarioOptions, ScenarioResult


def generate_secure_password(length: int = 24) -> str:
    """Generate a cryptographically secure password.

    Args:
        length: Password length (default: 24)

    Returns:
        A secure random password with mixed characters
    """
    # Use a mix of letters, digits, and safe special characters
    alphabet = string.ascii_letters + string.digits + "!@#$%^&*"
    # Ensure at least one of each type
    password = [
        secrets.choice(string.ascii_lowercase),
        secrets.choice(string.ascii_uppercase),
        secrets.choice(string.digits),
        secrets.choice("!@#$%^&*"),
    ]
    # Fill the rest randomly
    password.extend(secrets.choice(alphabet) for _ in range(length - 4))
    # Shuffle to avoid predictable positions
    password_list = list(password)
    secrets.SystemRandom().shuffle(password_list)
    return "".join(password_list)


@register_scenario
class PosteInitialSetup(BaseScenario):
    """Automate Poste.io first-time setup wizard.

    This scenario handles the initial server configuration when
    Poste.io is freshly installed. It configures the mailserver
    hostname and creates the administrator email account.

    Required inputs:
        base_url: The Poste.io instance URL (e.g., https://mail.example.com)
        admin_email: Admin email address (e.g., admin@example.com)

    Optional inputs:
        admin_password: Password for admin account (auto-generated if not provided)
        mailserver_hostname: Override mailserver hostname (defaults to URL hostname)

    Result data:
        setup_completed: Whether initial setup was completed
        admin_email: The configured admin email address
        admin_password: The password (generated or provided) - STORE SECURELY
        mailserver_hostname: The configured hostname
        already_configured: True if Poste was already set up
    """

    @property
    def name(self) -> str:
        return "poste_initial_setup"

    @property
    def required_inputs(self) -> list[str]:
        return ["base_url", "admin_email"]

    @property
    def optional_inputs(self) -> list[str]:
        return ["admin_password", "mailserver_hostname"]

    @property
    def description(self) -> str:
        return "Automate Poste.io first-time mail server setup"

    async def execute(
        self,
        page: Page,
        inputs: dict[str, Any],
        options: ScenarioOptions,
    ) -> ScenarioResult:
        """Execute the Poste.io initial setup.

        Args:
            page: Playwright Page object
            inputs: Scenario inputs (base_url, admin_email, optional password)
            options: Scenario options

        Returns:
            ScenarioResult with setup status and credentials
        """
        base_url = inputs["base_url"].rstrip("/")
        admin_email = inputs["admin_email"]

        # Generate password if not provided
        admin_password = inputs.get("admin_password") or generate_secure_password()

        # Extract hostname from URL if not provided
        parsed_url = urlparse(base_url)
        mailserver_hostname = inputs.get("mailserver_hostname") or parsed_url.netloc

        screenshots = []
        result_data = {
            "setup_completed": False,
            "admin_email": admin_email,
            "admin_password": admin_password,  # Return for secure storage
            "mailserver_hostname": mailserver_hostname,
            "already_configured": False,
        }

        try:
            # Navigate to Poste.io
            await page.goto(base_url, wait_until="networkidle")

            current_url = page.url

            # Check if we're on the setup page
            if "/admin/install/server" not in current_url:
                # Check if redirected to login (already configured)
                if "/admin/login" in current_url or "/webmail" in current_url:
                    result_data["already_configured"] = True
                    result_data["setup_completed"] = True
                    return ScenarioResult(
                        success=True,
                        data=result_data,
                        screenshots=screenshots,
                        error=None,
                    )

                # Try navigating directly to setup page
                await page.goto(f"{base_url}/admin/install/server", wait_until="networkidle")

                # If still not on setup, it's already configured
                if "/admin/install/server" not in page.url:
                    result_data["already_configured"] = True
                    result_data["setup_completed"] = True
                    return ScenarioResult(
                        success=True,
                        data=result_data,
                        screenshots=screenshots,
                        error=None,
                    )

            # We're on the setup page - configure the mail server

            # Wait for the hostname input to be visible
            hostname_input = page.locator('input[placeholder*="mail.example.com"]')
            await hostname_input.wait_for(state="visible", timeout=10000)

            # Clear and fill hostname (may be pre-filled)
            await hostname_input.clear()
            await hostname_input.fill(mailserver_hostname)

            # Fill admin email
            admin_email_input = page.locator('input[placeholder*="admin@example.com"]')
            await admin_email_input.wait_for(state="visible", timeout=5000)
            await admin_email_input.fill(admin_email)

            # Fill password
            password_input = page.locator('input[type="password"], input[placeholder*="Password"]').last
            await password_input.wait_for(state="visible", timeout=5000)
            await password_input.fill(admin_password)

            # Take screenshot before submitting if requested
            if options.screenshot_on_success and options.artifacts_dir:
                pre_submit_path = options.artifacts_dir / "poste_pre_submit.png"
                await page.screenshot(path=str(pre_submit_path))
                screenshots.append(str(pre_submit_path))

            # Click Submit button
            submit_button = page.locator('button:has-text("Submit")')
            await submit_button.click()

            # Wait for setup to complete - should redirect away from install page
            try:
                await page.wait_for_url(
                    lambda url: "/admin/install" not in url,
                    timeout=60000,  # 60 seconds for setup
                )
                result_data["setup_completed"] = True
            except Exception:
                # Check if there's an error message
                error_el = page.locator('.error, .alert-danger, [class*="error"]')
                if await error_el.count() > 0:
                    error_text = await error_el.first.text_content()
                    return ScenarioResult(
                        success=False,
                        data=result_data,
                        screenshots=screenshots,
                        error=f"Setup failed: {error_text}",
                    )

                # Still on page but no error - might have succeeded
                result_data["setup_completed"] = True

            # Take final screenshot
            if options.screenshot_on_success and options.artifacts_dir:
                final_path = options.artifacts_dir / "poste_setup_complete.png"
                await page.screenshot(path=str(final_path))
                screenshots.append(str(final_path))

            return ScenarioResult(
                success=result_data["setup_completed"],
                data=result_data,
                screenshots=screenshots,
                error=None,
            )

        except Exception as e:
            # Take error screenshot
            if options.screenshot_on_failure and options.artifacts_dir:
                error_path = options.artifacts_dir / "poste_setup_error.png"
                await page.screenshot(path=str(error_path))
                screenshots.append(str(error_path))

            return ScenarioResult(
                success=False,
                data=result_data,
                screenshots=screenshots,
                error=f"Poste.io setup failed: {str(e)}",
            )
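
When mailserver_hostname is not supplied, the scenario derives it from base_url with urllib.parse.urlparse. Note that netloc keeps an explicit port if one is present, which may or may not be what the mail server expects as its hostname:

from urllib.parse import urlparse

print(urlparse("https://mail.example.com").netloc)         # mail.example.com
print(urlparse("https://mail.example.com:8443").netloc)    # mail.example.com:8443 (port retained)
print(urlparse("https://mail.example.com:8443").hostname)  # mail.example.com (port stripped)

If instances are ever deployed on non-standard ports, switching the fallback to parsed_url.hostname would avoid writing a port into the mailserver hostname field.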
@@ -0,0 +1,5 @@
"""Umami browser automation scenarios."""

from app.playwright_scenarios.umami.initial_setup import UmamiInitialSetup

__all__ = ["UmamiInitialSetup"]
@@ -0,0 +1,291 @@
"""Umami initial setup scenario.

Automates the first-time setup for a fresh Umami installation.
This scenario:
1. Navigates to the Umami login page
2. Logs in with default credentials (admin / umami)
3. Changes the admin password
4. Optionally adds the first website to track
"""

import secrets
import string
from typing import Any

from playwright.async_api import Page

from app.playwright_scenarios import register_scenario
from app.playwright_scenarios.base import BaseScenario, ScenarioOptions, ScenarioResult


def generate_secure_password(length: int = 24) -> str:
    """Generate a cryptographically secure password.

    Args:
        length: Password length (default: 24)

    Returns:
        A secure random password with mixed characters
    """
    alphabet = string.ascii_letters + string.digits + "!@#$%^&*"
    password = [
        secrets.choice(string.ascii_lowercase),
        secrets.choice(string.ascii_uppercase),
        secrets.choice(string.digits),
        secrets.choice("!@#$%^&*"),
    ]
    password.extend(secrets.choice(alphabet) for _ in range(length - 4))
    password_list = list(password)
    secrets.SystemRandom().shuffle(password_list)
    return "".join(password_list)


@register_scenario
class UmamiInitialSetup(BaseScenario):
    """Automate Umami first-time setup.

    This scenario handles the initial configuration after Umami is deployed.
    Umami ships with default credentials (admin / umami). This scenario
    logs in with those defaults, changes the password, and optionally
    adds the first website to track.

    Required inputs:
        base_url: The Umami instance URL (e.g., https://analytics.example.com)

    Optional inputs:
        admin_password: New password for admin (auto-generated if not provided)
        website_name: Name of the first website to add
        website_url: URL of the first website to track

    Result data:
        setup_completed: Whether initial setup was completed
        admin_password: The new admin password - STORE SECURELY
        password_changed: Whether the default password was changed
        website_added: Whether a website was added
        already_configured: True if default password no longer works
    """

    @property
    def name(self) -> str:
        return "umami_initial_setup"

    @property
    def required_inputs(self) -> list[str]:
        return ["base_url"]

    @property
    def optional_inputs(self) -> list[str]:
        return ["admin_password", "website_name", "website_url"]

    @property
    def description(self) -> str:
        return "Automate Umami first-time password change and website setup"

    async def execute(
        self,
        page: Page,
        inputs: dict[str, Any],
        options: ScenarioOptions,
    ) -> ScenarioResult:
        """Execute the Umami initial setup.

        Args:
            page: Playwright Page object
            inputs: Scenario inputs (base_url)
            options: Scenario options

        Returns:
            ScenarioResult with setup status and credentials
        """
        base_url = inputs["base_url"].rstrip("/")
        new_password = inputs.get("admin_password") or generate_secure_password()
        website_name = inputs.get("website_name")
        website_url = inputs.get("website_url")

        screenshots = []
        result_data = {
            "setup_completed": False,
            "admin_password": new_password,
            "password_changed": False,
            "website_added": False,
            "already_configured": False,
        }

        try:
            # Navigate to Umami login page
            login_url = f"{base_url}/login"
            await page.goto(login_url, wait_until="networkidle")

            # Look for login form
            username_input = page.locator(
                'input[name="username"], '
                'input[id="username"], '
                'input[placeholder*="username" i]'
            ).first
            await username_input.wait_for(state="visible", timeout=10000)

            # Try default credentials: admin / umami
            await username_input.fill("admin")

            password_input = page.locator(
                'input[name="password"], '
                'input[type="password"]'
            ).first
            await password_input.fill("umami")

            # Click login
            login_button = page.locator(
                'button:has-text("Login"), '
                'button:has-text("Sign in"), '
                'button[type="submit"]'
            ).first
            await login_button.click()

            # Wait for navigation
            await page.wait_for_timeout(3000)

            # Check if login succeeded
            current_url = page.url
            if "/login" in current_url:
                # Default password may have already been changed
                error_el = page.locator(
                    '.error, [class*="error"], [class*="alert"]'
                )
                if await error_el.count() > 0:
                    result_data["already_configured"] = True
                    result_data["setup_completed"] = True
                    return ScenarioResult(
                        success=True,
                        data=result_data,
                        screenshots=screenshots,
                        error=None,
                    )

            # Logged in successfully with default password - change it
            # Navigate to profile/settings to change password
            settings_url = f"{base_url}/settings/profile"
            await page.goto(settings_url, wait_until="networkidle")

            # Look for password change form
            current_password_input = page.locator(
                'input[name="currentPassword"], '
                'input[name="current_password"], '
                'input[placeholder*="current" i]'
            ).first

            if await current_password_input.count() > 0:
                await current_password_input.wait_for(state="visible", timeout=10000)
                await current_password_input.fill("umami")

                new_password_input = page.locator(
                    'input[name="newPassword"], '
                    'input[name="new_password"], '
                    'input[placeholder*="new" i]'
                ).first
                await new_password_input.fill(new_password)

                confirm_password_input = page.locator(
                    'input[name="confirmPassword"], '
                    'input[name="confirm_password"], '
                    'input[placeholder*="confirm" i]'
                ).first
                if await confirm_password_input.count() > 0:
                    await confirm_password_input.fill(new_password)

                # Save password
                save_button = page.locator(
                    'button:has-text("Save"), '
                    'button:has-text("Change"), '
                    'button:has-text("Update"), '
                    'button[type="submit"]'
                ).first
                await save_button.click()

                await page.wait_for_timeout(2000)

                # Check for success
                success_el = page.locator(
                    '[class*="success"], '
                    ':has-text("saved"), '
                    ':has-text("updated")'
                )
                if await success_el.count() > 0:
                    result_data["password_changed"] = True
                else:
                    # Assume success if no error visible
                    error_el = page.locator('[class*="error"]')
                    if await error_el.count() == 0:
                        result_data["password_changed"] = True

            # Optionally add first website
            if website_name and website_url:
                websites_url = f"{base_url}/settings/websites"
                await page.goto(websites_url, wait_until="networkidle")

                # Click Add Website button
                add_button = page.locator(
                    'button:has-text("Add website"), '
                    'button:has-text("Add"), '
                    'a:has-text("Add website")'
                ).first

                if await add_button.count() > 0:
                    await add_button.click()
                    await page.wait_for_timeout(1000)

                    # Fill website name
                    name_input = page.locator(
                        'input[name="name"], '
                        'input[placeholder*="name" i]'
                    ).first
                    if await name_input.count() > 0:
                        await name_input.fill(website_name)

                    # Fill website URL/domain
                    url_input = page.locator(
                        'input[name="domain"], '
                        'input[name="url"], '
                        'input[placeholder*="domain" i], '
                        'input[placeholder*="url" i]'
                    ).first
                    if await url_input.count() > 0:
                        await url_input.fill(website_url)

                    # Save
                    save_button = page.locator(
                        'button:has-text("Save"), '
                        'button:has-text("Create"), '
                        'button[type="submit"]'
                    ).first
                    await save_button.click()
                    await page.wait_for_timeout(2000)

                    result_data["website_added"] = True

            result_data["setup_completed"] = True

            # Take final screenshot
            if options.screenshot_on_success and options.artifacts_dir:
                final_path = options.artifacts_dir / "umami_setup_complete.png"
                await page.screenshot(path=str(final_path))
                screenshots.append(str(final_path))

            return ScenarioResult(
                success=True,
                data=result_data,
                screenshots=screenshots,
                error=None,
            )

        except Exception as e:
            if options.screenshot_on_failure and options.artifacts_dir:
                error_path = options.artifacts_dir / "umami_setup_error.png"
                await page.screenshot(path=str(error_path))
                screenshots.append(str(error_path))

            return ScenarioResult(
                success=False,
                data=result_data,
                screenshots=screenshots,
                error=f"Umami setup failed: {str(e)}",
            )
@@ -0,0 +1,5 @@
"""Uptime Kuma browser automation scenarios."""

from app.playwright_scenarios.uptime_kuma.initial_setup import UptimeKumaInitialSetup

__all__ = ["UptimeKumaInitialSetup"]
@@ -0,0 +1,229 @@
"""Uptime Kuma initial setup scenario.

Automates the first-time setup for a fresh Uptime Kuma installation.
This scenario:
1. Navigates to the Uptime Kuma setup page
2. Creates the admin account with username and password
"""

import secrets
import string
from typing import Any

from playwright.async_api import Page

from app.playwright_scenarios import register_scenario
from app.playwright_scenarios.base import BaseScenario, ScenarioOptions, ScenarioResult


def generate_secure_password(length: int = 24) -> str:
    """Generate a cryptographically secure password.

    Args:
        length: Password length (default: 24)

    Returns:
        A secure random password with mixed characters
    """
    alphabet = string.ascii_letters + string.digits + "!@#$%^&*"
    password = [
        secrets.choice(string.ascii_lowercase),
        secrets.choice(string.ascii_uppercase),
        secrets.choice(string.digits),
        secrets.choice("!@#$%^&*"),
    ]
    password.extend(secrets.choice(alphabet) for _ in range(length - 4))
    password_list = list(password)
    secrets.SystemRandom().shuffle(password_list)
    return "".join(password_list)
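
# Usage sketch (illustrative, not part of the original module). The guaranteed
# character classes above make the last check always hold:
#
#   >>> pw = generate_secure_password(16)
#   >>> len(pw)
#   16
#   >>> any(c.isupper() for c in pw) and any(c.isdigit() for c in pw)
#   True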


@register_scenario
class UptimeKumaInitialSetup(BaseScenario):
    """Automate Uptime Kuma first-time admin account setup.

    This scenario handles the initial admin account creation when
    Uptime Kuma is freshly installed. On first launch, Uptime Kuma
    shows a setup page to create the admin account.

    Required inputs:
        base_url: The Uptime Kuma instance URL (e.g., https://status.example.com)

    Optional inputs:
        admin_username: Username for the admin account (default: "admin")
        admin_password: Password for the admin account (auto-generated if not provided)

    Result data:
        setup_completed: Whether initial setup was completed
        admin_username: The configured admin username
        admin_password: The password (generated or provided) - STORE SECURELY
        already_configured: True if Uptime Kuma was already set up
    """

    @property
    def name(self) -> str:
        return "uptime_kuma_initial_setup"

    @property
    def required_inputs(self) -> list[str]:
        return ["base_url"]

    @property
    def optional_inputs(self) -> list[str]:
        return ["admin_username", "admin_password"]

    @property
    def description(self) -> str:
        return "Automate Uptime Kuma first-time admin account setup"

    async def execute(
        self,
        page: Page,
        inputs: dict[str, Any],
        options: ScenarioOptions,
    ) -> ScenarioResult:
        """Execute the Uptime Kuma initial setup.

        Args:
            page: Playwright Page object
            inputs: Scenario inputs (base_url)
            options: Scenario options

        Returns:
            ScenarioResult with setup status and credentials
        """
        base_url = inputs["base_url"].rstrip("/")
        admin_username = inputs.get("admin_username", "admin")
        admin_password = inputs.get("admin_password") or generate_secure_password()

        screenshots = []
        result_data = {
            "setup_completed": False,
            "admin_username": admin_username,
            "admin_password": admin_password,
            "already_configured": False,
        }

        try:
            # Navigate to Uptime Kuma
            await page.goto(base_url, wait_until="networkidle")

            current_url = page.url

            # Uptime Kuma shows setup page on first visit, login page after
            # Check if we're on the setup page
            setup_heading = page.locator(
                'h1:has-text("Setup"), '
                ':has-text("Create your admin account")'
            )

            # Check if already configured (shows login form)
            login_form = page.locator(
                'form:has(input[autocomplete="username"]), '
                'h1:has-text("Login")'
            )

            if await login_form.count() > 0 and await setup_heading.count() == 0:
                result_data["already_configured"] = True
                result_data["setup_completed"] = True
                return ScenarioResult(
                    success=True,
                    data=result_data,
                    screenshots=screenshots,
                    error=None,
                )

            # We're on the setup page - fill in the admin account
            # Username field
            username_input = page.locator(
                'input[autocomplete="username"], '
                'input[name="username"], '
                'input[id="floatingInput"], '
                'input[placeholder*="username" i]'
            ).first
            await username_input.wait_for(state="visible", timeout=10000)
            await username_input.fill(admin_username)

            # Password field
            password_input = page.locator(
                'input[type="password"][autocomplete="new-password"], '
                'input[name="password"], '
                'input[type="password"]'
            ).first
            await password_input.fill(admin_password)

            # Confirm password field (Uptime Kuma requires password confirmation)
            confirm_input = page.locator(
                'input[type="password"][autocomplete="new-password"]'
            )
            if await confirm_input.count() > 1:
                # Second password field is the confirm field
                await confirm_input.nth(1).fill(admin_password)
            else:
                # Try alternative selector
                confirm_input = page.locator(
                    'input[name="repeatPassword"], '
                    'input[name="confirm_password"], '
                    'input[placeholder*="repeat" i], '
                    'input[placeholder*="confirm" i]'
                ).first
                if await confirm_input.count() > 0:
                    await confirm_input.fill(admin_password)

            # Take screenshot before submitting
            if options.screenshot_on_success and options.artifacts_dir:
                pre_submit_path = options.artifacts_dir / "uptime_kuma_pre_submit.png"
                await page.screenshot(path=str(pre_submit_path))
                screenshots.append(str(pre_submit_path))

            # Click Create / Submit button
            submit_button = page.locator(
                'button:has-text("Create"), '
                'button:has-text("Submit"), '
                'button:has-text("Register"), '
                'button[type="submit"]'
            ).first
            await submit_button.click()

            # Wait for redirect to dashboard
            try:
                await page.wait_for_url(
                    lambda url: "/dashboard" in url or "/setup" not in url,
                    timeout=30000,
                )
                result_data["setup_completed"] = True
            except Exception:
                # Check if on dashboard by looking for dashboard elements
                dashboard_el = page.locator(
                    '.dashboard, '
                    '[class*="dashboard"], '
                    ':has-text("Add New Monitor")'
                )
                if await dashboard_el.count() > 0:
                    result_data["setup_completed"] = True

            # Take final screenshot
            if options.screenshot_on_success and options.artifacts_dir:
                final_path = options.artifacts_dir / "uptime_kuma_setup_complete.png"
                await page.screenshot(path=str(final_path))
                screenshots.append(str(final_path))

            return ScenarioResult(
                success=result_data["setup_completed"],
                data=result_data,
                screenshots=screenshots,
                error=None if result_data["setup_completed"] else "Setup may not have completed",
            )

        except Exception as e:
            if options.screenshot_on_failure and options.artifacts_dir:
                error_path = options.artifacts_dir / "uptime_kuma_setup_error.png"
                await page.screenshot(path=str(error_path))
                screenshots.append(str(error_path))

            return ScenarioResult(
                success=False,
                data=result_data,
                screenshots=screenshots,
                error=f"Uptime Kuma setup failed: {str(e)}",
            )
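
# Dispatch sketch (illustrative; the page and options objects come from the
# scenario runner in app.playwright_scenarios.base and are assumed here):
#
#   scenario = UptimeKumaInitialSetup()
#   result = await scenario.execute(page, {"base_url": "https://status.example.com"}, options)
#   if result.success:
#       result.data["admin_password"]  # store securely, per the docstring above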
261
letsbe-sysadmin-agent/app/task_manager.py
Normal file
@@ -0,0 +1,261 @@
"""Task polling and execution management."""

import asyncio
import random
import time
import traceback
from typing import Optional

from app.clients.orchestrator_client import (
    CircuitBreakerOpen,
    EventLevel,
    OrchestratorClient,
    Task,
    TaskStatus,
)
from app.config import Settings, get_settings
from app.executors import ExecutionResult, get_executor
from app.utils.logger import get_logger

logger = get_logger("task_manager")


class TaskManager:
    """Manage task polling, execution, and result submission.

    Features:
    - Concurrent task execution with semaphore
    - Circuit breaker integration
    - Event logging for each task
    - Error handling and result persistence
    """

    def __init__(
        self,
        client: OrchestratorClient,
        settings: Optional[Settings] = None,
    ):
        self.client = client
        self.settings = settings or get_settings()
        self._shutdown_event = asyncio.Event()
        self._semaphore = asyncio.Semaphore(self.settings.max_concurrent_tasks)
        self._active_tasks: set[str] = set()

    async def poll_loop(self) -> None:
        """Run the task polling loop until shutdown.

        Continuously polls for new tasks and dispatches them for execution.
        """
        if not self.client.agent_id:
            logger.warning("poll_loop_not_registered")
            return

        logger.info(
            "poll_loop_started",
            interval=self.settings.poll_interval,
            max_concurrent=self.settings.max_concurrent_tasks,
        )

        consecutive_failures = 0
        backoff_multiplier = 1.0

        while not self._shutdown_event.is_set():
            try:
                # Fetch the next task (the client raises CircuitBreakerOpen
                # when its circuit breaker is open)
                task = await self.client.fetch_next_task()

                if task:
                    # Reset backoff on successful fetch
                    consecutive_failures = 0
                    backoff_multiplier = 1.0

                    # Dispatch task (non-blocking)
                    asyncio.create_task(self._execute_task(task))
                else:
                    logger.debug("no_tasks_available")

            except CircuitBreakerOpen:
                logger.warning("poll_circuit_breaker_open")
                backoff_multiplier = min(backoff_multiplier * 2, 8.0)

            except Exception as e:
                consecutive_failures += 1
                backoff_multiplier = min(backoff_multiplier * 1.5, 8.0)
                logger.error(
                    "poll_error",
                    error=str(e),
                    consecutive_failures=consecutive_failures,
                )

            # Calculate next poll interval
            interval = self.settings.poll_interval * backoff_multiplier
            # Add jitter (0-25% of interval)
            interval += random.uniform(0, interval * 0.25)

            # Wait for next poll or shutdown
            try:
                await asyncio.wait_for(
                    self._shutdown_event.wait(),
                    timeout=interval,
                )
                break  # Shutdown requested
            except asyncio.TimeoutError:
                pass  # Normal timeout, continue polling

        # Wait for active tasks to complete
        if self._active_tasks:
            logger.info("waiting_for_active_tasks", count=len(self._active_tasks))
            # Give tasks a grace period
            await asyncio.sleep(5)

        logger.info("poll_loop_stopped")
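
    # Worked example (illustrative): the effective poll interval under repeated
    # failures, given poll_interval=5.0 and the 1.5x backoff capped at 8x above:
    #
    #   interval = 5.0 * min(1.5 ** consecutive_failures, 8.0)
    #   interval += random.uniform(0, interval * 0.25)  # 0-25% jitter
    #
    # i.e. roughly 5 -> 7.5 -> 11.25 -> ... -> 40s at the cap, plus jitter.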

    async def _execute_task(self, task: Task) -> None:
        """Execute a single task with concurrency control.

        Args:
            task: Task to execute
        """
        # Acquire semaphore for concurrency control
        async with self._semaphore:
            self._active_tasks.add(task.id)

            try:
                await self._run_task(task)
            finally:
                self._active_tasks.discard(task.id)

    async def _run_task(self, task: Task) -> None:
        """Run task execution and handle results.

        Args:
            task: Task to execute
        """
        start_time = time.time()

        logger.info(
            "task_started",
            task_id=task.id,
            task_type=task.type,
            tenant_id=task.tenant_id,
        )

        # Send start event
        await self.client.send_event(
            EventLevel.INFO,
            f"Task started: {task.type}",
            task_id=task.id,
            metadata={"payload_keys": list(task.payload.keys())},
        )

        # Mark task as in progress
        await self.client.update_task(task.id, TaskStatus.RUNNING)

        try:
            # Get executor for task type
            executor = get_executor(task.type)

            # Execute task
            result = await executor.execute(task.payload)

            duration_ms = (time.time() - start_time) * 1000

            if result.success:
                logger.info(
                    "task_completed",
                    task_id=task.id,
                    task_type=task.type,
                    duration_ms=duration_ms,
                )

                await self.client.update_task(
                    task.id,
                    TaskStatus.COMPLETED,
                    result=result.data,
                )

                await self.client.send_event(
                    EventLevel.INFO,
                    f"Task completed: {task.type}",
                    task_id=task.id,
                    metadata={"duration_ms": duration_ms},
                )
            else:
                logger.warning(
                    "task_failed",
                    task_id=task.id,
                    task_type=task.type,
                    error=result.error,
                    duration_ms=duration_ms,
                )

                await self.client.update_task(
                    task.id,
                    TaskStatus.FAILED,
                    result=result.data,
                    error=result.error,
                )

                await self.client.send_event(
                    EventLevel.ERROR,
                    f"Task failed: {task.type}",
                    task_id=task.id,
                    metadata={"error": result.error, "duration_ms": duration_ms},
                )

        except ValueError as e:
            # Unknown task type or validation error
            duration_ms = (time.time() - start_time) * 1000
            error_msg = str(e)

            logger.error(
                "task_validation_error",
                task_id=task.id,
                task_type=task.type,
                error=error_msg,
            )

            await self.client.update_task(
                task.id,
                TaskStatus.FAILED,
                error=error_msg,
            )

            await self.client.send_event(
                EventLevel.ERROR,
                f"Task validation failed: {task.type}",
                task_id=task.id,
                metadata={"error": error_msg},
            )

        except Exception as e:
            # Unexpected error
            duration_ms = (time.time() - start_time) * 1000
            error_msg = str(e)
            tb = traceback.format_exc()

            logger.error(
                "task_exception",
                task_id=task.id,
                task_type=task.type,
                error=error_msg,
                traceback=tb,
            )

            await self.client.update_task(
                task.id,
                TaskStatus.FAILED,
                error=error_msg,
            )

            await self.client.send_event(
                EventLevel.ERROR,
                f"Task exception: {task.type}",
                task_id=task.id,
                metadata={"error": error_msg, "traceback": tb[:500]},
            )

    async def shutdown(self) -> None:
        """Initiate graceful shutdown."""
        logger.info("task_manager_shutdown_initiated")
        self._shutdown_event.set()
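
# Wiring sketch (illustrative; OrchestratorClient construction and signal
# handling are assumed, not shown in this file):
#
#   manager = TaskManager(client)
#   poll = asyncio.create_task(manager.poll_loop())
#   ...on SIGTERM...
#   await manager.shutdown()  # sets the shutdown event; poll_loop drains and exits
#   await poll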
15
letsbe-sysadmin-agent/app/utils/__init__.py
Normal file
@@ -0,0 +1,15 @@
"""Utility modules for the agent."""

from .logger import get_logger
from .validation import (
    validate_shell_command,
    validate_file_path,
    sanitize_input,
)

__all__ = [
    "get_logger",
    "validate_shell_command",
    "validate_file_path",
    "sanitize_input",
]
156
letsbe-sysadmin-agent/app/utils/credential_reader.py
Normal file
@@ -0,0 +1,156 @@
"""
Credential reader utility for reading credentials from the credentials.env file.
Used by the agent to report credentials back to the Hub during heartbeat.
"""

import os
import stat
from pathlib import Path
from typing import Optional

from app.utils.logger import get_logger

logger = get_logger(__name__)

# Default credentials file location
CREDENTIALS_FILE = Path("/opt/letsbe/env/credentials.env")


def check_credentials_permissions(path: str) -> None:
    """Warn if credentials file has overly permissive permissions."""
    try:
        if not os.path.exists(path):
            return
        file_stat = os.stat(path)
        mode = file_stat.st_mode
        # Check if group or others have any permissions
        if mode & (stat.S_IRWXG | stat.S_IRWXO):
            logger.warning(
                f"Credentials file {path} has overly permissive permissions "
                f"(mode={oct(mode)}). Recommended: chmod 600"
            )
    except OSError:
        pass


def read_credentials_file(file_path: Optional[Path] = None) -> dict[str, str]:
    """
    Read credentials.env file and return as a dictionary.

    Args:
        file_path: Optional path to credentials file. Defaults to /opt/letsbe/env/credentials.env

    Returns:
        Dictionary of key-value pairs from the credentials file
    """
    credentials: dict[str, str] = {}
    creds_file = file_path or CREDENTIALS_FILE

    if not creds_file.exists():
        logger.debug(f"Credentials file not found: {creds_file}")
        return credentials

    check_credentials_permissions(str(creds_file))

    try:
        with open(creds_file, 'r') as f:
            for line_num, line in enumerate(f, 1):
                line = line.strip()
                # Skip empty lines and comments
                if not line or line.startswith('#'):
                    continue

                # Parse KEY=VALUE
                if '=' in line:
                    key, value = line.split('=', 1)
                    credentials[key.strip()] = value.strip()
                else:
                    logger.warning(f"Invalid line {line_num} in credentials file: {line}")

    except Exception as e:
        logger.error(f"Failed to read credentials file: {e}")

    return credentials
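
# Example credentials.env content this parser accepts (illustrative values;
# the key names match the tool mappings further below):
#
#   # Portainer admin
#   PORTAINER_ADMIN_USER=admin
#   PORTAINER_ADMIN_PASSWORD=change-me
#   MINIO_ROOT_USER=minio
#   MINIO_ROOT_PASSWORD=change-me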

def get_portainer_credentials() -> Optional[dict[str, str]]:
    """
    Extract Portainer-specific credentials from the credentials file.

    Returns:
        Dictionary with 'username' and 'password' keys, or None if not configured
    """
    creds = read_credentials_file()

    username = creds.get('PORTAINER_ADMIN_USER')
    password = creds.get('PORTAINER_ADMIN_PASSWORD')

    if username and password:
        return {
            'username': username,
            'password': password,
        }

    return None


def get_all_tool_credentials() -> dict[str, dict[str, str]]:
    """
    Extract all tool credentials from the credentials file.
    Groups credentials by tool name.

    Returns:
        Dictionary where keys are tool names and values are credential dictionaries
    """
    creds = read_credentials_file()
    tool_credentials: dict[str, dict[str, str]] = {}

    # Portainer credentials
    portainer = get_portainer_credentials()
    if portainer:
        tool_credentials['portainer'] = portainer

    # Add other tool credentials as needed
    # Example patterns that might exist in credentials.env:
    # NEXTCLOUD_ADMIN_USER, NEXTCLOUD_ADMIN_PASSWORD
    # KEYCLOAK_ADMIN_USER, KEYCLOAK_ADMIN_PASSWORD
    # etc.

    tool_mappings = [
        ('nextcloud', ['NEXTCLOUD_ADMIN_USER', 'NEXTCLOUD_ADMIN_PASSWORD']),
        ('keycloak', ['KEYCLOAK_ADMIN_USER', 'KEYCLOAK_ADMIN_PASSWORD']),
        ('minio', ['MINIO_ROOT_USER', 'MINIO_ROOT_PASSWORD']),
        ('poste', ['POSTE_ADMIN_EMAIL', 'POSTE_ADMIN_PASSWORD']),
    ]

    for tool_name, (user_key, pass_key) in tool_mappings:
        username = creds.get(user_key)
        password = creds.get(pass_key)
        if username and password:
            tool_credentials[tool_name] = {
                'username': username,
                'password': password,
            }

    return tool_credentials


def get_credential_hash() -> str:
    """
    Generate a hash of the credentials file content.
    Used to detect changes without sending full credentials each time.

    Returns:
        SHA-256 hash of the credentials file content, or empty string if file doesn't exist
    """
    import hashlib

    if not CREDENTIALS_FILE.exists():
        return ""

    try:
        content = CREDENTIALS_FILE.read_bytes()
        return hashlib.sha256(content).hexdigest()
    except Exception as e:
        logger.error(f"Failed to hash credentials file: {e}")
        return ""
74
letsbe-sysadmin-agent/app/utils/logger.py
Normal file
@@ -0,0 +1,74 @@
"""Structured logging setup using structlog."""

import logging
import sys
from functools import lru_cache

import structlog


def configure_logging(log_level: str = "INFO", log_json: bool = True) -> None:
    """Configure structlog with JSON or console output.

    Args:
        log_level: Logging level (DEBUG, INFO, WARNING, ERROR)
        log_json: If True, output JSON logs; otherwise, use colored console output
    """
    # Set up standard library logging
    logging.basicConfig(
        format="%(message)s",
        stream=sys.stdout,
        level=getattr(logging, log_level.upper(), logging.INFO),
    )

    # Common processors
    shared_processors: list[structlog.typing.Processor] = [
        structlog.contextvars.merge_contextvars,
        structlog.processors.add_log_level,
        structlog.processors.StackInfoRenderer(),
        structlog.dev.set_exc_info,
        structlog.processors.TimeStamper(fmt="iso"),
    ]

    if log_json:
        # JSON output for production
        structlog.configure(
            processors=[
                *shared_processors,
                structlog.processors.dict_tracebacks,
                structlog.processors.JSONRenderer(),
            ],
            wrapper_class=structlog.make_filtering_bound_logger(
                getattr(logging, log_level.upper(), logging.INFO)
            ),
            context_class=dict,
            logger_factory=structlog.PrintLoggerFactory(),
            cache_logger_on_first_use=True,
        )
    else:
        # Colored console output for development
        structlog.configure(
            processors=[
                *shared_processors,
                structlog.dev.ConsoleRenderer(colors=True),
            ],
            wrapper_class=structlog.make_filtering_bound_logger(
                getattr(logging, log_level.upper(), logging.INFO)
            ),
            context_class=dict,
            logger_factory=structlog.PrintLoggerFactory(),
            cache_logger_on_first_use=True,
        )


@lru_cache
def get_logger(name: str = "agent") -> structlog.stdlib.BoundLogger:
    """Get a bound logger instance.

    Args:
        name: Logger name for context

    Returns:
        Configured structlog bound logger
    """
    return structlog.get_logger(name)
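
# Usage sketch (illustrative), matching the event-style logging used elsewhere
# in the agent:
#
#   configure_logging(log_level="DEBUG", log_json=False)
#   log = get_logger("demo")
#   log.info("task_started", task_id="t-123", task_type="SHELL")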
425
letsbe-sysadmin-agent/app/utils/validation.py
Normal file
@@ -0,0 +1,425 @@
"""Security validation utilities for safe command and file operations."""

import re
from pathlib import Path
from typing import Optional

# Shell metacharacters that must NEVER appear in commands
# These can be used for command injection attacks
FORBIDDEN_SHELL_PATTERNS = re.compile(r'[`$();|&<>]')

# ENV key validation pattern: uppercase letters, numbers, underscore; must start with letter
ENV_KEY_PATTERN = re.compile(r'^[A-Z][A-Z0-9_]*$')

# Dangerous Docker flags that must never be allowed
DANGEROUS_DOCKER_FLAGS = re.compile(
    r'--privileged|--pid[=\s]+host|--net[=\s]+host|--network[=\s]+host|'
    r'--cap-add|--security-opt|--device[=\s]|--ipc[=\s]+host'
)

# Docker subcommands that are explicitly blocked (too dangerous)
BLOCKED_DOCKER_SUBCOMMANDS = {"run", "exec", "build", "push", "pull", "load", "import", "commit", "cp", "export"}

# Allowed commands with their argument validation patterns and timeouts
# Keys are ABSOLUTE paths to prevent PATH hijacking
ALLOWED_COMMANDS: dict[str, dict] = {
    # File system inspection
    "/usr/bin/ls": {
        "args_pattern": r"^[-alhrRtS\s/\w.]*$",
        "timeout": 30,
        "description": "List directory contents",
    },
    "/usr/bin/cat": {
        "args_pattern": r"^[\w./\-]+$",
        "timeout": 30,
        "description": "Display file contents",
    },
    "/usr/bin/df": {
        "args_pattern": r"^[-hT\s/\w]*$",
        "timeout": 30,
        "description": "Disk space usage",
    },
    "/usr/bin/free": {
        "args_pattern": r"^[-hmg\s]*$",
        "timeout": 30,
        "description": "Memory usage",
    },
    "/usr/bin/du": {
        "args_pattern": r"^[-shc\s/\w.]*$",
        "timeout": 60,
        "description": "Directory size",
    },
    # Docker operations (only compose, ps, logs, inspect, stats allowed)
    "/usr/bin/docker": {
        "args_pattern": r"^(compose|ps|logs|inspect|stats)[\s\w.\-/:]*$",
        "timeout": 300,
        "description": "Docker operations (compose, ps, logs, inspect, stats only)",
    },
    # Service management
    "/usr/bin/systemctl": {
        "args_pattern": r"^(status|restart|start|stop|enable|disable|is-active)\s+[\w\-@.]+$",
        "timeout": 60,
        "description": "Systemd service management",
    },
    # Network diagnostics
    "/usr/bin/curl": {
        "args_pattern": r"^(-s\s+)?-o\s+/dev/null\s+-w\s+['\"]?%\{[^}]+\}['\"]?\s+https?://[\w.\-/:]+$",
        "timeout": 30,
        "description": "HTTP health checks only",
    },
}


class ValidationError(Exception):
    """Raised when validation fails."""

    pass


def validate_shell_command(cmd: str, args: str = "") -> tuple[str, list[str], int]:
    """Validate a shell command against security policies.

    Args:
        cmd: The command to execute (should be absolute path)
        args: Command arguments as a string

    Returns:
        Tuple of (absolute_cmd_path, args_list, timeout)

    Raises:
        ValidationError: If the command or arguments fail validation
    """
    # Normalize command path
    cmd = cmd.strip()

    # Check for forbidden patterns in command
    if FORBIDDEN_SHELL_PATTERNS.search(cmd):
        raise ValidationError(f"Command contains forbidden characters: {cmd}")

    # Check for forbidden patterns in arguments
    if args and FORBIDDEN_SHELL_PATTERNS.search(args):
        raise ValidationError(f"Arguments contain forbidden characters: {args}")

    # Verify command is in allowlist
    if cmd not in ALLOWED_COMMANDS:
        # Try to find if user provided just the command name
        for allowed_cmd in ALLOWED_COMMANDS:
            if allowed_cmd.endswith(f"/{cmd}"):
                raise ValidationError(
                    f"Command '{cmd}' must use absolute path: {allowed_cmd}"
                )
        raise ValidationError(f"Command not in allowlist: {cmd}")

    schema = ALLOWED_COMMANDS[cmd]

    # Validate arguments against pattern
    if args:
        args = args.strip()
        if not re.match(schema["args_pattern"], args):
            raise ValidationError(
                f"Arguments do not match allowed pattern for {cmd}: {args}"
            )

    # Extra validation for Docker commands
    if cmd == "/usr/bin/docker" and args:
        # Block dangerous Docker subcommands
        first_arg = args.split()[0] if args.split() else ""
        if first_arg in BLOCKED_DOCKER_SUBCOMMANDS:
            raise ValidationError(
                f"Docker subcommand '{first_arg}' is not allowed"
            )
        # Block dangerous Docker flags
        if DANGEROUS_DOCKER_FLAGS.search(args):
            raise ValidationError(
                f"Docker arguments contain dangerous flags: {args}"
            )

    # Parse arguments into list (safely, no shell interpretation)
    args_list = args.split() if args else []

    return cmd, args_list, schema["timeout"]
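
# Illustrative behavior (follows from the allowlist above):
#
#   validate_shell_command("/usr/bin/df", "-h")      -> ("/usr/bin/df", ["-h"], 30)
#   validate_shell_command("/usr/bin/docker", "run") -> ValidationError
#   validate_shell_command("ls", "-al")              -> ValidationError (absolute path required)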


def validate_file_path(
    path: str,
    allowed_root: str,
    must_exist: bool = False,
    max_size: Optional[int] = None,
) -> Path:
    """Validate a file path against security policies.

    Args:
        path: The file path to validate
        allowed_root: The root directory that path must be within
        must_exist: If True, verify the file exists
        max_size: If provided, verify file size is under limit (for existing files)

    Returns:
        Resolved Path object

    Raises:
        ValidationError: If the path fails validation
    """
    # Reject paths with obvious traversal attempts
    if ".." in path:
        raise ValidationError(f"Path contains directory traversal: {path}")

    # Convert to Path objects
    try:
        file_path = Path(path).expanduser()
        root_path = Path(allowed_root).expanduser().resolve()
    except (ValueError, RuntimeError) as e:
        raise ValidationError(f"Invalid path format: {e}")

    # Resolve to canonical path (follows symlinks, resolves ..)
    try:
        resolved_path = file_path.resolve()
    except (OSError, RuntimeError) as e:
        raise ValidationError(f"Cannot resolve path: {e}")

    # Verify path is within allowed root
    try:
        resolved_path.relative_to(root_path)
    except ValueError:
        raise ValidationError(
            f"Path {resolved_path} is outside allowed root {root_path}"
        )

    # Check existence if required
    if must_exist and not resolved_path.exists():
        raise ValidationError(f"File does not exist: {resolved_path}")

    # Check file size if applicable
    if max_size is not None and resolved_path.is_file():
        file_size = resolved_path.stat().st_size
        if file_size > max_size:
            raise ValidationError(
                f"File size {file_size} exceeds limit {max_size}: {resolved_path}"
            )

    return resolved_path
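
# Illustrative behavior (assuming allowed_root="/opt/letsbe"):
#
#   validate_file_path("/opt/letsbe/env/app.env", "/opt/letsbe")  -> Path to the file
#   validate_file_path("/etc/passwd", "/opt/letsbe")              -> ValidationError (outside root)
#   validate_file_path("/opt/letsbe/../etc", "/opt/letsbe")       -> ValidationError (traversal)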


def sanitize_input(text: str, max_length: int = 10000) -> str:
    """Sanitize text input by removing dangerous characters.

    Args:
        text: Input text to sanitize
        max_length: Maximum allowed length

    Returns:
        Sanitized text

    Raises:
        ValidationError: If input exceeds max length
    """
    if len(text) > max_length:
        raise ValidationError(f"Input exceeds maximum length of {max_length}")

    # Remove null bytes and other control characters (except newlines and tabs)
    sanitized = "".join(
        char for char in text
        if char in "\n\t" or (ord(char) >= 32 and ord(char) != 127)
    )

    return sanitized


def validate_compose_path(path: str, allowed_paths: list[str]) -> Path:
    """Validate a docker-compose file path.

    Args:
        path: Path to compose file
        allowed_paths: List of allowed parent directories

    Returns:
        Resolved Path object

    Raises:
        ValidationError: If path is not in allowed directories
    """
    if ".." in path:
        raise ValidationError(f"Path contains directory traversal: {path}")

    try:
        resolved = Path(path).expanduser().resolve()
    except (ValueError, RuntimeError) as e:
        raise ValidationError(f"Invalid compose path: {e}")

    # Check if path is within any allowed directory
    for allowed in allowed_paths:
        try:
            allowed_path = Path(allowed).expanduser().resolve()
            resolved.relative_to(allowed_path)
            # Path is within this allowed directory
            if not resolved.exists():
                raise ValidationError(f"Compose file does not exist: {resolved}")
            if not resolved.name.endswith((".yml", ".yaml")):
                raise ValidationError(f"Not a YAML file: {resolved}")
            return resolved
        except ValueError:
            # Not within this allowed path, try next
            continue

    raise ValidationError(
        f"Compose path {resolved} is not in allowed directories: {allowed_paths}"
    )


def validate_env_key(key: str) -> bool:
    """Validate an environment variable key format.

    Keys must:
    - Start with an uppercase letter (A-Z)
    - Contain only uppercase letters, numbers, and underscores

    Args:
        key: The environment variable key to validate

    Returns:
        True if valid

    Raises:
        ValidationError: If the key format is invalid
    """
    if not key:
        raise ValidationError("ENV key cannot be empty")

    if not ENV_KEY_PATTERN.match(key):
        raise ValidationError(
            f"Invalid ENV key format '{key}': must match ^[A-Z][A-Z0-9_]*$"
        )

    return True


def is_domain_allowed(url: str, allowed_domains: list[str]) -> bool:
    """Check if a URL's domain is in the allowed list.

    Supports:
    - Exact domain match: "cloud.example.com"
    - Wildcard subdomain: "*.example.com" (matches any subdomain)
    - Port specification: "cloud.example.com:8443"

    Args:
        url: The URL to check
        allowed_domains: List of allowed domain patterns

    Returns:
        True if the domain is allowed, False otherwise

    Examples:
        >>> is_domain_allowed("https://cloud.example.com/path", ["cloud.example.com"])
        True
        >>> is_domain_allowed("https://sub.example.com", ["*.example.com"])
        True
        >>> is_domain_allowed("https://evil.com", ["example.com"])
        False
    """
    from urllib.parse import urlparse

    if not url or not allowed_domains:
        return False

    try:
        parsed = urlparse(url)
        url_host = parsed.netloc.lower()

        # Handle URLs without scheme (shouldn't happen, but be safe)
        if not url_host and parsed.path:
            # URL might be like "example.com/path" without scheme
            url_host = parsed.path.split("/")[0].lower()

        if not url_host:
            return False

        # Extract port if present in URL
        if ":" in url_host:
            url_domain, url_port = url_host.rsplit(":", 1)
        else:
            url_domain = url_host
            url_port = None

        for pattern in allowed_domains:
            pattern = pattern.lower().strip()

            # Extract port from pattern if present
            if ":" in pattern and not pattern.startswith("*."):
                pattern_domain, pattern_port = pattern.rsplit(":", 1)
            elif ":" in pattern:
                # Handle "*.example.com:8443"
                parts = pattern.split(":")
                pattern_domain = parts[0]
                pattern_port = parts[1] if len(parts) > 1 else None
            else:
                pattern_domain = pattern
                pattern_port = None

            # If pattern specifies a port, URL must match that port
            if pattern_port and url_port != pattern_port:
                continue

            # Wildcard subdomain match
            if pattern_domain.startswith("*."):
                suffix = pattern_domain[2:]  # Remove "*."
                # Match the suffix or the exact domain without subdomain
                if url_domain == suffix or url_domain.endswith("." + suffix):
                    return True
            else:
                # Exact match
                if url_domain == pattern_domain:
                    return True

        return False

    except Exception:
        return False


def validate_allowed_domains(domains: list[str]) -> list[str]:
    """Validate and normalize a list of allowed domains.

    Args:
        domains: List of domain patterns to validate

    Returns:
        List of normalized domain patterns

    Raises:
        ValidationError: If any domain pattern is invalid
    """
    if not domains:
        raise ValidationError("allowed_domains cannot be empty")

    normalized = []
    for domain in domains:
        domain = domain.strip().lower()

        if not domain:
            raise ValidationError("Empty domain in allowed_domains list")

        # Basic format validation
        if domain.startswith("http://") or domain.startswith("https://"):
            raise ValidationError(
                f"Domain should not include protocol: {domain}. "
                "Use 'example.com' not 'https://example.com'"
            )

        # Wildcard validation
        if "*" in domain:
            if not domain.startswith("*."):
                raise ValidationError(
                    f"Invalid wildcard pattern: {domain}. "
                    "Wildcards must be at the start: '*.example.com'"
                )
            # Ensure there's something after the wildcard
            suffix = domain[2:]
            if "." not in suffix or suffix.startswith("."):
                raise ValidationError(
                    f"Invalid wildcard pattern: {domain}. "
                    "Must have a valid domain after '*.' like '*.example.com'"
                )

        normalized.append(domain)

    return normalized
76
letsbe-sysadmin-agent/chromium-seccomp.json
Normal file
@@ -0,0 +1,76 @@
{
  "comment": "Chromium-compatible seccomp profile - allows syscalls needed by Chromium/Playwright",
  "defaultAction": "SCMP_ACT_ERRNO",
  "architectures": ["SCMP_ARCH_X86_64", "SCMP_ARCH_X86", "SCMP_ARCH_AARCH64"],
  "syscalls": [
    {
      "names": [
        "accept", "accept4", "access", "adjtimex", "alarm", "bind", "brk",
        "capget", "capset", "chdir", "chmod", "chown", "chown32", "clock_adjtime",
        "clock_getres", "clock_gettime", "clock_nanosleep", "clone", "clone3", "close",
        "connect", "copy_file_range", "creat", "dup", "dup2", "dup3",
        "epoll_create", "epoll_create1", "epoll_ctl", "epoll_ctl_old",
        "epoll_pwait", "epoll_pwait2", "epoll_wait", "epoll_wait_old",
        "eventfd", "eventfd2", "execve", "execveat", "exit", "exit_group",
        "faccessat", "faccessat2", "fadvise64", "fallocate", "fanotify_mark",
        "fchdir", "fchmod", "fchmodat", "fchown", "fchown32", "fchownat",
        "fcntl", "fcntl64", "fdatasync", "fgetxattr", "flistxattr",
        "flock", "fork", "fsetxattr", "fstat", "fstat64", "fstatat64",
        "fstatfs", "fstatfs64", "fsync", "ftruncate", "ftruncate64",
        "futex", "futex_waitv", "getcwd", "getdents", "getdents64",
        "getegid", "getegid32", "geteuid", "geteuid32", "getgid", "getgid32",
        "getgroups", "getgroups32", "getitimer", "getpeername", "getpgid",
        "getpgrp", "getpid", "getppid", "getpriority", "getrandom",
        "getresgid", "getresgid32", "getresuid", "getresuid32",
        "getrlimit", "get_robust_list", "getrusage", "getsid", "getsockname",
        "getsockopt", "get_thread_area", "gettid", "gettimeofday", "getuid",
        "getuid32", "getxattr", "inotify_add_watch", "inotify_init",
        "inotify_init1", "inotify_rm_watch", "io_cancel", "ioctl",
        "io_destroy", "io_getevents", "io_pgetevents", "ioprio_get",
        "ioprio_set", "io_setup", "io_submit", "io_uring_enter",
        "io_uring_register", "io_uring_setup", "ipc", "kill", "landlock_add_rule",
        "landlock_create_ruleset", "landlock_restrict_self", "lchown", "lchown32",
        "lgetxattr", "link", "linkat", "listen", "listxattr", "llistxattr",
        "lseek", "lstat", "lstat64", "madvise", "membarrier", "memfd_create",
        "memfd_secret", "mincore", "mkdir", "mkdirat", "mknod", "mknodat",
        "mlock", "mlock2", "mlockall", "mmap", "mmap2", "mprotect",
        "mq_getsetattr", "mq_notify", "mq_open", "mq_timedreceive",
        "mq_timedsend", "mq_unlink", "mremap", "msgctl", "msgget", "msgrcv",
        "msgsnd", "msync", "munlock", "munlockall", "munmap", "nanosleep",
        "newfstatat", "open", "openat", "openat2", "pause", "pidfd_open",
        "pidfd_send_signal", "pipe", "pipe2", "poll", "ppoll", "prctl",
        "pread64", "preadv", "preadv2", "prlimit64", "process_mrelease",
        "pselect6", "pwrite64", "pwritev", "pwritev2", "read", "readahead",
        "readlink", "readlinkat", "readv", "recv", "recvfrom", "recvmmsg",
        "recvmsg", "remap_file_pages", "removexattr", "rename", "renameat",
        "renameat2", "restart_syscall", "rmdir", "rseq", "rt_sigaction",
        "rt_sigpending", "rt_sigprocmask", "rt_sigqueueinfo", "rt_sigreturn",
        "rt_sigsuspend", "rt_sigtimedwait", "rt_tgsigqueueinfo",
        "sched_getaffinity", "sched_getattr", "sched_getparam",
        "sched_get_priority_max", "sched_get_priority_min",
        "sched_getscheduler", "sched_setaffinity", "sched_setattr",
        "sched_setparam", "sched_setscheduler", "sched_yield", "seccomp",
        "select", "semctl", "semget", "semop", "semtimedop", "send",
        "sendfile", "sendfile64", "sendmmsg", "sendmsg", "sendto",
        "setfsgid", "setfsgid32", "setfsuid", "setfsuid32", "setgid",
        "setgid32", "setgroups", "setgroups32", "setitimer", "setpgid",
        "setpriority", "setregid", "setregid32", "setresgid", "setresgid32",
        "setresuid", "setresuid32", "setreuid", "setreuid32", "setrlimit",
        "set_robust_list", "setsid", "setsockopt", "set_thread_area",
        "set_tid_address", "setuid", "setuid32", "setxattr", "shmat",
        "shmctl", "shmdt", "shmget", "shutdown", "sigaltstack",
        "signalfd", "signalfd4", "sigprocmask", "sigreturn", "socket",
        "socketcall", "socketpair", "splice", "stat", "stat64", "statfs",
        "statfs64", "statx", "symlink", "symlinkat", "sync",
        "sync_file_range", "syncfs", "sysinfo", "tee", "tgkill",
        "time", "timer_create", "timer_delete", "timer_getoverrun",
        "timer_gettime", "timer_settime", "timerfd_create",
        "timerfd_gettime", "timerfd_settime", "times", "tkill", "truncate",
        "truncate64", "ugetrlimit", "umask", "uname", "unlink", "unlinkat",
        "unshare", "utime", "utimensat", "utimes", "vfork", "vmsplice",
        "wait4", "waitid", "waitpid", "write", "writev"
      ],
      "action": "SCMP_ACT_ALLOW"
    }
  ]
}
34
letsbe-sysadmin-agent/docker-compose.local.yml
Normal file
@@ -0,0 +1,34 @@
# docker-compose.local.yml
#
# Agent configuration for LOCAL_MODE (single-tenant deployment)
#
# Usage:
#   docker compose -f docker-compose.yml -f docker-compose.local.yml up
#
# Or set COMPOSE_FILE environment variable:
#   export COMPOSE_FILE=docker-compose.yml:docker-compose.local.yml
#   docker compose up
#
# Required environment variables:
#   LOCAL_AGENT_KEY - Key for local agent registration (matches orchestrator's LOCAL_AGENT_KEY)
#
# Optional environment variables:
#   ORCHESTRATOR_URL - URL for orchestrator (default: http://orchestrator:8000 for Docker network)

services:
  agent:
    environment:
      # Enable LOCAL_MODE for single-tenant registration
      LOCAL_MODE: "true"

      # Key for local registration (NOT ADMIN_API_KEY - this has minimal scope)
      # Separate from ADMIN_API_KEY - can ONLY register the local agent
      LOCAL_AGENT_KEY: "${LOCAL_AGENT_KEY}"

      # Use Docker service name when running in combined stack
      # Override to host.docker.internal:8000 if running orchestrator separately
      ORCHESTRATOR_URL: "${ORCHESTRATOR_URL:-http://orchestrator:8000}"

      # Clear multi-tenant settings (not needed in LOCAL_MODE)
      REGISTRATION_TOKEN: ""
      TENANT_ID: ""
117
letsbe-sysadmin-agent/docker-compose.prod.yml
Normal file
@@ -0,0 +1,117 @@
version: "3.8"

services:
  agent:
    image: code.letsbe.solutions/letsbe/sysadmin-agent:latest
    container_name: letsbe-agent

    environment:
      # Required: Orchestrator connection
      - ORCHESTRATOR_URL=${ORCHESTRATOR_URL}

      # Registration token (new secure flow)
      # Only needed for first-time registration. After registration,
      # credentials are persisted to the path below.
      - REGISTRATION_TOKEN=${REGISTRATION_TOKEN:-}

      # Credentials path - must match the volume mount for persistence
      # Agent runs as root, so ~ expands to /root, but volume is at /home/agent
      - CREDENTIALS_PATH=/home/agent/.letsbe-agent/credentials.json

      # Timing (seconds)
      - HEARTBEAT_INTERVAL=${HEARTBEAT_INTERVAL:-30}
      - POLL_INTERVAL=${POLL_INTERVAL:-5}

      # Logging
      - LOG_LEVEL=${LOG_LEVEL:-INFO}
      - LOG_JSON=${LOG_JSON:-true}

      # Resilience
      - MAX_CONCURRENT_TASKS=${MAX_CONCURRENT_TASKS:-3}
      - BACKOFF_BASE=${BACKOFF_BASE:-1.0}
      - BACKOFF_MAX=${BACKOFF_MAX:-60.0}
      - CIRCUIT_BREAKER_THRESHOLD=${CIRCUIT_BREAKER_THRESHOLD:-5}
      - CIRCUIT_BREAKER_COOLDOWN=${CIRCUIT_BREAKER_COOLDOWN:-30}

      # Security
      - ALLOWED_FILE_ROOT=${ALLOWED_FILE_ROOT:-/opt/letsbe}
      - MAX_FILE_SIZE=${MAX_FILE_SIZE:-10485760}
      - SHELL_TIMEOUT=${SHELL_TIMEOUT:-60}

      # Playwright browser automation
      - PLAYWRIGHT_ARTIFACTS_DIR=/opt/letsbe/playwright-artifacts
      - PLAYWRIGHT_DEFAULT_TIMEOUT_MS=60000
      - PLAYWRIGHT_NAVIGATION_TIMEOUT_MS=120000

      # MCP Browser Sidecar connection (for LLM-driven browser control)
      - MCP_BROWSER_URL=http://mcp-browser:8931
      - MCP_BROWSER_API_KEY=${MCP_BROWSER_API_KEY:-}

    volumes:
      # Docker socket for container management
      - /var/run/docker.sock:/var/run/docker.sock

      # Host directory mounts for real infrastructure access
      - /opt/letsbe/env:/opt/letsbe/env
      - /opt/letsbe/stacks:/opt/letsbe/stacks
      - /opt/letsbe/nginx:/opt/letsbe/nginx

      # Credential persistence (survives restarts without re-registration)
      - agent_home:/home/agent/.letsbe-agent

      # Playwright artifacts storage
      - playwright_artifacts:/opt/letsbe/playwright-artifacts

    security_opt:
      - seccomp=./chromium-seccomp.json

    user: root
    restart: unless-stopped

    deploy:
      resources:
        limits:
          cpus: '1.5'
          memory: 1G
        reservations:
          cpus: '0.25'
          memory: 256M

  mcp-browser:
    image: code.letsbe.solutions/letsbe/mcp-browser:latest
    container_name: letsbe-mcp-browser

    environment:
      - MAX_SESSIONS=${MAX_SESSIONS:-3}
      - IDLE_TIMEOUT_SECONDS=${IDLE_TIMEOUT_SECONDS:-300}
      - MAX_SESSION_LIFETIME_SECONDS=${MAX_SESSION_LIFETIME_SECONDS:-1800}
      - MAX_ACTIONS_PER_SESSION=${MAX_ACTIONS_PER_SESSION:-50}
      - LOG_LEVEL=${LOG_LEVEL:-INFO}
      - LOG_JSON=${LOG_JSON:-true}
      - SCREENSHOTS_DIR=/screenshots
      - API_KEY=${MCP_BROWSER_API_KEY:-}

    volumes:
      - mcp_screenshots:/screenshots

    security_opt:
      - seccomp=./chromium-seccomp.json

    restart: unless-stopped

    deploy:
      resources:
        limits:
          cpus: '1.5'
          memory: 1G
        reservations:
          cpus: '0.25'
          memory: 256M

volumes:
  agent_home:
    name: letsbe-agent-home
  playwright_artifacts:
    name: letsbe-playwright-artifacts
  mcp_screenshots:
    name: letsbe-mcp-screenshots
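
# Launch sketch (illustrative; values are placeholders):
#   REGISTRATION_TOKEN=<token> ORCHESTRATOR_URL=https://hub.example.com \
#     docker compose -f docker-compose.prod.yml up -d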
97
letsbe-sysadmin-agent/docker-compose.yml
Normal file
@@ -0,0 +1,97 @@
version: "3.8"

services:
  agent:
    build:
      context: .
      dockerfile: Dockerfile
    container_name: letsbe-agent

    environment:
      # Required: Orchestrator connection
      - ORCHESTRATOR_URL=${ORCHESTRATOR_URL:-http://host.docker.internal:8000}

      # Registration token for first-time registration (multi-use tokens recommended)
      # After registration, credentials are persisted and token is no longer needed
      - REGISTRATION_TOKEN=${REGISTRATION_TOKEN:-}

      # Credentials path - must match the volume mount for persistence across restarts
      # Agent runs as root, so ~ expands to /root, but volume is at /home/agent/.letsbe-agent
      - CREDENTIALS_PATH=/home/agent/.letsbe-agent/credentials.json

      # Legacy auth (deprecated - use REGISTRATION_TOKEN instead)
      - AGENT_TOKEN=${AGENT_TOKEN:-}

      # Tenant assignment (set automatically after registration via token)
      - TENANT_ID=${TENANT_ID:-}

      # Timing (seconds)
      - HEARTBEAT_INTERVAL=${HEARTBEAT_INTERVAL:-30}
      - POLL_INTERVAL=${POLL_INTERVAL:-5}

      # Logging
      - LOG_LEVEL=${LOG_LEVEL:-DEBUG}
      - LOG_JSON=${LOG_JSON:-false}

      # Resilience
      - MAX_CONCURRENT_TASKS=${MAX_CONCURRENT_TASKS:-3}
      - BACKOFF_BASE=${BACKOFF_BASE:-1.0}
      - BACKOFF_MAX=${BACKOFF_MAX:-60.0}
      - CIRCUIT_BREAKER_THRESHOLD=${CIRCUIT_BREAKER_THRESHOLD:-5}
      - CIRCUIT_BREAKER_COOLDOWN=${CIRCUIT_BREAKER_COOLDOWN:-30}

      # Security
      - ALLOWED_FILE_ROOT=${ALLOWED_FILE_ROOT:-/opt/letsbe}
      - MAX_FILE_SIZE=${MAX_FILE_SIZE:-10485760}
      - SHELL_TIMEOUT=${SHELL_TIMEOUT:-60}

      # Playwright browser automation
      - PLAYWRIGHT_ARTIFACTS_DIR=${PLAYWRIGHT_ARTIFACTS_DIR:-/opt/letsbe/playwright-artifacts}
      - PLAYWRIGHT_DEFAULT_TIMEOUT_MS=${PLAYWRIGHT_DEFAULT_TIMEOUT_MS:-60000}
      - PLAYWRIGHT_NAVIGATION_TIMEOUT_MS=${PLAYWRIGHT_NAVIGATION_TIMEOUT_MS:-120000}

    volumes:
      # Docker socket for docker executor
      - /var/run/docker.sock:/var/run/docker.sock

      # Hot reload in development
      - ./app:/app/app:ro
      - ./tests:/app/tests:ro
      - ./pytest.ini:/app/pytest.ini:ro

      # Host directory mounts for real infrastructure access
      - /opt/letsbe/env:/opt/letsbe/env
      - /opt/letsbe/stacks:/opt/letsbe/stacks
      - /opt/letsbe/nginx:/opt/letsbe/nginx

      # Credentials and pending results persistence
      - agent_home:/home/agent/.letsbe-agent

      # Playwright artifacts storage
      - playwright_artifacts:/opt/letsbe/playwright-artifacts

    # Security options for Chromium sandboxing
    security_opt:
      - seccomp=./chromium-seccomp.json

    # Run as root for Docker socket access in dev
    # In production, use Docker group membership instead
    user: root

    restart: unless-stopped

    # Resource limits (increased for Playwright browser automation)
    deploy:
      resources:
        limits:
          cpus: '1.5'
          memory: 1G
        reservations:
          cpus: '0.25'
          memory: 256M

volumes:
  agent_home:
    name: letsbe-agent-home
  playwright_artifacts:
    name: letsbe-playwright-artifacts
8
letsbe-sysadmin-agent/pytest.ini
Normal file
@@ -0,0 +1,8 @@
[pytest]
testpaths = tests
asyncio_mode = auto
asyncio_default_fixture_loop_scope = function
python_files = test_*.py
python_classes = Test*
python_functions = test_*
addopts = -v --tb=short
12
letsbe-sysadmin-agent/requirements.txt
Normal file
@@ -0,0 +1,12 @@
httpx>=0.27.0
structlog>=24.0.0
python-dotenv>=1.0.0
pydantic>=2.0.0
pydantic-settings>=2.0.0

# Browser automation
playwright==1.49.1

# Testing
pytest>=8.0.0
pytest-asyncio>=0.23.0
1
letsbe-sysadmin-agent/tests/__init__.py
Normal file
@@ -0,0 +1 @@
"""Test suite for LetsBe SysAdmin Agent."""
55
letsbe-sysadmin-agent/tests/conftest.py
Normal file
@@ -0,0 +1,55 @@
"""Pytest configuration and shared fixtures."""

import os
import tempfile
from pathlib import Path
from unittest.mock import MagicMock, patch

import pytest


@pytest.fixture
def temp_env_root(tmp_path):
    """Create a temporary directory to act as /opt/letsbe/env."""
    env_dir = tmp_path / "opt" / "letsbe" / "env"
    env_dir.mkdir(parents=True)
    return env_dir


@pytest.fixture
def mock_settings(temp_env_root):
    """Mock settings with temporary paths."""
    settings = MagicMock()
    settings.allowed_env_root = str(temp_env_root)
    settings.allowed_file_root = str(temp_env_root.parent / "data")
    settings.allowed_stacks_root = str(temp_env_root.parent / "stacks")
    settings.max_file_size = 10 * 1024 * 1024
    return settings


@pytest.fixture
def mock_get_settings(mock_settings):
    """Patch get_settings to return mock settings."""
    with patch("app.executors.env_update_executor.get_settings", return_value=mock_settings):
        yield mock_settings


@pytest.fixture
def sample_env_content():
    """Sample ENV file content for testing."""
    return """# Database configuration
DATABASE_URL=postgres://localhost/mydb
API_KEY=secret123

# Feature flags
DEBUG=true
LOG_LEVEL=info
"""


@pytest.fixture
def existing_env_file(temp_env_root, sample_env_content):
    """Create an existing ENV file for testing updates."""
    env_file = temp_env_root / "app.env"
    env_file.write_text(sample_env_content)
    return env_file
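
# Fixture usage sketch (illustrative test, not part of the original suite):
#
#   def test_reads_existing_env(existing_env_file, sample_env_content):
#       assert existing_env_file.read_text() == sample_env_content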
1
letsbe-sysadmin-agent/tests/executors/__init__.py
Normal file
@@ -0,0 +1 @@
"""Tests for executor modules."""
495
letsbe-sysadmin-agent/tests/executors/test_composite_executor.py
Normal file
@@ -0,0 +1,495 @@
"""Unit tests for CompositeExecutor."""

import pytest
from unittest.mock import MagicMock, patch, AsyncMock

from app.executors.base import ExecutionResult


# Patch the logger before importing the executor
with patch("app.utils.logger.get_logger", return_value=MagicMock()):
    from app.executors.composite_executor import CompositeExecutor


class TestCompositeExecutor:
    """Tests for CompositeExecutor."""

    @pytest.fixture
    def executor(self):
        """Create executor with mocked logger."""
        with patch("app.executors.base.get_logger", return_value=MagicMock()):
            return CompositeExecutor()

    def _create_mock_executor(self, success: bool, data: dict, error: str | None = None):
        """Create a mock executor that returns specified result."""
        mock_executor = MagicMock()
        mock_executor.execute = AsyncMock(return_value=ExecutionResult(
            success=success,
            data=data,
            error=error,
        ))
        return mock_executor

    # =========================================================================
    # HAPPY PATH TESTS
    # =========================================================================

    @pytest.mark.asyncio
    async def test_two_steps_both_succeed(self, executor):
        """Test successful execution of two steps."""
        mock_env_executor = self._create_mock_executor(
            success=True,
            data={"updated_keys": ["API_KEY"], "removed_keys": [], "path": "/opt/letsbe/env/app.env"},
        )
        mock_docker_executor = self._create_mock_executor(
            success=True,
            data={"compose_dir": "/opt/letsbe/stacks/myapp", "pull_ran": True, "logs": {}},
        )

        def mock_get_executor(task_type: str):
            if task_type == "ENV_UPDATE":
                return mock_env_executor
            elif task_type == "DOCKER_RELOAD":
                return mock_docker_executor
            raise ValueError(f"Unknown task type: {task_type}")

        with patch("app.executors.get_executor", side_effect=mock_get_executor):
            result = await executor.execute({
                "steps": [
                    {"type": "ENV_UPDATE", "payload": {"path": "/opt/letsbe/env/app.env", "updates": {"API_KEY": "secret"}}},
                    {"type": "DOCKER_RELOAD", "payload": {"compose_dir": "/opt/letsbe/stacks/myapp", "pull": True}},
                ]
            })

        assert result.success is True
        assert result.error is None
        assert len(result.data["steps"]) == 2

        # Verify first step
        assert result.data["steps"][0]["index"] == 0
        assert result.data["steps"][0]["type"] == "ENV_UPDATE"
        assert result.data["steps"][0]["status"] == "completed"
        assert result.data["steps"][0]["result"]["updated_keys"] == ["API_KEY"]

        # Verify second step
        assert result.data["steps"][1]["index"] == 1
        assert result.data["steps"][1]["type"] == "DOCKER_RELOAD"
        assert result.data["steps"][1]["status"] == "completed"
        assert result.data["steps"][1]["result"]["compose_dir"] == "/opt/letsbe/stacks/myapp"

    @pytest.mark.asyncio
    async def test_single_step_succeeds(self, executor):
        """Test successful execution of single step."""
        mock_executor = self._create_mock_executor(
            success=True,
            data={"written": True, "path": "/opt/letsbe/env/test.env", "size": 100},
        )

        with patch("app.executors.get_executor", return_value=mock_executor):
            result = await executor.execute({
                "steps": [
                    {"type": "FILE_WRITE", "payload": {"path": "/opt/letsbe/env/test.env", "content": "KEY=value"}},
                ]
            })

        assert result.success is True
        assert len(result.data["steps"]) == 1
        assert result.data["steps"][0]["status"] == "completed"

    @pytest.mark.asyncio
    async def test_three_steps_all_succeed(self, executor):
        """Test successful execution of three steps."""
        mock_executor = self._create_mock_executor(success=True, data={"success": True})

        with patch("app.executors.get_executor", return_value=mock_executor):
            result = await executor.execute({
                "steps": [
                    {"type": "FILE_WRITE", "payload": {}},
                    {"type": "ENV_UPDATE", "payload": {}},
                    {"type": "DOCKER_RELOAD", "payload": {}},
                ]
            })

        assert result.success is True
        assert len(result.data["steps"]) == 3
        assert all(s["status"] == "completed" for s in result.data["steps"])

    # =========================================================================
    # FAILURE HANDLING TESTS
    # =========================================================================

    @pytest.mark.asyncio
    async def test_first_step_fails_stops_execution(self, executor):
        """Test that first step failure stops execution."""
        mock_executor = self._create_mock_executor(
            success=False,
            data={"partial": "data"},
            error="Validation failed: invalid key format",
        )

        with patch("app.executors.get_executor", return_value=mock_executor):
            result = await executor.execute({
                "steps": [
                    {"type": "ENV_UPDATE", "payload": {}},
                    {"type": "DOCKER_RELOAD", "payload": {}},  # Should NOT be called
                ]
            })

        assert result.success is False
        assert "Step 0 (ENV_UPDATE) failed" in result.error
        assert "invalid key format" in result.error
        assert len(result.data["steps"]) == 1  # Only first step
        assert result.data["steps"][0]["status"] == "failed"
        assert result.data["steps"][0]["error"] == "Validation failed: invalid key format"

    @pytest.mark.asyncio
    async def test_second_step_fails_preserves_first_result(self, executor):
        """Test that second step failure preserves first step result."""
        mock_env_executor = self._create_mock_executor(
            success=True,
            data={"updated_keys": ["KEY1"]},
        )
        mock_docker_executor = self._create_mock_executor(
            success=False,
            data={},
            error="No compose file found",
        )

        call_count = [0]

        def mock_get_executor(task_type: str):
            call_count[0] += 1
            if task_type == "ENV_UPDATE":
                return mock_env_executor
            elif task_type == "DOCKER_RELOAD":
                return mock_docker_executor
            raise ValueError(f"Unknown task type: {task_type}")

        with patch("app.executors.get_executor", side_effect=mock_get_executor):
            result = await executor.execute({
                "steps": [
                    {"type": "ENV_UPDATE", "payload": {}},
                    {"type": "DOCKER_RELOAD", "payload": {}},
                ]
            })

        assert result.success is False
        assert "Step 1 (DOCKER_RELOAD) failed" in result.error
        assert len(result.data["steps"]) == 2

        # First step completed
        assert result.data["steps"][0]["index"] == 0
        assert result.data["steps"][0]["status"] == "completed"
        assert result.data["steps"][0]["result"]["updated_keys"] == ["KEY1"]

        # Second step failed
        assert result.data["steps"][1]["index"] == 1
        assert result.data["steps"][1]["status"] == "failed"
        assert result.data["steps"][1]["error"] == "No compose file found"

    @pytest.mark.asyncio
    async def test_executor_raises_exception(self, executor):
        """Test handling of executor that raises exception."""
        mock_executor = MagicMock()
        mock_executor.execute = AsyncMock(side_effect=RuntimeError("Unexpected database error"))

        with patch("app.executors.get_executor", return_value=mock_executor):
            result = await executor.execute({
                "steps": [
                    {"type": "ENV_UPDATE", "payload": {}},
                ]
            })

        assert result.success is False
        assert "Step 0 (ENV_UPDATE) failed" in result.error
        assert "Unexpected database error" in result.error
        assert len(result.data["steps"]) == 1
        assert result.data["steps"][0]["status"] == "failed"
        assert "Unexpected database error" in result.data["steps"][0]["error"]

    # =========================================================================
    # VALIDATION TESTS
    # =========================================================================

    @pytest.mark.asyncio
    async def test_empty_steps_validation_error(self, executor):
        """Test that empty steps list fails validation."""
        result = await executor.execute({"steps": []})

        assert result.success is False
        assert "cannot be empty" in result.error
        assert result.data["steps"] == []

    @pytest.mark.asyncio
    async def test_missing_steps_field(self, executor):
        """Test that missing steps field raises ValueError."""
        with pytest.raises(ValueError, match="Missing required fields: steps"):
            await executor.execute({})

    @pytest.mark.asyncio
    async def test_steps_not_a_list(self, executor):
        """Test that non-list steps fails validation."""
        result = await executor.execute({"steps": "not a list"})

        assert result.success is False
        assert "must be a list" in result.error

    @pytest.mark.asyncio
    async def test_step_missing_type_field(self, executor):
        """Test that step without type field fails."""
        result = await executor.execute({
            "steps": [
                {"payload": {"key": "value"}}
            ]
        })

        assert result.success is False
        assert "Step 0 missing 'type' field" in result.error

    @pytest.mark.asyncio
    async def test_step_not_a_dict(self, executor):
        """Test that non-dict step fails validation."""
        result = await executor.execute({
            "steps": ["not a dict"]
        })

        assert result.success is False
        assert "Step 0 is not a valid step definition" in result.error

    @pytest.mark.asyncio
    async def test_unknown_step_type_fails(self, executor):
        """Test that unknown step type fails with clear error."""
        with patch("app.executors.get_executor") as mock_get:
            mock_get.side_effect = ValueError("Unknown task type: INVALID_TYPE. Available: ['ECHO', 'SHELL']")

            result = await executor.execute({
                "steps": [
                    {"type": "INVALID_TYPE", "payload": {}}
                ]
            })

        assert result.success is False
        assert "Unknown task type" in result.error
        assert "INVALID_TYPE" in result.error

    # =========================================================================
    # RESULT STRUCTURE TESTS
    # =========================================================================

    @pytest.mark.asyncio
    async def test_result_has_correct_structure(self, executor):
        """Test that result has all required fields."""
        mock_executor = self._create_mock_executor(
            success=True,
            data={"key": "value"},
        )

        with patch("app.executors.get_executor", return_value=mock_executor):
            result = await executor.execute({
                "steps": [
                    {"type": "ECHO", "payload": {"message": "test"}},
                ]
            })

        assert result.success is True
        assert "steps" in result.data
        assert isinstance(result.data["steps"], list)

        step = result.data["steps"][0]
        assert "index" in step
        assert "type" in step
        assert "status" in step
        assert "result" in step
        assert step["index"] == 0
        assert step["type"] == "ECHO"
        assert step["status"] == "completed"

    @pytest.mark.asyncio
    async def test_error_field_present_on_failure(self, executor):
        """Test that error field is present in step result on failure."""
        mock_executor = self._create_mock_executor(
            success=False,
            data={},
            error="Something went wrong",
        )

        with patch("app.executors.get_executor", return_value=mock_executor):
            result = await executor.execute({
                "steps": [
                    {"type": "SHELL", "payload": {}},
                ]
            })

        assert result.success is False
        assert "error" in result.data["steps"][0]
        assert result.data["steps"][0]["error"] == "Something went wrong"

    @pytest.mark.asyncio
    async def test_error_field_absent_on_success(self, executor):
        """Test that error field is not present in step result on success."""
        mock_executor = self._create_mock_executor(
            success=True,
            data={"result": "ok"},
            error=None,
        )

        with patch("app.executors.get_executor", return_value=mock_executor):
            result = await executor.execute({
                "steps": [
                    {"type": "ECHO", "payload": {}},
                ]
            })

        assert result.success is True
        assert "error" not in result.data["steps"][0]

    @pytest.mark.asyncio
    async def test_propagates_underlying_executor_results(self, executor):
        """Test that underlying executor data is propagated correctly."""
        specific_data = {
            "updated_keys": ["DB_HOST", "DB_PORT"],
            "removed_keys": ["OLD_KEY"],
            "path": "/opt/letsbe/env/database.env",
            "custom_field": "custom_value",
        }
        mock_executor = self._create_mock_executor(
            success=True,
            data=specific_data,
        )

        with patch("app.executors.get_executor", return_value=mock_executor):
            result = await executor.execute({
                "steps": [
                    {"type": "ENV_UPDATE", "payload": {}},
                ]
            })

        assert result.success is True
        assert result.data["steps"][0]["result"] == specific_data

    @pytest.mark.asyncio
    async def test_duration_ms_populated(self, executor):
        """Test that duration_ms is populated."""
        mock_executor = self._create_mock_executor(success=True, data={})

        with patch("app.executors.get_executor", return_value=mock_executor):
            result = await executor.execute({
                "steps": [
                    {"type": "ECHO", "payload": {}},
                ]
            })

        assert result.duration_ms is not None
        assert result.duration_ms >= 0

    # =========================================================================
    # PAYLOAD HANDLING TESTS
    # =========================================================================

    @pytest.mark.asyncio
    async def test_step_payload_defaults_to_empty_dict(self, executor):
        """Test that missing payload in step defaults to empty dict."""
        mock_executor = MagicMock()
        mock_executor.execute = AsyncMock(return_value=ExecutionResult(success=True, data={}))

        with patch("app.executors.get_executor", return_value=mock_executor):
            result = await executor.execute({
                "steps": [
                    {"type": "ECHO"}  # No payload field
                ]
            })

        assert result.success is True
        # Verify execute was called with empty dict
        mock_executor.execute.assert_called_once_with({})

    @pytest.mark.asyncio
    async def test_step_payload_passed_correctly(self, executor):
        """Test that step payload is passed to executor correctly."""
        mock_executor = MagicMock()
        mock_executor.execute = AsyncMock(return_value=ExecutionResult(success=True, data={}))

        expected_payload = {"path": "/opt/letsbe/env/app.env", "updates": {"KEY": "value"}}

        with patch("app.executors.get_executor", return_value=mock_executor):
            await executor.execute({
                "steps": [
                    {"type": "ENV_UPDATE", "payload": expected_payload}
                ]
            })

        mock_executor.execute.assert_called_once_with(expected_payload)

    # =========================================================================
    # TASK TYPE TEST
    # =========================================================================

    def test_task_type(self, executor):
        """Test task_type property."""
        assert executor.task_type == "COMPOSITE"

    # =========================================================================
    # EXECUTION ORDER TESTS
    # =========================================================================

    @pytest.mark.asyncio
    async def test_steps_executed_in_order(self, executor):
        """Test that steps are executed in sequential order."""
        execution_order = []

        def create_tracking_executor(name: str):
            mock = MagicMock()
            async def track_execute(payload):
                execution_order.append(name)
                return ExecutionResult(success=True, data={"name": name})
            mock.execute = track_execute
            return mock

        def mock_get_executor(task_type: str):
            return create_tracking_executor(task_type)

        with patch("app.executors.get_executor", side_effect=mock_get_executor):
            result = await executor.execute({
                "steps": [
                    {"type": "STEP_A", "payload": {}},
                    {"type": "STEP_B", "payload": {}},
                    {"type": "STEP_C", "payload": {}},
                ]
            })

        assert result.success is True
        assert execution_order == ["STEP_A", "STEP_B", "STEP_C"]

    @pytest.mark.asyncio
    async def test_failure_stops_subsequent_steps(self, executor):
        """Test that failure at step N prevents steps N+1 and beyond from running."""
        execution_order = []

        def create_tracking_executor(name: str, should_fail: bool = False):
            mock = MagicMock()
            async def track_execute(payload):
                execution_order.append(name)
                return ExecutionResult(
                    success=not should_fail,
                    data={},
                    error="Failed" if should_fail else None,
                )
            mock.execute = track_execute
            return mock

        def mock_get_executor(task_type: str):
            if task_type == "STEP_B":
                return create_tracking_executor(task_type, should_fail=True)
            return create_tracking_executor(task_type)

        with patch("app.executors.get_executor", side_effect=mock_get_executor):
            result = await executor.execute({
                "steps": [
                    {"type": "STEP_A", "payload": {}},
                    {"type": "STEP_B", "payload": {}},  # This fails
                    {"type": "STEP_C", "payload": {}},  # Should NOT run
                ]
            })

        assert result.success is False
        assert execution_order == ["STEP_A", "STEP_B"]  # STEP_C not executed
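Taken together, these tests pin down the COMPOSITE contract: steps run in order, the first failure stops the chain, and each executed step is recorded with index, type, and status plus either a result or an error. A sketch of the payload and per-step record shape they imply (illustrative data only, not the executor implementation):

# Illustrative COMPOSITE payload and the per-step record the tests assert on
composite_payload = {
    "steps": [
        {"type": "ENV_UPDATE", "payload": {"path": "/opt/letsbe/env/app.env", "updates": {"API_KEY": "secret"}}},
        {"type": "DOCKER_RELOAD", "payload": {"compose_dir": "/opt/letsbe/stacks/myapp", "pull": True}},
    ]
}

expected_step_record = {
    "index": 0,              # position in the steps list
    "type": "ENV_UPDATE",    # delegated task type
    "status": "completed",   # or "failed"; a failure stops later steps
    "result": {},            # data returned by the underlying executor
    # "error": "..."         # present only when status == "failed"
}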
467
letsbe-sysadmin-agent/tests/executors/test_docker_executor.py
Normal file
@@ -0,0 +1,467 @@
"""Unit tests for DockerExecutor."""

import asyncio
import os
from pathlib import Path
from unittest.mock import MagicMock, patch, AsyncMock

import pytest


# Patch the logger before importing the executor
with patch("app.utils.logger.get_logger", return_value=MagicMock()):
    from app.executors.docker_executor import DockerExecutor


class TestDockerExecutor:
    """Tests for DockerExecutor."""

    @pytest.fixture
    def executor(self):
        """Create executor with mocked logger."""
        with patch("app.executors.base.get_logger", return_value=MagicMock()):
            return DockerExecutor()

    @pytest.fixture
    def temp_stacks_root(self, tmp_path):
        """Create a temporary stacks root directory."""
        stacks_dir = tmp_path / "opt" / "letsbe" / "stacks"
        stacks_dir.mkdir(parents=True)
        return stacks_dir

    @pytest.fixture
    def mock_settings(self, temp_stacks_root):
        """Mock settings with temporary paths."""
        settings = MagicMock()
        settings.allowed_stacks_root = str(temp_stacks_root)
        return settings

    @pytest.fixture
    def mock_get_settings(self, mock_settings):
        """Patch get_settings to return mock settings."""
        with patch("app.executors.docker_executor.get_settings", return_value=mock_settings):
            yield mock_settings

    @pytest.fixture
    def sample_compose_content(self):
        """Sample docker-compose.yml content."""
        return """version: '3.8'
services:
  app:
    image: nginx:latest
    ports:
      - "80:80"
"""

    @pytest.fixture
    def stack_with_docker_compose_yml(self, temp_stacks_root, sample_compose_content):
        """Create a stack with docker-compose.yml."""
        stack_dir = temp_stacks_root / "myapp"
        stack_dir.mkdir()
        compose_file = stack_dir / "docker-compose.yml"
        compose_file.write_text(sample_compose_content)
        return stack_dir

    @pytest.fixture
    def stack_with_compose_yml(self, temp_stacks_root, sample_compose_content):
        """Create a stack with compose.yml (no docker-compose.yml)."""
        stack_dir = temp_stacks_root / "otherapp"
        stack_dir.mkdir()
        compose_file = stack_dir / "compose.yml"
        compose_file.write_text(sample_compose_content)
        return stack_dir

    @pytest.fixture
    def stack_without_compose(self, temp_stacks_root):
        """Create a stack without any compose file."""
        stack_dir = temp_stacks_root / "emptyapp"
        stack_dir.mkdir()
        return stack_dir

    # =========================================================================
    # SUCCESS CASES
    # =========================================================================

    @pytest.mark.asyncio
    async def test_success_with_docker_compose_yml(
        self, executor, mock_get_settings, stack_with_docker_compose_yml
    ):
        """Test successful reload with docker-compose.yml."""
        with patch.object(executor, "_run_compose_command") as mock_run:
            mock_run.return_value = (0, "Container started", "")

            result = await executor.execute({
                "compose_dir": str(stack_with_docker_compose_yml),
            })

        assert result.success is True
        assert result.data["compose_dir"] == str(stack_with_docker_compose_yml)
        assert result.data["compose_file"] == str(stack_with_docker_compose_yml / "docker-compose.yml")
        assert result.data["pull_ran"] is False
        assert "up" in result.data["logs"]

        # Verify only 'up' command was called
        mock_run.assert_called_once()
        call_args = mock_run.call_args
        assert call_args[0][2] == ["up", "-d", "--remove-orphans"]

    @pytest.mark.asyncio
    async def test_success_with_compose_yml_fallback(
        self, executor, mock_get_settings, stack_with_compose_yml
    ):
        """Test successful reload with compose.yml fallback."""
        with patch.object(executor, "_run_compose_command") as mock_run:
            mock_run.return_value = (0, "Container started", "")

            result = await executor.execute({
                "compose_dir": str(stack_with_compose_yml),
            })

        assert result.success is True
        assert result.data["compose_file"] == str(stack_with_compose_yml / "compose.yml")
        assert result.data["pull_ran"] is False

    @pytest.mark.asyncio
    async def test_docker_compose_yml_preferred_over_compose_yml(
        self, executor, mock_get_settings, temp_stacks_root, sample_compose_content
    ):
        """Test that docker-compose.yml is preferred over compose.yml."""
        stack_dir = temp_stacks_root / "bothfiles"
        stack_dir.mkdir()
        (stack_dir / "docker-compose.yml").write_text(sample_compose_content)
        (stack_dir / "compose.yml").write_text(sample_compose_content)

        with patch.object(executor, "_run_compose_command") as mock_run:
            mock_run.return_value = (0, "", "")

            result = await executor.execute({
                "compose_dir": str(stack_dir),
            })

        assert result.success is True
        assert result.data["compose_file"] == str(stack_dir / "docker-compose.yml")

    # =========================================================================
    # PULL PARAMETER TESTS
    # =========================================================================

    @pytest.mark.asyncio
    async def test_pull_false_only_up_called(
        self, executor, mock_get_settings, stack_with_docker_compose_yml
    ):
        """Test that pull=false only runs 'up' command."""
        with patch.object(executor, "_run_compose_command") as mock_run:
            mock_run.return_value = (0, "", "")

            result = await executor.execute({
                "compose_dir": str(stack_with_docker_compose_yml),
                "pull": False,
            })

        assert result.success is True
        assert result.data["pull_ran"] is False
        assert "pull" not in result.data["logs"]
        assert "up" in result.data["logs"]

        # Only one call (up)
        assert mock_run.call_count == 1
        call_args = mock_run.call_args
        assert call_args[0][2] == ["up", "-d", "--remove-orphans"]

    @pytest.mark.asyncio
    async def test_pull_true_both_commands_called(
        self, executor, mock_get_settings, stack_with_docker_compose_yml
    ):
        """Test that pull=true runs both 'pull' and 'up' commands."""
        with patch.object(executor, "_run_compose_command") as mock_run:
            mock_run.return_value = (0, "output", "")

            result = await executor.execute({
                "compose_dir": str(stack_with_docker_compose_yml),
                "pull": True,
            })

        assert result.success is True
        assert result.data["pull_ran"] is True
        assert "pull" in result.data["logs"]
        assert "up" in result.data["logs"]

        # Two calls: pull then up
        assert mock_run.call_count == 2
        calls = mock_run.call_args_list
        assert calls[0][0][2] == ["pull"]
        assert calls[1][0][2] == ["up", "-d", "--remove-orphans"]

    @pytest.mark.asyncio
    async def test_pull_fails_stops_execution(
        self, executor, mock_get_settings, stack_with_docker_compose_yml
    ):
        """Test that pull failure stops execution before 'up'."""
        with patch.object(executor, "_run_compose_command") as mock_run:
            mock_run.return_value = (1, "", "Error pulling images")

            result = await executor.execute({
                "compose_dir": str(stack_with_docker_compose_yml),
                "pull": True,
            })

        assert result.success is False
        assert result.data["pull_ran"] is True
        assert "pull" in result.data["logs"]
        assert "up" not in result.data["logs"]
        assert "pull failed" in result.error.lower()

        # Only one call (pull)
        assert mock_run.call_count == 1

    # =========================================================================
    # FAILURE CASES
    # =========================================================================

    @pytest.mark.asyncio
    async def test_missing_compose_file(
        self, executor, mock_get_settings, stack_without_compose
    ):
        """Test failure when no compose file is found."""
        result = await executor.execute({
            "compose_dir": str(stack_without_compose),
        })

        assert result.success is False
        assert "No compose file found" in result.error
        assert "docker-compose.yml" in result.error
        assert "compose.yml" in result.error

    @pytest.mark.asyncio
    async def test_up_command_fails(
        self, executor, mock_get_settings, stack_with_docker_compose_yml
    ):
        """Test failure when 'up' command fails."""
        with patch.object(executor, "_run_compose_command") as mock_run:
            mock_run.return_value = (1, "", "Error: container crashed")

            result = await executor.execute({
                "compose_dir": str(stack_with_docker_compose_yml),
            })

        assert result.success is False
        assert "Docker up failed" in result.error
        assert "up" in result.data["logs"]

    @pytest.mark.asyncio
    async def test_missing_compose_dir_parameter(self, executor, mock_get_settings):
        """Test failure when compose_dir is missing from payload."""
        with pytest.raises(ValueError, match="Missing required fields: compose_dir"):
            await executor.execute({})

    # =========================================================================
    # PATH SECURITY TESTS
    # =========================================================================

    @pytest.mark.asyncio
    async def test_reject_path_outside_allowed_root(
        self, executor, mock_get_settings, tmp_path
    ):
        """Test rejection of compose_dir outside allowed stacks root."""
        outside_dir = tmp_path / "outside"
        outside_dir.mkdir()
        (outside_dir / "docker-compose.yml").write_text("version: '3'\n")

        result = await executor.execute({
            "compose_dir": str(outside_dir),
        })

        assert result.success is False
        assert "validation failed" in result.error.lower()

    @pytest.mark.asyncio
    async def test_reject_path_traversal_attack(
        self, executor, mock_get_settings, temp_stacks_root
    ):
        """Test rejection of path traversal attempts."""
        malicious_path = str(temp_stacks_root / ".." / ".." / "etc")

        result = await executor.execute({
            "compose_dir": malicious_path,
        })

        assert result.success is False
        assert "traversal" in result.error.lower() or "validation" in result.error.lower()

    @pytest.mark.asyncio
    async def test_reject_nonexistent_directory(
        self, executor, mock_get_settings, temp_stacks_root
    ):
        """Test rejection of nonexistent directory."""
        result = await executor.execute({
            "compose_dir": str(temp_stacks_root / "doesnotexist"),
        })

        assert result.success is False
        assert "validation failed" in result.error.lower() or "does not exist" in result.error.lower()

    @pytest.mark.asyncio
    async def test_reject_file_instead_of_directory(
        self, executor, mock_get_settings, temp_stacks_root
    ):
        """Test rejection when compose_dir points to a file instead of directory."""
        file_path = temp_stacks_root / "notadir.yml"
        file_path.write_text("version: '3'\n")

        result = await executor.execute({
            "compose_dir": str(file_path),
        })

        assert result.success is False
        assert "not a directory" in result.error.lower() or "validation" in result.error.lower()

    # =========================================================================
    # TIMEOUT AND ERROR HANDLING TESTS
    # =========================================================================

    @pytest.mark.asyncio
    async def test_timeout_handling(
        self, executor, mock_get_settings, stack_with_docker_compose_yml
    ):
        """Test timeout handling."""
        with patch.object(executor, "_run_compose_command") as mock_run:
            mock_run.side_effect = asyncio.TimeoutError()

            result = await executor.execute({
                "compose_dir": str(stack_with_docker_compose_yml),
                "timeout": 10,
            })

        assert result.success is False
        assert "timed out" in result.error.lower()

    @pytest.mark.asyncio
    async def test_unexpected_exception_handling(
        self, executor, mock_get_settings, stack_with_docker_compose_yml
    ):
        """Test handling of unexpected exceptions."""
        with patch.object(executor, "_run_compose_command") as mock_run:
            mock_run.side_effect = RuntimeError("Unexpected error")

            result = await executor.execute({
                "compose_dir": str(stack_with_docker_compose_yml),
            })

        assert result.success is False
        assert "Unexpected error" in result.error

    # =========================================================================
    # OUTPUT STRUCTURE TESTS
    # =========================================================================

    @pytest.mark.asyncio
    async def test_result_structure_on_success(
        self, executor, mock_get_settings, stack_with_docker_compose_yml
    ):
        """Test that result has correct structure on success."""
        with patch.object(executor, "_run_compose_command") as mock_run:
            mock_run.return_value = (0, "stdout content", "stderr content")

            result = await executor.execute({
                "compose_dir": str(stack_with_docker_compose_yml),
                "pull": True,
            })

        assert result.success is True
        assert "compose_dir" in result.data
        assert "compose_file" in result.data
        assert "pull_ran" in result.data
        assert "logs" in result.data
        assert isinstance(result.data["logs"], dict)
        assert "pull" in result.data["logs"]
        assert "up" in result.data["logs"]
        assert result.duration_ms is not None
        assert result.error is None

    @pytest.mark.asyncio
    async def test_logs_combine_stdout_stderr(
        self, executor, mock_get_settings, stack_with_docker_compose_yml
    ):
        """Test that logs contain both stdout and stderr."""
        with patch.object(executor, "_run_compose_command") as mock_run:
            mock_run.return_value = (0, "stdout line", "stderr line")

            result = await executor.execute({
                "compose_dir": str(stack_with_docker_compose_yml),
            })

        assert "stdout line" in result.data["logs"]["up"]
        assert "stderr line" in result.data["logs"]["up"]

    # =========================================================================
    # INTERNAL METHOD TESTS
    # =========================================================================

    def test_find_compose_file_docker_compose_yml(
        self, executor, stack_with_docker_compose_yml
    ):
        """Test _find_compose_file finds docker-compose.yml."""
        result = executor._find_compose_file(stack_with_docker_compose_yml)
        assert result == stack_with_docker_compose_yml / "docker-compose.yml"

    def test_find_compose_file_compose_yml(
        self, executor, stack_with_compose_yml
    ):
        """Test _find_compose_file finds compose.yml."""
        result = executor._find_compose_file(stack_with_compose_yml)
        assert result == stack_with_compose_yml / "compose.yml"

    def test_find_compose_file_not_found(
        self, executor, stack_without_compose
    ):
        """Test _find_compose_file returns None when not found."""
        result = executor._find_compose_file(stack_without_compose)
        assert result is None

    def test_combine_output_both_present(self, executor):
        """Test _combine_output with both stdout and stderr."""
        result = executor._combine_output("stdout", "stderr")
        assert result == "stdout\nstderr"

    def test_combine_output_stdout_only(self, executor):
        """Test _combine_output with only stdout."""
        result = executor._combine_output("stdout", "")
        assert result == "stdout"

    def test_combine_output_stderr_only(self, executor):
        """Test _combine_output with only stderr."""
        result = executor._combine_output("", "stderr")
        assert result == "stderr"

    def test_combine_output_both_empty(self, executor):
        """Test _combine_output with empty strings."""
        result = executor._combine_output("", "")
        assert result == ""

    # =========================================================================
    # TASK TYPE TEST
    # =========================================================================

    def test_task_type(self, executor):
        """Test task_type property."""
        assert executor.task_type == "DOCKER_RELOAD"

    # =========================================================================
    # CUSTOM TIMEOUT TEST
    # =========================================================================

    @pytest.mark.asyncio
    async def test_custom_timeout_passed_to_command(
        self, executor, mock_get_settings, stack_with_docker_compose_yml
    ):
        """Test that custom timeout is passed to subprocess."""
        with patch.object(executor, "_run_compose_command") as mock_run:
            mock_run.return_value = (0, "", "")

            await executor.execute({
                "compose_dir": str(stack_with_docker_compose_yml),
                "timeout": 120,
            })

        call_args = mock_run.call_args
        assert call_args[0][3] == 120  # timeout argument
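One convention worth noting in these tests: the assertions reach into mock_run.call_args by position, which fixes the third positional argument of _run_compose_command as the compose subcommand list and the fourth as the timeout (the first two positions are never asserted on here, so their meaning is not pinned down by this file). A small sketch of the command ordering the tests encode (illustrative only, not the executor code):

# sketch: compose subcommand ordering pinned by the tests above
PULL_ARGS = ["pull"]
UP_ARGS = ["up", "-d", "--remove-orphans"]


def planned_commands(pull: bool) -> list[list[str]]:
    """Optional 'pull' first; 'up -d --remove-orphans' always follows."""
    return ([PULL_ARGS] if pull else []) + [UP_ARGS]


assert planned_commands(pull=False) == [UP_ARGS]
assert planned_commands(pull=True) == [PULL_ARGS, UP_ARGS]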
403
letsbe-sysadmin-agent/tests/executors/test_env_inspect_executor.py
Normal file
@@ -0,0 +1,403 @@
"""Unit tests for EnvInspectExecutor."""

from pathlib import Path
from unittest.mock import MagicMock, patch

import pytest

# Patch the logger before importing the executor
with patch("app.utils.logger.get_logger", return_value=MagicMock()):
    from app.executors.env_inspect_executor import EnvInspectExecutor


class TestEnvInspectExecutor:
    """Test suite for EnvInspectExecutor."""

    @pytest.fixture
    def executor(self):
        """Create executor instance with mocked logger."""
        with patch("app.executors.base.get_logger", return_value=MagicMock()):
            return EnvInspectExecutor()

    @pytest.fixture
    def temp_env_root(self, tmp_path):
        """Create a temporary directory to act as /opt/letsbe/env."""
        env_dir = tmp_path / "opt" / "letsbe" / "env"
        env_dir.mkdir(parents=True)
        return env_dir

    @pytest.fixture
    def mock_settings(self, temp_env_root):
        """Mock settings with temporary env root."""
        settings = MagicMock()
        settings.allowed_env_root = str(temp_env_root)
        return settings

    # ==================== Basic Inspection Tests ====================

    @pytest.mark.asyncio
    async def test_inspect_all_keys(self, executor, temp_env_root, mock_settings):
        """Test reading all keys when no filter is provided."""
        with patch("app.executors.env_inspect_executor.get_settings", return_value=mock_settings):
            env_path = temp_env_root / "app.env"
            env_path.write_text("KEY1=value1\nKEY2=value2\nKEY3=value3\n")

            result = await executor.execute({
                "path": str(env_path),
            })

        assert result.success is True
        assert result.data["path"] == str(env_path)
        assert result.data["keys"] == {
            "KEY1": "value1",
            "KEY2": "value2",
            "KEY3": "value3",
        }

    @pytest.mark.asyncio
    async def test_inspect_selected_keys(self, executor, temp_env_root, mock_settings):
        """Test reading only selected keys."""
        with patch("app.executors.env_inspect_executor.get_settings", return_value=mock_settings):
            env_path = temp_env_root / "app.env"
            env_path.write_text("KEY1=value1\nKEY2=value2\nKEY3=value3\n")

            result = await executor.execute({
                "path": str(env_path),
                "keys": ["KEY1", "KEY3"],
            })

        assert result.success is True
        assert result.data["keys"] == {
            "KEY1": "value1",
            "KEY3": "value3",
        }

    @pytest.mark.asyncio
    async def test_inspect_selected_keys_ignores_unknown(self, executor, temp_env_root, mock_settings):
        """Test that unknown keys in filter are silently ignored."""
        with patch("app.executors.env_inspect_executor.get_settings", return_value=mock_settings):
            env_path = temp_env_root / "app.env"
            env_path.write_text("KEY1=value1\nKEY2=value2\n")

            result = await executor.execute({
                "path": str(env_path),
                "keys": ["KEY1", "NONEXISTENT", "ALSO_MISSING"],
            })

        assert result.success is True
        assert result.data["keys"] == {"KEY1": "value1"}

    @pytest.mark.asyncio
    async def test_inspect_empty_keys_filter(self, executor, temp_env_root, mock_settings):
        """Test with empty keys filter returns nothing."""
        with patch("app.executors.env_inspect_executor.get_settings", return_value=mock_settings):
            env_path = temp_env_root / "app.env"
            env_path.write_text("KEY1=value1\n")

            result = await executor.execute({
                "path": str(env_path),
                "keys": [],
            })

        assert result.success is True
        assert result.data["keys"] == {}

    @pytest.mark.asyncio
    async def test_inspect_with_keys_null(self, executor, temp_env_root, mock_settings):
        """Test that keys=null returns all keys (same as omitting keys)."""
        with patch("app.executors.env_inspect_executor.get_settings", return_value=mock_settings):
            env_path = temp_env_root / "app.env"
            env_path.write_text("KEY1=value1\nKEY2=value2\n")

            result = await executor.execute({
                "path": str(env_path),
                "keys": None,
            })

        assert result.success is True
        assert result.data["keys"] == {"KEY1": "value1", "KEY2": "value2"}

    # ==================== File Not Found Tests ====================

    @pytest.mark.asyncio
    async def test_missing_file(self, executor, temp_env_root, mock_settings):
        """Test error when file does not exist."""
        with patch("app.executors.env_inspect_executor.get_settings", return_value=mock_settings):
            env_path = temp_env_root / "nonexistent.env"

            result = await executor.execute({
                "path": str(env_path),
            })

        assert result.success is False
        assert "does not exist" in result.error or "Path validation failed" in result.error

    # ==================== Path Validation Tests ====================

    @pytest.mark.asyncio
    async def test_path_traversal_rejected(self, executor, temp_env_root, mock_settings):
        """Test rejection of directory traversal attempts."""
        with patch("app.executors.env_inspect_executor.get_settings", return_value=mock_settings):
            result = await executor.execute({
                "path": str(temp_env_root / ".." / ".." / "etc" / "passwd"),
            })

        assert result.success is False
        assert "Path validation failed" in result.error or "traversal" in result.error.lower()

    @pytest.mark.asyncio
    async def test_path_outside_allowed_root(self, executor, temp_env_root, mock_settings):
        """Test rejection of paths outside allowed root."""
        with patch("app.executors.env_inspect_executor.get_settings", return_value=mock_settings):
            result = await executor.execute({
                "path": "/etc/passwd",
            })

        assert result.success is False
        assert "Path validation failed" in result.error

    @pytest.mark.asyncio
    async def test_path_in_allowed_root_nested(self, executor, temp_env_root, mock_settings):
        """Test acceptance of valid nested path within allowed root."""
        with patch("app.executors.env_inspect_executor.get_settings", return_value=mock_settings):
            nested_dir = temp_env_root / "subdir"
            nested_dir.mkdir()
            env_path = nested_dir / "app.env"
            env_path.write_text("KEY=value\n")

            result = await executor.execute({
                "path": str(env_path),
            })

        assert result.success is True
        assert result.data["keys"] == {"KEY": "value"}

    # ==================== Payload Validation Tests ====================

    @pytest.mark.asyncio
    async def test_missing_path_payload(self, executor):
        """Test rejection of payload without path."""
        with pytest.raises(ValueError, match="Missing required field: path"):
            await executor.execute({})

    @pytest.mark.asyncio
    async def test_missing_path_payload_with_keys(self, executor):
        """Test rejection of payload with keys but no path."""
        with pytest.raises(ValueError, match="Missing required field: path"):
            await executor.execute({
                "keys": ["KEY1"],
            })

    @pytest.mark.asyncio
    async def test_reject_invalid_keys_type(self, executor, temp_env_root, mock_settings):
        """Test rejection when keys is not a list or null."""
        with patch("app.executors.env_inspect_executor.get_settings", return_value=mock_settings):
            env_path = temp_env_root / "app.env"
            env_path.write_text("KEY=value\n")

            result = await executor.execute({
                "path": str(env_path),
                "keys": {"not": "a_list"},
            })

        assert result.success is False
        assert "'keys' must be a list" in result.error

    @pytest.mark.asyncio
    async def test_reject_keys_as_string(self, executor, temp_env_root, mock_settings):
        """Test rejection when keys is a string instead of list."""
        with patch("app.executors.env_inspect_executor.get_settings", return_value=mock_settings):
            env_path = temp_env_root / "app.env"
            env_path.write_text("KEY=value\n")

            result = await executor.execute({
                "path": str(env_path),
                "keys": "KEY",
            })

        assert result.success is False
        assert "'keys' must be a list" in result.error

    # ==================== Task Type Tests ====================

    def test_task_type_property(self, executor):
        """Test that task_type returns ENV_INSPECT."""
        assert executor.task_type == "ENV_INSPECT"

    # ==================== Registry Integration Tests ====================

    def test_registry_integration(self):
        """Test that ENV_INSPECT is registered in executor registry."""
        from app.executors import get_executor

        executor = get_executor("ENV_INSPECT")
        assert executor is not None
        assert executor.task_type == "ENV_INSPECT"

    # ==================== Empty File Tests ====================

    @pytest.mark.asyncio
    async def test_empty_file(self, executor, temp_env_root, mock_settings):
        """Test reading an empty ENV file returns empty keys dict."""
        with patch("app.executors.env_inspect_executor.get_settings", return_value=mock_settings):
            env_path = temp_env_root / "empty.env"
            env_path.write_text("")

            result = await executor.execute({
                "path": str(env_path),
            })

        assert result.success is True
        assert result.data["keys"] == {}

    @pytest.mark.asyncio
    async def test_file_with_only_comments(self, executor, temp_env_root, mock_settings):
        """Test reading file with only comments returns empty keys dict."""
        with patch("app.executors.env_inspect_executor.get_settings", return_value=mock_settings):
            env_path = temp_env_root / "comments.env"
            env_path.write_text("# This is a comment\n# Another comment\n")

            result = await executor.execute({
                "path": str(env_path),
            })

        assert result.success is True
        assert result.data["keys"] == {}

    # ==================== Parsing Tests ====================

    @pytest.mark.asyncio
    async def test_parses_quoted_values_double(self, executor, temp_env_root, mock_settings):
        """Test parsing of double-quoted values."""
        with patch("app.executors.env_inspect_executor.get_settings", return_value=mock_settings):
            env_path = temp_env_root / "app.env"
            env_path.write_text('KEY="value with spaces"\n')

            result = await executor.execute({
                "path": str(env_path),
            })

        assert result.success is True
        assert result.data["keys"]["KEY"] == "value with spaces"

    @pytest.mark.asyncio
    async def test_parses_quoted_values_single(self, executor, temp_env_root, mock_settings):
        """Test parsing of single-quoted values."""
        with patch("app.executors.env_inspect_executor.get_settings", return_value=mock_settings):
            env_path = temp_env_root / "app.env"
            env_path.write_text("KEY='single quoted'\n")

            result = await executor.execute({
                "path": str(env_path),
            })

        assert result.success is True
        assert result.data["keys"]["KEY"] == "single quoted"

    @pytest.mark.asyncio
    async def test_parses_value_with_equals(self, executor, temp_env_root, mock_settings):
        """Test parsing values containing equals signs."""
        with patch("app.executors.env_inspect_executor.get_settings", return_value=mock_settings):
            env_path = temp_env_root / "app.env"
            env_path.write_text("URL=postgres://user:pass@host/db?opt=val\n")

            result = await executor.execute({
                "path": str(env_path),
            })

        assert result.success is True
        assert result.data["keys"]["URL"] == "postgres://user:pass@host/db?opt=val"

    @pytest.mark.asyncio
    async def test_ignores_comment_lines(self, executor, temp_env_root, mock_settings):
        """Test that comment lines are ignored."""
        with patch("app.executors.env_inspect_executor.get_settings", return_value=mock_settings):
            env_path = temp_env_root / "app.env"
            env_path.write_text("# Comment\nKEY=value\n# Another comment\nKEY2=value2\n")

            result = await executor.execute({
                "path": str(env_path),
            })

        assert result.success is True
        assert result.data["keys"] == {"KEY": "value", "KEY2": "value2"}

    @pytest.mark.asyncio
    async def test_ignores_empty_lines(self, executor, temp_env_root, mock_settings):
        """Test that empty lines are ignored."""
        with patch("app.executors.env_inspect_executor.get_settings", return_value=mock_settings):
            env_path = temp_env_root / "app.env"
            env_path.write_text("KEY1=value1\n\n\nKEY2=value2\n")

            result = await executor.execute({
                "path": str(env_path),
            })

        assert result.success is True
        assert result.data["keys"] == {"KEY1": "value1", "KEY2": "value2"}

    @pytest.mark.asyncio
    async def test_handles_whitespace_around_key_value(self, executor, temp_env_root, mock_settings):
        """Test handling of whitespace around keys and values."""
        with patch("app.executors.env_inspect_executor.get_settings", return_value=mock_settings):
            env_path = temp_env_root / "app.env"
            env_path.write_text(" KEY1 = value1 \n")

            result = await executor.execute({
                "path": str(env_path),
            })

        assert result.success is True
        assert result.data["keys"]["KEY1"] == "value1"


class TestEnvInspectExecutorInternal:
    """Tests for internal methods of EnvInspectExecutor."""

    @pytest.fixture
    def executor(self):
        """Create executor instance with mocked logger."""
        with patch("app.executors.base.get_logger", return_value=MagicMock()):
            return EnvInspectExecutor()

    def test_parse_env_file_basic(self, executor):
        """Test basic ENV file parsing."""
        content = "KEY1=value1\nKEY2=value2\n"
        result = executor._parse_env_file(content)
        assert result == {"KEY1": "value1", "KEY2": "value2"}

    def test_parse_env_file_with_comments(self, executor):
        """Test parsing ignores comments."""
        content = "# Comment\nKEY=value\n# Another comment\n"
        result = executor._parse_env_file(content)
        assert result == {"KEY": "value"}

    def test_parse_env_file_with_empty_lines(self, executor):
        """Test parsing ignores empty lines."""
        content = "KEY1=value1\n\n\nKEY2=value2\n"
        result = executor._parse_env_file(content)
        assert result == {"KEY1": "value1", "KEY2": "value2"}

    def test_parse_env_file_with_quotes(self, executor):
        """Test parsing handles quoted values."""
        content = 'KEY1="quoted value"\nKEY2=\'single quoted\'\n'
        result = executor._parse_env_file(content)
        assert result == {"KEY1": "quoted value", "KEY2": "single quoted"}

    def test_parse_env_file_with_equals_in_value(self, executor):
        """Test parsing handles equals signs in values."""
        content = "URL=postgres://user:pass@host/db?opt=val\n"
        result = executor._parse_env_file(content)
        assert result == {"URL": "postgres://user:pass@host/db?opt=val"}

    def test_parse_env_file_empty(self, executor):
        """Test parsing empty content."""
        content = ""
        result = executor._parse_env_file(content)
        assert result == {}

    def test_parse_env_file_only_comments(self, executor):
        """Test parsing content with only comments."""
        content = "# Comment 1\n# Comment 2\n"
        result = executor._parse_env_file(content)
        assert result == {}
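The parsing tests above fully determine the ENV grammar the executor accepts: blank lines and # comments are skipped, the split happens on the first '=', surrounding whitespace is trimmed, and a matching pair of single or double quotes is stripped. A minimal reference parser with the same observable behavior (a sketch consistent with the assertions, not the executor's actual _parse_env_file):

def parse_env(content: str) -> dict[str, str]:
    """Reference ENV parsing matching the test assertions above."""
    keys: dict[str, str] = {}
    for raw in content.splitlines():
        line = raw.strip()
        if not line or line.startswith("#"):
            continue  # blank lines and comments are ignored
        if "=" not in line:
            continue
        key, _, value = line.partition("=")  # split on the FIRST '='
        value = value.strip()
        if len(value) >= 2 and value[0] == value[-1] and value[0] in "\"'":
            value = value[1:-1]  # strip one layer of matching quotes
        keys[key.strip()] = value
    return keys


assert parse_env('A="x y"\n# c\nURL=p://u@h/db?o=v\n') == {"A": "x y", "URL": "p://u@h/db?o=v"}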
582
letsbe-sysadmin-agent/tests/executors/test_env_update_executor.py
Normal file
@@ -0,0 +1,582 @@
"""Unit tests for EnvUpdateExecutor."""

import os
import stat
from pathlib import Path
from unittest.mock import MagicMock, patch

import pytest

# Patch the logger before importing the executor
with patch("app.utils.logger.get_logger", return_value=MagicMock()):
    from app.executors.env_update_executor import EnvUpdateExecutor


class TestEnvUpdateExecutor:
    """Test suite for EnvUpdateExecutor."""

    @pytest.fixture
    def executor(self):
        """Create executor instance with mocked logger."""
        with patch("app.executors.base.get_logger", return_value=MagicMock()):
            return EnvUpdateExecutor()

    @pytest.fixture
    def temp_env_root(self, tmp_path):
        """Create a temporary directory to act as /opt/letsbe/env."""
        env_dir = tmp_path / "opt" / "letsbe" / "env"
        env_dir.mkdir(parents=True)
        return env_dir

    @pytest.fixture
    def mock_settings(self, temp_env_root):
        """Mock settings with temporary env root."""
        settings = MagicMock()
        settings.allowed_env_root = str(temp_env_root)
        return settings

    # ==================== New File Creation Tests ====================

    @pytest.mark.asyncio
    async def test_create_new_env_file(self, executor, temp_env_root, mock_settings):
        """Test creating a new ENV file when it doesn't exist."""
        with patch("app.executors.env_update_executor.get_settings", return_value=mock_settings):
            env_path = temp_env_root / "newapp.env"

            result = await executor.execute({
                "path": str(env_path),
                "updates": {
                    "DATABASE_URL": "postgres://localhost/mydb",
                    "API_KEY": "secret123",
                },
            })

        assert result.success is True
        assert set(result.data["updated_keys"]) == {"DATABASE_URL", "API_KEY"}
        assert result.data["removed_keys"] == []
        assert result.data["path"] == str(env_path)

        # Verify file was created
        assert env_path.exists()
        content = env_path.read_text()
        assert "API_KEY=secret123" in content
        assert "DATABASE_URL=postgres://localhost/mydb" in content

    @pytest.mark.asyncio
    async def test_create_env_file_in_nested_directory(self, executor, temp_env_root, mock_settings):
        """Test creating ENV file in a nested directory that doesn't exist."""
        with patch("app.executors.env_update_executor.get_settings", return_value=mock_settings):
            env_path = temp_env_root / "subdir" / "app.env"

            result = await executor.execute({
                "path": str(env_path),
                "updates": {"KEY": "value"},
            })

        assert result.success is True
        assert env_path.exists()

    # ==================== Update Existing Keys Tests ====================

    @pytest.mark.asyncio
    async def test_update_existing_keys(self, executor, temp_env_root, mock_settings):
        """Test updating existing keys in an ENV file."""
        with patch("app.executors.env_update_executor.get_settings", return_value=mock_settings):
            env_path = temp_env_root / "app.env"
            env_path.write_text("EXISTING_KEY=old_value\nANOTHER_KEY=keep_this\n")

            result = await executor.execute({
                "path": str(env_path),
                "updates": {"EXISTING_KEY": "new_value"},
            })

        assert result.success is True
        assert "EXISTING_KEY" in result.data["updated_keys"]

        content = env_path.read_text()
        assert "EXISTING_KEY=new_value" in content
        assert "ANOTHER_KEY=keep_this" in content

    @pytest.mark.asyncio
    async def test_add_new_keys_to_existing_file(self, executor, temp_env_root, mock_settings):
        """Test adding new keys to an existing ENV file."""
        with patch("app.executors.env_update_executor.get_settings", return_value=mock_settings):
            env_path = temp_env_root / "app.env"
            env_path.write_text("EXISTING_KEY=value\n")

            result = await executor.execute({
                "path": str(env_path),
                "updates": {"NEW_KEY": "new_value"},
            })

        assert result.success is True
        content = env_path.read_text()
        assert "EXISTING_KEY=value" in content
        assert "NEW_KEY=new_value" in content

    @pytest.mark.asyncio
    async def test_preserves_key_values(self, executor, temp_env_root, mock_settings):
        """Test that existing key values are preserved when not updated."""
        with patch("app.executors.env_update_executor.get_settings", return_value=mock_settings):
            env_path = temp_env_root / "app.env"
            env_path.write_text("KEY1=value1\nKEY2=value2\n")

            result = await executor.execute({
                "path": str(env_path),
                "updates": {"KEY1": "updated"},
            })

        assert result.success is True
        content = env_path.read_text()
        assert "KEY1=updated" in content
        assert "KEY2=value2" in content

    # ==================== Remove Keys Tests ====================

    @pytest.mark.asyncio
    async def test_remove_existing_keys(self, executor, temp_env_root, mock_settings):
        """Test removing existing keys from an ENV file."""
        with patch("app.executors.env_update_executor.get_settings", return_value=mock_settings):
            env_path = temp_env_root / "app.env"
            env_path.write_text("KEEP_KEY=value\nREMOVE_KEY=to_remove\nANOTHER_KEEP=keep\n")

            result = await executor.execute({
                "path": str(env_path),
                "remove_keys": ["REMOVE_KEY"],
            })

        assert result.success is True
        assert result.data["removed_keys"] == ["REMOVE_KEY"]
        assert result.data["updated_keys"] == []

        content = env_path.read_text()
        assert "REMOVE_KEY" not in content
        assert "KEEP_KEY=value" in content
        assert "ANOTHER_KEEP=keep" in content

    @pytest.mark.asyncio
    async def test_remove_nonexistent_key(self, executor, temp_env_root, mock_settings):
        """Test removing a key that doesn't exist (should succeed but not report as removed)."""
        with patch("app.executors.env_update_executor.get_settings", return_value=mock_settings):
            env_path = temp_env_root / "app.env"
            env_path.write_text("EXISTING_KEY=value\n")

            result = await executor.execute({
                "path": str(env_path),
                "remove_keys": ["NONEXISTENT_KEY"],
            })

        assert result.success is True
        assert result.data["removed_keys"] == []  # Not reported as removed since it didn't exist

    @pytest.mark.asyncio
    async def test_update_and_remove_together(self, executor, temp_env_root, mock_settings):
        """Test updating and removing keys in the same operation."""
        with patch("app.executors.env_update_executor.get_settings", return_value=mock_settings):
            env_path = temp_env_root / "app.env"
            env_path.write_text("KEY1=old\nKEY2=remove_me\nKEY3=keep\n")

            result = await executor.execute({
                "path": str(env_path),
                "updates": {"KEY1": "new", "NEW_KEY": "added"},
                "remove_keys": ["KEY2"],
            })

        assert result.success is True
        assert "KEY1" in result.data["updated_keys"]
        assert "NEW_KEY" in result.data["updated_keys"]
        assert result.data["removed_keys"] == ["KEY2"]
|
||||
|
||||
content = env_path.read_text()
|
||||
assert "KEY1=new" in content
|
||||
assert "NEW_KEY=added" in content
|
||||
assert "KEY3=keep" in content
|
||||
assert "KEY2" not in content
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_remove_multiple_keys(self, executor, temp_env_root, mock_settings):
|
||||
"""Test removing multiple keys at once."""
|
||||
with patch("app.executors.env_update_executor.get_settings", return_value=mock_settings):
|
||||
env_path = temp_env_root / "app.env"
|
||||
env_path.write_text("A=1\nB=2\nC=3\nD=4\n")
|
||||
|
||||
result = await executor.execute({
|
||||
"path": str(env_path),
|
||||
"remove_keys": ["A", "C"],
|
||||
})
|
||||
|
||||
assert result.success is True
|
||||
assert set(result.data["removed_keys"]) == {"A", "C"}
|
||||
|
||||
content = env_path.read_text()
|
||||
assert "A=" not in content
|
||||
assert "C=" not in content
|
||||
assert "B=2" in content
|
||||
assert "D=4" in content
|
||||
|
||||
# ==================== Invalid Key Name Tests ====================
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_reject_invalid_update_key_lowercase(self, executor, temp_env_root, mock_settings):
|
||||
"""Test rejection of lowercase keys in updates."""
|
||||
with patch("app.executors.env_update_executor.get_settings", return_value=mock_settings):
|
||||
env_path = temp_env_root / "app.env"
|
||||
|
||||
result = await executor.execute({
|
||||
"path": str(env_path),
|
||||
"updates": {"invalid_key": "value"},
|
||||
})
|
||||
|
||||
assert result.success is False
|
||||
assert "Invalid ENV key format" in result.error
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_reject_invalid_update_key_starts_with_number(self, executor, temp_env_root, mock_settings):
|
||||
"""Test rejection of keys starting with a number."""
|
||||
with patch("app.executors.env_update_executor.get_settings", return_value=mock_settings):
|
||||
env_path = temp_env_root / "app.env"
|
||||
|
||||
result = await executor.execute({
|
||||
"path": str(env_path),
|
||||
"updates": {"1INVALID": "value"},
|
||||
})
|
||||
|
||||
assert result.success is False
|
||||
assert "Invalid ENV key format" in result.error
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_reject_invalid_update_key_special_chars(self, executor, temp_env_root, mock_settings):
|
||||
"""Test rejection of keys with special characters."""
|
||||
with patch("app.executors.env_update_executor.get_settings", return_value=mock_settings):
|
||||
env_path = temp_env_root / "app.env"
|
||||
|
||||
result = await executor.execute({
|
||||
"path": str(env_path),
|
||||
"updates": {"INVALID-KEY": "value"},
|
||||
})
|
||||
|
||||
assert result.success is False
|
||||
assert "Invalid ENV key format" in result.error
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_reject_invalid_remove_key(self, executor, temp_env_root, mock_settings):
|
||||
"""Test rejection of invalid keys in remove_keys."""
|
||||
with patch("app.executors.env_update_executor.get_settings", return_value=mock_settings):
|
||||
env_path = temp_env_root / "app.env"
|
||||
env_path.write_text("VALID_KEY=value\n")
|
||||
|
||||
result = await executor.execute({
|
||||
"path": str(env_path),
|
||||
"remove_keys": ["invalid_lowercase"],
|
||||
})
|
||||
|
||||
assert result.success is False
|
||||
assert "Invalid ENV key format" in result.error
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_accept_valid_key_formats(self, executor, temp_env_root, mock_settings):
|
||||
"""Test acceptance of various valid key formats."""
|
||||
with patch("app.executors.env_update_executor.get_settings", return_value=mock_settings):
|
||||
env_path = temp_env_root / "app.env"
|
||||
|
||||
valid_keys = {
|
||||
"A": "1",
|
||||
"AB": "2",
|
||||
"A1": "3",
|
||||
"A_B": "4",
|
||||
"ABC123_XYZ": "5",
|
||||
"DATABASE_URL": "postgres://localhost/db",
|
||||
}
|
||||
|
||||
result = await executor.execute({
|
||||
"path": str(env_path),
|
||||
"updates": valid_keys,
|
||||
})
|
||||
|
||||
assert result.success is True
|
||||
assert set(result.data["updated_keys"]) == set(valid_keys.keys())
|
||||
|
||||
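The key-format tests above pin down the accepted grammar: an uppercase letter followed by uppercase letters, digits, or underscores. A minimal validator consistent with these tests might look like the sketch below; the regex and helper name are illustrative assumptions, not the executor's actual code.

import re

# Hypothetical validator implied by the tests above (not the executor's code).
ENV_KEY_RE = re.compile(r"^[A-Z][A-Z0-9_]*$")


def is_valid_env_key(key: str) -> bool:
    # Accepts "A", "A1", "A_B", "DATABASE_URL"; rejects "invalid_key",
    # "1INVALID", and "INVALID-KEY", matching the tests above.
    return ENV_KEY_RE.fullmatch(key) is not None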
    # ==================== Path Validation Tests ====================

    @pytest.mark.asyncio
    async def test_reject_path_outside_allowed_root(self, executor, temp_env_root, mock_settings):
        """Test rejection of paths outside /opt/letsbe/env."""
        with patch("app.executors.env_update_executor.get_settings", return_value=mock_settings):
            # Try to write outside the allowed root
            result = await executor.execute({
                "path": "/etc/passwd",
                "updates": {"HACK": "attempt"},
            })

            assert result.success is False
            assert "Path validation failed" in result.error

    @pytest.mark.asyncio
    async def test_reject_path_traversal_attack(self, executor, temp_env_root, mock_settings):
        """Test rejection of directory traversal attempts."""
        with patch("app.executors.env_update_executor.get_settings", return_value=mock_settings):
            result = await executor.execute({
                "path": str(temp_env_root / ".." / ".." / "etc" / "passwd"),
                "updates": {"HACK": "attempt"},
            })

            assert result.success is False
            assert "Path validation failed" in result.error or "traversal" in result.error.lower()

    @pytest.mark.asyncio
    async def test_accept_valid_path_in_allowed_root(self, executor, temp_env_root, mock_settings):
        """Test acceptance of valid paths within allowed root."""
        with patch("app.executors.env_update_executor.get_settings", return_value=mock_settings):
            env_path = temp_env_root / "valid" / "path" / "app.env"

            result = await executor.execute({
                "path": str(env_path),
                "updates": {"VALID": "path"},
            })

            assert result.success is True

    # ==================== Payload Validation Tests ====================

    @pytest.mark.asyncio
    async def test_reject_missing_path(self, executor):
        """Test rejection of payload without path."""
        with pytest.raises(ValueError, match="Missing required field: path"):
            await executor.execute({
                "updates": {"KEY": "value"},
            })

    @pytest.mark.asyncio
    async def test_reject_empty_operations(self, executor, temp_env_root, mock_settings):
        """Test rejection when neither updates nor remove_keys is provided."""
        with patch("app.executors.env_update_executor.get_settings", return_value=mock_settings):
            result = await executor.execute({
                "path": str(temp_env_root / "app.env"),
            })

            assert result.success is False
            assert "At least one of 'updates' or 'remove_keys'" in result.error

    @pytest.mark.asyncio
    async def test_reject_invalid_updates_type(self, executor, temp_env_root, mock_settings):
        """Test rejection when updates is not a dict."""
        with patch("app.executors.env_update_executor.get_settings", return_value=mock_settings):
            result = await executor.execute({
                "path": str(temp_env_root / "app.env"),
                "updates": ["not", "a", "dict"],
            })

            assert result.success is False
            assert "'updates' must be a dictionary" in result.error

    @pytest.mark.asyncio
    async def test_reject_invalid_remove_keys_type(self, executor, temp_env_root, mock_settings):
        """Test rejection when remove_keys is not a list."""
        with patch("app.executors.env_update_executor.get_settings", return_value=mock_settings):
            result = await executor.execute({
                "path": str(temp_env_root / "app.env"),
                "remove_keys": {"not": "a_list"},
            })

            assert result.success is False
            assert "'remove_keys' must be a list" in result.error

    # ==================== File Permission Tests ====================

    @pytest.mark.asyncio
    @pytest.mark.skipif(os.name == "nt", reason="chmod not fully supported on Windows")
    async def test_file_permissions_640(self, executor, temp_env_root, mock_settings):
        """Test that created files have 640 permissions."""
        with patch("app.executors.env_update_executor.get_settings", return_value=mock_settings):
            env_path = temp_env_root / "secure.env"

            result = await executor.execute({
                "path": str(env_path),
                "updates": {"SECRET": "value"},
            })

            assert result.success is True

            # Check file permissions
            file_stat = env_path.stat()
            # 0o640 = owner rw, group r, others none
            expected_mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP
            actual_mode = stat.S_IMODE(file_stat.st_mode)
            assert actual_mode == expected_mode, f"Expected {oct(expected_mode)}, got {oct(actual_mode)}"
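The permissions test expects exactly 0o640 regardless of the process umask, which suggests an explicit chmod after the write. A minimal sketch under that assumption; the helper name and write-then-chmod ordering are guesses, not the executor's confirmed implementation.

import os


def write_env_file_0640(path: str, content: str) -> None:
    # chmod after the write so the process umask cannot widen the mode.
    with open(path, "w", encoding="utf-8") as handle:
        handle.write(content)
    os.chmod(path, 0o640)  # owner rw, group r, others none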
    # ==================== ENV File Parsing Tests ====================

    @pytest.mark.asyncio
    async def test_parse_quoted_values(self, executor, temp_env_root, mock_settings):
        """Test parsing of quoted values in ENV files."""
        with patch("app.executors.env_update_executor.get_settings", return_value=mock_settings):
            env_path = temp_env_root / "app.env"
            env_path.write_text('QUOTED="value with spaces"\nSINGLE=\'single quoted\'\n')

            result = await executor.execute({
                "path": str(env_path),
                "updates": {"NEW": "added"},
            })

            assert result.success is True
            content = env_path.read_text()
            # Values should be preserved (without extra quotes in the parsed form)
            assert "NEW=added" in content

    @pytest.mark.asyncio
    async def test_handle_values_with_equals(self, executor, temp_env_root, mock_settings):
        """Test handling of values containing equals signs."""
        with patch("app.executors.env_update_executor.get_settings", return_value=mock_settings):
            env_path = temp_env_root / "app.env"

            result = await executor.execute({
                "path": str(env_path),
                "updates": {"URL": "postgres://user:pass@host/db?opt=val"},
            })

            assert result.success is True
            content = env_path.read_text()
            # Values with = should be quoted
            assert 'URL="postgres://user:pass@host/db?opt=val"' in content

    @pytest.mark.asyncio
    async def test_keys_sorted_in_output(self, executor, temp_env_root, mock_settings):
        """Test that keys are sorted alphabetically in output."""
        with patch("app.executors.env_update_executor.get_settings", return_value=mock_settings):
            env_path = temp_env_root / "app.env"

            result = await executor.execute({
                "path": str(env_path),
                "updates": {
                    "ZEBRA": "last",
                    "APPLE": "first",
                    "MANGO": "middle",
                },
            })

            assert result.success is True
            content = env_path.read_text()
            lines = [line for line in content.splitlines() if line]
            keys = [line.split("=")[0] for line in lines]
            assert keys == sorted(keys)

    # ==================== Edge Cases ====================

    @pytest.mark.asyncio
    async def test_empty_env_file(self, executor, temp_env_root, mock_settings):
        """Test handling of empty existing ENV file."""
        with patch("app.executors.env_update_executor.get_settings", return_value=mock_settings):
            env_path = temp_env_root / "empty.env"
            env_path.write_text("")

            result = await executor.execute({
                "path": str(env_path),
                "updates": {"NEW_KEY": "value"},
            })

            assert result.success is True
            content = env_path.read_text()
            assert "NEW_KEY=value" in content

    @pytest.mark.asyncio
    async def test_remove_all_keys(self, executor, temp_env_root, mock_settings):
        """Test removing all keys results in empty file."""
        with patch("app.executors.env_update_executor.get_settings", return_value=mock_settings):
            env_path = temp_env_root / "app.env"
            env_path.write_text("ONLY_KEY=value\n")

            result = await executor.execute({
                "path": str(env_path),
                "remove_keys": ["ONLY_KEY"],
            })

            assert result.success is True
            content = env_path.read_text()
            assert content == ""

    @pytest.mark.asyncio
    async def test_value_with_newline(self, executor, temp_env_root, mock_settings):
        """Test handling values with newlines (should be quoted)."""
        with patch("app.executors.env_update_executor.get_settings", return_value=mock_settings):
            env_path = temp_env_root / "app.env"

            result = await executor.execute({
                "path": str(env_path),
                "updates": {"MULTILINE": "line1\nline2"},
            })

            assert result.success is True
            content = env_path.read_text()
            assert 'MULTILINE="line1\nline2"' in content


class TestEnvUpdateExecutorInternal:
    """Tests for internal methods of EnvUpdateExecutor."""

    @pytest.fixture
    def executor(self):
        """Create executor instance with mocked logger."""
        with patch("app.executors.base.get_logger", return_value=MagicMock()):
            return EnvUpdateExecutor()

    def test_parse_env_file_basic(self, executor):
        """Test basic ENV file parsing."""
        content = "KEY1=value1\nKEY2=value2\n"
        result = executor._parse_env_file(content)
        assert result == {"KEY1": "value1", "KEY2": "value2"}

    def test_parse_env_file_with_comments(self, executor):
        """Test parsing ignores comments."""
        content = "# Comment\nKEY=value\n# Another comment\n"
        result = executor._parse_env_file(content)
        assert result == {"KEY": "value"}

    def test_parse_env_file_with_empty_lines(self, executor):
        """Test parsing ignores empty lines."""
        content = "KEY1=value1\n\n\nKEY2=value2\n"
        result = executor._parse_env_file(content)
        assert result == {"KEY1": "value1", "KEY2": "value2"}

    def test_parse_env_file_with_quotes(self, executor):
        """Test parsing handles quoted values."""
        content = 'KEY1="quoted value"\nKEY2=\'single quoted\'\n'
        result = executor._parse_env_file(content)
        assert result == {"KEY1": "quoted value", "KEY2": "single quoted"}

    def test_parse_env_file_with_equals_in_value(self, executor):
        """Test parsing handles equals signs in values."""
        content = "URL=postgres://user:pass@host/db?opt=val\n"
        result = executor._parse_env_file(content)
        assert result == {"URL": "postgres://user:pass@host/db?opt=val"}

    def test_serialize_env_basic(self, executor):
        """Test basic ENV serialization."""
        env_dict = {"KEY1": "value1", "KEY2": "value2"}
        result = executor._serialize_env(env_dict)
        assert "KEY1=value1" in result
        assert "KEY2=value2" in result

    def test_serialize_env_sorted(self, executor):
        """Test serialization produces sorted output."""
        env_dict = {"ZEBRA": "z", "APPLE": "a"}
        result = executor._serialize_env(env_dict)
        lines = result.strip().split("\n")
        assert lines[0].startswith("APPLE=")
        assert lines[1].startswith("ZEBRA=")

    def test_serialize_env_quotes_special_values(self, executor):
        """Test serialization quotes values with special characters."""
        env_dict = {
            "SPACES": "has spaces",
            "EQUALS": "has=equals",
            "NEWLINE": "has\nnewline",
        }
        result = executor._serialize_env(env_dict)
        assert 'SPACES="has spaces"' in result
        assert 'EQUALS="has=equals"' in result
        assert 'NEWLINE="has\nnewline"' in result

    def test_serialize_env_empty_dict(self, executor):
        """Test serialization of empty dict."""
        result = executor._serialize_env({})
        assert result == ""
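Taken together, the internal-method tests specify the parse/serialize contract: comments and blank lines are skipped, one layer of matching quotes is stripped, values split on the first '=', output is sorted, and values containing spaces, '=', or newlines are double-quoted. The standalone functions below are a sketch of that contract, not the executor's actual implementation; the names are assumptions.

def parse_env_file(content: str) -> dict[str, str]:
    env: dict[str, str] = {}
    for line in content.splitlines():
        line = line.strip()
        if not line or line.startswith("#"):
            continue  # comments and blank lines are ignored
        key, _, value = line.partition("=")  # split on the first '=' only
        value = value.strip()
        if len(value) >= 2 and value[0] == value[-1] and value[0] in "\"'":
            value = value[1:-1]  # strip one layer of matching quotes
        env[key.strip()] = value
    return env


def serialize_env(env: dict[str, str]) -> str:
    lines = []
    for key in sorted(env):  # sorted output, as the tests require
        value = env[key]
        if any(ch in value for ch in (" ", "=", "\n")):
            value = f'"{value}"'  # quote values with spaces, '=' or newlines
        lines.append(f"{key}={value}")
    return "\n".join(lines) + ("\n" if lines else "")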
253
letsbe-sysadmin-agent/tests/executors/test_file_executor.py
Normal file
@@ -0,0 +1,253 @@
"""Unit tests for FileExecutor (FILE_WRITE)."""

import os
from pathlib import Path
from unittest.mock import MagicMock, patch

import pytest

# Patch the logger before importing the executor
with patch("app.utils.logger.get_logger", return_value=MagicMock()):
    from app.executors.file_executor import FileExecutor


class TestFileExecutor:
    """Test suite for FileExecutor."""

    @pytest.fixture
    def executor(self):
        """Create executor instance with mocked logger."""
        with patch("app.executors.base.get_logger", return_value=MagicMock()):
            return FileExecutor()

    @pytest.fixture
    def temp_file_root(self, tmp_path):
        """Create a temporary directory to act as /opt/letsbe."""
        file_dir = tmp_path / "opt" / "letsbe"
        file_dir.mkdir(parents=True)
        return file_dir

    @pytest.fixture
    def mock_settings(self, temp_file_root):
        """Mock settings with temporary file root."""
        settings = MagicMock()
        settings.allowed_file_root = str(temp_file_root)
        settings.allowed_env_root = str(temp_file_root / "env")
        settings.max_file_size = 1_048_576  # 1MB
        return settings

    # ==================== Happy Path Tests ====================

    @pytest.mark.asyncio
    async def test_write_file_happy_path(self, executor, temp_file_root, mock_settings):
        """Test writing a simple file."""
        with patch("app.executors.file_executor.get_settings", return_value=mock_settings):
            file_path = temp_file_root / "test.txt"
            content = "Hello, World!"

            result = await executor.execute({
                "path": str(file_path),
                "content": content,
            })

            assert result.success is True
            assert result.data["written"] is True
            assert result.data["path"] == str(file_path)
            assert result.data["size"] == len(content.encode("utf-8"))
            assert file_path.read_text() == content

    @pytest.mark.asyncio
    async def test_write_file_in_config_directory(self, executor, temp_file_root, mock_settings):
        """Test writing file in /opt/letsbe/config subdirectory.

        This verifies that config paths are valid under allowed_file_root.
        """
        with patch("app.executors.file_executor.get_settings", return_value=mock_settings):
            config_dir = temp_file_root / "config" / "app"
            config_dir.mkdir(parents=True)
            file_path = config_dir / "settings.json"
            content = '{"debug": false}'

            result = await executor.execute({
                "path": str(file_path),
                "content": content,
            })

            assert result.success is True
            assert result.data["written"] is True
            assert file_path.read_text() == content

    @pytest.mark.asyncio
    async def test_write_file_nested_config_directory(self, executor, temp_file_root, mock_settings):
        """Test writing file in nested config subdirectory."""
        with patch("app.executors.file_executor.get_settings", return_value=mock_settings):
            config_dir = temp_file_root / "config" / "nginx" / "sites-available"
            config_dir.mkdir(parents=True)
            file_path = config_dir / "default.conf"
            content = "server { listen 80; }"

            result = await executor.execute({
                "path": str(file_path),
                "content": content,
            })

            assert result.success is True
            assert file_path.read_text() == content

    @pytest.mark.asyncio
    async def test_write_creates_parent_directories(self, executor, temp_file_root, mock_settings):
        """Test that parent directories are created automatically."""
        with patch("app.executors.file_executor.get_settings", return_value=mock_settings):
            file_path = temp_file_root / "new" / "nested" / "dir" / "file.txt"
            content = "content"

            result = await executor.execute({
                "path": str(file_path),
                "content": content,
            })

            assert result.success is True
            assert file_path.exists()
            assert file_path.read_text() == content

    # ==================== Write Mode Tests ====================

    @pytest.mark.asyncio
    @pytest.mark.skipif(
        os.name == "nt",
        reason="os.rename on Windows fails when target exists; executor uses os.rename instead of os.replace",
    )
    async def test_write_mode_overwrites(self, executor, temp_file_root, mock_settings):
        """Test that write mode overwrites existing content."""
        with patch("app.executors.file_executor.get_settings", return_value=mock_settings):
            file_path = temp_file_root / "test.txt"
            file_path.write_text("old content")

            result = await executor.execute({
                "path": str(file_path),
                "content": "new content",
                "mode": "write",
            })

            assert result.success is True
            assert file_path.read_text() == "new content"

    @pytest.mark.asyncio
    async def test_append_mode(self, executor, temp_file_root, mock_settings):
        """Test append mode adds to existing content."""
        with patch("app.executors.file_executor.get_settings", return_value=mock_settings):
            file_path = temp_file_root / "test.txt"
            file_path.write_text("line1\n")

            result = await executor.execute({
                "path": str(file_path),
                "content": "line2\n",
                "mode": "append",
            })

            assert result.success is True
            assert file_path.read_text() == "line1\nline2\n"

    @pytest.mark.asyncio
    async def test_invalid_mode_rejected(self, executor, temp_file_root, mock_settings):
        """Test rejection of invalid mode."""
        with patch("app.executors.file_executor.get_settings", return_value=mock_settings):
            result = await executor.execute({
                "path": str(temp_file_root / "test.txt"),
                "content": "content",
                "mode": "invalid",
            })

            assert result.success is False
            assert "Invalid mode" in result.error
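The skipif above documents that the executor overwrites via os.rename, which fails on Windows when the target already exists. A portable sketch of a write path using os.replace instead is shown below; this is an alternative under stated assumptions, not the executor's current behavior, and the helper name is hypothetical.

import os
import tempfile


def atomic_write(path: str, content: str) -> None:
    # Write to a temp file in the same directory, then swap it into place.
    # os.replace overwrites an existing target on every platform, unlike
    # os.rename on Windows (the limitation the skipif above works around).
    directory = os.path.dirname(path) or "."
    fd, tmp_path = tempfile.mkstemp(dir=directory)
    try:
        with os.fdopen(fd, "w", encoding="utf-8") as handle:
            handle.write(content)
        os.replace(tmp_path, path)
    except BaseException:
        os.unlink(tmp_path)
        raise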
    # ==================== Path Validation Tests ====================

    @pytest.mark.asyncio
    async def test_path_outside_allowed_root_rejected(self, executor, temp_file_root, mock_settings):
        """Test rejection of paths outside allowed root."""
        with patch("app.executors.file_executor.get_settings", return_value=mock_settings):
            result = await executor.execute({
                "path": "/etc/passwd",
                "content": "hack",
            })

            assert result.success is False
            assert "Validation failed" in result.error

    @pytest.mark.asyncio
    async def test_path_traversal_rejected(self, executor, temp_file_root, mock_settings):
        """Test rejection of directory traversal attempts."""
        with patch("app.executors.file_executor.get_settings", return_value=mock_settings):
            result = await executor.execute({
                "path": str(temp_file_root / ".." / ".." / "etc" / "passwd"),
                "content": "hack",
            })

            assert result.success is False
            assert "Validation failed" in result.error or "traversal" in result.error.lower()

    # ==================== Payload Validation Tests ====================

    @pytest.mark.asyncio
    async def test_missing_path_payload(self, executor):
        """Test rejection of payload without path."""
        with pytest.raises(ValueError, match="Missing required fields: path"):
            await executor.execute({
                "content": "content",
            })

    @pytest.mark.asyncio
    async def test_missing_content_payload(self, executor):
        """Test rejection of payload without content."""
        with pytest.raises(ValueError, match="Missing required fields: content"):
            await executor.execute({
                "path": "/some/path",
            })

    # ==================== Task Type and Registry Tests ====================

    def test_task_type_property(self, executor):
        """Test that task_type returns FILE_WRITE."""
        assert executor.task_type == "FILE_WRITE"

    def test_registry_integration(self):
        """Test that FILE_WRITE is registered in executor registry."""
        from app.executors import get_executor

        executor = get_executor("FILE_WRITE")
        assert executor is not None
        assert executor.task_type == "FILE_WRITE"

    # ==================== Duration Tests ====================

    @pytest.mark.asyncio
    async def test_duration_ms_populated(self, executor, temp_file_root, mock_settings):
        """Test that duration_ms is populated in result."""
        with patch("app.executors.file_executor.get_settings", return_value=mock_settings):
            file_path = temp_file_root / "test.txt"

            result = await executor.execute({
                "path": str(file_path),
                "content": "content",
            })

            assert result.success is True
            assert result.duration_ms is not None
            assert result.duration_ms >= 0

    # ==================== UTF-8 Tests ====================

    @pytest.mark.asyncio
    async def test_utf8_content(self, executor, temp_file_root, mock_settings):
        """Test writing UTF-8 encoded content."""
        with patch("app.executors.file_executor.get_settings", return_value=mock_settings):
            file_path = temp_file_root / "utf8.txt"
            content = "Hello 世界! こんにちは 🎉"

            result = await executor.execute({
                "path": str(file_path),
                "content": content,
            })

            assert result.success is True
            assert file_path.read_text(encoding="utf-8") == content
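The path tests in this suite (and the matching ones in the inspect suite below) imply a containment check that resolves the candidate path before comparing it to the allowed root, so that ".." segments cannot escape. A minimal sketch of such a check; the helper name and exact message are assumptions.

from pathlib import Path


def validate_path(candidate: str, allowed_root: str) -> Path:
    # Resolve symlinks and ".." segments before comparing against the root,
    # so traversal attempts like root/../../etc/passwd are caught.
    resolved = Path(candidate).resolve()
    root = Path(allowed_root).resolve()
    if not resolved.is_relative_to(root):
        raise ValueError(f"Validation failed: {candidate} is outside {allowed_root}")
    return resolved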
@@ -0,0 +1,412 @@
"""Unit tests for FileInspectExecutor."""

from pathlib import Path
from unittest.mock import MagicMock, patch

import pytest

# Patch the logger before importing the executor
with patch("app.utils.logger.get_logger", return_value=MagicMock()):
    from app.executors.file_inspect_executor import FileInspectExecutor


class TestFileInspectExecutor:
    """Test suite for FileInspectExecutor."""

    @pytest.fixture
    def executor(self):
        """Create executor instance with mocked logger."""
        with patch("app.executors.base.get_logger", return_value=MagicMock()):
            return FileInspectExecutor()

    @pytest.fixture
    def temp_file_root(self, tmp_path):
        """Create a temporary directory to act as /opt/letsbe."""
        file_dir = tmp_path / "opt" / "letsbe"
        file_dir.mkdir(parents=True)
        return file_dir

    @pytest.fixture
    def mock_settings(self, temp_file_root):
        """Mock settings with temporary file root."""
        settings = MagicMock()
        settings.allowed_file_root = str(temp_file_root)
        return settings

    # ==================== Happy Path Tests ====================

    @pytest.mark.asyncio
    async def test_inspect_file_happy_path(self, executor, temp_file_root, mock_settings):
        """Test reading a small file with default max_bytes."""
        with patch("app.executors.file_inspect_executor.get_settings", return_value=mock_settings):
            file_path = temp_file_root / "test.txt"
            content = "Hello, World!\nThis is a test file."
            # Write in binary mode to avoid platform line ending conversion
            file_path.write_bytes(content.encode("utf-8"))

            result = await executor.execute({
                "path": str(file_path),
            })

            assert result.success is True
            assert result.data["path"] == str(file_path)
            assert result.data["content"] == content
            assert result.data["bytes_read"] == len(content.encode("utf-8"))
            assert result.data["truncated"] is False

    @pytest.mark.asyncio
    async def test_inspect_file_with_custom_max_bytes(self, executor, temp_file_root, mock_settings):
        """Test reading with custom max_bytes value."""
        with patch("app.executors.file_inspect_executor.get_settings", return_value=mock_settings):
            file_path = temp_file_root / "test.txt"
            content = "Short content"
            file_path.write_text(content)

            result = await executor.execute({
                "path": str(file_path),
                "max_bytes": 8192,
            })

            assert result.success is True
            assert result.data["content"] == content
            assert result.data["truncated"] is False

    @pytest.mark.asyncio
    async def test_inspect_file_nested_directory(self, executor, temp_file_root, mock_settings):
        """Test reading file in nested directory."""
        with patch("app.executors.file_inspect_executor.get_settings", return_value=mock_settings):
            nested_dir = temp_file_root / "subdir" / "nested"
            nested_dir.mkdir(parents=True)
            file_path = nested_dir / "config.txt"
            content = "nested file content"
            file_path.write_text(content)

            result = await executor.execute({
                "path": str(file_path),
            })

            assert result.success is True
            assert result.data["content"] == content

    @pytest.mark.asyncio
    async def test_inspect_file_in_config_directory(self, executor, temp_file_root, mock_settings):
        """Test reading file in /opt/letsbe/config subdirectory.

        This verifies that config paths are valid under allowed_file_root.
        """
        with patch("app.executors.file_inspect_executor.get_settings", return_value=mock_settings):
            config_dir = temp_file_root / "config" / "nginx"
            config_dir.mkdir(parents=True)
            file_path = config_dir / "nginx.conf"
            content = "server { listen 80; }"
            file_path.write_text(content)

            result = await executor.execute({
                "path": str(file_path),
            })

            assert result.success is True
            assert result.data["content"] == content
            assert result.data["path"] == str(file_path)

    # ==================== Truncation Tests ====================

    @pytest.mark.asyncio
    async def test_inspect_file_with_truncation(self, executor, temp_file_root, mock_settings):
        """Test truncation when file is larger than max_bytes."""
        with patch("app.executors.file_inspect_executor.get_settings", return_value=mock_settings):
            file_path = temp_file_root / "large.txt"
            content = "A" * 100  # 100 bytes
            file_path.write_text(content)

            result = await executor.execute({
                "path": str(file_path),
                "max_bytes": 10,
            })

            assert result.success is True
            assert result.data["truncated"] is True
            assert result.data["bytes_read"] == 10
            assert result.data["content"] == "A" * 10

    @pytest.mark.asyncio
    async def test_inspect_file_exact_size_no_truncation(self, executor, temp_file_root, mock_settings):
        """Test no truncation when file is exactly max_bytes."""
        with patch("app.executors.file_inspect_executor.get_settings", return_value=mock_settings):
            file_path = temp_file_root / "exact.txt"
            content = "X" * 50
            file_path.write_text(content)

            result = await executor.execute({
                "path": str(file_path),
                "max_bytes": 50,
            })

            assert result.success is True
            assert result.data["truncated"] is False
            assert result.data["bytes_read"] == 50
            assert result.data["content"] == content

    @pytest.mark.asyncio
    async def test_inspect_file_one_byte_over_truncates(self, executor, temp_file_root, mock_settings):
        """Test truncation when file is one byte over max_bytes."""
        with patch("app.executors.file_inspect_executor.get_settings", return_value=mock_settings):
            file_path = temp_file_root / "over.txt"
            content = "Y" * 51
            file_path.write_text(content)

            result = await executor.execute({
                "path": str(file_path),
                "max_bytes": 50,
            })

            assert result.success is True
            assert result.data["truncated"] is True
            assert result.data["bytes_read"] == 50
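The truncation tests fix the read contract: bytes_read is capped at max_bytes, and truncated flips only when the file is strictly larger. One way to satisfy the exact-size and one-byte-over cases without a separate stat call is to read one byte past the limit; the sketch below does that, with decoding via errors="replace" as the binary test later in this file expects. The function name is an assumption.

def bounded_read(path: str, max_bytes: int) -> tuple[str, int, bool]:
    # Read one byte past the limit so truncation can be detected directly.
    with open(path, "rb") as handle:
        raw = handle.read(max_bytes + 1)
    truncated = len(raw) > max_bytes
    raw = raw[:max_bytes]
    # Invalid UTF-8 surfaces as U+FFFD, matching test_binary_with_replacement.
    return raw.decode("utf-8", errors="replace"), len(raw), truncated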
    # ==================== File Not Found Tests ====================

    @pytest.mark.asyncio
    async def test_file_does_not_exist(self, executor, temp_file_root, mock_settings):
        """Test error when file does not exist."""
        with patch("app.executors.file_inspect_executor.get_settings", return_value=mock_settings):
            file_path = temp_file_root / "nonexistent.txt"

            result = await executor.execute({
                "path": str(file_path),
            })

            assert result.success is False
            assert "does not exist" in result.error or "Path validation failed" in result.error

    # ==================== Path Validation Tests ====================

    @pytest.mark.asyncio
    async def test_path_outside_allowed_root_rejected(self, executor, temp_file_root, mock_settings):
        """Test rejection of paths outside allowed root."""
        with patch("app.executors.file_inspect_executor.get_settings", return_value=mock_settings):
            result = await executor.execute({
                "path": "/etc/passwd",
            })

            assert result.success is False
            assert "Path validation failed" in result.error

    @pytest.mark.asyncio
    async def test_path_traversal_rejected(self, executor, temp_file_root, mock_settings):
        """Test rejection of directory traversal attempts."""
        with patch("app.executors.file_inspect_executor.get_settings", return_value=mock_settings):
            result = await executor.execute({
                "path": str(temp_file_root / ".." / ".." / "etc" / "passwd"),
            })

            assert result.success is False
            assert "Path validation failed" in result.error or "traversal" in result.error.lower()

    # ==================== max_bytes Validation Tests ====================

    @pytest.mark.asyncio
    async def test_invalid_max_bytes_type_string(self, executor, temp_file_root, mock_settings):
        """Test rejection when max_bytes is a non-numeric string."""
        with patch("app.executors.file_inspect_executor.get_settings", return_value=mock_settings):
            file_path = temp_file_root / "test.txt"
            file_path.write_text("content")

            result = await executor.execute({
                "path": str(file_path),
                "max_bytes": "not-a-number",
            })

            assert result.success is False
            assert "Invalid max_bytes" in result.error

    @pytest.mark.asyncio
    async def test_invalid_max_bytes_type_none(self, executor, temp_file_root, mock_settings):
        """Test that explicitly passing max_bytes=None fails validation."""
        with patch("app.executors.file_inspect_executor.get_settings", return_value=mock_settings):
            file_path = temp_file_root / "test.txt"
            file_path.write_text("content")

            # Omitting max_bytes falls back to the default, but an explicit
            # None cannot be converted to int, so validation should fail.
            result = await executor.execute({
                "path": str(file_path),
                "max_bytes": None,
            })

            assert result.success is False
            assert "Invalid max_bytes" in result.error

    @pytest.mark.asyncio
    async def test_max_bytes_zero_rejected(self, executor, temp_file_root, mock_settings):
        """Test rejection when max_bytes is zero."""
        with patch("app.executors.file_inspect_executor.get_settings", return_value=mock_settings):
            file_path = temp_file_root / "test.txt"
            file_path.write_text("content")

            result = await executor.execute({
                "path": str(file_path),
                "max_bytes": 0,
            })

            assert result.success is False
            assert "max_bytes must be between 1 and" in result.error

    @pytest.mark.asyncio
    async def test_max_bytes_negative_rejected(self, executor, temp_file_root, mock_settings):
        """Test rejection when max_bytes is negative."""
        with patch("app.executors.file_inspect_executor.get_settings", return_value=mock_settings):
            file_path = temp_file_root / "test.txt"
            file_path.write_text("content")

            result = await executor.execute({
                "path": str(file_path),
                "max_bytes": -100,
            })

            assert result.success is False
            assert "max_bytes must be between 1 and" in result.error

    @pytest.mark.asyncio
    async def test_max_bytes_over_limit_rejected(self, executor, temp_file_root, mock_settings):
        """Test rejection when max_bytes exceeds 1MB."""
        with patch("app.executors.file_inspect_executor.get_settings", return_value=mock_settings):
            file_path = temp_file_root / "test.txt"
            file_path.write_text("content")

            result = await executor.execute({
                "path": str(file_path),
                "max_bytes": 2_000_000,  # 2MB
            })

            assert result.success is False
            assert "max_bytes must be between 1 and" in result.error

    @pytest.mark.asyncio
    async def test_max_bytes_at_limit_accepted(self, executor, temp_file_root, mock_settings):
        """Test acceptance of max_bytes at exactly 1MB."""
        with patch("app.executors.file_inspect_executor.get_settings", return_value=mock_settings):
            file_path = temp_file_root / "test.txt"
            file_path.write_text("small content")

            result = await executor.execute({
                "path": str(file_path),
                "max_bytes": 1_048_576,  # Exactly 1MB
            })

            assert result.success is True

    @pytest.mark.asyncio
    async def test_max_bytes_as_string_number_accepted(self, executor, temp_file_root, mock_settings):
        """Test acceptance of max_bytes as numeric string."""
        with patch("app.executors.file_inspect_executor.get_settings", return_value=mock_settings):
            file_path = temp_file_root / "test.txt"
            file_path.write_text("content")

            result = await executor.execute({
                "path": str(file_path),
                "max_bytes": "4096",  # String that can be converted to int
            })

            assert result.success is True
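The max_bytes tests imply coercion via int() followed by a 1 to 1_048_576 range check: the string "4096" converts cleanly, while None and "not-a-number" fail before the range check ever runs. A sketch of that validation; the names and exact error strings are assumptions modeled on the asserts above.

MAX_INSPECT_BYTES = 1_048_576  # 1MB ceiling, per the tests above


def coerce_max_bytes(value) -> int:
    # int("4096") succeeds; int(None) raises TypeError and
    # int("not-a-number") raises ValueError, so both are rejected here.
    try:
        limit = int(value)
    except (TypeError, ValueError) as exc:
        raise ValueError(f"Invalid max_bytes: {value!r}") from exc
    if not 1 <= limit <= MAX_INSPECT_BYTES:
        raise ValueError(f"max_bytes must be between 1 and {MAX_INSPECT_BYTES}")
    return limit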
    # ==================== Payload Validation Tests ====================

    @pytest.mark.asyncio
    async def test_missing_path_payload(self, executor):
        """Test rejection of payload without path."""
        with pytest.raises(ValueError, match="Missing required field: path"):
            await executor.execute({})

    @pytest.mark.asyncio
    async def test_missing_path_with_max_bytes(self, executor):
        """Test rejection of payload with max_bytes but no path."""
        with pytest.raises(ValueError, match="Missing required field: path"):
            await executor.execute({
                "max_bytes": 4096,
            })

    # ==================== Task Type and Registry Tests ====================

    def test_task_type_property(self, executor):
        """Test that task_type returns FILE_INSPECT."""
        assert executor.task_type == "FILE_INSPECT"

    def test_registry_integration(self):
        """Test that FILE_INSPECT is registered in executor registry."""
        from app.executors import get_executor

        executor = get_executor("FILE_INSPECT")
        assert executor is not None
        assert executor.task_type == "FILE_INSPECT"

    # ==================== Empty File Tests ====================

    @pytest.mark.asyncio
    async def test_empty_file(self, executor, temp_file_root, mock_settings):
        """Test reading an empty file."""
        with patch("app.executors.file_inspect_executor.get_settings", return_value=mock_settings):
            file_path = temp_file_root / "empty.txt"
            file_path.write_text("")

            result = await executor.execute({
                "path": str(file_path),
            })

            assert result.success is True
            assert result.data["content"] == ""
            assert result.data["bytes_read"] == 0
            assert result.data["truncated"] is False

    # ==================== Binary/UTF-8 Tests ====================

    @pytest.mark.asyncio
    async def test_utf8_content(self, executor, temp_file_root, mock_settings):
        """Test reading UTF-8 encoded content."""
        with patch("app.executors.file_inspect_executor.get_settings", return_value=mock_settings):
            file_path = temp_file_root / "utf8.txt"
            content = "Hello 世界! こんにちは 🎉"
            file_path.write_text(content, encoding="utf-8")

            result = await executor.execute({
                "path": str(file_path),
            })

            assert result.success is True
            assert result.data["content"] == content

    @pytest.mark.asyncio
    async def test_binary_with_replacement(self, executor, temp_file_root, mock_settings):
        """Test that invalid UTF-8 bytes are replaced."""
        with patch("app.executors.file_inspect_executor.get_settings", return_value=mock_settings):
            file_path = temp_file_root / "binary.bin"
            # Write bytes that are not valid UTF-8
            file_path.write_bytes(b"Hello\xff\xfeWorld")

            result = await executor.execute({
                "path": str(file_path),
            })

            assert result.success is True
            # Invalid bytes should be replaced with replacement character
            assert "Hello" in result.data["content"]
            assert "World" in result.data["content"]
            assert "\ufffd" in result.data["content"]  # Replacement character

    # ==================== Duration Tests ====================

    @pytest.mark.asyncio
    async def test_duration_ms_populated(self, executor, temp_file_root, mock_settings):
        """Test that duration_ms is populated in result."""
        with patch("app.executors.file_inspect_executor.get_settings", return_value=mock_settings):
            file_path = temp_file_root / "test.txt"
            file_path.write_text("content")

            result = await executor.execute({
                "path": str(file_path),
            })

            assert result.success is True
            assert result.duration_ms is not None
            assert result.duration_ms >= 0
524
letsbe-sysadmin-agent/tests/executors/test_nextcloud_executor.py
Normal file
@@ -0,0 +1,524 @@
"""Unit tests for NextcloudSetDomainExecutor."""

import asyncio
from pathlib import Path
from unittest.mock import MagicMock, patch

import pytest


# Patch the logger before importing the executor
with patch("app.utils.logger.get_logger", return_value=MagicMock()):
    from app.executors.nextcloud_executor import NextcloudSetDomainExecutor


class TestNextcloudSetDomainExecutor:
    """Tests for NextcloudSetDomainExecutor."""

    @pytest.fixture
    def executor(self):
        """Create executor with mocked logger."""
        with patch("app.executors.base.get_logger", return_value=MagicMock()):
            return NextcloudSetDomainExecutor()

    @pytest.fixture
    def temp_nextcloud_stack(self, tmp_path):
        """Create a temporary Nextcloud stack directory with compose file."""
        stack_dir = tmp_path / "opt" / "letsbe" / "stacks" / "nextcloud"
        stack_dir.mkdir(parents=True)
        compose_file = stack_dir / "docker-compose.yml"
        compose_file.write_text("""version: '3.8'
services:
  app:
    image: nextcloud:latest
""")
        return stack_dir

    @pytest.fixture
    def executor_with_temp_stack(self, executor, temp_nextcloud_stack):
        """Configure executor to use temporary stack directory."""
        executor.NEXTCLOUD_STACK_DIR = str(temp_nextcloud_stack)
        return executor

    # =========================================================================
    # TASK TYPE TEST
    # =========================================================================

    def test_task_type(self, executor):
        """Test that task_type property returns correct value."""
        assert executor.task_type == "NEXTCLOUD_SET_DOMAIN"

    # =========================================================================
    # URL PARSING TESTS
    # =========================================================================

    def test_parse_public_url_with_https(self, executor):
        """Test URL parsing with explicit https scheme."""
        scheme, host, normalized_url = executor._parse_public_url("https://cloud.example.com")
        assert scheme == "https"
        assert host == "cloud.example.com"
        assert normalized_url == "https://cloud.example.com"

    def test_parse_public_url_with_http(self, executor):
        """Test URL parsing with http scheme."""
        scheme, host, normalized_url = executor._parse_public_url("http://cloud.example.com")
        assert scheme == "http"
        assert host == "cloud.example.com"
        assert normalized_url == "http://cloud.example.com"

    def test_parse_public_url_with_port(self, executor):
        """Test URL parsing with port number."""
        scheme, host, normalized_url = executor._parse_public_url("https://cloud.example.com:8443")
        assert scheme == "https"
        assert host == "cloud.example.com:8443"
        assert normalized_url == "https://cloud.example.com:8443"

    def test_parse_public_url_without_scheme_defaults_to_https(self, executor):
        """Test that URLs without scheme default to https."""
        scheme, host, normalized_url = executor._parse_public_url("cloud.example.com")
        assert scheme == "https"
        assert host == "cloud.example.com"
        assert normalized_url == "https://cloud.example.com"

    def test_parse_public_url_trailing_slash_stripped(self, executor):
        """Test that trailing slash is stripped from URL."""
        scheme, host, normalized_url = executor._parse_public_url("https://cloud.example.com/")
        assert scheme == "https"
        assert host == "cloud.example.com"
        assert normalized_url == "https://cloud.example.com"

    def test_parse_public_url_with_path(self, executor):
        """Test URL parsing with path (trailing slash stripped)."""
        scheme, host, normalized_url = executor._parse_public_url("https://cloud.example.com/nextcloud/")
        assert scheme == "https"
        assert host == "cloud.example.com"
        assert normalized_url == "https://cloud.example.com/nextcloud"

    def test_parse_public_url_empty_raises_error(self, executor):
        """Test that empty URL raises ValueError."""
        with pytest.raises(ValueError, match="cannot be empty"):
            executor._parse_public_url("")

    def test_parse_public_url_whitespace_only_raises_error(self, executor):
        """Test that whitespace-only URL raises ValueError."""
        with pytest.raises(ValueError, match="cannot be empty"):
            executor._parse_public_url(" ")

    def test_parse_public_url_invalid_no_host_raises_error(self, executor):
        """Test that URL with no host raises ValueError."""
        with pytest.raises(ValueError, match="no host found"):
            executor._parse_public_url("https://")
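Taken together, the parsing tests determine the tuple contract of _parse_public_url: the scheme defaults to https, the netloc is kept intact (port included), and the trailing slash is stripped from the normalized URL. A standalone sketch consistent with those tests is shown below; the real method may differ in details, and the function name is illustrative.

from urllib.parse import urlparse


def parse_public_url(public_url: str) -> tuple[str, str, str]:
    cleaned = public_url.strip()
    if not cleaned:
        raise ValueError("public_url cannot be empty")
    if "://" not in cleaned:
        cleaned = "https://" + cleaned  # scheme defaults to https
    parsed = urlparse(cleaned)
    if not parsed.netloc:
        raise ValueError("Invalid public_url: no host found")
    host = parsed.netloc  # port stays attached, e.g. cloud.example.com:8443
    normalized = cleaned.rstrip("/")
    return parsed.scheme, host, normalized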
    # =========================================================================
    # SUCCESS CASES
    # =========================================================================

    @pytest.mark.asyncio
    async def test_success_all_commands(self, executor_with_temp_stack):
        """Test successful domain configuration with all three occ commands."""
        with patch.object(executor_with_temp_stack, "_run_occ_command") as mock_run:
            mock_run.return_value = (0, "System config value overwritehost set to cloud.example.com", "")

            result = await executor_with_temp_stack.execute({
                "public_url": "https://cloud.example.com"
            })

            assert result.success is True
            assert result.data["public_url"] == "https://cloud.example.com"
            assert result.data["host"] == "cloud.example.com"
            assert result.data["scheme"] == "https"
            assert result.data["commands_executed"] == 3
            assert "overwritehost" in result.data["logs"]
            assert "overwriteprotocol" in result.data["logs"]
            assert "overwrite.cli.url" in result.data["logs"]
            assert result.error is None
            assert result.duration_ms is not None

            # Verify all three commands were called
            assert mock_run.call_count == 3

    @pytest.mark.asyncio
    async def test_success_with_http_scheme(self, executor_with_temp_stack):
        """Test successful configuration with http scheme."""
        with patch.object(executor_with_temp_stack, "_run_occ_command") as mock_run:
            mock_run.return_value = (0, "success", "")

            result = await executor_with_temp_stack.execute({
                "public_url": "http://cloud.example.com"
            })

            assert result.success is True
            assert result.data["scheme"] == "http"
            assert result.data["public_url"] == "http://cloud.example.com"

    @pytest.mark.asyncio
    async def test_success_url_without_scheme(self, executor_with_temp_stack):
        """Test successful configuration when URL lacks scheme (defaults to https)."""
        with patch.object(executor_with_temp_stack, "_run_occ_command") as mock_run:
            mock_run.return_value = (0, "success", "")

            result = await executor_with_temp_stack.execute({
                "public_url": "cloud.example.com"
            })

            assert result.success is True
            assert result.data["scheme"] == "https"
            assert result.data["host"] == "cloud.example.com"
            assert result.data["public_url"] == "https://cloud.example.com"

    # =========================================================================
    # COMMAND ARGUMENTS TESTS
    # =========================================================================

    @pytest.mark.asyncio
    async def test_command_arguments_correct(self, executor_with_temp_stack):
        """Test that occ commands receive correct arguments."""
        calls = []

        async def capture_calls(compose_file, occ_args, timeout):
            calls.append(occ_args)
            return (0, "success", "")

        with patch.object(executor_with_temp_stack, "_run_occ_command", side_effect=capture_calls):
            await executor_with_temp_stack.execute({
                "public_url": "https://cloud.example.com"
            })

        assert len(calls) == 3
        assert calls[0] == ["config:system:set", "overwritehost", "--value=cloud.example.com"]
        assert calls[1] == ["config:system:set", "overwriteprotocol", "--value=https"]
        assert calls[2] == ["config:system:set", "overwrite.cli.url", "--value=https://cloud.example.com"]

    @pytest.mark.asyncio
    async def test_command_arguments_with_port(self, executor_with_temp_stack):
        """Test that host with port is passed correctly to occ command."""
        calls = []

        async def capture_calls(compose_file, occ_args, timeout):
            calls.append(occ_args)
            return (0, "success", "")

        with patch.object(executor_with_temp_stack, "_run_occ_command", side_effect=capture_calls):
            await executor_with_temp_stack.execute({
                "public_url": "https://cloud.example.com:8443"
            })

        assert calls[0] == ["config:system:set", "overwritehost", "--value=cloud.example.com:8443"]
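The two argument tests fix the exact occ argument triples and their order. The sketch below covers only the command construction that the tests pin down; how the executor actually shells these out (for example via a docker compose exec against the stack's compose file) is not shown in this diff and is left as an assumption.

def build_occ_commands(host: str, scheme: str, public_url: str) -> list[list[str]]:
    # The three occ invocations the tests assert on, in order.
    return [
        ["config:system:set", "overwritehost", f"--value={host}"],
        ["config:system:set", "overwriteprotocol", f"--value={scheme}"],
        ["config:system:set", "overwrite.cli.url", f"--value={public_url}"],
    ]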
# =========================================================================
|
||||
# PAYLOAD VALIDATION TESTS
|
||||
# =========================================================================
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_missing_public_url_raises_error(self, executor_with_temp_stack):
|
||||
"""Test that missing public_url raises ValueError."""
|
||||
with pytest.raises(ValueError, match="Missing required fields"):
|
||||
await executor_with_temp_stack.execute({})
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_invalid_url_returns_failure(self, executor_with_temp_stack):
|
||||
"""Test that invalid URL returns failure result."""
|
||||
result = await executor_with_temp_stack.execute({
|
||||
"public_url": ""
|
||||
})
|
||||
|
||||
assert result.success is False
|
||||
assert "cannot be empty" in result.error
|
||||
|
||||
    # =========================================================================
    # COMPOSE FILE TESTS
    # =========================================================================

    @pytest.mark.asyncio
    async def test_compose_file_not_found(self, executor, tmp_path):
        """Test failure when compose file doesn't exist."""
        nonexistent_dir = tmp_path / "nonexistent"
        nonexistent_dir.mkdir()
        executor.NEXTCLOUD_STACK_DIR = str(nonexistent_dir)

        result = await executor.execute({
            "public_url": "https://cloud.example.com"
        })

        assert result.success is False
        assert "compose file not found" in result.error.lower()
        assert "docker-compose.yml" in result.error

    @pytest.mark.asyncio
    async def test_compose_yml_fallback(self, executor, tmp_path):
        """Test that compose.yml is used as fallback."""
        stack_dir = tmp_path / "nextcloud"
        stack_dir.mkdir()
        (stack_dir / "compose.yml").write_text("version: '3'\n")
        executor.NEXTCLOUD_STACK_DIR = str(stack_dir)

        with patch.object(executor, "_run_occ_command") as mock_run:
            mock_run.return_value = (0, "success", "")

            result = await executor.execute({
                "public_url": "https://cloud.example.com"
            })

        assert result.success is True
    # =========================================================================
    # COMMAND FAILURE TESTS
    # =========================================================================

    @pytest.mark.asyncio
    async def test_first_command_fails(self, executor_with_temp_stack):
        """Test that failure on first occ command returns partial results."""
        with patch.object(executor_with_temp_stack, "_run_occ_command") as mock_run:
            mock_run.return_value = (1, "", "Error: Unable to write config")

            result = await executor_with_temp_stack.execute({
                "public_url": "https://cloud.example.com"
            })

        assert result.success is False
        assert result.data["commands_executed"] == 1
        assert result.data["failed_command"] == "overwritehost"
        assert result.data["failed_args"] == ["config:system:set", "overwritehost", "--value=cloud.example.com"]
        assert "overwritehost" in result.data["logs"]
        assert "occ overwritehost failed with exit code 1" in result.error

    @pytest.mark.asyncio
    async def test_second_command_fails(self, executor_with_temp_stack):
        """Test that failure on second occ command returns partial results."""
        call_count = 0

        async def mock_occ(compose_file, occ_args, timeout):
            nonlocal call_count
            call_count += 1
            if call_count == 1:
                return (0, "success", "")  # First succeeds
            return (1, "", "Error setting overwriteprotocol")  # Second fails

        with patch.object(executor_with_temp_stack, "_run_occ_command", side_effect=mock_occ):
            result = await executor_with_temp_stack.execute({
                "public_url": "https://cloud.example.com"
            })

        assert result.success is False
        assert result.data["commands_executed"] == 2
        assert result.data["failed_command"] == "overwriteprotocol"
        assert "overwritehost" in result.data["logs"]
        assert "overwriteprotocol" in result.data["logs"]
        assert "overwrite.cli.url" not in result.data["logs"]

    @pytest.mark.asyncio
    async def test_third_command_fails(self, executor_with_temp_stack):
        """Test that failure on third occ command returns partial results."""
        call_count = 0

        async def mock_occ(compose_file, occ_args, timeout):
            nonlocal call_count
            call_count += 1
            if call_count <= 2:
                return (0, "success", "")  # First two succeed
            return (1, "", "Error setting overwrite.cli.url")  # Third fails

        with patch.object(executor_with_temp_stack, "_run_occ_command", side_effect=mock_occ):
            result = await executor_with_temp_stack.execute({
                "public_url": "https://cloud.example.com"
            })

        assert result.success is False
        assert result.data["commands_executed"] == 3
        assert result.data["failed_command"] == "overwrite.cli.url"
        assert "overwritehost" in result.data["logs"]
        assert "overwriteprotocol" in result.data["logs"]
        assert "overwrite.cli.url" in result.data["logs"]
    # =========================================================================
    # TIMEOUT HANDLING TESTS
    # =========================================================================

    @pytest.mark.asyncio
    async def test_timeout_handling(self, executor_with_temp_stack):
        """Test that asyncio.TimeoutError is caught and returns failure."""
        with patch.object(executor_with_temp_stack, "_run_occ_command") as mock_run:
            mock_run.side_effect = asyncio.TimeoutError()

            result = await executor_with_temp_stack.execute({
                "public_url": "https://cloud.example.com"
            })

        assert result.success is False
        assert "timed out" in result.error.lower()
        assert result.data["commands_executed"] == 0
        assert result.duration_ms is not None

    @pytest.mark.asyncio
    async def test_timeout_after_partial_success(self, executor_with_temp_stack):
        """Test timeout after some commands succeeded."""
        call_count = 0

        async def mock_occ(compose_file, occ_args, timeout):
            nonlocal call_count
            call_count += 1
            if call_count == 1:
                return (0, "success", "")
            raise asyncio.TimeoutError()

        with patch.object(executor_with_temp_stack, "_run_occ_command", side_effect=mock_occ):
            result = await executor_with_temp_stack.execute({
                "public_url": "https://cloud.example.com"
            })

        assert result.success is False
        assert "timed out" in result.error.lower()
        assert result.data["commands_executed"] == 1
        assert "overwritehost" in result.data["logs"]
    # =========================================================================
    # EXCEPTION HANDLING TESTS
    # =========================================================================

    @pytest.mark.asyncio
    async def test_unexpected_exception_handling(self, executor_with_temp_stack):
        """Test that unexpected exceptions are caught and returned."""
        with patch.object(executor_with_temp_stack, "_run_occ_command") as mock_run:
            mock_run.side_effect = RuntimeError("Unexpected docker error")

            result = await executor_with_temp_stack.execute({
                "public_url": "https://cloud.example.com"
            })

        assert result.success is False
        assert "Unexpected docker error" in result.error
        assert result.duration_ms is not None
    # =========================================================================
    # RESULT STRUCTURE TESTS
    # =========================================================================

    @pytest.mark.asyncio
    async def test_result_structure_on_success(self, executor_with_temp_stack):
        """Test that successful result contains all expected keys."""
        with patch.object(executor_with_temp_stack, "_run_occ_command") as mock_run:
            mock_run.return_value = (0, "success", "")

            result = await executor_with_temp_stack.execute({
                "public_url": "https://cloud.example.com"
            })

        assert result.success is True
        assert "public_url" in result.data
        assert "host" in result.data
        assert "scheme" in result.data
        assert "commands_executed" in result.data
        assert "logs" in result.data
        assert result.error is None
        assert result.duration_ms is not None

    @pytest.mark.asyncio
    async def test_result_structure_on_failure(self, executor_with_temp_stack):
        """Test that failure result contains all expected keys including failed_command."""
        with patch.object(executor_with_temp_stack, "_run_occ_command") as mock_run:
            mock_run.return_value = (1, "", "error")

            result = await executor_with_temp_stack.execute({
                "public_url": "https://cloud.example.com"
            })

        assert result.success is False
        assert "public_url" in result.data
        assert "host" in result.data
        assert "scheme" in result.data
        assert "commands_executed" in result.data
        assert "failed_command" in result.data
        assert "failed_args" in result.data
        assert "logs" in result.data
        assert result.error is not None
        assert result.duration_ms is not None
    # =========================================================================
    # OUTPUT COMBINATION TESTS
    # =========================================================================

    def test_combine_output_both(self, executor):
        """Test combining stdout and stderr."""
        result = executor._combine_output("stdout content", "stderr content")
        assert "stdout content" in result
        assert "stderr content" in result

    def test_combine_output_stdout_only(self, executor):
        """Test combining with only stdout."""
        result = executor._combine_output("stdout content", "")
        assert result == "stdout content"

    def test_combine_output_stderr_only(self, executor):
        """Test combining with only stderr."""
        result = executor._combine_output("", "stderr content")
        assert result == "stderr content"

    def test_combine_output_empty(self, executor):
        """Test combining empty outputs."""
        result = executor._combine_output("", "")
        assert result == ""
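    # A minimal sketch of the behavior these four tests pin down (hypothetical;
    # the real _combine_output lives in the executor module):
    #
    #   def _combine_output(self, stdout: str, stderr: str) -> str:
    #       # Join whichever streams are non-empty, dropping empty ones
    #       return "\n".join(part for part in (stdout, stderr) if part)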
    # =========================================================================
    # FIND COMPOSE FILE TESTS
    # =========================================================================

    def test_find_compose_file_docker_compose_yml(self, executor, tmp_path):
        """Test finding docker-compose.yml."""
        (tmp_path / "docker-compose.yml").write_text("version: '3'\n")
        result = executor._find_compose_file(tmp_path)
        assert result == tmp_path / "docker-compose.yml"

    def test_find_compose_file_compose_yml(self, executor, tmp_path):
        """Test finding compose.yml as fallback."""
        (tmp_path / "compose.yml").write_text("version: '3'\n")
        result = executor._find_compose_file(tmp_path)
        assert result == tmp_path / "compose.yml"

    def test_find_compose_file_prefers_docker_compose_yml(self, executor, tmp_path):
        """Test that docker-compose.yml is preferred over compose.yml."""
        (tmp_path / "docker-compose.yml").write_text("version: '3'\n")
        (tmp_path / "compose.yml").write_text("version: '3'\n")
        result = executor._find_compose_file(tmp_path)
        assert result == tmp_path / "docker-compose.yml"

    def test_find_compose_file_not_found(self, executor, tmp_path):
        """Test returning None when no compose file found."""
        result = executor._find_compose_file(tmp_path)
        assert result is None
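    # The lookup order these tests describe, as a sketch (hypothetical, not the
    # executor's actual code):
    #
    #   def _find_compose_file(self, stack_dir: Path) -> Path | None:
    #       # docker-compose.yml wins; compose.yml is the fallback
    #       for name in ("docker-compose.yml", "compose.yml"):
    #           candidate = stack_dir / name
    #           if candidate.exists():
    #               return candidate
    #       return None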
    # =========================================================================
    # CUSTOM TIMEOUT TESTS
    # =========================================================================

    @pytest.mark.asyncio
    async def test_custom_timeout_passed(self, executor_with_temp_stack):
        """Test that custom timeout from payload is passed to _run_occ_command."""
        received_timeouts = []

        async def capture_timeout(compose_file, occ_args, timeout):
            received_timeouts.append(timeout)
            return (0, "success", "")

        with patch.object(executor_with_temp_stack, "_run_occ_command", side_effect=capture_timeout):
            await executor_with_temp_stack.execute({
                "public_url": "https://cloud.example.com",
                "timeout": 120,
            })

        assert all(t == 120 for t in received_timeouts)

    @pytest.mark.asyncio
    async def test_default_timeout_used(self, executor_with_temp_stack):
        """Test that default timeout is used when not specified in payload."""
        received_timeouts = []

        async def capture_timeout(compose_file, occ_args, timeout):
            received_timeouts.append(timeout)
            return (0, "success", "")

        with patch.object(executor_with_temp_stack, "_run_occ_command", side_effect=capture_timeout):
            await executor_with_temp_stack.execute({
                "public_url": "https://cloud.example.com",
            })

        assert all(t == executor_with_temp_stack.DEFAULT_COMMAND_TIMEOUT for t in received_timeouts)
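    # Taken together, these tests imply a minimal payload contract: only
    # "public_url" is required, with "timeout" as an optional integer override,
    # e.g. {"public_url": "https://cloud.example.com", "timeout": 120}
    # (illustrative values taken from the assertions above).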
@@ -0,0 +1,450 @@
"""Unit tests for PlaywrightExecutor.

These tests focus on validation logic without launching browsers.
Browser-based integration tests are skipped by default (SKIP_BROWSER_TESTS=true).
"""

import os
import sys
from pathlib import Path
from unittest.mock import AsyncMock, MagicMock, patch

import pytest

# Mock the playwright module before any imports that might use it
sys.modules["playwright"] = MagicMock()
sys.modules["playwright.async_api"] = MagicMock()

# Patch the logger before importing app modules that use it
with patch("app.utils.logger.get_logger", return_value=MagicMock()):
    from app.utils.validation import is_domain_allowed, validate_allowed_domains, ValidationError


class TestDomainValidation:
    """Test domain allowlist validation functions."""

    # ==================== is_domain_allowed Tests ====================

    def test_exact_domain_match(self):
        """Test exact domain matching."""
        assert is_domain_allowed("https://cloud.example.com/path", ["cloud.example.com"]) is True
        assert is_domain_allowed("https://cloud.example.com", ["cloud.example.com"]) is True
        assert is_domain_allowed("http://cloud.example.com", ["cloud.example.com"]) is True

    def test_exact_domain_no_match(self):
        """Test exact domain non-matching."""
        assert is_domain_allowed("https://evil.com/path", ["cloud.example.com"]) is False
        assert is_domain_allowed("https://sub.cloud.example.com", ["cloud.example.com"]) is False

    def test_wildcard_subdomain_match(self):
        """Test wildcard subdomain matching."""
        assert is_domain_allowed("https://sub.example.com", ["*.example.com"]) is True
        assert is_domain_allowed("https://deep.sub.example.com", ["*.example.com"]) is True
        assert is_domain_allowed("https://example.com", ["*.example.com"]) is True

    def test_wildcard_subdomain_no_match(self):
        """Test wildcard subdomain non-matching."""
        assert is_domain_allowed("https://evil.com", ["*.example.com"]) is False
        assert is_domain_allowed("https://example.org", ["*.example.com"]) is False

    def test_domain_with_port(self):
        """Test domain matching with port specification."""
        assert is_domain_allowed("https://cloud.example.com:8443/path", ["cloud.example.com:8443"]) is True
        assert is_domain_allowed("https://cloud.example.com:8443", ["cloud.example.com:8443"]) is True
        # Wrong port should not match
        assert is_domain_allowed("https://cloud.example.com:9000", ["cloud.example.com:8443"]) is False
        # No port in URL should not match port-specific pattern
        assert is_domain_allowed("https://cloud.example.com", ["cloud.example.com:8443"]) is False

    def test_multiple_allowed_domains(self):
        """Test with multiple allowed domains."""
        allowed = ["cloud.example.com", "mail.example.com", "*.internal.com"]
        assert is_domain_allowed("https://cloud.example.com", allowed) is True
        assert is_domain_allowed("https://mail.example.com", allowed) is True
        assert is_domain_allowed("https://app.internal.com", allowed) is True
        assert is_domain_allowed("https://evil.com", allowed) is False

    def test_empty_inputs(self):
        """Test with empty inputs."""
        assert is_domain_allowed("", ["example.com"]) is False
        assert is_domain_allowed("https://example.com", []) is False
        assert is_domain_allowed("", []) is False

    def test_case_insensitive(self):
        """Test case-insensitive matching."""
        assert is_domain_allowed("https://Cloud.Example.COM", ["cloud.example.com"]) is True
        assert is_domain_allowed("https://cloud.example.com", ["Cloud.Example.COM"]) is True
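    # A minimal sketch consistent with the matching rules asserted above
    # (hypothetical; the real implementation is app.utils.validation.is_domain_allowed):
    #
    #   from urllib.parse import urlsplit
    #
    #   def is_domain_allowed(url: str, allowed: list[str]) -> bool:
    #       if not url or not allowed:
    #           return False
    #       netloc = urlsplit(url).netloc.lower()  # keeps any port, e.g. "host:8443"
    #       for pattern in (p.lower() for p in allowed):
    #           if pattern.startswith("*."):
    #               base = pattern[2:]
    #               # "*.example.com" matches subdomains and the bare domain
    #               if netloc == base or netloc.endswith("." + base):
    #                   return True
    #           elif netloc == pattern:
    #               return True
    #       return False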
    # ==================== validate_allowed_domains Tests ====================

    def test_validate_valid_domains(self):
        """Test validation of valid domain patterns."""
        result = validate_allowed_domains(["example.com", "cloud.example.com"])
        assert result == ["example.com", "cloud.example.com"]

    def test_validate_wildcard_domains(self):
        """Test validation of wildcard domain patterns."""
        result = validate_allowed_domains(["*.example.com", "*.internal.org"])
        assert result == ["*.example.com", "*.internal.org"]

    def test_validate_with_ports(self):
        """Test validation of domains with ports."""
        result = validate_allowed_domains(["example.com:8080", "cloud.example.com:8443"])
        assert result == ["example.com:8080", "cloud.example.com:8443"]

    def test_validate_empty_list_raises(self):
        """Test that empty list raises ValidationError."""
        with pytest.raises(ValidationError, match="cannot be empty"):
            validate_allowed_domains([])

    def test_validate_protocol_raises(self):
        """Test that domains with protocol raise ValidationError."""
        with pytest.raises(ValidationError, match="should not include protocol"):
            validate_allowed_domains(["https://example.com"])

    def test_validate_invalid_wildcard_raises(self):
        """Test that invalid wildcards raise ValidationError."""
        with pytest.raises(ValidationError, match="Wildcards must be at the start"):
            validate_allowed_domains(["example.*.com"])

        with pytest.raises(ValidationError, match="Wildcards must be at the start"):
            validate_allowed_domains(["*"])

    def test_validate_normalizes_case(self):
        """Test that validation normalizes to lowercase."""
        result = validate_allowed_domains(["Example.COM", "CLOUD.Example.com"])
        assert result == ["example.com", "cloud.example.com"]

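# Likewise, a sketch of the pattern validation the second half of the class
# asserts (hypothetical; error messages abbreviated to the substrings matched above):
#
#   def validate_allowed_domains(domains: list[str]) -> list[str]:
#       if not domains:
#           raise ValidationError("allowed_domains cannot be empty")
#       normalized = []
#       for domain in (d.lower() for d in domains):
#           if "://" in domain:
#               raise ValidationError("domains should not include protocol")
#           if "*" in domain and not domain.startswith("*."):
#               raise ValidationError("Wildcards must be at the start ('*.domain')")
#           normalized.append(domain)
#       return normalized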
class TestPlaywrightExecutor:
    """Test suite for PlaywrightExecutor."""

    @pytest.fixture
    def executor(self):
        """Create executor instance with mocked logger."""
        with patch("app.executors.base.get_logger", return_value=MagicMock()):
            from app.executors.playwright_executor import PlaywrightExecutor
            return PlaywrightExecutor()

    @pytest.fixture
    def mock_settings(self, tmp_path):
        """Mock settings with temporary paths."""
        settings = MagicMock()
        settings.playwright_artifacts_dir = str(tmp_path / "playwright-artifacts")
        settings.playwright_default_timeout_ms = 60000
        settings.playwright_navigation_timeout_ms = 120000
        return settings

    # ==================== Validation Error Tests ====================

    @pytest.mark.asyncio
    async def test_missing_scenario_field(self, executor, mock_settings):
        """Test that missing scenario field returns error."""
        with patch("app.executors.playwright_executor.get_settings", return_value=mock_settings):
            result = await executor.execute({
                "inputs": {"base_url": "https://example.com"},
                "options": {"allowed_domains": ["example.com"]}
            })

        assert result.success is False
        assert "Missing required fields: scenario" in result.error

    @pytest.mark.asyncio
    async def test_missing_inputs_field(self, executor, mock_settings):
        """Test that missing inputs field returns error."""
        with patch("app.executors.playwright_executor.get_settings", return_value=mock_settings):
            result = await executor.execute({
                "scenario": "test_scenario",
                "options": {"allowed_domains": ["example.com"]}
            })

        assert result.success is False
        assert "Missing required fields: inputs" in result.error

    @pytest.mark.asyncio
    async def test_missing_allowed_domains(self, executor, mock_settings):
        """Test that missing allowed_domains returns security error."""
        with patch("app.executors.playwright_executor.get_settings", return_value=mock_settings):
            result = await executor.execute({
                "scenario": "test_scenario",
                "inputs": {"base_url": "https://example.com"},
                "options": {}
            })

        assert result.success is False
        assert "allowed_domains" in result.error
        assert "required" in result.error.lower()

    @pytest.mark.asyncio
    async def test_missing_options_means_no_domains(self, executor, mock_settings):
        """Test that missing options dict means no allowed_domains."""
        with patch("app.executors.playwright_executor.get_settings", return_value=mock_settings):
            result = await executor.execute({
                "scenario": "test_scenario",
                "inputs": {"base_url": "https://example.com"},
            })

        assert result.success is False
        assert "allowed_domains" in result.error

    @pytest.mark.asyncio
    async def test_invalid_allowed_domains_format(self, executor, mock_settings):
        """Test that invalid domain patterns return error."""
        with patch("app.executors.playwright_executor.get_settings", return_value=mock_settings):
            result = await executor.execute({
                "scenario": "test_scenario",
                "inputs": {"base_url": "https://example.com"},
                "options": {"allowed_domains": ["https://example.com"]}  # Protocol not allowed
            })

        assert result.success is False
        assert "Invalid allowed_domains" in result.error

    @pytest.mark.asyncio
    async def test_unknown_scenario(self, executor, mock_settings):
        """Test that unknown scenario returns error with available list."""
        with patch("app.executors.playwright_executor.get_settings", return_value=mock_settings):
            result = await executor.execute({
                "scenario": "nonexistent_scenario",
                "inputs": {"base_url": "https://example.com"},
                "options": {"allowed_domains": ["example.com"]}
            })

        assert result.success is False
        assert "Unknown scenario" in result.error
        assert "nonexistent_scenario" in result.error
        assert "available_scenarios" in result.data

    # ==================== Task Type Tests ====================

    def test_task_type_is_playwright(self, executor):
        """Test that executor reports correct task type."""
        assert executor.task_type == "PLAYWRIGHT"

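# Read together, the validation tests above imply the minimal well-formed
# PLAYWRIGHT payload (illustrative values taken from the tests):
#
#   {
#       "scenario": "test_scenario",
#       "inputs": {"base_url": "https://example.com"},
#       "options": {"allowed_domains": ["example.com"]},
#   }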
class TestScenarioRegistry:
    """Test scenario registration and lookup."""

    def test_register_and_get_scenario(self):
        """Test registering and retrieving a scenario."""
        from app.playwright_scenarios import get_scenario, get_scenario_names, _SCENARIO_REGISTRY
        from app.playwright_scenarios import register_scenario, BaseScenario, ScenarioResult

        # Clear registry for clean test
        original_registry = _SCENARIO_REGISTRY.copy()
        _SCENARIO_REGISTRY.clear()

        try:
            @register_scenario
            class TestScenario(BaseScenario):
                @property
                def name(self) -> str:
                    return "test_scenario"

                @property
                def required_inputs(self) -> list[str]:
                    return ["base_url"]

                async def execute(self, page, inputs, options) -> ScenarioResult:
                    return ScenarioResult(success=True, data={})

            # Should find the registered scenario
            scenario = get_scenario("test_scenario")
            assert scenario is not None
            assert scenario.name == "test_scenario"

            # Should be in the list
            names = get_scenario_names()
            assert "test_scenario" in names

        finally:
            # Restore original registry
            _SCENARIO_REGISTRY.clear()
            _SCENARIO_REGISTRY.update(original_registry)

    def test_get_unknown_scenario_returns_none(self):
        """Test that unknown scenario lookup returns None."""
        from app.playwright_scenarios import get_scenario

        scenario = get_scenario("definitely_does_not_exist_xyz123")
        assert scenario is None

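# The registry contract exercised above, as a sketch (hypothetical; the real
# code lives in app.playwright_scenarios):
#
#   _SCENARIO_REGISTRY: dict[str, BaseScenario] = {}
#
#   def register_scenario(cls):
#       # Register an instance keyed by its name; return cls so the decorator is transparent
#       instance = cls()
#       _SCENARIO_REGISTRY[instance.name] = instance
#       return cls
#
#   def get_scenario(name: str) -> BaseScenario | None:
#       return _SCENARIO_REGISTRY.get(name)
#
#   def get_scenario_names() -> list[str]:
#       return sorted(_SCENARIO_REGISTRY)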
class TestScenarioOptions:
    """Test ScenarioOptions dataclass."""

    def test_default_values(self):
        """Test default option values."""
        from app.playwright_scenarios import ScenarioOptions

        options = ScenarioOptions()
        assert options.timeout_ms == 60000
        assert options.screenshot_on_failure is True
        assert options.screenshot_on_success is False
        assert options.save_trace is False
        assert options.allowed_domains == []
        assert options.artifacts_dir is None

    def test_custom_values(self):
        """Test custom option values."""
        from app.playwright_scenarios import ScenarioOptions

        options = ScenarioOptions(
            timeout_ms=30000,
            screenshot_on_failure=False,
            screenshot_on_success=True,
            save_trace=True,
            allowed_domains=["example.com"],
            artifacts_dir=Path("/tmp/artifacts"),
        )
        assert options.timeout_ms == 30000
        assert options.screenshot_on_failure is False
        assert options.screenshot_on_success is True
        assert options.save_trace is True
        assert options.allowed_domains == ["example.com"]
        assert options.artifacts_dir == Path("/tmp/artifacts")

    def test_string_artifacts_dir_converted(self):
        """Test that string artifacts_dir is converted to Path."""
        from app.playwright_scenarios import ScenarioOptions

        options = ScenarioOptions(artifacts_dir="/tmp/artifacts")
        assert isinstance(options.artifacts_dir, Path)
        # Compare as Path objects so OS-specific separators don't matter
        assert options.artifacts_dir == Path("/tmp/artifacts")


class TestScenarioResult:
    """Test ScenarioResult dataclass."""

    def test_success_result(self):
        """Test successful result creation."""
        from app.playwright_scenarios import ScenarioResult

        result = ScenarioResult(
            success=True,
            data={"setup": "complete"},
            screenshots=["/tmp/success.png"],
        )
        assert result.success is True
        assert result.data == {"setup": "complete"}
        assert result.screenshots == ["/tmp/success.png"]
        assert result.error is None

    def test_failure_result(self):
        """Test failure result creation."""
        from app.playwright_scenarios import ScenarioResult

        result = ScenarioResult(
            success=False,
            data={},
            error="Element not found",
        )
        assert result.success is False
        assert result.error == "Element not found"

class TestBaseScenario:
    """Test BaseScenario ABC."""

    def test_validate_inputs_missing(self):
        """Test input validation returns missing keys."""
        from app.playwright_scenarios import BaseScenario, ScenarioResult

        class TestScenario(BaseScenario):
            @property
            def name(self) -> str:
                return "test"

            @property
            def required_inputs(self) -> list[str]:
                return ["base_url", "username", "password"]

            async def execute(self, page, inputs, options) -> ScenarioResult:
                return ScenarioResult(success=True, data={})

        scenario = TestScenario()

        # Missing all inputs
        missing = scenario.validate_inputs({})
        assert "base_url" in missing
        assert "username" in missing
        assert "password" in missing

        # Missing some inputs
        missing = scenario.validate_inputs({"base_url": "https://example.com"})
        assert "base_url" not in missing
        assert "username" in missing
        assert "password" in missing

        # All inputs present
        missing = scenario.validate_inputs({
            "base_url": "https://example.com",
            "username": "admin",
            "password": "secret",
        })
        assert missing == []
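    # The base-class check being exercised is a one-liner in spirit
    # (hypothetical sketch, consistent with the assertions above):
    #
    #   def validate_inputs(self, inputs: dict) -> list[str]:
    #       # Return the required keys absent from the payload, in order
    #       return [key for key in self.required_inputs if key not in inputs]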
    def test_default_optional_inputs(self):
        """Test that default optional_inputs is empty."""
        from app.playwright_scenarios import BaseScenario, ScenarioResult

        class TestScenario(BaseScenario):
            @property
            def name(self) -> str:
                return "test"

            @property
            def required_inputs(self) -> list[str]:
                return ["base_url"]

            async def execute(self, page, inputs, options) -> ScenarioResult:
                return ScenarioResult(success=True, data={})

        scenario = TestScenario()
        assert scenario.optional_inputs == []

    def test_default_description(self):
        """Test that the default description uses the scenario name."""
        from app.playwright_scenarios import BaseScenario, ScenarioResult

        class TestScenario(BaseScenario):
            @property
            def name(self) -> str:
                return "my_test_scenario"

            @property
            def required_inputs(self) -> list[str]:
                return []

            async def execute(self, page, inputs, options) -> ScenarioResult:
                return ScenarioResult(success=True, data={})

        scenario = TestScenario()
        assert "my_test_scenario" in scenario.description


# Skip browser tests by default
SKIP_BROWSER_TESTS = os.environ.get("SKIP_BROWSER_TESTS", "true").lower() == "true"
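# To run the browser suite locally: SKIP_BROWSER_TESTS=false pytest
# (assumes Playwright and its browsers are installed on the host).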
@pytest.mark.skipif(SKIP_BROWSER_TESTS, reason="Browser tests skipped (set SKIP_BROWSER_TESTS=false to run)")
class TestPlaywrightExecutorIntegration:
    """Integration tests that require a real browser.

    These tests are skipped by default. Set SKIP_BROWSER_TESTS=false to run.
    """

    @pytest.fixture
    def executor(self):
        """Create executor instance."""
        with patch("app.executors.base.get_logger", return_value=MagicMock()):
            from app.executors.playwright_executor import PlaywrightExecutor
            return PlaywrightExecutor()

    @pytest.mark.asyncio
    async def test_domain_blocking_in_browser(self, executor, tmp_path):
        """Test that blocked domains are actually blocked in browser."""
        # This would require a mock HTTP server and real browser
        # Implementation deferred to manual testing
        pass
81
letsbe-sysadmin-agent/tests/integration_docker_test.py
Normal file
@@ -0,0 +1,81 @@
"""Integration test for DockerExecutor with real Docker."""
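# Standalone script rather than a pytest module; run it directly on a host with
# Docker available (path per the diff header above):
#
#   python tests/integration_docker_test.py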
import asyncio
import sys
import tempfile
from pathlib import Path
from unittest.mock import MagicMock, patch

# Add parent directory to path for imports
sys.path.insert(0, str(Path(__file__).parent.parent))


def main():
    # Create a real temp directory structure
    with tempfile.TemporaryDirectory() as tmp:
        stacks_root = Path(tmp) / "stacks"
        stack_dir = stacks_root / "test-app"
        stack_dir.mkdir(parents=True)

        # Create a minimal compose file
        compose_content = """services:
  test:
    image: alpine:latest
    command: echo 'Hello from integration test'
"""
        compose_file = stack_dir / "docker-compose.yml"
        compose_file.write_text(compose_content)

        print(f"Created stack at: {stack_dir}")
        print(f"Compose file: {compose_file}")

        # Import executor with mocked logger
        with patch("app.executors.base.get_logger", return_value=MagicMock()):
            from app.executors.docker_executor import DockerExecutor
            executor = DockerExecutor()

        # Mock settings to use our temp directory
        mock_settings = MagicMock()
        mock_settings.allowed_stacks_root = str(stacks_root)

        async def run_test():
            with patch("app.executors.docker_executor.get_settings", return_value=mock_settings):
                # Test 1: Without pull
                print("\n=== Test 1: pull=False ===")
                result = await executor.execute({
                    "compose_dir": str(stack_dir),
                    "pull": False,
                    "timeout": 60,
                })
                print(f"Success: {result.success}")
                print(f"compose_file: {result.data.get('compose_file')}")
                print(f"pull_ran: {result.data.get('pull_ran')}")
                if result.error:
                    print(f"Error: {result.error}")
                up_logs = result.data.get("logs", {}).get("up", "")
                print(f"Logs (up): {up_logs[:300] if up_logs else 'empty'}")

                # Test 2: With pull
                print("\n=== Test 2: pull=True ===")
                result2 = await executor.execute({
                    "compose_dir": str(stack_dir),
                    "pull": True,
                    "timeout": 60,
                })
                print(f"Success: {result2.success}")
                print(f"pull_ran: {result2.data.get('pull_ran')}")
                pull_logs = result2.data.get("logs", {}).get("pull", "")
                print(f"Logs (pull): {pull_logs[:300] if pull_logs else 'empty'}")

                return result.success and result2.success

        success = asyncio.run(run_test())
        print(f"\n{'=' * 50}")
        print(f"INTEGRATION TEST: {'PASSED' if success else 'FAILED'}")
        print(f"{'=' * 50}")
        return success


if __name__ == "__main__":
    success = main()
    sys.exit(0 if success else 1)