Include full contents of all nested repositories
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
letsbe-sysadmin-agent/app/executors/__init__.py (new file, 69 lines)
@@ -0,0 +1,69 @@
"""Task executors registry."""

from typing import Type

from app.executors.base import BaseExecutor, ExecutionResult
from app.executors.composite_executor import CompositeExecutor
from app.executors.docker_executor import DockerExecutor
from app.executors.echo_executor import EchoExecutor
from app.executors.env_inspect_executor import EnvInspectExecutor
from app.executors.env_update_executor import EnvUpdateExecutor
from app.executors.file_executor import FileExecutor
from app.executors.file_inspect_executor import FileInspectExecutor
from app.executors.nextcloud_executor import NextcloudSetDomainExecutor
from app.executors.playwright_executor import PlaywrightExecutor
from app.executors.shell_executor import ShellExecutor

# Registry mapping task types to executor classes
EXECUTOR_REGISTRY: dict[str, Type[BaseExecutor]] = {
    "ECHO": EchoExecutor,
    "SHELL": ShellExecutor,
    "FILE_WRITE": FileExecutor,
    "ENV_UPDATE": EnvUpdateExecutor,
    "ENV_INSPECT": EnvInspectExecutor,
    "FILE_INSPECT": FileInspectExecutor,
    "DOCKER_RELOAD": DockerExecutor,
    "COMPOSITE": CompositeExecutor,
    "PLAYWRIGHT": PlaywrightExecutor,
    "NEXTCLOUD_SET_DOMAIN": NextcloudSetDomainExecutor,
}


def get_executor(task_type: str) -> BaseExecutor:
    """Get an executor instance for a task type.

    Args:
        task_type: The type of task to execute

    Returns:
        Executor instance

    Raises:
        ValueError: If task type is not registered
    """
    if task_type not in EXECUTOR_REGISTRY:
        raise ValueError(
            f"Unknown task type: {task_type}. "
            f"Available: {list(EXECUTOR_REGISTRY.keys())}"
        )

    executor_class = EXECUTOR_REGISTRY[task_type]
    return executor_class()


__all__ = [
    "BaseExecutor",
    "ExecutionResult",
    "EchoExecutor",
    "ShellExecutor",
    "FileExecutor",
    "FileInspectExecutor",
    "EnvUpdateExecutor",
    "EnvInspectExecutor",
    "DockerExecutor",
    "CompositeExecutor",
    "PlaywrightExecutor",
    "NextcloudSetDomainExecutor",
    "EXECUTOR_REGISTRY",
    "get_executor",
]
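Usage sketch (illustrative, not part of this diff): dispatching a task through the registry. The asyncio entry point below is assumed; only get_executor and the ECHO task type come from this commit.

    import asyncio

    from app.executors import get_executor

    async def main() -> None:
        # Look up the executor class for a task type and run one task
        executor = get_executor("ECHO")
        result = await executor.execute({"message": "ping"})
        print(result.success, result.data)  # True {'echoed': 'ping'}

    asyncio.run(main())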
letsbe-sysadmin-agent/app/executors/base.py (new file, 59 lines)
@@ -0,0 +1,59 @@
"""Base executor class for all task types."""

from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Any, Optional

from app.utils.logger import get_logger


@dataclass
class ExecutionResult:
    """Result of task execution."""

    success: bool
    data: dict[str, Any]
    error: Optional[str] = None
    duration_ms: Optional[float] = None


class BaseExecutor(ABC):
    """Abstract base class for task executors.

    All executors must implement the execute() method.
    """

    def __init__(self):
        self.logger = get_logger(self.__class__.__name__)

    @property
    @abstractmethod
    def task_type(self) -> str:
        """Return the task type this executor handles."""
        pass

    @abstractmethod
    async def execute(self, payload: dict[str, Any]) -> ExecutionResult:
        """Execute the task with the given payload.

        Args:
            payload: Task-specific payload data

        Returns:
            ExecutionResult with success status and result data
        """
        pass

    def validate_payload(self, payload: dict[str, Any], required_fields: list[str]) -> None:
        """Validate that required fields are present in payload.

        Args:
            payload: Task payload
            required_fields: List of required field names

        Raises:
            ValueError: If a required field is missing
        """
        missing = [f for f in required_fields if f not in payload]
        if missing:
            raise ValueError(f"Missing required fields: {', '.join(missing)}")
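Implementation sketch (illustrative, not part of this diff): the minimal surface a concrete executor must provide is the task_type property and the async execute method. NOOP is a hypothetical task type and is not registered anywhere in this commit.

    from typing import Any

    from app.executors.base import BaseExecutor, ExecutionResult

    class NoOpExecutor(BaseExecutor):
        """Does nothing; demonstrates the BaseExecutor contract."""

        @property
        def task_type(self) -> str:
            return "NOOP"

        async def execute(self, payload: dict[str, Any]) -> ExecutionResult:
            # validate_payload raises ValueError if required fields are missing
            self.validate_payload(payload, [])
            return ExecutionResult(success=True, data={"noop": True})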
letsbe-sysadmin-agent/app/executors/composite_executor.py (new file, 207 lines)
@@ -0,0 +1,207 @@
"""Composite executor for sequential task execution."""

import time
from typing import Any

from app.executors.base import BaseExecutor, ExecutionResult


class CompositeExecutor(BaseExecutor):
    """Execute a sequence of tasks in order.

    Executes each task in the sequence using the appropriate executor.
    Stops on first failure and returns partial results.

    Security measures:
    - Each sub-task uses the same validated executors
    - Sequential execution only (no parallelism)
    - Stops immediately on first failure

    Payload:
        {
            "steps": [
                {"type": "ENV_UPDATE", "payload": {...}},
                {"type": "DOCKER_RELOAD", "payload": {...}}
            ]
        }

    Result (success):
        {
            "steps": [
                {"index": 0, "type": "ENV_UPDATE", "status": "completed", "result": {...}},
                {"index": 1, "type": "DOCKER_RELOAD", "status": "completed", "result": {...}}
            ]
        }

    Result (failure at step 1):
        ExecutionResult.success = False
        ExecutionResult.error = "Step 1 (DOCKER_RELOAD) failed: <error message>"
        ExecutionResult.data = {
            "steps": [
                {"index": 0, "type": "ENV_UPDATE", "status": "completed", "result": {...}},
                {"index": 1, "type": "DOCKER_RELOAD", "status": "failed", "error": "..."}
            ]
        }
    """

    @property
    def task_type(self) -> str:
        return "COMPOSITE"

    async def execute(self, payload: dict[str, Any]) -> ExecutionResult:
        """Execute a sequence of tasks.

        Args:
            payload: Must contain "steps" list of step definitions

        Returns:
            ExecutionResult with execution summary
        """
        self.validate_payload(payload, ["steps"])

        steps = payload["steps"]

        # Validate steps is a non-empty list
        if not isinstance(steps, list):
            return ExecutionResult(
                success=False,
                data={"steps": []},
                error="'steps' must be a list of step definitions",
            )

        if not steps:
            return ExecutionResult(
                success=False,
                data={"steps": []},
                error="'steps' cannot be empty",
            )

        # Import registry here to avoid circular imports
        from app.executors import get_executor

        self.logger.info(
            "composite_starting",
            total_steps=len(steps),
            step_types=[step.get("type", "UNKNOWN") if isinstance(step, dict) else "INVALID" for step in steps],
        )

        start_time = time.time()
        results: list[dict[str, Any]] = []

        for i, step in enumerate(steps):
            # Validate step structure
            if not isinstance(step, dict):
                self.logger.error("composite_invalid_step", step_index=i)
                return ExecutionResult(
                    success=False,
                    data={"steps": results},
                    error=f"Step {i} is not a valid step definition (must be dict)",
                )

            step_type = step.get("type")
            step_payload = step.get("payload", {})

            if not step_type:
                self.logger.error("composite_missing_type", step_index=i)
                return ExecutionResult(
                    success=False,
                    data={"steps": results},
                    error=f"Step {i} missing 'type' field",
                )

            self.logger.info(
                "composite_step_starting",
                step_index=i,
                step_type=step_type,
            )

            # Get executor for this step type
            try:
                executor = get_executor(step_type)
            except ValueError as e:
                self.logger.error(
                    "composite_unknown_type",
                    step_index=i,
                    step_type=step_type,
                    error=str(e),
                )
                return ExecutionResult(
                    success=False,
                    data={"steps": results},
                    error=f"Step {i} ({step_type}) failed: {e}",
                )

            # Execute the step
            try:
                result = await executor.execute(step_payload)

                step_result: dict[str, Any] = {
                    "index": i,
                    "type": step_type,
                    "status": "completed" if result.success else "failed",
                    "result": result.data,
                }
                if result.error:
                    step_result["error"] = result.error

                results.append(step_result)

                self.logger.info(
                    "composite_step_completed",
                    step_index=i,
                    step_type=step_type,
                    success=result.success,
                )

                # Stop on first failure
                if not result.success:
                    duration_ms = (time.time() - start_time) * 1000
                    self.logger.warning(
                        "composite_step_failed",
                        step_index=i,
                        step_type=step_type,
                        error=result.error,
                    )
                    return ExecutionResult(
                        success=False,
                        data={"steps": results},
                        error=f"Step {i} ({step_type}) failed: {result.error}",
                        duration_ms=duration_ms,
                    )

            except Exception as e:
                duration_ms = (time.time() - start_time) * 1000
                self.logger.error(
                    "composite_step_exception",
                    step_index=i,
                    step_type=step_type,
                    error=str(e),
                )
                # Add failed step to results
                results.append({
                    "index": i,
                    "type": step_type,
                    "status": "failed",
                    "error": str(e),
                })
                return ExecutionResult(
                    success=False,
                    data={"steps": results},
                    error=f"Step {i} ({step_type}) failed: {e}",
                    duration_ms=duration_ms,
                )

        # All steps completed successfully
        duration_ms = (time.time() - start_time) * 1000

        self.logger.info(
            "composite_completed",
            steps_completed=len(results),
            duration_ms=duration_ms,
        )

        return ExecutionResult(
            success=True,
            data={"steps": results},
            duration_ms=duration_ms,
        )
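Payload sketch (illustrative, paths are placeholders): a two-step COMPOSITE task that rewrites an ENV file and then reloads the stack, matching the docstring above.

    composite_payload = {
        "steps": [
            {
                "type": "ENV_UPDATE",
                "payload": {
                    "path": "/opt/letsbe/env/myapp.env",
                    "updates": {"PUBLIC_URL": "https://myapp.example.com"},
                },
            },
            {
                "type": "DOCKER_RELOAD",
                "payload": {"compose_dir": "/opt/letsbe/stacks/myapp"},
            },
        ]
    }

Because execution stops at the first failure, the ENV file may already be rewritten when DOCKER_RELOAD fails; the partial "steps" list in the result data is what lets the caller see how far the sequence got.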
letsbe-sysadmin-agent/app/executors/docker_executor.py (new file, 290 lines)
@@ -0,0 +1,290 @@
"""Docker Compose executor for container management."""

import asyncio
import subprocess
import time
from pathlib import Path
from typing import Any

from app.config import get_settings
from app.executors.base import BaseExecutor, ExecutionResult
from app.utils.validation import ValidationError, validate_file_path


class DockerExecutor(BaseExecutor):
    """Execute Docker Compose operations with security controls.

    Security measures:
    - Directory validation against allowed stacks root
    - Compose file existence verification
    - Path traversal prevention
    - Timeout enforcement on each subprocess
    - No shell=True, command list only

    Payload:
        {
            "compose_dir": "/opt/letsbe/stacks/myapp",
            "pull": true  # Optional, defaults to false
        }

    Result:
        {
            "compose_dir": "/opt/letsbe/stacks/myapp",
            "compose_file": "/opt/letsbe/stacks/myapp/docker-compose.yml",
            "pull_ran": true,
            "logs": {
                "pull": "<stdout+stderr>",
                "up": "<stdout+stderr>"
            }
        }
    """

    # Compose file search order
    COMPOSE_FILE_NAMES = ["docker-compose.yml", "compose.yml"]

    # Default timeout for each docker command (seconds)
    DEFAULT_COMMAND_TIMEOUT = 300

    @property
    def task_type(self) -> str:
        return "DOCKER_RELOAD"

    async def execute(self, payload: dict[str, Any]) -> ExecutionResult:
        """Execute Docker Compose pull (optional) and up -d --remove-orphans.

        Args:
            payload: Must contain "compose_dir", optionally "pull" (bool) and "timeout"

        Returns:
            ExecutionResult with reload confirmation and logs
        """
        self.validate_payload(payload, ["compose_dir"])
        settings = get_settings()

        compose_dir = payload["compose_dir"]
        pull = payload.get("pull", False)
        timeout = payload.get("timeout", self.DEFAULT_COMMAND_TIMEOUT)

        # Validate compose directory is under allowed stacks root
        try:
            validated_dir = validate_file_path(
                compose_dir,
                settings.allowed_stacks_root,
                must_exist=True,
            )
        except ValidationError as e:
            self.logger.warning("docker_dir_validation_failed", path=compose_dir, error=str(e))
            return ExecutionResult(
                success=False,
                data={},
                error=f"Directory validation failed: {e}",
            )

        # Verify it's actually a directory
        if not validated_dir.is_dir():
            self.logger.warning("docker_not_directory", path=compose_dir)
            return ExecutionResult(
                success=False,
                data={},
                error=f"Path is not a directory: {compose_dir}",
            )

        # Find compose file in order of preference
        compose_file = self._find_compose_file(validated_dir)
        if compose_file is None:
            self.logger.warning("docker_compose_not_found", dir=compose_dir)
            return ExecutionResult(
                success=False,
                data={},
                error=f"No compose file found in {compose_dir}. "
                      f"Looked for: {', '.join(self.COMPOSE_FILE_NAMES)}",
            )

        self.logger.info(
            "docker_reloading",
            compose_dir=str(validated_dir),
            compose_file=str(compose_file),
            pull=pull,
        )

        start_time = time.time()
        logs: dict[str, str] = {}
        pull_ran = False

        try:
            # Run pull if requested
            if pull:
                pull_ran = True
                exit_code, stdout, stderr = await self._run_compose_command(
                    compose_file,
                    validated_dir,
                    ["pull"],
                    timeout,
                )
                logs["pull"] = self._combine_output(stdout, stderr)

                if exit_code != 0:
                    duration_ms = (time.time() - start_time) * 1000
                    self.logger.warning(
                        "docker_pull_failed",
                        compose_dir=str(validated_dir),
                        exit_code=exit_code,
                        stderr=stderr[:500] if stderr else None,
                    )
                    return ExecutionResult(
                        success=False,
                        data={
                            "compose_dir": str(validated_dir),
                            "compose_file": str(compose_file),
                            "pull_ran": pull_ran,
                            "logs": logs,
                        },
                        error=f"Docker pull failed with exit code {exit_code}",
                        duration_ms=duration_ms,
                    )

            # Run up -d --remove-orphans
            exit_code, stdout, stderr = await self._run_compose_command(
                compose_file,
                validated_dir,
                ["up", "-d", "--remove-orphans"],
                timeout,
            )
            logs["up"] = self._combine_output(stdout, stderr)

            duration_ms = (time.time() - start_time) * 1000
            success = exit_code == 0

            if success:
                self.logger.info(
                    "docker_reloaded",
                    compose_dir=str(validated_dir),
                    exit_code=exit_code,
                    duration_ms=duration_ms,
                )
            else:
                self.logger.warning(
                    "docker_reload_failed",
                    compose_dir=str(validated_dir),
                    exit_code=exit_code,
                    stderr=stderr[:500] if stderr else None,
                )

            return ExecutionResult(
                success=success,
                data={
                    "compose_dir": str(validated_dir),
                    "compose_file": str(compose_file),
                    "pull_ran": pull_ran,
                    "logs": logs,
                },
                error=f"Docker up failed with exit code {exit_code}" if not success else None,
                duration_ms=duration_ms,
            )

        except asyncio.TimeoutError:
            duration_ms = (time.time() - start_time) * 1000
            self.logger.error("docker_timeout", compose_dir=str(validated_dir), timeout=timeout)
            return ExecutionResult(
                success=False,
                data={
                    "compose_dir": str(validated_dir),
                    "compose_file": str(compose_file),
                    "pull_ran": pull_ran,
                    "logs": logs,
                },
                error=f"Docker operation timed out after {timeout} seconds",
                duration_ms=duration_ms,
            )

        except Exception as e:
            duration_ms = (time.time() - start_time) * 1000
            self.logger.error("docker_error", compose_dir=str(validated_dir), error=str(e))
            return ExecutionResult(
                success=False,
                data={
                    "compose_dir": str(validated_dir),
                    "compose_file": str(compose_file),
                    "pull_ran": pull_ran,
                    "logs": logs,
                },
                error=str(e),
                duration_ms=duration_ms,
            )

    def _find_compose_file(self, compose_dir: Path) -> Path | None:
        """Find compose file in the directory.

        Searches in order: docker-compose.yml, compose.yml

        Args:
            compose_dir: Directory to search in

        Returns:
            Path to compose file, or None if not found
        """
        for filename in self.COMPOSE_FILE_NAMES:
            compose_file = compose_dir / filename
            if compose_file.exists():
                return compose_file
        return None

    def _combine_output(self, stdout: str, stderr: str) -> str:
        """Combine stdout and stderr into a single string.

        Args:
            stdout: Standard output
            stderr: Standard error

        Returns:
            Combined output string
        """
        parts = []
        if stdout:
            parts.append(stdout)
        if stderr:
            parts.append(stderr)
        return "\n".join(parts)

    async def _run_compose_command(
        self,
        compose_file: Path,
        compose_dir: Path,
        args: list[str],
        timeout: int,
    ) -> tuple[int, str, str]:
        """Run a docker compose command.

        Args:
            compose_file: Path to compose file
            compose_dir: Working directory
            args: Additional arguments after 'docker compose -f <file>'
            timeout: Operation timeout in seconds

        Returns:
            Tuple of (exit_code, stdout, stderr)
        """
        def _run() -> tuple[int, str, str]:
            # Build command: docker compose -f <file> <args>
            cmd = [
                "docker",
                "compose",
                "-f",
                str(compose_file),
            ] + args

            # Run command from compose directory, no shell=True
            result = subprocess.run(
                cmd,
                capture_output=True,
                text=True,
                timeout=timeout,
                cwd=str(compose_dir),
            )

            return result.returncode, result.stdout, result.stderr

        return await asyncio.wait_for(
            asyncio.to_thread(_run),
            timeout=timeout + 30,  # Watchdog with buffer
        )
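Design note: timeouts are layered. subprocess.run enforces the per-command timeout inside the worker thread (raising subprocess.TimeoutExpired, which lands in the generic exception branch above), while asyncio.wait_for acts as a watchdog with a 30-second buffer in case the thread itself wedges (raising asyncio.TimeoutError). A standalone sketch of the same pattern, assuming an arbitrary command list:

    import asyncio
    import subprocess

    async def run_with_watchdog(cmd: list[str], timeout: int) -> int:
        def _run() -> int:
            # Inner timeout: subprocess.run raises TimeoutExpired
            return subprocess.run(cmd, timeout=timeout).returncode

        # Outer timeout: wait_for raises asyncio.TimeoutError
        return await asyncio.wait_for(asyncio.to_thread(_run), timeout=timeout + 30)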
letsbe-sysadmin-agent/app/executors/echo_executor.py (new file, 45 lines)
@@ -0,0 +1,45 @@
"""Echo executor for testing and debugging."""

from typing import Any

from app.executors.base import BaseExecutor, ExecutionResult


class EchoExecutor(BaseExecutor):
    """Simple echo executor that returns the payload as-is.

    Used for testing connectivity and task flow.

    Payload:
        {
            "message": "string to echo back"
        }

    Result:
        {
            "echoed": "string that was sent"
        }
    """

    @property
    def task_type(self) -> str:
        return "ECHO"

    async def execute(self, payload: dict[str, Any]) -> ExecutionResult:
        """Echo back the payload message.

        Args:
            payload: Must contain "message" field

        Returns:
            ExecutionResult with the echoed message
        """
        self.validate_payload(payload, ["message"])

        message = payload["message"]
        self.logger.info("echo_executing", message=message)

        return ExecutionResult(
            success=True,
            data={"echoed": message},
        )
letsbe-sysadmin-agent/app/executors/env_inspect_executor.py (new file, 161 lines)
@@ -0,0 +1,161 @@
"""ENV file inspection executor for reading current values."""

import time
from typing import Any

from app.config import get_settings
from app.executors.base import BaseExecutor, ExecutionResult
from app.utils.validation import ValidationError, validate_file_path


class EnvInspectExecutor(BaseExecutor):
    """Read ENV files to inspect current values.

    Security measures:
    - Path validation against allowed env root (/opt/letsbe/env)
    - Directory traversal prevention
    - File must exist (no blind path probing)
    - Read-only operation (no file modification)

    Payload:
        {
            "path": "/opt/letsbe/env/chatwoot.env",
            "keys": ["FRONTEND_URL", "BACKEND_URL"]  # optional, null returns all
        }

    Result (success):
        {
            "path": "/opt/letsbe/env/chatwoot.env",
            "keys": {
                "FRONTEND_URL": "https://...",
                "BACKEND_URL": "https://..."
            }
        }
    """

    @property
    def task_type(self) -> str:
        return "ENV_INSPECT"

    async def execute(self, payload: dict[str, Any]) -> ExecutionResult:
        """Read ENV file and return current key-value pairs.

        Args:
            payload: Must contain "path", optionally "keys" to filter

        Returns:
            ExecutionResult with dict of key-value pairs
        """
        # Path is always required
        if "path" not in payload:
            raise ValueError("Missing required field: path")

        settings = get_settings()

        file_path = payload["path"]
        requested_keys = payload.get("keys")

        # Validate keys is a list if provided
        if requested_keys is not None and not isinstance(requested_keys, list):
            return ExecutionResult(
                success=False,
                data={},
                error="'keys' must be a list of key names or null",
            )

        # Validate path is under allowed env root
        try:
            validated_path = validate_file_path(
                file_path,
                settings.allowed_env_root,
                must_exist=True,  # File MUST exist for inspect
            )
        except ValidationError as e:
            self.logger.warning("env_path_validation_failed", path=file_path, error=str(e))
            return ExecutionResult(
                success=False,
                data={},
                error=f"Path validation failed: {e}",
            )

        self.logger.info(
            "env_inspecting",
            path=str(validated_path),
            filter_keys=requested_keys,
        )

        start_time = time.time()

        try:
            # Read and parse the ENV file
            content = validated_path.read_text(encoding="utf-8")
            all_keys = self._parse_env_file(content)

            # Filter keys if requested
            if requested_keys is None:
                result_keys = all_keys
            else:
                # Return only requested keys that exist (ignore unknown)
                result_keys = {k: v for k, v in all_keys.items() if k in requested_keys}

            duration_ms = (time.time() - start_time) * 1000

            self.logger.info(
                "env_inspected",
                path=str(validated_path),
                keys_returned=len(result_keys),
                duration_ms=duration_ms,
            )

            return ExecutionResult(
                success=True,
                data={
                    "path": str(validated_path),
                    "keys": result_keys,
                },
                duration_ms=duration_ms,
            )

        except Exception as e:
            duration_ms = (time.time() - start_time) * 1000
            self.logger.error("env_inspect_error", path=str(validated_path), error=str(e))
            return ExecutionResult(
                success=False,
                data={},
                error=str(e),
                duration_ms=duration_ms,
            )

    def _parse_env_file(self, content: str) -> dict[str, str]:
        """Parse ENV file content into key-value dict.

        Handles:
        - KEY=value format
        - Lines starting with # (comments)
        - Empty lines
        - Whitespace trimming
        - Quoted values (single and double quotes)

        Args:
            content: Raw ENV file content

        Returns:
            Dict of key-value pairs
        """
        env_dict = {}
        for line in content.splitlines():
            line = line.strip()
            # Skip empty lines and comments
            if not line or line.startswith("#"):
                continue
            # Split on first = only
            if "=" in line:
                key, value = line.split("=", 1)
                key = key.strip()
                value = value.strip()
                # Remove surrounding quotes if present (length guard so a lone
                # quote character is not stripped to an empty string)
                if len(value) >= 2 and (
                    (value.startswith('"') and value.endswith('"'))
                    or (value.startswith("'") and value.endswith("'"))
                ):
                    value = value[1:-1]
                env_dict[key] = value
        return env_dict
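Worked example (illustrative): _parse_env_file skips comments and blank lines, splits on the first '=', and strips surrounding quotes.

    executor = EnvInspectExecutor()
    content = '# comment\nFRONTEND_URL="https://app.example.com"\n\nPORT=8080\n'
    assert executor._parse_env_file(content) == {
        "FRONTEND_URL": "https://app.example.com",
        "PORT": "8080",
    }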
letsbe-sysadmin-agent/app/executors/env_update_executor.py (new file, 285 lines)
@@ -0,0 +1,285 @@
"""ENV file update executor with atomic writes and key validation."""

import asyncio
import os
import stat
import tempfile
import time
from pathlib import Path
from typing import Any

from app.config import get_settings
from app.executors.base import BaseExecutor, ExecutionResult
from app.utils.validation import ValidationError, validate_env_key, validate_file_path


class EnvUpdateExecutor(BaseExecutor):
    """Update ENV files with key-value merging and removal.

    Security measures:
    - Path validation against allowed env root (/opt/letsbe/env)
    - ENV key format validation (^[A-Z][A-Z0-9_]*$)
    - Atomic writes (temp file + fsync + rename)
    - Secure permissions (chmod 640)
    - Directory traversal prevention

    Payload:
        {
            "path": "/opt/letsbe/env/chatwoot.env",
            "updates": {
                "DATABASE_URL": "postgres://localhost/mydb",
                "API_KEY": "secret123"
            },
            "remove_keys": ["OLD_KEY", "DEPRECATED_VAR"]  # optional
        }

    Result:
        {
            "updated_keys": ["DATABASE_URL", "API_KEY"],
            "removed_keys": ["OLD_KEY"],
            "path": "/opt/letsbe/env/chatwoot.env"
        }
    """

    # Secure file permissions: owner rw, group r, others none (640)
    FILE_MODE = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP  # 0o640

    @property
    def task_type(self) -> str:
        return "ENV_UPDATE"

    async def execute(self, payload: dict[str, Any]) -> ExecutionResult:
        """Update ENV file with new key-value pairs and optional removals.

        Args:
            payload: Must contain "path" and at least one of "updates" or "remove_keys"

        Returns:
            ExecutionResult with lists of updated and removed keys
        """
        # Path is always required
        if "path" not in payload:
            raise ValueError("Missing required field: path")

        settings = get_settings()

        file_path = payload["path"]
        updates = payload.get("updates", {})
        remove_keys = payload.get("remove_keys", [])

        # Validate that at least one operation is provided
        if not updates and not remove_keys:
            return ExecutionResult(
                success=False,
                data={},
                error="At least one of 'updates' or 'remove_keys' must be provided",
            )

        # Validate updates is a dict if provided
        if updates and not isinstance(updates, dict):
            return ExecutionResult(
                success=False,
                data={},
                error="'updates' must be a dictionary of key-value pairs",
            )

        # Validate remove_keys is a list if provided
        if remove_keys and not isinstance(remove_keys, list):
            return ExecutionResult(
                success=False,
                data={},
                error="'remove_keys' must be a list of key names",
            )

        # Validate path is under allowed env root
        try:
            validated_path = validate_file_path(
                file_path,
                settings.allowed_env_root,
                must_exist=False,
            )
        except ValidationError as e:
            self.logger.warning("env_path_validation_failed", path=file_path, error=str(e))
            return ExecutionResult(
                success=False,
                data={},
                error=f"Path validation failed: {e}",
            )

        # Validate all update keys match pattern
        try:
            for key in updates.keys():
                validate_env_key(key)
        except ValidationError as e:
            self.logger.warning("env_key_validation_failed", error=str(e))
            return ExecutionResult(
                success=False,
                data={},
                error=str(e),
            )

        # Validate all remove_keys match pattern
        try:
            for key in remove_keys:
                if not isinstance(key, str):
                    raise ValidationError(f"remove_keys must contain strings, got: {type(key).__name__}")
                validate_env_key(key)
        except ValidationError as e:
            self.logger.warning("env_remove_key_validation_failed", error=str(e))
            return ExecutionResult(
                success=False,
                data={},
                error=str(e),
            )

        self.logger.info(
            "env_updating",
            path=str(validated_path),
            update_keys=list(updates.keys()) if updates else [],
            remove_keys=remove_keys,
        )

        start_time = time.time()

        try:
            # Read existing ENV file if it exists
            existing_env = {}
            if validated_path.exists():
                content = validated_path.read_text(encoding="utf-8")
                existing_env = self._parse_env_file(content)

            # Track which keys were actually removed (existed before)
            actually_removed = [k for k in remove_keys if k in existing_env]

            # Apply updates (new values overwrite existing)
            merged_env = {**existing_env, **updates}

            # Remove specified keys
            for key in remove_keys:
                merged_env.pop(key, None)

            # Serialize and write atomically with secure permissions
            new_content = self._serialize_env(merged_env)
            await self._atomic_write_secure(validated_path, new_content.encode("utf-8"))

            duration_ms = (time.time() - start_time) * 1000

            self.logger.info(
                "env_updated",
                path=str(validated_path),
                updated_keys=list(updates.keys()) if updates else [],
                removed_keys=actually_removed,
                duration_ms=duration_ms,
            )

            return ExecutionResult(
                success=True,
                data={
                    "updated_keys": list(updates.keys()) if updates else [],
                    "removed_keys": actually_removed,
                    "path": str(validated_path),
                },
                duration_ms=duration_ms,
            )

        except Exception as e:
            duration_ms = (time.time() - start_time) * 1000
            self.logger.error("env_update_error", path=str(validated_path), error=str(e))
            return ExecutionResult(
                success=False,
                data={},
                error=str(e),
                duration_ms=duration_ms,
            )

    def _parse_env_file(self, content: str) -> dict[str, str]:
        """Parse ENV file content into key-value dict.

        Handles:
        - KEY=value format
        - Lines starting with # (comments)
        - Empty lines
        - Whitespace trimming
        - Quoted values (single and double quotes)

        Args:
            content: Raw ENV file content

        Returns:
            Dict of key-value pairs
        """
        env_dict = {}
        for line in content.splitlines():
            line = line.strip()
            # Skip empty lines and comments
            if not line or line.startswith("#"):
                continue
            # Split on first = only
            if "=" in line:
                key, value = line.split("=", 1)
                key = key.strip()
                value = value.strip()
                # Remove surrounding quotes if present (length guard so a lone
                # quote character is not stripped to an empty string)
                if len(value) >= 2 and (
                    (value.startswith('"') and value.endswith('"'))
                    or (value.startswith("'") and value.endswith("'"))
                ):
                    value = value[1:-1]
                env_dict[key] = value
        return env_dict

    def _serialize_env(self, env_dict: dict[str, str]) -> str:
        """Serialize dict to ENV file format.

        Args:
            env_dict: Key-value pairs

        Returns:
            ENV file content string with sorted keys
        """
        lines = []
        for key, value in sorted(env_dict.items()):
            # Quote values that contain spaces, newlines, or equals signs
            if " " in str(value) or "\n" in str(value) or "=" in str(value):
                value = f'"{value}"'
            lines.append(f"{key}={value}")
        return ("\n".join(lines) + "\n") if lines else ""

    async def _atomic_write_secure(self, path: Path, content: bytes) -> int:
        """Write file atomically with secure permissions.

        Uses temp file + fsync + rename pattern for atomicity.
        Sets chmod 640 (owner rw, group r, others none) for security.

        Args:
            path: Target file path
            content: Content to write

        Returns:
            Number of bytes written
        """
        def _write() -> int:
            # Ensure parent directory exists
            path.parent.mkdir(parents=True, exist_ok=True)

            # Write to temp file in same directory (for atomic rename)
            fd, temp_path = tempfile.mkstemp(
                dir=path.parent,
                prefix=".tmp_",
                suffix=".env",
            )
            temp_path_obj = Path(temp_path)

            try:
                os.write(fd, content)
                os.fsync(fd)  # Ensure data is on disk
            finally:
                os.close(fd)

            # Set secure permissions before rename (640)
            os.chmod(temp_path, self.FILE_MODE)

            # Atomic rename
            os.replace(temp_path_obj, path)

            return len(content)

        return await asyncio.to_thread(_write)
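Payload sketch (illustrative, path and values are placeholders): one ENV_UPDATE task can merge new keys and drop old ones in a single atomic rewrite.

    update_payload = {
        "path": "/opt/letsbe/env/myapp.env",
        "updates": {"PUBLIC_URL": "https://myapp.example.com"},
        "remove_keys": ["LEGACY_URL"],
    }

The temp file is created in the target's own directory because os.replace is only atomic when source and destination live on the same filesystem; a temp file under /tmp could sit on a different filesystem, where os.replace fails with EXDEV instead of swapping atomically.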
letsbe-sysadmin-agent/app/executors/file_executor.py (new file, 223 lines)
@@ -0,0 +1,223 @@
"""File write executor with security controls."""

import asyncio
import os
import tempfile
import time
from pathlib import Path
from typing import Any

from app.config import get_settings
from app.executors.base import BaseExecutor, ExecutionResult
from app.utils.validation import ValidationError, sanitize_input, validate_file_path


class FileExecutor(BaseExecutor):
    """Write files with strict security controls.

    Security measures:
    - Path validation against allowed root directories
    - Directory traversal prevention
    - Maximum file size enforcement
    - Atomic writes (temp file + rename)
    - Content sanitization

    Supported roots:
    - /opt/agent_data (general file operations)
    - /opt/letsbe/env (ENV file operations)

    Payload:
        {
            "path": "/opt/letsbe/env/app.env",
            "content": "KEY=value\\nKEY2=value2",
            "mode": "write"  # "write" (default) or "append"
        }

    Result:
        {
            "written": true,
            "path": "/opt/letsbe/env/app.env",
            "size": 123
        }
    """

    @property
    def task_type(self) -> str:
        return "FILE_WRITE"

    async def execute(self, payload: dict[str, Any]) -> ExecutionResult:
        """Write content to a file.

        Args:
            payload: Must contain "path" and "content", optionally "mode"

        Returns:
            ExecutionResult with write confirmation
        """
        self.validate_payload(payload, ["path", "content"])
        settings = get_settings()

        file_path = payload["path"]
        content = payload["content"]
        mode = payload.get("mode", "write")

        if mode not in ("write", "append"):
            return ExecutionResult(
                success=False,
                data={},
                error=f"Invalid mode: {mode}. Must be 'write' or 'append'",
            )

        # Validate path against allowed roots (env or general):
        # use the env root if the path falls under it, otherwise the general root
        try:
            allowed_root = self._determine_allowed_root(file_path, settings)
            validated_path = validate_file_path(
                file_path,
                allowed_root,
                must_exist=False,
            )
            sanitized_content = sanitize_input(content, max_length=settings.max_file_size)
        except ValidationError as e:
            self.logger.warning("file_validation_failed", path=file_path, error=str(e))
            return ExecutionResult(
                success=False,
                data={},
                error=f"Validation failed: {e}",
            )

        # Check content size
        content_bytes = sanitized_content.encode("utf-8")
        if len(content_bytes) > settings.max_file_size:
            return ExecutionResult(
                success=False,
                data={},
                error=f"Content size {len(content_bytes)} exceeds max {settings.max_file_size}",
            )

        self.logger.info(
            "file_writing",
            path=str(validated_path),
            mode=mode,
            size=len(content_bytes),
        )

        start_time = time.time()

        try:
            if mode == "write":
                bytes_written = await self._atomic_write(validated_path, content_bytes)
            else:
                bytes_written = await self._append(validated_path, content_bytes)

            duration_ms = (time.time() - start_time) * 1000

            self.logger.info(
                "file_written",
                path=str(validated_path),
                bytes_written=bytes_written,
                duration_ms=duration_ms,
            )

            return ExecutionResult(
                success=True,
                data={
                    "written": True,
                    "path": str(validated_path),
                    "size": bytes_written,
                },
                duration_ms=duration_ms,
            )

        except Exception as e:
            duration_ms = (time.time() - start_time) * 1000
            self.logger.error("file_write_error", path=str(validated_path), error=str(e))
            return ExecutionResult(
                success=False,
                data={},
                error=str(e),
                duration_ms=duration_ms,
            )

    def _determine_allowed_root(self, file_path: str, settings) -> str:
        """Determine which allowed root to use based on file path.

        Args:
            file_path: The requested file path
            settings: Application settings

        Returns:
            The appropriate allowed root directory
        """
        # Normalize the path for comparison
        normalized = str(Path(file_path).expanduser())

        # Check if path is under env root; match on a path-component boundary
        # so that e.g. "/opt/letsbe/envX" does not match "/opt/letsbe/env"
        env_root = str(Path(settings.allowed_env_root).expanduser())
        if normalized == env_root or normalized.startswith(env_root + os.sep):
            return settings.allowed_env_root

        # Default to general file root
        return settings.allowed_file_root

    async def _atomic_write(self, path: Path, content: bytes) -> int:
        """Write file atomically using temp file + rename.

        Args:
            path: Target file path
            content: Content to write

        Returns:
            Number of bytes written
        """
        def _write() -> int:
            # Ensure parent directory exists
            path.parent.mkdir(parents=True, exist_ok=True)

            # Write to temp file in same directory (for atomic rename)
            fd, temp_path = tempfile.mkstemp(
                dir=path.parent,
                prefix=".tmp_",
                suffix=path.suffix,
            )

            try:
                os.write(fd, content)
                os.fsync(fd)  # Ensure data is on disk
            finally:
                os.close(fd)

            # Atomic rename (os.replace overwrites an existing target)
            os.replace(temp_path, path)

            return len(content)

        return await asyncio.to_thread(_write)

    async def _append(self, path: Path, content: bytes) -> int:
        """Append content to file.

        Args:
            path: Target file path
            content: Content to append

        Returns:
            Number of bytes written
        """
        def _append() -> int:
            # Ensure parent directory exists
            path.parent.mkdir(parents=True, exist_ok=True)

            with open(path, "ab") as f:
                written = f.write(content)
                f.flush()
                os.fsync(f.fileno())

            return written

        return await asyncio.to_thread(_append)
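Pattern sketch (illustrative, assuming a POSIX filesystem): the write-fsync-replace sequence used by _atomic_write, in isolation.

    import os
    import tempfile
    from pathlib import Path

    def atomic_write(path: Path, content: bytes) -> None:
        # Temp file in the same directory so the final rename stays on one filesystem
        fd, tmp = tempfile.mkstemp(dir=path.parent)
        try:
            os.write(fd, content)
            os.fsync(fd)  # flush file data to disk before the rename
        finally:
            os.close(fd)
        os.replace(tmp, path)  # atomic swap; readers never observe a partial file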
letsbe-sysadmin-agent/app/executors/file_inspect_executor.py (new file, 153 lines)
@@ -0,0 +1,153 @@
"""File inspection executor for reading portions of text files."""

import time
from typing import Any

from app.config import get_settings
from app.executors.base import BaseExecutor, ExecutionResult
from app.utils.validation import ValidationError, validate_file_path


class FileInspectExecutor(BaseExecutor):
    """Read portions of files for inspection.

    Security measures:
    - Path validation against allowed file root (/opt/letsbe)
    - Directory traversal prevention
    - File must exist (no blind path probing)
    - Read-only operation (no file modification)
    - Byte limit enforced (max 1MB)

    Payload:
        {
            "path": "/opt/letsbe/env/chatwoot.env",
            "max_bytes": 4096  # optional, default 4096, max 1MB
        }

    Result (success):
        {
            "path": "/opt/letsbe/env/chatwoot.env",
            "bytes_read": 123,
            "truncated": false,
            "content": "..."
        }
    """

    # Default and maximum byte limits
    DEFAULT_MAX_BYTES = 4096
    ABSOLUTE_MAX_BYTES = 1_048_576  # 1 MB

    @property
    def task_type(self) -> str:
        return "FILE_INSPECT"

    async def execute(self, payload: dict[str, Any]) -> ExecutionResult:
        """Read file content up to max_bytes.

        Args:
            payload: Must contain "path", optionally "max_bytes"

        Returns:
            ExecutionResult with file content and metadata
        """
        # Path is always required
        if "path" not in payload:
            raise ValueError("Missing required field: path")

        settings = get_settings()

        raw_path = payload["path"]
        max_bytes = payload.get("max_bytes", self.DEFAULT_MAX_BYTES)

        # Validate max_bytes is a valid integer
        try:
            max_bytes_int = int(max_bytes)
        except (TypeError, ValueError):
            return ExecutionResult(
                success=False,
                data={},
                error=f"Invalid max_bytes value: {max_bytes!r}",
            )

        # Validate max_bytes is within allowed range
        if max_bytes_int <= 0 or max_bytes_int > self.ABSOLUTE_MAX_BYTES:
            return ExecutionResult(
                success=False,
                data={},
                error=f"max_bytes must be between 1 and {self.ABSOLUTE_MAX_BYTES}",
            )

        # Validate path is under allowed file root
        try:
            validated_path = validate_file_path(
                raw_path,
                settings.allowed_file_root,
                must_exist=True,
            )
        except ValidationError as e:
            self.logger.warning("file_path_validation_failed", path=raw_path, error=str(e))
            return ExecutionResult(
                success=False,
                data={},
                error=f"Path validation failed: {e}",
            )

        self.logger.info(
            "file_inspecting",
            path=str(validated_path),
            max_bytes=max_bytes_int,
        )

        start_time = time.time()

        try:
            # Read up to max_bytes + 1 to detect truncation
            with validated_path.open("rb") as f:
                content_bytes = f.read(max_bytes_int + 1)

            truncated = len(content_bytes) > max_bytes_int
            if truncated:
                content_bytes = content_bytes[:max_bytes_int]

            # Decode as UTF-8 with errors replaced
            content_text = content_bytes.decode("utf-8", errors="replace")

            duration_ms = (time.time() - start_time) * 1000

            self.logger.info(
                "file_inspected",
                path=str(validated_path),
                bytes_read=len(content_bytes),
                truncated=truncated,
                duration_ms=duration_ms,
            )

            return ExecutionResult(
                success=True,
                data={
                    "path": str(validated_path),
                    "bytes_read": len(content_bytes),
                    "truncated": truncated,
                    "content": content_text,
                },
                duration_ms=duration_ms,
            )

        except OSError as e:
            duration_ms = (time.time() - start_time) * 1000
            self.logger.error("file_inspect_read_error", path=str(validated_path), error=str(e))
            return ExecutionResult(
                success=False,
                data={},
                error=f"Failed to read file: {e}",
                duration_ms=duration_ms,
            )
        except Exception as e:
            duration_ms = (time.time() - start_time) * 1000
            self.logger.error("file_inspect_error", path=str(validated_path), error=str(e))
            return ExecutionResult(
                success=False,
                data={},
                error=str(e),
                duration_ms=duration_ms,
            )
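Usage sketch (illustrative; assumes settings and logging are configured): reading max_bytes + 1 is what distinguishes a file that is exactly max_bytes long from one that was truncated.

    payload = {"path": "/opt/letsbe/env/chatwoot.env", "max_bytes": 100}
    result = await FileInspectExecutor().execute(payload)  # inside an event loop
    # result.data["truncated"] is True only if the file exceeds 100 bytes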
letsbe-sysadmin-agent/app/executors/nextcloud_executor.py (new file, 358 lines)
@@ -0,0 +1,358 @@
|
||||
"""Nextcloud domain configuration executor."""
|
||||
|
||||
import asyncio
|
||||
import subprocess
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
from urllib.parse import urlparse
|
||||
|
||||
from app.executors.base import BaseExecutor, ExecutionResult
|
||||
|
||||
|
||||
class NextcloudSetDomainExecutor(BaseExecutor):
|
||||
"""Execute Nextcloud domain configuration via occ commands.
|
||||
|
||||
This executor configures Nextcloud's external domain settings by running
|
||||
occ config:system:set commands via docker compose exec. It keeps the
|
||||
Orchestrator unaware of container names, occ paths, and docker-compose syntax.
|
||||
|
||||
Security measures:
|
||||
- URL parsing with validation
|
||||
- No shell=True, command list only
|
||||
- Timeout enforcement on each subprocess
|
||||
|
||||
Payload:
|
||||
{
|
||||
"public_url": "https://cloud.example.com"
|
||||
}
|
||||
|
||||
Result (success):
|
||||
{
|
||||
"public_url": "https://cloud.example.com",
|
||||
"host": "cloud.example.com",
|
||||
"scheme": "https",
|
||||
"commands_executed": 3,
|
||||
"logs": {
|
||||
"overwritehost": "<stdout+stderr>",
|
||||
"overwriteprotocol": "<stdout+stderr>",
|
||||
"overwrite.cli.url": "<stdout+stderr>"
|
||||
}
|
||||
}
|
||||
|
||||
Result (failure):
|
||||
{
|
||||
"public_url": "https://cloud.example.com",
|
||||
"host": "cloud.example.com",
|
||||
"scheme": "https",
|
||||
"commands_executed": 2,
|
||||
"failed_command": "overwriteprotocol",
|
||||
"failed_args": ["config:system:set", "overwriteprotocol", "--value=https"],
|
||||
"logs": {...}
|
||||
}
|
||||
"""
|
||||
|
||||
# TODO: These constants may need adjustment based on actual Nextcloud stack setup
|
||||
NEXTCLOUD_STACK_DIR = "/opt/letsbe/stacks/nextcloud"
|
||||
NEXTCLOUD_SERVICE_NAME = "app"
|
||||
NEXTCLOUD_OCC_PATH = "/var/www/html/occ"
|
||||
NEXTCLOUD_USER = "www-data"
|
||||
|
||||
# Compose file search order (matches DockerExecutor)
|
||||
COMPOSE_FILE_NAMES = ["docker-compose.yml", "compose.yml"]
|
||||
|
||||
# Default timeout for each occ command (seconds)
|
||||
DEFAULT_COMMAND_TIMEOUT = 60
|
||||
|
||||
@property
|
||||
def task_type(self) -> str:
|
||||
return "NEXTCLOUD_SET_DOMAIN"
|
||||
|
||||
async def execute(self, payload: dict[str, Any]) -> ExecutionResult:
|
||||
"""Execute Nextcloud domain configuration commands.
|
||||
|
||||
Runs three occ config:system:set commands to configure:
|
||||
- overwritehost: The domain/host portion of the URL
|
||||
- overwriteprotocol: The scheme (http/https)
|
||||
- overwrite.cli.url: The full public URL
|
||||
|
||||
Args:
|
||||
payload: Must contain "public_url", optionally "timeout"
|
||||
|
||||
Returns:
|
||||
ExecutionResult with configuration confirmation and logs
|
||||
"""
|
||||
self.validate_payload(payload, ["public_url"])
|
||||
|
||||
public_url = payload["public_url"]
|
||||
timeout = payload.get("timeout", self.DEFAULT_COMMAND_TIMEOUT)
|
||||
|
||||
# Parse URL into components
|
||||
try:
|
||||
scheme, host, normalized_url = self._parse_public_url(public_url)
|
||||
except ValueError as e:
|
||||
return ExecutionResult(
|
||||
success=False,
|
||||
data={"public_url": public_url},
|
||||
error=str(e),
|
||||
)
|
||||
|
||||
# Find compose file in the Nextcloud stack directory
|
||||
stack_dir = Path(self.NEXTCLOUD_STACK_DIR)
|
||||
compose_file = self._find_compose_file(stack_dir)
|
||||
|
||||
if compose_file is None:
|
||||
self.logger.warning("nextcloud_compose_not_found", dir=self.NEXTCLOUD_STACK_DIR)
|
||||
return ExecutionResult(
|
||||
success=False,
|
||||
data={"public_url": public_url, "host": host, "scheme": scheme},
|
||||
error=f"Nextcloud compose file not found in {self.NEXTCLOUD_STACK_DIR}. "
|
||||
f"Looked for: {', '.join(self.COMPOSE_FILE_NAMES)}",
|
||||
)
|
||||
|
||||
self.logger.info(
|
||||
"nextcloud_setting_domain",
|
||||
public_url=normalized_url,
|
||||
host=host,
|
||||
scheme=scheme,
|
||||
compose_file=str(compose_file),
|
||||
)
|
||||
|
||||
start_time = time.time()
|
||||
logs: dict[str, str] = {}
|
||||
commands_executed = 0
|
||||
|
||||
# Define the three occ commands to run
|
||||
occ_commands = [
|
||||
("overwritehost", ["config:system:set", "overwritehost", f"--value={host}"]),
|
||||
("overwriteprotocol", ["config:system:set", "overwriteprotocol", f"--value={scheme}"]),
|
||||
("overwrite.cli.url", ["config:system:set", "overwrite.cli.url", f"--value={normalized_url}"]),
|
||||
]
|
||||
|
||||
try:
|
||||
for cmd_name, occ_args in occ_commands:
|
||||
exit_code, stdout, stderr = await self._run_occ_command(
|
||||
compose_file,
|
||||
occ_args,
|
||||
timeout,
|
||||
)
|
||||
logs[cmd_name] = self._combine_output(stdout, stderr)
|
||||
commands_executed += 1
|
||||
|
||||
if exit_code != 0:
|
||||
duration_ms = (time.time() - start_time) * 1000
|
||||
self.logger.warning(
|
||||
"nextcloud_occ_command_failed",
|
||||
command=cmd_name,
|
||||
occ_args=occ_args,
|
||||
exit_code=exit_code,
|
||||
stderr=stderr[:500] if stderr else None,
|
||||
)
|
||||
return ExecutionResult(
|
||||
success=False,
|
||||
data={
|
||||
"public_url": normalized_url,
|
||||
"host": host,
|
||||
"scheme": scheme,
|
||||
"commands_executed": commands_executed,
|
||||
"failed_command": cmd_name,
|
||||
"failed_args": occ_args,
|
||||
"logs": logs,
|
||||
},
|
||||
error=f"occ {cmd_name} failed with exit code {exit_code}",
|
||||
duration_ms=duration_ms,
|
||||
)
|
||||
|
||||
duration_ms = (time.time() - start_time) * 1000
|
||||
self.logger.info(
|
||||
"nextcloud_domain_set",
|
||||
public_url=normalized_url,
|
||||
host=host,
|
||||
scheme=scheme,
|
||||
commands_executed=commands_executed,
|
||||
duration_ms=duration_ms,
|
||||
)
|
||||
|
||||
return ExecutionResult(
|
||||
success=True,
|
||||
data={
|
||||
"public_url": normalized_url,
|
||||
"host": host,
|
||||
"scheme": scheme,
|
||||
"commands_executed": commands_executed,
|
||||
"logs": logs,
|
||||
},
|
||||
duration_ms=duration_ms,
|
||||
)
|
||||
|
||||
except asyncio.TimeoutError:
|
||||
duration_ms = (time.time() - start_time) * 1000
|
||||
self.logger.error(
|
||||
"nextcloud_timeout",
|
||||
public_url=normalized_url,
|
||||
timeout=timeout,
|
||||
commands_executed=commands_executed,
|
||||
)
|
||||
return ExecutionResult(
|
||||
success=False,
|
||||
data={
|
||||
"public_url": normalized_url,
|
||||
"host": host,
|
||||
"scheme": scheme,
|
||||
"commands_executed": commands_executed,
|
||||
"logs": logs,
|
||||
},
|
||||
error=f"Nextcloud occ operation timed out after {timeout} seconds",
|
||||
duration_ms=duration_ms,
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
duration_ms = (time.time() - start_time) * 1000
|
||||
self.logger.error(
|
||||
"nextcloud_error",
|
||||
public_url=normalized_url,
|
||||
error=str(e),
|
||||
commands_executed=commands_executed,
|
||||
)
|
||||
return ExecutionResult(
|
||||
success=False,
|
||||
data={
|
||||
"public_url": normalized_url,
|
||||
"host": host,
|
||||
"scheme": scheme,
|
||||
"commands_executed": commands_executed,
|
||||
"logs": logs,
|
||||
},
|
||||
error=str(e),
|
||||
duration_ms=duration_ms,
|
||||
)
|
||||
|
||||
def _parse_public_url(self, public_url: str) -> tuple[str, str, str]:
|
||||
"""Parse public URL into scheme, host, and normalized URL.
|
||||
|
||||
Args:
|
||||
public_url: Full URL like "https://cloud.example.com" or just "cloud.example.com"
|
||||
|
||||
Returns:
|
||||
Tuple of (scheme, host, normalized_url)
|
||||
- scheme: "http" or "https" (defaults to "https" if not provided)
|
||||
- host: Domain with optional port (e.g., "cloud.example.com:8443")
|
||||
- normalized_url: Full URL with trailing slash stripped
|
||||
|
||||
Raises:
|
||||
ValueError: If URL is invalid or missing host
|
||||
"""
|
||||
if not public_url or not public_url.strip():
|
||||
raise ValueError("public_url cannot be empty")
|
||||
|
||||
url = public_url.strip()
|
||||
|
||||
# Parse the URL
|
||||
parsed = urlparse(url)
|
||||
|
||||
# Extract scheme, default to "https" if not provided
|
||||
scheme = parsed.scheme if parsed.scheme else "https"
|
||||
|
||||
# Extract host (netloc includes port if present)
|
||||
host = parsed.netloc
|
||||
|
||||
# Handle URLs without scheme (e.g., "cloud.example.com" or "cloud.example.com/path")
|
||||
# urlparse treats "cloud.example.com" as a path, not netloc
|
||||
if not host and not parsed.scheme:
|
||||
# The URL was provided without a scheme, so we need to re-parse with scheme
|
||||
url_with_scheme = f"https://{url}"
|
||||
parsed = urlparse(url_with_scheme)
|
||||
host = parsed.netloc
|
||||
scheme = "https"
|
||||
|
||||
if not host:
|
||||
raise ValueError(f"Invalid URL - no host found: {public_url}")
|
||||
|
||||
# Reconstruct normalized URL (with trailing slash stripped)
|
||||
normalized_url = f"{scheme}://{host}"
|
||||
if parsed.path and parsed.path != "/":
|
||||
normalized_url += parsed.path.rstrip("/")
|
||||
|
||||
return scheme, host, normalized_url
|
||||
|
||||
def _find_compose_file(self, compose_dir: Path) -> Path | None:
|
||||
"""Find compose file in the directory.
|
||||
|
||||
Searches in order: docker-compose.yml, compose.yml
|
||||
|
||||
Args:
|
||||
compose_dir: Directory to search in
|
||||
|
||||
Returns:
|
||||
Path to compose file, or None if not found
|
||||
"""
|
||||
for filename in self.COMPOSE_FILE_NAMES:
|
||||
compose_file = compose_dir / filename
|
||||
if compose_file.exists():
|
||||
return compose_file
|
||||
return None
|
||||
|
||||
def _combine_output(self, stdout: str, stderr: str) -> str:
|
||||
"""Combine stdout and stderr into a single string.
|
||||
|
||||
Args:
|
||||
stdout: Standard output
|
||||
stderr: Standard error
|
||||
|
||||
Returns:
|
||||
Combined output string
|
||||
"""
|
||||
parts = []
|
||||
if stdout:
|
||||
parts.append(stdout)
|
||||
if stderr:
|
||||
parts.append(stderr)
|
||||
return "\n".join(parts)

    async def _run_occ_command(
        self,
        compose_file: Path,
        occ_args: list[str],
        timeout: int,
    ) -> tuple[int, str, str]:
        """Run a Nextcloud occ command via docker compose exec.

        Args:
            compose_file: Path to the docker-compose file
            occ_args: Arguments to pass to occ (e.g., ["config:system:set", "overwritehost", "--value=..."])
            timeout: Operation timeout in seconds

        Returns:
            Tuple of (exit_code, stdout, stderr)
        """
        def _run() -> tuple[int, str, str]:
            # Build command: docker compose -f <file> exec -T --user <user> <service> php <occ_path> <args>
            cmd = [
                "docker",
                "compose",
                "-f",
                str(compose_file),
                "exec",
                "-T",  # Disable pseudo-TTY allocation
                "--user",
                self.NEXTCLOUD_USER,
                self.NEXTCLOUD_SERVICE_NAME,
                "php",
                self.NEXTCLOUD_OCC_PATH,
            ] + occ_args

            # Run command from stack directory, no shell=True
            result = subprocess.run(
                cmd,
                capture_output=True,
                text=True,
                timeout=timeout,
                cwd=self.NEXTCLOUD_STACK_DIR,
            )

            return result.returncode, result.stdout, result.stderr

        return await asyncio.wait_for(
            asyncio.to_thread(_run),
            timeout=timeout + 30,  # Watchdog with buffer
        )
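
    # Illustrative only (not part of the original file): for
    # occ_args = ["config:system:set", "overwritehost", "--value=cloud.example.com"],
    # the helper above runs, from NEXTCLOUD_STACK_DIR:
    #
    #   docker compose -f <compose_file> exec -T \
    #       --user <NEXTCLOUD_USER> <NEXTCLOUD_SERVICE_NAME> \
    #       php <NEXTCLOUD_OCC_PATH> config:system:set overwritehost --value=cloud.example.com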
329
letsbe-sysadmin-agent/app/executors/playwright_executor.py
Normal file
@@ -0,0 +1,329 @@
"""Playwright browser automation executor.

Executes deterministic, scenario-based browser automation tasks.
Each scenario is a reusable workflow registered in the scenario registry.
"""

import time
import uuid
from pathlib import Path
from typing import Any

from playwright.async_api import async_playwright, Route, Request

from app.config import get_settings
from app.executors.base import BaseExecutor, ExecutionResult
from app.playwright_scenarios import get_scenario, get_scenario_names, ScenarioOptions
from app.utils.validation import is_domain_allowed, validate_allowed_domains, ValidationError


class PlaywrightExecutor(BaseExecutor):
    """Browser automation executor using Playwright scenarios.

    Executes pre-defined browser automation scenarios with strict security controls.
    Each execution creates an isolated browser context with domain restrictions.

    Payload:
        {
            "scenario": "nextcloud_initial_setup",  # Required: registered scenario name
            "inputs": {                             # Required: scenario-specific inputs
                "base_url": "https://cloud.example.com",
                "admin_username": "admin",
                "admin_password": "secret123"
            },
            "options": {                            # Optional configuration
                "timeout_ms": 60000,                      # Action timeout (default: 60000)
                "screenshot_on_failure": true,            # Screenshot on fail (default: true)
                "screenshot_on_success": false,           # Screenshot on success (default: false)
                "save_trace": false,                      # Save trace file (default: false)
                "allowed_domains": ["cloud.example.com"]  # REQUIRED: domain allowlist
            }
        }

    Security:
        - allowed_domains is REQUIRED - blocks all requests to non-listed domains
        - Browser runs in headless mode only (not configurable)
        - Each execution gets an isolated browser context
        - Artifacts are stored in per-task directories
    """

    @property
    def task_type(self) -> str:
        return "PLAYWRIGHT"

    async def execute(self, payload: dict[str, Any]) -> ExecutionResult:
        """Execute a Playwright scenario.

        Args:
            payload: Task payload with scenario, inputs, and options

        Returns:
            ExecutionResult with scenario output and artifact paths
        """
        start_time = time.time()
        settings = get_settings()

        try:
            # Validate required fields
            self.validate_payload(payload, ["scenario", "inputs"])

            scenario_name = payload["scenario"]
            inputs = payload["inputs"]
            options_dict = payload.get("options", {})

            # Validate allowed_domains is present
            allowed_domains = options_dict.get("allowed_domains")
            if not allowed_domains:
                return ExecutionResult(
                    success=False,
                    data={"scenario": scenario_name},
                    error="Security error: 'allowed_domains' is required in options",
                    duration_ms=(time.time() - start_time) * 1000,
                )

            # Validate domain patterns
            try:
                allowed_domains = validate_allowed_domains(allowed_domains)
            except ValidationError as e:
                return ExecutionResult(
                    success=False,
                    data={"scenario": scenario_name},
                    error=f"Invalid allowed_domains: {e}",
                    duration_ms=(time.time() - start_time) * 1000,
                )

            # Get scenario from registry
            scenario = get_scenario(scenario_name)
            if scenario is None:
                available = get_scenario_names()
                return ExecutionResult(
                    success=False,
                    data={
                        "scenario": scenario_name,
                        "available_scenarios": available,
                    },
                    error=f"Unknown scenario: '{scenario_name}'. Available: {available}",
                    duration_ms=(time.time() - start_time) * 1000,
                )

            # Validate scenario inputs
            missing_inputs = scenario.validate_inputs(inputs)
            if missing_inputs:
                return ExecutionResult(
                    success=False,
                    data={
                        "scenario": scenario_name,
                        "missing_inputs": missing_inputs,
                        "required_inputs": scenario.required_inputs,
                    },
                    error=f"Missing required inputs: {missing_inputs}",
                    duration_ms=(time.time() - start_time) * 1000,
                )

            # Create artifacts directory for this execution
            task_id = str(uuid.uuid4())[:8]
            artifacts_dir = Path(settings.playwright_artifacts_dir) / f"task-{task_id}"
            artifacts_dir.mkdir(parents=True, exist_ok=True)

            # Build scenario options
            scenario_options = ScenarioOptions(
                timeout_ms=options_dict.get("timeout_ms", settings.playwright_default_timeout_ms),
                screenshot_on_failure=options_dict.get("screenshot_on_failure", True),
                screenshot_on_success=options_dict.get("screenshot_on_success", False),
                save_trace=options_dict.get("save_trace", False),
                allowed_domains=allowed_domains,
                artifacts_dir=artifacts_dir,
            )

            self.logger.info(
                "playwright_scenario_starting",
                scenario=scenario_name,
                task_id=task_id,
                allowed_domains=allowed_domains,
            )

            # Execute scenario with browser
            result = await self._run_scenario(
                scenario=scenario,
                inputs=inputs,
                options=scenario_options,
                task_id=task_id,
            )

            duration_ms = (time.time() - start_time) * 1000

            self.logger.info(
                "playwright_scenario_completed",
                scenario=scenario_name,
                success=result.success,
                duration_ms=duration_ms,
            )

            return ExecutionResult(
                success=result.success,
                data={
                    "scenario": scenario_name,
                    "result": result.data,
                    "screenshots": result.screenshots,
                    "artifacts_dir": str(artifacts_dir),
                    "trace_path": result.trace_path,
                },
                error=result.error,
                duration_ms=duration_ms,
            )

        except ValueError as e:
            # Validation errors
            return ExecutionResult(
                success=False,
                data={},
                error=str(e),
                duration_ms=(time.time() - start_time) * 1000,
            )
        except Exception as e:
            self.logger.error(
                "playwright_executor_error",
                error=str(e),
                error_type=type(e).__name__,
            )
            return ExecutionResult(
                success=False,
                data={},
                error=f"Playwright executor error: {e}",
                duration_ms=(time.time() - start_time) * 1000,
            )

    async def _run_scenario(
        self,
        scenario,
        inputs: dict[str, Any],
        options: ScenarioOptions,
        task_id: str,
    ):
        """Run a scenario with browser and domain restrictions.

        Args:
            scenario: The scenario instance to execute
            inputs: Scenario inputs
            options: Scenario options
            task_id: Task identifier for logging

        Returns:
            ScenarioResult from the scenario execution
        """
        from app.playwright_scenarios import ScenarioResult

        settings = get_settings()
        blocked_requests: list[str] = []

        async def route_handler(route: Route, request: Request) -> None:
            """Block requests to non-allowed domains."""
            url = request.url

            if is_domain_allowed(url, options.allowed_domains):
                await route.continue_()
            else:
                blocked_requests.append(url)
                self.logger.warning(
                    "playwright_blocked_request",
                    url=url,
                    task_id=task_id,
                )
                await route.abort("blockedbyclient")
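
        # Illustrative only (not part of the original file): with
        # options.allowed_domains = ["cloud.example.com"], a page resource such as
        # https://cdn.tracker.example.net/pixel.gif would be aborted with
        # "blockedbyclient" and recorded in blocked_requests, while requests to
        # https://cloud.example.com/... continue normally (the exact matching
        # rules are those of app.utils.validation.is_domain_allowed).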

        async with async_playwright() as p:
            # Launch browser in headless mode (always)
            browser = await p.chromium.launch(
                headless=True,
                args=[
                    "--no-sandbox",
                    "--disable-setuid-sandbox",
                    "--disable-dev-shm-usage",
                    "--disable-gpu",
                ],
            )

            try:
                # Create isolated context
                context = await browser.new_context(
                    viewport={"width": 1280, "height": 720},
                    user_agent="LetsBe-SysAdmin-Agent/1.0 Playwright",
                )

                # Set default timeouts
                context.set_default_timeout(options.timeout_ms)
                context.set_default_navigation_timeout(
                    settings.playwright_navigation_timeout_ms
                )

                # Start tracing if enabled
                if options.save_trace and options.artifacts_dir:
                    await context.tracing.start(
                        screenshots=True,
                        snapshots=True,
                    )

                # Apply domain restrictions via route interception
                await context.route("**/*", route_handler)

                # Create page
                page = await context.new_page()

                try:
                    # Run scenario setup hook
                    await scenario.setup(page, options)

                    # Execute the scenario
                    result = await scenario.execute(page, inputs, options)

                    # Take success screenshot if enabled
                    if options.screenshot_on_success and options.artifacts_dir:
                        screenshot_path = options.artifacts_dir / "success.png"
                        await page.screenshot(path=str(screenshot_path))
                        result.screenshots.append(str(screenshot_path))

                except Exception as e:
                    # Capture failure screenshot
                    screenshots = []
                    if options.screenshot_on_failure and options.artifacts_dir:
                        try:
                            screenshot_path = options.artifacts_dir / "failure.png"
                            await page.screenshot(path=str(screenshot_path))
                            screenshots.append(str(screenshot_path))
                        except Exception as screenshot_error:
                            self.logger.warning(
                                "playwright_screenshot_failed",
                                error=str(screenshot_error),
                            )

                    result = ScenarioResult(
                        success=False,
                        data={"blocked_requests": blocked_requests},
                        screenshots=screenshots,
                        error=str(e),
                    )

                finally:
                    # Run scenario teardown hook
                    try:
                        await scenario.teardown(page, options)
                    except Exception as teardown_error:
                        self.logger.warning(
                            "playwright_teardown_error",
                            error=str(teardown_error),
                        )

                    # Stop tracing and save
                    if options.save_trace and options.artifacts_dir:
                        trace_path = options.artifacts_dir / "trace.zip"
                        await context.tracing.stop(path=str(trace_path))
                        result.trace_path = str(trace_path)

                # Add blocked requests info
                if blocked_requests:
                    result.data["blocked_requests"] = blocked_requests

                return result

            finally:
                await browser.close()
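
    # Illustrative only (not part of the original file): artifact layout produced
    # by one execution with all screenshot/trace options enabled, under
    # <playwright_artifacts_dir>/task-<task_id>/:
    #
    #   success.png or failure.png   # screenshots, per the options above
    #   trace.zip                    # when save_trace is true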
163
letsbe-sysadmin-agent/app/executors/shell_executor.py
Normal file
@@ -0,0 +1,163 @@
"""Shell command executor with strict security controls."""

import asyncio
import time
from typing import Any, Optional

from app.config import get_settings
from app.executors.base import BaseExecutor, ExecutionResult
from app.utils.validation import ValidationError, validate_shell_command


class ShellExecutor(BaseExecutor):
    """Execute shell commands with strict security controls.

    Security measures:
    - Absolute path allowlist for commands
    - Per-command argument validation via regex
    - Forbidden shell metacharacter blocking
    - No shell=True (prevents shell injection)
    - Timeout enforcement with watchdog
    - Runs via asyncio.to_thread to avoid blocking

    Payload:
        {
            "cmd": "/usr/bin/ls",     # Must be an absolute path
            "args": "-la /opt/data",  # Optional arguments
            "timeout": 60             # Optional timeout override
        }

    Result:
        {
            "exit_code": 0,
            "stdout": "...",
            "stderr": "...",
            "duration_ms": 123.45
        }
    """

    @property
    def task_type(self) -> str:
        return "SHELL"

    async def execute(self, payload: dict[str, Any]) -> ExecutionResult:
        """Execute a shell command.

        Args:
            payload: Must contain "cmd", optionally "args" and "timeout"

        Returns:
            ExecutionResult with command output
        """
        self.validate_payload(payload, ["cmd"])
        settings = get_settings()

        cmd = payload["cmd"]
        args_str = payload.get("args", "")
        timeout_override = payload.get("timeout")

        # Validate command and arguments
        try:
            validated_cmd, args_list, default_timeout = validate_shell_command(cmd, args_str)
        except ValidationError as e:
            self.logger.warning("shell_validation_failed", cmd=cmd, error=str(e))
            return ExecutionResult(
                success=False,
                data={"exit_code": -1, "stdout": "", "stderr": ""},
                error=f"Validation failed: {e}",
            )

        # Determine timeout
        timeout = timeout_override if timeout_override is not None else default_timeout
        timeout = min(timeout, settings.shell_timeout)  # Cap at global max

        self.logger.info(
            "shell_executing",
            cmd=validated_cmd,
            args=args_list,
            timeout=timeout,
        )

        start_time = time.time()

        try:
            # Run in thread pool to avoid blocking the event loop; pass the
            # effective timeout down so the subprocess enforces it directly
            result = await asyncio.wait_for(
                self._run_subprocess(validated_cmd, args_list, timeout),
                timeout=timeout * 2,  # Watchdog at 2x timeout
            )

            duration_ms = (time.time() - start_time) * 1000
            exit_code, stdout, stderr = result

            success = exit_code == 0

            self.logger.info(
                "shell_completed",
                cmd=validated_cmd,
                exit_code=exit_code,
                duration_ms=duration_ms,
            )

            return ExecutionResult(
                success=success,
                data={
                    "exit_code": exit_code,
                    "stdout": stdout,
                    "stderr": stderr,
                },
                error=stderr if not success else None,
                duration_ms=duration_ms,
            )

        except asyncio.TimeoutError:
            duration_ms = (time.time() - start_time) * 1000
            self.logger.error("shell_timeout", cmd=validated_cmd, timeout=timeout)
            return ExecutionResult(
                success=False,
                data={"exit_code": -1, "stdout": "", "stderr": ""},
                error=f"Command timed out after {timeout} seconds",
                duration_ms=duration_ms,
            )

        except Exception as e:
            duration_ms = (time.time() - start_time) * 1000
            self.logger.error("shell_error", cmd=validated_cmd, error=str(e))
            return ExecutionResult(
                success=False,
                data={"exit_code": -1, "stdout": "", "stderr": ""},
                error=str(e),
                duration_ms=duration_ms,
            )

    async def _run_subprocess(
        self,
        cmd: str,
        args: list[str],
        timeout: int,
    ) -> tuple[int, str, str]:
        """Run subprocess in thread pool.

        Args:
            cmd: Command to run (absolute path)
            args: Command arguments
            timeout: Effective timeout in seconds for the subprocess

        Returns:
            Tuple of (exit_code, stdout, stderr)
        """
        import subprocess

        def _run() -> tuple[int, str, str]:
            # Build full command list
            full_cmd = [cmd] + args

            # Run WITHOUT shell=True for security; enforce the per-command
            # timeout computed in execute() (previously this used the global
            # shell_timeout, which could exceed the requested timeout)
            result = subprocess.run(
                full_cmd,
                capture_output=True,
                text=True,
                timeout=timeout,
            )

            return result.returncode, result.stdout, result.stderr

        return await asyncio.to_thread(_run)