"""
|
|
Comprehensive tests for pyservectl CLI tool.
|
|
|
|
Tests cover:
|
|
- CLI main entry point and commands
|
|
- State management
|
|
- Output formatting utilities
|
|
- Service runner
|
|
- Config management commands
|
|
- Init command templates
|
|
"""
|
|
|
|
import asyncio
|
|
import json
|
|
import os
|
|
import tempfile
|
|
import time
|
|
from pathlib import Path
|
|
from unittest.mock import AsyncMock, MagicMock, patch
|
|
|
|
import pytest
|
|
import yaml
|
|
from click.testing import CliRunner
|
|
|
|
from pyserve.ctl.main import cli, Context, DEFAULT_CONFIG, DEFAULT_STATE_DIR
|
|
from pyserve.ctl.state import (
|
|
ProjectState,
|
|
ServiceHealth,
|
|
ServiceState,
|
|
StateManager,
|
|
)
|
|
from pyserve.ctl.output import (
|
|
console,
|
|
create_services_table,
|
|
format_bytes,
|
|
format_health,
|
|
format_status,
|
|
format_uptime,
|
|
print_error,
|
|
print_info,
|
|
print_success,
|
|
print_warning,
|
|
)
|
|
|
|
|
|
# =============================================================================
|
|
# Fixtures
|
|
# =============================================================================
|
|
|
|
|
|
@pytest.fixture
def runner():
    """Provide a fresh Click CLI test runner."""
    return CliRunner()
|
|
|
|
|
|
@pytest.fixture
def temp_dir():
    """Yield a throwaway directory that is removed after the test."""
    with tempfile.TemporaryDirectory() as tmpdir:
        yield Path(tmpdir)
|
|
|
|
|
|
@pytest.fixture
def state_dir(temp_dir):
    """Create and return a temporary `.pyserve` state directory."""
    path = temp_dir / ".pyserve"
    path.mkdir()
    return path
|
|
|
|
|
|
@pytest.fixture
def state_manager(state_dir):
    """Build a StateManager rooted at the temporary state directory."""
    return StateManager(state_dir, project="test-project")
|
|
|
|
|
|
@pytest.fixture
def sample_config(temp_dir):
    """Write a representative config.yaml into *temp_dir* and return its path."""
    config_content = """
server:
  host: 0.0.0.0
  port: 8080
  backlog: 100
  proxy_timeout: 30.0

logging:
  level: INFO
  console_output: true

extensions:
  - type: process_orchestration
    config:
      port_range: [9000, 9999]
      apps:
        - name: api
          path: /api
          app_path: myapp.api:app
          workers: 2
          health_check_path: /health
        - name: admin
          path: /admin
          app_path: myapp.admin:app
          workers: 1
  - type: routing
    config:
      regex_locations:
        "=/health":
          return: "200 OK"
"""
    config_path = temp_dir / "config.yaml"
    config_path.write_text(config_content)
    return config_path
|
|
|
|
|
|
@pytest.fixture
def isolated_runner(temp_dir, sample_config):
    """Yield a CLI runner inside an isolated filesystem seeded with config.yaml."""
    cli_runner = CliRunner()
    with cli_runner.isolated_filesystem(temp_dir=str(temp_dir)):
        # Seed the working directory with the sample configuration.
        Path("config.yaml").write_text(sample_config.read_text())
        yield cli_runner
|
|
|
|
|
|
# =============================================================================
|
|
# Context Tests
|
|
# =============================================================================
|
|
|
|
|
|
class TestContext:
    """Tests for Context class."""

    def test_context_defaults(self):
        """A fresh Context starts from the documented defaults."""
        context = Context()
        assert context.config_file == DEFAULT_CONFIG
        assert context.state_dir == Path(DEFAULT_STATE_DIR)
        assert context.verbose is False
        assert context.debug is False
        assert context.project is None

    def test_context_config_lazy_load(self, temp_dir, sample_config):
        """The config is parsed on first access and cached afterwards."""
        context = Context()
        context.config_file = str(sample_config)

        # First access triggers the load.
        loaded = context.config
        assert loaded is not None
        assert loaded.server.port == 8080

        # Subsequent access must hand back the cached object.
        assert context.config is loaded

    def test_context_config_missing_file(self, temp_dir):
        """A missing config file falls back to a usable default config."""
        context = Context()
        context.config_file = str(temp_dir / "nonexistent.yaml")

        assert context.config is not None

    def test_context_state_lazy_load(self, state_dir):
        """The state manager is created lazily and cached."""
        context = Context()
        context.state_dir = state_dir
        context.project = "test"

        first = context.state
        assert first is not None
        assert isinstance(first, StateManager)

        # Cached: same instance on repeated access.
        assert context.state is first
|
|
|
|
|
|
# =============================================================================
|
|
# CLI Main Tests
|
|
# =============================================================================
|
|
|
|
|
|
class TestCLIMain:
    """Tests for main CLI entry point."""

    def test_cli_no_command_shows_help(self, runner):
        """Invoking the CLI with no subcommand prints the help screen."""
        res = runner.invoke(cli)
        assert res.exit_code == 0
        assert "PyServeCTL" in res.output
        assert "Quick Start:" in res.output

    def test_cli_help(self, runner):
        """--help prints the banner and the command list."""
        res = runner.invoke(cli, ["--help"])
        assert res.exit_code == 0
        assert "PyServeCTL" in res.output
        assert "Commands:" in res.output

    def test_cli_version(self, runner):
        """--version reports the tool name."""
        res = runner.invoke(cli, ["--version"])
        assert res.exit_code == 0
        assert "pyservectl" in res.output

    def test_cli_verbose_flag(self, runner):
        """--verbose is accepted without error."""
        res = runner.invoke(cli, ["--verbose", "--help"])
        assert res.exit_code == 0

    def test_cli_debug_flag(self, runner):
        """--debug is accepted without error."""
        res = runner.invoke(cli, ["--debug", "--help"])
        assert res.exit_code == 0

    def test_cli_config_option(self, runner, temp_dir, sample_config):
        """-c <file> is accepted without error."""
        res = runner.invoke(cli, ["-c", str(sample_config), "--help"])
        assert res.exit_code == 0

    def test_cli_project_option(self, runner):
        """-p <name> is accepted without error."""
        res = runner.invoke(cli, ["-p", "myproject", "--help"])
        assert res.exit_code == 0
|
|
|
|
|
|
# =============================================================================
|
|
# State Management Tests
|
|
# =============================================================================
|
|
|
|
|
|
class TestServiceHealth:
    """Tests for ServiceHealth dataclass."""

    def test_default_values(self):
        """A bare ServiceHealth carries the documented defaults."""
        h = ServiceHealth()
        assert h.status == "unknown"
        assert h.last_check is None
        assert h.failures == 0
        assert h.response_time_ms is None

    def test_custom_values(self):
        """All fields round-trip through the constructor."""
        h = ServiceHealth(
            status="healthy",
            last_check=1234567890.0,
            failures=2,
            response_time_ms=50.5,
        )
        assert h.status == "healthy"
        assert h.last_check == 1234567890.0
        assert h.failures == 2
        assert h.response_time_ms == 50.5
|
|
|
|
|
|
class TestServiceState:
    """Tests for ServiceState dataclass."""

    def test_default_values(self):
        """A service created with only a name defaults to stopped/empty."""
        svc = ServiceState(name="test")
        assert svc.name == "test"
        assert svc.state == "stopped"
        assert svc.pid is None
        assert svc.port == 0
        assert svc.workers == 0
        assert svc.started_at is None
        assert svc.restart_count == 0
        assert isinstance(svc.health, ServiceHealth)

    def test_uptime_not_started(self):
        """Uptime is zero before the service has ever started."""
        svc = ServiceState(name="test")
        assert svc.uptime == 0.0

    def test_uptime_running(self):
        """Uptime is measured from started_at (allow 1s of slack)."""
        svc = ServiceState(name="test", started_at=time.time() - 100)
        assert 99 <= svc.uptime <= 101

    def test_to_dict(self):
        """to_dict serializes scalar fields and nests the health record."""
        svc = ServiceState(
            name="api",
            state="running",
            pid=1234,
            port=9000,
            workers=2,
        )
        payload = svc.to_dict()

        assert payload["name"] == "api"
        assert payload["state"] == "running"
        assert payload["pid"] == 1234
        assert payload["port"] == 9000
        assert payload["workers"] == 2
        assert "health" in payload

    def test_from_dict(self):
        """from_dict restores fields, including the nested health record."""
        payload = {
            "name": "api",
            "state": "running",
            "pid": 1234,
            "port": 9000,
            "workers": 2,
            "started_at": 1234567890.0,
            "restart_count": 1,
            "health": {"status": "healthy", "failures": 0},
            "config_hash": "abc123",
        }
        svc = ServiceState.from_dict(payload)

        assert svc.name == "api"
        assert svc.state == "running"
        assert svc.pid == 1234
        assert svc.health.status == "healthy"
|
|
|
|
|
|
class TestProjectState:
    """Tests for ProjectState dataclass."""

    def test_default_values(self):
        """An empty ProjectState has version 1.0 and no services."""
        proj = ProjectState()
        assert proj.version == "1.0"
        assert proj.project == ""
        assert proj.services == {}

    def test_to_dict(self):
        """to_dict includes the project name and nested services."""
        proj = ProjectState(
            project="myproject",
            config_file="config.yaml",
        )
        proj.services["api"] = ServiceState(name="api", state="running")

        payload = proj.to_dict()
        assert payload["project"] == "myproject"
        assert "api" in payload["services"]

    def test_from_dict(self):
        """from_dict restores the project and rebuilds each service."""
        payload = {
            "version": "1.0",
            "project": "myproject",
            "config_file": "config.yaml",
            "config_hash": "",
            "started_at": None,
            "daemon_pid": None,
            "services": {
                "api": {
                    "name": "api",
                    "state": "running",
                    "pid": 1234,
                    "port": 9000,
                    "workers": 1,
                    "started_at": None,
                    "restart_count": 0,
                    "health": {},
                    "config_hash": "",
                }
            },
        }
        proj = ProjectState.from_dict(payload)
        assert proj.project == "myproject"
        assert "api" in proj.services
|
|
|
|
|
|
class TestStateManager:
    """Tests for StateManager class."""

    def test_init(self, state_dir):
        """Constructor stores the directory and project name."""
        mgr = StateManager(state_dir, project="test")
        assert mgr.state_dir == state_dir
        assert mgr.project == "test"

    def test_paths(self, state_manager, state_dir):
        """Derived paths all live under the state directory."""
        assert state_manager.state_file == state_dir / "state.json"
        assert state_manager.pid_file == state_dir / "pyserve.pid"
        assert state_manager.socket_file == state_dir / "pyserve.sock"
        assert state_manager.logs_dir == state_dir / "logs"

    def test_ensure_dirs(self, state_manager):
        """ensure_dirs creates the state and logs directories."""
        state_manager.ensure_dirs()
        assert state_manager.state_dir.exists()
        assert state_manager.logs_dir.exists()

    def test_load_empty(self, state_manager):
        """Loading with no state file yields a fresh ProjectState."""
        loaded = state_manager.load()
        assert isinstance(loaded, ProjectState)
        assert loaded.project == "test-project"

    def test_save_and_load(self, state_manager):
        """A service written by one manager is visible to a fresh one."""
        state_manager.update_service("api", state="running", pid=1234, port=9000)

        # A second manager with a cleared cache must read from disk.
        reloaded_mgr = StateManager(state_manager.state_dir, project="test-project")
        reloaded_mgr._state = None  # Force reload
        loaded = reloaded_mgr.load()

        assert "api" in loaded.services
        assert loaded.services["api"].pid == 1234

    def test_update_service_new(self, state_manager):
        """Updating an unknown service creates it with the given fields."""
        svc = state_manager.update_service("api", state="running", port=9000)

        assert svc.name == "api"
        assert svc.state == "running"
        assert svc.port == 9000

    def test_update_service_existing(self, state_manager):
        """A second update changes only the given fields, preserving others."""
        state_manager.update_service("api", state="running", port=9000)
        svc = state_manager.update_service("api", state="stopped", pid=None)

        assert svc.state == "stopped"
        assert svc.port == 9000  # Should preserve

    def test_remove_service(self, state_manager):
        """A removed service is no longer retrievable."""
        state_manager.update_service("api", state="running")
        state_manager.remove_service("api")

        assert state_manager.get_service("api") is None

    def test_get_service(self, state_manager):
        """get_service returns a known service and None for unknown names."""
        state_manager.update_service("api", state="running")

        svc = state_manager.get_service("api")
        assert svc is not None
        assert svc.name == "api"

        assert state_manager.get_service("nonexistent") is None

    def test_get_all_services(self, state_manager):
        """get_all_services returns every registered service."""
        state_manager.update_service("api", state="running")
        state_manager.update_service("admin", state="stopped")

        all_services = state_manager.get_all_services()
        assert len(all_services) == 2
        assert "api" in all_services
        assert "admin" in all_services

    def test_clear(self, state_manager):
        """clear drops every registered service."""
        state_manager.update_service("api", state="running")
        state_manager.clear()

        assert len(state_manager.get_all_services()) == 0

    def test_daemon_pid_not_running(self, state_manager):
        """With no PID file the daemon reports as not running."""
        assert state_manager.is_daemon_running() is False
        assert state_manager.get_daemon_pid() is None

    def test_daemon_pid_operations(self, state_manager):
        """Setting a PID for a dead process still reports not-running."""
        # Set PID (but process won't actually exist)
        state_manager.set_daemon_pid(99999)

        # Should return None since process doesn't exist
        assert state_manager.is_daemon_running() is False

        # Clearing removes the PID file entirely.
        state_manager.clear_daemon_pid()
        assert not state_manager.pid_file.exists()

    def test_get_service_log_file(self, state_manager):
        """Log files are named <service>.log under the logs directory."""
        assert state_manager.get_service_log_file("api") == state_manager.logs_dir / "api.log"

    def test_compute_config_hash(self, state_manager, temp_dir):
        """The hash is 16 chars, stable for same content, and content-sensitive."""
        cfg = temp_dir / "test.yaml"
        cfg.write_text("test: value")

        first = state_manager.compute_config_hash(str(cfg))
        assert len(first) == 16

        # Same content should give same hash
        assert state_manager.compute_config_hash(str(cfg)) == first

        # Different content should give different hash
        cfg.write_text("test: different")
        assert state_manager.compute_config_hash(str(cfg)) != first

    def test_compute_config_hash_missing_file(self, state_manager):
        """A missing file hashes to the empty string."""
        assert state_manager.compute_config_hash("/nonexistent/file.yaml") == ""
|
|
|
|
|
|
# =============================================================================
|
|
# Output Formatting Tests
|
|
# =============================================================================
|
|
|
|
|
|
class TestOutputFormatting:
    """Tests for output formatting utilities."""

    def test_format_status(self):
        """Every known state name appears inside its formatted form."""
        for status in ("running", "stopped", "failed", "starting",
                       "stopping", "restarting", "pending"):
            assert status in format_status(status)
        # Unrecognized states pass through unchanged.
        assert format_status("unknown") == "unknown"

    def test_format_health(self):
        """Every known health value appears inside its formatted form."""
        for health in ("healthy", "unhealthy", "degraded", "unknown"):
            assert health in format_health(health)
        assert format_health("-") is not None

    def test_format_uptime_zero(self):
        """Zero or negative uptime renders as a dash."""
        assert format_uptime(0) == "-"
        assert format_uptime(-10) == "-"

    def test_format_uptime_seconds(self):
        """Sub-minute uptimes render as whole seconds."""
        assert format_uptime(30) == "30s"
        assert format_uptime(59) == "59s"

    def test_format_uptime_minutes(self):
        """Sub-hour uptimes render as minutes plus seconds."""
        assert format_uptime(60) == "1m 0s"
        assert format_uptime(90) == "1m 30s"
        assert format_uptime(3599) == "59m 59s"

    def test_format_uptime_hours(self):
        """Sub-day uptimes render as hours plus minutes."""
        assert format_uptime(3600) == "1h 0m"
        assert format_uptime(7200) == "2h 0m"
        assert format_uptime(5400) == "1h 30m"

    def test_format_uptime_days(self):
        """Uptimes of a day or more render as days plus hours."""
        assert format_uptime(86400) == "1d 0h"
        assert format_uptime(172800) == "2d 0h"
        assert format_uptime(90000) == "1d 1h"

    def test_format_bytes(self):
        """Byte counts scale through B/KB/MB/GB with one decimal place."""
        for raw, expected in (
            (500, "500.0B"),
            (1024, "1.0KB"),
            (1536, "1.5KB"),
            (1048576, "1.0MB"),
            (1073741824, "1.0GB"),
        ):
            assert format_bytes(raw) == expected

    def test_create_services_table(self):
        """The services table exists and has the expected columns."""
        table = create_services_table()
        assert table is not None
        headers = [col.header for col in table.columns]
        assert "NAME" in headers
        assert "STATUS" in headers
        assert "PORTS" in headers
|
|
|
|
|
|
# =============================================================================
|
|
# Init Command Tests
|
|
# =============================================================================
|
|
|
|
|
|
class TestInitCommand:
    """Tests for init command."""

    def test_init_basic_template(self, runner, temp_dir):
        """The default template scaffolds config and standard directories."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            res = runner.invoke(cli, ["init"])

            assert res.exit_code == 0
            for expected in ("config.yaml", "static", "templates", "logs", ".pyserve"):
                assert Path(expected).exists()

    def test_init_orchestration_template(self, runner, temp_dir):
        """The orchestration template enables process_orchestration."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            res = runner.invoke(cli, ["init", "-t", "orchestration"])

            assert res.exit_code == 0
            assert "process_orchestration" in Path("config.yaml").read_text()

    def test_init_asgi_template(self, runner, temp_dir):
        """The asgi template enables asgi_mount."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            res = runner.invoke(cli, ["init", "-t", "asgi"])

            assert res.exit_code == 0
            assert "asgi_mount" in Path("config.yaml").read_text()

    def test_init_full_template(self, runner, temp_dir):
        """The full template enables both orchestration and routing."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            res = runner.invoke(cli, ["init", "-t", "full"])

            assert res.exit_code == 0
            generated = Path("config.yaml").read_text()
            assert "process_orchestration" in generated
            assert "routing" in generated

    def test_init_custom_output(self, runner, temp_dir):
        """-o writes the config to the requested filename."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            res = runner.invoke(cli, ["init", "-o", "custom.yaml"])

            assert res.exit_code == 0
            assert Path("custom.yaml").exists()

    def test_init_no_overwrite(self, runner, temp_dir):
        """Declining the overwrite prompt leaves the file untouched."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            Path("config.yaml").write_text("existing content")

            runner.invoke(cli, ["init"], input="n\n")

            assert Path("config.yaml").read_text() == "existing content"

    def test_init_force_overwrite(self, runner, temp_dir):
        """-f replaces an existing config without prompting."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            Path("config.yaml").write_text("existing content")

            res = runner.invoke(cli, ["init", "-f"])

            assert res.exit_code == 0
            assert Path("config.yaml").read_text() != "existing content"

    def test_init_list_templates(self, runner, temp_dir):
        """--list-templates names every built-in template."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            res = runner.invoke(cli, ["init", "--list-templates"])

            assert res.exit_code == 0
            for template in ("basic", "orchestration", "asgi", "full"):
                assert template in res.output
|
|
|
|
|
|
# =============================================================================
|
|
# Config Command Tests
|
|
# =============================================================================
|
|
|
|
|
|
class TestConfigCommand:
    """Tests for config command."""

    def test_config_validate_success(self, runner, temp_dir, sample_config):
        """A well-formed config validates cleanly."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            Path("config.yaml").write_text(sample_config.read_text())

            res = runner.invoke(cli, ["config", "validate"])

            assert res.exit_code == 0
            assert "valid" in res.output.lower()

    def test_config_validate_missing_file(self, runner, temp_dir):
        """Validation fails when the config file is absent."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            res = runner.invoke(cli, ["config", "validate"])

            assert res.exit_code != 0

    def test_config_show_yaml(self, runner, temp_dir, sample_config):
        """`config show` succeeds with the default YAML output."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            Path("config.yaml").write_text(sample_config.read_text())

            res = runner.invoke(cli, ["config", "show"])

            assert res.exit_code == 0

    def test_config_show_json(self, runner, temp_dir, sample_config):
        """`config show --format json` succeeds."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            Path("config.yaml").write_text(sample_config.read_text())

            res = runner.invoke(cli, ["config", "show", "--format", "json"])

            assert res.exit_code == 0

    def test_config_show_section(self, runner, temp_dir, sample_config):
        """`config show --section` succeeds for a known section."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            Path("config.yaml").write_text(sample_config.read_text())

            res = runner.invoke(cli, ["config", "show", "--section", "server"])

            assert res.exit_code == 0

    def test_config_get_value(self, runner, temp_dir, sample_config):
        """`config get` prints the requested scalar value."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            Path("config.yaml").write_text(sample_config.read_text())

            res = runner.invoke(cli, ["config", "get", "server.port"])

            assert res.exit_code == 0
            assert "8080" in res.output

    def test_config_get_nested(self, runner, temp_dir, sample_config):
        """`config get` resolves dotted paths into nested sections."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            Path("config.yaml").write_text(sample_config.read_text())

            res = runner.invoke(cli, ["config", "get", "server.host"])

            assert res.exit_code == 0
            assert "0.0.0.0" in res.output

    def test_config_set_value(self, runner, temp_dir, sample_config):
        """`config set` persists the new value back to the file."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            Path("config.yaml").write_text(sample_config.read_text())

            res = runner.invoke(cli, ["config", "set", "server.port", "9090"])

            assert res.exit_code == 0

            # Re-read the file to confirm the write actually happened.
            persisted = yaml.safe_load(Path("config.yaml").read_text())
            assert persisted["server"]["port"] == 9090

    def test_config_diff(self, runner, temp_dir):
        """`config diff` succeeds for two differing configs."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            Path("config1.yaml").write_text(yaml.dump({"server": {"port": 8080}}))
            Path("config2.yaml").write_text(yaml.dump({"server": {"port": 9090}}))

            res = runner.invoke(cli, ["config", "diff", "config1.yaml", "config2.yaml"])

            assert res.exit_code == 0
|
|
|
|
# =============================================================================
|
|
# PS/Status Command Tests
|
|
# =============================================================================
|
|
|
|
|
|
class TestPsCommand:
    """Tests for ps/status command.

    Several tests need a pre-populated ``.pyserve/state.json`` describing one
    running "api" service; that fixture dict was previously duplicated
    verbatim in three tests, so it is factored into :meth:`_write_state`.
    """

    @staticmethod
    def _write_state(health):
        """Write .pyserve/state.json with a single running 'api' service.

        Args:
            health: The ``health`` sub-dict to store for the service
                (e.g. ``{"status": "healthy"}`` or ``{}``).
        """
        state_dir = Path(".pyserve")
        state_dir.mkdir(exist_ok=True)
        state_data = {
            "version": "1.0",
            "project": "test",
            "config_file": "",
            "config_hash": "",
            "started_at": None,
            "daemon_pid": None,
            "services": {
                "api": {
                    "name": "api",
                    "state": "running",
                    "pid": 1234,
                    "port": 9000,
                    "workers": 2,
                    "started_at": time.time(),
                    "restart_count": 0,
                    "health": health,
                    "config_hash": "",
                }
            },
        }
        (state_dir / "state.json").write_text(json.dumps(state_data))

    def test_ps_no_services(self, runner, temp_dir, sample_config):
        """Test ps with no running services."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            Path("config.yaml").write_text(sample_config.read_text())

            result = runner.invoke(cli, ["ps"])

            assert result.exit_code == 0
            assert "No services" in result.output or "running" not in result.output.lower()

    def test_ps_with_services(self, runner, temp_dir, sample_config):
        """Test ps with services in state."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            Path("config.yaml").write_text(sample_config.read_text())
            self._write_state({"status": "healthy"})

            result = runner.invoke(cli, ["ps"])

            assert result.exit_code == 0
            assert "api" in result.output

    def test_ps_show_all(self, runner, temp_dir, sample_config):
        """Test ps with --all flag."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            Path("config.yaml").write_text(sample_config.read_text())

            result = runner.invoke(cli, ["ps", "-a"])

            assert result.exit_code == 0

    def test_ps_quiet(self, runner, temp_dir, sample_config):
        """Test ps with --quiet flag."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            Path("config.yaml").write_text(sample_config.read_text())
            self._write_state({})

            result = runner.invoke(cli, ["ps", "-q"])

            assert result.exit_code == 0
            # Quiet mode should only show service names
            assert "api" in result.output

    def test_ps_json_format(self, runner, temp_dir, sample_config):
        """Test ps with JSON output."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            Path("config.yaml").write_text(sample_config.read_text())
            self._write_state({})

            result = runner.invoke(cli, ["ps", "--format", "json"])

            assert result.exit_code == 0

    def test_status_alias(self, runner, temp_dir, sample_config):
        """Test that 'status' is alias for 'ps'."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            Path("config.yaml").write_text(sample_config.read_text())

            result = runner.invoke(cli, ["status"])

            assert result.exit_code == 0
|
|
|
|
# =============================================================================
|
|
# Up/Down Command Tests
|
|
# =============================================================================
|
|
|
|
|
|
class TestUpDownCommands:
    """Tests for up and down commands."""

    def test_up_no_config(self, runner, temp_dir):
        """`up` fails when no config file exists."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            res = runner.invoke(cli, ["up"])

            assert res.exit_code != 0

    def test_up_help(self, runner):
        """`up --help` documents the command and its key options."""
        res = runner.invoke(cli, ["up", "--help"])

        assert res.exit_code == 0
        assert "Start services" in res.output
        assert "--detach" in res.output
        assert "--scale" in res.output

    def test_down_no_daemon(self, runner, temp_dir, sample_config):
        """`down` exits cleanly even when nothing is running."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            Path("config.yaml").write_text(sample_config.read_text())

            res = runner.invoke(cli, ["down"])

            # No daemon is not an error condition.
            assert res.exit_code == 0

    def test_down_help(self, runner):
        """`down --help` documents the command and its key options."""
        res = runner.invoke(cli, ["down", "--help"])

        assert res.exit_code == 0
        assert "Stop" in res.output
        assert "--timeout" in res.output
|
|
|
|
# =============================================================================
|
|
# Start/Stop/Restart Command Tests
|
|
# =============================================================================
|
|
|
|
|
|
class TestServiceCommands:
    """Tests for start/stop/restart commands."""

    def test_start_help(self, runner):
        """`start --help` shows the command description."""
        res = runner.invoke(cli, ["start", "--help"])

        assert res.exit_code == 0
        assert "Start one or more services" in res.output

    def test_stop_help(self, runner):
        """`stop --help` shows the command description."""
        res = runner.invoke(cli, ["stop", "--help"])

        assert res.exit_code == 0
        assert "Stop one or more services" in res.output

    def test_restart_help(self, runner):
        """`restart --help` shows the command description."""
        res = runner.invoke(cli, ["restart", "--help"])

        assert res.exit_code == 0
        assert "Restart one or more services" in res.output

    def test_start_requires_services(self, runner, temp_dir, sample_config):
        """`start` with no service names is an error."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            Path("config.yaml").write_text(sample_config.read_text())

            res = runner.invoke(cli, ["start"])

            assert res.exit_code != 0

    def test_stop_requires_services(self, runner, temp_dir, sample_config):
        """`stop` with no service names is an error."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            Path("config.yaml").write_text(sample_config.read_text())

            res = runner.invoke(cli, ["stop"])

            assert res.exit_code != 0
|
|
|
|
|
# =============================================================================
|
|
# Logs Command Tests
|
|
# =============================================================================
|
|
|
|
|
|
class TestLogsCommand:
    """Tests for the logs command."""

    def test_logs_help(self, runner):
        """Logs command help lists the follow/tail options."""
        res = runner.invoke(cli, ["logs", "--help"])

        assert res.exit_code == 0
        for expected in ("View service logs", "--follow", "--tail"):
            assert expected in res.output

    def test_logs_no_services(self, runner, temp_dir, sample_config):
        """Logs succeeds even when no services are recorded."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            Path("config.yaml").write_text(sample_config.read_text())
            Path(".pyserve").mkdir()

            res = runner.invoke(cli, ["logs"])

            assert res.exit_code == 0

    def test_logs_with_log_file(self, runner, temp_dir, sample_config):
        """Logs prints the contents of an existing service log file."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            Path("config.yaml").write_text(sample_config.read_text())

            # Lay out the state directory with a log file for "api".
            state_dir = Path(".pyserve")
            logs_dir = state_dir / "logs"
            logs_dir.mkdir(parents=True)
            (logs_dir / "api.log").write_text(
                "2024-01-01 12:00:00 INFO: Test log\n"
            )

            # Hand-written state entry matching the on-disk schema.
            api_service = {
                "name": "api",
                "state": "running",
                "pid": 1234,
                "port": 9000,
                "workers": 1,
                "started_at": None,
                "restart_count": 0,
                "health": {},
                "config_hash": "",
            }
            state_data = {
                "version": "1.0",
                "project": "test",
                "config_file": "",
                "config_hash": "",
                "started_at": None,
                "daemon_pid": None,
                "services": {"api": api_service},
            }
            (state_dir / "state.json").write_text(json.dumps(state_data))

            res = runner.invoke(cli, ["logs", "api"])

            assert res.exit_code == 0
            assert "Test log" in res.output
|
|
|
|
|
|
# =============================================================================
|
|
# Health Command Tests
|
|
# =============================================================================
|
|
|
|
|
|
class TestHealthCommand:
    """Tests for the health command."""

    def test_health_help(self, runner):
        """Health command help lists the timeout option."""
        res = runner.invoke(cli, ["health", "--help"])

        assert res.exit_code == 0
        assert "Check health" in res.output
        assert "--timeout" in res.output

    def test_health_no_services(self, runner, temp_dir, sample_config):
        """Health reports when there are no services to check."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            Path("config.yaml").write_text(sample_config.read_text())
            Path(".pyserve").mkdir()

            res = runner.invoke(cli, ["health"])

            assert res.exit_code == 0
            assert "No services" in res.output
|
|
|
|
|
|
# =============================================================================
|
|
# Scale Command Tests
|
|
# =============================================================================
|
|
|
|
|
|
class TestScaleCommand:
    """Tests for the scale command."""

    def test_scale_help(self, runner):
        """Scale command help documents the SERVICE=NUM syntax."""
        res = runner.invoke(cli, ["scale", "--help"])

        assert res.exit_code == 0
        assert "Scale services" in res.output
        assert "SERVICE=NUM" in res.output

    def test_scale_invalid_format(self, runner, temp_dir, sample_config):
        """An argument without the '=NUM' suffix is rejected."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            Path("config.yaml").write_text(sample_config.read_text())

            # Bare "api" lacks the required =NUM part.
            res = runner.invoke(cli, ["scale", "api"])

            assert res.exit_code != 0

    def test_scale_requires_argument(self, runner, temp_dir, sample_config):
        """Scale invoked with no arguments at all is an error."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            Path("config.yaml").write_text(sample_config.read_text())

            res = runner.invoke(cli, ["scale"])

            assert res.exit_code != 0
|
|
|
|
|
|
# =============================================================================
|
|
# Top Command Tests
|
|
# =============================================================================
|
|
|
|
|
|
class TestTopCommand:
    """Tests for the top command."""

    def test_top_help(self, runner):
        """Top command help lists the refresh option."""
        res = runner.invoke(cli, ["top", "--help"])

        assert res.exit_code == 0
        assert "Live monitoring" in res.output
        assert "--refresh" in res.output

    def test_top_no_daemon(self, runner, temp_dir, sample_config):
        """Top reports no running services when the daemon is absent."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            Path("config.yaml").write_text(sample_config.read_text())
            Path(".pyserve").mkdir()

            res = runner.invoke(cli, ["top"])

            assert res.exit_code == 0
            assert "No services running" in res.output
|
|
|
|
|
|
# =============================================================================
|
|
# Service Runner Tests
|
|
# =============================================================================
|
|
|
|
|
|
class TestServiceRunner:
    """Tests for ServiceRunner class."""

    @staticmethod
    def _make_runner(config, base_dir):
        """Build a ServiceRunner whose state lives under base_dir/.pyserve."""
        from pyserve.ctl._runner import ServiceRunner

        return ServiceRunner(config, StateManager(base_dir / ".pyserve"))

    def test_parse_services_empty(self, temp_dir):
        """A default Config yields zero services."""
        from pyserve.config import Config

        svc_runner = self._make_runner(Config(), temp_dir)
        assert len(svc_runner.get_services()) == 0

    def test_parse_services_from_config(self, temp_dir, sample_config):
        """Service names and worker counts come from the YAML config."""
        from pyserve.config import Config

        svc_runner = self._make_runner(
            Config.from_yaml(str(sample_config)), temp_dir
        )
        services = svc_runner.get_services()

        assert "api" in services
        assert "admin" in services
        assert services["api"].workers == 2
        assert services["admin"].workers == 1

    def test_get_service(self, temp_dir, sample_config):
        """get_service returns a named service, or None when unknown."""
        from pyserve.config import Config

        svc_runner = self._make_runner(
            Config.from_yaml(str(sample_config)), temp_dir
        )

        service = svc_runner.get_service("api")
        assert service is not None
        assert service.name == "api"

        assert svc_runner.get_service("nonexistent") is None
|
|
|
|
|
|
# =============================================================================
|
|
# Integration Tests
|
|
# =============================================================================
|
|
|
|
|
|
class TestIntegration:
    """Integration tests for complete workflows."""

    def test_init_then_validate(self, runner, temp_dir):
        """A freshly generated config passes validation."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            assert runner.invoke(cli, ["init"]).exit_code == 0
            assert runner.invoke(cli, ["config", "validate"]).exit_code == 0

    def test_init_then_show_config(self, runner, temp_dir):
        """A generated orchestration-template config can be shown."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            init_res = runner.invoke(cli, ["init", "-t", "orchestration"])
            assert init_res.exit_code == 0

            show_res = runner.invoke(cli, ["config", "show"])
            assert show_res.exit_code == 0

    def test_state_persistence(self, runner, temp_dir):
        """State written via StateManager is visible to the ps command."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            runner.invoke(cli, ["init"])

            # Record a running service directly through the state layer.
            manager = StateManager(Path(".pyserve"))
            manager.update_service("api", state="running", pid=1234, port=9000)

            res = runner.invoke(cli, ["ps"])
            assert res.exit_code == 0
            assert "api" in res.output

    def test_environment_variables(self, runner, temp_dir, sample_config):
        """The PYSERVE_CONFIG env var selects the config file."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            Path("config.yaml").write_text(sample_config.read_text())

            res = runner.invoke(
                cli,
                ["config", "show"],
                env={"PYSERVE_CONFIG": "config.yaml"},
            )
            assert res.exit_code == 0
|
|
|
|
|
|
# =============================================================================
|
|
# Edge Cases and Error Handling
|
|
# =============================================================================
|
|
|
|
|
|
class TestEdgeCases:
    """Tests for edge cases and error handling."""

    def test_corrupted_state_file(self, runner, temp_dir, sample_config):
        """ps tolerates an unparseable state.json."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            Path("config.yaml").write_text(sample_config.read_text())

            state_dir = Path(".pyserve")
            state_dir.mkdir()
            (state_dir / "state.json").write_text("invalid json {{{")

            res = runner.invoke(cli, ["ps"])
            assert res.exit_code == 0

    def test_invalid_yaml_config(self, runner, temp_dir):
        """Validation fails on malformed YAML."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            Path("config.yaml").write_text("invalid: yaml: content:")

            res = runner.invoke(cli, ["config", "validate"])
            assert res.exit_code != 0

    def test_missing_required_fields(self, runner, temp_dir):
        """A minimal config with only optional sections still validates."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            Path("config.yaml").write_text("extensions: []")

            res = runner.invoke(cli, ["config", "validate"])
            assert res.exit_code == 0

    def test_empty_config_file(self, runner, temp_dir):
        """Validation rejects an entirely empty config file."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            Path("config.yaml").write_text("")

            res = runner.invoke(cli, ["config", "validate"])
            assert res.exit_code != 0

    def test_nonexistent_service(self, runner, temp_dir, sample_config):
        """Requesting logs for an unknown service does not crash."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            Path("config.yaml").write_text(sample_config.read_text())
            Path(".pyserve").mkdir()

            res = runner.invoke(cli, ["logs", "nonexistent"])
            assert res.exit_code == 0

    def test_special_characters_in_service_name(self, runner, temp_dir):
        """Hyphenated and underscored service names are handled."""
        with runner.isolated_filesystem(temp_dir=str(temp_dir)):
            runner.invoke(cli, ["init"])

            manager = StateManager(Path(".pyserve"))
            manager.update_service("api-v2", state="running")
            manager.update_service("admin_service", state="stopped")

            res = runner.invoke(cli, ["ps", "-a"])
            assert res.exit_code == 0
|
|
|
|
|
|
if __name__ == "__main__":
    # Propagate pytest's exit status so running this file as a script
    # reports failure to the shell/CI instead of always exiting 0.
    raise SystemExit(pytest.main([__file__, "-v"]))
|