Commit
fix: fixed some missing engine attributes;
tests: fixed testing environment.
luccadibe committed Dec 8, 2024
1 parent 7075ea3 commit a7fcf7c
Showing 9 changed files with 220 additions and 18 deletions.
4 changes: 1 addition & 3 deletions backend/.dockerignore
@@ -1,3 +1 @@
.venv
mnt/
locust/
.venv
1 change: 1 addition & 0 deletions backend/.gitignore
@@ -0,0 +1 @@
mnt/
6 changes: 3 additions & 3 deletions backend/Dockerfile.test
@@ -8,12 +8,12 @@ WORKDIR /backend
# Copy project files
COPY . /backend


RUN uv sync --frozen --no-cache
RUN uv pip install ".[test]"

RUN uv pip install httpx uvicorn[standard] anyio
# Create data directories
RUN mkdir -p /mnt/oxn-data/experiments


# Default command runs tests
CMD ["uv", "run","pytest", "tests/", "-v"]
CMD ["uv", "run", "pytest", "test_main.py", "-v", "--tb=short", "-s"]
2 changes: 1 addition & 1 deletion backend/internal/engine.py
@@ -34,7 +34,7 @@ def __init__(self, configuration_path=None, report_path=None, out_path=None, out
assert configuration_path is not None, "Configuration path must be specified"
self.config = configuration_path
"""The path to the configuration file for this engine"""
self.spec = None
self.spec = spec
"""The loaded experiment specification"""
self.report_path = report_path
"""The path to write the experiment report to"""
31 changes: 26 additions & 5 deletions backend/internal/experiment_manager.py
@@ -7,6 +7,7 @@
import logging

from backend.internal.engine import Engine
from backend.internal.kubernetes_orchestrator import KubernetesOrchestrator

logger = logging.getLogger(__name__)

@@ -15,13 +16,16 @@ def __init__(self, base_path):
self.base_path = Path(base_path)
self.experiments_dir = self.base_path / 'experiments'
self.lock_file = self.base_path / '.lock'
self.counter = 0

# Ensure directories exist
self.experiments_dir.mkdir(parents=True, exist_ok=True)

def create_experiment(self, name, config):
"""Create new experiment directory and config file"""
experiment_id = str(int(time.time()))
self.acquire_lock()
experiment_id = str(self.counter) + str(int(time.time()))
self.counter += 1
experiment_dir = self.experiments_dir / experiment_id

experiment = {
@@ -50,15 +54,19 @@ def create_experiment(self, name, config):
with open(experiment_dir / 'experiment.json', 'w') as f:
json.dump(experiment, f, indent=2)

self.release_lock()
return experiment

def get_experiment(self, experiment_id):
"""Get experiment config"""
self.acquire_lock()
try:
with open(self.experiments_dir / experiment_id / 'experiment.json') as f:
return json.load(f)
except FileNotFoundError:
return None
finally:
self.release_lock()

def run_experiment(self, experiment_id, output_format, runs):
"""Run experiment"""
@@ -67,7 +75,17 @@ def run_experiment(self, experiment_id, output_format, runs):
experiment = self.get_experiment(experiment_id)['spec']
report_path = self.experiments_dir / experiment_id / 'report'
out_path = self.experiments_dir / experiment_id / 'data'
engine = Engine(configuration_path=None, report_path=report_path, out_path=out_path, out_formats=[output_format], orchestrator_class=None, spec=experiment)

orchestrator = KubernetesOrchestrator(experiment_config=experiment)

engine = Engine(
configuration_path=experiment,
report_path=report_path,
out_path=out_path,
out_formats=[output_format],
orchestrator_class=orchestrator,
spec=experiment
)

engine.run(runs=runs, orchestration_timeout=None, randomize=False, accounting=False)

@@ -100,9 +118,12 @@ def list_experiments(self):
def acquire_lock(self):
"""File-based locking using fcntl"""
try:
self.lock_fd = open(self.lock_file, 'w')
fcntl.flock(self.lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
return True
            # store the lock file descriptor as an instance attribute if it is not already open
if not hasattr(self, 'lock_fd'):
self.lock_fd = open(self.lock_file, 'w')
fcntl.flock(self.lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
return True
return False # lock is held
except (IOError, BlockingIOError):
return False

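Aside: the counter and lock handling added to create_experiment above depends on acquire_lock and release_lock behaving as a non-blocking fcntl file lock. release_lock is not part of this diff, so the counterpart below is assumed; this is only a minimal sketch of the pattern, not the project's implementation.

import fcntl
from pathlib import Path

class FileLock:
    """Minimal sketch of the fcntl-based lock pattern used by ExperimentManager (assumed API)."""

    def __init__(self, lock_file: Path):
        self.lock_file = lock_file
        self.lock_fd = None

    def acquire(self) -> bool:
        # Take a non-blocking exclusive lock; BlockingIOError/IOError means
        # another process currently holds it.
        if self.lock_fd is not None:
            return False
        try:
            self.lock_fd = open(self.lock_file, 'w')
            fcntl.flock(self.lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            return True
        except (IOError, BlockingIOError):
            if self.lock_fd is not None:
                self.lock_fd.close()
                self.lock_fd = None
            return False

    def release(self) -> None:
        # Drop the lock and close the descriptor so acquire() can succeed again.
        if self.lock_fd is not None:
            fcntl.flock(self.lock_fd, fcntl.LOCK_UN)
            self.lock_fd.close()
            self.lock_fd = None

Typical usage would wrap the critical section with acquire() and call release() in a finally block, mirroring the try/finally added to get_experiment above.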
9 changes: 8 additions & 1 deletion backend/main.py
@@ -126,11 +126,18 @@ async def list_experiments(
"""
List all experiments
"""
return experiment_manager.list_experiments()
experiments = experiment_manager.list_experiments()
# Convert dict to list for response validation
return list(experiments.values())


@app.get("/health")
async def health_check():
"""Simple health check endpoint"""
return {"status": "healthy"}

@app.get("/experiments/{experiment_id}/config")
async def get_experiment_config(experiment_id: str):
"""Get experiment configuration"""
return experiment_manager.get_experiment(experiment_id)

2 changes: 1 addition & 1 deletion backend/pyproject.toml
@@ -39,4 +39,4 @@ test = [
]
[tool.pytest.ini_options]
pythonpath = ["."]
asyncio_default_fixture_loop_scope = "session"
asyncio_default_fixture_loop_scope = "session"
171 changes: 171 additions & 0 deletions backend/test_main.py
@@ -0,0 +1,171 @@
from fastapi.testclient import TestClient
import pytest
import asyncio
import time
from httpx import ASGITransport, AsyncClient
from .main import app
print("attempting to create client")
client = TestClient(app)
print("Client created")
@pytest.fixture
def sample_config():
"""Sample experiment configuration"""
return {
"experiment": {
"name": "big",
"version": "0.0.1",
"orchestrator": "kubernetes",
"services": {
"jaeger": {
"name": "astronomy-shop-jaeger-query",
"namespace": "system-under-evaluation"
},
"prometheus": [
{
"name": "astronomy-shop-prometheus-server",
"namespace": "system-under-evaluation",
"target": "sue"
},
{
"name": "kube-prometheus-kube-prome-prometheus",
"namespace": "oxn-external-monitoring",
"target": "oxn"
}
]
},
"responses": [
{
"name": "frontend_traces",
"type": "trace",
"service_name": "frontend",
"left_window": "10s",
"right_window": "10s",
"limit": 1
},
{
"name": "system_CPU",
"type": "metric",
"metric_name": "sum(rate(container_cpu_usage_seconds_total{namespace=\"system-under-evaluation\"}[1m]))",
"left_window": "10s",
"right_window": "10s",
"step": 1,
"target": "oxn"
},
{
"name": "recommendation_deployment_CPU",
"type": "metric",
"metric_name": "sum(rate(container_cpu_usage_seconds_total{namespace=\"system-under-evaluation\", pod=~\"astronomy-shop-recommendationservice.*\"}[90s])) by (pod)",
"left_window": "10s",
"right_window": "10s",
"step": 1,
"target": "oxn"
}
],
"treatments": [
{
"name": "empty_treatment",
"action": "empty",
"params": { "duration": "1m" }
}
],
"sue": {
"compose": "opentelemetry-demo/docker-compose.yml",
"exclude": ["loadgenerator"],
"required": [
{ "namespace": "system-under-evaluation", "name": "astronomy-shop-prometheus-server" }
]
},
"loadgen": {
"run_time": "20m",
"max_users": 500,
"spawn_rate": 50,
"locust_files": ["/backend/locust/locust_basic_interaction.py", "/backend/locust/locust_otel_demo.py"],
"target": { "name": "astronomy-shop-frontendproxy", "namespace": "system-under-evaluation", "port": 8080 }
}
}
}
@pytest.fixture
def anyio_backend():
return 'asyncio'

@pytest.mark.anyio
async def test_create_experiment(sample_config):
print("Creating experiment")
async with AsyncClient(
transport=ASGITransport(app=app), base_url="http://test"
) as ac:
response = await ac.post("/experiments", json={"name": "Test Experiment 1", "config": sample_config})
assert response.status_code == 200
assert response.json()["name"] == "Test Experiment 1"


@pytest.mark.anyio
async def test_list_experiments(sample_config):
print("Listing experiments")
async with AsyncClient(
transport=ASGITransport(app=app), base_url="http://test"
) as ac:
# Create first test experiment
await ac.post("/experiments", json={"name": "List Test 1", "config": sample_config})

# Get initial list
response = await ac.get("/experiments")
initial_count = len(response.json())
assert response.status_code == 200
assert initial_count > 0

# Create second test experiment
await ac.post("/experiments", json={"name": "List Test 2", "config": sample_config})

# Get updated list
response = await ac.get("/experiments")
assert response.status_code == 200
assert len(response.json()) == initial_count + 1


@pytest.mark.anyio
async def test_get_experiment_config(sample_config):
async with AsyncClient(
transport=ASGITransport(app=app), base_url="http://test"
) as ac:
create_response = await ac.post("/experiments", json={"name": "Test Experiment 2", "config": sample_config})
id = create_response.json()["id"]
print(f"Created experiment with id: {id}")
response = await ac.get(f"/experiments/{id}/config")
assert response.status_code == 200


@pytest.mark.anyio
async def test_run_experiment(sample_config):
async with AsyncClient(
transport=ASGITransport(app=app), base_url="http://test"
) as ac:
create_response = await ac.post("/experiments", json={"name": "Test Experiment 3", "config": sample_config})
id = create_response.json()["id"]
response = await ac.post(f"/experiments/{id}/run", json={"output_format": "json", "runs": 1})
assert response.status_code == 200
assert response.json()["status"] == "accepted"


@pytest.mark.anyio
async def test_get_experiment_status(sample_config):
async with AsyncClient(
transport=ASGITransport(app=app), base_url="http://test"
) as ac:
create_response = await ac.post("/experiments", json={"name": "Test Experiment 4", "config": sample_config})
id = create_response.json()["id"]
response = await ac.get(f"/experiments/{id}/status")
assert response.status_code == 200
assert response.json()["status"] == "PENDING"

await ac.post(f"/experiments/{id}/run", json={"output_format": "json", "runs": 1})

# Poll status with timeout
start_time = time.time()
while time.time() - start_time < 30:
response = await ac.get(f"/experiments/{id}/status")
if response.json()["status"] == "RUNNING":
break
await asyncio.sleep(0.5)

assert response.status_code == 200
assert response.json()["status"] == "RUNNING"
12 changes: 8 additions & 4 deletions backend/tests/test_experiment_manager.py
@@ -1,10 +1,14 @@
import time
import unittest.mock
import pytest
from pathlib import Path
import json
import shutil
import tempfile
from backend.internal.experiment_manager import ExperimentManager



@pytest.fixture
def test_dir():
"""Create a temporary directory for test data"""
@@ -165,6 +169,7 @@ def test_list_experiments(experiment_manager, sample_config):
"""Test listing all experiments"""
# Create a few experiments
exp1 = experiment_manager.create_experiment("Test 1", sample_config)
time.sleep(1)
exp2 = experiment_manager.create_experiment("Test 2", sample_config)

experiments = experiment_manager.list_experiments()
@@ -192,8 +197,8 @@ async def test_run_experiment(experiment_manager, sample_config):
config=sample_config
)

# Mock the Engine class to avoid actual execution
with pytest.mock.patch('backend.internal.engine.Engine') as MockEngine:
# Engine class mock
with unittest.mock.patch('backend.internal.engine.Engine') as MockEngine:
mock_engine = MockEngine.return_value

experiment_manager.run_experiment(
@@ -202,11 +207,10 @@
runs=1
)

# Verify Engine was called with correct parameters
MockEngine.assert_called_once()
mock_engine.run.assert_called_once_with(
runs=1,
orchestration_timeout=None,
randomize=False,
accounting=False
)
)
