# ASF_tools/asf-cloud-server/testarena/backend/app/tasks.py
import asyncio
import json
import os
import shutil
import subprocess
import time

from . import crud, database
from .socket_manager import manager

# In Docker, scripts live at /app/scripts; for local testing, SCRIPTS_DIR can
# be overridden via the environment, defaulting to a relative "scripts" dir.
SCRIPTS_DIR = os.getenv("SCRIPTS_DIR", "scripts")
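
# A minimal usage sketch (the exact deployment values are assumptions, not
# confirmed by this file):
#   SCRIPTS_DIR=/app/scripts uvicorn app.main:app   # explicit absolute path
#   uvicorn app.main:app                            # falls back to ./scripts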

def get_scenarios(branch_name: str):
    try:
        # In a real deployment we might need a `git checkout` of the branch
        # first; for now, just run the script directly. Relative paths are
        # resolved from the backend root.
        script_path = os.path.join(os.getcwd(), SCRIPTS_DIR, "get_scenarios.sh")
        if not os.path.exists(script_path):
            # Fall back to the absolute path used inside the Docker image.
            script_path = "/app/scripts/get_scenarios.sh"
        result = subprocess.run(
            [script_path, branch_name],
            capture_output=True,
            text=True,
            check=True,
        )
        # The script is expected to print JSON on stdout.
        return json.loads(result.stdout)
    except subprocess.CalledProcessError as e:
        print(f"Error getting scenarios: {e.stderr}")
        return []
    except Exception as e:
        print(f"Error getting scenarios: {e}")
        return []
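
# A minimal sketch of the contract assumed above: get_scenarios.sh prints a
# JSON array on stdout (the exact shape is an assumption; the script itself
# is not part of this module).
#
#   $ scripts/get_scenarios.sh feature/login
#   ["smoke_login", "smoke_logout"]
#
#   >>> get_scenarios("feature/login")
#   ['smoke_login', 'smoke_logout']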

async def run_job_task(job_id: int):
    db = database.SessionLocal()
    try:
        job = crud.get_job(db, job_id)
        if not job:
            return
        crud.update_job_status(db, job_id, "running")
        await manager.broadcast(json.dumps({"type": "job_update", "job_id": job_id, "status": "running"}))

        # Invoke the runner script:
        #   run_tests.sh <branch> <scenarios_json> <env> <mode> <job_id>
        scenarios_str = json.dumps(job.scenarios)
        script_path = os.path.join(os.getcwd(), SCRIPTS_DIR, "run_tests.sh")
        if not os.path.exists(script_path):
            # Fall back to the absolute path used inside the Docker image.
            script_path = "/app/scripts/run_tests.sh"

        started = time.monotonic()
        process = await asyncio.create_subprocess_exec(
            script_path,
            job.branch_name,
            scenarios_str,
            job.environment,
            job.test_mode,
            str(job_id),
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )
        stdout, stderr = await process.communicate()
        elapsed = int(time.monotonic() - started)
        duration = f"{elapsed // 60}m {elapsed % 60}s"

        if process.returncode == 0:
            # The script is expected to write its report to the shared volume
            # (mounted at /app/results in Docker) as /results/{job_id}/index.html.
            result_path = f"/results/{job_id}/index.html"
            crud.update_job_status(db, job_id, "passed", result_path=result_path, duration=duration)
            await manager.broadcast(json.dumps({"type": "job_update", "job_id": job_id, "status": "passed"}))
        else:
            print(f"Script failed: {stderr.decode()}")
            crud.update_job_status(db, job_id, "failed")
            await manager.broadcast(json.dumps({"type": "job_update", "job_id": job_id, "status": "failed"}))
    except Exception as e:
        print(f"Job failed: {e}")
        crud.update_job_status(db, job_id, "failed")
        await manager.broadcast(json.dumps({"type": "job_update", "job_id": job_id, "status": "failed"}))
    finally:
        db.close()
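
# Hypothetical caller sketch (names like `app`, `JobCreate`, `get_db`, and
# `crud.create_job` are assumptions, not confirmed by this module): an API
# route would create the job row, then schedule this coroutine without
# blocking the request.
#
#   @app.post("/jobs")
#   async def create_job(payload: JobCreate, db: Session = Depends(get_db)):
#       job = crud.create_job(db, payload)
#       asyncio.create_task(run_job_task(job.id))  # fire-and-forget
#       return {"job_id": job.id, "status": "queued"}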

async def cleanup_old_results():
    while True:
        try:
            print("Running cleanup...")
            results_dir = "/results"
            if os.path.exists(results_dir):
                now = time.time()
                for job_id in os.listdir(results_dir):
                    job_path = os.path.join(results_dir, job_id)
                    if os.path.isdir(job_path):
                        mtime = os.path.getmtime(job_path)
                        if now - mtime > 7 * 86400:  # older than 7 days
                            print(f"Deleting old result: {job_path}")
                            shutil.rmtree(job_path)
        except Exception as e:
            print(f"Cleanup error: {e}")
        await asyncio.sleep(86400)  # run once every 24 hours
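
# Hypothetical wiring sketch (assumed; the actual startup code is not shown
# here): since cleanup_old_results() loops forever, it has to be launched
# once at application startup, e.g. from a FastAPI startup hook in main.py:
#
#   @app.on_event("startup")
#   async def start_background_tasks():
#       asyncio.create_task(cleanup_old_results())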