This commit is contained in:
2025-12-27 01:13:30 +01:00
commit aa1f5e30d9
14 changed files with 1500 additions and 0 deletions

44
deploy.sh Normal file
View File

@@ -0,0 +1,44 @@
#!/bin/bash
# TestArena Deployment Script
# Run this script with sudo: sudo ./deploy.sh
set -e

# Fail fast when not running as root: every step below (apt, /etc/nginx,
# systemctl, chown) requires it. The original silently assumed sudo.
if [ "$(id -u)" -ne 0 ]; then
  echo "This script must be run as root: sudo ./deploy.sh" >&2
  exit 1
fi

echo "🚀 Starting TestArena Deployment..."

# 1. Install Dependencies
echo "📦 Installing dependencies..."
apt-get update
apt-get install -y nginx python3-pip python3-venv

# 2. Set up Python Virtual Environment
echo "🐍 Setting up Python environment..."
python3 -m venv venv
# Call the venv's pip directly instead of `source venv/bin/activate`:
# activation is unnecessary in a non-interactive script and leaks the venv
# into all subsequent commands.
venv/bin/pip install fastapi uvicorn sqlalchemy

# 3. Configure Nginx
echo "🌐 Configuring Nginx..."
cp nginx/testarena.conf /etc/nginx/sites-available/testarena
ln -sf /etc/nginx/sites-available/testarena /etc/nginx/sites-enabled/
rm -f /etc/nginx/sites-enabled/default

# 4. Create Data Directory
echo "📁 Creating data directory..."
mkdir -p /home/asf/testarena
chown -R asf:asf /home/asf/testarena
chmod -R 755 /home/asf/testarena

# 5. Restart Nginx — validate the config first so a bad config never
# takes down a previously-working server.
echo "🔄 Restarting Nginx..."
nginx -t
systemctl restart nginx

echo "✅ Deployment complete!"
echo "--------------------------------------------------"
echo "Dashboard: http://asf-server.duckdns.org:8080/"
echo "Results: http://asf-server.duckdns.org:8080/results/"
echo "--------------------------------------------------"
echo "To start the app: source venv/bin/activate && uvicorn testarena_app.main:app --host 0.0.0.0 --port 8000"
echo "To start the worker: source venv/bin/activate && python3 -m testarena_app.worker"

80
deployment_guide.md Normal file
View File

@@ -0,0 +1,80 @@
# TestArena Deployment & Testing Guide
This guide explains how to deploy and test the TestArena backend application on your Ubuntu Server.
## 🚀 Deployment Steps
### 1. Clone the Repository
Ensure you have the code on your server in a directory like `/home/asf/testarena_pc_backend`.
### 2. Run the Deployment Script
The deployment script automates Nginx configuration and dependency installation.
```bash
sudo chmod +x deploy.sh
sudo ./deploy.sh
```
### 3. Start the Application Services
Run these either in the background or under a process manager such as `pm2` or `systemd`, so they survive terminal disconnects.
**Start the API Server:**
```bash
source venv/bin/activate
uvicorn testarena_app.main:app --host 0.0.0.0 --port 8000
```
**Start the Background Worker:**
```bash
source venv/bin/activate
python3 -m testarena_app.worker
```
---
## 🧪 Testing the System
### 1. Verify Dashboard Access
Open your browser and navigate to:
`http://asf-server.duckdns.org:8080/`
You should see the modern, colorful TestArena dashboard.
### 2. Verify Results Browsing
Navigate to:
`http://asf-server.duckdns.org:8080/results/`
You should see an automatic directory listing of `/home/asf/testarena/`.
### 3. Test the Queue API
Run the following `curl` command to queue a test task:
```bash
curl -X POST http://asf-server.duckdns.org:8080/api/queue \
-H "Content-Type: application/json" \
-d '{
"test_queue_001": [
"staging",
{
"task_1": "/home/asf/scenarios/test1.py",
"task_2": "/home/asf/scenarios/test2.py"
}
]
}'
```
### 4. Verify Worker Execution
- Check the dashboard; you should see the new queue appear and its status change from `Waiting` to `Running` and then `Finished`.
- Check the filesystem:
```bash
ls -R /home/asf/testarena/test_queue_001
```
You should see `queue_status.json` and any results generated by `tpf_execution.py`.
### 5. Test Abortion
Queue another task and click the **Abort** button on the dashboard. Verify that the status changes to `Aborted` in both the dashboard and the `queue_status.json` file.
---
## 🛠️ Troubleshooting
- **Nginx Errors**: Check logs with `sudo tail -f /var/log/nginx/error.log`.
- **FastAPI Errors**: Check the terminal where `uvicorn` is running.
- **Permission Issues**: Ensure `/home/asf/testarena` is writable by the user running the app.
- **Port 8080 Blocked**: Ensure your firewall (ufw) allows traffic on port 8080: `sudo ufw allow 8080`.

124
gitea_repo_controller.sh Normal file
View File

@@ -0,0 +1,124 @@
#!/bin/bash
set -e
# ----------------------------------------
# 1. Configuration (UPDATE IF NEEDED)
# ----------------------------------------
# SECURITY NOTE(review): credentials are hardcoded in this script and are
# also embedded into the clone URL below, so they can end up in the process
# list and in the clone's .git/config. Consider a git credential helper or
# environment variables instead.
GIT_USERNAME="asfautomation"
GIT_PASSWORD="asfautomation"
REPO_HOST="gitea.nabd-co.com"
REPO_PATH="ASF-Nabd/ASF-SH"
# Local directory the repository is cloned into (relative to the CWD).
TARGET_DIR="TPF/Sensor_hub_repo"
urlencode() {
perl -pe 's/([^a-zA-Z0-9_.-])/sprintf("%%%02x", ord($1))/ge'
}
# Percent-encode the credentials so special characters survive URL embedding.
ENCODED_USERNAME=$(printf '%s' "${GIT_USERNAME}" | urlencode)
ENCODED_PASSWORD=$(printf '%s' "${GIT_PASSWORD}" | urlencode)
# NOTE(review): embedding credentials in the URL exposes them in the process
# list and in .git/config once cloned.
AUTH_URL="https://${ENCODED_USERNAME}:${ENCODED_PASSWORD}@${REPO_HOST}/${REPO_PATH}.git"
# ----------------------------------------
# 3. Command & Arguments
# ----------------------------------------
# $1: clone | checkout | delete; $2: branch name (checkout only).
COMMAND="$1"
BRANCH_NAME="$2"
clone_repo() {
  # Clone the repository into TARGET_DIR; no-op when a checkout already exists.
  if [ ! -d "${TARGET_DIR}" ]; then
    echo "📥 Cloning repository..."
    git clone "${AUTH_URL}" "${TARGET_DIR}"
    echo "✅ Clone completed."
  else
    echo " Repository already exists. Skipping clone."
  fi
}
checkout_branch() {
  # Switch the checkout to BRANCH_NAME rebased onto the latest main.
  # Requires BRANCH_NAME ($2) and a prior clone into TARGET_DIR.
  if [ -z "${BRANCH_NAME}" ]; then
    echo "❌ Branch name is required for checkout."
    exit 1
  fi
  if [ ! -d "${TARGET_DIR}" ]; then
    echo "❌ Repository not found. Run clone first."
    exit 1
  fi
  cd "${TARGET_DIR}"
  echo "📦 Stashing local changes (including untracked)..."
  # `|| true`: stash fails when there is nothing to stash; that is fine.
  git stash push -u -m "automation-stash-before-checkout" || true
  echo "🔄 Fetching latest changes..."
  git fetch origin
  echo "🌿 Checking out main branch..."
  git checkout main
  echo "⬇️ Pulling latest main..."
  git pull "${AUTH_URL}" main
  echo "🌿 Checking out target branch: ${BRANCH_NAME}"
  if git show-ref --verify --quiet "refs/heads/${BRANCH_NAME}"; then
    git checkout "${BRANCH_NAME}"
  else
    git checkout -b "${BRANCH_NAME}" "origin/${BRANCH_NAME}"
  fi
  echo "⬆️ Rebasing '${BRANCH_NAME}' onto latest main..."
  # FIX: under `set -e` a failed rebase used to abort the script mid-rebase,
  # leaving the work tree in a broken state for the next run. Abort the
  # rebase explicitly before exiting.
  if ! git rebase main; then
    echo "❌ Rebase failed; aborting rebase to leave the repo clean." >&2
    git rebase --abort || true
    exit 1
  fi
  cd - >/dev/null
  echo "✅ Checkout and rebase completed successfully."
}
delete_repo() {
  # Remove the cloned repository directory, if one exists.
  if [ ! -d "${TARGET_DIR}" ]; then
    echo " Repository directory does not exist."
    return 0
  fi
  echo "🗑️ Deleting repository directory..."
  rm -rf "${TARGET_DIR}"
  echo "✅ Repository deleted."
}
# ----------------------------------------
# 5. Main Execution
# ----------------------------------------
# Dispatch on the requested sub-command; unknown commands print usage.
if [ "${COMMAND}" = "clone" ]; then
  clone_repo
elif [ "${COMMAND}" = "checkout" ]; then
  checkout_branch
elif [ "${COMMAND}" = "delete" ]; then
  delete_repo
else
  echo "❌ Invalid command."
  echo "Usage:"
  echo " $0 clone"
  echo " $0 checkout <branch>"
  echo " $0 delete"
  exit 1
fi

echo "----------------------------------------"
echo "✔ Automation script finished successfully"
echo "----------------------------------------"

58
nginx/testarena.conf Normal file
View File

@@ -0,0 +1,58 @@
# TestArena Nginx Configuration
# This file should be placed in /etc/nginx/sites-available/testarena
# and symlinked to /etc/nginx/sites-enabled/testarena
server {
    # Public entry point; all dashboard/API/results traffic comes in on 8080.
    listen 8080;
    server_name _;

    # Security: Prevent directory traversal and restrict symlinks
    disable_symlinks on;

    # Root directory for the results (autoindex)
    location /results/ {
        # Serve the shared results tree written by the worker/executor.
        alias /home/asf/testarena/;
        # Enable autoindex with requested features
        autoindex on;
        autoindex_exact_size off; # Human-readable sizes
        autoindex_localtime on; # Local time
        # Read-only access
        limit_except GET {
            deny all;
        }
        # Prevent execution of scripts — returns 403 for script-like files
        # instead of serving them.
        location ~* \.(php|pl|py|sh|cgi)$ {
            return 403;
        }
    }

    # Proxy requests to the FastAPI application
    location / {
        proxy_pass http://127.0.0.1:8000;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        # WebSocket support (if needed in future)
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }

    # Custom error pages
    error_page 404 /404.html;
    location = /404.html {
        root /usr/share/nginx/html;
        internal;
    }
    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
        root /usr/share/nginx/html;
        internal;
    }
}

103
scenario_exe_parser.py Normal file
View File

@@ -0,0 +1,103 @@
import xml.etree.ElementTree as ET
import os
import sys
import json
from collections import defaultdict
from pathlib import Path

# Get the directory of the current Python file
current_directory = os.path.dirname(os.path.abspath(__file__))
# The checked-out sensor-hub repo sits beside this script; all
# *.test_scenario.xml files live under its components/ tree.
COMPONENT_DIR = os.path.join(current_directory, "Sensor_hub_repo", "components")
def finalize_output(data_obj):
    """Normalize nested defaultdicts into plain dicts via a JSON round-trip.

    Serializing and re-parsing strips defaultdict factory metadata so the
    result prints and compares like an ordinary dict.
    """
    return json.loads(json.dumps(data_obj))
def parse_test_scenario(xml_file_path):
    """Parse a test-scenario XML file into a config + test-case mapping.

    Args:
        xml_file_path (str): Path to the ``*.test_scenario.xml`` file.

    Returns:
        dict: ``{'config': <str>, 'test_cases': {<id>: <exec_cmd>, ...}}``.
        An empty dict when the file is missing or cannot be parsed
        (errors are reported on stdout rather than raised).
    """
    if not os.path.exists(xml_file_path):
        print(f"Error: File not found at '{xml_file_path}'")
        return {}

    try:
        root = ET.parse(xml_file_path).getroot()
    except ET.ParseError as e:
        print(f"Error: Failed to parse XML file. Details: {e}")
        return {}
    except Exception as e:
        print(f"An unexpected error occurred during file parsing: {e}")
        return {}

    def text_or(element, fallback):
        # Safely pull stripped text out of an optional XML element.
        if element is not None and element.text:
            return element.text.strip()
        return fallback

    parsed = {
        'config': text_or(root.find('config'), ''),
        'test_cases': {},
    }
    for case in root.findall('test_case'):
        case_id = text_or(case.find('test_case_id'), "UNKNOWN_ID")
        parsed['test_cases'][case_id] = text_or(case.find('test_exec'), "UNKNOWN_EXEC")
    return parsed
if __name__ == "__main__":
    # Default scenario used when no CLI argument is supplied.
    default_test_file = 'sample_scenario.xml'
    # BUG FIX: the original fell back to `print({})`, which evaluates to None
    # and made os.path.join() below raise TypeError. Fall back to the default
    # file (which was defined but never used) instead.
    file_to_check = sys.argv[1] if len(sys.argv) > 1 else default_test_file
    file_path = os.path.join(COMPONENT_DIR, file_to_check)

    print(f"--- XML Test Scenario Parser ---")
    print(f"Parsing file: {file_to_check}\n")

    # Run the parser and emit the normalized result on stdout.
    scenario_data = parse_test_scenario(file_path)
    print(finalize_output(scenario_data))

165
scenario_execution.py Normal file
View File

@@ -0,0 +1,165 @@
# FIX: the original imported os/sys/json/subprocess twice; keep one copy each.
import os
import sys
import json
import subprocess

from scenario_exe_parser import parse_test_scenario

# --- Global Paths ---
current_directory = os.path.dirname(os.path.abspath(__file__))
# Local checkout of the sensor-hub repo; scenarios live under components/.
REPO_PATH = os.path.join(current_directory, "Sensor_hub_repo")
COMPONENT_DIR = os.path.join(REPO_PATH, "components")
# Results root on the deployment server (served by nginx under /results/).
RESULT_PATH = "/home/asf/testarena"
# The HTML Template for execution_report.html. The placeholders
# {{scenario_name}}, {{overall_status}}, {{overall_class}} and
# {{test_case_rows}} are substituted via str.replace in generate_html_report.
REPORT_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>ESP32 Test Execution Report</title>
<style>
body { font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; margin: 40px; background-color: #f4f7f6; }
h2 { color: #333; border-bottom: 2px solid #ccc; padding-bottom: 10px; }
table { width: 100%; border-collapse: collapse; margin: 20px 0; background-color: #fff; box-shadow: 0 2px 5px rgba(0,0,0,0.1); }
th, td { padding: 12px 15px; text-align: left; border-bottom: 1px solid #ddd; }
th { background-color: #2c3e50; color: white; text-transform: uppercase; letter-spacing: 0.1em; }
.status-pass { color: #ffffff; background-color: #27ae60; padding: 4px 12px; border-radius: 4px; font-weight: bold; }
.status-fail { color: #ffffff; background-color: #c0392b; padding: 4px 12px; border-radius: 4px; font-weight: bold; }
a { color: #2980b9; text-decoration: none; font-weight: bold; }
a:hover { text-decoration: underline; }
tr:hover { background-color: #f1f1f1; }
</style>
</head>
<body>
<h2>Overall Scenario Summary</h2>
<table>
<thead>
<tr>
<th>Scenario Name</th>
<th>Final Result</th>
</tr>
</thead>
<tbody>
<tr>
<td>{{scenario_name}}</td>
<td><span class="{{overall_class}}">{{overall_status}}</span></td>
</tr>
</tbody>
</table>
<h2>Detailed Test Cases</h2>
<table>
<thead>
<tr>
<th>Test Case ID</th>
<th>Result</th>
<th>Execution Log</th>
</tr>
</thead>
<tbody>
{{test_case_rows}}
</tbody>
</table>
</body>
</html>
"""
def run_test_suite(tasks):
    """Run each task through the shell runner and collect per-case verdicts.

    Args:
        tasks: list of dicts with keys 'id', 'cmd' and 'path'.

    Returns:
        dict: {test_case_id: [status, evidence_url]} aggregated across tasks;
        a task whose output carried no valid marker line is recorded as
        ["ERROR", "N/A"].
    """
    shell_script = "./TPF/test_execution.sh"
    # Make sure the runner script is executable on POSIX hosts.
    if os.name != 'nt':
        subprocess.run(["chmod", "+x", shell_script])

    aggregated_results = {}
    for task in tasks:
        print(f"--- Starting Task: {task['id']} ---")
        proc = subprocess.run(
            [shell_script, task['id'], task['cmd'], task['path'], REPO_PATH],
            capture_output=True, text=True
        )
        print(proc.stdout)

        # The runner emits its verdict on a single marker line.
        json_found = False
        for line in proc.stdout.splitlines():
            if not line.startswith("FINAL_JSON_OUTPUT:"):
                continue
            payload = line.replace("FINAL_JSON_OUTPUT:", "").strip()
            try:
                aggregated_results.update(json.loads(payload))
                json_found = True
            except json.JSONDecodeError as e:
                print(f"!!! JSON Parsing Error: {e}")
        if not json_found:
            aggregated_results[task['id']] = ["ERROR", "N/A"]
    return aggregated_results
def generate_html_report(scenario_name, results, output_path):
    """Render REPORT_TEMPLATE with the run outcome and write it to disk.

    Args:
        scenario_name: Display name shown in the summary row.
        results: {test_case_id: [status, log_url]} mapping.
        output_path: Directory that receives execution_report.html.
    """
    all_passed = all(info[0] == "PASS" for info in results.values())

    rows = []
    for tc_id, info in results.items():
        status, log_url = info[0], info[1]
        css_class = "status-pass" if status == "PASS" else "status-fail"
        rows.append(
            f"""
            <tr>
                <td>{tc_id}</td>
                <td><span class="{css_class}">{status}</span></td>
                <td><a href="{log_url}" target="_blank">View Log</a></td>
            </tr>
            """
        )

    replacements = {
        "{{scenario_name}}": scenario_name,
        "{{overall_status}}": "PASS" if all_passed else "FAIL",
        "{{overall_class}}": "status-pass" if all_passed else "status-fail",
        "{{test_case_rows}}": "".join(rows),
    }
    report_content = REPORT_TEMPLATE
    for placeholder, value in replacements.items():
        report_content = report_content.replace(placeholder, value)

    report_file = os.path.join(output_path, "execution_report.html")
    with open(report_file, "w") as f:
        f.write(report_content)
    print(f"HTML Report generated at: {report_file}")
def save_summary(results, task_id_path):
    """Persist the aggregated results as final_summary.json in the task dir."""
    json_path = os.path.join(task_id_path, "final_summary.json")
    with open(json_path, "w") as handle:
        json.dump(results, handle, indent=4)
    print(f"\nFinal results saved to {json_path}")
if __name__ == "__main__":
    # Hard-coded smoke-test inputs; in the deployed system these values come
    # from the queue/worker — TODO confirm before relying on this entry point.
    queue_id = "1234"
    scenario_path = "application_layer/business_stack/actuator_manager/test/actuator_manager_init_test.test_scenario.xml"
    task_id = "56754"

    # Path logic: results land in RESULT_PATH/<queue_id>/<task_id>/.
    queue_path = os.path.join(RESULT_PATH, queue_id)
    task_id_path = os.path.join(queue_path, task_id)  # Corrected pathing
    os.makedirs(task_id_path, exist_ok=True)

    # Note: Ensure parse_test_scenario is defined or imported
    scenario_data = parse_test_scenario(os.path.join(COMPONENT_DIR, scenario_path))

    # Flatten the scenario's test cases into the task list the runner expects.
    my_tasks = []
    sub_tasks_data = scenario_data['test_cases']
    for case_id, exec_cmd in sub_tasks_data.items():
        my_tasks.append({
            "id": case_id,
            "cmd": exec_cmd,
            "path": task_id_path
        })

    final_data = run_test_suite(my_tasks)
    save_summary(final_data, task_id_path)
    # Generate report INSIDE the task folder
    generate_html_report(os.path.basename(scenario_path), final_data, task_id_path)

147
scenario_scan.py Normal file
View File

@@ -0,0 +1,147 @@
import os
import sys
from collections import defaultdict
from pathlib import Path
import json

# Get the directory of the current Python file
current_directory = os.path.dirname(os.path.abspath(__file__))
# The repo root is assumed to be two directory levels above this file, with
# scenarios under its components/ tree — TODO confirm against checkout layout.
repo_root = Path(current_directory).parents[1]
COMPONENT_DIR = os.path.join(repo_root, "components")
# Toggle verbose scan logging on stdout.
DEBUG = False
def finalize_output(data_obj):
    """Return a plain-dict copy of *data_obj*, dropping defaultdict metadata.

    A JSON serialize/deserialize round-trip converts nested defaultdicts into
    ordinary dicts suitable for printing and comparison.
    """
    serialized = json.dumps(data_obj)
    return json.loads(serialized)
def find_test_scenarios(root_dir):
    """Recursively collect '*.test_scenario.xml' files under root_dir.

    Args:
        root_dir (str): Absolute path of the components tree to scan.

    Returns:
        dict[str, str]: scenario name (suffix stripped) -> file path relative
        to root_dir. Empty dict when root_dir is missing/not a directory
        (an error message is printed instead of raising).
    """
    if not os.path.isdir(root_dir):
        print(f"Error: Directory not found or not accessible: {root_dir}")
        return {}
    if DEBUG:
        print(f"Scanning directory: '{root_dir}'...")

    scenario_suffix = ".test_scenario.xml"
    scenarios_map = {}
    for dirpath, _, filenames in os.walk(root_dir):
        for filename in filenames:
            if not filename.endswith(scenario_suffix):
                continue
            scenario_name = filename.replace(scenario_suffix, "")
            scenarios_map[scenario_name] = os.path.relpath(
                os.path.join(dirpath, filename), root_dir
            )
    return scenarios_map
def organize_by_layer_component(scenarios_map):
    """Group scenario names as {layer: {component: [scenario_name, ...]}}.

    The layer is the first path segment; the component is the directory two
    levels above the file (the folder that contains the 'test' directory).
    Paths with fewer than three segments fall back to "Root_Component".

    Args:
        scenarios_map (dict[str, str]): scenario name -> relative path.

    Returns:
        defaultdict: nested Layer -> Component -> list of scenario names.
    """
    organized_data = defaultdict(lambda: defaultdict(list))
    for scenario_name, rel_path in scenarios_map.items():
        parts = rel_path.split(os.sep)
        layer = parts[0]
        component = parts[-3] if len(parts) >= 3 else "Root_Component"
        organized_data[layer][component].append(scenario_name)
    return organized_data
def scenario_scan(components_root_dir):
    """Scan for test scenarios and return the organized + raw structures.

    Args:
        components_root_dir: Root 'components' directory to scan.

    Returns:
        tuple[defaultdict, dict]: (Layer -> Component -> [scenario names],
        {scenario_name: relative_path}). Both are empty when nothing is found.
    """
    # 1. Find all relative paths ({name: path}).
    found_scenarios_map = find_test_scenarios(components_root_dir)
    if not found_scenarios_map:
        print(f"\nNo files ending with '.test_scenario.xml' were found in {components_root_dir}.")
        return defaultdict(lambda: defaultdict(list)), {}

    num_scenarios = len(found_scenarios_map)
    if DEBUG:
        # 2. Print the simple list of found paths.
        print(f"\n--- Found {num_scenarios} Test Scenarios ---")
        for scenario_name, path in found_scenarios_map.items():
            # BUG FIX: the original nested double quotes inside a
            # double-quoted f-string (os.path.join("components", path)),
            # which is a SyntaxError on Python < 3.12.
            rel = os.path.join('components', path)
            print(f"Scenario: '{scenario_name}' | Relative Path: {rel}")

    # 3. Organize into the layer/component structure.
    organized_scenarios = organize_by_layer_component(found_scenarios_map)
    if DEBUG:
        # 4. Print the organized structure.
        print("\n--- Organized Layer/Component Structure ---")
        for layer, components in organized_scenarios.items():
            print(f"\n[LAYER] {layer.upper()}:")
            for component, scenarios in components.items():
                scenario_list = ", ".join(scenarios)
                print(f" [Component] {component}: {scenario_list}")

    return organized_scenarios, found_scenarios_map
if __name__ == "__main__":
    # Scan the components tree and emit one JSON object on stdout; the caller
    # (an SSH command, per the comment below) captures that single line.
    organized_data, scenario_map = scenario_scan(COMPONENT_DIR)
    combined_result = {
        "organized_data": finalize_output(organized_data),
        "scenario_map": finalize_output(scenario_map)
    }
    # 3. Print the combined object as a single JSON string
    # This is what will be captured by the SSH command
    print(json.dumps(combined_result))

60
test_execution.sh Normal file
View File

@@ -0,0 +1,60 @@
#!/bin/bash
# Run one test-case command inside the repo checkout, capture its output into
# an HTML log, and emit a FINAL_JSON_OUTPUT marker line with the verdict.

# Check if correct number of arguments are provided (now 4)
if [ "$#" -ne 4 ]; then
  echo "Usage: $0 <task_id> <command> <result_dir> <repo_path>"
  exit 1
fi

TASK_ID=$1
CMD=$2
RESULT_DIR=$3
REPO_PATH=$4

echo "$TASK_ID"

# Create result directory if it doesn't exist (absolute path)
mkdir -p "$RESULT_DIR"

# Use realpath on the (now-existing) result dir and a clearer filename
LOG_FILE="$(realpath "$RESULT_DIR")/${TASK_ID}-logging.html"

# Initialize HTML file with basic styling
cat <<EOF > "$LOG_FILE"
<html>
<head>
<style>
body { font-family: monospace; background-color: #1e1e1e; color: #d4d4d4; padding: 20px; }
.cmd { color: #569cd6; font-weight: bold; }
.repo { color: #ce9178; }
.output { white-space: pre-wrap; display: block; margin-top: 10px; border-left: 3px solid #666; padding-left: 10px; }
</style>
</head>
<body>
<h2>Execution Log for Task: $TASK_ID</h2>
<p class="repo">Working Directory: $REPO_PATH</p>
<p class="cmd">Executing: $CMD</p>
<hr>
<div class="output">
</EOF_PLACEHOLDER
EOF

# Run the command inside the repo and capture stdout+stderr once. This
# replaces the original `tee -a >(sed ...)` pipeline: that process
# substitution runs asynchronously, so it could still be writing to the log
# when the closing HTML tags were appended below (log-corruption race).
# The captured exit code is the command's (or cd's, if cd fails).
OUTPUT=$(cd "$REPO_PATH" && eval "$CMD" 2>&1)
EXIT_CODE=$?
if [ -n "$OUTPUT" ]; then
  printf '%s\n' "$OUTPUT"
  printf '%s\n' "$OUTPUT" | sed 's/$/<br>/' >> "$LOG_FILE"
fi

# Close HTML tags
echo "</div></body></html>" >> "$LOG_FILE"

# Determine PASS/FAIL from the command's exit code
if [ "$EXIT_CODE" -eq 0 ]; then
  RESULT="PASS"
else
  RESULT="FAIL"
fi

EVIDENCE_URL="file://$LOG_FILE"

# Return JSON output with a unique marker prefix
printf 'FINAL_JSON_OUTPUT:{"%s": ["%s", "%s"]}\n' "$TASK_ID" "$RESULT" "$EVIDENCE_URL"

16
testarena_app/database.py Normal file
View File

@@ -0,0 +1,16 @@
# BUG FIX: the original imported `create_all_engines`, which does not exist
# in SQLAlchemy — the module failed with ImportError before anything ran.
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import os

# Using SQLite for simplicity as requested.
# NOTE(review): this absolute Windows path is machine-specific (and contains
# spaces); consider deriving it from this file's location instead.
DATABASE_URL = "sqlite:///d:/ASF - course/ASF_01/ASF_tools/asf-pc-server/testarena_pc_backend/testarena_app/testarena.db"

# check_same_thread=False lets FastAPI's worker threads share the SQLite
# connection; sessions are still created per-request via get_db().
engine = create_engine(DATABASE_URL, connect_args={"check_same_thread": False})
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)


def get_db():
    """FastAPI dependency: yield a DB session and always close it afterwards."""
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()

139
testarena_app/main.py Normal file
View File

@@ -0,0 +1,139 @@
from fastapi import FastAPI, Depends, HTTPException, BackgroundTasks
from fastapi.staticfiles import StaticFiles
from fastapi.responses import FileResponse
from sqlalchemy.orm import Session
import os
import json
import uuid
from typing import Dict, List

from . import models, database

app = FastAPI(title="TestArena API")

# Mount static files (the dashboard SPA) under /static.
static_dir = os.path.join(os.path.dirname(__file__), "static")
os.makedirs(static_dir, exist_ok=True)
app.mount("/static", StaticFiles(directory=static_dir), name="static")

# Base directory for data as requested; queue_status.json and all task
# results are written below this path.
BASE_DATA_DIR = "/home/asf/testarena"
# For local development on Windows, we might need to adjust this,
# but I'll stick to the user's requirement for the final version.
# NOTE(review): this dev path is machine-specific.
if os.name == 'nt':
    BASE_DATA_DIR = "d:/ASF - course/ASF_01/ASF_tools/asf-pc-server/testarena_pc_backend/testarena_data"

# Ensure base directory exists
os.makedirs(BASE_DATA_DIR, exist_ok=True)

# Initialize database (creates tables on first import).
models.Base.metadata.create_all(bind=database.engine)
@app.post("/api/queue")
async def queue_task(payload: Dict, db: Session = Depends(database.get_db)):
    """Queue a new test run.

    Expected payload shape:
        {<queue_id>: [<environment>, {<task_id>: <scenario_path>, ...}]}

    Returns {"status": "Queue OK", "queue_id": ...} on success, or
    {"status": "Error", "message": ...} with HTTP 200 on failure — the
    dashboard relies on this envelope rather than HTTP error codes.
    """
    try:
        # Validate the payload shape up front so failures produce a clear
        # message instead of an opaque IndexError/KeyError string.
        if not payload:
            raise ValueError("payload must contain exactly one queue entry")
        queue_id = next(iter(payload))
        data = payload[queue_id]
        if len(data) < 2 or not isinstance(data[1], dict):
            raise ValueError("queue entry must be [environment, {task_id: scenario_path}]")
        environment = data[0]
        tasks_data = data[1]  # {"TASK_ID": "path"}

        # 1. Create the per-queue results folder.
        queue_dir = os.path.join(BASE_DATA_DIR, queue_id)
        os.makedirs(queue_dir, exist_ok=True)

        # 2. Build the on-disk status document.
        status_file = os.path.join(queue_dir, "queue_status.json")
        queue_status = {
            "queue_id": queue_id,
            "status": "Waiting",
            "tasks": {}
        }

        # 3. Persist queue + tasks, mirroring each task into the status doc.
        new_queue = models.Queue(id=queue_id, environment=environment, status="Waiting")
        db.add(new_queue)
        for task_id, scenario_path in tasks_data.items():
            db.add(models.Task(id=task_id, queue_id=queue_id,
                               scenario_path=scenario_path, status="Waiting"))
            queue_status["tasks"][task_id] = "Waiting"

        with open(status_file, 'w') as f:
            json.dump(queue_status, f, indent=4)
        db.commit()
        return {"status": "Queue OK", "queue_id": queue_id}
    except Exception as e:
        return {"status": "Error", "message": str(e)}
@app.get("/api/status/{id}")
async def get_status(id: str, db: Session = Depends(database.get_db)):
    """Resolve an ID as a queue first, then as a task; 404 when neither."""
    for model, kind in ((models.Queue, "queue"), (models.Task, "task")):
        record = db.query(model).filter(model.id == id).first()
        if record:
            return {"id": id, "type": kind, "status": record.status}
    raise HTTPException(status_code=404, detail="ID not found")
@app.post("/api/abort/{id}")
async def abort_task(id: str, db: Session = Depends(database.get_db)):
    """Abort a whole queue (and its pending/running tasks) or a single task.

    The on-disk queue_status.json is updated alongside the DB so the worker
    and the /results/ browser see a consistent state. IDs are tried as a
    queue first, then as a task; unknown IDs yield 404.
    """
    # Abort queue
    queue = db.query(models.Queue).filter(models.Queue.id == id).first()
    if queue:
        queue.status = "Aborted"
        # Abort all tasks in queue — only those not already terminal.
        tasks = db.query(models.Task).filter(models.Task.queue_id == id).all()
        for t in tasks:
            if t.status in ["Waiting", "Running"]:
                t.status = "Aborted"
        # Update queue_status.json (best-effort: skipped if the file is gone).
        queue_dir = os.path.join(BASE_DATA_DIR, id)
        status_file = os.path.join(queue_dir, "queue_status.json")
        if os.path.exists(status_file):
            with open(status_file, 'r') as f:
                data = json.load(f)
            data["status"] = "Aborted"
            for tid in data["tasks"]:
                if data["tasks"][tid] in ["Waiting", "Running"]:
                    data["tasks"][tid] = "Aborted"
            with open(status_file, 'w') as f:
                json.dump(data, f, indent=4)
        db.commit()
        return {"id": id, "status": "Aborted"}
    # Abort single task
    task = db.query(models.Task).filter(models.Task.id == id).first()
    if task:
        task.status = "Aborted"
        # Update queue_status.json for the owning queue.
        queue_dir = os.path.join(BASE_DATA_DIR, task.queue_id)
        status_file = os.path.join(queue_dir, "queue_status.json")
        if os.path.exists(status_file):
            with open(status_file, 'r') as f:
                data = json.load(f)
            data["tasks"][id] = "Aborted"
            with open(status_file, 'w') as f:
                json.dump(data, f, indent=4)
        db.commit()
        return {"id": id, "status": "Aborted"}
    raise HTTPException(status_code=404, detail="ID not found")
@app.get("/api/queues")
async def list_queues(db: Session = Depends(database.get_db)):
    """Return all queues, newest first, for the dashboard table."""
    return (
        db.query(models.Queue)
        .order_by(models.Queue.created_at.desc())
        .all()
    )
@app.get("/")
async def root():
    """Serve the dashboard single-page app."""
    index_path = os.path.join(static_dir, "index.html")
    return FileResponse(index_path)

27
testarena_app/models.py Normal file
View File

@@ -0,0 +1,27 @@
from sqlalchemy import Column, Integer, String, DateTime, ForeignKey, JSON
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
import datetime

Base = declarative_base()


class Queue(Base):
    """A submitted batch of test tasks sharing one target environment."""
    __tablename__ = "queues"

    # Caller-supplied queue identifier; also used as the results folder name.
    id = Column(String, primary_key=True, index=True)
    status = Column(String, default="Waiting")  # Finished, Waiting, Running, Aborted
    created_at = Column(DateTime, default=datetime.datetime.utcnow)
    environment = Column(String)
    # Deleting a queue cascades to its tasks.
    tasks = relationship("Task", back_populates="queue", cascade="all, delete-orphan")


class Task(Base):
    """A single scenario execution belonging to a Queue."""
    __tablename__ = "tasks"

    id = Column(String, primary_key=True, index=True)
    queue_id = Column(String, ForeignKey("queues.id"))
    scenario_path = Column(String)
    status = Column(String, default="Waiting")  # Finished, Waiting, Running, Aborted
    result = Column(JSON, nullable=True)
    queue = relationship("Queue", back_populates="tasks")

View File

@@ -0,0 +1,407 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>TestArena | Modern Dashboard</title>
<link href="https://fonts.googleapis.com/css2?family=Outfit:wght@300;400;600;700&display=swap" rel="stylesheet">
<style>
:root {
--primary: #6366f1;
--primary-glow: rgba(99, 102, 241, 0.5);
--secondary: #ec4899;
--accent: #8b5cf6;
--bg: #0f172a;
--card-bg: rgba(30, 41, 59, 0.7);
--text: #f8fafc;
--text-muted: #94a3b8;
--success: #10b981;
--warning: #f59e0b;
--danger: #ef4444;
--glass: rgba(255, 255, 255, 0.05);
--glass-border: rgba(255, 255, 255, 0.1);
}
* {
box-sizing: border-box;
margin: 0;
padding: 0;
}
body {
font-family: 'Outfit', sans-serif;
background: radial-gradient(circle at top right, #1e1b4b, #0f172a);
color: var(--text);
min-height: 100vh;
padding: 2rem;
overflow-x: hidden;
}
.container {
max-width: 1200px;
margin: 0 auto;
position: relative;
}
/* Decorative blobs */
.blob {
position: absolute;
width: 300px;
height: 300px;
background: var(--primary-glow);
filter: blur(100px);
border-radius: 50%;
z-index: -1;
animation: move 20s infinite alternate;
}
@keyframes move {
from {
transform: translate(-10%, -10%);
}
to {
transform: translate(20%, 20%);
}
}
header {
display: flex;
justify-content: space-between;
align-items: center;
margin-bottom: 3rem;
padding: 1.5rem;
background: var(--glass);
backdrop-filter: blur(12px);
border: 1px solid var(--glass-border);
border-radius: 1.5rem;
}
.logo {
display: flex;
align-items: center;
gap: 0.75rem;
font-size: 1.75rem;
font-weight: 700;
background: linear-gradient(to right, var(--primary), var(--secondary));
-webkit-background-clip: text;
-webkit-text-fill-color: transparent;
}
.nav-links {
display: flex;
gap: 1.5rem;
}
.nav-links a {
color: var(--text);
text-decoration: none;
font-weight: 600;
transition: color 0.3s;
}
.nav-links a:hover {
color: var(--primary);
}
.status-badge {
padding: 0.5rem 1rem;
border-radius: 1rem;
font-size: 0.875rem;
font-weight: 600;
display: flex;
align-items: center;
gap: 0.5rem;
background: var(--glass);
border: 1px solid var(--glass-border);
}
.dot {
width: 8px;
height: 8px;
border-radius: 50%;
background: var(--warning);
box-shadow: 0 0 10px var(--warning);
}
.dot.online {
background: var(--success);
box-shadow: 0 0 10px var(--success);
}
.grid {
display: grid;
grid-template-columns: 2fr 1fr;
gap: 2rem;
}
.card {
background: var(--card-bg);
backdrop-filter: blur(16px);
border: 1px solid var(--glass-border);
border-radius: 1.5rem;
padding: 2rem;
box-shadow: 0 25px 50px -12px rgba(0, 0, 0, 0.5);
}
h2 {
font-size: 1.25rem;
margin-bottom: 1.5rem;
display: flex;
align-items: center;
gap: 0.5rem;
}
table {
width: 100%;
border-collapse: separate;
border-spacing: 0 0.75rem;
}
th {
text-align: left;
color: var(--text-muted);
font-weight: 600;
padding: 0 1rem;
font-size: 0.875rem;
}
td {
padding: 1rem;
background: rgba(255, 255, 255, 0.03);
}
td:first-child {
border-radius: 1rem 0 0 1rem;
}
td:last-child {
border-radius: 0 1rem 1rem 0;
}
.status-pill {
padding: 0.25rem 0.75rem;
border-radius: 0.75rem;
font-size: 0.75rem;
font-weight: 700;
text-transform: uppercase;
}
.status-waiting {
background: rgba(148, 163, 184, 0.1);
color: #94a3b8;
}
.status-running {
background: rgba(99, 102, 241, 0.1);
color: #818cf8;
border: 1px solid rgba(99, 102, 241, 0.3);
}
.status-finished {
background: rgba(16, 185, 129, 0.1);
color: #34d399;
}
.status-aborted {
background: rgba(239, 68, 68, 0.1);
color: #f87171;
}
.btn-abort {
background: rgba(239, 68, 68, 0.1);
color: #f87171;
border: 1px solid rgba(239, 68, 68, 0.2);
padding: 0.4rem 0.8rem;
border-radius: 0.75rem;
cursor: pointer;
font-weight: 600;
transition: all 0.3s;
}
.btn-abort:hover {
background: var(--danger);
color: white;
}
.log-container {
background: #020617;
border-radius: 1rem;
padding: 1.25rem;
height: 400px;
overflow-y: auto;
font-family: 'Fira Code', monospace;
font-size: 0.8125rem;
line-height: 1.6;
border: 1px solid var(--glass-border);
}
.log-entry {
margin-bottom: 0.5rem;
display: flex;
gap: 0.75rem;
}
.log-time {
color: var(--primary);
opacity: 0.7;
}
.log-msg {
color: #cbd5e1;
}
/* Custom Scrollbar */
::-webkit-scrollbar {
width: 8px;
}
::-webkit-scrollbar-track {
background: transparent;
}
::-webkit-scrollbar-thumb {
background: var(--glass-border);
border-radius: 4px;
}
::-webkit-scrollbar-thumb:hover {
background: rgba(255, 255, 255, 0.2);
}
</style>
</head>
<body>
<div class="blob"></div>
<div class="container">
<header>
<div class="logo">
<svg width="32" height="32" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2.5"
stroke-linecap="round" stroke-linejoin="round">
<path d="M12 2L2 7l10 5 10-5-10-5zM2 17l10 5 10-5M2 12l10 5 10-5" />
</svg>
TestArena
</div>
<nav class="nav-links">
<a href="/">Dashboard</a>
<a href="/results/" target="_blank">Browse Results</a>
</nav>
<div id="connection-status" class="status-badge">
<div class="dot"></div>
<span>Connecting...</span>
</div>
</header>
<div class="grid">
<div class="card">
<h2>
<svg width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"
stroke-linecap="round" stroke-linejoin="round">
<rect x="3" y="3" width="18" height="18" rx="2" ry="2" />
<line x1="3" y1="9" x2="21" y2="9" />
<line x1="9" y1="21" x2="9" y2="9" />
</svg>
Queue Monitor
</h2>
<table id="queue-table">
<thead>
<tr>
<th>Queue ID</th>
<th>Environment</th>
<th>Status</th>
<th>Actions</th>
</tr>
</thead>
<tbody>
<!-- Dynamic content -->
</tbody>
</table>
</div>
<div class="card">
<h2>
<svg width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"
stroke-linecap="round" stroke-linejoin="round">
<path d="M21 15a2 2 0 0 1-2 2H7l-4 4V5a2 2 0 0 1 2-2h14a2 2 0 0 1 2 2z" />
</svg>
Live System Logs
</h2>
<div id="logs" class="log-container">
<div class="log-entry">
<span class="log-time">23:34:52</span>
<span class="log-msg">System initialized. Waiting for connection...</span>
</div>
</div>
</div>
</div>
</div>
<script>
/**
 * Poll the backend for queue state and re-render the queue table.
 * Also flips the connection badge: online on success, lost on any failure.
 *
 * All API-provided values are HTML-escaped before being interpolated into
 * innerHTML — queue ids / environment names must not be able to inject
 * markup or break the inline onclick handler.
 */
async function fetchStatus() {
    const esc = (v) => String(v).replace(/[&<>"']/g, (c) => ({
        '&': '&amp;', '<': '&lt;', '>': '&gt;', '"': '&quot;', "'": '&#39;'
    }[c]));
    try {
        const response = await fetch('/api/queues');
        // fetch() resolves on HTTP errors too; treat non-2xx as a failure
        // so a broken backend does not show "System Online".
        if (!response.ok) {
            throw new Error(`HTTP ${response.status}`);
        }
        const queues = await response.json();
        const tbody = document.querySelector('#queue-table tbody');
        tbody.innerHTML = '';
        queues.forEach(q => {
            const tr = document.createElement('tr');
            tr.innerHTML = `
                <td style="font-weight: 600;">${esc(q.id)}</td>
                <td><span style="opacity: 0.8;">${esc(q.environment)}</span></td>
                <td><span class="status-pill status-${esc(q.status).toLowerCase()}">${esc(q.status)}</span></td>
                <td>
                    <button class="btn-abort" onclick="abortQueue('${esc(q.id)}')">Abort</button>
                </td>
            `;
            tbody.appendChild(tr);
        });
        const badge = document.getElementById('connection-status');
        badge.querySelector('.dot').classList.add('online');
        badge.querySelector('span').textContent = 'System Online';
    } catch (e) {
        // Network failure or non-2xx response: mark the connection as lost.
        const badge = document.getElementById('connection-status');
        badge.querySelector('.dot').classList.remove('online');
        badge.querySelector('span').textContent = 'Connection Lost';
    }
}
/**
 * Ask the backend to abort the given queue, after user confirmation,
 * then log the outcome and refresh the table.
 *
 * @param {string} id - Queue identifier as shown in the table.
 */
async function abortQueue(id) {
    if (confirm(`Are you sure you want to abort queue ${id}?`)) {
        try {
            // Encode the id so ids with reserved URL characters don't
            // produce a malformed request path.
            const res = await fetch(`/api/abort/${encodeURIComponent(id)}`, { method: 'POST' });
            // fetch() only rejects on network errors; a 4xx/5xx response
            // must be detected explicitly or failures get logged as success.
            if (!res.ok) {
                throw new Error(`HTTP ${res.status}`);
            }
            addLog(`Aborted queue: ${id}`, 'danger');
            fetchStatus();
        } catch (e) {
            addLog(`Failed to abort queue: ${id}`, 'danger');
        }
    }
}
/**
 * Append a timestamped entry to the live log panel and keep it scrolled
 * to the newest entry.
 *
 * @param {string} msg  - Message text (rendered as plain text, never HTML).
 * @param {string} type - Severity, e.g. 'info' or 'danger'; exposed as a
 *                        `log-<type>` CSS hook (no rule exists for it yet,
 *                        so it is visually a no-op today).
 */
function addLog(msg, type = 'info') {
    const logs = document.getElementById('logs');
    const entry = document.createElement('div');
    entry.className = `log-entry log-${type}`;
    const timeSpan = document.createElement('span');
    timeSpan.className = 'log-time';
    timeSpan.textContent = new Date().toLocaleTimeString([], { hour12: false });
    const msgSpan = document.createElement('span');
    msgSpan.className = 'log-msg';
    // textContent (not innerHTML) so messages containing markup — e.g.
    // API-provided queue ids echoed by abortQueue — cannot inject HTML.
    msgSpan.textContent = msg;
    entry.append(timeSpan, msgSpan);
    logs.appendChild(entry);
    // Auto-scroll so the newest entry stays visible.
    logs.scrollTop = logs.scrollHeight;
}
// Initial fetch and poll
fetchStatus();
setInterval(fetchStatus, 3000); // refresh the queue table every 3 seconds
// Simulate some system logs
// NOTE(review): these are cosmetic placeholder messages, not real backend
// events — confirm whether a real log stream should replace them.
setTimeout(() => addLog("Database connection established."), 1000);
setTimeout(() => addLog("Background worker is polling for tasks..."), 2000);
</script>
</body>
</html>

98
testarena_app/worker.py Normal file
View File

@@ -0,0 +1,98 @@
import json
import os
import subprocess
import sys
import time

from sqlalchemy.orm import Session
from . import models, database
# Base directory for data
BASE_DATA_DIR = "/home/asf/testarena"
if os.name == 'nt':
    # Windows development override — hard-coded to one machine's checkout.
    # NOTE(review): machine-specific path; consider an env var such as
    # TESTARENA_DATA_DIR instead.
    BASE_DATA_DIR = "d:/ASF - course/ASF_01/ASF_tools/asf-pc-server/testarena_pc_backend/testarena_data"
def update_json_status(queue_id, task_id, status, result=None):
    """Mirror a queue/task status change into the queue's on-disk JSON file.

    Args:
        queue_id: Directory name of the queue under BASE_DATA_DIR.
        task_id: Task identifier to update, or None to update the
            queue-level status instead.
        status: New status string (e.g. "Running", "Finished", "Error").
        result: Optional result payload stored under data["results"][task_id].
            Only persisted when task_id is given — the original code wrote
            it under a literal None key ("null" in JSON) for queue-level
            calls, which is never read back.

    A missing status file is silently ignored (the queue may not have been
    materialized on disk yet).
    """
    queue_dir = os.path.join(BASE_DATA_DIR, queue_id)
    status_file = os.path.join(queue_dir, "queue_status.json")
    if not os.path.exists(status_file):
        return
    with open(status_file, 'r') as f:
        data = json.load(f)
    if task_id:
        # Tolerate files created before a "tasks" map existed.
        data.setdefault("tasks", {})[task_id] = status
    else:
        data["status"] = status
    if result and task_id:
        data.setdefault("results", {})[task_id] = result
    # Write to a temp file and atomically replace, so a crash mid-dump
    # cannot leave a truncated/corrupt status file behind.
    tmp_file = status_file + ".tmp"
    with open(tmp_file, 'w') as f:
        json.dump(data, f, indent=4)
    os.replace(tmp_file, status_file)
def run_worker():
    """Poll the database for waiting queues and execute their tasks serially.

    Runs forever: picks the oldest queue with status "Waiting", marks it
    "Running", executes each of its waiting tasks via tpf_execution.py in a
    subprocess, and records per-task status/results both in the DB and in
    the queue's JSON status file. Honors aborts (queue status flipped to
    "Aborted" by the API) between tasks.
    """
    print("Worker started...")
    while True:
        db = database.SessionLocal()
        try:
            # Oldest waiting queue first (FIFO by creation time).
            queue = db.query(models.Queue).filter(models.Queue.status == "Waiting").order_by(models.Queue.created_at).first()
            if queue:
                print(f"Processing queue: {queue.id}")
                queue.status = "Running"
                update_json_status(queue.id, None, "Running")
                db.commit()
                tasks = db.query(models.Task).filter(models.Task.queue_id == queue.id, models.Task.status == "Waiting").all()
                for task in tasks:
                    # Re-read queue state so an abort issued mid-run stops us.
                    db.refresh(queue)
                    if queue.status == "Aborted":
                        break
                    print(f"Running task: {task.id}")
                    task.status = "Running"
                    update_json_status(queue.id, task.id, "Running")
                    db.commit()
                    try:
                        # Run: tpf_execution.py <queue_id> <scenario_path> <task_id>.
                        # sys.executable guarantees the same interpreter/venv as
                        # the worker; a bare "python" resolves via PATH and may
                        # pick a different installation.
                        # NOTE(review): no timeout — a hung scenario blocks the
                        # worker forever; consider subprocess.run(..., timeout=...).
                        script_path = "tpf_execution.py"
                        cmd = [sys.executable, script_path, queue.id, task.scenario_path, task.id]
                        result = subprocess.run(cmd, capture_output=True, text=True)
                        try:
                            execution_result = json.loads(result.stdout)
                        except ValueError:
                            # Script did not emit valid JSON — keep the raw
                            # streams for debugging instead of losing them.
                            execution_result = {"output": result.stdout, "error": result.stderr}
                        task.status = "Finished"
                        task.result = execution_result
                        update_json_status(queue.id, task.id, "Finished", execution_result)
                    except Exception as e:
                        print(f"Error running task {task.id}: {e}")
                        task.status = "Error"
                        update_json_status(queue.id, task.id, "Error")
                    db.commit()
                # Re-check so an abort issued during the final task is not
                # overwritten with "Finished".
                db.refresh(queue)
                if queue.status != "Aborted":
                    queue.status = "Finished"
                    update_json_status(queue.id, None, "Finished")
                    db.commit()
            time.sleep(5)  # Poll every 5 seconds
        except Exception as e:
            print(f"Worker error: {e}")
            time.sleep(10)
        finally:
            db.close()
if __name__ == "__main__":
    # Entry point for `python3 -m testarena_app.worker` (see deploy.sh).
    run_worker()

32
tpf_execution.py Normal file
View File

@@ -0,0 +1,32 @@
import sys
import json
import time
import random
def main():
    """Simulated scenario execution (CLI stub).

    Usage: python tpf_execution.py <queue_id> <scenario_path> <task_id>

    Emits exactly one JSON result object on stdout; all human-readable
    progress goes to stderr. The worker parses the entire stdout stream
    with json.loads(), so printing diagnostics to stdout (as the original
    did) made the JSON result unparseable and forced the raw-output
    fallback path.

    Exits with status 1 if fewer than three arguments are supplied.
    """
    if len(sys.argv) < 4:
        # Errors belong on stderr, not in the machine-readable stdout.
        print("Usage: python tpf_execution.py <queue_id> <scenario_path> <task_id>", file=sys.stderr)
        sys.exit(1)
    queue_id = sys.argv[1]
    scenario_path = sys.argv[2]
    task_id = sys.argv[3]
    print(f"Starting execution for Task: {task_id} in Queue: {queue_id}", file=sys.stderr)
    print(f"Scenario: {scenario_path}", file=sys.stderr)
    # Simulate work
    duration = random.randint(2, 5)
    time.sleep(duration)
    result = {
        "task_id": task_id,
        "status": "Success",
        "duration": duration,
        "details": f"Scenario {scenario_path} executed successfully."
    }
    # Sole stdout output: the JSON result consumed by the worker.
    print(json.dumps(result))
if __name__ == "__main__":
    # Invoked as a subprocess by testarena_app.worker.
    main()