From aa1f5e30d9f497dc936d481b42830829c4357009 Mon Sep 17 00:00:00 2001 From: mahmamdouh Date: Sat, 27 Dec 2025 01:13:30 +0100 Subject: [PATCH] init --- deploy.sh | 44 ++++ deployment_guide.md | 80 +++++++ gitea_repo_controller.sh | 124 ++++++++++ nginx/testarena.conf | 58 +++++ scenario_exe_parser.py | 103 ++++++++ scenario_execution.py | 165 +++++++++++++ scenario_scan.py | 147 ++++++++++++ test_execution.sh | 60 +++++ testarena_app/database.py | 16 ++ testarena_app/main.py | 139 +++++++++++ testarena_app/models.py | 27 +++ testarena_app/static/index.html | 407 ++++++++++++++++++++++++++++++++ testarena_app/worker.py | 98 ++++++++ tpf_execution.py | 32 +++ 14 files changed, 1500 insertions(+) create mode 100644 deploy.sh create mode 100644 deployment_guide.md create mode 100644 gitea_repo_controller.sh create mode 100644 nginx/testarena.conf create mode 100644 scenario_exe_parser.py create mode 100644 scenario_execution.py create mode 100644 scenario_scan.py create mode 100644 test_execution.sh create mode 100644 testarena_app/database.py create mode 100644 testarena_app/main.py create mode 100644 testarena_app/models.py create mode 100644 testarena_app/static/index.html create mode 100644 testarena_app/worker.py create mode 100644 tpf_execution.py diff --git a/deploy.sh b/deploy.sh new file mode 100644 index 0000000..5cb5f37 --- /dev/null +++ b/deploy.sh @@ -0,0 +1,44 @@ +#!/bin/bash + +# TestArena Deployment Script +# Run this script with sudo: sudo ./deploy.sh + +set -e + +echo "๐Ÿš€ Starting TestArena Deployment..." + +# 1. Install Dependencies +echo "๐Ÿ“ฆ Installing dependencies..." +apt-get update +apt-get install -y nginx python3-pip python3-venv + +# 2. Set up Python Virtual Environment +echo "๐Ÿ Setting up Python environment..." +python3 -m venv venv +source venv/bin/activate +pip install fastapi uvicorn sqlalchemy + +# 3. Configure Nginx +echo "๐ŸŒ Configuring Nginx..." +cp nginx/testarena.conf /etc/nginx/sites-available/testarena +ln -sf /etc/nginx/sites-available/testarena /etc/nginx/sites-enabled/ +rm -f /etc/nginx/sites-enabled/default + +# 4. Create Data Directory +echo "๐Ÿ“ Creating data directory..." +mkdir -p /home/asf/testarena +chown -R asf:asf /home/asf/testarena +chmod -R 755 /home/asf/testarena + +# 5. Restart Nginx +echo "๐Ÿ”„ Restarting Nginx..." +nginx -t +systemctl restart nginx + +echo "โœ… Deployment complete!" +echo "--------------------------------------------------" +echo "Dashboard: http://asf-server.duckdns.org:8080/" +echo "Results: http://asf-server.duckdns.org:8080/results/" +echo "--------------------------------------------------" +echo "To start the app: source venv/bin/activate && uvicorn testarena_app.main:app --host 0.0.0.0 --port 8000" +echo "To start the worker: source venv/bin/activate && python3 -m testarena_app.worker" diff --git a/deployment_guide.md b/deployment_guide.md new file mode 100644 index 0000000..cb22f05 --- /dev/null +++ b/deployment_guide.md @@ -0,0 +1,80 @@ +# TestArena Deployment & Testing Guide + +This guide explains how to deploy and test the TestArena backend application on your Ubuntu Server. + +## ๐Ÿš€ Deployment Steps + +### 1. Clone the Repository +Ensure you have the code on your server in a directory like `/home/asf/testarena_pc_backend`. + +### 2. Run the Deployment Script +The deployment script automates Nginx configuration and dependency installation. +```bash +sudo chmod +x deploy.sh +sudo ./deploy.sh +``` + +### 3. 
Start the Application Services +You should run these in the background or using a process manager like `pm2` or `systemd`. + +**Start the API Server:** +```bash +source venv/bin/activate +uvicorn testarena_app.main:app --host 0.0.0.0 --port 8000 +``` + +**Start the Background Worker:** +```bash +source venv/bin/activate +python3 -m testarena_app.worker +``` + +--- + +## ๐Ÿงช Testing the System + +### 1. Verify Dashboard Access +Open your browser and navigate to: +`http://asf-server.duckdns.org:8080/` +You should see the modern, colorful TestArena dashboard. + +### 2. Verify Results Browsing +Navigate to: +`http://asf-server.duckdns.org:8080/results/` +You should see an automatic directory listing of `/home/asf/testarena/`. + +### 3. Test the Queue API +Run the following `curl` command to queue a test task: +```bash +curl -X POST http://asf-server.duckdns.org:8080/api/queue \ +-H "Content-Type: application/json" \ +-d '{ + "test_queue_001": [ + "staging", + { + "task_1": "/home/asf/scenarios/test1.py", + "task_2": "/home/asf/scenarios/test2.py" + } + ] +}' +``` + +### 4. Verify Worker Execution +- Check the dashboard; you should see the new queue appear and its status change from `Waiting` to `Running` and then `Finished`. +- Check the filesystem: + ```bash + ls -R /home/asf/testarena/test_queue_001 + ``` + You should see `queue_status.json` and any results generated by `tpf_execution.py`. + +### 5. Test Abortion +Queue another task and click the **Abort** button on the dashboard. Verify that the status changes to `Aborted` in both the dashboard and the `queue_status.json` file. + +--- + +## ๐Ÿ› ๏ธ Troubleshooting + +- **Nginx Errors**: Check logs with `sudo tail -f /var/log/nginx/error.log`. +- **FastAPI Errors**: Check the terminal where `uvicorn` is running. +- **Permission Issues**: Ensure `/home/asf/testarena` is writable by the user running the app. +- **Port 8080 Blocked**: Ensure your firewall (ufw) allows traffic on port 8080: `sudo ufw allow 8080`. diff --git a/gitea_repo_controller.sh b/gitea_repo_controller.sh new file mode 100644 index 0000000..8e43ba3 --- /dev/null +++ b/gitea_repo_controller.sh @@ -0,0 +1,124 @@ +#!/bin/bash +set -e + +# ---------------------------------------- +# 1. Configuration (UPDATE IF NEEDED) +# ---------------------------------------- + +GIT_USERNAME="asfautomation" +GIT_PASSWORD="asfautomation" + +REPO_HOST="gitea.nabd-co.com" +REPO_PATH="ASF-Nabd/ASF-SH" +TARGET_DIR="TPF/Sensor_hub_repo" + +# ---------------------------------------- +# 2. URL Encoding Function +# ---------------------------------------- +urlencode() { + perl -pe 's/([^a-zA-Z0-9_.-])/sprintf("%%%02x", ord($1))/ge' +} + +ENCODED_USERNAME=$(printf '%s' "${GIT_USERNAME}" | urlencode) +ENCODED_PASSWORD=$(printf '%s' "${GIT_PASSWORD}" | urlencode) + + +AUTH_URL="https://${ENCODED_USERNAME}:${ENCODED_PASSWORD}@${REPO_HOST}/${REPO_PATH}.git" + +# ---------------------------------------- +# 3. Command & Arguments +# ---------------------------------------- +COMMAND="$1" +BRANCH_NAME="$2" + +# ---------------------------------------- +# 4. Functions +# ---------------------------------------- + +clone_repo() { + if [ -d "${TARGET_DIR}" ]; then + echo "โ„น๏ธ Repository already exists. Skipping clone." + return 0 + fi + + echo "๐Ÿ“ฅ Cloning repository..." + git clone "${AUTH_URL}" "${TARGET_DIR}" + echo "โœ… Clone completed." +} + +checkout_branch() { + if [ -z "${BRANCH_NAME}" ]; then + echo "โŒ Branch name is required for checkout." + exit 1 + fi + + if [ ! 
-d "${TARGET_DIR}" ]; then + echo "โŒ Repository not found. Run clone first." + exit 1 + fi + + cd "${TARGET_DIR}" + + echo "๐Ÿ“ฆ Stashing local changes (including untracked)..." + git stash push -u -m "automation-stash-before-checkout" || true + + echo "๐Ÿ”„ Fetching latest changes..." + git fetch origin + + echo "๐ŸŒฟ Checking out main branch..." + git checkout main + + echo "โฌ‡๏ธ Pulling latest main..." + git pull "${AUTH_URL}" main + + echo "๐ŸŒฟ Checking out target branch: ${BRANCH_NAME}" + if git show-ref --verify --quiet "refs/heads/${BRANCH_NAME}"; then + git checkout "${BRANCH_NAME}" + else + git checkout -b "${BRANCH_NAME}" "origin/${BRANCH_NAME}" + fi + + echo "โฌ†๏ธ Rebasing '${BRANCH_NAME}' onto latest main..." + git rebase main + + cd - >/dev/null + echo "โœ… Checkout and rebase completed successfully." +} + +delete_repo() { + if [ -d "${TARGET_DIR}" ]; then + echo "๐Ÿ—‘๏ธ Deleting repository directory..." + rm -rf "${TARGET_DIR}" + echo "โœ… Repository deleted." + else + echo "โ„น๏ธ Repository directory does not exist." + fi +} + +# ---------------------------------------- +# 5. Main Execution +# ---------------------------------------- + +case "${COMMAND}" in + clone) + clone_repo + ;; + checkout) + checkout_branch + ;; + delete) + delete_repo + ;; + *) + echo "โŒ Invalid command." + echo "Usage:" + echo " $0 clone" + echo " $0 checkout " + echo " $0 delete" + exit 1 + ;; +esac + +echo "----------------------------------------" +echo "โœ” Automation script finished successfully" +echo "----------------------------------------" diff --git a/nginx/testarena.conf b/nginx/testarena.conf new file mode 100644 index 0000000..e6cbd39 --- /dev/null +++ b/nginx/testarena.conf @@ -0,0 +1,58 @@ +# TestArena Nginx Configuration +# This file should be placed in /etc/nginx/sites-available/testarena +# and symlinked to /etc/nginx/sites-enabled/testarena + +server { + listen 8080; + server_name _; + + # Security: Prevent directory traversal and restrict symlinks + disable_symlinks on; + + # Root directory for the results (autoindex) + location /results/ { + alias /home/asf/testarena/; + + # Enable autoindex with requested features + autoindex on; + autoindex_exact_size off; # Human-readable sizes + autoindex_localtime on; # Local time + + # Read-only access + limit_except GET { + deny all; + } + + # Prevent execution of scripts + location ~* \.(php|pl|py|sh|cgi)$ { + return 403; + } + } + + # Proxy requests to the FastAPI application + location / { + proxy_pass http://127.0.0.1:8000; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + # WebSocket support (if needed in future) + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + } + + # Custom error pages + error_page 404 /404.html; + location = /404.html { + root /usr/share/nginx/html; + internal; + } + + error_page 500 502 503 504 /50x.html; + location = /50x.html { + root /usr/share/nginx/html; + internal; + } +} diff --git a/scenario_exe_parser.py b/scenario_exe_parser.py new file mode 100644 index 0000000..64e8d94 --- /dev/null +++ b/scenario_exe_parser.py @@ -0,0 +1,103 @@ +import xml.etree.ElementTree as ET +import os +import sys +import json +from collections import defaultdict +from pathlib import Path + +# Get the directory of the current Python file +current_directory = os.path.dirname(os.path.abspath(__file__)) +COMPONENT_DIR = 
os.path.join(current_directory, "Sensor_hub_repo", "components") + +def finalize_output(data_obj): + # Convert defaultdict to standard dict recursively + # This removes the and metadata + standard_dict = json.loads(json.dumps(data_obj)) + + # Print ONLY the JSON string to stdout + #print(json.dumps(standard_dict, indent=4)) + return standard_dict + +def parse_test_scenario(xml_file_path): + """ + Parses a test scenario XML file and extracts the configuration and all + test case IDs mapped to their execution commands. + + Args: + xml_file_path (str): The path to the XML file to parse. + + Returns: + dict: A dictionary in the format: + { + 'config': , + 'test_cases': { + : , + ... + } + } + Returns an empty dictionary on error. + """ + if not os.path.exists(xml_file_path): + print(f"Error: File not found at '{xml_file_path}'") + return {} + + try: + # 1. Parse the XML file + tree = ET.parse(xml_file_path) + root = tree.getroot() + except ET.ParseError as e: + print(f"Error: Failed to parse XML file. Details: {e}") + return {} + except Exception as e: + print(f"An unexpected error occurred during file parsing: {e}") + return {} + + # Initialize the final structured output + parsed_data = { + 'config': '', + 'test_cases': {} + } + + # 2. Extract the mandatory value + config_element = root.find('config') + if config_element is not None and config_element.text: + parsed_data['config'] = config_element.text.strip() + + # 3. Iterate over all elements and extract ID and Exec + for tc in root.findall('test_case'): + tc_id_element = tc.find('test_case_id') + tc_exec_element = tc.find('test_exec') + + # Use strip() and check against None for safety, even if validation passed + tc_id = tc_id_element.text.strip() if tc_id_element is not None and tc_id_element.text else "UNKNOWN_ID" + tc_exec = tc_exec_element.text.strip() if tc_exec_element is not None and tc_exec_element.text else "UNKNOWN_EXEC" + + # Add to the test_cases dictionary + parsed_data['test_cases'][tc_id] = tc_exec + + return parsed_data + +if __name__ == "__main__": + # Define a default path to test against + default_test_file = 'sample_scenario.xml' + + # Allow passing the file path as a command-line argument for flexibility + file_to_check = sys.argv[1] if len(sys.argv) > 1 else print({}) + file_path = os.path.join(COMPONENT_DIR, file_to_check) + + print(f"--- XML Test Scenario Parser ---") + print(f"Parsing file: {file_to_check}\n") + + # Run the parser + scenario_data = parse_test_scenario(file_path) + + # Print results + # if scenario_data: + # print("โœ… Parsing Successful. 
diff --git a/scenario_execution.py b/scenario_execution.py
new file mode 100644
index 0000000..60947bf
--- /dev/null
+++ b/scenario_execution.py
@@ -0,0 +1,165 @@
+import os
+import sys
+import json
+import subprocess
+from scenario_exe_parser import parse_test_scenario
+
+# --- Global Paths ---
+current_directory = os.path.dirname(os.path.abspath(__file__))
+REPO_PATH = os.path.join(current_directory, "Sensor_hub_repo")
+COMPONENT_DIR = os.path.join(REPO_PATH, "components")
+RESULT_PATH = "/home/asf/testarena"
+
+# The HTML Template
+REPORT_TEMPLATE = """
+<!DOCTYPE html>
+<html>
+<head>
+    <title>ESP32 Test Execution Report</title>
+    <style>
+        body { font-family: Arial, sans-serif; margin: 20px; }
+        table { border-collapse: collapse; width: 100%; margin-bottom: 24px; }
+        th, td { border: 1px solid #ccc; padding: 8px; text-align: left; }
+        .status-pass { color: green; font-weight: bold; }
+        .status-fail { color: red; font-weight: bold; }
+    </style>
+</head>
+<body>
+    <h2>Overall Scenario Summary</h2>
+    <table>
+        <tr>
+            <th>Scenario Name</th>
+            <th>Final Result</th>
+        </tr>
+        <tr>
+            <td>{{scenario_name}}</td>
+            <td class="{{overall_class}}">{{overall_status}}</td>
+        </tr>
+    </table>
+
+    <h2>Detailed Test Cases</h2>
+    <table>
+        <tr>
+            <th>Test Case ID</th>
+            <th>Result</th>
+            <th>Execution Log</th>
+        </tr>
+        {{test_case_rows}}
+    </table>
+</body>
+</html>
+ + +""" + +def run_test_suite(tasks): + aggregated_results = {} + shell_script = "./TPF/test_execution.sh" + if os.name != 'nt': + subprocess.run(["chmod", "+x", shell_script]) + + for task in tasks: + print(f"--- Starting Task: {task['id']} ---") + result = subprocess.run( + [shell_script, task['id'], task['cmd'], task['path'], REPO_PATH], + capture_output=True, text=True + ) + print(result.stdout) + + json_found = False + for line in result.stdout.splitlines(): + if line.startswith("FINAL_JSON_OUTPUT:"): + json_string = line.replace("FINAL_JSON_OUTPUT:", "").strip() + try: + task_json = json.loads(json_string) + aggregated_results.update(task_json) + json_found = True + except json.JSONDecodeError as e: + print(f"!!! JSON Parsing Error: {e}") + + if not json_found: + aggregated_results[task['id']] = ["ERROR", "N/A"] + return aggregated_results + +def generate_html_report(scenario_name, results, output_path): + all_passed = all(info[0] == "PASS" for info in results.values()) + overall_status = "PASS" if all_passed else "FAIL" + overall_class = "status-pass" if all_passed else "status-fail" + + test_case_rows = "" + for tc_id, info in results.items(): + status = info[0] + log_url = info[1] + status_class = "status-pass" if status == "PASS" else "status-fail" + + test_case_rows += f""" + + {tc_id} + {status} + View Log + + """ + + # Use the global REPORT_TEMPLATE + report_content = REPORT_TEMPLATE.replace("{{scenario_name}}", scenario_name) \ + .replace("{{overall_status}}", overall_status) \ + .replace("{{overall_class}}", overall_class) \ + .replace("{{test_case_rows}}", test_case_rows) + + report_file = os.path.join(output_path, "execution_report.html") + with open(report_file, "w") as f: + f.write(report_content) + print(f"HTML Report generated at: {report_file}") + +def save_summary(results, task_id_path): + json_path = os.path.join(task_id_path, "final_summary.json") + with open(json_path, "w") as f: + json.dump(results, f, indent=4) + print(f"\nFinal results saved to {json_path}") + +if __name__ == "__main__": + queue_id = "1234" + scenario_path = "application_layer/business_stack/actuator_manager/test/actuator_manager_init_test.test_scenario.xml" + task_id = "56754" + + # Path logic + queue_path = os.path.join(RESULT_PATH, queue_id) + task_id_path = os.path.join(queue_path, task_id) # Corrected pathing + + os.makedirs(task_id_path, exist_ok=True) + + # Note: Ensure parse_test_scenario is defined or imported + scenario_data = parse_test_scenario(os.path.join(COMPONENT_DIR, scenario_path)) + + my_tasks = [] + sub_tasks_data = scenario_data['test_cases'] + for case_id, exec_cmd in sub_tasks_data.items(): + my_tasks.append({ + "id": case_id, + "cmd": exec_cmd, + "path": task_id_path + }) + + final_data = run_test_suite(my_tasks) + save_summary(final_data, task_id_path) + + # Generate report INSIDE the task folder + generate_html_report(os.path.basename(scenario_path), final_data, task_id_path) \ No newline at end of file diff --git a/scenario_scan.py b/scenario_scan.py new file mode 100644 index 0000000..2b4a877 --- /dev/null +++ b/scenario_scan.py @@ -0,0 +1,147 @@ +import os +import sys +from collections import defaultdict +from pathlib import Path +import json + + +# Get the directory of the current Python file +current_directory = os.path.dirname(os.path.abspath(__file__)) +repo_root = Path(current_directory).parents[1] +COMPONENT_DIR = os.path.join(repo_root, "components") +DEBUG = False + + +def finalize_output(data_obj): + # Convert defaultdict to standard dict recursively + # 
diff --git a/scenario_scan.py b/scenario_scan.py
new file mode 100644
index 0000000..2b4a877
--- /dev/null
+++ b/scenario_scan.py
@@ -0,0 +1,147 @@
+import os
+import sys
+from collections import defaultdict
+from pathlib import Path
+import json
+
+
+# Get the directory of the current Python file
+current_directory = os.path.dirname(os.path.abspath(__file__))
+repo_root = Path(current_directory).parents[1]
+COMPONENT_DIR = os.path.join(repo_root, "components")
+DEBUG = False
+
+
+def finalize_output(data_obj):
+    # Convert defaultdict to standard dict recursively
+    # This removes the defaultdict wrapper and lambda-factory metadata
+    standard_dict = json.loads(json.dumps(data_obj))
+
+    # Print ONLY the JSON string to stdout
+    #print(json.dumps(standard_dict, indent=4))
+    return standard_dict
+
+def find_test_scenarios(root_dir):
+    """
+    Recursively searches the given root directory for files ending with
+    '.test_scenario.xml' and returns a dictionary mapping scenario names to their
+    paths relative to the root directory.
+
+    Args:
+        root_dir (str): The absolute path to the starting directory (e.g., 'COMPONENTS').
+
+    Returns:
+        dict[str, str]: A dictionary mapping scenario names (without suffix) to
+                        their relative file paths.
+    """
+    if not os.path.isdir(root_dir):
+        print(f"Error: Directory not found or not accessible: {root_dir}")
+        return {}  # Return empty dictionary
+
+    if DEBUG:
+        print(f"Scanning directory: '{root_dir}'...")
+
+    scenario_suffix = ".test_scenario.xml"
+
+    # Dictionary comprehension: {scenario_name: relative_path}
+    scenarios_map = {
+        # Key: Scenario name (filename without suffix)
+        filename.replace(scenario_suffix, ""):
+        # Value: Relative path
+        os.path.relpath(os.path.join(dirpath, filename), root_dir)
+
+        for dirpath, _, filenames in os.walk(root_dir)
+        for filename in filenames if filename.endswith(scenario_suffix)
+    }
+
+    return scenarios_map
+
+def organize_by_layer_component(scenarios_map):
+    """
+    Organizes scenario paths into a nested dictionary structure based on the file path:
+    {Layer_Folder: {Component_Folder: [scenario_name, ...]}}
+
+    It assumes the Layer is the first folder and the Component is the folder
+    preceding the 'test' directory (i.e., the third-to-last segment).
+
+    Args:
+        scenarios_map (dict[str, str]): Dictionary mapping scenario names to their
+                                        relative file paths.
+
+    Returns:
+        defaultdict: Nested dictionary (Layer -> Component -> List of Scenario Names).
+    """
+    organized_data = defaultdict(lambda: defaultdict(list))
+
+    # Iterate over the scenario name and path
+    for scenario_name, path in scenarios_map.items():
+        # Split path into segments using the OS separator
+        segments = path.split(os.sep)
+
+        # Layer is the first segment (e.g., 'application_layer', 'drivers')
+        layer = segments[0]
+
+        # Component is the third-to-last segment (e.g., 'actuator_manager', 'ammonia')
+        # We assume the file is inside a 'test' folder inside a component folder.
+        if len(segments) >= 3:
+            component = segments[-3]
+        else:
+            # Fallback for scenarios found too close to the root
+            component = "Root_Component"
+
+        # Populate the nested dictionary
+        organized_data[layer][component].append(scenario_name)
+
+    return organized_data
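To make the layer/component convention concrete, here is a hypothetical path run through `organize_by_layer_component()`; the scenario name is invented but follows the `<layer>/.../<component>/test/<file>` layout assumed above:

```python
# organize_demo.py -- illustrative only, not part of this patch
import json
from scenario_scan import organize_by_layer_component

scenarios = {
    "actuator_manager_init_test":
        "application_layer/business_stack/actuator_manager/test/"
        "actuator_manager_init_test.test_scenario.xml",
}

organized = organize_by_layer_component(scenarios)
print(json.dumps(organized, indent=2))
# {
#   "application_layer": {
#     "actuator_manager": ["actuator_manager_init_test"]
#   }
# }
```
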
+def scenario_scan(components_root_dir):
+    """
+    Main function to scan for test scenarios, print the organized structure, and
+    return the resulting dictionaries.
+
+    Returns:
+        tuple[defaultdict, dict]: The organized layer/component structure and the
+                                  raw dictionary of scenario names to paths.
+    """
+    # 1. Find all relative paths (now a dictionary: {name: path})
+    found_scenarios_map = find_test_scenarios(components_root_dir)
+
+    if not found_scenarios_map:
+        print(f"\nNo files ending with '.test_scenario.xml' were found in {components_root_dir}.")
+        # Return empty structures if nothing is found
+        return defaultdict(lambda: defaultdict(list)), {}
+
+    num_scenarios = len(found_scenarios_map)
+
+    if DEBUG:
+        # 2. Print the simple list of found paths
+        print(f"\n--- Found {num_scenarios} Test Scenarios ---")
+        for scenario_name, path in found_scenarios_map.items():
+            print(f"Scenario: '{scenario_name}' | Relative Path: {os.path.join('components', path)}")
+
+    # 3. Organize into the layer/component structure
+    organized_scenarios = organize_by_layer_component(found_scenarios_map)
+
+    if DEBUG:
+        # 4. Print the organized structure
+        print("\n--- Organized Layer/Component Structure ---")
+        for layer, components in organized_scenarios.items():
+            print(f"\n[LAYER] {layer.upper()}:")
+            for component, scenarios in components.items():
+                scenario_list = ", ".join(scenarios)
+                print(f"  [Component] {component}: {scenario_list}")
+
+    return organized_scenarios, found_scenarios_map
+
+if __name__ == "__main__":
+    organized_data, scenario_map = scenario_scan(COMPONENT_DIR)
+    combined_result = {
+        "organized_data": finalize_output(organized_data),
+        "scenario_map": finalize_output(scenario_map)
+    }
+
+    # Print the combined object as a single JSON string
+    # This is what will be captured by the SSH command
+    print(json.dumps(combined_result))
\ No newline at end of file
diff --git a/test_execution.sh b/test_execution.sh
new file mode 100644
index 0000000..9fc7a97
--- /dev/null
+++ b/test_execution.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+# Check if the correct number of arguments is provided (now 4)
+if [ "$#" -ne 4 ]; then
+    echo "Usage: $0 <task_id> <cmd> <result_dir> <repo_path>"
+    exit 1
+fi
+
+TASK_ID=$1
+CMD=$2
+RESULT_DIR=$3
+REPO_PATH=$4
+echo "$TASK_ID"
+# Create result directory if it doesn't exist (absolute path)
+mkdir -p "$RESULT_DIR"
+# Use realpath on the (now-existing) result dir and a clearer filename
+LOG_FILE="$(realpath "$RESULT_DIR")/${TASK_ID}-logging.html"
+
+# Initialize HTML file with basic styling
+cat <<EOF > "$LOG_FILE"
+<!DOCTYPE html>
+<html>
+<head>
+    <title>Execution Log: $TASK_ID</title>
+</head>
+<body>
+    <h2>Execution Log for Task: $TASK_ID</h2>
+    <p><b>Working Directory:</b> $REPO_PATH</p>
+    <p><b>Executing:</b> $CMD</p>
+    <hr/>
+    <div class="log">
+EOF
+
+# 1. CD into the repo path
+# 2. Execute command and capture output
+# 3. PIPESTATUS[0] captures the exit code of the CMD, not the 'tee'
+cd "$REPO_PATH" && eval "$CMD" 2>&1 | tee -a >(sed 's|$|<br/>|' >> "$LOG_FILE")
+EXIT_CODE=${PIPESTATUS[0]}
+
+# Close HTML tags
+echo "    </div></body></html>" >> "$LOG_FILE"
+
+# Determine PASS/FAIL
+if [ $EXIT_CODE -eq 0 ]; then
+    RESULT="PASS"
+else
+    RESULT="FAIL"
+fi
+
+EVIDENCE_URL="file://$LOG_FILE"
+
+# Return JSON output with a unique marker prefix
+printf 'FINAL_JSON_OUTPUT:{"%s": ["%s", "%s"]}\n' "$TASK_ID" "$RESULT" "$EVIDENCE_URL"
\ No newline at end of file
" >> "$LOG_FILE" + +# Determine PASS/FAIL +if [ $EXIT_CODE -eq 0 ]; then + RESULT="PASS" +else + RESULT="FAIL" +fi + +EVIDENCE_URL="file://$LOG_FILE" + +# Return JSON output +# ... (rest of the script remains the same) + +# Return JSON output with a unique marker prefix +printf 'FINAL_JSON_OUTPUT:{"%s": ["%s", "%s"]}\n' "$TASK_ID" "$RESULT" "$EVIDENCE_URL" \ No newline at end of file diff --git a/testarena_app/database.py b/testarena_app/database.py new file mode 100644 index 0000000..131e2ea --- /dev/null +++ b/testarena_app/database.py @@ -0,0 +1,16 @@ +from sqlalchemy import create_all_engines, create_engine +from sqlalchemy.orm import sessionmaker +import os + +# Using SQLite for simplicity as requested +DATABASE_URL = "sqlite:///d:/ASF - course/ASF_01/ASF_tools/asf-pc-server/testarena_pc_backend/testarena_app/testarena.db" + +engine = create_engine(DATABASE_URL, connect_args={"check_same_thread": False}) +SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine) + +def get_db(): + db = SessionLocal() + try: + yield db + finally: + db.close() diff --git a/testarena_app/main.py b/testarena_app/main.py new file mode 100644 index 0000000..d0af081 --- /dev/null +++ b/testarena_app/main.py @@ -0,0 +1,139 @@ +from fastapi import FastAPI, Depends, HTTPException, BackgroundTasks +from fastapi.staticfiles import StaticFiles +from fastapi.responses import FileResponse +from sqlalchemy.orm import Session +import os +import json +import uuid +from typing import Dict, List +from . import models, database + +app = FastAPI(title="TestArena API") + +# Mount static files +static_dir = os.path.join(os.path.dirname(__file__), "static") +os.makedirs(static_dir, exist_ok=True) +app.mount("/static", StaticFiles(directory=static_dir), name="static") + +# Base directory for data as requested +BASE_DATA_DIR = "/home/asf/testarena" +# For local development on Windows, we might need to adjust this, +# but I'll stick to the user's requirement for the final version. +if os.name == 'nt': + BASE_DATA_DIR = "d:/ASF - course/ASF_01/ASF_tools/asf-pc-server/testarena_pc_backend/testarena_data" + +# Ensure base directory exists +os.makedirs(BASE_DATA_DIR, exist_ok=True) + +# Initialize database +models.Base.metadata.create_all(bind=database.engine) + +@app.post("/api/queue") +async def queue_task(payload: Dict, db: Session = Depends(database.get_db)): + """ + Input json contain { :[environment, "" : "],} + """ + try: + queue_id = list(payload.keys())[0] + data = payload[queue_id] + environment = data[0] + tasks_data = data[1] # This is a dict {"TASK_ID": "path"} + + # 1. Create folder + queue_dir = os.path.join(BASE_DATA_DIR, queue_id) + os.makedirs(queue_dir, exist_ok=True) + + # 2. Create queue_status.json + status_file = os.path.join(queue_dir, "queue_status.json") + queue_status = { + "queue_id": queue_id, + "status": "Waiting", + "tasks": {} + } + + # 3. 
diff --git a/testarena_app/main.py b/testarena_app/main.py
new file mode 100644
index 0000000..d0af081
--- /dev/null
+++ b/testarena_app/main.py
@@ -0,0 +1,139 @@
+from fastapi import FastAPI, Depends, HTTPException, BackgroundTasks
+from fastapi.staticfiles import StaticFiles
+from fastapi.responses import FileResponse
+from sqlalchemy.orm import Session
+import os
+import json
+import uuid
+from typing import Dict, List
+from . import models, database
+
+app = FastAPI(title="TestArena API")
+
+# Mount static files
+static_dir = os.path.join(os.path.dirname(__file__), "static")
+os.makedirs(static_dir, exist_ok=True)
+app.mount("/static", StaticFiles(directory=static_dir), name="static")
+
+# Base directory for data as requested
+BASE_DATA_DIR = "/home/asf/testarena"
+# For local development on Windows the path is adjusted; the Linux path above
+# is the one used in the final deployment.
+if os.name == 'nt':
+    BASE_DATA_DIR = "d:/ASF - course/ASF_01/ASF_tools/asf-pc-server/testarena_pc_backend/testarena_data"
+
+# Ensure base directory exists
+os.makedirs(BASE_DATA_DIR, exist_ok=True)
+
+# Initialize database
+models.Base.metadata.create_all(bind=database.engine)
+
+@app.post("/api/queue")
+async def queue_task(payload: Dict, db: Session = Depends(database.get_db)):
+    """
+    Input JSON contains {<queue_id>: [<environment>, {"<task_id>": "<scenario_path>", ...}]}
+    """
+    try:
+        queue_id = list(payload.keys())[0]
+        data = payload[queue_id]
+        environment = data[0]
+        tasks_data = data[1]  # This is a dict {"TASK_ID": "path"}
+
+        # 1. Create folder
+        queue_dir = os.path.join(BASE_DATA_DIR, queue_id)
+        os.makedirs(queue_dir, exist_ok=True)
+
+        # 2. Create queue_status.json
+        status_file = os.path.join(queue_dir, "queue_status.json")
+        queue_status = {
+            "queue_id": queue_id,
+            "status": "Waiting",
+            "tasks": {}
+        }
+
+        # 3. Save to database and prepare status file
+        new_queue = models.Queue(id=queue_id, environment=environment, status="Waiting")
+        db.add(new_queue)
+
+        for task_id, scenario_path in tasks_data.items():
+            new_task = models.Task(id=task_id, queue_id=queue_id, scenario_path=scenario_path, status="Waiting")
+            db.add(new_task)
+            queue_status["tasks"][task_id] = "Waiting"
+
+        with open(status_file, 'w') as f:
+            json.dump(queue_status, f, indent=4)
+
+        db.commit()
+        return {"status": "Queue OK", "queue_id": queue_id}
+    except Exception as e:
+        return {"status": "Error", "message": str(e)}
+
+@app.get("/api/status/{id}")
+async def get_status(id: str, db: Session = Depends(database.get_db)):
+    # Check if it's a queue ID
+    queue = db.query(models.Queue).filter(models.Queue.id == id).first()
+    if queue:
+        return {"id": id, "type": "queue", "status": queue.status}
+
+    # Check if it's a task ID
+    task = db.query(models.Task).filter(models.Task.id == id).first()
+    if task:
+        return {"id": id, "type": "task", "status": task.status}
+
+    raise HTTPException(status_code=404, detail="ID not found")
+
+@app.post("/api/abort/{id}")
+async def abort_task(id: str, db: Session = Depends(database.get_db)):
+    # Abort queue
+    queue = db.query(models.Queue).filter(models.Queue.id == id).first()
+    if queue:
+        queue.status = "Aborted"
+        # Abort all tasks in queue
+        tasks = db.query(models.Task).filter(models.Task.queue_id == id).all()
+        for t in tasks:
+            if t.status in ["Waiting", "Running"]:
+                t.status = "Aborted"
+
+        # Update queue_status.json
+        queue_dir = os.path.join(BASE_DATA_DIR, id)
+        status_file = os.path.join(queue_dir, "queue_status.json")
+        if os.path.exists(status_file):
+            with open(status_file, 'r') as f:
+                data = json.load(f)
+            data["status"] = "Aborted"
+            for tid in data["tasks"]:
+                if data["tasks"][tid] in ["Waiting", "Running"]:
+                    data["tasks"][tid] = "Aborted"
+            with open(status_file, 'w') as f:
+                json.dump(data, f, indent=4)
+
+        db.commit()
+        return {"id": id, "status": "Aborted"}
+
+    # Abort single task
+    task = db.query(models.Task).filter(models.Task.id == id).first()
+    if task:
+        task.status = "Aborted"
+        # Update queue_status.json
+        queue_dir = os.path.join(BASE_DATA_DIR, task.queue_id)
+        status_file = os.path.join(queue_dir, "queue_status.json")
+        if os.path.exists(status_file):
+            with open(status_file, 'r') as f:
+                data = json.load(f)
+            data["tasks"][id] = "Aborted"
+            with open(status_file, 'w') as f:
+                json.dump(data, f, indent=4)
+
+        db.commit()
+        return {"id": id, "status": "Aborted"}
+
+    raise HTTPException(status_code=404, detail="ID not found")
+
+@app.get("/api/queues")
+async def list_queues(db: Session = Depends(database.get_db)):
+    queues = db.query(models.Queue).order_by(models.Queue.created_at.desc()).all()
+    return queues
+
+@app.get("/")
+async def root():
+    return FileResponse(os.path.join(static_dir, "index.html"))
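The queue API can also be exercised without curl. A minimal standard-library client sketch; the host, queue ID, and scenario path are illustrative:

```python
# queue_client_demo.py -- illustrative only, not part of this patch
import json
import urllib.request

BASE = "http://asf-server.duckdns.org:8080"  # assumed host/port

payload = {"demo_queue_001": ["staging", {"task_1": "/home/asf/scenarios/test1.py"}]}
req = urllib.request.Request(
    BASE + "/api/queue",
    data=json.dumps(payload).encode(),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    print(json.load(resp))  # {"status": "Queue OK", "queue_id": "demo_queue_001"}

with urllib.request.urlopen(BASE + "/api/status/demo_queue_001") as resp:
    print(json.load(resp))  # {"id": "...", "type": "queue", "status": "Waiting"}
```
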
diff --git a/testarena_app/models.py b/testarena_app/models.py
new file mode 100644
index 0000000..c36ffc4
--- /dev/null
+++ b/testarena_app/models.py
@@ -0,0 +1,27 @@
+from sqlalchemy import Column, Integer, String, DateTime, ForeignKey, JSON
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.orm import relationship
+import datetime
+
+Base = declarative_base()
+
+class Queue(Base):
+    __tablename__ = "queues"
+
+    id = Column(String, primary_key=True, index=True)
+    status = Column(String, default="Waiting")  # Finished, Waiting, Running, Aborted
+    created_at = Column(DateTime, default=datetime.datetime.utcnow)
+    environment = Column(String)
+
+    tasks = relationship("Task", back_populates="queue", cascade="all, delete-orphan")
+
+class Task(Base):
+    __tablename__ = "tasks"
+
+    id = Column(String, primary_key=True, index=True)
+    queue_id = Column(String, ForeignKey("queues.id"))
+    scenario_path = Column(String)
+    status = Column(String, default="Waiting")  # Finished, Waiting, Running, Aborted
+    result = Column(JSON, nullable=True)
+
+    queue = relationship("Queue", back_populates="tasks")
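A quick way to sanity-check the schema and the `cascade="all, delete-orphan"` relationship is a throwaway in-memory database. Everything below is illustrative and not part of the patch:

```python
# models_demo.py -- illustrative only
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from testarena_app.models import Base, Queue, Task

engine = create_engine("sqlite:///:memory:")
Base.metadata.create_all(engine)
db = sessionmaker(bind=engine)()

q = Queue(id="demo_q", environment="staging")
q.tasks = [Task(id="t1", scenario_path="/tmp/a.xml"),
           Task(id="t2", scenario_path="/tmp/b.xml")]
db.add(q)
db.commit()

db.delete(q)  # delete-orphan cascade removes the tasks too
db.commit()
print(db.query(Task).count())  # 0
```
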
diff --git a/testarena_app/static/index.html b/testarena_app/static/index.html
new file mode 100644
index 0000000..ffb34c6
--- /dev/null
+++ b/testarena_app/static/index.html
@@ -0,0 +1,407 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="UTF-8">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0">
+    <title>TestArena | Modern Dashboard</title>
+    <!-- original inline styles and font/icon links not recovered -->
+</head>
+<body>
+    <header>
+        <h1>TestArena</h1>
+        <span id="connection-status">Connecting...</span>
+    </header>
+
+    <main>
+        <section id="queue-monitor">
+            <h2>Queue Monitor</h2>
+            <table>
+                <thead>
+                    <tr>
+                        <th>Queue ID</th>
+                        <th>Environment</th>
+                        <th>Status</th>
+                        <th>Actions</th>
+                    </tr>
+                </thead>
+                <tbody id="queue-table-body"><!-- rows rendered by the dashboard script --></tbody>
+            </table>
+        </section>
+
+        <section id="live-logs">
+            <h2>Live System Logs</h2>
+            <div class="log-entry">
+                <span class="log-time">23:34:52</span>
+                <span class="log-message">System initialized. Waiting for connection...</span>
+            </div>
+        </section>
+    </main>
+
+    <!-- original dashboard script (queue polling, abort actions) not recovered -->
+</body>
+</html>
\ No newline at end of file
diff --git a/testarena_app/worker.py b/testarena_app/worker.py
new file mode 100644
index 0000000..d9805ce
--- /dev/null
+++ b/testarena_app/worker.py
@@ -0,0 +1,98 @@
+import time
+import subprocess
+import json
+import os
+from sqlalchemy.orm import Session
+from . import models, database
+
+# Base directory for data
+BASE_DATA_DIR = "/home/asf/testarena"
+if os.name == 'nt':
+    BASE_DATA_DIR = "d:/ASF - course/ASF_01/ASF_tools/asf-pc-server/testarena_pc_backend/testarena_data"
+
+def update_json_status(queue_id, task_id, status, result=None):
+    queue_dir = os.path.join(BASE_DATA_DIR, queue_id)
+    status_file = os.path.join(queue_dir, "queue_status.json")
+    if os.path.exists(status_file):
+        with open(status_file, 'r') as f:
+            data = json.load(f)
+
+        if task_id:
+            data["tasks"][task_id] = status
+        else:
+            data["status"] = status
+
+        if result:
+            data["results"] = data.get("results", {})
+            data["results"][task_id] = result
+
+        with open(status_file, 'w') as f:
+            json.dump(data, f, indent=4)
+
+def run_worker():
+    print("Worker started...")
+    while True:
+        db = database.SessionLocal()
+        try:
+            # Get next waiting queue
+            queue = db.query(models.Queue).filter(models.Queue.status == "Waiting").order_by(models.Queue.created_at).first()
+
+            if queue:
+                print(f"Processing queue: {queue.id}")
+                queue.status = "Running"
+                update_json_status(queue.id, None, "Running")
+                db.commit()
+
+                tasks = db.query(models.Task).filter(models.Task.queue_id == queue.id, models.Task.status == "Waiting").all()
+
+                for task in tasks:
+                    # Check if queue was aborted mid-way
+                    db.refresh(queue)
+                    if queue.status == "Aborted":
+                        break
+
+                    print(f"Running task: {task.id}")
+                    task.status = "Running"
+                    update_json_status(queue.id, task.id, "Running")
+                    db.commit()
+
+                    try:
+                        # Run tpf_execution.py <queue_id> <scenario_path> <task_id>
+                        # Assuming tpf_execution.py is in the parent directory or accessible
+                        script_path = "tpf_execution.py"
+                        cmd = ["python", script_path, queue.id, task.scenario_path, task.id]
+
+                        result = subprocess.run(cmd, capture_output=True, text=True)
+
+                        # Parse result if it returns json
+                        try:
+                            execution_result = json.loads(result.stdout)
+                        except json.JSONDecodeError:
+                            execution_result = {"output": result.stdout, "error": result.stderr}
+
+                        task.status = "Finished"
+                        task.result = execution_result
+                        update_json_status(queue.id, task.id, "Finished", execution_result)
+
+                    except Exception as e:
+                        print(f"Error running task {task.id}: {e}")
+                        task.status = "Error"
+                        update_json_status(queue.id, task.id, "Error")
+
+                    db.commit()
+
+                if queue.status != "Aborted":
+                    queue.status = "Finished"
+                    update_json_status(queue.id, None, "Finished")
+                    db.commit()
+
+            time.sleep(5)  # Poll every 5 seconds
+        except Exception as e:
+            print(f"Worker error: {e}")
+            time.sleep(10)
+        finally:
+            db.close()
+
+if __name__ == "__main__":
+    run_worker()
diff --git a/tpf_execution.py b/tpf_execution.py
new file mode 100644
index 0000000..9ede2cc
--- /dev/null
+++ b/tpf_execution.py
@@ -0,0 +1,32 @@
+import sys
+import json
+import time
+import random
+
+def main():
+    if len(sys.argv) < 4:
+        print("Usage: python tpf_execution.py <queue_id> <scenario_path> <task_id>")
+        sys.exit(1)
+
+    queue_id = sys.argv[1]
+    scenario_path = sys.argv[2]
+    task_id = sys.argv[3]
+
+    print(f"Starting execution for Task: {task_id} in Queue: {queue_id}")
+    print(f"Scenario: {scenario_path}")
+
+    # Simulate work
+    duration = random.randint(2, 5)
+    time.sleep(duration)
+
+    result = {
+        "task_id": 
task_id, + "status": "Success", + "duration": duration, + "details": f"Scenario {scenario_path} executed successfully." + } + + print(json.dumps(result)) + +if __name__ == "__main__": + main()
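`tpf_execution.py` is the stub the worker shells out to, and its contract is a JSON object on stdout (the worker stores it in `Task.result`). A sketch of invoking it directly; note the progress prints mean only the last stdout line is JSON, so the demo parses just that line:

```python
# tpf_demo.py -- illustrative only, not part of this patch
import json
import subprocess

proc = subprocess.run(
    ["python3", "tpf_execution.py", "q_001", "/home/asf/scenarios/test1.py", "t_001"],
    capture_output=True, text=True,
)
# The last stdout line is the JSON result the worker persists:
result = json.loads(proc.stdout.strip().splitlines()[-1])
print(result["status"], result["duration"])
```
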