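"""Test scenario runner for the Sensor_hub_repo test setup.

Parses a test scenario XML, executes each test case through test_execution.sh,
aggregates the per-case results, and writes a final_summary.json plus an
execution_report.html into the task's result directory.
"""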
import os
import sys
import json
import subprocess

from scenario_exe_parser import parse_test_scenario

# --- Global Paths ---
current_directory = os.path.dirname(os.path.abspath(__file__))
REPO_PATH = os.path.join(current_directory, "Sensor_hub_repo")
COMPONENT_DIR = os.path.join(REPO_PATH, "components")
RESULT_PATH = "/home/asf/testarena"

# The HTML Template
REPORT_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>ESP32 Test Execution Report</title>
    <style>
        body { font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; margin: 40px; background-color: #f4f7f6; }
        h2 { color: #333; border-bottom: 2px solid #ccc; padding-bottom: 10px; }
        table { width: 100%; border-collapse: collapse; margin: 20px 0; background-color: #fff; box-shadow: 0 2px 5px rgba(0,0,0,0.1); }
        th, td { padding: 12px 15px; text-align: left; border-bottom: 1px solid #ddd; }
        th { background-color: #2c3e50; color: white; text-transform: uppercase; letter-spacing: 0.1em; }
        .status-pass { color: #ffffff; background-color: #27ae60; padding: 4px 12px; border-radius: 4px; font-weight: bold; }
        .status-fail { color: #ffffff; background-color: #c0392b; padding: 4px 12px; border-radius: 4px; font-weight: bold; }
        a { color: #2980b9; text-decoration: none; font-weight: bold; }
        a:hover { text-decoration: underline; }
        tr:hover { background-color: #f1f1f1; }
    </style>
</head>
<body>
    <h2>Overall Scenario Summary</h2>
    <table>
        <thead>
            <tr>
                <th>Scenario Name</th>
                <th>Final Result</th>
            </tr>
        </thead>
        <tbody>
            <tr>
                <td>{{scenario_name}}</td>
                <td><span class="{{overall_class}}">{{overall_status}}</span></td>
            </tr>
        </tbody>
    </table>

    <h2>Detailed Test Cases</h2>
    <table>
        <thead>
            <tr>
                <th>Test Case ID</th>
                <th>Result</th>
                <th>Execution Log</th>
            </tr>
        </thead>
        <tbody>
            {{test_case_rows}}
        </tbody>
    </table>
</body>
</html>
"""


def run_test_suite(tasks):
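    """Execute each task via test_execution.sh and aggregate the results.

    Each task is a dict with "id" (test case ID), "cmd" (execution command)
    and "path" (result directory for this run). The shell script is expected
    to emit a line prefixed with "FINAL_JSON_OUTPUT:" whose JSON payload maps
    each test case ID to its result entry (consumed downstream as a
    [status, log URL] pair); all payloads are merged into one dict.
    """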
    aggregated_results = {}
    # Use path relative to this script
    script_dir = os.path.dirname(os.path.abspath(__file__))
    shell_script = os.path.join(script_dir, "test_execution.sh")

    if os.name != 'nt':
        subprocess.run(["chmod", "+x", shell_script])

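    # test_execution.sh is invoked once per task as (argument order taken from
    # the Popen call below): test_execution.sh <test_case_id> <exec_cmd> <result_dir> <repo_path>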
    for task in tasks:
        print(f"--- Starting Task: {task['id']} ---")

        # Use Popen to stream output in real-time
        process = subprocess.Popen(
            [shell_script, task['id'], task['cmd'], task['path'], REPO_PATH],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            text=True,
            bufsize=1,
        )

        full_output = ""
        for line in process.stdout:
            print(line, end="")
            full_output += line

        process.wait()

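        # The runner script reports its result on a single line of the form
        # "FINAL_JSON_OUTPUT: {...}"; parse that JSON and merge it into the
        # aggregated results.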
        json_found = False
        for line in full_output.splitlines():
            if line.startswith("FINAL_JSON_OUTPUT:"):
                json_string = line.replace("FINAL_JSON_OUTPUT:", "").strip()
                try:
                    task_json = json.loads(json_string)
                    aggregated_results.update(task_json)
                    json_found = True
                except json.JSONDecodeError as e:
                    print(f"!!! JSON Parsing Error: {e}")

        if not json_found:
            aggregated_results[task['id']] = ["ERROR", "N/A"]

    return aggregated_results


def generate_html_report(scenario_name, results, output_path):
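    """Render REPORT_TEMPLATE with the aggregated results and write it.

    `results` maps each test case ID to a [status, log URL] entry; the overall
    scenario verdict is PASS only if every test case passed. The finished
    report is written to <output_path>/execution_report.html.
    """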
    all_passed = all(info[0] == "PASS" for info in results.values())
    overall_status = "PASS" if all_passed else "FAIL"
    overall_class = "status-pass" if all_passed else "status-fail"

    test_case_rows = ""
    for tc_id, info in results.items():
        status = info[0]
        log_url = info[1]
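        # Logs are written under /home/asf/testarena but served over HTTP,
        # so rewrite the local file:// URL to the published results URL.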
        log_url = log_url.replace("file:///home/asf/testarena/", "http://asf-server.duckdns.org:8080/results/")
        status_class = "status-pass" if status == "PASS" else "status-fail"

        test_case_rows += f"""
        <tr>
            <td>{tc_id}</td>
            <td><span class="{status_class}">{status}</span></td>
            <td><a href="{log_url}" target="_blank">View Log</a></td>
        </tr>
        """

    # Fill in the placeholders of the global REPORT_TEMPLATE
    report_content = (
        REPORT_TEMPLATE
        .replace("{{scenario_name}}", scenario_name)
        .replace("{{overall_status}}", overall_status)
        .replace("{{overall_class}}", overall_class)
        .replace("{{test_case_rows}}", test_case_rows)
    )

    report_file = os.path.join(output_path, "execution_report.html")
    with open(report_file, "w", encoding="utf-8") as f:
        f.write(report_content)
    print(f"HTML Report generated at: {report_file}")


def save_summary(results, task_id_path):
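    """Dump the aggregated results as final_summary.json in the task folder."""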
    json_path = os.path.join(task_id_path, "final_summary.json")
    with open(json_path, "w") as f:
        json.dump(results, f, indent=4)
    print(f"\nFinal results saved to {json_path}")


if __name__ == "__main__":
    if len(sys.argv) > 3:
        queue_id = sys.argv[1]       # e.g. "1234"
        scenario_path = sys.argv[2]  # e.g. "application_layer/business_stack/actuator_manager/test/actuator_manager_init_test.test_scenario.xml"
        task_id = sys.argv[3]        # e.g. "56754"
    else:
        print("Usage: python scenario_execution.py <queue_id> <scenario_path> <task_id>")
        sys.exit(1)

    # Results are laid out as RESULT_PATH/<queue_id>/<task_id>/
    queue_path = os.path.join(RESULT_PATH, queue_id)
    task_id_path = os.path.join(queue_path, task_id)

    os.makedirs(task_id_path, exist_ok=True)

    # Parse the scenario XML (parse_test_scenario comes from scenario_exe_parser)
    scenario_data = parse_test_scenario(os.path.join(COMPONENT_DIR, scenario_path))

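    # Build one task per test case; parse_test_scenario() exposes the test
    # cases as a {case_id: exec_cmd} mapping under the 'test_cases' key.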
    my_tasks = []
    sub_tasks_data = scenario_data['test_cases']
    for case_id, exec_cmd in sub_tasks_data.items():
        my_tasks.append({
            "id": case_id,
            "cmd": exec_cmd,
            "path": task_id_path
        })

    final_data = run_test_suite(my_tasks)
    save_summary(final_data, task_id_path)

    # Generate the report inside the task folder
    generate_html_report(os.path.basename(scenario_path), final_data, task_id_path)