|
| 1 | +# SPDX-FileCopyrightText: 2023-2025 Espressif Systems (Shanghai) CO LTD |
| 2 | +# SPDX-License-Identifier: CC0-1.0 |
| 3 | + |
import datetime
import json
import urllib.error
import urllib.request
from pathlib import Path

import pytest
from pytest_embedded import Dut
| 10 | + |
| 11 | +BENCHMARK_RELEASES_URL = "https://github.com/espressif/esp-bsp/releases/download/benchmark-latest" |
| 12 | + |
| 13 | + |
def write_to_file(board, ext, text):
    """Append *text* to the per-board report file ``benchmark_<board><ext>``.

    The file is opened in append mode, so repeated calls build the report
    incrementally; callers are expected to delete stale files beforehand.
    """
    target = f"benchmark_{board}{ext}"
    with open(target, "a") as report:
        report.write(text)
| 17 | + |
| 18 | + |
def read_json_file(board):
    """Download and parse the last published benchmark JSON for *board*.

    Fetches ``benchmark_<board>.json`` from the GitHub "benchmark-latest"
    release. Returns the parsed object, or an empty list when the file does
    not exist (e.g. first run for a board), the download fails, or the
    payload is not valid JSON — callers treat a falsy result as "no
    previous data".
    """
    url = f"{BENCHMARK_RELEASES_URL}/benchmark_{board}.json"
    try:
        with urllib.request.urlopen(url) as file:
            return json.load(file)
    except urllib.error.URLError:
        # URLError is the base of HTTPError, so this covers both a 404
        # (no previous results published) and network-level failures,
        # keeping the comparison step best-effort instead of fatal.
        return []
    except json.JSONDecodeError:
        return []
| 28 | + |
| 29 | + |
def find_test_results(json_obj, test):
    """Return the entry in ``json_obj["tests"]`` whose ``"Name"`` is *test*.

    *json_obj* is the structure produced by this script (a dict with a
    ``"tests"`` list) or a falsy placeholder from ``read_json_file``.
    Returns ``None`` when *json_obj* is falsy, has no ``"tests"`` key,
    or no entry matches.
    """
    if not json_obj:
        return None
    # .get() guards against a well-formed JSON file that lacks "tests",
    # which previously raised KeyError.
    for entry in json_obj.get("tests", []):
        if entry["Name"] == test:
            return entry
    return None
| 35 | + |
| 36 | + |
def get_test_diff(test1, test2, name, positive):
    """Return a Markdown snippet showing the delta of column *name*.

    Compares the current test entry *test1* against the previous run
    *test2* (dicts of column-name -> string value, possibly with a ``%``
    suffix). *positive* says whether a higher value is an improvement
    (e.g. FPS) or a regression (e.g. CPU load / times); it controls the
    red/green coloring. Returns "" when either entry or value is missing,
    or when there is no change.
    """
    if not test1 or not test2:
        return ""
    # .get() instead of [...] so a column absent from an older JSON file
    # yields "" rather than KeyError.
    value1 = test1.get(name)
    value2 = test2.get(name)
    if not value1 or not value2:
        return ""
    # Strip on local copies: the original code mutated the caller's dicts,
    # which stripped the "%" from test_entry before it was dumped to the
    # JSON report, producing inconsistent saved data.
    diff = int(value1.replace("%", "")) - int(value2.replace("%", ""))
    if diff == 0:
        return ""
    if positive:
        color = "red" if diff < 0 else "green"
    else:
        color = "green" if diff < 0 else "red"
    sign = "+" if diff > 0 else ""
    return f"*<span style=\"color:{color}\"><sub>({sign}{diff})</sub></span>*"
| 52 | + |
| 53 | + |
@pytest.mark.esp_box_3
@pytest.mark.esp32_p4_function_ev_board
@pytest.mark.esp32_s3_eye
@pytest.mark.esp32_s3_lcd_ev_board
@pytest.mark.esp32_s3_lcd_ev_board_2
@pytest.mark.m5dial
@pytest.mark.m5stack_core_s3
@pytest.mark.m5stack_core_s3_se
def test_example(dut: Dut, request) -> None:
    """Run the LVGL benchmark on the target board and record the results.

    Parses the benchmark summary from the device log, writes a Markdown
    table (with colored deltas against the last published results) to
    ``benchmark_<board>.md`` and the raw numbers to
    ``benchmark_<board>.json``.
    """
    now = datetime.datetime.now()
    board = request.node.callspec.id

    # Wait for the firmware to reach the benchmark demo.
    dut.expect_exact('app_main: Display LVGL demo')
    dut.expect_exact('main_task: Returned from app_main()')

    # Remove stale report files from any previous run (we append below).
    for suffix in (".md", ".json"):
        Path(f"benchmark_{board}{suffix}").unlink(missing_ok=True)

    output = {
        "date": now.strftime('%d.%m.%Y %H:%M'),
        "board": board,
    }

    # Markdown header: board name and run date.
    write_to_file(board, ".md", f"# Benchmark for BOARD {board}\n\n")
    write_to_file(board, ".md", f"**DATE:** {now.strftime('%d.%m.%Y %H:%M')}\n\n")

    # LVGL version, taken from the summary banner in the device log.
    outdata = dut.expect(r'Benchmark Summary \((.*) \)', timeout=200)
    lvgl_version = outdata[1].decode()
    output["LVGL"] = lvgl_version
    write_to_file(board, ".md", f"**LVGL version:** {lvgl_version}\n\n")

    # Consume the CSV header line, then emit the Markdown table header.
    dut.expect(r'Name, Avg. CPU, Avg. FPS, Avg. time, render time, flush time', timeout=200)
    write_to_file(board, ".md", "| Name | Avg. CPU | Avg. FPS | Avg. time | render time | flush time |\n")
    write_to_file(board, ".md", "| ---- | :------: | :------: | :-------: | :---------: | :--------: |\n")

    # Previously published results for delta annotations (may be empty).
    last_results = read_json_file(board)

    # Columns in table order, paired with whether "higher is better".
    columns = (
        ("Avg. CPU", False),
        ("Avg. FPS", True),
        ("Avg. time", False),
        ("Render time", False),
        ("Flush time", False),
    )

    output["tests"] = []
    for _ in range(17):
        outdata = dut.expect(r'([\w \.]+),[ ]?(\d+%),[ ]?(\d+),[ ]?(\d+),[ ]?(\d+),[ ]?(\d+)', timeout=200)
        test_entry = {
            "Name": outdata[1].decode(),
            "Avg. CPU": outdata[2].decode(),
            "Avg. FPS": outdata[3].decode(),
            "Avg. time": outdata[4].decode(),
            "Render time": outdata[5].decode(),
            "Flush time": outdata[6].decode(),
        }
        output["tests"].append(test_entry)

        previous = find_test_results(last_results, test_entry["Name"])
        cells = [test_entry["Name"]]
        for column, higher_is_better in columns:
            delta = get_test_diff(test_entry, previous, column, higher_is_better)
            cells.append(test_entry[column] + " " + delta)
        write_to_file(board, ".md", "| " + " | ".join(cells) + " |\n")

    # Markdown footer.
    write_to_file(board, ".md", "\n")
    write_to_file(board, ".md", "***")
    write_to_file(board, ".md", "\n\n")

    # Persist the raw numbers for comparison by future runs.
    write_to_file(board, ".json", json.dumps(output, indent=4))