|
|
@@ -17,13 +17,12 @@
|
|
|
"""
|
|
|
Test script for unit test case.
|
|
|
"""
|
|
|
-
|
|
|
-import re
|
|
|
+import argparse
|
|
|
import os
|
|
|
+import re
|
|
|
import sys
|
|
|
-import time
|
|
|
-import argparse
|
|
|
import threading
|
|
|
+import time
|
|
|
|
|
|
try:
|
|
|
import TinyFW
|
|
|
@@ -47,9 +46,16 @@ import Env
|
|
|
from DUT import ExpectTimeout
|
|
|
from IDF.IDFApp import UT
|
|
|
|
|
|
+from TinyFW import TestCaseFailed
|
|
|
+from Utility import format_case_id, handle_unexpected_exception
|
|
|
|
|
|
UT_APP_BOOT_UP_DONE = "Press ENTER to see the list of tests."
|
|
|
+
|
|
|
+STRIP_CONFIG_PATTERN = re.compile(r'(.+?)(_\d+)?$')
|
|
|
+
|
|
|
+# matches e.g.: "rst:0xc (SW_CPU_RESET),boot:0x13 (SPI_FAST_FLASH_BOOT)"
|
|
|
RESET_PATTERN = re.compile(r"(ets [\w]{3}\s+[\d]{1,2} [\d]{4} [\d]{2}:[\d]{2}:[\d]{2}[^()]*\([\w].*?\))")
|
|
|
+
|
|
|
EXCEPTION_PATTERN = re.compile(r"(Guru Meditation Error: Core\s+\d panic'ed \([\w].*?\))")
|
|
|
ABORT_PATTERN = re.compile(r"(abort\(\) was called at PC 0x[a-fA-F\d]{8} on core \d)")
|
|
|
FINISH_PATTERN = re.compile(r"1 Tests (\d) Failures (\d) Ignored")
|
|
|
@@ -67,11 +73,26 @@ DUT_STARTUP_CHECK_RETRY_COUNT = 5
|
|
|
TEST_HISTORY_CHECK_TIMEOUT = 1
|
|
|
|
|
|
|
|
|
-class TestCaseFailed(AssertionError):
|
|
|
- pass
|
|
|
+def reset_reason_matches(reported_str, expected_str):
|
|
|
+ known_aliases = {
|
|
|
+ "_RESET": "_RST",
|
|
|
+ "POWERON_RESET": "POWERON",
|
|
|
+ "DEEPSLEEP_RESET": "DSLEEP",
|
|
|
+ }
|
|
|
+
|
|
|
+ if expected_str in reported_str:
|
|
|
+ return True
|
|
|
+
|
|
|
+ for token, alias in known_aliases.items():
|
|
|
+ if token in expected_str:
|
|
|
+ alt_expected_str = expected_str.replace(token, alias)
|
|
|
+ if alt_expected_str in reported_str:
|
|
|
+ return True
|
|
|
|
|
|
+ return False
|
|
|
|
|
|
-def format_test_case_config(test_case_data):
|
|
|
+
|
|
|
+def format_test_case_config(test_case_data, target='esp32'):
|
|
|
"""
|
|
|
convert the test case data to unified format.
|
|
|
We need to following info to run unit test cases:
|
|
|
@@ -91,6 +112,7 @@ def format_test_case_config(test_case_data):
|
|
|
If config is not specified for test case, then
|
|
|
|
|
|
:param test_case_data: string, list, or a dictionary list
|
|
|
+    :param target: default target chip name (e.g. 'esp32') applied to cases that do not specify one
|
|
|
:return: formatted data
|
|
|
"""
|
|
|
|
|
|
@@ -130,6 +152,9 @@ def format_test_case_config(test_case_data):
|
|
|
if "config" not in _case:
|
|
|
_case["config"] = "default"
|
|
|
|
|
|
+ if 'target' not in _case:
|
|
|
+ _case['target'] = target
|
|
|
+
|
|
|
return _case
|
|
|
|
|
|
if not isinstance(test_case_data, list):
|
|
|
@@ -156,6 +181,14 @@ def replace_app_bin(dut, name, new_app_bin):
|
|
|
break
|
|
|
|
|
|
|
|
|
+def format_case_name(case):
|
|
|
+ # we could split cases of same config into multiple binaries as we have limited rom space
|
|
|
+ # we should regard those configs like `default` and `default_2` as the same config
|
|
|
+ match = STRIP_CONFIG_PATTERN.match(case['config'])
|
|
|
+ stripped_config_name = match.group(1)
|
|
|
+ return format_case_id(case['name'], target=case['target'], config=stripped_config_name)
|
|
|
+
|
|
|
+
|
|
|
def reset_dut(dut):
|
|
|
dut.reset()
|
|
|
# esptool ``run`` cmd takes quite long time.
|
|
|
@@ -175,6 +208,14 @@ def reset_dut(dut):
|
|
|
raise AssertionError("Reset {} ({}) failed!".format(dut.name, dut.port))
|
|
|
|
|
|
|
|
|
+def log_test_case(description, test_case, ut_config):
|
|
|
+ Utility.console_log("Running {} '{}' (config {})".format(description, test_case['name'], ut_config),
|
|
|
+ color='orange')
|
|
|
+ Utility.console_log('Tags: %s' % ', '.join('%s=%s' % (k, v) for (k, v) in test_case.items()
|
|
|
+ if k != 'name' and v is not None),
|
|
|
+ color='orange')
|
|
|
+
|
|
|
+
|
|
|
def run_one_normal_case(dut, one_case, junit_test_case):
|
|
|
|
|
|
reset_dut(dut)
|
|
|
@@ -286,24 +327,17 @@ def run_unit_test_cases(env, extra_data):
|
|
|
|
|
|
for one_case in case_config[ut_config]:
|
|
|
# create junit report test case
|
|
|
- junit_test_case = TinyFW.JunitReport.create_test_case("[{}] {}".format(ut_config, one_case["name"]))
|
|
|
+ junit_test_case = TinyFW.JunitReport.create_test_case(format_case_name(one_case))
|
|
|
try:
|
|
|
run_one_normal_case(dut, one_case, junit_test_case)
|
|
|
except TestCaseFailed:
|
|
|
- failed_cases.append(one_case["name"])
|
|
|
+ failed_cases.append(format_case_name(one_case))
|
|
|
except Exception as e:
|
|
|
- junit_test_case.add_failure_info("Unexpected exception: " + str(e))
|
|
|
- failed_cases.append(one_case["name"])
|
|
|
+ handle_unexpected_exception(junit_test_case, e)
|
|
|
+ failed_cases.append(format_case_name(one_case))
|
|
|
finally:
|
|
|
TinyFW.JunitReport.test_case_finish(junit_test_case)
|
|
|
|
|
|
- # raise exception if any case fails
|
|
|
- if failed_cases:
|
|
|
- Utility.console_log("Failed Cases:", color="red")
|
|
|
- for _case_name in failed_cases:
|
|
|
- Utility.console_log("\t" + _case_name, color="red")
|
|
|
- raise AssertionError("Unit Test Failed")
|
|
|
-
|
|
|
|
|
|
class Handler(threading.Thread):
|
|
|
|
|
|
@@ -477,26 +511,22 @@ def run_multiple_devices_cases(env, extra_data):
|
|
|
Utility.console_log("Running unit test for config: " + ut_config, "O")
|
|
|
for one_case in case_config[ut_config]:
|
|
|
result = False
|
|
|
- junit_test_case = TinyFW.JunitReport.create_test_case("[{}] {}".format(ut_config, one_case["name"]))
|
|
|
+ junit_test_case = TinyFW.JunitReport.create_test_case(format_case_name(one_case))
|
|
|
try:
|
|
|
result = run_one_multiple_devices_case(duts, ut_config, env, one_case,
|
|
|
one_case.get('app_bin'), junit_test_case)
|
|
|
+ except TestCaseFailed:
|
|
|
+                    pass  # result stays False; the finally block below logs and records the failure
|
|
|
except Exception as e:
|
|
|
- junit_test_case.add_failure_info("Unexpected exception: " + str(e))
|
|
|
+ handle_unexpected_exception(junit_test_case, e)
|
|
|
finally:
|
|
|
if result:
|
|
|
- Utility.console_log("Success: " + one_case["name"], color="green")
|
|
|
+ Utility.console_log("Success: " + format_case_name(one_case), color="green")
|
|
|
else:
|
|
|
- failed_cases.append(one_case["name"])
|
|
|
- Utility.console_log("Failed: " + one_case["name"], color="red")
|
|
|
+ failed_cases.append(format_case_name(one_case))
|
|
|
+ Utility.console_log("Failed: " + format_case_name(one_case), color="red")
|
|
|
TinyFW.JunitReport.test_case_finish(junit_test_case)
|
|
|
|
|
|
- if failed_cases:
|
|
|
- Utility.console_log("Failed Cases:", color="red")
|
|
|
- for _case_name in failed_cases:
|
|
|
- Utility.console_log("\t" + _case_name, color="red")
|
|
|
- raise AssertionError("Unit Test Failed")
|
|
|
-
|
|
|
|
|
|
def run_one_multiple_stage_case(dut, one_case, junit_test_case):
|
|
|
reset_dut(dut)
|
|
|
@@ -525,7 +555,7 @@ def run_one_multiple_stage_case(dut, one_case, junit_test_case):
|
|
|
result = False
|
|
|
if len(one_case["reset"]) == len(exception_reset_list):
|
|
|
for i, exception in enumerate(exception_reset_list):
|
|
|
- if one_case["reset"][i] not in exception:
|
|
|
+ if not reset_reason_matches(exception, one_case["reset"][i]):
|
|
|
break
|
|
|
else:
|
|
|
result = True
|
|
|
@@ -546,9 +576,9 @@ def run_one_multiple_stage_case(dut, one_case, junit_test_case):
|
|
|
result = result and check_reset()
|
|
|
output = dut.stop_capture_raw_data()
|
|
|
if result:
|
|
|
- Utility.console_log("Success: " + one_case["name"], color="green")
|
|
|
+ Utility.console_log("Success: " + format_case_name(one_case), color="green")
|
|
|
else:
|
|
|
- Utility.console_log("Failed: " + one_case["name"], color="red")
|
|
|
+ Utility.console_log("Failed: " + format_case_name(one_case), color="red")
|
|
|
junit_test_case.add_failure_info(output)
|
|
|
raise TestCaseFailed()
|
|
|
stage_finish.append("break")
|
|
|
@@ -565,7 +595,7 @@ def run_one_multiple_stage_case(dut, one_case, junit_test_case):
|
|
|
# in this scenario reset should not happen
|
|
|
if int(data[1]):
|
|
|
# case ignored
|
|
|
- Utility.console_log("Ignored: " + one_case["name"], color="orange")
|
|
|
+ Utility.console_log("Ignored: " + format_case_name(one_case), color="orange")
|
|
|
junit_test_case.add_skipped_info("ignored")
|
|
|
# only passed in last stage will be regarded as real pass
|
|
|
if last_stage():
|
|
|
@@ -604,7 +634,7 @@ def run_one_multiple_stage_case(dut, one_case, junit_test_case):
|
|
|
def run_multiple_stage_cases(env, extra_data):
|
|
|
"""
|
|
|
extra_data can be 2 types of value
|
|
|
- 1. as dict: Mandantory keys: "name" and "child case num", optional keys: "reset" and others
|
|
|
+ 1. as dict: Mandatory keys: "name" and "child case num", optional keys: "reset" and others
|
|
|
3. as list of string or dict:
|
|
|
[case1, case2, case3, {"name": "restart from PRO CPU", "child case num": 2}, ...]
|
|
|
|
|
|
@@ -628,27 +658,19 @@ def run_multiple_stage_cases(env, extra_data):
|
|
|
dut.start_app()
|
|
|
|
|
|
for one_case in case_config[ut_config]:
|
|
|
- junit_test_case = TinyFW.JunitReport.create_test_case("[{}] {}".format(ut_config, one_case["name"]))
|
|
|
+ junit_test_case = TinyFW.JunitReport.create_test_case(format_case_name(one_case))
|
|
|
try:
|
|
|
run_one_multiple_stage_case(dut, one_case, junit_test_case)
|
|
|
except TestCaseFailed:
|
|
|
- failed_cases.append(one_case["name"])
|
|
|
+ failed_cases.append(format_case_name(one_case))
|
|
|
except Exception as e:
|
|
|
- junit_test_case.add_failure_info("Unexpected exception: " + str(e))
|
|
|
- failed_cases.append(one_case["name"])
|
|
|
+ handle_unexpected_exception(junit_test_case, e)
|
|
|
+ failed_cases.append(format_case_name(one_case))
|
|
|
finally:
|
|
|
TinyFW.JunitReport.test_case_finish(junit_test_case)
|
|
|
|
|
|
- # raise exception if any case fails
|
|
|
- if failed_cases:
|
|
|
- Utility.console_log("Failed Cases:", color="red")
|
|
|
- for _case_name in failed_cases:
|
|
|
- Utility.console_log("\t" + _case_name, color="red")
|
|
|
- raise AssertionError("Unit Test Failed")
|
|
|
-
|
|
|
|
|
|
def detect_update_unit_test_info(env, extra_data, app_bin):
|
|
|
-
|
|
|
case_config = format_test_case_config(extra_data)
|
|
|
|
|
|
for ut_config in case_config:
|
|
|
@@ -719,20 +741,16 @@ if __name__ == '__main__':
|
|
|
type=int,
|
|
|
default=1
|
|
|
)
|
|
|
- parser.add_argument("--env_config_file", "-e",
|
|
|
- help="test env config file",
|
|
|
- default=None
|
|
|
- )
|
|
|
- parser.add_argument("--app_bin", "-b",
|
|
|
- help="application binary file for flashing the chip",
|
|
|
- default=None
|
|
|
- )
|
|
|
- parser.add_argument(
|
|
|
- 'test',
|
|
|
- help='Comma separated list of <option>:<argument> where option can be "name" (default), "child case num", \
|
|
|
- "config", "timeout".',
|
|
|
- nargs='+'
|
|
|
- )
|
|
|
+ parser.add_argument('--env_config_file', '-e',
|
|
|
+ help='test env config file',
|
|
|
+ default=None)
|
|
|
+ parser.add_argument('--app_bin', '-b',
|
|
|
+ help='application binary file for flashing the chip',
|
|
|
+ default=None)
|
|
|
+ parser.add_argument('test',
|
|
|
+ help='Comma separated list of <option>:<argument> where option can be "name" (default), '
|
|
|
+ '"child case num", "config", "timeout".',
|
|
|
+ nargs='+')
|
|
|
args = parser.parse_args()
|
|
|
list_of_dicts = []
|
|
|
for test in args.test:
|
|
|
@@ -742,7 +760,7 @@ if __name__ == '__main__':
|
|
|
if len(test_item) == 0:
|
|
|
continue
|
|
|
pair = test_item.split(r':')
|
|
|
- if len(pair) == 1 or pair[0] is 'name':
|
|
|
+ if len(pair) == 1 or pair[0] == 'name':
|
|
|
test_dict['name'] = pair[0]
|
|
|
elif len(pair) == 2:
|
|
|
if pair[0] == 'timeout' or pair[0] == 'child case num':
|