unit_test.py 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808
  1. #!/usr/bin/env python
  2. #
  3. # Copyright 2018 Espressif Systems (Shanghai) PTE LTD
  4. #
  5. # Licensed under the Apache License, Version 2.0 (the "License");
  6. # you may not use this file except in compliance with the License.
  7. # You may obtain a copy of the License at
  8. #
  9. # http://www.apache.org/licenses/LICENSE-2.0
  10. #
  11. # Unless required by applicable law or agreed to in writing, software
  12. # distributed under the License is distributed on an "AS IS" BASIS,
  13. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  14. # See the License for the specific language governing permissions and
  15. # limitations under the License.
  16. """
  17. Test script for unit test case.
  18. """
  19. import argparse
  20. import re
  21. import threading
  22. import time
  23. import ttfw_idf
  24. from tiny_test_fw import DUT, Env, TinyFW, Utility
  25. from tiny_test_fw.TinyFW import TestCaseFailed
  26. from tiny_test_fw.Utility import format_case_id, handle_unexpected_exception
# Prompt printed by the unit test app once boot-up is complete.
UT_APP_BOOT_UP_DONE = "Press ENTER to see the list of tests."
# Strips a numeric binary-split suffix from a config name, e.g. "default_2" -> "default".
STRIP_CONFIG_PATTERN = re.compile(r'(.+?)(_\d+)?$')
# matches e.g.: "rst:0xc (SW_CPU_RESET),boot:0x13 (SPI_FAST_FLASH_BOOT)"
RESET_PATTERN = re.compile(r"(rst:0x[0-9a-fA-F]*\s\([\w].*?\),boot:0x[0-9a-fA-F]*\s\([\w].*?\))")
# matches a panic header printed before the DUT reboots
EXCEPTION_PATTERN = re.compile(r"(Guru Meditation Error: Core\s+\d panic'ed \([\w].*?\))")
# matches the abort() banner printed before the DUT reboots
ABORT_PATTERN = re.compile(r"(abort\(\) was called at PC 0x[a-fA-F\d]{8} on core \d)")
# Unity result line for a single executed case; group(1)=failures, group(2)=ignored
FINISH_PATTERN = re.compile(r"1 Tests (\d) Failures (\d) Ignored")
# Marks the end of the test list printed by the unit test app menu.
END_LIST_STR = r'\r?\nEnter test for running'
# One menu entry: (<index>) "<name>" <tags>, optionally followed by the end-of-list marker
TEST_PATTERN = re.compile(r'\((\d+)\)\s+"([^"]+)" ([^\r\n]+)\r?\n(' + END_LIST_STR + r')?')
# One submenu entry of a multi-stage / multi-device case
TEST_SUBMENU_PATTERN = re.compile(r'\s+\((\d+)\)\s+"[^"]+"\r?\n(?=(?=\()|(' + END_LIST_STR + r'))')

# Path of the unit test app, relative to the IDF root.
UT_APP_PATH = "tools/unit-test-app"

# Test case type ids recorded by detect_update_unit_test_info().
SIMPLE_TEST_ID = 0
MULTI_STAGE_ID = 1
MULTI_DEVICE_ID = 2

# Generic expect timeout, seconds.
DEFAULT_TIMEOUT = 20

# Timing used by reset_dut() when verifying that the DUT rebooted cleanly.
DUT_DELAY_AFTER_RESET = 2
DUT_STARTUP_CHECK_RETRY_COUNT = 5
TEST_HISTORY_CHECK_TIMEOUT = 2
  45. def reset_reason_matches(reported_str, expected_str):
  46. known_aliases = {
  47. "_RESET": "_RST",
  48. "POWERON_RESET": "POWERON",
  49. "DEEPSLEEP_RESET": "DSLEEP",
  50. }
  51. if expected_str in reported_str:
  52. return True
  53. for token, alias in known_aliases.items():
  54. if token in expected_str:
  55. alt_expected_str = expected_str.replace(token, alias)
  56. if alt_expected_str in reported_str:
  57. return True
  58. return False
  59. def format_test_case_config(test_case_data, target='esp32'):
  60. """
  61. convert the test case data to unified format.
  62. We need to following info to run unit test cases:
  63. 1. unit test app config
  64. 2. test case name
  65. 3. test case reset info
  66. the formatted case config is a dict, with ut app config as keys. The value is a list of test cases.
  67. Each test case is a dict with "name" and "reset" as keys. For example::
  68. case_config = {
  69. "default": [{"name": "restart from PRO CPU", "reset": "SW_CPU_RESET"}, {...}],
  70. "psram": [{"name": "restart from PRO CPU", "reset": "SW_CPU_RESET"}],
  71. }
  72. If config is not specified for test case, then
  73. :param test_case_data: string, list, or a dictionary list
  74. :param target: target
  75. :return: formatted data
  76. """
  77. case_config = dict()
  78. def parse_case(one_case_data):
  79. """ parse and format one case """
  80. def process_reset_list(reset_list):
  81. # strip space and remove white space only items
  82. _output = list()
  83. for _r in reset_list:
  84. _data = _r.strip(" ")
  85. if _data:
  86. _output.append(_data)
  87. return _output
  88. _case = dict()
  89. if isinstance(one_case_data, str):
  90. _temp = one_case_data.split(" [reset=")
  91. _case["name"] = _temp[0]
  92. try:
  93. _case["reset"] = process_reset_list(_temp[1][0:-1].split(","))
  94. except IndexError:
  95. _case["reset"] = list()
  96. elif isinstance(one_case_data, dict):
  97. _case = one_case_data.copy()
  98. assert "name" in _case
  99. if "reset" not in _case:
  100. _case["reset"] = list()
  101. else:
  102. if isinstance(_case["reset"], str):
  103. _case["reset"] = process_reset_list(_case["reset"].split(","))
  104. else:
  105. raise TypeError("Not supported type during parsing unit test case")
  106. if "config" not in _case:
  107. _case["config"] = "default"
  108. if 'target' not in _case:
  109. _case['target'] = target
  110. return _case
  111. if not isinstance(test_case_data, list):
  112. test_case_data = [test_case_data]
  113. for case_data in test_case_data:
  114. parsed_case = parse_case(case_data)
  115. try:
  116. case_config[parsed_case["config"]].append(parsed_case)
  117. except KeyError:
  118. case_config[parsed_case["config"]] = [parsed_case]
  119. return case_config
  120. def replace_app_bin(dut, name, new_app_bin):
  121. if new_app_bin is None:
  122. return
  123. search_pattern = '/{}.bin'.format(name)
  124. for i, config in enumerate(dut.download_config):
  125. if config.endswith(search_pattern):
  126. dut.download_config[i] = new_app_bin
  127. Utility.console_log("The replaced application binary is {}".format(new_app_bin), "O")
  128. break
  129. def format_case_name(case):
  130. # we could split cases of same config into multiple binaries as we have limited rom space
  131. # we should regard those configs like `default` and `default_2` as the same config
  132. match = STRIP_CONFIG_PATTERN.match(case['config'])
  133. stripped_config_name = match.group(1)
  134. return format_case_id(case['name'], target=case['target'], config=stripped_config_name)
  135. def reset_dut(dut):
  136. dut.reset()
  137. # esptool ``run`` cmd takes quite long time.
  138. # before reset finish, serial port is closed. therefore DUT could already bootup before serial port opened.
  139. # this could cause checking bootup print failed.
  140. # now use input cmd `-` and check test history to check if DUT is bootup.
  141. # we'll retry this step for a few times,
  142. # in case `dut.reset` returns during DUT bootup (when DUT can't process any command).
  143. #
  144. # during bootup, DUT might only receive part of the first `-` command.
  145. # If it only receive `\n`, then it will print all cases. It could take more than 5 seconds, reset check will fail.
  146. # To solve this problem, we will add a delay between reset and input `-` command. And we'll also enlarge expect timeout.
  147. time.sleep(DUT_DELAY_AFTER_RESET)
  148. for _ in range(DUT_STARTUP_CHECK_RETRY_COUNT):
  149. dut.write("-")
  150. try:
  151. dut.expect("0 Tests 0 Failures 0 Ignored", timeout=TEST_HISTORY_CHECK_TIMEOUT)
  152. break
  153. except DUT.ExpectTimeout:
  154. pass
  155. else:
  156. raise AssertionError("Reset {} ({}) failed!".format(dut.name, dut.port))
  157. def log_test_case(description, test_case, ut_config):
  158. Utility.console_log("Running {} '{}' (config {})".format(description, test_case['name'], ut_config),
  159. color='orange')
  160. Utility.console_log('Tags: %s' % ', '.join('%s=%s' % (k, v) for (k, v) in test_case.items()
  161. if k != 'name' and v is not None),
  162. color='orange')
def run_one_normal_case(dut, one_case, junit_test_case):
    """
    Run a single simple (non-multi-stage, non-multi-device) test case on the DUT.

    :param dut: DUT running the unit test app
    :param one_case: formatted case dict with "name", "reset" and "timeout" keys
    :param junit_test_case: junit report test case to attach failure/skip info to
    :raises TestCaseFailed: when the case fails, times out, or its observed
        reset sequence does not match the expected one
    """
    reset_dut(dut)
    dut.start_capture_raw_data()
    # run test case: the UT app menu accepts a quoted case name
    dut.write("\"{}\"".format(one_case["name"]))
    dut.expect("Running " + one_case["name"] + "...")
    exception_reset_list = []
    # we want to set this flag in callbacks (inner functions)
    # use list here so we can use append to set this flag
    test_finish = list()

    # expect callbacks
    def one_case_finish(result):
        """ one test finished, let expect loop break and log result """
        test_finish.append(True)
        output = dut.stop_capture_raw_data()
        if result:
            Utility.console_log("Success: " + format_case_name(one_case), color="green")
        else:
            Utility.console_log("Failed: " + format_case_name(one_case), color="red")
            junit_test_case.add_failure_info(output)
            raise TestCaseFailed()

    def handle_exception_reset(data):
        """
        just append data to exception list.
        exception list will be checked in ``handle_reset_finish``, once reset finished.
        """
        exception_reset_list.append(data[0])

    def handle_test_finish(data):
        """ test finished without reset """
        # in this scenario reset should not happen
        assert not exception_reset_list
        if int(data[1]):
            # case ignored (data[1] is the Unity "Ignored" count)
            Utility.console_log("Ignored: " + format_case_name(one_case), color="orange")
            junit_test_case.add_skipped_info("ignored")
        # data[0] is the Unity failure count; zero failures means success
        one_case_finish(not int(data[0]))

    def handle_reset_finish(data):
        """ reset happened and reboot finished """
        assert exception_reset_list  # reboot but no exception/reset logged. should never happen
        result = False
        # every logged exception/reset must match the expected reset reason, in order
        if len(one_case["reset"]) == len(exception_reset_list):
            for i, exception in enumerate(exception_reset_list):
                if not reset_reason_matches(exception, one_case["reset"][i]):
                    break
            else:
                result = True
        if not result:
            err_msg = "Reset Check Failed: \r\n\tExpected: {}\r\n\tGet: {}".format(one_case["reset"],
                                                                                   exception_reset_list)
            Utility.console_log(err_msg, color="orange")
            junit_test_case.add_failure_info(err_msg)
        one_case_finish(result)

    # loop until a callback appends to test_finish (or we time out)
    while not test_finish:
        try:
            timeout_value = one_case["timeout"]
            dut.expect_any((RESET_PATTERN, handle_exception_reset),
                           (EXCEPTION_PATTERN, handle_exception_reset),
                           (ABORT_PATTERN, handle_exception_reset),
                           (FINISH_PATTERN, handle_test_finish),
                           (UT_APP_BOOT_UP_DONE, handle_reset_finish),
                           timeout=timeout_value)
        except DUT.ExpectTimeout:
            Utility.console_log("Timeout in expect (%s seconds)" % timeout_value, color="orange")
            junit_test_case.add_failure_info("timeout")
            one_case_finish(False)
            break
  229. @ttfw_idf.idf_unit_test(env_tag="UT_T1_1", junit_report_by_case=True)
  230. def run_unit_test_cases(env, extra_data):
  231. """
  232. extra_data can be three types of value
  233. 1. as string:
  234. 1. "case_name"
  235. 2. "case_name [reset=RESET_REASON]"
  236. 2. as dict:
  237. 1. with key like {"name": "Intr_alloc test, shared ints"}
  238. 2. with key like {"name": "restart from PRO CPU", "reset": "SW_CPU_RESET", "config": "psram"}
  239. 3. as list of string or dict:
  240. [case1, case2, case3, {"name": "restart from PRO CPU", "reset": "SW_CPU_RESET"}, ...]
  241. :param env: test env instance
  242. :param extra_data: the case name or case list or case dictionary
  243. :return: None
  244. """
  245. case_config = format_test_case_config(extra_data, env.default_dut_cls.TARGET)
  246. # we don't want stop on failed case (unless some special scenarios we can't handle)
  247. # this flag is used to log if any of the case failed during executing
  248. # Before exit test function this flag is used to log if the case fails
  249. failed_cases = []
  250. for ut_config in case_config:
  251. Utility.console_log("Running unit test for config: " + ut_config, "O")
  252. dut = env.get_dut("unit-test-app", app_path=UT_APP_PATH, app_config_name=ut_config, allow_dut_exception=True)
  253. if len(case_config[ut_config]) > 0:
  254. replace_app_bin(dut, "unit-test-app", case_config[ut_config][0].get('app_bin'))
  255. dut.start_app()
  256. Utility.console_log("Download finished, start running test cases", "O")
  257. for one_case in case_config[ut_config]:
  258. log_test_case("test case", one_case, ut_config)
  259. performance_items = []
  260. # create junit report test case
  261. junit_test_case = TinyFW.JunitReport.create_test_case(format_case_name(one_case))
  262. try:
  263. run_one_normal_case(dut, one_case, junit_test_case)
  264. performance_items = dut.get_performance_items()
  265. except TestCaseFailed:
  266. failed_cases.append(format_case_name(one_case))
  267. except Exception as e:
  268. handle_unexpected_exception(junit_test_case, e)
  269. failed_cases.append(format_case_name(one_case))
  270. finally:
  271. TinyFW.JunitReport.update_performance(performance_items)
  272. TinyFW.JunitReport.test_case_finish(junit_test_case)
  273. # close DUT when finish running all cases for one config
  274. env.close_dut(dut.name)
class Handler(threading.Thread):
    """
    Per-DUT worker thread for multi-device test cases.

    Each Handler drives one DUT through its child case of a multi-device test:
    it selects the parent case on the DUT, picks its own child case index, then
    exchanges synchronization signals with the other Handlers through the shared
    ``sent_signal_list`` (guarded by ``lock``) until the DUT reports a result.
    """
    # DUT output announcing it is blocked waiting for a named signal
    WAIT_SIGNAL_PATTERN = re.compile(r'Waiting for signal: \[(.+)]!')
    # DUT output publishing a named signal, optionally with a parameter
    SEND_SIGNAL_PATTERN = re.compile(r'Send signal: \[([^]]+)](\[([^]]+)])?!')
    # Unity result line for the single executed case
    FINISH_PATTERN = re.compile(r"1 Tests (\d) Failures (\d) Ignored")

    def __init__(self, dut, sent_signal_list, lock, parent_case_name, child_case_index, timeout):
        self.dut = dut
        # shared between all Handlers of one multi-device case; guarded by self.lock
        self.sent_signal_list = sent_signal_list
        self.lock = lock
        self.parent_case_name = parent_case_name
        self.child_case_name = ""
        # the UT app submenu is 1-based; callers pass a 0-based index
        self.child_case_index = child_case_index + 1
        self.finish = False
        self.result = False
        self.output = ""
        self.fail_name = None
        self.timeout = timeout
        self.force_stop = threading.Event()  # it show the running status
        reset_dut(self.dut)  # reset the board to make it start from begining
        threading.Thread.__init__(self, name="{} Handler".format(dut))

    def run(self):
        """ Drive the DUT until its child case finishes, times out, or is force-stopped. """
        self.dut.start_capture_raw_data()

        def get_child_case_name(data):
            # the submenu entry for our child case index appeared; select it
            self.child_case_name = data[0]
            time.sleep(1)
            self.dut.write(str(self.child_case_index))

        def one_device_case_finish(result):
            """ one test finished, let expect loop break and log result """
            self.finish = True
            self.result = result
            self.output = "[{}]\n\n{}\n".format(self.child_case_name,
                                                self.dut.stop_capture_raw_data())
            if not result:
                self.fail_name = self.child_case_name

        def device_wait_action(data):
            # the DUT waits for a signal; poll the shared list until another
            # Handler publishes it, or self.timeout elapses
            start_time = time.time()
            expected_signal = data[0].encode('utf-8')
            while 1:
                if time.time() > start_time + self.timeout:
                    Utility.console_log("Timeout in device for function: %s" % self.child_case_name, color="orange")
                    break
                with self.lock:
                    for sent_signal in self.sent_signal_list:
                        if expected_signal == sent_signal["name"]:
                            # forward the signal's parameter (possibly empty) to the DUT
                            self.dut.write(sent_signal["parameter"])
                            self.sent_signal_list.remove(sent_signal)
                            break
                    else:
                        # signal not published yet: back off briefly and retry
                        time.sleep(0.01)
                        continue
                break

        def device_send_action(data):
            # the DUT published a signal; make it visible to the other Handlers
            with self.lock:
                self.sent_signal_list.append({
                    "name": data[0].encode('utf-8'),
                    "parameter": "" if data[2] is None else data[2].encode('utf-8')
                    # no parameter means we only write EOL to DUT
                })

        def handle_device_test_finish(data):
            """ test finished without reset """
            # in this scenario reset should not happen
            if int(data[1]):
                # case ignored
                Utility.console_log("Ignored: " + self.child_case_name, color="orange")
            one_device_case_finish(not int(data[0]))

        try:
            time.sleep(1)
            self.dut.write("\"{}\"".format(self.parent_case_name))
            self.dut.expect("Running " + self.parent_case_name + "...")
        except DUT.ExpectTimeout:
            Utility.console_log("No case detected!", color="orange")
        # NOTE(review): Event.isSet() is a deprecated camelCase alias of is_set()
        # (deprecated since Python 3.10) — consider migrating.
        while not self.finish and not self.force_stop.isSet():
            try:
                self.dut.expect_any((re.compile('\(' + str(self.child_case_index) + '\)\s"(\w+)"'),  # noqa: W605 - regex
                                     get_child_case_name),
                                    (self.WAIT_SIGNAL_PATTERN, device_wait_action),  # wait signal pattern
                                    (self.SEND_SIGNAL_PATTERN, device_send_action),  # send signal pattern
                                    (self.FINISH_PATTERN, handle_device_test_finish),  # test finish pattern
                                    timeout=self.timeout)
            except DUT.ExpectTimeout:
                Utility.console_log("Timeout in expect (%s seconds)" % self.timeout, color="orange")
                one_device_case_finish(False)
                break

    def stop(self):
        """ Ask the thread to stop after its current expect round. """
        self.force_stop.set()
  359. def get_case_info(one_case):
  360. parent_case = one_case["name"]
  361. child_case_num = one_case["child case num"]
  362. return parent_case, child_case_num
  363. def get_dut(duts, env, name, ut_config, app_bin=None):
  364. if name in duts:
  365. dut = duts[name]
  366. else:
  367. dut = env.get_dut(name, app_path=UT_APP_PATH, app_config_name=ut_config, allow_dut_exception=True)
  368. duts[name] = dut
  369. replace_app_bin(dut, "unit-test-app", app_bin)
  370. dut.start_app() # download bin to board
  371. return dut
  372. def run_one_multiple_devices_case(duts, ut_config, env, one_case, app_bin, junit_test_case):
  373. lock = threading.RLock()
  374. threads = []
  375. send_signal_list = []
  376. result = True
  377. parent_case, case_num = get_case_info(one_case)
  378. for i in range(case_num):
  379. dut = get_dut(duts, env, "dut%d" % i, ut_config, app_bin)
  380. threads.append(Handler(dut, send_signal_list, lock,
  381. parent_case, i, one_case["timeout"]))
  382. for thread in threads:
  383. thread.setDaemon(True)
  384. thread.start()
  385. output = "Multiple Device Failed\n"
  386. for thread in threads:
  387. thread.join()
  388. result = result and thread.result
  389. output += thread.output
  390. if not thread.result:
  391. [thd.stop() for thd in threads]
  392. if not result:
  393. junit_test_case.add_failure_info(output)
  394. # collect performances from DUTs
  395. performance_items = []
  396. for dut_name in duts:
  397. performance_items.extend(duts[dut_name].get_performance_items())
  398. TinyFW.JunitReport.update_performance(performance_items)
  399. return result
  400. @ttfw_idf.idf_unit_test(env_tag="UT_T2_1", junit_report_by_case=True)
  401. def run_multiple_devices_cases(env, extra_data):
  402. """
  403. extra_data can be two types of value
  404. 1. as dict:
  405. e.g.
  406. {"name": "gpio master/slave test example",
  407. "child case num": 2,
  408. "config": "release",
  409. "env_tag": "UT_T2_1"}
  410. 2. as list dict:
  411. e.g.
  412. [{"name": "gpio master/slave test example1",
  413. "child case num": 2,
  414. "config": "release",
  415. "env_tag": "UT_T2_1"},
  416. {"name": "gpio master/slave test example2",
  417. "child case num": 2,
  418. "config": "release",
  419. "env_tag": "UT_T2_1"}]
  420. """
  421. failed_cases = []
  422. case_config = format_test_case_config(extra_data, env.default_dut_cls.TARGET)
  423. duts = {}
  424. for ut_config in case_config:
  425. Utility.console_log("Running unit test for config: " + ut_config, "O")
  426. for one_case in case_config[ut_config]:
  427. log_test_case("multi-device test", one_case, ut_config, )
  428. result = False
  429. junit_test_case = TinyFW.JunitReport.create_test_case(format_case_name(one_case))
  430. try:
  431. result = run_one_multiple_devices_case(duts, ut_config, env, one_case,
  432. one_case.get('app_bin'), junit_test_case)
  433. except TestCaseFailed:
  434. pass # result is False, this is handled by the finally block
  435. except Exception as e:
  436. handle_unexpected_exception(junit_test_case, e)
  437. finally:
  438. if result:
  439. Utility.console_log("Success: " + format_case_name(one_case), color="green")
  440. else:
  441. failed_cases.append(format_case_name(one_case))
  442. Utility.console_log("Failed: " + format_case_name(one_case), color="red")
  443. TinyFW.JunitReport.test_case_finish(junit_test_case)
  444. # close all DUTs when finish running all cases for one config
  445. for dut in duts:
  446. env.close_dut(dut)
  447. duts = {}
def run_one_multiple_stage_case(dut, one_case, junit_test_case):
    """
    Run one multi-stage test case: the same parent case is entered once per
    child stage, and the DUT may reboot between stages.

    :param dut: DUT running the unit test app
    :param one_case: formatted case dict ("name", "child case num", "reset", "timeout")
    :param junit_test_case: junit report test case for failure/skip info
    :raises TestCaseFailed: when a stage fails, times out, or the observed
        resets don't match the expected ones
    """
    reset_dut(dut)
    dut.start_capture_raw_data()
    exception_reset_list = []
    for test_stage in range(one_case["child case num"]):
        # select multi stage test case name
        dut.write("\"{}\"".format(one_case["name"]))
        dut.expect("Running " + one_case["name"] + "...")
        # select test function for current stage
        dut.write(str(test_stage + 1))
        # we want to set this flag in callbacks (inner functions)
        # use list here so we can use append to set this flag
        stage_finish = list()

        def last_stage():
            # whether the current stage is the final child case
            return test_stage == one_case["child case num"] - 1

        def check_reset():
            # verify observed reset/exception reasons against the expected list;
            # an empty expected list means resets are not checked
            if one_case["reset"]:
                assert exception_reset_list  # reboot but no exception/reset logged. should never happen
                result = False
                if len(one_case["reset"]) == len(exception_reset_list):
                    for i, exception in enumerate(exception_reset_list):
                        if not reset_reason_matches(exception, one_case["reset"][i]):
                            break
                    else:
                        result = True
                if not result:
                    err_msg = "Reset Check Failed: \r\n\tExpected: {}\r\n\tGet: {}".format(one_case["reset"],
                                                                                           exception_reset_list)
                    Utility.console_log(err_msg, color="orange")
                    junit_test_case.add_failure_info(err_msg)
            else:
                # we allow omit reset in multi stage cases
                result = True
            return result

        # expect callbacks
        def one_case_finish(result):
            """ one test finished, let expect loop break and log result """
            # handle test finish
            result = result and check_reset()
            output = dut.stop_capture_raw_data()
            if result:
                Utility.console_log("Success: " + format_case_name(one_case), color="green")
            else:
                Utility.console_log("Failed: " + format_case_name(one_case), color="red")
                junit_test_case.add_failure_info(output)
                raise TestCaseFailed()
            stage_finish.append("break")

        def handle_exception_reset(data):
            """
            just append data to exception list.
            exception list will be checked in ``handle_reset_finish``, once reset finished.
            """
            exception_reset_list.append(data[0])

        def handle_test_finish(data):
            """ test finished without reset """
            # in this scenario reset should not happen
            if int(data[1]):
                # case ignored (data[1] is the Unity "Ignored" count)
                Utility.console_log("Ignored: " + format_case_name(one_case), color="orange")
                junit_test_case.add_skipped_info("ignored")
            # only passed in last stage will be regarded as real pass
            if last_stage():
                one_case_finish(not int(data[0]))
            else:
                Utility.console_log("test finished before enter last stage", color="orange")
                one_case_finish(False)

        def handle_next_stage(data):
            """ reboot finished. we goto next stage """
            if last_stage():
                # already last stage, should never goto next stage
                Utility.console_log("didn't finish at last stage", color="orange")
                one_case_finish(False)
            else:
                stage_finish.append("continue")

        # loop until a callback appends to stage_finish (or we time out)
        while not stage_finish:
            try:
                timeout_value = one_case["timeout"]
                dut.expect_any((RESET_PATTERN, handle_exception_reset),
                               (EXCEPTION_PATTERN, handle_exception_reset),
                               (ABORT_PATTERN, handle_exception_reset),
                               (FINISH_PATTERN, handle_test_finish),
                               (UT_APP_BOOT_UP_DONE, handle_next_stage),
                               timeout=timeout_value)
            except DUT.ExpectTimeout:
                Utility.console_log("Timeout in expect (%s seconds)" % timeout_value, color="orange")
                one_case_finish(False)
                break
        if stage_finish[0] == "break":
            # test breaks on current stage
            break
  538. @ttfw_idf.idf_unit_test(env_tag="UT_T1_1", junit_report_by_case=True)
  539. def run_multiple_stage_cases(env, extra_data):
  540. """
  541. extra_data can be 2 types of value
  542. 1. as dict: Mandatory keys: "name" and "child case num", optional keys: "reset" and others
  543. 3. as list of string or dict:
  544. [case1, case2, case3, {"name": "restart from PRO CPU", "child case num": 2}, ...]
  545. :param env: test env instance
  546. :param extra_data: the case name or case list or case dictionary
  547. :return: None
  548. """
  549. case_config = format_test_case_config(extra_data, env.default_dut_cls.TARGET)
  550. # we don't want stop on failed case (unless some special scenarios we can't handle)
  551. # this flag is used to log if any of the case failed during executing
  552. # Before exit test function this flag is used to log if the case fails
  553. failed_cases = []
  554. for ut_config in case_config:
  555. Utility.console_log("Running unit test for config: " + ut_config, "O")
  556. dut = env.get_dut("unit-test-app", app_path=UT_APP_PATH, app_config_name=ut_config, allow_dut_exception=True)
  557. if len(case_config[ut_config]) > 0:
  558. replace_app_bin(dut, "unit-test-app", case_config[ut_config][0].get('app_bin'))
  559. dut.start_app()
  560. for one_case in case_config[ut_config]:
  561. log_test_case("multi-stage test", one_case, ut_config)
  562. performance_items = []
  563. junit_test_case = TinyFW.JunitReport.create_test_case(format_case_name(one_case))
  564. try:
  565. run_one_multiple_stage_case(dut, one_case, junit_test_case)
  566. performance_items = dut.get_performance_items()
  567. except TestCaseFailed:
  568. failed_cases.append(format_case_name(one_case))
  569. except Exception as e:
  570. handle_unexpected_exception(junit_test_case, e)
  571. failed_cases.append(format_case_name(one_case))
  572. finally:
  573. TinyFW.JunitReport.update_performance(performance_items)
  574. TinyFW.JunitReport.test_case_finish(junit_test_case)
  575. # close DUT when finish running all cases for one config
  576. env.close_dut(dut.name)
def detect_update_unit_test_info(env, extra_data, app_bin):
    """
    Query the flashed unit test app for its test menu and update each case in
    ``extra_data`` in place with its detected 'type', 'timeout' and (for
    multi-stage/multi-device cases) 'child case num'.

    :param env: test env instance used to acquire the DUT
    :param extra_data: list of case dicts (each must contain 'name'); updated in place
    :param app_bin: optional replacement application binary
    :raises ValueError: if a case name in ``extra_data`` was not found on the device
    """
    case_config = format_test_case_config(extra_data, env.default_dut_cls.TARGET)
    for ut_config in case_config:
        dut = env.get_dut("unit-test-app", app_path=UT_APP_PATH, app_config_name=ut_config)
        replace_app_bin(dut, "unit-test-app", app_bin)
        dut.start_app()
        reset_dut(dut)
        # get the list of test cases (empty write just sends EOL to trigger the menu)
        dut.write("")
        dut.expect("Here's the test menu, pick your combo:", timeout=DEFAULT_TIMEOUT)

        def find_update_dic(name, _t, _timeout, child_case_num=None):
            # copy the detected attributes onto the matching entry of extra_data
            for _case_data in extra_data:
                if _case_data['name'] == name:
                    _case_data['type'] = _t
                    if 'timeout' not in _case_data:
                        _case_data['timeout'] = _timeout
                    if child_case_num:
                        _case_data['child case num'] = child_case_num

        try:
            while True:
                data = dut.expect(TEST_PATTERN, timeout=DEFAULT_TIMEOUT)
                test_case_name = data[1]
                m = re.search(r'\[timeout=(\d+)\]', data[2])
                if m:
                    timeout = int(m.group(1))
                else:
                    # default timeout when the case declares none
                    timeout = 30
                m = re.search(r'\[multi_stage\]', data[2])
                if m:
                    test_case_type = MULTI_STAGE_ID
                else:
                    m = re.search(r'\[multi_device\]', data[2])
                    if m:
                        test_case_type = MULTI_DEVICE_ID
                    else:
                        # simple case: record it and move on to the next menu entry
                        test_case_type = SIMPLE_TEST_ID
                        find_update_dic(test_case_name, test_case_type, timeout)
                        if data[3] and re.search(END_LIST_STR, data[3]):
                            break
                        continue
                # multi-stage / multi-device case:
                # find the last submenu item to learn the number of child cases
                data = dut.expect(TEST_SUBMENU_PATTERN, timeout=DEFAULT_TIMEOUT)
                find_update_dic(test_case_name, test_case_type, timeout, child_case_num=int(data[0]))
                if data[1] and re.search(END_LIST_STR, data[1]):
                    break
            # check if the unit test case names are correct, i.e. they could be found in the device
            for _dic in extra_data:
                if 'type' not in _dic:
                    raise ValueError("Unit test \"{}\" doesn't exist in the flashed device!".format(_dic.get('name')))
        except DUT.ExpectTimeout:
            Utility.console_log("Timeout during getting the test list", color="red")
        finally:
            dut.close()
        # These options are the same for all configs, therefore there is no need to continue
        break
  632. if __name__ == '__main__':
  633. parser = argparse.ArgumentParser()
  634. parser.add_argument(
  635. '--repeat', '-r',
  636. help='Number of repetitions for the test(s). Default is 1.',
  637. type=int,
  638. default=1
  639. )
  640. parser.add_argument('--env_config_file', '-e',
  641. help='test env config file',
  642. default=None)
  643. parser.add_argument('--app_bin', '-b',
  644. help='application binary file for flashing the chip',
  645. default=None)
  646. parser.add_argument('test',
  647. help='Comma separated list of <option>:<argument> where option can be "name" (default), '
  648. '"child case num", "config", "timeout".',
  649. nargs='+')
  650. args = parser.parse_args()
  651. list_of_dicts = []
  652. for test in args.test:
  653. test_args = test.split(r',')
  654. test_dict = dict()
  655. for test_item in test_args:
  656. if len(test_item) == 0:
  657. continue
  658. pair = test_item.split(r':', 1)
  659. if len(pair) == 1 or pair[0] is 'name':
  660. test_dict['name'] = pair[0]
  661. elif len(pair) == 2:
  662. if pair[0] == 'timeout' or pair[0] == 'child case num':
  663. test_dict[pair[0]] = int(pair[1])
  664. else:
  665. test_dict[pair[0]] = pair[1]
  666. else:
  667. raise ValueError('Error in argument item {} of {}'.format(test_item, test))
  668. test_dict['app_bin'] = args.app_bin
  669. list_of_dicts.append(test_dict)
  670. TinyFW.set_default_config(env_config_file=args.env_config_file)
  671. env_config = TinyFW.get_default_config()
  672. env_config['app'] = ttfw_idf.UT
  673. env_config['dut'] = ttfw_idf.IDFDUT
  674. env_config['test_suite_name'] = 'unit_test_parsing'
  675. test_env = Env.Env(**env_config)
  676. detect_update_unit_test_info(test_env, extra_data=list_of_dicts, app_bin=args.app_bin)
  677. for index in range(1, args.repeat + 1):
  678. if args.repeat > 1:
  679. Utility.console_log("Repetition {}".format(index), color="green")
  680. for dic in list_of_dicts:
  681. t = dic.get('type', SIMPLE_TEST_ID)
  682. if t == SIMPLE_TEST_ID:
  683. run_unit_test_cases(extra_data=dic)
  684. elif t == MULTI_STAGE_ID:
  685. run_multiple_stage_cases(extra_data=dic)
  686. elif t == MULTI_DEVICE_ID:
  687. run_multiple_devices_cases(extra_data=dic)
  688. else:
  689. raise ValueError('Unknown type {} of {}'.format(t, dic.get('name')))