#!/usr/bin/env python
#
# Copyright 2018 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Test script for unit test case.
"""

import re
import time
import argparse
import threading

from tiny_test_fw import TinyFW, Utility, Env, DUT
import ttfw_idf

UT_APP_BOOT_UP_DONE = "Press ENTER to see the list of tests."

RESET_PATTERN = re.compile(r"(ets [\w]{3}\s+[\d]{1,2} [\d]{4} [\d]{2}:[\d]{2}:[\d]{2}[^()]*\([\w].*?\))")
EXCEPTION_PATTERN = re.compile(r"(Guru Meditation Error: Core\s+\d panic'ed \([\w].*?\))")
ABORT_PATTERN = re.compile(r"(abort\(\) was called at PC 0x[a-fA-F\d]{8} on core \d)")
FINISH_PATTERN = re.compile(r"1 Tests (\d) Failures (\d) Ignored")
END_LIST_STR = r'\r?\nEnter test for running'
TEST_PATTERN = re.compile(r'\((\d+)\)\s+"([^"]+)" ([^\r\n]+)\r?\n(' + END_LIST_STR + r')?')
TEST_SUBMENU_PATTERN = re.compile(r'\s+\((\d+)\)\s+"[^"]+"\r?\n(?=(?=\()|(' + END_LIST_STR + r'))')
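
# Illustrative examples (not captured from a real device) of the console lines the patterns above
# are intended to match; the exact banner text varies between chips and IDF versions:
#   RESET_PATTERN:     ets Jun  8 2016 00:22:57 ... rst:0x3 (SW_RESET)
#   EXCEPTION_PATTERN: Guru Meditation Error: Core  0 panic'ed (LoadProhibited)
#   ABORT_PATTERN:     abort() was called at PC 0x400d1234 on core 0
#   FINISH_PATTERN:    1 Tests 0 Failures 0 Ignored
#   TEST_PATTERN:      (42) "UART can do select()" [uart]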

UT_APP_PATH = "tools/unit-test-app"

SIMPLE_TEST_ID = 0
MULTI_STAGE_ID = 1
MULTI_DEVICE_ID = 2

DEFAULT_TIMEOUT = 20

DUT_DELAY_AFTER_RESET = 2
DUT_STARTUP_CHECK_RETRY_COUNT = 5
TEST_HISTORY_CHECK_TIMEOUT = 2


class TestCaseFailed(AssertionError):
    pass


def format_test_case_config(test_case_data):
    """
    Convert the test case data to a unified format.
    We need the following info to run unit test cases:

    1. unit test app config
    2. test case name
    3. test case reset info

    The formatted case config is a dict with ut app configs as keys. The value is a list of test cases.
    Each test case is a dict with "name" and "reset" as keys. For example::

        case_config = {
            "default": [{"name": "restart from PRO CPU", "reset": "SW_CPU_RESET"}, {...}],
            "psram": [{"name": "restart from PRO CPU", "reset": "SW_CPU_RESET"}],
        }

    If no config is specified for a test case, the "default" config is used.

    :param test_case_data: string, list, or a dictionary list
    :return: formatted data
    """
    case_config = dict()

    def parse_case(one_case_data):
        """ parse and format one case """

        def process_reset_list(reset_list):
            # strip space and remove white space only items
            _output = list()
            for _r in reset_list:
                _data = _r.strip(" ")
                if _data:
                    _output.append(_data)
            return _output

        _case = dict()
        if isinstance(one_case_data, str):
            _temp = one_case_data.split(" [reset=")
            _case["name"] = _temp[0]
            try:
                _case["reset"] = process_reset_list(_temp[1][0:-1].split(","))
            except IndexError:
                _case["reset"] = list()
        elif isinstance(one_case_data, dict):
            _case = one_case_data.copy()
            assert "name" in _case
            if "reset" not in _case:
                _case["reset"] = list()
            else:
                if isinstance(_case["reset"], str):
                    _case["reset"] = process_reset_list(_case["reset"].split(","))
        else:
            raise TypeError("Not supported type during parsing unit test case")

        if "config" not in _case:
            _case["config"] = "default"

        return _case

    if not isinstance(test_case_data, list):
        test_case_data = [test_case_data]

    for case_data in test_case_data:
        parsed_case = parse_case(case_data)
        try:
            case_config[parsed_case["config"]].append(parsed_case)
        except KeyError:
            case_config[parsed_case["config"]] = [parsed_case]

    return case_config
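
# Illustrative example for format_test_case_config (assumed inputs, following the docstring above):
#     format_test_case_config(["UART can do select()",
#                              "restart from PRO CPU [reset=SW_CPU_RESET]"])
# would return something like:
#     {"default": [{"name": "UART can do select()", "reset": [], "config": "default"},
#                  {"name": "restart from PRO CPU", "reset": ["SW_CPU_RESET"], "config": "default"}]}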


def replace_app_bin(dut, name, new_app_bin):
    if new_app_bin is None:
        return
    search_pattern = '/{}.bin'.format(name)
    for i, config in enumerate(dut.download_config):
        if config.endswith(search_pattern):
            dut.download_config[i] = new_app_bin
            Utility.console_log("The replaced application binary is {}".format(new_app_bin), "O")
            break


def format_case_name(case):
    return "[{}] {}".format(case["config"], case["name"])


def reset_dut(dut):
    dut.reset()
    # The esptool ``run`` command takes quite a long time. The serial port is closed before the reset
    # finishes, so the DUT may already have booted up before the serial port is reopened, which would
    # make checking for the boot-up print fail.
    # Instead, we send the `-` command and check the test history to verify that the DUT has booted up.
    # We retry this step a few times in case `dut.reset` returns while the DUT is still booting
    # (when the DUT can't process any command yet).
    #
    # During boot-up the DUT might only receive part of the first `-` command. If it only receives `\n`,
    # it prints all test cases, which can take more than 5 seconds and make the reset check fail.
    # To avoid this, we add a delay between the reset and the `-` command, and also enlarge the expect timeout.
    time.sleep(DUT_DELAY_AFTER_RESET)

    for _ in range(DUT_STARTUP_CHECK_RETRY_COUNT):
        dut.write("-")
        try:
            dut.expect("0 Tests 0 Failures 0 Ignored", timeout=TEST_HISTORY_CHECK_TIMEOUT)
            break
        except DUT.ExpectTimeout:
            pass
    else:
        raise AssertionError("Reset {} ({}) failed!".format(dut.name, dut.port))


def run_one_normal_case(dut, one_case, junit_test_case):

    reset_dut(dut)

    dut.start_capture_raw_data()
    # run test case
    dut.write("\"{}\"".format(one_case["name"]))
    dut.expect("Running " + one_case["name"] + "...")

    exception_reset_list = []

    # we want to set this flag in callbacks (inner functions)
    # use list here so we can use append to set this flag
    test_finish = list()

    # expect callbacks
    def one_case_finish(result):
        """ one test finished, let expect loop break and log result """
        test_finish.append(True)
        output = dut.stop_capture_raw_data()
        if result:
            Utility.console_log("Success: " + format_case_name(one_case), color="green")
        else:
            Utility.console_log("Failed: " + format_case_name(one_case), color="red")
            junit_test_case.add_failure_info(output)
            raise TestCaseFailed()

    def handle_exception_reset(data):
        """
        just append data to exception list.
        exception list will be checked in ``handle_reset_finish``, once reset finished.
        """
        exception_reset_list.append(data[0])

    def handle_test_finish(data):
        """ test finished without reset """
        # in this scenario reset should not happen
        assert not exception_reset_list
        if int(data[1]):
            # case ignored
            Utility.console_log("Ignored: " + format_case_name(one_case), color="orange")
            junit_test_case.add_skipped_info("ignored")
        one_case_finish(not int(data[0]))

    def handle_reset_finish(data):
        """ reset happened and reboot finished """
        assert exception_reset_list  # reboot but no exception/reset logged. should never happen
        result = False
        if len(one_case["reset"]) == len(exception_reset_list):
            for i, exception in enumerate(exception_reset_list):
                if one_case["reset"][i] not in exception:
                    break
            else:
                result = True
        if not result:
            err_msg = "Reset Check Failed: \r\n\tExpected: {}\r\n\tGet: {}".format(one_case["reset"],
                                                                                   exception_reset_list)
            Utility.console_log(err_msg, color="orange")
            junit_test_case.add_failure_info(err_msg)
        one_case_finish(result)

    while not test_finish:
        try:
            dut.expect_any((RESET_PATTERN, handle_exception_reset),
                           (EXCEPTION_PATTERN, handle_exception_reset),
                           (ABORT_PATTERN, handle_exception_reset),
                           (FINISH_PATTERN, handle_test_finish),
                           (UT_APP_BOOT_UP_DONE, handle_reset_finish),
                           timeout=one_case["timeout"])
        except DUT.ExpectTimeout:
            Utility.console_log("Timeout in expect", color="orange")
            junit_test_case.add_failure_info("timeout")
            one_case_finish(False)
            break


@ttfw_idf.idf_unit_test(env_tag="UT_T1_1", junit_report_by_case=True)
def run_unit_test_cases(env, extra_data):
    """
    extra_data can be three types of value
    1. as string:
        1. "case_name"
        2. "case_name [reset=RESET_REASON]"
    2. as dict:
        1. with key like {"name": "Intr_alloc test, shared ints"}
        2. with key like {"name": "restart from PRO CPU", "reset": "SW_CPU_RESET", "config": "psram"}
    3. as list of string or dict:
        [case1, case2, case3, {"name": "restart from PRO CPU", "reset": "SW_CPU_RESET"}, ...]

    :param env: test env instance
    :param extra_data: the case name or case list or case dictionary
    :return: None
    """

    case_config = format_test_case_config(extra_data)

    # We don't want to stop on a failed case (unless it's a special scenario we can't handle).
    # This list records the cases that failed during execution and is used to report the
    # failures before the test function exits.
    failed_cases = []

    for ut_config in case_config:
        Utility.console_log("Running unit test for config: " + ut_config, "O")
        dut = env.get_dut("unit-test-app", app_path=UT_APP_PATH, app_config_name=ut_config, allow_dut_exception=True)
        if len(case_config[ut_config]) > 0:
            replace_app_bin(dut, "unit-test-app", case_config[ut_config][0].get('app_bin'))
        dut.start_app()
        Utility.console_log("Download finished, start running test cases", "O")

        for one_case in case_config[ut_config]:
            performance_items = []
            # create junit report test case
            junit_test_case = TinyFW.JunitReport.create_test_case("[{}] {}".format(ut_config, one_case["name"]))
            try:
                run_one_normal_case(dut, one_case, junit_test_case)
                performance_items = dut.get_performance_items()
            except TestCaseFailed:
                failed_cases.append(format_case_name(one_case))
            except Exception as e:
                junit_test_case.add_failure_info("Unexpected exception: " + str(e))
                failed_cases.append(format_case_name(one_case))
            finally:
                TinyFW.JunitReport.update_performance(performance_items)
                TinyFW.JunitReport.test_case_finish(junit_test_case)
        # close DUT when finished running all cases for one config
        env.close_dut(dut.name)

    # raise an exception if any case failed
    if failed_cases:
        Utility.console_log("Failed Cases:", color="red")
        for _case_name in failed_cases:
            Utility.console_log("\t" + _case_name, color="red")
        raise AssertionError("Unit Test Failed")


class Handler(threading.Thread):
    WAIT_SIGNAL_PATTERN = re.compile(r'Waiting for signal: \[(.+)]!')
    SEND_SIGNAL_PATTERN = re.compile(r'Send signal: \[([^]]+)](\[([^]]+)])?!')
    FINISH_PATTERN = re.compile(r"1 Tests (\d) Failures (\d) Ignored")
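
    # Illustrative (assumed) examples of the synchronization lines printed by multi-device test
    # cases, which the signal patterns above are intended to match:
    #   "Waiting for signal: [master ready]!"
    #   "Send signal: [master ready]!"            -> no parameter: only EOL is written to the waiting DUT
    #   "Send signal: [address][0x40080000]!"     -> with parameter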

    def __init__(self, dut, sent_signal_list, lock, parent_case_name, child_case_index, timeout):
        self.dut = dut
        self.sent_signal_list = sent_signal_list
        self.lock = lock
        self.parent_case_name = parent_case_name
        self.child_case_name = ""
        self.child_case_index = child_case_index + 1
        self.finish = False
        self.result = False
        self.output = ""
        self.fail_name = None
        self.timeout = timeout
        self.force_stop = threading.Event()  # it shows the running status

        reset_dut(self.dut)  # reset the board to make it start from the beginning

        threading.Thread.__init__(self, name="{} Handler".format(dut))

    def run(self):
        self.dut.start_capture_raw_data()

        def get_child_case_name(data):
            self.child_case_name = data[0]
            time.sleep(1)
            self.dut.write(str(self.child_case_index))

        def one_device_case_finish(result):
            """ one test finished, let expect loop break and log result """
            self.finish = True
            self.result = result
            self.output = "[{}]\n\n{}\n".format(self.child_case_name,
                                                self.dut.stop_capture_raw_data())
            if not result:
                self.fail_name = self.child_case_name

        def device_wait_action(data):
            start_time = time.time()
            expected_signal = data[0].encode('utf-8')
            while 1:
                if time.time() > start_time + self.timeout:
                    Utility.console_log("Timeout in device for function: %s" % self.child_case_name, color="orange")
                    break
                with self.lock:
                    for sent_signal in self.sent_signal_list:
                        if expected_signal == sent_signal["name"]:
                            self.dut.write(sent_signal["parameter"])
                            self.sent_signal_list.remove(sent_signal)
                            break
                    else:
                        time.sleep(0.01)
                        continue
                break

        def device_send_action(data):
            with self.lock:
                self.sent_signal_list.append({
                    "name": data[0].encode('utf-8'),
                    # no parameter means we only write EOL to DUT
                    "parameter": "" if data[2] is None else data[2].encode('utf-8')
                })

        def handle_device_test_finish(data):
            """ test finished without reset """
            # in this scenario reset should not happen
            if int(data[1]):
                # case ignored
                Utility.console_log("Ignored: " + self.child_case_name, color="orange")
            one_device_case_finish(not int(data[0]))

        try:
            time.sleep(1)
            self.dut.write("\"{}\"".format(self.parent_case_name))
            self.dut.expect("Running " + self.parent_case_name + "...")
        except DUT.ExpectTimeout:
            Utility.console_log("No case detected!", color="orange")

        while not self.finish and not self.force_stop.isSet():
            try:
                self.dut.expect_any((re.compile(r'\(' + str(self.child_case_index) + r'\)\s"(\w+)"'),
                                     get_child_case_name),
                                    (self.WAIT_SIGNAL_PATTERN, device_wait_action),  # wait signal pattern
                                    (self.SEND_SIGNAL_PATTERN, device_send_action),  # send signal pattern
                                    (self.FINISH_PATTERN, handle_device_test_finish),  # test finish pattern
                                    timeout=self.timeout)
            except DUT.ExpectTimeout:
                Utility.console_log("Timeout in expect", color="orange")
                one_device_case_finish(False)
                break

    def stop(self):
        self.force_stop.set()


def get_case_info(one_case):
    parent_case = one_case["name"]
    child_case_num = one_case["child case num"]
    return parent_case, child_case_num


def get_dut(duts, env, name, ut_config, app_bin=None):
    if name in duts:
        dut = duts[name]
    else:
        dut = env.get_dut(name, app_path=UT_APP_PATH, app_config_name=ut_config, allow_dut_exception=True)
        duts[name] = dut
        replace_app_bin(dut, "unit-test-app", app_bin)
        dut.start_app()  # download bin to board
    return dut


def run_one_multiple_devices_case(duts, ut_config, env, one_case, app_bin, junit_test_case):
    lock = threading.RLock()
    threads = []
    send_signal_list = []
    result = True
    parent_case, case_num = get_case_info(one_case)

    for i in range(case_num):
        dut = get_dut(duts, env, "dut%d" % i, ut_config, app_bin)
        threads.append(Handler(dut, send_signal_list, lock,
                               parent_case, i, one_case["timeout"]))
    for thread in threads:
        thread.setDaemon(True)
        thread.start()
    output = "Multiple Device Failed\n"
    for thread in threads:
        thread.join()
        result = result and thread.result
        output += thread.output
        if not thread.result:
            for thd in threads:
                thd.stop()

    if not result:
        junit_test_case.add_failure_info(output)

    # collect performance results from the DUTs
    performance_items = []
    for dut_name in duts:
        performance_items.extend(duts[dut_name].get_performance_items())
    TinyFW.JunitReport.update_performance(performance_items)
    return result


@ttfw_idf.idf_unit_test(env_tag="UT_T2_1", junit_report_by_case=True)
def run_multiple_devices_cases(env, extra_data):
    """
    extra_data can be two types of value
    1. as dict:
        e.g.
        {"name": "gpio master/slave test example",
         "child case num": 2,
         "config": "release",
         "env_tag": "UT_T2_1"}
    2. as a list of dicts:
        e.g.
        [{"name": "gpio master/slave test example1",
          "child case num": 2,
          "config": "release",
          "env_tag": "UT_T2_1"},
         {"name": "gpio master/slave test example2",
          "child case num": 2,
          "config": "release",
          "env_tag": "UT_T2_1"}]
    """

    failed_cases = []
    case_config = format_test_case_config(extra_data)
    duts = {}
    for ut_config in case_config:
        Utility.console_log("Running unit test for config: " + ut_config, "O")
        for one_case in case_config[ut_config]:
            result = False
            junit_test_case = TinyFW.JunitReport.create_test_case("[{}] {}".format(ut_config, one_case["name"]))
            try:
                result = run_one_multiple_devices_case(duts, ut_config, env, one_case,
                                                       one_case.get('app_bin'), junit_test_case)
            except Exception as e:
                junit_test_case.add_failure_info("Unexpected exception: " + str(e))
            finally:
                if result:
                    Utility.console_log("Success: " + format_case_name(one_case), color="green")
                else:
                    failed_cases.append(format_case_name(one_case))
                    Utility.console_log("Failed: " + format_case_name(one_case), color="red")
                TinyFW.JunitReport.test_case_finish(junit_test_case)
        # close all DUTs when finished running all cases for one config
        for dut in duts:
            env.close_dut(dut)
        duts = {}

    if failed_cases:
        Utility.console_log("Failed Cases:", color="red")
        for _case_name in failed_cases:
            Utility.console_log("\t" + _case_name, color="red")
        raise AssertionError("Unit Test Failed")


def run_one_multiple_stage_case(dut, one_case, junit_test_case):

    reset_dut(dut)

    dut.start_capture_raw_data()

    exception_reset_list = []

    for test_stage in range(one_case["child case num"]):
        # select the multi-stage test case by name
        dut.write("\"{}\"".format(one_case["name"]))
        dut.expect("Running " + one_case["name"] + "...")
        # select the test function for the current stage
        dut.write(str(test_stage + 1))

        # we want to set this flag in callbacks (inner functions)
        # use list here so we can use append to set this flag
        stage_finish = list()

        def last_stage():
            return test_stage == one_case["child case num"] - 1

        def check_reset():
            if one_case["reset"]:
                assert exception_reset_list  # reboot but no exception/reset logged. should never happen
                result = False
                if len(one_case["reset"]) == len(exception_reset_list):
                    for i, exception in enumerate(exception_reset_list):
                        if one_case["reset"][i] not in exception:
                            break
                    else:
                        result = True
                if not result:
                    err_msg = "Reset Check Failed: \r\n\tExpected: {}\r\n\tGet: {}".format(one_case["reset"],
                                                                                           exception_reset_list)
                    Utility.console_log(err_msg, color="orange")
                    junit_test_case.add_failure_info(err_msg)
            else:
                # the reset list may be omitted in multi-stage cases
                result = True
            return result

        # expect callbacks
        def one_case_finish(result):
            """ one test finished, let expect loop break and log result """
            # handle test finish
            result = result and check_reset()
            output = dut.stop_capture_raw_data()
            if result:
                Utility.console_log("Success: " + format_case_name(one_case), color="green")
            else:
                Utility.console_log("Failed: " + format_case_name(one_case), color="red")
                junit_test_case.add_failure_info(output)
                raise TestCaseFailed()
            stage_finish.append("break")

        def handle_exception_reset(data):
            """
            just append data to exception list.
            the exception list will be checked in ``check_reset`` once the case finishes.
            """
            exception_reset_list.append(data[0])

        def handle_test_finish(data):
            """ test finished without reset """
            # in this scenario reset should not happen
            if int(data[1]):
                # case ignored
                Utility.console_log("Ignored: " + format_case_name(one_case), color="orange")
                junit_test_case.add_skipped_info("ignored")
            # only a pass in the last stage counts as a real pass
            if last_stage():
                one_case_finish(not int(data[0]))
            else:
                Utility.console_log("test finished before enter last stage", color="orange")
                one_case_finish(False)

        def handle_next_stage(data):
            """ reboot finished, go to the next stage """
            if last_stage():
                # already at the last stage, should never go to the next stage
                Utility.console_log("didn't finish at last stage", color="orange")
                one_case_finish(False)
            else:
                stage_finish.append("continue")

        while not stage_finish:
            try:
                dut.expect_any((RESET_PATTERN, handle_exception_reset),
                               (EXCEPTION_PATTERN, handle_exception_reset),
                               (ABORT_PATTERN, handle_exception_reset),
                               (FINISH_PATTERN, handle_test_finish),
                               (UT_APP_BOOT_UP_DONE, handle_next_stage),
                               timeout=one_case["timeout"])
            except DUT.ExpectTimeout:
                Utility.console_log("Timeout in expect", color="orange")
                one_case_finish(False)
                break
        if stage_finish[0] == "break":
            # the test stops at the current stage
            break


@ttfw_idf.idf_unit_test(env_tag="UT_T1_1", junit_report_by_case=True)
def run_multiple_stage_cases(env, extra_data):
    """
    extra_data can be 2 types of value
    1. as dict: mandatory keys: "name" and "child case num", optional keys: "reset" and others
    2. as list of string or dict:
        [case1, case2, case3, {"name": "restart from PRO CPU", "child case num": 2}, ...]

    :param env: test env instance
    :param extra_data: the case name or case list or case dictionary
    :return: None
    """

    case_config = format_test_case_config(extra_data)

    # We don't want to stop on a failed case (unless it's a special scenario we can't handle).
    # This list records the cases that failed during execution and is used to report the
    # failures before the test function exits.
    failed_cases = []

    for ut_config in case_config:
        Utility.console_log("Running unit test for config: " + ut_config, "O")
        dut = env.get_dut("unit-test-app", app_path=UT_APP_PATH, app_config_name=ut_config, allow_dut_exception=True)
        if len(case_config[ut_config]) > 0:
            replace_app_bin(dut, "unit-test-app", case_config[ut_config][0].get('app_bin'))
        dut.start_app()

        for one_case in case_config[ut_config]:
            performance_items = []
            junit_test_case = TinyFW.JunitReport.create_test_case("[{}] {}".format(ut_config, one_case["name"]))
            try:
                run_one_multiple_stage_case(dut, one_case, junit_test_case)
                performance_items = dut.get_performance_items()
            except TestCaseFailed:
                failed_cases.append(format_case_name(one_case))
            except Exception as e:
                junit_test_case.add_failure_info("Unexpected exception: " + str(e))
                failed_cases.append(format_case_name(one_case))
            finally:
                TinyFW.JunitReport.update_performance(performance_items)
                TinyFW.JunitReport.test_case_finish(junit_test_case)
        # close DUT when finished running all cases for one config
        env.close_dut(dut.name)

    # raise an exception if any case failed
    if failed_cases:
        Utility.console_log("Failed Cases:", color="red")
        for _case_name in failed_cases:
            Utility.console_log("\t" + _case_name, color="red")
        raise AssertionError("Unit Test Failed")


def detect_update_unit_test_info(env, extra_data, app_bin):

    case_config = format_test_case_config(extra_data)

    for ut_config in case_config:
        dut = env.get_dut("unit-test-app", app_path=UT_APP_PATH, app_config_name=ut_config)
        replace_app_bin(dut, "unit-test-app", app_bin)
        dut.start_app()

        reset_dut(dut)

        # get the list of test cases
        dut.write("")
        dut.expect("Here's the test menu, pick your combo:", timeout=DEFAULT_TIMEOUT)

        def find_update_dic(name, _t, _timeout, child_case_num=None):
            for _case_data in extra_data:
                if _case_data['name'] == name:
                    _case_data['type'] = _t
                    if 'timeout' not in _case_data:
                        _case_data['timeout'] = _timeout
                    if child_case_num:
                        _case_data['child case num'] = child_case_num

        try:
            while True:
                data = dut.expect(TEST_PATTERN, timeout=DEFAULT_TIMEOUT)
                test_case_name = data[1]
                m = re.search(r'\[timeout=(\d+)\]', data[2])
                if m:
                    timeout = int(m.group(1))
                else:
                    timeout = 30
                m = re.search(r'\[multi_stage\]', data[2])
                if m:
                    test_case_type = MULTI_STAGE_ID
                else:
                    m = re.search(r'\[multi_device\]', data[2])
                    if m:
                        test_case_type = MULTI_DEVICE_ID
                    else:
                        test_case_type = SIMPLE_TEST_ID
                        find_update_dic(test_case_name, test_case_type, timeout)
                        if data[3] and re.search(END_LIST_STR, data[3]):
                            break
                        continue
                # find the last submenu item
                data = dut.expect(TEST_SUBMENU_PATTERN, timeout=DEFAULT_TIMEOUT)
                find_update_dic(test_case_name, test_case_type, timeout, child_case_num=int(data[0]))
                if data[1] and re.search(END_LIST_STR, data[1]):
                    break
            # check if the unit test case names are correct, i.e. they could be found in the device
            for _dic in extra_data:
                if 'type' not in _dic:
                    raise ValueError("Unit test \"{}\" doesn't exist in the flashed device!".format(_dic.get('name')))
        except DUT.ExpectTimeout:
            Utility.console_log("Timeout during getting the test list", color="red")
        finally:
            dut.close()

        # These options are the same for all configs, therefore there is no need to continue
        break


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--repeat', '-r',
        help='Number of repetitions for the test(s). Default is 1.',
        type=int,
        default=1
    )
    parser.add_argument("--env_config_file", "-e",
                        help="test env config file",
                        default=None
                        )
    parser.add_argument("--app_bin", "-b",
                        help="application binary file for flashing the chip",
                        default=None
                        )
    parser.add_argument(
        'test',
        help='Comma separated list of <option>:<argument> where option can be "name" (default), "child case num", '
             '"config", "timeout".',
        nargs='+'
    )
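
    # Illustrative invocation (the test case names below are placeholders), following the
    # <option>:<argument> format described in the help text above:
    #   python unit_test.py "name:UART can do select(),config:release,timeout:120" \
    #                       "name:gpio master/slave test example,child case num:2"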
    args = parser.parse_args()
    list_of_dicts = []
    for test in args.test:
        test_args = test.split(r',')
        test_dict = dict()
        for test_item in test_args:
            if len(test_item) == 0:
                continue
            pair = test_item.split(r':')
            if len(pair) == 1:
                test_dict['name'] = pair[0]
            elif len(pair) == 2:
                if pair[0] == 'timeout' or pair[0] == 'child case num':
                    test_dict[pair[0]] = int(pair[1])
                else:
                    test_dict[pair[0]] = pair[1]
            else:
                raise ValueError('Error in argument item {} of {}'.format(test_item, test))
        test_dict['app_bin'] = args.app_bin
        list_of_dicts.append(test_dict)

    TinyFW.set_default_config(env_config_file=args.env_config_file)

    env_config = TinyFW.get_default_config()
    env_config['app'] = ttfw_idf.UT
    env_config['dut'] = ttfw_idf.IDFDUT
    env_config['test_suite_name'] = 'unit_test_parsing'
    test_env = Env.Env(**env_config)
    detect_update_unit_test_info(test_env, extra_data=list_of_dicts, app_bin=args.app_bin)

    for index in range(1, args.repeat + 1):
        if args.repeat > 1:
            Utility.console_log("Repetition {}".format(index), color="green")
        for dic in list_of_dicts:
            t = dic.get('type', SIMPLE_TEST_ID)
            if t == SIMPLE_TEST_ID:
                run_unit_test_cases(extra_data=dic)
            elif t == MULTI_STAGE_ID:
                run_multiple_stage_cases(extra_data=dic)
            elif t == MULTI_DEVICE_ID:
                run_multiple_devices_cases(extra_data=dic)
            else:
                raise ValueError('Unknown type {} of {}'.format(t, dic.get('name')))