#!/usr/bin/env python
#
# Copyright 2018 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Test script for unit test cases.
"""

import re
import os
import sys
import time
import argparse
import threading

try:
    import TinyFW
except ImportError:
    # to run test cases outside the `tiny-test-fw` folder,
    # we need to insert the tiny-test-fw path into sys.path
    test_fw_path = os.getenv("TEST_FW_PATH")
    if test_fw_path and test_fw_path not in sys.path:
        sys.path.insert(0, test_fw_path)
    else:
        # or try the copy in IDF
        idf_path = os.getenv("IDF_PATH")
        tiny_test_path = idf_path + "/tools/tiny-test-fw"
        if os.path.exists(tiny_test_path):
            sys.path.insert(0, tiny_test_path)
    import TinyFW

import IDF
import Utility
import Env
from DUT import ExpectTimeout
from IDF.IDFApp import UT


UT_APP_BOOT_UP_DONE = "Press ENTER to see the list of tests."
RESET_PATTERN = re.compile(r"(ets [\w]{3}\s+[\d]{1,2} [\d]{4} [\d]{2}:[\d]{2}:[\d]{2}[^()]*\([\w].*?\))")
EXCEPTION_PATTERN = re.compile(r"(Guru Meditation Error: Core\s+\d panic'ed \([\w].*?\))")
ABORT_PATTERN = re.compile(r"(abort\(\) was called at PC 0x[a-fA-F\d]{8} on core \d)")
FINISH_PATTERN = re.compile(r"1 Tests (\d) Failures (\d) Ignored")
END_LIST_STR = r'\r?\nEnter test for running'
TEST_PATTERN = re.compile(r'\((\d+)\)\s+"([^"]+)" ([^\r\n]+)\r?\n(' + END_LIST_STR + r')?')
TEST_SUBMENU_PATTERN = re.compile(r'\s+\((\d+)\)\s+"[^"]+"\r?\n(?=(?=\()|(' + END_LIST_STR + r'))')
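# Note (illustrative only, not captured from a real device): TEST_PATTERN is written to match
# test menu lines of the form
#     (3) "UART read/write" [uart][timeout=60]
# where group 1 is the menu index, group 2 the case name, and group 3 the tag string that
# detect_update_unit_test_info() later scans for [timeout=...], [multi_stage] and [multi_device].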

SIMPLE_TEST_ID = 0
MULTI_STAGE_ID = 1
MULTI_DEVICE_ID = 2

DEFAULT_TIMEOUT = 20
DUT_STARTUP_CHECK_RETRY_COUNT = 5
TEST_HISTORY_CHECK_TIMEOUT = 1


class TestCaseFailed(AssertionError):
    pass


def format_test_case_config(test_case_data):
    """
    Convert the test case data to a unified format.
    We need the following info to run unit test cases:

    1. unit test app config
    2. test case name
    3. test case reset info

    The formatted case config is a dict with ut app configs as keys. The value is a list of test cases.
    Each test case is a dict with "name" and "reset" as keys. For example::

        case_config = {
            "default": [{"name": "restart from PRO CPU", "reset": "SW_CPU_RESET"}, {...}],
            "psram": [{"name": "restart from PRO CPU", "reset": "SW_CPU_RESET"}],
        }

    If config is not specified for a test case, the "default" config is used.

    :param test_case_data: string, list, or a dictionary list
    :return: formatted data
    """

    case_config = dict()

    def parse_case(one_case_data):
        """ parse and format one case """

        def process_reset_list(reset_list):
            # strip spaces and remove whitespace-only items
            _output = list()
            for _r in reset_list:
                _data = _r.strip(" ")
                if _data:
                    _output.append(_data)
            return _output

        _case = dict()
        if isinstance(one_case_data, str):
            _temp = one_case_data.split(" [reset=")
            _case["name"] = _temp[0]
            try:
                _case["reset"] = process_reset_list(_temp[1][0:-1].split(","))
            except IndexError:
                _case["reset"] = list()
        elif isinstance(one_case_data, dict):
            _case = one_case_data.copy()
            assert "name" in _case
            if "reset" not in _case:
                _case["reset"] = list()
            else:
                if isinstance(_case["reset"], str):
                    _case["reset"] = process_reset_list(_case["reset"].split(","))
        else:
            raise TypeError("Not supported type during parsing unit test case")

        if "config" not in _case:
            _case["config"] = "default"
        return _case

    if not isinstance(test_case_data, list):
        test_case_data = [test_case_data]

    for case_data in test_case_data:
        parsed_case = parse_case(case_data)
        try:
            case_config[parsed_case["config"]].append(parsed_case)
        except KeyError:
            case_config[parsed_case["config"]] = [parsed_case]

    return case_config
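

# A minimal usage sketch of format_test_case_config() (the case name is made up; only the
# shape of the result matters):
#
#   format_test_case_config('UART read/write [reset=SW_CPU_RESET]')
#   # -> {'default': [{'name': 'UART read/write',
#   #                  'reset': ['SW_CPU_RESET'],
#   #                  'config': 'default'}]}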


def replace_app_bin(dut, name, new_app_bin):
    if new_app_bin is None:
        return
    search_pattern = '/{}.bin'.format(name)
    for i, config in enumerate(dut.download_config):
        if config.endswith(search_pattern):
            dut.download_config[i] = new_app_bin
            Utility.console_log("The replaced application binary is {}".format(new_app_bin), "O")
            break


def reset_dut(dut):
    dut.reset()
    # The esptool ``run`` cmd takes quite a long time; the serial port is closed before the
    # reset finishes, so the DUT could already have booted up before the serial port is
    # reopened. This can make checking the boot-up print fail.
    # Instead, send the input cmd `-` and check the test history to see whether the DUT has
    # booted up. We retry this step a few times, in case `dut.reset` returns while the DUT is
    # still booting (when the DUT can't process any command yet).
    for _ in range(DUT_STARTUP_CHECK_RETRY_COUNT):
        dut.write("-")
        try:
            dut.expect("0 Tests 0 Failures 0 Ignored", timeout=TEST_HISTORY_CHECK_TIMEOUT)
            break
        except ExpectTimeout:
            pass
    else:
        raise AssertionError("Reset {} ({}) failed!".format(dut.name, dut.port))


def run_one_normal_case(dut, one_case, junit_test_case):

    reset_dut(dut)

    dut.start_capture_raw_data()
    # run test case
    dut.write("\"{}\"".format(one_case["name"]))
    dut.expect("Running " + one_case["name"] + "...")

    exception_reset_list = []

    # we want to set this flag in callbacks (inner functions)
    # use list here so we can use append to set this flag
    test_finish = list()

    # expect callbacks
    def one_case_finish(result):
        """ one test finished, let expect loop break and log result """
        test_finish.append(True)
        output = dut.stop_capture_raw_data()
        if result:
            Utility.console_log("Success: " + one_case["name"], color="green")
        else:
            Utility.console_log("Failed: " + one_case["name"], color="red")
            junit_test_case.add_failure_info(output)
            raise TestCaseFailed()

    def handle_exception_reset(data):
        """
        just append data to exception list.
        exception list will be checked in ``handle_reset_finish``, once reset finished.
        """
        exception_reset_list.append(data[0])

    def handle_test_finish(data):
        """ test finished without reset """
        # in this scenario reset should not happen
        assert not exception_reset_list
        if int(data[1]):
            # case ignored
            Utility.console_log("Ignored: " + one_case["name"], color="orange")
            junit_test_case.add_skipped_info("ignored")
        one_case_finish(not int(data[0]))

    def handle_reset_finish(data):
        """ reset happened and reboot finished """
        assert exception_reset_list  # reboot but no exception/reset logged. should never happen
        result = False
        if len(one_case["reset"]) == len(exception_reset_list):
            for i, exception in enumerate(exception_reset_list):
                if one_case["reset"][i] not in exception:
                    break
            else:
                result = True
        if not result:
            err_msg = "Reset Check Failed: \r\n\tExpected: {}\r\n\tGet: {}".format(one_case["reset"],
                                                                                   exception_reset_list)
            Utility.console_log(err_msg, color="orange")
            junit_test_case.add_error_info(err_msg)
        one_case_finish(result)

    while not test_finish:
        try:
            dut.expect_any((RESET_PATTERN, handle_exception_reset),
                           (EXCEPTION_PATTERN, handle_exception_reset),
                           (ABORT_PATTERN, handle_exception_reset),
                           (FINISH_PATTERN, handle_test_finish),
                           (UT_APP_BOOT_UP_DONE, handle_reset_finish),
                           timeout=one_case["timeout"])
        except ExpectTimeout:
            Utility.console_log("Timeout in expect", color="orange")
            junit_test_case.add_failure_info("timeout")
            one_case_finish(False)
            break


@IDF.idf_unit_test(env_tag="UT_T1_1", junit_report_by_case=True)
def run_unit_test_cases(env, extra_data):
    """
    extra_data can be three types of value
    1. as string:
        1. "case_name"
        2. "case_name [reset=RESET_REASON]"
    2. as dict:
        1. with key like {"name": "Intr_alloc test, shared ints"}
        2. with key like {"name": "restart from PRO CPU", "reset": "SW_CPU_RESET", "config": "psram"}
    3. as list of string or dict:
        [case1, case2, case3, {"name": "restart from PRO CPU", "reset": "SW_CPU_RESET"}, ...]

    :param env: test env instance
    :param extra_data: the case name, case list or case dictionary
    :return: None
    """

    case_config = format_test_case_config(extra_data)

    # we don't want to stop on a failed case (unless it's some special scenario we can't handle);
    # this list collects the names of cases that fail during execution,
    # and is used to report them and raise an error before the test function exits
    failed_cases = []

    for ut_config in case_config:
        Utility.console_log("Running unit test for config: " + ut_config, "O")
        dut = env.get_dut("unit-test-app", app_path=ut_config)
        if len(case_config[ut_config]) > 0:
            replace_app_bin(dut, "unit-test-app", case_config[ut_config][0].get('app_bin'))
        dut.start_app()
        Utility.console_log("Download finished, start running test cases", "O")

        for one_case in case_config[ut_config]:
            # create junit report test case
            junit_test_case = TinyFW.JunitReport.create_test_case("[{}] {}".format(ut_config, one_case["name"]))
            try:
                run_one_normal_case(dut, one_case, junit_test_case)
            except TestCaseFailed:
                failed_cases.append(one_case["name"])
            except Exception as e:
                junit_test_case.add_failure_info("Unexpected exception: " + str(e))
                failed_cases.append(one_case["name"])
            finally:
                TinyFW.JunitReport.test_case_finish(junit_test_case)

    # raise an exception if any case failed
    if failed_cases:
        Utility.console_log("Failed Cases:", color="red")
        for _case_name in failed_cases:
            Utility.console_log("\t" + _case_name, color="red")
        raise AssertionError("Unit Test Failed")


class Handler(threading.Thread):

    WAIT_SIGNAL_PATTERN = re.compile(r'Waiting for signal: \[(.+)]!')
    SEND_SIGNAL_PATTERN = re.compile(r'Send signal: \[([^]]+)](\[([^]]+)])?!')
    FINISH_PATTERN = re.compile(r"1 Tests (\d) Failures (\d) Ignored")
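    # Hedged illustration (the sample lines are invented, not captured from a real DUT): the
    # synchronization lines printed by multi-device cases look like
    #     Waiting for signal: [gpio output]!
    #     Send signal: [gpio output]!   or   Send signal: [gpio output][some parameter]!
    # WAIT_SIGNAL_PATTERN captures the signal name; SEND_SIGNAL_PATTERN captures the name plus
    # an optional parameter, which device_wait_action() forwards to the waiting DUT.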

    def __init__(self, dut, sent_signal_list, lock, parent_case_name, child_case_index, timeout):
        self.dut = dut
        self.sent_signal_list = sent_signal_list
        self.lock = lock
        self.parent_case_name = parent_case_name
        self.child_case_name = ""
        self.child_case_index = child_case_index + 1
        self.finish = False
        self.result = False
        self.output = ""
        self.fail_name = None
        self.timeout = timeout
        self.force_stop = threading.Event()  # set when the handler is asked to stop

        reset_dut(self.dut)  # reset the board to make it start from the beginning

        threading.Thread.__init__(self, name="{} Handler".format(dut))

    def run(self):

        self.dut.start_capture_raw_data()

        def get_child_case_name(data):
            self.child_case_name = data[0]
            time.sleep(1)
            self.dut.write(str(self.child_case_index))

        def one_device_case_finish(result):
            """ one test finished, let expect loop break and log result """
            self.finish = True
            self.result = result
            self.output = "[{}]\n\n{}\n".format(self.child_case_name,
                                                self.dut.stop_capture_raw_data())
            if not result:
                self.fail_name = self.child_case_name

        def device_wait_action(data):
            start_time = time.time()
            expected_signal = data[0]
            while 1:
                if time.time() > start_time + self.timeout:
                    Utility.console_log("Timeout in device for function: %s" % self.child_case_name, color="orange")
                    break
                with self.lock:
                    for sent_signal in self.sent_signal_list:
                        if expected_signal == sent_signal["name"]:
                            self.dut.write(sent_signal["parameter"])
                            self.sent_signal_list.remove(sent_signal)
                            break
                    else:
                        time.sleep(0.01)
                        continue
                    break

        def device_send_action(data):
            with self.lock:
                self.sent_signal_list.append({
                    "name": data[0].encode('utf-8'),
                    "parameter": "" if data[2] is None else data[2].encode('utf-8')
                    # no parameter means we only write EOL to DUT
                })

        def handle_device_test_finish(data):
            """ test finished without reset """
            # in this scenario reset should not happen
            if int(data[1]):
                # case ignored
                Utility.console_log("Ignored: " + self.child_case_name, color="orange")
            one_device_case_finish(not int(data[0]))

        try:
            time.sleep(1)
            self.dut.write("\"{}\"".format(self.parent_case_name))
            self.dut.expect("Running " + self.parent_case_name + "...")
        except ExpectTimeout:
            Utility.console_log("No case detected!", color="orange")
        while not self.finish and not self.force_stop.isSet():
            try:
                self.dut.expect_any((re.compile(r'\(' + str(self.child_case_index) + r'\)\s"(\w+)"'),
                                     get_child_case_name),
                                    (self.WAIT_SIGNAL_PATTERN, device_wait_action),  # wait signal pattern
                                    (self.SEND_SIGNAL_PATTERN, device_send_action),  # send signal pattern
                                    (self.FINISH_PATTERN, handle_device_test_finish),  # test finish pattern
                                    timeout=self.timeout)
            except ExpectTimeout:
                Utility.console_log("Timeout in expect", color="orange")
                one_device_case_finish(False)
                break

    def stop(self):
        self.force_stop.set()


def get_case_info(one_case):
    parent_case = one_case["name"]
    child_case_num = one_case["child case num"]
    return parent_case, child_case_num


def get_dut(duts, env, name, ut_config, app_bin=None):
    if name in duts:
        dut = duts[name]
    else:
        dut = env.get_dut(name, app_path=ut_config)
        duts[name] = dut
        replace_app_bin(dut, "unit-test-app", app_bin)
        dut.start_app()  # download bin to board
    return dut


def run_one_multiple_devices_case(duts, ut_config, env, one_case, app_bin, junit_test_case):
    lock = threading.RLock()
    threads = []
    send_signal_list = []
    result = True
    parent_case, case_num = get_case_info(one_case)

    for i in range(case_num):
        dut = get_dut(duts, env, "dut%d" % i, ut_config, app_bin)
        threads.append(Handler(dut, send_signal_list, lock,
                               parent_case, i, one_case["timeout"]))
    for thread in threads:
        thread.setDaemon(True)
        thread.start()

    output = "Multiple Device Failed\n"
    for thread in threads:
        thread.join()
        result = result and thread.result
        output += thread.output
        if not thread.result:
            [thd.stop() for thd in threads]

    if not result:
        junit_test_case.add_failure_info(output)

    return result


@IDF.idf_unit_test(env_tag="UT_T2_1", junit_report_by_case=True)
def run_multiple_devices_cases(env, extra_data):
    """
    extra_data can be two types of value
    1. as dict, e.g.
        {"name": "gpio master/slave test example",
         "child case num": 2,
         "config": "release",
         "env_tag": "UT_T2_1"}
    2. as a list of dicts, e.g.
        [{"name": "gpio master/slave test example1",
          "child case num": 2,
          "config": "release",
          "env_tag": "UT_T2_1"},
         {"name": "gpio master/slave test example2",
          "child case num": 2,
          "config": "release",
          "env_tag": "UT_T2_1"}]
    """
    failed_cases = []
    case_config = format_test_case_config(extra_data)
    duts = {}
    for ut_config in case_config:
        Utility.console_log("Running unit test for config: " + ut_config, "O")
        for one_case in case_config[ut_config]:
            result = False
            junit_test_case = TinyFW.JunitReport.create_test_case("[{}] {}".format(ut_config, one_case["name"]))
            try:
                result = run_one_multiple_devices_case(duts, ut_config, env, one_case,
                                                       one_case.get('app_bin'), junit_test_case)
            except Exception as e:
                junit_test_case.add_failure_info("Unexpected exception: " + str(e))
            finally:
                if result:
                    Utility.console_log("Success: " + one_case["name"], color="green")
                else:
                    failed_cases.append(one_case["name"])
                    Utility.console_log("Failed: " + one_case["name"], color="red")
                TinyFW.JunitReport.test_case_finish(junit_test_case)

    if failed_cases:
        Utility.console_log("Failed Cases:", color="red")
        for _case_name in failed_cases:
            Utility.console_log("\t" + _case_name, color="red")
        raise AssertionError("Unit Test Failed")


def run_one_multiple_stage_case(dut, one_case, junit_test_case):
    reset_dut(dut)

    dut.start_capture_raw_data()

    exception_reset_list = []

    for test_stage in range(one_case["child case num"]):
        # select the multi stage test case name
        dut.write("\"{}\"".format(one_case["name"]))
        dut.expect("Running " + one_case["name"] + "...")
        # select the test function for the current stage
        dut.write(str(test_stage + 1))

        # we want to set this flag in callbacks (inner functions)
        # use a list here so we can use append to set this flag
        stage_finish = list()

        def last_stage():
            return test_stage == one_case["child case num"] - 1

        def check_reset():
            if one_case["reset"]:
                assert exception_reset_list  # reboot but no exception/reset logged. should never happen
                result = False
                if len(one_case["reset"]) == len(exception_reset_list):
                    for i, exception in enumerate(exception_reset_list):
                        if one_case["reset"][i] not in exception:
                            break
                    else:
                        result = True
                if not result:
                    err_msg = "Reset Check Failed: \r\n\tExpected: {}\r\n\tGet: {}".format(one_case["reset"],
                                                                                           exception_reset_list)
                    Utility.console_log(err_msg, color="orange")
                    junit_test_case.add_failure_info(err_msg)
            else:
                # we allow omitting reset info in multi stage cases
                result = True
            return result

        # expect callbacks
        def one_case_finish(result):
            """ one test finished, let expect loop break and log result """
            # handle test finish
            result = result and check_reset()
            output = dut.stop_capture_raw_data()
            if result:
                Utility.console_log("Success: " + one_case["name"], color="green")
            else:
                Utility.console_log("Failed: " + one_case["name"], color="red")
                junit_test_case.add_failure_info(output)
                raise TestCaseFailed()
            stage_finish.append("break")

        def handle_exception_reset(data):
            """
            just append data to exception list.
            exception list will be checked in ``check_reset``, once reset finished.
            """
            exception_reset_list.append(data[0])

        def handle_test_finish(data):
            """ test finished without reset """
            # in this scenario reset should not happen
            if int(data[1]):
                # case ignored
                Utility.console_log("Ignored: " + one_case["name"], color="orange")
                junit_test_case.add_skipped_info("ignored")
            # only a pass in the last stage is regarded as a real pass
            if last_stage():
                one_case_finish(not int(data[0]))
            else:
                Utility.console_log("test finished before enter last stage", color="orange")
                one_case_finish(False)

        def handle_next_stage(data):
            """ reboot finished. we go to the next stage """
            if last_stage():
                # already the last stage, should never go to a next stage
                Utility.console_log("didn't finish at last stage", color="orange")
                one_case_finish(False)
            else:
                stage_finish.append("continue")

        while not stage_finish:
            try:
                dut.expect_any((RESET_PATTERN, handle_exception_reset),
                               (EXCEPTION_PATTERN, handle_exception_reset),
                               (ABORT_PATTERN, handle_exception_reset),
                               (FINISH_PATTERN, handle_test_finish),
                               (UT_APP_BOOT_UP_DONE, handle_next_stage),
                               timeout=one_case["timeout"])
            except ExpectTimeout:
                Utility.console_log("Timeout in expect", color="orange")
                one_case_finish(False)
                break
        if stage_finish[0] == "break":
            # the test breaks on the current stage
            break


@IDF.idf_unit_test(env_tag="UT_T1_1", junit_report_by_case=True)
def run_multiple_stage_cases(env, extra_data):
    """
    extra_data can be 2 types of value
    1. as dict: mandatory keys: "name" and "child case num", optional keys: "reset" and others
    2. as list of string or dict:
        [case1, case2, case3, {"name": "restart from PRO CPU", "child case num": 2}, ...]

    :param env: test env instance
    :param extra_data: the case name, case list or case dictionary
    :return: None
    """
    case_config = format_test_case_config(extra_data)

    # we don't want to stop on a failed case (unless it's some special scenario we can't handle);
    # this list collects the names of cases that fail during execution,
    # and is used to report them and raise an error before the test function exits
    failed_cases = []

    for ut_config in case_config:
        Utility.console_log("Running unit test for config: " + ut_config, "O")
        dut = env.get_dut("unit-test-app", app_path=ut_config)
        if len(case_config[ut_config]) > 0:
            replace_app_bin(dut, "unit-test-app", case_config[ut_config][0].get('app_bin'))
        dut.start_app()

        for one_case in case_config[ut_config]:
            junit_test_case = TinyFW.JunitReport.create_test_case("[{}] {}".format(ut_config, one_case["name"]))
            try:
                run_one_multiple_stage_case(dut, one_case, junit_test_case)
            except TestCaseFailed:
                failed_cases.append(one_case["name"])
            except Exception as e:
                junit_test_case.add_failure_info("Unexpected exception: " + str(e))
                failed_cases.append(one_case["name"])
            finally:
                TinyFW.JunitReport.test_case_finish(junit_test_case)

    # raise an exception if any case failed
    if failed_cases:
        Utility.console_log("Failed Cases:", color="red")
        for _case_name in failed_cases:
            Utility.console_log("\t" + _case_name, color="red")
        raise AssertionError("Unit Test Failed")


def detect_update_unit_test_info(env, extra_data, app_bin):

    case_config = format_test_case_config(extra_data)

    for ut_config in case_config:
        dut = env.get_dut("unit-test-app", app_path=ut_config)
        replace_app_bin(dut, "unit-test-app", app_bin)
        dut.start_app()

        reset_dut(dut)

        # get the list of test cases
        dut.write("")
        dut.expect("Here's the test menu, pick your combo:", timeout=DEFAULT_TIMEOUT)

        def find_update_dic(name, _t, _timeout, child_case_num=None):
            for _case_data in extra_data:
                if _case_data['name'] == name:
                    _case_data['type'] = _t
                    if 'timeout' not in _case_data:
                        _case_data['timeout'] = _timeout
                    if child_case_num:
                        _case_data['child case num'] = child_case_num

        try:
            while True:
                data = dut.expect(TEST_PATTERN, timeout=DEFAULT_TIMEOUT)
                test_case_name = data[1]
                m = re.search(r'\[timeout=(\d+)\]', data[2])
                if m:
                    timeout = int(m.group(1))
                else:
                    timeout = 30
                m = re.search(r'\[multi_stage\]', data[2])
                if m:
                    test_case_type = MULTI_STAGE_ID
                else:
                    m = re.search(r'\[multi_device\]', data[2])
                    if m:
                        test_case_type = MULTI_DEVICE_ID
                    else:
                        test_case_type = SIMPLE_TEST_ID
                        find_update_dic(test_case_name, test_case_type, timeout)
                        if data[3] and re.search(END_LIST_STR, data[3]):
                            break
                        continue
                # find the last submenu item
                data = dut.expect(TEST_SUBMENU_PATTERN, timeout=DEFAULT_TIMEOUT)
                find_update_dic(test_case_name, test_case_type, timeout, child_case_num=int(data[0]))
                if data[1] and re.search(END_LIST_STR, data[1]):
                    break
            # check that the unit test case names are correct, i.e. they can be found on the device
            for _dic in extra_data:
                if 'type' not in _dic:
                    raise ValueError("Unit test \"{}\" doesn't exist in the flashed device!".format(_dic.get('name')))
        except ExpectTimeout:
            Utility.console_log("Timeout during getting the test list", color="red")
        finally:
            dut.close()

        # these options are the same for all configs, therefore there is no need to continue
        break
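

# Rough sketch of the in-place update performed by detect_update_unit_test_info()
# (the case name below is illustrative):
#
#   extra_data = [{'name': 'gpio master/slave test example'}]
#   detect_update_unit_test_info(test_env, extra_data, app_bin=None)
#   # -> extra_data[0] now also carries a 'type', a 'timeout' (if it had none), and, for
#   #    multi-device/multi-stage cases, a 'child case num', all parsed from the DUT's test menu.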


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--repeat', '-r',
        help='Number of repetitions for the test(s). Default is 1.',
        type=int,
        default=1
    )
    parser.add_argument("--env_config_file", "-e",
                        help="test env config file",
                        default=None
                        )
    parser.add_argument("--app_bin", "-b",
                        help="application binary file for flashing the chip",
                        default=None
                        )
    parser.add_argument(
        'test',
        help='Comma separated list of <option>:<argument> where option can be "name" (default), '
             '"child case num", "config", "timeout".',
        nargs='+'
    )
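    # Example invocation (hypothetical case names and file names, shown only to illustrate the
    # "<option>:<argument>" syntax accepted above):
    #
    #   python unit_test.py -e EnvConfig.yml -b build/unit-test-app.bin \
    #       "UART read/write,config:default,timeout:60" \
    #       "gpio master/slave test example,child case num:2"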
    args = parser.parse_args()

    list_of_dicts = []
    for test in args.test:
        test_args = test.split(r',')
        test_dict = dict()
        for test_item in test_args:
            if len(test_item) == 0:
                continue
            pair = test_item.split(r':')
            if len(pair) == 1 or pair[0] == 'name':
                # a bare item and an explicit "name:<case name>" pair both set the case name
                test_dict['name'] = pair[-1]
            elif len(pair) == 2:
                if pair[0] == 'timeout' or pair[0] == 'child case num':
                    test_dict[pair[0]] = int(pair[1])
                else:
                    test_dict[pair[0]] = pair[1]
            else:
                raise ValueError('Error in argument item {} of {}'.format(test_item, test))
        test_dict['app_bin'] = args.app_bin
        list_of_dicts.append(test_dict)

    TinyFW.set_default_config(env_config_file=args.env_config_file)

    env_config = TinyFW.get_default_config()
    env_config['app'] = UT
    env_config['dut'] = IDF.IDFDUT
    env_config['test_suite_name'] = 'unit_test_parsing'
    test_env = Env.Env(**env_config)
    detect_update_unit_test_info(test_env, extra_data=list_of_dicts, app_bin=args.app_bin)

    for index in range(1, args.repeat + 1):
        if args.repeat > 1:
            Utility.console_log("Repetition {}".format(index), color="green")
        for dic in list_of_dicts:
            t = dic.get('type', SIMPLE_TEST_ID)
            if t == SIMPLE_TEST_ID:
                run_unit_test_cases(extra_data=dic)
            elif t == MULTI_STAGE_ID:
                run_multiple_stage_cases(extra_data=dic)
            elif t == MULTI_DEVICE_ID:
                run_multiple_devices_cases(extra_data=dic)
            else:
                raise ValueError('Unknown type {} of {}'.format(t, dic.get('name')))