# SPDX-FileCopyrightText: 2021-2023 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
# pylint: disable=W0621  # redefined-outer-name
#
# This file is the pytest root configuration file and provides the following functionality:
# 1. Defines a few fixtures that can be used across the whole project.
# 2. Defines a few hook functions.
#
# IDF uses [pytest](https://github.com/pytest-dev/pytest) and the
# [pytest-embedded plugin](https://github.com/espressif/pytest-embedded) as its example test framework.
#
# This is an experimental feature. If you find any bug or have any question, please report it at
# https://github.com/espressif/pytest-embedded/issues
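#
# A minimal illustrative invocation (the app path is hypothetical):
#
#   pytest --target esp32 -m generic examples/get-started/hello_world
#
# `--target` selects the chip the app was built for; `-m` filters the collected
# cases by the markers declared below.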
import glob
import json
import logging
import os
import re
import sys
import xml.etree.ElementTree as ET
from datetime import datetime
from fnmatch import fnmatch
from typing import Callable, Dict, List, Optional, Tuple

import pytest
from _pytest.config import Config, ExitCode
from _pytest.fixtures import FixtureRequest
from _pytest.main import Session
from _pytest.nodes import Item
from _pytest.python import Function
from _pytest.reports import TestReport
from _pytest.runner import CallInfo
from _pytest.terminal import TerminalReporter
from pytest_embedded.plugin import multi_dut_argument, multi_dut_fixture
from pytest_embedded.utils import find_by_suffix
from pytest_embedded_idf.dut import IdfDut
try:
    from idf_ci_utils import IDF_PATH, to_list
    from idf_unity_tester import CaseTester
except ImportError:
    sys.path.append(os.path.join(os.path.dirname(__file__), 'tools', 'ci'))

    from idf_ci_utils import IDF_PATH, to_list
    from idf_unity_tester import CaseTester

try:
    import common_test_methods  # noqa: F401
except ImportError:
    sys.path.append(os.path.join(os.path.dirname(__file__), 'tools', 'ci', 'python_packages'))
    import common_test_methods  # noqa: F401
SUPPORTED_TARGETS = ['esp32', 'esp32s2', 'esp32c3', 'esp32s3', 'esp32c2', 'esp32c6', 'esp32h2']
PREVIEW_TARGETS: List[str] = []  # PREVIEW_TARGETS excludes the 'linux' target
DEFAULT_SDKCONFIG = 'default'

TARGET_MARKERS = {
    'esp32': 'support esp32 target',
    'esp32s2': 'support esp32s2 target',
    'esp32s3': 'support esp32s3 target',
    'esp32c3': 'support esp32c3 target',
    'esp32c2': 'support esp32c2 target',
    'esp32c6': 'support esp32c6 target',
    'esp32h2': 'support esp32h2 target',
    'linux': 'support linux target',
}

SPECIAL_MARKERS = {
    'supported_targets': "support all officially announced supported targets ('esp32', 'esp32s2', 'esp32c3', 'esp32s3', 'esp32c2', 'esp32c6', 'esp32h2')",
    'preview_targets': "support all preview targets ('none')",
    'all_targets': 'support all targets, including supported ones and preview ones',
    'temp_skip_ci': 'temporarily skip tests for the specified targets, in CI only',
    'temp_skip': 'temporarily skip tests for the specified targets, both in CI and locally',
    'nightly_run': 'tests should be executed as part of the nightly trigger pipeline',
    'host_test': 'tests which should not be built at the build stage, but instead built in the host_test stage',
    'qemu': 'build and test using qemu-system-xtensa, not a real target',
}

ENV_MARKERS = {
    # single-dut markers
    'generic': 'tests should be run on generic runners',
    'flash_suspend': 'support flash suspend feature',
    'ip101': 'connected via wired 10/100M ethernet',
    'lan8720': 'connected via LAN8720 ethernet transceiver',
    'quad_psram': 'runners with quad psram',
    'octal_psram': 'runners with octal psram',
    'usb_host': 'usb host runners',
    'usb_host_flash_disk': 'usb host runners with a USB flash disk attached',
    'usb_device': 'usb device runners',
    'ethernet_ota': 'ethernet OTA runners',
    'flash_encryption': 'Flash Encryption runners',
    'flash_encryption_f4r8': 'Flash Encryption runners with 4-line flash and 8-line psram',
    'flash_encryption_f8r8': 'Flash Encryption runners with 8-line flash and 8-line psram',
    'flash_multi': 'multiple flash chips tests',
    'psram': 'chip has 4-line psram',
    'ir_transceiver': 'runners with a pair of IR transmitter and receiver',
    'twai_transceiver': 'runners with a TWAI PHY transceiver',
    'flash_encryption_wifi_high_traffic': 'Flash Encryption runners with wifi high traffic support',
    'ethernet': 'ethernet runner',
    'ethernet_flash_8m': 'ethernet runner with 8MB flash',
    'ethernet_router': 'both the runner and the DUT connect to the same router through ethernet NIC',
    'ethernet_vlan': 'ethernet runner GARM-32-SH-1-R16S5N3',
    'wifi_ap': 'a wifi AP in the environment',
    'wifi_router': 'both the runner and the DUT connect to the same wifi router',
    'wifi_high_traffic': 'wifi high traffic runners',
    'wifi_wlan': 'wifi runner with a wireless NIC',
    'Example_ShieldBox_Basic': 'basic configuration of the AP and ESP DUT placed in a shielded box',
    'Example_ShieldBox': 'multiple shielded APs connected to a shielded ESP DUT via RF cable with a programmable attenuator',
    'xtal_26mhz': 'runner with a 26MHz xtal on board',
    'xtal_40mhz': 'runner with a 40MHz xtal on board',
    'external_flash': 'external flash memory connected via VSPI (FSPI)',
    'sdcard_sdmode': 'sdcard running in SD mode',
    'sdcard_spimode': 'sdcard running in SPI mode',
    'emmc': 'eMMC card',
    'MSPI_F8R8': 'runner with Octal Flash and Octal PSRAM',
    'MSPI_F4R8': 'runner with Quad Flash and Octal PSRAM',
    'MSPI_F4R4': 'runner with Quad Flash and Quad PSRAM',
    'jtag': 'runner where the chip is accessible through JTAG as well',
    'usb_serial_jtag': 'runner where the chip is accessible through the built-in JTAG as well',
    'adc': 'ADC related tests should run on adc runners',
    'xtal32k': 'runner with an external 32k crystal connected',
    'no32kXtal': 'runner with no external 32k crystal connected',
    'multi_dut_modbus_rs485': 'a pair of runners connected by an RS485 bus',
    'psramv0': 'runner with PSRAM version 0',
    'esp32eco3': 'runner with an esp32 eco3 connected',
    'ecdsa_efuse': 'runner with test ECDSA private keys programmed in efuse',
    'ccs811': 'runner with a CCS811 connected',
    'eth_w5500': 'SPI Ethernet module with two W5500',
    'nvs_encr_hmac': 'runner with a test HMAC key programmed in efuse',
    'i2c_oled': 'runner with an SSD1306 I2C OLED connected',
    'httpbin': 'runner for tests that need to access the httpbin service',
    # multi-dut markers
    'ieee802154': 'ieee802154 related tests should run on ieee802154 runners',
    'openthread_br': 'tests should be used for openthread border router',
    'openthread_sleep': 'tests should be used for openthread sleepy device',
    'zigbee_multi_dut': 'zigbee runner which has multiple DUTs',
    'wifi_two_dut': 'tests should be run on runners which have two wifi DUTs connected',
    'generic_multi_device': 'generic multiple devices whose corresponding gpio pins are connected to each other',
    'twai_network': 'multiple runners forming a TWAI network',
    'sdio_master_slave': 'sdio multi-board tests',
}

SUB_JUNIT_FILENAME = 'dut.xml'


####################
# Helper Functions #
####################
def format_case_id(target: Optional[str], config: Optional[str], case: str, is_qemu: bool = False) -> str:
    parts = []
    if target:
        parts.append((str(target) + '_qemu') if is_qemu else str(target))
    if config:
        parts.append(str(config))
    parts.append(case)
    return '.'.join(parts)
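
# Illustrative outputs of format_case_id (inputs hypothetical):
#   format_case_id('esp32', 'default', 'test_hello')                -> 'esp32.default.test_hello'
#   format_case_id('esp32', 'default', 'test_hello', is_qemu=True)  -> 'esp32_qemu.default.test_hello'
#   format_case_id(None, None, 'test_hello')                        -> 'test_hello'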

def item_marker_names(item: Item) -> List[str]:
    return [marker.name for marker in item.iter_markers()]


def item_target_marker_names(item: Item) -> List[str]:
    res = set()
    for marker in item.iter_markers():
        if marker.name in TARGET_MARKERS:
            res.add(marker.name)

    return sorted(res)


def item_env_marker_names(item: Item) -> List[str]:
    res = set()
    for marker in item.iter_markers():
        if marker.name in ENV_MARKERS:
            res.add(marker.name)

    return sorted(res)

def item_skip_targets(item: Item) -> List[str]:
    def _get_temp_markers_disabled_targets(marker_name: str) -> List[str]:
        temp_marker = item.get_closest_marker(marker_name)
        if not temp_marker:
            return []

        # temp markers should always use keyword arguments `targets` and `reason`
        if not temp_marker.kwargs.get('targets') or not temp_marker.kwargs.get('reason'):
            raise ValueError(
                f'`{marker_name}` should always use keyword arguments `targets` and `reason`. '
                f'For example: '
                f'`@pytest.mark.{marker_name}(targets=["esp32"], reason="IDF-xxxx, will fix it ASAP")`'
            )

        return to_list(temp_marker.kwargs['targets'])  # type: ignore

    temp_skip_ci_targets = _get_temp_markers_disabled_targets('temp_skip_ci')
    temp_skip_targets = _get_temp_markers_disabled_targets('temp_skip')

    # in CI we skip the union of `temp_skip` and `temp_skip_ci`
    if os.getenv('CI_JOB_ID'):
        skip_targets = list(set(temp_skip_ci_targets).union(set(temp_skip_targets)))
    else:  # locally we only respect `temp_skip`
        skip_targets = temp_skip_targets

    return skip_targets

def get_target_marker_from_expr(markexpr: str) -> str:
    candidates = set()
    # we use `-m "esp32 and generic"` in our CI to filter the test cases
    # this doesn't cover all use cases, but fits what we do in CI
    for marker in markexpr.split('and'):
        marker = marker.strip()
        if marker in TARGET_MARKERS:
            candidates.add(marker)

    if len(candidates) > 1:
        raise ValueError(f'Specified more than one target marker: {candidates}. Please specify no more than one.')
    elif len(candidates) == 1:
        return candidates.pop()
    else:
        raise ValueError('Please specify one target marker via "--target [TARGET]" or via "-m [TARGET]"')
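
# Illustrative behavior with the markers defined above:
#   get_target_marker_from_expr('esp32 and generic')  -> 'esp32'
#   get_target_marker_from_expr('esp32 and esp32s2')  -> raises ValueError (more than one target marker)
#   get_target_marker_from_expr('generic')            -> raises ValueError (no target marker specified)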

def merge_junit_files(junit_files: List[str], target_path: str) -> None:
    if len(junit_files) <= 1:
        return

    merged_testsuite: ET.Element = ET.Element('testsuite')
    testcases: Dict[str, ET.Element] = {}

    for junit in junit_files:
        logging.info(f'Merging {junit} to {target_path}')
        tree: ET.ElementTree = ET.parse(junit)
        testsuite: ET.Element = tree.getroot()

        for testcase in testsuite.findall('testcase'):
            name: str = testcase.get('name') if testcase.get('name') else ''  # type: ignore

            if name not in testcases:
                testcases[name] = testcase
                merged_testsuite.append(testcase)
                continue

            existing_testcase = testcases[name]
            for element_name in ['failure', 'error']:
                for element in testcase.findall(element_name):
                    existing_element = existing_testcase.find(element_name)
                    if existing_element is None:
                        existing_testcase.append(element)
                    else:
                        existing_element.attrib.setdefault('message', '')  # type: ignore
                        existing_element.attrib['message'] += '. ' + element.get('message', '')  # type: ignore

        os.remove(junit)

    merged_testsuite.set('tests', str(len(merged_testsuite.findall('testcase'))))
    merged_testsuite.set('failures', str(len(merged_testsuite.findall('.//testcase/failure'))))
    merged_testsuite.set('errors', str(len(merged_testsuite.findall('.//testcase/error'))))
    merged_testsuite.set('skipped', str(len(merged_testsuite.findall('.//testcase/skipped'))))

    with open(target_path, 'wb') as fw:
        fw.write(ET.tostring(merged_testsuite))

############
# Fixtures #
############

@pytest.fixture(scope='session')
def idf_path() -> str:
    return os.path.dirname(__file__)


@pytest.fixture(scope='session', autouse=True)
def session_tempdir() -> str:
    _tmpdir = os.path.join(
        os.path.dirname(__file__),
        'pytest_embedded_log',
        datetime.now().strftime('%Y-%m-%d_%H-%M-%S'),
    )
    os.makedirs(_tmpdir, exist_ok=True)
    return _tmpdir


@pytest.fixture
def case_tester(dut: IdfDut, **kwargs):  # type: ignore
    yield CaseTester(dut, **kwargs)

@pytest.fixture
@multi_dut_argument
def config(request: FixtureRequest) -> str:
    return getattr(request, 'param', None) or DEFAULT_SDKCONFIG


@pytest.fixture
def test_func_name(request: FixtureRequest) -> str:
    return request.node.function.__name__  # type: ignore


@pytest.fixture
def test_case_name(request: FixtureRequest, target: str, config: str) -> str:
    is_qemu = request._pyfuncitem.get_closest_marker('qemu') is not None
    return format_case_id(target, config, request.node.originalname, is_qemu=is_qemu)

@pytest.fixture
@multi_dut_fixture
def build_dir(request: FixtureRequest, app_path: str, target: Optional[str], config: Optional[str]) -> str:
    """
    Check local build dir with the following priority:

    1. build_<target>_<config>
    2. build_<target>
    3. build_<config>
    4. build

    Returns:
        valid build directory
    """
    check_dirs = []
    if target is not None and config is not None:
        check_dirs.append(f'build_{target}_{config}')
    if target is not None:
        check_dirs.append(f'build_{target}')
    if config is not None:
        check_dirs.append(f'build_{config}')
    check_dirs.append('build')

    idf_pytest_embedded = request.config.stash[_idf_pytest_embedded_key]
    build_dir = None
    if idf_pytest_embedded.apps_list is not None:
        for check_dir in check_dirs:
            binary_path = os.path.join(app_path, check_dir)
            if binary_path in idf_pytest_embedded.apps_list:
                build_dir = check_dir
                break

        if build_dir is None:
            pytest.skip(
                f'app path {app_path} with target {target} and config {config} is not listed in the app info list files'
            )
            return ''  # not reachable, just to keep mypy happy

    if build_dir:
        check_dirs = [build_dir]

    for check_dir in check_dirs:
        binary_path = os.path.join(app_path, check_dir)
        if os.path.isdir(binary_path):
            logging.info(f'found valid binary path: {binary_path}')
            return check_dir

        logging.warning('checking binary path: %s... missing... trying another location', binary_path)

    raise ValueError(
        f'no valid build dir found. Please build the binary via "idf.py -B {check_dirs[0]} build" and run pytest again'
    )
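
# Illustrative lookup for the build_dir fixture above: with target='esp32' and
# config='release' (values hypothetical), the candidate directories are checked
# in this order:
#   build_esp32_release, build_esp32, build_release, build
# and the first one that exists (or is recorded in the app info list) wins.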

@pytest.fixture(autouse=True)
@multi_dut_fixture
def junit_properties(test_case_name: str, record_xml_attribute: Callable[[str, object], None]) -> None:
    """
    This fixture is applied automatically (autouse) and renames the junit report
    test case to <target>.<config>.<case_name>
    """
    record_xml_attribute('name', test_case_name)

######################
# Log Util Functions #
######################

@pytest.fixture
def log_performance(record_property: Callable[[str, object], None]) -> Callable[[str, str], None]:
    """
    log the performance item with a pre-defined format to the console
    and record it under the ``properties`` tag in the junit report if available
    """

    def real_func(item: str, value: str) -> None:
        """
        :param item: performance item name
        :param value: performance value
        """
        logging.info('[Performance][%s]: %s', item, value)
        record_property(item, value)

    return real_func
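
# Illustrative usage of log_performance inside a test (test body hypothetical):
#
#   def test_wifi_throughput(log_performance) -> None:
#       log_performance('wifi_throughput', '52.1 Mbps')
#       # logs '[Performance][wifi_throughput]: 52.1 Mbps' and records it
#       # as a <property> entry in the junit report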

@pytest.fixture
def check_performance(idf_path: str) -> Callable[[str, float, str], None]:
    """
    check if the given performance item meets the passing standard or not
    """

    def real_func(item: str, value: float, target: str) -> None:
        """
        :param item: performance item name
        :param value: performance item value
        :param target: target chip
        :raise AssertionError: if the check fails
        """

        def _find_perf_item(operator: str, path: str) -> float:
            with open(path, 'r') as f:
                data = f.read()
            match = re.search(r'#define\s+IDF_PERFORMANCE_{}_{}\s+([\d.]+)'.format(operator, item.upper()), data)
            return float(match.group(1))  # type: ignore

        def _check_perf(operator: str, standard_value: float) -> None:
            if operator == 'MAX':
                ret = value <= standard_value
            else:
                ret = value >= standard_value
            if not ret:
                raise AssertionError(
                    "[Performance] {} value is {}, doesn't meet the pass standard {}".format(item, value, standard_value)
                )

        path_prefix = os.path.join(idf_path, 'components', 'idf_test', 'include')
        performance_files = (
            os.path.join(path_prefix, target, 'idf_performance_target.h'),
            os.path.join(path_prefix, 'idf_performance.h'),
        )

        found_item = False
        for op in ['MIN', 'MAX']:
            for performance_file in performance_files:
                try:
                    standard = _find_perf_item(op, performance_file)
                except (IOError, AttributeError):
                    # the performance file doesn't exist, or no match was found in it
                    continue

                _check_perf(op, standard)
                found_item = True
                break

        if not found_item:
            raise AssertionError('Failed to get performance standard for {}'.format(item))

    return real_func
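
# Illustrative check for the fixture above: given a (hypothetical) entry in
# components/idf_test/include/idf_performance.h such as
#
#   #define IDF_PERFORMANCE_MAX_ESP_TIMER_GET_TIME_PER_CALL  1200
#
# check_performance('esp_timer_get_time_per_call', 1000.0, 'esp32') passes,
# while a measured value of 1500.0 raises AssertionError.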

@pytest.fixture
def log_minimum_free_heap_size(dut: IdfDut, config: str) -> Callable[..., None]:
    def real_func() -> None:
        res = dut.expect(r'Minimum free heap size: (\d+) bytes')
        logging.info(
            '\n------ heap size info ------\n'
            '[app_name] {}\n'
            '[config_name] {}\n'
            '[target] {}\n'
            '[minimum_free_heap_size] {} Bytes\n'
            '------ heap size end ------'.format(
                os.path.basename(dut.app.app_path),
                config,
                dut.target,
                res.group(1).decode('utf8'),
            )
        )

    return real_func

@pytest.fixture
def dev_password(request: FixtureRequest) -> str:
    return request.config.getoption('dev_passwd') or ''


@pytest.fixture
def dev_user(request: FixtureRequest) -> str:
    return request.config.getoption('dev_user') or ''

##################
# Hook functions #
##################

def pytest_addoption(parser: pytest.Parser) -> None:
    idf_group = parser.getgroup('idf')
    idf_group.addoption(
        '--sdkconfig',
        help='sdkconfig postfix, like sdkconfig.ci.<config>. (Default: None, which would build all found apps)',
    )
    idf_group.addoption('--known-failure-cases-file', help='known failure cases file path')
    idf_group.addoption(
        '--dev-user',
        help='user name associated with some specific device/service used during the test execution',
    )
    idf_group.addoption(
        '--dev-passwd',
        help='password associated with some specific device/service used during the test execution',
    )
    idf_group.addoption(
        '--app-info-basedir',
        default=IDF_PATH,
        help='app info base directory. Specify this value when you\'re building under a '
        'different IDF_PATH. (Default: $IDF_PATH)',
    )
    idf_group.addoption(
        '--app-info-filepattern',
        help='glob pattern to specify the files that include built app info generated by '
        '`idf-build-apps --collect-app-info ...`. If specified, no ValueError is raised for '
        'binary paths that do not exist in the local file system, as long as they are not '
        'recorded in the app info.',
    )
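
# Illustrative CLI usage combining the options above (values hypothetical):
#
#   pytest --target esp32 --sdkconfig release \
#          --known-failure-cases-file known_failures.txt \
#          --app-info-filepattern 'list_job_*.txt'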

_idf_pytest_embedded_key = pytest.StashKey['IdfPytestEmbedded']()
_item_failed_cases_key = pytest.StashKey[list]()
_item_failed_key = pytest.StashKey[bool]()

def pytest_configure(config: Config) -> None:
    # cli option "--target"
    target = config.getoption('target') or ''

    help_commands = ['--help', '--fixtures', '--markers', '--version']
    for cmd in help_commands:
        if cmd in config.invocation_params.args:
            target = 'unneeded'
            break

    if not target:  # the target may also be specified through the markexpr via "-m"
        target = get_target_marker_from_expr(config.getoption('markexpr') or '')

    apps_list = None
    app_info_basedir = config.getoption('app_info_basedir')
    app_info_filepattern = config.getoption('app_info_filepattern')
    if app_info_filepattern:
        apps_list = []
        for file in glob.glob(os.path.join(IDF_PATH, app_info_filepattern)):
            with open(file) as fr:
                for line in fr.readlines():
                    if not line.strip():
                        continue

                    # each line is a valid json
                    app_info = json.loads(line.strip())
                    if app_info_basedir and app_info['app_dir'].startswith(app_info_basedir):
                        relative_app_dir = os.path.relpath(app_info['app_dir'], app_info_basedir)
                        apps_list.append(os.path.join(IDF_PATH, relative_app_dir, app_info['build_dir']))
                        print('Detected app: ', apps_list[-1])
                    else:
                        print(
                            f'WARNING: app_info base dir {app_info_basedir} not recognizable in {app_info["app_dir"]}, skipping...'
                        )
                        continue

    config.stash[_idf_pytest_embedded_key] = IdfPytestEmbedded(
        target=target,
        sdkconfig=config.getoption('sdkconfig'),
        known_failure_cases_file=config.getoption('known_failure_cases_file'),
        apps_list=apps_list,
    )
    config.pluginmanager.register(config.stash[_idf_pytest_embedded_key])

    for name, description in {**TARGET_MARKERS, **ENV_MARKERS, **SPECIAL_MARKERS}.items():
        config.addinivalue_line('markers', f'{name}: {description}')

def pytest_unconfigure(config: Config) -> None:
    _pytest_embedded = config.stash.get(_idf_pytest_embedded_key, None)
    if _pytest_embedded:
        del config.stash[_idf_pytest_embedded_key]
        config.pluginmanager.unregister(_pytest_embedded)

class IdfPytestEmbedded:
    def __init__(
        self,
        target: str,
        sdkconfig: Optional[str] = None,
        known_failure_cases_file: Optional[str] = None,
        apps_list: Optional[List[str]] = None,
    ):
        # CLI options to filter the test cases
        self.target = target.lower()
        self.sdkconfig = sdkconfig
        self.known_failure_patterns = self._parse_known_failure_cases_file(known_failure_cases_file)
        self.apps_list = apps_list

        self._failed_cases: List[Tuple[str, bool, bool]] = []  # (test_case_name, is_known_failure_case, is_xfail)

    @property
    def failed_cases(self) -> List[str]:
        return [case for case, is_known, is_xfail in self._failed_cases if not is_known and not is_xfail]

    @property
    def known_failure_cases(self) -> List[str]:
        return [case for case, is_known, _ in self._failed_cases if is_known]

    @property
    def xfail_cases(self) -> List[str]:
        return [case for case, _, is_xfail in self._failed_cases if is_xfail]

    @staticmethod
    def _parse_known_failure_cases_file(
        known_failure_cases_file: Optional[str] = None,
    ) -> List[str]:
        if not known_failure_cases_file or not os.path.isfile(known_failure_cases_file):
            return []

        patterns = []
        with open(known_failure_cases_file) as fr:
            for line in fr.readlines():
                if not line.strip():
                    continue

                without_comments = line.split('#')[0].strip()
                if without_comments:
                    patterns.append(without_comments)

        return patterns
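
    # Illustrative known-failure-cases file content (file name and case ids are
    # hypothetical; one exact case id or fnmatch pattern per line, '#' starts a comment):
    #
    #   esp32.default.test_wifi_connect    # exact match
    #   esp32c3.*.test_ble_*               # wildcard, matched via fnmatch in _is_known_failure()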

    @pytest.hookimpl(tryfirst=True)
    def pytest_sessionstart(self, session: Session) -> None:
        # same behavior as the vanilla pytest-embedded '--target' option
        session.config.option.target = self.target

    @pytest.hookimpl(tryfirst=True)
    def pytest_collection_modifyitems(self, items: List[Function]) -> None:
        # sort by file path and callspec.config
        # implemented like this because of a pytest limitation: fixture values can't be
        # retrieved while collecting, https://github.com/pytest-dev/pytest/discussions/9689
        # after sorting, consecutive tests may reuse the app cache and reduce the number of flashes
        def _get_param_config(_item: Function) -> str:
            if hasattr(_item, 'callspec'):
                return _item.callspec.params.get('config', DEFAULT_SDKCONFIG)  # type: ignore
            return DEFAULT_SDKCONFIG

        items.sort(key=lambda x: (os.path.dirname(x.path), _get_param_config(x)))

        # set a default timeout of 10 minutes for each case
        for item in items:
            if 'timeout' not in item.keywords:
                item.add_marker(pytest.mark.timeout(10 * 60))

        # expand the special markers into target markers
        for item in items:
            if 'supported_targets' in item.keywords:
                for _target in SUPPORTED_TARGETS:
                    item.add_marker(_target)
            if 'preview_targets' in item.keywords:
                for _target in PREVIEW_TARGETS:
                    item.add_marker(_target)
            if 'all_targets' in item.keywords:
                for _target in [*SUPPORTED_TARGETS, *PREVIEW_TARGETS]:
                    item.add_marker(_target)

            # add 'xtal_40mhz' as a default marker for esp32c2 cases,
            # but only for those that don't already specify 'xtal_26mhz'
            if (
                self.target == 'esp32c2'
                and 'esp32c2' in item_marker_names(item)
                and 'xtal_26mhz' not in item_marker_names(item)
            ):
                item.add_marker('xtal_40mhz')

        # filter the test cases by the "nightly_run" marker
        if os.getenv('INCLUDE_NIGHTLY_RUN') == '1':
            # don't filter out nightly_run cases
            pass
        elif os.getenv('NIGHTLY_RUN') == '1':
            items[:] = [item for item in items if 'nightly_run' in item_marker_names(item)]
        else:
            items[:] = [item for item in items if 'nightly_run' not in item_marker_names(item)]

        # filter the test cases by target and skip_targets
        items[:] = [
            item
            for item in items
            if self.target in item_marker_names(item) and self.target not in item_skip_targets(item)
        ]

        # filter the test cases by the cli option "--sdkconfig"
        if self.sdkconfig:
            items[:] = [item for item in items if _get_param_config(item) == self.sdkconfig]
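
    # Illustrative: a (hypothetical) test case declared as
    #
    #   @pytest.mark.supported_targets
    #   @pytest.mark.generic
    #   @pytest.mark.temp_skip_ci(targets=['esp32s3'], reason='IDF-xxxx')
    #   def test_example(dut: IdfDut) -> None: ...
    #
    # gets one marker per entry in SUPPORTED_TARGETS plus a 10-minute timeout, and is
    # then filtered out when self.target is 'esp32s3' and the run happens in CI.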

    def pytest_runtest_makereport(self, item: Function, call: CallInfo[None]) -> Optional[TestReport]:
        report = TestReport.from_item_and_call(item, call)
        if item.stash.get(_item_failed_key, None) is None:
            item.stash[_item_failed_key] = False

        if report.outcome == 'failed':
            # Mark the failed test cases
            #
            # This hook function is called in 3 phases: setup, call, and teardown.
            # report.outcome is the outcome of the current phase alone; the call
            # phase outcome is the actual test result.
            item.stash[_item_failed_key] = True

        if call.when == 'teardown':
            item_failed = item.stash[_item_failed_key]
            if item_failed:
                # unity real test cases
                failed_sub_cases = item.stash.get(_item_failed_cases_key, [])
                if failed_sub_cases:
                    for test_case_name in failed_sub_cases:
                        self._failed_cases.append((test_case_name, self._is_known_failure(test_case_name), False))
                else:  # the case itself is failing
                    test_case_name = item.funcargs.get('test_case_name', '')
                    if test_case_name:
                        self._failed_cases.append(
                            (
                                test_case_name,
                                self._is_known_failure(test_case_name),
                                report.keywords.get('xfail', False),
                            )
                        )

        return report

    def _is_known_failure(self, case_id: str) -> bool:
        for pattern in self.known_failure_patterns:
            if case_id == pattern:
                return True

            if fnmatch(case_id, pattern):
                return True

        return False

    @pytest.hookimpl(trylast=True)
    def pytest_runtest_teardown(self, item: Function) -> None:
        """
        Format the junit reports generated by the test case
        """
        tempdir = item.funcargs.get('test_case_tempdir')
        if not tempdir:
            return

        junits = find_by_suffix('.xml', tempdir)
        if not junits:
            return

        if len(junits) > 1:
            merge_junit_files(junits, os.path.join(tempdir, SUB_JUNIT_FILENAME))
            junits = [os.path.join(tempdir, SUB_JUNIT_FILENAME)]

        is_qemu = item.get_closest_marker('qemu') is not None
        failed_sub_cases = []
        target = item.funcargs['target']
        config = item.funcargs['config']
        for junit in junits:
            xml = ET.parse(junit)
            testcases = xml.findall('.//testcase')
            for case in testcases:
                # modify the junit files
                new_case_name = format_case_id(target, config, case.attrib['name'], is_qemu=is_qemu)
                case.attrib['name'] = new_case_name
                if 'file' in case.attrib:
                    case.attrib['file'] = case.attrib['file'].replace('/IDF/', '')  # our unity test framework

                # collect the real failure cases
                if case.find('failure') is not None:
                    failed_sub_cases.append(new_case_name)

            xml.write(junit)

        item.stash[_item_failed_cases_key] = failed_sub_cases

    def pytest_sessionfinish(self, session: Session, exitstatus: int) -> None:
        if exitstatus != 0:
            if exitstatus == ExitCode.NO_TESTS_COLLECTED:
                session.exitstatus = 0
            elif self.known_failure_cases and not self.failed_cases:
                session.exitstatus = 0

    def pytest_terminal_summary(self, terminalreporter: TerminalReporter) -> None:
        if self.known_failure_cases:
            terminalreporter.section('Known failure cases', bold=True, yellow=True)
            terminalreporter.line('\n'.join(self.known_failure_cases))

        if self.xfail_cases:
            terminalreporter.section('xfail cases', bold=True, yellow=True)
            terminalreporter.line('\n'.join(self.xfail_cases))

        if self.failed_cases:
            terminalreporter.section('Failed cases', bold=True, red=True)
            terminalreporter.line('\n'.join(self.failed_cases))