  1. # SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
  2. # SPDX-License-Identifier: Apache-2.0
  3. # pylint: disable=W0621 # redefined-outer-name
# This file is a pytest root configuration file and provides the following functionality:
  5. # 1. Defines a few fixtures that could be used under the whole project.
  6. # 2. Defines a few hook functions.
  7. #
  8. # IDF is using [pytest](https://github.com/pytest-dev/pytest) and
  9. # [pytest-embedded plugin](https://github.com/espressif/pytest-embedded) as its example test framework.
  10. #
  11. # This is an experimental feature, and if you found any bug or have any question, please report to
  12. # https://github.com/espressif/pytest-embedded/issues
  13. import logging
  14. import os
  15. import sys
  16. import xml.etree.ElementTree as ET
  17. from datetime import datetime
  18. from fnmatch import fnmatch
  19. from typing import Callable, List, Optional, Tuple
  20. import pytest
  21. from _pytest.config import Config, ExitCode
  22. from _pytest.fixtures import FixtureRequest
  23. from _pytest.main import Session
  24. from _pytest.nodes import Item
  25. from _pytest.python import Function
  26. from _pytest.reports import TestReport
  27. from _pytest.runner import CallInfo
  28. from _pytest.terminal import TerminalReporter
  29. from pytest_embedded.plugin import multi_dut_argument, multi_dut_fixture
  30. from pytest_embedded.utils import find_by_suffix
  31. from pytest_embedded_idf.dut import IdfDut
  32. try:
  33. from idf_unity_tester import CaseTester
  34. except ImportError:
  35. sys.path.append(os.path.join(os.path.dirname(__file__), 'tools', 'ci'))
  36. from idf_unity_tester import CaseTester
  37. try:
  38. import common_test_methods # noqa: F401
  39. except ImportError:
  40. sys.path.append(os.path.join(os.path.dirname(__file__), 'tools', 'ci', 'python_packages'))
  41. import common_test_methods # noqa: F401
  42. SUPPORTED_TARGETS = ['esp32', 'esp32s2', 'esp32c3', 'esp32s3', 'esp32c2', 'esp32c6']
  43. PREVIEW_TARGETS = ['esp32h4'] # this PREVIEW_TARGETS excludes 'linux' target
  44. DEFAULT_SDKCONFIG = 'default'
  45. ##################
  46. # Help Functions #
  47. ##################
  48. def is_target_marker(marker: str) -> bool:
  49. if marker.startswith('esp32') or marker.startswith('esp8') or marker == 'linux':
  50. return True
  51. return False
  52. def format_case_id(target: Optional[str], config: Optional[str], case: str) -> str:
  53. return f'{target}.{config}.{case}'
  54. def item_marker_names(item: Item) -> List[str]:
  55. return [marker.name for marker in item.iter_markers()]
  56. def get_target_marker(markexpr: str) -> str:
  57. candidates = set()
  58. # we use `-m "esp32 and generic"` in our CI to filter the test cases
  59. for marker in markexpr.split('and'):
  60. marker = marker.strip()
  61. if is_target_marker(marker):
  62. candidates.add(marker)
  63. if len(candidates) > 1:
  64. raise ValueError(
  65. f'Specified more than one target markers: {candidates}. Please specify no more than one.'
  66. )
  67. elif len(candidates) == 1:
  68. return candidates.pop()
  69. else:
  70. raise ValueError(
  71. 'Please specify one target marker via "--target [TARGET]" or via "-m [TARGET]"'
  72. )
  73. ############
  74. # Fixtures #
  75. ############
  76. @pytest.fixture(scope='session', autouse=True)
  77. def session_tempdir() -> str:
  78. _tmpdir = os.path.join(
  79. os.path.dirname(__file__),
  80. 'pytest_embedded_log',
  81. datetime.now().strftime('%Y-%m-%d_%H-%M-%S'),
  82. )
  83. os.makedirs(_tmpdir, exist_ok=True)
  84. return _tmpdir
  85. @pytest.fixture()
  86. def log_minimum_free_heap_size(dut: IdfDut, config: str) -> Callable[..., None]:
  87. def real_func() -> None:
  88. res = dut.expect(r'Minimum free heap size: (\d+) bytes')
  89. logging.info(
  90. '\n------ heap size info ------\n'
  91. '[app_name] {}\n'
  92. '[config_name] {}\n'
  93. '[target] {}\n'
  94. '[minimum_free_heap_size] {} Bytes\n'
  95. '------ heap size end ------'.format(
  96. os.path.basename(dut.app.app_path),
  97. config,
  98. dut.target,
  99. res.group(1).decode('utf8'),
  100. )
  101. )
  102. return real_func
  103. @pytest.fixture
  104. def case_tester(dut: IdfDut, **kwargs): # type: ignore
  105. yield CaseTester(dut, **kwargs)
  106. @pytest.fixture
  107. @multi_dut_argument
  108. def config(request: FixtureRequest) -> str:
  109. return getattr(request, 'param', None) or DEFAULT_SDKCONFIG
  110. @pytest.fixture
  111. def test_func_name(request: FixtureRequest) -> str:
  112. return request.node.function.__name__ # type: ignore
  113. @pytest.fixture
  114. def test_case_name(request: FixtureRequest, target: str, config: str) -> str:
  115. return format_case_id(target, config, request.node.originalname)
  116. @pytest.fixture
  117. @multi_dut_fixture
  118. def build_dir(app_path: str, target: Optional[str], config: Optional[str]) -> str:
  119. """
  120. Check local build dir with the following priority:
  121. 1. build_<target>_<config>
  122. 2. build_<target>
  123. 3. build_<config>
  124. 4. build
  125. Args:
  126. app_path: app path
  127. target: target
  128. config: config
  129. Returns:
  130. valid build directory
  131. """
  132. check_dirs = []
  133. if target is not None and config is not None:
  134. check_dirs.append(f'build_{target}_{config}')
  135. if target is not None:
  136. check_dirs.append(f'build_{target}')
  137. if config is not None:
  138. check_dirs.append(f'build_{config}')
  139. check_dirs.append('build')
  140. for check_dir in check_dirs:
  141. binary_path = os.path.join(app_path, check_dir)
  142. if os.path.isdir(binary_path):
  143. logging.info(f'find valid binary path: {binary_path}')
  144. return check_dir
  145. logging.warning(
  146. 'checking binary path: %s... missing... try another place', binary_path
  147. )
  148. recommend_place = check_dirs[0]
  149. raise ValueError(
  150. f'no build dir valid. Please build the binary via "idf.py -B {recommend_place} build" and run pytest again'
  151. )
  152. @pytest.fixture(autouse=True)
  153. @multi_dut_fixture
  154. def junit_properties(
  155. test_case_name: str, record_xml_attribute: Callable[[str, object], None]
  156. ) -> None:
  157. """
  158. This fixture is autoused and will modify the junit report test case name to <target>.<config>.<case_name>
  159. """
  160. record_xml_attribute('name', test_case_name)
  161. ##################
  162. # Hook functions #
  163. ##################
  164. def pytest_addoption(parser: pytest.Parser) -> None:
  165. base_group = parser.getgroup('idf')
  166. base_group.addoption(
  167. '--sdkconfig',
  168. help='sdkconfig postfix, like sdkconfig.ci.<config>. (Default: None, which would build all found apps)',
  169. )
  170. base_group.addoption(
  171. '--known-failure-cases-file', help='known failure cases file path'
  172. )
  173. _idf_pytest_embedded_key = pytest.StashKey['IdfPytestEmbedded']
  174. def pytest_configure(config: Config) -> None:
  175. # cli option "--target"
  176. target = config.getoption('target') or ''
  177. help_commands = ['--help', '--fixtures', '--markers', '--version']
  178. for cmd in help_commands:
  179. if cmd in config.invocation_params.args:
  180. target = 'unneeded'
  181. break
  182. if not target: # also could specify through markexpr via "-m"
  183. target = get_target_marker(config.getoption('markexpr') or '')
  184. config.stash[_idf_pytest_embedded_key] = IdfPytestEmbedded(
  185. target=target,
  186. sdkconfig=config.getoption('sdkconfig'),
  187. known_failure_cases_file=config.getoption('known_failure_cases_file'),
  188. )
  189. config.pluginmanager.register(config.stash[_idf_pytest_embedded_key])
  190. def pytest_unconfigure(config: Config) -> None:
  191. _pytest_embedded = config.stash.get(_idf_pytest_embedded_key, None)
  192. if _pytest_embedded:
  193. del config.stash[_idf_pytest_embedded_key]
  194. config.pluginmanager.unregister(_pytest_embedded)
class IdfPytestEmbedded:
    """pytest plugin that filters/sorts collected IDF test cases and tracks failed ones.

    Registered from ``pytest_configure``; its hooks mark cases per target,
    filter by target/sdkconfig/nightly flags, and classify failures into
    known-failure, xfail and "real" buckets for the terminal summary.
    """

    def __init__(
        self,
        target: Optional[str] = None,
        sdkconfig: Optional[str] = None,
        known_failure_cases_file: Optional[str] = None,
    ):
        # CLI options to filter the test cases
        self.target = target
        self.sdkconfig = sdkconfig
        self.known_failure_patterns = self._parse_known_failure_cases_file(
            known_failure_cases_file
        )
        self._failed_cases: List[
            Tuple[str, bool, bool]
        ] = []  # (test_case_name, is_known_failure_cases, is_xfail)

    @property
    def failed_cases(self) -> List[str]:
        # Failures that are neither known failures nor expected (xfail) ones.
        return [
            case
            for case, is_known, is_xfail in self._failed_cases
            if not is_known and not is_xfail
        ]

    @property
    def known_failure_cases(self) -> List[str]:
        # Failures matching a pattern from the known-failure-cases file.
        return [case for case, is_known, _ in self._failed_cases if is_known]

    @property
    def xfail_cases(self) -> List[str]:
        # Failures that were marked as expected via xfail.
        return [case for case, _, is_xfail in self._failed_cases if is_xfail]

    @staticmethod
    def _parse_known_failure_cases_file(
        known_failure_cases_file: Optional[str] = None,
    ) -> List[str]:
        """Read the known-failure-cases file into a list of fnmatch patterns.

        Each non-empty line, with trailing ``#`` comments stripped, is one
        pattern. Returns an empty list when no readable file is given.
        """
        if not known_failure_cases_file or not os.path.isfile(known_failure_cases_file):
            return []
        patterns = []
        with open(known_failure_cases_file) as fr:
            for line in fr.readlines():
                if not line:
                    continue
                if not line.strip():
                    continue
                without_comments = line.split('#')[0].strip()
                if without_comments:
                    patterns.append(without_comments)
        return patterns

    @pytest.hookimpl(tryfirst=True)
    def pytest_sessionstart(self, session: Session) -> None:
        # Normalize the target to lowercase and propagate it to the cli option.
        if self.target:
            self.target = self.target.lower()
            session.config.option.target = self.target

    @pytest.hookimpl(tryfirst=True)
    def pytest_collection_modifyitems(self, items: List[Function]) -> None:
        """Sort, mark and filter the collected test items in place."""
        # sort by file path and callspec.config
        # implement like this since this is a limitation of pytest, couldn't get fixture values while collecting
        # https://github.com/pytest-dev/pytest/discussions/9689
        def _get_param_config(_item: Function) -> str:
            if hasattr(_item, 'callspec'):
                return _item.callspec.params.get('config', DEFAULT_SDKCONFIG)  # type: ignore
            return DEFAULT_SDKCONFIG

        items.sort(key=lambda x: (os.path.dirname(x.path), _get_param_config(x)))

        # set default timeout 10 minutes for each case
        for item in items:
            if 'timeout' not in item.keywords:
                item.add_marker(pytest.mark.timeout(10 * 60))

        # add markers for special markers
        for item in items:
            skip_ci_marker = item.get_closest_marker('temp_skip_ci')
            skip_ci_targets: List[str] = []
            if skip_ci_marker:
                # `temp_skip_ci` should always use keyword arguments `targets` and `reason`
                if not skip_ci_marker.kwargs.get('targets') or not skip_ci_marker.kwargs.get('reason'):
                    raise ValueError(
                        f'`temp_skip_ci` should always use keyword arguments `targets` and `reason`. '
                        f'For example: '
                        f'`@pytest.mark.temp_skip_ci(targets=["esp32"], reason="IDF-xxxx, will fix it ASAP")`'
                    )
                skip_ci_targets = skip_ci_marker.kwargs['targets']

            # Expand the umbrella markers into one marker per concrete target,
            # skipping any target temporarily excluded by `temp_skip_ci`.
            if 'supported_targets' in item.keywords:
                for _target in SUPPORTED_TARGETS:
                    if _target not in skip_ci_targets:
                        item.add_marker(_target)
            if 'preview_targets' in item.keywords:
                for _target in PREVIEW_TARGETS:
                    if _target not in skip_ci_targets:
                        item.add_marker(_target)
            if 'all_targets' in item.keywords:
                for _target in [*SUPPORTED_TARGETS, *PREVIEW_TARGETS]:
                    if _target not in skip_ci_targets:
                        item.add_marker(_target)

            # `temp_skip_ci(targets=...)` can't work with specified single target
            for skip_ci_target in skip_ci_targets:
                if skip_ci_target in item.keywords:
                    raise ValueError(
                        '`skip_ci_targets` can only work with '
                        '`supported_targets`, `preview_targets`, `all_targets` markers'
                    )

        # add 'xtal_40mhz' tag as a default tag for esp32c2 target
        for item in items:
            if 'esp32c2' in item_marker_names(item) and 'xtal_26mhz' not in item_marker_names(item):
                item.add_marker('xtal_40mhz')

        # filter all the test cases with "nightly_run" marker
        if os.getenv('INCLUDE_NIGHTLY_RUN') == '1':
            # Do not filter nightly_run cases
            pass
        elif os.getenv('NIGHTLY_RUN') == '1':
            items[:] = [
                item for item in items if 'nightly_run' in item_marker_names(item)
            ]
        else:
            items[:] = [
                item for item in items if 'nightly_run' not in item_marker_names(item)
            ]

        # filter all the test cases with "--target"
        if self.target:
            items[:] = [
                item for item in items if self.target in item_marker_names(item)
            ]

        # filter all the test cases with cli option "config"
        if self.sdkconfig:
            items[:] = [
                item for item in items if _get_param_config(item) == self.sdkconfig
            ]

    def pytest_runtest_makereport(
        self, item: Function, call: CallInfo[None]
    ) -> Optional[TestReport]:
        """Build the test report and record failed cases for the summary."""
        report = TestReport.from_item_and_call(item, call)
        if report.outcome == 'failed':
            # NOTE(review): 'test_case_name' is the fixture value; '' when the
            # fixture did not run for this item.
            test_case_name = item.funcargs.get('test_case_name', '')
            is_known_failure = self._is_known_failure(test_case_name)
            is_xfail = report.keywords.get('xfail', False)
            self._failed_cases.append((test_case_name, is_known_failure, is_xfail))
        return report

    def _is_known_failure(self, case_id: str) -> bool:
        """Return True if ``case_id`` equals or fnmatch-es any known-failure pattern."""
        for pattern in self.known_failure_patterns:
            if case_id == pattern:
                return True
            if fnmatch(case_id, pattern):
                return True
        return False

    @pytest.hookimpl(trylast=True)
    def pytest_runtest_teardown(self, item: Function) -> None:
        """
        Format the test case generated junit reports
        """
        tempdir = item.funcargs.get('test_case_tempdir')
        if not tempdir:
            return
        junits = find_by_suffix('.xml', tempdir)
        if not junits:
            return
        # Rewrite each <testcase> name to the canonical <target>.<config>.<case> form.
        target = item.funcargs['target']
        config = item.funcargs['config']
        for junit in junits:
            xml = ET.parse(junit)
            testcases = xml.findall('.//testcase')
            for case in testcases:
                case.attrib['name'] = format_case_id(
                    target, config, case.attrib['name']
                )
                if 'file' in case.attrib:
                    case.attrib['file'] = case.attrib['file'].replace(
                        '/IDF/', ''
                    )  # our unity test framework
            xml.write(junit)

    def pytest_sessionfinish(self, session: Session, exitstatus: int) -> None:
        # Treat "no tests collected" and "only known failures failed" as success.
        if exitstatus != 0:
            if exitstatus == ExitCode.NO_TESTS_COLLECTED:
                session.exitstatus = 0
            elif self.known_failure_cases and not self.failed_cases:
                session.exitstatus = 0

    def pytest_terminal_summary(self, terminalreporter: TerminalReporter) -> None:
        # Print the three failure buckets as separate colored sections.
        if self.known_failure_cases:
            terminalreporter.section('Known failure cases', bold=True, yellow=True)
            terminalreporter.line('\n'.join(self.known_failure_cases))

        if self.xfail_cases:
            terminalreporter.section('xfail cases', bold=True, yellow=True)
            terminalreporter.line('\n'.join(self.xfail_cases))

        if self.failed_cases:
            terminalreporter.section('Failed cases', bold=True, red=True)
            terminalreporter.line('\n'.join(self.failed_cases))