# conftest.py
  1. # SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
  2. # SPDX-License-Identifier: Apache-2.0
  3. # pylint: disable=W0621 # redefined-outer-name
  4. # This file is a pytest root configuration file and provide the following functionalities:
  5. # 1. Defines a few fixtures that could be used under the whole project.
  6. # 2. Defines a few hook functions.
  7. #
  8. # IDF is using [pytest](https://github.com/pytest-dev/pytest) and
  9. # [pytest-embedded plugin](https://github.com/espressif/pytest-embedded) as its example test framework.
  10. #
  11. # This is an experimental feature, and if you found any bug or have any question, please report to
  12. # https://github.com/espressif/pytest-embedded/issues
  13. import logging
  14. import os
  15. import sys
  16. import xml.etree.ElementTree as ET
  17. from fnmatch import fnmatch
  18. from typing import Callable, List, Optional, Tuple
  19. import pytest
  20. from _pytest.config import Config
  21. from _pytest.fixtures import FixtureRequest
  22. from _pytest.main import Session
  23. from _pytest.nodes import Item
  24. from _pytest.python import Function
  25. from _pytest.reports import TestReport
  26. from _pytest.runner import CallInfo
  27. from _pytest.terminal import TerminalReporter
  28. from pytest_embedded.plugin import apply_count, parse_configuration
  29. from pytest_embedded.utils import find_by_suffix
# Officially supported Espressif chip targets; `supported_targets` marker expands to these.
SUPPORTED_TARGETS = ['esp32', 'esp32s2', 'esp32c3', 'esp32s3']
# Preview (not yet fully supported) targets; `preview_targets` marker expands to these.
PREVIEW_TARGETS = ['linux', 'esp32h2', 'esp32c2']
# Fallback sdkconfig name used when a test case is not parametrized with a 'config'.
DEFAULT_SDKCONFIG = 'default'
  33. ##################
  34. # Help Functions #
  35. ##################
  36. def is_target_marker(marker: str) -> bool:
  37. if marker.startswith('esp32'):
  38. return True
  39. if marker.startswith('esp8'):
  40. return True
  41. return False
  42. def format_case_id(target: Optional[str], config: Optional[str], case: str) -> str:
  43. return f'{target}.{config}.{case}'
  44. def item_marker_names(item: Item) -> List[str]:
  45. return [marker.name for marker in item.iter_markers()]
  46. ############
  47. # Fixtures #
  48. ############
  49. @pytest.fixture
  50. @parse_configuration
  51. def config(request: FixtureRequest) -> str:
  52. return getattr(request, 'param', None) or DEFAULT_SDKCONFIG
  53. @pytest.fixture
  54. def test_func_name(request: FixtureRequest) -> str:
  55. return request.node.function.__name__ # type: ignore
  56. @pytest.fixture
  57. def test_case_name(request: FixtureRequest, target: str, config: str) -> str:
  58. return format_case_id(target, config, request.node.originalname)
  59. @pytest.fixture
  60. @apply_count
  61. def build_dir(
  62. app_path: str, target: Optional[str], config: Optional[str]
  63. ) -> str:
  64. """
  65. Check local build dir with the following priority:
  66. 1. build_<target>_<config>
  67. 2. build_<target>
  68. 3. build_<config>
  69. 4. build
  70. Args:
  71. app_path: app path
  72. target: target
  73. config: config
  74. Returns:
  75. valid build directory
  76. """
  77. check_dirs = []
  78. if target is not None and config is not None:
  79. check_dirs.append(f'build_{target}_{config}')
  80. if target is not None:
  81. check_dirs.append(f'build_{target}')
  82. if config is not None:
  83. check_dirs.append(f'build_{config}')
  84. check_dirs.append('build')
  85. for check_dir in check_dirs:
  86. binary_path = os.path.join(app_path, check_dir)
  87. if os.path.isdir(binary_path):
  88. logging.info(f'find valid binary path: {binary_path}')
  89. return check_dir
  90. logging.warning(
  91. 'checking binary path: %s... missing... try another place', binary_path
  92. )
  93. recommend_place = check_dirs[0]
  94. logging.error(
  95. f'no build dir valid. Please build the binary via "idf.py -B {recommend_place} build" and run pytest again'
  96. )
  97. sys.exit(1)
  98. @pytest.fixture(autouse=True)
  99. @apply_count
  100. def junit_properties(
  101. test_case_name: str, record_xml_attribute: Callable[[str, object], None]
  102. ) -> None:
  103. """
  104. This fixture is autoused and will modify the junit report test case name to <target>.<config>.<case_name>
  105. """
  106. record_xml_attribute('name', test_case_name)
  107. ##################
  108. # Hook functions #
  109. ##################
  110. def pytest_addoption(parser: pytest.Parser) -> None:
  111. base_group = parser.getgroup('idf')
  112. base_group.addoption(
  113. '--sdkconfig',
  114. help='sdkconfig postfix, like sdkconfig.ci.<config>. (Default: None, which would build all found apps)',
  115. )
  116. base_group.addoption(
  117. '--known-failure-cases-file', help='known failure cases file path'
  118. )
  119. _idf_pytest_embedded_key = pytest.StashKey['IdfPytestEmbedded']
  120. def pytest_configure(config: Config) -> None:
  121. config.stash[_idf_pytest_embedded_key] = IdfPytestEmbedded(
  122. target=config.getoption('target'),
  123. sdkconfig=config.getoption('sdkconfig'),
  124. known_failure_cases_file=config.getoption('known_failure_cases_file'),
  125. )
  126. config.pluginmanager.register(config.stash[_idf_pytest_embedded_key])
  127. def pytest_unconfigure(config: Config) -> None:
  128. _pytest_embedded = config.stash.get(_idf_pytest_embedded_key, None)
  129. if _pytest_embedded:
  130. del config.stash[_idf_pytest_embedded_key]
  131. config.pluginmanager.unregister(_pytest_embedded)
  132. class IdfPytestEmbedded:
  133. def __init__(
  134. self,
  135. target: Optional[str] = None,
  136. sdkconfig: Optional[str] = None,
  137. known_failure_cases_file: Optional[str] = None,
  138. ):
  139. # CLI options to filter the test cases
  140. self.target = target
  141. self.sdkconfig = sdkconfig
  142. self.known_failure_patterns = self._parse_known_failure_cases_file(
  143. known_failure_cases_file
  144. )
  145. self._failed_cases: List[
  146. Tuple[str, bool]
  147. ] = [] # (test_case_name, is_known_failure_cases)
  148. @property
  149. def failed_cases(self) -> List[str]:
  150. return [case for case, is_known in self._failed_cases if not is_known]
  151. @property
  152. def known_failure_cases(self) -> List[str]:
  153. return [case for case, is_known in self._failed_cases if is_known]
  154. @staticmethod
  155. def _parse_known_failure_cases_file(
  156. known_failure_cases_file: Optional[str] = None,
  157. ) -> List[str]:
  158. if not known_failure_cases_file or not os.path.isfile(known_failure_cases_file):
  159. return []
  160. patterns = []
  161. with open(known_failure_cases_file) as fr:
  162. for line in fr.readlines():
  163. if not line:
  164. continue
  165. if not line.strip():
  166. continue
  167. without_comments = line.split('#')[0].strip()
  168. if without_comments:
  169. patterns.append(without_comments)
  170. return patterns
  171. @pytest.hookimpl(tryfirst=True)
  172. def pytest_sessionstart(self, session: Session) -> None:
  173. if self.target:
  174. self.target = self.target.lower()
  175. session.config.option.target = self.target
  176. @pytest.hookimpl(tryfirst=True)
  177. def pytest_collection_modifyitems(self, items: List[Function]) -> None:
  178. # sort by file path and callspec.config
  179. # implement like this since this is a limitation of pytest, couldn't get fixture values while collecting
  180. # https://github.com/pytest-dev/pytest/discussions/9689
  181. def _get_param_config(_item: Function) -> str:
  182. if hasattr(_item, 'callspec'):
  183. return _item.callspec.params.get('config', DEFAULT_SDKCONFIG) # type: ignore
  184. return DEFAULT_SDKCONFIG
  185. items.sort(key=lambda x: (os.path.dirname(x.path), _get_param_config(x)))
  186. # add markers for special markers
  187. for item in items:
  188. if 'supported_targets' in item_marker_names(item):
  189. for _target in SUPPORTED_TARGETS:
  190. item.add_marker(_target)
  191. if 'preview_targets' in item_marker_names(item):
  192. for _target in PREVIEW_TARGETS:
  193. item.add_marker(_target)
  194. if 'all_targets' in item_marker_names(item):
  195. for _target in [*SUPPORTED_TARGETS, *PREVIEW_TARGETS]:
  196. item.add_marker(_target)
  197. # FIXME: temporarily modify the s3 runner tag "generic" to "s3_generic" due to the deep sleep bug
  198. if 'generic' in item_marker_names(item) and 'esp32s3' in item_marker_names(item):
  199. item.add_marker('generic_s3_fixme')
  200. # filter all the test cases with "--target"
  201. if self.target:
  202. items[:] = [
  203. item for item in items if self.target in item_marker_names(item)
  204. ]
  205. # filter all the test cases with cli option "config"
  206. if self.sdkconfig:
  207. items[:] = [
  208. item for item in items if _get_param_config(item) == self.sdkconfig
  209. ]
  210. def pytest_runtest_makereport(
  211. self, item: Function, call: CallInfo[None]
  212. ) -> Optional[TestReport]:
  213. if call.when == 'setup':
  214. return None
  215. report = TestReport.from_item_and_call(item, call)
  216. if report.outcome == 'failed':
  217. test_case_name = item.funcargs.get('test_case_name', '')
  218. is_known_failure = self._is_known_failure(test_case_name)
  219. self._failed_cases.append((test_case_name, is_known_failure))
  220. return report
  221. def _is_known_failure(self, case_id: str) -> bool:
  222. for pattern in self.known_failure_patterns:
  223. if case_id == pattern:
  224. return True
  225. if fnmatch(case_id, pattern):
  226. return True
  227. return False
  228. @pytest.hookimpl(trylast=True)
  229. def pytest_runtest_teardown(self, item: Function) -> None:
  230. """
  231. Format the test case generated junit reports
  232. """
  233. tempdir = item.funcargs.get('test_case_tempdir')
  234. if not tempdir:
  235. return
  236. junits = find_by_suffix('.xml', tempdir)
  237. if not junits:
  238. return
  239. target = item.funcargs['target']
  240. config = item.funcargs['config']
  241. for junit in junits:
  242. xml = ET.parse(junit)
  243. testcases = xml.findall('.//testcase')
  244. for case in testcases:
  245. case.attrib['name'] = format_case_id(
  246. target, config, case.attrib['name']
  247. )
  248. if 'file' in case.attrib:
  249. case.attrib['file'] = case.attrib['file'].replace(
  250. '/IDF/', ''
  251. ) # our unity test framework
  252. xml.write(junit)
  253. def pytest_sessionfinish(self, session: Session, exitstatus: int) -> None:
  254. if exitstatus != 0 and self.known_failure_cases and not self.failed_cases:
  255. session.exitstatus = 0
  256. def pytest_terminal_summary(self, terminalreporter: TerminalReporter) -> None:
  257. if self.known_failure_cases:
  258. terminalreporter.section('Known failure cases', bold=True, yellow=True)
  259. terminalreporter.line('\n'.join(self.known_failure_cases))
  260. if self.failed_cases:
  261. terminalreporter.section('Failed cases', bold=True, red=True)
  262. terminalreporter.line('\n'.join(self.failed_cases))