
Merge branch 'feature/ci_optimize_assign_test' into 'master'

CI: optimize assign test

See merge request espressif/esp-idf!6785
He Yin Ling, 6 years ago
commit d839f39ed8

+ 5 - 106
tools/ci/config/target-test.yml

@@ -176,7 +176,7 @@ example_test_002:
    - cd $TEST_FW_PATH
    # run test
    - python Runner.py $TEST_CASE_PATH -c $CONFIG_FILE -e $ENV_FILE
-  
+

.example_test_003:
  extends: .example_test_template
@@ -245,17 +245,16 @@ example_test_010:
 
 
UT_001:
  extends: .unit_test_template
-  parallel: 50
+  parallel: 28
  tags:
    - ESP32_IDF
    - UT_T1_1

# Max. allowed value of 'parallel' is 50.
-# See UT_030 below if you want to add more unit test jobs.

UT_002:
  extends: .unit_test_template
-  parallel: 30
+  parallel: 9
  tags:
    - ESP32_IDF
    - UT_T1_1
@@ -263,14 +262,12 @@ UT_002:
 
 
UT_003:
  extends: .unit_test_template
-  parallel: 3
  tags:
    - ESP32_IDF
    - UT_T1_SDMODE

UT_004:
  extends: .unit_test_template
-  parallel: 3
  tags:
    - ESP32_IDF
    - UT_T1_SPIMODE
@@ -289,13 +286,6 @@ UT_006:
    - UT_T1_SPIMODE
    - psram

-UT_007:
-  extends: .unit_test_template
-  parallel: 4
-  tags:
-    - ESP32_IDF
-    - UT_T1_GPIO
-
UT_008:
  extends: .unit_test_template
  tags:
@@ -303,13 +293,6 @@ UT_008:
    - UT_T1_GPIO
    - psram

-UT_009:
-  extends: .unit_test_template
-  parallel: 4
-  tags:
-    - ESP32_IDF
-    - UT_T1_PCNT
-
UT_010:
  extends: .unit_test_template
  tags:
@@ -317,13 +300,6 @@ UT_010:
    - UT_T1_PCNT
    - psram

-UT_011:
-  extends: .unit_test_template
-  parallel: 4
-  tags:
-    - ESP32_IDF
-    - UT_T1_LEDC
-
UT_012:
  extends: .unit_test_template
  tags:
@@ -331,13 +307,6 @@ UT_012:
    - UT_T1_LEDC
    - psram

-UT_013:
-  extends: .unit_test_template
-  parallel: 4
-  tags:
-    - ESP32_IDF
-    - UT_T2_RS485
-
UT_014:
  extends: .unit_test_template
  tags:
@@ -347,7 +316,6 @@ UT_014:
 
 
UT_015:
  extends: .unit_test_template
-  parallel: 4
  tags:
    - ESP32_IDF
    - UT_T1_RMT
@@ -361,26 +329,18 @@ UT_016:
 
 
UT_017:
  extends: .unit_test_template
-  parallel: 3
  tags:
    - ESP32_IDF
    - EMMC

UT_018:
  extends: .unit_test_template
-  parallel: 5
+  parallel: 2
  tags:
    - ESP32_IDF
    - UT_T1_1
    - 8Mpsram

-UT_019:
-  extends: .unit_test_template
-  parallel: 4
-  tags:
-    - ESP32_IDF
-    - Example_SPI_Multi_device
-
UT_020:
  extends: .unit_test_template
  tags:
@@ -388,13 +348,6 @@ UT_020:
    - Example_SPI_Multi_device
    - psram

-UT_021:
-  extends: .unit_test_template
-  parallel: 4
-  tags:
-    - ESP32_IDF
-    - UT_T2_I2C
-
UT_022:
  extends: .unit_test_template
  tags:
@@ -404,7 +357,6 @@ UT_022:
 
 
UT_023:
  extends: .unit_test_template
-  parallel: 4
  tags:
    - ESP32_IDF
    - UT_T1_MCPWM
@@ -416,13 +368,6 @@ UT_024:
    - UT_T1_MCPWM
    - psram

-UT_025:
-  extends: .unit_test_template
-  parallel: 4
-  tags:
-    - ESP32_IDF
-    - UT_T1_I2S
-
UT_026:
  extends: .unit_test_template
  tags:
@@ -430,13 +375,6 @@ UT_026:
    - UT_T1_I2S
    - psram

-UT_027:
-  extends: .unit_test_template
-  parallel: 3
-  tags:
-    - ESP32_IDF
-    - UT_T2_1
-
UT_028:
  extends: .unit_test_template
  tags:
@@ -444,34 +382,12 @@ UT_028:
    - UT_T2_1
    - psram

-UT_029:
-  extends: .unit_test_template
-  tags:
-    - ESP32_IDF
-    - UT_T2_1
-    - 8Mpsram
-
-# Gitlab parallel max value is 50. We need to create another UT job if parallel is larger than 50.
-UT_030:
-  extends: .unit_test_template
-  parallel: 10
-  tags:
-    - ESP32_IDF
-    - UT_T1_1
-
UT_031:
  extends: .unit_test_template
  tags:
    - ESP32_IDF
    - UT_T1_FlashEncryption

-UT_032:
-  extends: .unit_test_template
-  parallel: 4
-  tags:
-    - ESP32_IDF
-    - UT_T2_Ethernet
-
UT_033:
  extends: .unit_test_template
  tags:
@@ -481,21 +397,19 @@ UT_033:
 
 
UT_034:
  extends: .unit_test_template
-  parallel: 4
  tags:
    - ESP32_IDF
    - UT_T1_ESP_FLASH

UT_035:
  extends: .unit_test_template
-  parallel: 35
+  parallel: 16
  tags:
    - ESP32S2BETA_IDF
    - UT_T1_1

UT_036:
  extends: .unit_test_template
-  parallel: 2
  tags:
    - ESP32_IDF
    - UT_T1_PSRAMV0
@@ -503,18 +417,10 @@ UT_036:
 
 
UT_037:
  extends: .unit_test_template
-  parallel: 4
  tags:
    - ESP32S2BETA_IDF
    - UT_T1_LEDC

-UT_040:
-  extends: .unit_test_template
-  parallel: 3
-  tags:
-    - ESP32_IDF
-    - UT_T1_no32kXTAL
-
UT_041:
  extends: .unit_test_template
  tags:
@@ -522,13 +428,6 @@ UT_041:
    - UT_T1_no32kXTAL
    - psram

-UT_042:
-  extends: .unit_test_template
-  parallel: 3
-  tags:
-    - ESP32_IDF
-    - UT_T1_32kXTAL
-
UT_043:
  extends: .unit_test_template
  tags:

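The lower `parallel` values above follow from the assigner changes in the next two files: each job slot now packs up to 50 cases (the new MAX_CASE in CIAssignUnitTest.py) and leftover cases are retried against groups that already got a slot. As a rough, hypothetical sanity check (the real grouping also splits by config and tags, so actual counts can be higher), the slot count a runner tag needs is about its case count divided by the group size:

```python
import math

# Hypothetical helper, not part of this commit: rough estimate of the `parallel`
# value a runner tag needs once cases are packed into groups of at most MAX_CASE.
MAX_CASE = 50  # new group size set in CIAssignUnitTest.Group below

def estimate_parallel(case_count, max_case=MAX_CASE):
    return math.ceil(case_count / max_case)

# e.g. ~1400 cases on one tag (a made-up number) would fit into 28 slots
print(estimate_parallel(1400))
```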
+ 63 - 35
tools/tiny-test-fw/CIAssignUnitTest.py

@@ -24,12 +24,17 @@ except ImportError:
 
 
 
 
class Group(CIAssignTest.Group):
-    SORT_KEYS = ["config", "test environment", "multi_device", "multi_stage", "tags", "chip_target"]
-    MAX_CASE = 30
+    SORT_KEYS = ["test environment", "tags", "chip_target"]
+    MAX_CASE = 50
    ATTR_CONVERT_TABLE = {
        "execution_time": "execution time"
    }
    CI_JOB_MATCH_KEYS = ["test environment"]
+    DUT_CLS_NAME = {
+        "esp32": "ESP32DUT",
+        "esp32s2beta": "ESP32S2DUT",
+        "esp8266": "ESP8266DUT",
+    }

    def __init__(self, case):
        super(Group, self).__init__(case)
@@ -42,13 +47,28 @@ class Group(CIAssignTest.Group):
            attr = Group.ATTR_CONVERT_TABLE[attr]
        return case[attr]

-    def _create_extra_data(self, test_function):
+    def add_extra_case(self, case):
+        """ If the current group already requires all tags needed by the case, adding succeeds """
+        added = False
+        if self.accept_new_case():
+            for key in self.filters:
+                if self._get_case_attr(case, key) != self.filters[key]:
+                    if key == "tags":
+                        if self._get_case_attr(case, key).issubset(self.filters[key]):
+                            continue
+                    break
+            else:
+                self.case_list.append(case)
+                added = True
+        return added
+
+    def _create_extra_data(self, test_cases, test_function):
        """
        For unit test case, we need to copy some attributes of test cases into config file.
        So unit test function knows how to run the case.
        """
        case_data = []
-        for case in self.case_list:
+        for case in test_cases:
            one_case_data = {
                "config": self._get_case_attr(case, "config"),
                "name": self._get_case_attr(case, "summary"),
@@ -67,19 +87,26 @@ class Group(CIAssignTest.Group):
            case_data.append(one_case_data)
        return case_data

-    def _map_test_function(self):
+    def _divide_case_by_test_function(self):
        """
-        determine which test function to use according to current test case
+        divide the cases of the current test group by the test function they need to use

-        :return: test function name to use
+        :return: dict mapping each test function to its list of cases
        """
-        if self.filters["multi_device"] == "Yes":
-            test_function = "run_multiple_devices_cases"
-        elif self.filters["multi_stage"] == "Yes":
-            test_function = "run_multiple_stage_cases"
-        else:
-            test_function = "run_unit_test_cases"
-        return test_function
+        case_by_test_function = {
+            "run_multiple_devices_cases": [],
+            "run_multiple_stage_cases": [],
+            "run_unit_test_cases": [],
+        }
+
+        for case in self.case_list:
+            if case["multi_device"] == "Yes":
+                case_by_test_function["run_multiple_devices_cases"].append(case)
+            elif case["multi_stage"] == "Yes":
+                case_by_test_function["run_multiple_stage_cases"].append(case)
+            else:
+                case_by_test_function["run_unit_test_cases"].append(case)
+        return case_by_test_function

    def output(self):
        """
@@ -87,35 +114,30 @@ class Group(CIAssignTest.Group):
 
 
         :return: {"Filter": case filter, "CaseConfig": list of case configs for cases in this group}
         :return: {"Filter": case filter, "CaseConfig": list of case configs for cases in this group}
         """
         """
-        test_function = self._map_test_function()
+
+        target = self._get_case_attr(self.case_list[0], "chip_target")
+        if target:
+            overwrite = {
+                "dut": {
+                    "path": "IDF/IDFDUT.py",
+                    "class": self.DUT_CLS_NAME[target],
+                }
+            }
+        else:
+            overwrite = dict()
+
+        case_by_test_function = self._divide_case_by_test_function()
 
 
         output_data = {
         output_data = {
             # we don't need filter for test function, as UT uses a few test functions for all cases
             # we don't need filter for test function, as UT uses a few test functions for all cases
             "CaseConfig": [
             "CaseConfig": [
                 {
                 {
                     "name": test_function,
                     "name": test_function,
-                    "extra_data": self._create_extra_data(test_function),
-                }
+                    "extra_data": self._create_extra_data(test_cases, test_function),
+                    "overwrite": overwrite,
+                } for test_function, test_cases in case_by_test_function.iteritems() if test_cases
             ],
             ],
         }
         }
-
-        target = self._get_case_attr(self.case_list[0], "chip_target")
-        if target is not None:
-            target_dut = {
-                "esp32": "ESP32DUT",
-                "esp32s2beta": "ESP32S2DUT",
-                "esp8266": "ESP8266DUT",
-            }[target]
-            output_data.update({
-                "Filter": {
-                    "overwrite": {
-                        "dut": {
-                            "path": "IDF/IDFDUT.py",
-                            "class": target_dut,
-                        }
-                    }
-                }
-            })
         return output_data
         return output_data
 
 
 
 
@@ -135,6 +157,8 @@ class UnitTestAssignTest(CIAssignTest.AssignTest):
            with open(test_case_path, "r") as f:
                raw_data = yaml.load(f, Loader=Loader)
            test_cases = raw_data["test cases"]
+            for case in test_cases:
+                case["tags"] = set(case["tags"])
        except IOError:
            print("Test case path is invalid. Should only happen when use @bot to skip unit test.")
            test_cases = []
@@ -160,6 +184,10 @@ class UnitTestAssignTest(CIAssignTest.AssignTest):
                        # case don't have this key, regard as filter success
                        filtered_cases.append(case)
                test_cases = filtered_cases
+        # sort cases by config and test function:
+        # cases with similar attributes are more likely to be assigned to the same job,
+        # which reduces the number of DUT flash operations
+        test_cases.sort(key=lambda x: x["config"] + x["multi_stage"] + x["multi_device"])
        return test_cases


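The key change in CIAssignUnitTest.py is `add_extra_case`: a group may absorb a leftover case whose tags are a subset of the tags the group already requires. A minimal standalone sketch of that acceptance rule (simplified: the real method also checks `accept_new_case()`/MAX_CASE, and the filter values below are made up):

```python
def accepts_extra_case(group_filters, case):
    """Return True if a case fits a group: every filter must match exactly,
    except "tags", where a case needing only a subset of the group's tags is fine."""
    for key, group_value in group_filters.items():
        case_value = case[key]
        if case_value != group_value:
            if key == "tags" and case_value.issubset(group_value):
                continue
            return False
    return True

group_filters = {"test environment": "UT_T1_1", "tags": {"ESP32_IDF", "UT_T1_1", "psram"}}
case = {"test environment": "UT_T1_1", "tags": {"ESP32_IDF", "UT_T1_1"}}
print(accepts_extra_case(group_filters, case))  # True: the case's tags are a subset
```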
+ 77 - 3
tools/tiny-test-fw/Utility/CIAssignTest.py

@@ -105,6 +105,20 @@ class Group(object):
                added = True
        return added

+    def add_extra_case(self, case):
+        """
+        By default (the ``add_case`` method), a case is only added when it matches all of the group's filters exactly.
+        But sometimes we also want to accept cases that are not a best fit.
+        For example, a group that can run cases requiring (A, B) can also accept cases requiring only (A,) or (B,).
+        When best-fit assignment fails, we use this method to try to assign the remaining cases.
+
+        Subclasses that want this retry behaviour need to override this method;
+        the logic for handling such scenarios can differ from case to case.
+
+        :return: True if the case was accepted, else False
+        """
+        pass
+
    def output(self):
        """
        output data for job configs
@@ -193,6 +207,26 @@ class AssignTest(object):
                groups.append(self.case_group(case))
        return groups

+    def _assign_failed_cases(self, assigned_groups, failed_groups):
+        """ Try to assign cases from failed groups to groups that are already assigned to jobs """
+        still_failed_groups = []
+        failed_cases = []
+        for group in failed_groups:
+            failed_cases.extend(group.case_list)
+        for case in failed_cases:
+            # first try to assign to already assigned groups
+            for group in assigned_groups:
+                if group.add_extra_case(case):
+                    break
+            else:
+                # if that also fails, re-group the failed cases
+                for group in still_failed_groups:
+                    if group.add_case(case):
+                        break
+                else:
+                    still_failed_groups.append(self.case_group(case))
+        return still_failed_groups
+
    @staticmethod
    def _apply_bot_filter():
        """
@@ -218,6 +252,21 @@ class AssignTest(object):
            test_count = int(test_count)
            self.test_cases *= test_count

+    @staticmethod
+    def _count_groups_by_keys(test_groups):
+        """
+        Count the number of test groups by job match keys.
+        This is important information for updating the CI config file.
+        """
+        group_count = dict()
+        for group in test_groups:
+            key = ",".join(group.ci_job_match_keys)
+            try:
+                group_count[key] += 1
+            except KeyError:
+                group_count[key] = 1
+        return group_count
+
    def assign_cases(self):
        """
        separate test cases to groups and assign test cases to CI jobs.
@@ -226,21 +275,46 @@ class AssignTest(object):
        :return: None
        """
        failed_to_assign = []
+        assigned_groups = []
        case_filter = self._apply_bot_filter()
        self.test_cases = self._search_cases(self.test_case_path, case_filter)
        self._apply_bot_test_count()
        test_groups = self._group_cases()
+
        for group in test_groups:
            for job in self.jobs:
                if job.match_group(group):
                    job.assign_group(group)
+                    assigned_groups.append(group)
                    break
            else:
                failed_to_assign.append(group)
+
+        if failed_to_assign:
+            failed_to_assign = self._assign_failed_cases(assigned_groups, failed_to_assign)
+
+        # print debug info
+        # total requirement of current pipeline
+        required_group_count = self._count_groups_by_keys(test_groups)
+        console_log("Required job count by tags:")
+        for tags in required_group_count:
+            console_log("\t{}: {}".format(tags, required_group_count[tags]))
+
+        # number of unused jobs
+        not_used_jobs = [job for job in self.jobs if "case group" not in job]
+        if not_used_jobs:
+            console_log("{} jobs not used. Please check if you define too much jobs".format(len(not_used_jobs)), "O")
+        for job in not_used_jobs:
+            console_log("\t{}".format(job["name"]), "O")
+
+        # failures
        if failed_to_assign:
-            console_log("Too many test cases vs jobs to run. Please add the following jobs to tools/ci/config/target-test.yml with specific tags:", "R")
-            for group in failed_to_assign:
-                console_log("* Add job with: " + ",".join(group.ci_job_match_keys), "R")
+            console_log("Too many test cases vs jobs to run. "
+                        "Please increase parallel count in tools/ci/config/target-test.yml "
+                        "for jobs with specific tags:", "R")
+            failed_group_count = self._count_groups_by_keys(failed_to_assign)
+            for tags in failed_group_count:
+                console_log("\t{}: {}".format(tags, failed_group_count[tags]), "R")
             raise RuntimeError("Failed to assign test case to CI jobs")
             raise RuntimeError("Failed to assign test case to CI jobs")
 
 
     def output_configs(self, output_path):
     def output_configs(self, output_path):

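CIAssignTest.py now assigns in two passes: whole groups are matched to jobs first, and cases from groups that found no job are then offered to already-assigned groups via `add_extra_case`. A condensed sketch of that flow (assuming job/group objects with the `match_group`, `assign_group`, `add_extra_case` and `case_list` members shown above; the real code also re-groups whatever is still left):

```python
def assign(jobs, groups):
    assigned, failed = [], []
    # pass 1: best-fit assignment of whole groups to jobs
    for group in groups:
        job = next((j for j in jobs if j.match_group(group)), None)
        if job is not None:
            job.assign_group(group)
            assigned.append(group)
        else:
            failed.append(group)

    # pass 2: retry the individual leftover cases against already-assigned groups
    still_failed = [case
                    for group in failed for case in group.case_list
                    if not any(g.add_extra_case(case) for g in assigned)]
    return still_failed  # anything left here means more parallel job slots are needed
```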
+ 3 - 3
tools/tiny-test-fw/Utility/CaseConfig.py

@@ -159,7 +159,7 @@ class Parser(object):
        configs = cls.DEFAULT_CONFIG.copy()
        if config_file:
            with open(config_file, "r") as f:
-                configs.update(yaml.load(f), Loader=Loader)
+                configs.update(yaml.load(f, Loader=Loader))
        return configs

    @classmethod
@@ -190,9 +190,9 @@ class Parser(object):
        test_case_list = []
        for _config in configs["CaseConfig"]:
            _filter = configs["Filter"].copy()
+            _overwrite = cls.handle_overwrite_args(_config.pop("overwrite", dict()))
+            _extra_data = _config.pop("extra_data", None)
            _filter.update(_config)
-            _overwrite = cls.handle_overwrite_args(_filter.pop("overwrite", dict()))
-            _extra_data = _filter.pop("extra_data", None)
            for test_method in test_methods:
                if _filter_one_case(test_method, _filter):
                    test_case_list.append(TestCase.TestCase(test_method, _extra_data, **_overwrite))

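Two small fixes land in CaseConfig.py: the `Loader` argument now actually goes to `yaml.load()` (it was previously passed to `dict.update()` by mistake), and `overwrite`/`extra_data` are popped from the per-case config before it is merged into the filter, so they no longer leak into case filtering. A trimmed sketch of the corrected order (the real parser also applies `handle_overwrite_args` and a default `Filter`):

```python
import yaml
try:
    from yaml import CLoader as Loader
except ImportError:
    from yaml import Loader

def build_case_configs(config_text):
    configs = yaml.load(config_text, Loader=Loader)    # Loader belongs here, not in update()
    for _config in configs["CaseConfig"]:
        _filter = dict(configs.get("Filter", {}))
        _overwrite = _config.pop("overwrite", dict())  # pop before the filter merge
        _extra_data = _config.pop("extra_data", None)
        _filter.update(_config)
        yield _filter, _overwrite, _extra_data

text = "CaseConfig: [{name: run_unit_test_cases, extra_data: [], overwrite: {}}]"
print(list(build_case_configs(text)))  # [({'name': 'run_unit_test_cases'}, {}, [])]
```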
+ 18 - 6
tools/tiny-test-fw/Utility/__init__.py

@@ -38,11 +38,23 @@ def console_log(data, color="white", end="\n"):
    sys.stdout.flush()


+__LOADED_MODULES = dict()
+# We should only load a module once.
+# If we load the same module twice, Python treats the objects created by the first
+# and the second load as different types, even though they come from the same source file.
+# That leads to strange errors such as `isinstance(obj, type_of_this_obj)` returning False.
+
+
def load_source(name, path):
    try:
-        from importlib.machinery import SourceFileLoader
-        return SourceFileLoader(name, path).load_module()
-    except ImportError:
-        # importlib.machinery doesn't exists in Python 2 so we will use imp (deprecated in Python 3)
-        import imp
-        return imp.load_source(name, path)
+        return __LOADED_MODULES[name]
+    except KeyError:
+        try:
+            from importlib.machinery import SourceFileLoader
+            ret = SourceFileLoader(name, path).load_module()
+        except ImportError:
+            # importlib.machinery doesn't exist in Python 2, so fall back to imp (deprecated in Python 3)
+            import imp
+            ret = imp.load_source(name, path)
+        __LOADED_MODULES[name] = ret
+        return ret

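The module cache added to `load_source` exists because loading the same source file twice re-executes it, so the classes it defines are re-created and `isinstance()` checks against the "old" classes start failing. A self-contained demonstration of that failure mode (the file name below is hypothetical and stands in for IDF/IDFDUT.py):

```python
from importlib.machinery import SourceFileLoader

# write a tiny throwaway module so the example runs anywhere
with open("my_dut.py", "w") as f:
    f.write("class DUT(object):\n    pass\n")

first = SourceFileLoader("my_dut", "my_dut.py").load_module()
obj = first.DUT()                                                # built from the first load
second = SourceFileLoader("my_dut", "my_dut.py").load_module()   # re-executes the module

print(isinstance(obj, second.DUT))  # False: second.DUT is a brand-new class object
```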
+ 25 - 13
tools/unit-test-app/unit_test.py

@@ -158,6 +158,10 @@ def replace_app_bin(dut, name, new_app_bin):
            break


+def format_case_name(case):
+    return "[{}] {}".format(case["config"], case["name"])
+
+
def reset_dut(dut):
    dut.reset()
    # esptool ``run`` cmd takes quite long time.
@@ -203,9 +207,9 @@ def run_one_normal_case(dut, one_case, junit_test_case):
        test_finish.append(True)
        output = dut.stop_capture_raw_data()
        if result:
-            Utility.console_log("Success: " + one_case["name"], color="green")
+            Utility.console_log("Success: " + format_case_name(one_case), color="green")
        else:
-            Utility.console_log("Failed: " + one_case["name"], color="red")
+            Utility.console_log("Failed: " + format_case_name(one_case), color="red")
            junit_test_case.add_failure_info(output)
            raise TestCaseFailed()

@@ -222,7 +226,7 @@ def run_one_normal_case(dut, one_case, junit_test_case):
        assert not exception_reset_list
        if int(data[1]):
            # case ignored
-            Utility.console_log("Ignored: " + one_case["name"], color="orange")
+            Utility.console_log("Ignored: " + format_case_name(one_case), color="orange")
            junit_test_case.add_skipped_info("ignored")
        one_case_finish(not int(data[0]))

@@ -299,13 +303,15 @@ def run_unit_test_cases(env, extra_data):
                run_one_normal_case(dut, one_case, junit_test_case)
                performance_items = dut.get_performance_items()
            except TestCaseFailed:
-                failed_cases.append(one_case["name"])
+                failed_cases.append(format_case_name(one_case))
            except Exception as e:
                junit_test_case.add_failure_info("Unexpected exception: " + str(e))
-                failed_cases.append(one_case["name"])
+                failed_cases.append(format_case_name(one_case))
            finally:
                TinyFW.JunitReport.update_performance(performance_items)
                TinyFW.JunitReport.test_case_finish(junit_test_case)
+        # close the DUT when all cases for one config have finished
+        env.close_dut(dut.name)

    # raise exception if any case fails
    if failed_cases:
@@ -502,11 +508,15 @@ def run_multiple_devices_cases(env, extra_data):
                 junit_test_case.add_failure_info("Unexpected exception: " + str(e))
                 junit_test_case.add_failure_info("Unexpected exception: " + str(e))
             finally:
             finally:
                 if result:
                 if result:
-                    Utility.console_log("Success: " + one_case["name"], color="green")
+                    Utility.console_log("Success: " + format_case_name(one_case), color="green")
                 else:
                 else:
-                    failed_cases.append(one_case["name"])
-                    Utility.console_log("Failed: " + one_case["name"], color="red")
+                    failed_cases.append(format_case_name(one_case))
+                    Utility.console_log("Failed: " + format_case_name(one_case), color="red")
                 TinyFW.JunitReport.test_case_finish(junit_test_case)
                 TinyFW.JunitReport.test_case_finish(junit_test_case)
+        # close all DUTs when finish running all cases for one config
+        for dut in duts:
+            env.close_dut(dut)
+        duts = {}
 
 
     if failed_cases:
     if failed_cases:
         Utility.console_log("Failed Cases:", color="red")
         Utility.console_log("Failed Cases:", color="red")
@@ -563,9 +573,9 @@ def run_one_multiple_stage_case(dut, one_case, junit_test_case):
            result = result and check_reset()
            output = dut.stop_capture_raw_data()
            if result:
-                Utility.console_log("Success: " + one_case["name"], color="green")
+                Utility.console_log("Success: " + format_case_name(one_case), color="green")
            else:
-                Utility.console_log("Failed: " + one_case["name"], color="red")
+                Utility.console_log("Failed: " + format_case_name(one_case), color="red")
                junit_test_case.add_failure_info(output)
                raise TestCaseFailed()
            stage_finish.append("break")
@@ -582,7 +592,7 @@ def run_one_multiple_stage_case(dut, one_case, junit_test_case):
            # in this scenario reset should not happen
            if int(data[1]):
                # case ignored
-                Utility.console_log("Ignored: " + one_case["name"], color="orange")
+                Utility.console_log("Ignored: " + format_case_name(one_case), color="orange")
                junit_test_case.add_skipped_info("ignored")
            # only passed in last stage will be regarded as real pass
            if last_stage():
@@ -651,13 +661,15 @@ def run_multiple_stage_cases(env, extra_data):
                run_one_multiple_stage_case(dut, one_case, junit_test_case)
                performance_items = dut.get_performance_items()
            except TestCaseFailed:
-                failed_cases.append(one_case["name"])
+                failed_cases.append(format_case_name(one_case))
            except Exception as e:
                junit_test_case.add_failure_info("Unexpected exception: " + str(e))
-                failed_cases.append(one_case["name"])
+                failed_cases.append(format_case_name(one_case))
            finally:
                TinyFW.JunitReport.update_performance(performance_items)
                TinyFW.JunitReport.test_case_finish(junit_test_case)
+        # close the DUT when all cases for one config have finished
+        env.close_dut(dut.name)

    # raise exception if any case fails
    if failed_cases:
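Finally, unit_test.py now prefixes every result line with the case's config via the new `format_case_name`, so the logs also show which config a case was built with, and each DUT is closed once all cases for one config have run. A quick look at what the prefix produces (field values below are made up):

```python
def format_case_name(case):
    return "[{}] {}".format(case["config"], case["name"])

one_case = {"config": "psram", "name": "task delete self"}
print("Success: " + format_case_name(one_case))  # Success: [psram] task delete self
```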