Merge branch 'refactor/ldgen' into 'master'

refactor: ldgen

Closes IDF-605 and IDFGH-6271

See merge request espressif/esp-idf!16509
Fu Hanxi, 4 years ago
Commit 60c5b37bfe

+ 5 - 4
.gitlab-ci.yml

@@ -56,11 +56,12 @@ variables:
 
   # Docker images
   BOT_DOCKER_IMAGE_TAG: ":latest"
-  ESP_IDF_DOC_ENV_IMAGE: "$CI_DOCKER_REGISTRY/esp-idf-doc-env:v4.4-1-v5"
-  ESP_ENV_IMAGE: "$CI_DOCKER_REGISTRY/esp-env:v4.4-1"
-  AFL_FUZZER_TEST_IMAGE: "$CI_DOCKER_REGISTRY/afl-fuzzer-test:v4.4-1-1"
-  CLANG_STATIC_ANALYSIS_IMAGE: "${CI_DOCKER_REGISTRY}/clang-static-analysis:v4.4-1-2"
+  ESP_ENV_IMAGE: "${CI_DOCKER_REGISTRY}/esp-env-v5.0:1"
+  AFL_FUZZER_TEST_IMAGE: "${CI_DOCKER_REGISTRY}/afl-fuzzer-test-v5.0:1-1"
+  CLANG_STATIC_ANALYSIS_IMAGE: "${CI_DOCKER_REGISTRY}/clang-static-analysis-v5.0:1-1"
+  ESP_IDF_DOC_ENV_IMAGE: "${CI_DOCKER_REGISTRY}/esp-idf-doc-env-v5.0:1-1"
  SONARQUBE_SCANNER_IMAGE: "${CI_DOCKER_REGISTRY}/sonarqube-scanner:3"
+  LINUX_SHELL_IMAGE: "${CI_DOCKER_REGISTRY}/linux-shells:2"
 
  # target test config file, used by assign test job
  CI_TARGET_TEST_CONFIG_FILE: "$CI_PROJECT_DIR/.gitlab/ci/target-test.yml"

+ 4 - 6
.gitlab/ci/host-test.yml

@@ -57,10 +57,8 @@ test_ldgen_on_host:
  extends: .host_test_template
  script:
    - cd tools/ldgen/test
-    - ./test_fragments.py
-    - ./test_generation.py
-    - ./test_entity.py
-    - ./test_output_commands.py
+    - export PYTHONPATH=$PYTHONPATH:..
+    - python -m unittest
  variables:
    LC_ALL: C.UTF-8
 
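For reference, a rough local equivalent of the updated ldgen host-test step above (a sketch only, assuming it is run from tools/ldgen/test; it mirrors what the PYTHONPATH export and `python -m unittest` in the CI script do and is not itself part of this merge request):

    # Make the parent directory (tools/ldgen) importable, as the CI job does with
    # `export PYTHONPATH=$PYTHONPATH:..`, then let stdlib unittest discover and
    # run every test*.py module in the current directory.
    import sys
    import unittest

    sys.path.insert(0, '..')
    suite = unittest.defaultTestLoader.discover('.', pattern='test*.py')
    unittest.TextTestRunner(verbosity=2).run(suite)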
@@ -317,7 +315,7 @@ test_mkuf2:
 
 test_autocomplete:
   extends: .host_test_template
-  image: $CI_DOCKER_REGISTRY/linux-shells:1
+  image: $LINUX_SHELL_IMAGE
  artifacts:
    when: on_failure
    paths:
@@ -328,7 +326,7 @@ test_autocomplete:
 
 test_detect_python:
   extends: .host_test_template
-  image: $CI_DOCKER_REGISTRY/linux-shells:1
+  image: $LINUX_SHELL_IMAGE
  script:
    - cd ${IDF_PATH}
    - shellcheck -s sh tools/detect_python.sh

+ 1 - 0
.pylintrc

@@ -151,6 +151,7 @@ disable=print-statement,
        too-many-branches,
        too-many-statements,
        ungrouped-imports,  # since we have isort in pre-commit
+        no-name-in-module,  # since we have flake8 to check this
 
 # Enable the message, report, category or checker with the given id(s). You can
 # either give multiple identifier separated by comma (,) or put this option

+ 5 - 10
components/espcoredump/test/test_espcoredump.sh

@@ -11,26 +11,21 @@ else
   elf_dir=$1
 fi
 
-if ! command -v coverage &> /dev/null; then
-  echo "coverage could not be found, please install it ('pip install coverage')"
-  exit 1
-fi
-
 SUPPORTED_TARGETS=("esp32" "esp32s2" "esp32c3" "esp32s3" )
 res=0
-coverage erase
+python -m coverage erase
 for chip in "${SUPPORTED_TARGETS[@]}"; do
   {
     echo "run b64 decoding tests on $chip"
-    coverage run -a --source=corefile ../espcoredump.py --chip="$chip" --gdb-timeout-sec 5 info_corefile -m -t b64 -c "${chip}/coredump.b64" -s "${chip}/core.elf" "${elf_dir}/${chip}.elf" &>"${chip}/output" &&
+    python -m coverage run -a --source=corefile ../espcoredump.py --chip="$chip" --gdb-timeout-sec 5 info_corefile -m -t b64 -c "${chip}/coredump.b64" -s "${chip}/core.elf" "${elf_dir}/${chip}.elf" &>"${chip}/output" &&
       diff "${chip}/expected_output" "${chip}/output" &&
       diff "${chip}/expected_output" "${chip}/output" &&
-      coverage run -a --source=corefile ../espcoredump.py --chip="$chip" --gdb-timeout-sec 5 info_corefile -m -t elf -c "${chip}/core.elf" "${elf_dir}/${chip}.elf" &>"${chip}/output2" &&
+      python -m coverage run -a --source=corefile ../espcoredump.py --chip="$chip" --gdb-timeout-sec 5 info_corefile -m -t elf -c "${chip}/core.elf" "${elf_dir}/${chip}.elf" &>"${chip}/output2" &&
       diff "${chip}/expected_output" "${chip}/output2"
       diff "${chip}/expected_output" "${chip}/output2"
   } || {
   } || {
     echo 'The test for espcoredump has failed!'
     echo 'The test for espcoredump has failed!'
     res=1
     res=1
   }
   }
 done
 done
-coverage run -a --source=corefile ./test_espcoredump.py
-coverage report ../corefile/*.py ../espcoredump.py
+python -m coverage run -a --source=corefile ./test_espcoredump.py
+python -m coverage report ../corefile/*.py ../espcoredump.py
 exit $res

+ 6 - 13
components/lwip/test_afl_host/esp_netif_loopback_mock.c

@@ -1,17 +1,10 @@
-// Copyright 2020 Espressif Systems (Shanghai) CO LTD
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+/*
+ * SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
 
+#include <features.h>
 #include "esp_netif_lwip_internal.h"
 
 esp_err_t esp_netif_get_ip_info(esp_netif_t *esp_netif, esp_netif_ip_info_t *ip_info)

+ 1 - 0
components/lwip/test_afl_host/no_warn_host.h

@@ -3,3 +3,4 @@
 #define __warning__ deprecated
 #define IRAM_ATTR
 #define __ESP_ATTR_H__
+#include <features.h>

+ 6 - 13
components/mdns/test_afl_fuzz_host/esp32_mock.h

@@ -1,16 +1,8 @@
-// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+/*
+ * SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
 #ifndef _ESP32_COMPAT_H_
 #define _ESP32_COMPAT_H_
 
@@ -20,6 +12,7 @@
 #define _ESP_TASK_H_
 
 #ifdef USE_BSD_STRING
+#include <features.h>
 #include <bsd/string.h>
 #endif
 #include <stdint.h>

+ 28 - 25
docs/en/api-guides/linker-script-generation.rst

@@ -24,16 +24,16 @@ This section presents a guide for quickly placing code/data to RAM and RTC memor
 
 
 For this guide, suppose we have the following::
 
-    - components/
-                    - my_component/
-                                    - CMakeLists.txt
-                                    - component.mk
-                                    - Kconfig
-                                    - src/
-                                          - my_src1.c
-                                          - my_src2.c
-                                          - my_src3.c
-                                    - my_linker_fragment_file.lf
+    components
+    └── my_component
+        ├── CMakeLists.txt
+        ├── component.mk
+        ├── Kconfig
+        ├── src/
+        │   ├── my_src1.c
+        │   ├── my_src2.c
+        │   └── my_src3.c
+        └── my_linker_fragment_file.lf
 
 - a component named ``my_component`` that is archived as library ``libmy_component.a`` during build
 - three source files archived under the library, ``my_src1.c``, ``my_src2.c`` and ``my_src3.c`` which are compiled as ``my_src1.o``, ``my_src2.o`` and ``my_src3.o``, respectively
@@ -71,7 +71,7 @@ Placing object files
 """"""""""""""""""""
 """"""""""""""""""""
 
 
 Suppose the entirety of ``my_src1.o`` is performance-critical, so it is desirable to place it in RAM. On the other hand, the entirety of ``my_src2.o`` contains symbols needed coming out of deep sleep, so it needs to be put under RTC memory.
 Suppose the entirety of ``my_src1.o`` is performance-critical, so it is desirable to place it in RAM. On the other hand, the entirety of ``my_src2.o`` contains symbols needed coming out of deep sleep, so it needs to be put under RTC memory.
-In the the linker fragment file, we can write:
+In the linker fragment file, we can write:
 
 
 .. code-block:: none
 .. code-block:: none
 
 
@@ -125,6 +125,9 @@ Similarly, this places the entire component in RTC memory:
     entries:
         * (rtc)
 
+
+.. _ldgen-conditional-placements:
+
 Configuration-dependent placements
 """"""""""""""""""""""""""""""""""
 
@@ -224,6 +227,9 @@ The three fragment types share a common grammar:
 - type: Corresponds to the fragment type, can either be ``sections``, ``scheme`` or ``mapping``.
 - name: The name of the fragment, should be unique for the specified fragment type.
 - key, value: Contents of the fragment; each fragment type may support different keys and different grammars for the key values.
+
+    - For :ref:`sections<ldgen-sections-fragment>` and :ref:`scheme<ldgen-scheme-fragment>`, the only supported key is ``entries``.
+    - For :ref:`mappings<ldgen-mapping-fragment>`, both ``archive`` and ``entries`` are supported.
 
 .. note::
 
@@ -286,24 +292,10 @@ Condition checking behaves as you would expect an ``if...elseif/elif...else`` bl
         key_2:
             value_b
 
-
 **Comments**
 
 Comments in linker fragment files begin with ``#``. Like in other languages, comments are used to provide helpful descriptions and documentation and are ignored during processing.
 
-Compatibility with ESP-IDF v3.x Linker Script Fragment Files
-""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
-
-ESP-IDF v4.0 brings some changes to the linker script fragment file grammar:
-
-- indentation is enforced and improperly indented fragment files generate a parse exception; this was not enforced in the old version but previous documentation and examples demonstrates properly indented grammar
-- move to ``if...elif...else`` structure for conditionals, with the ability to nest checks and place entire fragments themselves inside conditionals
-- mapping fragments now requires a name like other fragment types
-
-Linker script generator should be able to parse ESP-IDF v3.x linker fragment files that are indented properly (as demonstrated by the ESP-IDF v3.x version of this document). Backward compatibility with the previous mapping fragment grammar (optional name and the old grammar for conditionals) has also been retained but with a deprecation warning. Users should switch to the newer grammar discussed in this document as support for the old grammar is planned to be removed in the future.
-
-Note that linker fragment files using the new ESP-IDF v4.0 grammar is not supported on ESP-IDF v3.x, however.
-
 Types
 """""
 
@@ -608,3 +600,14 @@ Then the corresponding excerpt from the generated linker script will be as follo
     Rule generated from the default scheme entry ``iram -> iram0_text``. Since the default scheme specifies an ``iram -> iram0_text`` entry, it too is placed wherever ``iram0_text`` is referenced by a marker. Since it is a rule generated from the default scheme, it comes first among all other rules collected under the same target name.
 
     The linker script template currently used is :component_file:`esp_system/ld/{IDF_TARGET_PATH_NAME}/sections.ld.in`; the generated output script ``sections.ld`` is put under its build directory.
+
+.. _ldgen-migrate-lf-grammar:
+
+Migrate to ESP-IDF v5.0 Linker Script Fragment Files Grammar
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The old grammar supported in ESP-IDF v3.x has been dropped in ESP-IDF v5.0. Here are a few notes on how to migrate properly:
+
+1. Indentation is now enforced, and improperly indented fragment files generate a parse exception. This was not enforced in the old version, but previous documentation and examples demonstrate properly indented grammar.
+2. Migrate the old condition entries to the ``if...elif...else`` structure for conditionals. Refer to the :ref:`earlier section<ldgen-conditional-placements>` for the detailed grammar.
+3. Mapping fragments now require a name, like other fragment types.

+ 8 - 0
docs/en/migration-guides/build-system.rst

@@ -1,4 +1,12 @@
 Migrate Build System to ESP-IDF 5.0
 ===================================
 
+Migrating from Make to CMake
+----------------------------
+
 Please follow the :ref:`build system <migrating_from_make>` guide for migrating make-based projects no longer supported in ESP-IDF v5.0.
+
+Update fragment file grammar
+----------------------------
+
+Please follow the :ref:`migrate linker script fragment files grammar<ldgen-migrate-lf-grammar>` section for migrating the v3.x grammar to the new one.

+ 26 - 24
docs/zh_CN/api-guides/linker-script-generation.rst

@@ -24,16 +24,16 @@
 
 假设用户有::
 
-    - components/
-                    - my_component/
-                                    - CMakeLists.txt
-                                    - component.mk
-                                    - Kconfig
-                                    - src/
-                                          - my_src1.c
-                                          - my_src2.c
-                                          - my_src3.c
-                                    - my_linker_fragment_file.lf
+    components
+    └── my_component
+        ├── CMakeLists.txt
+        ├── component.mk
+        ├── Kconfig
+        ├── src/
+        │   ├── my_src1.c
+        │   ├── my_src2.c
+        │   └── my_src3.c
+        └── my_linker_fragment_file.lf
 
 - 名为 ``my_component`` 的组件,在构建过程中存储为 ``libmy_component.a`` 库文件
 - 库文件包含的三个源文件:``my_src1.c``、``my_src2.c`` 和 ``my_src3.c``,编译后分别为 ``my_src1.o``、``my_src2.o`` 和 ``my_src3.o``
@@ -125,6 +125,8 @@
     entries:
         * (rtc)
 
+.. _ldgen-conditional-placements:
+
 根据具体配置存放
 """"""""""""""""""""
 
@@ -225,6 +227,9 @@
 - 名称:片段名称,指定片段类型的片段名称应唯一。
 - 键值:片段内容。每个片段类型可支持不同的键值和不同的键值语法。
 
+    - 在 ``段`` 和 ``协议`` 中,仅支持 ``entries`` 键。
+    - 在 ``映射`` 中,键支持 ``archive`` 和 ``entries``。
+
 .. note::
 
     多个片段的类型和名称相同时会引发异常。
@@ -286,24 +291,10 @@
         key_2:
             value_b
 
-
 **注释**
 
 链接器片段文件中的注释以 ``#`` 开头。和在其他语言中一样,注释提供了有用的描述和资料,在处理过程中会被忽略。
 
-与 ESP-IDF v3.x 链接器脚本片段文件兼容
-""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
-
-ESP-IDF v4.0 变更了链接器脚本片段文件使用的一些语法:
-
-- 必须缩进,缩进不当的文件会产生解析异常;旧版本不强制缩进,但之前的文档和示例均遵循了正确的缩进语法
-- 条件改用 ``if...elif...else`` 结构,可以嵌套检查,将完整片段置于条件内
-- 映射片段和其他片段类型一样,需有名称
-
-链接器脚本生成器可解析 ESP-IDF v3.x 版本中缩进正确的链接器片段文件(如 ESP-IDF v3.x 版本中的本文件所示),依然可以向后兼容此前的映射片段语法(可选名称和条件的旧语法),但是会有弃用警告。用户应换成本文档介绍的新语法,因为旧语法将在未来停用。
-
-请注意,ESP-IDF v3.x 不支持使用 ESP-IDF v4.0 新语法的链接器片段文件。
-
 类型
 """""""
 
@@ -608,3 +599,14 @@ ESP-IDF v4.0 变更了链接器脚本片段文件使用的一些语法:
     这是根据默认协议条目 ``iram -> iram0_text`` 生成的规则。默认协议指定了 ``iram -> iram0_text`` 条目,因此生成的规则同样也放在被 ``iram0_text`` 标记的地方。由于该规则是根据默认协议生成的,因此在同一目标下收集的所有规则下排在第一位。
 
     目前使用的链接器脚本模板是 :component_file:`esp_system/ld/{IDF_TARGET_PATH_NAME}/sections.ld.in`,生成的脚本存放在构建目录下。
+
+.. _ldgen-migrate-lf-grammar:
+
+将链接器脚本片段文件语法迁移至 ESP-IDF v5.0 适应版本
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ESP-IDF v5.0 中将不再支持 ESP-IDF v3.x 中链接器脚本片段文件的旧式语法。在迁移的过程中需注意以下几点:
+
+- 必须缩进,缩进不当的文件会产生解析异常;旧版本不强制缩进,但之前的文档和示例均遵循了正确的缩进语法
+- 条件改用 ``if...elif...else`` 结构,可以参照 :ref:`之前的章节<ldgen-conditional-placements>`
+- 映射片段和其他片段类型一样,需有名称

+ 1 - 1
requirements.txt

@@ -15,7 +15,7 @@ cryptography>=2.1.4
 # We do have cryptography binary on https://dl.espressif.com/pypi for ARM
 # On https://pypi.org/ are no ARM binaries as standard now
 
-pyparsing>=2.0.3,<2.4.0
+pyparsing>=3.0.3  # https://github.com/pyparsing/pyparsing/issues/319 is fixed in 3.0.3
 pyelftools>=0.22
 idf-component-manager>=0.2.99-beta
 

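The pyparsing bump above is what allows the refactored ldgen modules (see the tools/ldgen/ldgen/entity.py hunks later in this diff) to use the snake_case API introduced in pyparsing 3. A minimal, self-contained sketch of that API, not taken from the merge request, with a made-up key/value line as input:

    # Parse a "key: value" line using pyparsing>=3 snake_case names
    # (rest_of_line, set_results_name, parse_string).
    from pyparsing import Suppress, Word, alphanums, rest_of_line

    key = Word(alphanums + '_').set_results_name('key')
    line = key + Suppress(':') + rest_of_line.set_results_name('value')

    result = line.parse_string('archive: libmy_component.a')
    print(result['key'], result['value'].strip())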
+ 2 - 1
tools/ci/check_build_warnings.py

@@ -35,7 +35,8 @@ IGNORE_WARNS = [
         r'changes choice state',
         r'crosstool_version_check\.cmake',
         r'CryptographyDeprecationWarning',
-        r'Warning: \d+/\d+ app partitions are too small for binary'
+        r'Warning: \d+/\d+ app partitions are too small for binary',
+        r'CMake Deprecation Warning at main/lib/tinyxml2/CMakeLists\.txt:11 \(cmake_policy\)',
     ]
 ]
 

+ 0 - 4
tools/ci/check_copyright_ignore.txt

@@ -1243,7 +1243,6 @@ components/lwip/test_afl_host/dhcp_di.h
 components/lwip/test_afl_host/dhcpserver_di.h
 components/lwip/test_afl_host/dns_di.h
 components/lwip/test_afl_host/esp_attr.h
-components/lwip/test_afl_host/esp_netif_loopback_mock.c
 components/lwip/test_afl_host/network_mock.c
 components/lwip/test_afl_host/no_warn_host.h
 components/lwip/test_afl_host/test_dhcp_client.c
@@ -1350,7 +1349,6 @@ components/mdns/mdns_networking_lwip.c
 components/mdns/private_include/mdns_networking.h
 components/mdns/test/test_mdns.c
 components/mdns/test_afl_fuzz_host/esp32_mock.c
-components/mdns/test_afl_fuzz_host/esp32_mock.h
 components/mdns/test_afl_fuzz_host/esp_attr.h
 components/mdns/test_afl_fuzz_host/esp_netif_mock.c
 components/mdns/test_afl_fuzz_host/mdns_di.h
@@ -3028,8 +3026,6 @@ tools/ldgen/output_commands.py
 tools/ldgen/samples/template.ld
 tools/ldgen/sdkconfig.py
 tools/ldgen/test/data/linker_script.ld
-tools/ldgen/test/test_entity.py
-tools/ldgen/test/test_output_commands.py
 tools/mass_mfg/mfg_gen.py
 tools/mkdfu.py
 tools/mkuf2.py

+ 6 - 6
tools/ci/mypy_ignore_list.txt

@@ -233,13 +233,13 @@ tools/kconfig_new/test/confgen/test_confgen.py
 tools/kconfig_new/test/confserver/test_confserver.py
 tools/kconfig_new/test/gen_kconfig_doc/test_kconfig_out.py
 tools/kconfig_new/test/gen_kconfig_doc/test_target_visibility.py
-tools/ldgen/fragments.py
-tools/ldgen/generation.py
 tools/ldgen/ldgen.py
-tools/ldgen/ldgen_common.py
-tools/ldgen/linker_script.py
-tools/ldgen/output_commands.py
-tools/ldgen/sdkconfig.py
+tools/ldgen/ldgen/entity.py
+tools/ldgen/ldgen/fragments.py
+tools/ldgen/ldgen/generation.py
+tools/ldgen/ldgen/linker_script.py
+tools/ldgen/ldgen/output_commands.py
+tools/ldgen/ldgen/sdkconfig.py
 tools/ldgen/test/test_entity.py
 tools/ldgen/test/test_fragments.py
 tools/ldgen/test/test_generation.py

+ 4 - 4
tools/esp_app_trace/test/logtrace/test.sh

@@ -1,8 +1,8 @@
 #!/usr/bin/env bash
 
-{ coverage debug sys \
-    && coverage erase &> output \
-    && coverage run -a $IDF_PATH/tools/esp_app_trace/logtrace_proc.py adc_log.trc test.elf &>> output \
+{ python -m coverage debug sys \
+    && python -m coverage erase &> output \
+    && python -m coverage run -a $IDF_PATH/tools/esp_app_trace/logtrace_proc.py adc_log.trc test.elf &>> output \
     && diff output expected_output \
-    && coverage report \
+    && python -m coverage report \
 ; } || { echo 'The test for logtrace_proc has failed. Please examine the artifacts.' ; exit 1; }

+ 16 - 16
tools/esp_app_trace/test/sysview/test.sh

@@ -1,29 +1,29 @@
 #!/usr/bin/env bash
 
-{ coverage debug sys \
-    && coverage erase &> output \
-    && coverage run -a $IDF_PATH/tools/esp_app_trace/sysviewtrace_proc.py -d -p -b test.elf cpu0.svdat cpu1.svdat &>> output \
+{ python -m coverage debug sys \
+    && python -m coverage erase &> output \
+    && python -m coverage run -a $IDF_PATH/tools/esp_app_trace/sysviewtrace_proc.py -d -p -b test.elf cpu0.svdat cpu1.svdat &>> output \
     && diff output expected_output \
-    && coverage report \
+    && python -m coverage report \
 ; } || { echo 'The test for sysviewtrace_proc has failed. Please examine the artifacts.' ; exit 1; }
 
-{ coverage debug sys \
-    && coverage erase &> output.json \
-    && coverage run -a $IDF_PATH/tools/esp_app_trace/sysviewtrace_proc.py -j -b test.elf cpu0.svdat cpu1.svdat &>> output.json \
+{ python -m coverage debug sys \
+    && python -m coverage erase &> output.json \
+    && python -m coverage run -a $IDF_PATH/tools/esp_app_trace/sysviewtrace_proc.py -j -b test.elf cpu0.svdat cpu1.svdat &>> output.json \
     && diff output.json expected_output.json \
-    && coverage report \
+    && python -m coverage report \
 ; } || { echo 'The test for sysviewtrace_proc JSON functionality has failed. Please examine the artifacts.' ; exit 1; }
 
-{ coverage debug sys \
-    && coverage erase &> output \
-    && coverage run -a $IDF_PATH/tools/esp_app_trace/sysviewtrace_proc.py -d -p -b sysview_tracing_heap_log.elf heap_log_mcore.svdat &>> output \
+{ python -m coverage debug sys \
+    && python -m coverage erase &> output \
+    && python -m coverage run -a $IDF_PATH/tools/esp_app_trace/sysviewtrace_proc.py -d -p -b sysview_tracing_heap_log.elf heap_log_mcore.svdat &>> output \
     && diff output expected_output_mcore \
-    && coverage report \
+    && python -m coverage report \
 ; } || { echo 'The test for mcore sysviewtrace_proc functionality has failed. Please examine the artifacts.' ; exit 1; }
 
-{ coverage debug sys \
-    && coverage erase &> output.json \
-    && coverage run -a $IDF_PATH/tools/esp_app_trace/sysviewtrace_proc.py -j -b sysview_tracing_heap_log.elf heap_log_mcore.svdat &>> output.json \
+{ python -m coverage debug sys \
+    && python -m coverage erase &> output.json \
+    && python -m coverage run -a $IDF_PATH/tools/esp_app_trace/sysviewtrace_proc.py -j -b sysview_tracing_heap_log.elf heap_log_mcore.svdat &>> output.json \
     && diff output.json expected_output_mcore.json \
-    && coverage report \
+    && python -m coverage report \
 ; } || { echo 'The test for mcore sysviewtrace_proc JSON functionality has failed. Please examine the artifacts.' ; exit 1; }

+ 0 - 607
tools/ldgen/fragments.py

@@ -1,607 +0,0 @@
-#
-# SPDX-FileCopyrightText: 2021 Espressif Systems (Shanghai) CO LTD
-# SPDX-License-Identifier: Apache-2.0
-#
-import abc
-import os
-import re
-from collections import namedtuple
-from enum import Enum
-
-from entity import Entity
-from pyparsing import (Combine, Forward, Group, Keyword, Literal, OneOrMore, Optional, Or, ParseFatalException,
-                       Suppress, Word, ZeroOrMore, alphanums, alphas, delimitedList, indentedBlock, nums,
-                       originalTextFor, restOfLine)
-from sdkconfig import SDKConfig
-
-
-class FragmentFile():
-    """
-    Processes a fragment file and stores all parsed fragments. For
-    more information on how this class interacts with classes for the different fragment types,
-    see description of Fragment.
-    """
-
-    def __init__(self, fragment_file, sdkconfig):
-        try:
-            fragment_file = open(fragment_file, 'r')
-        except TypeError:
-            pass
-
-        path = os.path.realpath(fragment_file.name)
-
-        indent_stack = [1]
-
-        class parse_ctx:
-            fragment = None  # current fragment
-            key = ''  # current key
-            keys = list()  # list of keys parsed
-            key_grammar = None  # current key grammar
-
-            @staticmethod
-            def reset():
-                parse_ctx.fragment_instance = None
-                parse_ctx.key = ''
-                parse_ctx.keys = list()
-                parse_ctx.key_grammar = None
-
-        def fragment_type_parse_action(toks):
-            parse_ctx.reset()
-            parse_ctx.fragment = FRAGMENT_TYPES[toks[0]]()  # create instance of the fragment
-            return None
-
-        def expand_conditionals(toks, stmts):
-            try:
-                stmt = toks['value']
-                stmts.append(stmt)
-            except KeyError:
-                try:
-                    conditions = toks['conditional']
-                    for condition in conditions:
-                        try:
-                            _toks = condition[1]
-                            _cond = condition[0]
-                            if sdkconfig.evaluate_expression(_cond):
-                                expand_conditionals(_toks, stmts)
-                                break
-                        except IndexError:
-                            expand_conditionals(condition[0], stmts)
-                except KeyError:
-                    for tok in toks:
-                        expand_conditionals(tok, stmts)
-
-        def key_body_parsed(pstr, loc, toks):
-            stmts = list()
-            expand_conditionals(toks, stmts)
-
-            if parse_ctx.key_grammar.min and len(stmts) < parse_ctx.key_grammar.min:
-                raise ParseFatalException(pstr, loc, "fragment requires at least %d values for key '%s'" %
-                                          (parse_ctx.key_grammar.min, parse_ctx.key))
-
-            if parse_ctx.key_grammar.max and len(stmts) > parse_ctx.key_grammar.max:
-                raise ParseFatalException(pstr, loc, "fragment requires at most %d values for key '%s'" %
-                                          (parse_ctx.key_grammar.max, parse_ctx.key))
-
-            try:
-                parse_ctx.fragment.set_key_value(parse_ctx.key, stmts)
-            except Exception as e:
-                raise ParseFatalException(pstr, loc, "unable to add key '%s'; %s" % (parse_ctx.key, str(e)))
-            return None
-
-        key = Word(alphanums + '_') + Suppress(':')
-        key_stmt = Forward()
-
-        condition_block = indentedBlock(key_stmt, indent_stack)
-        key_stmts = OneOrMore(condition_block)
-        key_body = Suppress(key) + key_stmts
-        key_body.setParseAction(key_body_parsed)
-
-        condition = originalTextFor(SDKConfig.get_expression_grammar()).setResultsName('condition')
-        if_condition = Group(Suppress('if') + condition + Suppress(':') + condition_block)
-        elif_condition = Group(Suppress('elif') + condition + Suppress(':') + condition_block)
-        else_condition = Group(Suppress('else') + Suppress(':') + condition_block)
-        conditional = (if_condition + Optional(OneOrMore(elif_condition)) + Optional(else_condition)).setResultsName('conditional')
-
-        def key_parse_action(pstr, loc, toks):
-            key = toks[0]
-
-            if key in parse_ctx.keys:
-                raise ParseFatalException(pstr, loc, "duplicate key '%s' value definition" % parse_ctx.key)
-
-            parse_ctx.key = key
-            parse_ctx.keys.append(key)
-
-            try:
-                parse_ctx.key_grammar = parse_ctx.fragment.get_key_grammars()[key]
-                key_grammar = parse_ctx.key_grammar.grammar
-            except KeyError:
-                raise ParseFatalException(pstr, loc, "key '%s' is not supported by fragment" % key)
-            except Exception as e:
-                raise ParseFatalException(pstr, loc, "unable to parse key '%s'; %s" % (key, str(e)))
-
-            key_stmt << (conditional | Group(key_grammar).setResultsName('value'))
-
-            return None
-
-        def name_parse_action(pstr, loc, toks):
-            parse_ctx.fragment.name = toks[0]
-
-        key.setParseAction(key_parse_action)
-
-        ftype = Word(alphas).setParseAction(fragment_type_parse_action)
-        fid = Suppress(':') + Word(alphanums + '_.').setResultsName('name')
-        fid.setParseAction(name_parse_action)
-        header = Suppress('[') + ftype + fid + Suppress(']')
-
-        def fragment_parse_action(pstr, loc, toks):
-            key_grammars = parse_ctx.fragment.get_key_grammars()
-            required_keys = set([k for (k,v) in key_grammars.items() if v.required])
-            present_keys = required_keys.intersection(set(parse_ctx.keys))
-            if present_keys != required_keys:
-                raise ParseFatalException(pstr, loc, 'required keys %s for fragment not found' %
-                                          list(required_keys - present_keys))
-            return parse_ctx.fragment
-
-        fragment_stmt = Forward()
-        fragment_block = indentedBlock(fragment_stmt, indent_stack)
-
-        fragment_if_condition = Group(Suppress('if') + condition + Suppress(':') + fragment_block)
-        fragment_elif_condition = Group(Suppress('elif') + condition + Suppress(':') + fragment_block)
-        fragment_else_condition = Group(Suppress('else') + Suppress(':') + fragment_block)
-        fragment_conditional = (fragment_if_condition + Optional(OneOrMore(fragment_elif_condition)) +
-                                Optional(fragment_else_condition)).setResultsName('conditional')
-
-        fragment = (header + OneOrMore(indentedBlock(key_body, indent_stack, False))).setResultsName('value')
-        fragment.setParseAction(fragment_parse_action)
-        fragment.ignore('#' + restOfLine)
-
-        deprecated_mapping = DeprecatedMapping.get_fragment_grammar(sdkconfig, fragment_file.name).setResultsName('value')
-
-        fragment_stmt << (Group(deprecated_mapping) | Group(fragment) | Group(fragment_conditional))
-
-        def fragment_stmt_parsed(pstr, loc, toks):
-            stmts = list()
-            expand_conditionals(toks, stmts)
-            return stmts
-
-        parser = ZeroOrMore(fragment_stmt)
-        parser.setParseAction(fragment_stmt_parsed)
-
-        self.fragments = parser.parseFile(fragment_file, parseAll=True)
-
-        for fragment in self.fragments:
-            fragment.path = path
-
-
-class Fragment():
-    """
-    Base class for a fragment that can be parsed from a fragment file. All fragments
-    share the common grammar:
-
-    [type:name]
-    key1:value1
-    key2:value2
-    ...
-
-    Supporting a new fragment type means deriving a concrete class which specifies
-    key-value pairs that the fragment supports and what to do with the parsed key-value pairs.
-
-    The new fragment must also be appended to FRAGMENT_TYPES, specifying the
-    keyword for the type and the derived class.
-
-    The key of the key-value pair is a simple keyword string. Other parameters
-    that describe the key-value pair is specified in Fragment.KeyValue:
-        1. grammar - pyparsing grammar to parse the value of key-value pair
-        2. min - the minimum number of value in the key entry, None means no minimum
-        3. max - the maximum number of value in the key entry, None means no maximum
-        4. required - if the key-value pair is required in the fragment
-
-    Setting min=max=1 means that the key has a single value.
-
-    FragmentFile provides conditional expression evaluation, enforcing
-    the parameters for Fragment.Keyvalue.
-    """
-    __metaclass__ = abc.ABCMeta
-
-    KeyValue = namedtuple('KeyValue', 'grammar min max required')
-
-    IDENTIFIER = Word(alphas + '_', alphanums + '_')
-    ENTITY = Word(alphanums + '.-_$+')
-
-    @abc.abstractmethod
-    def set_key_value(self, key, parse_results):
-        pass
-
-    @abc.abstractmethod
-    def get_key_grammars(self):
-        pass
-
-
-class Sections(Fragment):
-    """
-    Fragment which contains list of input sections.
-
-    [sections:<name>]
-    entries:
-        .section1
-        .section2
-        ...
-    """
-
-    # Unless quoted, symbol names start with a letter, underscore, or point
-    # and may include any letters, underscores, digits, points, and hyphens.
-    GNU_LD_SYMBOLS = Word(alphas + '_.', alphanums + '._-')
-
-    entries_grammar = Combine(GNU_LD_SYMBOLS + Optional('+'))
-
-    grammars = {
-        'entries': Fragment.KeyValue(entries_grammar.setResultsName('section'), 1, None, True)
-    }
-
-    """
-    Utility function that returns a list of sections given a sections fragment entry,
-    with the '+' notation and symbol concatenation handled automatically.
-    """
-    @staticmethod
-    def get_section_data_from_entry(sections_entry, symbol=None):
-        if not symbol:
-            sections = list()
-            sections.append(sections_entry.replace('+', ''))
-            sections.append(sections_entry.replace('+', '.*'))
-            return sections
-        else:
-            if sections_entry.endswith('+'):
-                section = sections_entry.replace('+', '.*')
-                expansion = section.replace('.*', '.' + symbol)
-                return (section, expansion)
-            else:
-                return (sections_entry, None)
-
-    def set_key_value(self, key, parse_results):
-        if key == 'entries':
-            self.entries = set()
-            for result in parse_results:
-                self.entries.add(result['section'])
-
-    def get_key_grammars(self):
-        return self.__class__.grammars
-
-
-class Scheme(Fragment):
-    """
-    Fragment which defines where the input sections defined in a Sections fragment
-    is going to end up, the target. The targets are markers in a linker script template
-    (see LinkerScript in linker_script.py).
-
-    [scheme:<name>]
-    entries:
-        sections1 -> target1
-        ...
-    """
-
-    grammars = {
-        'entries': Fragment.KeyValue(Fragment.IDENTIFIER.setResultsName('sections') + Suppress('->') +
-                                     Fragment.IDENTIFIER.setResultsName('target'), 1, None, True)
-    }
-
-    def set_key_value(self, key, parse_results):
-        if key == 'entries':
-            self.entries = set()
-            for result in parse_results:
-                self.entries.add((result['sections'], result['target']))
-
-    def get_key_grammars(self):
-        return self.__class__.grammars
-
-
-class Mapping(Fragment):
-    """
-    Fragment which attaches a scheme to entities (see Entity in entity.py), specifying where the input
-    sections of the entity will end up.
-
-    [mapping:<name>]
-    archive: lib1.a
-    entries:
-        obj1:symbol1 (scheme1); section1 -> target1 KEEP SURROUND(sym1) ...
-        obj2 (scheme2)
-        ...
-
-    Ultimately, an `entity (scheme)` entry generates an
-    input section description (see https://sourceware.org/binutils/docs/ld/Input-Section.html)
-    in the output linker script. It is possible to attach 'flags' to the
-    `entity (scheme)` to generate different output commands or to
-    emit additional keywords in the generated input section description. The
-    input section description, as well as other output commands, is defined in
-    output_commands.py.
-    """
-
-    class Flag():
-        PRE_POST = (Optional(Suppress(',') + Suppress('pre').setParseAction(lambda: True).setResultsName('pre')) +
-                    Optional(Suppress(',') + Suppress('post').setParseAction(lambda: True).setResultsName('post')))
-
-    class Surround(Flag):
-        def __init__(self, symbol):
-            self.symbol = symbol
-            self.pre = True
-            self.post = True
-
-        @staticmethod
-        def get_grammar():
-            # SURROUND(symbol)
-            #
-            # '__symbol_start', '__symbol_end' is generated before and after
-            # the corresponding input section description, respectively.
-            grammar = (Keyword('SURROUND').suppress() +
-                       Suppress('(') +
-                       Fragment.IDENTIFIER.setResultsName('symbol') +
-                       Suppress(')'))
-
-            grammar.setParseAction(lambda tok: Mapping.Surround(tok.symbol))
-            return grammar
-
-        def __eq__(self, other):
-            return (isinstance(other, Mapping.Surround) and
-                    self.symbol == other.symbol)
-
-    class Align(Flag):
-
-        def __init__(self, alignment, pre=True, post=False):
-            self.alignment = alignment
-            self.pre = pre
-            self.post = post
-
-        @staticmethod
-        def get_grammar():
-            # ALIGN(alignment, [, pre, post]).
-            #
-            # Generates alignment command before and/or after the corresponding
-            # input section description, depending whether pre, post or
-            # both are specified.
-            grammar = (Keyword('ALIGN').suppress() +
-                       Suppress('(') +
-                       Word(nums).setResultsName('alignment') +
-                       Mapping.Flag.PRE_POST +
-                       Suppress(')'))
-
-            def on_parse(tok):
-                alignment = int(tok.alignment)
-                if tok.pre == '' and tok.post == '':
-                    res = Mapping.Align(alignment)
-                elif tok.pre != '' and tok.post == '':
-                    res = Mapping.Align(alignment, tok.pre)
-                elif tok.pre == '' and tok.post != '':
-                    res = Mapping.Align(alignment, False, tok.post)
-                else:
-                    res = Mapping.Align(alignment, tok.pre, tok.post)
-                return res
-
-            grammar.setParseAction(on_parse)
-            return grammar
-
-        def __eq__(self, other):
-            return (isinstance(other, Mapping.Align) and
-                    self.alignment == other.alignment and
-                    self.pre == other.pre and
-                    self.post == other.post)
-
-    class Keep(Flag):
-
-        def __init__(self):
-            pass
-
-        @staticmethod
-        def get_grammar():
-            # KEEP()
-            #
-            # Surrounds input section description with KEEP command.
-            grammar = Keyword('KEEP()').setParseAction(Mapping.Keep)
-            return grammar
-
-        def __eq__(self, other):
-            return isinstance(other, Mapping.Keep)
-
-    class Sort(Flag):
-        class Type(Enum):
-            NAME = 0
-            ALIGNMENT = 1
-            INIT_PRIORITY = 2
-
-        def __init__(self, first, second=None):
-            self.first = first
-            self.second = second
-
-        @staticmethod
-        def get_grammar():
-            # SORT([sort_by_first, sort_by_second])
-            #
-            # where sort_by_first, sort_by_second = {name, alignment, init_priority}
-            #
-            # Emits SORT_BY_NAME, SORT_BY_ALIGNMENT or SORT_BY_INIT_PRIORITY
-            # depending on arguments. Nested sort follows linker script rules.
-            keywords = Keyword('name') | Keyword('alignment') | Keyword('init_priority')
-            grammar = (Keyword('SORT').suppress() + Suppress('(') +
-                       keywords.setResultsName('first') +
-                       Optional(Suppress(',') + keywords.setResultsName('second')) + Suppress(')'))
-
-            grammar.setParseAction(lambda tok: Mapping.Sort(tok.first, tok.second if tok.second != '' else None))
-            return grammar
-
-        def __eq__(self, other):
-            return (isinstance(other, Mapping.Sort) and
-                    self.first == other.first and
-                    self.second == other.second)
-
-    def __init__(self):
-        Fragment.__init__(self)
-        self.entries = set()
-        # k = (obj, symbol, scheme)
-        # v = list((section, target), Mapping.Flag))
-        self.flags = dict()
-        self.deprecated = False
-
-    def set_key_value(self, key, parse_results):
-        if key == 'archive':
-            self.archive = parse_results[0]['archive']
-        elif key == 'entries':
-            for result in parse_results:
-                obj = None
-                symbol = None
-                scheme = None
-
-                obj = result['object']
-
-                try:
-                    symbol = result['symbol']
-                except KeyError:
-                    pass
-
-                scheme = result['scheme']
-
-                mapping = (obj, symbol, scheme)
-                self.entries.add(mapping)
-
-                try:
-                    parsed_flags = result['sections_target_flags']
-                except KeyError:
-                    parsed_flags = []
-
-                if parsed_flags:
-                    entry_flags = []
-                    for pf in parsed_flags:
-                        entry_flags.append((pf.sections, pf.target, list(pf.flags)))
-
-                    try:
-                        existing_flags = self.flags[mapping]
-                    except KeyError:
-                        existing_flags = list()
-                        self.flags[mapping] = existing_flags
-
-                    existing_flags.extend(entry_flags)
-
-    def get_key_grammars(self):
-        # There are three possible patterns for mapping entries:
-        #       obj:symbol (scheme)
-        #       obj (scheme)
-        #       * (scheme)
-        # Flags can be specified for section->target in the scheme specified, ex:
-        #       obj (scheme); section->target SURROUND(symbol), section2->target2 ALIGN(4)
-        obj = Fragment.ENTITY.setResultsName('object')
-        symbol = Suppress(':') + Fragment.IDENTIFIER.setResultsName('symbol')
-        scheme = Suppress('(') + Fragment.IDENTIFIER.setResultsName('scheme') + Suppress(')')
-
-        # The flags are specified for section->target in the scheme specified
-        sections_target = Scheme.grammars['entries'].grammar
-
-        flag = Or([f.get_grammar() for f in [Mapping.Keep, Mapping.Align, Mapping.Surround, Mapping.Sort]])
-
-        section_target_flags = Group(sections_target + Group(OneOrMore(flag)).setResultsName('flags'))
-
-        pattern1 = obj + symbol
-        pattern2 = obj
-        pattern3 = Literal(Entity.ALL).setResultsName('object')
-
-        entry = ((pattern1 | pattern2 | pattern3) + scheme +
-                 Optional(Suppress(';') + delimitedList(section_target_flags).setResultsName('sections_target_flags')))
-
-        grammars = {
-            'archive': Fragment.KeyValue(Or([Fragment.ENTITY, Word(Entity.ALL)]).setResultsName('archive'), 1, 1, True),
-            'entries': Fragment.KeyValue(entry, 0, None, True)
-        }
-
-        return grammars
-
-
-class DeprecatedMapping():
-    """
-    Mapping fragment with old grammar in versions older than ESP-IDF v4.0. Does not conform to
-    requirements of the Fragment class and thus is limited when it comes to conditional expression
-    evaluation.
-    """
-
-    # Name of the default condition entry
-    DEFAULT_CONDITION = 'default'
-
-    @staticmethod
-    def get_fragment_grammar(sdkconfig, fragment_file):
-
-        # Match header [mapping]
-        header = Suppress('[') + Suppress('mapping') + Suppress(']')
-
-        # There are three possible patterns for mapping entries:
-        #       obj:symbol (scheme)
-        #       obj (scheme)
-        #       * (scheme)
-        obj = Fragment.ENTITY.setResultsName('object')
-        symbol = Suppress(':') + Fragment.IDENTIFIER.setResultsName('symbol')
-        scheme = Suppress('(') + Fragment.IDENTIFIER.setResultsName('scheme') + Suppress(')')
-
-        pattern1 = Group(obj + symbol + scheme)
-        pattern2 = Group(obj + scheme)
-        pattern3 = Group(Literal(Entity.ALL).setResultsName('object') + scheme)
-
-        mapping_entry = pattern1 | pattern2 | pattern3
-
-        # To simplify parsing, classify groups of condition-mapping entry into two types: normal and default
-        # A normal grouping is one with a non-default condition. The default grouping is one which contains the
-        # default condition
-        mapping_entries = Group(ZeroOrMore(mapping_entry)).setResultsName('mappings')
-
-        normal_condition = Suppress(':') + originalTextFor(SDKConfig.get_expression_grammar())
-        default_condition = Optional(Suppress(':') + Literal(DeprecatedMapping.DEFAULT_CONDITION))
-
-        normal_group = Group(normal_condition.setResultsName('condition') + mapping_entries)
-        default_group = Group(default_condition + mapping_entries).setResultsName('default_group')
-
-        normal_groups = Group(ZeroOrMore(normal_group)).setResultsName('normal_groups')
-
-        # Any mapping fragment definition can have zero or more normal group and only one default group as a last entry.
-        archive = Suppress('archive') + Suppress(':') + Fragment.ENTITY.setResultsName('archive')
-        entries = Suppress('entries') + Suppress(':') + (normal_groups + default_group).setResultsName('entries')
-
-        mapping = Group(header + archive + entries)
-        mapping.ignore('#' + restOfLine)
-
-        def parsed_deprecated_mapping(pstr, loc, toks):
-            fragment = Mapping()
-            fragment.archive = toks[0].archive
-            fragment.name = re.sub(r'[^0-9a-zA-Z]+', '_', fragment.archive)
-            fragment.deprecated = True
-
-            fragment.entries = set()
-            condition_true = False
-            for entries in toks[0].entries[0]:
-                condition  = next(iter(entries.condition.asList())).strip()
-                condition_val = sdkconfig.evaluate_expression(condition)
-
-                if condition_val:
-                    for entry in entries[1]:
-                        fragment.entries.add((entry.object, None if entry.symbol == '' else entry.symbol, entry.scheme))
-                    condition_true = True
-                    break
-
-            if not fragment.entries and not condition_true:
-                try:
-                    entries = toks[0].entries[1][1]
-                except IndexError:
-                    entries = toks[0].entries[1][0]
-                for entry in entries:
-                    fragment.entries.add((entry.object, None if entry.symbol == '' else entry.symbol, entry.scheme))
-
-            if not fragment.entries:
-                fragment.entries.add(('*', None, 'default'))
-
-            dep_warning = str(ParseFatalException(pstr, loc,
-                              'Warning: Deprecated old-style mapping fragment parsed in file %s.' % fragment_file))
-
-            print(dep_warning)
-            return fragment
-
-        mapping.setParseAction(parsed_deprecated_mapping)
-        return mapping
-
-
-FRAGMENT_TYPES = {
-    'sections': Sections,
-    'scheme': Scheme,
-    'mapping': Mapping
-}

+ 9 - 9
tools/ldgen/ldgen.py

@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 #
-# SPDX-FileCopyrightText: 2021 Espressif Systems (Shanghai) CO LTD
+# SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
 # SPDX-License-Identifier: Apache-2.0
 #
 
@@ -13,13 +13,13 @@ import sys
 import tempfile
 from io import StringIO
 
-from entity import EntityDB
-from fragments import FragmentFile
-from generation import Generation
-from ldgen_common import LdGenFailure
-from linker_script import LinkerScript
+from ldgen.entity import EntityDB
+from ldgen.fragments import parse_fragment_file
+from ldgen.generation import Generation
+from ldgen.ldgen_common import LdGenFailure
+from ldgen.linker_script import LinkerScript
+from ldgen.sdkconfig import SDKConfig
 from pyparsing import ParseException, ParseFatalException
-from sdkconfig import SDKConfig
 
 try:
     import confgen
@@ -148,12 +148,12 @@ def main():
 
 
         for fragment_file in fragment_files:
             try:
-                fragment_file = FragmentFile(fragment_file, sdkconfig)
+                fragment_file = parse_fragment_file(fragment_file, sdkconfig)
             except (ParseException, ParseFatalException) as e:
                 # ParseException is raised on incorrect grammar
                 # ParseFatalException is raised on correct grammar, but inconsistent contents (ex. duplicate
                 # keys, key unsupported by fragment, unexpected number of values, etc.)
-                raise LdGenFailure('failed to parse %s\n%s' % (fragment_file.name, str(e)))
+                raise LdGenFailure('failed to parse %s\n%s' % (fragment_file, str(e)))
             generation_model.add_fragments_from_file(fragment_file)
 
         mapping_rules = generation_model.generate(sections_infos)

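Illustrative only: a minimal sketch of how the relocated package is driven after this refactor, mirroring the imports and the parse_fragment_file call shown in the ldgen.py hunks above. The fragment/Kconfig/sdkconfig file names, the SDKConfig constructor arguments, and the .fragments attribute are assumptions for the example, not guaranteed by the merge request:

    # Assumed to run with tools/ldgen on the import path and with the listed
    # input files present in the working directory.
    from ldgen.fragments import parse_fragment_file
    from ldgen.sdkconfig import SDKConfig

    sdkconfig = SDKConfig('Kconfig', 'sdkconfig')  # constructor signature assumed
    fragment_file = parse_fragment_file('my_linker_fragment_file.lf', sdkconfig)
    for fragment in fragment_file.fragments:  # .fragments attribute assumed
        print(fragment.name)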
+ 0 - 0
tools/ldgen/__init__.py → tools/ldgen/ldgen/__init__.py


+ 20 - 29
tools/ldgen/entity.py → tools/ldgen/ldgen/entity.py

@@ -1,17 +1,6 @@
 #
-# Copyright 2021 Espressif Systems (Shanghai) CO LTD
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
+# SPDX-License-Identifier: Apache-2.0
 #
 
 import collections
@@ -21,11 +10,11 @@ from enum import Enum
 from functools import total_ordering
 
 from pyparsing import (Group, Literal, OneOrMore, ParseException, SkipTo, Suppress, White, Word, ZeroOrMore, alphas,
-                       nums, restOfLine)
+                       nums, rest_of_line)
 
 
 @total_ordering
-class Entity():
+class Entity:
     """
     """
     An entity refers to a library, object, symbol whose input
     An entity refers to a library, object, symbol whose input
     sections can be placed or excluded from placement.
     sections can be placed or excluded from placement.
@@ -60,7 +49,7 @@ class Entity():
         else:
             raise ValueError("Invalid arguments '(%s, %s, %s)'" % (archive, obj, symbol))
 
-        self.archive  = archive
+        self.archive = archive
         self.obj = obj
         self.symbol = symbol
 
@@ -93,7 +82,7 @@ class Entity():
         return '%s:%s %s' % self.__repr__()
 
     def __repr__(self):
-        return (self.archive, self.obj, self.symbol)
+        return self.archive, self.obj, self.symbol
 
     def __getitem__(self, spec):
         res = None
@@ -108,7 +97,7 @@ class Entity():
         return res
         return res
 
 
 
 
-class EntityDB():
+class EntityDB:
     """
     """
     Collection of entities extracted from libraries known in the build.
     Collection of entities extracted from libraries known in the build.
     Allows retrieving a list of archives, a list of object files in an archive
     Allows retrieving a list of archives, a list of object files in an archive
@@ -127,11 +116,10 @@ class EntityDB():
         archive_path = (Literal('In archive').suppress() +
         archive_path = (Literal('In archive').suppress() +
                         White().suppress() +
                         White().suppress() +
                         # trim the colon and line ending characters from archive_path
                         # trim the colon and line ending characters from archive_path
-                        restOfLine.setResultsName('archive_path').setParseAction(lambda s, loc, toks: s.rstrip(':\n\r ')))
+                        rest_of_line.set_results_name('archive_path').set_parse_action(
+                            lambda s, loc, toks: s.rstrip(':\n\r ')))
         parser = archive_path
         parser = archive_path
 
 
-        results = None
-
         try:
         try:
             results = parser.parseString(first_line, parseAll=True)
             results = parser.parseString(first_line, parseAll=True)
         except ParseException as p:
         except ParseException as p:
@@ -142,7 +130,7 @@ class EntityDB():
 
 
     def _get_infos_from_file(self, info):
     def _get_infos_from_file(self, info):
         # {object}:  file format elf32-xtensa-le
         # {object}:  file format elf32-xtensa-le
-        object_line = SkipTo(':').setResultsName('object') + Suppress(restOfLine)
+        object_line = SkipTo(':').set_results_name('object') + Suppress(rest_of_line)
 
 
         # Sections:
         # Sections:
         # Idx Name ...
         # Idx Name ...
@@ -151,13 +139,14 @@ class EntityDB():
 
 
         # 00 {section} 0000000 ...
         # 00 {section} 0000000 ...
         #              CONTENTS, ALLOC, ....
         #              CONTENTS, ALLOC, ....
-        section_entry = Suppress(Word(nums)) + SkipTo(' ') + Suppress(restOfLine) + \
-            Suppress(ZeroOrMore(Word(alphas) + Literal(',')) + Word(alphas))
-
-        content = Group(object_line + section_start + section_header + Group(OneOrMore(section_entry)).setResultsName('sections'))
-        parser = Group(ZeroOrMore(content)).setResultsName('contents')
+        section_entry = (Suppress(Word(nums)) + SkipTo(' ') + Suppress(rest_of_line)
+                         + Suppress(ZeroOrMore(Word(alphas) + Literal(',')) + Word(alphas)))
 
 
-        results = None
+        content = Group(object_line
+                        + section_start
+                        + section_header
+                        + Group(OneOrMore(section_entry)).set_results_name('sections'))
+        parser = Group(ZeroOrMore(content)).set_results_name('contents')
 
 
         try:
         try:
             results = parser.parseString(info.content, parseAll=True)
             results = parser.parseString(info.content, parseAll=True)
@@ -192,7 +181,9 @@ class EntityDB():
 
 
     def _match_obj(self, archive, obj):
     def _match_obj(self, archive, obj):
         objs = self.get_objects(archive)
         objs = self.get_objects(archive)
-        match_objs = fnmatch.filter(objs, obj + '.o') + fnmatch.filter(objs, obj + '.*.obj') + fnmatch.filter(objs, obj + '.obj')
+        match_objs = (fnmatch.filter(objs, obj + '.o')
+                      + fnmatch.filter(objs, obj + '.*.obj')
+                      + fnmatch.filter(objs, obj + '.obj'))
 
 
         if len(match_objs) > 1:
         if len(match_objs) > 1:
             raise ValueError("Multiple matches for object: '%s: %s': %s" % (archive, obj, str(match_objs)))
             raise ValueError("Multiple matches for object: '%s: %s': %s" % (archive, obj, str(match_objs)))

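Since the docstring above only describes entities informally, here is a small, hedged example of constructing and indexing them; the archive, object and symbol names are placeholders and the printed results reflect the expected, not verified, lookup semantics:

from ldgen.entity import Entity

root = Entity('*')                                   # matches everything
obj = Entity('libfreertos.a', 'port.c')              # archive + object file
sym = Entity('libfreertos.a', 'port.c', 'xPortGetTickRateHz')

# Indexing by specificity returns the corresponding component name.
print(sym[Entity.Specificity.ARCHIVE])               # expected: 'libfreertos.a'

# An archive+object entity is less specific than one that names a symbol.
print(obj.specificity.value < Entity.Specificity.SYMBOL.value)  # expected: True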
+ 473 - 0
tools/ldgen/ldgen/fragments.py

@@ -0,0 +1,473 @@
+#
+# SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
+# SPDX-License-Identifier: Apache-2.0
+#
+
+from typing import Any, Dict, List, Optional, Set, Tuple, Union
+
+from pyparsing import (Combine, Forward, Group, IndentedBlock, Keyword, LineEnd, Literal, OneOrMore, Opt,
+                       ParseFatalException, SkipTo, Suppress, Word, ZeroOrMore, alphanums, alphas, delimited_list,
+                       nums, rest_of_line)
+
+
+class Empty:
+    """
+    Return `Empty()` when the sdkconfig does not meet the conditional statements.
+    """
+
+    def __repr__(self):
+        return '<EMPTY>'
+
+    def __bool__(self):
+        return False
+
+
+class Fragment:
+    """
+    Base class for a fragment that can be parsed from a fragment file.
+    """
+    IDENTIFIER = Word(alphas + '_', alphanums + '_')
+    ENTITY = Word(alphanums + '.-_$+')
+
+    def __init__(self, name: str, entries: Set[Union[str, Tuple[str]]]):
+        self.name = name
+        self.entries = entries
+
+    def __repr__(self):
+        return str(self.__dict__)
+
+
+class Sections(Fragment):
+    """
+    Fragment which contains list of input sections.
+
+    [sections:<name>]
+    entries:
+        .section1
+        .section2
+        ...
+    """
+    # Unless quoted, symbol names start with a letter, underscore, or point
+    # and may include any letters, underscores, digits, points, and hyphens.
+    ENTRY = Combine(Word(alphas + '_.', alphanums + '._-') + Opt('+')) + LineEnd().suppress()
+
+    @staticmethod
+    def parse_entry(toks):
+        # section
+        return toks[0]
+
+    @staticmethod
+    def parse(s, loc, toks):
+        this = toks[0]
+
+        name = this[0]
+        entries = {entry for entry in this[1] if entry}
+
+        if not entries:
+            raise ParseFatalException(s, loc, 'Sections entries shouldn\'t be empty')
+
+        return Sections(name, entries)
+
+    @staticmethod
+    def get_section_data_from_entry(sections_entry, symbol=None):
+        """
+        Returns a list of sections given a sections fragment entry,
+        with the '+' notation and symbol concatenation handled automatically.
+        """
+        if not symbol:
+            sections = list()
+            sections.append(sections_entry.replace('+', ''))
+            sections.append(sections_entry.replace('+', '.*'))
+            return sections
+        else:
+            if sections_entry.endswith('+'):
+                section = sections_entry.replace('+', '.*')
+                expansion = section.replace('.*', '.' + symbol)
+                return section, expansion
+            else:
+                return sections_entry, None
+
+
+class Scheme(Fragment):
+    """
+    Fragment which defines where the input sections defined in a Sections fragment
+    are going to end up, the target. The targets are markers in a linker script template
+    (see LinkerScript in linker_script.py).
+
+    [scheme:<name>]
+    entries:
+        sections1 -> target1
+        ...
+    """
+    ENTRY = Fragment.IDENTIFIER + Suppress('->') + Fragment.IDENTIFIER + LineEnd().suppress()
+
+    @staticmethod
+    def parse_entry(toks):
+        # section, target
+        return toks[0], toks[1]
+
+    @staticmethod
+    def parse(s, loc, toks):
+        this = toks[0]
+
+        name = this[0]
+        entries = {entry for entry in this[1] if entry}
+
+        if not entries:
+            raise ParseFatalException(s, loc, 'Scheme entries shouldn\'t be empty')
+
+        return Scheme(name, entries)
+
+
+class EntryFlag:
+    def __repr__(self):
+        return str(self.__dict__)
+
+
+class Surround(EntryFlag):
+    """
+    SURROUND(symbol)
+
+    '__symbol_start', '__symbol_end' is generated before and after
+    the corresponding input section description, respectively.
+    """
+    SURROUND = (Keyword('SURROUND').suppress()
+                + Suppress('(')
+                + Fragment.IDENTIFIER
+                + Suppress(')'))
+
+    def __init__(self, symbol: str):
+        self.symbol = symbol
+        self.pre = True
+        self.post = True
+
+    def __eq__(self, other):
+        if isinstance(other, Surround):
+            if self.symbol == other.symbol and self.pre == other.pre and self.post == other.post:
+                return True
+
+        return False
+
+    @staticmethod
+    def parse(toks):
+        return Surround(toks[0])
+
+
+class Align(EntryFlag):
+    """
+    ALIGN(alignment, [, pre, post]).
+
+    Generates alignment command before and/or after the corresponding
+    input section description, depending on whether pre, post or
+    both are specified.
+    """
+    PRE = Opt(Suppress(',') + Suppress('pre')).set_results_name('pre')
+    POST = Opt(Suppress(',') + Suppress('post')).set_results_name('post')
+
+    ALIGN = (Keyword('ALIGN').suppress()
+             + Suppress('(')
+             + Word(nums)
+             + PRE
+             + POST
+             + Suppress(')'))
+
+    def __init__(self, alignment, pre=True, post=False):
+        self.alignment = alignment
+        self.pre = pre
+        self.post = post
+
+    def __eq__(self, other):
+        if isinstance(other, Align):
+            if self.alignment == other.alignment and self.pre == other.pre and self.post == other.post:
+                return True
+
+        return False
+
+    @staticmethod
+    def parse(toks):
+        alignment = int(toks[0])
+        if toks.post == '':
+            return Align(alignment)
+
+        if toks.pre == '' and toks.post != '':
+            return Align(alignment, False, True)
+
+        return Align(alignment, True, True)
+
+
+class Keep(EntryFlag):
+    """
+    KEEP()
+
+    Surrounds input section description with KEEP command.
+    """
+    KEEP = Keyword('KEEP()')
+
+    def __eq__(self, other):
+        if isinstance(other, Keep):
+            return True
+
+        return False
+
+    @staticmethod
+    def parse():
+        return Keep()
+
+
+class Sort(EntryFlag):
+    """
+    SORT([sort_by_first, sort_by_second])
+
+    where sort_by_first, sort_by_second = {name, alignment, init_priority}
+
+    Emits SORT_BY_NAME, SORT_BY_ALIGNMENT or SORT_BY_INIT_PRIORITY
+    depending on arguments. Nested sort follows linker script rules.
+    """
+    _keywords = Keyword('name') | Keyword('alignment') | Keyword('init_priority')
+    SORT = (Keyword('SORT').suppress()
+            + Suppress('(')
+            + _keywords.set_results_name('first')
+            + Opt(Suppress(',') + _keywords.set_results_name('second'))
+            + Suppress(')'))
+
+    def __init__(self, first: str, second: Optional[str] = None):
+        self.first = first
+        self.second = second
+
+    def __eq__(self, other):
+        if isinstance(other, Sort):
+            if self.first == other.first and self.second == other.second:
+                return True
+
+        return False
+
+    @staticmethod
+    def parse(toks):
+        return Sort(toks.first, toks.second or None)
+
+
+class Flag:
+    _section_target = Fragment.IDENTIFIER + Suppress('->') + Fragment.IDENTIFIER
+    _flag = (Surround.SURROUND.set_parse_action(Surround.parse)
+             | Align.ALIGN.set_parse_action(Align.parse)
+             | Keep.KEEP.set_parse_action(Keep.parse)
+             | Sort.SORT.set_parse_action(Sort.parse))
+
+    FLAG = _section_target + OneOrMore(_flag)
+
+    def __init__(self, section: str, target: str, flags: List[EntryFlag]):
+        self.section = section
+        self.target = target
+        self.flags = flags
+
+    def __eq__(self, other):
+        if isinstance(other, Flag):
+            if self.section == other.section and self.target == other.target and len(self.flags) == len(other.flags):
+                for i, j in zip(self.flags, other.flags):
+                    if i != j:
+                        break
+                else:
+                    return True
+
+        return False
+
+    @staticmethod
+    def parse(toks):
+        return Flag(toks[0], toks[1], toks[2:])
+
+    def __repr__(self):
+        return str(self.__dict__)
+
+
+class Mapping(Fragment):
+    """
+    Fragment which attaches a scheme to entities (see Entity in entity.py), specifying where the input
+    sections of the entity will end up.
+
+    [mapping:<name>]
+    archive: lib1.a
+    entries:
+        obj1:symbol1 (scheme1); section1 -> target1 KEEP SURROUND(sym1) ...
+        obj2 (scheme2)
+        ...
+
+    Ultimately, an `entity (scheme)` entry generates an
+    input section description (see https://sourceware.org/binutils/docs/ld/Input-Section.html)
+    in the output linker script. It is possible to attach 'flags' to the
+    `entity (scheme)` to generate different output commands or to
+    emit additional keywords in the generated input section description. The
+    input section description, as well as other output commands, is defined in
+    output_commands.py.
+    """
+
+    _any = Literal('*')
+    _obj = Word(alphas + '_', alphanums + '-_').set_results_name('object')
+    _sym = Fragment.IDENTIFIER.set_results_name('symbol')
+
+    # There are three possible patterns for mapping entries:
+    #       obj:symbol (scheme)
+    #       obj (scheme)
+    #       * (scheme)
+    _entry = (((_obj + Opt(Suppress(':') + _sym)) | _any.set_results_name('object'))
+              + Suppress('(')
+              + Fragment.IDENTIFIER.set_results_name('section')
+              + Suppress(')'))
+
+    ENTRY = _entry + LineEnd().suppress()
+    ARCHIVE = (Word(alphanums + '.-_$+') | Literal('*')) + LineEnd().suppress()
+
+    # Flags can be specified for section->target in the scheme specified, ex:
+    #       obj (scheme);
+    #           section->target SURROUND(symbol),
+    #           section2->target2 ALIGN(4)
+    ENTRY_WITH_FLAG = (_entry + Suppress(';')
+                       + delimited_list(Flag.FLAG.set_parse_action(Flag.parse)))
+
+    def __init__(self, archive: str, flags: Dict[Any, Flag], *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.archive = archive
+        self.flags = flags
+
+    @staticmethod
+    def parse_archive(s, loc, toks):
+        this = toks[0][0]
+        if len(this) != 1:
+            raise ParseFatalException(s, loc, 'Could only specify one archive file in one mapping fragment')
+
+        return this[0]
+
+    @staticmethod
+    def parse_entry(toks):
+        return toks.object, toks.symbol or None, toks.section
+
+    @staticmethod
+    def parse_entry_with_flag(toks):
+        entry = toks.object, toks.symbol or None, toks.section
+        return {
+            entry: [tok for tok in toks if isinstance(tok, Flag)]
+        }
+
+    @staticmethod
+    def parse_entries(toks):
+        return toks[0]
+
+    @staticmethod
+    def parse(toks):
+        this = toks[0]
+
+        name = this[0]
+        archive = this[1]
+        entries_or_dict_with_flags = this[2]
+
+        entries = set()
+        flags = dict()
+        for item in entries_or_dict_with_flags:
+            if isinstance(item, Empty):
+                continue
+            elif isinstance(item, dict):  # entry with flags
+                for k, v in item.items():
+                    entries.add(k)
+                    if k in flags:
+                        flags[k].extend(v)
+                    else:
+                        flags[k] = v
+            else:
+                entries.add(item)
+
+        return Mapping(archive=archive, name=name, entries=entries, flags=flags)
+
+
+class FragmentFile:
+    """
+    Processes a fragment file and stores all parsed fragments. For
+    more information on how this class interacts with classes for the different fragment types,
+    see description of Fragment.
+    """
+
+    def __init__(self, fragments: List[Fragment]):
+        self.path = None  # assign later, couldn't pass extra argument while parsing
+        self.fragments: List[Fragment] = fragments
+
+    def __repr__(self):
+        return str(self.__dict__)
+
+
+def parse_fragment_file(path, sdkconfig):
+    def parse_conditional(toks):
+        this = toks[0]
+        for stmt in this:
+            if stmt[0] in ['if', 'elif']:  # if/elif
+                if sdkconfig.evaluate_expression(stmt.condition):
+                    return stmt[-1]
+            else:  # else
+                return stmt[-1]
+
+        return Empty()
+
+    def get_conditional_stmt(_stmt):
+        condition = SkipTo(':').set_results_name('condition') + Suppress(':')
+        _suite = IndentedBlock(_stmt)
+
+        if_decl = Literal('if') + condition
+        elif_decl = Literal('elif') + condition
+        else_decl = Literal('else:')
+        if_ = Group(if_decl + _suite)
+        elif_ = Group(elif_decl + _suite)
+        else_ = Group(else_decl + _suite)
+        return Group(if_ + Opt(OneOrMore(elif_)) + Opt(else_)).set_parse_action(parse_conditional)
+
+    def get_suite(_stmt):
+        __stmt = Forward()
+        __conditional = get_conditional_stmt(__stmt)
+        __stmt <<= (comment
+                    | _stmt
+                    | __conditional)
+        return IndentedBlock(__stmt)
+
+    def parse(toks):
+        return FragmentFile([tok for tok in toks if not isinstance(tok, Empty)])
+
+    # comment
+    comment = (Literal('#') + rest_of_line).set_parse_action(lambda s, l, t: Empty())
+
+    # section
+    section_entry = Sections.ENTRY.set_parse_action(Sections.parse_entry)
+    section_entries_suite = get_suite(section_entry)
+    section_header = Suppress('[sections:') + Fragment.IDENTIFIER + Suppress(']') + LineEnd().suppress()
+    section = Group(section_header
+                    + Suppress('entries:')
+                    + section_entries_suite).set_parse_action(Sections.parse)
+
+    # scheme
+    scheme_entry = Scheme.ENTRY.set_parse_action(Scheme.parse_entry)
+    scheme_entries_suite = get_suite(scheme_entry)
+    scheme_header = Suppress('[scheme:') + Fragment.IDENTIFIER + Suppress(']') + LineEnd().suppress()
+    scheme = Group(scheme_header
+                   + Suppress('entries:')
+                   + scheme_entries_suite).set_parse_action(Scheme.parse)
+    # mapping
+    mapping_archive = Mapping.ARCHIVE
+    mapping_archive_suite = get_suite(mapping_archive)
+
+    mapping_entry = Mapping.ENTRY.set_parse_action(Mapping.parse_entry)
+    mapping_entry_with_flag = Mapping.ENTRY_WITH_FLAG.set_parse_action(Mapping.parse_entry_with_flag)
+    mapping_entries_suite = get_suite(mapping_entry | mapping_entry_with_flag)
+
+    mapping_header = Suppress('[mapping:') + Fragment.IDENTIFIER + Suppress(']')
+    mapping = Group(mapping_header
+                    + Group(Suppress('archive:')
+                            + mapping_archive_suite).set_parse_action(Mapping.parse_archive)
+                    + Group(Suppress('entries:')
+                            + mapping_entries_suite).set_parse_action(Mapping.parse_entries)
+                    ).set_parse_action(Mapping.parse)
+
+    # highest level
+    fragment = (section
+                | scheme
+                | mapping
+                | get_conditional_stmt(section | scheme | mapping))
+    parser = ZeroOrMore(fragment).ignore(comment).set_parse_action(parse)
+    fragment_file = parser.parse_file(path, parse_all=True)[0]
+    fragment_file.path = path
+
+    return fragment_file

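Putting the grammar documented in the new fragments.py together, a hedged end-to-end example might look as follows; the Kconfig/sdkconfig paths, the PERFORMANCE_LEVEL option and the archive/object names are placeholders, not files from this repository:

import tempfile

from ldgen.fragments import parse_fragment_file
from ldgen.sdkconfig import SDKConfig

FRAGMENT = """
[sections:text]
entries:
    .text+
    .literal+

[scheme:noflash]
entries:
    text -> iram0_text

[mapping:my_component]
archive: libmy_component.a
entries:
    if PERFORMANCE_LEVEL = 1:
        obj1 (noflash)
    else:
        * (noflash)
    obj2 (noflash); text -> iram0_text KEEP() SORT(name)
"""

sdkconfig = SDKConfig('Kconfig', 'sdkconfig')            # placeholder paths
with tempfile.NamedTemporaryFile('w', suffix='.lf', delete=False) as f:
    f.write(FRAGMENT)

fragment_file = parse_fragment_file(f.name, sdkconfig)
print(fragment_file.fragments)                           # parsed Sections, Scheme and Mapping objects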
+ 52 - 51
tools/ldgen/generation.py → tools/ldgen/ldgen/generation.py

@@ -1,5 +1,5 @@
 #
 #
-# SPDX-FileCopyrightText: 2021 Espressif Systems (Shanghai) CO LTD
+# SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
 # SPDX-License-Identifier: Apache-2.0
 # SPDX-License-Identifier: Apache-2.0
 #
 #
 
 
@@ -8,13 +8,13 @@ import fnmatch
 import itertools
 import itertools
 from collections import namedtuple
 from collections import namedtuple
 
 
-from entity import Entity
-from fragments import Mapping, Scheme, Sections
-from ldgen_common import LdGenFailure
-from output_commands import AlignAtAddress, InputSectionDesc, SymbolAtAddress
+from .entity import Entity
+from .fragments import Keep, Scheme, Sections, Sort, Surround
+from .ldgen_common import LdGenFailure
+from .output_commands import AlignAtAddress, InputSectionDesc, SymbolAtAddress
 
 
 
 
-class Placement():
+class Placement:
     """
     """
     A Placement is an assignment of an entity's input sections to a target
     A Placement is an assignment of an entity's input sections to a target
     in the output linker script - a precursor to the input section description.
     in the output linker script - a precursor to the input section description.
@@ -97,7 +97,7 @@ class Placement():
         self.subplacements.add(subplacement)
         self.subplacements.add(subplacement)
 
 
 
 
-class EntityNode():
+class EntityNode:
     """
     """
     Node in entity tree. An EntityNode
     Node in entity tree. An EntityNode
     is created from an Entity (see entity.py).
     is created from an Entity (see entity.py).
@@ -134,12 +134,12 @@ class EntityNode():
 
 
     def add_child(self, entity):
     def add_child(self, entity):
         child_specificity = self.entity.specificity.value + 1
         child_specificity = self.entity.specificity.value + 1
-        assert(child_specificity <= Entity.Specificity.SYMBOL.value)
+        assert (child_specificity <= Entity.Specificity.SYMBOL.value)
         name = entity[Entity.Specificity(child_specificity)]
         name = entity[Entity.Specificity(child_specificity)]
-        assert(name and name != Entity.ALL)
+        assert (name and name != Entity.ALL)
 
 
         child = [c for c in self.children if c.name == name]
         child = [c for c in self.children if c.name == name]
-        assert(len(child) <= 1)
+        assert (len(child) <= 1)
 
 
         if not child:
         if not child:
             child = self.child_t(self, name)
             child = self.child_t(self, name)
@@ -174,7 +174,7 @@ class EntityNode():
         for sections in self.get_output_sections():
         for sections in self.get_output_sections():
             placement = self.placements[sections]
             placement = self.placements[sections]
             if placement.is_significant():
             if placement.is_significant():
-                assert(placement.node == self)
+                assert (placement.node == self)
 
 
                 keep = False
                 keep = False
                 sort = None
                 sort = None
@@ -183,16 +183,16 @@ class EntityNode():
                 placement_flags = placement.flags if placement.flags is not None else []
                 placement_flags = placement.flags if placement.flags is not None else []
 
 
                 for flag in placement_flags:
                 for flag in placement_flags:
-                    if isinstance(flag, Mapping.Keep):
+                    if isinstance(flag, Keep):
                         keep = True
                         keep = True
-                    elif isinstance(flag, Mapping.Sort):
+                    elif isinstance(flag, Sort):
                         sort = (flag.first, flag.second)
                         sort = (flag.first, flag.second)
                     else:  # SURROUND or ALIGN
                     else:  # SURROUND or ALIGN
                         surround_type.append(flag)
                         surround_type.append(flag)
 
 
                 for flag in surround_type:
                 for flag in surround_type:
                     if flag.pre:
                     if flag.pre:
-                        if isinstance(flag, Mapping.Surround):
+                        if isinstance(flag, Surround):
                             commands[placement.target].append(SymbolAtAddress('_%s_start' % flag.symbol))
                             commands[placement.target].append(SymbolAtAddress('_%s_start' % flag.symbol))
                         else:  # ALIGN
                         else:  # ALIGN
                             commands[placement.target].append(AlignAtAddress(flag.alignment))
                             commands[placement.target].append(AlignAtAddress(flag.alignment))
@@ -202,11 +202,12 @@ class EntityNode():
                 placement_sections = frozenset(placement.sections)
                 placement_sections = frozenset(placement.sections)
                 command_sections = sections if sections == placement_sections else placement_sections
                 command_sections = sections if sections == placement_sections else placement_sections
 
 
-                command = InputSectionDesc(placement.node.entity, command_sections, [e.node.entity for e in placement.exclusions], keep, sort)
+                command = InputSectionDesc(placement.node.entity, command_sections,
+                                           [e.node.entity for e in placement.exclusions], keep, sort)
                 commands[placement.target].append(command)
                 commands[placement.target].append(command)
 
 
-                # Generate commands for intermediate, non-explicit exclusion placements here, so that they can be enclosed by
-                # flags that affect the parent placement.
+                # Generate commands for intermediate, non-explicit exclusion placements here,
+                # so that they can be enclosed by flags that affect the parent placement.
                 for subplacement in placement.subplacements:
                 for subplacement in placement.subplacements:
                     if not subplacement.flags and not subplacement.explicit:
                     if not subplacement.flags and not subplacement.explicit:
                         command = InputSectionDesc(subplacement.node.entity, subplacement.sections,
                         command = InputSectionDesc(subplacement.node.entity, subplacement.sections,
@@ -215,7 +216,7 @@ class EntityNode():
 
 
                 for flag in surround_type:
                 for flag in surround_type:
                     if flag.post:
                     if flag.post:
-                        if isinstance(flag, Mapping.Surround):
+                        if isinstance(flag, Surround):
                             commands[placement.target].append(SymbolAtAddress('_%s_end' % flag.symbol))
                             commands[placement.target].append(SymbolAtAddress('_%s_end' % flag.symbol))
                         else:  # ALIGN
                         else:  # ALIGN
                             commands[placement.target].append(AlignAtAddress(flag.alignment))
                             commands[placement.target].append(AlignAtAddress(flag.alignment))
@@ -248,6 +249,7 @@ class SymbolNode(EntityNode):
     Entities at depth=3. Represents entities with archive, object
     Entities at depth=3. Represents entities with archive, object
     and symbol specified.
     and symbol specified.
     """
     """
+
     def __init__(self, parent, name):
     def __init__(self, parent, name):
         EntityNode.__init__(self, parent, name)
         EntityNode.__init__(self, parent, name)
         self.entity = Entity(self.parent.parent.name, self.parent.name)
         self.entity = Entity(self.parent.parent.name, self.parent.name)
@@ -270,6 +272,7 @@ class ObjectNode(EntityNode):
     An intermediate placement on this node is created, if one does not exist,
     An intermediate placement on this node is created, if one does not exist,
     and is the one excluded from its basis placement.
     and is the one excluded from its basis placement.
     """
     """
+
     def __init__(self, parent, name):
     def __init__(self, parent, name):
         EntityNode.__init__(self, parent, name)
         EntityNode.__init__(self, parent, name)
         self.child_t = SymbolNode
         self.child_t = SymbolNode
@@ -334,6 +337,7 @@ class ArchiveNode(EntityNode):
     """
     """
     Entities at depth=1. Represents entities with archive specified.
     Entities at depth=1. Represents entities with archive specified.
     """
     """
+
     def __init__(self, parent, name):
     def __init__(self, parent, name):
         EntityNode.__init__(self, parent, name)
         EntityNode.__init__(self, parent, name)
         self.child_t = ObjectNode
         self.child_t = ObjectNode
@@ -345,6 +349,7 @@ class RootNode(EntityNode):
     Single entity at depth=0. Represents entities with no specific members
     Single entity at depth=0. Represents entities with no specific members
     specified.
     specified.
     """
     """
+
     def __init__(self):
     def __init__(self):
         EntityNode.__init__(self, None, Entity.ALL)
         EntityNode.__init__(self, None, Entity.ALL)
         self.child_t = ArchiveNode
         self.child_t = ArchiveNode
@@ -382,7 +387,7 @@ class Generation:
 
 
             for (sections_name, target_name) in scheme.entries:
             for (sections_name, target_name) in scheme.entries:
                 # Get the sections under the bucket 'target_name'. If this bucket does not exist
                 # Get the sections under the bucket 'target_name'. If this bucket does not exist
-                # is is created automatically
+                # is created automatically
                 sections_in_bucket = sections_bucket[target_name]
                 sections_in_bucket = sections_bucket[target_name]
 
 
                 try:
                 try:
@@ -433,9 +438,9 @@ class Generation:
                 entity = Entity(archive, obj, symbol)
                 entity = Entity(archive, obj, symbol)
 
 
                 # Check the entity exists
                 # Check the entity exists
-                if (self.check_mappings and
-                        entity.specificity.value > Entity.Specificity.ARCHIVE.value and
-                        mapping.name not in self.check_mapping_exceptions):
+                if (self.check_mappings
+                        and entity.specificity.value > Entity.Specificity.ARCHIVE.value
+                        and mapping.name not in self.check_mapping_exceptions):
                     if not entities.check_exists(entity):
                     if not entities.check_exists(entity):
                         message = "'%s' not found" % str(entity)
                         message = "'%s' not found" % str(entity)
                         raise GenerationException(message, mapping)
                         raise GenerationException(message, mapping)
@@ -444,11 +449,11 @@ class Generation:
                     flags = mapping.flags[(obj, symbol, scheme_name)]
                     flags = mapping.flags[(obj, symbol, scheme_name)]
                     # Check if all section->target defined in the current
                     # Check if all section->target defined in the current
                     # scheme.
                     # scheme.
-                    for (s, t, f) in flags:
-                        if (t not in scheme_dictionary[scheme_name].keys() or
-                                s not in [_s.name for _s in scheme_dictionary[scheme_name][t]]):
-
-                            message = "%s->%s not defined in scheme '%s'" % (s, t, scheme_name)
+                    for flag in flags:
+                        if (flag.target not in scheme_dictionary[scheme_name].keys()
+                                or flag.section not in
+                                [_s.name for _s in scheme_dictionary[scheme_name][flag.target]]):
+                            message = "%s->%s not defined in scheme '%s'" % (flag.section, flag.target, scheme_name)
                             raise GenerationException(message, mapping)
                             raise GenerationException(message, mapping)
                 else:
                 else:
                     flags = None
                     flags = None
@@ -460,9 +465,9 @@ class Generation:
                         _flags = []
                         _flags = []
 
 
                         if flags:
                         if flags:
-                            for (s, t, f) in flags:
-                                if (s, t) == (section.name, target):
-                                    _flags.extend(f)
+                            for flag in flags:
+                                if (flag.section, flag.target) == (section.name, target):
+                                    _flags.extend(flag.flags)
 
 
                         sections_str = get_section_strs(section)
                         sections_str = get_section_strs(section)
 
 
@@ -477,18 +482,18 @@ class Generation:
                             entity_mappings[key] = Generation.EntityMapping(entity, sections_str, target, _flags)
                             entity_mappings[key] = Generation.EntityMapping(entity, sections_str, target, _flags)
                         else:
                         else:
                             # Check for conflicts.
                             # Check for conflicts.
-                            if (target != existing.target):
+                            if target != existing.target:
                                 raise GenerationException('Sections mapped to multiple targets.', mapping)
                                 raise GenerationException('Sections mapped to multiple targets.', mapping)
 
 
                             # Combine flags here if applicable, to simplify
                             # Combine flags here if applicable, to simplify
                             # insertion logic.
                             # insertion logic.
-                            if (_flags or existing.flags):
-                                if ((_flags and not existing.flags) or (not _flags and existing.flags)):
+                            if _flags or existing.flags:
+                                if (_flags and not existing.flags) or (not _flags and existing.flags):
                                     _flags.extend(existing.flags)
                                     _flags.extend(existing.flags)
                                     entity_mappings[key] = Generation.EntityMapping(entity,
                                     entity_mappings[key] = Generation.EntityMapping(entity,
                                                                                     sections_str,
                                                                                     sections_str,
                                                                                     target, _flags)
                                                                                     target, _flags)
-                                elif (_flags == existing.flags):
+                                elif _flags == existing.flags:
                                     pass
                                     pass
                                 else:
                                 else:
                                     raise GenerationException('Conflicting flags specified.', mapping)
                                     raise GenerationException('Conflicting flags specified.', mapping)
@@ -517,26 +522,22 @@ class Generation:
 
 
     def add_fragments_from_file(self, fragment_file):
     def add_fragments_from_file(self, fragment_file):
         for fragment in fragment_file.fragments:
         for fragment in fragment_file.fragments:
-            dict_to_append_to = None
-
-            if isinstance(fragment, Mapping) and fragment.deprecated and fragment.name in self.mappings.keys():
-                self.mappings[fragment.name].entries |= fragment.entries
+            if isinstance(fragment, Scheme):
+                dict_to_append_to = self.schemes
+            elif isinstance(fragment, Sections):
+                dict_to_append_to = self.placements
             else:
             else:
-                if isinstance(fragment, Scheme):
-                    dict_to_append_to = self.schemes
-                elif isinstance(fragment, Sections):
-                    dict_to_append_to = self.placements
-                else:
-                    dict_to_append_to = self.mappings
+                dict_to_append_to = self.mappings
 
 
-                # Raise exception when the fragment of the same type is already in the stored fragments
-                if fragment.name in dict_to_append_to.keys():
-                    stored = dict_to_append_to[fragment.name].path
-                    new = fragment.path
-                    message = "Duplicate definition of fragment '%s' found in %s and %s." % (fragment.name, stored, new)
-                    raise GenerationException(message)
+            # Raise exception when the fragment of the same type is already in the stored fragments
+            if fragment.name in dict_to_append_to:
+                stored = dict_to_append_to[fragment.name].path
+                new = fragment.path
+                message = "Duplicate definition of fragment '%s' found in %s and %s." % (
+                    fragment.name, stored, new)
+                raise GenerationException(message)
 
 
-                dict_to_append_to[fragment.name] = fragment
+            dict_to_append_to[fragment.name] = fragment
 
 
 
 
 class GenerationException(LdGenFailure):
 class GenerationException(LdGenFailure):

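A hedged sketch of how the refactored Generation model is driven, mirroring the updated tests; the Generation() constructor arguments and the way EntityDB is populated from objdump output are assumed or omitted here:

from ldgen.entity import EntityDB
from ldgen.fragments import parse_fragment_file
from ldgen.generation import Generation
from ldgen.sdkconfig import SDKConfig

sdkconfig = SDKConfig('Kconfig', 'sdkconfig')        # placeholder paths
generation = Generation()                            # default construction assumed

# Scheme, sections and mapping fragments end up in the model's per-type
# dictionaries; a duplicate fragment name now raises GenerationException.
generation.add_fragments_from_file(parse_fragment_file('base.lf', sdkconfig))

entities = EntityDB()                                # populated from objdump output elsewhere
mapping_rules = generation.generate(entities)        # rules later consumed by LinkerScript.fill()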
+ 9 - 0
tools/ldgen/ldgen/ldgen_common.py

@@ -0,0 +1,9 @@
+#
+# SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
+# SPDX-License-Identifier: Apache-2.0
+#
+
+class LdGenFailure(RuntimeError):
+    """
+    Parent class for any ldgen runtime failure which is due to input data
+    """

+ 11 - 13
tools/ldgen/linker_script.py → tools/ldgen/ldgen/linker_script.py

@@ -1,15 +1,16 @@
 #
 #
-# SPDX-FileCopyrightText: 2021 Espressif Systems (Shanghai) CO LTD
+# SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
 # SPDX-License-Identifier: Apache-2.0
 # SPDX-License-Identifier: Apache-2.0
 #
 #
 
 
 import collections
 import collections
 import os
 import os
 
 
-from fragments import Fragment
-from generation import GenerationException
 from pyparsing import ParseException, Suppress, White
 from pyparsing import ParseException, Suppress, White
 
 
+from .fragments import Fragment
+from .generation import GenerationException
+
 
 
 class LinkerScript:
 class LinkerScript:
     """
     """
@@ -32,24 +33,21 @@ class LinkerScript:
         lines = template_file.readlines()
         lines = template_file.readlines()
 
 
         target = Fragment.IDENTIFIER
         target = Fragment.IDENTIFIER
-        reference = Suppress('mapping') + Suppress('[') + target.setResultsName('target') + Suppress(']')
-        pattern = White(' \t').setResultsName('indent') + reference
+        reference = Suppress('mapping') + Suppress('[') + target + Suppress(']')
+        pattern = White(' \t') + reference
 
 
         # Find the markers in the template file line by line. If line does not match marker grammar,
         # Find the markers in the template file line by line. If line does not match marker grammar,
         # set it as a literal to be copied as is to the output file.
         # set it as a literal to be copied as is to the output file.
         for line in lines:
         for line in lines:
             try:
             try:
-                parsed = pattern.parseString(line)
-
-                indent = parsed.indent
-                target = parsed.target
-
-                marker = LinkerScript.Marker(target, indent, [])
-
-                self.members.append(marker)
+                parsed = pattern.parse_string(line)
             except ParseException:
             except ParseException:
                 # Does not match marker syntax
                 # Does not match marker syntax
                 self.members.append(line)
                 self.members.append(line)
+            else:
+                indent, target = parsed
+                marker = LinkerScript.Marker(target, indent, [])
+                self.members.append(marker)
 
 
     def fill(self, mapping_rules):
     def fill(self, mapping_rules):
         for member in self.members:
         for member in self.members:

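For reference, a hedged illustration of the marker lines the simplified parser above recognises; the template content and the target name 'flash_text' are illustrative, and a marker must be preceded by at least one space or tab (the White(' \t') term):

import io

from ldgen.linker_script import LinkerScript

TEMPLATE = """\
.flash.text :
{
    mapping[flash_text]
}
"""

script = LinkerScript(io.StringIO(TEMPLATE))   # any object with .readlines() should work here
# script.fill(mapping_rules)                   # mapping_rules comes from Generation.generate()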
+ 9 - 20
tools/ldgen/output_commands.py → tools/ldgen/ldgen/output_commands.py

@@ -1,26 +1,15 @@
 #
 #
-# Copyright 2021 Espressif Systems (Shanghai) CO LTD
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
+# SPDX-License-Identifier: Apache-2.0
 #
 #
 
 
-from entity import Entity
+from .entity import Entity
 
 
 # Contains classes for output section commands referred to in
 # Contains classes for output section commands referred to in
 # https://www.acrc.bris.ac.uk/acrc/RedHat/rhel-ld-en-4/sections.html#OUTPUT-SECTION-DESCRIPTION.
 # https://www.acrc.bris.ac.uk/acrc/RedHat/rhel-ld-en-4/sections.html#OUTPUT-SECTION-DESCRIPTION.
 
 
 
 
-class AlignAtAddress():
+class AlignAtAddress:
     """
     """
     Outputs assignment of builtin function ALIGN to current
     Outputs assignment of builtin function ALIGN to current
     position:
     position:
@@ -42,7 +31,7 @@ class AlignAtAddress():
                 self.alignment == other.alignment)
                 self.alignment == other.alignment)
 
 
 
 
-class SymbolAtAddress():
+class SymbolAtAddress:
     """
     """
     Outputs assignment of builtin function ABSOLUTE to a symbol
     Outputs assignment of builtin function ABSOLUTE to a symbol
     for current position:
     for current position:
@@ -65,7 +54,7 @@ class SymbolAtAddress():
                 self.symbol == other.symbol)
                 self.symbol == other.symbol)
 
 
 
 
-class InputSectionDesc():
+class InputSectionDesc:
     """
     """
     Outputs an input section description as described in
     Outputs an input section description as described in
     https://www.acrc.bris.ac.uk/acrc/RedHat/rhel-ld-en-4/sections.html#INPUT-SECTION.
     https://www.acrc.bris.ac.uk/acrc/RedHat/rhel-ld-en-4/sections.html#INPUT-SECTION.
@@ -76,7 +65,7 @@ class InputSectionDesc():
     """
     """
 
 
     def __init__(self, entity, sections, exclusions=None, keep=False, sort=None):
     def __init__(self, entity, sections, exclusions=None, keep=False, sort=None):
-        assert(entity.specificity != Entity.Specificity.SYMBOL)
+        assert (entity.specificity != Entity.Specificity.SYMBOL)
 
 
         self.entity = entity
         self.entity = entity
         self.sections = set(sections)
         self.sections = set(sections)
@@ -84,8 +73,8 @@ class InputSectionDesc():
         self.exclusions = set()
         self.exclusions = set()
 
 
         if exclusions:
         if exclusions:
-            assert(not [e for e in exclusions if e.specificity == Entity.Specificity.SYMBOL or
-                        e.specificity == Entity.Specificity.NONE])
+            assert (not [e for e in exclusions if e.specificity == Entity.Specificity.SYMBOL or
+                         e.specificity == Entity.Specificity.NONE])
             self.exclusions = set(exclusions)
             self.exclusions = set(exclusions)
         else:
         else:
             self.exclusions = set()
             self.exclusions = set()

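A small, hedged example of constructing the three command classes touched above; the entity, the section list and the sort key mirror values seen in this merge request's tests, while the excluded object is purely hypothetical:

from ldgen.entity import Entity
from ldgen.output_commands import AlignAtAddress, InputSectionDesc, SymbolAtAddress

start = SymbolAtAddress('_my_block_start')       # ABSOLUTE symbol at the current position
align = AlignAtAddress(4)                        # ALIGN at the current position
desc = InputSectionDesc(Entity('libfreertos.a', 'port.c'),
                        ['.text', '.text.*', '.literal', '.literal.*'],
                        exclusions=[Entity('libfreertos.a', 'timers.c')],  # hypothetical exclusion
                        keep=True,
                        sort=('name', None))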
+ 26 - 0
tools/ldgen/ldgen/sdkconfig.py

@@ -0,0 +1,26 @@
+#
+# SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
+# SPDX-License-Identifier: Apache-2.0
+#
+
+import kconfiglib
+
+
+class SDKConfig:
+    """
+    Evaluates conditional expressions based on the build's sdkconfig and Kconfig files.
+    """
+    def __init__(self, kconfig_file, sdkconfig_file):
+        self.config = kconfiglib.Kconfig(kconfig_file)
+        self.config.load_config(sdkconfig_file)
+        self.config.warn = False  # eval_string may contain un-declared symbol
+
+    def evaluate_expression(self, expression):
+        result = self.config.eval_string(expression)
+
+        if result == 0:  # n
+            return False
+        elif result == 2:  # y
+            return True
+        else:  # m
+            raise Exception('unsupported config expression result')

+ 0 - 23
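A hedged usage example for the slimmed-down SDKConfig; the Kconfig/sdkconfig paths and the config option names are placeholders (kconfiglib expressions use option names without the CONFIG_ prefix):

from ldgen.sdkconfig import SDKConfig

sdkconfig = SDKConfig('Kconfig', 'sdkconfig')    # placeholder paths

# eval_string() returns a tristate: 0 (n) -> False, 2 (y) -> True, anything else raises.
print(sdkconfig.evaluate_expression('FREERTOS_UNICORE'))
print(sdkconfig.evaluate_expression('PERFORMANCE_LEVEL = 3 && FREERTOS_UNICORE'))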
tools/ldgen/ldgen_common.py

@@ -1,23 +0,0 @@
-#
-# Copyright 2021 Espressif Systems (Shanghai) CO LTD
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-
-class LdGenFailure(RuntimeError):
-    """
-    Parent class for any ldgen runtime failure which is due to input data
-    """
-    def __init__(self, message):
-        super(LdGenFailure, self).__init__(message)

+ 0 - 73
tools/ldgen/sdkconfig.py

@@ -1,73 +0,0 @@
-#
-# Copyright 2021 Espressif Systems (Shanghai) CO LTD
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import kconfiglib
-from pyparsing import (Combine, Group, Literal, Optional, Word, alphanums, hexnums, infixNotation, nums, oneOf,
-                       opAssoc, printables, quotedString, removeQuotes)
-
-
-class SDKConfig:
-    """
-    Evaluates conditional expressions based on the build's sdkconfig and Kconfig files.
-    This also defines the grammar of conditional expressions.
-    """
-
-    # A configuration entry is in the form CONFIG=VALUE. Definitions of components of that grammar
-    IDENTIFIER = Word(alphanums.upper() + '_')
-
-    HEX = Combine('0x' + Word(hexnums)).setParseAction(lambda t:int(t[0], 16))
-    DECIMAL = Combine(Optional(Literal('+') | Literal('-')) + Word(nums)).setParseAction(lambda t:int(t[0]))
-    LITERAL = Word(printables.replace(':', ''))
-    QUOTED_LITERAL = quotedString.setParseAction(removeQuotes)
-
-    VALUE = HEX | DECIMAL | LITERAL | QUOTED_LITERAL
-
-    # Operators supported by the expression evaluation
-    OPERATOR = oneOf(['=', '!=', '>', '<', '<=', '>='])
-
-    def __init__(self, kconfig_file, sdkconfig_file):
-        self.config = kconfiglib.Kconfig(kconfig_file)
-        self.config.load_config(sdkconfig_file)
-
-    def evaluate_expression(self, expression):
-        result = self.config.eval_string(expression)
-
-        if result == 0:  # n
-            return False
-        elif result == 2:  # y
-            return True
-        else:  # m
-            raise Exception('unsupported config expression result')
-
-    @staticmethod
-    def get_expression_grammar():
-        identifier = SDKConfig.IDENTIFIER.setResultsName('identifier')
-        operator = SDKConfig.OPERATOR.setResultsName('operator')
-        value = SDKConfig.VALUE.setResultsName('value')
-
-        test_binary = identifier + operator + value
-        test_single = identifier
-
-        test = test_binary | test_single
-
-        condition = Group(Optional('(').suppress() + test + Optional(')').suppress())
-
-        grammar = infixNotation(condition, [
-                                ('!', 1, opAssoc.RIGHT),
-                                ('&&', 2, opAssoc.LEFT),
-                                ('||',  2, opAssoc.LEFT)])
-
-        return grammar

+ 6 - 16
tools/ldgen/test/test_entity.py

@@ -1,29 +1,19 @@
 #!/usr/bin/env python
 #!/usr/bin/env python
 # coding=utf-8
 # coding=utf-8
 #
 #
-# Copyright 2018-2020 Espressif Systems (Shanghai) CO LTD
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# SPDX-FileCopyrightText: 2018-2022 Espressif Systems (Shanghai) CO LTD
+# SPDX-License-Identifier: Apache-2.0
 #
 #
 
 
+import os
 import sys
 import sys
 import unittest
 import unittest
 
 
 try:
 try:
-    from entity import Entity, EntityDB
+    from ldgen.entity import Entity, EntityDB
 except ImportError:
 except ImportError:
-    sys.path.append('../')
-    from entity import Entity, EntityDB
+    sys.path.append(os.path.dirname(os.path.dirname(__file__)))
+    from ldgen.entity import Entity, EntityDB
 
 
 
 
 class EntityTest(unittest.TestCase):
 class EntityTest(unittest.TestCase):

The following file diff is hidden because it is too large.
+ 181 - 362
tools/ldgen/test/test_fragments.py


+ 18 - 131
tools/ldgen/test/test_generation.py

@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 #!/usr/bin/env python
 #
 #
-# SPDX-FileCopyrightText: 2021 Espressif Systems (Shanghai) CO LTD
+# SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
 # SPDX-License-Identifier: Apache-2.0
 # SPDX-License-Identifier: Apache-2.0
 #
 #
 
 
@@ -10,20 +10,23 @@ import os
 import sys
 import sys
 import tempfile
 import tempfile
 import unittest
 import unittest
+from io import StringIO
 
 
 try:
 try:
-    from generation import Generation, GenerationException
+    from ldgen.entity import Entity, EntityDB
+    from ldgen.fragments import parse_fragment_file
+    from ldgen.generation import Generation, GenerationException
+    from ldgen.linker_script import LinkerScript
+    from ldgen.output_commands import AlignAtAddress, InputSectionDesc, SymbolAtAddress
+    from ldgen.sdkconfig import SDKConfig
 except ImportError:
 except ImportError:
-    sys.path.append('../')
-    from generation import Generation, GenerationException
-
-from io import StringIO
-
-from entity import Entity, EntityDB
-from fragments import FragmentFile
-from linker_script import LinkerScript
-from output_commands import AlignAtAddress, InputSectionDesc, SymbolAtAddress
-from sdkconfig import SDKConfig
+    sys.path.append(os.path.dirname(os.path.dirname(__file__)))
+    from ldgen.entity import Entity, EntityDB
+    from ldgen.fragments import parse_fragment_file
+    from ldgen.generation import Generation, GenerationException
+    from ldgen.linker_script import LinkerScript
+    from ldgen.output_commands import AlignAtAddress, InputSectionDesc, SymbolAtAddress
+    from ldgen.sdkconfig import SDKConfig
 
 
 ROOT = Entity('*')
 ROOT = Entity('*')
 
 
@@ -58,9 +61,8 @@ class GenerationTest(unittest.TestCase):
 
 
         self.sdkconfig = SDKConfig('data/Kconfig', 'data/sdkconfig')
         self.sdkconfig = SDKConfig('data/Kconfig', 'data/sdkconfig')
 
 
-        with open('data/base.lf') as fragment_file_obj:
-            fragment_file = FragmentFile(fragment_file_obj, self.sdkconfig)
-            self.generation.add_fragments_from_file(fragment_file)
+        fragment_file = parse_fragment_file('data/base.lf', self.sdkconfig)
+        self.generation.add_fragments_from_file(fragment_file)
 
 
         self.entities = EntityDB()
         self.entities = EntityDB()
 
 
@@ -78,7 +80,7 @@ class GenerationTest(unittest.TestCase):
 
 
     def add_fragments(self, text):
     def add_fragments(self, text):
         fragment_file = self.create_fragment_file(text)
         fragment_file = self.create_fragment_file(text)
-        fragment_file = FragmentFile(fragment_file, self.sdkconfig)
+        fragment_file = parse_fragment_file(fragment_file, self.sdkconfig)
         self.generation.add_fragments_from_file(fragment_file)
         self.generation.add_fragments_from_file(fragment_file)
 
 
     def write(self, expected, actual):
     def write(self, expected, actual):
@@ -1062,43 +1064,6 @@ entries:
         with self.assertRaises(GenerationException):
         with self.assertRaises(GenerationException):
             self.generation.generate(self.entities)
             self.generation.generate(self.entities)
 
 
-    def test_disambiguated_obj(self):
-        # Test command generation for disambiguated entry. Should produce similar
-        # results to test_nondefault_mapping_symbol.
-        mapping = u"""
-[mapping:test]
-archive: libfreertos.a
-entries:
-    port.c:xPortGetTickRateHz (noflash)                 #1
-"""
-        port = Entity('libfreertos.a', 'port.c')
-        self.add_fragments(mapping)
-        actual = self.generation.generate(self.entities)
-        expected = self.generate_default_rules()
-
-        flash_text = expected['flash_text']
-        iram0_text = expected['iram0_text']
-
-        # Generate exclusion in flash_text                                                A
-        flash_text[0].exclusions.add(port)
-
-        # Generate intermediate command                                                   B
-        # List all relevant sections except the symbol
-        # being mapped
-        port_sections = self.entities.get_sections('libfreertos.a', 'port.c')
-        filtered_sections = fnmatch.filter(port_sections, '.literal.*')
-        filtered_sections.extend(fnmatch.filter(port_sections, '.text.*'))
-
-        filtered_sections = [s for s in filtered_sections if not s.endswith('xPortGetTickRateHz')]
-        filtered_sections.append('.text')
-
-        flash_text.append(InputSectionDesc(port, set(filtered_sections), []))
-
-        # Input section commands in iram_text for #1                                     C
-        iram0_text.append(InputSectionDesc(port, set(['.text.xPortGetTickRateHz', '.literal.xPortGetTickRateHz']), []))
-
-        self.compare_rules(expected, actual)
-
     def test_root_mapping_fragment_conflict(self):
     def test_root_mapping_fragment_conflict(self):
         # Test that root mapping fragments are also checked for
         # Test that root mapping fragments are also checked for
         # conflicts.
         # conflicts.
@@ -1258,84 +1223,6 @@ entries:
 
 
             self.compare_rules(expected, actual)
             self.compare_rules(expected, actual)
 
 
-    def test_conditional_on_scheme_legacy_mapping_00(self):
-        # Test use of conditional scheme on legacy mapping fragment grammar.
-        mapping = u"""
-[mapping]
-archive: lib.a
-entries:
-    * (cond_noflash)
-"""
-        self._test_conditional_on_scheme(0, mapping)
-
-    def test_conditional_on_scheme_legacy_mapping_01(self):
-        # Test use of conditional scheme on legacy mapping fragment grammar.
-        mapping = u"""
-[mapping]
-archive: lib.a
-entries:
-    * (cond_noflash)
-"""
-        self._test_conditional_on_scheme(0, mapping)
-
-    def test_conditional_entries_legacy_mapping_fragment(self):
-        # Test conditional entries on legacy mapping fragment grammar.
-        mapping = u"""
-[mapping:default]
-archive: *
-entries:
-    * (default)
-
-[mapping]
-archive: lib.a
-entries:
-    : PERFORMANCE_LEVEL = 0
-    : PERFORMANCE_LEVEL = 1
-    obj1 (noflash)
-    : PERFORMANCE_LEVEL = 2
-    obj1 (noflash)
-    obj2 (noflash)
-    : PERFORMANCE_LEVEL = 3
-    obj1 (noflash)
-    obj2 (noflash)
-    obj3 (noflash)
-"""
-        self.test_conditional_mapping(mapping)
-
-    def test_multiple_fragment_same_lib_conditional_legacy(self):
-        # Test conditional entries on legacy mapping fragment grammar
-        # across multiple fragments.
-        mapping = u"""
-[mapping:default]
-archive: *
-entries:
-    * (default)
-
-[mapping]
-archive: lib.a
-entries:
-    : PERFORMANCE_LEVEL = 0
-    : PERFORMANCE_LEVEL = 1
-    obj1 (noflash)
-    : PERFORMANCE_LEVEL = 2
-    obj1 (noflash)
-    : PERFORMANCE_LEVEL = 3
-    obj1 (noflash)
-
-[mapping]
-archive: lib.a
-entries:
-    : PERFORMANCE_LEVEL = 1
-    obj1 (noflash) # ignore duplicate definition
-    : PERFORMANCE_LEVEL = 2
-    obj2 (noflash)
-    : PERFORMANCE_LEVEL = 3
-    obj2 (noflash)
-    obj3 (noflash)
-"""
-
-        self.test_conditional_mapping(mapping)
-
     def test_multiple_fragment_same_lib_conditional(self):
     def test_multiple_fragment_same_lib_conditional(self):
         # Test conditional entries on new mapping fragment grammar.
         # Test conditional entries on new mapping fragment grammar.
         # across multiple fragments.
         # across multiple fragments.

+ 8 - 17
tools/ldgen/test/test_output_commands.py

@@ -1,30 +1,21 @@
 #!/usr/bin/env python
 #!/usr/bin/env python
 #
 #
-# Copyright 2021 Espressif Systems (Shanghai) CO LTD
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
+# SPDX-License-Identifier: Apache-2.0
 #
 #
 
 
+import os
 import sys
 import sys
 import unittest
 import unittest
 
 
 try:
 try:
-    from output_commands import AlignAtAddress, InputSectionDesc, SymbolAtAddress
+    from ldgen.entity import Entity
+    from ldgen.output_commands import AlignAtAddress, InputSectionDesc, SymbolAtAddress
 except ImportError:
 except ImportError:
-    sys.path.append('../')
-    from output_commands import InputSectionDesc, SymbolAtAddress, AlignAtAddress
+    sys.path.append(os.path.dirname(os.path.dirname(__file__)))
+    from ldgen.entity import Entity
+    from ldgen.output_commands import AlignAtAddress, InputSectionDesc, SymbolAtAddress
 
 
-from entity import Entity
 
 
 SECTIONS = ['.text', '.text.*', '.literal', '.literal.*']
 SECTIONS = ['.text', '.text.*', '.literal', '.literal.*']
 
 

この差分においてかなりの量のファイルが変更されているため、一部のファイルを表示していません