Browse Source

CMSIS-NN: Fix static code issues

* Resolve BAD_SHIFT, DIVIDE_BY_ZERO, and some CERT INT34-C issues.
* Do not build q7 files, i.e. build only the s8 files.
* Add a softmax unit test for the "sum=0" case.

Change-Id: I986f8982a70c55b0a23a84664c29c5037a0823b1
Måns Nilsson 4 years ago
parent
commit
fbe8d6069f

+ 1 - 2
CMSIS/NN/.gitignore

@@ -1,4 +1,3 @@
 Tests/UnitTest/TestCases/*/Unity/TestRunner/*
 Tests/UnitTest/Output/*
-Tests/UnitTest/Unity/*
-Tests/UnitTest/*.ld
+Tests/UnitTest/Unity/*

+ 1 - 1
CMSIS/NN/Source/ActivationFunctions/CMakeLists.txt

@@ -4,7 +4,7 @@ project(CMSISNNActivation)
 
 include(configLib)
 
-file(GLOB SRC "./*_*.c")
+file(GLOB SRC "./*_s8.c")
 
 add_library(CMSISNNActivation STATIC ${SRC})
 

+ 11 - 10
CMSIS/NN/Source/BasicMathFunctions/arm_elementwise_add_s8.c

@@ -21,8 +21,8 @@
  * Title:        arm_elementwise_add_s8
  * Description:  Element wise add
  *
- * $Date:        09. October 2020
- * $Revision:    V.2.5.2
+ * $Date:        01. March 2021
+ * $Revision:    V.2.5.3
  *
  * Target Processor:  Cortex-M CPUs
  *
@@ -153,10 +153,11 @@ arm_status arm_elementwise_add_s8(const int8_t *input_1_vect,
         b_2 = __SADD16(b_2, offset_2_packed);
 
         /* Sum 1 */
-        input_1 = (int16_t)(b_1 & 0x0FFFFL) << left_shift;
+        input_1 = (b_1 & 0x0FFFF) << left_shift;
+
         SAT_INPUT(input_1, input_1_mult, input_1_shift);
 
-        input_2 = (int16_t)(b_2 & 0x0FFFFL) << left_shift;
+        input_2 = (b_2 & 0x0FFFF) << left_shift;
         SAT_INPUT(input_2, input_2_mult, input_2_shift);
 
         sum = input_1 + input_2;
@@ -167,10 +168,10 @@ arm_status arm_elementwise_add_s8(const int8_t *input_1_vect,
         r1 = (q7_t)sum;
 
         /* Sum 3 */
-        input_1 = (int16_t)((b_1 >> 16) & 0x0FFFFL) << left_shift;
+        input_1 = ((b_1 >> 16) & 0x0FFFF) << left_shift;
         SAT_INPUT(input_1, input_1_mult, input_1_shift);
 
-        input_2 = (int16_t)((b_2 >> 16) & 0x0FFFFL) << left_shift;
+        input_2 = ((b_2 >> 16) & 0x0FFFF) << left_shift;
         SAT_INPUT(input_2, input_2_mult, input_2_shift);
 
         sum = input_1 + input_2;
@@ -181,10 +182,10 @@ arm_status arm_elementwise_add_s8(const int8_t *input_1_vect,
         r3 = (q7_t)sum;
 
         /* Sum 2 */
-        input_1 = (int16_t)(a_1 & 0x0FFFFL) << left_shift;
+        input_1 = (a_1 & 0x0FFFF) << left_shift;
         SAT_INPUT(input_1, input_1_mult, input_1_shift);
 
-        input_2 = (int16_t)(a_2 & 0x0FFFFL) << left_shift;
+        input_2 = (a_2 & 0x0FFFF) << left_shift;
         SAT_INPUT(input_2, input_2_mult, input_2_shift);
 
         sum = input_1 + input_2;
@@ -195,10 +196,10 @@ arm_status arm_elementwise_add_s8(const int8_t *input_1_vect,
         r2 = (q7_t)sum;
 
         /* Sum 4 */
-        input_1 = (int16_t)((a_1 >> 16) & 0x0FFFFL) << left_shift;
+        input_1 = ((a_1 >> 16) & 0x0FFFF) << left_shift;
         SAT_INPUT(input_1, input_1_mult, input_1_shift);
 
-        input_2 = (int16_t)((a_2 >> 16) & 0x0FFFFL) << left_shift;
+        input_2 = ((a_2 >> 16) & 0x0FFFF) << left_shift;
         SAT_INPUT(input_2, input_2_mult, input_2_shift);
 
         sum = input_1 + input_2;

+ 1 - 2
CMSIS/NN/Source/ConvolutionFunctions/CMakeLists.txt

@@ -4,8 +4,7 @@ project(CMSISNNConvolutions)
 
 include(configLib)
 
-file(GLOB SRC "./*_*.c")
-
+file(GLOB SRC "./*_s8*.c")
 add_library(CMSISNNConvolutions STATIC ${SRC})
 
 configLib(CMSISNNConvolutions ${ROOT})

+ 1 - 1
CMSIS/NN/Source/FullyConnectedFunctions/CMakeLists.txt

@@ -4,7 +4,7 @@ project(CMSISNNFullyConnected)
 
 include(configLib)
 
-file(GLOB SRC "./*_*.c")
+file(GLOB SRC "./*_s8.c")
 
 add_library(CMSISNNFullyConnected STATIC ${SRC})
 

+ 2 - 2
CMSIS/NN/Source/NNSupportFunctions/CMakeLists.txt

@@ -4,9 +4,9 @@ project(CMSISNNSupport)
 
 include(configLib)
 
-file(GLOB SRC "./*_*.c")
-
+file(GLOB SRC "./*_s8.c")
 add_library(CMSISNNSupport STATIC ${SRC})
+target_sources(CMSISNNSupport PUBLIC arm_q7_to_q15_with_offset.c)
 
 configLib(CMSISNNSupport ${ROOT})
 configDsp(CMSISNNSupport ${ROOT})

+ 1 - 1
CMSIS/NN/Source/PoolingFunctions/CMakeLists.txt

@@ -4,7 +4,7 @@ project(CMSISNNPooling)
 
 include(configLib)
 
-file(GLOB SRC "./*_*.c")
+file(GLOB SRC "./*_s8.c")
 
 add_library(CMSISNNPooling STATIC ${SRC})
 

+ 22 - 2
CMSIS/NN/Source/PoolingFunctions/arm_avgpool_s8.c

@@ -21,8 +21,8 @@
  * Title:        arm_avgpool_s8.c
  * Description:  Pooling function implementations
  *
- * $Date:        09. October 2020
- * $Revision:    V.2.0.3
+ * $Date:        01. March 2021
+ * $Revision:    V.2.0.4
  *
  * Target Processor:  Cortex-M CPUs
  *
@@ -157,6 +157,12 @@ arm_status arm_avgpool_s8(const cmsis_nn_context *ctx,
                     }
                 }
 
+                // Prevent static code issue DIVIDE_BY_ZERO.
+                if (count == 0)
+                {
+                    return ARM_MATH_ARGUMENT_ERROR;
+                }
+
                 sumV1[0] = sumV1[0] > 0 ? (sumV1[0] + count / 2) / count : (sumV1[0] - count / 2) / count;
                 sumV1[1] = sumV1[1] > 0 ? (sumV1[1] + count / 2) / count : (sumV1[1] - count / 2) / count;
                 sumV1[2] = sumV1[2] > 0 ? (sumV1[2] + count / 2) / count : (sumV1[2] - count / 2) / count;
@@ -300,6 +306,13 @@ arm_status arm_avgpool_s8(const cmsis_nn_context *ctx,
                     count++;
                 }
             }
+
+            // Prevent static code issue DIVIDE_BY_ZERO.
+            if (count == 0)
+            {
+                return ARM_MATH_ARGUMENT_ERROR;
+            }
+
             scale_q31_to_q7_and_clamp(buffer, dst, ch_src, count, act_min, act_max);
             dst += ch_src;
         }
@@ -331,6 +344,13 @@ arm_status arm_avgpool_s8(const cmsis_nn_context *ctx,
                         }
                     }
                 }
+
+                // Prevent static code issue DIVIDE_BY_ZERO.
+                if (count == 0)
+                {
+                    return ARM_MATH_ARGUMENT_ERROR;
+                }
+
                 sum = sum > 0 ? (sum + count / 2) / count : (sum - count / 2) / count;
                 sum = MAX(sum, act_min);
                 sum = MIN(sum, act_max);

+ 1 - 1
CMSIS/NN/Source/SoftmaxFunctions/CMakeLists.txt

@@ -4,7 +4,7 @@ project(CMSISNNSoftmax)
 
 include(configLib)
 
-file(GLOB SRC "./*_*.c")
+file(GLOB SRC "./*_s8.c")
 
 add_library(CMSISNNSoftmax STATIC ${SRC})
 

+ 4 - 4
CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_s8.c

@@ -21,8 +21,8 @@
  * Title:        arm_softmax_s8.c
  * Description:  S8 softmax function
  *
- * $Date:        09. October 2020
- * $Revision:    V.2.0.1
+ * $Date:        01. March 2021
+ * $Revision:    V.2.0.2
  *
  * Target Processor:  Cortex-M cores
  *
@@ -149,7 +149,7 @@ void arm_softmax_s8(const int8_t *input,
 
         const int32_t headroom = __CLZ((uint32_t)sum);
         const int32_t bits_over_unit = ACCUM_BITS - headroom + 23;
-        const int32_t shifted_scale = ONE_OVER1((sum << headroom) - (1 << 31));
+        const int32_t shifted_scale = ONE_OVER1((sum > 0 ? sum << headroom : 0) - (1 << 31));
 
         vec_count = row_size / 4;
         idx = 0;
@@ -234,7 +234,7 @@ void arm_softmax_s8(const int8_t *input,
 
         const int32_t headroom = __CLZ(sum);
         const int32_t bits_over_unit = ACCUM_BITS - headroom + 23;
-        const int32_t shifted_scale = ONE_OVER1((sum << headroom) - (1 << 31));
+        const int32_t shifted_scale = ONE_OVER1((sum > 0 ? sum << headroom : 0) - (1 << 31));
 
         for (col = 0; col < row_size; ++col)
         {

+ 1 - 0
CMSIS/NN/Tests/UnitTest/CMakeLists.txt

@@ -80,6 +80,7 @@ add_subdirectory(TestCases/test_arm_depthwise_conv_s8)
 add_subdirectory(TestCases/test_arm_depthwise_conv_s8_opt)
 add_subdirectory(TestCases/test_arm_fully_connected_s8)
 add_subdirectory(TestCases/test_arm_max_pool_s8)
+add_subdirectory(TestCases/test_arm_softmax_s8)
 
 set(MAKE_CMD "python3")
 set(MAKE_CMD_SCRIPT "${CMAKE_CURRENT_SOURCE_DIR}/unittest_targets.py")

+ 2 - 0
CMSIS/NN/Tests/UnitTest/PregeneratedData/softmax/input.txt

@@ -0,0 +1,2 @@
+# 1,5
+-5.000000000000000000e+00,-3.000000000000000000e+00,1.000000000000000000e+00,0.000000000000000000e+00,-6.000000000000000000e+00

+ 12 - 0
CMSIS/NN/Tests/UnitTest/PregeneratedData/softmax/params.txt

@@ -0,0 +1,12 @@
+1
+1
+22
+12
+6
+5
+9
+5
+1
+1
+1
+1

+ 26 - 0
CMSIS/NN/Tests/UnitTest/TestCases/TestData/softmax/config_data.h

@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2010-2021 Arm Limited or its affiliates. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Generated by generate_test_data.py using TFL version 2.4.1 as reference.
+#pragma once
+#define SOFTMAX_NUM_ROWS 1
+#define SOFTMAX_ROW_SIZE 5
+#define SOFTMAX_INPUT_MULT 1077952576
+#define SOFTMAX_INPUT_LEFT_SHIFT 23
+#define SOFTMAX_DIFF_MIN -248
+#define SOFTMAX_DST_SIZE 5

+ 23 - 0
CMSIS/NN/Tests/UnitTest/TestCases/TestData/softmax/input_data.h

@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2010-2021 Arm Limited or its affiliates. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Generated by generate_test_data.py using TFL version 2.4.1 as reference.
+#pragma once
+#include <stdint.h>
+
+const q7_t softmax_input[5] = {-80, -48, 16, 0, -96};

+ 23 - 0
CMSIS/NN/Tests/UnitTest/TestCases/TestData/softmax/output_ref_data.h

@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2010-2021 Arm Limited or its affiliates. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Generated by generate_test_data.py using TFL version 2.4.1 as reference.
+#pragma once
+#include <stdint.h>
+
+const q7_t softmax_output_ref[5] = {-128, -125, 56, -60, -128};

+ 22 - 0
CMSIS/NN/Tests/UnitTest/TestCases/TestData/softmax/test_data.h

@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2010-2021 Arm Limited or its affiliates. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Generated by generate_test_data.py using TFL version 2.4.1 as reference.
+#include "config_data.h"
+#include "input_data.h"
+#include "output_ref_data.h"

+ 30 - 0
CMSIS/NN/Tests/UnitTest/TestCases/test_arm_softmax_s8/CMakeLists.txt

@@ -0,0 +1,30 @@
+#
+# Copyright (C) 2010-2021 Arm Limited or its affiliates. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the License); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+add_cmsis_nn_unit_test_executable(test_arm_softmax_s8)
+
+target_sources(test_arm_softmax_s8 PRIVATE
+    Unity/unity_test_arm_softmax_s8.c
+    Unity/TestRunner/unity_test_arm_softmax_s8_runner.c
+    ${CMSIS_PATH}/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_s8.c)
+
+target_include_directories(test_arm_softmax_s8 PRIVATE ${CMSIS_PATH}/CMSIS/NN/Include)
+target_include_directories(test_arm_softmax_s8 PRIVATE ${CMSIS_PATH}/CMSIS/DSP/Include)
+target_include_directories(test_arm_softmax_s8 PRIVATE ${CMSIS_PATH}/CMSIS/Core/Include)
+
+target_link_libraries(test_arm_softmax_s8 LINK_PUBLIC unity)

+ 49 - 0
CMSIS/NN/Tests/UnitTest/TestCases/test_arm_softmax_s8/Unity/unity_test_arm_softmax_s8.c

@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "../test_arm_softmax_s8.c"
+#include "unity.h"
+
+#ifdef USING_FVP_CORSTONE_300
+extern void uart_init(void);
+#endif
+
+/* This function is called from the autogenerated file.
+ * The name must be exactly like this
+ */
+void setUp(void)
+{ /* This is run before EACH TEST */
+#ifdef USING_FVP_CORSTONE_300
+    uart_init();
+#endif
+}
+
+/* This function is called from the autogenerated file.
+ * The name must be exactly like this
+ */
+void tearDown(void) {}
+
+void test_softmax_arm_softmax_s8(void) { softmax_arm_softmax_s8(); }
+
+void test_softmax1_arm_softmax_s8(void) { softmax_invalid_diff_min_arm_softmax_s8(); }

+ 61 - 0
CMSIS/NN/Tests/UnitTest/TestCases/test_arm_softmax_s8/test_arm_softmax_s8.c

@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2010-2021 Arm Limited or its affiliates. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "unity.h"
+#include <arm_nnfunctions.h>
+
+#include "../TestData/maxpooling/test_data.h"
+#include "../TestData/softmax/test_data.h"
+#include "../Utils/validate.h"
+
+#define REPEAT_NUM (2)
+
+void softmax_arm_softmax_s8(void)
+{
+    const int32_t num_rows = SOFTMAX_NUM_ROWS;
+    const int32_t row_size = SOFTMAX_ROW_SIZE;
+    const int32_t mult = SOFTMAX_INPUT_MULT;
+    const int32_t shift = SOFTMAX_INPUT_LEFT_SHIFT;
+    const int32_t diff_min = SOFTMAX_DIFF_MIN;
+    const q7_t *input_data = softmax_input;
+    int8_t output[SOFTMAX_DST_SIZE];
+
+    for (int i = 0; i < REPEAT_NUM; i++)
+    {
+        arm_softmax_s8(input_data, num_rows, row_size, mult, shift, diff_min, output);
+        TEST_ASSERT_TRUE(validate(output, softmax_output_ref, SOFTMAX_DST_SIZE));
+    }
+}
+
+void softmax_invalid_diff_min_arm_softmax_s8(void)
+{
+    const q7_t softmax_expect_invalid_output[] = {-128, -128, -128, -128, -128};
+    const int32_t num_rows = SOFTMAX_NUM_ROWS;
+    const int32_t row_size = SOFTMAX_ROW_SIZE;
+    const int32_t mult = SOFTMAX_INPUT_MULT;
+    const int32_t shift = SOFTMAX_INPUT_LEFT_SHIFT;
+    const int32_t diff_min = 0x7FFFFFFF;
+    const q7_t *input_data = softmax_input;
+    int8_t output[SOFTMAX_DST_SIZE];
+
+    for (int i = 0; i < REPEAT_NUM; i++)
+    {
+        arm_softmax_s8(input_data, num_rows, row_size, mult, shift, diff_min, output);
+        TEST_ASSERT_TRUE(validate(output, softmax_expect_invalid_output, SOFTMAX_DST_SIZE));
+    }
+}

+ 81 - 16
CMSIS/NN/Tests/UnitTest/generate_test_data.py

@@ -65,7 +65,7 @@ def parse_args():
     parser.add_argument('--regenerate-biases', action='store_true', help="Regenerate and store new biases.")
     parser.add_argument('-a', '--regenerate-all', action='store_true', help="Regenerate and store all data.")
     parser.add_argument('-t', '--testtype', type=str, default='conv', choices=['conv', 'depthwise_conv', 'avgpool',
-                                                                               'maxpool', 'fully_connected'],
+                                                                               'maxpool', 'fully_connected', 'softmax'],
                         help='Type of test.')
     parser.add_argument('--run-all-testsets', action='store_true', help="Run the script for all existing test "
                         "sets. Regenerate all, partially all or no input data (output may still change, depending on"
@@ -320,7 +320,7 @@ class TestSettings(ABC):
         f.write("#define {}_INPUT_OFFSET {}\n".format(prefix, -self.input_zero_point))
         f.write("#define {}_OUTPUT_OFFSET {}\n".format(prefix, self.output_zero_point))
 
-    def write_c_config_header(self):
+    def write_c_config_header(self, write_common_parameters=True):
         filename = self.config_data
 
         self.generated_header_files.append(filename)
@@ -331,20 +331,21 @@ class TestSettings(ABC):
         print("Writing C header with config data {}...".format(filepath))
         with open(filepath, "w+") as f:
             self.write_c_common_header(f)
-            f.write("#define {}_OUT_CH {}\n".format(prefix, self.output_ch))
-            f.write("#define {}_IN_CH {}\n".format(prefix, self.input_ch))
-            f.write("#define {}_INPUT_W {}\n".format(prefix, self.x_input))
-            f.write("#define {}_INPUT_H {}\n".format(prefix, self.y_input))
-            f.write("#define {}_DST_SIZE {}\n".format(prefix, self.x_output * self.y_output * self.output_ch
-                                                      * self.batches))
-            f.write("#define {}_INPUT_SIZE {}\n".format(prefix, self.x_input * self.y_input * self.input_ch))
-            if self.relu6:
-                f.write("#define {}_OUT_ACTIVATION_MIN {}\n".format(prefix, 0))
-                f.write("#define {}_OUT_ACTIVATION_MAX {}\n".format(prefix, 6))
-            else:
-                f.write("#define {}_OUT_ACTIVATION_MIN {}\n".format(prefix, self.out_activation_min))
-                f.write("#define {}_OUT_ACTIVATION_MAX {}\n".format(prefix, self.out_activation_max))
-            f.write("#define {}_INPUT_BATCHES {}\n".format(prefix, self.batches))
+            if (write_common_parameters):
+                f.write("#define {}_OUT_CH {}\n".format(prefix, self.output_ch))
+                f.write("#define {}_IN_CH {}\n".format(prefix, self.input_ch))
+                f.write("#define {}_INPUT_W {}\n".format(prefix, self.x_input))
+                f.write("#define {}_INPUT_H {}\n".format(prefix, self.y_input))
+                f.write("#define {}_DST_SIZE {}\n".format(prefix, self.x_output * self.y_output * self.output_ch
+                                                          * self.batches))
+                f.write("#define {}_INPUT_SIZE {}\n".format(prefix, self.x_input * self.y_input * self.input_ch))
+                if self.relu6:
+                    f.write("#define {}_OUT_ACTIVATION_MIN {}\n".format(prefix, 0))
+                    f.write("#define {}_OUT_ACTIVATION_MAX {}\n".format(prefix, 6))
+                else:
+                    f.write("#define {}_OUT_ACTIVATION_MIN {}\n".format(prefix, self.INT8_MIN))
+                    f.write("#define {}_OUT_ACTIVATION_MAX {}\n".format(prefix, self.INT8_MAX))
+                f.write("#define {}_INPUT_BATCHES {}\n".format(prefix, self.batches))
         self.format_output_file(filepath)
 
     def generate_c_array(self, name, array, datatype="q7_t"):
@@ -763,6 +764,67 @@ class FullyConnectedSettings(TestSettings):
         self.write_c_header_wrapper()
 
 
+class SoftmaxSettings(TestSettings):
+    softmax_input_integer_bits = 5
+
+    def __init__(self, dataset, testtype, args, x_in=5, y_in=1, randmin=-7, randmax=7):
+        super().__init__(dataset, testtype, args, 1, 1, x_in, y_in, 1, 1, 1, 1, False, randmin,
+                         randmax)
+        self.output_scale = 1 / 256
+        self.output_zero_point = -128
+        self.x_input = self.x_output = x_in
+        self.y_input = self.y_output = y_in
+
+        input_real_multiplier = min(self.input_scale * (1 << (31 - self.softmax_input_integer_bits)),
+                                    (1 << 31) - 1)
+        (self.input_multiplier, self.input_left_shift) = self.quantize_scale(input_real_multiplier)
+
+        self.diff_min = ((1 << self.softmax_input_integer_bits) - 1) * \
+                        (1 << (31 - self.softmax_input_integer_bits)) / \
+                        (1 << self.input_left_shift)
+        self.diff_min = math.floor(self.diff_min)
+
+    def write_c_config_header(self):
+        super().write_c_config_header(write_common_parameters=False)
+
+        filename = self.config_data
+        filepath = self.headers_dir + filename
+        prefix = self.testdataset.upper()
+
+        with open(filepath, "a") as f:
+            f.write("#define {}_NUM_ROWS {}\n".format(prefix, self.y_input))
+            f.write("#define {}_ROW_SIZE {}\n".format(prefix, self.x_input))
+            f.write("#define {}_INPUT_MULT {}\n".format(prefix, self.input_multiplier))
+            f.write("#define {}_INPUT_LEFT_SHIFT {}\n".format(prefix, self.input_left_shift))
+            f.write("#define {}_DIFF_MIN {}\n".format(prefix, -self.diff_min))
+            f.write("#define {}_DST_SIZE {}\n".format(prefix, self.x_output * self.y_output))
+
+    def get_softmax_randomized_input_data(self, input_data):
+        # Generate or load saved input data unless hardcoded data provided.
+        if input_data is not None:
+            input_data = tf.reshape(input_data, [self.y_input, self.x_input])
+        else:
+            input_data = self.get_randomized_data([self.y_input, self.x_input],
+                                                  self.inputs_table_file,
+                                                  regenerate=self.regenerate_new_input)
+        return input_data
+
+    def softmax(self, indata):
+        indata = tf.cast(indata, dtype=tf.dtypes.float32)
+        out = tf.nn.softmax(indata)
+        return out
+
+    def generate_data(self, input_data=None, weights=None, biases=None):
+        input_data = self.get_softmax_randomized_input_data(input_data)
+        result = self.softmax(input_data)
+
+        self.generate_c_array("input", self.convert_tensor(input_data, self.quantize_input))
+        self.generate_c_array("output_ref", self.convert_tensor(result, self.quantize_output))
+
+        self.write_c_config_header()
+        self.write_c_header_wrapper()
+
+
 def load_all_testdatasets():
     """
     Add all new testdata sets here
@@ -897,6 +959,9 @@ def load_all_testdatasets():
     ALL_TESTDATA_SETS[dataset] = PoolingSettings(dataset, type_of_test, args, channels=1, x_in=4, y_in=2, stride_x=2,
                                                  stride_y=2, w_x=2, w_y=2, pad=False, randmin=-20, randmax=-5,
                                                  relu6=True)
+    type_of_test = 'softmax'
+    dataset = 'softmax'
+    ALL_TESTDATA_SETS[dataset] = SoftmaxSettings(dataset, type_of_test, args, x_in=5, y_in=1)
 
 
 if __name__ == '__main__':

+ 1 - 0
CMSIS/NN/Tests/UnitTest/unittest_targets.py

@@ -179,6 +179,7 @@ def test_target(target, args, main_test):
                              ' --source ' + CMSIS_PATH + 'NN/Source/PoolingFunctions/'
                              ' --source ' + CMSIS_PATH + 'NN/Source/NNSupportFunctions/'
                              ' --source ' + CMSIS_PATH + 'NN/Source/FullyConnectedFunctions/'
+                             ' --source ' + CMSIS_PATH + 'NN/Source/SoftmaxFunctions/'
                              + cmsis_flags +
                              additional_options,
                              flash_error_msg, die=die)