
CMSIS-NN: Correct bug in softmax (#1442)

Måns Nilsson, 3 years ago
Parent commit 9dc018e601

+ 4 - 6
CMSIS/NN/Source/SoftmaxFunctions/arm_nn_softmax_common_s8.c

@@ -21,8 +21,8 @@
  * Title:        arm_nn_softmax_common_s8.c
  * Description:  Softmax with s8 input and output of s8 or s16.
  *
- * $Date:        9 March 2022
- * $Revision:    V.1.0.0
+ * $Date:        17 March 2022
+ * $Revision:    V.1.0.1
  *
  * Target Processor:  Cortex-M processors
  * -------------------------------------------------------------------- */
@@ -88,7 +88,7 @@ void arm_nn_softmax_common_s8(const int8_t *input,
 
         if (int16_output)
         {
-            int16_t *output_s16 = (int16_t *)output;
+            int16_t *output_s16 = (int16_t *)output + row_idx * row_size;
 
             bits_over_unit = ACCUM_BITS - headroom + 15;
 
@@ -108,11 +108,10 @@ void arm_nn_softmax_common_s8(const int8_t *input,
                     output_s16[col] = NN_Q15_MIN;
                 }
             }
-            output_s16 += row_size;
         }
         else
         {
-            int8_t *output_s8 = (int8_t *)output;
+            int8_t *output_s8 = (int8_t *)output + row_idx * row_size;
 
             bits_over_unit = ACCUM_BITS - headroom + 23;
 
@@ -131,7 +130,6 @@ void arm_nn_softmax_common_s8(const int8_t *input,
                     output_s8[col] = NN_Q7_MIN;
                 }
             }
-            output_s8 += row_size;
         }
 
         input += row_size;

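Note on the fix above: in the original code the output pointer was recast from the base `output` buffer on every row iteration, so the trailing `output_s16 += row_size;` / `output_s8 += row_size;` had no lasting effect and every row of the result overwrote row 0. The change offsets the cast pointer by `row_idx * row_size` instead. The following is a minimal standalone sketch (not CMSIS-NN code) of that indexing pattern; `fill_row()` is a hypothetical stand-in for the per-row softmax computation, and NUM_ROWS/ROW_SIZE are illustrative values matching the new softmax test (2 rows of 5).

/* Standalone illustration of the per-row output offset fixed above. */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the per-row softmax computation. */
static void fill_row(int16_t *dst, int32_t row_size, int16_t value)
{
    for (int32_t col = 0; col < row_size; col++)
    {
        dst[col] = value;
    }
}

int main(void)
{
    enum { NUM_ROWS = 2, ROW_SIZE = 5 };
    int16_t output[NUM_ROWS * ROW_SIZE] = {0};
    void *base = output;

    for (int32_t row_idx = 0; row_idx < NUM_ROWS; row_idx++)
    {
        /* Buggy form:  int16_t *output_s16 = (int16_t *)base;
           Fixed form:  offset the cast base pointer by the current row,
           as in the diff above. */
        int16_t *output_s16 = (int16_t *)base + row_idx * ROW_SIZE;
        fill_row(output_s16, ROW_SIZE, (int16_t)(row_idx + 1));
    }

    /* With the fix each row keeps its own values (1 1 1 1 1 / 2 2 2 2 2);
       with the buggy form only row 0 would ever be written. */
    for (int32_t row_idx = 0; row_idx < NUM_ROWS; row_idx++)
    {
        for (int32_t col = 0; col < ROW_SIZE; col++)
        {
            printf("%d ", output[row_idx * ROW_SIZE + col]);
        }
        printf("\n");
    }
    return 0;
}

The unit-test data below is regenerated with more than one row (NUM_ROWS > 1) so that this multi-row case is actually exercised.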
+ 3 - 2
CMSIS/NN/Tests/UnitTest/PregeneratedData/softmax/input.txt

@@ -1,2 +1,3 @@
-# 1,5
--9.900000000000000000e+01,-1.220000000000000000e+02,-6.700000000000000000e+01,1.000000000000000000e+01,-8.200000000000000000e+01
+# 2,5
+1.010000000000000000e+02,4.900000000000000000e+01,6.000000000000000000e+00,-3.400000000000000000e+01,-7.500000000000000000e+01
+-7.900000000000000000e+01,-3.800000000000000000e+01,1.200000000000000000e+02,-5.500000000000000000e+01,1.150000000000000000e+02

+ 4 - 2
CMSIS/NN/Tests/UnitTest/PregeneratedData/softmax_s16/input.txt

@@ -1,2 +1,4 @@
-# 1,10
-1.847000000000000000e+03,2.831400000000000000e+04,-1.539900000000000000e+04,-2.144500000000000000e+04,2.364600000000000000e+04,-3.276600000000000000e+04,2.952100000000000000e+04,-5.296000000000000000e+03,-1.753000000000000000e+03,-1.600400000000000000e+04
+# 3,10
+-9.644000000000000000e+03,-2.804200000000000000e+04,-8.526000000000000000e+03,2.976500000000000000e+04,-5.843000000000000000e+03,-5.893000000000000000e+03,-2.149600000000000000e+04,2.003300000000000000e+04,2.367700000000000000e+04,1.887500000000000000e+04
+-2.711800000000000000e+04,-1.526600000000000000e+04,1.731500000000000000e+04,2.460900000000000000e+04,2.425700000000000000e+04,8.343000000000000000e+03,-2.746000000000000000e+03,-2.751300000000000000e+04,1.995000000000000000e+04,-2.418100000000000000e+04
+-1.106800000000000000e+04,-2.725700000000000000e+04,-2.705100000000000000e+04,1.859500000000000000e+04,-2.223400000000000000e+04,5.207000000000000000e+03,2.493300000000000000e+04,-2.798600000000000000e+04,3.141200000000000000e+04,2.852200000000000000e+04

+ 3 - 2
CMSIS/NN/Tests/UnitTest/PregeneratedData/softmax_s8_s16/input.txt

@@ -1,2 +1,3 @@
-# 1,12
--5.000000000000000000e+01,2.200000000000000000e+01,5.900000000000000000e+01,-6.900000000000000000e+01,1.500000000000000000e+01,3.100000000000000000e+01,-7.300000000000000000e+01,7.400000000000000000e+01,-5.700000000000000000e+01,-7.900000000000000000e+01,-1.070000000000000000e+02,1.900000000000000000e+01
+# 2,12
+-4.400000000000000000e+01,3.700000000000000000e+01,-3.900000000000000000e+01,4.600000000000000000e+01,2.000000000000000000e+01,5.600000000000000000e+01,1.700000000000000000e+01,-6.900000000000000000e+01,-7.300000000000000000e+01,2.500000000000000000e+01,-1.240000000000000000e+02,1.250000000000000000e+02
+1.700000000000000000e+01,2.200000000000000000e+01,1.800000000000000000e+01,-8.900000000000000000e+01,1.110000000000000000e+02,6.700000000000000000e+01,-7.800000000000000000e+01,9.000000000000000000e+00,-5.400000000000000000e+01,1.300000000000000000e+01,-3.600000000000000000e+01,7.500000000000000000e+01

+ 2 - 2
CMSIS/NN/Tests/UnitTest/TestCases/TestData/softmax/config_data.h

@@ -1,8 +1,8 @@
 // Generated by generate_test_data.py using TFL version 2.6.0 as reference.
 #pragma once
-#define SOFTMAX_NUM_ROWS 1
+#define SOFTMAX_NUM_ROWS 2
 #define SOFTMAX_ROW_SIZE 5
 #define SOFTMAX_INPUT_MULT 1077952640
 #define SOFTMAX_INPUT_LEFT_SHIFT 19
 #define SOFTMAX_DIFF_MIN -3968
-#define SOFTMAX_DST_SIZE 5
+#define SOFTMAX_DST_SIZE 10

+ 1 - 1
CMSIS/NN/Tests/UnitTest/TestCases/TestData/softmax/input_data.h

@@ -2,4 +2,4 @@
 #pragma once
 #include <stdint.h>
 
-const q7_t softmax_input[5] = {-99, -122, -67, 10, -82};
+const q7_t softmax_input[10] = {101, 49, 6, -34, -75, -79, -38, 120, -55, 115};

+ 1 - 1
CMSIS/NN/Tests/UnitTest/TestCases/TestData/softmax/output_ref_data.h

@@ -2,4 +2,4 @@
 #pragma once
 #include <stdint.h>
 
-const q7_t softmax_output_ref[5] = {-83, -87, -77, -59, -80};
+const q7_t softmax_output_ref[10] = {-57, -70, -79, -86, -92, -94, -88, -54, -91, -56};

+ 2 - 2
CMSIS/NN/Tests/UnitTest/TestCases/TestData/softmax_s16/config_data.h

@@ -1,7 +1,7 @@
 // Generated by generate_test_data.py using TFL version 2.6.0 as reference.
 #pragma once
-#define SOFTMAX_S16_NUM_ROWS 1
+#define SOFTMAX_S16_NUM_ROWS 3
 #define SOFTMAX_S16_ROW_SIZE 10
 #define SOFTMAX_S16_INPUT_MULT 1718013132
 #define SOFTMAX_S16_INPUT_LEFT_SHIFT -2
-#define SOFTMAX_S16_DST_SIZE 10
+#define SOFTMAX_S16_DST_SIZE 30

+ 3 - 1
CMSIS/NN/Tests/UnitTest/TestCases/TestData/softmax_s16/input_data.h

@@ -2,4 +2,6 @@
 #pragma once
 #include <stdint.h>
 
-const q15_t softmax_s16_input[10] = {1847, 28314, -15399, -21445, 23646, -32766, 29521, -5296, -1753, -16004};
+const q15_t softmax_s16_input[30] = {-9644,  -28042, -8526,  29765, -5843,  -5893, -21496, 20033,  23677, 18875,
+                                     -27118, -15266, 17315,  24609, 24257,  8343,  -2746,  -27513, 19950, -24181,
+                                     -11068, -27257, -27051, 18595, -22234, 5207,  24933,  -27986, 31412, 28522};

+ 3 - 1
CMSIS/NN/Tests/UnitTest/TestCases/TestData/softmax_s16/output_ref_data.h

@@ -2,4 +2,6 @@
 #pragma once
 #include <stdint.h>
 
-const q15_t softmax_s16_output_ref[10] = {2920, 6547, 1725, 1434, 5678, 1015, 6793, 2347, 2616, 1693};
+const q15_t softmax_s16_output_ref[30] = {1986, 1133, 2055, 6611, 2230, 2227, 1383, 4912, 5490, 4742,
+                                          1200, 1723, 4657, 5818, 5756, 3542, 2525, 1186, 5047, 1313,
+                                          1856, 1133, 1140, 4590, 1320, 3051, 5570, 1108, 6787, 6214};

+ 2 - 2
CMSIS/NN/Tests/UnitTest/TestCases/TestData/softmax_s8_s16/config_data.h

@@ -1,8 +1,8 @@
 // Generated by generate_test_data.py using TFL version 2.6.0 as reference.
 #pragma once
-#define SOFTMAX_S8_S16_NUM_ROWS 1
+#define SOFTMAX_S8_S16_NUM_ROWS 2
 #define SOFTMAX_S8_S16_ROW_SIZE 12
 #define SOFTMAX_S8_S16_INPUT_MULT 1078071151
 #define SOFTMAX_S8_S16_INPUT_LEFT_SHIFT 19
 #define SOFTMAX_S8_S16_DIFF_MIN -3968
-#define SOFTMAX_S8_S16_DST_SIZE 12
+#define SOFTMAX_S8_S16_DST_SIZE 24

+ 2 - 1
CMSIS/NN/Tests/UnitTest/TestCases/TestData/softmax_s8_s16/input_data.h

@@ -2,4 +2,5 @@
 #pragma once
 #include <stdint.h>
 
-const q7_t softmax_s8_s16_input[12] = {-50, 22, 59, -69, 15, 31, -73, 74, -57, -79, -107, 19};
+const q7_t softmax_s8_s16_input[24] = {-44, 37, -39, 46,  20,  56, 17,  -69, -73, 25, -124, 125,
+                                       17,  22, 18,  -89, 111, 67, -78, 9,   -54, 13, -36,  75};

+ 3 - 2
CMSIS/NN/Tests/UnitTest/TestCases/TestData/softmax_s8_s16/output_ref_data.h

@@ -2,5 +2,6 @@
 #pragma once
 #include <stdint.h>
 
-const q15_t softmax_s8_s16_output_ref[12] =
-    {-28076, -26545, -25573, -28413, -26714, -26322, -28481, -25138, -28203, -28581, -29016, -26618};
+const q15_t softmax_s8_s16_output_ref[24] = {-28290, -26615, -28201, -26394, -27012, -26139, -27079, -28708,
+                                             -28771, -26898, -29496, -24079, -27220, -27110, -27199, -29107,
+                                             -24747, -26018, -28946, -27392, -28569, -27307, -28262, -25803};

+ 9 - 9
CMSIS/NN/Tests/UnitTest/generate_test_data.py

@@ -876,9 +876,9 @@ class SoftmaxSettings(TestSettings):
             # Create a one-layer Keras model.
             model = tf.keras.models.Sequential()
             input_shape = (self.y_input, self.x_input)
-            model.add(tf.keras.layers.Softmax(input_shape=input_shape[1:]))
+            model.add(tf.keras.layers.Softmax(input_shape=input_shape))
 
-            interpreter = self.convert_and_interpret(model, inttype, input_data)
+            interpreter = self.convert_and_interpret(model, inttype, tf.expand_dims(input_data, axis=0))
             output_details = interpreter.get_output_details()
             interpreter.invoke()
             output_data = interpreter.get_tensor(output_details[0]["index"])
@@ -1391,25 +1391,25 @@ def load_all_testdatasets():
 
     type_of_test = 'softmax'
     dataset = 'softmax'
-    ALL_TESTDATA_SETS[dataset] = SoftmaxSettings(dataset, type_of_test, args, x_in=5, y_in=1)
+    ALL_TESTDATA_SETS[dataset] = SoftmaxSettings(dataset, type_of_test, args, x_in=5, y_in=2)
     dataset = 'softmax_s16'
-    ALL_TESTDATA_SETS[dataset] = SoftmaxSettings(dataset, type_of_test, args, x_in=10, y_in=1, int16xint8=True,
+    ALL_TESTDATA_SETS[dataset] = SoftmaxSettings(dataset, type_of_test, args, x_in=10, y_in=3, int16xint8=True,
                                                  randmin=INT16_MIN, randmax=INT16_MAX)
     dataset = 'softmax_s8_s16'
-    ALL_TESTDATA_SETS[dataset] = SoftmaxSettings(dataset, type_of_test, args, x_in=12, y_in=1, inInt8outInt16=True)
+    ALL_TESTDATA_SETS[dataset] = SoftmaxSettings(dataset, type_of_test, args, x_in=12, y_in=2, inInt8outInt16=True)
 
     type_of_test = 'svdf'
     dataset = 'svdf'
-    ALL_TESTDATA_SETS[dataset] = SVDFSettings(dataset, type_of_test, args,  batches=2, number_inputs=2, rank=8,
+    ALL_TESTDATA_SETS[dataset] = SVDFSettings(dataset, type_of_test, args, batches=2, number_inputs=2, rank=8,
                                               memory_size=8, input_size=3, number_units=3)
     dataset = 'svdf_1'
-    ALL_TESTDATA_SETS[dataset] = SVDFSettings(dataset, type_of_test, args,  batches=3, number_inputs=2, rank=1,
+    ALL_TESTDATA_SETS[dataset] = SVDFSettings(dataset, type_of_test, args, batches=3, number_inputs=2, rank=1,
                                               memory_size=2, input_size=7, number_units=5)
     dataset = 'svdf_2'
-    ALL_TESTDATA_SETS[dataset] = SVDFSettings(dataset, type_of_test, args,  batches=3, number_inputs=2, rank=2,
+    ALL_TESTDATA_SETS[dataset] = SVDFSettings(dataset, type_of_test, args, batches=3, number_inputs=2, rank=2,
                                               memory_size=2, input_size=7, number_units=5, generate_bias=False)
     dataset = 'svdf_3'
-    ALL_TESTDATA_SETS[dataset] = SVDFSettings(dataset, type_of_test, args,  batches=1, number_inputs=2, rank=1,
+    ALL_TESTDATA_SETS[dataset] = SVDFSettings(dataset, type_of_test, args, batches=1, number_inputs=2, rank=1,
                                               memory_size=2, input_size=20, number_units=12, generate_bias=False)
 
     type_of_test = 'add'