/* arm_softmax_q15.c */
  1. /*
  2. * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved.
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Licensed under the Apache License, Version 2.0 (the License); you may
  7. * not use this file except in compliance with the License.
  8. * You may obtain a copy of the License at
  9. *
  10. * www.apache.org/licenses/LICENSE-2.0
  11. *
  12. * Unless required by applicable law or agreed to in writing, software
  13. * distributed under the License is distributed on an AS IS BASIS, WITHOUT
  14. * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  15. * See the License for the specific language governing permissions and
  16. * limitations under the License.
  17. */
  18. /* ----------------------------------------------------------------------
  19. * Project: CMSIS NN Library
  20. * Title: arm_softmax_q15.c
  21. * Description: Q15 softmax function
  22. *
  23. * $Date: 09. October 2020
  24. * $Revision: V.1.0.1
  25. *
  26. * Target Processor: Cortex-M cores
  27. *
  28. * -------------------------------------------------------------------- */
  29. #include "arm_nnfunctions.h"
  30. /**
  31. * @ingroup groupNN
  32. */
  33. /**
  34. * @addtogroup Softmax
  35. * @{
  36. */
  37. /**
  38. * @brief Q15 softmax function
  39. * @param[in] vec_in pointer to input vector
  40. * @param[in] dim_vec input vector dimension
  41. * @param[out] p_out pointer to output vector
  42. *
  43. * @details
  44. *
  45. * Here, instead of typical e based softmax, we use
  46. * 2-based softmax, i.e.,:
  47. *
  48. * y_i = 2^(x_i) / sum(2^x_j)
  49. *
  50. * The relative output will be different here.
  51. * But mathematically, the gradient will be the same
  52. * with a log(2) scaling factor.
  53. *
  54. */
  55. void arm_softmax_q15(const q15_t *vec_in, const uint16_t dim_vec, q15_t *p_out)
  56. {
  57. q31_t sum;
  58. int16_t i;
  59. uint8_t shift;
  60. q31_t base;
  61. base = -1 * 0x100000;
  62. for (i = 0; i < dim_vec; i++)
  63. {
  64. if (vec_in[i] > base)
  65. {
  66. base = vec_in[i];
  67. }
  68. }
  69. /* we ignore really small values
  70. * anyway, they will be 0 after shrinking
  71. * to q15_t
  72. */
  73. base = base - 16;
  74. sum = 0;
  75. for (i = 0; i < dim_vec; i++)
  76. {
  77. if (vec_in[i] > base)
  78. {
  79. shift = (uint8_t)__USAT(vec_in[i] - base, 5);
  80. sum += 0x1 << shift;
  81. }
  82. }
  83. /* This is effectively (0x1 << 32) / sum */
  84. int64_t div_base = 0x100000000LL;
  85. int output_base = (int32_t)(div_base / sum);
  86. /* Final confidence will be output_base >> ( 17 - (vec_in[i] - base) )
  87. * so 32768 (0x1<<15) -> 100% confidence when sum = 0x1 << 16, output_base = 0x1 << 16
  88. * and vec_in[i]-base = 16
  89. */
  90. for (i = 0; i < dim_vec; i++)
  91. {
  92. if (vec_in[i] > base)
  93. {
  94. /* Here minimum value of 17+base-vec[i] will be 1 */
  95. shift = (uint8_t)__USAT(17 + base - vec_in[i], 5);
  96. p_out[i] = (q15_t)__SSAT((output_base >> shift), 16);
  97. }
  98. else
  99. {
  100. p_out[i] = 0;
  101. }
  102. }
  103. }
  104. /**
  105. * @} end of Softmax group
  106. */