arm_mat_mult_fast_q15.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537
  1. /* ----------------------------------------------------------------------
  2. * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
  3. *
  4. * $Date: 26. October 2016
  5. * $Revision: V.1.4.5 a
  6. *
  7. * Project: CMSIS DSP Library
  8. * Title: arm_mat_mult_fast_q15.c
  9. *
  10. * Description: Q15 matrix multiplication (fast variant)
  11. *
  12. * Target Processor: Cortex-M4/Cortex-M3
  13. *
  14. * Redistribution and use in source and binary forms, with or without
  15. * modification, are permitted provided that the following conditions
  16. * are met:
  17. * - Redistributions of source code must retain the above copyright
  18. * notice, this list of conditions and the following disclaimer.
  19. * - Redistributions in binary form must reproduce the above copyright
  20. * notice, this list of conditions and the following disclaimer in
  21. * the documentation and/or other materials provided with the
  22. * distribution.
  23. * - Neither the name of ARM LIMITED nor the names of its contributors
  24. * may be used to endorse or promote products derived from this
  25. * software without specific prior written permission.
  26. *
  27. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  28. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  29. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
  30. * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
  31. * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  32. * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
  33. * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  34. * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
  35. * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  36. * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
  37. * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  38. * POSSIBILITY OF SUCH DAMAGE.
  39. * -------------------------------------------------------------------- */
  40. #include "arm_math.h"
  41. /**
  42. * @ingroup groupMatrix
  43. */
  44. /**
  45. * @addtogroup MatrixMult
  46. * @{
  47. */
  48. /**
  49. * @brief Q15 matrix multiplication (fast variant) for Cortex-M3 and Cortex-M4
  50. * @param[in] *pSrcA points to the first input matrix structure
  51. * @param[in] *pSrcB points to the second input matrix structure
  52. * @param[out] *pDst points to output matrix structure
  53. * @param[in] *pState points to the array for storing intermediate results
  54. * @return The function returns either
  55. * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.
  56. *
  57. * @details
  58. * <b>Scaling and Overflow Behavior:</b>
  59. *
  60. * \par
  61. * The difference between the function arm_mat_mult_q15() and this fast variant is that
  62. * the fast variant uses a 32-bit rather than a 64-bit accumulator.
  63. * The result of each 1.15 x 1.15 multiplication is truncated to
  64. * 2.30 format. These intermediate results are accumulated in a 32-bit register in 2.30
  65. * format. Finally, the accumulator is saturated and converted to a 1.15 result.
  66. *
  67. * \par
  68. * The fast version has the same overflow behavior as the standard version but provides
  69. * less precision since it discards the low 16 bits of each multiplication result.
  70. * In order to avoid overflows completely the input signals must be scaled down.
  71. * Scale down one of the input matrices by log2(numColsA) bits to
  72. * avoid overflows, as a total of numColsA additions are computed internally for each
  73. * output element.
  74. *
  75. * \par
  76. * See <code>arm_mat_mult_q15()</code> for a slower implementation of this function
  77. * which uses 64-bit accumulation to provide higher precision.
  78. */
arm_status arm_mat_mult_fast_q15(
  const arm_matrix_instance_q15 * pSrcA,
  const arm_matrix_instance_q15 * pSrcB,
  arm_matrix_instance_q15 * pDst,
  q15_t * pState)
{
  q31_t sum;                                    /* 32-bit accumulator (2.30 format) */
  q15_t *pSrcBT = pState;                       /* scratch buffer that receives the transpose of B */
  q15_t *pInA = pSrcA->pData;                   /* input data matrix pointer A of Q15 type */
  q15_t *pInB = pSrcB->pData;                   /* input data matrix pointer B of Q15 type */
  q15_t *px;                                    /* temporary output data matrix pointer */
  uint16_t numRowsA = pSrcA->numRows;           /* number of rows of input matrix A */
  uint16_t numColsB = pSrcB->numCols;           /* number of columns of input matrix B */
  uint16_t numColsA = pSrcA->numCols;           /* number of columns of input matrix A */
  uint16_t numRowsB = pSrcB->numRows;           /* number of rows of input matrix B */
  uint32_t col, i = 0u, row = numRowsB, colCnt; /* loop counters */
  arm_status status;                            /* status of matrix multiplication */
#ifndef UNALIGNED_SUPPORT_DISABLE
  q31_t in;                                     /* holds a packed pair of Q15 values read via __SIMD32 */
  q31_t inA1, inA2, inB1, inB2;
  q31_t sum2, sum3, sum4;                       /* three extra accumulators: a 2x2 output tile per iteration */
  q15_t *pInA2, *pInB2, *px2;                   /* second-row-of-A / second-column-of-B / second-output-row pointers */
  uint32_t j = 0;                               /* offset into transposed B for the current column pair */
#else
  q15_t in;                                     /* temporary variable to hold the input value */
  q15_t inA1, inA2, inB1, inB2;
#endif /* #ifndef UNALIGNED_SUPPORT_DISABLE */

#ifdef ARM_MATH_MATRIX_CHECK
  /* Check for matrix mismatch: A(m x p) * B(p x n) must match Dst(m x n) */
  if((pSrcA->numCols != pSrcB->numRows) ||
     (pSrcA->numRows != pDst->numRows) || (pSrcB->numCols != pDst->numCols))
  {
    /* Set status as ARM_MATH_SIZE_MISMATCH */
    status = ARM_MATH_SIZE_MISMATCH;
  }
  else
#endif
  {
    /* Phase 1: transpose B into the caller-supplied pState buffer (pSrcBT),
       so each column of B can later be walked as a contiguous row.
       Outer loop runs once per row of B (row = numRowsB on entry). */
    do
    {
      /* Apply loop unrolling and exchange the columns with row elements:
         process 4 elements of the current row of B per iteration */
      col = numColsB >> 2;

      /* px starts at element [0][i] of the transposed matrix; each store
         advances by numRowsB, i.e. down one "row" of the transpose */
      px = pSrcBT + i;

      /* First part of the processing with loop unrolling. Compute 4 outputs at a time.
      ** A second loop below computes the remaining 1 to 3 samples. */
      while(col > 0u)
      {
#ifndef UNALIGNED_SUPPORT_DISABLE
        /* Read two packed Q15 elements from the row */
        in = *__SIMD32(pInB)++;

        /* Unpack and store the low-address element (lane order depends on endianness) */
#ifndef ARM_MATH_BIG_ENDIAN
        *px = (q15_t) in;
#else
        *px = (q15_t) ((in & (q31_t) 0xffff0000) >> 16);
#endif /* #ifndef ARM_MATH_BIG_ENDIAN */

        /* Advance px to the next row of the transposed matrix */
        px += numRowsB;

        /* Unpack and store the second element */
#ifndef ARM_MATH_BIG_ENDIAN
        *px = (q15_t) ((in & (q31_t) 0xffff0000) >> 16);
#else
        *px = (q15_t) in;
#endif /* #ifndef ARM_MATH_BIG_ENDIAN */

        /* Advance px to the next row of the transposed matrix */
        px += numRowsB;

        /* Read the next packed pair from the row */
        in = *__SIMD32(pInB)++;

#ifndef ARM_MATH_BIG_ENDIAN
        *px = (q15_t) in;
#else
        *px = (q15_t) ((in & (q31_t) 0xffff0000) >> 16);
#endif /* #ifndef ARM_MATH_BIG_ENDIAN */

        px += numRowsB;

#ifndef ARM_MATH_BIG_ENDIAN
        *px = (q15_t) ((in & (q31_t) 0xffff0000) >> 16);
#else
        *px = (q15_t) in;
#endif /* #ifndef ARM_MATH_BIG_ENDIAN */
#else
        /* Unaligned-safe path: copy four elements one at a time */
        in = *pInB++;
        *px = in;
        px += numRowsB;

        in = *pInB++;
        *px = in;
        px += numRowsB;

        in = *pInB++;
        *px = in;
        px += numRowsB;

        in = *pInB++;
        *px = in;
#endif /* #ifndef UNALIGNED_SUPPORT_DISABLE */

        /* Advance past the fourth stored element */
        px += numRowsB;

        /* Decrement the column loop counter */
        col--;
      }

      /* If numColsB is not a multiple of 4, compute any remaining output samples here.
      ** No loop unrolling is used. */
      col = numColsB % 0x4u;
      while(col > 0u)
      {
        /* Read and store the input element in the destination */
        *px = *pInB++;

        /* Advance px to the next row of the transposed matrix */
        px += numRowsB;

        /* Decrement the column loop counter */
        col--;
      }

      /* Next row of B starts one column further into the transpose */
      i++;

      /* Decrement the row loop counter */
      row--;
    } while(row > 0u);

    /* Phase 2: multiply A by the transposed B.
       Reset the variables for use in the multiplication process. */
    row = numRowsA;
    i = 0u;               /* i indexes the start of the current row (pair) of A */
    px = pDst->pData;

#ifndef UNALIGNED_SUPPORT_DISABLE
    /* Process two rows from matrix A at a time and output two rows at a time;
       an odd final row is handled separately after this loop */
    row = row >> 1;
    px2 = px + numColsB;  /* px2 writes the second output row of the pair */
#endif

    /* The following loop performs the dot-product of each row in pSrcA
       with each column in pSrcB (read as rows of the transpose) */
    /* row loop */
    while(row > 0u)
    {
      /* For every row-wise pass, reset the column loop counter */
      col = numColsB;

      /* For every row-wise pass, pInB is reset to the start of the transposed B data */
      pInB = pSrcBT;

#ifndef UNALIGNED_SUPPORT_DISABLE
      /* Process two (transposed) columns from matrix B at a time;
         an odd final column is handled separately after the row loop */
      col = col >> 1;
      j = 0;
#endif

      /* column loop */
      while (col > 0u)
      {
        /* Clear the accumulator */
        sum = 0;

        /* Point pInA at the start of the row of A being processed */
        pInA = pSrcA->pData + i;

#ifndef UNALIGNED_SUPPORT_DISABLE
        /* Clear the three companion accumulators of the 2x2 output tile */
        sum2 = 0;
        sum3 = 0;
        sum4 = 0;
        pInB  = pSrcBT + j;          /* first of the two B columns (as transposed rows) */
        pInA2 = pInA + numColsA;     /* second row of A */
        pInB2 = pInB + numRowsB;     /* second B column */

        /* Read in two elements at once - allows dual MAC instruction */
        colCnt = numColsA >> 1;
#else
        colCnt = numColsA >> 2;
#endif

        /* matrix multiplication */
        while(colCnt > 0u)
        {
          /* c(m,n) = a(1,1)*b(1,1) + a(1,2) * b(2,1) + .... + a(m,p)*b(p,n) */
#ifndef UNALIGNED_SUPPORT_DISABLE
          /* Four packed reads feed four dual-MAC accumulations: every A pair
             is combined with every B pair to fill the 2x2 tile */
          inA1 = *__SIMD32(pInA)++;
          inB1 = *__SIMD32(pInB)++;
          inA2 = *__SIMD32(pInA2)++;
          inB2 = *__SIMD32(pInB2)++;

          sum  = __SMLAD(inA1, inB1, sum);
          sum2 = __SMLAD(inA1, inB2, sum2);
          sum3 = __SMLAD(inA2, inB1, sum3);
          sum4 = __SMLAD(inA2, inB2, sum4);
#else
          /* Scalar path, unrolled by 4 */
          inA1 = *pInA;
          inB1 = *pInB;
          sum += inA1 * inB1;

          inA2 = pInA[1];
          inB2 = pInB[1];
          sum += inA2 * inB2;

          inA1 = pInA[2];
          inB1 = pInB[2];
          sum += inA1 * inB1;

          inA2 = pInA[3];
          inB2 = pInB[3];
          sum += inA2 * inB2;

          pInA += 4;
          pInB += 4;
#endif /* #ifndef UNALIGNED_SUPPORT_DISABLE */

          /* Decrement the loop counter */
          colCnt--;
        }

        /* process odd column samples */
#ifndef UNALIGNED_SUPPORT_DISABLE
        if (numColsA & 1u) {
          /* One trailing element per row/column of the 2x2 tile */
          inA1 = *pInA++;
          inB1 = *pInB++;
          inA2 = *pInA2++;
          inB2 = *pInB2++;
          sum  += inA1 * inB1;
          sum2 += inA1 * inB2;
          sum3 += inA2 * inB1;
          sum4 += inA2 * inB2;
        }
#else
        colCnt = numColsA % 0x4u;
        while(colCnt > 0u)
        {
          /* c(m,n) = a(1,1)*b(1,1) + a(1,2) * b(2,1) + .... + a(m,p)*b(p,n) */
          sum += (q31_t) (*pInA++) * (*pInB++);
          colCnt--;
        }
#endif

        /* Convert the 2.30 accumulator to 1.15 and store.
           NOTE(review): this is a truncating shift with no explicit saturation
           (cf. the __SSAT used elsewhere in CMSIS) - the caller is expected to
           pre-scale inputs as described in the header comment; confirm intent. */
        *px++ = (q15_t) (sum >> 15);
#ifndef UNALIGNED_SUPPORT_DISABLE
        *px++  = (q15_t) (sum2 >> 15);
        *px2++ = (q15_t) (sum3 >> 15);
        *px2++ = (q15_t) (sum4 >> 15);

        /* Skip ahead two transposed-B rows (= two B columns) */
        j += numRowsB * 2;
#endif

        /* Decrement the column loop counter */
        col--;
      }

      /* Advance i to the next row of A */
      i = i + numColsA;
#ifndef UNALIGNED_SUPPORT_DISABLE
      /* Two rows were consumed per pass; also leapfrog the output pointers
         over the pair of rows just written (an odd trailing output column,
         if any, is filled by the fixup loop below) */
      i = i + numColsA;
      px  = px2 + (numColsB & 1u);
      px2 = px + numColsB;
#endif

      /* Decrement the row loop counter */
      row--;
    }

    /* Compute any remaining odd row/column below */
#ifndef UNALIGNED_SUPPORT_DISABLE
    /* Fixup 1: numColsB odd - the paired column loop above left the last
       output column unfilled for every (even-indexed) row pair */
    if (numColsB & 1u) {
      /* Avoid redundant computation of the last element: the odd-row fixup
         below covers the bottom-right corner, so stop at an even row count */
      row = numRowsA & (~0x1);

      /* Point to the remaining unfilled column in the output matrix */
      px = pDst->pData+numColsB-1;
      pInA = pSrcA->pData;

      /* row loop */
      while (row > 0)
      {
        /* Point to the last column of B (last row of the transpose) */
        pInB = pSrcBT + numRowsB*(numColsB-1);

        /* Clear the accumulator */
        sum = 0;

        /* Compute 4 products at once */
        colCnt = numColsA >> 2;

        /* matrix multiplication */
        while(colCnt > 0u)
        {
          inA1 = *__SIMD32(pInA)++;
          inA2 = *__SIMD32(pInA)++;
          inB1 = *__SIMD32(pInB)++;
          inB2 = *__SIMD32(pInB)++;

          sum = __SMLAD(inA1, inB1, sum);
          sum = __SMLAD(inA2, inB2, sum);

          /* Decrement the loop counter */
          colCnt--;
        }

        /* Tail: remaining 1-3 products */
        colCnt = numColsA & 3u;
        while(colCnt > 0u) {
          sum += (q31_t) (*pInA++) * (*pInB++);
          colCnt--;
        }

        /* Truncate to 1.15 and store (see saturation note above) */
        *px = (q15_t) (sum >> 15);
        px += numColsB;

        /* Decrement the row loop counter */
        row--;
      }
    }

    /* Fixup 2: numRowsA odd - the paired row loop above left the last
       output row unfilled; compute it in full here */
    if (numRowsA & 1u) {
      /* Point to the last row of the output matrix */
      px = pDst->pData+(numColsB)*(numRowsA-1);
      pInB = pSrcBT;
      col = numColsB;
      i = 0u;

      /* col loop */
      while (col > 0)
      {
        /* Point to the last row of matrix A */
        pInA = pSrcA->pData + (numRowsA-1)*numColsA;

        /* Clear the accumulator */
        sum = 0;

        /* Compute 4 products at once */
        colCnt = numColsA >> 2;

        /* matrix multiplication */
        while(colCnt > 0u)
        {
          inA1 = *__SIMD32(pInA)++;
          inA2 = *__SIMD32(pInA)++;
          inB1 = *__SIMD32(pInB)++;
          inB2 = *__SIMD32(pInB)++;

          sum = __SMLAD(inA1, inB1, sum);
          sum = __SMLAD(inA2, inB2, sum);

          /* Decrement the loop counter */
          colCnt--;
        }

        /* Tail: remaining 1-3 products */
        colCnt = numColsA & 3u;
        while(colCnt > 0u) {
          sum += (q31_t) (*pInA++) * (*pInB++);
          colCnt--;
        }

        /* Truncate to 1.15 and store (see saturation note above) */
        *px++ = (q15_t) (sum >> 15);

        /* Decrement the col loop counter */
        col--;
      }
    }
#endif /* #ifndef UNALIGNED_SUPPORT_DISABLE */

    /* Set status as ARM_MATH_SUCCESS */
    status = ARM_MATH_SUCCESS;
  }

  /* Return to application */
  return (status);
}
  411. /**
  412. * @} end of MatrixMult group
  413. */