riscv_nnfunctions.h 202 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321332233233324332533263327332833293330333133323333333433353336333733383339334033413342334333443345334633473348334933503351335233533354335533563357335833593360336133623363336433653366336733683369337033713372337333743375337633773378337933803381338233833384338533863387338833893390339133923393339433953396339733983399340034013402340334043405340634073408340934103411341234133414341534163417341834193420342134223423342434253426342734283429343034313432343334343435343634373438343934403441344234433444344534463447344834493450345134523453345434553456345734583459346034613462346334643465346634673468346934703471347234733474347534763477347834793480348134823483348434853486348734883489349034913492349334943495349634973498349935003501350235033504350535063507350835093510351135123513351435153516351735183519352035213522352335243525352635273528352935303531353235333534353535363537353835393540354135423543354435453546354735483549355035513552355335543555355635573558355935603561356235633564356535663567356835693570357135723573357435753576357735783579358035813582358335843585358635873588358935903591359235933594359535963597359835993600360136023603360436053606360736083609361036113612361336143615361636173618361936203621362236233624362536263627362836293630363136323633363436353636363736383639364036413642364336443645364636473648364936503651365236533654365536563657365836593660366136623663366436653666366736683669
  1. /*
  2. * SPDX-FileCopyrightText: Copyright 2010-2024 Arm Limited and/or its affiliates <open-source-office@arm.com>
  3. * Copyright (c) 2022 Nuclei Limited. All rights reserved.
  4. *
  5. * SPDX-License-Identifier: Apache-2.0
  6. *
  7. * Licensed under the Apache License, Version 2.0 (the License); you may
  8. * not use this file except in compliance with the License.
  9. * You may obtain a copy of the License at
  10. *
  11. * www.apache.org/licenses/LICENSE-2.0
  12. *
  13. * Unless required by applicable law or agreed to in writing, software
  14. * distributed under the License is distributed on an AS IS BASIS, WITHOUT
  15. * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  16. * See the License for the specific language governing permissions and
  17. * limitations under the License.
  18. */
  19. /* ----------------------------------------------------------------------
  20. * Project: NMSIS NN Library
  21. * Title: riscv_nnfunctions.h
  22. * Description: Public header file for NMSIS NN Library
  23. *
  24. * $Date: 04 November 2024
  25. * $Revision: V.18.0.0
  26. *
  27. * Target Processor: RISC-V Cores
  28. * -------------------------------------------------------------------- */
  29. /**
  30. * @defgroup Public Public
  31. * A collection of functions to perform basic operations for neural network layers. Functions with a _s8 suffix support
  32. * TensorFlow Lite framework.
  33. */
  34. #ifndef RISCV_NNFUNCTIONS_H
  35. #define RISCV_NNFUNCTIONS_H
  36. #include "riscv_nn_math_types.h"
  37. #include "riscv_nn_types.h"
  38. #define USE_INTRINSIC
  39. #ifdef __cplusplus
  40. extern "C" {
  41. #endif
  42. /**
  43. * @defgroup NNConv Convolution Functions
  44. *
  45. * Collection of convolution, depthwise convolution functions and their variants.
  46. *
  47. * The convolution is implemented in 2 steps: im2col and General Matrix Multiplication(GEMM)
  48. *
  49. * im2col is a process of converting each patch of image data into
  50. * a column. After im2col, the convolution is computed as matrix-matrix
  51. * multiplication.
  52. *
  53. * To reduce the memory footprint, the im2col is performed partially.
  54. * Each iteration, only a few column (i.e., patches) are generated followed
  55. * by GEMM.
  56. *
  57. */
  58. /**
  59. * @brief s4 convolution layer wrapper function with the main purpose to call the optimal kernel available in
  60. * nmsis-nn to perform the convolution.
  61. *
  62. * @param[in, out] ctx Function context that contains the additional buffer if required by the function.
  63. * riscv_convolve_wrapper_s4_get_buffer_size will return the buffer_size if required.
  64. * The caller is expected to clear the buffer, if applicable, for security reasons.
  65. * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...).
  66. * Range of conv_params->input_offset : [-127, 128]
  67. * Range of conv_params->output_offset : [-128, 127]
  68. * @param[in] quant_params Per-channel quantization info.
  69. * It contains the multiplier and shift values to be applied to each output channel
  70. * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
  71. * @param[in] input_data Input (activation) data pointer. Data type: int8
  72. * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the
  73. * spatial filter dimensions
  74. * @param[in] filter_data Filter data pointer. Data type: int8 packed with 2x int4
  75. * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
  76. * @param[in] bias_data Bias data pointer. Data type: int32
  77. * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
  78. * @param[out] output_data Output data pointer. Data type: int8
  79. *
  80. * @return The function returns either
  81. * <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
  82. * <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
  83. *
  84. */
  85. riscv_nmsis_nn_status riscv_convolve_wrapper_s4(const nmsis_nn_context *ctx,
  86. const nmsis_nn_conv_params *conv_params,
  87. const nmsis_nn_per_channel_quant_params *quant_params,
  88. const nmsis_nn_dims *input_dims,
  89. const int8_t *input_data,
  90. const nmsis_nn_dims *filter_dims,
  91. const int8_t *filter_data,
  92. const nmsis_nn_dims *bias_dims,
  93. const int32_t *bias_data,
  94. const nmsis_nn_dims *output_dims,
  95. int8_t *output_data);
  96. /**
  97. * @brief Get the required buffer size for riscv_convolve_wrapper_s4
  98. *
  99. * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...).
  100. * Range of conv_params->input_offset : [-127, 128]
  101. * Range of conv_params->output_offset : [-128, 127]
  102. * @param[in] input_dims Input (activation) dimensions. Format: [N, H, W, C_IN]
  103. * @param[in] filter_dims Filter dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the spatial
  104. * filter dimensions
  105. * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
  106. *
  107. * @return The function returns required buffer size(bytes)
  108. *
  109. */
  110. int32_t riscv_convolve_wrapper_s4_get_buffer_size(const nmsis_nn_conv_params *conv_params,
  111. const nmsis_nn_dims *input_dims,
  112. const nmsis_nn_dims *filter_dims,
  113. const nmsis_nn_dims *output_dims);
  114. /**
  115. * @brief Get the required buffer size for riscv_convolve_wrapper_s4 for processors with DSP extension.
  116. * Refer to riscv_convolve_wrapper_s4_get_buffer_size() for function argument details.
  117. *
  118. * @note Intended for compilation on Host. If compiling for a RISC-V target, use
  119. * riscv_convolve_wrapper_s4_get_buffer_size().
  120. *
  121. */
  122. int32_t riscv_convolve_wrapper_s4_get_buffer_size_dsp(const nmsis_nn_conv_params *conv_params,
  123. const nmsis_nn_dims *input_dims,
  124. const nmsis_nn_dims *filter_dims,
  125. const nmsis_nn_dims *output_dims);
  126. /**
  127. * @brief s8 convolution layer wrapper function with the main purpose to call the optimal kernel available in
  128. * nmsis-nn to perform the convolution.
  129. *
  130. * @param[in, out] ctx Function context that contains the additional buffer if required by the function.
  131. * riscv_convolve_wrapper_s8_get_buffer_size will return the buffer_size if required.
  132. * The caller is expected to clear the buffer, if applicable, for security reasons.
  133. * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...).
  134. * Range of conv_params->input_offset : [-127, 128]
  135. * Range of conv_params->output_offset : [-128, 127]
  136. * @param[in] quant_params Per-channel quantization info.
  137. * It contains the multiplier and shift values to be applied to each output channel
  138. * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
  139. * @param[in] input_data Input (activation) data pointer. Data type: int8
  140. * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the
  141. * spatial filter dimensions
  142. * @param[in] filter_data Filter data pointer. Data type: int8
  143. * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
  144. * @param[in] bias_data Bias data pointer. Data type: int32
  145. * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
  146. * @param[out] output_data Output data pointer. Data type: int8
  147. *
  148. * @return The function returns either
  149. * <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
  150. * <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
  151. *
  152. */
  153. riscv_nmsis_nn_status riscv_convolve_wrapper_s8(const nmsis_nn_context *ctx,
  154. const nmsis_nn_conv_params *conv_params,
  155. const nmsis_nn_per_channel_quant_params *quant_params,
  156. const nmsis_nn_dims *input_dims,
  157. const int8_t *input_data,
  158. const nmsis_nn_dims *filter_dims,
  159. const int8_t *filter_data,
  160. const nmsis_nn_dims *bias_dims,
  161. const int32_t *bias_data,
  162. const nmsis_nn_dims *output_dims,
  163. int8_t *output_data);
  164. /**
  165. * @brief Get the required buffer size for riscv_convolve_wrapper_s8
  166. *
  167. * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...).
  168. * Range of conv_params->input_offset : [-127, 128]
  169. * Range of conv_params->output_offset : [-128, 127]
  170. * @param[in] input_dims Input (activation) dimensions. Format: [N, H, W, C_IN]
  171. * @param[in] filter_dims Filter dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the spatial
  172. * filter dimensions
  173. * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
  174. *
  175. * @return The function returns required buffer size(bytes)
  176. *
  177. */
  178. int32_t riscv_convolve_wrapper_s8_get_buffer_size(const nmsis_nn_conv_params *conv_params,
  179. const nmsis_nn_dims *input_dims,
  180. const nmsis_nn_dims *filter_dims,
  181. const nmsis_nn_dims *output_dims);
  182. /**
  183. * @brief Get the required buffer size for riscv_convolve_wrapper_s8 for processors with DSP extension.
  184. * Refer to riscv_convolve_wrapper_s8_get_buffer_size() for function argument details.
  185. *
  186. * @note Intended for compilation on Host. If compiling for a RISC-V target, use
  187. * riscv_convolve_wrapper_s8_get_buffer_size().
  188. *
  189. */
  190. int32_t riscv_convolve_wrapper_s8_get_buffer_size_dsp(const nmsis_nn_conv_params *conv_params,
  191. const nmsis_nn_dims *input_dims,
  192. const nmsis_nn_dims *filter_dims,
  193. const nmsis_nn_dims *output_dims);
  194. /**
  195. * @brief s16 convolution layer wrapper function with the main purpose to call the optimal kernel available in
  196. * nmsis-nn to perform the convolution.
  197. *
  198. * @param[in, out] ctx Function context that contains the additional buffer if required by the function.
  199. * riscv_convolve_wrapper_s16_get_buffer_size will return the buffer_size if required
  200. * The caller is expected to clear the buffer, if applicable, for security reasons.
  201. * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...).
  202. * conv_params->input_offset : Not used
  203. * conv_params->output_offset : Not used
  204. * @param[in] quant_params Per-channel quantization info.
  205. * It contains the multiplier and shift values to be applied to each output channel
  206. * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
  207. * @param[in] input_data Input (activation) data pointer. Data type: int16
  208. * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the
  209. * spatial filter dimensions
  210. * @param[in] filter_data Filter data pointer. Data type: int8
  211. * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
  212. * @param[in] bias_data Struct with optional bias data pointer. Bias data type can be int64 or int32 depending
  213. * flag in struct.
  214. * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
  215. * @param[out] output_data Output data pointer. Data type: int16
  216. *
  217. * @return The function returns either
  218. * <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
  219. * <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
  220. *
  221. */
  222. riscv_nmsis_nn_status riscv_convolve_wrapper_s16(const nmsis_nn_context *ctx,
  223. const nmsis_nn_conv_params *conv_params,
  224. const nmsis_nn_per_channel_quant_params *quant_params,
  225. const nmsis_nn_dims *input_dims,
  226. const int16_t *input_data,
  227. const nmsis_nn_dims *filter_dims,
  228. const int8_t *filter_data,
  229. const nmsis_nn_dims *bias_dims,
  230. const nmsis_nn_bias_data *bias_data,
  231. const nmsis_nn_dims *output_dims,
  232. int16_t *output_data);
  233. /**
  234. * @brief Get the required buffer size for riscv_convolve_wrapper_s16.
  235. *
  236. * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...).
  237. * conv_params->input_offset : Not used
  238. * conv_params->output_offset : Not used
  239. * @param[in] input_dims Input (activation) dimensions. Format: [N, H, W, C_IN]
  240. * @param[in] filter_dims Filter dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the spatial
  241. * filter dimensions
  242. * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
  243. *
  244. * @return The function returns required buffer size(bytes)
  245. *
  246. */
  247. int32_t riscv_convolve_wrapper_s16_get_buffer_size(const nmsis_nn_conv_params *conv_params,
  248. const nmsis_nn_dims *input_dims,
  249. const nmsis_nn_dims *filter_dims,
  250. const nmsis_nn_dims *output_dims);
  251. /**
  252. * @brief Get the required buffer size for riscv_convolve_wrapper_s16 for processors with DSP extension.
  253. * Refer to riscv_convolve_wrapper_s16_get_buffer_size() for function argument details.
  254. *
  255. * @note Intended for compilation on Host. If compiling for a RISC-V target, use
  256. * riscv_convolve_wrapper_s16_get_buffer_size().
  257. *
  258. */
  259. int32_t riscv_convolve_wrapper_s16_get_buffer_size_dsp(const nmsis_nn_conv_params *conv_params,
  260. const nmsis_nn_dims *input_dims,
  261. const nmsis_nn_dims *filter_dims,
  262. const nmsis_nn_dims *output_dims);
  263. /**
  264. * @brief Basic s4 convolution function
  265. * @param[in, out] ctx Function context that contains the additional buffer if required by the function.
  266. * riscv_convolve_s4_get_buffer_size will return the buffer_size if required.
  267. * The caller is expected to clear the buffer, if applicable, for security reasons.
  268. * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...).
  269. * Range of conv_params->input_offset : [-127, 128]
  270. * Range of conv_params->output_offset : [-128, 127]
  271. * @param[in] quant_params Per-channel quantization info.
  272. * It contains the multiplier and shift values to be applied to each output channel
  273. * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
  274. * @param[in] input_data Input (activation) data pointer. Data type: int8
  275. * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the
  276. * spatial filter dimensions
  277. * @param[in] filter_data Packed Filter data pointer. Data type: int8 packed with 2x int4
  278. * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
  279. * @param[in] bias_data Optional bias data pointer. Data type: int32
  280. * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
  281. * @param[out] output_data Output data pointer. Data type: int8
  282. * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
  283. *
  284. * @details
  285. * 1. Supported framework: TensorFlow Lite micro
  286. * 2. Additional memory is required for optimization. Refer to argument 'ctx' for details.
  287. *
  288. */
  289. riscv_nmsis_nn_status riscv_convolve_s4(const nmsis_nn_context *ctx,
  290. const nmsis_nn_conv_params *conv_params,
  291. const nmsis_nn_per_channel_quant_params *quant_params,
  292. const nmsis_nn_dims *input_dims,
  293. const int8_t *input_data,
  294. const nmsis_nn_dims *filter_dims,
  295. const int8_t *filter_data,
  296. const nmsis_nn_dims *bias_dims,
  297. const int32_t *bias_data,
  298. const nmsis_nn_dims *output_dims,
  299. int8_t *output_data);
  300. /**
  301. * @brief Basic s8 convolution function
  302. * @param[in, out] ctx Function context that contains the additional buffer if required by the function.
  303. * riscv_convolve_s8_get_buffer_size will return the buffer_size if required.
  304. * The caller is expected to clear the buffer, if applicable, for security reasons.
  305. * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...).
  306. * Range of conv_params->input_offset : [-127, 128]
  307. * Range of conv_params->output_offset : [-128, 127]
  308. * @param[in] quant_params Per-channel quantization info.
  309. * It contains the multiplier and shift values to be applied to each output channel
  310. * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
  311. * @param[in] input_data Input (activation) data pointer. Data type: int8
  312. * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, CK] where HK, WK and CK are the
  313. * spatial filter dimensions. CK != C_IN is used for grouped convolution, in which
  314. * case the required conditions are C_IN = N * CK and C_OUT = N * M for N groups of
  315. * size M.
  316. * @param[in] filter_data Filter data pointer. Data type: int8
  317. * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
  318. * @param[in] bias_data Optional bias data pointer. Data type: int32
  319. * @param[in] upscale_dims Inserts zeroes to upscale the input in h/w dimensions if set to 2. This is used for
  320. * transposed convolution.
  321. * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
  322. * @param[out] output_data Output data pointer. Data type: int8
  323. *
  324. * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code> if successful or
  325. * <code>RISCV_NMSIS_NN_ARG_ERROR</code> if incorrect arguments or
  326. * <code>RISCV_NMSIS_NN_NO_IMPL_ERROR</code>
  327. *
  328. * @details
  329. * 1. Supported framework: TensorFlow Lite micro
  330. * 2. Additional memory is required for optimization. Refer to argument 'ctx' for details.
  331. *
  332. */
  333. riscv_nmsis_nn_status riscv_convolve_s8(const nmsis_nn_context *ctx,
  334. const nmsis_nn_conv_params *conv_params,
  335. const nmsis_nn_per_channel_quant_params *quant_params,
  336. const nmsis_nn_dims *input_dims,
  337. const int8_t *input_data,
  338. const nmsis_nn_dims *filter_dims,
  339. const int8_t *filter_data,
  340. const nmsis_nn_dims *bias_dims,
  341. const int32_t *bias_data,
  342. const nmsis_nn_dims *upscale_dims,
  343. const nmsis_nn_dims *output_dims,
  344. int8_t *output_data);
/**
 * @brief Get the required buffer size for s4 convolution function
 *
 * @param[in]    input_dims            Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
 * @param[in]    filter_dims           Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK
 *                                     are the spatial filter dimensions
 * @return       The function returns required buffer size (bytes)
 *
 */
int32_t riscv_convolve_s4_get_buffer_size(const nmsis_nn_dims *input_dims, const nmsis_nn_dims *filter_dims);
/**
 * @brief Get the required buffer size for s8 convolution function
 *
 * @param[in]    input_dims            Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
 * @param[in]    filter_dims           Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK
 *                                     are the spatial filter dimensions
 * @return       The function returns required buffer size (bytes)
 *
 */
int32_t riscv_convolve_s8_get_buffer_size(const nmsis_nn_dims *input_dims, const nmsis_nn_dims *filter_dims);
/**
 * @brief Wrapper to select optimal transposed convolution algorithm depending on parameters.
 * @param[in, out] ctx                   Function context that contains the additional buffer if required by the
 *                                       function.
 *                                       riscv_transpose_conv_s8_get_buffer_size will return the buffer_size if
 *                                       required.
 *                                       The caller is expected to clear the buffer, if applicable, for security
 *                                       reasons.
 * @param[in, out] output_ctx            Temporary scratch buffer.
 *                                       The required size is: output width * output height * output channel * 4
 *                                       The caller is expected to clear the buffer, if applicable, for security
 *                                       reasons.
 * @param[in]      transpose_conv_params Convolution parameters (e.g. strides, dilations, pads,...).
 *                                       Range of transpose_conv_params->input_offset : [-127, 128]
 *                                       Range of transpose_conv_params->output_offset : [-128, 127]
 * @param[in]      quant_params          Per-channel quantization info.
 *                                       It contains the multiplier and shift values to be applied to each out channel.
 * @param[in]      input_dims            Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
 * @param[in]      input_data            Input (activation) data pointer. Data type: int8
 * @param[in]      filter_dims           Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are
 *                                       the spatial filter dimensions
 * @param[in]      filter_data           Filter data pointer. Data type: int8
 * @param[in]      bias_dims             Bias tensor dimensions. Format: [C_OUT]
 * @param[in]      bias_data             Optional bias data pointer. Data type: int32
 * @param[in]      output_dims           Output tensor dimensions. Format: [N, H, W, C_OUT]
 * @param[out]     output_data           Output data pointer. Data type: int8
 * @return         The function returns either
 *                 <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
 *                 <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
 *
 * @details
 *    1. Supported framework: TensorFlow Lite micro
 *    2. Additional memory is required for optimization. Refer to arguments 'ctx' and 'output_ctx' for details.
 *
 */
riscv_nmsis_nn_status riscv_transpose_conv_wrapper_s8(const nmsis_nn_context *ctx,
                                                      const nmsis_nn_context *output_ctx,
                                                      const nmsis_nn_transpose_conv_params *transpose_conv_params,
                                                      const nmsis_nn_per_channel_quant_params *quant_params,
                                                      const nmsis_nn_dims *input_dims,
                                                      const int8_t *input_data,
                                                      const nmsis_nn_dims *filter_dims,
                                                      const int8_t *filter_data,
                                                      const nmsis_nn_dims *bias_dims,
                                                      const int32_t *bias_data,
                                                      const nmsis_nn_dims *output_dims,
                                                      int8_t *output_data);
/**
 * @brief Basic s8 transpose convolution function
 * @param[in, out] ctx                   Function context that contains the additional buffer if required by the
 *                                       function.
 *                                       riscv_transpose_conv_s8_get_buffer_size will return the buffer_size if
 *                                       required.
 *                                       The caller is expected to clear the buffer, if applicable, for security
 *                                       reasons.
 * @param[in, out] output_ctx            Temporary scratch buffer.
 *                                       The required size is: output width * output height * output channel * 4
 *                                       The caller is expected to clear the buffer, if applicable, for security
 *                                       reasons.
 * @param[in]      transpose_conv_params Convolution parameters (e.g. strides, dilations, pads,...).
 *                                       Range of transpose_conv_params->input_offset : [-127, 128]
 *                                       Range of transpose_conv_params->output_offset : [-128, 127]
 * @param[in]      quant_params          Per-channel quantization info.
 *                                       It contains the multiplier and shift values to be applied to each out channel.
 * @param[in]      input_dims            Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
 * @param[in]      input_data            Input (activation) data pointer. Data type: int8
 * @param[in]      filter_dims           Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are
 *                                       the spatial filter dimensions
 * @param[in]      filter_data           Filter data pointer. Data type: int8
 * @param[in]      bias_dims             Bias tensor dimensions. Format: [C_OUT]
 * @param[in]      bias_data             Optional bias data pointer. Data type: int32
 * @param[in]      output_dims           Output tensor dimensions. Format: [N, H, W, C_OUT]
 * @param[out]     output_data           Output data pointer. Data type: int8
 * @return         The function returns either
 *                 <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
 *                 <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
 *
 * @details
 *    1. Supported framework: TensorFlow Lite micro
 *    2. Additional memory is required for optimization. Refer to arguments 'ctx' and 'output_ctx' for details.
 *
 */
riscv_nmsis_nn_status riscv_transpose_conv_s8(const nmsis_nn_context *ctx,
                                              const nmsis_nn_context *output_ctx,
                                              const nmsis_nn_transpose_conv_params *transpose_conv_params,
                                              const nmsis_nn_per_channel_quant_params *quant_params,
                                              const nmsis_nn_dims *input_dims,
                                              const int8_t *input_data,
                                              const nmsis_nn_dims *filter_dims,
                                              const int8_t *filter_data,
                                              const nmsis_nn_dims *bias_dims,
                                              const int32_t *bias_data,
                                              const nmsis_nn_dims *output_dims,
                                              int8_t *output_data);
/**
 * @brief Get the required buffer size for ctx in s8 transpose conv function
 *
 * @param[in]    transposed_conv_params Transposed convolution parameters
 * @param[in]    input_dims             Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
 * @param[in]    filter_dims            Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK
 *                                      are the spatial filter dimensions
 * @param[in]    out_dims               Output tensor dimensions. Format: [N, H, W, C_OUT]
 * @return       The function returns required buffer size (bytes)
 *
 */
int32_t riscv_transpose_conv_s8_get_buffer_size(const nmsis_nn_transpose_conv_params *transposed_conv_params,
                                                const nmsis_nn_dims *input_dims,
                                                const nmsis_nn_dims *filter_dims,
                                                const nmsis_nn_dims *out_dims);
/**
 * @brief Get the required buffer size for output_ctx in s8 transpose conv function
 *
 * @param[in]    transposed_conv_params Transposed convolution parameters
 * @param[in]    input_dims             Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
 * @param[in]    filter_dims            Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK
 *                                      are the spatial filter dimensions
 * @return       The function returns required buffer size (bytes)
 *
 */
int32_t riscv_transpose_conv_s8_get_reverse_conv_buffer_size(const nmsis_nn_transpose_conv_params *transposed_conv_params,
                                                             const nmsis_nn_dims *input_dims,
                                                             const nmsis_nn_dims *filter_dims);
/**
 * @brief Get size of additional buffer required by riscv_transpose_conv_s8() for processors with DSP extension.
 *        Refer to riscv_transpose_conv_s8_get_buffer_size() for function argument details.
 *
 * @note Intended for compilation on Host. If compiling for a RISC-V target, use
 *       riscv_transpose_conv_s8_get_buffer_size().
 *
 */
int32_t riscv_transpose_conv_s8_get_buffer_size_dsp(const nmsis_nn_dims *input_dims,
                                                    const nmsis_nn_dims *filter_dims,
                                                    const nmsis_nn_dims *out_dims);
/**
 * @brief Basic s16 convolution function
 * @param[in, out] ctx            Function context that contains the additional buffer if required by the function.
 *                                riscv_convolve_s16_get_buffer_size will return the buffer_size if required.
 *                                The caller is expected to clear the buffer, if applicable, for security reasons.
 * @param[in]      conv_params    Convolution parameters (e.g. strides, dilations, pads,...).
 *                                conv_params->input_offset  : Not used
 *                                conv_params->output_offset : Not used
 * @param[in]      quant_params   Per-channel quantization info.
 *                                It contains the multiplier and shift values to be applied to each output channel
 * @param[in]      input_dims     Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
 * @param[in]      input_data     Input (activation) data pointer. Data type: int16
 * @param[in]      filter_dims    Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the
 *                                spatial filter dimensions
 * @param[in]      filter_data    Filter data pointer. Data type: int8
 * @param[in]      bias_dims      Bias tensor dimensions. Format: [C_OUT]
 * @param[in]      bias_data      Struct with optional bias data pointer. Bias data type can be int64 or int32
 *                                depending on a flag in the struct.
 * @param[in]      output_dims    Output tensor dimensions. Format: [N, H, W, C_OUT]
 * @param[out]     output_data    Output data pointer. Data type: int16
 *
 * @return     The function returns <code>RISCV_NMSIS_NN_SUCCESS</code> if successful or
 *             <code>RISCV_NMSIS_NN_ARG_ERROR</code> if incorrect arguments or
 *             <code>RISCV_NMSIS_NN_NO_IMPL_ERROR</code>
 *
 * @details
 *    1. Supported framework: TensorFlow Lite micro
 *    2. Additional memory is required for optimization. Refer to argument 'ctx' for details.
 *
 */
riscv_nmsis_nn_status riscv_convolve_s16(const nmsis_nn_context *ctx,
                                         const nmsis_nn_conv_params *conv_params,
                                         const nmsis_nn_per_channel_quant_params *quant_params,
                                         const nmsis_nn_dims *input_dims,
                                         const int16_t *input_data,
                                         const nmsis_nn_dims *filter_dims,
                                         const int8_t *filter_data,
                                         const nmsis_nn_dims *bias_dims,
                                         const nmsis_nn_bias_data *bias_data,
                                         const nmsis_nn_dims *output_dims,
                                         int16_t *output_data);
/**
 * @brief Get the required buffer size for s16 convolution function
 *
 * @param[in]    input_dims            Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
 * @param[in]    filter_dims           Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK
 *                                     are the spatial filter dimensions
 * @return       The function returns required buffer size (bytes)
 *
 */
int32_t riscv_convolve_s16_get_buffer_size(const nmsis_nn_dims *input_dims, const nmsis_nn_dims *filter_dims);
/**
 * @brief Fast s4 version for 1x1 convolution (non-square shape)
 *
 * @param[in, out] ctx            Function context that contains the additional buffer if required by the function.
 *                                riscv_convolve_1x1_s4_fast_get_buffer_size will return the buffer_size if required.
 *                                The caller is expected to clear the buffer, if applicable, for security reasons.
 * @param[in]      conv_params    Convolution parameters (e.g. strides, dilations, pads,...).
 *                                Range of conv_params->input_offset  : [-127, 128]
 *                                Range of conv_params->output_offset : [-128, 127]
 * @param[in]      quant_params   Per-channel quantization info.
 *                                It contains the multiplier and shift values to be applied to each output channel
 * @param[in]      input_dims     Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
 * @param[in]      input_data     Input (activation) data pointer. Data type: int8
 * @param[in]      filter_dims    Filter tensor dimensions. Format: [C_OUT, 1, 1, C_IN]
 * @param[in]      filter_data    Filter data pointer. Data type: int8 packed with 2x int4
 * @param[in]      bias_dims      Bias tensor dimensions. Format: [C_OUT]
 * @param[in]      bias_data      Optional bias data pointer. Data type: int32
 * @param[in]      output_dims    Output tensor dimensions. Format: [N, H, W, C_OUT]
 * @param[out]     output_data    Output data pointer. Data type: int8
 *
 * @return     The function returns either
 *             <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
 *             <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
 *
 * @details
 *   - Supported framework : TensorFlow Lite Micro
 *   - The following constraints on the arguments apply
 *      -# conv_params->padding.w = conv_params->padding.h = 0
 *      -# conv_params->stride.w = conv_params->stride.h = 1
 *
 */
riscv_nmsis_nn_status riscv_convolve_1x1_s4_fast(const nmsis_nn_context *ctx,
                                                 const nmsis_nn_conv_params *conv_params,
                                                 const nmsis_nn_per_channel_quant_params *quant_params,
                                                 const nmsis_nn_dims *input_dims,
                                                 const int8_t *input_data,
                                                 const nmsis_nn_dims *filter_dims,
                                                 const int8_t *filter_data,
                                                 const nmsis_nn_dims *bias_dims,
                                                 const int32_t *bias_data,
                                                 const nmsis_nn_dims *output_dims,
                                                 int8_t *output_data);
/**
 * @brief s4 version for 1x1 convolution with support for non-unity stride values
 *
 * @param[in, out] ctx            Function context that contains the additional buffer if required by the function.
 *                                None is required by this function.
 * @param[in]      conv_params    Convolution parameters (e.g. strides, dilations, pads,...).
 *                                Range of conv_params->input_offset  : [-127, 128]
 *                                Range of conv_params->output_offset : [-128, 127]
 * @param[in]      quant_params   Per-channel quantization info.
 *                                It contains the multiplier and shift values to be applied to each output channel
 * @param[in]      input_dims     Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
 * @param[in]      input_data     Input (activation) data pointer. Data type: int8
 * @param[in]      filter_dims    Filter tensor dimensions. Format: [C_OUT, 1, 1, C_IN]
 * @param[in]      filter_data    Filter data pointer. Data type: int8 packed with 2x int4
 * @param[in]      bias_dims      Bias tensor dimensions. Format: [C_OUT]
 * @param[in]      bias_data      Optional bias data pointer. Data type: int32
 * @param[in]      output_dims    Output tensor dimensions. Format: [N, H, W, C_OUT]
 * @param[out]     output_data    Output data pointer. Data type: int8
 *
 * @return     The function returns either
 *             <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
 *             <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
 * @details
 *   - Supported framework : TensorFlow Lite Micro
 *   - The following constraints on the arguments apply
 *      -# conv_params->padding.w = conv_params->padding.h = 0
 *
 */
riscv_nmsis_nn_status riscv_convolve_1x1_s4(const nmsis_nn_context *ctx,
                                            const nmsis_nn_conv_params *conv_params,
                                            const nmsis_nn_per_channel_quant_params *quant_params,
                                            const nmsis_nn_dims *input_dims,
                                            const int8_t *input_data,
                                            const nmsis_nn_dims *filter_dims,
                                            const int8_t *filter_data,
                                            const nmsis_nn_dims *bias_dims,
                                            const int32_t *bias_data,
                                            const nmsis_nn_dims *output_dims,
                                            int8_t *output_data);
/**
 * @brief Fast s8 version for 1x1 convolution (non-square shape)
 *
 * @param[in, out] ctx            Function context that contains the additional buffer if required by the function.
 *                                riscv_convolve_1x1_s8_fast_get_buffer_size will return the buffer_size if required.
 *                                The caller is expected to clear the buffer, if applicable, for security reasons.
 * @param[in]      conv_params    Convolution parameters (e.g. strides, dilations, pads,...).
 *                                Range of conv_params->input_offset  : [-127, 128]
 *                                Range of conv_params->output_offset : [-128, 127]
 * @param[in]      quant_params   Per-channel quantization info.
 *                                It contains the multiplier and shift values to be applied to each output channel
 * @param[in]      input_dims     Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
 * @param[in]      input_data     Input (activation) data pointer. Data type: int8
 * @param[in]      filter_dims    Filter tensor dimensions. Format: [C_OUT, 1, 1, C_IN]
 * @param[in]      filter_data    Filter data pointer. Data type: int8
 * @param[in]      bias_dims      Bias tensor dimensions. Format: [C_OUT]
 * @param[in]      bias_data      Optional bias data pointer. Data type: int32
 * @param[in]      output_dims    Output tensor dimensions. Format: [N, H, W, C_OUT]
 * @param[out]     output_data    Output data pointer. Data type: int8
 *
 * @return     The function returns either
 *             <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
 *             <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
 *
 * @details
 *   - Supported framework : TensorFlow Lite Micro
 *   - The following constraints on the arguments apply
 *      -# conv_params->padding.w = conv_params->padding.h = 0
 *      -# conv_params->stride.w = conv_params->stride.h = 1
 *
 */
riscv_nmsis_nn_status riscv_convolve_1x1_s8_fast(const nmsis_nn_context *ctx,
                                                 const nmsis_nn_conv_params *conv_params,
                                                 const nmsis_nn_per_channel_quant_params *quant_params,
                                                 const nmsis_nn_dims *input_dims,
                                                 const int8_t *input_data,
                                                 const nmsis_nn_dims *filter_dims,
                                                 const int8_t *filter_data,
                                                 const nmsis_nn_dims *bias_dims,
                                                 const int32_t *bias_data,
                                                 const nmsis_nn_dims *output_dims,
                                                 int8_t *output_data);
/**
 * @brief Get the required buffer size for riscv_convolve_1x1_s4_fast
 *
 * @param[in]    input_dims            Input (activation) dimensions
 * @return       The function returns the required buffer size in bytes
 *
 */
int32_t riscv_convolve_1x1_s4_fast_get_buffer_size(const nmsis_nn_dims *input_dims);
/**
 * @brief Get the required buffer size for riscv_convolve_1x1_s8_fast
 *
 * @param[in]    input_dims            Input (activation) dimensions
 * @return       The function returns the required buffer size in bytes
 *
 */
int32_t riscv_convolve_1x1_s8_fast_get_buffer_size(const nmsis_nn_dims *input_dims);
/**
 * @brief s8 version for 1x1 convolution with support for non-unity stride values
 *
 * @param[in, out] ctx            Function context that contains the additional buffer if required by the function.
 *                                None is required by this function.
 * @param[in]      conv_params    Convolution parameters (e.g. strides, dilations, pads,...).
 *                                Range of conv_params->input_offset  : [-127, 128]
 *                                Range of conv_params->output_offset : [-128, 127]
 * @param[in]      quant_params   Per-channel quantization info.
 *                                It contains the multiplier and shift values to be applied to each output channel
 * @param[in]      input_dims     Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
 * @param[in]      input_data     Input (activation) data pointer. Data type: int8
 * @param[in]      filter_dims    Filter tensor dimensions. Format: [C_OUT, 1, 1, C_IN]
 * @param[in]      filter_data    Filter data pointer. Data type: int8
 * @param[in]      bias_dims      Bias tensor dimensions. Format: [C_OUT]
 * @param[in]      bias_data      Optional bias data pointer. Data type: int32
 * @param[in]      output_dims    Output tensor dimensions. Format: [N, H, W, C_OUT]
 * @param[out]     output_data    Output data pointer. Data type: int8
 *
 * @return     The function returns either
 *             <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
 *             <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
 * @details
 *   - Supported framework : TensorFlow Lite Micro
 *   - The following constraints on the arguments apply
 *      -# conv_params->padding.w = conv_params->padding.h = 0
 *
 */
riscv_nmsis_nn_status riscv_convolve_1x1_s8(const nmsis_nn_context *ctx,
                                            const nmsis_nn_conv_params *conv_params,
                                            const nmsis_nn_per_channel_quant_params *quant_params,
                                            const nmsis_nn_dims *input_dims,
                                            const int8_t *input_data,
                                            const nmsis_nn_dims *filter_dims,
                                            const int8_t *filter_data,
                                            const nmsis_nn_dims *bias_dims,
                                            const int32_t *bias_data,
                                            const nmsis_nn_dims *output_dims,
                                            int8_t *output_data);
/**
 * @brief Basic Q7 convolution function
 * @param[in]       Im_in       pointer to input tensor
 * @param[in]       dim_im_in   input tensor dimension
 * @param[in]       ch_im_in    number of input tensor channels
 * @param[in]       wt          pointer to kernel weights
 * @param[in]       ch_im_out   number of filters, i.e., output tensor channels
 * @param[in]       dim_kernel  filter kernel size
 * @param[in]       padding     padding sizes
 * @param[in]       stride      convolution stride
 * @param[in]       bias        pointer to bias
 * @param[in]       bias_shift  amount of left-shift for bias
 * @param[in]       out_shift   amount of right-shift for output
 * @param[in,out]   Im_out      pointer to output tensor
 * @param[in]       dim_im_out  output tensor dimension
 * @param[in,out]   bufferA     pointer to buffer space for input
 * @param[in,out]   bufferB     pointer to buffer space for output
 * @return     The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
 *
 */
riscv_nmsis_nn_status riscv_convolve_HWC_q7_basic(const q7_t *Im_in,
                                                  const uint16_t dim_im_in,
                                                  const uint16_t ch_im_in,
                                                  const q7_t *wt,
                                                  const uint16_t ch_im_out,
                                                  const uint16_t dim_kernel,
                                                  const uint16_t padding,
                                                  const uint16_t stride,
                                                  const q7_t *bias,
                                                  const uint16_t bias_shift,
                                                  const uint16_t out_shift,
                                                  q7_t *Im_out,
                                                  const uint16_t dim_im_out,
                                                  q15_t *bufferA,
                                                  q7_t *bufferB);
/**
 * @brief Basic Q7 convolution function (non-square shape)
 * @param[in]       Im_in        pointer to input tensor
 * @param[in]       dim_im_in_x  input tensor dimension x
 * @param[in]       dim_im_in_y  input tensor dimension y
 * @param[in]       ch_im_in     number of input tensor channels
 * @param[in]       wt           pointer to kernel weights
 * @param[in]       ch_im_out    number of filters, i.e., output tensor channels
 * @param[in]       dim_kernel_x filter kernel size x
 * @param[in]       dim_kernel_y filter kernel size y
 * @param[in]       padding_x    padding size x
 * @param[in]       padding_y    padding size y
 * @param[in]       stride_x     convolution stride x
 * @param[in]       stride_y     convolution stride y
 * @param[in]       bias         pointer to bias
 * @param[in]       bias_shift   amount of left-shift for bias
 * @param[in]       out_shift    amount of right-shift for output
 * @param[in,out]   Im_out       pointer to output tensor
 * @param[in]       dim_im_out_x output tensor dimension x
 * @param[in]       dim_im_out_y output tensor dimension y
 * @param[in,out]   bufferA      pointer to buffer space for input
 * @param[in,out]   bufferB      pointer to buffer space for output
 * @return     The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
 */
riscv_nmsis_nn_status riscv_convolve_HWC_q7_basic_nonsquare(const q7_t *Im_in,
                                                            const uint16_t dim_im_in_x,
                                                            const uint16_t dim_im_in_y,
                                                            const uint16_t ch_im_in,
                                                            const q7_t *wt,
                                                            const uint16_t ch_im_out,
                                                            const uint16_t dim_kernel_x,
                                                            const uint16_t dim_kernel_y,
                                                            const uint16_t padding_x,
                                                            const uint16_t padding_y,
                                                            const uint16_t stride_x,
                                                            const uint16_t stride_y,
                                                            const q7_t *bias,
                                                            const uint16_t bias_shift,
                                                            const uint16_t out_shift,
                                                            q7_t *Im_out,
                                                            const uint16_t dim_im_out_x,
                                                            const uint16_t dim_im_out_y,
                                                            q15_t *bufferA,
                                                            q7_t *bufferB);
/**
 * @brief Basic Q15 convolution function
 * @param[in]       Im_in       pointer to input tensor
 * @param[in]       dim_im_in   input tensor dimension
 * @param[in]       ch_im_in    number of input tensor channels
 * @param[in]       wt          pointer to kernel weights
 * @param[in]       ch_im_out   number of filters, i.e., output tensor channels
 * @param[in]       dim_kernel  filter kernel size
 * @param[in]       padding     padding sizes
 * @param[in]       stride      convolution stride
 * @param[in]       bias        pointer to bias
 * @param[in]       bias_shift  amount of left-shift for bias
 * @param[in]       out_shift   amount of right-shift for output
 * @param[in,out]   Im_out      pointer to output tensor
 * @param[in]       dim_im_out  output tensor dimension
 * @param[in,out]   bufferA     pointer to buffer space for input
 * @param[in,out]   bufferB     pointer to buffer space for output
 * @return     The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
 *
 */
riscv_nmsis_nn_status riscv_convolve_HWC_q15_basic(const q15_t *Im_in,
                                                   const uint16_t dim_im_in,
                                                   const uint16_t ch_im_in,
                                                   const q15_t *wt,
                                                   const uint16_t ch_im_out,
                                                   const uint16_t dim_kernel,
                                                   const uint16_t padding,
                                                   const uint16_t stride,
                                                   const q15_t *bias,
                                                   const uint16_t bias_shift,
                                                   const uint16_t out_shift,
                                                   q15_t *Im_out,
                                                   const uint16_t dim_im_out,
                                                   q15_t *bufferA,
                                                   q7_t *bufferB);
/**
 * @brief Fast Q7 convolution function
 * @param[in]       Im_in       pointer to input tensor
 * @param[in]       dim_im_in   input tensor dimension
 * @param[in]       ch_im_in    number of input tensor channels
 * @param[in]       wt          pointer to kernel weights
 * @param[in]       ch_im_out   number of filters, i.e., output tensor channels
 * @param[in]       dim_kernel  filter kernel size
 * @param[in]       padding     padding sizes
 * @param[in]       stride      convolution stride
 * @param[in]       bias        pointer to bias
 * @param[in]       bias_shift  amount of left-shift for bias
 * @param[in]       out_shift   amount of right-shift for output
 * @param[in,out]   Im_out      pointer to output tensor
 * @param[in]       dim_im_out  output tensor dimension
 * @param[in,out]   bufferA     pointer to buffer space for input
 * @param[in,out]   bufferB     pointer to buffer space for output
 * @return     The function returns either
 *             <code>RISCV_NMSIS_NN_SIZE_MISMATCH</code> or <code>RISCV_NMSIS_NN_SUCCESS</code> based on the outcome
 *             of size checking.
 *
 * This function is the version with full list of optimization tricks, but with
 * some constraints:
 *   ch_im_in is multiple of 4
 *   ch_im_out is multiple of 2
 */
riscv_nmsis_nn_status riscv_convolve_HWC_q7_fast(const q7_t *Im_in,
                                                 const uint16_t dim_im_in,
                                                 const uint16_t ch_im_in,
                                                 const q7_t *wt,
                                                 const uint16_t ch_im_out,
                                                 const uint16_t dim_kernel,
                                                 const uint16_t padding,
                                                 const uint16_t stride,
                                                 const q7_t *bias,
                                                 const uint16_t bias_shift,
                                                 const uint16_t out_shift,
                                                 q7_t *Im_out,
                                                 const uint16_t dim_im_out,
                                                 q15_t *bufferA,
                                                 q7_t *bufferB);
/**
 * @brief Fast Q7 convolution function (non-square shape)
 * @param[in]       Im_in        pointer to input tensor
 * @param[in]       dim_im_in_x  input tensor dimension x
 * @param[in]       dim_im_in_y  input tensor dimension y
 * @param[in]       ch_im_in     number of input tensor channels
 * @param[in]       wt           pointer to kernel weights
 * @param[in]       ch_im_out    number of filters, i.e., output tensor channels
 * @param[in]       dim_kernel_x filter kernel size x
 * @param[in]       dim_kernel_y filter kernel size y
 * @param[in]       padding_x    padding size x
 * @param[in]       padding_y    padding size y
 * @param[in]       stride_x     convolution stride x
 * @param[in]       stride_y     convolution stride y
 * @param[in]       bias         pointer to bias
 * @param[in]       bias_shift   amount of left-shift for bias
 * @param[in]       out_shift    amount of right-shift for output
 * @param[in,out]   Im_out       pointer to output tensor
 * @param[in]       dim_im_out_x output tensor dimension x
 * @param[in]       dim_im_out_y output tensor dimension y
 * @param[in,out]   bufferA      pointer to buffer space for input
 * @param[in,out]   bufferB      pointer to buffer space for output
 * @return     The function returns either
 *             <code>RISCV_NMSIS_NN_SIZE_MISMATCH</code> or <code>RISCV_NMSIS_NN_SUCCESS</code> based on the outcome
 *             of size checking.
 *
 * This function is the version with full list of optimization tricks, but with
 * some constraints:
 *   ch_im_in is multiple of 4
 *   ch_im_out is multiple of 2
 */
riscv_nmsis_nn_status riscv_convolve_HWC_q7_fast_nonsquare(const q7_t *Im_in,
                                                           const uint16_t dim_im_in_x,
                                                           const uint16_t dim_im_in_y,
                                                           const uint16_t ch_im_in,
                                                           const q7_t *wt,
                                                           const uint16_t ch_im_out,
                                                           const uint16_t dim_kernel_x,
                                                           const uint16_t dim_kernel_y,
                                                           const uint16_t padding_x,
                                                           const uint16_t padding_y,
                                                           const uint16_t stride_x,
                                                           const uint16_t stride_y,
                                                           const q7_t *bias,
                                                           const uint16_t bias_shift,
                                                           const uint16_t out_shift,
                                                           q7_t *Im_out,
                                                           const uint16_t dim_im_out_x,
                                                           const uint16_t dim_im_out_y,
                                                           q15_t *bufferA,
                                                           q7_t *bufferB);
/**
 * @brief Fast Q7 version of 1x1 convolution (non-square shape)
 * @param[in]       Im_in        pointer to input tensor
 * @param[in]       dim_im_in_x  input tensor dimension x
 * @param[in]       dim_im_in_y  input tensor dimension y
 * @param[in]       ch_im_in     number of input tensor channels
 * @param[in]       wt           pointer to kernel weights
 * @param[in]       ch_im_out    number of filters, i.e., output tensor channels
 * @param[in]       dim_kernel_x filter kernel size x
 * @param[in]       dim_kernel_y filter kernel size y
 * @param[in]       padding_x    padding size x
 * @param[in]       padding_y    padding size y
 * @param[in]       stride_x     convolution stride x
 * @param[in]       stride_y     convolution stride y
 * @param[in]       bias         pointer to bias
 * @param[in]       bias_shift   amount of left-shift for bias
 * @param[in]       out_shift    amount of right-shift for output
 * @param[in,out]   Im_out       pointer to output tensor
 * @param[in]       dim_im_out_x output tensor dimension x
 * @param[in]       dim_im_out_y output tensor dimension y
 * @param[in,out]   bufferA      pointer to buffer space for input
 * @param[in,out]   bufferB      pointer to buffer space for output
 * @return     The function returns either
 *             <code>RISCV_NMSIS_NN_SIZE_MISMATCH</code> if argument constraints fail. or,
 *             <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
 *
 * This function implements convolution with 1x1 kernel size (i.e., dim_kernel_x=1
 * and dim_kernel_y=1). It can be used for
 * second half of MobileNets after depthwise separable convolution.
 *
 * This function is the version with full list of optimization tricks, but with
 * some constraints:
 *   ch_im_in is multiple of 4
 *   ch_im_out is multiple of 2
 */
riscv_nmsis_nn_status riscv_convolve_1x1_HWC_q7_fast_nonsquare(const q7_t *Im_in,
                                                               const uint16_t dim_im_in_x,
                                                               const uint16_t dim_im_in_y,
                                                               const uint16_t ch_im_in,
                                                               const q7_t *wt,
                                                               const uint16_t ch_im_out,
                                                               const uint16_t dim_kernel_x,
                                                               const uint16_t dim_kernel_y,
                                                               const uint16_t padding_x,
                                                               const uint16_t padding_y,
                                                               const uint16_t stride_x,
                                                               const uint16_t stride_y,
                                                               const q7_t *bias,
                                                               const uint16_t bias_shift,
                                                               const uint16_t out_shift,
                                                               q7_t *Im_out,
                                                               const uint16_t dim_im_out_x,
                                                               const uint16_t dim_im_out_y,
                                                               q15_t *bufferA,
                                                               q7_t *bufferB);
/**
 * @brief Fast s8 version for 1x1 convolution (non-square shape)
 *
 * @param[in, out] ctx           Function context that contains the additional buffer if required by the function.
 *                               riscv_convolve_1x1_s8_fast_get_buffer_size will return the buffer_size if required
 * @param[in]      conv_params   Convolution parameters (e.g. strides, dilations, pads,...).
 *                               Range of conv_params->input_offset  : [-127, 128]
 *                               Range of conv_params->output_offset : [-128, 127]
 * @param[in]      quant_params  Per-channel quantization info.
 *                               It contains the multiplier and shift values to be applied to each output channel
 * @param[in]      input_dims    Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
 * @param[in]      input_data    Input (activation) data pointer. Data type: int8
 * @param[in]      filter_dims   Filter tensor dimensions. Format: [C_OUT, 1, 1, C_IN]
 * @param[in]      filter_data   Filter data pointer. Data type: int8
 * @param[in]      bias_dims     Bias tensor dimensions. Format: [C_OUT]
 * @param[in]      bias_data     Optional bias data pointer. Data type: int32
 * @param[in]      output_dims   Output tensor dimensions. Format: [N, H, W, C_OUT]
 * @param[out]     output_data   Output data pointer. Data type: int8
 *
 * @return     The function returns either
 * <code>RISCV_NMSIS_NN_SIZE_MISMATCH</code> if argument constraints fail. or,
 * <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
 *
 * @details
 *   - Supported framework : TensorFlow Lite Micro
 *   - The following constraints on the arguments apply
 *      -# input_dims->c is a multiple of 4
 *      -# conv_params->padding.w = conv_params->padding.h = 0
 *      -# conv_params->stride.w = conv_params->stride.h = 1
 *
 */
riscv_nmsis_nn_status riscv_convolve_1x1_s8_fast(const nmsis_nn_context *ctx,
                                                 const nmsis_nn_conv_params *conv_params,
                                                 const nmsis_nn_per_channel_quant_params *quant_params,
                                                 const nmsis_nn_dims *input_dims,
                                                 const int8_t *input_data,
                                                 const nmsis_nn_dims *filter_dims,
                                                 const int8_t *filter_data,
                                                 const nmsis_nn_dims *bias_dims,
                                                 const int32_t *bias_data,
                                                 const nmsis_nn_dims *output_dims,
                                                 int8_t *output_data);
/**
 * @brief Get the required buffer size for riscv_convolve_1x1_s8_fast
 *
 * @param[in]       input_dims            Input (activation) dimensions
 * @return          The function returns the required buffer size in bytes
 *
 */
int32_t riscv_convolve_1x1_s8_fast_get_buffer_size(const nmsis_nn_dims *input_dims);
/**
 * @brief 1xn convolution
 *
 * @param[in, out] ctx           Function context that contains the additional buffer if required by the function.
 *                               riscv_convolve_1_x_n_s8_get_buffer_size will return the buffer_size if required
 *                               The caller is expected to clear the buffer, if applicable, for security reasons.
 * @param[in]      conv_params   Convolution parameters (e.g. strides, dilations, pads,...).
 *                               Range of conv_params->input_offset  : [-127, 128]
 *                               Range of conv_params->output_offset : [-128, 127]
 * @param[in]      quant_params  Per-channel quantization info.
 *                               It contains the multiplier and shift values to be applied to each output channel
 * @param[in]      input_dims    Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
 * @param[in]      input_data    Input (activation) data pointer. Data type: int8
 * @param[in]      filter_dims   Filter tensor dimensions. Format: [C_OUT, 1, WK, C_IN] where WK is the horizontal
 *                               spatial filter dimension
 * @param[in]      filter_data   Filter data pointer. Data type: int8
 * @param[in]      bias_dims     Bias tensor dimensions. Format: [C_OUT]
 * @param[in]      bias_data     Optional bias data pointer. Data type: int32
 * @param[in]      output_dims   Output tensor dimensions. Format: [N, H, W, C_OUT]
 * @param[out]     output_data   Output data pointer. Data type: int8
 *
 * @return     The function returns either
 * <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
 * <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
 *
 * @details
 *   - Supported framework : TensorFlow Lite Micro
 *   - The following constraints on the arguments apply
 *      -# input_dims->n equals 1
 *      -# output_dims->w is a multiple of 4
 *      -# Explicit constraints(since it is for 1xN convolution)
 *      -## input_dims->h equals 1
 *      -## output_dims->h equals 1
 *      -## filter_dims->h equals 1
 *@todo  Remove constraint on output_dims->w to make the function generic.
 *
 */
riscv_nmsis_nn_status riscv_convolve_1_x_n_s8(const nmsis_nn_context *ctx,
                                              const nmsis_nn_conv_params *conv_params,
                                              const nmsis_nn_per_channel_quant_params *quant_params,
                                              const nmsis_nn_dims *input_dims,
                                              const int8_t *input_data,
                                              const nmsis_nn_dims *filter_dims,
                                              const int8_t *filter_data,
                                              const nmsis_nn_dims *bias_dims,
                                              const int32_t *bias_data,
                                              const nmsis_nn_dims *output_dims,
                                              int8_t *output_data);
/**
 * @brief Q7 version of convolution for RGB image
 * @param[in]       Im_in       pointer to input tensor
 * @param[in]       dim_im_in   input tensor dimension
 * @param[in]       ch_im_in    number of input tensor channels
 * @param[in]       wt          pointer to kernel weights
 * @param[in]       ch_im_out   number of filters, i.e., output tensor channels
 * @param[in]       dim_kernel  filter kernel size
 * @param[in]       padding     padding sizes
 * @param[in]       stride      convolution stride
 * @param[in]       bias        pointer to bias
 * @param[in]       bias_shift  amount of left-shift for bias
 * @param[in]       out_shift   amount of right-shift for output
 * @param[in,out]   Im_out      pointer to output tensor
 * @param[in]       dim_im_out  output tensor dimension
 * @param[in,out]   bufferA     pointer to buffer space for input
 * @param[in,out]   bufferB     pointer to buffer space for output
 * @return     The function returns either
 * <code>RISCV_NMSIS_NN_SIZE_MISMATCH</code> or <code>RISCV_NMSIS_NN_SUCCESS</code> based on the outcome of size
 * checking.
 *
 * This kernel is written exclusively for convolution with ch_im_in
 * equals 3. This applies on the first layer of CNNs which has input
 * image with RGB format.
 */
riscv_nmsis_nn_status riscv_convolve_HWC_q7_RGB(const q7_t *Im_in,
                                                const uint16_t dim_im_in,
                                                const uint16_t ch_im_in,
                                                const q7_t *wt,
                                                const uint16_t ch_im_out,
                                                const uint16_t dim_kernel,
                                                const uint16_t padding,
                                                const uint16_t stride,
                                                const q7_t *bias,
                                                const uint16_t bias_shift,
                                                const uint16_t out_shift,
                                                q7_t *Im_out,
                                                const uint16_t dim_im_out,
                                                q15_t *bufferA,
                                                q7_t *bufferB);
/**
 * @brief Fast Q15 convolution function
 * @param[in]       Im_in       pointer to input tensor
 * @param[in]       dim_im_in   input tensor dimension
 * @param[in]       ch_im_in    number of input tensor channels
 * @param[in]       wt          pointer to kernel weights
 * @param[in]       ch_im_out   number of filters, i.e., output tensor channels
 * @param[in]       dim_kernel  filter kernel size
 * @param[in]       padding     padding sizes
 * @param[in]       stride      convolution stride
 * @param[in]       bias        pointer to bias
 * @param[in]       bias_shift  amount of left-shift for bias
 * @param[in]       out_shift   amount of right-shift for output
 * @param[in,out]   Im_out      pointer to output tensor
 * @param[in]       dim_im_out  output tensor dimension
 * @param[in,out]   bufferA     pointer to buffer space for input
 * @param[in,out]   bufferB     pointer to buffer space for output
 * @return     The function returns either
 * <code>RISCV_NMSIS_NN_SIZE_MISMATCH</code> or <code>RISCV_NMSIS_NN_SUCCESS</code> based on the outcome of size
 * checking.
 *
 * This function is the version with full list of optimization tricks, but with
 * some constraints:
 *   ch_im_in is multiple of 2
 *   ch_im_out is multiple of 2
 *   dim_im_out is a multiple of 2
 */
riscv_nmsis_nn_status riscv_convolve_HWC_q15_fast(const q15_t *Im_in,
                                                  const uint16_t dim_im_in,
                                                  const uint16_t ch_im_in,
                                                  const q15_t *wt,
                                                  const uint16_t ch_im_out,
                                                  const uint16_t dim_kernel,
                                                  const uint16_t padding,
                                                  const uint16_t stride,
                                                  const q15_t *bias,
                                                  const uint16_t bias_shift,
                                                  const uint16_t out_shift,
                                                  q15_t *Im_out,
                                                  const uint16_t dim_im_out,
                                                  q15_t *bufferA,
                                                  q7_t *bufferB);
/**
 * @brief Fast Q15 convolution function (non-square shape)
 * @param[in]       Im_in         pointer to input tensor
 * @param[in]       dim_im_in_x   input tensor dimension x
 * @param[in]       dim_im_in_y   input tensor dimension y
 * @param[in]       ch_im_in      number of input tensor channels
 * @param[in]       wt            pointer to kernel weights
 * @param[in]       ch_im_out     number of filters, i.e., output tensor channels
 * @param[in]       dim_kernel_x  filter kernel size x
 * @param[in]       dim_kernel_y  filter kernel size y
 * @param[in]       padding_x     padding size x
 * @param[in]       padding_y     padding size y
 * @param[in]       stride_x      convolution stride x
 * @param[in]       stride_y      convolution stride y
 * @param[in]       bias          pointer to bias
 * @param[in]       bias_shift    amount of left-shift for bias
 * @param[in]       out_shift     amount of right-shift for output
 * @param[in,out]   Im_out        pointer to output tensor
 * @param[in]       dim_im_out_x  output tensor dimension x
 * @param[in]       dim_im_out_y  output tensor dimension y
 * @param[in,out]   bufferA       pointer to buffer space for input
 * @param[in,out]   bufferB       pointer to buffer space for output
 * @return     The function returns either
 * <code>RISCV_NMSIS_NN_SIZE_MISMATCH</code> or <code>RISCV_NMSIS_NN_SUCCESS</code> based on the outcome of size
 * checking.
 *
 * @details
 *
 * <b>Buffer size:</b>
 *
 * bufferA size: 2*ch_im_in*dim_kernel*dim_kernel
 *
 * bufferB size: 0
 *
 * <b>Input dimension constraints:</b>
 *
 * ch_im_in is multiple of 2
 *
 * ch_im_out is multiple of 2
 *
 */
riscv_nmsis_nn_status riscv_convolve_HWC_q15_fast_nonsquare(const q15_t *Im_in,
                                                            const uint16_t dim_im_in_x,
                                                            const uint16_t dim_im_in_y,
                                                            const uint16_t ch_im_in,
                                                            const q15_t *wt,
                                                            const uint16_t ch_im_out,
                                                            const uint16_t dim_kernel_x,
                                                            const uint16_t dim_kernel_y,
                                                            const uint16_t padding_x,
                                                            const uint16_t padding_y,
                                                            const uint16_t stride_x,
                                                            const uint16_t stride_y,
                                                            const q15_t *bias,
                                                            const uint16_t bias_shift,
                                                            const uint16_t out_shift,
                                                            q15_t *Im_out,
                                                            const uint16_t dim_im_out_x,
                                                            const uint16_t dim_im_out_y,
                                                            q15_t *bufferA,
                                                            q7_t *bufferB);
/**
 * @brief Q7 depthwise separable convolution function
 * @param[in]       Im_in       pointer to input tensor
 * @param[in]       dim_im_in   input tensor dimension
 * @param[in]       ch_im_in    number of input tensor channels
 * @param[in]       wt          pointer to kernel weights
 * @param[in]       ch_im_out   number of filters, i.e., output tensor channels
 * @param[in]       dim_kernel  filter kernel size
 * @param[in]       padding     padding sizes
 * @param[in]       stride      convolution stride
 * @param[in]       bias        pointer to bias
 * @param[in]       bias_shift  amount of left-shift for bias
 * @param[in]       out_shift   amount of right-shift for output
 * @param[in,out]   Im_out      pointer to output tensor
 * @param[in]       dim_im_out  output tensor dimension
 * @param[in,out]   bufferA     pointer to buffer space for input
 * @param[in,out]   bufferB     pointer to buffer space for output
 * @return     The function returns either
 * <code>RISCV_NMSIS_NN_SIZE_MISMATCH</code> or <code>RISCV_NMSIS_NN_SUCCESS</code> based on the outcome of size
 * checking.
 *
 * This function is the version with full list of optimization tricks, but with
 * some constraints:
 *   ch_im_in is multiple of 2
 *   ch_im_out is multiple of 2
 */
riscv_nmsis_nn_status riscv_depthwise_separable_conv_HWC_q7(const q7_t *Im_in,
                                                            const uint16_t dim_im_in,
                                                            const uint16_t ch_im_in,
                                                            const q7_t *wt,
                                                            const uint16_t ch_im_out,
                                                            const uint16_t dim_kernel,
                                                            const uint16_t padding,
                                                            const uint16_t stride,
                                                            const q7_t *bias,
                                                            const uint16_t bias_shift,
                                                            const uint16_t out_shift,
                                                            q7_t *Im_out,
                                                            const uint16_t dim_im_out,
                                                            q15_t *bufferA,
                                                            q7_t *bufferB);
/**
 * @brief Q7 depthwise separable convolution function (non-square shape)
 * @param[in]       Im_in         pointer to input tensor
 * @param[in]       dim_im_in_x   input tensor dimension x
 * @param[in]       dim_im_in_y   input tensor dimension y
 * @param[in]       ch_im_in      number of input tensor channels
 * @param[in]       wt            pointer to kernel weights
 * @param[in]       ch_im_out     number of filters, i.e., output tensor channels
 * @param[in]       dim_kernel_x  filter kernel size x
 * @param[in]       dim_kernel_y  filter kernel size y
 * @param[in]       padding_x     padding sizes x
 * @param[in]       padding_y     padding sizes y
 * @param[in]       stride_x      convolution stride x
 * @param[in]       stride_y      convolution stride y
 * @param[in]       bias          pointer to bias
 * @param[in]       bias_shift    amount of left-shift for bias
 * @param[in]       out_shift     amount of right-shift for output
 * @param[in,out]   Im_out        pointer to output tensor
 * @param[in]       dim_im_out_x  output tensor dimension x
 * @param[in]       dim_im_out_y  output tensor dimension y
 * @param[in,out]   bufferA       pointer to buffer space for input
 * @param[in,out]   bufferB       pointer to buffer space for output
 * @return     The function returns either
 * <code>RISCV_NMSIS_NN_SIZE_MISMATCH</code> or <code>RISCV_NMSIS_NN_SUCCESS</code> based on the outcome of size
 * checking.
 *
 * This function is the version with full list of optimization tricks, but with
 * some constraints:
 *   ch_im_in is multiple of 2
 *   ch_im_out is multiple of 2
 */
riscv_nmsis_nn_status riscv_depthwise_separable_conv_HWC_q7_nonsquare(const q7_t *Im_in,
                                                                      const uint16_t dim_im_in_x,
                                                                      const uint16_t dim_im_in_y,
                                                                      const uint16_t ch_im_in,
                                                                      const q7_t *wt,
                                                                      const uint16_t ch_im_out,
                                                                      const uint16_t dim_kernel_x,
                                                                      const uint16_t dim_kernel_y,
                                                                      const uint16_t padding_x,
                                                                      const uint16_t padding_y,
                                                                      const uint16_t stride_x,
                                                                      const uint16_t stride_y,
                                                                      const q7_t *bias,
                                                                      const uint16_t bias_shift,
                                                                      const uint16_t out_shift,
                                                                      q7_t *Im_out,
                                                                      const uint16_t dim_im_out_x,
                                                                      const uint16_t dim_im_out_y,
                                                                      q15_t *bufferA,
                                                                      q7_t *bufferB);
/**
 * @brief 1xn convolution for s4 weights
 *
 * @param[in, out] ctx           Function context that contains the additional buffer if required by the function.
 *                               riscv_convolve_1_x_n_s4_get_buffer_size will return the buffer_size if required
 *                               The caller is expected to clear the buffer, if applicable, for security reasons.
 * @param[in]      conv_params   Convolution parameters (e.g. strides, dilations, pads,...).
 *                               Range of conv_params->input_offset  : [-127, 128]
 *                               Range of conv_params->output_offset : [-128, 127]
 * @param[in]      quant_params  Per-channel quantization info.
 *                               It contains the multiplier and shift values to be applied to each output channel
 * @param[in]      input_dims    Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
 * @param[in]      input_data    Input (activation) data pointer. Data type: int8
 * @param[in]      filter_dims   Filter tensor dimensions. Format: [C_OUT, 1, WK, C_IN] where WK is the horizontal
 *                               spatial filter dimension
 * @param[in]      filter_data   Filter data pointer. Data type: int8 as packed int4
 * @param[in]      bias_dims     Bias tensor dimensions. Format: [C_OUT]
 * @param[in]      bias_data     Optional bias data pointer. Data type: int32
 * @param[in]      output_dims   Output tensor dimensions. Format: [N, H, W, C_OUT]
 * @param[out]     output_data   Output data pointer. Data type: int8
 *
 * @return     The function returns either
 * <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
 * <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
 *
 * @details
 *   - Supported framework : TensorFlow Lite Micro
 *   - The following constraints on the arguments apply
 *      -# stride.w * input_dims->c is a multiple of 4
 *      -# Explicit constraints(since it is for 1xN convolution)
 *      -## input_dims->h equals 1
 *      -## output_dims->h equals 1
 *      -## filter_dims->h equals 1
 *@todo  Remove constraint on output_dims->w to make the function generic.
 *
 */
riscv_nmsis_nn_status riscv_convolve_1_x_n_s4(const nmsis_nn_context *ctx,
                                              const nmsis_nn_conv_params *conv_params,
                                              const nmsis_nn_per_channel_quant_params *quant_params,
                                              const nmsis_nn_dims *input_dims,
                                              const int8_t *input_data,
                                              const nmsis_nn_dims *filter_dims,
                                              const int8_t *filter_data,
                                              const nmsis_nn_dims *bias_dims,
                                              const int32_t *bias_data,
                                              const nmsis_nn_dims *output_dims,
                                              int8_t *output_data);
/**
 * @brief Get the required additional buffer size for 1xn convolution
 *
 * @param[in]      conv_params   Convolution parameters (e.g. strides, dilations, pads,...).
 *                               Range of conv_params->input_offset  : [-127, 128]
 *                               Range of conv_params->output_offset : [-128, 127]
 * @param[in]      input_dims    Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
 * @param[in]      filter_dims   Filter tensor dimensions. Format: [C_OUT, 1, WK, C_IN] where WK is the
 *                               horizontal spatial filter dimension
 * @param[in]      output_dims   Output tensor dimensions. Format: [N, H, W, C_OUT]
 *
 * @return         The function returns required buffer size(bytes)
 *
 */
int32_t riscv_convolve_1_x_n_s8_get_buffer_size(const nmsis_nn_conv_params *conv_params,
                                                const nmsis_nn_dims *input_dims,
                                                const nmsis_nn_dims *filter_dims,
                                                const nmsis_nn_dims *output_dims);
  1377. /**
  1378. * @brief Get the required additional buffer size for 1xn convolution
  1379. *
  1380. * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...).
  1381. * Range of conv_params->input_offset : [-127, 128]
  1382. * Range of conv_params->output_offset : [-128, 127]
  1383. * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
  1384. * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, 1, WK, C_IN] where WK is the
  1385. * horizontal spatial filter dimension
  1386. * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
  1387. *
  1388. * @return The function returns required buffer size(bytes)
  1389. *
  1390. */
  1391. int32_t riscv_convolve_1_x_n_s4_get_buffer_size(const nmsis_nn_conv_params *conv_params,
  1392. const nmsis_nn_dims *input_dims,
  1393. const nmsis_nn_dims *filter_dims,
  1394. const nmsis_nn_dims *output_dims);
/**
 * @brief Wrapper function to pick the right optimized s8 depthwise convolution function
 *
 * @param[in, out] ctx             Function context (e.g. temporary buffer). Check the function
 *                                 definition file to see if an additional buffer is required.
 *                                 Optional function {API}_get_buffer_size() provides the buffer
 *                                 size if required.
 *                                 The caller is expected to clear the buffer, if applicable, for security reasons.
 * @param[in]      dw_conv_params  Depthwise convolution parameters (e.g. strides, dilations, pads,...)
 *                                 dw_conv_params->dilation is not used.
 *                                 Range of dw_conv_params->input_offset  : [-127, 128]
 *                                 Range of dw_conv_params->output_offset : [-128, 127]
 * @param[in]      quant_params    Per-channel quantization info.
 *                                 It contains the multiplier and shift values to be applied to each
 *                                 output channel
 * @param[in]      input_dims      Input (activation) tensor dimensions. Format: [H, W, C_IN]
 *                                 Batch argument N is not used and assumed to be 1.
 * @param[in]      input_data      Input (activation) data pointer. Data type: int8
 * @param[in]      filter_dims     Filter tensor dimensions. Format: [1, H, W, C_OUT]
 * @param[in]      filter_data     Filter data pointer. Data type: int8
 * @param[in]      bias_dims       Bias tensor dimensions. Format: [C_OUT]
 * @param[in]      bias_data       Bias data pointer. Data type: int32
 * @param[in]      output_dims     Output tensor dimensions. Format: [1, H, W, C_OUT]
 * @param[in, out] output_data     Output data pointer. Data type: int8
 * @return     The function returns
 *                <code>RISCV_NMSIS_NN_SUCCESS</code>   -  Successful completion.
 *
 * @details
 *    - Supported framework: TensorFlow Lite
 *    - Picks one of the following functions
 *        -# riscv_depthwise_conv_s8()
 *        -# riscv_depthwise_conv_3x3_s8() - RISC-V CPUs with DSP extension only
 *        -# riscv_depthwise_conv_s8_opt()
 *    - Check details of riscv_depthwise_conv_s8_opt() for potential data that can be accessed outside of the
 * boundary.
 */
riscv_nmsis_nn_status riscv_depthwise_conv_wrapper_s8(const nmsis_nn_context *ctx,
                                                      const nmsis_nn_dw_conv_params *dw_conv_params,
                                                      const nmsis_nn_per_channel_quant_params *quant_params,
                                                      const nmsis_nn_dims *input_dims,
                                                      const int8_t *input_data,
                                                      const nmsis_nn_dims *filter_dims,
                                                      const int8_t *filter_data,
                                                      const nmsis_nn_dims *bias_dims,
                                                      const int32_t *bias_data,
                                                      const nmsis_nn_dims *output_dims,
                                                      int8_t *output_data);
/**
 * @brief Wrapper function to pick the right optimized s4 depthwise convolution function
 *
 * @param[in, out] ctx             Function context (e.g. temporary buffer). Check the function
 *                                 definition file to see if an additional buffer is required.
 *                                 Optional function {API}_get_buffer_size() provides the buffer
 *                                 size if required.
 *                                 The caller is expected to clear the buffer, if applicable, for security reasons.
 * @param[in]      dw_conv_params  Depthwise convolution parameters (e.g. strides, dilations, pads,...)
 *                                 dw_conv_params->dilation is not used.
 *                                 Range of dw_conv_params->input_offset  : [-127, 128]
 *                                 Range of dw_conv_params->output_offset : [-128, 127]
 * @param[in]      quant_params    Per-channel quantization info.
 *                                 It contains the multiplier and shift values to be applied to each
 *                                 output channel
 * @param[in]      input_dims      Input (activation) tensor dimensions. Format: [H, W, C_IN]
 *                                 Batch argument N is not used and assumed to be 1.
 * @param[in]      input_data      Input (activation) data pointer. Data type: int8
 * @param[in]      filter_dims     Filter tensor dimensions. Format: [1, H, W, C_OUT]
 * @param[in]      filter_data     Filter data pointer. Data type: int8_t packed 4-bit weights, e.g four sequential
 *                                 weights [0x1, 0x2, 0x3, 0x4]  packed as [0x21, 0x43].
 * @param[in]      bias_dims       Bias tensor dimensions. Format: [C_OUT]
 * @param[in]      bias_data       Bias data pointer. Data type: int32
 * @param[in]      output_dims     Output tensor dimensions. Format: [1, H, W, C_OUT]
 * @param[in, out] output_data     Output data pointer. Data type: int8
 * @return     The function returns
 *                <code>RISCV_NMSIS_NN_SUCCESS</code>   -  Successful completion.
 *
 * @details
 *    - Supported framework: TensorFlow Lite
 */
riscv_nmsis_nn_status riscv_depthwise_conv_wrapper_s4(const nmsis_nn_context *ctx,
                                                      const nmsis_nn_dw_conv_params *dw_conv_params,
                                                      const nmsis_nn_per_channel_quant_params *quant_params,
                                                      const nmsis_nn_dims *input_dims,
                                                      const int8_t *input_data,
                                                      const nmsis_nn_dims *filter_dims,
                                                      const int8_t *filter_data,
                                                      const nmsis_nn_dims *bias_dims,
                                                      const int32_t *bias_data,
                                                      const nmsis_nn_dims *output_dims,
                                                      int8_t *output_data);
/**
 * @brief Get size of additional buffer required by riscv_depthwise_conv_wrapper_s8()
 *
 * @param[in]      dw_conv_params  Depthwise convolution parameters (e.g. strides, dilations, pads,...)
 *                                 Range of dw_conv_params->input_offset  : [-127, 128]
 *                                 Range of dw_conv_params->output_offset : [-128, 127]
 * @param[in]      input_dims      Input (activation) tensor dimensions. Format: [H, W, C_IN]
 *                                 Batch argument N is not used and assumed to be 1.
 * @param[in]      filter_dims     Filter tensor dimensions. Format: [1, H, W, C_OUT]
 * @param[in]      output_dims     Output tensor dimensions. Format: [1, H, W, C_OUT]
 * @return                         Size of additional memory required for optimizations in bytes.
 *
 */
int32_t riscv_depthwise_conv_wrapper_s8_get_buffer_size(const nmsis_nn_dw_conv_params *dw_conv_params,
                                                        const nmsis_nn_dims *input_dims,
                                                        const nmsis_nn_dims *filter_dims,
                                                        const nmsis_nn_dims *output_dims);
/**
 * @brief Get size of additional buffer required by riscv_depthwise_conv_wrapper_s8() for processors with DSP extension.
 *        Refer to riscv_depthwise_conv_wrapper_s8_get_buffer_size() for function argument details.
 *
 * @note       Intended for compilation on Host. If compiling for a RISC-V target, use
 *             riscv_depthwise_conv_wrapper_s8_get_buffer_size().
 *
 */
int32_t riscv_depthwise_conv_wrapper_s8_get_buffer_size_dsp(const nmsis_nn_dw_conv_params *dw_conv_params,
                                                            const nmsis_nn_dims *input_dims,
                                                            const nmsis_nn_dims *filter_dims,
                                                            const nmsis_nn_dims *output_dims);
/**
 * @brief Get size of additional buffer required by riscv_depthwise_conv_wrapper_s4()
 *
 * @param[in]      dw_conv_params  Depthwise convolution parameters (e.g. strides, dilations, pads,...)
 *                                 Range of dw_conv_params->input_offset  : [-127, 128]
 *                                 Range of dw_conv_params->output_offset : [-128, 127]
 * @param[in]      input_dims      Input (activation) tensor dimensions. Format: [H, W, C_IN]
 *                                 Batch argument N is not used and assumed to be 1.
 * @param[in]      filter_dims     Filter tensor dimensions. Format: [1, H, W, C_OUT]
 * @param[in]      output_dims     Output tensor dimensions. Format: [1, H, W, C_OUT]
 * @return                         Size of additional memory required for optimizations in bytes.
 *
 */
int32_t riscv_depthwise_conv_wrapper_s4_get_buffer_size(const nmsis_nn_dw_conv_params *dw_conv_params,
                                                        const nmsis_nn_dims *input_dims,
                                                        const nmsis_nn_dims *filter_dims,
                                                        const nmsis_nn_dims *output_dims);
/**
 * @brief Get size of additional buffer required by riscv_depthwise_conv_wrapper_s4() for processors with DSP extension.
 *        Refer to riscv_depthwise_conv_wrapper_s4_get_buffer_size() for function argument details.
 *
 * @note       Intended for compilation on Host. If compiling for a RISC-V target, use
 *             riscv_depthwise_conv_wrapper_s4_get_buffer_size().
 *
 */
int32_t riscv_depthwise_conv_wrapper_s4_get_buffer_size_dsp(const nmsis_nn_dw_conv_params *dw_conv_params,
                                                            const nmsis_nn_dims *input_dims,
                                                            const nmsis_nn_dims *filter_dims,
                                                            const nmsis_nn_dims *output_dims);
/**
 * @brief Basic s8 depthwise convolution function that doesn't have any constraints on the input dimensions.
 *
 * @param[in, out] ctx             Function context (e.g. temporary buffer). Check the function
 *                                 definition file to see if an additional buffer is required.
 *                                 Optional function {API}_get_buffer_size() provides the buffer
 *                                 size if an additional buffer is required.
 *                                 The caller is expected to clear the buffer, if applicable, for security reasons.
 * @param[in]      dw_conv_params  Depthwise convolution parameters (e.g. strides, dilations, pads,...)
 *                                 dw_conv_params->dilation is not used.
 *                                 Range of dw_conv_params->input_offset  : [-127, 128]
 *                                 Range of dw_conv_params->output_offset : [-128, 127]
 * @param[in]      quant_params    Per-channel quantization info.
 *                                 It contains the multiplier and shift values to be applied to each
 *                                 output channel
 * @param[in]      input_dims      Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
 *                                 Batch argument N is not used.
 * @param[in]      input_data      Input (activation) data pointer. Data type: int8
 * @param[in]      filter_dims     Filter tensor dimensions. Format: [1, H, W, C_OUT]
 * @param[in]      filter_data     Filter data pointer. Data type: int8
 * @param[in]      bias_dims       Bias tensor dimensions. Format: [C_OUT]
 * @param[in]      bias_data       Bias data pointer. Data type: int32
 * @param[in]      output_dims     Output tensor dimensions. Format: [N, H, W, C_OUT]
 * @param[in, out] output_data     Output data pointer. Data type: int8
 * @return     The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
 *
 * @details
 *    - Supported framework: TensorFlow Lite
 */
riscv_nmsis_nn_status riscv_depthwise_conv_s8(const nmsis_nn_context *ctx,
                                              const nmsis_nn_dw_conv_params *dw_conv_params,
                                              const nmsis_nn_per_channel_quant_params *quant_params,
                                              const nmsis_nn_dims *input_dims,
                                              const int8_t *input_data,
                                              const nmsis_nn_dims *filter_dims,
                                              const int8_t *filter_data,
                                              const nmsis_nn_dims *bias_dims,
                                              const int32_t *bias_data,
                                              const nmsis_nn_dims *output_dims,
                                              int8_t *output_data);
  1582. /**
  1583. * @brief Basic s4 depthwise convolution function that doesn't have any constraints on the input dimensions.
  1584. *
  1585. * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function
  1586. * definition file to see if an additional buffer is required.
  1587. * Optional function {API}_get_buffer_size() provides the buffer
  1588. * size if an additional buffer is required exists if additional memory is.
  1589. * The caller is expected to clear the buffer ,if applicable, for security reasons.
  1590. * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...)
  1591. * dw_conv_params->dilation is not used.
  1592. * Range of dw_conv_params->input_offset : [-127, 128]
  1593. * Range of dw_conv_params->output_offset : [-128, 127]
  1594. * @param[in] quant_params Per-channel quantization info.
  1595. * It contains the multiplier and shift values to be applied to each
  1596. * output channel
  1597. * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
  1598. * Batch argument N is not used.
  1599. * @param[in] input Input (activation) data pointer. Data type: int8
  1600. * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT]
  1601. * @param[in] kernel Filter data pointer. Data type: int8_t packed 4-bit weights, e.g four sequential
  1602. * weights [0x1, 0x2, 0x3, 0x4] packed as [0x21, 0x43].
  1603. * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
  1604. * @param[in] bias Bias data pointer. Data type: int32
  1605. * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
  1606. * @param[in, out] output Output data pointer. Data type: int8
  1607. * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
  1608. *
  1609. * @details
  1610. * - Supported framework: TensorFlow Lite
  1611. */
  1612. riscv_nmsis_nn_status riscv_depthwise_conv_s4(const nmsis_nn_context *ctx,
  1613. const nmsis_nn_dw_conv_params *dw_conv_params,
  1614. const nmsis_nn_per_channel_quant_params *quant_params,
  1615. const nmsis_nn_dims *input_dims,
  1616. const int8_t *input,
  1617. const nmsis_nn_dims *filter_dims,
  1618. const int8_t *kernel,
  1619. const nmsis_nn_dims *bias_dims,
  1620. const int32_t *bias,
  1621. const nmsis_nn_dims *output_dims,
  1622. int8_t *output);
  1623. /**
  1624. * @brief Basic s16 depthwise convolution function that doesn't have any constraints on the input dimensions.
  1625. *
  1626. * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function
  1627. * definition file to see if an additional buffer is required.
  1628. * Optional function {API}_get_buffer_size() provides the buffer
  1629. * size if an additional buffer is required.
  1631. * The caller is expected to clear the buffer, if applicable, for security reasons.
  1632. * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...)
  1633. * conv_params->input_offset : Not used
  1634. * conv_params->output_offset : Not used
  1635. * @param[in] quant_params Per-channel quantization info.
  1636. * It contains the multiplier and shift values to be applied to each
  1637. * output channel
  1638. * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
  1639. * Batch argument N is not used.
  1640. * @param[in] input_data Input (activation) data pointer. Data type: int16
  1641. * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT]
  1642. * @param[in] filter_data Filter data pointer. Data type: int8
  1643. * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
  1644. * @param[in] bias_data Bias data pointer. Data type: int64
  1645. * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
  1646. * @param[in, out] output_data Output data pointer. Data type: int16
  1647. * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
  1648. *
  1649. * @details
  1650. * - Supported framework: TensorFlow Lite
  1651. */
  1652. riscv_nmsis_nn_status riscv_depthwise_conv_s16(const nmsis_nn_context *ctx,
  1653. const nmsis_nn_dw_conv_params *dw_conv_params,
  1654. const nmsis_nn_per_channel_quant_params *quant_params,
  1655. const nmsis_nn_dims *input_dims,
  1656. const int16_t *input_data,
  1657. const nmsis_nn_dims *filter_dims,
  1658. const int8_t *filter_data,
  1659. const nmsis_nn_dims *bias_dims,
  1660. const int64_t *bias_data,
  1661. const nmsis_nn_dims *output_dims,
  1662. int16_t *output_data);
  1663. /**
  1664. * @brief Wrapper function to pick the right optimized s16 depthwise convolution function
  1665. *
  1666. * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function
  1667. * definition file to see if an additional buffer is required.
  1668. * Optional function {API}_get_buffer_size() provides the buffer
  1669. * size if required.
  1670. * The caller is expected to clear the buffer, if applicable, for security reasons.
  1671. * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...)
  1672. * dw_conv_params->dilation is not used.
  1673. * Range of dw_conv_params->input_offset : Not used
  1674. * Range of dw_conv_params->output_offset : Not used
  1675. * @param[in] quant_params Per-channel quantization info.
  1676. * It contains the multiplier and shift values to be applied to each
  1677. * output channel
  1678. * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN]
  1679. * Batch argument N is not used and assumed to be 1.
  1680. * @param[in] input_data Input (activation) data pointer. Data type: int16
  1681. * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT]
  1682. * @param[in] filter_data Filter data pointer. Data type: int8
  1683. * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
  1684. * @param[in] bias_data Bias data pointer. Data type: int64
  1685. * @param[in] output_dims Output tensor dimensions. Format: [1, H, W, C_OUT]
  1686. * @param[in, out] output_data Output data pointer. Data type: int16
  1687. * @return The function returns
  1688. * <code>RISCV_NMSIS_NN_SUCCESS</code> - Successful completion.
  1689. *
  1690. * @details
  1691. * - Supported framework: TensorFlow Lite
  1692. * - Picks one of the following functions
  1693. * -# riscv_depthwise_conv_s16()
  1694. * -# riscv_depthwise_conv_fast_s16() - RISC-V CPUs with DSP extension only
  1695. */
  1696. riscv_nmsis_nn_status riscv_depthwise_conv_wrapper_s16(const nmsis_nn_context *ctx,
  1697. const nmsis_nn_dw_conv_params *dw_conv_params,
  1698. const nmsis_nn_per_channel_quant_params *quant_params,
  1699. const nmsis_nn_dims *input_dims,
  1700. const int16_t *input_data,
  1701. const nmsis_nn_dims *filter_dims,
  1702. const int8_t *filter_data,
  1703. const nmsis_nn_dims *bias_dims,
  1704. const int64_t *bias_data,
  1705. const nmsis_nn_dims *output_dims,
  1706. int16_t *output_data);
  1707. /**
  1708. * @brief Get size of additional buffer required by riscv_depthwise_conv_wrapper_s16()
  1709. *
  1710. * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...)
  1711. * Range of dw_conv_params->input_offset : Not used
  1712. * Range of dw_conv_params->output_offset : Not used
  1713. * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN]
  1714. * Batch argument N is not used and assumed to be 1.
  1715. * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT]
  1716. * @param[in] output_dims Output tensor dimensions. Format: [1, H, W, C_OUT]
  1717. * @return Size of additional memory required for optimizations in bytes.
  1718. *
  1719. */
  1720. int32_t riscv_depthwise_conv_wrapper_s16_get_buffer_size(const nmsis_nn_dw_conv_params *dw_conv_params,
  1721. const nmsis_nn_dims *input_dims,
  1722. const nmsis_nn_dims *filter_dims,
  1723. const nmsis_nn_dims *output_dims);
  1724. /**
  1725. * @brief Get size of additional buffer required by riscv_depthwise_conv_wrapper_s16() for processors with DSP extension.
  1726. * Refer to riscv_depthwise_conv_wrapper_s16_get_buffer_size() for function argument details.
  1727. *
  1728. * @note Intended for compilation on Host. If compiling for a Riscv target, use
  1729. * riscv_depthwise_conv_wrapper_s16_get_buffer_size().
  1730. *
  1731. */
  1732. int32_t riscv_depthwise_conv_wrapper_s16_get_buffer_size_dsp(const nmsis_nn_dw_conv_params *dw_conv_params,
  1733. const nmsis_nn_dims *input_dims,
  1734. const nmsis_nn_dims *filter_dims,
  1735. const nmsis_nn_dims *output_dims);
  1736. /**
  1737. * @brief Optimized s16 depthwise convolution function with constraint that in_channel equals out_channel.
  1738. * Refer riscv_depthwise_conv_s16() for function argument details.
  1739. *
  1740. * @return The function returns one of the following
  1741. * <code>RISCV_NMSIS_NN_ARG_ERROR</code> - ctx->buf == NULL and
  1742. * riscv_depthwise_conv_fast_s16_get_buffer_size() > 0 or
  1743. * input channel != output channel or
  1744. * ch_mult != 1
  1745. *
  1746. * <code>RISCV_NMSIS_NN_SUCCESS</code> - Successful operation
  1747. *
  1748. * @details
  1749. * - Supported framework: TensorFlow Lite
  1750. * - The following constraints on the arguments apply
  1751. * -# Number of input channel equals number of output channels or ch_mult equals 1
  1752. * - Recommended when number of channels is 4 or greater.
  1753. *
  1754. */
  1755. riscv_nmsis_nn_status riscv_depthwise_conv_fast_s16(const nmsis_nn_context *ctx,
  1756. const nmsis_nn_dw_conv_params *dw_conv_params,
  1757. const nmsis_nn_per_channel_quant_params *quant_params,
  1758. const nmsis_nn_dims *input_dims,
  1759. const int16_t *input_data,
  1760. const nmsis_nn_dims *filter_dims,
  1761. const int8_t *filter_data,
  1762. const nmsis_nn_dims *bias_dims,
  1763. const int64_t *bias_data,
  1764. const nmsis_nn_dims *output_dims,
  1765. int16_t *output_data);
  1766. /**
  1767. * @brief Get the required buffer size for optimized s16 depthwise convolution
  1768. * function with constraint that in_channel equals out_channel.
  1769. * @param[in] input_dims Input (activation) tensor dimensions. Format: [1, H, W, C_IN]
  1770. * Batch argument N is not used.
  1771. * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT]
  1772. * @return The function returns required buffer size in bytes
  1773. *
  1774. */
  1775. int32_t riscv_depthwise_conv_fast_s16_get_buffer_size(const nmsis_nn_dims *input_dims, const nmsis_nn_dims *filter_dims);
  1776. /**
  1777. * @brief Optimized s8 depthwise convolution function for 3x3 kernel size with some constraints on
  1778. * the input arguments(documented below). Refer riscv_depthwise_conv_s8() for function
  1779. * argument details.
  1780. *
  1781. * @return The function returns one of the following
  1782. * <code>RISCV_NMSIS_NN_ARG_ERROR</code> - Unsupported dimension of tensors
  1783. * - Unsupported pad size along the x axis
  1784. * <code>RISCV_NMSIS_NN_SUCCESS</code> - Successful operation
  1785. *
  1786. * @details
  1787. * - Supported framework : TensorFlow Lite Micro
  1788. * - The following constraints on the arguments apply
  1789. * -# Number of input channel equals number of output channels
  1790. * -# Filter height and width equals 3
  1791. * -# Padding along x is either 0 or 1.
  1792. *
  1793. */
  1794. riscv_nmsis_nn_status riscv_depthwise_conv_3x3_s8(const nmsis_nn_context *ctx,
  1795. const nmsis_nn_dw_conv_params *dw_conv_params,
  1796. const nmsis_nn_per_channel_quant_params *quant_params,
  1797. const nmsis_nn_dims *input_dims,
  1798. const int8_t *input_data,
  1799. const nmsis_nn_dims *filter_dims,
  1800. const int8_t *filter_data,
  1801. const nmsis_nn_dims *bias_dims,
  1802. const int32_t *bias_data,
  1803. const nmsis_nn_dims *output_dims,
  1804. int8_t *output_data);
  1805. /**
  1806. * @brief Optimized s8 depthwise convolution function with constraint that in_channel equals out_channel.
  1807. * Refer riscv_depthwise_conv_s8() for function argument details.
  1808. *
  1809. * @return The function returns one of the following
  1810. * <code>RISCV_NMSIS_NN_ARG_ERROR</code> - input channel != output channel or
  1811. * ch_mult != 1
  1812. * <code>RISCV_NMSIS_NN_SUCCESS</code> - Successful operation
  1813. *
  1814. * @details
  1815. * - Supported framework: TensorFlow Lite
  1816. * - The following constraints on the arguments apply
  1817. * -# Number of input channel equals number of output channels or ch_mult equals 1
  1818. * - Recommended when number of channels is 4 or greater.
  1819. *
  1820. */
  1821. riscv_nmsis_nn_status riscv_depthwise_conv_s8_opt(const nmsis_nn_context *ctx,
  1822. const nmsis_nn_dw_conv_params *dw_conv_params,
  1823. const nmsis_nn_per_channel_quant_params *quant_params,
  1824. const nmsis_nn_dims *input_dims,
  1825. const int8_t *input_data,
  1826. const nmsis_nn_dims *filter_dims,
  1827. const int8_t *filter_data,
  1828. const nmsis_nn_dims *bias_dims,
  1829. const int32_t *bias_data,
  1830. const nmsis_nn_dims *output_dims,
  1831. int8_t *output_data);
  1832. /**
  1833. * @brief Optimized s4 depthwise convolution function with constraint that in_channel equals out_channel.
  1834. * Refer riscv_depthwise_conv_s4() for function argument details.
  1835. *
  1836. * @return The function returns one of the following
  1837. * <code>RISCV_NMSIS_NN_ARG_ERROR</code> - input channel != output channel or
  1838. * ch_mult != 1
  1839. * <code>RISCV_NMSIS_NN_SUCCESS</code> - Successful operation
  1840. *
  1841. * @details
  1842. * - Supported framework: TensorFlow Lite
  1843. * - The following constraints on the arguments apply
  1844. * -# Number of input channel equals number of output channels or ch_mult equals 1
  1845. * - Recommended when number of channels is 4 or greater.
  1846. *
  1847. */
  1848. riscv_nmsis_nn_status riscv_depthwise_conv_s4_opt(const nmsis_nn_context *ctx,
  1849. const nmsis_nn_dw_conv_params *dw_conv_params,
  1850. const nmsis_nn_per_channel_quant_params *quant_params,
  1851. const nmsis_nn_dims *input_dims,
  1852. const int8_t *input_data,
  1853. const nmsis_nn_dims *filter_dims,
  1854. const int8_t *filter_data,
  1855. const nmsis_nn_dims *bias_dims,
  1856. const int32_t *bias_data,
  1857. const nmsis_nn_dims *output_dims,
  1858. int8_t *output_data);
  1859. /**
  1860. * @brief Get the required buffer size for optimized s8 depthwise convolution
  1861. * function with constraint that in_channel equals out_channel.
  1862. * @param[in] input_dims Input (activation) tensor dimensions. Format: [1, H, W, C_IN]
  1863. * Batch argument N is not used.
  1864. * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT]
  1865. * @return The function returns required buffer size in bytes
  1866. *
  1867. */
  1868. int32_t riscv_depthwise_conv_s8_opt_get_buffer_size(const nmsis_nn_dims *input_dims, const nmsis_nn_dims *filter_dims);
  1869. /**
  1870. * @brief Get the required buffer size for optimized s4 depthwise convolution
  1871. * function with constraint that in_channel equals out_channel.
  1872. * @param[in] input_dims Input (activation) tensor dimensions. Format: [1, H, W, C_IN]
  1873. * Batch argument N is not used.
  1874. * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT]
  1875. * @return The function returns required buffer size in bytes
  1876. *
  1877. */
  1878. int32_t riscv_depthwise_conv_s4_opt_get_buffer_size(const nmsis_nn_dims *input_dims, const nmsis_nn_dims *filter_dims);
  1879. /**
  1880. * @defgroup FC Fully-connected Layer Functions
  1881. *
  1882. * Collection of fully-connected and matrix multiplication functions.
  1883. *
  1884. * Fully-connected layer is basically a matrix-vector multiplication
  1885. * with bias. The matrix is the weights and the input/output vectors
  1886. * are the activation values. Supported {weight, activation} precisions
  1887. * include {8-bit, 8-bit} and {8-bit, 16-bit}
  1888. *
  1889. *
  1890. */
  1891. /**
  1892. *@brief Q7 basic fully-connected layer function
  1893. *@param[in] pV pointer to input vector
  1894. *@param[in] pM pointer to matrix weights
  1895. *@param[in] dim_vec length of the vector
  1896. *@param[in] num_of_rows number of rows in weight matrix
  1897. *@param[in] bias_shift amount of left-shift for bias
  1898. *@param[in] out_shift amount of right-shift for output
  1899. *@param[in] bias pointer to bias
  1900. *@param[in,out] pOut pointer to output vector
  1901. *@param[in,out] vec_buffer pointer to buffer space for input
  1902. *@return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
  1903. *
  1904. */
  1905. riscv_nmsis_nn_status riscv_fully_connected_q7(const q7_t *pV,
  1906. const q7_t *pM,
  1907. const uint16_t dim_vec,
  1908. const uint16_t num_of_rows,
  1909. const uint16_t bias_shift,
  1910. const uint16_t out_shift,
  1911. const q7_t *bias,
  1912. q7_t *pOut,
  1913. q15_t *vec_buffer);
  1914. /**
  1915. * @brief Basic s4 Fully Connected function.
  1916. *
  1917. * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function
  1918. * definition file to see if an additional buffer is required.
  1919. * Optional function {API}_get_buffer_size() provides the buffer
  1920. * size if an additional buffer is required.
  1921. * The caller is expected to clear the buffer, if applicable, for security reasons.
  1922. * @param[in] fc_params Fully Connected layer parameters.
  1923. * Range of fc_params->input_offset : [-127, 128]
  1924. * fc_params->filter_offset : 0
  1925. * Range of fc_params->output_offset : [-128, 127]
  1926. * @param[in] quant_params Per-tensor quantization info.
  1927. * It contains the multiplier and shift value to be applied to the output tensor.
  1928. * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
  1929. * Input dimension is taken as Nx(H * W * C_IN)
  1930. * @param[in] input_data Input (activation) data pointer. Data type: int8
  1931. * @param[in] filter_dims Two dimensional filter dimensions. Format: [N, C]
  1932. * N : accumulation depth and equals (H * W * C_IN) from input_dims
  1933. * C : output depth and equals C_OUT in output_dims
  1934. * H & W : Not used
  1935. * @param[in] filter_data Filter data pointer. Data type: int8_t packed 4-bit weights, e.g four sequential
  1936. * weights [0x1, 0x2, 0x3, 0x4] packed as [0x21, 0x43].
  1937. * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
  1938. * N, H, W : Not used
  1939. * @param[in] bias_data Bias data pointer. Data type: int32
  1940. * @param[in] output_dims Output tensor dimensions. Format: [N, C_OUT]
  1941. * N : Batches
  1942. * C_OUT : Output depth
  1943. * H & W : Not used.
  1944. * @param[in, out] output_data Output data pointer. Data type: int8
  1945. * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
  1946. *
  1947. * @details
  1948. * - Supported framework: TensorFlow Lite
  1949. */
  1950. riscv_nmsis_nn_status riscv_fully_connected_s4(const nmsis_nn_context *ctx,
  1951. const nmsis_nn_fc_params *fc_params,
  1952. const nmsis_nn_per_tensor_quant_params *quant_params,
  1953. const nmsis_nn_dims *input_dims,
  1954. const int8_t *input_data,
  1955. const nmsis_nn_dims *filter_dims,
  1956. const int8_t *filter_data,
  1957. const nmsis_nn_dims *bias_dims,
  1958. const int32_t *bias_data,
  1959. const nmsis_nn_dims *output_dims,
  1960. int8_t *output_data);
  1961. /**
  1962. * @brief Basic s8 Fully Connected function.
  1963. *
  1964. * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function
  1965. * definition file to see if an additional buffer is required.
  1966. * Optional function {API}_get_buffer_size() provides the buffer
  1967. * size if an additional buffer is required.
  1968. * The caller is expected to clear the buffer, if applicable, for security reasons.
  1969. * @param[in] fc_params Fully Connected layer parameters.
  1970. * Range of fc_params->input_offset : [-127, 128]
  1971. * fc_params->filter_offset : 0
  1972. * Range of fc_params->output_offset : [-128, 127]
  1973. * @param[in] quant_params Per-tensor quantization info.
  1974. * It contains the multiplier and shift value to be applied to the output tensor.
  1975. * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
  1976. * Input dimension is taken as Nx(H * W * C_IN)
  1977. * @param[in] input_data Input (activation) data pointer. Data type: int8
  1978. * @param[in] filter_dims Two dimensional filter dimensions. Format: [N, C]
  1979. * N : accumulation depth and equals (H * W * C_IN) from input_dims
  1980. * C : output depth and equals C_OUT in output_dims
  1981. * H & W : Not used
  1982. * @param[in] filter_data Filter data pointer. Data type: int8
  1983. * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
  1984. * N, H, W : Not used
  1985. * @param[in] bias_data Bias data pointer. Data type: int32
  1986. * @param[in] output_dims Output tensor dimensions. Format: [N, C_OUT]
  1987. * N : Batches
  1988. * C_OUT : Output depth
  1989. * H & W : Not used.
  1990. * @param[in, out] output_data Output data pointer. Data type: int8
  1991. *
  1992. * @return The function returns either
  1993. * <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
  1994. * <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
  1995. *
  1996. * @details
  1997. * - Supported framework: TensorFlow Lite
  1998. */
  1999. riscv_nmsis_nn_status riscv_fully_connected_s8(const nmsis_nn_context *ctx,
  2000. const nmsis_nn_fc_params *fc_params,
  2001. const nmsis_nn_per_tensor_quant_params *quant_params,
  2002. const nmsis_nn_dims *input_dims,
  2003. const int8_t *input_data,
  2004. const nmsis_nn_dims *filter_dims,
  2005. const int8_t *filter_data,
  2006. const nmsis_nn_dims *bias_dims,
  2007. const int32_t *bias_data,
  2008. const nmsis_nn_dims *output_dims,
  2009. int8_t *output_data);
  2010. /**
  2011. * @brief Basic s8 Fully Connected function using per channel quantization.
  2012. *
  2013. * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function
  2014. * definition file to see if an additional buffer is required.
  2015. * Optional function {API}_get_buffer_size() provides the buffer
  2016. * size if an additional buffer is required.
  2017. * The caller is expected to clear the buffer, if applicable, for security reasons.
  2018. * @param[in] fc_params Fully Connected layer parameters.
  2019. * Range of fc_params->input_offset : [-127, 128]
  2020. * fc_params->filter_offset : 0
  2021. * Range of fc_params->output_offset : [-128, 127]
  2022. * @param[in] quant_params Per-channel quantization info.
  2023. * It contains the multiplier and shift values to be applied to each output channel
  2024. * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
  2025. * Input dimension is taken as Nx(H * W * C_IN)
  2026. * @param[in] input_data Input (activation) data pointer. Data type: int8
  2027. * @param[in] filter_dims Two dimensional filter dimensions. Format: [N, C]
  2028. * N : accumulation depth and equals (H * W * C_IN) from input_dims
  2029. * C : output depth and equals C_OUT in output_dims
  2030. * H & W : Not used
  2031. * @param[in] filter_data Filter data pointer. Data type: int8
  2032. * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
  2033. * N, H, W : Not used
  2034. * @param[in] bias_data Bias data pointer. Data type: int32
  2035. * @param[in] output_dims Output tensor dimensions. Format: [N, C_OUT]
  2036. * N : Batches
  2037. * C_OUT : Output depth
  2038. * H & W : Not used.
  2039. * @param[in, out] output_data Output data pointer. Data type: int8
  2040. *
  2041. * @return The function returns either
  2042. * <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
  2043. * <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
  2044. *
  2045. * @details
  2046. * - Supported framework: TensorFlow Lite
  2047. */
  2048. riscv_nmsis_nn_status riscv_fully_connected_per_channel_s8(const nmsis_nn_context *ctx,
  2049. const nmsis_nn_fc_params *fc_params,
  2050. const nmsis_nn_per_channel_quant_params *quant_params,
  2051. const nmsis_nn_dims *input_dims,
  2052. const int8_t *input_data,
  2053. const nmsis_nn_dims *filter_dims,
  2054. const int8_t *filter_data,
  2055. const nmsis_nn_dims *bias_dims,
  2056. const int32_t *bias_data,
  2057. const nmsis_nn_dims *output_dims,
  2058. int8_t *output_data);
  2059. /**
  2060. * @brief s8 Fully Connected layer wrapper function
  2061. *
  2062. * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function
  2063. * definition file to see if an additional buffer is required.
  2064. * Optional function {API}_get_buffer_size() provides the buffer
  2065. * size if an additional buffer is required.
  2066. * The caller is expected to clear the buffer, if applicable, for security reasons.
  2067. * @param[in] fc_params Fully Connected layer parameters.
  2068. * Range of fc_params->input_offset : [-127, 128]
  2069. * fc_params->filter_offset : 0
  2070. * Range of fc_params->output_offset : [-128, 127]
  2071. * @param[in] quant_params Per-channel or per-tensor quantization info. Check struct definition for details.
  2072. * It contains the multiplier and shift value(s) to be applied to each output channel
  2073. * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
  2074. * Input dimension is taken as Nx(H * W * C_IN)
  2075. * @param[in] input_data Input (activation) data pointer. Data type: int8
  2076. * @param[in] filter_dims Two dimensional filter dimensions. Format: [N, C]
  2077. * N : accumulation depth and equals (H * W * C_IN) from input_dims
  2078. * C : output depth and equals C_OUT in output_dims
  2079. * H & W : Not used
  2080. * @param[in] filter_data Filter data pointer. Data type: int8
  2081. * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
  2082. * N, H, W : Not used
  2083. * @param[in] bias_data Bias data pointer. Data type: int32
  2084. * @param[in] output_dims Output tensor dimensions. Format: [N, C_OUT]
  2085. * N : Batches
  2086. * C_OUT : Output depth
  2087. * H & W : Not used.
  2088. * @param[in, out] output_data Output data pointer. Data type: int8
  2089. *
  2090. * @return The function returns either
  2091. * <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
  2092. * <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
  2093. *
  2094. * @details
  2095. * - Supported framework: TensorFlow Lite
  2096. */
  2097. riscv_nmsis_nn_status riscv_fully_connected_wrapper_s8(const nmsis_nn_context *ctx,
  2098. const nmsis_nn_fc_params *fc_params,
  2099. const nmsis_nn_quant_params *quant_params,
  2100. const nmsis_nn_dims *input_dims,
  2101. const int8_t *input_data,
  2102. const nmsis_nn_dims *filter_dims,
  2103. const int8_t *filter_data,
  2104. const nmsis_nn_dims *bias_dims,
  2105. const int32_t *bias_data,
  2106. const nmsis_nn_dims *output_dims,
  2107. int8_t *output_data);
  2108. /**
  2109. * @brief Calculate the sum of each row in vector_data, multiply by lhs_offset and optionally add s32 bias_data.
  2110. * @param[in, out] vector_sum_buf Buffer for vector sums
  2111. * @param[in] vector_cols Number of vector columns
  2112. * @param[in] vector_rows Number of vector rows
  2113. * @param[in] vector_data Vector of weights data
  2114. * @param[in] lhs_offset Constant multiplied with each sum
  2115. * @param[in] rhs_offset Constant added to each vector element before sum
  2116. * @param[in] bias_data Vector of bias data, added to each sum.
  2117. * @return The function returns
  2118. * <code>RISCV_NMSIS_NN_SUCCESS</code> - Successful operation
  2119. */
  2120. riscv_nmsis_nn_status riscv_vector_sum_s8(int32_t *vector_sum_buf,
  2121. const int32_t vector_cols,
  2122. const int32_t vector_rows,
  2123. const int8_t *vector_data,
  2124. const int32_t lhs_offset,
  2125. const int32_t rhs_offset,
  2126. const int32_t *bias_data);
  2127. /**
  2128. * @brief Calculate the sum of each row in vector_data, multiply by lhs_offset and optionally add s64 bias_data.
  2129. * @param[in, out] vector_sum_buf Buffer for vector sums
  2130. * @param[in] vector_cols Number of vector columns
  2131. * @param[in] vector_rows Number of vector rows
  2132. * @param[in] vector_data Vector of weights data
  2133. * @param[in] lhs_offset Constant multiplied with each sum
  2134. * @param[in] bias_data Vector of bias data, added to each sum.
  2135. * @return The function returns
  2136. * <code>RISCV_NMSIS_NN_SUCCESS</code> - Successful operation
  2137. */
  2138. riscv_nmsis_nn_status riscv_vector_sum_s8_s64(int64_t *vector_sum_buf,
  2139. const int32_t vector_cols,
  2140. const int32_t vector_rows,
  2141. const int8_t *vector_data,
  2142. const int32_t lhs_offset,
  2143. const int64_t *bias_data);
  2144. /**
  2145. * @brief Get size of additional buffer required by riscv_fully_connected_s8().
  2146. * See also riscv_vector_sum_s8, which is required if buffer size is > 0.
  2147. * @param[in] filter_dims dimension of filter
  2148. * @return The function returns required buffer size in bytes
  2149. *
  2150. */
  2151. int32_t riscv_fully_connected_s8_get_buffer_size(const nmsis_nn_dims *filter_dims);
  2152. /**
  2153. * @brief Get size of additional buffer required by riscv_fully_connected_s8() for processors with DSP extension.
  2154. * Refer to riscv_fully_connected_s8_get_buffer_size() for function argument details.
  2155. *
  2156. * @note Intended for compilation on Host. If compiling for a Riscv target, use
  2157. * riscv_fully_connected_s8_get_buffer_size().
  2158. *
  2159. */
  2160. int32_t riscv_fully_connected_s8_get_buffer_size_dsp(const nmsis_nn_dims *filter_dims);
  2161. /**
  2162. * @brief Basic s16 Fully Connected function.
  2163. *
  2164. * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function
  2165. * definition file to see if an additional buffer is required.
  2166. * Optional function {API}_get_buffer_size() provides the buffer
 * size if an additional buffer is required.
 * The caller is expected to clear the buffer, if applicable, for security reasons.
 * @param[in] fc_params Fully Connected layer parameters.
 * fc_params->input_offset : 0
 * fc_params->filter_offset : 0
 * fc_params->output_offset : 0
 * @param[in] quant_params Per-tensor quantization info.
 * It contains the multiplier and shift value to be applied to the output tensor.
 * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
 * Input dimension is taken as Nx(H * W * C_IN)
 * @param[in] input_data Input (activation) data pointer. Data type: int16
 * @param[in] filter_dims Two dimensional filter dimensions. Format: [N, C]
 * N : accumulation depth and equals (H * W * C_IN) from input_dims
 * C : output depth and equals C_OUT in output_dims
 * H & W : Not used
 * @param[in] filter_data Filter data pointer. Data type: int8
 * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
 * N, H, W : Not used
 * @param[in] bias_data Bias data pointer. Data type: int64
 * @param[in] output_dims Output tensor dimensions. Format: [N, C_OUT]
 * N : Batches
 * C_OUT : Output depth
 * H & W : Not used.
 * @param[in, out] output_data Output data pointer. Data type: int16
 * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
 *
 * @details
 * - Supported framework: TensorFlow Lite
 * - NOTE(review): the offsets above are listed as 0; the s16 variant appears to
 *   require callers to pass zero offsets in fc_params — confirm against implementation.
 */
riscv_nmsis_nn_status riscv_fully_connected_s16(const nmsis_nn_context *ctx,
                                                const nmsis_nn_fc_params *fc_params,
                                                const nmsis_nn_per_tensor_quant_params *quant_params,
                                                const nmsis_nn_dims *input_dims,
                                                const int16_t *input_data,
                                                const nmsis_nn_dims *filter_dims,
                                                const int8_t *filter_data,
                                                const nmsis_nn_dims *bias_dims,
                                                const int64_t *bias_data,
                                                const nmsis_nn_dims *output_dims,
                                                int16_t *output_data);
/**
 * @brief Get size of additional buffer required by riscv_fully_connected_s16().
 * @param[in] filter_dims dimension of filter
 * @return The function returns required buffer size in bytes
 *
 */
int32_t riscv_fully_connected_s16_get_buffer_size(const nmsis_nn_dims *filter_dims);
/**
 * @brief Get size of additional buffer required by riscv_fully_connected_s16() for processors with DSP extension.
 * Refer to riscv_fully_connected_s16_get_buffer_size() for function argument details.
 *
 * @note Intended for compilation on Host. If compiling for a Riscv target, use
 * riscv_fully_connected_s16_get_buffer_size().
 *
 * @return The function returns required buffer size in bytes
 */
int32_t riscv_fully_connected_s16_get_buffer_size_dsp(const nmsis_nn_dims *filter_dims);
/**
 * @brief Q7 opt fully-connected layer function
 * @param[in] pV pointer to input vector
 * @param[in] pM pointer to matrix weights
 * @param[in] dim_vec length of the vector
 * @param[in] num_of_rows number of rows in weight matrix
 * @param[in] bias_shift amount of left-shift for bias
 * @param[in] out_shift amount of right-shift for output
 * @param[in] bias pointer to bias
 * @param[in,out] pOut pointer to output vector
 * @param[in,out] vec_buffer pointer to buffer space for input
 * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
 *
 * @note NOTE(review): the "_opt" kernels conventionally expect pM in an
 * interleaved (reordered) weight layout rather than plain row-major —
 * confirm against the kernel implementation before use.
 */
riscv_nmsis_nn_status riscv_fully_connected_q7_opt(const q7_t *pV,
                                                   const q7_t *pM,
                                                   const uint16_t dim_vec,
                                                   const uint16_t num_of_rows,
                                                   const uint16_t bias_shift,
                                                   const uint16_t out_shift,
                                                   const q7_t *bias,
                                                   q7_t *pOut,
                                                   q15_t *vec_buffer);
/**
 * @brief Q15 basic fully-connected layer function
 * @param[in] pV pointer to input vector
 * @param[in] pM pointer to matrix weights
 * @param[in] dim_vec length of the vector
 * @param[in] num_of_rows number of rows in weight matrix
 * @param[in] bias_shift amount of left-shift for bias
 * @param[in] out_shift amount of right-shift for output
 * @param[in] bias pointer to bias
 * @param[in,out] pOut pointer to output vector
 * @param[in,out] vec_buffer pointer to buffer space for input
 * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
 *
 */
riscv_nmsis_nn_status riscv_fully_connected_q15(const q15_t *pV,
                                                const q15_t *pM,
                                                const uint16_t dim_vec,
                                                const uint16_t num_of_rows,
                                                const uint16_t bias_shift,
                                                const uint16_t out_shift,
                                                const q15_t *bias,
                                                q15_t *pOut,
                                                q15_t *vec_buffer);
/**
 * @brief Q15 opt fully-connected layer function
 * @param[in] pV pointer to input vector
 * @param[in] pM pointer to matrix weights
 * @param[in] dim_vec length of the vector
 * @param[in] num_of_rows number of rows in weight matrix
 * @param[in] bias_shift amount of left-shift for bias
 * @param[in] out_shift amount of right-shift for output
 * @param[in] bias pointer to bias
 * @param[in,out] pOut pointer to output vector
 * @param[in,out] vec_buffer pointer to buffer space for input
 * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
 *
 * @note NOTE(review): as with the other "_opt" kernels, pM is conventionally
 * expected in an interleaved weight layout — confirm against implementation.
 */
riscv_nmsis_nn_status riscv_fully_connected_q15_opt(const q15_t *pV,
                                                    const q15_t *pM,
                                                    const uint16_t dim_vec,
                                                    const uint16_t num_of_rows,
                                                    const uint16_t bias_shift,
                                                    const uint16_t out_shift,
                                                    const q15_t *bias,
                                                    q15_t *pOut,
                                                    q15_t *vec_buffer);
/**
 * @brief Mixed Q15-Q7 fully-connected layer function
 * @param[in] pV pointer to input vector
 * @param[in] pM pointer to matrix weights
 * @param[in] dim_vec length of the vector
 * @param[in] num_of_rows number of rows in weight matrix
 * @param[in] bias_shift amount of left-shift for bias
 * @param[in] out_shift amount of right-shift for output
 * @param[in] bias pointer to bias
 * @param[in,out] pOut pointer to output vector
 * @param[in,out] vec_buffer pointer to buffer space for input
 * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
 *
 */
riscv_nmsis_nn_status riscv_fully_connected_mat_q7_vec_q15(const q15_t *pV,
                                                           const q7_t *pM,
                                                           const uint16_t dim_vec,
                                                           const uint16_t num_of_rows,
                                                           const uint16_t bias_shift,
                                                           const uint16_t out_shift,
                                                           const q7_t *bias,
                                                           q15_t *pOut,
                                                           q15_t *vec_buffer);
/**
 * @brief Mixed Q15-Q7 opt fully-connected layer function
 * @param[in] pV pointer to input vector
 * @param[in] pM pointer to matrix weights
 * @param[in] dim_vec length of the vector
 * @param[in] num_of_rows number of rows in weight matrix
 * @param[in] bias_shift amount of left-shift for bias
 * @param[in] out_shift amount of right-shift for output
 * @param[in] bias pointer to bias
 * @param[in,out] pOut pointer to output vector
 * @param[in,out] vec_buffer pointer to buffer space for input
 * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
 *
 * @note NOTE(review): "_opt" variant — pM is conventionally expected in an
 * interleaved weight layout; confirm against implementation.
 */
riscv_nmsis_nn_status riscv_fully_connected_mat_q7_vec_q15_opt(const q15_t *pV,
                                                               const q7_t *pM,
                                                               const uint16_t dim_vec,
                                                               const uint16_t num_of_rows,
                                                               const uint16_t bias_shift,
                                                               const uint16_t out_shift,
                                                               const q7_t *bias,
                                                               q15_t *pOut,
                                                               q15_t *vec_buffer);
/**
 * @brief Matrix-Multiplication Kernels for Convolution
 *
 * These functions are used within convolution layer functions for
 * matrix multiplication.
 *
 * The implementation is similar to NMSIS-DSP riscv_mat_mult functions
 * with one Q7 and one Q15 operands. The Q15 operand is the im2col
 * output which is always with 2 columns.
 *
 */
/**
 * @brief Matrix-multiplication function for convolution
 * @param[in] pA pointer to operand A
 * @param[in] pInBuffer pointer to operand B, always consists of 2 vectors
 * @param[in] ch_im_out numRow of A
 * @param[in] numCol_A numCol of A
 * @param[in] bias_shift amount of left-shift for bias
 * @param[in] out_shift amount of right-shift for output
 * @param[in] bias the bias
 * @param[in,out] pOut pointer to output
 * @return The function returns the incremented output pointer
 */
q7_t *riscv_nn_mat_mult_kernel_q7_q15(const q7_t *pA,
                                      const q15_t *pInBuffer,
                                      const uint16_t ch_im_out,
                                      const uint16_t numCol_A,
                                      const uint16_t bias_shift,
                                      const uint16_t out_shift,
                                      const q7_t *bias,
                                      q7_t *pOut);
  2369. /**
  2370. * @brief Matrix-multiplication function for convolution
  2371. * @param[in] pA pointer to operand A, q7 type
  2372. * @param[in] pInBuffer pointer to operand B, q7 type
  2373. * @param[in] ch_im_out numRow of A
  2374. * @param[in] numCol_A numCol of A
  2375. * @param[in] bias_shift amount of left-shift for bias
  2376. * @param[in] out_shift amount of right-shift for output
  2377. * @param[in] bias the bias
  2378. * @param[in,out] pOut pointer to output
  2379. * @return The function returns the incremented output pointer
  2380. */
  2381. q7_t *riscv_nn_mat_mult_kernel_q7(const q7_t * pA,
  2382. const q7_t * pInBuffer,
  2383. const uint16_t ch_im_out,
  2384. const uint16_t numCol_A,
  2385. const uint16_t bias_shift,
  2386. const uint16_t out_shift,
  2387. const q7_t * bias,
  2388. q7_t * pOut);
/*
 * Other functions
 * These layers are typically not timing critical
 * Basic implementation is supported here
 */
/**
 * @defgroup groupElementwise Elementwise Functions
 *
 * Elementwise add and multiplication functions.
 *
 */
/**
 * @brief s8 elementwise add of two vectors
 * @param[in] input_1_vect pointer to input vector 1
 * @param[in] input_2_vect pointer to input vector 2
 * @param[in] input_1_offset offset for input 1. Range: -127 to 128
 * @param[in] input_1_mult multiplier for input 1
 * @param[in] input_1_shift shift for input 1
 * @param[in] input_2_offset offset for input 2. Range: -127 to 128
 * @param[in] input_2_mult multiplier for input 2
 * @param[in] input_2_shift shift for input 2
 * @param[in] left_shift input left shift
 * @param[in,out] output pointer to output vector
 * @param[in] out_offset output offset. Range: -128 to 127
 * @param[in] out_mult output multiplier
 * @param[in] out_shift output shift
 * @param[in] out_activation_min minimum value to clamp output to. Min: -128
 * @param[in] out_activation_max maximum value to clamp output to. Max: 127
 * @param[in] block_size number of samples
 * @return The function returns RISCV_NMSIS_NN_SUCCESS
 *
 * @note NOTE(review): the [-127, 128] input-offset range suggests these are the
 * negated zero points of the int8 inputs — confirm against the quantization spec.
 */
riscv_nmsis_nn_status riscv_elementwise_add_s8(const int8_t *input_1_vect,
                                               const int8_t *input_2_vect,
                                               const int32_t input_1_offset,
                                               const int32_t input_1_mult,
                                               const int32_t input_1_shift,
                                               const int32_t input_2_offset,
                                               const int32_t input_2_mult,
                                               const int32_t input_2_shift,
                                               const int32_t left_shift,
                                               int8_t *output,
                                               const int32_t out_offset,
                                               const int32_t out_mult,
                                               const int32_t out_shift,
                                               const int32_t out_activation_min,
                                               const int32_t out_activation_max,
                                               const int32_t block_size);
/**
 * @brief s16 elementwise add of two vectors
 * @param[in] input_1_vect pointer to input vector 1
 * @param[in] input_2_vect pointer to input vector 2
 * @param[in] input_1_offset offset for input 1. Not used.
 * @param[in] input_1_mult multiplier for input 1
 * @param[in] input_1_shift shift for input 1
 * @param[in] input_2_offset offset for input 2. Not used.
 * @param[in] input_2_mult multiplier for input 2
 * @param[in] input_2_shift shift for input 2
 * @param[in] left_shift input left shift
 * @param[in,out] output pointer to output vector
 * @param[in] out_offset output offset. Not used.
 * @param[in] out_mult output multiplier
 * @param[in] out_shift output shift
 * @param[in] out_activation_min minimum value to clamp output to. Min: -32768
 * @param[in] out_activation_max maximum value to clamp output to. Max: 32767
 * @param[in] block_size number of samples
 * @return The function returns RISCV_NMSIS_NN_SUCCESS
 *
 * @note The offset parameters are unused for the s16 variant; they are kept
 * so the signature mirrors riscv_elementwise_add_s8().
 */
riscv_nmsis_nn_status riscv_elementwise_add_s16(const int16_t *input_1_vect,
                                                const int16_t *input_2_vect,
                                                const int32_t input_1_offset,
                                                const int32_t input_1_mult,
                                                const int32_t input_1_shift,
                                                const int32_t input_2_offset,
                                                const int32_t input_2_mult,
                                                const int32_t input_2_shift,
                                                const int32_t left_shift,
                                                int16_t *output,
                                                const int32_t out_offset,
                                                const int32_t out_mult,
                                                const int32_t out_shift,
                                                const int32_t out_activation_min,
                                                const int32_t out_activation_max,
                                                const int32_t block_size);
/**
 * @brief s8 elementwise multiplication
 * @param[in] input_1_vect pointer to input vector 1
 * @param[in] input_2_vect pointer to input vector 2
 * @param[in] input_1_offset offset for input 1. Range: -127 to 128
 * @param[in] input_2_offset offset for input 2. Range: -127 to 128
 * @param[in,out] output pointer to output vector
 * @param[in] out_offset output offset. Range: -128 to 127
 * @param[in] out_mult output multiplier
 * @param[in] out_shift output shift
 * @param[in] out_activation_min minimum value to clamp output to. Min: -128
 * @param[in] out_activation_max maximum value to clamp output to. Max: 127
 * @param[in] block_size number of samples
 * @return The function returns RISCV_NMSIS_NN_SUCCESS
 *
 * @details Supported framework: TensorFlow Lite micro
 * @note NOTE(review): the [-127, 128] input-offset range suggests these are the
 * negated zero points of the int8 inputs — confirm against the quantization spec.
 */
riscv_nmsis_nn_status riscv_elementwise_mul_s8(const int8_t *input_1_vect,
                                               const int8_t *input_2_vect,
                                               const int32_t input_1_offset,
                                               const int32_t input_2_offset,
                                               int8_t *output,
                                               const int32_t out_offset,
                                               const int32_t out_mult,
                                               const int32_t out_shift,
                                               const int32_t out_activation_min,
                                               const int32_t out_activation_max,
                                               const int32_t block_size);
/**
 * @brief s16 elementwise multiplication
 * @param[in] input_1_vect pointer to input vector 1
 * @param[in] input_2_vect pointer to input vector 2
 * @param[in] input_1_offset offset for input 1. Not used.
 * @param[in] input_2_offset offset for input 2. Not used.
 * @param[in,out] output pointer to output vector
 * @param[in] out_offset output offset. Not used.
 * @param[in] out_mult output multiplier
 * @param[in] out_shift output shift
 * @param[in] out_activation_min minimum value to clamp output to. Min: -32768
 * @param[in] out_activation_max maximum value to clamp output to. Max: 32767
 * @param[in] block_size number of samples
 * @return The function returns RISCV_NMSIS_NN_SUCCESS
 *
 * @details Supported framework: TensorFlow Lite micro
 * @note The offset parameters are unused for the s16 variant; they are kept
 * so the signature mirrors riscv_elementwise_mul_s8().
 */
riscv_nmsis_nn_status riscv_elementwise_mul_s16(const int16_t *input_1_vect,
                                                const int16_t *input_2_vect,
                                                const int32_t input_1_offset,
                                                const int32_t input_2_offset,
                                                int16_t *output,
                                                const int32_t out_offset,
                                                const int32_t out_mult,
                                                const int32_t out_shift,
                                                const int32_t out_activation_min,
                                                const int32_t out_activation_max,
                                                const int32_t block_size);
/**
 * @defgroup Acti Activation Functions
 *
 * Perform activation layers, including ReLU (Rectified Linear Unit),
 * sigmoid and tanh
 *
 */
/**
 * @brief Q7 RELU function
 * @param[in,out] data pointer to input; overwritten in place with the activated values
 * @param[in] size number of elements
 * @return none.
 */
void riscv_relu_q7(int8_t *data, uint16_t size);
/**
 * @brief s8 ReLU6 function
 * @param[in,out] data pointer to input; overwritten in place with the activated values
 * @param[in] size number of elements
 * @return none.
 */
void riscv_relu6_s8(int8_t *data, uint16_t size);
/**
 * @brief Q15 RELU function
 * @param[in,out] data pointer to input; overwritten in place with the activated values
 * @param[in] size number of elements
 * @return none.
 */
void riscv_relu_q15(int16_t *data, uint16_t size);
/**
 * @brief Q7 neural network activation function using direct table look-up
 * @param[in,out] data pointer to input; overwritten in place with the activated values
 * @param[in] size number of elements
 * @param[in] int_width bit-width of the integer part, assumed to be smaller than 3
 * @param[in] type type of activation functions
 * @return none.
 */
void riscv_nn_activations_direct_q7(q7_t *data, uint16_t size, uint16_t int_width, riscv_nn_activation_type type);
/**
 * @brief Q15 neural network activation function using direct table look-up
 * @param[in,out] data pointer to input; overwritten in place with the activated values
 * @param[in] size number of elements
 * @param[in] int_width bit-width of the integer part, assumed to be smaller than 3
 * @param[in] type type of activation functions
 * @return none.
 *
 * @details
 *
 * This is the direct table look-up approach.
 *
 * Assume here the integer part of the fixed-point is <= 3.
 * More than 3 just not making much sense, makes no difference with
 * saturation followed by any of these activation functions.
 */
void riscv_nn_activations_direct_q15(q15_t *data, uint16_t size, uint16_t int_width, riscv_nn_activation_type type);
/**
 * @brief s16 neural network activation function using direct table look-up
 * @param[in] input pointer to input data
 * @param[out] output pointer to output
 * @param[in] size number of elements
 * @param[in] left_shift amount of left shift applied to the input.
 * NOTE(review): the original text ("bit-width of the integer part,
 * assumed to be smaller than 3") appears copy-pasted from the q7/q15
 * variants — confirm the exact semantics against the implementation.
 * @param[in] type type of activation functions
 * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
 *
 * @details Supported framework: TensorFlow Lite for Microcontrollers.
 * This activation function must be bit precise congruent with the corresponding TFLM tanh and sigmoid activation
 * functions
 */
riscv_nmsis_nn_status riscv_nn_activation_s16(const int16_t *input,
                                              int16_t *output,
                                              const int32_t size,
                                              const int32_t left_shift,
                                              const riscv_nn_activation_type type);
/**
 * @defgroup Pooling Pooling Functions
 *
 * Perform pooling functions, including max pooling and average pooling
 *
 */
/**
 * @brief Q7 max pooling function
 * @param[in] Im_in pointer to input tensor
 * @param[in] dim_im_in input tensor dimension
 * @param[in] ch_im_in number of input tensor channels
 * @param[in] dim_kernel filter kernel size
 * @param[in] padding padding sizes
 * @param[in] stride convolution stride
 * @param[in] dim_im_out output tensor dimension
 * @param[in,out] bufferA pointer to buffer space for input
 * @param[in,out] Im_out pointer to output tensor
 * @return none.
 *
 * @note A single scalar is used for each spatial dimension, i.e. the function
 * appears to assume square input, kernel and output (width == height) —
 * NOTE(review): confirm against the implementation.
 */
void riscv_maxpool_q7_HWC(q7_t *Im_in,
                          const uint16_t dim_im_in,
                          const uint16_t ch_im_in,
                          const uint16_t dim_kernel,
                          const uint16_t padding,
                          const uint16_t stride,
                          const uint16_t dim_im_out,
                          q7_t *bufferA,
                          q7_t *Im_out);
/**
 * @brief Q7 average pooling function
 * @param[in] Im_in pointer to input tensor
 * @param[in] dim_im_in input tensor dimension
 * @param[in] ch_im_in number of input tensor channels
 * @param[in] dim_kernel filter kernel size
 * @param[in] padding padding sizes
 * @param[in] stride convolution stride
 * @param[in] dim_im_out output tensor dimension
 * @param[in,out] bufferA pointer to buffer space for input
 * @param[in,out] Im_out pointer to output tensor
 * @return none.
 *
 * @note A single scalar is used for each spatial dimension, i.e. the function
 * appears to assume square input, kernel and output (width == height) —
 * NOTE(review): confirm against the implementation.
 */
void riscv_avepool_q7_HWC(q7_t *Im_in,
                          const uint16_t dim_im_in,
                          const uint16_t ch_im_in,
                          const uint16_t dim_kernel,
                          const uint16_t padding,
                          const uint16_t stride,
                          const uint16_t dim_im_out,
                          q7_t *bufferA,
                          q7_t *Im_out);
/**
 * @addtogroup Pooling
 *
 * Perform max and average pooling operations
 *
 */
/**
 * @brief s8 average pooling function.
 *
 * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function
 * definition file to see if an additional buffer is required.
 * Optional function {API}_get_buffer_size() provides the buffer
 * size if an additional buffer is required.
 * The caller is expected to clear the buffer, if applicable, for security reasons.
 * @param[in] pool_params Pooling parameters
 * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN]
 * @param[in] input_data Input (activation) data pointer. Data type: int8
 * @param[in] filter_dims Filter tensor dimensions. Format: [H, W]
 * Argument N and C are not used.
 * @param[in] output_dims Output tensor dimensions. Format: [H, W, C_OUT]
 * Argument N is not used.
 * C_OUT equals C_IN.
 * @param[in, out] output_data Output data pointer. Data type: int8
 *
 * @return The function returns either
 * <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail, or
 * <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
 *
 * @details
 * - Supported Framework: TensorFlow Lite
 *
 */
riscv_nmsis_nn_status riscv_avgpool_s8(const nmsis_nn_context *ctx,
                                       const nmsis_nn_pool_params *pool_params,
                                       const nmsis_nn_dims *input_dims,
                                       const int8_t *input_data,
                                       const nmsis_nn_dims *filter_dims,
                                       const nmsis_nn_dims *output_dims,
                                       int8_t *output_data);
/**
 * @brief Get the required buffer size for S8 average pooling function
 * @param[in] dim_dst_width output tensor dimension
 * @param[in] ch_src number of input tensor channels
 * @return The function returns required buffer size in bytes
 *
 */
int32_t riscv_avgpool_s8_get_buffer_size(const int dim_dst_width, const int ch_src);
/**
 * @brief Get the required buffer size for S8 average pooling function for processors with DSP extension.
 * Refer to riscv_avgpool_s8_get_buffer_size() for function argument details.
 *
 * @note Intended for compilation on Host. If compiling for a Riscv target, use
 * riscv_avgpool_s8_get_buffer_size().
 *
 * @return The function returns required buffer size in bytes
 */
int32_t riscv_avgpool_s8_get_buffer_size_dsp(const int dim_dst_width, const int ch_src);
/**
 * @brief s16 average pooling function.
 *
 * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function
 * definition file to see if an additional buffer is required.
 * Optional function {API}_get_buffer_size() provides the buffer
 * size if an additional buffer is required.
 * The caller is expected to clear the buffer, if applicable, for security reasons.
 * @param[in] pool_params Pooling parameters
 * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN]
 * @param[in] input_data Input (activation) data pointer. Data type: int16
 * @param[in] filter_dims Filter tensor dimensions. Format: [H, W]
 * Argument N and C are not used.
 * @param[in] output_dims Output tensor dimensions. Format: [H, W, C_OUT]
 * Argument N is not used.
 * C_OUT equals C_IN.
 * @param[in, out] output_data Output data pointer. Data type: int16
 *
 * @return The function returns
 * <code>RISCV_NMSIS_NN_SUCCESS</code> - Successful operation
 * <code>RISCV_NMSIS_NN_ARG_ERROR</code> - In case of invalid arguments
 *
 * @details
 * - Supported Framework: TensorFlow Lite
 *
 */
riscv_nmsis_nn_status riscv_avgpool_s16(const nmsis_nn_context *ctx,
                                        const nmsis_nn_pool_params *pool_params,
                                        const nmsis_nn_dims *input_dims,
                                        const int16_t *input_data,
                                        const nmsis_nn_dims *filter_dims,
                                        const nmsis_nn_dims *output_dims,
                                        int16_t *output_data);
/**
 * @brief Get the required buffer size for S16 average pooling function
 * @param[in] dim_dst_width output tensor dimension
 * @param[in] ch_src number of input tensor channels
 * @return The function returns required buffer size in bytes
 *
 */
int32_t riscv_avgpool_s16_get_buffer_size(const int dim_dst_width, const int ch_src);
/**
 * @brief Get the required buffer size for S16 average pooling function for processors with DSP extension.
 * Refer to riscv_avgpool_s16_get_buffer_size() for function argument details.
 *
 * @note Intended for compilation on Host. If compiling for a Riscv target, use
 * riscv_avgpool_s16_get_buffer_size().
 *
 * @return The function returns required buffer size in bytes
 */
int32_t riscv_avgpool_s16_get_buffer_size_dsp(const int dim_dst_width, const int ch_src);
/**
 * @brief s8 max pooling function.
 *
 * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function
 * definition file to see if an additional buffer is required.
 * Optional function {API}_get_buffer_size() provides the buffer
 * size if an additional buffer is required.
 * The caller is expected to clear the buffer, if applicable, for security reasons.
 * @param[in] pool_params Pooling parameters
 * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN]
 * @param[in] input_data Input (activation) data pointer. The input tensor must not
 * overlap with the output tensor. Data type: int8
 * @param[in] filter_dims Filter tensor dimensions. Format: [H, W]
 * Argument N and C are not used.
 * @param[in] output_dims Output tensor dimensions. Format: [H, W, C_OUT]
 * Argument N is not used.
 * C_OUT equals C_IN.
 * @param[in, out] output_data Output data pointer. Data type: int8
 *
 * @return The function returns either
 * <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail, or
 * <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
 *
 * @details
 * - Supported Framework: TensorFlow Lite
 *
 */
riscv_nmsis_nn_status riscv_max_pool_s8(const nmsis_nn_context *ctx,
                                        const nmsis_nn_pool_params *pool_params,
                                        const nmsis_nn_dims *input_dims,
                                        const int8_t *input_data,
                                        const nmsis_nn_dims *filter_dims,
                                        const nmsis_nn_dims *output_dims,
                                        int8_t *output_data);
/**
 * @brief s16 max pooling function.
 *
 * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function
 * definition file to see if an additional buffer is required.
 * Optional function {API}_get_buffer_size() provides the buffer
 * size if an additional buffer is required.
 * The caller is expected to clear the buffer, if applicable, for security reasons.
 * @param[in] pool_params Pooling parameters
 * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN]
 * @param[in] src Input (activation) data pointer. The input tensor must not
 * overlap with the output tensor. Data type: int16
 * @param[in] filter_dims Filter tensor dimensions. Format: [H, W]
 * Argument N and C are not used.
 * @param[in] output_dims Output tensor dimensions. Format: [H, W, C_OUT]
 * Argument N is not used.
 * C_OUT equals C_IN.
 * @param[in, out] dst Output data pointer. Data type: int16
 *
 * @return The function returns either
 * <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail, or
 * <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
 *
 * @details
 * - Supported Framework: TensorFlow Lite
 *
 */
riscv_nmsis_nn_status riscv_max_pool_s16(const nmsis_nn_context *ctx,
                                         const nmsis_nn_pool_params *pool_params,
                                         const nmsis_nn_dims *input_dims,
                                         const int16_t *src,
                                         const nmsis_nn_dims *filter_dims,
                                         const nmsis_nn_dims *output_dims,
                                         int16_t *dst);
/**
 * @defgroup Softmax Softmax Functions
 *
 *
 */
/**
 * @brief Q7 softmax function
 * @param[in] vec_in pointer to input vector
 * @param[in] dim_vec input vector dimension
 * @param[out] p_out pointer to output vector
 * @return none.
 *
 * @note This function is an optimized version which is not bit-accurate with
 * TensorFlow Lite's kernel
 *
 */
void riscv_softmax_q7(const q7_t *vec_in, const uint16_t dim_vec, q7_t *p_out);
/**
 * @brief Q7 softmax function with batch parameter
 * @param[in] vec_in pointer to input vector
 * @param[in] nb_batches number of batches
 * @param[in] dim_vec input vector dimension
 * @param[out] p_out pointer to output vector
 * @return none.
 *
 * @note This function is an optimized version which is not bit-accurate with
 * TensorFlow Lite's kernel
 *
 */
void riscv_softmax_with_batch_q7(const q7_t *vec_in, const uint16_t nb_batches, const uint16_t dim_vec, q7_t *p_out);
/**
 * @brief Q15 softmax function
 * @param[in] vec_in pointer to input vector
 * @param[in] dim_vec input vector dimension
 * @param[out] p_out pointer to output vector
 * @return none.
 *
 * @note This function is an optimized version which is not bit-accurate with
 * TensorFlow Lite's kernel
 *
 */
void riscv_softmax_q15(const q15_t *vec_in, const uint16_t dim_vec, q15_t *p_out);
  2864. /**
  2865. * @brief S8 softmax function
  2866. * @param[in] input Pointer to the input tensor
  2867. * @param[in] num_rows Number of rows in the input tensor
  2868. * @param[in] row_size Number of elements in each input row
  2869. * @param[in] mult Input quantization multiplier
  2870. * @param[in] shift Input quantization shift within the range [0, 31]
  2871. * @param[in] diff_min Minimum difference with max in row. Used to check if
  2872. * the quantized exponential operation can be performed
  2873. * @param[out] output Pointer to the output tensor
  2874. *
  2875. * @note Supported framework: TensorFlow Lite micro (bit-accurate)
  2876. *
  2877. */
  2878. void riscv_softmax_s8(const int8_t *input,
  2879. const int32_t num_rows,
  2880. const int32_t row_size,
  2881. const int32_t mult,
  2882. const int32_t shift,
  2883. const int32_t diff_min,
  2884. int8_t *output);
  2885. /**
  2886. * @brief S8 to s16 softmax function
  2887. * @param[in] input Pointer to the input tensor
  2888. * @param[in] num_rows Number of rows in the input tensor
  2889. * @param[in] row_size Number of elements in each input row
  2890. * @param[in] mult Input quantization multiplier
  2891. * @param[in] shift Input quantization shift within the range [0, 31]
  2892. * @param[in] diff_min Minimum difference with max in row. Used to check if
  2893. * the quantized exponential operation can be performed
  2894. * @param[out] output Pointer to the output tensor
  2895. *
  2896. * @note Supported framework: TensorFlow Lite micro (bit-accurate)
  2897. *
  2898. */
  2899. void riscv_softmax_s8_s16(const int8_t *input,
  2900. const int32_t num_rows,
  2901. const int32_t row_size,
  2902. const int32_t mult,
  2903. const int32_t shift,
  2904. const int32_t diff_min,
  2905. int16_t *output);
  2906. /**
  2907. * @brief S16 softmax function
  2908. * @param[in] input Pointer to the input tensor
  2909. * @param[in] num_rows Number of rows in the input tensor
  2910. * @param[in] row_size Number of elements in each input row
  2911. * @param[in] mult Input quantization multiplier
  2912. * @param[in] shift Input quantization shift within the range [0, 31]
  2913. * @param[in] softmax_params Softmax s16 layer parameters with two pointers to LUTs speficied below.
  2914. * For indexing the high 9 bits are used and 7 remaining for interpolation.
  2915. * That means 512 entries for the 9-bit indexing and 1 extra for interpolation, i.e. 513
  2916. * values for each LUT.
  2917. * - Lookup table for exp(x), where x uniform distributed between [-10.0 , 0.0]
  2918. * - Lookup table for 1 / (1 + x), where x uniform distributed between [0.0 , 1.0]
  2919. * @param[out] output Pointer to the output tensor
  2920. * @return The function returns
  2921. * <code>RISCV_NMSIS_NN_ARG_ERROR</code> - Argument error check failed
  2922. * <code>RISCV_NMSIS_NN_SUCCESS</code> - Successful operation
  2923. *
  2924. * @note Supported framework: TensorFlow Lite micro (bit-accurate)
  2925. *
  2926. */
  2927. riscv_nmsis_nn_status riscv_softmax_s16(const int16_t *input,
  2928. const int32_t num_rows,
  2929. const int32_t row_size,
  2930. const int32_t mult,
  2931. const int32_t shift,
  2932. const nmsis_nn_softmax_lut_s16 *softmax_params,
  2933. int16_t *output);
  2934. /**
  2935. * @brief U8 softmax function
  2936. * @param[in] input Pointer to the input tensor
  2937. * @param[in] num_rows Number of rows in the input tensor
  2938. * @param[in] row_size Number of elements in each input row
  2939. * @param[in] mult Input quantization multiplier
  2940. * @param[in] shift Input quantization shift within the range [0, 31]
  2941. * @param[in] diff_min Minimum difference with max in row. Used to check if
  2942. * the quantized exponential operation can be performed
  2943. * @param[out] output Pointer to the output tensor
  2944. *
  2945. * @note Supported framework: TensorFlow Lite micro (bit-accurate)
  2946. *
  2947. */
  2948. void riscv_softmax_u8(const uint8_t *input,
  2949. const int32_t num_rows,
  2950. const int32_t row_size,
  2951. const int32_t mult,
  2952. const int32_t shift,
  2953. const int32_t diff_min,
  2954. uint8_t *output);
  2955. /**
  2956. * @defgroup Reshape Reshape Functions
  2957. *
  2958. */
  2959. /**
  2960. * @brief Reshape a s8 vector into another with different shape
  2961. * @param[in] input points to the s8 input vector
  2962. * @param[out] output points to the s8 output vector
  2963. * @param[in] total_size total size of the input and output vectors in bytes
  2964. *
  2965. * @note The output is expected to be in a memory area that does not overlap with the input's
  2966. *
  2967. */
  2968. void riscv_reshape_s8(const int8_t *input, int8_t *output, const uint32_t total_size);
  2969. /**
  2970. * @defgroup Transpose Transpose Functions
  2971. *
  2972. */
  2973. /**
  2974. * @brief Basic transpose function
  2975. *
  2976. * @param[in] input_data Input (activation) data pointer. Data type: int8
  2977. * @param[out] output_data Output data pointer. Data type: int8
  2978. * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
  2979. * @param[in] output_dims Output tensor dimensions. Format may be arbitrary relative to input format.
  2980. * The output dimension will depend on the permutation dimensions.
  2981. * In other words the out dimensions are the result of applying the permutation
  2982. * to the input dimensions.
  2983. * @param[in] transpose_params Transpose parameters. Contains permutation dimensions.
  2984. *
  2985. * @return The function returns either
  2986. * <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
  2987. * <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
  2988. *
  2989. */
  2990. riscv_nmsis_nn_status riscv_transpose_s8(const int8_t *input_data,
  2991. int8_t *const output_data,
  2992. const nmsis_nn_dims *const input_dims,
  2993. const nmsis_nn_dims *const output_dims,
  2994. const nmsis_nn_transpose_params *const transpose_params);
  2995. /**
  2996. * @defgroup Concatenation Concatenation Functions
  2997. *
  2998. */
  2999. /**
  3000. * @brief int8/uint8 concatenation function to be used for concatenating N-tensors along the X axis
  3001. * This function should be called for each input tensor to concatenate. The argument offset_x
  3002. * will be used to store the input tensor in the correct position in the output tensor
  3003. *
  3004. * i.e. offset_x = 0
  3005. * for(i = 0; i < num_input_tensors; ++i)
  3006. * {
  3007. * riscv_concatenation_s8_x(&input[i], ..., &output, ..., ..., offset_x)
  3008. * offset_x += input_x[i]
  3009. * }
  3010. *
  3011. * This function assumes that the output tensor has:
  3012. * -# The same height of the input tensor
  3013. * -# The same number of channels of the input tensor
  3014. * -# The same batch size of the input tensor
  3015. *
  3016. * Unless specified otherwise, arguments are mandatory.
  3017. *
  3018. * @note This function, data layout independent, can be used to concatenate either int8 or uint8 tensors because it
  3019. * does not involve any arithmetic operation
  3020. *
  3021. * @param[in] input Pointer to input tensor. Input tensor must not overlap with the output tensor.
  3022. * @param[in] input_x Width of input tensor
  3023. * @param[in] input_y Height of input tensor
  3024. * @param[in] input_z Channels in input tensor
  3025. * @param[in] input_w Batch size in input tensor
  3026. * @param[out] output Pointer to output tensor. Expected to be at least
  3027. * (input_x * input_y * input_z * input_w) + offset_x
  3028. * bytes.
  3029. * @param[in] output_x Width of output tensor
  3030. * @param[in] offset_x The offset (in number of elements) on the X axis to start concatenating the input tensor
  3031. * It is the user's responsibility to provide the correct value
  3032. *
  3033. * <b> Input constraints</b>
  3034. * offset_x is less than output_x
  3035. *
  3036. */
  3037. void riscv_concatenation_s8_x(const int8_t *input,
  3038. const uint16_t input_x,
  3039. const uint16_t input_y,
  3040. const uint16_t input_z,
  3041. const uint16_t input_w,
  3042. int8_t *output,
  3043. const uint16_t output_x,
  3044. const uint32_t offset_x);
  3045. /**
  3046. * @brief int8/uint8 concatenation function to be used for concatenating N-tensors along the Y axis
  3047. * This function should be called for each input tensor to concatenate. The argument offset_y
  3048. * will be used to store the input tensor in the correct position in the output tensor
  3049. *
  3050. * i.e. offset_y = 0
  3051. * for(i = 0; i < num_input_tensors; ++i)
  3052. * {
  3053. * riscv_concatenation_s8_y(&input[i], ..., &output, ..., ..., offset_y)
  3054. * offset_y += input_y[i]
  3055. * }
  3056. *
  3057. * This function assumes that the output tensor has:
  3058. * -# The same width of the input tensor
  3059. * -# The same number of channels of the input tensor
  3060. * -# The same batch size of the input tensor
  3061. *
  3062. * Unless specified otherwise, arguments are mandatory.
  3063. *
  3064. * @note This function, data layout independent, can be used to concatenate either int8 or uint8 tensors because it
  3065. * does not involve any arithmetic operation
  3066. *
  3067. * @param[in] input Pointer to input tensor. Input tensor must not overlap with the output tensor.
  3068. * @param[in] input_x Width of input tensor
  3069. * @param[in] input_y Height of input tensor
  3070. * @param[in] input_z Channels in input tensor
  3071. * @param[in] input_w Batch size in input tensor
  3072. * @param[out] output Pointer to output tensor. Expected to be at least
  3073. * (input_z * input_w * input_x * input_y) + offset_y
  3074. * bytes.
  3075. * @param[in] output_y Height of output tensor
  3076. * @param[in] offset_y The offset on the Y axis to start concatenating the input tensor
  3077. * It is the user's responsibility to provide the correct value
  3078. *
  3079. * <b> Input constraints</b>
  3080. * offset_y is less than output_y
  3081. *
  3082. */
  3083. void riscv_concatenation_s8_y(const int8_t *input,
  3084. const uint16_t input_x,
  3085. const uint16_t input_y,
  3086. const uint16_t input_z,
  3087. const uint16_t input_w,
  3088. int8_t *output,
  3089. const uint16_t output_y,
  3090. const uint32_t offset_y);
  3091. /**
  3092. * @brief int8/uint8 concatenation function to be used for concatenating N-tensors along the Z axis
  3093. * This function should be called for each input tensor to concatenate. The argument offset_z
  3094. * will be used to store the input tensor in the correct position in the output tensor
  3095. *
  3096. * i.e. offset_z = 0
  3097. * for(i = 0; i < num_input_tensors; ++i)
  3098. * {
  3099. * riscv_concatenation_s8_z(&input[i], ..., &output, ..., ..., offset_z)
  3100. * offset_z += input_z[i]
  3101. * }
  3102. *
  3103. * This function assumes that the output tensor has:
  3104. * -# The same width of the input tensor
  3105. * -# The same height of the input tensor
  3106. * -# The same batch size of the input tensor
  3107. *
  3108. * Unless specified otherwise, arguments are mandatory.
  3109. *
  3110. * @note This function, data layout independent, can be used to concatenate either int8 or uint8 tensors because it
  3111. * does not involve any arithmetic operation
  3112. *
  3113. * @param[in] input Pointer to input tensor. Input tensor must not overlap with output tensor.
  3114. * @param[in] input_x Width of input tensor
  3115. * @param[in] input_y Height of input tensor
  3116. * @param[in] input_z Channels in input tensor
  3117. * @param[in] input_w Batch size in input tensor
  3118. * @param[out] output Pointer to output tensor. Expected to be at least
  3119. * (input_x * input_y * input_z * input_w) + offset_z
  3120. * bytes.
  3121. * @param[in] output_z Channels in output tensor
  3122. * @param[in] offset_z The offset on the Z axis to start concatenating the input tensor
  3123. * It is the user's responsibility to provide the correct value
  3124. *
  3125. * <b> Input constraints</b>
  3126. * offset_z is less than output_z
  3127. *
  3128. */
  3129. void riscv_concatenation_s8_z(const int8_t *input,
  3130. const uint16_t input_x,
  3131. const uint16_t input_y,
  3132. const uint16_t input_z,
  3133. const uint16_t input_w,
  3134. int8_t *output,
  3135. const uint16_t output_z,
  3136. const uint32_t offset_z);
  3137. /**
  3138. * @brief int8/uint8 concatenation function to be used for concatenating N-tensors along the W axis (Batch size)
  3139. * This function should be called for each input tensor to concatenate. The argument offset_w
  3140. * will be used to store the input tensor in the correct position in the output tensor
  3141. *
  3142. * i.e. offset_w = 0
  3143. * for(i = 0; i < num_input_tensors; ++i)
  3144. * {
  3145. * riscv_concatenation_s8_w(&input[i], ..., &output, ..., ..., offset_w)
  3146. * offset_w += input_w[i]
  3147. * }
  3148. *
  3149. * This function assumes that the output tensor has:
  3150. * -# The same width of the input tensor
  3151. * -# The same height of the input tensor
  3152. * -# The same number of channels of the input tensor
  3153. *
  3154. * Unless specified otherwise, arguments are mandatory.
  3155. *
  3156. * @note This function, data layout independent, can be used to concatenate either int8 or uint8 tensors because it
  3157. * does not involve any arithmetic operation
  3158. *
  3159. * @param[in] input Pointer to input tensor
  3160. * @param[in] input_x Width of input tensor
  3161. * @param[in] input_y Height of input tensor
  3162. * @param[in] input_z Channels in input tensor
  3163. * @param[in] input_w Batch size in input tensor
  3164. * @param[out] output Pointer to output tensor. Expected to be at least
  3165. * input_x * input_y * input_z * input_w
  3166. * bytes.
  3167. * @param[in] offset_w The offset on the W axis to start concatenating the input tensor
  3168. * It is the user's responsibility to provide the correct value
  3169. *
  3170. */
  3171. void riscv_concatenation_s8_w(const int8_t *input,
  3172. const uint16_t input_x,
  3173. const uint16_t input_y,
  3174. const uint16_t input_z,
  3175. const uint16_t input_w,
  3176. int8_t *output,
  3177. const uint32_t offset_w);
  3178. /**
  3179. * @defgroup SVDF SVDF Functions
  3180. *
  3181. */
  3182. /**
  3183. * @brief s8 SVDF function with 8 bit state tensor and 8 bit time weights
  3184. *
  3185. * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function
  3186. * definition file to see if an additional buffer is required.
  3187. * Optional function riscv_fully_connected_s8_get_buffer_size() provides the buffer
  3188. * size if an additional buffer is required.
  3189. * The caller is expected to clear the buffer, if applicable, for security reasons.
  3190. * @param[in] input_ctx Temporary scratch buffer
  3191. * The caller is expected to clear the buffer, if applicable, for security reasons.
  3192. * @param[in] output_ctx Temporary output scratch buffer
  3193. * The caller is expected to clear the buffer, if applicable, for security reasons.
  3194. * @param[in] svdf_params SVDF Parameters
  3195. * Range of svdf_params->input_offset : [-128, 127]
  3196. * Range of svdf_params->output_offset : [-128, 127]
  3197. * @param[in] input_quant_params Input quantization parameters
  3198. * @param[in] output_quant_params Output quantization parameters
  3199. * @param[in] input_dims Input tensor dimensions
  3200. * @param[in] input_data Pointer to input tensor
  3201. * @param[in] state_dims State tensor dimensions
  3202. * @param[in] state_data Pointer to state tensor
  3203. * @param[in] weights_feature_dims Weights (feature) tensor dimensions
  3204. * @param[in] weights_feature_data Pointer to the weights (feature) tensor
  3205. * @param[in] weights_time_dims Weights (time) tensor dimensions
  3206. * @param[in] weights_time_data Pointer to the weights (time) tensor
  3207. * @param[in] bias_dims Bias tensor dimensions
  3208. * @param[in] bias_data Pointer to bias tensor
  3209. * @param[in] output_dims Output tensor dimensions
  3210. * @param[out] output_data Pointer to the output tensor
  3211. *
  3212. * @return The function returns either
  3213. * <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
  3214. * <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
  3215. *
  3216. * @details
  3217. * 1. Supported framework: TensorFlow Lite micro
  3218. */
  3219. riscv_nmsis_nn_status riscv_svdf_s8(const nmsis_nn_context *ctx,
  3220. const nmsis_nn_context *input_ctx,
  3221. const nmsis_nn_context *output_ctx,
  3222. const nmsis_nn_svdf_params *svdf_params,
  3223. const nmsis_nn_per_tensor_quant_params *input_quant_params,
  3224. const nmsis_nn_per_tensor_quant_params *output_quant_params,
  3225. const nmsis_nn_dims *input_dims,
  3226. const int8_t *input_data,
  3227. const nmsis_nn_dims *state_dims,
  3228. int8_t *state_data,
  3229. const nmsis_nn_dims *weights_feature_dims,
  3230. const int8_t *weights_feature_data,
  3231. const nmsis_nn_dims *weights_time_dims,
  3232. const int8_t *weights_time_data,
  3233. const nmsis_nn_dims *bias_dims,
  3234. const int32_t *bias_data,
  3235. const nmsis_nn_dims *output_dims,
  3236. int8_t *output_data);
  3237. /**
  3238. * @brief s8 SVDF function with 16 bit state tensor and 16 bit time weights
  3239. *
  3240. * @param[in] input_ctx Temporary scratch buffer
  3241. * The caller is expected to clear the buffer, if applicable, for security reasons.
  3242. * @param[in] output_ctx Temporary output scratch buffer
  3243. * The caller is expected to clear the buffer, if applicable, for security reasons.
  3244. * @param[in] svdf_params SVDF Parameters
  3245. * Range of svdf_params->input_offset : [-128, 127]
  3246. * Range of svdf_params->output_offset : [-128, 127]
  3247. * @param[in] input_quant_params Input quantization parameters
  3248. * @param[in] output_quant_params Output quantization parameters
  3249. * @param[in] input_dims Input tensor dimensions
  3250. * @param[in] input_data Pointer to input tensor
  3251. * @param[in] state_dims State tensor dimensions
  3252. * @param[in] state_data Pointer to state tensor
  3253. * @param[in] weights_feature_dims Weights (feature) tensor dimensions
  3254. * @param[in] weights_feature_data Pointer to the weights (feature) tensor
  3255. * @param[in] weights_time_dims Weights (time) tensor dimensions
  3256. * @param[in] weights_time_data Pointer to the weights (time) tensor
  3257. * @param[in] bias_dims Bias tensor dimensions
  3258. * @param[in] bias_data Pointer to bias tensor
  3259. * @param[in] output_dims Output tensor dimensions
  3260. * @param[out] output_data Pointer to the output tensor
  3261. *
  3262. * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
  3263. *
  3264. * @details
  3265. * 1. Supported framework: TensorFlow Lite micro
  3266. */
  3267. riscv_nmsis_nn_status riscv_svdf_state_s16_s8(const nmsis_nn_context *input_ctx,
  3268. const nmsis_nn_context *output_ctx,
  3269. const nmsis_nn_svdf_params *svdf_params,
  3270. const nmsis_nn_per_tensor_quant_params *input_quant_params,
  3271. const nmsis_nn_per_tensor_quant_params *output_quant_params,
  3272. const nmsis_nn_dims *input_dims,
  3273. const int8_t *input_data,
  3274. const nmsis_nn_dims *state_dims,
  3275. int16_t *state_data,
  3276. const nmsis_nn_dims *weights_feature_dims,
  3277. const int8_t *weights_feature_data,
  3278. const nmsis_nn_dims *weights_time_dims,
  3279. const int16_t *weights_time_data,
  3280. const nmsis_nn_dims *bias_dims,
  3281. const int32_t *bias_data,
  3282. const nmsis_nn_dims *output_dims,
  3283. int8_t *output_data);
  3284. /**
  3285. * @brief Get size of additional buffer required by riscv_svdf_s8().
  3286. * @param[in] filter_dims dimension of filter
  3287. * @return The function returns required buffer size in bytes
  3288. *
  3289. */
  3290. int32_t riscv_svdf_s8_get_buffer_size(const nmsis_nn_dims *filter_dims);
  3291. /**
  3292. * @brief Get size of additional buffer required by riscv_svdf_s8() for processors with DSP extension.
  3293. * Refer to riscv_svdf_s8_get_buffer_size() for function argument details.
  3294. *
  3295. * @note Intended for compilation on Host. If compiling for a RISC-V target, use
  3296. * riscv_svdf_s8_get_buffer_size().
  3297. *
  3298. */
  3299. int32_t riscv_svdf_s8_get_buffer_size_dsp(const nmsis_nn_dims *filter_dims);
  3300. /**
  3301. * @defgroup LSTM LSTM Layer Functions
  3302. *
  3303. */
  3304. /**
  3305. * @brief LSTM unidirectional function with 8 bit input and output and 16 bit gate output, 32 bit bias.
  3306. *
  3307. * @param[in] input Pointer to input data
  3308. * @param[out] output Pointer to output data
  3309. * @param[in] params Struct containing all information about the lstm operator, see riscv_nn_types.
  3310. * @param[in] buffers Struct containing pointers to all temporary scratch buffers needed for the
  3311. * lstm operator, see riscv_nn_types.
  3312. *
  3313. *
  3314. * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
  3315. *
  3316. * @details
  3317. * 1. Supported framework: TensorFlow Lite Micro
  3318. *
  3319. */
  3320. riscv_nmsis_nn_status riscv_lstm_unidirectional_s8(const int8_t *input,
  3321. int8_t *output,
  3322. const nmsis_nn_lstm_params *params,
  3323. nmsis_nn_lstm_context *buffers);
  3324. /**
  3325. * @brief LSTM unidirectional function with 16 bit input and output and 16 bit gate output, 64 bit bias.
  3326. *
  3327. * @param[in] input Pointer to input data
  3328. * @param[out] output Pointer to output data
  3329. * @param[in] params Struct containing all information about the lstm operator, see riscv_nn_types.
  3330. * @param[in] buffers Struct containing pointers to all temporary scratch buffers needed for the
  3331. * lstm operator, see riscv_nn_types.
  3332. *
  3333. *
  3334. * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
  3335. *
  3336. * @details
  3337. * 1. Supported framework: TensorFlow Lite Micro
  3338. *
  3339. */
  3340. riscv_nmsis_nn_status riscv_lstm_unidirectional_s16(const int16_t *input,
  3341. int16_t *output,
  3342. const nmsis_nn_lstm_params *params,
  3343. nmsis_nn_lstm_context *buffers);
  3344. /**
  3345. * @brief Batch matmul function with 8 bit input and output.
  3346. *
  3347. * @param[in] ctx Temporary scratch buffer
  3348. * The caller is expected to clear the buffer, if applicable, for security reasons.
  3349. * Optional function riscv_fully_connected_s8_get_buffer_size() provides the buffer
  3350. * size if an additional buffer is required.
  3351. * @param[in] bmm_params Batch matmul Parameters
  3352. * Adjoint flags are currently unused.
  3353. * @param[in] quant_params Quantization parameters
  3354. * @param[in] input_lhs_dims Input lhs tensor dimensions.
  3355. * This should be NHWC where lhs C = rhs C
  3356. * @param[in] input_lhs Pointer to input tensor
  3357. * @param[in] input_rhs_dims Input rhs tensor dimensions.
  3358. * This is expected to be transposed so
  3359. * should be NHWC where lhs C = rhs C
  3360. * @param[in] input_rhs Pointer to transposed input tensor
  3361. * @param[in] output_dims Output tensor dimensions
  3362. * @param[out] output Pointer to the output tensor
  3363. *
  3364. * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
  3365. *
  3366. * @details
  3367. * 1. Supported framework: TensorFlow Lite Micro
  3368. * 2. Performs row * row matrix multiplication with the RHS transposed.
  3369. *
  3370. */
  3371. riscv_nmsis_nn_status riscv_batch_matmul_s8(const nmsis_nn_context *ctx,
  3372. const nmsis_nn_bmm_params *bmm_params,
  3373. const nmsis_nn_per_tensor_quant_params *quant_params,
  3374. const nmsis_nn_dims *input_lhs_dims,
  3375. const int8_t *input_lhs,
  3376. const nmsis_nn_dims *input_rhs_dims,
  3377. const int8_t *input_rhs,
  3378. const nmsis_nn_dims *output_dims,
  3379. int8_t *output);
  3380. /**
  3381. * @brief Batch matmul function with 16 bit input and output.
  3382. *
  3383. * @param[in] ctx Temporary scratch buffer
  3384. * The caller is expected to clear the buffer, if applicable, for security reasons.
  3385. * Optional function riscv_fully_connected_s8_get_buffer_size() provides the buffer
  3386. * size if an additional buffer is required.
  3387. * @param[in] bmm_params Batch matmul Parameters
  3388. * Adjoint flags are currently unused.
  3389. * @param[in] quant_params Quantization parameters
  3390. * @param[in] input_lhs_dims Input lhs tensor dimensions.
  3391. * This should be NHWC where LHS.C = RHS.C
  3392. * @param[in] input_lhs Pointer to input tensor
  3393. * @param[in] input_rhs_dims Input rhs tensor dimensions.
  3394. * This is expected to be transposed so
  3395. * should be NHWC where LHS.C = RHS.C
  3396. * @param[in] input_rhs Pointer to transposed input tensor
  3397. * @param[in] output_dims Output tensor dimensions
  3398. * @param[out] output Pointer to the output tensor
  3399. *
  3400. * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
  3401. *
  3402. * @details
  3403. * 1. Supported framework: TensorFlow Lite Micro
  3404. * 2. Performs row * row matrix multiplication with the RHS transposed.
  3405. *
  3406. */
  3407. riscv_nmsis_nn_status riscv_batch_matmul_s16(const nmsis_nn_context *ctx,
  3408. const nmsis_nn_bmm_params *bmm_params,
  3409. const nmsis_nn_per_tensor_quant_params *quant_params,
  3410. const nmsis_nn_dims *input_lhs_dims,
  3411. const int16_t *input_lhs,
  3412. const nmsis_nn_dims *input_rhs_dims,
  3413. const int16_t *input_rhs,
  3414. const nmsis_nn_dims *output_dims,
  3415. int16_t *output);
  3416. /**
  3417. * @defgroup Pad Pad Layer Functions:
  3418. *
  3419. */
  3420. /**
  3421. * @brief Expands the size of the input by adding constant values before and after the data, in all dimensions.
  3422. *
  3423. * @param[in] input Pointer to input data
  3424. * @param[out] output Pointer to output data
  3425. * @param[in] pad_value Value to pad with
  3426. * @param[in] input_size Input tensor dimensions
  3427. * @param[in] pre_pad Padding to apply before data in each dimension
  3428. * @param[in] post_pad Padding to apply after data in each dimension
  3429. *
  3430. * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
  3431. *
  3432. */
  3433. riscv_nmsis_nn_status riscv_pad_s8(const int8_t *input,
  3434. int8_t *output,
  3435. const int8_t pad_value,
  3436. const nmsis_nn_dims *input_size,
  3437. const nmsis_nn_dims *pre_pad,
  3438. const nmsis_nn_dims *post_pad);
  3439. /**
  3440. * @brief Elementwise binary minimum with 8bit data.
  3441. *
  3442. * @param[in] ctx Temporary scratch buffer
  3443. * The caller is expected to clear the buffer, if applicable, for security reasons.
  3444. * @param[in] input_1_data Pointer to input1 tensor
  3445. * @param[in] input_1_dims Input1 tensor dimensions
  3446. * @param[in] input_2_data Pointer to input2 tensor
  3447. * @param[in] input_2_dims Input2 tensor dimensions
  3448. * @param[out] output_data Pointer to the output tensor
  3449. * @param[in] output_dims Output tensor dimensions
  3450. *
  3451. * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
  3452. *
  3453. * @details
  3454. * 1. Supported framework: TensorFlow Lite Micro
  3455. *
  3456. */
  3457. riscv_nmsis_nn_status riscv_minimum_s8(const nmsis_nn_context *ctx,
  3458. const int8_t *input_1_data,
  3459. const nmsis_nn_dims *input_1_dims,
  3460. const int8_t *input_2_data,
  3461. const nmsis_nn_dims *input_2_dims,
  3462. int8_t *output_data,
  3463. const nmsis_nn_dims *output_dims);
  3464. /**
  3465. * @brief Elementwise binary maximum with 8bit data.
  3466. *
  3467. * @param[in] ctx Temporary scratch buffer
  3468. * The caller is expected to clear the buffer, if applicable, for security reasons.
  3469. * @param[in] input_1_data Pointer to input1 tensor
  3470. * @param[in] input_1_dims Input1 tensor dimensions
  3471. * @param[in] input_2_data Pointer to input2 tensor
  3472. * @param[in] input_2_dims Input2 tensor dimensions
  3473. * @param[out] output_data Pointer to the output tensor
  3474. * @param[in] output_dims Output tensor dimensions
  3475. *
  3476. * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
  3477. *
  3478. * @details
  3479. * 1. Supported framework: TensorFlow Lite Micro
  3480. *
  3481. */
  3482. riscv_nmsis_nn_status riscv_maximum_s8(const nmsis_nn_context *ctx,
  3483. const int8_t *input_1_data,
  3484. const nmsis_nn_dims *input_1_dims,
  3485. const int8_t *input_2_data,
  3486. const nmsis_nn_dims *input_2_dims,
  3487. int8_t *output_data,
  3488. const nmsis_nn_dims *output_dims);
  3489. #ifdef __cplusplus
  3490. }
  3491. #endif
  3492. #endif /* RISCV_NNFUNCTIONS_H */