| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702
70370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761
27712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738173917401741174217431744174517461747174817491750175117521753175417551756175717581759176017611762176317641765176617671768176917701771177217731774177517761
77717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762
27722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504250525062507250825092510251125122513251425152516251725182519252025212522252325242525252625272528252925302531253225332534253525362537253825392540254125422543254425452546254725482549255025512552255325542555255625572558255925602561256225632564256525662567256825692570257125722573257425752576257725782579258025812582258325842585258625872588258925902591259225932594259525962597259825992600260126022603260426052606260726082609261026112612261326142615261626172618261926202621262226232624262526262627262826292630263126322633263426352636263726382639264026412642264326442645264626472648264926502651265226532654265526562657265826592660266126622663266426652666266726682669267026712672267326742675267626772678267926802681268226832684268526862687268826892690269126922693269426952696269726982699270027012702270327042705270627072708270927102711271227132714271527162717271827192720272127222723272427252726272727282729273027312732273327342735273627372738273927402741274227432744274527462747274827492750275127522753275427552756275727582759276027612762276327642765276627672768276927702771277227732774277527762
77727782779278027812782278327842785278627872788278927902791279227932794279527962797279827992800280128022803280428052806280728082809281028112812281328142815281628172818281928202821282228232824282528262827282828292830283128322833283428352836283728382839284028412842284328442845284628472848284928502851285228532854285528562857285828592860286128622863286428652866286728682869287028712872287328742875287628772878287928802881288228832884288528862887288828892890289128922893289428952896289728982899290029012902290329042905290629072908290929102911291229132914291529162917291829192920292129222923292429252926292729282929293029312932293329342935293629372938293929402941294229432944294529462947294829492950295129522953295429552956295729582959296029612962296329642965296629672968296929702971297229732974297529762977297829792980298129822983298429852986298729882989299029912992299329942995299629972998299930003001300230033004300530063007300830093010301130123013301430153016301730183019302030213022302330243025302630273028302930303031303230333034303530363037303830393040304130423043304430453046304730483049305030513052305330543055305630573058305930603061306230633064306530663067306830693070307130723073307430753076307730783079308030813082308330843085308630873088308930903091309230933094309530963097309830993100310131023103310431053106310731083109311031113112311331143115311631173118311931203121312231233124312531263127312831293130313131323133313431353136313731383139314031413142314331443145314631473148314931503151315231533154315531563157315831593160316131623163316431653166316731683169317031713172317331743175317631773178317931803181318231833184318531863187318831893190319131923193319431953196319731983199320032013202320332043205320632073208320932103211321232133214321532163217321832193220322132223223322432253226322732283229323032313232323332343235323632373238323932403241324232433244324532463247324832493250325132523253325432553256325732583259326032613262326332643265326632673268326932703271327232733274327532763
27732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321332233233324332533263327332833293330333133323333333433353336333733383339334033413342334333443345334633473348334933503351335233533354335533563357335833593360336133623363336433653366336733683369337033713372337333743375337633773378337933803381338233833384338533863387338833893390339133923393339433953396339733983399340034013402340334043405340634073408340934103411341234133414341534163417341834193420342134223423342434253426342734283429343034313432343334343435343634373438343934403441344234433444344534463447344834493450345134523453345434553456345734583459346034613462346334643465346634673468346934703471347234733474347534763477347834793480348134823483348434853486348734883489349034913492349334943495349634973498349935003501350235033504350535063507350835093510351135123513351435153516351735183519352035213522352335243525352635273528352935303531353235333534353535363537353835393540354135423543354435453546354735483549355035513552355335543555355635573558355935603561356235633564356535663567356835693570357135723573357435753576357735783579358035813582358335843585358635873588358935903591359235933594359535963597359835993600360136023603360436053606360736083609361036113612361336143615361636173618361936203621362236233624362536263627362836293630363136323633363436353636363736383639364036413642364336443645364636473648364936503651365236533654365536563657365836593660366136623663366436653666366736683669 |
- /*
- * SPDX-FileCopyrightText: Copyright 2010-2024 Arm Limited and/or its affiliates <open-source-office@arm.com>
- * Copyright (c) 2022 Nuclei Limited. All rights reserved.
- *
- * SPDX-License-Identifier: Apache-2.0
- *
- * Licensed under the Apache License, Version 2.0 (the License); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an AS IS BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- /* ----------------------------------------------------------------------
- * Project: NMSIS NN Library
- * Title: riscv_nnfunctions.h
- * Description: Public header file for NMSIS NN Library
- *
- * $Date: 04 November 2024
- * $Revision: V.18.0.0
- *
- * Target Processor: RISC-V Cores
- * -------------------------------------------------------------------- */
- /**
- * @defgroup Public Public
- * A collection of functions to perform basic operations for neural network layers. Functions with a _s8 suffix support
- * the TensorFlow Lite framework.
- */
- #ifndef RISCV_NNFUNCTIONS_H
- #define RISCV_NNFUNCTIONS_H
- #include "riscv_nn_math_types.h"
- #include "riscv_nn_types.h"
- #define USE_INTRINSIC
- #ifdef __cplusplus
- extern "C" {
- #endif
- /**
- * @defgroup NNConv Convolution Functions
- *
- * Collection of convolution, depthwise convolution functions and their variants.
- *
- * The convolution is implemented in 2 steps: im2col and General Matrix Multiplication(GEMM)
- *
- * im2col is a process of converting each patch of image data into
- * a column. After im2col, the convolution is computed as matrix-matrix
- * multiplication.
- *
- * To reduce the memory footprint, the im2col is performed partially.
- * Each iteration, only a few column (i.e., patches) are generated followed
- * by GEMM.
- *
- */
- /**
- * @brief s4 convolution layer wrapper function with the main purpose to call the optimal kernel available in
- * nmsis-nn to perform the convolution.
- *
- * @param[in, out] ctx Function context that contains the additional buffer if required by the function.
- * riscv_convolve_wrapper_s4_get_buffer_size will return the buffer_size if required.
- * The caller is expected to clear the buffer, if applicable, for security reasons.
- * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...).
- * Range of conv_params->input_offset : [-127, 128]
- * Range of conv_params->output_offset : [-128, 127]
- * @param[in] quant_params Per-channel quantization info.
- * It contains the multiplier and shift values to be applied to each output channel
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
- * @param[in] input_data Input (activation) data pointer. Data type: int8
- * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the
- * spatial filter dimensions
- * @param[in] filter_data Filter data pointer. Data type: int8 packed with 2x int4
- * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
- * @param[in] bias_data Bias data pointer. Data type: int32
- * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
- * @param[out] output_data Output data pointer. Data type: int8
- *
- * @return The function returns either
- * <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
- * <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
- *
- */
- riscv_nmsis_nn_status riscv_convolve_wrapper_s4(const nmsis_nn_context *ctx,
- const nmsis_nn_conv_params *conv_params,
- const nmsis_nn_per_channel_quant_params *quant_params,
- const nmsis_nn_dims *input_dims,
- const int8_t *input_data,
- const nmsis_nn_dims *filter_dims,
- const int8_t *filter_data,
- const nmsis_nn_dims *bias_dims,
- const int32_t *bias_data,
- const nmsis_nn_dims *output_dims,
- int8_t *output_data);
- /**
- * @brief Get the required buffer size for riscv_convolve_wrapper_s4
- *
- * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...).
- * Range of conv_params->input_offset : [-127, 128]
- * Range of conv_params->output_offset : [-128, 127]
- * @param[in] input_dims Input (activation) dimensions. Format: [N, H, W, C_IN]
- * @param[in] filter_dims Filter dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the spatial
- * filter dimensions
- * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
- *
- * @return The function returns required buffer size (bytes)
- *
- */
- int32_t riscv_convolve_wrapper_s4_get_buffer_size(const nmsis_nn_conv_params *conv_params,
- const nmsis_nn_dims *input_dims,
- const nmsis_nn_dims *filter_dims,
- const nmsis_nn_dims *output_dims);
- /**
- * @brief Get the required buffer size for riscv_convolve_wrapper_s4 for processors with DSP extension.
- * Refer to riscv_convolve_wrapper_s4_get_buffer_size() for function argument details.
- *
- * @note Intended for compilation on Host. If compiling for a RISC-V target, use
- * riscv_convolve_wrapper_s4_get_buffer_size().
- *
- */
- int32_t riscv_convolve_wrapper_s4_get_buffer_size_dsp(const nmsis_nn_conv_params *conv_params,
- const nmsis_nn_dims *input_dims,
- const nmsis_nn_dims *filter_dims,
- const nmsis_nn_dims *output_dims);
- /**
- * @brief s8 convolution layer wrapper function with the main purpose to call the optimal kernel available in
- * nmsis-nn to perform the convolution.
- *
- * @param[in, out] ctx Function context that contains the additional buffer if required by the function.
- * riscv_convolve_wrapper_s8_get_buffer_size will return the buffer_size if required.
- * The caller is expected to clear the buffer, if applicable, for security reasons.
- * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...).
- * Range of conv_params->input_offset : [-127, 128]
- * Range of conv_params->output_offset : [-128, 127]
- * @param[in] quant_params Per-channel quantization info.
- * It contains the multiplier and shift values to be applied to each output channel
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
- * @param[in] input_data Input (activation) data pointer. Data type: int8
- * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the
- * spatial filter dimensions
- * @param[in] filter_data Filter data pointer. Data type: int8
- * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
- * @param[in] bias_data Bias data pointer. Data type: int32
- * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
- * @param[out] output_data Output data pointer. Data type: int8
- *
- * @return The function returns either
- * <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
- * <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
- *
- */
- riscv_nmsis_nn_status riscv_convolve_wrapper_s8(const nmsis_nn_context *ctx,
- const nmsis_nn_conv_params *conv_params,
- const nmsis_nn_per_channel_quant_params *quant_params,
- const nmsis_nn_dims *input_dims,
- const int8_t *input_data,
- const nmsis_nn_dims *filter_dims,
- const int8_t *filter_data,
- const nmsis_nn_dims *bias_dims,
- const int32_t *bias_data,
- const nmsis_nn_dims *output_dims,
- int8_t *output_data);
- /**
- * @brief Get the required buffer size for riscv_convolve_wrapper_s8
- *
- * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...).
- * Range of conv_params->input_offset : [-127, 128]
- * Range of conv_params->output_offset : [-128, 127]
- * @param[in] input_dims Input (activation) dimensions. Format: [N, H, W, C_IN]
- * @param[in] filter_dims Filter dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the spatial
- * filter dimensions
- * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
- *
- * @return The function returns required buffer size (bytes)
- *
- */
- int32_t riscv_convolve_wrapper_s8_get_buffer_size(const nmsis_nn_conv_params *conv_params,
- const nmsis_nn_dims *input_dims,
- const nmsis_nn_dims *filter_dims,
- const nmsis_nn_dims *output_dims);
- /**
- * @brief Get the required buffer size for riscv_convolve_wrapper_s8 for processors with DSP extension.
- * Refer to riscv_convolve_wrapper_s8_get_buffer_size() for function argument details.
- *
- * @note Intended for compilation on Host. If compiling for a RISC-V target, use
- * riscv_convolve_wrapper_s8_get_buffer_size().
- *
- */
- int32_t riscv_convolve_wrapper_s8_get_buffer_size_dsp(const nmsis_nn_conv_params *conv_params,
- const nmsis_nn_dims *input_dims,
- const nmsis_nn_dims *filter_dims,
- const nmsis_nn_dims *output_dims);
- /**
- * @brief s16 convolution layer wrapper function with the main purpose to call the optimal kernel available in
- * nmsis-nn to perform the convolution.
- *
- * @param[in, out] ctx Function context that contains the additional buffer if required by the function.
- * riscv_convolve_wrapper_s16_get_buffer_size will return the buffer_size if required
- * The caller is expected to clear the buffer, if applicable, for security reasons.
- * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...).
- * conv_params->input_offset : Not used
- * conv_params->output_offset : Not used
- * @param[in] quant_params Per-channel quantization info.
- * It contains the multiplier and shift values to be applied to each output channel
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
- * @param[in] input_data Input (activation) data pointer. Data type: int16
- * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the
- * spatial filter dimensions
- * @param[in] filter_data Filter data pointer. Data type: int8
- * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
- * @param[in] bias_data Struct with optional bias data pointer. Bias data type can be int64 or int32 depending
- * on a flag in the struct.
- * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
- * @param[out] output_data Output data pointer. Data type: int16
- *
- * @return The function returns either
- * <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
- * <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
- *
- */
- riscv_nmsis_nn_status riscv_convolve_wrapper_s16(const nmsis_nn_context *ctx,
- const nmsis_nn_conv_params *conv_params,
- const nmsis_nn_per_channel_quant_params *quant_params,
- const nmsis_nn_dims *input_dims,
- const int16_t *input_data,
- const nmsis_nn_dims *filter_dims,
- const int8_t *filter_data,
- const nmsis_nn_dims *bias_dims,
- const nmsis_nn_bias_data *bias_data,
- const nmsis_nn_dims *output_dims,
- int16_t *output_data);
- /**
- * @brief Get the required buffer size for riscv_convolve_wrapper_s16.
- *
- * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...).
- * conv_params->input_offset : Not used
- * conv_params->output_offset : Not used
- * @param[in] input_dims Input (activation) dimensions. Format: [N, H, W, C_IN]
- * @param[in] filter_dims Filter dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the spatial
- * filter dimensions
- * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
- *
- * @return The function returns required buffer size (bytes)
- *
- */
- int32_t riscv_convolve_wrapper_s16_get_buffer_size(const nmsis_nn_conv_params *conv_params,
- const nmsis_nn_dims *input_dims,
- const nmsis_nn_dims *filter_dims,
- const nmsis_nn_dims *output_dims);
- /**
- * @brief Get the required buffer size for riscv_convolve_wrapper_s16 for processors with DSP extension.
- * Refer to riscv_convolve_wrapper_s16_get_buffer_size() for function argument details.
- *
- * @note Intended for compilation on Host. If compiling for a RISC-V target, use
- * riscv_convolve_wrapper_s16_get_buffer_size().
- *
- */
- int32_t riscv_convolve_wrapper_s16_get_buffer_size_dsp(const nmsis_nn_conv_params *conv_params,
- const nmsis_nn_dims *input_dims,
- const nmsis_nn_dims *filter_dims,
- const nmsis_nn_dims *output_dims);
- /**
- * @brief Basic s4 convolution function
- * @param[in, out] ctx Function context that contains the additional buffer if required by the function.
- * riscv_convolve_s4_get_buffer_size will return the buffer_size if required.
- * The caller is expected to clear the buffer, if applicable, for security reasons.
- * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...).
- * Range of conv_params->input_offset : [-127, 128]
- * Range of conv_params->output_offset : [-128, 127]
- * @param[in] quant_params Per-channel quantization info.
- * It contains the multiplier and shift values to be applied to each output channel
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
- * @param[in] input_data Input (activation) data pointer. Data type: int8
- * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the
- * spatial filter dimensions
- * @param[in] filter_data Packed Filter data pointer. Data type: int8 packed with 2x int4
- * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
- * @param[in] bias_data Optional bias data pointer. Data type: int32
- * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
- * @param[out] output_data Output data pointer. Data type: int8
- * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
- *
- * @details
- * 1. Supported framework: TensorFlow Lite micro
- * 2. Additional memory is required for optimization. Refer to argument 'ctx' for details.
- *
- */
- riscv_nmsis_nn_status riscv_convolve_s4(const nmsis_nn_context *ctx,
- const nmsis_nn_conv_params *conv_params,
- const nmsis_nn_per_channel_quant_params *quant_params,
- const nmsis_nn_dims *input_dims,
- const int8_t *input_data,
- const nmsis_nn_dims *filter_dims,
- const int8_t *filter_data,
- const nmsis_nn_dims *bias_dims,
- const int32_t *bias_data,
- const nmsis_nn_dims *output_dims,
- int8_t *output_data);
- /**
- * @brief Basic s8 convolution function
- * @param[in, out] ctx Function context that contains the additional buffer if required by the function.
- * riscv_convolve_s8_get_buffer_size will return the buffer_size if required.
- * The caller is expected to clear the buffer, if applicable, for security reasons.
- * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...).
- * Range of conv_params->input_offset : [-127, 128]
- * Range of conv_params->output_offset : [-128, 127]
- * @param[in] quant_params Per-channel quantization info.
- * It contains the multiplier and shift values to be applied to each output channel
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
- * @param[in] input_data Input (activation) data pointer. Data type: int8
- * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, CK] where HK, WK and CK are the
- * spatial filter dimensions. CK != C_IN is used for grouped convolution, in which
- * case the required conditions are C_IN = N * CK and C_OUT = N * M for N groups of
- * size M.
- * @param[in] filter_data Filter data pointer. Data type: int8
- * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
- * @param[in] bias_data Optional bias data pointer. Data type: int32
- * @param[in] upscale_dims Inserts zeroes to upscale the input in h/w dimensions if set to 2. This is used for
- * transposed convolution.
- * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
- * @param[out] output_data Output data pointer. Data type: int8
- *
- * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code> if successful or
- * <code>RISCV_NMSIS_NN_ARG_ERROR</code> if incorrect arguments or
- * <code>RISCV_NMSIS_NN_NO_IMPL_ERROR</code>
- *
- * @details
- * 1. Supported framework: TensorFlow Lite micro
- * 2. Additional memory is required for optimization. Refer to argument 'ctx' for details.
- *
- */
- riscv_nmsis_nn_status riscv_convolve_s8(const nmsis_nn_context *ctx,
- const nmsis_nn_conv_params *conv_params,
- const nmsis_nn_per_channel_quant_params *quant_params,
- const nmsis_nn_dims *input_dims,
- const int8_t *input_data,
- const nmsis_nn_dims *filter_dims,
- const int8_t *filter_data,
- const nmsis_nn_dims *bias_dims,
- const int32_t *bias_data,
- const nmsis_nn_dims *upscale_dims,
- const nmsis_nn_dims *output_dims,
- int8_t *output_data);
- /**
- * @brief Get the required buffer size for s4 convolution function
- *
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
- * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK
- * are the spatial filter dimensions
- * @return The function returns required buffer size (bytes)
- *
- */
- int32_t riscv_convolve_s4_get_buffer_size(const nmsis_nn_dims *input_dims, const nmsis_nn_dims *filter_dims);
- /**
- * @brief Get the required buffer size for s8 convolution function
- *
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
- * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK
- * are the spatial filter dimensions
- * @return The function returns required buffer size (bytes)
- *
- */
- int32_t riscv_convolve_s8_get_buffer_size(const nmsis_nn_dims *input_dims, const nmsis_nn_dims *filter_dims);
- /**
- * @brief Wrapper to select optimal transposed convolution algorithm depending on parameters.
- * @param[in, out] ctx Function context that contains the additional buffer if required by the
- * function.
- * riscv_transpose_conv_s8_get_buffer_size will return the buffer_size if required.
- * The caller is expected to clear the buffer, if applicable, for security
- * reasons.
- * @param[in, out] output_ctx Temporary scratch buffer.
- * The required size is: output width * output height * output channel * 4
- * The caller is expected to clear the buffer, if applicable, for security
- * reasons.
- * @param[in] transpose_conv_params Convolution parameters (e.g. strides, dilations, pads,...).
- * Range of transpose_conv_params->input_offset : [-127, 128]
- * Range of transpose_conv_params->output_offset : [-128, 127]
- * @param[in] quant_params Per-channel quantization info.
- * It contains the multiplier and shift values to be applied to each out channel.
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
- * @param[in] input_data Input (activation) data pointer. Data type: int8
- * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the
- * spatial filter dimensions
- * @param[in] filter_data Filter data pointer. Data type: int8
- * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
- * @param[in] bias_data Optional bias data pointer. Data type: int32
- * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
- * @param[out] output_data Output data pointer. Data type: int8
- * @return The function returns either
- * <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
- * <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
- *
- * @details
- * 1. Supported framework: TensorFlow Lite micro
- * 2. Additional memory is required for optimization. Refer to arguments 'ctx' and 'output_ctx' for details.
- *
- */
- riscv_nmsis_nn_status riscv_transpose_conv_wrapper_s8(const nmsis_nn_context *ctx,
- const nmsis_nn_context *output_ctx,
- const nmsis_nn_transpose_conv_params *transpose_conv_params,
- const nmsis_nn_per_channel_quant_params *quant_params,
- const nmsis_nn_dims *input_dims,
- const int8_t *input_data,
- const nmsis_nn_dims *filter_dims,
- const int8_t *filter_data,
- const nmsis_nn_dims *bias_dims,
- const int32_t *bias_data,
- const nmsis_nn_dims *output_dims,
- int8_t *output_data);
/**
 * @brief Basic s8 transpose convolution function
 * @param[in, out] ctx            Function context that contains the additional buffer if required by the
 *                                function.
 *                                riscv_transpose_conv_s8_get_buffer_size will return the buffer_size if required.
 *                                The caller is expected to clear the buffer, if applicable, for security
 *                                reasons.
 * @param[in, out] output_ctx     Temporary scratch buffer.
 *                                The required size is: output width * output height * output channel * 4
 *                                The caller is expected to clear the buffer, if applicable, for security
 *                                reasons.
 * @param[in] transpose_conv_params Convolution parameters (e.g. strides, dilations, pads,...).
 *                                Range of transpose_conv_params->input_offset : [-127, 128]
 *                                Range of transpose_conv_params->output_offset : [-128, 127]
 * @param[in] quant_params        Per-channel quantization info.
 *                                It contains the multiplier and shift values to be applied to each out channel.
 * @param[in] input_dims          Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
 * @param[in] input_data          Input (activation) data pointer. Data type: int8
 * @param[in] filter_dims         Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the
 *                                spatial filter dimensions
 * @param[in] filter_data         Filter data pointer. Data type: int8
 * @param[in] bias_dims           Bias tensor dimensions. Format: [C_OUT]
 * @param[in] bias_data           Optional bias data pointer. Data type: int32
 * @param[in] output_dims         Output tensor dimensions. Format: [N, H, W, C_OUT]
 * @param[out] output_data        Output data pointer. Data type: int8
 * @return The function returns either
 *                    <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
 *                    <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
 *
 * @details
 *    1. Supported framework: TensorFlow Lite micro
 *    2. Additional memory is required for optimization. Refer to arguments 'ctx' and 'output_ctx' for details.
 *
 */
riscv_nmsis_nn_status riscv_transpose_conv_s8(const nmsis_nn_context *ctx,
                                              const nmsis_nn_context *output_ctx,
                                              const nmsis_nn_transpose_conv_params *transpose_conv_params,
                                              const nmsis_nn_per_channel_quant_params *quant_params,
                                              const nmsis_nn_dims *input_dims,
                                              const int8_t *input_data,
                                              const nmsis_nn_dims *filter_dims,
                                              const int8_t *filter_data,
                                              const nmsis_nn_dims *bias_dims,
                                              const int32_t *bias_data,
                                              const nmsis_nn_dims *output_dims,
                                              int8_t *output_data);
/**
 * @brief Get the required buffer size for ctx in s8 transpose conv function
 *
 * @param[in] transposed_conv_params Transposed convolution parameters
 * @param[in] input_dims             Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
 * @param[in] filter_dims            Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK
 *                                   are the spatial filter dimensions
 * @param[in] out_dims               Output tensor dimensions. Format: [N, H, W, C_OUT]
 * @return The function returns required buffer size (bytes)
 *
 */
int32_t riscv_transpose_conv_s8_get_buffer_size(const nmsis_nn_transpose_conv_params *transposed_conv_params,
                                                const nmsis_nn_dims *input_dims,
                                                const nmsis_nn_dims *filter_dims,
                                                const nmsis_nn_dims *out_dims);
/**
 * @brief Get the required buffer size for output_ctx in s8 transpose conv function
 *
 * @param[in] transposed_conv_params Transposed convolution parameters
 * @param[in] input_dims             Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
 * @param[in] filter_dims            Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK
 *                                   are the spatial filter dimensions
 * @return The function returns required buffer size (bytes)
 *
 */
int32_t riscv_transpose_conv_s8_get_reverse_conv_buffer_size(const nmsis_nn_transpose_conv_params *transposed_conv_params,
                                                             const nmsis_nn_dims *input_dims,
                                                             const nmsis_nn_dims *filter_dims);
/**
 * @brief Get size of additional buffer required by riscv_transpose_conv_s8() for processors with DSP extension.
 *        Refer to riscv_transpose_conv_s8_get_buffer_size() for function argument details.
 *
 * @note Intended for compilation on Host. If compiling for a RISC-V target, use
 *       riscv_transpose_conv_s8_get_buffer_size().
 *
 * @return The function returns required buffer size (bytes)
 *
 */
int32_t riscv_transpose_conv_s8_get_buffer_size_dsp(const nmsis_nn_dims *input_dims,
                                                    const nmsis_nn_dims *filter_dims,
                                                    const nmsis_nn_dims *out_dims);
/**
 * @brief Basic s16 convolution function
 * @param[in, out] ctx        Function context that contains the additional buffer if required by the function.
 *                            riscv_convolve_s16_get_buffer_size will return the buffer_size if required.
 *                            The caller is expected to clear the buffer, if applicable, for security reasons.
 * @param[in] conv_params     Convolution parameters (e.g. strides, dilations, pads,...).
 *                            conv_params->input_offset  : Not used
 *                            conv_params->output_offset : Not used
 * @param[in] quant_params    Per-channel quantization info.
 *                            It contains the multiplier and shift values to be applied to each output channel
 * @param[in] input_dims      Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
 * @param[in] input_data      Input (activation) data pointer. Data type: int16
 * @param[in] filter_dims     Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the
 *                            spatial filter dimensions
 * @param[in] filter_data     Filter data pointer. Data type: int8
 * @param[in] bias_dims       Bias tensor dimensions. Format: [C_OUT]
 * @param[in] bias_data       Struct with optional bias data pointer. Bias data type can be int64 or int32 depending
 *                            on flag in struct.
 * @param[in] output_dims     Output tensor dimensions. Format: [N, H, W, C_OUT]
 * @param[out] output_data    Output data pointer. Data type: int16
 *
 * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code> if successful or
 *                              <code>RISCV_NMSIS_NN_ARG_ERROR</code> if incorrect arguments or
 *                              <code>RISCV_NMSIS_NN_NO_IMPL_ERROR</code>
 *
 * @details
 *    1. Supported framework: TensorFlow Lite micro
 *    2. Additional memory is required for optimization. Refer to argument 'ctx' for details.
 *
 */
riscv_nmsis_nn_status riscv_convolve_s16(const nmsis_nn_context *ctx,
                                         const nmsis_nn_conv_params *conv_params,
                                         const nmsis_nn_per_channel_quant_params *quant_params,
                                         const nmsis_nn_dims *input_dims,
                                         const int16_t *input_data,
                                         const nmsis_nn_dims *filter_dims,
                                         const int8_t *filter_data,
                                         const nmsis_nn_dims *bias_dims,
                                         const nmsis_nn_bias_data *bias_data,
                                         const nmsis_nn_dims *output_dims,
                                         int16_t *output_data);
/**
 * @brief Get the required buffer size for s16 convolution function
 *
 * @param[in] input_dims   Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
 * @param[in] filter_dims  Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK
 *                         are the spatial filter dimensions
 * @return The function returns required buffer size (bytes)
 *
 */
int32_t riscv_convolve_s16_get_buffer_size(const nmsis_nn_dims *input_dims, const nmsis_nn_dims *filter_dims);
/**
 * @brief Fast s4 version for 1x1 convolution (non-square shape)
 *
 * @param[in, out] ctx        Function context that contains the additional buffer if required by the function.
 *                            riscv_convolve_1x1_s4_fast_get_buffer_size will return the buffer_size if required.
 *                            The caller is expected to clear the buffer, if applicable, for security reasons.
 * @param[in] conv_params     Convolution parameters (e.g. strides, dilations, pads,...).
 *                            Range of conv_params->input_offset  : [-127, 128]
 *                            Range of conv_params->output_offset : [-128, 127]
 * @param[in] quant_params    Per-channel quantization info.
 *                            It contains the multiplier and shift values to be applied to each output channel
 * @param[in] input_dims      Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
 * @param[in] input_data      Input (activation) data pointer. Data type: int8
 * @param[in] filter_dims     Filter tensor dimensions. Format: [C_OUT, 1, 1, C_IN]
 * @param[in] filter_data     Filter data pointer. Data type: int8 packed with 2x int4
 * @param[in] bias_dims       Bias tensor dimensions. Format: [C_OUT]
 * @param[in] bias_data       Optional bias data pointer. Data type: int32
 * @param[in] output_dims     Output tensor dimensions. Format: [N, H, W, C_OUT]
 * @param[out] output_data    Output data pointer. Data type: int8
 *
 * @return The function returns either
 *                  <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
 *                  <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
 *
 * @details
 *   - Supported framework : TensorFlow Lite Micro
 *   - The following constraints on the arguments apply
 *      -# conv_params->padding.w = conv_params->padding.h = 0
 *      -# conv_params->stride.w = conv_params->stride.h = 1
 *
 */
riscv_nmsis_nn_status riscv_convolve_1x1_s4_fast(const nmsis_nn_context *ctx,
                                                 const nmsis_nn_conv_params *conv_params,
                                                 const nmsis_nn_per_channel_quant_params *quant_params,
                                                 const nmsis_nn_dims *input_dims,
                                                 const int8_t *input_data,
                                                 const nmsis_nn_dims *filter_dims,
                                                 const int8_t *filter_data,
                                                 const nmsis_nn_dims *bias_dims,
                                                 const int32_t *bias_data,
                                                 const nmsis_nn_dims *output_dims,
                                                 int8_t *output_data);
/**
 * @brief s4 version for 1x1 convolution with support for non-unity stride values
 *
 * @param[in, out] ctx        Function context that contains the additional buffer if required by the function.
 *                            None is required by this function.
 * @param[in] conv_params     Convolution parameters (e.g. strides, dilations, pads,...).
 *                            Range of conv_params->input_offset  : [-127, 128]
 *                            Range of conv_params->output_offset : [-128, 127]
 * @param[in] quant_params    Per-channel quantization info.
 *                            It contains the multiplier and shift values to be applied to each output channel
 * @param[in] input_dims      Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
 * @param[in] input_data      Input (activation) data pointer. Data type: int8
 * @param[in] filter_dims     Filter tensor dimensions. Format: [C_OUT, 1, 1, C_IN]
 * @param[in] filter_data     Filter data pointer. Data type: int8 packed with 2x int4
 * @param[in] bias_dims       Bias tensor dimensions. Format: [C_OUT]
 * @param[in] bias_data       Optional bias data pointer. Data type: int32
 * @param[in] output_dims     Output tensor dimensions. Format: [N, H, W, C_OUT]
 * @param[out] output_data    Output data pointer. Data type: int8
 *
 * @return The function returns either
 *                  <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
 *                  <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
 * @details
 *   - Supported framework : TensorFlow Lite Micro
 *   - The following constraints on the arguments apply
 *      -# conv_params->padding.w = conv_params->padding.h = 0
 *
 */
riscv_nmsis_nn_status riscv_convolve_1x1_s4(const nmsis_nn_context *ctx,
                                            const nmsis_nn_conv_params *conv_params,
                                            const nmsis_nn_per_channel_quant_params *quant_params,
                                            const nmsis_nn_dims *input_dims,
                                            const int8_t *input_data,
                                            const nmsis_nn_dims *filter_dims,
                                            const int8_t *filter_data,
                                            const nmsis_nn_dims *bias_dims,
                                            const int32_t *bias_data,
                                            const nmsis_nn_dims *output_dims,
                                            int8_t *output_data);
/**
 * @brief Fast s8 version for 1x1 convolution (non-square shape)
 *
 * @param[in, out] ctx        Function context that contains the additional buffer if required by the function.
 *                            riscv_convolve_1x1_s8_fast_get_buffer_size will return the buffer_size if required.
 *                            The caller is expected to clear the buffer, if applicable, for security reasons.
 * @param[in] conv_params     Convolution parameters (e.g. strides, dilations, pads,...).
 *                            Range of conv_params->input_offset  : [-127, 128]
 *                            Range of conv_params->output_offset : [-128, 127]
 * @param[in] quant_params    Per-channel quantization info.
 *                            It contains the multiplier and shift values to be applied to each output channel
 * @param[in] input_dims      Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
 * @param[in] input_data      Input (activation) data pointer. Data type: int8
 * @param[in] filter_dims     Filter tensor dimensions. Format: [C_OUT, 1, 1, C_IN]
 * @param[in] filter_data     Filter data pointer. Data type: int8
 * @param[in] bias_dims       Bias tensor dimensions. Format: [C_OUT]
 * @param[in] bias_data       Optional bias data pointer. Data type: int32
 * @param[in] output_dims     Output tensor dimensions. Format: [N, H, W, C_OUT]
 * @param[out] output_data    Output data pointer. Data type: int8
 *
 * @return The function returns either
 *                  <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
 *                  <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
 *
 * @details
 *   - Supported framework : TensorFlow Lite Micro
 *   - The following constraints on the arguments apply
 *      -# conv_params->padding.w = conv_params->padding.h = 0
 *      -# conv_params->stride.w = conv_params->stride.h = 1
 *
 */
riscv_nmsis_nn_status riscv_convolve_1x1_s8_fast(const nmsis_nn_context *ctx,
                                                 const nmsis_nn_conv_params *conv_params,
                                                 const nmsis_nn_per_channel_quant_params *quant_params,
                                                 const nmsis_nn_dims *input_dims,
                                                 const int8_t *input_data,
                                                 const nmsis_nn_dims *filter_dims,
                                                 const int8_t *filter_data,
                                                 const nmsis_nn_dims *bias_dims,
                                                 const int32_t *bias_data,
                                                 const nmsis_nn_dims *output_dims,
                                                 int8_t *output_data);
/**
 * @brief Get the required buffer size for riscv_convolve_1x1_s4_fast
 *
 * @param[in] input_dims  Input (activation) dimensions
 * @return The function returns the required buffer size in bytes
 *
 */
int32_t riscv_convolve_1x1_s4_fast_get_buffer_size(const nmsis_nn_dims *input_dims);
/**
 * @brief Get the required buffer size for riscv_convolve_1x1_s8_fast
 *
 * @param[in] input_dims  Input (activation) dimensions
 * @return The function returns the required buffer size in bytes
 *
 */
int32_t riscv_convolve_1x1_s8_fast_get_buffer_size(const nmsis_nn_dims *input_dims);
/**
 * @brief s8 version for 1x1 convolution with support for non-unity stride values
 *
 * @param[in, out] ctx        Function context that contains the additional buffer if required by the function.
 *                            None is required by this function.
 * @param[in] conv_params     Convolution parameters (e.g. strides, dilations, pads,...).
 *                            Range of conv_params->input_offset  : [-127, 128]
 *                            Range of conv_params->output_offset : [-128, 127]
 * @param[in] quant_params    Per-channel quantization info.
 *                            It contains the multiplier and shift values to be applied to each output channel
 * @param[in] input_dims      Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
 * @param[in] input_data      Input (activation) data pointer. Data type: int8
 * @param[in] filter_dims     Filter tensor dimensions. Format: [C_OUT, 1, 1, C_IN]
 * @param[in] filter_data     Filter data pointer. Data type: int8
 * @param[in] bias_dims       Bias tensor dimensions. Format: [C_OUT]
 * @param[in] bias_data       Optional bias data pointer. Data type: int32
 * @param[in] output_dims     Output tensor dimensions. Format: [N, H, W, C_OUT]
 * @param[out] output_data    Output data pointer. Data type: int8
 *
 * @return The function returns either
 *                  <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
 *                  <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
 * @details
 *   - Supported framework : TensorFlow Lite Micro
 *   - The following constraints on the arguments apply
 *      -# conv_params->padding.w = conv_params->padding.h = 0
 *
 */
riscv_nmsis_nn_status riscv_convolve_1x1_s8(const nmsis_nn_context *ctx,
                                            const nmsis_nn_conv_params *conv_params,
                                            const nmsis_nn_per_channel_quant_params *quant_params,
                                            const nmsis_nn_dims *input_dims,
                                            const int8_t *input_data,
                                            const nmsis_nn_dims *filter_dims,
                                            const int8_t *filter_data,
                                            const nmsis_nn_dims *bias_dims,
                                            const int32_t *bias_data,
                                            const nmsis_nn_dims *output_dims,
                                            int8_t *output_data);
/**
 * @brief Basic Q7 convolution function
 * @param[in]       Im_in       pointer to input tensor
 * @param[in]       dim_im_in   input tensor dimension
 * @param[in]       ch_im_in    number of input tensor channels
 * @param[in]       wt          pointer to kernel weights
 * @param[in]       ch_im_out   number of filters, i.e., output tensor channels
 * @param[in]       dim_kernel  filter kernel size
 * @param[in]       padding     padding sizes
 * @param[in]       stride      convolution stride
 * @param[in]       bias        pointer to bias
 * @param[in]       bias_shift  amount of left-shift for bias
 * @param[in]       out_shift   amount of right-shift for output
 * @param[in,out]   Im_out      pointer to output tensor
 * @param[in]       dim_im_out  output tensor dimension
 * @param[in,out]   bufferA     pointer to buffer space for input
 * @param[in,out]   bufferB     pointer to buffer space for output
 * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
 *
 */
riscv_nmsis_nn_status riscv_convolve_HWC_q7_basic(const q7_t *Im_in,
                                                  const uint16_t dim_im_in,
                                                  const uint16_t ch_im_in,
                                                  const q7_t *wt,
                                                  const uint16_t ch_im_out,
                                                  const uint16_t dim_kernel,
                                                  const uint16_t padding,
                                                  const uint16_t stride,
                                                  const q7_t *bias,
                                                  const uint16_t bias_shift,
                                                  const uint16_t out_shift,
                                                  q7_t *Im_out,
                                                  const uint16_t dim_im_out,
                                                  q15_t *bufferA,
                                                  q7_t *bufferB);
/**
 * @brief Basic Q7 convolution function (non-square shape)
 * @param[in]       Im_in         pointer to input tensor
 * @param[in]       dim_im_in_x   input tensor dimension x
 * @param[in]       dim_im_in_y   input tensor dimension y
 * @param[in]       ch_im_in      number of input tensor channels
 * @param[in]       wt            pointer to kernel weights
 * @param[in]       ch_im_out     number of filters, i.e., output tensor channels
 * @param[in]       dim_kernel_x  filter kernel size x
 * @param[in]       dim_kernel_y  filter kernel size y
 * @param[in]       padding_x     padding size x
 * @param[in]       padding_y     padding size y
 * @param[in]       stride_x      convolution stride x
 * @param[in]       stride_y      convolution stride y
 * @param[in]       bias          pointer to bias
 * @param[in]       bias_shift    amount of left-shift for bias
 * @param[in]       out_shift     amount of right-shift for output
 * @param[in,out]   Im_out        pointer to output tensor
 * @param[in]       dim_im_out_x  output tensor dimension x
 * @param[in]       dim_im_out_y  output tensor dimension y
 * @param[in,out]   bufferA       pointer to buffer space for input
 * @param[in,out]   bufferB       pointer to buffer space for output
 * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
 */
riscv_nmsis_nn_status riscv_convolve_HWC_q7_basic_nonsquare(const q7_t *Im_in,
                                                            const uint16_t dim_im_in_x,
                                                            const uint16_t dim_im_in_y,
                                                            const uint16_t ch_im_in,
                                                            const q7_t *wt,
                                                            const uint16_t ch_im_out,
                                                            const uint16_t dim_kernel_x,
                                                            const uint16_t dim_kernel_y,
                                                            const uint16_t padding_x,
                                                            const uint16_t padding_y,
                                                            const uint16_t stride_x,
                                                            const uint16_t stride_y,
                                                            const q7_t *bias,
                                                            const uint16_t bias_shift,
                                                            const uint16_t out_shift,
                                                            q7_t *Im_out,
                                                            const uint16_t dim_im_out_x,
                                                            const uint16_t dim_im_out_y,
                                                            q15_t *bufferA,
                                                            q7_t *bufferB);
/**
 * @brief Basic Q15 convolution function
 * @param[in]       Im_in       pointer to input tensor
 * @param[in]       dim_im_in   input tensor dimension
 * @param[in]       ch_im_in    number of input tensor channels
 * @param[in]       wt          pointer to kernel weights
 * @param[in]       ch_im_out   number of filters, i.e., output tensor channels
 * @param[in]       dim_kernel  filter kernel size
 * @param[in]       padding     padding sizes
 * @param[in]       stride      convolution stride
 * @param[in]       bias        pointer to bias
 * @param[in]       bias_shift  amount of left-shift for bias
 * @param[in]       out_shift   amount of right-shift for output
 * @param[in,out]   Im_out      pointer to output tensor
 * @param[in]       dim_im_out  output tensor dimension
 * @param[in,out]   bufferA     pointer to buffer space for input
 * @param[in,out]   bufferB     pointer to buffer space for output
 * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
 *
 */
riscv_nmsis_nn_status riscv_convolve_HWC_q15_basic(const q15_t *Im_in,
                                                   const uint16_t dim_im_in,
                                                   const uint16_t ch_im_in,
                                                   const q15_t *wt,
                                                   const uint16_t ch_im_out,
                                                   const uint16_t dim_kernel,
                                                   const uint16_t padding,
                                                   const uint16_t stride,
                                                   const q15_t *bias,
                                                   const uint16_t bias_shift,
                                                   const uint16_t out_shift,
                                                   q15_t *Im_out,
                                                   const uint16_t dim_im_out,
                                                   q15_t *bufferA,
                                                   q7_t *bufferB);
/**
 * @brief Fast Q7 convolution function
 * @param[in]       Im_in       pointer to input tensor
 * @param[in]       dim_im_in   input tensor dimension
 * @param[in]       ch_im_in    number of input tensor channels
 * @param[in]       wt          pointer to kernel weights
 * @param[in]       ch_im_out   number of filters, i.e., output tensor channels
 * @param[in]       dim_kernel  filter kernel size
 * @param[in]       padding     padding sizes
 * @param[in]       stride      convolution stride
 * @param[in]       bias        pointer to bias
 * @param[in]       bias_shift  amount of left-shift for bias
 * @param[in]       out_shift   amount of right-shift for output
 * @param[in,out]   Im_out      pointer to output tensor
 * @param[in]       dim_im_out  output tensor dimension
 * @param[in,out]   bufferA     pointer to buffer space for input
 * @param[in,out]   bufferB     pointer to buffer space for output
 * @return The function returns either
 * <code>RISCV_NMSIS_NN_SIZE_MISMATCH</code> or <code>RISCV_NMSIS_NN_SUCCESS</code> based on the outcome of size
 * checking.
 *
 * This function is the version with full list of optimization tricks, but with
 * some constraints:
 *   ch_im_in is multiple of 4
 *   ch_im_out is multiple of 2
 */
riscv_nmsis_nn_status riscv_convolve_HWC_q7_fast(const q7_t *Im_in,
                                                 const uint16_t dim_im_in,
                                                 const uint16_t ch_im_in,
                                                 const q7_t *wt,
                                                 const uint16_t ch_im_out,
                                                 const uint16_t dim_kernel,
                                                 const uint16_t padding,
                                                 const uint16_t stride,
                                                 const q7_t *bias,
                                                 const uint16_t bias_shift,
                                                 const uint16_t out_shift,
                                                 q7_t *Im_out,
                                                 const uint16_t dim_im_out,
                                                 q15_t *bufferA,
                                                 q7_t *bufferB);
/**
 * @brief Fast Q7 convolution function (non-square shape)
 * @param[in]       Im_in         pointer to input tensor
 * @param[in]       dim_im_in_x   input tensor dimension x
 * @param[in]       dim_im_in_y   input tensor dimension y
 * @param[in]       ch_im_in      number of input tensor channels
 * @param[in]       wt            pointer to kernel weights
 * @param[in]       ch_im_out     number of filters, i.e., output tensor channels
 * @param[in]       dim_kernel_x  filter kernel size x
 * @param[in]       dim_kernel_y  filter kernel size y
 * @param[in]       padding_x     padding size x
 * @param[in]       padding_y     padding size y
 * @param[in]       stride_x      convolution stride x
 * @param[in]       stride_y      convolution stride y
 * @param[in]       bias          pointer to bias
 * @param[in]       bias_shift    amount of left-shift for bias
 * @param[in]       out_shift     amount of right-shift for output
 * @param[in,out]   Im_out        pointer to output tensor
 * @param[in]       dim_im_out_x  output tensor dimension x
 * @param[in]       dim_im_out_y  output tensor dimension y
 * @param[in,out]   bufferA       pointer to buffer space for input
 * @param[in,out]   bufferB       pointer to buffer space for output
 * @return The function returns either
 * <code>RISCV_NMSIS_NN_SIZE_MISMATCH</code> or <code>RISCV_NMSIS_NN_SUCCESS</code> based on the outcome of size
 * checking.
 *
 * This function is the version with full list of optimization tricks, but with
 * some constraints:
 *   ch_im_in is multiple of 4
 *   ch_im_out is multiple of 2
 */
riscv_nmsis_nn_status riscv_convolve_HWC_q7_fast_nonsquare(const q7_t *Im_in,
                                                           const uint16_t dim_im_in_x,
                                                           const uint16_t dim_im_in_y,
                                                           const uint16_t ch_im_in,
                                                           const q7_t *wt,
                                                           const uint16_t ch_im_out,
                                                           const uint16_t dim_kernel_x,
                                                           const uint16_t dim_kernel_y,
                                                           const uint16_t padding_x,
                                                           const uint16_t padding_y,
                                                           const uint16_t stride_x,
                                                           const uint16_t stride_y,
                                                           const q7_t *bias,
                                                           const uint16_t bias_shift,
                                                           const uint16_t out_shift,
                                                           q7_t *Im_out,
                                                           const uint16_t dim_im_out_x,
                                                           const uint16_t dim_im_out_y,
                                                           q15_t *bufferA,
                                                           q7_t *bufferB);
/**
 * @brief Fast Q7 version of 1x1 convolution (non-square shape)
 * @param[in]       Im_in         pointer to input tensor
 * @param[in]       dim_im_in_x   input tensor dimension x
 * @param[in]       dim_im_in_y   input tensor dimension y
 * @param[in]       ch_im_in      number of input tensor channels
 * @param[in]       wt            pointer to kernel weights
 * @param[in]       ch_im_out     number of filters, i.e., output tensor channels
 * @param[in]       dim_kernel_x  filter kernel size x
 * @param[in]       dim_kernel_y  filter kernel size y
 * @param[in]       padding_x     padding size x
 * @param[in]       padding_y     padding size y
 * @param[in]       stride_x      convolution stride x
 * @param[in]       stride_y      convolution stride y
 * @param[in]       bias          pointer to bias
 * @param[in]       bias_shift    amount of left-shift for bias
 * @param[in]       out_shift     amount of right-shift for output
 * @param[in,out]   Im_out        pointer to output tensor
 * @param[in]       dim_im_out_x  output tensor dimension x
 * @param[in]       dim_im_out_y  output tensor dimension y
 * @param[in,out]   bufferA       pointer to buffer space for input
 * @param[in,out]   bufferB       pointer to buffer space for output
 * @return The function returns either
 *                  <code>RISCV_NMSIS_NN_SIZE_MISMATCH</code> if argument constraints fail. or,
 *                  <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
 *
 * This function implements convolution with 1x1 kernel size (i.e., dim_kernel_x=1
 * and dim_kernel_y=1). It can be used for the
 * second half of MobileNets after depthwise separable convolution.
 *
 * This function is the version with full list of optimization tricks, but with
 * some constraints:
 *   ch_im_in is multiple of 4
 *   ch_im_out is multiple of 2
 */
riscv_nmsis_nn_status riscv_convolve_1x1_HWC_q7_fast_nonsquare(const q7_t *Im_in,
                                                               const uint16_t dim_im_in_x,
                                                               const uint16_t dim_im_in_y,
                                                               const uint16_t ch_im_in,
                                                               const q7_t *wt,
                                                               const uint16_t ch_im_out,
                                                               const uint16_t dim_kernel_x,
                                                               const uint16_t dim_kernel_y,
                                                               const uint16_t padding_x,
                                                               const uint16_t padding_y,
                                                               const uint16_t stride_x,
                                                               const uint16_t stride_y,
                                                               const q7_t *bias,
                                                               const uint16_t bias_shift,
                                                               const uint16_t out_shift,
                                                               q7_t *Im_out,
                                                               const uint16_t dim_im_out_x,
                                                               const uint16_t dim_im_out_y,
                                                               q15_t *bufferA,
                                                               q7_t *bufferB);
/**
 * NOTE(review): This is a second declaration of riscv_convolve_1x1_s8_fast; an identical prototype
 * with a fuller doc comment appears earlier in this header. The two doc comments also disagree on
 * the failure return code (RISCV_NMSIS_NN_SIZE_MISMATCH here vs RISCV_NMSIS_NN_ARG_ERROR above) —
 * confirm against the implementation and consider removing this duplicate.
 *
 * @brief Fast s8 version for 1x1 convolution (non-square shape)
 *
 * @param[in, out] ctx        Function context that contains the additional buffer if required by the function.
 *                            riscv_convolve_1x1_s8_fast_get_buffer_size will return the buffer_size if required
 * @param[in] conv_params     Convolution parameters (e.g. strides, dilations, pads,...).
 *                            Range of conv_params->input_offset  : [-127, 128]
 *                            Range of conv_params->output_offset : [-128, 127]
 * @param[in] quant_params    Per-channel quantization info.
 *                            It contains the multiplier and shift values to be applied to each output channel
 * @param[in] input_dims      Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
 * @param[in] input_data      Input (activation) data pointer. Data type: int8
 * @param[in] filter_dims     Filter tensor dimensions. Format: [C_OUT, 1, 1, C_IN]
 * @param[in] filter_data     Filter data pointer. Data type: int8
 * @param[in] bias_dims       Bias tensor dimensions. Format: [C_OUT]
 * @param[in] bias_data       Optional bias data pointer. Data type: int32
 * @param[in] output_dims     Output tensor dimensions. Format: [N, H, W, C_OUT]
 * @param[out] output_data    Output data pointer. Data type: int8
 *
 * @return The function returns either
 *                  <code>RISCV_NMSIS_NN_SIZE_MISMATCH</code> if argument constraints fail. or,
 *                  <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
 *
 * @details
 *   - Supported framework : TensorFlow Lite Micro
 *   - The following constraints on the arguments apply
 *      -# input_dims->c is a multiple of 4
 *      -# conv_params->padding.w = conv_params->padding.h = 0
 *      -# conv_params->stride.w = conv_params->stride.h = 1
 *
 */
riscv_nmsis_nn_status riscv_convolve_1x1_s8_fast(const nmsis_nn_context *ctx,
                                                 const nmsis_nn_conv_params *conv_params,
                                                 const nmsis_nn_per_channel_quant_params *quant_params,
                                                 const nmsis_nn_dims *input_dims,
                                                 const int8_t *input_data,
                                                 const nmsis_nn_dims *filter_dims,
                                                 const int8_t *filter_data,
                                                 const nmsis_nn_dims *bias_dims,
                                                 const int32_t *bias_data,
                                                 const nmsis_nn_dims *output_dims,
                                                 int8_t *output_data);
/**
 * NOTE(review): duplicate declaration — an identical prototype of this function appears earlier
 * in this header. Harmless in C (re-declaration with the same type), but consider removing one.
 *
 * @brief Get the required buffer size for riscv_convolve_1x1_s8_fast
 *
 * @param[in] input_dims  Input (activation) dimensions
 * @return The function returns the required buffer size in bytes
 *
 */
int32_t riscv_convolve_1x1_s8_fast_get_buffer_size(const nmsis_nn_dims *input_dims);
/**
 * @brief 1xn convolution
 *
 * @param[in, out] ctx        Function context that contains the additional buffer if required by the function.
 *                            riscv_convolve_1_x_n_s8_get_buffer_size will return the buffer_size if required
 *                            The caller is expected to clear the buffer, if applicable, for security reasons.
 * @param[in] conv_params     Convolution parameters (e.g. strides, dilations, pads,...).
 *                            Range of conv_params->input_offset  : [-127, 128]
 *                            Range of conv_params->output_offset : [-128, 127]
 * @param[in] quant_params    Per-channel quantization info.
 *                            It contains the multiplier and shift values to be applied to each output channel
 * @param[in] input_dims      Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
 * @param[in] input_data      Input (activation) data pointer. Data type: int8
 * @param[in] filter_dims     Filter tensor dimensions. Format: [C_OUT, 1, WK, C_IN] where WK is the horizontal
 *                            spatial filter dimension
 * @param[in] filter_data     Filter data pointer. Data type: int8
 * @param[in] bias_dims       Bias tensor dimensions. Format: [C_OUT]
 * @param[in] bias_data       Optional bias data pointer. Data type: int32
 * @param[in] output_dims     Output tensor dimensions. Format: [N, H, W, C_OUT]
 * @param[out] output_data    Output data pointer. Data type: int8
 *
 * @return The function returns either
 *                  <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
 *                  <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
 *
 * @details
 *   - Supported framework : TensorFlow Lite Micro
 *   - The following constraints on the arguments apply
 *      -# input_dims->n equals 1
 *      -# output_dims->w is a multiple of 4
 *      -# Explicit constraints (since it is for 1xN convolution)
 *      -## input_dims->h equals 1
 *      -## output_dims->h equals 1
 *      -## filter_dims->h equals 1
 * @todo Remove constraint on output_dims->w to make the function generic.
 *
 */
riscv_nmsis_nn_status riscv_convolve_1_x_n_s8(const nmsis_nn_context *ctx,
                                              const nmsis_nn_conv_params *conv_params,
                                              const nmsis_nn_per_channel_quant_params *quant_params,
                                              const nmsis_nn_dims *input_dims,
                                              const int8_t *input_data,
                                              const nmsis_nn_dims *filter_dims,
                                              const int8_t *filter_data,
                                              const nmsis_nn_dims *bias_dims,
                                              const int32_t *bias_data,
                                              const nmsis_nn_dims *output_dims,
                                              int8_t *output_data);
- /**
- * @brief Q7 version of convolution for RGB image
- * @param[in] Im_in pointer to input tensor
- * @param[in] dim_im_in input tensor dimension
- * @param[in] ch_im_in number of input tensor channels
- * @param[in] wt pointer to kernel weights
- * @param[in] ch_im_out number of filters, i.e., output tensor channels
- * @param[in] dim_kernel filter kernel size
- * @param[in] padding padding sizes
- * @param[in] stride convolution stride
- * @param[in] bias pointer to bias
- * @param[in] bias_shift amount of left-shift for bias
- * @param[in] out_shift amount of right-shift for output
- * @param[in,out] Im_out pointer to output tensor
- * @param[in] dim_im_out output tensor dimension
- * @param[in,out] bufferA pointer to buffer space for input
- * @param[in,out] bufferB pointer to buffer space for output
- * @return The function returns either
- * <code>RISCV_NMSIS_NN_SIZE_MISMATCH</code> or <code>RISCV_NMSIS_NN_SUCCESS</code> based on the outcome of size checking.
- *
- * This kernel is written exclusively for convolution with ch_im_in
- * equals 3. This applies on the first layer of CNNs which has input
- * image with RGB format.
- */
- riscv_nmsis_nn_status riscv_convolve_HWC_q7_RGB(const q7_t *Im_in,
- const uint16_t dim_im_in,
- const uint16_t ch_im_in,
- const q7_t *wt,
- const uint16_t ch_im_out,
- const uint16_t dim_kernel,
- const uint16_t padding,
- const uint16_t stride,
- const q7_t *bias,
- const uint16_t bias_shift,
- const uint16_t out_shift,
- q7_t *Im_out,
- const uint16_t dim_im_out,
- q15_t *bufferA,
- q7_t *bufferB);
- /**
- * @brief Fast Q15 convolution function
- * @param[in] Im_in pointer to input tensor
- * @param[in] dim_im_in input tensor dimension
- * @param[in] ch_im_in number of input tensor channels
- * @param[in] wt pointer to kernel weights
- * @param[in] ch_im_out number of filters, i.e., output tensor channels
- * @param[in] dim_kernel filter kernel size
- * @param[in] padding padding sizes
- * @param[in] stride convolution stride
- * @param[in] bias pointer to bias
- * @param[in] bias_shift amount of left-shift for bias
- * @param[in] out_shift amount of right-shift for output
- * @param[in,out] Im_out pointer to output tensor
- * @param[in] dim_im_out output tensor dimension
- * @param[in,out] bufferA pointer to buffer space for input
- * @param[in,out] bufferB pointer to buffer space for output
- * @return The function returns either
- * <code>RISCV_NMSIS_NN_SIZE_MISMATCH</code> or <code>RISCV_NMSIS_NN_SUCCESS</code> based on the outcome of size checking.
- *
- * This function is the version with full list of optimization tricks, but with
- *    some constraints:
- * ch_im_in is multiple of 2
- * ch_im_out is multiple of 2
- * dim_im_out is a multiple of 2
- */
- riscv_nmsis_nn_status riscv_convolve_HWC_q15_fast(const q15_t *Im_in,
- const uint16_t dim_im_in,
- const uint16_t ch_im_in,
- const q15_t *wt,
- const uint16_t ch_im_out,
- const uint16_t dim_kernel,
- const uint16_t padding,
- const uint16_t stride,
- const q15_t *bias,
- const uint16_t bias_shift,
- const uint16_t out_shift,
- q15_t *Im_out,
- const uint16_t dim_im_out,
- q15_t *bufferA,
- q7_t *bufferB);
- /**
- * @brief Fast Q15 convolution function (non-square shape)
- * @param[in] Im_in pointer to input tensor
- * @param[in] dim_im_in_x input tensor dimension x
- * @param[in] dim_im_in_y input tensor dimension y
- * @param[in] ch_im_in number of input tensor channels
- * @param[in] wt pointer to kernel weights
- * @param[in] ch_im_out number of filters, i.e., output tensor channels
- * @param[in] dim_kernel_x filter kernel size x
- * @param[in] dim_kernel_y filter kernel size y
- * @param[in] padding_x padding size x
- * @param[in] padding_y padding size y
- * @param[in] stride_x convolution stride x
- * @param[in] stride_y convolution stride y
- * @param[in] bias pointer to bias
- * @param[in] bias_shift amount of left-shift for bias
- * @param[in] out_shift amount of right-shift for output
- * @param[in,out] Im_out pointer to output tensor
- * @param[in] dim_im_out_x output tensor dimension x
- * @param[in] dim_im_out_y output tensor dimension y
- * @param[in,out] bufferA pointer to buffer space for input
- * @param[in,out] bufferB pointer to buffer space for output
- * @return The function returns either
- * <code>RISCV_NMSIS_NN_SIZE_MISMATCH</code> or <code>RISCV_NMSIS_NN_SUCCESS</code> based on the outcome of size checking.
- *
- * @details
- *
- * <b>Buffer size:</b>
- *
- * bufferA size: 2*ch_im_in*dim_kernel*dim_kernel
- *
- * bufferB size: 0
- *
- * <b>Input dimension constraints:</b>
- *
- * ch_im_in is multiple of 2
- *
- *   ch_im_out is multiple of 2
- *
- */
- riscv_nmsis_nn_status riscv_convolve_HWC_q15_fast_nonsquare(const q15_t *Im_in,
- const uint16_t dim_im_in_x,
- const uint16_t dim_im_in_y,
- const uint16_t ch_im_in,
- const q15_t *wt,
- const uint16_t ch_im_out,
- const uint16_t dim_kernel_x,
- const uint16_t dim_kernel_y,
- const uint16_t padding_x,
- const uint16_t padding_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const q15_t *bias,
- const uint16_t bias_shift,
- const uint16_t out_shift,
- q15_t *Im_out,
- const uint16_t dim_im_out_x,
- const uint16_t dim_im_out_y,
- q15_t *bufferA,
- q7_t *bufferB);
- /**
- * @brief Q7 depthwise separable convolution function
- * @param[in] Im_in pointer to input tensor
- * @param[in] dim_im_in input tensor dimension
- * @param[in] ch_im_in number of input tensor channels
- * @param[in] wt pointer to kernel weights
- * @param[in] ch_im_out number of filters, i.e., output tensor channels
- * @param[in] dim_kernel filter kernel size
- * @param[in] padding padding sizes
- * @param[in] stride convolution stride
- * @param[in] bias pointer to bias
- * @param[in] bias_shift amount of left-shift for bias
- * @param[in] out_shift amount of right-shift for output
- * @param[in,out] Im_out pointer to output tensor
- * @param[in] dim_im_out output tensor dimension
- * @param[in,out] bufferA pointer to buffer space for input
- * @param[in,out] bufferB pointer to buffer space for output
- * @return The function returns either
- * <code>RISCV_NMSIS_NN_SIZE_MISMATCH</code> or <code>RISCV_NMSIS_NN_SUCCESS</code> based on the outcome of size checking.
- *
- * This function is the version with full list of optimization tricks, but with
- *    some constraints:
- * ch_im_in is multiple of 2
- * ch_im_out is multiple of 2
- */
- riscv_nmsis_nn_status riscv_depthwise_separable_conv_HWC_q7(const q7_t *Im_in,
- const uint16_t dim_im_in,
- const uint16_t ch_im_in,
- const q7_t *wt,
- const uint16_t ch_im_out,
- const uint16_t dim_kernel,
- const uint16_t padding,
- const uint16_t stride,
- const q7_t *bias,
- const uint16_t bias_shift,
- const uint16_t out_shift,
- q7_t *Im_out,
- const uint16_t dim_im_out,
- q15_t *bufferA,
- q7_t *bufferB);
- /**
- * @brief Q7 depthwise separable convolution function (non-square shape)
- * @param[in] Im_in pointer to input tensor
- * @param[in] dim_im_in_x input tensor dimension x
- * @param[in] dim_im_in_y input tensor dimension y
- * @param[in] ch_im_in number of input tensor channels
- * @param[in] wt pointer to kernel weights
- * @param[in] ch_im_out number of filters, i.e., output tensor channels
- * @param[in] dim_kernel_x filter kernel size x
- * @param[in] dim_kernel_y filter kernel size y
- * @param[in] padding_x padding sizes x
- * @param[in] padding_y padding sizes y
- * @param[in] stride_x convolution stride x
- * @param[in] stride_y convolution stride y
- * @param[in] bias pointer to bias
- * @param[in] bias_shift amount of left-shift for bias
- * @param[in] out_shift amount of right-shift for output
- * @param[in,out] Im_out pointer to output tensor
- * @param[in] dim_im_out_x output tensor dimension x
- * @param[in] dim_im_out_y output tensor dimension y
- * @param[in,out] bufferA pointer to buffer space for input
- * @param[in,out] bufferB pointer to buffer space for output
- * @return The function returns either
- * <code>RISCV_NMSIS_NN_SIZE_MISMATCH</code> or <code>RISCV_NMSIS_NN_SUCCESS</code> based on the outcome of size checking.
- *
- * This function is the version with full list of optimization tricks, but with
- *    some constraints:
- * ch_im_in is multiple of 2
- * ch_im_out is multiple of 2
- */
- riscv_nmsis_nn_status riscv_depthwise_separable_conv_HWC_q7_nonsquare(const q7_t *Im_in,
- const uint16_t dim_im_in_x,
- const uint16_t dim_im_in_y,
- const uint16_t ch_im_in,
- const q7_t *wt,
- const uint16_t ch_im_out,
- const uint16_t dim_kernel_x,
- const uint16_t dim_kernel_y,
- const uint16_t padding_x,
- const uint16_t padding_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const q7_t *bias,
- const uint16_t bias_shift,
- const uint16_t out_shift,
- q7_t *Im_out,
- const uint16_t dim_im_out_x,
- const uint16_t dim_im_out_y,
- q15_t *bufferA,
- q7_t *bufferB);
- /**
- * @brief 1xn convolution for s4 weights
- *
- * @param[in, out] ctx Function context that contains the additional buffer if required by the function.
- * riscv_convolve_1_x_n_s4_get_buffer_size will return the buffer_size if required
- * The caller is expected to clear the buffer, if applicable, for security reasons.
- * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...).
- * Range of conv_params->input_offset : [-127, 128]
- * Range of conv_params->output_offset : [-128, 127]
- * @param[in] quant_params Per-channel quantization info.
- * It contains the multiplier and shift values to be applied to each output channel
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
- * @param[in] input_data Input (activation) data pointer. Data type: int8
- * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, 1, WK, C_IN] where WK is the horizontal
- * spatial filter dimension
- * @param[in] filter_data Filter data pointer. Data type: int8 as packed int4
- * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
- * @param[in] bias_data Optional bias data pointer. Data type: int32
- * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
- * @param[out] output_data Output data pointer. Data type: int8
- *
- * @return The function returns either
- * <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
- * <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
- *
- * @details
- * - Supported framework : TensorFlow Lite Micro
- * - The following constrains on the arguments apply
- * -# stride.w * input_dims->c is a multiple of 4
- * -# Explicit constraints(since it is for 1xN convolution)
- * -## input_dims->h equals 1
- * -## output_dims->h equals 1
- * -## filter_dims->h equals 1
- *@todo Remove constraint on output_dims->w to make the function generic.
- *
- */
- riscv_nmsis_nn_status riscv_convolve_1_x_n_s4(const nmsis_nn_context *ctx,
- const nmsis_nn_conv_params *conv_params,
- const nmsis_nn_per_channel_quant_params *quant_params,
- const nmsis_nn_dims *input_dims,
- const int8_t *input_data,
- const nmsis_nn_dims *filter_dims,
- const int8_t *filter_data,
- const nmsis_nn_dims *bias_dims,
- const int32_t *bias_data,
- const nmsis_nn_dims *output_dims,
- int8_t *output_data);
- /**
- * @brief Get the required additional buffer size for 1xn convolution
- *
- * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...).
- * Range of conv_params->input_offset : [-127, 128]
- * Range of conv_params->output_offset : [-128, 127]
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
- * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, 1, WK, C_IN] where WK is the
- * horizontal spatial filter dimension
- * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
- *
- * @return The function returns required buffer size(bytes)
- *
- */
- int32_t riscv_convolve_1_x_n_s8_get_buffer_size(const nmsis_nn_conv_params *conv_params,
- const nmsis_nn_dims *input_dims,
- const nmsis_nn_dims *filter_dims,
- const nmsis_nn_dims *output_dims);
- /**
- * @brief Get the required additional buffer size for 1xn convolution
- *
- * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...).
- * Range of conv_params->input_offset : [-127, 128]
- * Range of conv_params->output_offset : [-128, 127]
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
- * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, 1, WK, C_IN] where WK is the
- * horizontal spatial filter dimension
- * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
- *
- * @return The function returns required buffer size(bytes)
- *
- */
- int32_t riscv_convolve_1_x_n_s4_get_buffer_size(const nmsis_nn_conv_params *conv_params,
- const nmsis_nn_dims *input_dims,
- const nmsis_nn_dims *filter_dims,
- const nmsis_nn_dims *output_dims);
- /**
- * @brief Wrapper function to pick the right optimized s8 depthwise convolution function
- *
- * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function
- * definition file to see if an additional buffer is required.
- * Optional function {API}_get_buffer_size() provides the buffer
- * size if required.
- * The caller is expected to clear the buffer, if applicable, for security reasons.
- * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...)
- * dw_conv_params->dilation is not used.
- * Range of dw_conv_params->input_offset : [-127, 128]
- * Range of dw_conv_params->output_offset : [-128, 127]
- * @param[in] quant_params Per-channel quantization info.
- * It contains the multiplier and shift values to be applied to each
- * output channel
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN]
- * Batch argument N is not used and assumed to be 1.
- * @param[in] input_data Input (activation) data pointer. Data type: int8
- * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT]
- * @param[in] filter_data Filter data pointer. Data type: int8
- * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
- * @param[in] bias_data Bias data pointer. Data type: int32
- * @param[in] output_dims Output tensor dimensions. Format: [1, H, W, C_OUT]
- * @param[in, out] output_data Output data pointer. Data type: int8
- * @return The function returns
- * <code>RISCV_NMSIS_NN_SUCCESS</code> - Successful completion.
- *
- * @details
- * - Supported framework: TensorFlow Lite
- *      - Picks one of the following functions
- * -# riscv_depthwise_conv_s8()
- * -# riscv_depthwise_conv_3x3_s8() - RISC-V CPUs with DSP extension only
- * -# riscv_depthwise_conv_s8_opt()
- * - Check details of riscv_depthwise_conv_s8_opt() for potential data that can be accessed outside of the
- * boundary.
- */
- riscv_nmsis_nn_status riscv_depthwise_conv_wrapper_s8(const nmsis_nn_context *ctx,
- const nmsis_nn_dw_conv_params *dw_conv_params,
- const nmsis_nn_per_channel_quant_params *quant_params,
- const nmsis_nn_dims *input_dims,
- const int8_t *input_data,
- const nmsis_nn_dims *filter_dims,
- const int8_t *filter_data,
- const nmsis_nn_dims *bias_dims,
- const int32_t *bias_data,
- const nmsis_nn_dims *output_dims,
- int8_t *output_data);
- /**
- * @brief Wrapper function to pick the right optimized s4 depthwise convolution function
- *
- * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function
- * definition file to see if an additional buffer is required.
- * Optional function {API}_get_buffer_size() provides the buffer
- * size if required.
- *                                The caller is expected to clear the buffer, if applicable, for security reasons.
- * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...)
- * dw_conv_params->dilation is not used.
- * Range of dw_conv_params->input_offset : [-127, 128]
- * Range of dw_conv_params->output_offset : [-128, 127]
- * @param[in] quant_params Per-channel quantization info.
- * It contains the multiplier and shift values to be applied to each
- * output channel
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN]
- * Batch argument N is not used and assumed to be 1.
- * @param[in] input_data Input (activation) data pointer. Data type: int8
- * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT]
- * @param[in] filter_data Filter data pointer. Data type: int8_t packed 4-bit weights, e.g four sequential
- * weights [0x1, 0x2, 0x3, 0x4] packed as [0x21, 0x43].
- * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
- * @param[in] bias_data Bias data pointer. Data type: int32
- * @param[in] output_dims Output tensor dimensions. Format: [1, H, W, C_OUT]
- * @param[in, out] output_data Output data pointer. Data type: int8
- * @return The function returns
- * <code>RISCV_NMSIS_NN_SUCCESS</code> - Successful completion.
- *
- * @details
- * - Supported framework: TensorFlow Lite
- */
- riscv_nmsis_nn_status riscv_depthwise_conv_wrapper_s4(const nmsis_nn_context *ctx,
- const nmsis_nn_dw_conv_params *dw_conv_params,
- const nmsis_nn_per_channel_quant_params *quant_params,
- const nmsis_nn_dims *input_dims,
- const int8_t *input_data,
- const nmsis_nn_dims *filter_dims,
- const int8_t *filter_data,
- const nmsis_nn_dims *bias_dims,
- const int32_t *bias_data,
- const nmsis_nn_dims *output_dims,
- int8_t *output_data);
- /**
- * @brief Get size of additional buffer required by riscv_depthwise_conv_wrapper_s8()
- *
- * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...)
- * Range of dw_conv_params->input_offset : [-127, 128]
- *                                Range of dw_conv_params->output_offset : [-128, 127]
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN]
- * Batch argument N is not used and assumed to be 1.
- * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT]
- * @param[in] output_dims Output tensor dimensions. Format: [1, H, W, C_OUT]
- * @return Size of additional memory required for optimizations in bytes.
- *
- */
- int32_t riscv_depthwise_conv_wrapper_s8_get_buffer_size(const nmsis_nn_dw_conv_params *dw_conv_params,
- const nmsis_nn_dims *input_dims,
- const nmsis_nn_dims *filter_dims,
- const nmsis_nn_dims *output_dims);
- /**
- * @brief Get size of additional buffer required by riscv_depthwise_conv_wrapper_s8() for processors with DSP extension.
- * Refer to riscv_depthwise_conv_wrapper_s8_get_buffer_size() for function argument details.
- *
- * @note       Intended for compilation on Host. If compiling for a RISC-V target, use
- * riscv_depthwise_conv_wrapper_s8_get_buffer_size().
- *
- */
- int32_t riscv_depthwise_conv_wrapper_s8_get_buffer_size_dsp(const nmsis_nn_dw_conv_params *dw_conv_params,
- const nmsis_nn_dims *input_dims,
- const nmsis_nn_dims *filter_dims,
- const nmsis_nn_dims *output_dims);
- /**
- * @brief Get size of additional buffer required by riscv_depthwise_conv_wrapper_s4()
- *
- * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...)
- * Range of dw_conv_params->input_offset : [-127, 128]
- *                                Range of dw_conv_params->output_offset : [-128, 127]
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN]
- * Batch argument N is not used and assumed to be 1.
- * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT]
- * @param[in] output_dims Output tensor dimensions. Format: [1, H, W, C_OUT]
- * @return Size of additional memory required for optimizations in bytes.
- *
- */
- int32_t riscv_depthwise_conv_wrapper_s4_get_buffer_size(const nmsis_nn_dw_conv_params *dw_conv_params,
- const nmsis_nn_dims *input_dims,
- const nmsis_nn_dims *filter_dims,
- const nmsis_nn_dims *output_dims);
- /**
- * @brief Get size of additional buffer required by riscv_depthwise_conv_wrapper_s4() for processors with DSP extension.
- * Refer to riscv_depthwise_conv_wrapper_s4_get_buffer_size() for function argument details.
- *
- * @note       Intended for compilation on Host. If compiling for a RISC-V target, use
- * riscv_depthwise_conv_wrapper_s4_get_buffer_size().
- *
- */
- int32_t riscv_depthwise_conv_wrapper_s4_get_buffer_size_dsp(const nmsis_nn_dw_conv_params *dw_conv_params,
- const nmsis_nn_dims *input_dims,
- const nmsis_nn_dims *filter_dims,
- const nmsis_nn_dims *output_dims);
- /**
- * @brief Basic s8 depthwise convolution function that doesn't have any constraints on the input dimensions.
- *
- * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function
- * definition file to see if an additional buffer is required.
- * Optional function {API}_get_buffer_size() provides the buffer
- *                                size if an additional buffer is required.
- * The caller is expected to clear the buffer, if applicable, for security reasons.
- * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...)
- * dw_conv_params->dilation is not used.
- * Range of dw_conv_params->input_offset : [-127, 128]
- *                                Range of dw_conv_params->output_offset : [-128, 127]
- * @param[in] quant_params Per-channel quantization info.
- * It contains the multiplier and shift values to be applied to each
- * output channel
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
- * Batch argument N is not used.
- * @param[in] input_data Input (activation) data pointer. Data type: int8
- * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT]
- * @param[in] filter_data Filter data pointer. Data type: int8
- * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
- * @param[in] bias_data Bias data pointer. Data type: int32
- * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
- * @param[in, out] output_data Output data pointer. Data type: int8
- * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
- *
- * @details
- * - Supported framework: TensorFlow Lite
- */
- riscv_nmsis_nn_status riscv_depthwise_conv_s8(const nmsis_nn_context *ctx,
- const nmsis_nn_dw_conv_params *dw_conv_params,
- const nmsis_nn_per_channel_quant_params *quant_params,
- const nmsis_nn_dims *input_dims,
- const int8_t *input_data,
- const nmsis_nn_dims *filter_dims,
- const int8_t *filter_data,
- const nmsis_nn_dims *bias_dims,
- const int32_t *bias_data,
- const nmsis_nn_dims *output_dims,
- int8_t *output_data);
- /**
- * @brief Basic s4 depthwise convolution function that doesn't have any constraints on the input dimensions.
- *
- * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function
- * definition file to see if an additional buffer is required.
- * Optional function {API}_get_buffer_size() provides the buffer
- *                                size if an additional buffer is required.
- *                                The caller is expected to clear the buffer, if applicable, for security reasons.
- * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...)
- * dw_conv_params->dilation is not used.
- * Range of dw_conv_params->input_offset : [-127, 128]
- *                                Range of dw_conv_params->output_offset : [-128, 127]
- * @param[in] quant_params Per-channel quantization info.
- * It contains the multiplier and shift values to be applied to each
- * output channel
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
- * Batch argument N is not used.
- * @param[in] input Input (activation) data pointer. Data type: int8
- * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT]
- * @param[in] kernel Filter data pointer. Data type: int8_t packed 4-bit weights, e.g four sequential
- * weights [0x1, 0x2, 0x3, 0x4] packed as [0x21, 0x43].
- * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
- * @param[in] bias Bias data pointer. Data type: int32
- * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
- * @param[in, out] output Output data pointer. Data type: int8
- * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
- *
- * @details
- * - Supported framework: TensorFlow Lite
- */
- riscv_nmsis_nn_status riscv_depthwise_conv_s4(const nmsis_nn_context *ctx,
- const nmsis_nn_dw_conv_params *dw_conv_params,
- const nmsis_nn_per_channel_quant_params *quant_params,
- const nmsis_nn_dims *input_dims,
- const int8_t *input,
- const nmsis_nn_dims *filter_dims,
- const int8_t *kernel,
- const nmsis_nn_dims *bias_dims,
- const int32_t *bias,
- const nmsis_nn_dims *output_dims,
- int8_t *output);
- /**
- * @brief Basic s16 depthwise convolution function that doesn't have any constraints on the input dimensions.
- *
- * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function
- * definition file to see if an additional buffer is required.
- * Optional function {API}_get_buffer_size() provides the buffer
- *                                size if an additional buffer is required.
- * The caller is expected to clear the buffer, if applicable, for security reasons.
- * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...)
- * conv_params->input_offset : Not used
- * conv_params->output_offset : Not used
- * @param[in] quant_params Per-channel quantization info.
- * It contains the multiplier and shift values to be applied to each
- * output channel
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
- * Batch argument N is not used.
- * @param[in] input_data Input (activation) data pointer. Data type: int8
- * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT]
- * @param[in] filter_data Filter data pointer. Data type: int8
- * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
- * @param[in] bias_data Bias data pointer. Data type: int64
- * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
- * @param[in, out] output_data Output data pointer. Data type: int16
- * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
- *
- * @details
- * - Supported framework: TensorFlow Lite
- */
- riscv_nmsis_nn_status riscv_depthwise_conv_s16(const nmsis_nn_context *ctx,
- const nmsis_nn_dw_conv_params *dw_conv_params,
- const nmsis_nn_per_channel_quant_params *quant_params,
- const nmsis_nn_dims *input_dims,
- const int16_t *input_data,
- const nmsis_nn_dims *filter_dims,
- const int8_t *filter_data,
- const nmsis_nn_dims *bias_dims,
- const int64_t *bias_data,
- const nmsis_nn_dims *output_dims,
- int16_t *output_data);
- /**
- * @brief Wrapper function to pick the right optimized s16 depthwise convolution function
- *
- * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function
- * definition file to see if an additional buffer is required.
- * Optional function {API}_get_buffer_size() provides the buffer
- * size if required.
- * The caller is expected to clear the buffer, if applicable, for security reasons.
- * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...)
- * dw_conv_params->dilation is not used.
- * Range of dw_conv_params->input_offset : Not used
- * Range of dw_conv_params->output_offset : Not used
- * @param[in] quant_params Per-channel quantization info.
- * It contains the multiplier and shift values to be applied to each
- * output channel
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN]
- * Batch argument N is not used and assumed to be 1.
- * @param[in] input_data Input (activation) data pointer. Data type: int16
- * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT]
- * @param[in] filter_data Filter data pointer. Data type: int8
- * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
- * @param[in] bias_data Bias data pointer. Data type: int64
- * @param[in] output_dims Output tensor dimensions. Format: [1, H, W, C_OUT]
- * @param[in, out] output_data Output data pointer. Data type: int16
- * @return The function returns
- * <code>RISCV_NMSIS_NN_SUCCESS</code> - Successful completion.
- *
- * @details
- * - Supported framework: TensorFlow Lite
- *      - Picks one of the following functions
- * -# riscv_depthwise_conv_s16()
- * -# riscv_depthwise_conv_fast_s16() - RISC-V CPUs with DSP extension only
- */
- riscv_nmsis_nn_status riscv_depthwise_conv_wrapper_s16(const nmsis_nn_context *ctx,
- const nmsis_nn_dw_conv_params *dw_conv_params,
- const nmsis_nn_per_channel_quant_params *quant_params,
- const nmsis_nn_dims *input_dims,
- const int16_t *input_data,
- const nmsis_nn_dims *filter_dims,
- const int8_t *filter_data,
- const nmsis_nn_dims *bias_dims,
- const int64_t *bias_data,
- const nmsis_nn_dims *output_dims,
- int16_t *output_data);
- /**
- * @brief Get size of additional buffer required by riscv_depthwise_conv_wrapper_s16()
- *
- * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...)
- * Range of dw_conv_params->input_offset : Not used
- *                                Range of dw_conv_params->output_offset : Not used
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN]
- * Batch argument N is not used and assumed to be 1.
- * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT]
- * @param[in] output_dims Output tensor dimensions. Format: [1, H, W, C_OUT]
- * @return Size of additional memory required for optimizations in bytes.
- *
- */
- int32_t riscv_depthwise_conv_wrapper_s16_get_buffer_size(const nmsis_nn_dw_conv_params *dw_conv_params,
- const nmsis_nn_dims *input_dims,
- const nmsis_nn_dims *filter_dims,
- const nmsis_nn_dims *output_dims);
- /**
- * @brief Get size of additional buffer required by riscv_depthwise_conv_wrapper_s16() for processors with DSP extension.
- * Refer to riscv_depthwise_conv_wrapper_s16_get_buffer_size() for function argument details.
- *
- * @note       Intended for compilation on Host. If compiling for a RISC-V target, use
- * riscv_depthwise_conv_wrapper_s16_get_buffer_size().
- *
- */
- int32_t riscv_depthwise_conv_wrapper_s16_get_buffer_size_dsp(const nmsis_nn_dw_conv_params *dw_conv_params,
- const nmsis_nn_dims *input_dims,
- const nmsis_nn_dims *filter_dims,
- const nmsis_nn_dims *output_dims);
- /**
- * @brief Optimized s16 depthwise convolution function with constraint that in_channel equals out_channel.
- * Refer riscv_depthwise_conv_s16() for function argument details.
- *
- * @return The function returns one of the following
- *                <code>RISCV_NMSIS_NN_ARG_ERROR</code> - ctx->buf == NULL and
- * riscv_depthwise_conv_fast_s16_get_buffer_size() > 0 or
- * input channel != output channel or
- * ch_mult != 1
- *
- * <code>RISCV_NMSIS_NN_SUCCESS</code> - Successful operation
- *
- * @details
- * - Supported framework: TensorFlow Lite
- * - The following constrains on the arguments apply
- * -# Number of input channel equals number of output channels or ch_mult equals 1
- *    - Recommended when number of channels is 4 or greater.
- *
- */
- riscv_nmsis_nn_status riscv_depthwise_conv_fast_s16(const nmsis_nn_context *ctx,
- const nmsis_nn_dw_conv_params *dw_conv_params,
- const nmsis_nn_per_channel_quant_params *quant_params,
- const nmsis_nn_dims *input_dims,
- const int16_t *input_data,
- const nmsis_nn_dims *filter_dims,
- const int8_t *filter_data,
- const nmsis_nn_dims *bias_dims,
- const int64_t *bias_data,
- const nmsis_nn_dims *output_dims,
- int16_t *output_data);
- /**
- * @brief Get the required buffer size for optimized s16 depthwise convolution
- * function with constraint that in_channel equals out_channel.
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [1, H, W, C_IN]
- * Batch argument N is not used.
- * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT]
- * @return The function returns required buffer size in bytes
- *
- */
- int32_t riscv_depthwise_conv_fast_s16_get_buffer_size(const nmsis_nn_dims *input_dims, const nmsis_nn_dims *filter_dims);
- /**
- * @brief Optimized s8 depthwise convolution function for 3x3 kernel size with some constraints on
- * the input arguments(documented below). Refer riscv_depthwise_conv_s8() for function
- * argument details.
- *
- * @return The function returns one of the following
- * <code>RISCV_NMSIS_NN_ARG_ERROR</code> - Unsupported dimension of tensors
- * - Unsupported pad size along the x axis
- * <code>RISCV_NMSIS_NN_SUCCESS</code> - Successful operation
- *
- * @details
- * - Supported framework : TensorFlow Lite Micro
- * - The following constraints on the arguments apply
- * -# Number of input channel equals number of output channels
- * -# Filter height and width equals 3
- * -# Padding along x is either 0 or 1.
- *
- */
- riscv_nmsis_nn_status riscv_depthwise_conv_3x3_s8(const nmsis_nn_context *ctx,
- const nmsis_nn_dw_conv_params *dw_conv_params,
- const nmsis_nn_per_channel_quant_params *quant_params,
- const nmsis_nn_dims *input_dims,
- const int8_t *input_data,
- const nmsis_nn_dims *filter_dims,
- const int8_t *filter_data,
- const nmsis_nn_dims *bias_dims,
- const int32_t *bias_data,
- const nmsis_nn_dims *output_dims,
- int8_t *output_data);
- /**
- * @brief Optimized s8 depthwise convolution function with constraint that in_channel equals out_channel.
- * Refer riscv_depthwise_conv_s8() for function argument details.
- *
- * @return The function returns one of the following
- * <code>RISCV_NMSIS_NN_ARG_ERROR</code> - input channel != output channel or
- * ch_mult != 1
- * <code>RISCV_NMSIS_NN_SUCCESS</code> - Successful operation
- *
- * @details
- * - Supported framework: TensorFlow Lite
- * - The following constraints on the arguments apply
- * -# Number of input channel equals number of output channels or ch_mult equals 1
- * - Recommended when number of channels is 4 or greater.
- *
- */
- riscv_nmsis_nn_status riscv_depthwise_conv_s8_opt(const nmsis_nn_context *ctx,
- const nmsis_nn_dw_conv_params *dw_conv_params,
- const nmsis_nn_per_channel_quant_params *quant_params,
- const nmsis_nn_dims *input_dims,
- const int8_t *input_data,
- const nmsis_nn_dims *filter_dims,
- const int8_t *filter_data,
- const nmsis_nn_dims *bias_dims,
- const int32_t *bias_data,
- const nmsis_nn_dims *output_dims,
- int8_t *output_data);
- /**
- * @brief Optimized s4 depthwise convolution function with constraint that in_channel equals out_channel.
- * Refer riscv_depthwise_conv_s4() for function argument details.
- *
- * @return The function returns one of the following
- * <code>RISCV_NMSIS_NN_ARG_ERROR</code> - input channel != output channel or
- * ch_mult != 1
- * <code>RISCV_NMSIS_NN_SUCCESS</code> - Successful operation
- *
- * @details
- * - Supported framework: TensorFlow Lite
- * - The following constraints on the arguments apply
- * -# Number of input channel equals number of output channels or ch_mult equals 1
- * - Recommended when number of channels is 4 or greater.
- *
- */
- riscv_nmsis_nn_status riscv_depthwise_conv_s4_opt(const nmsis_nn_context *ctx,
- const nmsis_nn_dw_conv_params *dw_conv_params,
- const nmsis_nn_per_channel_quant_params *quant_params,
- const nmsis_nn_dims *input_dims,
- const int8_t *input_data,
- const nmsis_nn_dims *filter_dims,
- const int8_t *filter_data,
- const nmsis_nn_dims *bias_dims,
- const int32_t *bias_data,
- const nmsis_nn_dims *output_dims,
- int8_t *output_data);
- /**
- * @brief Get the required buffer size for optimized s8 depthwise convolution
- * function with constraint that in_channel equals out_channel.
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [1, H, W, C_IN]
- * Batch argument N is not used.
- * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT]
- * @return The function returns required buffer size in bytes
- *
- */
- int32_t riscv_depthwise_conv_s8_opt_get_buffer_size(const nmsis_nn_dims *input_dims, const nmsis_nn_dims *filter_dims);
- /**
- * @brief Get the required buffer size for optimized s4 depthwise convolution
- * function with constraint that in_channel equals out_channel.
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [1, H, W, C_IN]
- * Batch argument N is not used.
- * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT]
- * @return The function returns required buffer size in bytes
- *
- */
- int32_t riscv_depthwise_conv_s4_opt_get_buffer_size(const nmsis_nn_dims *input_dims, const nmsis_nn_dims *filter_dims);
- /**
- * @defgroup FC Fully-connected Layer Functions
- *
- * Collection of fully-connected and matrix multiplication functions.
- *
- * Fully-connected layer is basically a matrix-vector multiplication
- * with bias. The matrix is the weights and the input/output vectors
- * are the activation values. Supported {weight, activation} precisions
- * include {8-bit, 8-bit} and {8-bit, 16-bit}
- *
- *
- */
- /**
- *@brief Q7 basic fully-connected layer function
- *@param[in] pV pointer to input vector
- *@param[in] pM pointer to matrix weights
- *@param[in] dim_vec length of the vector
- *@param[in] num_of_rows number of rows in weight matrix
- *@param[in] bias_shift amount of left-shift for bias
- *@param[in] out_shift amount of right-shift for output
- *@param[in] bias pointer to bias
- *@param[in,out] pOut pointer to output vector
- *@param[in,out] vec_buffer pointer to buffer space for input
- *@return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
- *
- */
- riscv_nmsis_nn_status riscv_fully_connected_q7(const q7_t *pV,
- const q7_t *pM,
- const uint16_t dim_vec,
- const uint16_t num_of_rows,
- const uint16_t bias_shift,
- const uint16_t out_shift,
- const q7_t *bias,
- q7_t *pOut,
- q15_t *vec_buffer);
- /**
- * @brief Basic s4 Fully Connected function.
- *
- * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function
- * definition file to see if an additional buffer is required.
- * Optional function {API}_get_buffer_size() provides the buffer
- * size if an additional buffer is required.
- * The caller is expected to clear the buffer ,if applicable, for security reasons.
- * @param[in] fc_params Fully Connected layer parameters.
- * Range of fc_params->input_offset : [-127, 128]
- * fc_params->filter_offset : 0
- * Range of fc_params->output_offset : [-128, 127]
- * @param[in] quant_params Per-tensor quantization info.
- * It contains the multiplier and shift value to be applied to the output tensor.
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
- * Input dimension is taken as Nx(H * W * C_IN)
- * @param[in] input_data Input (activation) data pointer. Data type: int8
- * @param[in] filter_dims Two dimensional filter dimensions. Format: [N, C]
- * N : accumulation depth and equals (H * W * C_IN) from input_dims
- * C : output depth and equals C_OUT in output_dims
- * H & W : Not used
- * @param[in] filter_data Filter data pointer. Data type: int8_t packed 4-bit weights, e.g four sequential
- * weights [0x1, 0x2, 0x3, 0x4] packed as [0x21, 0x43].
- * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
- * N, H, W : Not used
- * @param[in] bias_data Bias data pointer. Data type: int32
- * @param[in] output_dims Output tensor dimensions. Format: [N, C_OUT]
- * N : Batches
- * C_OUT : Output depth
- * H & W : Not used.
- * @param[in, out] output_data Output data pointer. Data type: int8
- * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
- *
- * @details
- * - Supported framework: TensorFlow Lite
- */
- riscv_nmsis_nn_status riscv_fully_connected_s4(const nmsis_nn_context *ctx,
- const nmsis_nn_fc_params *fc_params,
- const nmsis_nn_per_tensor_quant_params *quant_params,
- const nmsis_nn_dims *input_dims,
- const int8_t *input_data,
- const nmsis_nn_dims *filter_dims,
- const int8_t *filter_data,
- const nmsis_nn_dims *bias_dims,
- const int32_t *bias_data,
- const nmsis_nn_dims *output_dims,
- int8_t *output_data);
- /**
- * @brief Basic s8 Fully Connected function.
- *
- * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function
- * definition file to see if an additional buffer is required.
- * Optional function {API}_get_buffer_size() provides the buffer
- * size if an additional buffer is required.
- * The caller is expected to clear the buffer, if applicable, for security reasons.
- * @param[in] fc_params Fully Connected layer parameters.
- * Range of fc_params->input_offset : [-127, 128]
- * fc_params->filter_offset : 0
- * Range of fc_params->output_offset : [-128, 127]
- * @param[in] quant_params Per-tensor quantization info.
- * It contains the multiplier and shift value to be applied to the output tensor.
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
- * Input dimension is taken as Nx(H * W * C_IN)
- * @param[in] input_data Input (activation) data pointer. Data type: int8
- * @param[in] filter_dims Two dimensional filter dimensions. Format: [N, C]
- * N : accumulation depth and equals (H * W * C_IN) from input_dims
- * C : output depth and equals C_OUT in output_dims
- * H & W : Not used
- * @param[in] filter_data Filter data pointer. Data type: int8
- * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
- * N, H, W : Not used
- * @param[in] bias_data Bias data pointer. Data type: int32
- * @param[in] output_dims Output tensor dimensions. Format: [N, C_OUT]
- * N : Batches
- * C_OUT : Output depth
- * H & W : Not used.
- * @param[in, out] output_data Output data pointer. Data type: int8
- *
- * @return The function returns either
- * <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
- * <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
- *
- * @details
- * - Supported framework: TensorFlow Lite
- */
- riscv_nmsis_nn_status riscv_fully_connected_s8(const nmsis_nn_context *ctx,
- const nmsis_nn_fc_params *fc_params,
- const nmsis_nn_per_tensor_quant_params *quant_params,
- const nmsis_nn_dims *input_dims,
- const int8_t *input_data,
- const nmsis_nn_dims *filter_dims,
- const int8_t *filter_data,
- const nmsis_nn_dims *bias_dims,
- const int32_t *bias_data,
- const nmsis_nn_dims *output_dims,
- int8_t *output_data);
- /**
- * @brief Basic s8 Fully Connected function using per channel quantization.
- *
- * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function
- * definition file to see if an additional buffer is required.
- * Optional function {API}_get_buffer_size() provides the buffer
- * size if an additional buffer is required.
- * The caller is expected to clear the buffer, if applicable, for security reasons.
- * @param[in] fc_params Fully Connected layer parameters.
- * Range of fc_params->input_offset : [-127, 128]
- * fc_params->filter_offset : 0
- * Range of fc_params->output_offset : [-128, 127]
- * @param[in] quant_params Per-channel quantization info.
- * It contains the multiplier and shift values to be applied to each output channel
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
- * Input dimension is taken as Nx(H * W * C_IN)
- * @param[in] input_data Input (activation) data pointer. Data type: int8
- * @param[in] filter_dims Two dimensional filter dimensions. Format: [N, C]
- * N : accumulation depth and equals (H * W * C_IN) from input_dims
- * C : output depth and equals C_OUT in output_dims
- * H & W : Not used
- * @param[in] filter_data Filter data pointer. Data type: int8
- * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
- * N, H, W : Not used
- * @param[in] bias_data Bias data pointer. Data type: int32
- * @param[in] output_dims Output tensor dimensions. Format: [N, C_OUT]
- * N : Batches
- * C_OUT : Output depth
- * H & W : Not used.
- * @param[in, out] output_data Output data pointer. Data type: int8
- *
- * @return The function returns either
- * <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
- * <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
- *
- * @details
- * - Supported framework: TensorFlow Lite
- */
- riscv_nmsis_nn_status riscv_fully_connected_per_channel_s8(const nmsis_nn_context *ctx,
- const nmsis_nn_fc_params *fc_params,
- const nmsis_nn_per_channel_quant_params *quant_params,
- const nmsis_nn_dims *input_dims,
- const int8_t *input_data,
- const nmsis_nn_dims *filter_dims,
- const int8_t *filter_data,
- const nmsis_nn_dims *bias_dims,
- const int32_t *bias_data,
- const nmsis_nn_dims *output_dims,
- int8_t *output_data);
- /**
- * @brief s8 Fully Connected layer wrapper function
- *
- * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function
- * definition file to see if an additional buffer is required.
- * Optional function {API}_get_buffer_size() provides the buffer
- * size if an additional buffer is required.
- * The caller is expected to clear the buffer, if applicable, for security reasons.
- * @param[in] fc_params Fully Connected layer parameters.
- * Range of fc_params->input_offset : [-127, 128]
- * fc_params->filter_offset : 0
- * Range of fc_params->output_offset : [-128, 127]
- * @param[in] quant_params Per-channel or per-tensor quantization info. Check struct definition for details.
- * It contains the multiplier and shift value(s) to be applied to each output channel
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
- * Input dimension is taken as Nx(H * W * C_IN)
- * @param[in] input_data Input (activation) data pointer. Data type: int8
- * @param[in] filter_dims Two dimensional filter dimensions. Format: [N, C]
- * N : accumulation depth and equals (H * W * C_IN) from input_dims
- * C : output depth and equals C_OUT in output_dims
- * H & W : Not used
- * @param[in] filter_data Filter data pointer. Data type: int8
- * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
- * N, H, W : Not used
- * @param[in] bias_data Bias data pointer. Data type: int32
- * @param[in] output_dims Output tensor dimensions. Format: [N, C_OUT]
- * N : Batches
- * C_OUT : Output depth
- * H & W : Not used.
- * @param[in, out] output_data Output data pointer. Data type: int8
- *
- * @return The function returns either
- * <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
- * <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
- *
- * @details
- * - Supported framework: TensorFlow Lite
- */
- riscv_nmsis_nn_status riscv_fully_connected_wrapper_s8(const nmsis_nn_context *ctx,
- const nmsis_nn_fc_params *fc_params,
- const nmsis_nn_quant_params *quant_params,
- const nmsis_nn_dims *input_dims,
- const int8_t *input_data,
- const nmsis_nn_dims *filter_dims,
- const int8_t *filter_data,
- const nmsis_nn_dims *bias_dims,
- const int32_t *bias_data,
- const nmsis_nn_dims *output_dims,
- int8_t *output_data);
- /**
- * @brief Calculate the sum of each row in vector_data, multiply by lhs_offset and optionally add s32 bias_data.
- * @param[in, out] vector_sum_buf Buffer for vector sums
- * @param[in] vector_cols Number of vector columns
- * @param[in] vector_rows Number of vector rows
- * @param[in] vector_data Vector of weights data
- * @param[in] lhs_offset Constant multiplied with each sum
- * @param[in] rhs_offset Constant added to each vector element before sum
- * @param[in] bias_data Vector of bias data, added to each sum.
- * @return The function returns
- * <code>RISCV_NMSIS_NN_SUCCESS</code> - Successful operation
- */
- riscv_nmsis_nn_status riscv_vector_sum_s8(int32_t *vector_sum_buf,
- const int32_t vector_cols,
- const int32_t vector_rows,
- const int8_t *vector_data,
- const int32_t lhs_offset,
- const int32_t rhs_offset,
- const int32_t *bias_data);
- /**
- * @brief Calculate the sum of each row in vector_data, multiply by lhs_offset and optionally add s64 bias_data.
- * @param[in, out] vector_sum_buf Buffer for vector sums
- * @param[in] vector_cols Number of vector columns
- * @param[in] vector_rows Number of vector rows
- * @param[in] vector_data Vector of weights data
- * @param[in] lhs_offset Constant multiplied with each sum
- * @param[in] bias_data Vector of bias data, added to each sum.
- * @return The function returns
- * <code>RISCV_NMSIS_NN_SUCCESS</code> - Successful operation
- */
- riscv_nmsis_nn_status riscv_vector_sum_s8_s64(int64_t *vector_sum_buf,
- const int32_t vector_cols,
- const int32_t vector_rows,
- const int8_t *vector_data,
- const int32_t lhs_offset,
- const int64_t *bias_data);
- /**
- * @brief Get size of additional buffer required by riscv_fully_connected_s8().
- * See also riscv_vector_sum_s8, which is required if buffer size is > 0.
- * @param[in] filter_dims dimension of filter
- * @return The function returns required buffer size in bytes
- *
- */
- int32_t riscv_fully_connected_s8_get_buffer_size(const nmsis_nn_dims *filter_dims);
- /**
- * @brief Get size of additional buffer required by riscv_fully_connected_s8() for processors with DSP extension.
- * Refer to riscv_fully_connected_s8_get_buffer_size() for function argument details.
- *
- * @note Intended for compilation on Host. If compiling for an Riscv target, use
- * riscv_fully_connected_s8_get_buffer_size().
- *
- */
- int32_t riscv_fully_connected_s8_get_buffer_size_dsp(const nmsis_nn_dims *filter_dims);
- /**
- * @brief Basic s16 Fully Connected function.
- *
- * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function
- * definition file to see if an additional buffer is required.
- * Optional function {API}_get_buffer_size() provides the buffer
- * size if an additional buffer is required.
- * The caller is expected to clear the buffer, if applicable, for security reasons.
- * @param[in] fc_params Fully Connected layer parameters.
- * fc_params->input_offset : 0
- * fc_params->filter_offset : 0
- * fc_params->output_offset : 0
- * @param[in] quant_params Per-tensor quantization info.
- * It contains the multiplier and shift value to be applied to the output tensor.
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
- * Input dimension is taken as Nx(H * W * C_IN)
- * @param[in] input_data Input (activation) data pointer. Data type: int16
- * @param[in] filter_dims Two dimensional filter dimensions. Format: [N, C]
- * N : accumulation depth and equals (H * W * C_IN) from input_dims
- * C : output depth and equals C_OUT in output_dims
- * H & W : Not used
- * @param[in] filter_data Filter data pointer. Data type: int8
- * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
- * N, H, W : Not used
- * @param[in] bias_data Bias data pointer. Data type: int64
- * @param[in] output_dims Output tensor dimensions. Format: [N, C_OUT]
- * N : Batches
- * C_OUT : Output depth
- * H & W : Not used.
- * @param[in, out] output_data Output data pointer. Data type: int16
- * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
- *
- * @details
- * - Supported framework: TensorFlow Lite
- */
- riscv_nmsis_nn_status riscv_fully_connected_s16(const nmsis_nn_context *ctx,
- const nmsis_nn_fc_params *fc_params,
- const nmsis_nn_per_tensor_quant_params *quant_params,
- const nmsis_nn_dims *input_dims,
- const int16_t *input_data,
- const nmsis_nn_dims *filter_dims,
- const int8_t *filter_data,
- const nmsis_nn_dims *bias_dims,
- const int64_t *bias_data,
- const nmsis_nn_dims *output_dims,
- int16_t *output_data);
- /**
- * @brief Get size of additional buffer required by riscv_fully_connected_s16().
- * @param[in] filter_dims dimension of filter
- * @return The function returns required buffer size in bytes
- *
- */
- int32_t riscv_fully_connected_s16_get_buffer_size(const nmsis_nn_dims *filter_dims);
- /**
- * @brief Get size of additional buffer required by riscv_fully_connected_s16() for processors with DSP extension.
- * Refer to riscv_fully_connected_s16_get_buffer_size() for function argument details.
- *
- * @note Intended for compilation on Host. If compiling for an Riscv target, use
- * riscv_fully_connected_s16_get_buffer_size().
- *
- */
- int32_t riscv_fully_connected_s16_get_buffer_size_dsp(const nmsis_nn_dims *filter_dims);
- /**
- * @brief Q7 opt fully-connected layer function
- * @param[in] pV pointer to input vector
- * @param[in] pM pointer to matrix weights
- * @param[in] dim_vec length of the vector
- * @param[in] num_of_rows number of rows in weight matrix
- * @param[in] bias_shift amount of left-shift for bias
- * @param[in] out_shift amount of right-shift for output
- * @param[in] bias pointer to bias
- * @param[in,out] pOut pointer to output vector
- * @param[in,out] vec_buffer pointer to buffer space for input
- * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
- *
- */
- riscv_nmsis_nn_status riscv_fully_connected_q7_opt(const q7_t *pV,
- const q7_t *pM,
- const uint16_t dim_vec,
- const uint16_t num_of_rows,
- const uint16_t bias_shift,
- const uint16_t out_shift,
- const q7_t *bias,
- q7_t *pOut,
- q15_t *vec_buffer);
- /**
- * @brief Q15 basic fully-connected layer function
- * @param[in] pV pointer to input vector
- * @param[in] pM pointer to matrix weights
- * @param[in] dim_vec length of the vector
- * @param[in] num_of_rows number of rows in weight matrix
- * @param[in] bias_shift amount of left-shift for bias
- * @param[in] out_shift amount of right-shift for output
- * @param[in] bias pointer to bias
- * @param[in,out] pOut pointer to output vector
- * @param[in,out] vec_buffer pointer to buffer space for input
- * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
- *
- */
- riscv_nmsis_nn_status riscv_fully_connected_q15(const q15_t *pV,
- const q15_t *pM,
- const uint16_t dim_vec,
- const uint16_t num_of_rows,
- const uint16_t bias_shift,
- const uint16_t out_shift,
- const q15_t *bias,
- q15_t *pOut,
- q15_t *vec_buffer);
- /**
- * @brief Q15 opt fully-connected layer function
- * @param[in] pV pointer to input vector
- * @param[in] pM pointer to matrix weights
- * @param[in] dim_vec length of the vector
- * @param[in] num_of_rows number of rows in weight matrix
- * @param[in] bias_shift amount of left-shift for bias
- * @param[in] out_shift amount of right-shift for output
- * @param[in] bias pointer to bias
- * @param[in,out] pOut pointer to output vector
- * @param[in,out] vec_buffer pointer to buffer space for input
- * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
- *
- */
- riscv_nmsis_nn_status riscv_fully_connected_q15_opt(const q15_t *pV,
- const q15_t *pM,
- const uint16_t dim_vec,
- const uint16_t num_of_rows,
- const uint16_t bias_shift,
- const uint16_t out_shift,
- const q15_t *bias,
- q15_t *pOut,
- q15_t *vec_buffer);
- /**
- * @brief Mixed Q15-Q7 fully-connected layer function
- * @param[in] pV pointer to input vector
- * @param[in] pM pointer to matrix weights
- * @param[in] dim_vec length of the vector
- * @param[in] num_of_rows number of rows in weight matrix
- * @param[in] bias_shift amount of left-shift for bias
- * @param[in] out_shift amount of right-shift for output
- * @param[in] bias pointer to bias
- * @param[in,out] pOut pointer to output vector
- * @param[in,out] vec_buffer pointer to buffer space for input
- * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
- *
- */
- riscv_nmsis_nn_status riscv_fully_connected_mat_q7_vec_q15(const q15_t *pV,
- const q7_t *pM,
- const uint16_t dim_vec,
- const uint16_t num_of_rows,
- const uint16_t bias_shift,
- const uint16_t out_shift,
- const q7_t *bias,
- q15_t *pOut,
- q15_t *vec_buffer);
- /**
- * @brief Mixed Q15-Q7 opt fully-connected layer function
- * @param[in] pV pointer to input vector
- * @param[in] pM pointer to matrix weights
- * @param[in] dim_vec length of the vector
- * @param[in] num_of_rows number of rows in weight matrix
- * @param[in] bias_shift amount of left-shift for bias
- * @param[in] out_shift amount of right-shift for output
- * @param[in] bias pointer to bias
- * @param[in,out] pOut pointer to output vector
- * @param[in,out] vec_buffer pointer to buffer space for input
- * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
- *
- */
- riscv_nmsis_nn_status riscv_fully_connected_mat_q7_vec_q15_opt(const q15_t *pV,
- const q7_t *pM,
- const uint16_t dim_vec,
- const uint16_t num_of_rows,
- const uint16_t bias_shift,
- const uint16_t out_shift,
- const q7_t *bias,
- q15_t *pOut,
- q15_t *vec_buffer);
- /**
- * @brief Matrix-Multiplication Kernels for Convolution
- *
- * These functions are used within convolution layer functions for
- * matrix multiplication.
- *
- * The implementation is similar to NMSIS-DSP riscv_mat_mult functions
- * with one Q7 and one Q15 operands. The Q15 operand is the im2col
- * output which is always with 2 columns.
- *
- */
- /**
- * @brief Matrix-multiplication function for convolution
- * @param[in] pA pointer to operand A
- * @param[in] pInBuffer pointer to operand B, always consists of 2 vectors
- * @param[in] ch_im_out numRow of A
- * @param[in] numCol_A numCol of A
- * @param[in] bias_shift amount of left-shift for bias
- * @param[in] out_shift amount of right-shift for output
- * @param[in] bias the bias
- * @param[in,out] pOut pointer to output
- * @return The function returns the incremented output pointer
- */
- q7_t *riscv_nn_mat_mult_kernel_q7_q15(const q7_t *pA,
- const q15_t *pInBuffer,
- const uint16_t ch_im_out,
- const uint16_t numCol_A,
- const uint16_t bias_shift,
- const uint16_t out_shift,
- const q7_t *bias,
- q7_t *pOut);
- /**
- * @brief Matrix-multiplication function for convolution
- * @param[in] pA pointer to operand A, q7 type
- * @param[in] pInBuffer pointer to operand B, q7 type
- * @param[in] ch_im_out numRow of A
- * @param[in] numCol_A numCol of A
- * @param[in] bias_shift amount of left-shift for bias
- * @param[in] out_shift amount of right-shift for output
- * @param[in] bias the bias
- * @param[in,out] pOut pointer to output
- * @return The function returns the incremented output pointer
- */
- q7_t *riscv_nn_mat_mult_kernel_q7(const q7_t * pA,
- const q7_t * pInBuffer,
- const uint16_t ch_im_out,
- const uint16_t numCol_A,
- const uint16_t bias_shift,
- const uint16_t out_shift,
- const q7_t * bias,
- q7_t * pOut);
- /*
- * Other functions
- * These layers are typically not timing critical
- * Basic implementation is supported here
- */
- /**
- * @defgroup groupElementwise Elementwise Functions
- *
- * Elementwise add and multiplication functions.
- *
- */
- /**
- * @brief s8 elementwise add of two vectors
- * @param[in] input_1_vect pointer to input vector 1
- * @param[in] input_2_vect pointer to input vector 2
- * @param[in] input_1_offset offset for input 1. Range: -127 to 128
- * @param[in] input_1_mult multiplier for input 1
- * @param[in] input_1_shift shift for input 1
- * @param[in] input_2_offset offset for input 2. Range: -127 to 128
- * @param[in] input_2_mult multiplier for input 2
- * @param[in] input_2_shift shift for input 2
- * @param[in] left_shift input left shift
- * @param[in,out] output pointer to output vector
- * @param[in] out_offset output offset. Range: -128 to 127
- * @param[in] out_mult output multiplier
- * @param[in] out_shift output shift
- * @param[in] out_activation_min minimum value to clamp output to. Min: -128
- * @param[in] out_activation_max maximum value to clamp output to. Max: 127
- * @param[in] block_size number of samples
- * @return The function returns RISCV_NMSIS_NN_SUCCESS
- */
- riscv_nmsis_nn_status riscv_elementwise_add_s8(const int8_t *input_1_vect,
- const int8_t *input_2_vect,
- const int32_t input_1_offset,
- const int32_t input_1_mult,
- const int32_t input_1_shift,
- const int32_t input_2_offset,
- const int32_t input_2_mult,
- const int32_t input_2_shift,
- const int32_t left_shift,
- int8_t *output,
- const int32_t out_offset,
- const int32_t out_mult,
- const int32_t out_shift,
- const int32_t out_activation_min,
- const int32_t out_activation_max,
- const int32_t block_size);
- /**
- * @brief s16 elementwise add of two vectors
- * @param[in] input_1_vect pointer to input vector 1
- * @param[in] input_2_vect pointer to input vector 2
- * @param[in] input_1_offset offset for input 1. Not used.
- * @param[in] input_1_mult multiplier for input 1
- * @param[in] input_1_shift shift for input 1
- * @param[in] input_2_offset offset for input 2. Not used.
- * @param[in] input_2_mult multiplier for input 2
- * @param[in] input_2_shift shift for input 2
- * @param[in] left_shift input left shift
- * @param[in,out] output pointer to output vector
- * @param[in] out_offset output offset. Not used.
- * @param[in] out_mult output multiplier
- * @param[in] out_shift output shift
- * @param[in] out_activation_min minimum value to clamp output to. Min: -32768
- * @param[in] out_activation_max maximum value to clamp output to. Max: 32767
- * @param[in] block_size number of samples
- * @return The function returns RISCV_NMSIS_NN_SUCCESS
- */
- riscv_nmsis_nn_status riscv_elementwise_add_s16(const int16_t *input_1_vect,
- const int16_t *input_2_vect,
- const int32_t input_1_offset,
- const int32_t input_1_mult,
- const int32_t input_1_shift,
- const int32_t input_2_offset,
- const int32_t input_2_mult,
- const int32_t input_2_shift,
- const int32_t left_shift,
- int16_t *output,
- const int32_t out_offset,
- const int32_t out_mult,
- const int32_t out_shift,
- const int32_t out_activation_min,
- const int32_t out_activation_max,
- const int32_t block_size);
- /**
- * @brief s8 elementwise multiplication
- * @param[in] input_1_vect pointer to input vector 1
- * @param[in] input_2_vect pointer to input vector 2
- * @param[in] input_1_offset offset for input 1. Range: -127 to 128
- * @param[in] input_2_offset offset for input 2. Range: -127 to 128
- * @param[in,out] output pointer to output vector
- * @param[in] out_offset output offset. Range: -128 to 127
- * @param[in] out_mult output multiplier
- * @param[in] out_shift output shift
- * @param[in] out_activation_min minimum value to clamp output to. Min: -128
- * @param[in] out_activation_max maximum value to clamp output to. Max: 127
- * @param[in] block_size number of samples
- * @return The function returns RISCV_NMSIS_NN_SUCCESS
- *
- * @details Supported framework: TensorFlow Lite micro
- */
- riscv_nmsis_nn_status riscv_elementwise_mul_s8(const int8_t *input_1_vect,
- const int8_t *input_2_vect,
- const int32_t input_1_offset,
- const int32_t input_2_offset,
- int8_t *output,
- const int32_t out_offset,
- const int32_t out_mult,
- const int32_t out_shift,
- const int32_t out_activation_min,
- const int32_t out_activation_max,
- const int32_t block_size);
- /**
- * @brief s16 elementwise multiplication
- * @param[in] input_1_vect pointer to input vector 1
- * @param[in] input_2_vect pointer to input vector 2
- * @param[in] input_1_offset offset for input 1. Not used.
- * @param[in] input_2_offset offset for input 2. Not used.
- * @param[in,out] output pointer to output vector
- * @param[in] out_offset output offset. Not used.
- * @param[in] out_mult output multiplier
- * @param[in] out_shift output shift
- * @param[in] out_activation_min minimum value to clamp output to. Min: -32768
- * @param[in] out_activation_max maximum value to clamp output to. Max: 32767
- * @param[in] block_size number of samples
- * @return The function returns RISCV_NMSIS_NN_SUCCESS
- *
- * @details Supported framework: TensorFlow Lite micro
- */
- riscv_nmsis_nn_status riscv_elementwise_mul_s16(const int16_t *input_1_vect,
- const int16_t *input_2_vect,
- const int32_t input_1_offset,
- const int32_t input_2_offset,
- int16_t *output,
- const int32_t out_offset,
- const int32_t out_mult,
- const int32_t out_shift,
- const int32_t out_activation_min,
- const int32_t out_activation_max,
- const int32_t block_size);
- /**
- * @defgroup Acti Activation Functions
- *
- * Perform activation layers, including ReLU (Rectified Linear Unit),
- * sigmoid and tanh
- *
- */
- /**
- * @brief Q7 RELU function
- * @param[in,out] data pointer to input
- * @param[in] size number of elements
- */
- void riscv_relu_q7(int8_t *data, uint16_t size);
- /**
- * @brief s8 ReLU6 function
- * @param[in,out] data pointer to input
- * @param[in] size number of elements
- */
- void riscv_relu6_s8(int8_t *data, uint16_t size);
- /**
- * @brief Q15 RELU function
- * @param[in,out] data pointer to input
- * @param[in] size number of elements
- */
- void riscv_relu_q15(int16_t *data, uint16_t size);
- /**
- * @brief Q7 neural network activation function using direct table look-up
- * @param[in,out] data pointer to input
- * @param[in] size number of elements
- * @param[in] int_width bit-width of the integer part, assume to be smaller than 3
- * @param[in] type type of activation functions
- * @return none.
- */
- void riscv_nn_activations_direct_q7(q7_t *data, uint16_t size, uint16_t int_width, riscv_nn_activation_type type);
- /**
- * @brief Q15 neural network activation function using direct table look-up
- * @param[in,out] data pointer to input
- * @param[in] size number of elements
- * @param[in] int_width bit-width of the integer part, assume to be smaller than 3
- * @param[in] type type of activation functions
- * @return none.
- *
- * @details
- *
- * This is the direct table look-up approach.
- *
- * Assume here the integer part of the fixed-point is <= 3.
- * More than 3 does not make much sense, as it makes no difference from
- * saturation followed by any of these activation functions.
- */
- void riscv_nn_activations_direct_q15(q15_t *data, uint16_t size, uint16_t int_width, riscv_nn_activation_type type);
- /**
- * @brief s16 neural network activation function using direct table look-up
- * @param[in] input pointer to input data
- * @param[out] output pointer to output
- * @param[in] size number of elements
- * @param[in] left_shift bit-width of the integer part, assumed to be smaller than 3.
- * @param[in] type type of activation functions
- * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
- *
- * @details Supported framework: TensorFlow Lite for Microcontrollers.
- * This activation function must be bit precise congruent with the corresponding TFLM tanh and sigmoid activation
- * functions
- */
- riscv_nmsis_nn_status riscv_nn_activation_s16(const int16_t *input,
- int16_t *output,
- const int32_t size,
- const int32_t left_shift,
- const riscv_nn_activation_type type);
- /**
- * @defgroup Pooling Pooling Functions
- *
- * Perform pooling functions, including max pooling and average pooling
- *
- */
- /**
- * @brief Q7 max pooling function
- * @param[in] Im_in pointer to input tensor
- * @param[in] dim_im_in input tensor dimension
- * @param[in] ch_im_in number of input tensor channels
- * @param[in] dim_kernel filter kernel size
- * @param[in] padding padding sizes
- * @param[in] stride convolution stride
- * @param[in] dim_im_out output tensor dimension
- * @param[in,out] bufferA pointer to buffer space for input
- * @param[in,out] Im_out pointer to output tensor
- * @return none.
- *
- */
- void riscv_maxpool_q7_HWC(q7_t *Im_in,
- const uint16_t dim_im_in,
- const uint16_t ch_im_in,
- const uint16_t dim_kernel,
- const uint16_t padding,
- const uint16_t stride,
- const uint16_t dim_im_out,
- q7_t *bufferA,
- q7_t *Im_out);
- /**
- * @brief Q7 average pooling function
- * @param[in] Im_in pointer to input tensor
- * @param[in] dim_im_in input tensor dimension
- * @param[in] ch_im_in number of input tensor channels
- * @param[in] dim_kernel filter kernel size
- * @param[in] padding padding sizes
- * @param[in] stride convolution stride
- * @param[in] dim_im_out output tensor dimension
- * @param[in,out] bufferA pointer to buffer space for input
- * @param[in,out] Im_out pointer to output tensor
- * @return none.
- *
- */
- void riscv_avepool_q7_HWC(q7_t *Im_in,
- const uint16_t dim_im_in,
- const uint16_t ch_im_in,
- const uint16_t dim_kernel,
- const uint16_t padding,
- const uint16_t stride,
- const uint16_t dim_im_out,
- q7_t *bufferA,
- q7_t *Im_out);
- /**
- * @addtogroup Pooling
- *
- * Perform max and average pooling operations
- *
- */
- /**
- * @brief s8 average pooling function.
- *
- * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function
- * definition file to see if an additional buffer is required.
- * Optional function {API}_get_buffer_size() provides the buffer
- * size if an additional buffer is required.
- * The caller is expected to clear the buffer, if applicable, for security reasons.
- * @param[in] pool_params Pooling parameters
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN]
- * @param[in] input_data Input (activation) data pointer. Data type: int8
- * @param[in] filter_dims Filter tensor dimensions. Format: [H, W]
- * Argument N and C are not used.
- * @param[in] output_dims Output tensor dimensions. Format: [H, W, C_OUT]
- * Argument N is not used.
- * C_OUT equals C_IN.
- * @param[in, out] output_data Output data pointer. Data type: int8
- *
- * @return The function returns either
- * <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
- * <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
- *
- * @details
- * - Supported Framework: TensorFlow Lite
- *
- */
- riscv_nmsis_nn_status riscv_avgpool_s8(const nmsis_nn_context *ctx,
- const nmsis_nn_pool_params *pool_params,
- const nmsis_nn_dims *input_dims,
- const int8_t *input_data,
- const nmsis_nn_dims *filter_dims,
- const nmsis_nn_dims *output_dims,
- int8_t *output_data);
- /**
- * @brief Get the required buffer size for S8 average pooling function
- * @param[in] dim_dst_width output tensor dimension
- * @param[in] ch_src number of input tensor channels
- * @return The function returns required buffer size in bytes
- *
- */
- int32_t riscv_avgpool_s8_get_buffer_size(const int dim_dst_width, const int ch_src);
- /**
- * @brief Get the required buffer size for S8 average pooling function for processors with DSP extension.
- * Refer to riscv_avgpool_s8_get_buffer_size() for function argument details.
- *
- * @note Intended for compilation on Host. If compiling for a RISC-V target, use
- * riscv_avgpool_s8_get_buffer_size().
- *
- */
- int32_t riscv_avgpool_s8_get_buffer_size_dsp(const int dim_dst_width, const int ch_src);
- /**
- * @brief s16 average pooling function.
- *
- * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function
- * definition file to see if an additional buffer is required.
- * Optional function {API}_get_buffer_size() provides the buffer
- * size if an additional buffer is required.
- * The caller is expected to clear the buffer, if applicable, for security reasons.
- * @param[in] pool_params Pooling parameters
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN]
- * @param[in] input_data Input (activation) data pointer. Data type: int16
- * @param[in] filter_dims Filter tensor dimensions. Format: [H, W]
- * Argument N and C are not used.
- * @param[in] output_dims Output tensor dimensions. Format: [H, W, C_OUT]
- * Argument N is not used.
- * C_OUT equals C_IN.
- * @param[in, out] output_data Output data pointer. Data type: int16
- *
- * @return The function returns
- * <code>RISCV_NMSIS_NN_SUCCESS</code> - Successful operation
- * <code>RISCV_NMSIS_NN_ARG_ERROR</code> - In case of invalid arguments
- *
- * @details
- * - Supported Framework: TensorFlow Lite
- *
- */
- riscv_nmsis_nn_status riscv_avgpool_s16(const nmsis_nn_context *ctx,
- const nmsis_nn_pool_params *pool_params,
- const nmsis_nn_dims *input_dims,
- const int16_t *input_data,
- const nmsis_nn_dims *filter_dims,
- const nmsis_nn_dims *output_dims,
- int16_t *output_data);
- /**
- * @brief Get the required buffer size for S16 average pooling function
- * @param[in] dim_dst_width output tensor dimension
- * @param[in] ch_src number of input tensor channels
- * @return The function returns required buffer size in bytes
- *
- */
- int32_t riscv_avgpool_s16_get_buffer_size(const int dim_dst_width, const int ch_src);
- /**
- * @brief Get the required buffer size for S16 average pooling function for processors with DSP extension.
- * Refer to riscv_avgpool_s16_get_buffer_size() for function argument details.
- *
- * @note Intended for compilation on Host. If compiling for a RISC-V target, use
- * riscv_avgpool_s16_get_buffer_size().
- *
- */
- int32_t riscv_avgpool_s16_get_buffer_size_dsp(const int dim_dst_width, const int ch_src);
- /**
- * @brief s8 max pooling function.
- *
- * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function
- * definition file to see if an additional buffer is required.
- * Optional function {API}_get_buffer_size() provides the buffer
- * size if an additional buffer is required.
- * The caller is expected to clear the buffer, if applicable, for security reasons.
- * @param[in] pool_params Pooling parameters
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN]
- * @param[in] input_data Input (activation) data pointer. The input tensor must not
- * overlap with the output tensor. Data type: int8
- * @param[in] filter_dims Filter tensor dimensions. Format: [H, W]
- * Argument N and C are not used.
- * @param[in] output_dims Output tensor dimensions. Format: [H, W, C_OUT]
- * Argument N is not used.
- * C_OUT equals C_IN.
- * @param[in, out] output_data Output data pointer. Data type: int8
- *
- * @return The function returns either
- * <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
- * <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
- *
- * @details
- * - Supported Framework: TensorFlow Lite
- *
- */
- riscv_nmsis_nn_status riscv_max_pool_s8(const nmsis_nn_context *ctx,
- const nmsis_nn_pool_params *pool_params,
- const nmsis_nn_dims *input_dims,
- const int8_t *input_data,
- const nmsis_nn_dims *filter_dims,
- const nmsis_nn_dims *output_dims,
- int8_t *output_data);
- /**
- * @brief s16 max pooling function.
- *
- * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function
- * definition file to see if an additional buffer is required.
- * Optional function {API}_get_buffer_size() provides the buffer
- * size if an additional buffer is required.
- * The caller is expected to clear the buffer, if applicable, for security reasons.
- * @param[in] pool_params Pooling parameters
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN]
- * @param[in] src Input (activation) data pointer. The input tensor must not
- * overlap with the output tensor. Data type: int16
- * @param[in] filter_dims Filter tensor dimensions. Format: [H, W]
- * Argument N and C are not used.
- * @param[in] output_dims Output tensor dimensions. Format: [H, W, C_OUT]
- * Argument N is not used.
- * C_OUT equals C_IN.
- * @param[in, out] dst Output data pointer. Data type: int16
- *
- * @return The function returns either
- * <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
- * <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
- *
- * @details
- * - Supported Framework: TensorFlow Lite
- *
- */
- riscv_nmsis_nn_status riscv_max_pool_s16(const nmsis_nn_context *ctx,
- const nmsis_nn_pool_params *pool_params,
- const nmsis_nn_dims *input_dims,
- const int16_t *src,
- const nmsis_nn_dims *filter_dims,
- const nmsis_nn_dims *output_dims,
- int16_t *dst);
- /**
- * @defgroup Softmax Softmax Functions
- *
- *
- */
- /**
- * @brief Q7 softmax function
- * @param[in] vec_in pointer to input vector
- * @param[in] dim_vec input vector dimension
- * @param[out] p_out pointer to output vector
- *
- * @note This function is an optimized version which is not bit-accurate with
- * TensorFlow Lite's kernel
- *
- */
- void riscv_softmax_q7(const q7_t *vec_in, const uint16_t dim_vec, q7_t *p_out);
- /**
- * @brief Q7 softmax function with batch parameter
- * @param[in] vec_in pointer to input vector
- * @param[in] nb_batches number of batches
- * @param[in] dim_vec input vector dimension
- * @param[out] p_out pointer to output vector
- * @return none.
- *
- * @note This function is an optimized version which is not bit-accurate with
- * TensorFlow Lite's kernel
- *
- */
- void riscv_softmax_with_batch_q7(const q7_t *vec_in, const uint16_t nb_batches, const uint16_t dim_vec, q7_t *p_out);
- /**
- * @brief Q15 softmax function
- * @param[in] vec_in pointer to input vector
- * @param[in] dim_vec input vector dimension
- * @param[out] p_out pointer to output vector
- * @return none.
- *
- * @note This function is an optimized version which is not bit-accurate with
- * TensorFlow Lite's kernel
- *
- */
- void riscv_softmax_q15(const q15_t *vec_in, const uint16_t dim_vec, q15_t *p_out);
- /**
- * @brief S8 softmax function
- * @param[in] input Pointer to the input tensor
- * @param[in] num_rows Number of rows in the input tensor
- * @param[in] row_size Number of elements in each input row
- * @param[in] mult Input quantization multiplier
- * @param[in] shift Input quantization shift within the range [0, 31]
- * @param[in] diff_min Minimum difference with max in row. Used to check if
- * the quantized exponential operation can be performed
- * @param[out] output Pointer to the output tensor
- *
- * @note Supported framework: TensorFlow Lite micro (bit-accurate)
- *
- */
- void riscv_softmax_s8(const int8_t *input,
- const int32_t num_rows,
- const int32_t row_size,
- const int32_t mult,
- const int32_t shift,
- const int32_t diff_min,
- int8_t *output);
- /**
- * @brief S8 to s16 softmax function
- * @param[in] input Pointer to the input tensor
- * @param[in] num_rows Number of rows in the input tensor
- * @param[in] row_size Number of elements in each input row
- * @param[in] mult Input quantization multiplier
- * @param[in] shift Input quantization shift within the range [0, 31]
- * @param[in] diff_min Minimum difference with max in row. Used to check if
- * the quantized exponential operation can be performed
- * @param[out] output Pointer to the output tensor
- *
- * @note Supported framework: TensorFlow Lite micro (bit-accurate)
- *
- */
- void riscv_softmax_s8_s16(const int8_t *input,
- const int32_t num_rows,
- const int32_t row_size,
- const int32_t mult,
- const int32_t shift,
- const int32_t diff_min,
- int16_t *output);
- /**
- * @brief S16 softmax function
- * @param[in] input Pointer to the input tensor
- * @param[in] num_rows Number of rows in the input tensor
- * @param[in] row_size Number of elements in each input row
- * @param[in] mult Input quantization multiplier
- * @param[in] shift Input quantization shift within the range [0, 31]
- * @param[in] softmax_params Softmax s16 layer parameters with two pointers to LUTs specified below.
- * For indexing the high 9 bits are used and 7 remaining for interpolation.
- * That means 512 entries for the 9-bit indexing and 1 extra for interpolation, i.e. 513
- * values for each LUT.
- * - Lookup table for exp(x), where x uniform distributed between [-10.0 , 0.0]
- * - Lookup table for 1 / (1 + x), where x uniform distributed between [0.0 , 1.0]
- * @param[out] output Pointer to the output tensor
- * @return The function returns
- * <code>RISCV_NMSIS_NN_ARG_ERROR</code> Argument error check failed
- * <code>RISCV_NMSIS_NN_SUCCESS</code> - Successful operation
- *
- * @note Supported framework: TensorFlow Lite micro (bit-accurate)
- *
- */
- riscv_nmsis_nn_status riscv_softmax_s16(const int16_t *input,
- const int32_t num_rows,
- const int32_t row_size,
- const int32_t mult,
- const int32_t shift,
- const nmsis_nn_softmax_lut_s16 *softmax_params,
- int16_t *output);
- /**
- * @brief U8 softmax function
- * @param[in] input Pointer to the input tensor
- * @param[in] num_rows Number of rows in the input tensor
- * @param[in] row_size Number of elements in each input row
- * @param[in] mult Input quantization multiplier
- * @param[in] shift Input quantization shift within the range [0, 31]
- * @param[in] diff_min Minimum difference with max in row. Used to check if
- * the quantized exponential operation can be performed
- * @param[out] output Pointer to the output tensor
- *
- * @note Supported framework: TensorFlow Lite micro (bit-accurate)
- *
- */
- void riscv_softmax_u8(const uint8_t *input,
- const int32_t num_rows,
- const int32_t row_size,
- const int32_t mult,
- const int32_t shift,
- const int32_t diff_min,
- uint8_t *output);
- /**
- * @defgroup Reshape Reshape Functions
- *
- */
- /**
- * @brief Reshape a s8 vector into another with different shape
- * @param[in] input points to the s8 input vector
- * @param[out] output points to the s8 output vector
- * @param[in] total_size total size of the input and output vectors in bytes
- *
- * @note The output is expected to be in a memory area that does not overlap with the input's
- *
- */
- void riscv_reshape_s8(const int8_t *input, int8_t *output, const uint32_t total_size);
- /**
- * @defgroup Transpose Transpose Functions
- *
- */
- /**
- * @brief Basic transpose function
- *
- * @param[in] input_data Input (activation) data pointer. Data type: int8
- * @param[out] output_data Output data pointer. Data type: int8
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
- * @param[in] output_dims Output tensor dimensions. Format may be arbitrary relative to input format.
- * The output dimension will depend on the permutation dimensions.
- * In other words the out dimensions are the result of applying the permutation
- * to the input dimensions.
- * @param[in] transpose_params Transpose parameters. Contains permutation dimensions.
- *
- * @return The function returns either
- * <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
- * <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
- *
- */
- riscv_nmsis_nn_status riscv_transpose_s8(const int8_t *input_data,
- int8_t *const output_data,
- const nmsis_nn_dims *const input_dims,
- const nmsis_nn_dims *const output_dims,
- const nmsis_nn_transpose_params *const transpose_params);
- /**
- * @defgroup Concatenation Concatenation Functions
- *
- */
- /**
- * @brief int8/uint8 concatenation function to be used for concatenating N-tensors along the X axis
- * This function should be called for each input tensor to concatenate. The argument offset_x
- * will be used to store the input tensor in the correct position in the output tensor
- *
- * i.e. offset_x = 0
- * for(i = 0; i < num_input_tensors; ++i)
- * {
- * riscv_concatenation_s8_x(&input[i], ..., &output, ..., ..., offset_x)
- * offset_x += input_x[i]
- * }
- *
- * This function assumes that the output tensor has:
- * -# The same height of the input tensor
- * -# The same number of channels of the input tensor
- * -# The same batch size of the input tensor
- *
- * Unless specified otherwise, arguments are mandatory.
- *
- * @note This function, data layout independent, can be used to concatenate either int8 or uint8 tensors because it
- * does not involve any arithmetic operation
- *
- * @param[in] input Pointer to input tensor. Input tensor must not overlap with the output tensor.
- * @param[in] input_x Width of input tensor
- * @param[in] input_y Height of input tensor
- * @param[in] input_z Channels in input tensor
- * @param[in] input_w Batch size in input tensor
- * @param[out] output Pointer to output tensor. Expected to be at least
- * (input_x * input_y * input_z * input_w) + offset_x
- * bytes.
- * @param[in] output_x Width of output tensor
- * @param[in] offset_x The offset (in number of elements) on the X axis to start concatenating the input tensor
- * It is user responsibility to provide the correct value
- *
- * <b> Input constraints</b>
- * offset_x is less than output_x
- *
- */
- void riscv_concatenation_s8_x(const int8_t *input,
- const uint16_t input_x,
- const uint16_t input_y,
- const uint16_t input_z,
- const uint16_t input_w,
- int8_t *output,
- const uint16_t output_x,
- const uint32_t offset_x);
- /**
- * @brief int8/uint8 concatenation function to be used for concatenating N-tensors along the Y axis
- * This function should be called for each input tensor to concatenate. The argument offset_y
- * will be used to store the input tensor in the correct position in the output tensor
- *
- * i.e. offset_y = 0
- * for(i = 0; i < num_input_tensors; ++i)
- * {
- * riscv_concatenation_s8_y(&input[i], ..., &output, ..., ..., offset_y)
- * offset_y += input_y[i]
- * }
- *
- * This function assumes that the output tensor has:
- * -# The same width of the input tensor
- * -# The same number of channels of the input tensor
- * -# The same batch size of the input tensor
- *
- * Unless specified otherwise, arguments are mandatory.
- *
- * @note This function, data layout independent, can be used to concatenate either int8 or uint8 tensors because it
- * does not involve any arithmetic operation
- *
- * @param[in] input Pointer to input tensor. Input tensor must not overlap with the output tensor.
- * @param[in] input_x Width of input tensor
- * @param[in] input_y Height of input tensor
- * @param[in] input_z Channels in input tensor
- * @param[in] input_w Batch size in input tensor
- * @param[out] output Pointer to output tensor. Expected to be at least
- * (input_z * input_w * input_x * input_y) + offset_y
- * bytes.
- * @param[in] output_y Height of output tensor
- * @param[in] offset_y The offset on the Y axis to start concatenating the input tensor
- * It is user responsibility to provide the correct value
- *
- * <b> Input constraints</b>
- * offset_y is less than output_y
- *
- */
- void riscv_concatenation_s8_y(const int8_t *input,
- const uint16_t input_x,
- const uint16_t input_y,
- const uint16_t input_z,
- const uint16_t input_w,
- int8_t *output,
- const uint16_t output_y,
- const uint32_t offset_y);
- /**
- * @brief int8/uint8 concatenation function to be used for concatenating N-tensors along the Z axis
- * This function should be called for each input tensor to concatenate. The argument offset_z
- * will be used to store the input tensor in the correct position in the output tensor
- *
- * i.e. offset_z = 0
- * for(i = 0; i < num_input_tensors; ++i)
- * {
- * riscv_concatenation_s8_z(&input[i], ..., &output, ..., ..., offset_z)
- * offset_z += input_z[i]
- * }
- *
- * This function assumes that the output tensor has:
- * -# The same width of the input tensor
- * -# The same height of the input tensor
- * -# The same batch size of the input tensor
- *
- * Unless specified otherwise, arguments are mandatory.
- *
- * @note This function, data layout independent, can be used to concatenate either int8 or uint8 tensors because it
- * does not involve any arithmetic operation
- *
- * @param[in] input Pointer to input tensor. Input tensor must not overlap with output tensor.
- * @param[in] input_x Width of input tensor
- * @param[in] input_y Height of input tensor
- * @param[in] input_z Channels in input tensor
- * @param[in] input_w Batch size in input tensor
- * @param[out] output Pointer to output tensor. Expected to be at least
- * (input_x * input_y * input_z * input_w) + offset_z
- * bytes.
- * @param[in] output_z Channels in output tensor
- * @param[in] offset_z The offset on the Z axis to start concatenating the input tensor
- * It is user responsibility to provide the correct value
- *
- * <b> Input constraints</b>
- * offset_z is less than output_z
- *
- */
- void riscv_concatenation_s8_z(const int8_t *input,
- const uint16_t input_x,
- const uint16_t input_y,
- const uint16_t input_z,
- const uint16_t input_w,
- int8_t *output,
- const uint16_t output_z,
- const uint32_t offset_z);
- /**
- * @brief int8/uint8 concatenation function to be used for concatenating N-tensors along the W axis (Batch size)
- * This function should be called for each input tensor to concatenate. The argument offset_w
- * will be used to store the input tensor in the correct position in the output tensor
- *
- * i.e. offset_w = 0
- * for(i = 0; i < num_input_tensors; ++i)
- * {
- * riscv_concatenation_s8_w(&input[i], ..., &output, ..., ..., offset_w)
- * offset_w += input_w[i]
- * }
- *
- * This function assumes that the output tensor has:
- * -# The same width of the input tensor
- * -# The same height of the input tensor
- * -# The same number of channels of the input tensor
- *
- * Unless specified otherwise, arguments are mandatory.
- *
- * @note This function, data layout independent, can be used to concatenate either int8 or uint8 tensors because it
- * does not involve any arithmetic operation
- *
- * @param[in] input Pointer to input tensor
- * @param[in] input_x Width of input tensor
- * @param[in] input_y Height of input tensor
- * @param[in] input_z Channels in input tensor
- * @param[in] input_w Batch size in input tensor
- * @param[out] output Pointer to output tensor. Expected to be at least
- * input_x * input_y * input_z * input_w
- * bytes.
- * @param[in] offset_w The offset on the W axis to start concatenating the input tensor
- * It is user responsibility to provide the correct value
- *
- */
- void riscv_concatenation_s8_w(const int8_t *input,
- const uint16_t input_x,
- const uint16_t input_y,
- const uint16_t input_z,
- const uint16_t input_w,
- int8_t *output,
- const uint32_t offset_w);
- /**
- * @defgroup SVDF SVDF Functions
- *
- */
- /**
- * @brief s8 SVDF function with 8 bit state tensor and 8 bit time weights
- *
- * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function
- * definition file to see if an additional buffer is required.
- * Optional function riscv_fully_connected_s8_get_buffer_size() provides the buffer
- * size if an additional buffer is required.
- * The caller is expected to clear the buffer, if applicable, for security reasons.
- * @param[in] input_ctx Temporary scratch buffer
- * The caller is expected to clear the buffer, if applicable, for security reasons.
- * @param[in] output_ctx Temporary output scratch buffer
- * The caller is expected to clear the buffer, if applicable, for security reasons.
- * @param[in] svdf_params SVDF Parameters
- * Range of svdf_params->input_offset : [-128, 127]
- * Range of svdf_params->output_offset : [-128, 127]
- * @param[in] input_quant_params Input quantization parameters
- * @param[in] output_quant_params Output quantization parameters
- * @param[in] input_dims Input tensor dimensions
- * @param[in] input_data Pointer to input tensor
- * @param[in] state_dims State tensor dimensions
- * @param[in] state_data Pointer to state tensor
- * @param[in] weights_feature_dims Weights (feature) tensor dimensions
- * @param[in] weights_feature_data Pointer to the weights (feature) tensor
- * @param[in] weights_time_dims Weights (time) tensor dimensions
- * @param[in] weights_time_data Pointer to the weights (time) tensor
- * @param[in] bias_dims Bias tensor dimensions
- * @param[in] bias_data Pointer to bias tensor
- * @param[in] output_dims Output tensor dimensions
- * @param[out] output_data Pointer to the output tensor
- *
- * @return The function returns either
- * <code>RISCV_NMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
- * <code>RISCV_NMSIS_NN_SUCCESS</code> on successful completion.
- *
- * @details
- * 1. Supported framework: TensorFlow Lite micro
- */
- riscv_nmsis_nn_status riscv_svdf_s8(const nmsis_nn_context *ctx,
- const nmsis_nn_context *input_ctx,
- const nmsis_nn_context *output_ctx,
- const nmsis_nn_svdf_params *svdf_params,
- const nmsis_nn_per_tensor_quant_params *input_quant_params,
- const nmsis_nn_per_tensor_quant_params *output_quant_params,
- const nmsis_nn_dims *input_dims,
- const int8_t *input_data,
- const nmsis_nn_dims *state_dims,
- int8_t *state_data,
- const nmsis_nn_dims *weights_feature_dims,
- const int8_t *weights_feature_data,
- const nmsis_nn_dims *weights_time_dims,
- const int8_t *weights_time_data,
- const nmsis_nn_dims *bias_dims,
- const int32_t *bias_data,
- const nmsis_nn_dims *output_dims,
- int8_t *output_data);
- /**
- * @brief s8 SVDF function with 16 bit state tensor and 16 bit time weights
- *
- * @param[in] input_ctx Temporary scratch buffer
- * The caller is expected to clear the buffer, if applicable, for security reasons.
- * @param[in] output_ctx Temporary output scratch buffer
- * The caller is expected to clear the buffer, if applicable, for security reasons.
- * @param[in] svdf_params SVDF Parameters
- * Range of svdf_params->input_offset : [-128, 127]
- * Range of svdf_params->output_offset : [-128, 127]
- * @param[in] input_quant_params Input quantization parameters
- * @param[in] output_quant_params Output quantization parameters
- * @param[in] input_dims Input tensor dimensions
- * @param[in] input_data Pointer to input tensor
- * @param[in] state_dims State tensor dimensions
- * @param[in] state_data Pointer to state tensor
- * @param[in] weights_feature_dims Weights (feature) tensor dimensions
- * @param[in] weights_feature_data Pointer to the weights (feature) tensor
- * @param[in] weights_time_dims Weights (time) tensor dimensions
- * @param[in] weights_time_data Pointer to the weights (time) tensor
- * @param[in] bias_dims Bias tensor dimensions
- * @param[in] bias_data Pointer to bias tensor
- * @param[in] output_dims Output tensor dimensions
- * @param[out] output_data Pointer to the output tensor
- *
- * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
- *
- * @details
- * 1. Supported framework: TensorFlow Lite micro
- */
- riscv_nmsis_nn_status riscv_svdf_state_s16_s8(const nmsis_nn_context *input_ctx,
- const nmsis_nn_context *output_ctx,
- const nmsis_nn_svdf_params *svdf_params,
- const nmsis_nn_per_tensor_quant_params *input_quant_params,
- const nmsis_nn_per_tensor_quant_params *output_quant_params,
- const nmsis_nn_dims *input_dims,
- const int8_t *input_data,
- const nmsis_nn_dims *state_dims,
- int16_t *state_data,
- const nmsis_nn_dims *weights_feature_dims,
- const int8_t *weights_feature_data,
- const nmsis_nn_dims *weights_time_dims,
- const int16_t *weights_time_data,
- const nmsis_nn_dims *bias_dims,
- const int32_t *bias_data,
- const nmsis_nn_dims *output_dims,
- int8_t *output_data);
- /**
- * @brief Get size of additional buffer required by riscv_svdf_s8().
- * @param[in] filter_dims dimension of filter
- * @return The function returns required buffer size in bytes
- *
- */
- int32_t riscv_svdf_s8_get_buffer_size(const nmsis_nn_dims *filter_dims);
- /**
- * @brief Get size of additional buffer required by riscv_svdf_s8() for processors with DSP extension.
- * Refer to riscv_svdf_s8_get_buffer_size() for function argument details.
- *
- * @note Intended for compilation on Host. If compiling for a RISC-V target, use
- * riscv_svdf_s8_get_buffer_size().
- *
- */
- int32_t riscv_svdf_s8_get_buffer_size_dsp(const nmsis_nn_dims *filter_dims);
- /**
- * @defgroup LSTM LSTM Layer Functions
- *
- */
- /**
- * @brief LSTM unidirectional function with 8 bit input and output and 16 bit gate output, 32 bit bias.
- *
- * @param[in] input Pointer to input data
- * @param[out] output Pointer to output data
- * @param[in] params Struct containing all information about the lstm operator, see riscv_nn_types.
- * @param[in] buffers Struct containing pointers to all temporary scratch buffers needed for the
- * lstm operator, see riscv_nn_types.
- *
- *
- * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
- *
- * @details
- * 1. Supported framework: TensorFlow Lite Micro
- *
- */
- riscv_nmsis_nn_status riscv_lstm_unidirectional_s8(const int8_t *input,
- int8_t *output,
- const nmsis_nn_lstm_params *params,
- nmsis_nn_lstm_context *buffers);
- /**
- * @brief LSTM unidirectional function with 16 bit input and output and 16 bit gate output, 64 bit bias.
- *
- * @param[in] input Pointer to input data
- * @param[out] output Pointer to output data
- * @param[in] params Struct containing all information about the lstm operator, see riscv_nn_types.
- * @param[in] buffers Struct containing pointers to all temporary scratch buffers needed for the
- * lstm operator, see riscv_nn_types.
- *
- *
- * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
- *
- * @details
- * 1. Supported framework: TensorFlow Lite Micro
- *
- */
- riscv_nmsis_nn_status riscv_lstm_unidirectional_s16(const int16_t *input,
- int16_t *output,
- const nmsis_nn_lstm_params *params,
- nmsis_nn_lstm_context *buffers);
- /**
- * @brief Batch matmul function with 8 bit input and output.
- *
- * @param[in] ctx Temporary scratch buffer
- * The caller is expected to clear the buffer, if applicable, for security reasons.
- * Optional function riscv_fully_connected_s8_get_buffer_size() provides the buffer
- * size if an additional buffer is required.
- * @param[in] bmm_params Batch matmul Parameters
- * Adjoint flags are currently unused.
- * @param[in] quant_params Quantization parameters
- * @param[in] input_lhs_dims Input lhs tensor dimensions.
- * This should be NHWC where lhs C = rhs C
- * @param[in] input_lhs Pointer to input tensor
- * @param[in] input_rhs_dims Input rhs tensor dimensions.
- * This is expected to be transposed so
- * should be NHWC where lhs C = rhs C
- * @param[in] input_rhs Pointer to transposed input tensor
- * @param[in] output_dims Output tensor dimensions
- * @param[out] output Pointer to the output tensor
- *
- * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
- *
- * @details
- * 1. Supported framework: TensorFlow Lite Micro
- * 2. Performs row * row matrix multiplication with the RHS transposed.
- *
- */
- riscv_nmsis_nn_status riscv_batch_matmul_s8(const nmsis_nn_context *ctx,
- const nmsis_nn_bmm_params *bmm_params,
- const nmsis_nn_per_tensor_quant_params *quant_params,
- const nmsis_nn_dims *input_lhs_dims,
- const int8_t *input_lhs,
- const nmsis_nn_dims *input_rhs_dims,
- const int8_t *input_rhs,
- const nmsis_nn_dims *output_dims,
- int8_t *output);
- /**
- * @brief Batch matmul function with 16 bit input and output.
- *
- * @param[in] ctx Temporary scratch buffer
- * The caller is expected to clear the buffer, if applicable, for security reasons.
- * Optional function riscv_fully_connected_s8_get_buffer_size() provides the buffer
- * size if an additional buffer is required.
- * @param[in] bmm_params Batch matmul Parameters
- * Adjoint flags are currently unused.
- * @param[in] quant_params Quantization parameters
- * @param[in] input_lhs_dims Input lhs tensor dimensions.
- * This should be NHWC where LHS.C = RHS.C
- * @param[in] input_lhs Pointer to input tensor
- * @param[in] input_rhs_dims Input rhs tensor dimensions.
- * This is expected to be transposed so
- * should be NHWC where LHS.C = RHS.C
- * @param[in] input_rhs Pointer to transposed input tensor
- * @param[in] output_dims Output tensor dimensions
- * @param[out] output Pointer to the output tensor
- *
- * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
- *
- * @details
- * 1. Supported framework: TensorFlow Lite Micro
- * 2. Performs row * row matrix multiplication with the RHS transposed.
- *
- */
- riscv_nmsis_nn_status riscv_batch_matmul_s16(const nmsis_nn_context *ctx,
- const nmsis_nn_bmm_params *bmm_params,
- const nmsis_nn_per_tensor_quant_params *quant_params,
- const nmsis_nn_dims *input_lhs_dims,
- const int16_t *input_lhs,
- const nmsis_nn_dims *input_rhs_dims,
- const int16_t *input_rhs,
- const nmsis_nn_dims *output_dims,
- int16_t *output);
- /**
- * @defgroup Pad Pad Layer Functions
- *
- */
- /**
- * @brief Expands the size of the input by adding constant values before and after the data, in all dimensions.
- *
- * @param[in] input Pointer to input data
- * @param[out] output Pointer to output data
- * @param[in] pad_value Value to pad with
- * @param[in] input_size Input tensor dimensions
- * @param[in] pre_pad Padding to apply before data in each dimension
- * @param[in] post_pad Padding to apply after data in each dimension
- *
- * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
- *
- */
- riscv_nmsis_nn_status riscv_pad_s8(const int8_t *input,
- int8_t *output,
- const int8_t pad_value,
- const nmsis_nn_dims *input_size,
- const nmsis_nn_dims *pre_pad,
- const nmsis_nn_dims *post_pad);
- /**
- * @brief Elementwise binary minimum with 8bit data.
- *
- * @param[in] ctx Temporary scratch buffer
- * The caller is expected to clear the buffer, if applicable, for security reasons.
- * @param[in] input_1_data Pointer to input1 tensor
- * @param[in] input_1_dims Input1 tensor dimensions
- * @param[in] input_2_data Pointer to input2 tensor
- * @param[in] input_2_dims Input2 tensor dimensions
- * @param[out] output_data Pointer to the output tensor
- * @param[in] output_dims Output tensor dimensions
- *
- * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
- *
- * @details
- * 1. Supported framework: TensorFlow Lite Micro
- *
- */
- riscv_nmsis_nn_status riscv_minimum_s8(const nmsis_nn_context *ctx,
- const int8_t *input_1_data,
- const nmsis_nn_dims *input_1_dims,
- const int8_t *input_2_data,
- const nmsis_nn_dims *input_2_dims,
- int8_t *output_data,
- const nmsis_nn_dims *output_dims);
- /**
- * @brief Elementwise binary maximum with 8bit data.
- *
- * @param[in] ctx Temporary scratch buffer
- * The caller is expected to clear the buffer, if applicable, for security reasons.
- * @param[in] input_1_data Pointer to input1 tensor
- * @param[in] input_1_dims Input1 tensor dimensions
- * @param[in] input_2_data Pointer to input2 tensor
- * @param[in] input_2_dims Input2 tensor dimensions
- * @param[out] output_data Pointer to the output tensor
- * @param[in] output_dims Output tensor dimensions
- *
- * @return The function returns <code>RISCV_NMSIS_NN_SUCCESS</code>
- *
- * @details
- * 1. Supported framework: TensorFlow Lite Micro
- *
- */
- riscv_nmsis_nn_status riscv_maximum_s8(const nmsis_nn_context *ctx,
- const int8_t *input_1_data,
- const nmsis_nn_dims *input_1_dims,
- const int8_t *input_2_data,
- const nmsis_nn_dims *input_2_dims,
- int8_t *output_data,
- const nmsis_nn_dims *output_dims);
- #ifdef __cplusplus
- }
- #endif
- #endif /* RISCV_NNFUNCTIONS_H */
|