tasks.c 344 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
76778677967806781678267836784678567866787678867896790679167926793679467956796679767986799680068016802680368046805680668076808680968106811681268136814681568166817681868196820682168226823682468256826682768286829683068316832683368346835683668376838683968406841684268436844684568466847684868496850685168526853685468556856685768586859686068616862686368646865686668676868686968706871687268736874687568766877687868796880688168826883688468856886688768886889689068916892689368946895689668976898689969006901690269036904690569066907690869096910691169126913691469156916691769186919692069216922692369246925692669276928692969306931693269336934693569366937693869396940694169426943694469456946694769486949695069516952695369546955695669576958695969606961696269636964696569666967696869696970697169726973697469756976697769786979698069816982698369846985698669876988698969906991699269936994699569966997699869997000700170027003700470057006700770087009701070117012701370147015701670177018701970207021702270237024702570267027702870297030703170327033703470357036703770387039704070417042704370447045704670477048704970507051705270537054705570567057705870597060706170627063706470657066706770687069707070717072707370747075707670777078707970807081708270837084708570867087708870897090709170927093709470957096709770987099710071017102710371047105710671077108710971107111711271137114711571167117711871197120712171227123712471257126712771287129713071317132713371347135713671377138713971407141714271437144714571467147714871497150715171527153715471557156715771587159716071617162716371647165716671677168716971707171717271737174717571767177717871797180718171827183718471857186718771887189719071917192719371947195719671977198719972007201720272037204720572067207720872097210721172127213721472157216721772187219722072217222722372247225722672277228722972307231723272337234723572367237723872397240724172427243724472457246724772487249725072517252725372547255725672577258725972607261726272637264726572667267726872697270727172727273727472757276727
77278727972807281728272837284728572867287728872897290729172927293729472957296729772987299730073017302730373047305730673077308730973107311731273137314731573167317731873197320732173227323732473257326732773287329733073317332733373347335733673377338733973407341734273437344734573467347734873497350735173527353735473557356735773587359736073617362736373647365736673677368736973707371737273737374737573767377737873797380738173827383738473857386738773887389739073917392739373947395739673977398739974007401740274037404740574067407740874097410741174127413741474157416741774187419742074217422742374247425742674277428742974307431743274337434743574367437743874397440744174427443744474457446744774487449745074517452745374547455745674577458745974607461746274637464746574667467746874697470747174727473747474757476747774787479748074817482748374847485748674877488748974907491749274937494749574967497749874997500750175027503750475057506750775087509751075117512751375147515751675177518751975207521752275237524752575267527752875297530753175327533753475357536753775387539754075417542754375447545754675477548754975507551755275537554755575567557755875597560756175627563756475657566756775687569757075717572757375747575757675777578757975807581758275837584758575867587758875897590759175927593759475957596759775987599760076017602760376047605760676077608760976107611761276137614761576167617761876197620762176227623762476257626762776287629763076317632763376347635763676377638763976407641764276437644764576467647764876497650765176527653765476557656765776587659766076617662766376647665766676677668766976707671767276737674767576767677767876797680768176827683768476857686768776887689769076917692769376947695769676977698769977007701770277037704770577067707770877097710771177127713771477157716771777187719772077217722772377247725772677277728772977307731773277337734773577367737773877397740774177427743774477457746774777487749775077517752775377547755775677577758775977607761776277637764776577667767776877697770777177727773777477757776777
77778777977807781778277837784778577867787778877897790779177927793779477957796779777987799780078017802780378047805780678077808780978107811781278137814781578167817781878197820782178227823782478257826782778287829783078317832783378347835783678377838783978407841784278437844784578467847784878497850785178527853785478557856785778587859786078617862786378647865786678677868786978707871787278737874787578767877787878797880788178827883788478857886788778887889789078917892789378947895789678977898789979007901790279037904790579067907790879097910791179127913791479157916791779187919792079217922792379247925792679277928792979307931793279337934793579367937793879397940794179427943794479457946794779487949795079517952795379547955795679577958795979607961796279637964796579667967796879697970797179727973797479757976797779787979798079817982798379847985798679877988798979907991799279937994799579967997799879998000800180028003800480058006800780088009801080118012801380148015801680178018801980208021802280238024802580268027802880298030803180328033803480358036803780388039804080418042804380448045804680478048804980508051805280538054805580568057805880598060806180628063806480658066806780688069807080718072807380748075807680778078807980808081808280838084808580868087808880898090809180928093809480958096809780988099810081018102810381048105810681078108810981108111811281138114811581168117811881198120812181228123812481258126812781288129813081318132813381348135813681378138813981408141814281438144814581468147814881498150815181528153815481558156815781588159816081618162816381648165816681678168816981708171817281738174817581768177817881798180818181828183818481858186818781888189819081918192819381948195819681978198819982008201820282038204820582068207820882098210821182128213821482158216821782188219822082218222822382248225822682278228822982308231823282338234823582368237823882398240824182428243824482458246824782488249825082518252825382548255825682578258825982608261826282638264826582668267826882698270827182728273827482758276827
782788279828082818282828382848285828682878288828982908291829282938294829582968297829882998300830183028303830483058306830783088309831083118312831383148315831683178318831983208321832283238324832583268327832883298330833183328333833483358336833783388339834083418342834383448345834683478348834983508351835283538354835583568357835883598360836183628363836483658366836783688369837083718372837383748375837683778378837983808381838283838384838583868387838883898390839183928393839483958396839783988399840084018402840384048405840684078408840984108411841284138414841584168417841884198420842184228423842484258426842784288429843084318432843384348435843684378438843984408441844284438444844584468447844884498450845184528453845484558456845784588459846084618462846384648465846684678468846984708471847284738474847584768477847884798480848184828483848484858486848784888489849084918492849384948495849684978498849985008501850285038504850585068507850885098510851185128513851485158516851785188519852085218522852385248525852685278528852985308531853285338534853585368537853885398540854185428543854485458546854785488549855085518552855385548555855685578558855985608561856285638564856585668567856885698570857185728573857485758576857785788579858085818582858385848585858685878588858985908591859285938594859585968597859885998600860186028603860486058606860786088609861086118612861386148615861686178618861986208621862286238624862586268627862886298630863186328633863486358636863786388639864086418642864386448645864686478648864986508651865286538654865586568657865886598660866186628663866486658666866786688669867086718672867386748675867686778678867986808681868286838684868586868687868886898690869186928693869486958696
  1. /*
  2. * FreeRTOS Kernel V11.1.0
  3. * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
  4. *
  5. * SPDX-License-Identifier: MIT
  6. *
  7. * Permission is hereby granted, free of charge, to any person obtaining a copy of
  8. * this software and associated documentation files (the "Software"), to deal in
  9. * the Software without restriction, including without limitation the rights to
  10. * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
  11. * the Software, and to permit persons to whom the Software is furnished to do so,
  12. * subject to the following conditions:
  13. *
  14. * The above copyright notice and this permission notice shall be included in all
  15. * copies or substantial portions of the Software.
  16. *
  17. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  18. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
  19. * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
  20. * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
  21. * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  22. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  23. *
  24. * https://www.FreeRTOS.org
  25. * https://github.com/FreeRTOS
  26. *
  27. */
  28. /* Standard includes. */
  29. #include <stdlib.h>
  30. #include <string.h>
  31. /* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
  32. * all the API functions to use the MPU wrappers. That should only be done when
  33. * task.h is included from an application file. */
  34. #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
  35. /* FreeRTOS includes. */
  36. #include "FreeRTOS.h"
  37. #include "task.h"
  38. #include "timers.h"
  39. #include "stack_macros.h"
  40. /* The default definitions are only available for non-MPU ports. The
  41. * reason is that the stack alignment requirements vary for different
  42. * architectures.*/
  43. #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configKERNEL_PROVIDED_STATIC_MEMORY == 1 ) && ( portUSING_MPU_WRAPPERS != 0 ) )
  44. #error configKERNEL_PROVIDED_STATIC_MEMORY cannot be set to 1 when using an MPU port. The vApplicationGet*TaskMemory() functions must be provided manually.
  45. #endif
  46. /* The MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
  47. * for the header files above, but not in this file, in order to generate the
  48. * correct privileged Vs unprivileged linkage and placement. */
  49. #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
  50. /* Set configUSE_STATS_FORMATTING_FUNCTIONS to 2 to include the stats formatting
  51. * functions but without including stdio.h here. */
  52. #if ( configUSE_STATS_FORMATTING_FUNCTIONS == 1 )
  53. /* At the bottom of this file are two optional functions that can be used
  54. * to generate human readable text from the raw data generated by the
  55. * uxTaskGetSystemState() function. Note the formatting functions are provided
  56. * for convenience only, and are NOT considered part of the kernel. */
  57. #include <stdio.h>
  58. #endif /* configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) */
#if ( configUSE_PREEMPTION == 0 )

/* If the cooperative scheduler is being used then a yield should not be
 * performed just because a higher priority task has been woken - tasks only
 * switch when they yield voluntarily.  Both macros therefore expand to
 * nothing. */
    #define taskYIELD_TASK_CORE_IF_USING_PREEMPTION( pxTCB )
    #define taskYIELD_ANY_CORE_IF_USING_PREEMPTION( pxTCB )
#else
    #if ( configNUMBER_OF_CORES == 1 )

/* This macro requests the running task pxTCB to yield.  In the single core
 * scheduler, a running task always runs on core 0 and portYIELD_WITHIN_API()
 * can be used to request the task running on core 0 to yield.  Therefore, pxTCB
 * is not used in this macro - it is cast to void only to suppress the
 * unused-parameter warning. */
        #define taskYIELD_TASK_CORE_IF_USING_PREEMPTION( pxTCB ) \
    do { \
        ( void ) ( pxTCB ); \
        portYIELD_WITHIN_API(); \
    } while( 0 )

/* Request a yield only if the woken task pxTCB has a strictly higher priority
 * than the task currently running on the (single) core; otherwise just mark
 * the branch for coverage testing. */
        #define taskYIELD_ANY_CORE_IF_USING_PREEMPTION( pxTCB ) \
    do { \
        if( pxCurrentTCB->uxPriority < ( pxTCB )->uxPriority ) \
        { \
            portYIELD_WITHIN_API(); \
        } \
        else \
        { \
            mtCOVERAGE_TEST_MARKER(); \
        } \
    } while( 0 )
    #else /* if ( configNUMBER_OF_CORES == 1 ) */

/* Yield the core on which this task is running (xTaskRunState holds the core
 * number while the task is running - see the TCB definition below). */
        #define taskYIELD_TASK_CORE_IF_USING_PREEMPTION( pxTCB )    prvYieldCore( ( pxTCB )->xTaskRunState )

/* Yield for the task if a running task has priority lower than this task. */
        #define taskYIELD_ANY_CORE_IF_USING_PREEMPTION( pxTCB )     prvYieldForTask( pxTCB )
    #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
#endif /* if ( configUSE_PREEMPTION == 0 ) */
/* Values that can be assigned to the ucNotifyState member of the TCB. */
#define taskNOT_WAITING_NOTIFICATION              ( ( uint8_t ) 0 ) /* Must be zero as it is the initialised value. */
#define taskWAITING_NOTIFICATION                  ( ( uint8_t ) 1 )
#define taskNOTIFICATION_RECEIVED                 ( ( uint8_t ) 2 )

/*
 * The value used to fill the stack of a task when the task is created.  This
 * is used purely for checking the high water mark for tasks.
 */
#define tskSTACK_FILL_BYTE                        ( 0xa5U )

/* Bits used to record how a task's stack and TCB were allocated, so the
 * kernel knows which parts (if any) it is allowed to free on deletion. */
#define tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB    ( ( uint8_t ) 0 )
#define tskSTATICALLY_ALLOCATED_STACK_ONLY        ( ( uint8_t ) 1 )
#define tskSTATICALLY_ALLOCATED_STACK_AND_TCB     ( ( uint8_t ) 2 )

/* If any of the following are set then task stacks are filled with a known
 * value so the high water mark can be determined.  If none of the following are
 * set then don't fill the stack so there is no unnecessary dependency on memset. */
#if ( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
    #define tskSET_NEW_STACKS_TO_KNOWN_VALUE    1
#else
    #define tskSET_NEW_STACKS_TO_KNOWN_VALUE    0
#endif

/*
 * Single-character task state codes used by vListTask when formatting the
 * task table (Running/Blocked/Ready/Deleted/Suspended).
 */
#define tskRUNNING_CHAR      ( 'X' )
#define tskBLOCKED_CHAR      ( 'B' )
#define tskREADY_CHAR        ( 'R' )
#define tskDELETED_CHAR      ( 'D' )
#define tskSUSPENDED_CHAR    ( 'S' )

/*
 * Some kernel aware debuggers require the data the debugger needs access to be
 * global, rather than file scope.  Defining portREMOVE_STATIC_QUALIFIER makes
 * every 'static' in this file expand to nothing for that purpose.
 */
#ifdef portREMOVE_STATIC_QUALIFIER
    #define static
#endif

/* The name allocated to the Idle task.  This can be overridden by defining
 * configIDLE_TASK_NAME in FreeRTOSConfig.h. */
#ifndef configIDLE_TASK_NAME
    #define configIDLE_TASK_NAME    "IDLE"
#endif
#if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )

/* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 0 then task selection is
 * performed in a generic way that is not optimised to any particular
 * microcontroller architecture. */

/* uxTopReadyPriority holds the priority of the highest priority ready
 * state task - only ever raised here; it is lowered lazily by the
 * select macro below. */
    #define taskRECORD_READY_PRIORITY( uxPriority ) \
    do {                                            \
        if( ( uxPriority ) > uxTopReadyPriority )   \
        {                                           \
            uxTopReadyPriority = ( uxPriority );    \
        }                                           \
    } while( 0 ) /* taskRECORD_READY_PRIORITY */

/*-----------------------------------------------------------*/

    #if ( configNUMBER_OF_CORES == 1 )

/* Pick the next pxCurrentTCB: walk down from the cached top priority to the
 * highest priority non-empty ready list, then take the next entry from it. */
        #define taskSELECT_HIGHEST_PRIORITY_TASK()                                            \
    do {                                                                                      \
        UBaseType_t uxTopPriority = uxTopReadyPriority;                                       \
                                                                                              \
        /* Find the highest priority queue that contains ready tasks.  The assert             \
         * fires if we hit priority 0 with every list empty (the idle task should             \
         * always be ready). */                                                               \
        while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopPriority ] ) ) != pdFALSE )      \
        {                                                                                     \
            configASSERT( uxTopPriority );                                                    \
            --uxTopPriority;                                                                  \
        }                                                                                     \
                                                                                              \
        /* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of              \
         * the same priority get an equal share of the processor time. */                     \
        listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \
        uxTopReadyPriority = uxTopPriority;                                                   \
    } while( 0 ) /* taskSELECT_HIGHEST_PRIORITY_TASK */
    #else /* if ( configNUMBER_OF_CORES == 1 ) */

/* In the SMP scheduler selection is per-core and done in a function. */
        #define taskSELECT_HIGHEST_PRIORITY_TASK( xCoreID )    prvSelectHighestPriorityTask( xCoreID )
    #endif /* if ( configNUMBER_OF_CORES == 1 ) */

/*-----------------------------------------------------------*/

/* Define away taskRESET_READY_PRIORITY() and portRESET_READY_PRIORITY() as
 * they are only required when a port optimised method of task selection is
 * being used. */
    #define taskRESET_READY_PRIORITY( uxPriority )
    #define portRESET_READY_PRIORITY( uxPriority, uxTopReadyPriority )

#else /* configUSE_PORT_OPTIMISED_TASK_SELECTION */

/* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 1 then task selection is
 * performed in a way that is tailored to the particular microcontroller
 * architecture being used - typically uxTopReadyPriority is a bitmap with one
 * bit per priority rather than a plain priority value. */

/* A port optimised version is provided.  Call the port defined macros. */
    #define taskRECORD_READY_PRIORITY( uxPriority )    portRECORD_READY_PRIORITY( ( uxPriority ), uxTopReadyPriority )

/*-----------------------------------------------------------*/

    #define taskSELECT_HIGHEST_PRIORITY_TASK()                                                  \
    do {                                                                                        \
        UBaseType_t uxTopPriority;                                                              \
                                                                                                \
        /* Find the highest priority list that contains ready tasks. */                         \
        portGET_HIGHEST_PRIORITY( uxTopPriority, uxTopReadyPriority );                          \
        configASSERT( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxTopPriority ] ) ) > 0 ); \
        listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) );   \
    } while( 0 )

/*-----------------------------------------------------------*/

/* A port optimised version is provided, call it only if the TCB being reset
 * is being referenced from a ready list.  If it is referenced from a delayed
 * or suspended list then it won't be in a ready list. */
    #define taskRESET_READY_PRIORITY( uxPriority )                                                     \
    do {                                                                                               \
        if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ ( uxPriority ) ] ) ) == ( UBaseType_t ) 0 ) \
        {                                                                                              \
            portRESET_READY_PRIORITY( ( uxPriority ), ( uxTopReadyPriority ) );                        \
        }                                                                                              \
    } while( 0 )

#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
/* pxDelayedTaskList and pxOverflowDelayedTaskList are switched when the tick
 * count overflows.  Tasks whose wake time wraps past zero are kept on the
 * overflow list until the wrap actually happens. */
#define taskSWITCH_DELAYED_LISTS()                                                \
    do {                                                                          \
        List_t * pxTemp;                                                          \
                                                                                  \
        /* The delayed tasks list should be empty when the lists are switched. */ \
        configASSERT( ( listLIST_IS_EMPTY( pxDelayedTaskList ) ) );               \
                                                                                  \
        pxTemp = pxDelayedTaskList;                                               \
        pxDelayedTaskList = pxOverflowDelayedTaskList;                            \
        pxOverflowDelayedTaskList = pxTemp;                                       \
        xNumOfOverflows = ( BaseType_t ) ( xNumOfOverflows + 1 );                 \
        prvResetNextTaskUnblockTime();                                            \
    } while( 0 )

/*-----------------------------------------------------------*/

/*
 * Place the task represented by pxTCB into the appropriate ready list for
 * the task.  It is inserted at the end of the list.  The trace macros bracket
 * the move and taskRECORD_READY_PRIORITY keeps uxTopReadyPriority up to date.
 */
#define prvAddTaskToReadyList( pxTCB )                                                                 \
    do {                                                                                               \
        traceMOVED_TASK_TO_READY_STATE( pxTCB );                                                       \
        taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority );                                            \
        listINSERT_END( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xStateListItem ) ); \
        tracePOST_MOVED_TASK_TO_READY_STATE( pxTCB );                                                  \
    } while( 0 )
/*
 * Several functions take a TaskHandle_t parameter that can optionally be NULL,
 * where NULL is used to indicate that the handle of the currently executing
 * task should be used in place of the parameter.  This macro simply checks to
 * see if the parameter is NULL and returns a pointer to the appropriate TCB.
 */
#define prvGetTCBFromHandle( pxHandle )    ( ( ( pxHandle ) == NULL ) ? pxCurrentTCB : ( pxHandle ) )

/* The item value of the event list item is normally used to hold the priority
 * of the task to which it belongs (coded to allow it to be held in reverse
 * priority order).  However, it is occasionally borrowed for other purposes.  It
 * is important its value is not updated due to a task priority change while it is
 * being used for another purpose.  The following bit definition is used to inform
 * the scheduler that the value should not be changed - in which case it is the
 * responsibility of whichever module is using the value to ensure it gets set back
 * to its original value when it is released.  The bit is the MSB of the
 * configured tick type width. */
#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
    #define taskEVENT_LIST_ITEM_VALUE_IN_USE    ( ( uint16_t ) 0x8000U )
#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
    #define taskEVENT_LIST_ITEM_VALUE_IN_USE    ( ( uint32_t ) 0x80000000U )
#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_64_BITS )
    #define taskEVENT_LIST_ITEM_VALUE_IN_USE    ( ( uint64_t ) 0x8000000000000000U )
#endif

/* Indicates that the task is not actively running on any core.
 * (xTaskRunState holds a core number >= 0 while running.) */
#define taskTASK_NOT_RUNNING           ( ( BaseType_t ) ( -1 ) )

/* Indicates that the task is actively running but scheduled to yield. */
#define taskTASK_SCHEDULED_TO_YIELD    ( ( BaseType_t ) ( -2 ) )

/* Returns pdTRUE if the task is actively running and not scheduled to yield.
 * In the single core build "running" simply means being pxCurrentTCB. */
#if ( configNUMBER_OF_CORES == 1 )
    #define taskTASK_IS_RUNNING( pxTCB )                          ( ( ( pxTCB ) == pxCurrentTCB ) ? ( pdTRUE ) : ( pdFALSE ) )
    #define taskTASK_IS_RUNNING_OR_SCHEDULED_TO_YIELD( pxTCB )    ( ( ( pxTCB ) == pxCurrentTCB ) ? ( pdTRUE ) : ( pdFALSE ) )
#else
    #define taskTASK_IS_RUNNING( pxTCB )                          ( ( ( ( pxTCB )->xTaskRunState >= ( BaseType_t ) 0 ) && ( ( pxTCB )->xTaskRunState < ( BaseType_t ) configNUMBER_OF_CORES ) ) ? ( pdTRUE ) : ( pdFALSE ) )
    #define taskTASK_IS_RUNNING_OR_SCHEDULED_TO_YIELD( pxTCB )    ( ( ( pxTCB )->xTaskRunState != taskTASK_NOT_RUNNING ) ? ( pdTRUE ) : ( pdFALSE ) )
#endif

/* Indicates that the task is an Idle task (bit in uxTaskAttributes). */
#define taskATTRIBUTE_IS_IDLE    ( UBaseType_t ) ( 1U << 0U )

/* When critical nesting is tracked in the TCB on an SMP build, the count for
 * the calling core's current task is accessed through these wrappers. */
#if ( ( configNUMBER_OF_CORES > 1 ) && ( portCRITICAL_NESTING_IN_TCB == 1 ) )
    #define portGET_CRITICAL_NESTING_COUNT()          ( pxCurrentTCBs[ portGET_CORE_ID() ]->uxCriticalNesting )
    #define portSET_CRITICAL_NESTING_COUNT( x )       ( pxCurrentTCBs[ portGET_CORE_ID() ]->uxCriticalNesting = ( x ) )
    #define portINCREMENT_CRITICAL_NESTING_COUNT()    ( pxCurrentTCBs[ portGET_CORE_ID() ]->uxCriticalNesting++ )
    #define portDECREMENT_CRITICAL_NESTING_COUNT()    ( pxCurrentTCBs[ portGET_CORE_ID() ]->uxCriticalNesting-- )
#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( portCRITICAL_NESTING_IN_TCB == 1 ) ) */

#define taskBITS_PER_BYTE    ( ( size_t ) 8 )

#if ( configNUMBER_OF_CORES > 1 )

/* Yields the given core.  This must be called from a critical section and xCoreID
 * must be valid.  This macro is not required in single core since there is only
 * one core to yield. */
    #define prvYieldCore( xCoreID )                                                          \
    do {                                                                                     \
        if( ( xCoreID ) == ( BaseType_t ) portGET_CORE_ID() )                                \
        {                                                                                    \
            /* Pending a yield for this core since it is in the critical section. */         \
            xYieldPendings[ ( xCoreID ) ] = pdTRUE;                                          \
        }                                                                                    \
        else                                                                                 \
        {                                                                                    \
            /* Request other core to yield if it is not requested before. */                 \
            if( pxCurrentTCBs[ ( xCoreID ) ]->xTaskRunState != taskTASK_SCHEDULED_TO_YIELD ) \
            {                                                                                \
                portYIELD_CORE( xCoreID );                                                   \
                pxCurrentTCBs[ ( xCoreID ) ]->xTaskRunState = taskTASK_SCHEDULED_TO_YIELD;   \
            }                                                                                \
        }                                                                                    \
    } while( 0 )
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
  297. /*
  298. * Task control block. A task control block (TCB) is allocated for each task,
  299. * and stores task state information, including a pointer to the task's context
  300. * (the task's run time environment, including register values)
  301. */
  302. typedef struct tskTaskControlBlock /* The old naming convention is used to prevent breaking kernel aware debuggers. */
  303. {
  304. volatile StackType_t * pxTopOfStack; /**< Points to the location of the last item placed on the tasks stack. THIS MUST BE THE FIRST MEMBER OF THE TCB STRUCT. */
  305. #if ( portUSING_MPU_WRAPPERS == 1 )
  306. xMPU_SETTINGS xMPUSettings; /**< The MPU settings are defined as part of the port layer. THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. */
  307. #endif
  308. #if ( configUSE_CORE_AFFINITY == 1 ) && ( configNUMBER_OF_CORES > 1 )
  309. UBaseType_t uxCoreAffinityMask; /**< Used to link the task to certain cores. UBaseType_t must have greater than or equal to the number of bits as configNUMBER_OF_CORES. */
  310. #endif
  311. ListItem_t xStateListItem; /**< The list that the state list item of a task is reference from denotes the state of that task (Ready, Blocked, Suspended ). */
  312. ListItem_t xEventListItem; /**< Used to reference a task from an event list. */
  313. UBaseType_t uxPriority; /**< The priority of the task. 0 is the lowest priority. */
  314. StackType_t * pxStack; /**< Points to the start of the stack. */
  315. #if ( configNUMBER_OF_CORES > 1 )
  316. volatile BaseType_t xTaskRunState; /**< Used to identify the core the task is running on, if the task is running. Otherwise, identifies the task's state - not running or yielding. */
  317. UBaseType_t uxTaskAttributes; /**< Task's attributes - currently used to identify the idle tasks. */
  318. #endif
  319. char pcTaskName[ configMAX_TASK_NAME_LEN ]; /**< Descriptive name given to the task when created. Facilitates debugging only. */
  320. #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
  321. BaseType_t xPreemptionDisable; /**< Used to prevent the task from being preempted. */
  322. #endif
  323. #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) )
  324. StackType_t * pxEndOfStack; /**< Points to the highest valid address for the stack. */
  325. #endif
  326. #if ( portCRITICAL_NESTING_IN_TCB == 1 )
  327. UBaseType_t uxCriticalNesting; /**< Holds the critical section nesting depth for ports that do not maintain their own count in the port layer. */
  328. #endif
  329. #if ( configUSE_TRACE_FACILITY == 1 )
  330. UBaseType_t uxTCBNumber; /**< Stores a number that increments each time a TCB is created. It allows debuggers to determine when a task has been deleted and then recreated. */
  331. UBaseType_t uxTaskNumber; /**< Stores a number specifically for use by third party trace code. */
  332. #endif
  333. #if ( configUSE_MUTEXES == 1 )
  334. UBaseType_t uxBasePriority; /**< The priority last assigned to the task - used by the priority inheritance mechanism. */
  335. UBaseType_t uxMutexesHeld;
  336. #endif
  337. #if ( configUSE_APPLICATION_TASK_TAG == 1 )
  338. TaskHookFunction_t pxTaskTag;
  339. #endif
  340. #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 )
  341. void * pvThreadLocalStoragePointers[ configNUM_THREAD_LOCAL_STORAGE_POINTERS ];
  342. #endif
  343. #if ( configGENERATE_RUN_TIME_STATS == 1 )
  344. configRUN_TIME_COUNTER_TYPE ulRunTimeCounter; /**< Stores the amount of time the task has spent in the Running state. */
  345. #endif
  346. #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )
  347. configTLS_BLOCK_TYPE xTLSBlock; /**< Memory block used as Thread Local Storage (TLS) Block for the task. */
  348. #endif
  349. #if ( configUSE_TASK_NOTIFICATIONS == 1 )
  350. volatile uint32_t ulNotifiedValue[ configTASK_NOTIFICATION_ARRAY_ENTRIES ];
  351. volatile uint8_t ucNotifyState[ configTASK_NOTIFICATION_ARRAY_ENTRIES ];
  352. #endif
  353. /* See the comments in FreeRTOS.h with the definition of
  354. * tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE. */
  355. #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
  356. uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the task is a statically allocated to ensure no attempt is made to free the memory. */
  357. #endif
  358. #if ( INCLUDE_xTaskAbortDelay == 1 )
  359. uint8_t ucDelayAborted;
  360. #endif
  361. #if ( configUSE_POSIX_ERRNO == 1 )
  362. int iTaskErrno;
  363. #endif
  364. } tskTCB;
  365. /* The old tskTCB name is maintained above then typedefed to the new TCB_t name
  366. * below to enable the use of older kernel aware debuggers. */
  367. typedef tskTCB TCB_t;
#if ( configNUMBER_OF_CORES == 1 )
    /* MISRA Ref 8.4.1 [Declaration shall be visible] */
    /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-84 */
    /* coverity[misra_c_2012_rule_8_4_violation] */
    portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB = NULL;
#else
    /* MISRA Ref 8.4.1 [Declaration shall be visible] */
    /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-84 */
    /* coverity[misra_c_2012_rule_8_4_violation] */
    portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCBs[ configNUMBER_OF_CORES ];

    /* In SMP builds pxCurrentTCB becomes an accessor for the calling core's
     * current task so the single-core code paths still compile. */
    #define pxCurrentTCB    xTaskGetCurrentTaskHandle()
#endif

/* Lists for ready and blocked tasks. --------------------
 * xDelayedTaskList1 and xDelayedTaskList2 could be moved to function scope but
 * doing so breaks some kernel aware debuggers and debuggers that rely on removing
 * the static qualifier. */
PRIVILEGED_DATA static List_t pxReadyTasksLists[ configMAX_PRIORITIES ]; /**< Prioritised ready tasks. */
PRIVILEGED_DATA static List_t xDelayedTaskList1;                         /**< Delayed tasks. */
PRIVILEGED_DATA static List_t xDelayedTaskList2;                         /**< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count. */
PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList;              /**< Points to the delayed task list currently being used. */
PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList;      /**< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */
PRIVILEGED_DATA static List_t xPendingReadyList;                         /**< Tasks that have been readied while the scheduler was suspended.  They will be moved to the ready list when the scheduler is resumed. */

#if ( INCLUDE_vTaskDelete == 1 )
    PRIVILEGED_DATA static List_t xTasksWaitingTermination; /**< Tasks that have been deleted - but their memory not yet freed. */
    PRIVILEGED_DATA static volatile UBaseType_t uxDeletedTasksWaitingCleanUp = ( UBaseType_t ) 0U;
#endif

#if ( INCLUDE_vTaskSuspend == 1 )
    PRIVILEGED_DATA static List_t xSuspendedTaskList; /**< Tasks that are currently suspended. */
#endif

/* Global POSIX errno.  Its value is changed upon context switching to match
 * the errno of the currently running task. */
#if ( configUSE_POSIX_ERRNO == 1 )
    int FreeRTOS_errno = 0;
#endif

/* Other file private variables. --------------------------------*/
PRIVILEGED_DATA static volatile UBaseType_t uxCurrentNumberOfTasks = ( UBaseType_t ) 0U;
PRIVILEGED_DATA static volatile TickType_t xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;
PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority = tskIDLE_PRIORITY;       /* Highest priority with a ready task (or a priority bitmap when port optimised selection is used). */
PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning = pdFALSE;
PRIVILEGED_DATA static volatile TickType_t xPendedTicks = ( TickType_t ) 0U;             /* Ticks that occurred while the scheduler was suspended, to be processed on resume. */
PRIVILEGED_DATA static volatile BaseType_t xYieldPendings[ configNUMBER_OF_CORES ] = { pdFALSE }; /* Per-core flag: a context switch is pending for that core. */
PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0;           /* Number of times the tick count has overflowed. */
PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U;
PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U;     /* Initialised to portMAX_DELAY before the scheduler starts. */
PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandles[ configNUMBER_OF_CORES ];           /**< Holds the handles of the idle tasks.  The idle tasks are created automatically when the scheduler is started. */

/* Improve support for OpenOCD.  The kernel tracks Ready tasks via priority lists.
 * For tracking the state of remote threads, OpenOCD uses uxTopUsedPriority
 * to determine the number of priority lists to read back from the remote target. */
static const volatile UBaseType_t uxTopUsedPriority = configMAX_PRIORITIES - 1U;

/* Context switches are held pending while the scheduler is suspended.  Also,
 * interrupts must not manipulate the xStateListItem of a TCB, or any of the
 * lists the xStateListItem can be referenced from, if the scheduler is suspended.
 * If an interrupt needs to unblock a task while the scheduler is suspended then it
 * moves the task's event list item into the xPendingReadyList, ready for the
 * kernel to move the task from the pending ready list into the real ready list
 * when the scheduler is unsuspended.  The pending ready list itself can only be
 * accessed from a critical section.
 *
 * Updates to uxSchedulerSuspended must be protected by both the task lock and the ISR lock
 * and must not be done from an ISR.  Reads must be protected by either lock and may be done
 * from either an ISR or a task. */
PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t ) 0U;

#if ( configGENERATE_RUN_TIME_STATS == 1 )

/* Do not move these variables to function scope as doing so prevents the
 * code working with debuggers that need to remove the static qualifier. */
    PRIVILEGED_DATA static configRUN_TIME_COUNTER_TYPE ulTaskSwitchedInTime[ configNUMBER_OF_CORES ] = { 0U };  /**< Holds the value of a timer/counter the last time a task was switched in. */
    PRIVILEGED_DATA static volatile configRUN_TIME_COUNTER_TYPE ulTotalRunTime[ configNUMBER_OF_CORES ] = { 0U }; /**< Holds the total amount of execution time as defined by the run time counter clock. */

#endif
  436. /*-----------------------------------------------------------*/
  437. /* File private functions. --------------------------------*/
  438. /*
  439. * Creates the idle tasks during scheduler start.
  440. */
  441. static BaseType_t prvCreateIdleTasks( void );
  442. #if ( configNUMBER_OF_CORES > 1 )
  443. /*
  444. * Checks to see if another task moved the current task out of the ready
  445. * list while it was waiting to enter a critical section and yields, if so.
  446. */
  447. static void prvCheckForRunStateChange( void );
  448. #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
  449. #if ( configNUMBER_OF_CORES > 1 )
  450. /*
  451. * Yields a core, or cores if multiple priorities are not allowed to run
  452. * simultaneously, to allow the task pxTCB to run.
  453. */
  454. static void prvYieldForTask( const TCB_t * pxTCB );
  455. #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
  456. #if ( configNUMBER_OF_CORES > 1 )
  457. /*
  458. * Selects the highest priority available task for the given core.
  459. */
  460. static void prvSelectHighestPriorityTask( BaseType_t xCoreID );
  461. #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
  462. /**
  463. * Utility task that simply returns pdTRUE if the task referenced by xTask is
  464. * currently in the Suspended state, or pdFALSE if the task referenced by xTask
  465. * is in any other state.
  466. */
  467. #if ( INCLUDE_vTaskSuspend == 1 )
  468. static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
  469. #endif /* INCLUDE_vTaskSuspend */
  470. /*
  471. * Utility to ready all the lists used by the scheduler. This is called
  472. * automatically upon the creation of the first task.
  473. */
  474. static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION;
  475. /*
  476. * The idle task, which as all tasks is implemented as a never ending loop.
  477. * The idle task is automatically created and added to the ready lists upon
  478. * creation of the first user task.
  479. *
  480. * In the FreeRTOS SMP, configNUMBER_OF_CORES - 1 passive idle tasks are also
  481. * created to ensure that each core has an idle task to run when no other
  482. * task is available to run.
  483. *
  484. * The portTASK_FUNCTION_PROTO() macro is used to allow port/compiler specific
  485. * language extensions. The equivalent prototype for these functions are:
  486. *
  487. * void prvIdleTask( void *pvParameters );
  488. * void prvPassiveIdleTask( void *pvParameters );
  489. *
  490. */
  491. static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters ) PRIVILEGED_FUNCTION;
  492. #if ( configNUMBER_OF_CORES > 1 )
  493. static portTASK_FUNCTION_PROTO( prvPassiveIdleTask, pvParameters ) PRIVILEGED_FUNCTION;
  494. #endif
  495. /*
  496. * Utility to free all memory allocated by the scheduler to hold a TCB,
  497. * including the stack pointed to by the TCB.
  498. *
  499. * This does not free memory allocated by the task itself (i.e. memory
  500. * allocated by calls to pvPortMalloc from within the tasks application code).
  501. */
  502. #if ( INCLUDE_vTaskDelete == 1 )
  503. static void prvDeleteTCB( TCB_t * pxTCB ) PRIVILEGED_FUNCTION;
  504. #endif
  505. /*
  506. * Used only by the idle task. This checks to see if anything has been placed
  507. * in the list of tasks waiting to be deleted. If so the task is cleaned up
  508. * and its TCB deleted.
  509. */
  510. static void prvCheckTasksWaitingTermination( void ) PRIVILEGED_FUNCTION;
  511. /*
  512. * The currently executing task is entering the Blocked state. Add the task to
  513. * either the current or the overflow delayed task list.
  514. */
  515. static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait,
  516. const BaseType_t xCanBlockIndefinitely ) PRIVILEGED_FUNCTION;
  517. /*
 * Fills a TaskStatus_t structure with information on each task that is
  519. * referenced from the pxList list (which may be a ready list, a delayed list,
  520. * a suspended list, etc.).
  521. *
  522. * THIS FUNCTION IS INTENDED FOR DEBUGGING ONLY, AND SHOULD NOT BE CALLED FROM
  523. * NORMAL APPLICATION CODE.
  524. */
  525. #if ( configUSE_TRACE_FACILITY == 1 )
  526. static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t * pxTaskStatusArray,
  527. List_t * pxList,
  528. eTaskState eState ) PRIVILEGED_FUNCTION;
  529. #endif
  530. /*
  531. * Searches pxList for a task with name pcNameToQuery - returning a handle to
  532. * the task if it is found, or NULL if the task is not found.
  533. */
  534. #if ( INCLUDE_xTaskGetHandle == 1 )
  535. static TCB_t * prvSearchForNameWithinSingleList( List_t * pxList,
  536. const char pcNameToQuery[] ) PRIVILEGED_FUNCTION;
  537. #endif
  538. /*
  539. * When a task is created, the stack of the task is filled with a known value.
  540. * This function determines the 'high water mark' of the task stack by
  541. * determining how much of the stack remains at the original preset value.
  542. */
  543. #if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
  544. static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte ) PRIVILEGED_FUNCTION;
  545. #endif
  546. /*
  547. * Return the amount of time, in ticks, that will pass before the kernel will
  548. * next move a task from the Blocked state to the Running state.
  549. *
  550. * This conditional compilation should use inequality to 0, not equality to 1.
  551. * This is to ensure portSUPPRESS_TICKS_AND_SLEEP() can be called when user
  552. * defined low power mode implementations require configUSE_TICKLESS_IDLE to be
  553. * set to a value other than 1.
  554. */
  555. #if ( configUSE_TICKLESS_IDLE != 0 )
  556. static TickType_t prvGetExpectedIdleTime( void ) PRIVILEGED_FUNCTION;
  557. #endif
  558. /*
  559. * Set xNextTaskUnblockTime to the time at which the next Blocked state task
  560. * will exit the Blocked state.
  561. */
  562. static void prvResetNextTaskUnblockTime( void ) PRIVILEGED_FUNCTION;
  563. #if ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 )
  564. /*
  565. * Helper function used to pad task names with spaces when printing out
  566. * human readable tables of task information.
  567. */
  568. static char * prvWriteNameToBuffer( char * pcBuffer,
  569. const char * pcTaskName ) PRIVILEGED_FUNCTION;
  570. #endif
  571. /*
  572. * Called after a Task_t structure has been allocated either statically or
  573. * dynamically to fill in the structure's members.
  574. */
  575. static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
  576. const char * const pcName,
  577. const configSTACK_DEPTH_TYPE uxStackDepth,
  578. void * const pvParameters,
  579. UBaseType_t uxPriority,
  580. TaskHandle_t * const pxCreatedTask,
  581. TCB_t * pxNewTCB,
  582. const MemoryRegion_t * const xRegions ) PRIVILEGED_FUNCTION;
  583. /*
  584. * Called after a new task has been created and initialised to place the task
  585. * under the control of the scheduler.
  586. */
  587. static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
  588. /*
  589. * Create a task with static buffer for both TCB and stack. Returns a handle to
  590. * the task if it is created successfully. Otherwise, returns NULL.
  591. */
  592. #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
  593. static TCB_t * prvCreateStaticTask( TaskFunction_t pxTaskCode,
  594. const char * const pcName,
  595. const configSTACK_DEPTH_TYPE uxStackDepth,
  596. void * const pvParameters,
  597. UBaseType_t uxPriority,
  598. StackType_t * const puxStackBuffer,
  599. StaticTask_t * const pxTaskBuffer,
  600. TaskHandle_t * const pxCreatedTask ) PRIVILEGED_FUNCTION;
  601. #endif /* #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
  602. /*
  603. * Create a restricted task with static buffer for both TCB and stack. Returns
  604. * a handle to the task if it is created successfully. Otherwise, returns NULL.
  605. */
  606. #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
  607. static TCB_t * prvCreateRestrictedStaticTask( const TaskParameters_t * const pxTaskDefinition,
  608. TaskHandle_t * const pxCreatedTask ) PRIVILEGED_FUNCTION;
  609. #endif /* #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */
  610. /*
  611. * Create a restricted task with static buffer for task stack and allocated buffer
  612. * for TCB. Returns a handle to the task if it is created successfully. Otherwise,
  613. * returns NULL.
  614. */
  615. #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
  616. static TCB_t * prvCreateRestrictedTask( const TaskParameters_t * const pxTaskDefinition,
  617. TaskHandle_t * const pxCreatedTask ) PRIVILEGED_FUNCTION;
  618. #endif /* #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
  619. /*
  620. * Create a task with allocated buffer for both TCB and stack. Returns a handle to
  621. * the task if it is created successfully. Otherwise, returns NULL.
  622. */
  623. #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
  624. static TCB_t * prvCreateTask( TaskFunction_t pxTaskCode,
  625. const char * const pcName,
  626. const configSTACK_DEPTH_TYPE uxStackDepth,
  627. void * const pvParameters,
  628. UBaseType_t uxPriority,
  629. TaskHandle_t * const pxCreatedTask ) PRIVILEGED_FUNCTION;
  630. #endif /* #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) */
  631. /*
  632. * freertos_tasks_c_additions_init() should only be called if the user definable
  633. * macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is the only macro
  634. * called by the function.
  635. */
  636. #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
  637. static void freertos_tasks_c_additions_init( void ) PRIVILEGED_FUNCTION;
  638. #endif
  639. #if ( configUSE_PASSIVE_IDLE_HOOK == 1 )
  640. extern void vApplicationPassiveIdleHook( void );
  641. #endif /* #if ( configUSE_PASSIVE_IDLE_HOOK == 1 ) */
  642. #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
  643. /*
  644. * Convert the snprintf return value to the number of characters
  645. * written. The following are the possible cases:
  646. *
  647. * 1. The buffer supplied to snprintf is large enough to hold the
  648. * generated string. The return value in this case is the number
  649. * of characters actually written, not counting the terminating
  650. * null character.
  651. * 2. The buffer supplied to snprintf is NOT large enough to hold
  652. * the generated string. The return value in this case is the
  653. * number of characters that would have been written if the
  654. * buffer had been sufficiently large, not counting the
  655. * terminating null character.
  656. * 3. Encoding error. The return value in this case is a negative
  657. * number.
  658. *
  659. * From 1 and 2 above ==> Only when the return value is non-negative
  660. * and less than the supplied buffer length, the string has been
  661. * completely written.
  662. */
  663. static size_t prvSnprintfReturnValueToCharsWritten( int iSnprintfReturnValue,
  664. size_t n );
  665. #endif /* #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) */
  666. /*-----------------------------------------------------------*/
#if ( configNUMBER_OF_CORES > 1 )

/*
 * Service a pending "scheduled to yield" request on the calling core. Called
 * with interrupts disabled when a task has just entered a critical section or
 * suspended the scheduler; it temporarily drops the kernel locks and critical
 * nesting count, re-enables interrupts so the pending yield can be taken, then
 * restores the previous state. Loops in case the run state changes again while
 * the locks are being reacquired.
 */
    static void prvCheckForRunStateChange( void )
    {
        UBaseType_t uxPrevCriticalNesting;
        const TCB_t * pxThisTCB;

        /* This must only be called from within a task. */
        portASSERT_IF_IN_ISR();

        /* This function is always called with interrupts disabled
         * so this is safe. */
        pxThisTCB = pxCurrentTCBs[ portGET_CORE_ID() ];

        while( pxThisTCB->xTaskRunState == taskTASK_SCHEDULED_TO_YIELD )
        {
            /* We are only here if we just entered a critical section
             * or if we just suspended the scheduler, and another task
             * has requested that we yield.
             *
             * This is slightly complicated since we need to save and restore
             * the suspension and critical nesting counts, as well as release
             * and reacquire the correct locks. And then, do it all over again
             * if our state changed again during the reacquisition. */
            uxPrevCriticalNesting = portGET_CRITICAL_NESTING_COUNT();

            if( uxPrevCriticalNesting > 0U )
            {
                /* In a critical section - clear the nesting count and release
                 * the ISR lock so the yield can be taken below. */
                portSET_CRITICAL_NESTING_COUNT( 0U );
                portRELEASE_ISR_LOCK();
            }
            else
            {
                /* The scheduler is suspended. uxSchedulerSuspended is updated
                 * only when the task is not requested to yield. */
                mtCOVERAGE_TEST_MARKER();
            }

            portRELEASE_TASK_LOCK();
            portMEMORY_BARRIER();
            configASSERT( pxThisTCB->xTaskRunState == taskTASK_SCHEDULED_TO_YIELD );

            portENABLE_INTERRUPTS();

            /* Enabling interrupts should cause this core to immediately
             * service the pending interrupt and yield. If the run state is still
             * yielding here then that is a problem. */
            configASSERT( pxThisTCB->xTaskRunState != taskTASK_SCHEDULED_TO_YIELD );

            portDISABLE_INTERRUPTS();
            portGET_TASK_LOCK();
            portGET_ISR_LOCK();
            portSET_CRITICAL_NESTING_COUNT( uxPrevCriticalNesting );

            if( uxPrevCriticalNesting == 0U )
            {
                /* The ISR lock was only taken to restore the critical nesting
                 * count atomically - not in a critical section, so release it. */
                portRELEASE_ISR_LOCK();
            }
        }
    }
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
  718. /*-----------------------------------------------------------*/
#if ( configNUMBER_OF_CORES > 1 )

/*
 * Request a yield on the most suitable core so that pxTCB, which has just
 * become ready, gets a chance to run. The core running the lowest priority
 * eligible task is chosen as the victim. Must be called from a critical
 * section.
 */
    static void prvYieldForTask( const TCB_t * pxTCB )
    {
        BaseType_t xLowestPriorityToPreempt;
        BaseType_t xCurrentCoreTaskPriority;
        BaseType_t xLowestPriorityCore = ( BaseType_t ) -1; /* -1 means no preemptible core found yet. */
        BaseType_t xCoreID;

        #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
            BaseType_t xYieldCount = 0;
        #endif /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */

        /* This must be called from a critical section. */
        configASSERT( portGET_CRITICAL_NESTING_COUNT() > 0U );

        #if ( configRUN_MULTIPLE_PRIORITIES == 0 )

            /* No task should yield for this one if it is a lower priority
             * than priority level of currently ready tasks. */
            if( pxTCB->uxPriority >= uxTopReadyPriority )
        #else
            /* Yield is not required for a task which is already running. */
            if( taskTASK_IS_RUNNING( pxTCB ) == pdFALSE )
        #endif
        {
            xLowestPriorityToPreempt = ( BaseType_t ) pxTCB->uxPriority;

            /* xLowestPriorityToPreempt will be decremented to -1 if the priority of pxTCB
             * is 0. This is ok as we will give system idle tasks a priority of -1 below. */
            --xLowestPriorityToPreempt;

            for( xCoreID = ( BaseType_t ) 0; xCoreID < ( BaseType_t ) configNUMBER_OF_CORES; xCoreID++ )
            {
                xCurrentCoreTaskPriority = ( BaseType_t ) pxCurrentTCBs[ xCoreID ]->uxPriority;

                /* System idle tasks are being assigned a priority of tskIDLE_PRIORITY - 1 here. */
                if( ( pxCurrentTCBs[ xCoreID ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0U )
                {
                    xCurrentCoreTaskPriority = ( BaseType_t ) ( xCurrentCoreTaskPriority - 1 );
                }

                /* Only consider cores that are running a task and do not already
                 * have a yield pending. */
                if( ( taskTASK_IS_RUNNING( pxCurrentTCBs[ xCoreID ] ) != pdFALSE ) && ( xYieldPendings[ xCoreID ] == pdFALSE ) )
                {
                    #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
                        if( taskTASK_IS_RUNNING( pxTCB ) == pdFALSE )
                    #endif
                    {
                        if( xCurrentCoreTaskPriority <= xLowestPriorityToPreempt )
                        {
                            #if ( configUSE_CORE_AFFINITY == 1 )
                                if( ( pxTCB->uxCoreAffinityMask & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) != 0U )
                            #endif
                            {
                                #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
                                    if( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == pdFALSE )
                                #endif
                                {
                                    /* Track the core running the lowest priority
                                     * eligible task seen so far. */
                                    xLowestPriorityToPreempt = xCurrentCoreTaskPriority;
                                    xLowestPriorityCore = xCoreID;
                                }
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }

                    #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
                    {
                        /* Yield all currently running non-idle tasks with a priority lower than
                         * the task that needs to run. */
                        if( ( xCurrentCoreTaskPriority > ( ( BaseType_t ) tskIDLE_PRIORITY - 1 ) ) &&
                            ( xCurrentCoreTaskPriority < ( BaseType_t ) pxTCB->uxPriority ) )
                        {
                            prvYieldCore( xCoreID );
                            xYieldCount++;
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    #endif /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }

            #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
                if( ( xYieldCount == 0 ) && ( xLowestPriorityCore >= 0 ) )
            #else /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */
                if( xLowestPriorityCore >= 0 )
            #endif /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */
            {
                prvYieldCore( xLowestPriorityCore );
            }

            #if ( configRUN_MULTIPLE_PRIORITIES == 0 )

                /* Verify that the calling core always yields to higher priority tasks. */
                if( ( ( pxCurrentTCBs[ portGET_CORE_ID() ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) == 0U ) &&
                    ( pxTCB->uxPriority > pxCurrentTCBs[ portGET_CORE_ID() ]->uxPriority ) )
                {
                    configASSERT( ( xYieldPendings[ portGET_CORE_ID() ] == pdTRUE ) ||
                                  ( taskTASK_IS_RUNNING( pxCurrentTCBs[ portGET_CORE_ID() ] ) == pdFALSE ) );
                }
            #endif
        }
    }
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
  820. /*-----------------------------------------------------------*/
#if ( configNUMBER_OF_CORES > 1 )

/*
 * Select the highest priority ready task that is eligible to run on core
 * xCoreID and install it as pxCurrentTCBs[ xCoreID ]. Walks the ready lists
 * from uxTopReadyPriority downwards; also handles the single-priority-level
 * restriction (configRUN_MULTIPLE_PRIORITIES == 0) and, with core affinity
 * enabled, tries to re-home a task evicted from this core onto another core.
 */
    static void prvSelectHighestPriorityTask( BaseType_t xCoreID )
    {
        UBaseType_t uxCurrentPriority = uxTopReadyPriority;
        BaseType_t xTaskScheduled = pdFALSE;
        BaseType_t xDecrementTopPriority = pdTRUE;
        TCB_t * pxTCB = NULL;

        #if ( configUSE_CORE_AFFINITY == 1 )
            const TCB_t * pxPreviousTCB = NULL;
        #endif

        #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
            BaseType_t xPriorityDropped = pdFALSE;
        #endif

        /* This function should be called when scheduler is running. */
        configASSERT( xSchedulerRunning == pdTRUE );

        /* A new task is created and a running task with the same priority yields
         * itself to run the new task. When a running task yields itself, it is still
         * in the ready list. This running task will be selected before the new task
         * since the new task is always added to the end of the ready list.
         * The other problem is that the running task is still in the same position of
         * the ready list when it yields itself. It is possible that it will be selected
         * earlier than other tasks which have waited longer than this task.
         *
         * To fix these problems, the running task should be put to the end of the
         * ready list before searching for the ready task in the ready list. */
        if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxCurrentTCBs[ xCoreID ]->uxPriority ] ),
                                     &pxCurrentTCBs[ xCoreID ]->xStateListItem ) == pdTRUE )
        {
            ( void ) uxListRemove( &pxCurrentTCBs[ xCoreID ]->xStateListItem );
            vListInsertEnd( &( pxReadyTasksLists[ pxCurrentTCBs[ xCoreID ]->uxPriority ] ),
                            &pxCurrentTCBs[ xCoreID ]->xStateListItem );
        }

        while( xTaskScheduled == pdFALSE )
        {
            #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
            {
                if( uxCurrentPriority < uxTopReadyPriority )
                {
                    /* We can't schedule any tasks, other than idle, that have a
                     * priority lower than the priority of a task currently running
                     * on another core. */
                    uxCurrentPriority = tskIDLE_PRIORITY;
                }
            }
            #endif

            if( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxCurrentPriority ] ) ) == pdFALSE )
            {
                const List_t * const pxReadyList = &( pxReadyTasksLists[ uxCurrentPriority ] );
                const ListItem_t * pxEndMarker = listGET_END_MARKER( pxReadyList );
                ListItem_t * pxIterator;

                /* The ready task list for uxCurrentPriority is not empty, so uxTopReadyPriority
                 * must not be decremented any further. */
                xDecrementTopPriority = pdFALSE;

                for( pxIterator = listGET_HEAD_ENTRY( pxReadyList ); pxIterator != pxEndMarker; pxIterator = listGET_NEXT( pxIterator ) )
                {
                    /* MISRA Ref 11.5.3 [Void pointer assignment] */
                    /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
                    /* coverity[misra_c_2012_rule_11_5_violation] */
                    pxTCB = ( TCB_t * ) listGET_LIST_ITEM_OWNER( pxIterator );

                    #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
                    {
                        /* When falling back to the idle priority because only one priority
                         * level is allowed to run at a time, we should ONLY schedule the true
                         * idle tasks, not user tasks at the idle priority. */
                        if( uxCurrentPriority < uxTopReadyPriority )
                        {
                            if( ( pxTCB->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) == 0U )
                            {
                                continue;
                            }
                        }
                    }
                    #endif /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */

                    if( pxTCB->xTaskRunState == taskTASK_NOT_RUNNING )
                    {
                        #if ( configUSE_CORE_AFFINITY == 1 )
                            if( ( pxTCB->uxCoreAffinityMask & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) != 0U )
                        #endif
                        {
                            /* If the task is not being executed by any core swap it in. */
                            pxCurrentTCBs[ xCoreID ]->xTaskRunState = taskTASK_NOT_RUNNING;
                            #if ( configUSE_CORE_AFFINITY == 1 )
                                pxPreviousTCB = pxCurrentTCBs[ xCoreID ];
                            #endif
                            pxTCB->xTaskRunState = xCoreID;
                            pxCurrentTCBs[ xCoreID ] = pxTCB;
                            xTaskScheduled = pdTRUE;
                        }
                    }
                    else if( pxTCB == pxCurrentTCBs[ xCoreID ] )
                    {
                        configASSERT( ( pxTCB->xTaskRunState == xCoreID ) || ( pxTCB->xTaskRunState == taskTASK_SCHEDULED_TO_YIELD ) );

                        #if ( configUSE_CORE_AFFINITY == 1 )
                            if( ( pxTCB->uxCoreAffinityMask & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) != 0U )
                        #endif
                        {
                            /* The task is already running on this core, mark it as scheduled. */
                            pxTCB->xTaskRunState = xCoreID;
                            xTaskScheduled = pdTRUE;
                        }
                    }
                    else
                    {
                        /* This task is running on the core other than xCoreID. */
                        mtCOVERAGE_TEST_MARKER();
                    }

                    if( xTaskScheduled != pdFALSE )
                    {
                        /* A task has been selected to run on this core. */
                        break;
                    }
                }
            }
            else
            {
                if( xDecrementTopPriority != pdFALSE )
                {
                    /* No task of this priority has been seen yet, so the top
                     * ready priority can be lowered. */
                    uxTopReadyPriority--;
                    #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
                    {
                        xPriorityDropped = pdTRUE;
                    }
                    #endif
                }
            }

            /* There are configNUMBER_OF_CORES Idle tasks created when scheduler started.
             * The scheduler should be able to select a task to run when uxCurrentPriority
             * is tskIDLE_PRIORITY. uxCurrentPriority is never decreased to a value below
             * tskIDLE_PRIORITY. */
            if( uxCurrentPriority > tskIDLE_PRIORITY )
            {
                uxCurrentPriority--;
            }
            else
            {
                /* This function is called when idle task is not created. Break the
                 * loop to prevent uxCurrentPriority overrun. */
                break;
            }
        }

        #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
        {
            if( xTaskScheduled == pdTRUE )
            {
                if( xPriorityDropped != pdFALSE )
                {
                    /* There may be several ready tasks that were being prevented from running because there was
                     * a higher priority task running. Now that the last of the higher priority tasks is no longer
                     * running, make sure all the other idle tasks yield. */
                    BaseType_t x;

                    for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configNUMBER_OF_CORES; x++ )
                    {
                        if( ( pxCurrentTCBs[ x ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0U )
                        {
                            prvYieldCore( x );
                        }
                    }
                }
            }
        }
        #endif /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */

        #if ( configUSE_CORE_AFFINITY == 1 )
        {
            if( xTaskScheduled == pdTRUE )
            {
                if( ( pxPreviousTCB != NULL ) && ( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxPreviousTCB->uxPriority ] ), &( pxPreviousTCB->xStateListItem ) ) != pdFALSE ) )
                {
                    /* A ready task was just evicted from this core. See if it can be
                     * scheduled on any other core. */
                    UBaseType_t uxCoreMap = pxPreviousTCB->uxCoreAffinityMask;
                    BaseType_t xLowestPriority = ( BaseType_t ) pxPreviousTCB->uxPriority;
                    BaseType_t xLowestPriorityCore = -1;
                    BaseType_t x;

                    if( ( pxPreviousTCB->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0U )
                    {
                        /* Idle tasks compare one level below tskIDLE_PRIORITY so
                         * any user task can displace them. */
                        xLowestPriority = xLowestPriority - 1;
                    }

                    if( ( uxCoreMap & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) != 0U )
                    {
                        /* pxPreviousTCB was removed from this core and this core is not excluded
                         * from its core affinity mask.
                         *
                         * pxPreviousTCB is preempted by the new higher priority task
                         * pxCurrentTCBs[ xCoreID ]. When searching a new core for pxPreviousTCB,
                         * we do not need to look at the cores on which pxCurrentTCBs[ xCoreID ]
                         * is allowed to run. The reason is - when more than one cores are
                         * eligible for an incoming task, we preempt the core with the minimum
                         * priority task. Because this core (i.e. xCoreID) was preempted for
                         * pxCurrentTCBs[ xCoreID ], this means that all the others cores
                         * where pxCurrentTCBs[ xCoreID ] can run, are running tasks with priority
                         * no lower than pxPreviousTCB's priority. Therefore, the only cores where
                         * which can be preempted for pxPreviousTCB are the ones where
                         * pxCurrentTCBs[ xCoreID ] is not allowed to run (and obviously,
                         * pxPreviousTCB is allowed to run).
                         *
                         * This is an optimization which reduces the number of cores needed to be
                         * searched for pxPreviousTCB to run. */
                        uxCoreMap &= ~( pxCurrentTCBs[ xCoreID ]->uxCoreAffinityMask );
                    }
                    else
                    {
                        /* pxPreviousTCB's core affinity mask is changed and it is no longer
                         * allowed to run on this core. Searching all the cores in pxPreviousTCB's
                         * new core affinity mask to find a core on which it can run. */
                    }

                    uxCoreMap &= ( ( 1U << configNUMBER_OF_CORES ) - 1U );

                    for( x = ( ( BaseType_t ) configNUMBER_OF_CORES - 1 ); x >= ( BaseType_t ) 0; x-- )
                    {
                        UBaseType_t uxCore = ( UBaseType_t ) x;
                        BaseType_t xTaskPriority;

                        if( ( uxCoreMap & ( ( UBaseType_t ) 1U << uxCore ) ) != 0U )
                        {
                            xTaskPriority = ( BaseType_t ) pxCurrentTCBs[ uxCore ]->uxPriority;

                            if( ( pxCurrentTCBs[ uxCore ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0U )
                            {
                                xTaskPriority = xTaskPriority - ( BaseType_t ) 1;
                            }

                            uxCoreMap &= ~( ( UBaseType_t ) 1U << uxCore );

                            if( ( xTaskPriority < xLowestPriority ) &&
                                ( taskTASK_IS_RUNNING( pxCurrentTCBs[ uxCore ] ) != pdFALSE ) &&
                                ( xYieldPendings[ uxCore ] == pdFALSE ) )
                            {
                                #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
                                    if( pxCurrentTCBs[ uxCore ]->xPreemptionDisable == pdFALSE )
                                #endif
                                {
                                    xLowestPriority = xTaskPriority;
                                    xLowestPriorityCore = ( BaseType_t ) uxCore;
                                }
                            }
                        }
                    }

                    if( xLowestPriorityCore >= 0 )
                    {
                        prvYieldCore( xLowestPriorityCore );
                    }
                }
            }
        }
        #endif /* #if ( configUSE_CORE_AFFINITY == 1 ) */
    }
#endif /* ( configNUMBER_OF_CORES > 1 ) */
  1063. /*-----------------------------------------------------------*/
  1064. #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
  1065. static TCB_t * prvCreateStaticTask( TaskFunction_t pxTaskCode,
  1066. const char * const pcName,
  1067. const configSTACK_DEPTH_TYPE uxStackDepth,
  1068. void * const pvParameters,
  1069. UBaseType_t uxPriority,
  1070. StackType_t * const puxStackBuffer,
  1071. StaticTask_t * const pxTaskBuffer,
  1072. TaskHandle_t * const pxCreatedTask )
  1073. {
  1074. TCB_t * pxNewTCB;
  1075. configASSERT( puxStackBuffer != NULL );
  1076. configASSERT( pxTaskBuffer != NULL );
  1077. #if ( configASSERT_DEFINED == 1 )
  1078. {
  1079. /* Sanity check that the size of the structure used to declare a
  1080. * variable of type StaticTask_t equals the size of the real task
  1081. * structure. */
  1082. volatile size_t xSize = sizeof( StaticTask_t );
  1083. configASSERT( xSize == sizeof( TCB_t ) );
  1084. ( void ) xSize; /* Prevent unused variable warning when configASSERT() is not used. */
  1085. }
  1086. #endif /* configASSERT_DEFINED */
  1087. if( ( pxTaskBuffer != NULL ) && ( puxStackBuffer != NULL ) )
  1088. {
  1089. /* The memory used for the task's TCB and stack are passed into this
  1090. * function - use them. */
  1091. /* MISRA Ref 11.3.1 [Misaligned access] */
  1092. /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-113 */
  1093. /* coverity[misra_c_2012_rule_11_3_violation] */
  1094. pxNewTCB = ( TCB_t * ) pxTaskBuffer;
  1095. ( void ) memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) );
  1096. pxNewTCB->pxStack = ( StackType_t * ) puxStackBuffer;
  1097. #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
  1098. {
  1099. /* Tasks can be created statically or dynamically, so note this
  1100. * task was created statically in case the task is later deleted. */
  1101. pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;
  1102. }
  1103. #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
  1104. prvInitialiseNewTask( pxTaskCode, pcName, uxStackDepth, pvParameters, uxPriority, pxCreatedTask, pxNewTCB, NULL );
  1105. }
  1106. else
  1107. {
  1108. pxNewTCB = NULL;
  1109. }
  1110. return pxNewTCB;
  1111. }
  1112. /*-----------------------------------------------------------*/
  1113. TaskHandle_t xTaskCreateStatic( TaskFunction_t pxTaskCode,
  1114. const char * const pcName,
  1115. const configSTACK_DEPTH_TYPE uxStackDepth,
  1116. void * const pvParameters,
  1117. UBaseType_t uxPriority,
  1118. StackType_t * const puxStackBuffer,
  1119. StaticTask_t * const pxTaskBuffer )
  1120. {
  1121. TaskHandle_t xReturn = NULL;
  1122. TCB_t * pxNewTCB;
  1123. traceENTER_xTaskCreateStatic( pxTaskCode, pcName, uxStackDepth, pvParameters, uxPriority, puxStackBuffer, pxTaskBuffer );
  1124. pxNewTCB = prvCreateStaticTask( pxTaskCode, pcName, uxStackDepth, pvParameters, uxPriority, puxStackBuffer, pxTaskBuffer, &xReturn );
  1125. if( pxNewTCB != NULL )
  1126. {
  1127. #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
  1128. {
  1129. /* Set the task's affinity before scheduling it. */
  1130. pxNewTCB->uxCoreAffinityMask = configTASK_DEFAULT_CORE_AFFINITY;
  1131. }
  1132. #endif
  1133. prvAddNewTaskToReadyList( pxNewTCB );
  1134. }
  1135. else
  1136. {
  1137. mtCOVERAGE_TEST_MARKER();
  1138. }
  1139. traceRETURN_xTaskCreateStatic( xReturn );
  1140. return xReturn;
  1141. }
  1142. /*-----------------------------------------------------------*/
  1143. #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
  1144. TaskHandle_t xTaskCreateStaticAffinitySet( TaskFunction_t pxTaskCode,
  1145. const char * const pcName,
  1146. const configSTACK_DEPTH_TYPE uxStackDepth,
  1147. void * const pvParameters,
  1148. UBaseType_t uxPriority,
  1149. StackType_t * const puxStackBuffer,
  1150. StaticTask_t * const pxTaskBuffer,
  1151. UBaseType_t uxCoreAffinityMask )
  1152. {
  1153. TaskHandle_t xReturn = NULL;
  1154. TCB_t * pxNewTCB;
  1155. traceENTER_xTaskCreateStaticAffinitySet( pxTaskCode, pcName, uxStackDepth, pvParameters, uxPriority, puxStackBuffer, pxTaskBuffer, uxCoreAffinityMask );
  1156. pxNewTCB = prvCreateStaticTask( pxTaskCode, pcName, uxStackDepth, pvParameters, uxPriority, puxStackBuffer, pxTaskBuffer, &xReturn );
  1157. if( pxNewTCB != NULL )
  1158. {
  1159. /* Set the task's affinity before scheduling it. */
  1160. pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask;
  1161. prvAddNewTaskToReadyList( pxNewTCB );
  1162. }
  1163. else
  1164. {
  1165. mtCOVERAGE_TEST_MARKER();
  1166. }
  1167. traceRETURN_xTaskCreateStaticAffinitySet( xReturn );
  1168. return xReturn;
  1169. }
  1170. #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */
  1171. #endif /* SUPPORT_STATIC_ALLOCATION */
  1172. /*-----------------------------------------------------------*/
  1173. #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
  1174. static TCB_t * prvCreateRestrictedStaticTask( const TaskParameters_t * const pxTaskDefinition,
  1175. TaskHandle_t * const pxCreatedTask )
  1176. {
  1177. TCB_t * pxNewTCB;
  1178. configASSERT( pxTaskDefinition->puxStackBuffer != NULL );
  1179. configASSERT( pxTaskDefinition->pxTaskBuffer != NULL );
  1180. if( ( pxTaskDefinition->puxStackBuffer != NULL ) && ( pxTaskDefinition->pxTaskBuffer != NULL ) )
  1181. {
  1182. /* Allocate space for the TCB. Where the memory comes from depends
  1183. * on the implementation of the port malloc function and whether or
  1184. * not static allocation is being used. */
  1185. pxNewTCB = ( TCB_t * ) pxTaskDefinition->pxTaskBuffer;
  1186. ( void ) memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) );
  1187. /* Store the stack location in the TCB. */
  1188. pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;
  1189. #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
  1190. {
  1191. /* Tasks can be created statically or dynamically, so note this
  1192. * task was created statically in case the task is later deleted. */
  1193. pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;
  1194. }
  1195. #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
  1196. prvInitialiseNewTask( pxTaskDefinition->pvTaskCode,
  1197. pxTaskDefinition->pcName,
  1198. pxTaskDefinition->usStackDepth,
  1199. pxTaskDefinition->pvParameters,
  1200. pxTaskDefinition->uxPriority,
  1201. pxCreatedTask, pxNewTCB,
  1202. pxTaskDefinition->xRegions );
  1203. }
  1204. else
  1205. {
  1206. pxNewTCB = NULL;
  1207. }
  1208. return pxNewTCB;
  1209. }
  1210. /*-----------------------------------------------------------*/
  1211. BaseType_t xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition,
  1212. TaskHandle_t * pxCreatedTask )
  1213. {
  1214. TCB_t * pxNewTCB;
  1215. BaseType_t xReturn;
  1216. traceENTER_xTaskCreateRestrictedStatic( pxTaskDefinition, pxCreatedTask );
  1217. configASSERT( pxTaskDefinition != NULL );
  1218. pxNewTCB = prvCreateRestrictedStaticTask( pxTaskDefinition, pxCreatedTask );
  1219. if( pxNewTCB != NULL )
  1220. {
  1221. #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
  1222. {
  1223. /* Set the task's affinity before scheduling it. */
  1224. pxNewTCB->uxCoreAffinityMask = configTASK_DEFAULT_CORE_AFFINITY;
  1225. }
  1226. #endif
  1227. prvAddNewTaskToReadyList( pxNewTCB );
  1228. xReturn = pdPASS;
  1229. }
  1230. else
  1231. {
  1232. xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
  1233. }
  1234. traceRETURN_xTaskCreateRestrictedStatic( xReturn );
  1235. return xReturn;
  1236. }
  1237. /*-----------------------------------------------------------*/
  1238. #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
  1239. BaseType_t xTaskCreateRestrictedStaticAffinitySet( const TaskParameters_t * const pxTaskDefinition,
  1240. UBaseType_t uxCoreAffinityMask,
  1241. TaskHandle_t * pxCreatedTask )
  1242. {
  1243. TCB_t * pxNewTCB;
  1244. BaseType_t xReturn;
  1245. traceENTER_xTaskCreateRestrictedStaticAffinitySet( pxTaskDefinition, uxCoreAffinityMask, pxCreatedTask );
  1246. configASSERT( pxTaskDefinition != NULL );
  1247. pxNewTCB = prvCreateRestrictedStaticTask( pxTaskDefinition, pxCreatedTask );
  1248. if( pxNewTCB != NULL )
  1249. {
  1250. /* Set the task's affinity before scheduling it. */
  1251. pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask;
  1252. prvAddNewTaskToReadyList( pxNewTCB );
  1253. xReturn = pdPASS;
  1254. }
  1255. else
  1256. {
  1257. xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
  1258. }
  1259. traceRETURN_xTaskCreateRestrictedStaticAffinitySet( xReturn );
  1260. return xReturn;
  1261. }
  1262. #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */
  1263. #endif /* ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
  1264. /*-----------------------------------------------------------*/
  1265. #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
  1266. static TCB_t * prvCreateRestrictedTask( const TaskParameters_t * const pxTaskDefinition,
  1267. TaskHandle_t * const pxCreatedTask )
  1268. {
  1269. TCB_t * pxNewTCB;
  1270. configASSERT( pxTaskDefinition->puxStackBuffer );
  1271. if( pxTaskDefinition->puxStackBuffer != NULL )
  1272. {
  1273. /* MISRA Ref 11.5.1 [Malloc memory assignment] */
  1274. /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
  1275. /* coverity[misra_c_2012_rule_11_5_violation] */
  1276. pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) );
  1277. if( pxNewTCB != NULL )
  1278. {
  1279. ( void ) memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) );
  1280. /* Store the stack location in the TCB. */
  1281. pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;
  1282. #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
  1283. {
  1284. /* Tasks can be created statically or dynamically, so note
  1285. * this task had a statically allocated stack in case it is
  1286. * later deleted. The TCB was allocated dynamically. */
  1287. pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_ONLY;
  1288. }
  1289. #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
  1290. prvInitialiseNewTask( pxTaskDefinition->pvTaskCode,
  1291. pxTaskDefinition->pcName,
  1292. pxTaskDefinition->usStackDepth,
  1293. pxTaskDefinition->pvParameters,
  1294. pxTaskDefinition->uxPriority,
  1295. pxCreatedTask, pxNewTCB,
  1296. pxTaskDefinition->xRegions );
  1297. }
  1298. }
  1299. else
  1300. {
  1301. pxNewTCB = NULL;
  1302. }
  1303. return pxNewTCB;
  1304. }
  1305. /*-----------------------------------------------------------*/
  1306. BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition,
  1307. TaskHandle_t * pxCreatedTask )
  1308. {
  1309. TCB_t * pxNewTCB;
  1310. BaseType_t xReturn;
  1311. traceENTER_xTaskCreateRestricted( pxTaskDefinition, pxCreatedTask );
  1312. pxNewTCB = prvCreateRestrictedTask( pxTaskDefinition, pxCreatedTask );
  1313. if( pxNewTCB != NULL )
  1314. {
  1315. #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
  1316. {
  1317. /* Set the task's affinity before scheduling it. */
  1318. pxNewTCB->uxCoreAffinityMask = configTASK_DEFAULT_CORE_AFFINITY;
  1319. }
  1320. #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */
  1321. prvAddNewTaskToReadyList( pxNewTCB );
  1322. xReturn = pdPASS;
  1323. }
  1324. else
  1325. {
  1326. xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
  1327. }
  1328. traceRETURN_xTaskCreateRestricted( xReturn );
  1329. return xReturn;
  1330. }
  1331. /*-----------------------------------------------------------*/
  1332. #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
  1333. BaseType_t xTaskCreateRestrictedAffinitySet( const TaskParameters_t * const pxTaskDefinition,
  1334. UBaseType_t uxCoreAffinityMask,
  1335. TaskHandle_t * pxCreatedTask )
  1336. {
  1337. TCB_t * pxNewTCB;
  1338. BaseType_t xReturn;
  1339. traceENTER_xTaskCreateRestrictedAffinitySet( pxTaskDefinition, uxCoreAffinityMask, pxCreatedTask );
  1340. pxNewTCB = prvCreateRestrictedTask( pxTaskDefinition, pxCreatedTask );
  1341. if( pxNewTCB != NULL )
  1342. {
  1343. /* Set the task's affinity before scheduling it. */
  1344. pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask;
  1345. prvAddNewTaskToReadyList( pxNewTCB );
  1346. xReturn = pdPASS;
  1347. }
  1348. else
  1349. {
  1350. xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
  1351. }
  1352. traceRETURN_xTaskCreateRestrictedAffinitySet( xReturn );
  1353. return xReturn;
  1354. }
  1355. #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */
  1356. #endif /* portUSING_MPU_WRAPPERS */
  1357. /*-----------------------------------------------------------*/
  1358. #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
/* Dynamically allocate the TCB and stack for a new task, then initialise
 * the task via prvInitialiseNewTask().  The allocation order depends on the
 * direction of stack growth so that an overflowing stack runs away from,
 * not into, the TCB.  Returns the new TCB, or NULL if either allocation
 * failed (in which case whichever object was allocated first is freed). */
static TCB_t * prvCreateTask( TaskFunction_t pxTaskCode,
                              const char * const pcName,
                              const configSTACK_DEPTH_TYPE uxStackDepth,
                              void * const pvParameters,
                              UBaseType_t uxPriority,
                              TaskHandle_t * const pxCreatedTask )
{
    TCB_t * pxNewTCB;

    /* If the stack grows down then allocate the stack then the TCB so the stack
     * does not grow into the TCB. Likewise if the stack grows up then allocate
     * the TCB then the stack. */
    #if ( portSTACK_GROWTH > 0 )
    {
        /* Allocate space for the TCB. Where the memory comes from depends on
         * the implementation of the port malloc function and whether or not static
         * allocation is being used. */
        /* MISRA Ref 11.5.1 [Malloc memory assignment] */
        /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
        /* coverity[misra_c_2012_rule_11_5_violation] */
        pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) );

        if( pxNewTCB != NULL )
        {
            ( void ) memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) );

            /* Allocate space for the stack used by the task being created.
             * The base of the stack memory stored in the TCB so the task can
             * be deleted later if required. */
            /* MISRA Ref 11.5.1 [Malloc memory assignment] */
            /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
            /* coverity[misra_c_2012_rule_11_5_violation] */
            pxNewTCB->pxStack = ( StackType_t * ) pvPortMallocStack( ( ( ( size_t ) uxStackDepth ) * sizeof( StackType_t ) ) );

            if( pxNewTCB->pxStack == NULL )
            {
                /* Could not allocate the stack. Delete the allocated TCB. */
                vPortFree( pxNewTCB );
                pxNewTCB = NULL;
            }
        }
    }
    #else /* portSTACK_GROWTH */
    {
        StackType_t * pxStack;

        /* Allocate space for the stack used by the task being created. */
        /* MISRA Ref 11.5.1 [Malloc memory assignment] */
        /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
        /* coverity[misra_c_2012_rule_11_5_violation] */
        pxStack = pvPortMallocStack( ( ( ( size_t ) uxStackDepth ) * sizeof( StackType_t ) ) );

        if( pxStack != NULL )
        {
            /* Allocate space for the TCB. */
            /* MISRA Ref 11.5.1 [Malloc memory assignment] */
            /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
            /* coverity[misra_c_2012_rule_11_5_violation] */
            pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) );

            if( pxNewTCB != NULL )
            {
                ( void ) memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) );

                /* Store the stack location in the TCB. */
                pxNewTCB->pxStack = pxStack;
            }
            else
            {
                /* The stack cannot be used as the TCB was not created. Free
                 * it again. */
                vPortFreeStack( pxStack );
            }
        }
        else
        {
            pxNewTCB = NULL;
        }
    }
    #endif /* portSTACK_GROWTH */

    if( pxNewTCB != NULL )
    {
        #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
        {
            /* Tasks can be created statically or dynamically, so note this
             * task was created dynamically in case it is later deleted. */
            pxNewTCB->ucStaticallyAllocated = tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB;
        }
        #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */

        /* NULL xRegions: a non-MPU task has no memory region definitions. */
        prvInitialiseNewTask( pxTaskCode, pcName, uxStackDepth, pvParameters, uxPriority, pxCreatedTask, pxNewTCB, NULL );
    }

    return pxNewTCB;
}
  1444. /*-----------------------------------------------------------*/
  1445. BaseType_t xTaskCreate( TaskFunction_t pxTaskCode,
  1446. const char * const pcName,
  1447. const configSTACK_DEPTH_TYPE uxStackDepth,
  1448. void * const pvParameters,
  1449. UBaseType_t uxPriority,
  1450. TaskHandle_t * const pxCreatedTask )
  1451. {
  1452. TCB_t * pxNewTCB;
  1453. BaseType_t xReturn;
  1454. traceENTER_xTaskCreate( pxTaskCode, pcName, uxStackDepth, pvParameters, uxPriority, pxCreatedTask );
  1455. pxNewTCB = prvCreateTask( pxTaskCode, pcName, uxStackDepth, pvParameters, uxPriority, pxCreatedTask );
  1456. if( pxNewTCB != NULL )
  1457. {
  1458. #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
  1459. {
  1460. /* Set the task's affinity before scheduling it. */
  1461. pxNewTCB->uxCoreAffinityMask = configTASK_DEFAULT_CORE_AFFINITY;
  1462. }
  1463. #endif
  1464. prvAddNewTaskToReadyList( pxNewTCB );
  1465. xReturn = pdPASS;
  1466. }
  1467. else
  1468. {
  1469. xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
  1470. }
  1471. traceRETURN_xTaskCreate( xReturn );
  1472. return xReturn;
  1473. }
  1474. /*-----------------------------------------------------------*/
  1475. #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
  1476. BaseType_t xTaskCreateAffinitySet( TaskFunction_t pxTaskCode,
  1477. const char * const pcName,
  1478. const configSTACK_DEPTH_TYPE uxStackDepth,
  1479. void * const pvParameters,
  1480. UBaseType_t uxPriority,
  1481. UBaseType_t uxCoreAffinityMask,
  1482. TaskHandle_t * const pxCreatedTask )
  1483. {
  1484. TCB_t * pxNewTCB;
  1485. BaseType_t xReturn;
  1486. traceENTER_xTaskCreateAffinitySet( pxTaskCode, pcName, uxStackDepth, pvParameters, uxPriority, uxCoreAffinityMask, pxCreatedTask );
  1487. pxNewTCB = prvCreateTask( pxTaskCode, pcName, uxStackDepth, pvParameters, uxPriority, pxCreatedTask );
  1488. if( pxNewTCB != NULL )
  1489. {
  1490. /* Set the task's affinity before scheduling it. */
  1491. pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask;
  1492. prvAddNewTaskToReadyList( pxNewTCB );
  1493. xReturn = pdPASS;
  1494. }
  1495. else
  1496. {
  1497. xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
  1498. }
  1499. traceRETURN_xTaskCreateAffinitySet( xReturn );
  1500. return xReturn;
  1501. }
  1502. #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */
  1503. #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
  1504. /*-----------------------------------------------------------*/
/* Initialise a freshly allocated TCB and stack so the task looks as if it
 * was already running but had been interrupted by the scheduler.  Fills the
 * stack (optionally), computes the aligned top-of-stack, copies the name,
 * clamps the priority, initialises the state/event list items, stores MPU
 * settings where applicable, and finally writes the task handle out through
 * pxCreatedTask.  Called by every task-creation variant. */
static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
                                  const char * const pcName,
                                  const configSTACK_DEPTH_TYPE uxStackDepth,
                                  void * const pvParameters,
                                  UBaseType_t uxPriority,
                                  TaskHandle_t * const pxCreatedTask,
                                  TCB_t * pxNewTCB,
                                  const MemoryRegion_t * const xRegions )
{
    StackType_t * pxTopOfStack;
    UBaseType_t x;

    #if ( portUSING_MPU_WRAPPERS == 1 )
        /* Should the task be created in privileged mode? */
        BaseType_t xRunPrivileged;

        if( ( uxPriority & portPRIVILEGE_BIT ) != 0U )
        {
            xRunPrivileged = pdTRUE;
        }
        else
        {
            xRunPrivileged = pdFALSE;
        }
        /* The privilege bit is carried in the priority argument only; strip
         * it before the value is used as a real priority. */
        uxPriority &= ~portPRIVILEGE_BIT;
    #endif /* portUSING_MPU_WRAPPERS == 1 */

    /* Avoid dependency on memset() if it is not required. */
    #if ( tskSET_NEW_STACKS_TO_KNOWN_VALUE == 1 )
    {
        /* Fill the stack with a known value to assist debugging. */
        ( void ) memset( pxNewTCB->pxStack, ( int ) tskSTACK_FILL_BYTE, ( size_t ) uxStackDepth * sizeof( StackType_t ) );
    }
    #endif /* tskSET_NEW_STACKS_TO_KNOWN_VALUE */

    /* Calculate the top of stack address. This depends on whether the stack
     * grows from high memory to low (as per the 80x86) or vice versa.
     * portSTACK_GROWTH is used to make the result positive or negative as required
     * by the port. */
    #if ( portSTACK_GROWTH < 0 )
    {
        pxTopOfStack = &( pxNewTCB->pxStack[ uxStackDepth - ( configSTACK_DEPTH_TYPE ) 1 ] );
        /* Round down to the required alignment boundary. */
        pxTopOfStack = ( StackType_t * ) ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) );

        /* Check the alignment of the calculated top of stack is correct. */
        configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0U ) );

        #if ( configRECORD_STACK_HIGH_ADDRESS == 1 )
        {
            /* Also record the stack's high address, which may assist
             * debugging. */
            pxNewTCB->pxEndOfStack = pxTopOfStack;
        }
        #endif /* configRECORD_STACK_HIGH_ADDRESS */
    }
    #else /* portSTACK_GROWTH */
    {
        pxTopOfStack = pxNewTCB->pxStack;
        /* Round up to the required alignment boundary. */
        pxTopOfStack = ( StackType_t * ) ( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) + portBYTE_ALIGNMENT_MASK ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) );

        /* Check the alignment of the calculated top of stack is correct. */
        configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0U ) );

        /* The other extreme of the stack space is required if stack checking is
         * performed. */
        pxNewTCB->pxEndOfStack = pxNewTCB->pxStack + ( uxStackDepth - ( configSTACK_DEPTH_TYPE ) 1 );
    }
    #endif /* portSTACK_GROWTH */

    /* Store the task name in the TCB. */
    if( pcName != NULL )
    {
        for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
        {
            pxNewTCB->pcTaskName[ x ] = pcName[ x ];

            /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than
             * configMAX_TASK_NAME_LEN characters just in case the memory after the
             * string is not accessible (extremely unlikely). */
            if( pcName[ x ] == ( char ) 0x00 )
            {
                break;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }

        /* Ensure the name string is terminated in the case that the string length
         * was greater or equal to configMAX_TASK_NAME_LEN. */
        pxNewTCB->pcTaskName[ configMAX_TASK_NAME_LEN - 1U ] = '\0';
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }

    /* This is used as an array index so must ensure it's not too large. */
    configASSERT( uxPriority < configMAX_PRIORITIES );

    /* Clamp (rather than fault) if configASSERT() is defined away. */
    if( uxPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
    {
        uxPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }

    pxNewTCB->uxPriority = uxPriority;
    #if ( configUSE_MUTEXES == 1 )
    {
        /* The base priority is the priority before any mutex-related
         * priority inheritance is applied. */
        pxNewTCB->uxBasePriority = uxPriority;
    }
    #endif /* configUSE_MUTEXES */

    vListInitialiseItem( &( pxNewTCB->xStateListItem ) );
    vListInitialiseItem( &( pxNewTCB->xEventListItem ) );

    /* Set the pxNewTCB as a link back from the ListItem_t. This is so we can get
     * back to the containing TCB from a generic item in a list. */
    listSET_LIST_ITEM_OWNER( &( pxNewTCB->xStateListItem ), pxNewTCB );

    /* Event lists are always in priority order. */
    listSET_LIST_ITEM_VALUE( &( pxNewTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriority );
    listSET_LIST_ITEM_OWNER( &( pxNewTCB->xEventListItem ), pxNewTCB );

    #if ( portUSING_MPU_WRAPPERS == 1 )
    {
        vPortStoreTaskMPUSettings( &( pxNewTCB->xMPUSettings ), xRegions, pxNewTCB->pxStack, uxStackDepth );
    }
    #else
    {
        /* Avoid compiler warning about unreferenced parameter. */
        ( void ) xRegions;
    }
    #endif

    #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )
    {
        /* Allocate and initialize memory for the task's TLS Block. */
        configINIT_TLS_BLOCK( pxNewTCB->xTLSBlock, pxTopOfStack );
    }
    #endif

    /* Initialize the TCB stack to look as if the task was already running,
     * but had been interrupted by the scheduler. The return address is set
     * to the start of the task function. Once the stack has been initialised
     * the top of stack variable is updated. */
    #if ( portUSING_MPU_WRAPPERS == 1 )
    {
        /* If the port has capability to detect stack overflow,
         * pass the stack end address to the stack initialization
         * function as well. */
        #if ( portHAS_STACK_OVERFLOW_CHECKING == 1 )
        {
            #if ( portSTACK_GROWTH < 0 )
            {
                pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters, xRunPrivileged, &( pxNewTCB->xMPUSettings ) );
            }
            #else /* portSTACK_GROWTH */
            {
                pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters, xRunPrivileged, &( pxNewTCB->xMPUSettings ) );
            }
            #endif /* portSTACK_GROWTH */
        }
        #else /* portHAS_STACK_OVERFLOW_CHECKING */
        {
            pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged, &( pxNewTCB->xMPUSettings ) );
        }
        #endif /* portHAS_STACK_OVERFLOW_CHECKING */
    }
    #else /* portUSING_MPU_WRAPPERS */
    {
        /* If the port has capability to detect stack overflow,
         * pass the stack end address to the stack initialization
         * function as well. */
        #if ( portHAS_STACK_OVERFLOW_CHECKING == 1 )
        {
            #if ( portSTACK_GROWTH < 0 )
            {
                pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters );
            }
            #else /* portSTACK_GROWTH */
            {
                pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters );
            }
            #endif /* portSTACK_GROWTH */
        }
        #else /* portHAS_STACK_OVERFLOW_CHECKING */
        {
            pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters );
        }
        #endif /* portHAS_STACK_OVERFLOW_CHECKING */
    }
    #endif /* portUSING_MPU_WRAPPERS */

    /* Initialize task state and task attributes. */
    #if ( configNUMBER_OF_CORES > 1 )
    {
        pxNewTCB->xTaskRunState = taskTASK_NOT_RUNNING;

        /* Is this an idle task? */
        if( ( ( TaskFunction_t ) pxTaskCode == ( TaskFunction_t ) prvIdleTask ) || ( ( TaskFunction_t ) pxTaskCode == ( TaskFunction_t ) prvPassiveIdleTask ) )
        {
            pxNewTCB->uxTaskAttributes |= taskATTRIBUTE_IS_IDLE;
        }
    }
    #endif /* #if ( configNUMBER_OF_CORES > 1 ) */

    if( pxCreatedTask != NULL )
    {
        /* Pass the handle out in an anonymous way. The handle can be used to
         * change the created task's priority, delete the created task, etc.*/
        *pxCreatedTask = ( TaskHandle_t ) pxNewTCB;
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }
}
  1704. /*-----------------------------------------------------------*/
#if ( configNUMBER_OF_CORES == 1 )

/* Place a newly created task on the ready list, inside a critical section
 * so interrupts cannot observe the task lists mid-update.  If the scheduler
 * is already running and the new task has a higher priority than the
 * current task, a yield is requested on exit. */
    static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
    {
        /* Ensure interrupts don't access the task lists while the lists are being
         * updated. */
        taskENTER_CRITICAL();
        {
            uxCurrentNumberOfTasks = ( UBaseType_t ) ( uxCurrentNumberOfTasks + 1U );

            if( pxCurrentTCB == NULL )
            {
                /* There are no other tasks, or all the other tasks are in
                 * the suspended state - make this the current task. */
                pxCurrentTCB = pxNewTCB;

                if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 )
                {
                    /* This is the first task to be created so do the preliminary
                     * initialisation required. We will not recover if this call
                     * fails, but we will report the failure. */
                    prvInitialiseTaskLists();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* If the scheduler is not already running, make this task the
                 * current task if it is the highest priority task to be created
                 * so far. */
                if( xSchedulerRunning == pdFALSE )
                {
                    if( pxCurrentTCB->uxPriority <= pxNewTCB->uxPriority )
                    {
                        pxCurrentTCB = pxNewTCB;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }

            /* uxTaskNumber changes whenever the task set changes, which lets
             * kernel-aware debuggers know their task lists are stale. */
            uxTaskNumber++;

            #if ( configUSE_TRACE_FACILITY == 1 )
            {
                /* Add a counter into the TCB for tracing only. */
                pxNewTCB->uxTCBNumber = uxTaskNumber;
            }
            #endif /* configUSE_TRACE_FACILITY */
            traceTASK_CREATE( pxNewTCB );

            prvAddTaskToReadyList( pxNewTCB );

            portSETUP_TCB( pxNewTCB );
        }
        taskEXIT_CRITICAL();

        if( xSchedulerRunning != pdFALSE )
        {
            /* If the created task is of a higher priority than the current task
             * then it should run now. */
            taskYIELD_ANY_CORE_IF_USING_PREEMPTION( pxNewTCB );
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }

#else /* #if ( configNUMBER_OF_CORES == 1 ) */

/* SMP variant: the new task is added to the ready list but is not assigned
 * a core here; the per-core yield decision happens while still inside the
 * critical section. */
    static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
    {
        /* Ensure interrupts don't access the task lists while the lists are being
         * updated. */
        taskENTER_CRITICAL();
        {
            uxCurrentNumberOfTasks++;

            if( xSchedulerRunning == pdFALSE )
            {
                if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 )
                {
                    /* This is the first task to be created so do the preliminary
                     * initialisation required. We will not recover if this call
                     * fails, but we will report the failure. */
                    prvInitialiseTaskLists();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                /* All the cores start with idle tasks before the SMP scheduler
                 * is running. Idle tasks are assigned to cores when they are
                 * created in prvCreateIdleTasks(). */
            }

            /* uxTaskNumber changes whenever the task set changes, which lets
             * kernel-aware debuggers know their task lists are stale. */
            uxTaskNumber++;

            #if ( configUSE_TRACE_FACILITY == 1 )
            {
                /* Add a counter into the TCB for tracing only. */
                pxNewTCB->uxTCBNumber = uxTaskNumber;
            }
            #endif /* configUSE_TRACE_FACILITY */
            traceTASK_CREATE( pxNewTCB );

            prvAddTaskToReadyList( pxNewTCB );

            portSETUP_TCB( pxNewTCB );

            if( xSchedulerRunning != pdFALSE )
            {
                /* If the created task is of a higher priority than another
                 * currently running task and preemption is on then it should
                 * run now. */
                taskYIELD_ANY_CORE_IF_USING_PREEMPTION( pxNewTCB );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        taskEXIT_CRITICAL();
    }

#endif /* #if ( configNUMBER_OF_CORES == 1 ) */
  1824. /*-----------------------------------------------------------*/
  1825. #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
  1826. static size_t prvSnprintfReturnValueToCharsWritten( int iSnprintfReturnValue,
  1827. size_t n )
  1828. {
  1829. size_t uxCharsWritten;
  1830. if( iSnprintfReturnValue < 0 )
  1831. {
  1832. /* Encoding error - Return 0 to indicate that nothing
  1833. * was written to the buffer. */
  1834. uxCharsWritten = 0;
  1835. }
  1836. else if( iSnprintfReturnValue >= ( int ) n )
  1837. {
  1838. /* This is the case when the supplied buffer is not
  1839. * large to hold the generated string. Return the
  1840. * number of characters actually written without
  1841. * counting the terminating NULL character. */
  1842. uxCharsWritten = n - 1U;
  1843. }
  1844. else
  1845. {
  1846. /* Complete string was written to the buffer. */
  1847. uxCharsWritten = ( size_t ) iSnprintfReturnValue;
  1848. }
  1849. return uxCharsWritten;
  1850. }
  1851. #endif /* #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) */
  1852. /*-----------------------------------------------------------*/
#if ( INCLUDE_vTaskDelete == 1 )

/* Remove a task from all kernel lists and free (or schedule the freeing of)
 * its TCB and stack.  Passing NULL deletes the calling task.  A task that is
 * still running (or scheduled to yield) cannot be freed immediately; it is
 * parked on the termination list for the idle task to clean up. */
    void vTaskDelete( TaskHandle_t xTaskToDelete )
    {
        TCB_t * pxTCB;
        BaseType_t xDeleteTCBInIdleTask = pdFALSE;
        BaseType_t xTaskIsRunningOrYielding;

        traceENTER_vTaskDelete( xTaskToDelete );

        taskENTER_CRITICAL();
        {
            /* If null is passed in here then it is the calling task that is
             * being deleted. */
            pxTCB = prvGetTCBFromHandle( xTaskToDelete );

            /* Remove task from the ready/delayed list. */
            if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
            {
                /* That was the last task at this priority - clear the ready
                 * priority tracking for it. */
                taskRESET_READY_PRIORITY( pxTCB->uxPriority );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            /* Is the task waiting on an event also? */
            if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
            {
                ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            /* Increment the uxTaskNumber also so kernel aware debuggers can
             * detect that the task lists need re-generating. This is done before
             * portPRE_TASK_DELETE_HOOK() as in the Windows port that macro will
             * not return. */
            uxTaskNumber++;

            /* Use temp variable as distinct sequence points for reading volatile
             * variables prior to a logical operator to ensure compliance with
             * MISRA C 2012 Rule 13.5. */
            xTaskIsRunningOrYielding = taskTASK_IS_RUNNING_OR_SCHEDULED_TO_YIELD( pxTCB );

            /* If the task is running (or yielding), we must add it to the
             * termination list so that an idle task can delete it when it is
             * no longer running. */
            if( ( xSchedulerRunning != pdFALSE ) && ( xTaskIsRunningOrYielding != pdFALSE ) )
            {
                /* A running task or a task which is scheduled to yield is being
                 * deleted. This cannot complete when the task is still running
                 * on a core, as a context switch to another task is required.
                 * Place the task in the termination list. The idle task will check
                 * the termination list and free up any memory allocated by the
                 * scheduler for the TCB and stack of the deleted task. */
                vListInsertEnd( &xTasksWaitingTermination, &( pxTCB->xStateListItem ) );

                /* Increment the ucTasksDeleted variable so the idle task knows
                 * there is a task that has been deleted and that it should therefore
                 * check the xTasksWaitingTermination list. */
                ++uxDeletedTasksWaitingCleanUp;

                /* Call the delete hook before portPRE_TASK_DELETE_HOOK() as
                 * portPRE_TASK_DELETE_HOOK() does not return in the Win32 port. */
                traceTASK_DELETE( pxTCB );

                /* Delete the task TCB in idle task. */
                xDeleteTCBInIdleTask = pdTRUE;

                /* The pre-delete hook is primarily for the Windows simulator,
                 * in which Windows specific clean up operations are performed,
                 * after which it is not possible to yield away from this task -
                 * hence xYieldPending is used to latch that a context switch is
                 * required. */
                #if ( configNUMBER_OF_CORES == 1 )
                    portPRE_TASK_DELETE_HOOK( pxTCB, &( xYieldPendings[ 0 ] ) );
                #else
                    portPRE_TASK_DELETE_HOOK( pxTCB, &( xYieldPendings[ pxTCB->xTaskRunState ] ) );
                #endif

                /* In the case of SMP, it is possible that the task being deleted
                 * is running on another core. We must evict the task before
                 * exiting the critical section to ensure that the task cannot
                 * take an action which puts it back on ready/state/event list,
                 * thereby nullifying the delete operation. Once evicted, the
                 * task won't be scheduled ever as it will no longer be on the
                 * ready list. */
                #if ( configNUMBER_OF_CORES > 1 )
                {
                    if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
                    {
                        if( pxTCB->xTaskRunState == ( BaseType_t ) portGET_CORE_ID() )
                        {
                            /* Deleting ourselves - yield away from this core. */
                            configASSERT( uxSchedulerSuspended == 0 );
                            taskYIELD_WITHIN_API();
                        }
                        else
                        {
                            /* Running on another core - force that core to yield. */
                            prvYieldCore( pxTCB->xTaskRunState );
                        }
                    }
                }
                #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
            }
            else
            {
                /* The task is not running: its memory can be reclaimed
                 * immediately (outside the critical section, below). */
                --uxCurrentNumberOfTasks;
                traceTASK_DELETE( pxTCB );

                /* Reset the next expected unblock time in case it referred to
                 * the task that has just been deleted. */
                prvResetNextTaskUnblockTime();
            }
        }
        taskEXIT_CRITICAL();

        /* If the task is not deleting itself, call prvDeleteTCB from outside of
         * critical section. If a task deletes itself, prvDeleteTCB is called
         * from prvCheckTasksWaitingTermination which is called from Idle task. */
        if( xDeleteTCBInIdleTask != pdTRUE )
        {
            prvDeleteTCB( pxTCB );
        }

        /* Force a reschedule if it is the currently running task that has just
         * been deleted. */
        #if ( configNUMBER_OF_CORES == 1 )
        {
            if( xSchedulerRunning != pdFALSE )
            {
                if( pxTCB == pxCurrentTCB )
                {
                    configASSERT( uxSchedulerSuspended == 0 );
                    taskYIELD_WITHIN_API();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        #endif /* #if ( configNUMBER_OF_CORES == 1 ) */

        traceRETURN_vTaskDelete();
    }

#endif /* INCLUDE_vTaskDelete */
  1985. /*-----------------------------------------------------------*/
#if ( INCLUDE_xTaskDelayUntil == 1 )

    /* Block the calling task until the absolute tick time
     * ( *pxPreviousWakeTime + xTimeIncrement ) is reached, giving the task a
     * fixed execution period.  *pxPreviousWakeTime holds the time at which
     * the task last unblocked and is updated here to the next wake time ready
     * for the following call.  Returns pdTRUE if the task was actually placed
     * in the Blocked state, pdFALSE if the wake time had already passed. */
    BaseType_t xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
                                const TickType_t xTimeIncrement )
    {
        TickType_t xTimeToWake;
        BaseType_t xAlreadyYielded, xShouldDelay = pdFALSE;

        traceENTER_xTaskDelayUntil( pxPreviousWakeTime, xTimeIncrement );

        configASSERT( pxPreviousWakeTime );
        configASSERT( ( xTimeIncrement > 0U ) );

        vTaskSuspendAll();
        {
            /* Minor optimisation.  The tick count cannot change in this
             * block. */
            const TickType_t xConstTickCount = xTickCount;

            configASSERT( uxSchedulerSuspended == 1U );

            /* Generate the tick time at which the task wants to wake.  This
             * unsigned addition may wrap; the overflow cases are handled
             * explicitly below. */
            xTimeToWake = *pxPreviousWakeTime + xTimeIncrement;

            if( xConstTickCount < *pxPreviousWakeTime )
            {
                /* The tick count has overflowed since this function was
                 * last called.  In this case the only time we should ever
                 * actually delay is if the wake time has also overflowed,
                 * and the wake time is greater than the tick time.  When this
                 * is the case it is as if neither time had overflowed. */
                if( ( xTimeToWake < *pxPreviousWakeTime ) && ( xTimeToWake > xConstTickCount ) )
                {
                    xShouldDelay = pdTRUE;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* The tick time has not overflowed.  In this case we will
                 * delay if either the wake time has overflowed, and/or the
                 * tick time is less than the wake time. */
                if( ( xTimeToWake < *pxPreviousWakeTime ) || ( xTimeToWake > xConstTickCount ) )
                {
                    xShouldDelay = pdTRUE;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }

            /* Update the wake time ready for the next call. */
            *pxPreviousWakeTime = xTimeToWake;

            if( xShouldDelay != pdFALSE )
            {
                traceTASK_DELAY_UNTIL( xTimeToWake );

                /* prvAddCurrentTaskToDelayedList() needs the block time, not
                 * the time to wake, so subtract the current tick count. */
                prvAddCurrentTaskToDelayedList( xTimeToWake - xConstTickCount, pdFALSE );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        xAlreadyYielded = xTaskResumeAll();

        /* Force a reschedule if xTaskResumeAll has not already done so, we may
         * have put ourselves to sleep. */
        if( xAlreadyYielded == pdFALSE )
        {
            taskYIELD_WITHIN_API();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_xTaskDelayUntil( xShouldDelay );

        return xShouldDelay;
    }

#endif /* INCLUDE_xTaskDelayUntil */
  2062. /*-----------------------------------------------------------*/
  2063. #if ( INCLUDE_vTaskDelay == 1 )
  2064. void vTaskDelay( const TickType_t xTicksToDelay )
  2065. {
  2066. BaseType_t xAlreadyYielded = pdFALSE;
  2067. traceENTER_vTaskDelay( xTicksToDelay );
  2068. /* A delay time of zero just forces a reschedule. */
  2069. if( xTicksToDelay > ( TickType_t ) 0U )
  2070. {
  2071. vTaskSuspendAll();
  2072. {
  2073. configASSERT( uxSchedulerSuspended == 1U );
  2074. traceTASK_DELAY();
  2075. /* A task that is removed from the event list while the
  2076. * scheduler is suspended will not get placed in the ready
  2077. * list or removed from the blocked list until the scheduler
  2078. * is resumed.
  2079. *
  2080. * This task cannot be in an event list as it is the currently
  2081. * executing task. */
  2082. prvAddCurrentTaskToDelayedList( xTicksToDelay, pdFALSE );
  2083. }
  2084. xAlreadyYielded = xTaskResumeAll();
  2085. }
  2086. else
  2087. {
  2088. mtCOVERAGE_TEST_MARKER();
  2089. }
  2090. /* Force a reschedule if xTaskResumeAll has not already done so, we may
  2091. * have put ourselves to sleep. */
  2092. if( xAlreadyYielded == pdFALSE )
  2093. {
  2094. taskYIELD_WITHIN_API();
  2095. }
  2096. else
  2097. {
  2098. mtCOVERAGE_TEST_MARKER();
  2099. }
  2100. traceRETURN_vTaskDelay();
  2101. }
  2102. #endif /* INCLUDE_vTaskDelay */
  2103. /*-----------------------------------------------------------*/
#if ( ( INCLUDE_eTaskGetState == 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_xTaskAbortDelay == 1 ) )

    /* Return the state (Running, Ready, Blocked, Suspended or Deleted) of
     * the task referenced by xTask.  The state is inferred from which kernel
     * list the task's state and event list items currently reside on. */
    eTaskState eTaskGetState( TaskHandle_t xTask )
    {
        eTaskState eReturn;
        List_t const * pxStateList;
        List_t const * pxEventList;
        List_t const * pxDelayedList;
        List_t const * pxOverflowedDelayedList;
        const TCB_t * const pxTCB = xTask;

        traceENTER_eTaskGetState( xTask );

        configASSERT( pxTCB );

        #if ( configNUMBER_OF_CORES == 1 )
            if( pxTCB == pxCurrentTCB )
            {
                /* The task calling this function is querying its own state. */
                eReturn = eRunning;
            }
            else
        #endif
        {
            taskENTER_CRITICAL();
            {
                /* Snapshot the list pointers inside a critical section so the
                 * task cannot move between lists while they are being read. */
                pxStateList = listLIST_ITEM_CONTAINER( &( pxTCB->xStateListItem ) );
                pxEventList = listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) );
                pxDelayedList = pxDelayedTaskList;
                pxOverflowedDelayedList = pxOverflowDelayedTaskList;
            }
            taskEXIT_CRITICAL();

            if( pxEventList == &xPendingReadyList )
            {
                /* The task has been placed on the pending ready list, so its
                 * state is eReady regardless of what list the task's state list
                 * item is currently placed on. */
                eReturn = eReady;
            }
            else if( ( pxStateList == pxDelayedList ) || ( pxStateList == pxOverflowedDelayedList ) )
            {
                /* The task being queried is referenced from one of the Blocked
                 * lists. */
                eReturn = eBlocked;
            }

            #if ( INCLUDE_vTaskSuspend == 1 )
                else if( pxStateList == &xSuspendedTaskList )
                {
                    /* The task being queried is referenced from the suspended
                     * list.  Is it genuinely suspended or is it blocked
                     * indefinitely? */
                    if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL )
                    {
                        #if ( configUSE_TASK_NOTIFICATIONS == 1 )
                        {
                            BaseType_t x;

                            /* The task does not appear on the event list item of
                             * any of the RTOS objects, but could still be in the
                             * blocked state if it is waiting on its notification
                             * rather than waiting on an object.  If not, it is
                             * suspended. */
                            eReturn = eSuspended;

                            for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ )
                            {
                                if( pxTCB->ucNotifyState[ x ] == taskWAITING_NOTIFICATION )
                                {
                                    eReturn = eBlocked;
                                    break;
                                }
                            }
                        }
                        #else /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
                        {
                            eReturn = eSuspended;
                        }
                        #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
                    }
                    else
                    {
                        /* Still linked to an event list, so the task is
                         * blocked (with an infinite timeout), not suspended. */
                        eReturn = eBlocked;
                    }
                }
            #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */

            #if ( INCLUDE_vTaskDelete == 1 )
                else if( ( pxStateList == &xTasksWaitingTermination ) || ( pxStateList == NULL ) )
                {
                    /* The task being queried is referenced from the deleted
                     * tasks list, or it is not referenced from any lists at
                     * all. */
                    eReturn = eDeleted;
                }
            #endif

            else
            {
                #if ( configNUMBER_OF_CORES == 1 )
                {
                    /* If the task is not in any other state, it must be in the
                     * Ready (including pending ready) state. */
                    eReturn = eReady;
                }
                #else /* #if ( configNUMBER_OF_CORES == 1 ) */
                {
                    if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
                    {
                        /* Is it actively running on a core? */
                        eReturn = eRunning;
                    }
                    else
                    {
                        /* If the task is not in any other state, it must be in the
                         * Ready (including pending ready) state. */
                        eReturn = eReady;
                    }
                }
                #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
            }
        }

        traceRETURN_eTaskGetState( eReturn );

        return eReturn;
    }

#endif /* INCLUDE_eTaskGetState */
  2221. /*-----------------------------------------------------------*/
  2222. #if ( INCLUDE_uxTaskPriorityGet == 1 )
  2223. UBaseType_t uxTaskPriorityGet( const TaskHandle_t xTask )
  2224. {
  2225. TCB_t const * pxTCB;
  2226. UBaseType_t uxReturn;
  2227. traceENTER_uxTaskPriorityGet( xTask );
  2228. taskENTER_CRITICAL();
  2229. {
  2230. /* If null is passed in here then it is the priority of the task
  2231. * that called uxTaskPriorityGet() that is being queried. */
  2232. pxTCB = prvGetTCBFromHandle( xTask );
  2233. uxReturn = pxTCB->uxPriority;
  2234. }
  2235. taskEXIT_CRITICAL();
  2236. traceRETURN_uxTaskPriorityGet( uxReturn );
  2237. return uxReturn;
  2238. }
  2239. #endif /* INCLUDE_uxTaskPriorityGet */
  2240. /*-----------------------------------------------------------*/
  2241. #if ( INCLUDE_uxTaskPriorityGet == 1 )
  2242. UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask )
  2243. {
  2244. TCB_t const * pxTCB;
  2245. UBaseType_t uxReturn;
  2246. UBaseType_t uxSavedInterruptStatus;
  2247. traceENTER_uxTaskPriorityGetFromISR( xTask );
  2248. /* RTOS ports that support interrupt nesting have the concept of a
  2249. * maximum system call (or maximum API call) interrupt priority.
  2250. * Interrupts that are above the maximum system call priority are keep
  2251. * permanently enabled, even when the RTOS kernel is in a critical section,
  2252. * but cannot make any calls to FreeRTOS API functions. If configASSERT()
  2253. * is defined in FreeRTOSConfig.h then
  2254. * portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
  2255. * failure if a FreeRTOS API function is called from an interrupt that has
  2256. * been assigned a priority above the configured maximum system call
  2257. * priority. Only FreeRTOS functions that end in FromISR can be called
  2258. * from interrupts that have been assigned a priority at or (logically)
  2259. * below the maximum system call interrupt priority. FreeRTOS maintains a
  2260. * separate interrupt safe API to ensure interrupt entry is as fast and as
  2261. * simple as possible. More information (albeit Cortex-M specific) is
  2262. * provided on the following link:
  2263. * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
  2264. portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
  2265. /* MISRA Ref 4.7.1 [Return value shall be checked] */
  2266. /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
  2267. /* coverity[misra_c_2012_directive_4_7_violation] */
  2268. uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
  2269. {
  2270. /* If null is passed in here then it is the priority of the calling
  2271. * task that is being queried. */
  2272. pxTCB = prvGetTCBFromHandle( xTask );
  2273. uxReturn = pxTCB->uxPriority;
  2274. }
  2275. taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
  2276. traceRETURN_uxTaskPriorityGetFromISR( uxReturn );
  2277. return uxReturn;
  2278. }
  2279. #endif /* INCLUDE_uxTaskPriorityGet */
  2280. /*-----------------------------------------------------------*/
  2281. #if ( ( INCLUDE_uxTaskPriorityGet == 1 ) && ( configUSE_MUTEXES == 1 ) )
  2282. UBaseType_t uxTaskBasePriorityGet( const TaskHandle_t xTask )
  2283. {
  2284. TCB_t const * pxTCB;
  2285. UBaseType_t uxReturn;
  2286. traceENTER_uxTaskBasePriorityGet( xTask );
  2287. taskENTER_CRITICAL();
  2288. {
  2289. /* If null is passed in here then it is the base priority of the task
  2290. * that called uxTaskBasePriorityGet() that is being queried. */
  2291. pxTCB = prvGetTCBFromHandle( xTask );
  2292. uxReturn = pxTCB->uxBasePriority;
  2293. }
  2294. taskEXIT_CRITICAL();
  2295. traceRETURN_uxTaskBasePriorityGet( uxReturn );
  2296. return uxReturn;
  2297. }
  2298. #endif /* #if ( ( INCLUDE_uxTaskPriorityGet == 1 ) && ( configUSE_MUTEXES == 1 ) ) */
  2299. /*-----------------------------------------------------------*/
  2300. #if ( ( INCLUDE_uxTaskPriorityGet == 1 ) && ( configUSE_MUTEXES == 1 ) )
  2301. UBaseType_t uxTaskBasePriorityGetFromISR( const TaskHandle_t xTask )
  2302. {
  2303. TCB_t const * pxTCB;
  2304. UBaseType_t uxReturn;
  2305. UBaseType_t uxSavedInterruptStatus;
  2306. traceENTER_uxTaskBasePriorityGetFromISR( xTask );
  2307. /* RTOS ports that support interrupt nesting have the concept of a
  2308. * maximum system call (or maximum API call) interrupt priority.
  2309. * Interrupts that are above the maximum system call priority are keep
  2310. * permanently enabled, even when the RTOS kernel is in a critical section,
  2311. * but cannot make any calls to FreeRTOS API functions. If configASSERT()
  2312. * is defined in FreeRTOSConfig.h then
  2313. * portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
  2314. * failure if a FreeRTOS API function is called from an interrupt that has
  2315. * been assigned a priority above the configured maximum system call
  2316. * priority. Only FreeRTOS functions that end in FromISR can be called
  2317. * from interrupts that have been assigned a priority at or (logically)
  2318. * below the maximum system call interrupt priority. FreeRTOS maintains a
  2319. * separate interrupt safe API to ensure interrupt entry is as fast and as
  2320. * simple as possible. More information (albeit Cortex-M specific) is
  2321. * provided on the following link:
  2322. * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
  2323. portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
  2324. /* MISRA Ref 4.7.1 [Return value shall be checked] */
  2325. /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
  2326. /* coverity[misra_c_2012_directive_4_7_violation] */
  2327. uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
  2328. {
  2329. /* If null is passed in here then it is the base priority of the calling
  2330. * task that is being queried. */
  2331. pxTCB = prvGetTCBFromHandle( xTask );
  2332. uxReturn = pxTCB->uxBasePriority;
  2333. }
  2334. taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
  2335. traceRETURN_uxTaskBasePriorityGetFromISR( uxReturn );
  2336. return uxReturn;
  2337. }
  2338. #endif /* #if ( ( INCLUDE_uxTaskPriorityGet == 1 ) && ( configUSE_MUTEXES == 1 ) ) */
  2339. /*-----------------------------------------------------------*/
#if ( INCLUDE_vTaskPrioritySet == 1 )

    /* Change the priority of xTask to uxNewPriority.  A NULL handle changes
     * the priority of the calling task.  The task is moved between ready
     * lists as required, and a yield is requested whenever the change could
     * mean a different task should now be running. */
    void vTaskPrioritySet( TaskHandle_t xTask,
                           UBaseType_t uxNewPriority )
    {
        TCB_t * pxTCB;
        UBaseType_t uxCurrentBasePriority, uxPriorityUsedOnEntry;
        BaseType_t xYieldRequired = pdFALSE;

        #if ( configNUMBER_OF_CORES > 1 )
            BaseType_t xYieldForTask = pdFALSE;
        #endif

        traceENTER_vTaskPrioritySet( xTask, uxNewPriority );

        configASSERT( uxNewPriority < configMAX_PRIORITIES );

        /* Ensure the new priority is valid.  Out-of-range values are capped
         * to the highest legal priority. */
        if( uxNewPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
        {
            uxNewPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        taskENTER_CRITICAL();
        {
            /* If null is passed in here then it is the priority of the calling
             * task that is being changed. */
            pxTCB = prvGetTCBFromHandle( xTask );

            traceTASK_PRIORITY_SET( pxTCB, uxNewPriority );

            #if ( configUSE_MUTEXES == 1 )
            {
                /* The task may be running at an inherited priority, so the
                 * comparison below is made against the base priority. */
                uxCurrentBasePriority = pxTCB->uxBasePriority;
            }
            #else
            {
                uxCurrentBasePriority = pxTCB->uxPriority;
            }
            #endif

            if( uxCurrentBasePriority != uxNewPriority )
            {
                /* The priority change may have readied a task of higher
                 * priority than a running task. */
                if( uxNewPriority > uxCurrentBasePriority )
                {
                    #if ( configNUMBER_OF_CORES == 1 )
                    {
                        if( pxTCB != pxCurrentTCB )
                        {
                            /* The priority of a task other than the currently
                             * running task is being raised.  Is the priority being
                             * raised above that of the running task? */
                            if( uxNewPriority > pxCurrentTCB->uxPriority )
                            {
                                xYieldRequired = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            /* The priority of the running task is being raised,
                             * but the running task must already be the highest
                             * priority task able to run so no yield is required. */
                        }
                    }
                    #else /* #if ( configNUMBER_OF_CORES == 1 ) */
                    {
                        /* The priority of a task is being raised so
                         * perform a yield for this task later. */
                        xYieldForTask = pdTRUE;
                    }
                    #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
                }
                else if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
                {
                    /* Setting the priority of a running task down means
                     * there may now be another task of higher priority that
                     * is ready to execute.  Only yield if preemption has not
                     * been disabled for the task. */
                    #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
                        if( pxTCB->xPreemptionDisable == pdFALSE )
                    #endif
                    {
                        xYieldRequired = pdTRUE;
                    }
                }
                else
                {
                    /* Setting the priority of any other task down does not
                     * require a yield as the running task must be above the
                     * new priority of the task being modified. */
                }

                /* Remember the ready list the task might be referenced from
                 * before its uxPriority member is changed so the
                 * taskRESET_READY_PRIORITY() macro can function correctly. */
                uxPriorityUsedOnEntry = pxTCB->uxPriority;

                #if ( configUSE_MUTEXES == 1 )
                {
                    /* Only change the priority being used if the task is not
                     * currently using an inherited priority or the new priority
                     * is bigger than the inherited priority. */
                    if( ( pxTCB->uxBasePriority == pxTCB->uxPriority ) || ( uxNewPriority > pxTCB->uxPriority ) )
                    {
                        pxTCB->uxPriority = uxNewPriority;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* The base priority gets set whatever. */
                    pxTCB->uxBasePriority = uxNewPriority;
                }
                #else /* if ( configUSE_MUTEXES == 1 ) */
                {
                    pxTCB->uxPriority = uxNewPriority;
                }
                #endif /* if ( configUSE_MUTEXES == 1 ) */

                /* Only reset the event list item value if the value is not
                 * being used for anything else. */
                if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == ( ( TickType_t ) 0U ) )
                {
                    listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxNewPriority ) );
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                /* If the task is in the blocked or suspended list we need do
                 * nothing more than change its priority variable. However, if
                 * the task is in a ready list it needs to be removed and placed
                 * in the list appropriate to its new priority. */
                if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE )
                {
                    /* The task is currently in its ready list - remove before
                     * adding it to its new ready list.  As we are in a critical
                     * section we can do this even if the scheduler is suspended. */
                    if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
                    {
                        /* It is known that the task is in its ready list so
                         * there is no need to check again and the port level
                         * reset macro can be called directly. */
                        portRESET_READY_PRIORITY( uxPriorityUsedOnEntry, uxTopReadyPriority );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    prvAddTaskToReadyList( pxTCB );
                }
                else
                {
                    #if ( configNUMBER_OF_CORES == 1 )
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                    #else
                    {
                        /* It's possible that xYieldForTask was already set to pdTRUE because
                         * its priority is being raised. However, since it is not in a ready list
                         * we don't actually need to yield for it. */
                        xYieldForTask = pdFALSE;
                    }
                    #endif
                }

                if( xYieldRequired != pdFALSE )
                {
                    /* The running task priority is set down. Request the task to yield. */
                    taskYIELD_TASK_CORE_IF_USING_PREEMPTION( pxTCB );
                }
                else
                {
                    #if ( configNUMBER_OF_CORES > 1 )
                        if( xYieldForTask != pdFALSE )
                        {
                            /* The priority of the task is being raised. If a running
                             * task has priority lower than this task, it should yield
                             * for this task. */
                            taskYIELD_ANY_CORE_IF_USING_PREEMPTION( pxTCB );
                        }
                        else
                    #endif /* if ( configNUMBER_OF_CORES > 1 ) */
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }

                /* Remove compiler warning about unused variables when the port
                 * optimised task selection is not being used. */
                ( void ) uxPriorityUsedOnEntry;
            }
        }
        taskEXIT_CRITICAL();

        traceRETURN_vTaskPrioritySet();
    }

#endif /* INCLUDE_vTaskPrioritySet */
  2533. /*-----------------------------------------------------------*/
#if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )

    /* Set the mask of cores on which xTask (or the calling task when xTask
     * is NULL) is allowed to run.  If the task is currently running on a
     * core the new mask excludes, that core is asked to yield; otherwise the
     * task may be scheduled onto any newly permitted core. */
    void vTaskCoreAffinitySet( const TaskHandle_t xTask,
                               UBaseType_t uxCoreAffinityMask )
    {
        TCB_t * pxTCB;
        BaseType_t xCoreID;
        UBaseType_t uxPrevCoreAffinityMask;

        #if ( configUSE_PREEMPTION == 1 )
            UBaseType_t uxPrevNotAllowedCores;
        #endif

        traceENTER_vTaskCoreAffinitySet( xTask, uxCoreAffinityMask );

        taskENTER_CRITICAL();
        {
            pxTCB = prvGetTCBFromHandle( xTask );

            uxPrevCoreAffinityMask = pxTCB->uxCoreAffinityMask;
            pxTCB->uxCoreAffinityMask = uxCoreAffinityMask;

            if( xSchedulerRunning != pdFALSE )
            {
                if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
                {
                    xCoreID = ( BaseType_t ) pxTCB->xTaskRunState;

                    /* If the task can no longer run on the core it was running,
                     * request the core to yield. */
                    if( ( uxCoreAffinityMask & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) == 0U )
                    {
                        prvYieldCore( xCoreID );
                    }
                }
                else
                {
                    #if ( configUSE_PREEMPTION == 1 )
                    {
                        /* Calculate the cores on which this task was not allowed to
                         * run previously. */
                        uxPrevNotAllowedCores = ( ~uxPrevCoreAffinityMask ) & ( ( 1U << configNUMBER_OF_CORES ) - 1U );

                        /* Does the new core mask enable this task to run on any of the
                         * previously not allowed cores? If yes, check if this task can be
                         * scheduled on any of those cores. */
                        if( ( uxPrevNotAllowedCores & uxCoreAffinityMask ) != 0U )
                        {
                            prvYieldForTask( pxTCB );
                        }
                    }
                    #else /* #if( configUSE_PREEMPTION == 1 ) */
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                    #endif /* #if( configUSE_PREEMPTION == 1 ) */
                }
            }
        }
        taskEXIT_CRITICAL();

        traceRETURN_vTaskCoreAffinitySet();
    }

#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */
  2589. /*-----------------------------------------------------------*/
  2590. #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
  2591. UBaseType_t vTaskCoreAffinityGet( ConstTaskHandle_t xTask )
  2592. {
  2593. const TCB_t * pxTCB;
  2594. UBaseType_t uxCoreAffinityMask;
  2595. traceENTER_vTaskCoreAffinityGet( xTask );
  2596. taskENTER_CRITICAL();
  2597. {
  2598. pxTCB = prvGetTCBFromHandle( xTask );
  2599. uxCoreAffinityMask = pxTCB->uxCoreAffinityMask;
  2600. }
  2601. taskEXIT_CRITICAL();
  2602. traceRETURN_vTaskCoreAffinityGet( uxCoreAffinityMask );
  2603. return uxCoreAffinityMask;
  2604. }
  2605. #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */
  2606. /*-----------------------------------------------------------*/
  2607. #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
  2608. void vTaskPreemptionDisable( const TaskHandle_t xTask )
  2609. {
  2610. TCB_t * pxTCB;
  2611. traceENTER_vTaskPreemptionDisable( xTask );
  2612. taskENTER_CRITICAL();
  2613. {
  2614. pxTCB = prvGetTCBFromHandle( xTask );
  2615. pxTCB->xPreemptionDisable = pdTRUE;
  2616. }
  2617. taskEXIT_CRITICAL();
  2618. traceRETURN_vTaskPreemptionDisable();
  2619. }
  2620. #endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
  2621. /*-----------------------------------------------------------*/
  2622. #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
  2623. void vTaskPreemptionEnable( const TaskHandle_t xTask )
  2624. {
  2625. TCB_t * pxTCB;
  2626. BaseType_t xCoreID;
  2627. traceENTER_vTaskPreemptionEnable( xTask );
  2628. taskENTER_CRITICAL();
  2629. {
  2630. pxTCB = prvGetTCBFromHandle( xTask );
  2631. pxTCB->xPreemptionDisable = pdFALSE;
  2632. if( xSchedulerRunning != pdFALSE )
  2633. {
  2634. if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
  2635. {
  2636. xCoreID = ( BaseType_t ) pxTCB->xTaskRunState;
  2637. prvYieldCore( xCoreID );
  2638. }
  2639. }
  2640. }
  2641. taskEXIT_CRITICAL();
  2642. traceRETURN_vTaskPreemptionEnable();
  2643. }
  2644. #endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
  2645. /*-----------------------------------------------------------*/
#if ( INCLUDE_vTaskSuspend == 1 )

    /* Place xTaskToSuspend (or the calling task when NULL) into the
     * Suspended state.  A suspended task is never scheduled, whatever its
     * priority, until it is resumed. */
    void vTaskSuspend( TaskHandle_t xTaskToSuspend )
    {
        TCB_t * pxTCB;

        traceENTER_vTaskSuspend( xTaskToSuspend );

        taskENTER_CRITICAL();
        {
            /* If null is passed in here then it is the running task that is
             * being suspended. */
            pxTCB = prvGetTCBFromHandle( xTaskToSuspend );

            traceTASK_SUSPEND( pxTCB );

            /* Remove task from the ready/delayed list and place in the
             * suspended list. */
            if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
            {
                /* The task was the last one on its ready list, so clear the
                 * corresponding ready-priority tracking. */
                taskRESET_READY_PRIORITY( pxTCB->uxPriority );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            /* Is the task waiting on an event also? */
            if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
            {
                ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            vListInsertEnd( &xSuspendedTaskList, &( pxTCB->xStateListItem ) );

            #if ( configUSE_TASK_NOTIFICATIONS == 1 )
            {
                BaseType_t x;

                for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ )
                {
                    if( pxTCB->ucNotifyState[ x ] == taskWAITING_NOTIFICATION )
                    {
                        /* The task was blocked to wait for a notification, but is
                         * now suspended, so no notification was received. */
                        pxTCB->ucNotifyState[ x ] = taskNOT_WAITING_NOTIFICATION;
                    }
                }
            }
            #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */

            /* In the case of SMP, it is possible that the task being suspended
             * is running on another core. We must evict the task before
             * exiting the critical section to ensure that the task cannot
             * take an action which puts it back on ready/state/event list,
             * thereby nullifying the suspend operation. Once evicted, the
             * task won't be scheduled before it is resumed as it will no longer
             * be on the ready list. */
            #if ( configNUMBER_OF_CORES > 1 )
            {
                if( xSchedulerRunning != pdFALSE )
                {
                    /* Reset the next expected unblock time in case it referred to the
                     * task that is now in the Suspended state. */
                    prvResetNextTaskUnblockTime();

                    if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
                    {
                        if( pxTCB->xTaskRunState == ( BaseType_t ) portGET_CORE_ID() )
                        {
                            /* The current task has just been suspended. */
                            configASSERT( uxSchedulerSuspended == 0 );
                            vTaskYieldWithinAPI();
                        }
                        else
                        {
                            /* The task is running on another core - ask that
                             * core to yield so the task stops executing. */
                            prvYieldCore( pxTCB->xTaskRunState );
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
        }
        taskEXIT_CRITICAL();

        #if ( configNUMBER_OF_CORES == 1 )
        {
            UBaseType_t uxCurrentListLength;

            if( xSchedulerRunning != pdFALSE )
            {
                /* Reset the next expected unblock time in case it referred to the
                 * task that is now in the Suspended state. */
                taskENTER_CRITICAL();
                {
                    prvResetNextTaskUnblockTime();
                }
                taskEXIT_CRITICAL();
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            if( pxTCB == pxCurrentTCB )
            {
                if( xSchedulerRunning != pdFALSE )
                {
                    /* The current task has just been suspended. */
                    configASSERT( uxSchedulerSuspended == 0 );
                    portYIELD_WITHIN_API();
                }
                else
                {
                    /* The scheduler is not running, but the task that was pointed
                     * to by pxCurrentTCB has just been suspended and pxCurrentTCB
                     * must be adjusted to point to a different task. */

                    /* Use a temp variable as a distinct sequence point for reading
                     * volatile variables prior to a comparison to ensure compliance
                     * with MISRA C 2012 Rule 13.2. */
                    uxCurrentListLength = listCURRENT_LIST_LENGTH( &xSuspendedTaskList );

                    if( uxCurrentListLength == uxCurrentNumberOfTasks )
                    {
                        /* No other tasks are ready, so set pxCurrentTCB back to
                         * NULL so when the next task is created pxCurrentTCB will
                         * be set to point to it no matter what its relative priority
                         * is. */
                        pxCurrentTCB = NULL;
                    }
                    else
                    {
                        vTaskSwitchContext();
                    }
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        #endif /* #if ( configNUMBER_OF_CORES == 1 ) */

        traceRETURN_vTaskSuspend();
    }

#endif /* INCLUDE_vTaskSuspend */
  2788. /*-----------------------------------------------------------*/
#if ( INCLUDE_vTaskSuspend == 1 )

    /* Return pdTRUE only if xTask is genuinely in the Suspended state -
     * i.e. it is on the suspended list, has not already been readied again
     * from an ISR, is not linked to any event list, and is not waiting on a
     * task notification.  Otherwise return pdFALSE.
     *
     * Accesses xPendingReadyList so must be called from a critical
     * section. */
    static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask )
    {
        BaseType_t xReturn = pdFALSE;
        const TCB_t * const pxTCB = xTask;

        /* It does not make sense to check if the calling task is suspended. */
        configASSERT( xTask );

        /* Is the task being resumed actually in the suspended list? */
        if( listIS_CONTAINED_WITHIN( &xSuspendedTaskList, &( pxTCB->xStateListItem ) ) != pdFALSE )
        {
            /* Has the task already been resumed from within an ISR? */
            if( listIS_CONTAINED_WITHIN( &xPendingReadyList, &( pxTCB->xEventListItem ) ) == pdFALSE )
            {
                /* Is it in the suspended list because it is in the Suspended
                 * state, or because it is blocked with no timeout? */
                if( listIS_CONTAINED_WITHIN( NULL, &( pxTCB->xEventListItem ) ) != pdFALSE )
                {
                    #if ( configUSE_TASK_NOTIFICATIONS == 1 )
                    {
                        BaseType_t x;

                        /* The task does not appear on the event list item of
                         * any of the RTOS objects, but could still be in the
                         * blocked state if it is waiting on its notification
                         * rather than waiting on an object.  If not, it is
                         * suspended. */
                        xReturn = pdTRUE;

                        for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ )
                        {
                            if( pxTCB->ucNotifyState[ x ] == taskWAITING_NOTIFICATION )
                            {
                                xReturn = pdFALSE;
                                break;
                            }
                        }
                    }
                    #else /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
                    {
                        xReturn = pdTRUE;
                    }
                    #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        return xReturn;
    }

#endif /* INCLUDE_vTaskSuspend */
  2849. /*-----------------------------------------------------------*/
#if ( INCLUDE_vTaskSuspend == 1 )

    /* Move a task previously suspended with vTaskSuspend() back into the
     * Ready state.  A handle that does not refer to a genuinely suspended
     * task (checked via prvTaskIsTaskSuspended()) is left untouched. */
    void vTaskResume( TaskHandle_t xTaskToResume )
    {
        TCB_t * const pxTCB = xTaskToResume;

        traceENTER_vTaskResume( xTaskToResume );

        /* It does not make sense to resume the calling task. */
        configASSERT( xTaskToResume );

        #if ( configNUMBER_OF_CORES == 1 )

            /* The parameter cannot be NULL as it is impossible to resume the
             * currently executing task. */
            if( ( pxTCB != pxCurrentTCB ) && ( pxTCB != NULL ) )
        #else

            /* The parameter cannot be NULL as it is impossible to resume the
             * currently executing task. It is also impossible to resume a task
             * that is actively running on another core but it is not safe
             * to check their run state here. Therefore, we get into a critical
             * section and check if the task is actually suspended or not. */
            if( pxTCB != NULL )
        #endif
        {
            taskENTER_CRITICAL();
            {
                if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
                {
                    traceTASK_RESUME( pxTCB );

                    /* The ready list can be accessed even if the scheduler is
                     * suspended because this is inside a critical section. */
                    ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
                    prvAddTaskToReadyList( pxTCB );

                    /* This yield may not cause the task just resumed to run,
                     * but will leave the lists in the correct state for the
                     * next yield. */
                    taskYIELD_ANY_CORE_IF_USING_PREEMPTION( pxTCB );
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            taskEXIT_CRITICAL();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_vTaskResume();
    }

#endif /* INCLUDE_vTaskSuspend */
  2898. /*-----------------------------------------------------------*/
#if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) )

    /* Interrupt-safe version of vTaskResume().  Returns pdTRUE if resuming
     * the task should result in a context switch, so the ISR can invoke the
     * port's yield-from-ISR mechanism before it completes. */
    BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume )
    {
        BaseType_t xYieldRequired = pdFALSE;
        TCB_t * const pxTCB = xTaskToResume;
        UBaseType_t uxSavedInterruptStatus;

        traceENTER_xTaskResumeFromISR( xTaskToResume );

        configASSERT( xTaskToResume );

        /* RTOS ports that support interrupt nesting have the concept of a
         * maximum system call (or maximum API call) interrupt priority.
         * Interrupts that are above the maximum system call priority are kept
         * permanently enabled, even when the RTOS kernel is in a critical section,
         * but cannot make any calls to FreeRTOS API functions.  If configASSERT()
         * is defined in FreeRTOSConfig.h then
         * portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
         * failure if a FreeRTOS API function is called from an interrupt that has
         * been assigned a priority above the configured maximum system call
         * priority.  Only FreeRTOS functions that end in FromISR can be called
         * from interrupts that have been assigned a priority at or (logically)
         * below the maximum system call interrupt priority.  FreeRTOS maintains a
         * separate interrupt safe API to ensure interrupt entry is as fast and as
         * simple as possible.  More information (albeit Cortex-M specific) is
         * provided on the following link:
         * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
        portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

        /* MISRA Ref 4.7.1 [Return value shall be checked] */
        /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
        /* coverity[misra_c_2012_directive_4_7_violation] */
        uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
        {
            if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
            {
                traceTASK_RESUME_FROM_ISR( pxTCB );

                /* Check the ready lists can be accessed. */
                if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
                {
                    #if ( configNUMBER_OF_CORES == 1 )
                    {
                        /* Ready lists can be accessed so move the task from the
                         * suspended list to the ready list directly. */
                        if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
                        {
                            xYieldRequired = pdTRUE;

                            /* Mark that a yield is pending in case the user is not
                             * using the return value to initiate a context switch
                             * from the ISR using the port specific portYIELD_FROM_ISR(). */
                            xYieldPendings[ 0 ] = pdTRUE;
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    #endif /* #if ( configNUMBER_OF_CORES == 1 ) */

                    ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
                    prvAddTaskToReadyList( pxTCB );
                }
                else
                {
                    /* The delayed or ready lists cannot be accessed so the task
                     * is held in the pending ready list until the scheduler is
                     * unsuspended. */
                    vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
                }

                #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_PREEMPTION == 1 ) )
                {
                    /* In SMP builds let the scheduler pick a core for the task to
                     * run on; report a yield only if this core must switch. */
                    prvYieldForTask( pxTCB );

                    if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE )
                    {
                        xYieldRequired = pdTRUE;
                    }
                }
                #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_PREEMPTION == 1 ) ) */
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );

        traceRETURN_xTaskResumeFromISR( xYieldRequired );

        return xYieldRequired;
    }

#endif /* ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) ) */
  2983. /*-----------------------------------------------------------*/
/* Create the idle task and, in SMP builds (configNUMBER_OF_CORES > 1), one
 * passive idle task for each additional core so every core always has a task
 * it can run.  Returns pdPASS when all idle tasks were created, pdFAIL
 * otherwise. */
static BaseType_t prvCreateIdleTasks( void )
{
    BaseType_t xReturn = pdPASS;
    BaseType_t xCoreID;
    char cIdleName[ configMAX_TASK_NAME_LEN ];
    TaskFunction_t pxIdleTaskFunction = NULL;
    BaseType_t xIdleTaskNameIndex;

    /* Copy the configured idle task name into a local buffer; after the loop
     * xIdleTaskNameIndex is the index of the terminating NUL (or the full
     * length if the name filled the buffer). */
    for( xIdleTaskNameIndex = ( BaseType_t ) 0; xIdleTaskNameIndex < ( BaseType_t ) configMAX_TASK_NAME_LEN; xIdleTaskNameIndex++ )
    {
        cIdleName[ xIdleTaskNameIndex ] = configIDLE_TASK_NAME[ xIdleTaskNameIndex ];

        /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than
         * configMAX_TASK_NAME_LEN characters just in case the memory after the
         * string is not accessible (extremely unlikely). */
        if( cIdleName[ xIdleTaskNameIndex ] == ( char ) 0x00 )
        {
            break;
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }

    /* Add each idle task at the lowest priority. */
    for( xCoreID = ( BaseType_t ) 0; xCoreID < ( BaseType_t ) configNUMBER_OF_CORES; xCoreID++ )
    {
        #if ( configNUMBER_OF_CORES == 1 )
        {
            pxIdleTaskFunction = prvIdleTask;
        }
        #else /* #if ( configNUMBER_OF_CORES == 1 ) */
        {
            /* In the FreeRTOS SMP, configNUMBER_OF_CORES - 1 passive idle tasks
             * are also created to ensure that each core has an idle task to
             * run when no other task is available to run. */
            if( xCoreID == 0 )
            {
                pxIdleTaskFunction = prvIdleTask;
            }
            else
            {
                pxIdleTaskFunction = prvPassiveIdleTask;
            }
        }
        #endif /* #if ( configNUMBER_OF_CORES == 1 ) */

        /* Update the idle task name with suffix to differentiate the idle tasks.
         * This function is not required in single core FreeRTOS since there is
         * only one idle task. */
        #if ( configNUMBER_OF_CORES > 1 )
        {
            /* Append the idle task number to the end of the name if there is space. */
            if( xIdleTaskNameIndex < ( BaseType_t ) configMAX_TASK_NAME_LEN )
            {
                cIdleName[ xIdleTaskNameIndex ] = ( char ) ( xCoreID + '0' );

                /* And append a null character if there is space. */
                if( ( xIdleTaskNameIndex + 1 ) < ( BaseType_t ) configMAX_TASK_NAME_LEN )
                {
                    cIdleName[ xIdleTaskNameIndex + 1 ] = '\0';
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        #endif /* if ( configNUMBER_OF_CORES > 1 ) */

        #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
        {
            StaticTask_t * pxIdleTaskTCBBuffer = NULL;
            StackType_t * pxIdleTaskStackBuffer = NULL;
            configSTACK_DEPTH_TYPE uxIdleTaskStackSize;

            /* The Idle task is created using user provided RAM - obtain the
             * address of the RAM then create the idle task. */
            #if ( configNUMBER_OF_CORES == 1 )
            {
                vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &uxIdleTaskStackSize );
            }
            #else
            {
                /* Core 0 gets the "real" idle task memory; the remaining
                 * cores get memory for their passive idle tasks. */
                if( xCoreID == 0 )
                {
                    vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &uxIdleTaskStackSize );
                }
                else
                {
                    vApplicationGetPassiveIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &uxIdleTaskStackSize, ( BaseType_t ) ( xCoreID - 1 ) );
                }
            }
            #endif /* if ( configNUMBER_OF_CORES == 1 ) */

            xIdleTaskHandles[ xCoreID ] = xTaskCreateStatic( pxIdleTaskFunction,
                                                             cIdleName,
                                                             uxIdleTaskStackSize,
                                                             ( void * ) NULL,
                                                             portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
                                                             pxIdleTaskStackBuffer,
                                                             pxIdleTaskTCBBuffer );

            if( xIdleTaskHandles[ xCoreID ] != NULL )
            {
                xReturn = pdPASS;
            }
            else
            {
                xReturn = pdFAIL;
            }
        }
        #else /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
        {
            /* The Idle task is being created using dynamically allocated RAM. */
            xReturn = xTaskCreate( pxIdleTaskFunction,
                                   cIdleName,
                                   configMINIMAL_STACK_SIZE,
                                   ( void * ) NULL,
                                   portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
                                   &xIdleTaskHandles[ xCoreID ] );
        }
        #endif /* configSUPPORT_STATIC_ALLOCATION */

        /* Break the loop if any of the idle task is failed to be created. */
        if( xReturn != pdPASS )
        {
            break;
        }
        else
        {
            #if ( configNUMBER_OF_CORES == 1 )
            {
                mtCOVERAGE_TEST_MARKER();
            }
            #else
            {
                /* Assign idle task to each core before SMP scheduler is running. */
                xIdleTaskHandles[ xCoreID ]->xTaskRunState = xCoreID;
                pxCurrentTCBs[ xCoreID ] = xIdleTaskHandles[ xCoreID ];
            }
            #endif
        }
    }

    return xReturn;
}
  3125. /*-----------------------------------------------------------*/
/* Start the RTOS scheduler: create the idle task(s) (and timer task when
 * configUSE_TIMERS is set), then hand control to the port layer via
 * xPortStartScheduler().  In the normal case this function never returns. */
void vTaskStartScheduler( void )
{
    BaseType_t xReturn;

    traceENTER_vTaskStartScheduler();

    #if ( configUSE_CORE_AFFINITY == 1 ) && ( configNUMBER_OF_CORES > 1 )
    {
        /* Sanity check that the UBaseType_t must have greater than or equal to
         * the number of bits as configNUMBER_OF_CORES. */
        configASSERT( ( sizeof( UBaseType_t ) * taskBITS_PER_BYTE ) >= configNUMBER_OF_CORES );
    }
    #endif /* #if ( configUSE_CORE_AFFINITY == 1 ) && ( configNUMBER_OF_CORES > 1 ) */

    xReturn = prvCreateIdleTasks();

    #if ( configUSE_TIMERS == 1 )
    {
        if( xReturn == pdPASS )
        {
            xReturn = xTimerCreateTimerTask();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
    #endif /* configUSE_TIMERS */

    if( xReturn == pdPASS )
    {
        /* freertos_tasks_c_additions_init() should only be called if the user
         * definable macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is
         * the only macro called by the function. */
        #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
        {
            freertos_tasks_c_additions_init();
        }
        #endif

        /* Interrupts are turned off here, to ensure a tick does not occur
         * before or during the call to xPortStartScheduler().  The stacks of
         * the created tasks contain a status word with interrupts switched on
         * so interrupts will automatically get re-enabled when the first task
         * starts to run. */
        portDISABLE_INTERRUPTS();

        #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )
        {
            /* Switch C-Runtime's TLS Block to point to the TLS
             * block specific to the task that will run first. */
            configSET_TLS_BLOCK( pxCurrentTCB->xTLSBlock );
        }
        #endif

        xNextTaskUnblockTime = portMAX_DELAY;
        xSchedulerRunning = pdTRUE;
        xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;

        /* If configGENERATE_RUN_TIME_STATS is defined then the following
         * macro must be defined to configure the timer/counter used to generate
         * the run time counter time base.  NOTE:  If configGENERATE_RUN_TIME_STATS
         * is set to 0 and the following line fails to build then ensure you do not
         * have portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() defined in your
         * FreeRTOSConfig.h file. */
        portCONFIGURE_TIMER_FOR_RUN_TIME_STATS();

        traceTASK_SWITCHED_IN();

        /* Setting up the timer tick is hardware specific and thus in the
         * portable interface. */

        /* The return value for xPortStartScheduler is not required
         * hence using a void datatype. */
        ( void ) xPortStartScheduler();

        /* In most cases, xPortStartScheduler() will not return. If it
         * returns pdTRUE then there was not enough heap memory available
         * to create either the Idle or the Timer task. If it returned
         * pdFALSE, then the application called xTaskEndScheduler().
         * Most ports don't implement xTaskEndScheduler() as there is
         * nothing to return to. */
    }
    else
    {
        /* This line will only be reached if the kernel could not be started,
         * because there was not enough FreeRTOS heap to create the idle task
         * or the timer task. */
        configASSERT( xReturn != errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY );
    }

    /* Prevent compiler warnings if INCLUDE_xTaskGetIdleTaskHandle is set to 0,
     * meaning xIdleTaskHandles are not used anywhere else. */
    ( void ) xIdleTaskHandles;

    /* OpenOCD makes use of uxTopUsedPriority for thread debugging. Prevent uxTopUsedPriority
     * from getting optimized out as it is no longer used by the kernel. */
    ( void ) uxTopUsedPriority;

    traceRETURN_vTaskStartScheduler();
}
  3211. /*-----------------------------------------------------------*/
/* Stop the scheduler: delete the kernel-created timer and idle tasks (when
 * INCLUDE_vTaskDelete is available), reclaim resources of tasks pending
 * deletion, then hand over to the port layer via vPortEndScheduler().  Must
 * be called from a task; the application deletes that task afterwards. */
void vTaskEndScheduler( void )
{
    traceENTER_vTaskEndScheduler();

    #if ( INCLUDE_vTaskDelete == 1 )
    {
        BaseType_t xCoreID;

        #if ( configUSE_TIMERS == 1 )
        {
            /* Delete the timer task created by the kernel. */
            vTaskDelete( xTimerGetTimerDaemonTaskHandle() );
        }
        #endif /* #if ( configUSE_TIMERS == 1 ) */

        /* Delete Idle tasks created by the kernel.*/
        for( xCoreID = 0; xCoreID < ( BaseType_t ) configNUMBER_OF_CORES; xCoreID++ )
        {
            vTaskDelete( xIdleTaskHandles[ xCoreID ] );
        }

        /* Idle task is responsible for reclaiming the resources of the tasks in
         * xTasksWaitingTermination list. Since the idle task is now deleted and
         * no longer going to run, we need to reclaim resources of all the tasks
         * in the xTasksWaitingTermination list. */
        prvCheckTasksWaitingTermination();
    }
    #endif /* #if ( INCLUDE_vTaskDelete == 1 ) */

    /* Stop the scheduler interrupts and call the portable scheduler end
     * routine so the original ISRs can be restored if necessary.  The port
     * layer must ensure interrupts enable bit is left in the correct state. */
    portDISABLE_INTERRUPTS();
    xSchedulerRunning = pdFALSE;

    /* This function must be called from a task and the application is
     * responsible for deleting that task after the scheduler is stopped. */
    vPortEndScheduler();

    traceRETURN_vTaskEndScheduler();
}
  3246. /*----------------------------------------------------------*/
/* Suspend the scheduler: prevents context switches without disabling
 * interrupts.  Calls nest - each call must be balanced by a call to
 * xTaskResumeAll(). */
void vTaskSuspendAll( void )
{
    traceENTER_vTaskSuspendAll();

    #if ( configNUMBER_OF_CORES == 1 )
    {
        /* A critical section is not required as the variable is of type
         * BaseType_t.  Please read Richard Barry's reply in the following link to a
         * post in the FreeRTOS support forum before reporting this as a bug! -
         * https://goo.gl/wu4acr */

        /* portSOFTWARE_BARRIER() is only implemented for emulated/simulated ports that
         * do not otherwise exhibit real time behaviour. */
        portSOFTWARE_BARRIER();

        /* The scheduler is suspended if uxSchedulerSuspended is non-zero.  An increment
         * is used to allow calls to vTaskSuspendAll() to nest. */
        uxSchedulerSuspended = ( UBaseType_t ) ( uxSchedulerSuspended + 1U );

        /* Enforces ordering for ports and optimised compilers that may otherwise place
         * the above increment elsewhere. */
        portMEMORY_BARRIER();
    }
    #else /* #if ( configNUMBER_OF_CORES == 1 ) */
    {
        UBaseType_t ulState;

        /* This must only be called from within a task. */
        portASSERT_IF_IN_ISR();

        if( xSchedulerRunning != pdFALSE )
        {
            /* Writes to uxSchedulerSuspended must be protected by both the task AND ISR locks.
             * We must disable interrupts before we grab the locks in the event that this task is
             * interrupted and switches context before incrementing uxSchedulerSuspended.
             * It is safe to re-enable interrupts after releasing the ISR lock and incrementing
             * uxSchedulerSuspended since that will prevent context switches. */
            ulState = portSET_INTERRUPT_MASK();

            /* This must never be called from inside a critical section. */
            configASSERT( portGET_CRITICAL_NESTING_COUNT() == 0 );

            /* portSOFTWARE_BARRIER() is only implemented for emulated/simulated ports that
             * do not otherwise exhibit real time behaviour. */
            portSOFTWARE_BARRIER();

            portGET_TASK_LOCK();

            /* uxSchedulerSuspended is increased after prvCheckForRunStateChange.  The
             * purpose is to prevent altering the variable when fromISR APIs are readying
             * it. */
            if( uxSchedulerSuspended == 0U )
            {
                prvCheckForRunStateChange();
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            portGET_ISR_LOCK();

            /* The scheduler is suspended if uxSchedulerSuspended is non-zero.  An increment
             * is used to allow calls to vTaskSuspendAll() to nest. */
            ++uxSchedulerSuspended;
            portRELEASE_ISR_LOCK();

            portCLEAR_INTERRUPT_MASK( ulState );
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
    #endif /* #if ( configNUMBER_OF_CORES == 1 ) */

    traceRETURN_vTaskSuspendAll();
}
  3311. /*----------------------------------------------------------*/
#if ( configUSE_TICKLESS_IDLE != 0 )

    /* Return the number of ticks for which the system is expected to stay
     * idle (xNextTaskUnblockTime - xTickCount), or 0 if any condition means
     * the idle task should not enter a low power state: a higher priority
     * task is ready, or other idle-priority tasks need time slicing. */
    static TickType_t prvGetExpectedIdleTime( void )
    {
        TickType_t xReturn;
        UBaseType_t uxHigherPriorityReadyTasks = pdFALSE;

        /* uxHigherPriorityReadyTasks takes care of the case where
         * configUSE_PREEMPTION is 0, so there may be tasks above the idle priority
         * task that are in the Ready state, even though the idle task is
         * running. */
        #if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )
        {
            if( uxTopReadyPriority > tskIDLE_PRIORITY )
            {
                uxHigherPriorityReadyTasks = pdTRUE;
            }
        }
        #else
        {
            const UBaseType_t uxLeastSignificantBit = ( UBaseType_t ) 0x01;

            /* When port optimised task selection is used the uxTopReadyPriority
             * variable is used as a bit map.  If bits other than the least
             * significant bit are set then there are tasks that have a priority
             * above the idle priority that are in the Ready state.  This takes
             * care of the case where the co-operative scheduler is in use. */
            if( uxTopReadyPriority > uxLeastSignificantBit )
            {
                uxHigherPriorityReadyTasks = pdTRUE;
            }
        }
        #endif /* if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 ) */

        if( pxCurrentTCB->uxPriority > tskIDLE_PRIORITY )
        {
            xReturn = 0;
        }
        else if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > 1U )
        {
            /* There are other idle priority tasks in the ready state.  If
             * time slicing is used then the very next tick interrupt must be
             * processed. */
            xReturn = 0;
        }
        else if( uxHigherPriorityReadyTasks != pdFALSE )
        {
            /* There are tasks in the Ready state that have a priority above the
             * idle priority.  This path can only be reached if
             * configUSE_PREEMPTION is 0. */
            xReturn = 0;
        }
        else
        {
            xReturn = xNextTaskUnblockTime;
            xReturn -= xTickCount;
        }

        return xReturn;
    }

#endif /* configUSE_TICKLESS_IDLE */
  3368. /*----------------------------------------------------------*/
/* Resume the scheduler after a matching call to vTaskSuspendAll().  Moves
 * tasks readied while the scheduler was suspended from the pending ready
 * list to their ready lists, processes any ticks that were pended, and
 * performs any pending yield.  Returns pdTRUE if a context switch was
 * already performed inside this function, pdFALSE otherwise. */
BaseType_t xTaskResumeAll( void )
{
    TCB_t * pxTCB = NULL;
    BaseType_t xAlreadyYielded = pdFALSE;

    traceENTER_xTaskResumeAll();

    #if ( configNUMBER_OF_CORES > 1 )

        /* Before the scheduler is started the SMP build may call
         * vTaskSuspendAll()/xTaskResumeAll() pairs that must do nothing. */
        if( xSchedulerRunning != pdFALSE )
    #endif
    {
        /* It is possible that an ISR caused a task to be removed from an event
         * list while the scheduler was suspended.  If this was the case then the
         * removed task will have been added to the xPendingReadyList.  Once the
         * scheduler has been resumed it is safe to move all the pending ready
         * tasks from this list into their appropriate ready list. */
        taskENTER_CRITICAL();
        {
            BaseType_t xCoreID;
            xCoreID = ( BaseType_t ) portGET_CORE_ID();

            /* If uxSchedulerSuspended is zero then this function does not match a
             * previous call to vTaskSuspendAll(). */
            configASSERT( uxSchedulerSuspended != 0U );

            uxSchedulerSuspended = ( UBaseType_t ) ( uxSchedulerSuspended - 1U );
            portRELEASE_TASK_LOCK();

            if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
            {
                if( uxCurrentNumberOfTasks > ( UBaseType_t ) 0U )
                {
                    /* Move any readied tasks from the pending list into the
                     * appropriate ready list. */
                    while( listLIST_IS_EMPTY( &xPendingReadyList ) == pdFALSE )
                    {
                        /* MISRA Ref 11.5.3 [Void pointer assignment] */
                        /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
                        /* coverity[misra_c_2012_rule_11_5_violation] */
                        pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList ) );
                        listREMOVE_ITEM( &( pxTCB->xEventListItem ) );
                        portMEMORY_BARRIER();
                        listREMOVE_ITEM( &( pxTCB->xStateListItem ) );
                        prvAddTaskToReadyList( pxTCB );

                        #if ( configNUMBER_OF_CORES == 1 )
                        {
                            /* If the moved task has a priority higher than the current
                             * task then a yield must be performed. */
                            if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
                            {
                                xYieldPendings[ xCoreID ] = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        #else /* #if ( configNUMBER_OF_CORES == 1 ) */
                        {
                            /* All appropriate tasks yield at the moment a task is added to xPendingReadyList.
                             * If the current core yielded then vTaskSwitchContext() has already been called
                             * which sets xYieldPendings for the current core to pdTRUE. */
                        }
                        #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
                    }

                    if( pxTCB != NULL )
                    {
                        /* A task was unblocked while the scheduler was suspended,
                         * which may have prevented the next unblock time from being
                         * re-calculated, in which case re-calculate it now.  Mainly
                         * important for low power tickless implementations, where
                         * this can prevent an unnecessary exit from low power
                         * state. */
                        prvResetNextTaskUnblockTime();
                    }

                    /* If any ticks occurred while the scheduler was suspended then
                     * they should be processed now.  This ensures the tick count does
                     * not slip, and that any delayed tasks are resumed at the correct
                     * time.
                     *
                     * It should be safe to call xTaskIncrementTick here from any core
                     * since we are in a critical section and xTaskIncrementTick itself
                     * protects itself within a critical section.  Suspending the scheduler
                     * from any core causes xTaskIncrementTick to increment uxPendedCounts. */
                    {
                        TickType_t xPendedCounts = xPendedTicks; /* Non-volatile copy. */

                        if( xPendedCounts > ( TickType_t ) 0U )
                        {
                            do
                            {
                                if( xTaskIncrementTick() != pdFALSE )
                                {
                                    /* Other cores are interrupted from
                                     * within xTaskIncrementTick(). */
                                    xYieldPendings[ xCoreID ] = pdTRUE;
                                }
                                else
                                {
                                    mtCOVERAGE_TEST_MARKER();
                                }

                                --xPendedCounts;
                            } while( xPendedCounts > ( TickType_t ) 0U );

                            xPendedTicks = 0;
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }

                    if( xYieldPendings[ xCoreID ] != pdFALSE )
                    {
                        #if ( configUSE_PREEMPTION != 0 )
                        {
                            xAlreadyYielded = pdTRUE;
                        }
                        #endif /* #if ( configUSE_PREEMPTION != 0 ) */

                        #if ( configNUMBER_OF_CORES == 1 )
                        {
                            taskYIELD_TASK_CORE_IF_USING_PREEMPTION( pxCurrentTCB );
                        }
                        #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        taskEXIT_CRITICAL();
    }

    traceRETURN_xTaskResumeAll( xAlreadyYielded );

    return xAlreadyYielded;
}
  3502. /*-----------------------------------------------------------*/
  3503. TickType_t xTaskGetTickCount( void )
  3504. {
  3505. TickType_t xTicks;
  3506. traceENTER_xTaskGetTickCount();
  3507. /* Critical section required if running on a 16 bit processor. */
  3508. portTICK_TYPE_ENTER_CRITICAL();
  3509. {
  3510. xTicks = xTickCount;
  3511. }
  3512. portTICK_TYPE_EXIT_CRITICAL();
  3513. traceRETURN_xTaskGetTickCount( xTicks );
  3514. return xTicks;
  3515. }
  3516. /*-----------------------------------------------------------*/
  3517. TickType_t xTaskGetTickCountFromISR( void )
  3518. {
  3519. TickType_t xReturn;
  3520. UBaseType_t uxSavedInterruptStatus;
  3521. traceENTER_xTaskGetTickCountFromISR();
  3522. /* RTOS ports that support interrupt nesting have the concept of a maximum
  3523. * system call (or maximum API call) interrupt priority. Interrupts that are
  3524. * above the maximum system call priority are kept permanently enabled, even
  3525. * when the RTOS kernel is in a critical section, but cannot make any calls to
  3526. * FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
  3527. * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
  3528. * failure if a FreeRTOS API function is called from an interrupt that has been
  3529. * assigned a priority above the configured maximum system call priority.
  3530. * Only FreeRTOS functions that end in FromISR can be called from interrupts
  3531. * that have been assigned a priority at or (logically) below the maximum
  3532. * system call interrupt priority. FreeRTOS maintains a separate interrupt
  3533. * safe API to ensure interrupt entry is as fast and as simple as possible.
  3534. * More information (albeit Cortex-M specific) is provided on the following
  3535. * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
  3536. portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
  3537. uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR();
  3538. {
  3539. xReturn = xTickCount;
  3540. }
  3541. portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
  3542. traceRETURN_xTaskGetTickCountFromISR( xReturn );
  3543. return xReturn;
  3544. }
  3545. /*-----------------------------------------------------------*/
  3546. UBaseType_t uxTaskGetNumberOfTasks( void )
  3547. {
  3548. traceENTER_uxTaskGetNumberOfTasks();
  3549. /* A critical section is not required because the variables are of type
  3550. * BaseType_t. */
  3551. traceRETURN_uxTaskGetNumberOfTasks( uxCurrentNumberOfTasks );
  3552. return uxCurrentNumberOfTasks;
  3553. }
  3554. /*-----------------------------------------------------------*/
  3555. char * pcTaskGetName( TaskHandle_t xTaskToQuery )
  3556. {
  3557. TCB_t * pxTCB;
  3558. traceENTER_pcTaskGetName( xTaskToQuery );
  3559. /* If null is passed in here then the name of the calling task is being
  3560. * queried. */
  3561. pxTCB = prvGetTCBFromHandle( xTaskToQuery );
  3562. configASSERT( pxTCB );
  3563. traceRETURN_pcTaskGetName( &( pxTCB->pcTaskName[ 0 ] ) );
  3564. return &( pxTCB->pcTaskName[ 0 ] );
  3565. }
  3566. /*-----------------------------------------------------------*/
  3567. #if ( INCLUDE_xTaskGetHandle == 1 )
  3568. static TCB_t * prvSearchForNameWithinSingleList( List_t * pxList,
  3569. const char pcNameToQuery[] )
  3570. {
  3571. TCB_t * pxReturn = NULL;
  3572. TCB_t * pxTCB = NULL;
  3573. UBaseType_t x;
  3574. char cNextChar;
  3575. BaseType_t xBreakLoop;
  3576. const ListItem_t * pxEndMarker = listGET_END_MARKER( pxList );
  3577. ListItem_t * pxIterator;
  3578. /* This function is called with the scheduler suspended. */
  3579. if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
  3580. {
  3581. for( pxIterator = listGET_HEAD_ENTRY( pxList ); pxIterator != pxEndMarker; pxIterator = listGET_NEXT( pxIterator ) )
  3582. {
  3583. /* MISRA Ref 11.5.3 [Void pointer assignment] */
  3584. /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
  3585. /* coverity[misra_c_2012_rule_11_5_violation] */
  3586. pxTCB = listGET_LIST_ITEM_OWNER( pxIterator );
  3587. /* Check each character in the name looking for a match or
  3588. * mismatch. */
  3589. xBreakLoop = pdFALSE;
  3590. for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
  3591. {
  3592. cNextChar = pxTCB->pcTaskName[ x ];
  3593. if( cNextChar != pcNameToQuery[ x ] )
  3594. {
  3595. /* Characters didn't match. */
  3596. xBreakLoop = pdTRUE;
  3597. }
  3598. else if( cNextChar == ( char ) 0x00 )
  3599. {
  3600. /* Both strings terminated, a match must have been
  3601. * found. */
  3602. pxReturn = pxTCB;
  3603. xBreakLoop = pdTRUE;
  3604. }
  3605. else
  3606. {
  3607. mtCOVERAGE_TEST_MARKER();
  3608. }
  3609. if( xBreakLoop != pdFALSE )
  3610. {
  3611. break;
  3612. }
  3613. }
  3614. if( pxReturn != NULL )
  3615. {
  3616. /* The handle has been found. */
  3617. break;
  3618. }
  3619. }
  3620. }
  3621. else
  3622. {
  3623. mtCOVERAGE_TEST_MARKER();
  3624. }
  3625. return pxReturn;
  3626. }
  3627. #endif /* INCLUDE_xTaskGetHandle */
  3628. /*-----------------------------------------------------------*/
  3629. #if ( INCLUDE_xTaskGetHandle == 1 )
    /* Look up a task's handle from its human readable text name.  Every task
     * list (ready, delayed, overflow-delayed, and optionally suspended and
     * waiting-for-termination) is searched with the scheduler suspended, so
     * this is a slow call.  Returns NULL if no task with the queried name
     * exists. */
    TaskHandle_t xTaskGetHandle( const char * pcNameToQuery )
    {
        UBaseType_t uxQueue = configMAX_PRIORITIES;
        TCB_t * pxTCB;

        traceENTER_xTaskGetHandle( pcNameToQuery );

        /* Task names will be truncated to configMAX_TASK_NAME_LEN - 1 bytes. */
        configASSERT( strlen( pcNameToQuery ) < configMAX_TASK_NAME_LEN );

        vTaskSuspendAll();
        {
            /* Search the ready lists, from the highest priority down to the
             * idle priority. */
            do
            {
                uxQueue--;
                pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) &( pxReadyTasksLists[ uxQueue ] ), pcNameToQuery );

                if( pxTCB != NULL )
                {
                    /* Found the handle. */
                    break;
                }
            } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY );

            /* Search the delayed lists. */
            if( pxTCB == NULL )
            {
                pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxDelayedTaskList, pcNameToQuery );
            }

            if( pxTCB == NULL )
            {
                pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxOverflowDelayedTaskList, pcNameToQuery );
            }

            #if ( INCLUDE_vTaskSuspend == 1 )
            {
                if( pxTCB == NULL )
                {
                    /* Search the suspended list. */
                    pxTCB = prvSearchForNameWithinSingleList( &xSuspendedTaskList, pcNameToQuery );
                }
            }
            #endif

            #if ( INCLUDE_vTaskDelete == 1 )
            {
                if( pxTCB == NULL )
                {
                    /* Search the deleted list. */
                    pxTCB = prvSearchForNameWithinSingleList( &xTasksWaitingTermination, pcNameToQuery );
                }
            }
            #endif
        }
        ( void ) xTaskResumeAll();

        traceRETURN_xTaskGetHandle( pxTCB );

        /* NULL if no matching task was found in any list. */
        return pxTCB;
    }
  3682. #endif /* INCLUDE_xTaskGetHandle */
  3683. /*-----------------------------------------------------------*/
  3684. #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
    /* Retrieve pointers to a task's stack buffer and, if the task's TCB was
     * also statically allocated, its StaticTask_t buffer.  A NULL xTask means
     * the calling task.  Returns pdTRUE if the buffers were retrieved,
     * pdFALSE if the task was not created with static allocation. */
    BaseType_t xTaskGetStaticBuffers( TaskHandle_t xTask,
                                      StackType_t ** ppuxStackBuffer,
                                      StaticTask_t ** ppxTaskBuffer )
    {
        BaseType_t xReturn;
        TCB_t * pxTCB;

        traceENTER_xTaskGetStaticBuffers( xTask, ppuxStackBuffer, ppxTaskBuffer );

        /* Both output parameters are mandatory. */
        configASSERT( ppuxStackBuffer != NULL );
        configASSERT( ppxTaskBuffer != NULL );

        pxTCB = prvGetTCBFromHandle( xTask );

        #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE == 1 )
        {
            /* Tasks can be created statically or dynamically, so check which
             * parts of this task were statically allocated. */
            if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_AND_TCB )
            {
                *ppuxStackBuffer = pxTCB->pxStack;
                /* MISRA Ref 11.3.1 [Misaligned access] */
                /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-113 */
                /* coverity[misra_c_2012_rule_11_3_violation] */
                *ppxTaskBuffer = ( StaticTask_t * ) pxTCB;
                xReturn = pdTRUE;
            }
            else if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_ONLY )
            {
                /* Only the stack was provided by the application - there is
                 * no static TCB buffer to return. */
                *ppuxStackBuffer = pxTCB->pxStack;
                *ppxTaskBuffer = NULL;
                xReturn = pdTRUE;
            }
            else
            {
                /* The task was dynamically allocated. */
                xReturn = pdFALSE;
            }
        }
        #else /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE == 1 */
        {
            /* Only static allocation is possible in this build, so both
             * buffers must have been supplied by the application. */
            *ppuxStackBuffer = pxTCB->pxStack;
            *ppxTaskBuffer = ( StaticTask_t * ) pxTCB;
            xReturn = pdTRUE;
        }
        #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE == 1 */

        traceRETURN_xTaskGetStaticBuffers( xReturn );

        return xReturn;
    }
  3727. #endif /* configSUPPORT_STATIC_ALLOCATION */
  3728. /*-----------------------------------------------------------*/
  3729. #if ( configUSE_TRACE_FACILITY == 1 )
    /* Populate pxTaskStatusArray with a TaskStatus_t for every task in the
     * system and optionally report the total run time.  The array must hold
     * at least uxCurrentNumberOfTasks entries or nothing is filled in and 0
     * is returned.  Called with the scheduler suspended, so it is intended
     * for debug/statistics use rather than time critical code. */
    UBaseType_t uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
                                      const UBaseType_t uxArraySize,
                                      configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime )
    {
        UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES;

        traceENTER_uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, pulTotalRunTime );

        vTaskSuspendAll();
        {
            /* Is there a space in the array for each task in the system? */
            if( uxArraySize >= uxCurrentNumberOfTasks )
            {
                /* Fill in an TaskStatus_t structure with information on each
                 * task in the Ready state, walking the ready lists from the
                 * highest priority down to the idle priority. */
                do
                {
                    uxQueue--;
                    uxTask = ( UBaseType_t ) ( uxTask + prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &( pxReadyTasksLists[ uxQueue ] ), eReady ) );
                } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY );

                /* Fill in an TaskStatus_t structure with information on each
                 * task in the Blocked state - both the current and the
                 * overflow delayed lists must be covered. */
                uxTask = ( UBaseType_t ) ( uxTask + prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxDelayedTaskList, eBlocked ) );
                uxTask = ( UBaseType_t ) ( uxTask + prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxOverflowDelayedTaskList, eBlocked ) );

                #if ( INCLUDE_vTaskDelete == 1 )
                {
                    /* Fill in an TaskStatus_t structure with information on
                     * each task that has been deleted but not yet cleaned up. */
                    uxTask = ( UBaseType_t ) ( uxTask + prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xTasksWaitingTermination, eDeleted ) );
                }
                #endif

                #if ( INCLUDE_vTaskSuspend == 1 )
                {
                    /* Fill in an TaskStatus_t structure with information on
                     * each task in the Suspended state. */
                    uxTask = ( UBaseType_t ) ( uxTask + prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xSuspendedTaskList, eSuspended ) );
                }
                #endif

                #if ( configGENERATE_RUN_TIME_STATS == 1 )
                {
                    /* Reporting the total run time is optional - only do so
                     * if the caller supplied somewhere to write it. */
                    if( pulTotalRunTime != NULL )
                    {
                        #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
                            portALT_GET_RUN_TIME_COUNTER_VALUE( ( *pulTotalRunTime ) );
                        #else
                            *pulTotalRunTime = ( configRUN_TIME_COUNTER_TYPE ) portGET_RUN_TIME_COUNTER_VALUE();
                        #endif
                    }
                }
                #else /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
                {
                    /* Run time stats are not compiled in - report zero. */
                    if( pulTotalRunTime != NULL )
                    {
                        *pulTotalRunTime = 0;
                    }
                }
                #endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
            }
            else
            {
                /* The supplied array is too small - uxTask stays 0. */
                mtCOVERAGE_TEST_MARKER();
            }
        }
        ( void ) xTaskResumeAll();

        traceRETURN_uxTaskGetSystemState( uxTask );

        /* The number of TaskStatus_t structures that were populated. */
        return uxTask;
    }
  3795. #endif /* configUSE_TRACE_FACILITY */
  3796. /*----------------------------------------------------------*/
  3797. #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
  3798. #if ( configNUMBER_OF_CORES == 1 )
  3799. TaskHandle_t xTaskGetIdleTaskHandle( void )
  3800. {
  3801. traceENTER_xTaskGetIdleTaskHandle();
  3802. /* If xTaskGetIdleTaskHandle() is called before the scheduler has been
  3803. * started, then xIdleTaskHandles will be NULL. */
  3804. configASSERT( ( xIdleTaskHandles[ 0 ] != NULL ) );
  3805. traceRETURN_xTaskGetIdleTaskHandle( xIdleTaskHandles[ 0 ] );
  3806. return xIdleTaskHandles[ 0 ];
  3807. }
  3808. #endif /* if ( configNUMBER_OF_CORES == 1 ) */
  3809. TaskHandle_t xTaskGetIdleTaskHandleForCore( BaseType_t xCoreID )
  3810. {
  3811. traceENTER_xTaskGetIdleTaskHandleForCore( xCoreID );
  3812. /* Ensure the core ID is valid. */
  3813. configASSERT( taskVALID_CORE_ID( xCoreID ) == pdTRUE );
  3814. /* If xTaskGetIdleTaskHandle() is called before the scheduler has been
  3815. * started, then xIdleTaskHandles will be NULL. */
  3816. configASSERT( ( xIdleTaskHandles[ xCoreID ] != NULL ) );
  3817. traceRETURN_xTaskGetIdleTaskHandleForCore( xIdleTaskHandles[ xCoreID ] );
  3818. return xIdleTaskHandles[ xCoreID ];
  3819. }
  3820. #endif /* INCLUDE_xTaskGetIdleTaskHandle */
  3821. /*----------------------------------------------------------*/
  3822. /* This conditional compilation should use inequality to 0, not equality to 1.
  3823. * This is to ensure vTaskStepTick() is available when user defined low power mode
  3824. * implementations require configUSE_TICKLESS_IDLE to be set to a value other than
  3825. * 1. */
  3826. #if ( configUSE_TICKLESS_IDLE != 0 )
    /* Advance the tick count by xTicksToJump ticks in one step, after a
     * period during which the tick interrupt was suppressed (tickless idle).
     * Must not step past xNextTaskUnblockTime - the assert enforces this. */
    void vTaskStepTick( TickType_t xTicksToJump )
    {
        TickType_t xUpdatedTickCount;

        traceENTER_vTaskStepTick( xTicksToJump );

        /* Correct the tick count value after a period during which the tick
         * was suppressed. Note this does *not* call the tick hook function for
         * each stepped tick. */
        xUpdatedTickCount = xTickCount + xTicksToJump;
        configASSERT( xUpdatedTickCount <= xNextTaskUnblockTime );

        if( xUpdatedTickCount == xNextTaskUnblockTime )
        {
            /* Arrange for xTickCount to reach xNextTaskUnblockTime in
             * xTaskIncrementTick() when the scheduler resumes. This ensures
             * that any delayed tasks are resumed at the correct time. */
            configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U );
            configASSERT( xTicksToJump != ( TickType_t ) 0 );

            /* Prevent the tick interrupt modifying xPendedTicks simultaneously. */
            taskENTER_CRITICAL();
            {
                /* One tick is moved from the jump into the pended count so
                 * xTaskResumeAll() processes the unblock at the right time. */
                xPendedTicks++;
            }
            taskEXIT_CRITICAL();
            xTicksToJump--;
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        /* Apply the (possibly decremented) jump to the tick count. */
        xTickCount += xTicksToJump;
        traceINCREASE_TICK_COUNT( xTicksToJump );

        traceRETURN_vTaskStepTick();
    }
  3859. #endif /* configUSE_TICKLESS_IDLE */
  3860. /*----------------------------------------------------------*/
  3861. BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
  3862. {
  3863. BaseType_t xYieldOccurred;
  3864. traceENTER_xTaskCatchUpTicks( xTicksToCatchUp );
  3865. /* Must not be called with the scheduler suspended as the implementation
  3866. * relies on xPendedTicks being wound down to 0 in xTaskResumeAll(). */
  3867. configASSERT( uxSchedulerSuspended == ( UBaseType_t ) 0U );
  3868. /* Use xPendedTicks to mimic xTicksToCatchUp number of ticks occurring when
  3869. * the scheduler is suspended so the ticks are executed in xTaskResumeAll(). */
  3870. vTaskSuspendAll();
  3871. /* Prevent the tick interrupt modifying xPendedTicks simultaneously. */
  3872. taskENTER_CRITICAL();
  3873. {
  3874. xPendedTicks += xTicksToCatchUp;
  3875. }
  3876. taskEXIT_CRITICAL();
  3877. xYieldOccurred = xTaskResumeAll();
  3878. traceRETURN_xTaskCatchUpTicks( xYieldOccurred );
  3879. return xYieldOccurred;
  3880. }
  3881. /*----------------------------------------------------------*/
  3882. #if ( INCLUDE_xTaskAbortDelay == 1 )
    /* Force a task out of the Blocked state before its timeout expires.
     * Returns pdPASS if the task was in the Blocked state and was moved to
     * the Ready state, pdFAIL otherwise. */
    BaseType_t xTaskAbortDelay( TaskHandle_t xTask )
    {
        TCB_t * pxTCB = xTask;
        BaseType_t xReturn;

        traceENTER_xTaskAbortDelay( xTask );

        /* Unlike most task APIs, NULL is not accepted here. */
        configASSERT( pxTCB );

        vTaskSuspendAll();
        {
            /* A task can only be prematurely removed from the Blocked state if
             * it is actually in the Blocked state. */
            if( eTaskGetState( xTask ) == eBlocked )
            {
                xReturn = pdPASS;

                /* Remove the reference to the task from the blocked list. An
                 * interrupt won't touch the xStateListItem because the
                 * scheduler is suspended. */
                ( void ) uxListRemove( &( pxTCB->xStateListItem ) );

                /* Is the task waiting on an event also? If so remove it from
                 * the event list too. Interrupts can touch the event list item,
                 * even though the scheduler is suspended, so a critical section
                 * is used. */
                taskENTER_CRITICAL();
                {
                    if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
                    {
                        ( void ) uxListRemove( &( pxTCB->xEventListItem ) );

                        /* This lets the task know it was forcibly removed from the
                         * blocked state so it should not re-evaluate its block time and
                         * then block again. */
                        pxTCB->ucDelayAborted = ( uint8_t ) pdTRUE;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                taskEXIT_CRITICAL();

                /* Place the unblocked task into the appropriate ready list. */
                prvAddTaskToReadyList( pxTCB );

                /* A task being unblocked cannot cause an immediate context
                 * switch if preemption is turned off. */
                #if ( configUSE_PREEMPTION == 1 )
                {
                    #if ( configNUMBER_OF_CORES == 1 )
                    {
                        /* Preemption is on, but a context switch should only be
                         * performed if the unblocked task has a priority that is
                         * higher than the currently executing task. */
                        if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
                        {
                            /* Pend the yield to be performed when the scheduler
                             * is unsuspended. */
                            xYieldPendings[ 0 ] = pdTRUE;
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    #else /* #if ( configNUMBER_OF_CORES == 1 ) */
                    {
                        /* In SMP builds the decision of which core (if any)
                         * should yield is delegated to prvYieldForTask(). */
                        taskENTER_CRITICAL();
                        {
                            prvYieldForTask( pxTCB );
                        }
                        taskEXIT_CRITICAL();
                    }
                    #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
                }
                #endif /* #if ( configUSE_PREEMPTION == 1 ) */
            }
            else
            {
                /* The task was not Blocked - nothing to abort. */
                xReturn = pdFAIL;
            }
        }
        ( void ) xTaskResumeAll();

        traceRETURN_xTaskAbortDelay( xReturn );

        return xReturn;
    }
  3963. #endif /* INCLUDE_xTaskAbortDelay */
  3964. /*----------------------------------------------------------*/
/* Advance the RTOS tick by one.  Unblocks any tasks whose timeout expires at
 * the new tick value and decides whether a context switch is required.
 * Returns pdTRUE if the caller (the port's tick ISR) should request a
 * context switch, pdFALSE otherwise. */
BaseType_t xTaskIncrementTick( void )
{
    TCB_t * pxTCB;
    TickType_t xItemValue;
    BaseType_t xSwitchRequired = pdFALSE;

    #if ( configUSE_PREEMPTION == 1 ) && ( configNUMBER_OF_CORES > 1 )
        BaseType_t xYieldRequiredForCore[ configNUMBER_OF_CORES ] = { pdFALSE };
    #endif /* #if ( configUSE_PREEMPTION == 1 ) && ( configNUMBER_OF_CORES > 1 ) */

    traceENTER_xTaskIncrementTick();

    /* Called by the portable layer each time a tick interrupt occurs.
     * Increments the tick then checks to see if the new tick value will cause any
     * tasks to be unblocked. */
    traceTASK_INCREMENT_TICK( xTickCount );

    /* Tick increment should occur on every kernel timer event. Core 0 has the
     * responsibility to increment the tick, or increment the pended ticks if the
     * scheduler is suspended. If pended ticks is greater than zero, the core that
     * calls xTaskResumeAll has the responsibility to increment the tick. */
    if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
    {
        /* Minor optimisation. The tick count cannot change in this
         * block. */
        const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1;

        /* Increment the RTOS tick, switching the delayed and overflowed
         * delayed lists if it wraps to 0. */
        xTickCount = xConstTickCount;

        if( xConstTickCount == ( TickType_t ) 0U )
        {
            /* The tick count wrapped - swap the delayed and overflow-delayed
             * lists and update xNextTaskUnblockTime. */
            taskSWITCH_DELAYED_LISTS();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        /* See if this tick has made a timeout expire. Tasks are stored in
         * the queue in the order of their wake time - meaning once one task
         * has been found whose block time has not expired there is no need to
         * look any further down the list. */
        if( xConstTickCount >= xNextTaskUnblockTime )
        {
            for( ; ; )
            {
                if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
                {
                    /* The delayed list is empty. Set xNextTaskUnblockTime
                     * to the maximum possible value so it is extremely
                     * unlikely that the
                     * if( xTickCount >= xNextTaskUnblockTime ) test will pass
                     * next time through. */
                    xNextTaskUnblockTime = portMAX_DELAY;
                    break;
                }
                else
                {
                    /* The delayed list is not empty, get the value of the
                     * item at the head of the delayed list. This is the time
                     * at which the task at the head of the delayed list must
                     * be removed from the Blocked state. */
                    /* MISRA Ref 11.5.3 [Void pointer assignment] */
                    /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
                    /* coverity[misra_c_2012_rule_11_5_violation] */
                    pxTCB = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList );
                    xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ) );

                    if( xConstTickCount < xItemValue )
                    {
                        /* It is not time to unblock this item yet, but the
                         * item value is the time at which the task at the head
                         * of the blocked list must be removed from the Blocked
                         * state - so record the item value in
                         * xNextTaskUnblockTime. */
                        xNextTaskUnblockTime = xItemValue;
                        break;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* It is time to remove the item from the Blocked state. */
                    listREMOVE_ITEM( &( pxTCB->xStateListItem ) );

                    /* Is the task waiting on an event also? If so remove
                     * it from the event list. */
                    if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
                    {
                        listREMOVE_ITEM( &( pxTCB->xEventListItem ) );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* Place the unblocked task into the appropriate ready
                     * list. */
                    prvAddTaskToReadyList( pxTCB );

                    /* A task being unblocked cannot cause an immediate
                     * context switch if preemption is turned off. */
                    #if ( configUSE_PREEMPTION == 1 )
                    {
                        #if ( configNUMBER_OF_CORES == 1 )
                        {
                            /* Preemption is on, but a context switch should
                             * only be performed if the unblocked task's
                             * priority is higher than the currently executing
                             * task.
                             * The case of equal priority tasks sharing
                             * processing time (which happens when both
                             * preemption and time slicing are on) is
                             * handled below.*/
                            if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
                            {
                                xSwitchRequired = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        #else /* #if( configNUMBER_OF_CORES == 1 ) */
                        {
                            prvYieldForTask( pxTCB );
                        }
                        #endif /* #if( configNUMBER_OF_CORES == 1 ) */
                    }
                    #endif /* #if ( configUSE_PREEMPTION == 1 ) */
                }
            }
        }

        /* Tasks of equal priority to the currently running task will share
         * processing time (time slice) if preemption is on, and the application
         * writer has not explicitly turned time slicing off. */
        #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) )
        {
            #if ( configNUMBER_OF_CORES == 1 )
            {
                /* More than one ready task at the running task's priority
                 * means the time slice should rotate. */
                if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > 1U )
                {
                    xSwitchRequired = pdTRUE;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            #else /* #if ( configNUMBER_OF_CORES == 1 ) */
            {
                BaseType_t xCoreID;

                /* Check the time-slice condition separately for the task
                 * running on each core. */
                for( xCoreID = 0; xCoreID < ( ( BaseType_t ) configNUMBER_OF_CORES ); xCoreID++ )
                {
                    if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCBs[ xCoreID ]->uxPriority ] ) ) > 1U )
                    {
                        xYieldRequiredForCore[ xCoreID ] = pdTRUE;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
            }
            #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
        }
        #endif /* #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */

        #if ( configUSE_TICK_HOOK == 1 )
        {
            /* Guard against the tick hook being called when the pended tick
             * count is being unwound (when the scheduler is being unlocked). */
            if( xPendedTicks == ( TickType_t ) 0 )
            {
                vApplicationTickHook();
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        #endif /* configUSE_TICK_HOOK */

        #if ( configUSE_PREEMPTION == 1 )
        {
            #if ( configNUMBER_OF_CORES == 1 )
            {
                /* For single core the core ID is always 0.  A pended yield
                 * (e.g. set while unblocking a task above) also forces a
                 * switch. */
                if( xYieldPendings[ 0 ] != pdFALSE )
                {
                    xSwitchRequired = pdTRUE;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            #else /* #if ( configNUMBER_OF_CORES == 1 ) */
            {
                BaseType_t xCoreID, xCurrentCoreID;
                xCurrentCoreID = ( BaseType_t ) portGET_CORE_ID();

                /* The current core reports the switch through the return
                 * value; other cores are interrupted via prvYieldCore(). */
                for( xCoreID = 0; xCoreID < ( BaseType_t ) configNUMBER_OF_CORES; xCoreID++ )
                {
                    #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
                        if( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == pdFALSE )
                    #endif
                    {
                        if( ( xYieldRequiredForCore[ xCoreID ] != pdFALSE ) || ( xYieldPendings[ xCoreID ] != pdFALSE ) )
                        {
                            if( xCoreID == xCurrentCoreID )
                            {
                                xSwitchRequired = pdTRUE;
                            }
                            else
                            {
                                prvYieldCore( xCoreID );
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
            }
            #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
        }
        #endif /* #if ( configUSE_PREEMPTION == 1 ) */
    }
    else
    {
        /* The scheduler is suspended - record the tick so it can be
         * processed later in xTaskResumeAll(). */
        xPendedTicks += 1U;

        /* The tick hook gets called at regular intervals, even if the
         * scheduler is locked. */
        #if ( configUSE_TICK_HOOK == 1 )
        {
            vApplicationTickHook();
        }
        #endif
    }

    traceRETURN_xTaskIncrementTick( xSwitchRequired );

    return xSwitchRequired;
}
  4197. /*-----------------------------------------------------------*/
  4198. #if ( configUSE_APPLICATION_TASK_TAG == 1 )
  4199. void vTaskSetApplicationTaskTag( TaskHandle_t xTask,
  4200. TaskHookFunction_t pxHookFunction )
  4201. {
  4202. TCB_t * xTCB;
  4203. traceENTER_vTaskSetApplicationTaskTag( xTask, pxHookFunction );
  4204. /* If xTask is NULL then it is the task hook of the calling task that is
  4205. * getting set. */
  4206. if( xTask == NULL )
  4207. {
  4208. xTCB = ( TCB_t * ) pxCurrentTCB;
  4209. }
  4210. else
  4211. {
  4212. xTCB = xTask;
  4213. }
  4214. /* Save the hook function in the TCB. A critical section is required as
  4215. * the value can be accessed from an interrupt. */
  4216. taskENTER_CRITICAL();
  4217. {
  4218. xTCB->pxTaskTag = pxHookFunction;
  4219. }
  4220. taskEXIT_CRITICAL();
  4221. traceRETURN_vTaskSetApplicationTaskTag();
  4222. }
  4223. #endif /* configUSE_APPLICATION_TASK_TAG */
  4224. /*-----------------------------------------------------------*/
  4225. #if ( configUSE_APPLICATION_TASK_TAG == 1 )
  4226. TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask )
  4227. {
  4228. TCB_t * pxTCB;
  4229. TaskHookFunction_t xReturn;
  4230. traceENTER_xTaskGetApplicationTaskTag( xTask );
  4231. /* If xTask is NULL then set the calling task's hook. */
  4232. pxTCB = prvGetTCBFromHandle( xTask );
  4233. /* Save the hook function in the TCB. A critical section is required as
  4234. * the value can be accessed from an interrupt. */
  4235. taskENTER_CRITICAL();
  4236. {
  4237. xReturn = pxTCB->pxTaskTag;
  4238. }
  4239. taskEXIT_CRITICAL();
  4240. traceRETURN_xTaskGetApplicationTaskTag( xReturn );
  4241. return xReturn;
  4242. }
  4243. #endif /* configUSE_APPLICATION_TASK_TAG */
  4244. /*-----------------------------------------------------------*/
  4245. #if ( configUSE_APPLICATION_TASK_TAG == 1 )
  4246. TaskHookFunction_t xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask )
  4247. {
  4248. TCB_t * pxTCB;
  4249. TaskHookFunction_t xReturn;
  4250. UBaseType_t uxSavedInterruptStatus;
  4251. traceENTER_xTaskGetApplicationTaskTagFromISR( xTask );
  4252. /* If xTask is NULL then set the calling task's hook. */
  4253. pxTCB = prvGetTCBFromHandle( xTask );
  4254. /* Save the hook function in the TCB. A critical section is required as
  4255. * the value can be accessed from an interrupt. */
  4256. /* MISRA Ref 4.7.1 [Return value shall be checked] */
  4257. /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
  4258. /* coverity[misra_c_2012_directive_4_7_violation] */
  4259. uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
  4260. {
  4261. xReturn = pxTCB->pxTaskTag;
  4262. }
  4263. taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
  4264. traceRETURN_xTaskGetApplicationTaskTagFromISR( xReturn );
  4265. return xReturn;
  4266. }
  4267. #endif /* configUSE_APPLICATION_TASK_TAG */
  4268. /*-----------------------------------------------------------*/
  4269. #if ( configUSE_APPLICATION_TASK_TAG == 1 )
  4270. BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask,
  4271. void * pvParameter )
  4272. {
  4273. TCB_t * xTCB;
  4274. BaseType_t xReturn;
  4275. traceENTER_xTaskCallApplicationTaskHook( xTask, pvParameter );
  4276. /* If xTask is NULL then we are calling our own task hook. */
  4277. if( xTask == NULL )
  4278. {
  4279. xTCB = pxCurrentTCB;
  4280. }
  4281. else
  4282. {
  4283. xTCB = xTask;
  4284. }
  4285. if( xTCB->pxTaskTag != NULL )
  4286. {
  4287. xReturn = xTCB->pxTaskTag( pvParameter );
  4288. }
  4289. else
  4290. {
  4291. xReturn = pdFAIL;
  4292. }
  4293. traceRETURN_xTaskCallApplicationTaskHook( xReturn );
  4294. return xReturn;
  4295. }
  4296. #endif /* configUSE_APPLICATION_TASK_TAG */
  4297. /*-----------------------------------------------------------*/
  4298. #if ( configNUMBER_OF_CORES == 1 )
    /* Select the next task to run (single core version).  Updates run time
     * stats, checks for stack overflow and performs the errno/TLS bookkeeping
     * around the switch.  If the scheduler is suspended the switch is pended
     * instead of performed. */
    void vTaskSwitchContext( void )
    {
        traceENTER_vTaskSwitchContext();

        if( uxSchedulerSuspended != ( UBaseType_t ) 0U )
        {
            /* The scheduler is currently suspended - do not allow a context
             * switch. */
            xYieldPendings[ 0 ] = pdTRUE;
        }
        else
        {
            xYieldPendings[ 0 ] = pdFALSE;
            traceTASK_SWITCHED_OUT();

            #if ( configGENERATE_RUN_TIME_STATS == 1 )
            {
                #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
                    portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime[ 0 ] );
                #else
                    ulTotalRunTime[ 0 ] = portGET_RUN_TIME_COUNTER_VALUE();
                #endif

                /* Add the amount of time the task has been running to the
                 * accumulated time so far. The time the task started running was
                 * stored in ulTaskSwitchedInTime. Note that there is no overflow
                 * protection here so count values are only valid until the timer
                 * overflows. The guard against negative values is to protect
                 * against suspect run time stat counter implementations - which
                 * are provided by the application, not the kernel. */
                if( ulTotalRunTime[ 0 ] > ulTaskSwitchedInTime[ 0 ] )
                {
                    pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime[ 0 ] - ulTaskSwitchedInTime[ 0 ] );
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                ulTaskSwitchedInTime[ 0 ] = ulTotalRunTime[ 0 ];
            }
            #endif /* configGENERATE_RUN_TIME_STATS */

            /* Check for stack overflow, if configured. */
            taskCHECK_FOR_STACK_OVERFLOW();

            /* Before the currently running task is switched out, save its errno. */
            #if ( configUSE_POSIX_ERRNO == 1 )
            {
                pxCurrentTCB->iTaskErrno = FreeRTOS_errno;
            }
            #endif

            /* Select a new task to run using either the generic C or port
             * optimised asm code. */
            /* MISRA Ref 11.5.3 [Void pointer assignment] */
            /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
            /* coverity[misra_c_2012_rule_11_5_violation] */
            taskSELECT_HIGHEST_PRIORITY_TASK();
            traceTASK_SWITCHED_IN();

            /* Macro to inject port specific behaviour immediately after
             * switching tasks, such as setting an end of stack watchpoint
             * or reconfiguring the MPU. */
            portTASK_SWITCH_HOOK( pxCurrentTCB );

            /* After the new task is switched in, update the global errno. */
            #if ( configUSE_POSIX_ERRNO == 1 )
            {
                FreeRTOS_errno = pxCurrentTCB->iTaskErrno;
            }
            #endif

            #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )
            {
                /* Switch C-Runtime's TLS Block to point to the TLS
                 * Block specific to this task. */
                configSET_TLS_BLOCK( pxCurrentTCB->xTLSBlock );
            }
            #endif
        }

        traceRETURN_vTaskSwitchContext();
    }
  4372. #else /* if ( configNUMBER_OF_CORES == 1 ) */
    /* Select the next task to run on the given core (SMP version).  Both
     * kernel locks are taken around the switch; if the scheduler is suspended
     * the switch is pended for this core instead of performed. */
    void vTaskSwitchContext( BaseType_t xCoreID )
    {
        traceENTER_vTaskSwitchContext();

        /* Acquire both locks:
         * - The ISR lock protects the ready list from simultaneous access by
         *   both other ISRs and tasks.
         * - We also take the task lock to pause here in case another core has
         *   suspended the scheduler. We don't want to simply set xYieldPending
         *   and move on if another core suspended the scheduler. We should only
         *   do that if the current core has suspended the scheduler. */
        portGET_TASK_LOCK(); /* Must always acquire the task lock first. */
        portGET_ISR_LOCK();
        {
            /* vTaskSwitchContext() must never be called from within a critical section.
             * This is not necessarily true for single core FreeRTOS, but it is for this
             * SMP port. */
            configASSERT( portGET_CRITICAL_NESTING_COUNT() == 0 );

            if( uxSchedulerSuspended != ( UBaseType_t ) 0U )
            {
                /* The scheduler is currently suspended - do not allow a context
                 * switch. */
                xYieldPendings[ xCoreID ] = pdTRUE;
            }
            else
            {
                xYieldPendings[ xCoreID ] = pdFALSE;
                traceTASK_SWITCHED_OUT();

                #if ( configGENERATE_RUN_TIME_STATS == 1 )
                {
                    #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
                        portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime[ xCoreID ] );
                    #else
                        ulTotalRunTime[ xCoreID ] = portGET_RUN_TIME_COUNTER_VALUE();
                    #endif

                    /* Add the amount of time the task has been running to the
                     * accumulated time so far. The time the task started running was
                     * stored in ulTaskSwitchedInTime. Note that there is no overflow
                     * protection here so count values are only valid until the timer
                     * overflows. The guard against negative values is to protect
                     * against suspect run time stat counter implementations - which
                     * are provided by the application, not the kernel. */
                    if( ulTotalRunTime[ xCoreID ] > ulTaskSwitchedInTime[ xCoreID ] )
                    {
                        pxCurrentTCBs[ xCoreID ]->ulRunTimeCounter += ( ulTotalRunTime[ xCoreID ] - ulTaskSwitchedInTime[ xCoreID ] );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    ulTaskSwitchedInTime[ xCoreID ] = ulTotalRunTime[ xCoreID ];
                }
                #endif /* configGENERATE_RUN_TIME_STATS */

                /* Check for stack overflow, if configured. */
                taskCHECK_FOR_STACK_OVERFLOW();

                /* Before the currently running task is switched out, save its errno. */
                #if ( configUSE_POSIX_ERRNO == 1 )
                {
                    pxCurrentTCBs[ xCoreID ]->iTaskErrno = FreeRTOS_errno;
                }
                #endif

                /* Select a new task to run. */
                taskSELECT_HIGHEST_PRIORITY_TASK( xCoreID );
                traceTASK_SWITCHED_IN();

                /* Macro to inject port specific behaviour immediately after
                 * switching tasks, such as setting an end of stack watchpoint
                 * or reconfiguring the MPU. */
                portTASK_SWITCH_HOOK( pxCurrentTCBs[ portGET_CORE_ID() ] );

                /* After the new task is switched in, update the global errno. */
                #if ( configUSE_POSIX_ERRNO == 1 )
                {
                    FreeRTOS_errno = pxCurrentTCBs[ xCoreID ]->iTaskErrno;
                }
                #endif

                #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )
                {
                    /* Switch C-Runtime's TLS Block to point to the TLS
                     * Block specific to this task. */
                    configSET_TLS_BLOCK( pxCurrentTCBs[ xCoreID ]->xTLSBlock );
                }
                #endif
            }
        }
        /* Release the locks in the reverse order to acquisition. */
        portRELEASE_ISR_LOCK();
        portRELEASE_TASK_LOCK();

        traceRETURN_vTaskSwitchContext();
    }
  4459. #endif /* if ( configNUMBER_OF_CORES > 1 ) */
  4460. /*-----------------------------------------------------------*/
  4461. void vTaskPlaceOnEventList( List_t * const pxEventList,
  4462. const TickType_t xTicksToWait )
  4463. {
  4464. traceENTER_vTaskPlaceOnEventList( pxEventList, xTicksToWait );
  4465. configASSERT( pxEventList );
  4466. /* THIS FUNCTION MUST BE CALLED WITH THE
  4467. * SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */
  4468. /* Place the event list item of the TCB in the appropriate event list.
  4469. * This is placed in the list in priority order so the highest priority task
  4470. * is the first to be woken by the event.
  4471. *
  4472. * Note: Lists are sorted in ascending order by ListItem_t.xItemValue.
  4473. * Normally, the xItemValue of a TCB's ListItem_t members is:
  4474. * xItemValue = ( configMAX_PRIORITIES - uxPriority )
  4475. * Therefore, the event list is sorted in descending priority order.
  4476. *
  4477. * The queue that contains the event list is locked, preventing
  4478. * simultaneous access from interrupts. */
  4479. vListInsert( pxEventList, &( pxCurrentTCB->xEventListItem ) );
  4480. prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
  4481. traceRETURN_vTaskPlaceOnEventList();
  4482. }
  4483. /*-----------------------------------------------------------*/
  4484. void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
  4485. const TickType_t xItemValue,
  4486. const TickType_t xTicksToWait )
  4487. {
  4488. traceENTER_vTaskPlaceOnUnorderedEventList( pxEventList, xItemValue, xTicksToWait );
  4489. configASSERT( pxEventList );
  4490. /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
  4491. * the event groups implementation. */
  4492. configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U );
  4493. /* Store the item value in the event list item. It is safe to access the
  4494. * event list item here as interrupts won't access the event list item of a
  4495. * task that is not in the Blocked state. */
  4496. listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
  4497. /* Place the event list item of the TCB at the end of the appropriate event
  4498. * list. It is safe to access the event list here because it is part of an
  4499. * event group implementation - and interrupts don't access event groups
  4500. * directly (instead they access them indirectly by pending function calls to
  4501. * the task level). */
  4502. listINSERT_END( pxEventList, &( pxCurrentTCB->xEventListItem ) );
  4503. prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
  4504. traceRETURN_vTaskPlaceOnUnorderedEventList();
  4505. }
  4506. /*-----------------------------------------------------------*/
  4507. #if ( configUSE_TIMERS == 1 )
  4508. void vTaskPlaceOnEventListRestricted( List_t * const pxEventList,
  4509. TickType_t xTicksToWait,
  4510. const BaseType_t xWaitIndefinitely )
  4511. {
  4512. traceENTER_vTaskPlaceOnEventListRestricted( pxEventList, xTicksToWait, xWaitIndefinitely );
  4513. configASSERT( pxEventList );
  4514. /* This function should not be called by application code hence the
  4515. * 'Restricted' in its name. It is not part of the public API. It is
  4516. * designed for use by kernel code, and has special calling requirements -
  4517. * it should be called with the scheduler suspended. */
  4518. /* Place the event list item of the TCB in the appropriate event list.
  4519. * In this case it is assume that this is the only task that is going to
  4520. * be waiting on this event list, so the faster vListInsertEnd() function
  4521. * can be used in place of vListInsert. */
  4522. listINSERT_END( pxEventList, &( pxCurrentTCB->xEventListItem ) );
  4523. /* If the task should block indefinitely then set the block time to a
  4524. * value that will be recognised as an indefinite delay inside the
  4525. * prvAddCurrentTaskToDelayedList() function. */
  4526. if( xWaitIndefinitely != pdFALSE )
  4527. {
  4528. xTicksToWait = portMAX_DELAY;
  4529. }
  4530. traceTASK_DELAY_UNTIL( ( xTickCount + xTicksToWait ) );
  4531. prvAddCurrentTaskToDelayedList( xTicksToWait, xWaitIndefinitely );
  4532. traceRETURN_vTaskPlaceOnEventListRestricted();
  4533. }
  4534. #endif /* configUSE_TIMERS */
  4535. /*-----------------------------------------------------------*/
BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
{
    TCB_t * pxUnblockedTCB;
    BaseType_t xReturn;

    traceENTER_xTaskRemoveFromEventList( pxEventList );

    /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be
     * called from a critical section within an ISR. */

    /* The event list is sorted in priority order, so the first in the list can
     * be removed as it is known to be the highest priority. Remove the TCB from
     * the delayed list, and add it to the ready list.
     *
     * If an event is for a queue that is locked then this function will never
     * get called - the lock count on the queue will get modified instead. This
     * means exclusive access to the event list is guaranteed here.
     *
     * This function assumes that a check has already been made to ensure that
     * pxEventList is not empty. */
    /* MISRA Ref 11.5.3 [Void pointer assignment] */
    /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
    /* coverity[misra_c_2012_rule_11_5_violation] */
    pxUnblockedTCB = listGET_OWNER_OF_HEAD_ENTRY( pxEventList );
    configASSERT( pxUnblockedTCB );
    listREMOVE_ITEM( &( pxUnblockedTCB->xEventListItem ) );

    if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
    {
        /* Scheduler running: move the unblocked task straight from the
         * delayed list to the ready list. */
        listREMOVE_ITEM( &( pxUnblockedTCB->xStateListItem ) );
        prvAddTaskToReadyList( pxUnblockedTCB );

        #if ( configUSE_TICKLESS_IDLE != 0 )
        {
            /* If a task is blocked on a kernel object then xNextTaskUnblockTime
             * might be set to the blocked task's time out time. If the task is
             * unblocked for a reason other than a timeout xNextTaskUnblockTime is
             * normally left unchanged, because it is automatically reset to a new
             * value when the tick count equals xNextTaskUnblockTime. However if
             * tickless idling is used it might be more important to enter sleep mode
             * at the earliest possible time - so reset xNextTaskUnblockTime here to
             * ensure it is updated at the earliest possible time. */
            prvResetNextTaskUnblockTime();
        }
        #endif
    }
    else
    {
        /* The delayed and ready lists cannot be accessed, so hold this task
         * pending until the scheduler is resumed. */
        listINSERT_END( &( xPendingReadyList ), &( pxUnblockedTCB->xEventListItem ) );
    }

    #if ( configNUMBER_OF_CORES == 1 )
    {
        if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority )
        {
            /* Return true if the task removed from the event list has a higher
             * priority than the calling task. This allows the calling task to know if
             * it should force a context switch now. */
            xReturn = pdTRUE;

            /* Mark that a yield is pending in case the user is not using the
             * "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
            xYieldPendings[ 0 ] = pdTRUE;
        }
        else
        {
            xReturn = pdFALSE;
        }
    }
    #else /* #if ( configNUMBER_OF_CORES == 1 ) */
    {
        xReturn = pdFALSE;

        #if ( configUSE_PREEMPTION == 1 )
        {
            /* SMP: let the scheduler decide which core (if any) the unblocked
             * task should preempt; report pdTRUE only when this core itself
             * now has a yield pending. */
            prvYieldForTask( pxUnblockedTCB );

            if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE )
            {
                xReturn = pdTRUE;
            }
        }
        #endif /* #if ( configUSE_PREEMPTION == 1 ) */
    }
    #endif /* #if ( configNUMBER_OF_CORES == 1 ) */

    traceRETURN_xTaskRemoveFromEventList( xReturn );

    return xReturn;
}
  4617. /*-----------------------------------------------------------*/
void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem,
                                        const TickType_t xItemValue )
{
    TCB_t * pxUnblockedTCB;

    traceENTER_vTaskRemoveFromUnorderedEventList( pxEventListItem, xItemValue );

    /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
     * the event flags implementation. */
    configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U );

    /* Store the new item value in the event list item. */
    listSET_LIST_ITEM_VALUE( pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );

    /* Remove the event list item from the event flag. Interrupts do not
     * access event flags. */
    /* MISRA Ref 11.5.3 [Void pointer assignment] */
    /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
    /* coverity[misra_c_2012_rule_11_5_violation] */
    pxUnblockedTCB = listGET_LIST_ITEM_OWNER( pxEventListItem );
    configASSERT( pxUnblockedTCB );
    listREMOVE_ITEM( pxEventListItem );

    #if ( configUSE_TICKLESS_IDLE != 0 )
    {
        /* If a task is blocked on a kernel object then xNextTaskUnblockTime
         * might be set to the blocked task's time out time. If the task is
         * unblocked for a reason other than a timeout xNextTaskUnblockTime is
         * normally left unchanged, because it is automatically reset to a new
         * value when the tick count equals xNextTaskUnblockTime. However if
         * tickless idling is used it might be more important to enter sleep mode
         * at the earliest possible time - so reset xNextTaskUnblockTime here to
         * ensure it is updated at the earliest possible time. */
        prvResetNextTaskUnblockTime();
    }
    #endif

    /* Remove the task from the delayed list and add it to the ready list. The
     * scheduler is suspended so interrupts will not be accessing the ready
     * lists. */
    listREMOVE_ITEM( &( pxUnblockedTCB->xStateListItem ) );
    prvAddTaskToReadyList( pxUnblockedTCB );

    #if ( configNUMBER_OF_CORES == 1 )
    {
        if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority )
        {
            /* The unblocked task has a priority above that of the calling task, so
             * a context switch is required. This function is called with the
             * scheduler suspended so xYieldPending is set so the context switch
             * occurs immediately that the scheduler is resumed (unsuspended). */
            xYieldPendings[ 0 ] = pdTRUE;
        }
    }
    #else /* #if ( configNUMBER_OF_CORES == 1 ) */
    {
        #if ( configUSE_PREEMPTION == 1 )
        {
            /* SMP: prvYieldForTask() must run inside a critical section; it
             * decides whether the unblocked task should preempt any core. */
            taskENTER_CRITICAL();
            {
                prvYieldForTask( pxUnblockedTCB );
            }
            taskEXIT_CRITICAL();
        }
        #endif
    }
    #endif /* #if ( configNUMBER_OF_CORES == 1 ) */

    traceRETURN_vTaskRemoveFromUnorderedEventList();
}
  4680. /*-----------------------------------------------------------*/
  4681. void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut )
  4682. {
  4683. traceENTER_vTaskSetTimeOutState( pxTimeOut );
  4684. configASSERT( pxTimeOut );
  4685. taskENTER_CRITICAL();
  4686. {
  4687. pxTimeOut->xOverflowCount = xNumOfOverflows;
  4688. pxTimeOut->xTimeOnEntering = xTickCount;
  4689. }
  4690. taskEXIT_CRITICAL();
  4691. traceRETURN_vTaskSetTimeOutState();
  4692. }
  4693. /*-----------------------------------------------------------*/
  4694. void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut )
  4695. {
  4696. traceENTER_vTaskInternalSetTimeOutState( pxTimeOut );
  4697. /* For internal use only as it does not use a critical section. */
  4698. pxTimeOut->xOverflowCount = xNumOfOverflows;
  4699. pxTimeOut->xTimeOnEntering = xTickCount;
  4700. traceRETURN_vTaskInternalSetTimeOutState();
  4701. }
  4702. /*-----------------------------------------------------------*/
BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
                                 TickType_t * const pxTicksToWait )
{
    BaseType_t xReturn;

    traceENTER_xTaskCheckForTimeOut( pxTimeOut, pxTicksToWait );

    configASSERT( pxTimeOut );
    configASSERT( pxTicksToWait );

    taskENTER_CRITICAL();
    {
        /* Minor optimisation. The tick count cannot change in this block. */
        const TickType_t xConstTickCount = xTickCount;
        /* Unsigned subtraction gives the correct elapsed time even if the
         * tick count wrapped once since the timeout state was captured. */
        const TickType_t xElapsedTime = xConstTickCount - pxTimeOut->xTimeOnEntering;

        /* NOTE: the #if blocks below splice optional leading branches onto a
         * single if/else-if chain - exactly one branch executes. */
        #if ( INCLUDE_xTaskAbortDelay == 1 )
            if( pxCurrentTCB->ucDelayAborted != ( uint8_t ) pdFALSE )
            {
                /* The delay was aborted, which is not the same as a time out,
                 * but has the same result. */
                pxCurrentTCB->ucDelayAborted = ( uint8_t ) pdFALSE;
                xReturn = pdTRUE;
            }
            else
        #endif

        #if ( INCLUDE_vTaskSuspend == 1 )
            if( *pxTicksToWait == portMAX_DELAY )
            {
                /* If INCLUDE_vTaskSuspend is set to 1 and the block time
                 * specified is the maximum block time then the task should block
                 * indefinitely, and therefore never time out. */
                xReturn = pdFALSE;
            }
            else
        #endif

        if( ( xNumOfOverflows != pxTimeOut->xOverflowCount ) && ( xConstTickCount >= pxTimeOut->xTimeOnEntering ) )
        {
            /* The tick count is greater than the time at which
             * vTaskSetTimeout() was called, but has also overflowed since
             * vTaskSetTimeOut() was called. It must have wrapped all the way
             * around and gone past again. This passed since vTaskSetTimeout()
             * was called. */
            xReturn = pdTRUE;
            *pxTicksToWait = ( TickType_t ) 0;
        }
        else if( xElapsedTime < *pxTicksToWait )
        {
            /* Not a genuine timeout. Adjust parameters for time remaining. */
            *pxTicksToWait -= xElapsedTime;
            /* Restart the timeout measurement from now so repeated calls keep
             * decrementing the remaining wait correctly. */
            vTaskInternalSetTimeOutState( pxTimeOut );
            xReturn = pdFALSE;
        }
        else
        {
            /* The full block time has elapsed - report a genuine timeout. */
            *pxTicksToWait = ( TickType_t ) 0;
            xReturn = pdTRUE;
        }
    }
    taskEXIT_CRITICAL();

    traceRETURN_xTaskCheckForTimeOut( xReturn );

    return xReturn;
}
  4762. /*-----------------------------------------------------------*/
  4763. void vTaskMissedYield( void )
  4764. {
  4765. traceENTER_vTaskMissedYield();
  4766. /* Must be called from within a critical section. */
  4767. xYieldPendings[ portGET_CORE_ID() ] = pdTRUE;
  4768. traceRETURN_vTaskMissedYield();
  4769. }
  4770. /*-----------------------------------------------------------*/
  4771. #if ( configUSE_TRACE_FACILITY == 1 )
  4772. UBaseType_t uxTaskGetTaskNumber( TaskHandle_t xTask )
  4773. {
  4774. UBaseType_t uxReturn;
  4775. TCB_t const * pxTCB;
  4776. traceENTER_uxTaskGetTaskNumber( xTask );
  4777. if( xTask != NULL )
  4778. {
  4779. pxTCB = xTask;
  4780. uxReturn = pxTCB->uxTaskNumber;
  4781. }
  4782. else
  4783. {
  4784. uxReturn = 0U;
  4785. }
  4786. traceRETURN_uxTaskGetTaskNumber( uxReturn );
  4787. return uxReturn;
  4788. }
  4789. #endif /* configUSE_TRACE_FACILITY */
  4790. /*-----------------------------------------------------------*/
  4791. #if ( configUSE_TRACE_FACILITY == 1 )
  4792. void vTaskSetTaskNumber( TaskHandle_t xTask,
  4793. const UBaseType_t uxHandle )
  4794. {
  4795. TCB_t * pxTCB;
  4796. traceENTER_vTaskSetTaskNumber( xTask, uxHandle );
  4797. if( xTask != NULL )
  4798. {
  4799. pxTCB = xTask;
  4800. pxTCB->uxTaskNumber = uxHandle;
  4801. }
  4802. traceRETURN_vTaskSetTaskNumber();
  4803. }
  4804. #endif /* configUSE_TRACE_FACILITY */
  4805. /*-----------------------------------------------------------*/
  4806. /*
  4807. * -----------------------------------------------------------
  4808. * The passive idle task.
  4809. * ----------------------------------------------------------
  4810. *
 * The passive idle task is used for all the additional cores in an SMP
 * system. There must be exactly one active idle task; the rest are passive
 * idle tasks.
  4814. *
  4815. * The portTASK_FUNCTION() macro is used to allow port/compiler specific
  4816. * language extensions. The equivalent prototype for this function is:
  4817. *
  4818. * void prvPassiveIdleTask( void *pvParameters );
  4819. */
#if ( configNUMBER_OF_CORES > 1 )

    /* Passive idle task body - one instance runs on every core except the
     * one running the (single) active idle task. It never cleans up deleted
     * tasks and never enters tickless idle; it only yields and runs the
     * optional passive idle hook. */
    static portTASK_FUNCTION( prvPassiveIdleTask, pvParameters )
    {
        ( void ) pvParameters;

        /* Initial yield to let application tasks start on this core. */
        taskYIELD();

        for( ; configCONTROL_INFINITE_LOOP(); )
        {
            #if ( configUSE_PREEMPTION == 0 )
            {
                /* If we are not using preemption we keep forcing a task switch to
                 * see if any other task has become available. If we are using
                 * preemption we don't need to do this as any task becoming available
                 * will automatically get the processor anyway. */
                taskYIELD();
            }
            #endif /* configUSE_PREEMPTION */

            #if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) )
            {
                /* When using preemption tasks of equal priority will be
                 * timesliced. If a task that is sharing the idle priority is ready
                 * to run then the idle task should yield before the end of the
                 * timeslice.
                 *
                 * A critical region is not required here as we are just reading from
                 * the list, and an occasional incorrect value will not matter. If
                 * the ready list at the idle priority contains one more task than the
                 * number of idle tasks, which is equal to the configured numbers of cores
                 * then a task other than the idle task is ready to execute. */
                if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) configNUMBER_OF_CORES )
                {
                    taskYIELD();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */

            #if ( configUSE_PASSIVE_IDLE_HOOK == 1 )
            {
                /* Call the user defined function from within the idle task. This
                 * allows the application designer to add background functionality
                 * without the overhead of a separate task.
                 *
                 * This hook is intended to manage core activity such as disabling cores that go idle.
                 *
                 * NOTE: vApplicationPassiveIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES,
                 * CALL A FUNCTION THAT MIGHT BLOCK. */
                vApplicationPassiveIdleHook();
            }
            #endif /* configUSE_PASSIVE_IDLE_HOOK */
        }
    }

#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
  4874. /*
  4875. * -----------------------------------------------------------
  4876. * The idle task.
  4877. * ----------------------------------------------------------
  4878. *
  4879. * The portTASK_FUNCTION() macro is used to allow port/compiler specific
  4880. * language extensions. The equivalent prototype for this function is:
  4881. *
  4882. * void prvIdleTask( void *pvParameters );
  4883. *
  4884. */
/* The (single) active idle task body. Responsible for freeing deleted
 * tasks' resources, optionally yielding to same-priority tasks, running the
 * idle hook, and entering tickless low-power idle when configured. */
static portTASK_FUNCTION( prvIdleTask, pvParameters )
{
    /* Stop warnings. */
    ( void ) pvParameters;

    /** THIS IS THE RTOS IDLE TASK - WHICH IS CREATED AUTOMATICALLY WHEN THE
     * SCHEDULER IS STARTED. **/

    /* In case a task that has a secure context deletes itself, in which case
     * the idle task is responsible for deleting the task's secure context, if
     * any. */
    portALLOCATE_SECURE_CONTEXT( configMINIMAL_SECURE_STACK_SIZE );

    #if ( configNUMBER_OF_CORES > 1 )
    {
        /* SMP all cores start up in the idle task. This initial yield gets the application
         * tasks started. */
        taskYIELD();
    }
    #endif /* #if ( configNUMBER_OF_CORES > 1 ) */

    for( ; configCONTROL_INFINITE_LOOP(); )
    {
        /* See if any tasks have deleted themselves - if so then the idle task
         * is responsible for freeing the deleted task's TCB and stack. */
        prvCheckTasksWaitingTermination();

        #if ( configUSE_PREEMPTION == 0 )
        {
            /* If we are not using preemption we keep forcing a task switch to
             * see if any other task has become available. If we are using
             * preemption we don't need to do this as any task becoming available
             * will automatically get the processor anyway. */
            taskYIELD();
        }
        #endif /* configUSE_PREEMPTION */

        #if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) )
        {
            /* When using preemption tasks of equal priority will be
             * timesliced. If a task that is sharing the idle priority is ready
             * to run then the idle task should yield before the end of the
             * timeslice.
             *
             * A critical region is not required here as we are just reading from
             * the list, and an occasional incorrect value will not matter. If
             * the ready list at the idle priority contains one more task than the
             * number of idle tasks, which is equal to the configured numbers of cores
             * then a task other than the idle task is ready to execute. */
            if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) configNUMBER_OF_CORES )
            {
                taskYIELD();
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */

        #if ( configUSE_IDLE_HOOK == 1 )
        {
            /* Call the user defined function from within the idle task. */
            vApplicationIdleHook();
        }
        #endif /* configUSE_IDLE_HOOK */

        /* This conditional compilation should use inequality to 0, not equality
         * to 1. This is to ensure portSUPPRESS_TICKS_AND_SLEEP() is called when
         * user defined low power mode implementations require
         * configUSE_TICKLESS_IDLE to be set to a value other than 1. */
        #if ( configUSE_TICKLESS_IDLE != 0 )
        {
            TickType_t xExpectedIdleTime;

            /* It is not desirable to suspend then resume the scheduler on
             * each iteration of the idle task. Therefore, a preliminary
             * test of the expected idle time is performed without the
             * scheduler suspended. The result here is not necessarily
             * valid. */
            xExpectedIdleTime = prvGetExpectedIdleTime();

            if( xExpectedIdleTime >= ( TickType_t ) configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
            {
                vTaskSuspendAll();
                {
                    /* Now the scheduler is suspended, the expected idle
                     * time can be sampled again, and this time its value can
                     * be used. */
                    configASSERT( xNextTaskUnblockTime >= xTickCount );
                    xExpectedIdleTime = prvGetExpectedIdleTime();

                    /* Define the following macro to set xExpectedIdleTime to 0
                     * if the application does not want
                     * portSUPPRESS_TICKS_AND_SLEEP() to be called. */
                    configPRE_SUPPRESS_TICKS_AND_SLEEP_PROCESSING( xExpectedIdleTime );

                    if( xExpectedIdleTime >= ( TickType_t ) configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
                    {
                        traceLOW_POWER_IDLE_BEGIN();
                        portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime );
                        traceLOW_POWER_IDLE_END();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                ( void ) xTaskResumeAll();
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        #endif /* configUSE_TICKLESS_IDLE */

        #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_PASSIVE_IDLE_HOOK == 1 ) )
        {
            /* Call the user defined function from within the idle task. This
             * allows the application designer to add background functionality
             * without the overhead of a separate task.
             *
             * This hook is intended to manage core activity such as disabling cores that go idle.
             *
             * NOTE: vApplicationPassiveIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES,
             * CALL A FUNCTION THAT MIGHT BLOCK. */
            vApplicationPassiveIdleHook();
        }
        #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_PASSIVE_IDLE_HOOK == 1 ) ) */
    }
}
  5004. /*-----------------------------------------------------------*/
#if ( configUSE_TICKLESS_IDLE != 0 )

    /* Final go/no-go check before tickless sleep: returns eAbortSleep when
     * something became ready while preparing to sleep, eNoTasksWaitingTimeout
     * when the clock can stop completely, otherwise eStandardSleep. */
    eSleepModeStatus eTaskConfirmSleepModeStatus( void )
    {
        #if ( INCLUDE_vTaskSuspend == 1 )
            /* The idle task exists in addition to the application tasks. */
            const UBaseType_t uxNonApplicationTasks = configNUMBER_OF_CORES;
        #endif /* INCLUDE_vTaskSuspend */

        eSleepModeStatus eReturn = eStandardSleep;

        traceENTER_eTaskConfirmSleepModeStatus();

        /* This function must be called from a critical section. */

        if( listCURRENT_LIST_LENGTH( &xPendingReadyList ) != 0U )
        {
            /* A task was made ready while the scheduler was suspended. */
            eReturn = eAbortSleep;
        }
        else if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE )
        {
            /* A yield was pended while the scheduler was suspended. */
            eReturn = eAbortSleep;
        }
        else if( xPendedTicks != 0U )
        {
            /* A tick interrupt has already occurred but was held pending
             * because the scheduler is suspended. */
            eReturn = eAbortSleep;
        }

        #if ( INCLUDE_vTaskSuspend == 1 )
            else if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == ( uxCurrentNumberOfTasks - uxNonApplicationTasks ) )
            {
                /* If all the tasks are in the suspended list (which might mean they
                 * have an infinite block time rather than actually being suspended)
                 * then it is safe to turn all clocks off and just wait for external
                 * interrupts. */
                eReturn = eNoTasksWaitingTimeout;
            }
        #endif /* INCLUDE_vTaskSuspend */

        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_eTaskConfirmSleepModeStatus( eReturn );

        return eReturn;
    }

#endif /* configUSE_TICKLESS_IDLE */
  5049. /*-----------------------------------------------------------*/
  5050. #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
  5051. void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
  5052. BaseType_t xIndex,
  5053. void * pvValue )
  5054. {
  5055. TCB_t * pxTCB;
  5056. traceENTER_vTaskSetThreadLocalStoragePointer( xTaskToSet, xIndex, pvValue );
  5057. if( ( xIndex >= 0 ) &&
  5058. ( xIndex < ( BaseType_t ) configNUM_THREAD_LOCAL_STORAGE_POINTERS ) )
  5059. {
  5060. pxTCB = prvGetTCBFromHandle( xTaskToSet );
  5061. configASSERT( pxTCB != NULL );
  5062. pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
  5063. }
  5064. traceRETURN_vTaskSetThreadLocalStoragePointer();
  5065. }
  5066. #endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
  5067. /*-----------------------------------------------------------*/
  5068. #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
  5069. void * pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
  5070. BaseType_t xIndex )
  5071. {
  5072. void * pvReturn = NULL;
  5073. TCB_t * pxTCB;
  5074. traceENTER_pvTaskGetThreadLocalStoragePointer( xTaskToQuery, xIndex );
  5075. if( ( xIndex >= 0 ) &&
  5076. ( xIndex < ( BaseType_t ) configNUM_THREAD_LOCAL_STORAGE_POINTERS ) )
  5077. {
  5078. pxTCB = prvGetTCBFromHandle( xTaskToQuery );
  5079. pvReturn = pxTCB->pvThreadLocalStoragePointers[ xIndex ];
  5080. }
  5081. else
  5082. {
  5083. pvReturn = NULL;
  5084. }
  5085. traceRETURN_pvTaskGetThreadLocalStoragePointer( pvReturn );
  5086. return pvReturn;
  5087. }
  5088. #endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
  5089. /*-----------------------------------------------------------*/
  5090. #if ( portUSING_MPU_WRAPPERS == 1 )
  5091. void vTaskAllocateMPURegions( TaskHandle_t xTaskToModify,
  5092. const MemoryRegion_t * const pxRegions )
  5093. {
  5094. TCB_t * pxTCB;
  5095. traceENTER_vTaskAllocateMPURegions( xTaskToModify, pxRegions );
  5096. /* If null is passed in here then we are modifying the MPU settings of
  5097. * the calling task. */
  5098. pxTCB = prvGetTCBFromHandle( xTaskToModify );
  5099. vPortStoreTaskMPUSettings( &( pxTCB->xMPUSettings ), pxRegions, NULL, 0 );
  5100. traceRETURN_vTaskAllocateMPURegions();
  5101. }
  5102. #endif /* portUSING_MPU_WRAPPERS */
  5103. /*-----------------------------------------------------------*/
  5104. static void prvInitialiseTaskLists( void )
  5105. {
  5106. UBaseType_t uxPriority;
  5107. for( uxPriority = ( UBaseType_t ) 0U; uxPriority < ( UBaseType_t ) configMAX_PRIORITIES; uxPriority++ )
  5108. {
  5109. vListInitialise( &( pxReadyTasksLists[ uxPriority ] ) );
  5110. }
  5111. vListInitialise( &xDelayedTaskList1 );
  5112. vListInitialise( &xDelayedTaskList2 );
  5113. vListInitialise( &xPendingReadyList );
  5114. #if ( INCLUDE_vTaskDelete == 1 )
  5115. {
  5116. vListInitialise( &xTasksWaitingTermination );
  5117. }
  5118. #endif /* INCLUDE_vTaskDelete */
  5119. #if ( INCLUDE_vTaskSuspend == 1 )
  5120. {
  5121. vListInitialise( &xSuspendedTaskList );
  5122. }
  5123. #endif /* INCLUDE_vTaskSuspend */
  5124. /* Start with pxDelayedTaskList using list1 and the pxOverflowDelayedTaskList
  5125. * using list2. */
  5126. pxDelayedTaskList = &xDelayedTaskList1;
  5127. pxOverflowDelayedTaskList = &xDelayedTaskList2;
  5128. }
  5129. /*-----------------------------------------------------------*/
/* Called from the idle task: frees the TCB and stack of any task that
 * deleted itself (such tasks are parked on xTasksWaitingTermination until
 * the idle task can safely reclaim them). */
static void prvCheckTasksWaitingTermination( void )
{
    /** THIS FUNCTION IS CALLED FROM THE RTOS IDLE TASK **/

    #if ( INCLUDE_vTaskDelete == 1 )
    {
        TCB_t * pxTCB;

        /* uxDeletedTasksWaitingCleanUp is used to prevent taskENTER_CRITICAL()
         * being called too often in the idle task. */
        while( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U )
        {
            #if ( configNUMBER_OF_CORES == 1 )
            {
                taskENTER_CRITICAL();
                {
                    {
                        /* MISRA Ref 11.5.3 [Void pointer assignment] */
                        /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
                        /* coverity[misra_c_2012_rule_11_5_violation] */
                        pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) );
                        ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
                        --uxCurrentNumberOfTasks;
                        --uxDeletedTasksWaitingCleanUp;
                    }
                }
                taskEXIT_CRITICAL();

                /* Free the TCB/stack outside the critical section. */
                prvDeleteTCB( pxTCB );
            }
            #else /* #if( configNUMBER_OF_CORES == 1 ) */
            {
                pxTCB = NULL;

                taskENTER_CRITICAL();
                {
                    /* For SMP, multiple idles can be running simultaneously
                     * and we need to check that other idles did not cleanup while we were
                     * waiting to enter the critical section. */
                    if( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U )
                    {
                        /* MISRA Ref 11.5.3 [Void pointer assignment] */
                        /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
                        /* coverity[misra_c_2012_rule_11_5_violation] */
                        pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) );

                        if( pxTCB->xTaskRunState == taskTASK_NOT_RUNNING )
                        {
                            ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
                            --uxCurrentNumberOfTasks;
                            --uxDeletedTasksWaitingCleanUp;
                        }
                        else
                        {
                            /* The TCB to be deleted still has not yet been switched out
                             * by the scheduler, so we will just exit this loop early and
                             * try again next time. */
                            taskEXIT_CRITICAL();
                            break;
                        }
                    }
                }
                taskEXIT_CRITICAL();

                if( pxTCB != NULL )
                {
                    /* Free the TCB/stack outside the critical section. */
                    prvDeleteTCB( pxTCB );
                }
            }
            #endif /* #if( configNUMBER_OF_CORES == 1 ) */
        }
    }
    #endif /* INCLUDE_vTaskDelete */
}
  5198. /*-----------------------------------------------------------*/
#if ( configUSE_TRACE_FACILITY == 1 )

/* Populate *pxTaskStatus with information about the task referenced by
 * xTask (the calling task when xTask is NULL).  Pass eInvalid as eState to
 * have the task's state determined here (slower); any other value is used
 * as-is.  Pass pdFALSE as xGetFreeStackSpace to skip the (slow) stack high
 * water mark calculation. */
    void vTaskGetInfo( TaskHandle_t xTask,
                       TaskStatus_t * pxTaskStatus,
                       BaseType_t xGetFreeStackSpace,
                       eTaskState eState )
    {
        TCB_t * pxTCB;

        traceENTER_vTaskGetInfo( xTask, pxTaskStatus, xGetFreeStackSpace, eState );

        /* xTask is NULL then get the state of the calling task. */
        pxTCB = prvGetTCBFromHandle( xTask );

        pxTaskStatus->xHandle = pxTCB;
        pxTaskStatus->pcTaskName = ( const char * ) &( pxTCB->pcTaskName[ 0 ] );
        pxTaskStatus->uxCurrentPriority = pxTCB->uxPriority;
        pxTaskStatus->pxStackBase = pxTCB->pxStack;
        #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) )
            pxTaskStatus->pxTopOfStack = ( StackType_t * ) pxTCB->pxTopOfStack;
            pxTaskStatus->pxEndOfStack = pxTCB->pxEndOfStack;
        #endif
        pxTaskStatus->xTaskNumber = pxTCB->uxTCBNumber;

        #if ( ( configUSE_CORE_AFFINITY == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
        {
            pxTaskStatus->uxCoreAffinityMask = pxTCB->uxCoreAffinityMask;
        }
        #endif

        #if ( configUSE_MUTEXES == 1 )
        {
            pxTaskStatus->uxBasePriority = pxTCB->uxBasePriority;
        }
        #else
        {
            /* Without mutexes there is no priority inheritance, so no
             * separate base priority is tracked. */
            pxTaskStatus->uxBasePriority = 0;
        }
        #endif

        #if ( configGENERATE_RUN_TIME_STATS == 1 )
        {
            pxTaskStatus->ulRunTimeCounter = pxTCB->ulRunTimeCounter;
        }
        #else
        {
            pxTaskStatus->ulRunTimeCounter = ( configRUN_TIME_COUNTER_TYPE ) 0;
        }
        #endif

        /* Obtaining the task state is a little fiddly, so is only done if the
         * value of eState passed into this function is eInvalid - otherwise the
         * state is just set to whatever is passed in. */
        if( eState != eInvalid )
        {
            if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
            {
                pxTaskStatus->eCurrentState = eRunning;
            }
            else
            {
                pxTaskStatus->eCurrentState = eState;

                #if ( INCLUDE_vTaskSuspend == 1 )
                {
                    /* If the task is in the suspended list then there is a
                     * chance it is actually just blocked indefinitely - so really
                     * it should be reported as being in the Blocked state. */
                    if( eState == eSuspended )
                    {
                        vTaskSuspendAll();
                        {
                            if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
                            {
                                /* Waiting on an event - report as Blocked. */
                                pxTaskStatus->eCurrentState = eBlocked;
                            }
                            else
                            {
                                #if ( configUSE_TASK_NOTIFICATIONS == 1 )
                                {
                                    BaseType_t x;

                                    /* The task does not appear on the event list item of
                                     * and of the RTOS objects, but could still be in the
                                     * blocked state if it is waiting on its notification
                                     * rather than waiting on an object.  If not, is
                                     * suspended. */
                                    for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ )
                                    {
                                        if( pxTCB->ucNotifyState[ x ] == taskWAITING_NOTIFICATION )
                                        {
                                            pxTaskStatus->eCurrentState = eBlocked;
                                            break;
                                        }
                                    }
                                }
                                #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
                            }
                        }
                        ( void ) xTaskResumeAll();
                    }
                }
                #endif /* INCLUDE_vTaskSuspend */

                /* Tasks can be in pending ready list and other state list at the
                 * same time.  These tasks are in ready state no matter what state
                 * list the task is in. */
                taskENTER_CRITICAL();
                {
                    if( listIS_CONTAINED_WITHIN( &xPendingReadyList, &( pxTCB->xEventListItem ) ) != pdFALSE )
                    {
                        pxTaskStatus->eCurrentState = eReady;
                    }
                }
                taskEXIT_CRITICAL();
            }
        }
        else
        {
            /* Caller asked for the state to be looked up. */
            pxTaskStatus->eCurrentState = eTaskGetState( pxTCB );
        }

        /* Obtaining the stack space takes some time, so the xGetFreeStackSpace
         * parameter is provided to allow it to be skipped. */
        if( xGetFreeStackSpace != pdFALSE )
        {
            #if ( portSTACK_GROWTH > 0 )
            {
                pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxEndOfStack );
            }
            #else
            {
                pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxStack );
            }
            #endif
        }
        else
        {
            pxTaskStatus->usStackHighWaterMark = 0;
        }

        traceRETURN_vTaskGetInfo();
    }

#endif /* configUSE_TRACE_FACILITY */
  5330. /*-----------------------------------------------------------*/
  5331. #if ( configUSE_TRACE_FACILITY == 1 )
  5332. static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t * pxTaskStatusArray,
  5333. List_t * pxList,
  5334. eTaskState eState )
  5335. {
  5336. UBaseType_t uxTask = 0;
  5337. const ListItem_t * pxEndMarker = listGET_END_MARKER( pxList );
  5338. ListItem_t * pxIterator;
  5339. TCB_t * pxTCB = NULL;
  5340. if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
  5341. {
  5342. /* Populate an TaskStatus_t structure within the
  5343. * pxTaskStatusArray array for each task that is referenced from
  5344. * pxList. See the definition of TaskStatus_t in task.h for the
  5345. * meaning of each TaskStatus_t structure member. */
  5346. for( pxIterator = listGET_HEAD_ENTRY( pxList ); pxIterator != pxEndMarker; pxIterator = listGET_NEXT( pxIterator ) )
  5347. {
  5348. /* MISRA Ref 11.5.3 [Void pointer assignment] */
  5349. /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
  5350. /* coverity[misra_c_2012_rule_11_5_violation] */
  5351. pxTCB = listGET_LIST_ITEM_OWNER( pxIterator );
  5352. vTaskGetInfo( ( TaskHandle_t ) pxTCB, &( pxTaskStatusArray[ uxTask ] ), pdTRUE, eState );
  5353. uxTask++;
  5354. }
  5355. }
  5356. else
  5357. {
  5358. mtCOVERAGE_TEST_MARKER();
  5359. }
  5360. return uxTask;
  5361. }
  5362. #endif /* configUSE_TRACE_FACILITY */
  5363. /*-----------------------------------------------------------*/
  5364. #if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
  5365. static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte )
  5366. {
  5367. configSTACK_DEPTH_TYPE uxCount = 0U;
  5368. while( *pucStackByte == ( uint8_t ) tskSTACK_FILL_BYTE )
  5369. {
  5370. pucStackByte -= portSTACK_GROWTH;
  5371. uxCount++;
  5372. }
  5373. uxCount /= ( configSTACK_DEPTH_TYPE ) sizeof( StackType_t );
  5374. return uxCount;
  5375. }
  5376. #endif /* ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) ) */
  5377. /*-----------------------------------------------------------*/
  5378. #if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
  5379. /* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are the
  5380. * same except for their return type. Using configSTACK_DEPTH_TYPE allows the
  5381. * user to determine the return type. It gets around the problem of the value
  5382. * overflowing on 8-bit types without breaking backward compatibility for
  5383. * applications that expect an 8-bit return type. */
  5384. configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask )
  5385. {
  5386. TCB_t * pxTCB;
  5387. uint8_t * pucEndOfStack;
  5388. configSTACK_DEPTH_TYPE uxReturn;
  5389. traceENTER_uxTaskGetStackHighWaterMark2( xTask );
  5390. /* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are
  5391. * the same except for their return type. Using configSTACK_DEPTH_TYPE
  5392. * allows the user to determine the return type. It gets around the
  5393. * problem of the value overflowing on 8-bit types without breaking
  5394. * backward compatibility for applications that expect an 8-bit return
  5395. * type. */
  5396. pxTCB = prvGetTCBFromHandle( xTask );
  5397. #if portSTACK_GROWTH < 0
  5398. {
  5399. pucEndOfStack = ( uint8_t * ) pxTCB->pxStack;
  5400. }
  5401. #else
  5402. {
  5403. pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack;
  5404. }
  5405. #endif
  5406. uxReturn = prvTaskCheckFreeStackSpace( pucEndOfStack );
  5407. traceRETURN_uxTaskGetStackHighWaterMark2( uxReturn );
  5408. return uxReturn;
  5409. }
  5410. #endif /* INCLUDE_uxTaskGetStackHighWaterMark2 */
  5411. /*-----------------------------------------------------------*/
  5412. #if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
  5413. UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask )
  5414. {
  5415. TCB_t * pxTCB;
  5416. uint8_t * pucEndOfStack;
  5417. UBaseType_t uxReturn;
  5418. traceENTER_uxTaskGetStackHighWaterMark( xTask );
  5419. pxTCB = prvGetTCBFromHandle( xTask );
  5420. #if portSTACK_GROWTH < 0
  5421. {
  5422. pucEndOfStack = ( uint8_t * ) pxTCB->pxStack;
  5423. }
  5424. #else
  5425. {
  5426. pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack;
  5427. }
  5428. #endif
  5429. uxReturn = ( UBaseType_t ) prvTaskCheckFreeStackSpace( pucEndOfStack );
  5430. traceRETURN_uxTaskGetStackHighWaterMark( uxReturn );
  5431. return uxReturn;
  5432. }
  5433. #endif /* INCLUDE_uxTaskGetStackHighWaterMark */
  5434. /*-----------------------------------------------------------*/
#if ( INCLUDE_vTaskDelete == 1 )

/* Free the memory owned by a task that has been removed from all kernel
 * lists.  Which of the TCB/stack are freed depends on how they were
 * allocated (statically, dynamically, or a mix). */
    static void prvDeleteTCB( TCB_t * pxTCB )
    {
        /* This call is required specifically for the TriCore port.  It must be
         * above the vPortFree() calls.  The call is also used by ports/demos that
         * want to allocate and clean RAM statically. */
        portCLEAN_UP_TCB( pxTCB );

        #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )
        {
            /* Free up the memory allocated for the task's TLS Block. */
            configDEINIT_TLS_BLOCK( pxTCB->xTLSBlock );
        }
        #endif

        #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) && ( portUSING_MPU_WRAPPERS == 0 ) )
        {
            /* The task can only have been allocated dynamically - free both
             * the stack and TCB. */
            vPortFreeStack( pxTCB->pxStack );
            vPortFree( pxTCB );
        }
        #elif ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
        {
            /* The task could have been allocated statically or dynamically, so
             * check what was statically allocated before trying to free the
             * memory. */
            if( pxTCB->ucStaticallyAllocated == tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB )
            {
                /* Both the stack and TCB were allocated dynamically, so both
                 * must be freed. */
                vPortFreeStack( pxTCB->pxStack );
                vPortFree( pxTCB );
            }
            else if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_ONLY )
            {
                /* Only the stack was statically allocated, so the TCB is the
                 * only memory that must be freed. */
                vPortFree( pxTCB );
            }
            else
            {
                /* Neither the stack nor the TCB were allocated dynamically, so
                 * nothing needs to be freed. */
                configASSERT( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_AND_TCB );
                mtCOVERAGE_TEST_MARKER();
            }
        }
        #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
    }

#endif /* INCLUDE_vTaskDelete */
  5484. /*-----------------------------------------------------------*/
  5485. static void prvResetNextTaskUnblockTime( void )
  5486. {
  5487. if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
  5488. {
  5489. /* The new current delayed list is empty. Set xNextTaskUnblockTime to
  5490. * the maximum possible value so it is extremely unlikely that the
  5491. * if( xTickCount >= xNextTaskUnblockTime ) test will pass until
  5492. * there is an item in the delayed list. */
  5493. xNextTaskUnblockTime = portMAX_DELAY;
  5494. }
  5495. else
  5496. {
  5497. /* The new current delayed list is not empty, get the value of
  5498. * the item at the head of the delayed list. This is the time at
  5499. * which the task at the head of the delayed list should be removed
  5500. * from the Blocked state. */
  5501. xNextTaskUnblockTime = listGET_ITEM_VALUE_OF_HEAD_ENTRY( pxDelayedTaskList );
  5502. }
  5503. }
  5504. /*-----------------------------------------------------------*/
#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) || ( configNUMBER_OF_CORES > 1 )

    #if ( configNUMBER_OF_CORES == 1 )

/* Return the handle of the task calling this function. */
        TaskHandle_t xTaskGetCurrentTaskHandle( void )
        {
            TaskHandle_t xReturn;

            traceENTER_xTaskGetCurrentTaskHandle();

            /* A critical section is not required as this is not called from
             * an interrupt and the current TCB will always be the same for any
             * individual execution thread. */
            xReturn = pxCurrentTCB;

            traceRETURN_xTaskGetCurrentTaskHandle( xReturn );

            return xReturn;
        }

    #else /* #if ( configNUMBER_OF_CORES == 1 ) */

/* SMP version: the running task is per-core, so interrupts are masked to
 * keep the core ID lookup and the TCB read atomic. */
        TaskHandle_t xTaskGetCurrentTaskHandle( void )
        {
            TaskHandle_t xReturn;
            UBaseType_t uxSavedInterruptStatus;

            traceENTER_xTaskGetCurrentTaskHandle();

            uxSavedInterruptStatus = portSET_INTERRUPT_MASK();
            {
                xReturn = pxCurrentTCBs[ portGET_CORE_ID() ];
            }
            portCLEAR_INTERRUPT_MASK( uxSavedInterruptStatus );

            traceRETURN_xTaskGetCurrentTaskHandle( xReturn );

            return xReturn;
        }

    #endif /* #if ( configNUMBER_OF_CORES == 1 ) */

/* Return the handle of the task currently running on core xCoreID, or NULL
 * if xCoreID is not a valid core number. */
    TaskHandle_t xTaskGetCurrentTaskHandleForCore( BaseType_t xCoreID )
    {
        TaskHandle_t xReturn = NULL;

        traceENTER_xTaskGetCurrentTaskHandleForCore( xCoreID );

        if( taskVALID_CORE_ID( xCoreID ) != pdFALSE )
        {
            #if ( configNUMBER_OF_CORES == 1 )
                xReturn = pxCurrentTCB;
            #else /* #if ( configNUMBER_OF_CORES == 1 ) */
                xReturn = pxCurrentTCBs[ xCoreID ];
            #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
        }

        traceRETURN_xTaskGetCurrentTaskHandleForCore( xReturn );

        return xReturn;
    }

#endif /* ( ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) || ( configNUMBER_OF_CORES > 1 ) ) */
  5549. /*-----------------------------------------------------------*/
#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )

/* Report whether the scheduler is not yet started, running, or suspended
 * (by vTaskSuspendAll()). */
    BaseType_t xTaskGetSchedulerState( void )
    {
        BaseType_t xReturn;

        traceENTER_xTaskGetSchedulerState();

        if( xSchedulerRunning == pdFALSE )
        {
            xReturn = taskSCHEDULER_NOT_STARTED;
        }
        else
        {
            /* In SMP builds uxSchedulerSuspended can be changed by another
             * core, so read it inside a critical section. */
            #if ( configNUMBER_OF_CORES > 1 )
                taskENTER_CRITICAL();
            #endif
            {
                if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
                {
                    xReturn = taskSCHEDULER_RUNNING;
                }
                else
                {
                    xReturn = taskSCHEDULER_SUSPENDED;
                }
            }
            #if ( configNUMBER_OF_CORES > 1 )
                taskEXIT_CRITICAL();
            #endif
        }

        traceRETURN_xTaskGetSchedulerState( xReturn );

        return xReturn;
    }

#endif /* ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) */
  5582. /*-----------------------------------------------------------*/
#if ( configUSE_MUTEXES == 1 )

/* Raise the priority of the task holding a mutex (pxMutexHolder) to that of
 * the calling task, if the holder's priority is lower.  Called when the
 * calling task fails to obtain the mutex.  Returns pdTRUE if priority
 * inheritance has occurred (now or previously), otherwise pdFALSE. */
    BaseType_t xTaskPriorityInherit( TaskHandle_t const pxMutexHolder )
    {
        TCB_t * const pxMutexHolderTCB = pxMutexHolder;
        BaseType_t xReturn = pdFALSE;

        traceENTER_xTaskPriorityInherit( pxMutexHolder );

        /* If the mutex is taken by an interrupt, the mutex holder is NULL.  Priority
         * inheritance is not applied in this scenario. */
        if( pxMutexHolder != NULL )
        {
            /* If the holder of the mutex has a priority below the priority of
             * the task attempting to obtain the mutex then it will temporarily
             * inherit the priority of the task attempting to obtain the mutex. */
            if( pxMutexHolderTCB->uxPriority < pxCurrentTCB->uxPriority )
            {
                /* Adjust the mutex holder state to account for its new
                 * priority.  Only reset the event list item value if the value is
                 * not being used for anything else. */
                if( ( listGET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == ( ( TickType_t ) 0U ) )
                {
                    listSET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority );
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                /* If the task being modified is in the ready state it will need
                 * to be moved into a new list. */
                if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxMutexHolderTCB->uxPriority ] ), &( pxMutexHolderTCB->xStateListItem ) ) != pdFALSE )
                {
                    if( uxListRemove( &( pxMutexHolderTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
                    {
                        /* It is known that the task is in its ready list so
                         * there is no need to check again and the port level
                         * reset macro can be called directly. */
                        portRESET_READY_PRIORITY( pxMutexHolderTCB->uxPriority, uxTopReadyPriority );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* Inherit the priority before being moved into the new list. */
                    pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority;
                    prvAddTaskToReadyList( pxMutexHolderTCB );
                    #if ( configNUMBER_OF_CORES > 1 )
                    {
                        /* The priority of the task is raised.  Yield for this task
                         * if it is not running. */
                        if( taskTASK_IS_RUNNING( pxMutexHolderTCB ) != pdTRUE )
                        {
                            prvYieldForTask( pxMutexHolderTCB );
                        }
                    }
                    #endif /* if ( configNUMBER_OF_CORES > 1 ) */
                }
                else
                {
                    /* Just inherit the priority. */
                    pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority;
                }

                traceTASK_PRIORITY_INHERIT( pxMutexHolderTCB, pxCurrentTCB->uxPriority );

                /* Inheritance occurred. */
                xReturn = pdTRUE;
            }
            else
            {
                if( pxMutexHolderTCB->uxBasePriority < pxCurrentTCB->uxPriority )
                {
                    /* The base priority of the mutex holder is lower than the
                     * priority of the task attempting to take the mutex, but the
                     * current priority of the mutex holder is not lower than the
                     * priority of the task attempting to take the mutex.
                     * Therefore the mutex holder must have already inherited a
                     * priority, but inheritance would have occurred if that had
                     * not been the case. */
                    xReturn = pdTRUE;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_xTaskPriorityInherit( xReturn );

        return xReturn;
    }

#endif /* configUSE_MUTEXES */
  5674. /*-----------------------------------------------------------*/
#if ( configUSE_MUTEXES == 1 )

/* Undo priority inheritance when the calling task gives back a mutex it
 * holds (pxMutexHolder).  The priority is only restored to the base
 * priority once the task holds no other mutexes.  Returns pdTRUE when a
 * context switch is required. */
    BaseType_t xTaskPriorityDisinherit( TaskHandle_t const pxMutexHolder )
    {
        TCB_t * const pxTCB = pxMutexHolder;
        BaseType_t xReturn = pdFALSE;

        traceENTER_xTaskPriorityDisinherit( pxMutexHolder );

        if( pxMutexHolder != NULL )
        {
            /* A task can only have an inherited priority if it holds the mutex.
             * If the mutex is held by a task then it cannot be given from an
             * interrupt, and if a mutex is given by the holding task then it must
             * be the running state task. */
            configASSERT( pxTCB == pxCurrentTCB );
            configASSERT( pxTCB->uxMutexesHeld );
            ( pxTCB->uxMutexesHeld )--;

            /* Has the holder of the mutex inherited the priority of another
             * task? */
            if( pxTCB->uxPriority != pxTCB->uxBasePriority )
            {
                /* Only disinherit if no other mutexes are held. */
                if( pxTCB->uxMutexesHeld == ( UBaseType_t ) 0 )
                {
                    /* A task can only have an inherited priority if it holds
                     * the mutex.  If the mutex is held by a task then it cannot be
                     * given from an interrupt, and if a mutex is given by the
                     * holding task then it must be the running state task.  Remove
                     * the holding task from the ready list. */
                    if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
                    {
                        portRESET_READY_PRIORITY( pxTCB->uxPriority, uxTopReadyPriority );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* Disinherit the priority before adding the task into the
                     * new ready list. */
                    traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority );
                    pxTCB->uxPriority = pxTCB->uxBasePriority;

                    /* Reset the event list item value.  It cannot be in use for
                     * any other purpose if this task is running, and it must be
                     * running to give back the mutex. */
                    listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority );
                    prvAddTaskToReadyList( pxTCB );
                    #if ( configNUMBER_OF_CORES > 1 )
                    {
                        /* The priority of the task is dropped.  Yield the core on
                         * which the task is running. */
                        if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
                        {
                            prvYieldCore( pxTCB->xTaskRunState );
                        }
                    }
                    #endif /* if ( configNUMBER_OF_CORES > 1 ) */

                    /* Return true to indicate that a context switch is required.
                     * This is only actually required in the corner case whereby
                     * multiple mutexes were held and the mutexes were given back
                     * in an order different to that in which they were taken.
                     * If a context switch did not occur when the first mutex was
                     * returned, even if a task was waiting on it, then a context
                     * switch should occur when the last mutex is returned whether
                     * a task is waiting on it or not. */
                    xReturn = pdTRUE;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_xTaskPriorityDisinherit( xReturn );

        return xReturn;
    }

#endif /* configUSE_MUTEXES */
  5757. /*-----------------------------------------------------------*/
#if ( configUSE_MUTEXES == 1 )

/* Adjust the priority of a mutex holder after a task waiting for the mutex
 * has timed out.  The holder's priority is set to the greater of its base
 * priority and the priority of the highest-priority task still waiting
 * (uxHighestPriorityWaitingTask), but only while it holds exactly one
 * mutex - see the simplification note below. */
    void vTaskPriorityDisinheritAfterTimeout( TaskHandle_t const pxMutexHolder,
                                              UBaseType_t uxHighestPriorityWaitingTask )
    {
        TCB_t * const pxTCB = pxMutexHolder;
        UBaseType_t uxPriorityUsedOnEntry, uxPriorityToUse;
        const UBaseType_t uxOnlyOneMutexHeld = ( UBaseType_t ) 1;

        traceENTER_vTaskPriorityDisinheritAfterTimeout( pxMutexHolder, uxHighestPriorityWaitingTask );

        if( pxMutexHolder != NULL )
        {
            /* If pxMutexHolder is not NULL then the holder must hold at least
             * one mutex. */
            configASSERT( pxTCB->uxMutexesHeld );

            /* Determine the priority to which the priority of the task that
             * holds the mutex should be set.  This will be the greater of the
             * holding task's base priority and the priority of the highest
             * priority task that is waiting to obtain the mutex. */
            if( pxTCB->uxBasePriority < uxHighestPriorityWaitingTask )
            {
                uxPriorityToUse = uxHighestPriorityWaitingTask;
            }
            else
            {
                uxPriorityToUse = pxTCB->uxBasePriority;
            }

            /* Does the priority need to change? */
            if( pxTCB->uxPriority != uxPriorityToUse )
            {
                /* Only disinherit if no other mutexes are held.  This is a
                 * simplification in the priority inheritance implementation.  If
                 * the task that holds the mutex is also holding other mutexes then
                 * the other mutexes may have caused the priority inheritance. */
                if( pxTCB->uxMutexesHeld == uxOnlyOneMutexHeld )
                {
                    /* If a task has timed out because it already holds the
                     * mutex it was trying to obtain then it cannot of inherited
                     * its own priority. */
                    configASSERT( pxTCB != pxCurrentTCB );

                    /* Disinherit the priority, remembering the previous
                     * priority to facilitate determining the subject task's
                     * state. */
                    traceTASK_PRIORITY_DISINHERIT( pxTCB, uxPriorityToUse );
                    uxPriorityUsedOnEntry = pxTCB->uxPriority;
                    pxTCB->uxPriority = uxPriorityToUse;

                    /* Only reset the event list item value if the value is not
                     * being used for anything else. */
                    if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == ( ( TickType_t ) 0U ) )
                    {
                        listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriorityToUse );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* If the running task is not the task that holds the mutex
                     * then the task that holds the mutex could be in either the
                     * Ready, Blocked or Suspended states.  Only remove the task
                     * from its current state list if it is in the Ready state as
                     * the task's priority is going to change and there is one
                     * Ready list per priority. */
                    if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE )
                    {
                        if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
                        {
                            /* It is known that the task is in its ready list so
                             * there is no need to check again and the port level
                             * reset macro can be called directly. */
                            portRESET_READY_PRIORITY( pxTCB->uxPriority, uxTopReadyPriority );
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }

                        prvAddTaskToReadyList( pxTCB );
                        #if ( configNUMBER_OF_CORES > 1 )
                        {
                            /* The priority of the task is dropped.  Yield the core on
                             * which the task is running. */
                            if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
                            {
                                prvYieldCore( pxTCB->xTaskRunState );
                            }
                        }
                        #endif /* if ( configNUMBER_OF_CORES > 1 ) */
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_vTaskPriorityDisinheritAfterTimeout();
    }

#endif /* configUSE_MUTEXES */
  5865. /*-----------------------------------------------------------*/
  5866. #if ( configNUMBER_OF_CORES > 1 )
  5867. /* If not in a critical section then yield immediately.
  5868. * Otherwise set xYieldPendings to true to wait to
  5869. * yield until exiting the critical section.
  5870. */
  5871. void vTaskYieldWithinAPI( void )
  5872. {
  5873. traceENTER_vTaskYieldWithinAPI();
  5874. if( portGET_CRITICAL_NESTING_COUNT() == 0U )
  5875. {
  5876. portYIELD();
  5877. }
  5878. else
  5879. {
  5880. xYieldPendings[ portGET_CORE_ID() ] = pdTRUE;
  5881. }
  5882. traceRETURN_vTaskYieldWithinAPI();
  5883. }
  5884. #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
  5885. /*-----------------------------------------------------------*/
#if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) )

/* Single-core critical section entry for ports that keep the nesting count
 * in the TCB: disable interrupts, then bump the current task's nesting
 * count.  Interrupts stay disabled until the matching vTaskExitCritical()
 * brings the count back to zero. */
    void vTaskEnterCritical( void )
    {
        traceENTER_vTaskEnterCritical();

        portDISABLE_INTERRUPTS();

        /* Before the scheduler starts there is no current TCB to count in,
         * so the nesting count is not touched. */
        if( xSchedulerRunning != pdFALSE )
        {
            ( pxCurrentTCB->uxCriticalNesting )++;

            /* This is not the interrupt safe version of the enter critical
             * function so  assert() if it is being called from an interrupt
             * context.  Only API functions that end in "FromISR" can be used in an
             * interrupt.  Only assert if the critical nesting count is 1 to
             * protect against recursive calls if the assert function also uses a
             * critical section. */
            if( pxCurrentTCB->uxCriticalNesting == 1U )
            {
                portASSERT_IF_IN_ISR();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_vTaskEnterCritical();
    }

#endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) */
  5912. /*-----------------------------------------------------------*/
#if ( configNUMBER_OF_CORES > 1 )

/* SMP critical section entry: disable interrupts on this core, take the
 * task and ISR spinlocks on first (outermost) entry, then increment the
 * per-core nesting count. */
    void vTaskEnterCritical( void )
    {
        traceENTER_vTaskEnterCritical();

        portDISABLE_INTERRUPTS();

        if( xSchedulerRunning != pdFALSE )
        {
            /* Only the outermost entry acquires the locks; nested entries
             * just increment the count. */
            if( portGET_CRITICAL_NESTING_COUNT() == 0U )
            {
                portGET_TASK_LOCK();
                portGET_ISR_LOCK();
            }

            portINCREMENT_CRITICAL_NESTING_COUNT();

            /* This is not the interrupt safe version of the enter critical
             * function so  assert() if it is being called from an interrupt
             * context.  Only API functions that end in "FromISR" can be used in an
             * interrupt.  Only assert if the critical nesting count is 1 to
             * protect against recursive calls if the assert function also uses a
             * critical section. */
            if( portGET_CRITICAL_NESTING_COUNT() == 1U )
            {
                portASSERT_IF_IN_ISR();

                if( uxSchedulerSuspended == 0U )
                {
                    /* The only time there would be a problem is if this is called
                     * before a context switch and vTaskExitCritical() is called
                     * after pxCurrentTCB changes. Therefore this should not be
                     * used within vTaskSwitchContext(). */
                    prvCheckForRunStateChange();
                }
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_vTaskEnterCritical();
    }

#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
  5952. /*-----------------------------------------------------------*/
#if ( configNUMBER_OF_CORES > 1 )

/* SMP critical section entry for ISR context: mask interrupts, take only
 * the ISR spinlock on outermost entry, and return the previous interrupt
 * mask so the caller can pass it to vTaskExitCriticalFromISR(). */
    UBaseType_t vTaskEnterCriticalFromISR( void )
    {
        UBaseType_t uxSavedInterruptStatus = 0;

        traceENTER_vTaskEnterCriticalFromISR();

        if( xSchedulerRunning != pdFALSE )
        {
            uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();

            /* Unlike the task-level version, only the ISR lock is taken -
             * an ISR never contends for the task lock. */
            if( portGET_CRITICAL_NESTING_COUNT() == 0U )
            {
                portGET_ISR_LOCK();
            }

            portINCREMENT_CRITICAL_NESTING_COUNT();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_vTaskEnterCriticalFromISR( uxSavedInterruptStatus );

        return uxSavedInterruptStatus;
    }

#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
  5975. /*-----------------------------------------------------------*/
#if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) )

    /* Exit a critical section entered with vTaskEnterCritical() when the
     * nesting count is held in the TCB (single-core build).  Interrupts are
     * re-enabled only when the outermost nested critical section exits. */
    void vTaskExitCritical( void )
    {
        traceENTER_vTaskExitCritical();

        if( xSchedulerRunning != pdFALSE )
        {
            /* If pxCurrentTCB->uxCriticalNesting is zero then this function
             * does not match a previous call to vTaskEnterCritical(). */
            configASSERT( pxCurrentTCB->uxCriticalNesting > 0U );

            /* This function should not be called in ISR. Use vTaskExitCriticalFromISR
             * to exit critical section from ISR. */
            portASSERT_IF_IN_ISR();

            if( pxCurrentTCB->uxCriticalNesting > 0U )
            {
                ( pxCurrentTCB->uxCriticalNesting )--;

                if( pxCurrentTCB->uxCriticalNesting == 0U )
                {
                    /* Outermost exit - safe to re-enable interrupts. */
                    portENABLE_INTERRUPTS();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* Unbalanced call - nothing to unwind (asserted above). */
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_vTaskExitCritical();
    }

#endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) */
  6012. /*-----------------------------------------------------------*/
#if ( configNUMBER_OF_CORES > 1 )

    /* Exit a critical section entered with vTaskEnterCritical() on an SMP
     * build.  On the outermost exit both spinlocks are released (ISR lock
     * then task lock - the reverse of acquisition order), interrupts are
     * re-enabled, and a yield is performed if one was pended while inside
     * the critical section. */
    void vTaskExitCritical( void )
    {
        traceENTER_vTaskExitCritical();

        if( xSchedulerRunning != pdFALSE )
        {
            /* If critical nesting count is zero then this function
             * does not match a previous call to vTaskEnterCritical(). */
            configASSERT( portGET_CRITICAL_NESTING_COUNT() > 0U );

            /* This function should not be called in ISR. Use vTaskExitCriticalFromISR
             * to exit critical section from ISR. */
            portASSERT_IF_IN_ISR();

            if( portGET_CRITICAL_NESTING_COUNT() > 0U )
            {
                portDECREMENT_CRITICAL_NESTING_COUNT();

                if( portGET_CRITICAL_NESTING_COUNT() == 0U )
                {
                    BaseType_t xYieldCurrentTask;

                    /* Get the xYieldPending stats inside the critical section. */
                    xYieldCurrentTask = xYieldPendings[ portGET_CORE_ID() ];

                    portRELEASE_ISR_LOCK();
                    portRELEASE_TASK_LOCK();
                    portENABLE_INTERRUPTS();

                    /* When a task yields in a critical section it just sets
                     * xYieldPending to true. So now that we have exited the
                     * critical section check if xYieldPending is true, and
                     * if so yield. */
                    if( xYieldCurrentTask != pdFALSE )
                    {
                        portYIELD();
                    }
                }
                else
                {
                    /* Still nested - locks and interrupt state unchanged. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_vTaskExitCritical();
    }

#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
  6062. /*-----------------------------------------------------------*/
#if ( configNUMBER_OF_CORES > 1 )

    /* Exit a critical section entered with vTaskEnterCriticalFromISR() on
     * an SMP build.  uxSavedInterruptStatus must be the value returned by
     * the matching vTaskEnterCriticalFromISR() call; it is restored only
     * when the outermost nested critical section exits, at which point the
     * ISR lock is also released. */
    void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus )
    {
        traceENTER_vTaskExitCriticalFromISR( uxSavedInterruptStatus );

        if( xSchedulerRunning != pdFALSE )
        {
            /* If critical nesting count is zero then this function
             * does not match a previous call to vTaskEnterCritical(). */
            configASSERT( portGET_CRITICAL_NESTING_COUNT() > 0U );

            if( portGET_CRITICAL_NESTING_COUNT() > 0U )
            {
                portDECREMENT_CRITICAL_NESTING_COUNT();

                if( portGET_CRITICAL_NESTING_COUNT() == 0U )
                {
                    /* Outermost exit - release the lock and restore the
                     * interrupt mask saved on entry. */
                    portRELEASE_ISR_LOCK();
                    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_vTaskExitCriticalFromISR();
    }

#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
  6097. /*-----------------------------------------------------------*/
  6098. #if ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 )
  6099. static char * prvWriteNameToBuffer( char * pcBuffer,
  6100. const char * pcTaskName )
  6101. {
  6102. size_t x;
  6103. /* Start by copying the entire string. */
  6104. ( void ) strcpy( pcBuffer, pcTaskName );
  6105. /* Pad the end of the string with spaces to ensure columns line up when
  6106. * printed out. */
  6107. for( x = strlen( pcBuffer ); x < ( size_t ) ( ( size_t ) configMAX_TASK_NAME_LEN - 1U ); x++ )
  6108. {
  6109. pcBuffer[ x ] = ' ';
  6110. }
  6111. /* Terminate. */
  6112. pcBuffer[ x ] = ( char ) 0x00;
  6113. /* Return the new end of string. */
  6114. return &( pcBuffer[ x ] );
  6115. }
  6116. #endif /* ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) */
  6117. /*-----------------------------------------------------------*/
#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )

    /* Format a human readable table of every task's name, state, priority,
     * stack high water mark and task number into pcWriteBuffer.  Output is
     * truncated - never overrun - if uxBufferLength is too small. */
    void vTaskListTasks( char * pcWriteBuffer,
                         size_t uxBufferLength )
    {
        TaskStatus_t * pxTaskStatusArray;
        size_t uxConsumedBufferLength = 0;
        size_t uxCharsWrittenBySnprintf;
        int iSnprintfReturnValue;
        BaseType_t xOutputBufferFull = pdFALSE;
        UBaseType_t uxArraySize, x;
        char cStatus;

        traceENTER_vTaskListTasks( pcWriteBuffer, uxBufferLength );

        /*
         * PLEASE NOTE:
         *
         * This function is provided for convenience only, and is used by many
         * of the demo applications.  Do not consider it to be part of the
         * scheduler.
         *
         * vTaskListTasks() calls uxTaskGetSystemState(), then formats part of the
         * uxTaskGetSystemState() output into a human readable table that
         * displays task: names, states, priority, stack usage and task number.
         * Stack usage specified as the number of unused StackType_t words stack can hold
         * on top of stack - not the number of bytes.
         *
         * vTaskListTasks() has a dependency on the snprintf() C library function that
         * might bloat the code size, use a lot of stack, and provide different
         * results on different platforms.  An alternative, tiny, third party,
         * and limited functionality implementation of snprintf() is provided in
         * many of the FreeRTOS/Demo sub-directories in a file called
         * printf-stdarg.c (note printf-stdarg.c does not provide a full
         * snprintf() implementation!).
         *
         * It is recommended that production systems call uxTaskGetSystemState()
         * directly to get access to raw stats data, rather than indirectly
         * through a call to vTaskListTasks().
         */

        /* Make sure the write buffer does not contain a string. */
        *pcWriteBuffer = ( char ) 0x00;

        /* Take a snapshot of the number of tasks in case it changes while this
         * function is executing. */
        uxArraySize = uxCurrentNumberOfTasks;

        /* Allocate an array index for each task.  NOTE!  if
         * configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
         * equate to NULL. */
        /* MISRA Ref 11.5.1 [Malloc memory assignment] */
        /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
        /* coverity[misra_c_2012_rule_11_5_violation] */
        pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) );

        if( pxTaskStatusArray != NULL )
        {
            /* Generate the (binary) data. */
            uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, NULL );

            /* Create a human readable table from the binary data. */
            for( x = 0; x < uxArraySize; x++ )
            {
                /* Map the task state enum onto its single-character code. */
                switch( pxTaskStatusArray[ x ].eCurrentState )
                {
                    case eRunning:
                        cStatus = tskRUNNING_CHAR;
                        break;

                    case eReady:
                        cStatus = tskREADY_CHAR;
                        break;

                    case eBlocked:
                        cStatus = tskBLOCKED_CHAR;
                        break;

                    case eSuspended:
                        cStatus = tskSUSPENDED_CHAR;
                        break;

                    case eDeleted:
                        cStatus = tskDELETED_CHAR;
                        break;

                    case eInvalid: /* Fall through. */
                    default:       /* Should not get here, but it is included
                                    * to prevent static checking errors. */
                        cStatus = ( char ) 0x00;
                        break;
                }

                /* Is there enough space in the buffer to hold task name? */
                if( ( uxConsumedBufferLength + configMAX_TASK_NAME_LEN ) <= uxBufferLength )
                {
                    /* Write the task name to the string, padding with spaces so it
                     * can be printed in tabular form more easily. */
                    pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );

                    /* Do not count the terminating null character. */
                    uxConsumedBufferLength = uxConsumedBufferLength + ( configMAX_TASK_NAME_LEN - 1U );

                    /* Is there space left in the buffer? -1 is done because snprintf
                     * writes a terminating null character. So we are essentially
                     * checking if the buffer has space to write at least one non-null
                     * character. */
                    if( uxConsumedBufferLength < ( uxBufferLength - 1U ) )
                    {
                        /* Write the rest of the string. */
                        #if ( ( configUSE_CORE_AFFINITY == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
                            /* SMP with core affinity - include the affinity mask column. */
                            /* MISRA Ref 21.6.1 [snprintf for utility] */
                            /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-216 */
                            /* coverity[misra_c_2012_rule_21_6_violation] */
                            iSnprintfReturnValue = snprintf( pcWriteBuffer,
                                                             uxBufferLength - uxConsumedBufferLength,
                                                             "\t%c\t%u\t%u\t%u\t0x%x\r\n",
                                                             cStatus,
                                                             ( unsigned int ) pxTaskStatusArray[ x ].uxCurrentPriority,
                                                             ( unsigned int ) pxTaskStatusArray[ x ].usStackHighWaterMark,
                                                             ( unsigned int ) pxTaskStatusArray[ x ].xTaskNumber,
                                                             ( unsigned int ) pxTaskStatusArray[ x ].uxCoreAffinityMask );
                        #else /* ( ( configUSE_CORE_AFFINITY == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
                            /* MISRA Ref 21.6.1 [snprintf for utility] */
                            /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-216 */
                            /* coverity[misra_c_2012_rule_21_6_violation] */
                            iSnprintfReturnValue = snprintf( pcWriteBuffer,
                                                             uxBufferLength - uxConsumedBufferLength,
                                                             "\t%c\t%u\t%u\t%u\r\n",
                                                             cStatus,
                                                             ( unsigned int ) pxTaskStatusArray[ x ].uxCurrentPriority,
                                                             ( unsigned int ) pxTaskStatusArray[ x ].usStackHighWaterMark,
                                                             ( unsigned int ) pxTaskStatusArray[ x ].xTaskNumber );
                        #endif /* ( ( configUSE_CORE_AFFINITY == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */

                        /* Convert snprintf's return value (which may exceed the
                         * buffer size on truncation) into actual characters written. */
                        uxCharsWrittenBySnprintf = prvSnprintfReturnValueToCharsWritten( iSnprintfReturnValue, uxBufferLength - uxConsumedBufferLength );

                        uxConsumedBufferLength += uxCharsWrittenBySnprintf;
                        pcWriteBuffer += uxCharsWrittenBySnprintf;
                    }
                    else
                    {
                        xOutputBufferFull = pdTRUE;
                    }
                }
                else
                {
                    xOutputBufferFull = pdTRUE;
                }

                if( xOutputBufferFull == pdTRUE )
                {
                    break;
                }
            }

            /* Free the array again.  NOTE!  If configSUPPORT_DYNAMIC_ALLOCATION
             * is 0 then vPortFree() will be #defined to nothing. */
            vPortFree( pxTaskStatusArray );
        }
        else
        {
            /* Allocation failed (or dynamic allocation disabled) - the
             * buffer is left holding the empty string written above. */
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_vTaskListTasks();
    }

#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) */
  6265. /*----------------------------------------------------------*/
#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configUSE_TRACE_FACILITY == 1 ) )

    /* Format a human readable table of each task's accumulated run time,
     * both as an absolute counter value and as a whole-number percentage of
     * the total run time, into pcWriteBuffer.  Output is truncated - never
     * overrun - if uxBufferLength is too small. */
    void vTaskGetRunTimeStatistics( char * pcWriteBuffer,
                                    size_t uxBufferLength )
    {
        TaskStatus_t * pxTaskStatusArray;
        size_t uxConsumedBufferLength = 0;
        size_t uxCharsWrittenBySnprintf;
        int iSnprintfReturnValue;
        BaseType_t xOutputBufferFull = pdFALSE;
        UBaseType_t uxArraySize, x;
        configRUN_TIME_COUNTER_TYPE ulTotalTime = 0;
        configRUN_TIME_COUNTER_TYPE ulStatsAsPercentage;

        traceENTER_vTaskGetRunTimeStatistics( pcWriteBuffer, uxBufferLength );

        /*
         * PLEASE NOTE:
         *
         * This function is provided for convenience only, and is used by many
         * of the demo applications.  Do not consider it to be part of the
         * scheduler.
         *
         * vTaskGetRunTimeStatistics() calls uxTaskGetSystemState(), then formats part
         * of the uxTaskGetSystemState() output into a human readable table that
         * displays the amount of time each task has spent in the Running state
         * in both absolute and percentage terms.
         *
         * vTaskGetRunTimeStatistics() has a dependency on the snprintf() C library
         * function that might bloat the code size, use a lot of stack, and
         * provide different results on different platforms.  An alternative,
         * tiny, third party, and limited functionality implementation of
         * snprintf() is provided in many of the FreeRTOS/Demo sub-directories in
         * a file called printf-stdarg.c (note printf-stdarg.c does not provide
         * a full snprintf() implementation!).
         *
         * It is recommended that production systems call uxTaskGetSystemState()
         * directly to get access to raw stats data, rather than indirectly
         * through a call to vTaskGetRunTimeStatistics().
         */

        /* Make sure the write buffer does not contain a string. */
        *pcWriteBuffer = ( char ) 0x00;

        /* Take a snapshot of the number of tasks in case it changes while this
         * function is executing. */
        uxArraySize = uxCurrentNumberOfTasks;

        /* Allocate an array index for each task.  NOTE!  If
         * configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
         * equate to NULL. */
        /* MISRA Ref 11.5.1 [Malloc memory assignment] */
        /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
        /* coverity[misra_c_2012_rule_11_5_violation] */
        pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) );

        if( pxTaskStatusArray != NULL )
        {
            /* Generate the (binary) data. */
            uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, &ulTotalTime );

            /* For percentage calculations. */
            ulTotalTime /= ( ( configRUN_TIME_COUNTER_TYPE ) 100U );

            /* Avoid divide by zero errors. */
            if( ulTotalTime > 0U )
            {
                /* Create a human readable table from the binary data. */
                for( x = 0; x < uxArraySize; x++ )
                {
                    /* What percentage of the total run time has the task used?
                     * This will always be rounded down to the nearest integer.
                     * ulTotalRunTime has already been divided by 100. */
                    ulStatsAsPercentage = pxTaskStatusArray[ x ].ulRunTimeCounter / ulTotalTime;

                    /* Is there enough space in the buffer to hold task name? */
                    if( ( uxConsumedBufferLength + configMAX_TASK_NAME_LEN ) <= uxBufferLength )
                    {
                        /* Write the task name to the string, padding with
                         * spaces so it can be printed in tabular form more
                         * easily. */
                        pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );

                        /* Do not count the terminating null character. */
                        uxConsumedBufferLength = uxConsumedBufferLength + ( configMAX_TASK_NAME_LEN - 1U );

                        /* Is there space left in the buffer? -1 is done because snprintf
                         * writes a terminating null character. So we are essentially
                         * checking if the buffer has space to write at least one non-null
                         * character. */
                        if( uxConsumedBufferLength < ( uxBufferLength - 1U ) )
                        {
                            if( ulStatsAsPercentage > 0U )
                            {
                                #ifdef portLU_PRINTF_SPECIFIER_REQUIRED
                                {
                                    /* MISRA Ref 21.6.1 [snprintf for utility] */
                                    /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-216 */
                                    /* coverity[misra_c_2012_rule_21_6_violation] */
                                    iSnprintfReturnValue = snprintf( pcWriteBuffer,
                                                                     uxBufferLength - uxConsumedBufferLength,
                                                                     "\t%lu\t\t%lu%%\r\n",
                                                                     pxTaskStatusArray[ x ].ulRunTimeCounter,
                                                                     ulStatsAsPercentage );
                                }
                                #else /* ifdef portLU_PRINTF_SPECIFIER_REQUIRED */
                                {
                                    /* sizeof( int ) == sizeof( long ) so a smaller
                                     * printf() library can be used. */
                                    /* MISRA Ref 21.6.1 [snprintf for utility] */
                                    /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-216 */
                                    /* coverity[misra_c_2012_rule_21_6_violation] */
                                    iSnprintfReturnValue = snprintf( pcWriteBuffer,
                                                                     uxBufferLength - uxConsumedBufferLength,
                                                                     "\t%u\t\t%u%%\r\n",
                                                                     ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter,
                                                                     ( unsigned int ) ulStatsAsPercentage );
                                }
                                #endif /* ifdef portLU_PRINTF_SPECIFIER_REQUIRED */
                            }
                            else
                            {
                                /* If the percentage is zero here then the task has
                                 * consumed less than 1% of the total run time. */
                                #ifdef portLU_PRINTF_SPECIFIER_REQUIRED
                                {
                                    /* MISRA Ref 21.6.1 [snprintf for utility] */
                                    /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-216 */
                                    /* coverity[misra_c_2012_rule_21_6_violation] */
                                    iSnprintfReturnValue = snprintf( pcWriteBuffer,
                                                                     uxBufferLength - uxConsumedBufferLength,
                                                                     "\t%lu\t\t<1%%\r\n",
                                                                     pxTaskStatusArray[ x ].ulRunTimeCounter );
                                }
                                #else
                                {
                                    /* sizeof( int ) == sizeof( long ) so a smaller
                                     * printf() library can be used. */
                                    /* MISRA Ref 21.6.1 [snprintf for utility] */
                                    /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-216 */
                                    /* coverity[misra_c_2012_rule_21_6_violation] */
                                    iSnprintfReturnValue = snprintf( pcWriteBuffer,
                                                                     uxBufferLength - uxConsumedBufferLength,
                                                                     "\t%u\t\t<1%%\r\n",
                                                                     ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter );
                                }
                                #endif /* ifdef portLU_PRINTF_SPECIFIER_REQUIRED */
                            }

                            /* Convert snprintf's return value (which may exceed the
                             * buffer size on truncation) into actual characters written. */
                            uxCharsWrittenBySnprintf = prvSnprintfReturnValueToCharsWritten( iSnprintfReturnValue, uxBufferLength - uxConsumedBufferLength );

                            uxConsumedBufferLength += uxCharsWrittenBySnprintf;
                            pcWriteBuffer += uxCharsWrittenBySnprintf;
                        }
                        else
                        {
                            xOutputBufferFull = pdTRUE;
                        }
                    }
                    else
                    {
                        xOutputBufferFull = pdTRUE;
                    }

                    if( xOutputBufferFull == pdTRUE )
                    {
                        break;
                    }
                }
            }
            else
            {
                /* Total run time is zero - no table can be produced. */
                mtCOVERAGE_TEST_MARKER();
            }

            /* Free the array again.  NOTE!  If configSUPPORT_DYNAMIC_ALLOCATION
             * is 0 then vPortFree() will be #defined to nothing. */
            vPortFree( pxTaskStatusArray );
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_vTaskGetRunTimeStatistics();
    }

#endif /* ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configUSE_TRACE_FACILITY == 1 ) ) */
  6436. /*-----------------------------------------------------------*/
  6437. TickType_t uxTaskResetEventItemValue( void )
  6438. {
  6439. TickType_t uxReturn;
  6440. traceENTER_uxTaskResetEventItemValue();
  6441. uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ) );
  6442. /* Reset the event list item to its normal value - so it can be used with
  6443. * queues and semaphores. */
  6444. listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority ) );
  6445. traceRETURN_uxTaskResetEventItemValue( uxReturn );
  6446. return uxReturn;
  6447. }
  6448. /*-----------------------------------------------------------*/
  6449. #if ( configUSE_MUTEXES == 1 )
  6450. TaskHandle_t pvTaskIncrementMutexHeldCount( void )
  6451. {
  6452. TCB_t * pxTCB;
  6453. traceENTER_pvTaskIncrementMutexHeldCount();
  6454. pxTCB = pxCurrentTCB;
  6455. /* If xSemaphoreCreateMutex() is called before any tasks have been created
  6456. * then pxCurrentTCB will be NULL. */
  6457. if( pxTCB != NULL )
  6458. {
  6459. ( pxTCB->uxMutexesHeld )++;
  6460. }
  6461. traceRETURN_pvTaskIncrementMutexHeldCount( pxTCB );
  6462. return pxTCB;
  6463. }
  6464. #endif /* configUSE_MUTEXES */
  6465. /*-----------------------------------------------------------*/
#if ( configUSE_TASK_NOTIFICATIONS == 1 )

    /* Wait (for up to xTicksToWait ticks) for the notification value at
     * array index uxIndexToWaitOn to become non-zero.  On exit the value is
     * either cleared to zero (xClearCountOnExit != pdFALSE) or decremented
     * by one, and the value as it stood before that adjustment is returned.
     * A return of 0 means the wait timed out. */
    uint32_t ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
                                      BaseType_t xClearCountOnExit,
                                      TickType_t xTicksToWait )
    {
        uint32_t ulReturn;
        BaseType_t xAlreadyYielded, xShouldBlock = pdFALSE;

        traceENTER_ulTaskGenericNotifyTake( uxIndexToWaitOn, xClearCountOnExit, xTicksToWait );

        configASSERT( uxIndexToWaitOn < configTASK_NOTIFICATION_ARRAY_ENTRIES );

        /* We suspend the scheduler here as prvAddCurrentTaskToDelayedList is a
         * non-deterministic operation. */
        vTaskSuspendAll();
        {
            /* We MUST enter a critical section to atomically check if a notification
             * has occurred and set the flag to indicate that we are waiting for
             * a notification. If we do not do so, a notification sent from an ISR
             * will get lost. */
            taskENTER_CRITICAL();
            {
                /* Only block if the notification count is not already non-zero. */
                if( pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] == 0U )
                {
                    /* Mark this task as waiting for a notification. */
                    pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskWAITING_NOTIFICATION;

                    if( xTicksToWait > ( TickType_t ) 0 )
                    {
                        xShouldBlock = pdTRUE;
                    }
                    else
                    {
                        /* Zero timeout - poll only, never block. */
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            taskEXIT_CRITICAL();

            /* We are now out of the critical section but the scheduler is still
             * suspended, so we are safe to do non-deterministic operations such
             * as prvAddCurrentTaskToDelayedList. */
            if( xShouldBlock == pdTRUE )
            {
                traceTASK_NOTIFY_TAKE_BLOCK( uxIndexToWaitOn );
                prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        xAlreadyYielded = xTaskResumeAll();

        /* Force a reschedule if xTaskResumeAll has not already done so. */
        if( ( xShouldBlock == pdTRUE ) && ( xAlreadyYielded == pdFALSE ) )
        {
            taskYIELD_WITHIN_API();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        /* Running again (either notified or timed out) - read and adjust the
         * notification value atomically. */
        taskENTER_CRITICAL();
        {
            traceTASK_NOTIFY_TAKE( uxIndexToWaitOn );

            ulReturn = pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ];

            if( ulReturn != 0U )
            {
                if( xClearCountOnExit != pdFALSE )
                {
                    /* Binary-semaphore style - consume the whole count. */
                    pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] = ( uint32_t ) 0U;
                }
                else
                {
                    /* Counting-semaphore style - consume a single count. */
                    pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] = ulReturn - ( uint32_t ) 1;
                }
            }
            else
            {
                /* Timed out without being notified. */
                mtCOVERAGE_TEST_MARKER();
            }

            pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskNOT_WAITING_NOTIFICATION;
        }
        taskEXIT_CRITICAL();

        traceRETURN_ulTaskGenericNotifyTake( ulReturn );

        return ulReturn;
    }

#endif /* configUSE_TASK_NOTIFICATIONS */
  6554. /*-----------------------------------------------------------*/
#if ( configUSE_TASK_NOTIFICATIONS == 1 )

    /* Wait (for up to xTicksToWait ticks) for a notification at array index
     * uxIndexToWaitOn.  ulBitsToClearOnEntry is cleared from the value
     * before waiting and ulBitsToClearOnExit after a notification arrives.
     * The notification value (pre exit-clear) is written to
     * *pulNotificationValue when that pointer is non-NULL.  Returns pdTRUE
     * if a notification was received, pdFALSE on timeout. */
    BaseType_t xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
                                       uint32_t ulBitsToClearOnEntry,
                                       uint32_t ulBitsToClearOnExit,
                                       uint32_t * pulNotificationValue,
                                       TickType_t xTicksToWait )
    {
        BaseType_t xReturn, xAlreadyYielded, xShouldBlock = pdFALSE;

        traceENTER_xTaskGenericNotifyWait( uxIndexToWaitOn, ulBitsToClearOnEntry, ulBitsToClearOnExit, pulNotificationValue, xTicksToWait );

        configASSERT( uxIndexToWaitOn < configTASK_NOTIFICATION_ARRAY_ENTRIES );

        /* We suspend the scheduler here as prvAddCurrentTaskToDelayedList is a
         * non-deterministic operation. */
        vTaskSuspendAll();
        {
            /* We MUST enter a critical section to atomically check and update the
             * task notification value. If we do not do so, a notification from
             * an ISR will get lost. */
            taskENTER_CRITICAL();
            {
                /* Only block if a notification is not already pending. */
                if( pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] != taskNOTIFICATION_RECEIVED )
                {
                    /* Clear bits in the task's notification value as bits may get
                     * set by the notifying task or interrupt. This can be used
                     * to clear the value to zero. */
                    pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] &= ~ulBitsToClearOnEntry;

                    /* Mark this task as waiting for a notification. */
                    pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskWAITING_NOTIFICATION;

                    if( xTicksToWait > ( TickType_t ) 0 )
                    {
                        xShouldBlock = pdTRUE;
                    }
                    else
                    {
                        /* Zero timeout - poll only, never block. */
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            taskEXIT_CRITICAL();

            /* We are now out of the critical section but the scheduler is still
             * suspended, so we are safe to do non-deterministic operations such
             * as prvAddCurrentTaskToDelayedList. */
            if( xShouldBlock == pdTRUE )
            {
                traceTASK_NOTIFY_WAIT_BLOCK( uxIndexToWaitOn );
                prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        xAlreadyYielded = xTaskResumeAll();

        /* Force a reschedule if xTaskResumeAll has not already done so. */
        if( ( xShouldBlock == pdTRUE ) && ( xAlreadyYielded == pdFALSE ) )
        {
            taskYIELD_WITHIN_API();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        /* Running again (either notified or timed out) - inspect and update
         * the notification state atomically. */
        taskENTER_CRITICAL();
        {
            traceTASK_NOTIFY_WAIT( uxIndexToWaitOn );

            if( pulNotificationValue != NULL )
            {
                /* Output the current notification value, which may or may not
                 * have changed. */
                *pulNotificationValue = pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ];
            }

            /* If ucNotifyValue is set then either the task never entered the
             * blocked state (because a notification was already pending) or the
             * task unblocked because of a notification.  Otherwise the task
             * unblocked because of a timeout. */
            if( pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] != taskNOTIFICATION_RECEIVED )
            {
                /* A notification was not received. */
                xReturn = pdFALSE;
            }
            else
            {
                /* A notification was already pending or a notification was
                 * received while the task was waiting. */
                pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] &= ~ulBitsToClearOnExit;
                xReturn = pdTRUE;
            }

            pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskNOT_WAITING_NOTIFICATION;
        }
        taskEXIT_CRITICAL();

        traceRETURN_xTaskGenericNotifyWait( xReturn );

        return xReturn;
    }

#endif /* configUSE_TASK_NOTIFICATIONS */
  6653. /*-----------------------------------------------------------*/
#if ( configUSE_TASK_NOTIFICATIONS == 1 )

    /* Send a notification to xTaskToNotify at array index uxIndexToNotify,
     * updating the target's notification value according to eAction (set
     * bits, increment, overwrite, write-without-overwrite, or no action).
     * The value prior to the update is written to
     * *pulPreviousNotificationValue when that pointer is non-NULL.  If the
     * target task was blocked waiting for this notification it is made
     * ready and a yield is requested if appropriate.  Returns pdPASS except
     * when eSetValueWithoutOverwrite could not write because a notification
     * was already pending, in which case pdFAIL is returned. */
    BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify,
                                   UBaseType_t uxIndexToNotify,
                                   uint32_t ulValue,
                                   eNotifyAction eAction,
                                   uint32_t * pulPreviousNotificationValue )
    {
        TCB_t * pxTCB;
        BaseType_t xReturn = pdPASS;
        uint8_t ucOriginalNotifyState;

        traceENTER_xTaskGenericNotify( xTaskToNotify, uxIndexToNotify, ulValue, eAction, pulPreviousNotificationValue );

        configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES );
        configASSERT( xTaskToNotify );
        pxTCB = xTaskToNotify;

        taskENTER_CRITICAL();
        {
            if( pulPreviousNotificationValue != NULL )
            {
                *pulPreviousNotificationValue = pxTCB->ulNotifiedValue[ uxIndexToNotify ];
            }

            /* Remember whether the target was waiting before marking the
             * notification as received. */
            ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ];

            pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED;

            switch( eAction )
            {
                case eSetBits:
                    pxTCB->ulNotifiedValue[ uxIndexToNotify ] |= ulValue;
                    break;

                case eIncrement:
                    ( pxTCB->ulNotifiedValue[ uxIndexToNotify ] )++;
                    break;

                case eSetValueWithOverwrite:
                    pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue;
                    break;

                case eSetValueWithoutOverwrite:

                    if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED )
                    {
                        pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue;
                    }
                    else
                    {
                        /* The value could not be written to the task. */
                        xReturn = pdFAIL;
                    }
                    break;

                case eNoAction:

                    /* The task is being notified without its notify value being
                     * updated. */
                    break;

                default:

                    /* Should not get here if all enums are handled.
                     * Artificially force an assert by testing a value the
                     * compiler can't assume is const. */
                    configASSERT( xTickCount == ( TickType_t ) 0 );

                    break;
            }

            traceTASK_NOTIFY( uxIndexToNotify );

            /* If the task is in the blocked state specifically to wait for a
             * notification then unblock it now. */
            if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
            {
                listREMOVE_ITEM( &( pxTCB->xStateListItem ) );
                prvAddTaskToReadyList( pxTCB );

                /* The task should not have been on an event list. */
                configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );

                #if ( configUSE_TICKLESS_IDLE != 0 )
                {
                    /* If a task is blocked waiting for a notification then
                     * xNextTaskUnblockTime might be set to the blocked task's time
                     * out time.  If the task is unblocked for a reason other than
                     * a timeout xNextTaskUnblockTime is normally left unchanged,
                     * because it will automatically get reset to a new value when
                     * the tick count equals xNextTaskUnblockTime.  However if
                     * tickless idling is used it might be more important to enter
                     * sleep mode at the earliest possible time - so reset
                     * xNextTaskUnblockTime here to ensure it is updated at the
                     * earliest possible time. */
                    prvResetNextTaskUnblockTime();
                }
                #endif

                /* Check if the notified task has a priority above the currently
                 * executing task. */
                taskYIELD_ANY_CORE_IF_USING_PREEMPTION( pxTCB );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        taskEXIT_CRITICAL();

        traceRETURN_xTaskGenericNotify( xReturn );

        return xReturn;
    }

#endif /* configUSE_TASK_NOTIFICATIONS */
  6747. /*-----------------------------------------------------------*/
#if ( configUSE_TASK_NOTIFICATIONS == 1 )

/*
 * Interrupt safe version of xTaskGenericNotify().  Updates the notification
 * value at index uxIndexToNotify of the target task according to eAction,
 * marks the notification as received, and unblocks the task if it was in the
 * Blocked state waiting on that notification index.
 *
 * xTaskToNotify - handle of the task to notify; must not be NULL.
 * uxIndexToNotify - index into the task's notification array.
 * ulValue - value applied by the eSetBits / eSetValueWith[out]Overwrite actions.
 * eAction - how ulValue is combined with the current notification value.
 * pulPreviousNotificationValue - optional out parameter; receives the
 *     notification value as it was before this call modified it.
 * pxHigherPriorityTaskWoken - optional out parameter; set to pdTRUE when the
 *     unblocked task has a higher priority than the task interrupted by this
 *     ISR, so the ISR should request a context switch before exiting.
 *
 * Returns pdPASS, except when eAction is eSetValueWithoutOverwrite and a
 * notification was already pending (value not written) - then pdFAIL.
 */
    BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify,
                                          UBaseType_t uxIndexToNotify,
                                          uint32_t ulValue,
                                          eNotifyAction eAction,
                                          uint32_t * pulPreviousNotificationValue,
                                          BaseType_t * pxHigherPriorityTaskWoken )
    {
        TCB_t * pxTCB;
        uint8_t ucOriginalNotifyState;
        BaseType_t xReturn = pdPASS;
        UBaseType_t uxSavedInterruptStatus;

        traceENTER_xTaskGenericNotifyFromISR( xTaskToNotify, uxIndexToNotify, ulValue, eAction, pulPreviousNotificationValue, pxHigherPriorityTaskWoken );

        configASSERT( xTaskToNotify );
        configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES );

        /* RTOS ports that support interrupt nesting have the concept of a
         * maximum system call (or maximum API call) interrupt priority.
         * Interrupts that are above the maximum system call priority are keep
         * permanently enabled, even when the RTOS kernel is in a critical section,
         * but cannot make any calls to FreeRTOS API functions.  If configASSERT()
         * is defined in FreeRTOSConfig.h then
         * portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
         * failure if a FreeRTOS API function is called from an interrupt that has
         * been assigned a priority above the configured maximum system call
         * priority.  Only FreeRTOS functions that end in FromISR can be called
         * from interrupts that have been assigned a priority at or (logically)
         * below the maximum system call interrupt priority.  FreeRTOS maintains a
         * separate interrupt safe API to ensure interrupt entry is as fast and as
         * simple as possible.  More information (albeit Cortex-M specific) is
         * provided on the following link:
         * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
        portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

        pxTCB = xTaskToNotify;

        /* MISRA Ref 4.7.1 [Return value shall be checked] */
        /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
        /* coverity[misra_c_2012_directive_4_7_violation] */
        uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
        {
            if( pulPreviousNotificationValue != NULL )
            {
                /* Capture the value before the switch below modifies it. */
                *pulPreviousNotificationValue = pxTCB->ulNotifiedValue[ uxIndexToNotify ];
            }

            /* Remember whether the task was waiting so it can be unblocked
             * below, then mark the notification as received. */
            ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ];
            pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED;

            switch( eAction )
            {
                case eSetBits:
                    pxTCB->ulNotifiedValue[ uxIndexToNotify ] |= ulValue;
                    break;

                case eIncrement:
                    ( pxTCB->ulNotifiedValue[ uxIndexToNotify ] )++;
                    break;

                case eSetValueWithOverwrite:
                    pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue;
                    break;

                case eSetValueWithoutOverwrite:
                    if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED )
                    {
                        pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue;
                    }
                    else
                    {
                        /* The value could not be written to the task. */
                        xReturn = pdFAIL;
                    }
                    break;

                case eNoAction:
                    /* The task is being notified without its notify value being
                     * updated. */
                    break;

                default:
                    /* Should not get here if all enums are handled.
                     * Artificially force an assert by testing a value the
                     * compiler can't assume is const. */
                    configASSERT( xTickCount == ( TickType_t ) 0 );
                    break;
            }

            traceTASK_NOTIFY_FROM_ISR( uxIndexToNotify );

            /* If the task is in the blocked state specifically to wait for a
             * notification then unblock it now. */
            if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
            {
                /* The task should not have been on an event list. */
                configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );

                if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
                {
                    listREMOVE_ITEM( &( pxTCB->xStateListItem ) );
                    prvAddTaskToReadyList( pxTCB );
                }
                else
                {
                    /* The delayed and ready lists cannot be accessed, so hold
                     * this task pending until the scheduler is resumed. */
                    listINSERT_END( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
                }

                #if ( configNUMBER_OF_CORES == 1 )
                {
                    if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
                    {
                        /* The notified task has a priority above the currently
                         * executing task so a yield is required. */
                        if( pxHigherPriorityTaskWoken != NULL )
                        {
                            *pxHigherPriorityTaskWoken = pdTRUE;
                        }

                        /* Mark that a yield is pending in case the user is not
                         * using the "xHigherPriorityTaskWoken" parameter to an ISR
                         * safe FreeRTOS function. */
                        xYieldPendings[ 0 ] = pdTRUE;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #else /* #if ( configNUMBER_OF_CORES == 1 ) */
                {
                    #if ( configUSE_PREEMPTION == 1 )
                    {
                        /* In SMP builds let the scheduler decide which core, if
                         * any, should yield to the newly readied task. */
                        prvYieldForTask( pxTCB );

                        if( xYieldPendings[ portGET_CORE_ID() ] == pdTRUE )
                        {
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                        }
                    }
                    #endif /* if ( configUSE_PREEMPTION == 1 ) */
                }
                #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
            }
        }
        taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );

        traceRETURN_xTaskGenericNotifyFromISR( xReturn );

        return xReturn;
    }

#endif /* configUSE_TASK_NOTIFICATIONS */
  6886. /*-----------------------------------------------------------*/
#if ( configUSE_TASK_NOTIFICATIONS == 1 )

/*
 * Interrupt safe "give" of a task notification.  Equivalent to
 * xTaskGenericNotifyFromISR() with eAction == eIncrement but with no return
 * value - the notification value at uxIndexToNotify is incremented (counting
 * semaphore semantics) and the task is unblocked if it was waiting.
 *
 * xTaskToNotify - handle of the task to notify; must not be NULL.
 * uxIndexToNotify - index into the task's notification array.
 * pxHigherPriorityTaskWoken - optional out parameter; set to pdTRUE when the
 *     unblocked task should run before the task interrupted by this ISR.
 */
    void vTaskGenericNotifyGiveFromISR( TaskHandle_t xTaskToNotify,
                                        UBaseType_t uxIndexToNotify,
                                        BaseType_t * pxHigherPriorityTaskWoken )
    {
        TCB_t * pxTCB;
        uint8_t ucOriginalNotifyState;
        UBaseType_t uxSavedInterruptStatus;

        traceENTER_vTaskGenericNotifyGiveFromISR( xTaskToNotify, uxIndexToNotify, pxHigherPriorityTaskWoken );

        configASSERT( xTaskToNotify );
        configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES );

        /* RTOS ports that support interrupt nesting have the concept of a
         * maximum system call (or maximum API call) interrupt priority.
         * Interrupts that are above the maximum system call priority are keep
         * permanently enabled, even when the RTOS kernel is in a critical section,
         * but cannot make any calls to FreeRTOS API functions.  If configASSERT()
         * is defined in FreeRTOSConfig.h then
         * portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
         * failure if a FreeRTOS API function is called from an interrupt that has
         * been assigned a priority above the configured maximum system call
         * priority.  Only FreeRTOS functions that end in FromISR can be called
         * from interrupts that have been assigned a priority at or (logically)
         * below the maximum system call interrupt priority.  FreeRTOS maintains a
         * separate interrupt safe API to ensure interrupt entry is as fast and as
         * simple as possible.  More information (albeit Cortex-M specific) is
         * provided on the following link:
         * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
        portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

        pxTCB = xTaskToNotify;

        /* MISRA Ref 4.7.1 [Return value shall be checked] */
        /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
        /* coverity[misra_c_2012_directive_4_7_violation] */
        uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
        {
            /* Remember whether the task was waiting so it can be unblocked
             * below, then mark the notification as received. */
            ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ];
            pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED;

            /* 'Giving' is equivalent to incrementing a count in a counting
             * semaphore. */
            ( pxTCB->ulNotifiedValue[ uxIndexToNotify ] )++;

            traceTASK_NOTIFY_GIVE_FROM_ISR( uxIndexToNotify );

            /* If the task is in the blocked state specifically to wait for a
             * notification then unblock it now. */
            if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
            {
                /* The task should not have been on an event list. */
                configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );

                if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
                {
                    listREMOVE_ITEM( &( pxTCB->xStateListItem ) );
                    prvAddTaskToReadyList( pxTCB );
                }
                else
                {
                    /* The delayed and ready lists cannot be accessed, so hold
                     * this task pending until the scheduler is resumed. */
                    listINSERT_END( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
                }

                #if ( configNUMBER_OF_CORES == 1 )
                {
                    if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
                    {
                        /* The notified task has a priority above the currently
                         * executing task so a yield is required. */
                        if( pxHigherPriorityTaskWoken != NULL )
                        {
                            *pxHigherPriorityTaskWoken = pdTRUE;
                        }

                        /* Mark that a yield is pending in case the user is not
                         * using the "xHigherPriorityTaskWoken" parameter in an ISR
                         * safe FreeRTOS function. */
                        xYieldPendings[ 0 ] = pdTRUE;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #else /* #if ( configNUMBER_OF_CORES == 1 ) */
                {
                    #if ( configUSE_PREEMPTION == 1 )
                    {
                        /* In SMP builds let the scheduler decide which core, if
                         * any, should yield to the newly readied task. */
                        prvYieldForTask( pxTCB );

                        if( xYieldPendings[ portGET_CORE_ID() ] == pdTRUE )
                        {
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                        }
                    }
                    #endif /* #if ( configUSE_PREEMPTION == 1 ) */
                }
                #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
            }
        }
        taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );

        traceRETURN_vTaskGenericNotifyGiveFromISR();
    }

#endif /* configUSE_TASK_NOTIFICATIONS */
  6986. /*-----------------------------------------------------------*/
  6987. #if ( configUSE_TASK_NOTIFICATIONS == 1 )
  6988. BaseType_t xTaskGenericNotifyStateClear( TaskHandle_t xTask,
  6989. UBaseType_t uxIndexToClear )
  6990. {
  6991. TCB_t * pxTCB;
  6992. BaseType_t xReturn;
  6993. traceENTER_xTaskGenericNotifyStateClear( xTask, uxIndexToClear );
  6994. configASSERT( uxIndexToClear < configTASK_NOTIFICATION_ARRAY_ENTRIES );
  6995. /* If null is passed in here then it is the calling task that is having
  6996. * its notification state cleared. */
  6997. pxTCB = prvGetTCBFromHandle( xTask );
  6998. taskENTER_CRITICAL();
  6999. {
  7000. if( pxTCB->ucNotifyState[ uxIndexToClear ] == taskNOTIFICATION_RECEIVED )
  7001. {
  7002. pxTCB->ucNotifyState[ uxIndexToClear ] = taskNOT_WAITING_NOTIFICATION;
  7003. xReturn = pdPASS;
  7004. }
  7005. else
  7006. {
  7007. xReturn = pdFAIL;
  7008. }
  7009. }
  7010. taskEXIT_CRITICAL();
  7011. traceRETURN_xTaskGenericNotifyStateClear( xReturn );
  7012. return xReturn;
  7013. }
  7014. #endif /* configUSE_TASK_NOTIFICATIONS */
  7015. /*-----------------------------------------------------------*/
  7016. #if ( configUSE_TASK_NOTIFICATIONS == 1 )
  7017. uint32_t ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
  7018. UBaseType_t uxIndexToClear,
  7019. uint32_t ulBitsToClear )
  7020. {
  7021. TCB_t * pxTCB;
  7022. uint32_t ulReturn;
  7023. traceENTER_ulTaskGenericNotifyValueClear( xTask, uxIndexToClear, ulBitsToClear );
  7024. configASSERT( uxIndexToClear < configTASK_NOTIFICATION_ARRAY_ENTRIES );
  7025. /* If null is passed in here then it is the calling task that is having
  7026. * its notification state cleared. */
  7027. pxTCB = prvGetTCBFromHandle( xTask );
  7028. taskENTER_CRITICAL();
  7029. {
  7030. /* Return the notification as it was before the bits were cleared,
  7031. * then clear the bit mask. */
  7032. ulReturn = pxTCB->ulNotifiedValue[ uxIndexToClear ];
  7033. pxTCB->ulNotifiedValue[ uxIndexToClear ] &= ~ulBitsToClear;
  7034. }
  7035. taskEXIT_CRITICAL();
  7036. traceRETURN_ulTaskGenericNotifyValueClear( ulReturn );
  7037. return ulReturn;
  7038. }
  7039. #endif /* configUSE_TASK_NOTIFICATIONS */
  7040. /*-----------------------------------------------------------*/
  7041. #if ( configGENERATE_RUN_TIME_STATS == 1 )
  7042. configRUN_TIME_COUNTER_TYPE ulTaskGetRunTimeCounter( const TaskHandle_t xTask )
  7043. {
  7044. TCB_t * pxTCB;
  7045. traceENTER_ulTaskGetRunTimeCounter( xTask );
  7046. pxTCB = prvGetTCBFromHandle( xTask );
  7047. traceRETURN_ulTaskGetRunTimeCounter( pxTCB->ulRunTimeCounter );
  7048. return pxTCB->ulRunTimeCounter;
  7049. }
  7050. #endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
  7051. /*-----------------------------------------------------------*/
  7052. #if ( configGENERATE_RUN_TIME_STATS == 1 )
  7053. configRUN_TIME_COUNTER_TYPE ulTaskGetRunTimePercent( const TaskHandle_t xTask )
  7054. {
  7055. TCB_t * pxTCB;
  7056. configRUN_TIME_COUNTER_TYPE ulTotalTime, ulReturn;
  7057. traceENTER_ulTaskGetRunTimePercent( xTask );
  7058. ulTotalTime = ( configRUN_TIME_COUNTER_TYPE ) portGET_RUN_TIME_COUNTER_VALUE();
  7059. /* For percentage calculations. */
  7060. ulTotalTime /= ( configRUN_TIME_COUNTER_TYPE ) 100;
  7061. /* Avoid divide by zero errors. */
  7062. if( ulTotalTime > ( configRUN_TIME_COUNTER_TYPE ) 0 )
  7063. {
  7064. pxTCB = prvGetTCBFromHandle( xTask );
  7065. ulReturn = pxTCB->ulRunTimeCounter / ulTotalTime;
  7066. }
  7067. else
  7068. {
  7069. ulReturn = 0;
  7070. }
  7071. traceRETURN_ulTaskGetRunTimePercent( ulReturn );
  7072. return ulReturn;
  7073. }
  7074. #endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
  7075. /*-----------------------------------------------------------*/
  7076. #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
  7077. configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimeCounter( void )
  7078. {
  7079. configRUN_TIME_COUNTER_TYPE ulReturn = 0;
  7080. BaseType_t i;
  7081. traceENTER_ulTaskGetIdleRunTimeCounter();
  7082. for( i = 0; i < ( BaseType_t ) configNUMBER_OF_CORES; i++ )
  7083. {
  7084. ulReturn += xIdleTaskHandles[ i ]->ulRunTimeCounter;
  7085. }
  7086. traceRETURN_ulTaskGetIdleRunTimeCounter( ulReturn );
  7087. return ulReturn;
  7088. }
  7089. #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
  7090. /*-----------------------------------------------------------*/
  7091. #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
  7092. configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimePercent( void )
  7093. {
  7094. configRUN_TIME_COUNTER_TYPE ulTotalTime, ulReturn;
  7095. configRUN_TIME_COUNTER_TYPE ulRunTimeCounter = 0;
  7096. BaseType_t i;
  7097. traceENTER_ulTaskGetIdleRunTimePercent();
  7098. ulTotalTime = portGET_RUN_TIME_COUNTER_VALUE() * configNUMBER_OF_CORES;
  7099. /* For percentage calculations. */
  7100. ulTotalTime /= ( configRUN_TIME_COUNTER_TYPE ) 100;
  7101. /* Avoid divide by zero errors. */
  7102. if( ulTotalTime > ( configRUN_TIME_COUNTER_TYPE ) 0 )
  7103. {
  7104. for( i = 0; i < ( BaseType_t ) configNUMBER_OF_CORES; i++ )
  7105. {
  7106. ulRunTimeCounter += xIdleTaskHandles[ i ]->ulRunTimeCounter;
  7107. }
  7108. ulReturn = ulRunTimeCounter / ulTotalTime;
  7109. }
  7110. else
  7111. {
  7112. ulReturn = 0;
  7113. }
  7114. traceRETURN_ulTaskGetIdleRunTimePercent( ulReturn );
  7115. return ulReturn;
  7116. }
  7117. #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
  7118. /*-----------------------------------------------------------*/
/*
 * Move the currently executing task from its ready list onto the appropriate
 * blocked list.  The task is placed on:
 *   - the suspended list, when xTicksToWait is portMAX_DELAY and an indefinite
 *     block is permitted (so it is never woken by a timing event), or
 *   - the overflow delayed list, when the computed wake time wraps past the
 *     tick counter's maximum, or
 *   - the current delayed list otherwise, updating xNextTaskUnblockTime if
 *     this task becomes the next one due to wake.
 *
 * xTicksToWait - number of ticks to remain blocked (relative to now).
 * xCanBlockIndefinitely - pdTRUE when portMAX_DELAY may be treated as
 *     "block forever" rather than as a very long timeout.
 *
 * NOTE(review): callers appear responsible for the required critical
 * section / scheduler state - confirm against call sites outside this view.
 */
static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait,
                                            const BaseType_t xCanBlockIndefinitely )
{
    TickType_t xTimeToWake;
    /* Snapshot volatile kernel state once so the calculations below are
     * self-consistent. */
    const TickType_t xConstTickCount = xTickCount;
    List_t * const pxDelayedList = pxDelayedTaskList;
    List_t * const pxOverflowDelayedList = pxOverflowDelayedTaskList;

    #if ( INCLUDE_xTaskAbortDelay == 1 )
    {
        /* About to enter a delayed list, so ensure the ucDelayAborted flag is
         * reset to pdFALSE so it can be detected as having been set to pdTRUE
         * when the task leaves the Blocked state. */
        pxCurrentTCB->ucDelayAborted = ( uint8_t ) pdFALSE;
    }
    #endif

    /* Remove the task from the ready list before adding it to the blocked list
     * as the same list item is used for both lists. */
    if( uxListRemove( &( pxCurrentTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
    {
        /* The current task must be in a ready list, so there is no need to
         * check, and the port reset macro can be called directly. */
        portRESET_READY_PRIORITY( pxCurrentTCB->uxPriority, uxTopReadyPriority );
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }

    #if ( INCLUDE_vTaskSuspend == 1 )
    {
        if( ( xTicksToWait == portMAX_DELAY ) && ( xCanBlockIndefinitely != pdFALSE ) )
        {
            /* Add the task to the suspended task list instead of a delayed task
             * list to ensure it is not woken by a timing event.  It will block
             * indefinitely. */
            listINSERT_END( &xSuspendedTaskList, &( pxCurrentTCB->xStateListItem ) );
        }
        else
        {
            /* Calculate the time at which the task should be woken if the event
             * does not occur.  This may overflow but this doesn't matter, the
             * kernel will manage it correctly. */
            xTimeToWake = xConstTickCount + xTicksToWait;

            /* The list item will be inserted in wake time order. */
            listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake );

            if( xTimeToWake < xConstTickCount )
            {
                /* Wake time has overflowed.  Place this item in the overflow
                 * list. */
                traceMOVED_TASK_TO_OVERFLOW_DELAYED_LIST();
                vListInsert( pxOverflowDelayedList, &( pxCurrentTCB->xStateListItem ) );
            }
            else
            {
                /* The wake time has not overflowed, so the current block list
                 * is used. */
                traceMOVED_TASK_TO_DELAYED_LIST();
                vListInsert( pxDelayedList, &( pxCurrentTCB->xStateListItem ) );

                /* If the task entering the blocked state was placed at the
                 * head of the list of blocked tasks then xNextTaskUnblockTime
                 * needs to be updated too. */
                if( xTimeToWake < xNextTaskUnblockTime )
                {
                    xNextTaskUnblockTime = xTimeToWake;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
    }
    #else /* INCLUDE_vTaskSuspend */
    {
        /* Calculate the time at which the task should be woken if the event
         * does not occur.  This may overflow but this doesn't matter, the kernel
         * will manage it correctly. */
        xTimeToWake = xConstTickCount + xTicksToWait;

        /* The list item will be inserted in wake time order. */
        listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake );

        if( xTimeToWake < xConstTickCount )
        {
            traceMOVED_TASK_TO_OVERFLOW_DELAYED_LIST();

            /* Wake time has overflowed.  Place this item in the overflow list. */
            vListInsert( pxOverflowDelayedList, &( pxCurrentTCB->xStateListItem ) );
        }
        else
        {
            traceMOVED_TASK_TO_DELAYED_LIST();

            /* The wake time has not overflowed, so the current block list is used. */
            vListInsert( pxDelayedList, &( pxCurrentTCB->xStateListItem ) );

            /* If the task entering the blocked state was placed at the head of the
             * list of blocked tasks then xNextTaskUnblockTime needs to be updated
             * too. */
            if( xTimeToWake < xNextTaskUnblockTime )
            {
                xNextTaskUnblockTime = xTimeToWake;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }

        /* Avoid compiler warning when INCLUDE_vTaskSuspend is not 1. */
        ( void ) xCanBlockIndefinitely;
    }
    #endif /* INCLUDE_vTaskSuspend */
}
  7226. /*-----------------------------------------------------------*/
  7227. #if ( portUSING_MPU_WRAPPERS == 1 )
  7228. xMPU_SETTINGS * xTaskGetMPUSettings( TaskHandle_t xTask )
  7229. {
  7230. TCB_t * pxTCB;
  7231. traceENTER_xTaskGetMPUSettings( xTask );
  7232. pxTCB = prvGetTCBFromHandle( xTask );
  7233. traceRETURN_xTaskGetMPUSettings( &( pxTCB->xMPUSettings ) );
  7234. return &( pxTCB->xMPUSettings );
  7235. }
  7236. #endif /* portUSING_MPU_WRAPPERS */
  7237. /*-----------------------------------------------------------*/
/* Code below here allows additional code to be inserted into this source file,
 * especially where access to file scope functions and data is needed (for example
 * when performing module tests). */

#ifdef FREERTOS_MODULE_TEST
    #include "tasks_test_access_functions.h"
#endif

#if ( configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H == 1 )

    #include "freertos_tasks_c_additions.h"

/* When the application defines FREERTOS_TASKS_C_ADDITIONS_INIT, wrap that
 * macro in a file-scope function so the kernel can invoke the application's
 * additions initialisation code. */
    #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
        static void freertos_tasks_c_additions_init( void )
        {
            FREERTOS_TASKS_C_ADDITIONS_INIT();
        }
    #endif

#endif /* if ( configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H == 1 ) */
  7253. /*-----------------------------------------------------------*/
  7254. #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configKERNEL_PROVIDED_STATIC_MEMORY == 1 ) && ( portUSING_MPU_WRAPPERS == 0 ) )
  7255. /*
  7256. * This is the kernel provided implementation of vApplicationGetIdleTaskMemory()
  7257. * to provide the memory that is used by the Idle task. It is used when
  7258. * configKERNEL_PROVIDED_STATIC_MEMORY is set to 1. The application can provide
  7259. * it's own implementation of vApplicationGetIdleTaskMemory by setting
  7260. * configKERNEL_PROVIDED_STATIC_MEMORY to 0 or leaving it undefined.
  7261. */
  7262. void vApplicationGetIdleTaskMemory( StaticTask_t ** ppxIdleTaskTCBBuffer,
  7263. StackType_t ** ppxIdleTaskStackBuffer,
  7264. configSTACK_DEPTH_TYPE * puxIdleTaskStackSize )
  7265. {
  7266. static StaticTask_t xIdleTaskTCB;
  7267. static StackType_t uxIdleTaskStack[ configMINIMAL_STACK_SIZE ];
  7268. *ppxIdleTaskTCBBuffer = &( xIdleTaskTCB );
  7269. *ppxIdleTaskStackBuffer = &( uxIdleTaskStack[ 0 ] );
  7270. *puxIdleTaskStackSize = configMINIMAL_STACK_SIZE;
  7271. }
  7272. #if ( configNUMBER_OF_CORES > 1 )
  7273. void vApplicationGetPassiveIdleTaskMemory( StaticTask_t ** ppxIdleTaskTCBBuffer,
  7274. StackType_t ** ppxIdleTaskStackBuffer,
  7275. configSTACK_DEPTH_TYPE * puxIdleTaskStackSize,
  7276. BaseType_t xPassiveIdleTaskIndex )
  7277. {
  7278. static StaticTask_t xIdleTaskTCBs[ configNUMBER_OF_CORES - 1 ];
  7279. static StackType_t uxIdleTaskStacks[ configNUMBER_OF_CORES - 1 ][ configMINIMAL_STACK_SIZE ];
  7280. *ppxIdleTaskTCBBuffer = &( xIdleTaskTCBs[ xPassiveIdleTaskIndex ] );
  7281. *ppxIdleTaskStackBuffer = &( uxIdleTaskStacks[ xPassiveIdleTaskIndex ][ 0 ] );
  7282. *puxIdleTaskStackSize = configMINIMAL_STACK_SIZE;
  7283. }
  7284. #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
  7285. #endif /* #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configKERNEL_PROVIDED_STATIC_MEMORY == 1 ) && ( portUSING_MPU_WRAPPERS == 0 ) ) */
  7286. /*-----------------------------------------------------------*/
  7287. #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configKERNEL_PROVIDED_STATIC_MEMORY == 1 ) && ( portUSING_MPU_WRAPPERS == 0 ) )
  7288. /*
  7289. * This is the kernel provided implementation of vApplicationGetTimerTaskMemory()
  7290. * to provide the memory that is used by the Timer service task. It is used when
  7291. * configKERNEL_PROVIDED_STATIC_MEMORY is set to 1. The application can provide
  7292. * it's own implementation of vApplicationGetTimerTaskMemory by setting
  7293. * configKERNEL_PROVIDED_STATIC_MEMORY to 0 or leaving it undefined.
  7294. */
  7295. void vApplicationGetTimerTaskMemory( StaticTask_t ** ppxTimerTaskTCBBuffer,
  7296. StackType_t ** ppxTimerTaskStackBuffer,
  7297. configSTACK_DEPTH_TYPE * puxTimerTaskStackSize )
  7298. {
  7299. static StaticTask_t xTimerTaskTCB;
  7300. static StackType_t uxTimerTaskStack[ configTIMER_TASK_STACK_DEPTH ];
  7301. *ppxTimerTaskTCBBuffer = &( xTimerTaskTCB );
  7302. *ppxTimerTaskStackBuffer = &( uxTimerTaskStack[ 0 ] );
  7303. *puxTimerTaskStackSize = configTIMER_TASK_STACK_DEPTH;
  7304. }
  7305. #endif /* #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configKERNEL_PROVIDED_STATIC_MEMORY == 1 ) && ( portUSING_MPU_WRAPPERS == 0 ) ) */
  7306. /*-----------------------------------------------------------*/
/*
 * Reset the state in this file.  This state is normally initialized at start up.
 * This function must be called by the application before restarting the
 * scheduler.
 */
void vTaskResetState( void )
{
    BaseType_t xCoreID;

    /* Task control block. */
    #if ( configNUMBER_OF_CORES == 1 )
    {
        /* No task is "current" until the scheduler is restarted. */
        pxCurrentTCB = NULL;
    }
    #endif /* #if ( configNUMBER_OF_CORES == 1 ) */

    #if ( INCLUDE_vTaskDelete == 1 )
    {
        /* Forget any deleted tasks that were awaiting clean up. */
        uxDeletedTasksWaitingCleanUp = ( UBaseType_t ) 0U;
    }
    #endif /* #if ( INCLUDE_vTaskDelete == 1 ) */

    #if ( configUSE_POSIX_ERRNO == 1 )
    {
        FreeRTOS_errno = 0;
    }
    #endif /* #if ( configUSE_POSIX_ERRNO == 1 ) */

    /* Other file private variables - restore each to its power-on value. */
    uxCurrentNumberOfTasks = ( UBaseType_t ) 0U;
    xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;
    uxTopReadyPriority = tskIDLE_PRIORITY;
    xSchedulerRunning = pdFALSE;
    xPendedTicks = ( TickType_t ) 0U;

    /* Clear any yield that was left pending on any core. */
    for( xCoreID = 0; xCoreID < configNUMBER_OF_CORES; xCoreID++ )
    {
        xYieldPendings[ xCoreID ] = pdFALSE;
    }

    xNumOfOverflows = ( BaseType_t ) 0;
    uxTaskNumber = ( UBaseType_t ) 0U;
    xNextTaskUnblockTime = ( TickType_t ) 0U;
    uxSchedulerSuspended = ( UBaseType_t ) 0U;

    #if ( configGENERATE_RUN_TIME_STATS == 1 )
    {
        /* Zero the per-core run-time statistics. */
        for( xCoreID = 0; xCoreID < configNUMBER_OF_CORES; xCoreID++ )
        {
            ulTaskSwitchedInTime[ xCoreID ] = 0U;
            ulTotalRunTime[ xCoreID ] = 0U;
        }
    }
    #endif /* #if ( configGENERATE_RUN_TIME_STATS == 1 ) */
}
  7355. /*-----------------------------------------------------------*/