/* tasks.c — FreeRTOS kernel task implementation (ESP-IDF SMP modified).
 * NOTE(capture): the head of this extract contained source-viewer line-number
 * gutter residue (concatenated digits), which has been removed; no source
 * content was lost. */
  1. /*
  2. * FreeRTOS Kernel V10.5.1 (ESP-IDF SMP modified)
  3. * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
  4. *
  5. * SPDX-FileCopyrightText: 2021 Amazon.com, Inc. or its affiliates
  6. *
  7. * SPDX-License-Identifier: MIT
  8. *
  9. * SPDX-FileContributor: 2023 Espressif Systems (Shanghai) CO LTD
  10. *
  11. * Permission is hereby granted, free of charge, to any person obtaining a copy of
  12. * this software and associated documentation files (the "Software"), to deal in
  13. * the Software without restriction, including without limitation the rights to
  14. * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
  15. * the Software, and to permit persons to whom the Software is furnished to do so,
  16. * subject to the following conditions:
  17. *
  18. * The above copyright notice and this permission notice shall be included in all
  19. * copies or substantial portions of the Software.
  20. *
  21. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  22. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
  23. * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
  24. * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
  25. * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  26. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  27. *
  28. * https://www.FreeRTOS.org
  29. * https://github.com/FreeRTOS
  30. *
  31. */
  32. /* Standard includes. */
  33. #include <stdlib.h>
  34. #include <string.h>
  35. /* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
  36. * all the API functions to use the MPU wrappers. That should only be done when
  37. * task.h is included from an application file. */
  38. #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
  39. /* FreeRTOS includes. */
  40. #include "FreeRTOS.h"
  41. #include "task.h"
  42. #include "timers.h"
  43. #include "stack_macros.h"
  44. /* Include private IDF API additions for critical thread safety macros */
  45. #include "esp_private/freertos_idf_additions_priv.h"
  46. #include "freertos/idf_additions.h"
  47. /* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified
  48. * because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
  49. * for the header files above, but not in this file, in order to generate the
  50. * correct privileged Vs unprivileged linkage and placement. */
  51. #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */
  52. /* Set configUSE_STATS_FORMATTING_FUNCTIONS to 2 to include the stats formatting
  53. * functions but without including stdio.h here. */
  54. #if ( configUSE_STATS_FORMATTING_FUNCTIONS == 1 )
  55. /* At the bottom of this file are two optional functions that can be used
  56. * to generate human readable text from the raw data generated by the
  57. * uxTaskGetSystemState() function. Note the formatting functions are provided
  58. * for convenience only, and are NOT considered part of the kernel. */
  59. #include <stdio.h>
  60. #endif /* configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) */
  61. /* Some code sections require extra critical sections when building for SMP
  62. * ( configNUMBER_OF_CORES > 1 ). */
  63. #if ( configNUMBER_OF_CORES > 1 )
  64. /* Macros that enter/exit a critical section only when building for SMP */
  65. #define taskENTER_CRITICAL_SMP_ONLY( pxLock ) taskENTER_CRITICAL( pxLock )
  66. #define taskEXIT_CRITICAL_SMP_ONLY( pxLock ) taskEXIT_CRITICAL( pxLock )
  67. #define taskENTER_CRITICAL_ISR_SMP_ONLY( pxLock ) taskENTER_CRITICAL_ISR( pxLock )
  68. #define taskEXIT_CRITICAL_ISR_SMP_ONLY( pxLock ) taskEXIT_CRITICAL_ISR( pxLock )
  69. #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock ) prvTaskEnterCriticalSafeSMPOnly( pxLock )
  70. #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock ) prvTaskExitCriticalSafeSMPOnly( pxLock )
  71. /* Macros that enter/exit a critical section only when building for single-core */
  72. #define taskENTER_CRITICAL_SC_ONLY( pxLock ) taskENTER_CRITICAL( pxLock )
  73. #define taskEXIT_CRITICAL_SC_ONLY( pxLock ) taskEXIT_CRITICAL( pxLock )
  74. /* Macros that enable/disable interrupts only when building for SMP */
  75. #define taskDISABLE_INTERRUPTS_ISR_SMP_ONLY() portSET_INTERRUPT_MASK_FROM_ISR()
  76. #define taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxStatus ) portCLEAR_INTERRUPT_MASK_FROM_ISR( uxStatus )
  77. static inline __attribute__( ( always_inline ) )
  78. void prvTaskEnterCriticalSafeSMPOnly( portMUX_TYPE * pxLock )
  79. {
  80. if( portCHECK_IF_IN_ISR() == pdFALSE )
  81. {
  82. taskENTER_CRITICAL( pxLock );
  83. }
  84. else
  85. {
  86. #ifdef __clang_analyzer__
  87. /* Teach clang-tidy that ISR version macro can be different */
  88. configASSERT( 1 );
  89. #endif
  90. taskENTER_CRITICAL_ISR( pxLock );
  91. }
  92. }
  93. static inline __attribute__( ( always_inline ) )
  94. void prvTaskExitCriticalSafeSMPOnly( portMUX_TYPE * pxLock )
  95. {
  96. if( portCHECK_IF_IN_ISR() == pdFALSE )
  97. {
  98. taskEXIT_CRITICAL( pxLock );
  99. }
  100. else
  101. {
  102. #ifdef __clang_analyzer__
  103. /* Teach clang-tidy that ISR version macro can be different */
  104. configASSERT( 1 );
  105. #endif
  106. taskEXIT_CRITICAL_ISR( pxLock );
  107. }
  108. }
  109. #else /* configNUMBER_OF_CORES > 1 */
  110. /* Macros that enter/exit a critical section only when building for SMP */
  111. #define taskENTER_CRITICAL_SMP_ONLY( pxLock )
  112. #define taskEXIT_CRITICAL_SMP_ONLY( pxLock )
  113. #define taskENTER_CRITICAL_ISR_SMP_ONLY( pxLock )
  114. #define taskEXIT_CRITICAL_ISR_SMP_ONLY( pxLock )
  115. #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock )
  116. #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock )
  117. /* Macros that enter/exit a critical section only when building for single-core */
  118. #define taskENTER_CRITICAL_SC_ONLY( pxLock ) taskENTER_CRITICAL( pxLock )
  119. #define taskEXIT_CRITICAL_SC_ONLY( pxLock ) taskEXIT_CRITICAL( pxLock )
  120. /* Macros that enable/disable interrupts only when building for SMP */
  121. #define taskDISABLE_INTERRUPTS_ISR_SMP_ONLY() ( ( UBaseType_t ) 0 )
  122. #define taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxStatus ) ( ( void ) uxStatus )
  123. #endif /* configNUMBER_OF_CORES > 1 */
  124. #if ( configUSE_PREEMPTION == 0 )
  125. /* If the cooperative scheduler is being used then a yield should not be
  126. * performed just because a higher priority task has been woken. */
  127. #define taskYIELD_IF_USING_PREEMPTION()
  128. #else
  129. #define taskYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
  130. #endif
  131. #if ( configNUMBER_OF_CORES > 1 )
  132. #define taskYIELD_CORE( xCoreID ) portYIELD_CORE( xCoreID )
  133. #endif /* configNUMBER_OF_CORES > 1 */
/* Values that can be assigned to the ucNotifyState member of the TCB.
 * These track the lifecycle of each task-notification slot. */
#define taskNOT_WAITING_NOTIFICATION    ( ( uint8_t ) 0 ) /* Must be zero as it is the initialised value. */
#define taskWAITING_NOTIFICATION        ( ( uint8_t ) 1 )
#define taskNOTIFICATION_RECEIVED       ( ( uint8_t ) 2 )

/*
 * The value used to fill the stack of a task when the task is created.  This
 * is used purely for checking the high water mark for tasks.
 */
#define tskSTACK_FILL_BYTE    ( 0xa5U )

/* Bits used to record how a task's stack and TCB were allocated, so that
 * deletion only frees what the kernel itself allocated. */
#define tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB    ( ( uint8_t ) 0 )
#define tskSTATICALLY_ALLOCATED_STACK_ONLY        ( ( uint8_t ) 1 )
#define tskSTATICALLY_ALLOCATED_STACK_AND_TCB     ( ( uint8_t ) 2 )

/* If any of the following are set then task stacks are filled with a known
 * value so the high water mark can be determined.  If none of the following are
 * set then don't fill the stack so there is no unnecessary dependency on memset. */
#if ( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
    #define tskSET_NEW_STACKS_TO_KNOWN_VALUE    1
#else
    #define tskSET_NEW_STACKS_TO_KNOWN_VALUE    0
#endif
  155. /*
  156. * Macros used by vListTask to indicate which state a task is in.
  157. */
  158. #define tskRUNNING_CHAR ( 'X' )
  159. #define tskBLOCKED_CHAR ( 'B' )
  160. #define tskREADY_CHAR ( 'R' )
  161. #define tskDELETED_CHAR ( 'D' )
  162. #define tskSUSPENDED_CHAR ( 'S' )
  163. /*
  164. * Some kernel aware debuggers require the data the debugger needs access to to
  165. * be global, rather than file scope.
  166. */
  167. #ifdef portREMOVE_STATIC_QUALIFIER
  168. #define static
  169. #endif
  170. /* The name allocated to the Idle task. This can be overridden by defining
  171. * configIDLE_TASK_NAME in FreeRTOSConfig.h. */
  172. #ifndef configIDLE_TASK_NAME
  173. #define configIDLE_TASK_NAME "IDLE"
  174. #endif
  175. /*-----------------------------------------------------------*/
  176. /* Macros to check if an unblocked task causes a yield on the current core.
  177. * - pxTCB is the TCB of the task to check
  178. * - xCurCoreID is the current core's ID
  179. * - xYieldEqualPriority indicates whether a yield should occur if the unblocked
  180. * task's priority is equal to the priority of the task currently running on the
  181. * current core.
  182. * - uxTaskPriority is the task's priority
  183. * - xTaskCoreID is the task's core affinity */
  184. #if ( configNUMBER_OF_CORES > 1 )
  185. #define taskIS_YIELD_REQUIRED( pxTCB, xCurCoreID, xYieldEqualPriority ) prvIsYieldUsingPrioritySMP( ( pxTCB )->uxPriority, ( pxTCB )->xCoreID, xCurCoreID, xYieldEqualPriority )
  186. #define taskIS_YIELD_REQUIRED_USING_PRIORITY( uxTaskPriority, xTaskCoreID, xCurCoreID, xYieldEqualPriority ) prvIsYieldUsingPrioritySMP( uxTaskPriority, xTaskCoreID, xCurCoreID, xYieldEqualPriority )
  187. #else
  188. #define taskIS_YIELD_REQUIRED( pxTCB, xCurCoreID, xYieldEqualPriority ) \
  189. ( { \
  190. /* xCurCoreID is unused */ \
  191. ( void ) xCurCoreID; \
  192. ( ( ( pxTCB )->uxPriority + ( ( xYieldEqualPriority == pdTRUE ) ? 1 : 0 ) ) > pxCurrentTCBs[ 0 ]->uxPriority ) ? pdTRUE : pdFALSE; \
  193. } )
  194. #define taskIS_YIELD_REQUIRED_USING_PRIORITY( uxTaskPriority, xTaskCoreID, xCurCoreID, xYieldEqualPriority ) \
  195. ( { \
  196. /* xTaskCoreID and xCurCoreID are unused */ \
  197. ( void ) xTaskCoreID; \
  198. ( void ) xCurCoreID; \
  199. ( ( uxTaskPriority + ( ( xYieldEqualPriority == pdTRUE ) ? 1 : 0 ) ) >= pxCurrentTCBs[ 0 ]->uxPriority ) ? pdTRUE : pdFALSE; \
  200. } )
  201. #endif /* configNUMBER_OF_CORES > 1 */
  202. /*-----------------------------------------------------------*/
/* Macros to check if a task has a compatible affinity with a particular core.
 * - xCore is the target core
 * - xCoreID is the affinity of the task to check
 *
 * This macro will always return true on single core as the concept of core
 * affinity doesn't exist. */
#if ( configNUMBER_OF_CORES > 1 )
    /* Compatible if the task is pinned to xCore, or is unpinned (tskNO_AFFINITY). */
    #define taskIS_AFFINITY_COMPATIBLE( xCore, xCoreID )    ( ( ( ( xCoreID ) == xCore ) || ( ( xCoreID ) == tskNO_AFFINITY ) ) ? pdTRUE : pdFALSE )
#else
    /* Single-core: affinity is meaningless, always report compatible. */
    #define taskIS_AFFINITY_COMPATIBLE( xCore, xCoreID ) \
    ( { \
        /* xCoreID is unused */ \
        ( void ) xCoreID; \
        pdTRUE; \
    } )
#endif /* configNUMBER_OF_CORES > 1 */
  219. /*-----------------------------------------------------------*/
  220. /* Macros to check if a particular task is a currently running. */
  221. #if ( configNUMBER_OF_CORES > 1 )
  222. #define taskIS_CURRENTLY_RUNNING( pxTCB ) ( ( ( ( pxTCB ) == pxCurrentTCBs[ 0 ] ) || ( ( pxTCB ) == pxCurrentTCBs[ 1 ] ) ) ? pdTRUE : pdFALSE )
  223. #define taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, xCoreID ) ( ( ( pxTCB ) == pxCurrentTCBs[ ( xCoreID ) ] ) ? pdTRUE : pdFALSE )
  224. #else
  225. #define taskIS_CURRENTLY_RUNNING( pxTCB ) ( ( ( pxTCB ) == pxCurrentTCBs[ 0 ] ) ? pdTRUE : pdFALSE )
  226. #define taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, xCoreID ) \
  227. ( { \
  228. /* xCoreID is unused */ \
  229. ( void ) xCoreID; \
  230. taskIS_CURRENTLY_RUNNING( pxTCB ); \
  231. } )
  232. #endif /* configNUMBER_OF_CORES > 1 */
  233. /*-----------------------------------------------------------*/
  234. /* Macro to check if a particular task can currently be scheduled (i.e., is
  235. * the scheduler suspended). */
  236. #if ( configNUMBER_OF_CORES > 1 )
  237. #define taskCAN_BE_SCHEDULED( pxTCB ) prvCheckTaskCanBeScheduledSMP( pxTCB )
  238. #else
  239. #define taskCAN_BE_SCHEDULED( pxTCB ) \
  240. ( { \
  241. /* pxTCB is unused */ \
  242. ( void ) pxTCB; \
  243. ( ( uxSchedulerSuspended[ 0 ] == ( UBaseType_t ) 0U ) ) ? pdTRUE : pdFALSE; \
  244. } )
  245. #endif /* configNUMBER_OF_CORES > 1 */
  246. /*-----------------------------------------------------------*/
/* Macro to check if the scheduler is suspended (on the current core)
 *
 * There are various blocking tasks.c APIs that call configASSERT() to check if
 * the API is being called while the scheduler is suspended.  However, these
 * asserts are done outside a critical section or interrupt disabled block.
 * Directly checking uxSchedulerSuspended[ portGET_CORE_ID() ] outside a
 * critical section can lead to false positives in SMP.  Thus for SMP, we call
 * xTaskGetSchedulerState() instead.
 *
 * Take the following example of an unpinned Task A in SMP calling
 * uxSchedulerSuspended[ portGET_CORE_ID() ]:
 * - Task A calls portGET_CORE_ID() which is 0
 * - Task A gets preempted by Task B, Task A switches to core 1
 * - Task B on core 0 calls vTaskSuspendAll()
 * - Task A checks uxSchedulerSuspended[ 0 ] leading to a false positive
 */
#if ( configNUMBER_OF_CORES > 1 )
    /* xTaskGetSchedulerState() reads the core ID and suspension state
     * atomically, avoiding the race described above. */
    #define taskIS_SCHEDULER_SUSPENDED()    ( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) ? pdTRUE : pdFALSE )
#else
    /* Single-core: no migration is possible, a direct read is safe. */
    #define taskIS_SCHEDULER_SUSPENDED()    ( ( ( uxSchedulerSuspended[ 0 ] != ( UBaseType_t ) 0U ) ) ? pdTRUE : pdFALSE )
#endif /* configNUMBER_OF_CORES > 1 */
  268. /*-----------------------------------------------------------*/
#if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )

/* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 0 then task selection is
 * performed in a generic way that is not optimised to any particular
 * microcontroller architecture. */

/* uxTopReadyPriority holds the priority of the highest priority ready
 * state task.  In this generic mode it is only an upper bound: it is raised
 * when a task becomes ready and lowered lazily during selection below. */
    #define taskRECORD_READY_PRIORITY( uxPriority ) \
    { \
        if( ( uxPriority ) > uxTopReadyPriority ) \
        { \
            uxTopReadyPriority = ( uxPriority ); \
        } \
    } /* taskRECORD_READY_PRIORITY */

/*-----------------------------------------------------------*/

    #if ( configNUMBER_OF_CORES > 1 )
        /* SMP: selection must honour core affinity, so defer to a helper. */
        #define taskSELECT_HIGHEST_PRIORITY_TASK()    prvSelectHighestPriorityTaskSMP()
    #else /* if ( configNUMBER_OF_CORES > 1 ) */
        #define taskSELECT_HIGHEST_PRIORITY_TASK() \
        { \
            UBaseType_t uxTopPriority = uxTopReadyPriority; \
    \
            /* Find the highest priority queue that contains ready tasks. */ \
            while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopPriority ] ) ) ) \
            { \
                configASSERT( uxTopPriority ); \
                --uxTopPriority; \
            } \
    \
            /* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of \
             * the same priority get an equal share of the processor time. */ \
            listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCBs[ 0 ], &( pxReadyTasksLists[ uxTopPriority ] ) ); \
            uxTopReadyPriority = uxTopPriority; \
        } /* taskSELECT_HIGHEST_PRIORITY_TASK */
    #endif /* if ( configNUMBER_OF_CORES > 1 ) */

/*-----------------------------------------------------------*/

/* Define away taskRESET_READY_PRIORITY() and portRESET_READY_PRIORITY() as
 * they are only required when a port optimised method of task selection is
 * being used. */
    #define taskRESET_READY_PRIORITY( uxPriority )
    #define portRESET_READY_PRIORITY( uxPriority, uxTopReadyPriority )

#else /* configUSE_PORT_OPTIMISED_TASK_SELECTION */

/* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 1 then task selection is
 * performed in a way that is tailored to the particular microcontroller
 * architecture being used.  (The representation of uxTopReadyPriority is
 * then port-defined - see the port's portRECORD_READY_PRIORITY.) */

/* A port optimised version is provided.  Call the port defined macros. */
    #define taskRECORD_READY_PRIORITY( uxPriority )    portRECORD_READY_PRIORITY( ( uxPriority ), uxTopReadyPriority )

/*-----------------------------------------------------------*/

    #define taskSELECT_HIGHEST_PRIORITY_TASK() \
    { \
        UBaseType_t uxTopPriority; \
    \
        /* Find the highest priority list that contains ready tasks. */ \
        portGET_HIGHEST_PRIORITY( uxTopPriority, uxTopReadyPriority ); \
        configASSERT( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxTopPriority ] ) ) > 0 ); \
        listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCBs[ 0 ], &( pxReadyTasksLists[ uxTopPriority ] ) ); \
    } /* taskSELECT_HIGHEST_PRIORITY_TASK() */

/*-----------------------------------------------------------*/

/* A port optimised version is provided, call it only if the TCB being reset
 * is being referenced from a ready list.  If it is referenced from a delayed
 * or suspended list then it won't be in a ready list. */
    #define taskRESET_READY_PRIORITY( uxPriority ) \
    { \
        if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ ( uxPriority ) ] ) ) == ( UBaseType_t ) 0 ) \
        { \
            portRESET_READY_PRIORITY( ( uxPriority ), ( uxTopReadyPriority ) ); \
        } \
    }

#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
  337. /*-----------------------------------------------------------*/
/* pxDelayedTaskList and pxOverflowDelayedTaskList are switched when the tick
 * count overflows.  Two lists are kept so that wake times beyond the tick
 * wrap-around can be represented; on overflow the roles are swapped. */
#define taskSWITCH_DELAYED_LISTS()                                                \
    {                                                                             \
        List_t * pxTemp;                                                          \
                                                                                  \
        /* The delayed tasks list should be empty when the lists are switched. */ \
        configASSERT( ( listLIST_IS_EMPTY( pxDelayedTaskList ) ) );               \
                                                                                  \
        pxTemp = pxDelayedTaskList;                                               \
        pxDelayedTaskList = pxOverflowDelayedTaskList;                            \
        pxOverflowDelayedTaskList = pxTemp;                                       \
        xNumOfOverflows++;                                                        \
        prvResetNextTaskUnblockTime();                                            \
    }
  353. /*-----------------------------------------------------------*/
  354. /*
  355. * Place the task represented by pxTCB into the appropriate ready list for
  356. * the task. It is inserted at the end of the list.
  357. */
  358. #define prvAddTaskToReadyList( pxTCB ) \
  359. traceMOVED_TASK_TO_READY_STATE( pxTCB ); \
  360. taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority ); \
  361. listINSERT_END( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xStateListItem ) ); \
  362. tracePOST_MOVED_TASK_TO_READY_STATE( pxTCB )
  363. /*-----------------------------------------------------------*/
  364. /*
  365. * Several functions take a TaskHandle_t parameter that can optionally be NULL,
  366. * where NULL is used to indicate that the handle of the currently executing
  367. * task should be used in place of the parameter. This macro simply checks to
  368. * see if the parameter is NULL and returns a pointer to the appropriate TCB.
  369. *
  370. * In SMP, calling xTaskGetCurrentTaskHandle() ensures atomic access to pxCurrentTCBs
  371. */
  372. #if ( configNUMBER_OF_CORES > 1 )
  373. #define prvGetTCBFromHandle( pxHandle ) ( ( ( pxHandle ) == NULL ) ? xTaskGetCurrentTaskHandle() : ( pxHandle ) )
  374. #else
  375. #define prvGetTCBFromHandle( pxHandle ) ( ( ( pxHandle ) == NULL ) ? pxCurrentTCBs[ 0 ] : ( pxHandle ) )
  376. #endif
  377. /* The item value of the event list item is normally used to hold the priority
  378. * of the task to which it belongs (coded to allow it to be held in reverse
  379. * priority order). However, it is occasionally borrowed for other purposes. It
  380. * is important its value is not updated due to a task priority change while it is
  381. * being used for another purpose. The following bit definition is used to inform
  382. * the scheduler that the value should not be changed - in which case it is the
  383. * responsibility of whichever module is using the value to ensure it gets set back
  384. * to its original value when it is released. */
  385. #if ( configUSE_16_BIT_TICKS == 1 )
  386. #define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x8000U
  387. #else
  388. #define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x80000000UL
  389. #endif
  390. /*
  391. * Task control block. A task control block (TCB) is allocated for each task,
  392. * and stores task state information, including a pointer to the task's context
  393. * (the task's run time environment, including register values)
  394. */
  395. typedef struct tskTaskControlBlock /* The old naming convention is used to prevent breaking kernel aware debuggers. */
  396. {
  397. volatile StackType_t * pxTopOfStack; /*< Points to the location of the last item placed on the tasks stack. THIS MUST BE THE FIRST MEMBER OF THE TCB STRUCT. */
  398. #if ( portUSING_MPU_WRAPPERS == 1 )
  399. xMPU_SETTINGS xMPUSettings; /*< The MPU settings are defined as part of the port layer. THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. */
  400. #endif
  401. ListItem_t xStateListItem; /*< The list that the state list item of a task is reference from denotes the state of that task (Ready, Blocked, Suspended ). */
  402. ListItem_t xEventListItem; /*< Used to reference a task from an event list. */
  403. UBaseType_t uxPriority; /*< The priority of the task. 0 is the lowest priority. */
  404. StackType_t * pxStack; /*< Points to the start of the stack. */
  405. char pcTaskName[ configMAX_TASK_NAME_LEN ]; /*< Descriptive name given to the task when created. Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
  406. /* Todo: Remove xCoreID for single core builds (IDF-7894) */
  407. BaseType_t xCoreID; /*< The core that this task is pinned to */
  408. #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) )
  409. StackType_t * pxEndOfStack; /*< Points to the highest valid address for the stack. */
  410. #endif
  411. #if ( portCRITICAL_NESTING_IN_TCB == 1 )
  412. UBaseType_t uxCriticalNesting; /*< Holds the critical section nesting depth for ports that do not maintain their own count in the port layer. */
  413. #endif
  414. #if ( configUSE_TRACE_FACILITY == 1 )
  415. UBaseType_t uxTCBNumber; /*< Stores a number that increments each time a TCB is created. It allows debuggers to determine when a task has been deleted and then recreated. */
  416. UBaseType_t uxTaskNumber; /*< Stores a number specifically for use by third party trace code. */
  417. #endif
  418. #if ( configUSE_MUTEXES == 1 )
  419. UBaseType_t uxBasePriority; /*< The priority last assigned to the task - used by the priority inheritance mechanism. */
  420. UBaseType_t uxMutexesHeld;
  421. #endif
  422. #if ( configUSE_APPLICATION_TASK_TAG == 1 )
  423. TaskHookFunction_t pxTaskTag;
  424. #endif
  425. #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 )
  426. void * pvThreadLocalStoragePointers[ configNUM_THREAD_LOCAL_STORAGE_POINTERS ];
  427. #endif
  428. #if ( configGENERATE_RUN_TIME_STATS == 1 )
  429. configRUN_TIME_COUNTER_TYPE ulRunTimeCounter; /*< Stores the amount of time the task has spent in the Running state. */
  430. #endif
  431. #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) )
  432. configTLS_BLOCK_TYPE xTLSBlock; /*< Memory block used as Thread Local Storage (TLS) Block for the task. */
  433. #endif
  434. #if ( configUSE_TASK_NOTIFICATIONS == 1 )
  435. volatile uint32_t ulNotifiedValue[ configTASK_NOTIFICATION_ARRAY_ENTRIES ];
  436. volatile uint8_t ucNotifyState[ configTASK_NOTIFICATION_ARRAY_ENTRIES ];
  437. #endif
  438. /* See the comments in FreeRTOS.h with the definition of
  439. * tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE. */
  440. #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
  441. uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the task is a statically allocated to ensure no attempt is made to free the memory. */
  442. #endif
  443. #if ( INCLUDE_xTaskAbortDelay == 1 )
  444. uint8_t ucDelayAborted;
  445. #endif
  446. #if ( configUSE_POSIX_ERRNO == 1 )
  447. int iTaskErrno;
  448. #endif
  449. } tskTCB;
  450. /* The old tskTCB name is maintained above then typedefed to the new TCB_t name
  451. * below to enable the use of older kernel aware debuggers. */
  452. typedef tskTCB TCB_t;
/*lint -save -e956 A manual analysis and inspection has been used to determine
 * which static variables must be declared volatile. */
/* Per-core pointer to the currently running task's TCB.  Deliberately not
 * static (portDONT_DISCARD) so kernel-aware debuggers can find it. */
portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCBs[ configNUMBER_OF_CORES ] = { NULL };

/* Lists for ready and blocked tasks. --------------------
 * xDelayedTaskList1 and xDelayedTaskList2 could be moved to function scope but
 * doing so breaks some kernel aware debuggers and debuggers that rely on removing
 * the static qualifier. */
PRIVILEGED_DATA static List_t pxReadyTasksLists[ configMAX_PRIORITIES ]; /*< Prioritised ready tasks. */
PRIVILEGED_DATA static List_t xDelayedTaskList1;                         /*< Delayed tasks. */
PRIVILEGED_DATA static List_t xDelayedTaskList2;                         /*< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count. */
PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList;              /*< Points to the delayed task list currently being used. */
PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList;      /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */
PRIVILEGED_DATA static List_t xPendingReadyList[ configNUMBER_OF_CORES ]; /*< Tasks that have been readied while the scheduler was suspended.  They will be moved to the ready list when the scheduler is resumed. */

#if ( INCLUDE_vTaskDelete == 1 )
    PRIVILEGED_DATA static List_t xTasksWaitingTermination; /*< Tasks that have been deleted - but their memory not yet freed. */
    PRIVILEGED_DATA static volatile UBaseType_t uxDeletedTasksWaitingCleanUp = ( UBaseType_t ) 0U;
#endif

#if ( INCLUDE_vTaskSuspend == 1 )
    PRIVILEGED_DATA static List_t xSuspendedTaskList; /*< Tasks that are currently suspended. */
#endif

/* Global POSIX errno.  Its value is changed upon context switching to match
 * the errno of the currently running task. */
#if ( configUSE_POSIX_ERRNO == 1 )
    int FreeRTOS_errno = 0;
#endif

/* Other file private variables. --------------------------------*/
PRIVILEGED_DATA static volatile UBaseType_t uxCurrentNumberOfTasks = ( UBaseType_t ) 0U;
PRIVILEGED_DATA static volatile TickType_t xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;
PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority = tskIDLE_PRIORITY;
PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning = pdFALSE;
PRIVILEGED_DATA static volatile TickType_t xPendedTicks = ( TickType_t ) 0U; /*< Ticks that occurred while the scheduler was suspended, applied on resume. */
PRIVILEGED_DATA static volatile BaseType_t xYieldPending[ configNUMBER_OF_CORES ] = { pdFALSE }; /*< Per-core flag: a context switch is pending. */
PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0; /*< Count of tick-count overflows (delayed-list switches). */
PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U;
PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U;     /* Initialised to portMAX_DELAY before the scheduler starts. */
PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandle[ configNUMBER_OF_CORES ] = { NULL }; /*< Holds the handle of the idle task.  The idle task is created automatically when the scheduler is started. */

/* Improve support for OpenOCD.  The kernel tracks Ready tasks via priority lists.
 * For tracking the state of remote threads, OpenOCD uses uxTopUsedPriority
 * to determine the number of priority lists to read back from the remote target. */
const volatile UBaseType_t uxTopUsedPriority = configMAX_PRIORITIES - 1U;

/* Context switches are held pending while the scheduler is suspended.  Also,
 * interrupts must not manipulate the xStateListItem of a TCB, or any of the
 * lists the xStateListItem can be referenced from, if the scheduler is suspended.
 * If an interrupt needs to unblock a task while the scheduler is suspended then it
 * moves the task's event list item into the xPendingReadyList, ready for the
 * kernel to move the task from the pending ready list into the real ready list
 * when the scheduler is unsuspended.  The pending ready list itself can only be
 * accessed from a critical section. */
PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended[ configNUMBER_OF_CORES ] = { ( UBaseType_t ) pdFALSE };

#if ( configGENERATE_RUN_TIME_STATS == 1 )

/* Do not move these variables to function scope as doing so prevents the
 * code working with debuggers that need to remove the static qualifier. */
    PRIVILEGED_DATA static configRUN_TIME_COUNTER_TYPE ulTaskSwitchedInTime[ configNUMBER_OF_CORES ] = { 0UL }; /*< Holds the value of a timer/counter the last time a task was switched in. */
    PRIVILEGED_DATA static volatile configRUN_TIME_COUNTER_TYPE ulTotalRunTime = 0UL;                           /*< Holds the total amount of execution time as defined by the run time counter clock. */

#endif

/* Spinlock required for SMP critical sections.  This lock protects all of the
 * kernel's data structures such as various tasks lists, flags, and tick counts. */
PRIVILEGED_DATA static portMUX_TYPE xKernelLock = portMUX_INITIALIZER_UNLOCKED;

/*lint -restore */
  512. /*-----------------------------------------------------------*/
  513. /* File private functions. --------------------------------*/
  514. /**
  515. * Utility function to check whether a yield (on either core) is required after
  516. * unblocking (or changing the priority of) a particular task.
  517. *
  518. * - This function is the SMP replacement for checking if an unblocked task has
  519. * a higher (or equal) priority than the current task.
  520. * - It should be called before calling taskYIELD_IF_USING_PREEMPTION() or
  521. * before setting xYieldRequired
  522. * - If it is the other core that requires a yield, this function will
  523. * internally trigger the other core to yield
  524. *
  525. * Note: In some special instances, a yield is triggered if the unblocked task
  526. * has an equal priority (such as in xTaskResumeAll). Thus the
  527. * xYieldEqualPriority parameter specifies whether to yield if the current
  528. * task has equal priority.
  529. *
  530. * Scheduling Algorithm:
  531. * This function will bias towards yielding the current core.
  532. * - If the unblocked task has a higher (or equal) priority than the current
  533. * core, the current core is yielded regardless of the current priority of the
  534. * other core.
  535. * - A core (current or other) will only yield if their schedulers are not
  536. * suspended.
  537. *
  538. * Todo: This can be optimized (IDF-5772)
  539. *
  540. * Entry:
  541. * - This function must be called in a critical section
  542. * - A task must just have been unblocked, or its priority raised
  543. * Exit:
  544. * - Returns pdTRUE if the current core requires yielding
  545. * - The other core will be triggered to yield if required
  546. */
  547. #if ( configNUMBER_OF_CORES > 1 )
  548. static BaseType_t prvIsYieldUsingPrioritySMP( UBaseType_t uxTaskPriority,
  549. BaseType_t xTaskCoreID,
  550. BaseType_t xCurCoreID,
  551. BaseType_t xYieldEqualPriority ) PRIVILEGED_FUNCTION;
  552. #endif /* configNUMBER_OF_CORES > 1 */
  553. /**
  554. * Utility function to check whether a task can currently be scheduled on one
  555. * or more cores. This function is the SMP replacement for checking if
  556. * `uxSchedulerSuspended == 0`.
  557. *
  558. * - If a task is pinned, check the scheduler suspension state on the task's
  559. * pinned core. The task can be scheduled if the scheduler is not suspended on
  560. * the pinned core.
  561. * - If a task is unpinned, check the scheduler suspension state on both cores.
  562. * The task can be scheduled if the scheduler is not suspended on either of
  563. * the cores.
  564. */
  565. #if ( configNUMBER_OF_CORES > 1 )
  566. static BaseType_t prvCheckTaskCanBeScheduledSMP( TCB_t * pxTCB ) PRIVILEGED_FUNCTION;
  567. #endif /* configNUMBER_OF_CORES > 1 */
  568. /**
  569. * Utility function to select the highest priority and runnable task for the
  570. * current core.
  571. */
  572. #if ( configNUMBER_OF_CORES > 1 )
  573. static void prvSelectHighestPriorityTaskSMP( void ) PRIVILEGED_FUNCTION;
  574. #endif /* configNUMBER_OF_CORES > 1 */
  575. /**
  576. * Utility task that simply returns pdTRUE if the task referenced by xTask is
  577. * currently in the Suspended state, or pdFALSE if the task referenced by xTask
  578. * is in any other state.
  579. */
  580. #if ( INCLUDE_vTaskSuspend == 1 )
  581. static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
  582. #endif /* INCLUDE_vTaskSuspend */
  583. /*
  584. * Utility to ready all the lists used by the scheduler. This is called
  585. * automatically upon the creation of the first task.
  586. */
  587. static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION;
  588. /*
  589. * The idle task, which as all tasks is implemented as a never ending loop.
  590. * The idle task is automatically created and added to the ready lists upon
  591. * creation of the first user task.
  592. *
  593. * The portTASK_FUNCTION_PROTO() macro is used to allow port/compiler specific
  594. * language extensions. The equivalent prototype for this function is:
  595. *
  596. * void prvIdleTask( void *pvParameters );
  597. *
  598. */
  599. static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters ) PRIVILEGED_FUNCTION;
  600. /*
  601. * Utility to free all memory allocated by the scheduler to hold a TCB,
  602. * including the stack pointed to by the TCB.
  603. *
  604. * This does not free memory allocated by the task itself (i.e. memory
  605. * allocated by calls to pvPortMalloc from within the tasks application code).
  606. */
  607. #if ( INCLUDE_vTaskDelete == 1 )
  608. static void prvDeleteTCB( TCB_t * pxTCB ) PRIVILEGED_FUNCTION;
  609. #endif
  610. /*
  611. * Used only by the idle task. This checks to see if anything has been placed
  612. * in the list of tasks waiting to be deleted. If so the task is cleaned up
  613. * and its TCB deleted.
  614. */
  615. static void prvCheckTasksWaitingTermination( void ) PRIVILEGED_FUNCTION;
  616. /*
  617. * The currently executing task is entering the Blocked state. Add the task to
  618. * either the current or the overflow delayed task list.
  619. */
  620. static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait,
  621. const BaseType_t xCanBlockIndefinitely ) PRIVILEGED_FUNCTION;
  622. /*
  623. * Fills an TaskStatus_t structure with information on each task that is
  624. * referenced from the pxList list (which may be a ready list, a delayed list,
  625. * a suspended list, etc.).
  626. *
  627. * THIS FUNCTION IS INTENDED FOR DEBUGGING ONLY, AND SHOULD NOT BE CALLED FROM
  628. * NORMAL APPLICATION CODE.
  629. */
  630. #if ( configUSE_TRACE_FACILITY == 1 )
  631. static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t * pxTaskStatusArray,
  632. List_t * pxList,
  633. eTaskState eState ) PRIVILEGED_FUNCTION;
  634. #endif
  635. /*
  636. * Searches pxList for a task with name pcNameToQuery - returning a handle to
  637. * the task if it is found, or NULL if the task is not found.
  638. */
  639. #if ( INCLUDE_xTaskGetHandle == 1 )
  640. static TCB_t * prvSearchForNameWithinSingleList( List_t * pxList,
  641. const char pcNameToQuery[] ) PRIVILEGED_FUNCTION;
  642. #endif
  643. /*
  644. * When a task is created, the stack of the task is filled with a known value.
  645. * This function determines the 'high water mark' of the task stack by
  646. * determining how much of the stack remains at the original preset value.
  647. */
  648. #if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
  649. static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte ) PRIVILEGED_FUNCTION;
  650. #endif
  651. /*
  652. * Return the amount of time, in ticks, that will pass before the kernel will
  653. * next move a task from the Blocked state to the Running state.
  654. *
  655. * This conditional compilation should use inequality to 0, not equality to 1.
  656. * This is to ensure portSUPPRESS_TICKS_AND_SLEEP() can be called when user
  657. * defined low power mode implementations require configUSE_TICKLESS_IDLE to be
  658. * set to a value other than 1.
  659. */
  660. #if ( configUSE_TICKLESS_IDLE != 0 )
  661. static TickType_t prvGetExpectedIdleTime( void ) PRIVILEGED_FUNCTION;
  662. #endif
  663. /*
  664. * Set xNextTaskUnblockTime to the time at which the next Blocked state task
  665. * will exit the Blocked state.
  666. */
  667. static void prvResetNextTaskUnblockTime( void ) PRIVILEGED_FUNCTION;
  668. #if ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 )
  669. /*
  670. * Helper function used to pad task names with spaces when printing out
  671. * human readable tables of task information.
  672. */
  673. static char * prvWriteNameToBuffer( char * pcBuffer,
  674. const char * pcTaskName ) PRIVILEGED_FUNCTION;
  675. #endif
  676. /*
  677. * Called after a Task_t structure has been allocated either statically or
  678. * dynamically to fill in the structure's members.
  679. */
  680. static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
  681. const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
  682. const uint32_t ulStackDepth,
  683. void * const pvParameters,
  684. UBaseType_t uxPriority,
  685. TaskHandle_t * const pxCreatedTask,
  686. TCB_t * pxNewTCB,
  687. const MemoryRegion_t * const xRegions,
  688. BaseType_t xCoreID ) PRIVILEGED_FUNCTION;
  689. /*
  690. * Called after a new task has been created and initialised to place the task
  691. * under the control of the scheduler.
  692. */
  693. static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
  694. /*
  695. * freertos_tasks_c_additions_init() should only be called if the user definable
  696. * macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is the only macro
  697. * called by the function.
  698. */
  699. #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
  700. static void freertos_tasks_c_additions_init( void ) PRIVILEGED_FUNCTION;
  701. #endif
  702. /*-----------------------------------------------------------*/
#if ( configNUMBER_OF_CORES > 1 )

/*
 * Decide whether readying a task of priority uxTaskPriority (pinned to
 * xTaskCoreID, or tskNO_AFFINITY) requires a yield, from the point of view of
 * the caller running on xCurCoreID.
 *
 * Returns pdTRUE when the CURRENT core must yield (the caller performs the
 * yield).  When the OTHER core is the one that should yield, the cross-core
 * yield request is issued directly from here via taskYIELD_CORE() and pdFALSE
 * is returned.
 *
 * xYieldEqualPriority: when pdTRUE, a task of EQUAL priority also triggers a
 * yield (implemented by bumping uxTaskPriority so ">" behaves like ">=").
 *
 * NOTE(review): the "!xCurCoreID" expressions assume exactly two cores
 * (0 <-> 1) - confirm if configNUMBER_OF_CORES can ever exceed 2.
 */
    static BaseType_t prvIsYieldUsingPrioritySMP( UBaseType_t uxTaskPriority,
                                                  BaseType_t xTaskCoreID,
                                                  BaseType_t xCurCoreID,
                                                  BaseType_t xYieldEqualPriority )
    {
        configASSERT( uxTaskPriority < configMAX_PRIORITIES );

        if( xYieldEqualPriority == pdTRUE )
        {
            /* Increment the task priority to achieve the same affect as
             * if( uxTaskPriority >= pxCurrentTCBs->uxPriority ). */
            uxTaskPriority++;
        }

        /* Indicate whether the current core needs to yield */
        BaseType_t xYieldRequiredCurrentCore;

        /* If the target task can run on the current core, and has a higher
         * priority than the current core, and the core has not suspended
         * scheduling, then yield the current core.
         * Todo: Make fair scheduling a configurable option (IDF-5772). */
        if( ( taskIS_AFFINITY_COMPATIBLE( xCurCoreID, xTaskCoreID ) == pdTRUE ) &&
            ( uxTaskPriority > pxCurrentTCBs[ xCurCoreID ]->uxPriority ) &&
            ( uxSchedulerSuspended[ xCurCoreID ] == ( UBaseType_t ) 0U ) )
        {
            /* Return true for the caller to yield the current core */
            xYieldRequiredCurrentCore = pdTRUE;
        }
        /* If the target task can run on the other core, and has a higher
         * priority than the other core, and the other core has not suspended
         * scheduling, then yield the other core */
        else if( ( taskIS_AFFINITY_COMPATIBLE( !xCurCoreID, xTaskCoreID ) == pdTRUE ) &&
                 ( uxTaskPriority > pxCurrentTCBs[ !xCurCoreID ]->uxPriority ) &&
                 ( uxSchedulerSuspended[ !xCurCoreID ] == ( UBaseType_t ) 0U ) )
        {
            /* Signal the other core to yield */
            taskYIELD_CORE( !xCurCoreID );
            xYieldRequiredCurrentCore = pdFALSE;
        }
        else
        {
            /* Neither core needs to (or can) yield for this task. */
            xYieldRequiredCurrentCore = pdFALSE;
        }

        return xYieldRequiredCurrentCore;
    }
#endif /* configNUMBER_OF_CORES > 1 */
  747. /*-----------------------------------------------------------*/
  748. #if ( configNUMBER_OF_CORES > 1 )
  749. static BaseType_t prvCheckTaskCanBeScheduledSMP( TCB_t * pxTCB )
  750. {
  751. BaseType_t xReturn;
  752. if( pxTCB->xCoreID == tskNO_AFFINITY )
  753. {
  754. /* Task is unpinned. As long as one core has not suspended
  755. * scheduling, the task can be scheduled. */
  756. if( ( uxSchedulerSuspended[ 0 ] == ( UBaseType_t ) 0U ) || ( uxSchedulerSuspended[ 1 ] == ( UBaseType_t ) 0U ) )
  757. {
  758. xReturn = pdTRUE;
  759. }
  760. else
  761. {
  762. xReturn = pdFALSE;
  763. }
  764. }
  765. else if( uxSchedulerSuspended[ pxTCB->xCoreID ] == ( UBaseType_t ) 0U )
  766. {
  767. /* The task is pinned to a core. If it's pinned core has not
  768. * suspended scheduling, the task can be scheduled. */
  769. xReturn = pdTRUE;
  770. }
  771. else
  772. {
  773. xReturn = pdFALSE;
  774. }
  775. return xReturn;
  776. }
  777. #endif /* configNUMBER_OF_CORES > 1 */
  778. /*-----------------------------------------------------------*/
  779. #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
  780. /* Todo: Add support for task restricted API (IDF-7895) */
  781. BaseType_t xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition,
  782. TaskHandle_t * pxCreatedTask )
  783. {
  784. TCB_t * pxNewTCB;
  785. BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
  786. configASSERT( pxTaskDefinition->puxStackBuffer != NULL );
  787. configASSERT( pxTaskDefinition->pxTaskBuffer != NULL );
  788. if( ( pxTaskDefinition->puxStackBuffer != NULL ) && ( pxTaskDefinition->pxTaskBuffer != NULL ) )
  789. {
  790. /* Allocate space for the TCB. Where the memory comes from depends
  791. * on the implementation of the port malloc function and whether or
  792. * not static allocation is being used. */
  793. pxNewTCB = ( TCB_t * ) pxTaskDefinition->pxTaskBuffer;
  794. memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) );
  795. /* Store the stack location in the TCB. */
  796. pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;
  797. #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
  798. {
  799. /* Tasks can be created statically or dynamically, so note this
  800. * task was created statically in case the task is later deleted. */
  801. pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;
  802. }
  803. #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
  804. prvInitialiseNewTask( pxTaskDefinition->pvTaskCode,
  805. pxTaskDefinition->pcName,
  806. ( uint32_t ) pxTaskDefinition->usStackDepth,
  807. pxTaskDefinition->pvParameters,
  808. pxTaskDefinition->uxPriority,
  809. pxCreatedTask, pxNewTCB,
  810. pxTaskDefinition->xRegions );
  811. prvAddNewTaskToReadyList( pxNewTCB );
  812. xReturn = pdPASS;
  813. }
  814. return xReturn;
  815. }
  816. #endif /* ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
  817. /*-----------------------------------------------------------*/
  818. #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
  819. /* Todo: Add support for task restricted API (IDF-7895) */
  820. BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition,
  821. TaskHandle_t * pxCreatedTask )
  822. {
  823. TCB_t * pxNewTCB;
  824. BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
  825. configASSERT( pxTaskDefinition->puxStackBuffer );
  826. if( pxTaskDefinition->puxStackBuffer != NULL )
  827. {
  828. /* Allocate space for the TCB. Where the memory comes from depends
  829. * on the implementation of the port malloc function and whether or
  830. * not static allocation is being used. */
  831. pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) );
  832. if( pxNewTCB != NULL )
  833. {
  834. memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) );
  835. /* Store the stack location in the TCB. */
  836. pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;
  837. #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
  838. {
  839. /* Tasks can be created statically or dynamically, so note
  840. * this task had a statically allocated stack in case it is
  841. * later deleted. The TCB was allocated dynamically. */
  842. pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_ONLY;
  843. }
  844. #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
  845. prvInitialiseNewTask( pxTaskDefinition->pvTaskCode,
  846. pxTaskDefinition->pcName,
  847. ( uint32_t ) pxTaskDefinition->usStackDepth,
  848. pxTaskDefinition->pvParameters,
  849. pxTaskDefinition->uxPriority,
  850. pxCreatedTask, pxNewTCB,
  851. pxTaskDefinition->xRegions );
  852. prvAddNewTaskToReadyList( pxNewTCB );
  853. xReturn = pdPASS;
  854. }
  855. }
  856. return xReturn;
  857. }
  858. #endif /* portUSING_MPU_WRAPPERS */
  859. /*-----------------------------------------------------------*/
/*
 * Fill in the members of a newly allocated TCB (either statically or
 * dynamically allocated by the caller): stack fill, top-of-stack alignment,
 * task name, priority, list items, core affinity, optional MPU/TLS setup and
 * the initial stack frame.  pxNewTCB->pxStack must already be set on entry.
 * If pxCreatedTask is non-NULL it receives the handle of the new task.
 */
static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
                                  const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
                                  const uint32_t ulStackDepth,
                                  void * const pvParameters,
                                  UBaseType_t uxPriority,
                                  TaskHandle_t * const pxCreatedTask,
                                  TCB_t * pxNewTCB,
                                  const MemoryRegion_t * const xRegions,
                                  BaseType_t xCoreID )
{
    StackType_t * pxTopOfStack;
    UBaseType_t x;

    #if ( configNUMBER_OF_CORES > 1 )
        /* Check that xCoreID is valid */
        configASSERT( ( ( xCoreID >= 0 ) && ( xCoreID < configNUMBER_OF_CORES ) ) || ( xCoreID == tskNO_AFFINITY ) );
    #else
        /* Hard code xCoreID to 0 */
        xCoreID = 0;
    #endif

    #if ( portUSING_MPU_WRAPPERS == 1 )
        /* Should the task be created in privileged mode? */
        BaseType_t xRunPrivileged;

        if( ( uxPriority & portPRIVILEGE_BIT ) != 0U )
        {
            xRunPrivileged = pdTRUE;
        }
        else
        {
            xRunPrivileged = pdFALSE;
        }
        /* The privilege bit is only a flag - strip it so the remaining value
         * can be used as a plain priority. */
        uxPriority &= ~portPRIVILEGE_BIT;
    #endif /* portUSING_MPU_WRAPPERS == 1 */

    /* Avoid dependency on memset() if it is not required. */
    #if ( tskSET_NEW_STACKS_TO_KNOWN_VALUE == 1 )
    {
        /* Fill the stack with a known value to assist debugging. */
        ( void ) memset( pxNewTCB->pxStack, ( int ) tskSTACK_FILL_BYTE, ( size_t ) ulStackDepth * sizeof( StackType_t ) );
    }
    #endif /* tskSET_NEW_STACKS_TO_KNOWN_VALUE */

    /* Calculate the top of stack address. This depends on whether the stack
     * grows from high memory to low (as per the 80x86) or vice versa.
     * portSTACK_GROWTH is used to make the result positive or negative as required
     * by the port. */
    #if ( portSTACK_GROWTH < 0 )
    {
        pxTopOfStack = &( pxNewTCB->pxStack[ ulStackDepth - ( uint32_t ) 1 ] );
        /* Round the top of stack DOWN to the port's alignment boundary. */
        pxTopOfStack = ( StackType_t * ) ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) ); /*lint !e923 !e9033 !e9078 MISRA exception. Avoiding casts between pointers and integers is not practical. Size differences accounted for using portPOINTER_SIZE_TYPE type. Checked by assert(). */

        /* Check the alignment of the calculated top of stack is correct. */
        configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) );

        #if ( configRECORD_STACK_HIGH_ADDRESS == 1 )
        {
            /* Also record the stack's high address, which may assist
             * debugging. */
            pxNewTCB->pxEndOfStack = pxTopOfStack;
        }
        #endif /* configRECORD_STACK_HIGH_ADDRESS */
    }
    #else /* portSTACK_GROWTH */
    {
        pxTopOfStack = pxNewTCB->pxStack;

        /* Check the alignment of the stack buffer is correct. */
        configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxNewTCB->pxStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) );

        /* The other extreme of the stack space is required if stack checking is
         * performed. */
        pxNewTCB->pxEndOfStack = pxNewTCB->pxStack + ( ulStackDepth - ( uint32_t ) 1 );
    }
    #endif /* portSTACK_GROWTH */

    /* Store the task name in the TCB. */
    if( pcName != NULL )
    {
        for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
        {
            pxNewTCB->pcTaskName[ x ] = pcName[ x ];

            /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than
             * configMAX_TASK_NAME_LEN characters just in case the memory after the
             * string is not accessible (extremely unlikely). */
            if( pcName[ x ] == ( char ) 0x00 )
            {
                break;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }

        /* Ensure the name string is terminated in the case that the string length
         * was greater or equal to configMAX_TASK_NAME_LEN. */
        pxNewTCB->pcTaskName[ configMAX_TASK_NAME_LEN - 1 ] = '\0';
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }

    /* This is used as an array index so must ensure it's not too large. */
    configASSERT( uxPriority < configMAX_PRIORITIES );

    /* Silently clamp an out-of-range priority (the assert above already fired
     * in debug builds). */
    if( uxPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
    {
        uxPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }

    pxNewTCB->uxPriority = uxPriority;
    pxNewTCB->xCoreID = xCoreID; /* Todo: Remove xCoreID for single core builds (IDF-7894) */
    #if ( configUSE_MUTEXES == 1 )
    {
        /* Remember the original priority so it can be restored after
         * priority inheritance ends. */
        pxNewTCB->uxBasePriority = uxPriority;
    }
    #endif /* configUSE_MUTEXES */

    vListInitialiseItem( &( pxNewTCB->xStateListItem ) );
    vListInitialiseItem( &( pxNewTCB->xEventListItem ) );

    /* Set the pxNewTCB as a link back from the ListItem_t. This is so we can get
     * back to the containing TCB from a generic item in a list. */
    listSET_LIST_ITEM_OWNER( &( pxNewTCB->xStateListItem ), pxNewTCB );

    /* Event lists are always in priority order. */
    listSET_LIST_ITEM_VALUE( &( pxNewTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
    listSET_LIST_ITEM_OWNER( &( pxNewTCB->xEventListItem ), pxNewTCB );

    #if ( portUSING_MPU_WRAPPERS == 1 )
    {
        vPortStoreTaskMPUSettings( &( pxNewTCB->xMPUSettings ), xRegions, pxNewTCB->pxStack, ulStackDepth );
    }
    #else
    {
        /* Avoid compiler warning about unreferenced parameter. */
        ( void ) xRegions;
    }
    #endif

    #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) )
    {
        /* Allocate and initialize memory for the task's TLS Block. */
        configINIT_TLS_BLOCK( pxNewTCB->xTLSBlock );
    }
    #endif

    /* Initialize the TCB stack to look as if the task was already running,
     * but had been interrupted by the scheduler. The return address is set
     * to the start of the task function. Once the stack has been initialised
     * the top of stack variable is updated. */
    #if ( portUSING_MPU_WRAPPERS == 1 )
    {
        /* If the port has capability to detect stack overflow,
         * pass the stack end address to the stack initialization
         * function as well. */
        #if ( portHAS_STACK_OVERFLOW_CHECKING == 1 )
        {
            #if ( portSTACK_GROWTH < 0 )
            {
                pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters, xRunPrivileged );
            }
            #else /* portSTACK_GROWTH */
            {
                pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters, xRunPrivileged );
            }
            #endif /* portSTACK_GROWTH */
        }
        #else /* portHAS_STACK_OVERFLOW_CHECKING */
        {
            pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged );
        }
        #endif /* portHAS_STACK_OVERFLOW_CHECKING */
    }
    #else /* portUSING_MPU_WRAPPERS */
    {
        /* If the port has capability to detect stack overflow,
         * pass the stack end address to the stack initialization
         * function as well. */
        #if ( portHAS_STACK_OVERFLOW_CHECKING == 1 )
        {
            #if ( portSTACK_GROWTH < 0 )
            {
                pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters );
            }
            #else /* portSTACK_GROWTH */
            {
                pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters );
            }
            #endif /* portSTACK_GROWTH */
        }
        #else /* portHAS_STACK_OVERFLOW_CHECKING */
        {
            pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters );
        }
        #endif /* portHAS_STACK_OVERFLOW_CHECKING */
    }
    #endif /* portUSING_MPU_WRAPPERS */

    if( pxCreatedTask != NULL )
    {
        /* Pass the handle out in an anonymous way. The handle can be used to
         * change the created task's priority, delete the created task, etc.*/
        *pxCreatedTask = ( TaskHandle_t ) pxNewTCB;
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }
}
  1056. /*-----------------------------------------------------------*/
/*
 * Place a freshly initialised task under scheduler control: update the task
 * counters, elect the task as a core's "current" task where appropriate
 * (before the scheduler starts), add it to the ready list, and yield if it
 * should preempt a running task.  The whole body runs inside the kernel
 * critical section so the SMP yield check can be made atomically.
 */
static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
{
    /* Ensure interrupts don't access the task lists while the lists are being
     * updated. */
    taskENTER_CRITICAL( &xKernelLock );
    {
        uxCurrentNumberOfTasks++;

        if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 )
        {
            /* This is the first task to be created so do the preliminary
             * initialisation required. We will not recover if this call
             * fails, but we will report the failure. */
            prvInitialiseTaskLists();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        if( ( pxCurrentTCBs[ 0 ] == NULL ) && ( taskIS_AFFINITY_COMPATIBLE( 0, pxNewTCB->xCoreID ) == pdTRUE ) )
        {
            /* On core 0, there are no other tasks, or all the other tasks
             * are in the suspended state - make this the current task. */
            pxCurrentTCBs[ 0 ] = pxNewTCB;
        }

        #if ( configNUMBER_OF_CORES > 1 )
            else if( ( pxCurrentTCBs[ 1 ] == NULL ) && ( taskIS_AFFINITY_COMPATIBLE( 1, pxNewTCB->xCoreID ) == pdTRUE ) )
            {
                /* On core 1, there are no other tasks, or all the other tasks
                 * are in the suspended state - make this the current task. */
                pxCurrentTCBs[ 1 ] = pxNewTCB;
            }
        #endif /* configNUMBER_OF_CORES > 1 */
        else
        {
            /* If the scheduler is not already running, make this task the
             * current task if it is the highest priority task to be created
             * so far. */
            if( xSchedulerRunning == pdFALSE )
            {
                if( ( pxCurrentTCBs[ 0 ] != NULL ) &&
                    ( taskIS_AFFINITY_COMPATIBLE( 0, pxNewTCB->xCoreID ) == pdTRUE ) &&
                    ( pxCurrentTCBs[ 0 ]->uxPriority <= pxNewTCB->uxPriority ) )
                {
                    pxCurrentTCBs[ 0 ] = pxNewTCB;
                }

                #if ( configNUMBER_OF_CORES > 1 )
                    else if( ( pxCurrentTCBs[ 1 ] != NULL ) &&
                             ( taskIS_AFFINITY_COMPATIBLE( 1, pxNewTCB->xCoreID ) == pdTRUE ) &&
                             ( pxCurrentTCBs[ 1 ]->uxPriority <= pxNewTCB->uxPriority ) )
                    {
                        pxCurrentTCBs[ 1 ] = pxNewTCB;
                    }
                #endif /* configNUMBER_OF_CORES > 1 */
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }

        /* Bumped so kernel-aware debuggers know the task lists changed. */
        uxTaskNumber++;

        #if ( configUSE_TRACE_FACILITY == 1 )
        {
            /* Add a counter into the TCB for tracing only. */
            pxNewTCB->uxTCBNumber = uxTaskNumber;
        }
        #endif /* configUSE_TRACE_FACILITY */
        traceTASK_CREATE( pxNewTCB );

        prvAddTaskToReadyList( pxNewTCB );

        portSETUP_TCB( pxNewTCB );

        if( xSchedulerRunning != pdFALSE )
        {
            /* If the created task is of a higher priority than the current task
             * then it should run now. */
            if( taskIS_YIELD_REQUIRED( pxNewTCB, portGET_CORE_ID(), pdTRUE ) == pdTRUE )
            {
                taskYIELD_IF_USING_PREEMPTION();
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
    taskEXIT_CRITICAL( &xKernelLock );

    /* SINGLE-CORE MODIFICATION: Extended critical section so that SMP can check
     * for yield inside critical section. */
}
  1152. /*-----------------------------------------------------------*/
#if ( INCLUDE_vTaskDelete == 1 )

/*
 * Delete the task referenced by xTaskToDelete (NULL means the calling task).
 *
 * A task that is currently running (on this core, or - in SMP builds - on the
 * other core) cannot be freed immediately: it is moved to the termination
 * list and the idle task later frees its TCB/stack via
 * prvCheckTasksWaitingTermination().  A non-running task has its TCB freed
 * here, outside the critical section.  If the caller deleted itself, a yield
 * is forced at the end so it stops executing.
 */
    void vTaskDelete( TaskHandle_t xTaskToDelete )
    {
        TCB_t * pxTCB;
        BaseType_t xSelfDelete;    /* pdTRUE when the caller is deleting itself. */
        BaseType_t xIsCurRunning;  /* pdTRUE when the target is running on any core. */

        taskENTER_CRITICAL( &xKernelLock );
        {
            /* Get current core ID as we can no longer be preempted. */
            const BaseType_t xCurCoreID = portGET_CORE_ID();

            /* If null is passed in here then it is the calling task that is
             * being deleted. */
            pxTCB = prvGetTCBFromHandle( xTaskToDelete );

            /* Remove task from the ready/delayed list. */
            if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
            {
                taskRESET_READY_PRIORITY( pxTCB->uxPriority );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            /* Is the task waiting on an event also? */
            if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
            {
                ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            /* Increment the uxTaskNumber also so kernel aware debuggers can
             * detect that the task lists need re-generating. This is done before
             * portPRE_TASK_DELETE_HOOK() as in the Windows port that macro will
             * not return. */
            uxTaskNumber++;

            /* Check if the task is deleting itself, or is currently running on
             * the other core. */
            if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, xCurCoreID ) == pdTRUE )
            {
                xSelfDelete = pdTRUE;
                xIsCurRunning = pdTRUE;
            }

            #if ( configNUMBER_OF_CORES > 1 )
                else if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, !xCurCoreID ) == pdTRUE )
                {
                    xSelfDelete = pdFALSE;
                    xIsCurRunning = pdTRUE;
                }
            #endif /* configNUMBER_OF_CORES > 1 */
            else
            {
                xSelfDelete = pdFALSE;
                xIsCurRunning = pdFALSE;
            }

            if( xIsCurRunning == pdTRUE )
            {
                /* A task is deleting itself or is currently running. This
                 * cannot complete within the task itself, as a context switch
                 * to another task is required. Place the task in the
                 * termination list. The idle task will check the termination
                 * list and free up any memory allocated by the scheduler for
                 * the TCB and stack of the deleted task. */
                vListInsertEnd( &xTasksWaitingTermination, &( pxTCB->xStateListItem ) );

                /* Increment the ucTasksDeleted variable so the idle task knows
                 * there is a task that has been deleted and that it should therefore
                 * check the xTasksWaitingTermination list. */
                ++uxDeletedTasksWaitingCleanUp;

                /* Call the delete hook before portPRE_TASK_DELETE_HOOK() as
                 * portPRE_TASK_DELETE_HOOK() does not return in the Win32 port. */
                traceTASK_DELETE( pxTCB );

                /* The pre-delete hook is primarily for the Windows simulator,
                 * in which Windows specific clean up operations are performed,
                 * after which it is not possible to yield away from this task -
                 * hence xYieldPending is used to latch that a context switch is
                 * required. */
                portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending[ xCurCoreID ] );

                #if ( configNUMBER_OF_CORES > 1 )
                    if( xSelfDelete == pdFALSE )
                    {
                        /* The task that is being deleted is currently running
                         * on the other core. Send a yield request to the other
                         * core so that the task is swapped out. */
                        taskYIELD_CORE( !xCurCoreID );
                    }
                #else /* configNUMBER_OF_CORES > 1 */
                    /* xCurCoreID is unused */
                    ( void ) xCurCoreID;
                #endif /* configNUMBER_OF_CORES > 1 */
            }
            else
            {
                --uxCurrentNumberOfTasks;
                traceTASK_DELETE( pxTCB );

                /* Reset the next expected unblock time in case it referred to
                 * the task that has just been deleted. */
                prvResetNextTaskUnblockTime();
            }
        }
        taskEXIT_CRITICAL( &xKernelLock );

        /* If the task is currently running, call prvDeleteTCB from outside of
         * critical section. If a task is currently running, prvDeleteTCB is
         * called from prvCheckTasksWaitingTermination which is called from
         * Idle task. */
        if( xIsCurRunning == pdFALSE )
        {
            prvDeleteTCB( pxTCB );
        }

        /* For SMP, we need to take the kernel lock here as we are about to
         * access kernel data structures. */
        taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
        {
            /* Force a reschedule if it is the currently running task that has just
             * been deleted. */
            if( xSchedulerRunning != pdFALSE )
            {
                if( xSelfDelete == pdTRUE )
                {
                    configASSERT( taskIS_SCHEDULER_SUSPENDED() == pdFALSE );
                    portYIELD_WITHIN_API();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        /* Release the previously taken kernel lock. */
        taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
    }
#endif /* INCLUDE_vTaskDelete */
  1284. /*-----------------------------------------------------------*/
#if ( INCLUDE_xTaskDelayUntil == 1 )

/*
 * Block the calling task until the absolute tick time
 * (*pxPreviousWakeTime + xTimeIncrement), producing a fixed-frequency
 * periodic wake-up that is immune to drift from execution time.
 * *pxPreviousWakeTime is advanced by xTimeIncrement on every call.
 *
 * Returns pdTRUE when the task was actually delayed, pdFALSE when the wake
 * time had already passed (in which case no delay occurs).
 *
 * The overflow handling below treats the tick counter as wrapping modular
 * arithmetic: a delay only happens when the wake time is still "ahead" of
 * the current tick count once any wrap of either value is accounted for.
 */
    BaseType_t xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
                                const TickType_t xTimeIncrement )
    {
        TickType_t xTimeToWake;
        BaseType_t xAlreadyYielded, xShouldDelay = pdFALSE;

        configASSERT( pxPreviousWakeTime );
        configASSERT( ( xTimeIncrement > 0U ) );
        configASSERT( taskIS_SCHEDULER_SUSPENDED() == pdFALSE );

        prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock );
        {
            /* Minor optimisation. The tick count cannot change in this
             * block. */
            const TickType_t xConstTickCount = xTickCount;

            /* Generate the tick time at which the task wants to wake. */
            xTimeToWake = *pxPreviousWakeTime + xTimeIncrement;

            if( xConstTickCount < *pxPreviousWakeTime )
            {
                /* The tick count has overflowed since this function was
                 * lasted called. In this case the only time we should ever
                 * actually delay is if the wake time has also overflowed,
                 * and the wake time is greater than the tick time. When this
                 * is the case it is as if neither time had overflowed. */
                if( ( xTimeToWake < *pxPreviousWakeTime ) && ( xTimeToWake > xConstTickCount ) )
                {
                    xShouldDelay = pdTRUE;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* The tick time has not overflowed. In this case we will
                 * delay if either the wake time has overflowed, and/or the
                 * tick time is less than the wake time. */
                if( ( xTimeToWake < *pxPreviousWakeTime ) || ( xTimeToWake > xConstTickCount ) )
                {
                    xShouldDelay = pdTRUE;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }

            /* Update the wake time ready for the next call. */
            *pxPreviousWakeTime = xTimeToWake;

            if( xShouldDelay != pdFALSE )
            {
                traceTASK_DELAY_UNTIL( xTimeToWake );

                /* prvAddCurrentTaskToDelayedList() needs the block time, not
                 * the time to wake, so subtract the current tick count. */
                prvAddCurrentTaskToDelayedList( xTimeToWake - xConstTickCount, pdFALSE );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        xAlreadyYielded = prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock );

        /* Force a reschedule if xTaskResumeAll has not already done so, we may
         * have put ourselves to sleep. */
        if( xAlreadyYielded == pdFALSE )
        {
            portYIELD_WITHIN_API();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        return xShouldDelay;
    }
#endif /* INCLUDE_xTaskDelayUntil */
  1359. /*-----------------------------------------------------------*/
  1360. #if ( INCLUDE_vTaskDelay == 1 )
  1361. void vTaskDelay( const TickType_t xTicksToDelay )
  1362. {
  1363. BaseType_t xAlreadyYielded = pdFALSE;
  1364. /* A delay time of zero just forces a reschedule. */
  1365. if( xTicksToDelay > ( TickType_t ) 0U )
  1366. {
  1367. configASSERT( taskIS_SCHEDULER_SUSPENDED() == pdFALSE );
  1368. prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock );
  1369. {
  1370. traceTASK_DELAY();
  1371. /* A task that is removed from the event list while the
  1372. * scheduler is suspended will not get placed in the ready
  1373. * list or removed from the blocked list until the scheduler
  1374. * is resumed.
  1375. *
  1376. * This task cannot be in an event list as it is the currently
  1377. * executing task. */
  1378. prvAddCurrentTaskToDelayedList( xTicksToDelay, pdFALSE );
  1379. }
  1380. xAlreadyYielded = prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock );
  1381. }
  1382. else
  1383. {
  1384. mtCOVERAGE_TEST_MARKER();
  1385. }
  1386. /* Force a reschedule if xTaskResumeAll has not already done so, we may
  1387. * have put ourselves to sleep. */
  1388. if( xAlreadyYielded == pdFALSE )
  1389. {
  1390. portYIELD_WITHIN_API();
  1391. }
  1392. else
  1393. {
  1394. mtCOVERAGE_TEST_MARKER();
  1395. }
  1396. }
  1397. #endif /* INCLUDE_vTaskDelay */
  1398. /*-----------------------------------------------------------*/
#if ( ( INCLUDE_eTaskGetState == 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_xTaskAbortDelay == 1 ) )

/*
 * Classify the state of xTask (eRunning / eBlocked / eSuspended / eDeleted /
 * eReady) by inspecting which kernel list its state list item currently
 * belongs to.  Runs inside the kernel critical section so the lists cannot
 * change mid-inspection.
 */
    eTaskState eTaskGetState( TaskHandle_t xTask )
    {
        eTaskState eReturn;
        List_t const * pxStateList;
        List_t const * pxDelayedList;
        List_t const * pxOverflowedDelayedList;
        const TCB_t * const pxTCB = xTask;

        configASSERT( pxTCB );

        taskENTER_CRITICAL( &xKernelLock );
        {
            if( taskIS_CURRENTLY_RUNNING( pxTCB ) == pdTRUE )
            {
                /* The task calling this function is querying its own state. */
                eReturn = eRunning;
            }
            else
            {
                pxStateList = listLIST_ITEM_CONTAINER( &( pxTCB->xStateListItem ) );
                pxDelayedList = pxDelayedTaskList;
                pxOverflowedDelayedList = pxOverflowDelayedTaskList;

                if( ( pxStateList == pxDelayedList ) || ( pxStateList == pxOverflowedDelayedList ) )
                {
                    /* The task being queried is referenced from one of the Blocked
                     * lists. */
                    eReturn = eBlocked;
                }

                #if ( INCLUDE_vTaskSuspend == 1 )
                    else if( pxStateList == &xSuspendedTaskList )
                    {
                        /* The task being queried is referenced from the suspended
                         * list. Is it genuinely suspended or is it blocked
                         * indefinitely? */
                        if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL )
                        {
                            #if ( configUSE_TASK_NOTIFICATIONS == 1 )
                            {
                                BaseType_t x;

                                /* The task does not appear on the event list item of
                                 * and of the RTOS objects, but could still be in the
                                 * blocked state if it is waiting on its notification
                                 * rather than waiting on an object. If not, is
                                 * suspended. */
                                eReturn = eSuspended;

                                for( x = 0; x < configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ )
                                {
                                    if( pxTCB->ucNotifyState[ x ] == taskWAITING_NOTIFICATION )
                                    {
                                        eReturn = eBlocked;
                                        break;
                                    }
                                }
                            }
                            #else /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
                            {
                                eReturn = eSuspended;
                            }
                            #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
                        }
                        else
                        {
                            /* Still linked to an event list: blocked with an
                             * infinite timeout, not genuinely suspended. */
                            eReturn = eBlocked;
                        }
                    }
                #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */

                #if ( INCLUDE_vTaskDelete == 1 )
                    else if( ( pxStateList == &xTasksWaitingTermination ) || ( pxStateList == NULL ) )
                    {
                        /* The task being queried is referenced from the deleted
                         * tasks list, or it is not referenced from any lists at
                         * all. */
                        eReturn = eDeleted;
                    }
                #endif

                else /*lint !e525 Negative indentation is intended to make use of pre-processor clearer. */
                {
                    /* If the task is not in any other state, it must be in the
                     * Ready (including pending ready) state. */
                    eReturn = eReady;
                }
            }
        }
        taskEXIT_CRITICAL( &xKernelLock );

        return eReturn;
    } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
#endif /* INCLUDE_eTaskGetState */
  1485. /*-----------------------------------------------------------*/
  1486. #if ( INCLUDE_uxTaskPriorityGet == 1 )
  1487. UBaseType_t uxTaskPriorityGet( const TaskHandle_t xTask )
  1488. {
  1489. TCB_t const * pxTCB;
  1490. UBaseType_t uxReturn;
  1491. taskENTER_CRITICAL( &xKernelLock );
  1492. {
  1493. /* If null is passed in here then it is the priority of the task
  1494. * that called uxTaskPriorityGet() that is being queried. */
  1495. pxTCB = prvGetTCBFromHandle( xTask );
  1496. uxReturn = pxTCB->uxPriority;
  1497. }
  1498. taskEXIT_CRITICAL( &xKernelLock );
  1499. return uxReturn;
  1500. }
  1501. #endif /* INCLUDE_uxTaskPriorityGet */
  1502. /*-----------------------------------------------------------*/
  1503. #if ( INCLUDE_uxTaskPriorityGet == 1 )
  1504. UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask )
  1505. {
  1506. TCB_t const * pxTCB;
  1507. UBaseType_t uxReturn, uxSavedInterruptState;
  1508. /* RTOS ports that support interrupt nesting have the concept of a
  1509. * maximum system call (or maximum API call) interrupt priority.
  1510. * Interrupts that are above the maximum system call priority are keep
  1511. * permanently enabled, even when the RTOS kernel is in a critical section,
  1512. * but cannot make any calls to FreeRTOS API functions. If configASSERT()
  1513. * is defined in FreeRTOSConfig.h then
  1514. * portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
  1515. * failure if a FreeRTOS API function is called from an interrupt that has
  1516. * been assigned a priority above the configured maximum system call
  1517. * priority. Only FreeRTOS functions that end in FromISR can be called
  1518. * from interrupts that have been assigned a priority at or (logically)
  1519. * below the maximum system call interrupt priority. FreeRTOS maintains a
  1520. * separate interrupt safe API to ensure interrupt entry is as fast and as
  1521. * simple as possible. More information (albeit Cortex-M specific) is
  1522. * provided on the following link:
  1523. * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
  1524. portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
  1525. prvENTER_CRITICAL_OR_MASK_ISR( &xKernelLock, uxSavedInterruptState );
  1526. {
  1527. /* If null is passed in here then it is the priority of the calling
  1528. * task that is being queried. */
  1529. pxTCB = prvGetTCBFromHandle( xTask );
  1530. uxReturn = pxTCB->uxPriority;
  1531. }
  1532. prvEXIT_CRITICAL_OR_UNMASK_ISR( &xKernelLock, uxSavedInterruptState );
  1533. return uxReturn;
  1534. }
  1535. #endif /* INCLUDE_uxTaskPriorityGet */
  1536. /*-----------------------------------------------------------*/
#if ( INCLUDE_vTaskPrioritySet == 1 )

/* Change the priority of xTask (or of the calling task when xTask is NULL).
 * May trigger a yield on this core, or (SMP) on the other core, when the
 * change makes a different task the highest priority ready task. When mutexes
 * are in use, only the base priority is changed unconditionally; the effective
 * priority is left alone while the task holds an inherited priority. */
    void vTaskPrioritySet( TaskHandle_t xTask,
                           UBaseType_t uxNewPriority )
    {
        TCB_t * pxTCB;
        UBaseType_t uxCurrentBasePriority, uxPriorityUsedOnEntry;
        BaseType_t xYieldRequired = pdFALSE;

        configASSERT( uxNewPriority < configMAX_PRIORITIES );

        /* Ensure the new priority is valid: silently clamp out-of-range
         * values to the highest legal priority. */
        if( uxNewPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
        {
            uxNewPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        taskENTER_CRITICAL( &xKernelLock );
        {
            /* Get current core ID as we can no longer be preempted. */
            const BaseType_t xCurCoreID = portGET_CORE_ID();

            /* If null is passed in here then it is the priority of the calling
             * task that is being changed. */
            pxTCB = prvGetTCBFromHandle( xTask );

            traceTASK_PRIORITY_SET( pxTCB, uxNewPriority );

            #if ( configUSE_MUTEXES == 1 )
            {
                /* Compare against the base priority: the effective priority
                 * may temporarily be higher due to priority inheritance. */
                uxCurrentBasePriority = pxTCB->uxBasePriority;
            }
            #else
            {
                uxCurrentBasePriority = pxTCB->uxPriority;
            }
            #endif

            if( uxCurrentBasePriority != uxNewPriority )
            {
                /* The priority change may have readied a task of higher
                 * priority than the calling task. */
                if( uxNewPriority > uxCurrentBasePriority )
                {
                    if( taskIS_CURRENTLY_RUNNING( pxTCB ) == pdFALSE )
                    {
                        /* The priority of a task other than the currently
                         * running task is being raised. Is the priority being
                         * raised above that of the running task? */
                        if( taskIS_YIELD_REQUIRED_USING_PRIORITY( uxNewPriority, pxTCB->xCoreID, portGET_CORE_ID(), pdTRUE ) == pdTRUE )
                        {
                            xYieldRequired = pdTRUE;
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        /* The priority of the running task is being raised,
                         * but the running task must already be the highest
                         * priority task able to run so no yield is required. */
                    }
                }
                else if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, xCurCoreID ) == pdTRUE )
                {
                    /* Lowering the priority of task currently running on the
                     * current core means there may now be another task of
                     * higher priority that is ready to execute. */
                    xYieldRequired = pdTRUE;
                }
                #if ( configNUMBER_OF_CORES > 1 )
                    else if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, !xCurCoreID ) == pdTRUE )
                    {
                        /* Lowering the priority of task currently running on the
                         * other core also means there may now be another task of
                         * higher priority that is ready to execute. */
                        taskYIELD_CORE( !xCurCoreID );
                    }
                #endif /* configNUMBER_OF_CORES > 1 */
                else
                {
                    /* Setting the priority of any other task down does not
                     * require a yield as the running task must be above the
                     * new priority of the task being modified. */
                }

                /* Remember the ready list the task might be referenced from
                 * before its uxPriority member is changed so the
                 * taskRESET_READY_PRIORITY() macro can function correctly. */
                uxPriorityUsedOnEntry = pxTCB->uxPriority;

                #if ( configUSE_MUTEXES == 1 )
                {
                    /* Only change the priority being used if the task is not
                     * currently using an inherited priority (inheritance is
                     * unwound later, in the mutex give path). */
                    if( pxTCB->uxBasePriority == pxTCB->uxPriority )
                    {
                        pxTCB->uxPriority = uxNewPriority;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* The base priority gets set whatever. */
                    pxTCB->uxBasePriority = uxNewPriority;
                }
                #else /* if ( configUSE_MUTEXES == 1 ) */
                {
                    pxTCB->uxPriority = uxNewPriority;
                }
                #endif /* if ( configUSE_MUTEXES == 1 ) */

                /* Only reset the event list item value if the value is not
                 * being used for anything else (event list values are stored
                 * inverted so higher priority sorts first). */
                if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
                {
                    listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxNewPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                /* If the task is in the blocked or suspended list we need do
                 * nothing more than change its priority variable. However, if
                 * the task is in a ready list it needs to be removed and placed
                 * in the list appropriate to its new priority. */
                if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE )
                {
                    /* The task is currently in its ready list - remove before
                     * adding it to its new ready list. As we are in a critical
                     * section we can do this even if the scheduler is suspended. */
                    if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
                    {
                        /* It is known that the task is in its ready list so
                         * there is no need to check again and the port level
                         * reset macro can be called directly. */
                        portRESET_READY_PRIORITY( uxPriorityUsedOnEntry, uxTopReadyPriority );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    prvAddTaskToReadyList( pxTCB );
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                if( xYieldRequired != pdFALSE )
                {
                    taskYIELD_IF_USING_PREEMPTION();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                /* Remove compiler warning about unused variables when the port
                 * optimised task selection is not being used. */
                ( void ) uxPriorityUsedOnEntry;
            }
        }
        taskEXIT_CRITICAL( &xKernelLock );
    }

#endif /* INCLUDE_vTaskPrioritySet */
  1696. /*-----------------------------------------------------------*/
#if ( INCLUDE_vTaskSuspend == 1 )

/* Move xTaskToSuspend (or the calling task when NULL) to the suspended list.
 * The task is removed from any ready/delayed list and any event list it is
 * waiting on. If the suspended task was running (on either core, for SMP),
 * a context switch is arranged for the affected core. */
    void vTaskSuspend( TaskHandle_t xTaskToSuspend )
    {
        TCB_t * pxTCB;

        taskENTER_CRITICAL( &xKernelLock );
        {
            /* Get current core ID as we can no longer be preempted. */
            const BaseType_t xCurCoreID = portGET_CORE_ID();

            /* If null is passed in here then it is the running task that is
             * being suspended. */
            pxTCB = prvGetTCBFromHandle( xTaskToSuspend );

            traceTASK_SUSPEND( pxTCB );

            /* Remove task from the ready/delayed list and place in the
             * suspended list. A return of zero means the task's old priority
             * list is now empty, so the top-ready-priority tracking must be
             * reset. */
            if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
            {
                taskRESET_READY_PRIORITY( pxTCB->uxPriority );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            /* Is the task waiting on an event also? */
            if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
            {
                ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            vListInsertEnd( &xSuspendedTaskList, &( pxTCB->xStateListItem ) );

            #if ( configUSE_TASK_NOTIFICATIONS == 1 )
            {
                BaseType_t x;

                for( x = 0; x < configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ )
                {
                    if( pxTCB->ucNotifyState[ x ] == taskWAITING_NOTIFICATION )
                    {
                        /* The task was blocked to wait for a notification, but is
                         * now suspended, so no notification was received. */
                        pxTCB->ucNotifyState[ x ] = taskNOT_WAITING_NOTIFICATION;
                    }
                }
            }
            #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */

            if( xSchedulerRunning != pdFALSE )
            {
                /* Reset the next expected unblock time in case it referred to the
                 * task that is now in the Suspended state. */
                prvResetNextTaskUnblockTime();
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, xCurCoreID ) == pdTRUE )
            {
                if( xSchedulerRunning != pdFALSE )
                {
                    /* The current task has just been suspended. A suspended
                     * scheduler must not attempt to switch away from it. */
                    configASSERT( uxSchedulerSuspended[ xCurCoreID ] == 0 );
                    portYIELD_WITHIN_API();
                }
                else
                {
                    /* The scheduler is not running, but the task that was pointed
                     * to by pxCurrentTCBs has just been suspended and pxCurrentTCBs
                     * must be adjusted to point to a different task. */
                    if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks ) /*lint !e931 Right has no side effect, just volatile. */
                    {
                        /* No other tasks are ready, so set pxCurrentTCBs back to
                         * NULL so when the next task is created pxCurrentTCBs will
                         * be set to point to it no matter what its relative priority
                         * is. */
                        pxCurrentTCBs[ xCurCoreID ] = NULL;
                    }
                    else
                    {
                        vTaskSwitchContext();
                    }
                }
            }
            #if ( configNUMBER_OF_CORES > 1 )
                else if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, !xCurCoreID ) == pdTRUE )
                {
                    /* The other core's current task has just been suspended */
                    if( xSchedulerRunning != pdFALSE )
                    {
                        taskYIELD_CORE( !xCurCoreID );
                    }
                    else
                    {
                        /* The scheduler is not running, but the task that was
                         * pointed to by pxCurrentTCBs[ otherCore ] has just been
                         * suspended. We simply set the
                         * pxCurrentTCBs[ otherCore ] to NULL for now.
                         *
                         * Todo: Update vTaskSwitchContext() to be able to run
                         * on behalf of the other core. */
                        pxCurrentTCBs[ !xCurCoreID ] = NULL;
                    }
                }
            #endif /* configNUMBER_OF_CORES > 1 */
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        taskEXIT_CRITICAL( &xKernelLock );
    }

#endif /* INCLUDE_vTaskSuspend */
  1809. /*-----------------------------------------------------------*/
#if ( INCLUDE_vTaskSuspend == 1 )

/* Return pdTRUE only if xTask is genuinely in the Suspended state: it must be
 * on the suspended list, must NOT have already been readied from an ISR (i.e.
 * not on either core's pending ready list), and must not be on any event list
 * (a task blocked with an infinite timeout also lives on the suspended list
 * but keeps its event list item in use). */
    static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask )
    {
        BaseType_t xReturn = pdFALSE;
        const TCB_t * const pxTCB = xTask;

        /* Accesses xPendingReadyList so must be called from a critical
         * section. */

        /* It does not make sense to check if the calling task is suspended. */
        configASSERT( xTask );

        /* Is the task being resumed actually in the suspended list? */
        if( listIS_CONTAINED_WITHIN( &xSuspendedTaskList, &( pxTCB->xStateListItem ) ) != pdFALSE )
        {
            /* Has the task already been resumed from within an ISR? Check the
             * pending ready list of each core. */
            #if ( configNUMBER_OF_CORES > 1 )
                if( ( listIS_CONTAINED_WITHIN( &xPendingReadyList[ 0 ], &( pxTCB->xEventListItem ) ) == pdFALSE ) &&
                    ( listIS_CONTAINED_WITHIN( &xPendingReadyList[ 1 ], &( pxTCB->xEventListItem ) ) == pdFALSE ) )
            #else
                if( listIS_CONTAINED_WITHIN( &xPendingReadyList[ 0 ], &( pxTCB->xEventListItem ) ) == pdFALSE )
            #endif /* configNUMBER_OF_CORES > 1 */
            {
                /* Is it in the suspended list because it is in the Suspended
                 * state, or because it is blocked with no timeout? */
                if( listIS_CONTAINED_WITHIN( NULL, &( pxTCB->xEventListItem ) ) != pdFALSE ) /*lint !e961. The cast is only redundant when NULL is used. */
                {
                    xReturn = pdTRUE;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        return xReturn;
    } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */

#endif /* INCLUDE_vTaskSuspend */
  1853. /*-----------------------------------------------------------*/
#if ( INCLUDE_vTaskSuspend == 1 )

/* Move a previously suspended task back to the ready list. A no-op if the
 * task is not actually suspended (e.g. it was already resumed from an ISR) or
 * if it is currently running. May yield if the resumed task should preempt. */
    void vTaskResume( TaskHandle_t xTaskToResume )
    {
        TCB_t * const pxTCB = xTaskToResume;

        /* It does not make sense to resume the calling task. */
        configASSERT( xTaskToResume );

        taskENTER_CRITICAL( &xKernelLock );
        {
            /* The parameter cannot be NULL as it is impossible to resume the
             * currently executing task. Note: on SMP the "currently running"
             * check covers either core. */
            if( ( taskIS_CURRENTLY_RUNNING( pxTCB ) == pdFALSE ) && ( pxTCB != NULL ) )
            {
                if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
                {
                    traceTASK_RESUME( pxTCB );

                    /* The ready list can be accessed even if the scheduler is
                     * suspended because this is inside a critical section. */
                    ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
                    prvAddTaskToReadyList( pxTCB );

                    /* A higher priority task may have just been resumed. */
                    if( taskIS_YIELD_REQUIRED( pxTCB, portGET_CORE_ID(), pdTRUE ) == pdTRUE )
                    {
                        /* This yield may not cause the task just resumed to run,
                         * but will leave the lists in the correct state for the
                         * next yield. */
                        taskYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        taskEXIT_CRITICAL( &xKernelLock );
    }

#endif /* INCLUDE_vTaskSuspend */
  1899. /*-----------------------------------------------------------*/
#if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) )

/* Interrupt-safe version of vTaskResume(). Returns pdTRUE when the caller
 * should request a context switch (e.g. via portYIELD_FROM_ISR) because the
 * resumed task should preempt the task the interrupt interrupted. If the
 * scheduler cannot currently schedule the task, it is parked on the pending
 * ready list and moved when the scheduler is resumed. */
    BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume )
    {
        BaseType_t xYieldRequired = pdFALSE;
        TCB_t * const pxTCB = xTaskToResume;
        UBaseType_t uxSavedInterruptStatus;

        configASSERT( xTaskToResume );

        /* RTOS ports that support interrupt nesting have the concept of a
         * maximum system call (or maximum API call) interrupt priority.
         * Interrupts that are above the maximum system call priority are kept
         * permanently enabled, even when the RTOS kernel is in a critical section,
         * but cannot make any calls to FreeRTOS API functions. If configASSERT()
         * is defined in FreeRTOSConfig.h then
         * portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
         * failure if a FreeRTOS API function is called from an interrupt that has
         * been assigned a priority above the configured maximum system call
         * priority. Only FreeRTOS functions that end in FromISR can be called
         * from interrupts that have been assigned a priority at or (logically)
         * below the maximum system call interrupt priority. FreeRTOS maintains a
         * separate interrupt safe API to ensure interrupt entry is as fast and as
         * simple as possible. More information (albeit Cortex-M specific) is
         * provided on the following link:
         * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
        portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

        prvENTER_CRITICAL_OR_MASK_ISR( &xKernelLock, uxSavedInterruptStatus );
        {
            if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
            {
                /* Get current core ID as we can no longer be preempted. */
                const BaseType_t xCurCoreID = portGET_CORE_ID();

                traceTASK_RESUME_FROM_ISR( pxTCB );

                /* Check the ready lists can be accessed. */
                if( taskCAN_BE_SCHEDULED( pxTCB ) == pdTRUE )
                {
                    /* Ready lists can be accessed so move the task from the
                     * suspended list to the ready list directly. */
                    if( taskIS_YIELD_REQUIRED( pxTCB, xCurCoreID, pdTRUE ) == pdTRUE )
                    {
                        xYieldRequired = pdTRUE;

                        /* Mark that a yield is pending in case the user is not
                         * using the return value to initiate a context switch
                         * from the ISR using portYIELD_FROM_ISR. */
                        xYieldPending[ xCurCoreID ] = pdTRUE;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
                    prvAddTaskToReadyList( pxTCB );
                }
                else
                {
                    /* The delayed or ready lists cannot be accessed so the task
                     * is held in the pending ready list until the scheduler is
                     * unsuspended. */
                    vListInsertEnd( &( xPendingReadyList[ xCurCoreID ] ), &( pxTCB->xEventListItem ) );
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        prvEXIT_CRITICAL_OR_UNMASK_ISR( &xKernelLock, uxSavedInterruptStatus );

        return xYieldRequired;
    }

#endif /* ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) ) */
  1968. /*-----------------------------------------------------------*/
  1969. void vTaskStartScheduler( void )
  1970. {
  1971. BaseType_t xReturn;
  1972. UBaseType_t x;
  1973. /* Create idle tasks that are pinned to each core */
  1974. for( x = 0; x < configNUMBER_OF_CORES; x++ )
  1975. {
  1976. /* Add the idle task at the lowest priority. */
  1977. #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
  1978. {
  1979. StaticTask_t * pxIdleTaskTCBBuffer = NULL;
  1980. StackType_t * pxIdleTaskStackBuffer = NULL;
  1981. uint32_t ulIdleTaskStackSize;
  1982. /* The Idle task is created using user provided RAM - obtain the
  1983. * address of the RAM then create the idle task. */
  1984. vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize );
  1985. xIdleTaskHandle[ x ] = xTaskCreateStaticPinnedToCore( prvIdleTask,
  1986. configIDLE_TASK_NAME,
  1987. ulIdleTaskStackSize,
  1988. ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */
  1989. portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
  1990. pxIdleTaskStackBuffer,
  1991. pxIdleTaskTCBBuffer, /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
  1992. x );
  1993. if( xIdleTaskHandle[ x ] != NULL )
  1994. {
  1995. xReturn = pdPASS;
  1996. }
  1997. else
  1998. {
  1999. xReturn = pdFAIL;
  2000. break;
  2001. }
  2002. }
  2003. #else /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
  2004. {
  2005. /* The Idle task is being created using dynamically allocated RAM. */
  2006. xReturn = xTaskCreatePinnedToCore( prvIdleTask,
  2007. configIDLE_TASK_NAME,
  2008. configMINIMAL_STACK_SIZE,
  2009. ( void * ) NULL,
  2010. portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
  2011. &xIdleTaskHandle[ xCoreID ], /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
  2012. xCoreID );
  2013. if( xReturn == pdFAIL )
  2014. {
  2015. break;
  2016. }
  2017. #endif /* configSUPPORT_STATIC_ALLOCATION */
  2018. }
  2019. #if ( configUSE_TIMERS == 1 )
  2020. {
  2021. if( xReturn == pdPASS )
  2022. {
  2023. xReturn = xTimerCreateTimerTask();
  2024. }
  2025. else
  2026. {
  2027. mtCOVERAGE_TEST_MARKER();
  2028. }
  2029. }
  2030. #endif /* configUSE_TIMERS */
  2031. if( xReturn == pdPASS )
  2032. {
  2033. /* freertos_tasks_c_additions_init() should only be called if the user
  2034. * definable macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is
  2035. * the only macro called by the function. */
  2036. #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
  2037. {
  2038. freertos_tasks_c_additions_init();
  2039. }
  2040. #endif
  2041. /* Interrupts are turned off here, to ensure a tick does not occur
  2042. * before or during the call to xPortStartScheduler(). The stacks of
  2043. * the created tasks contain a status word with interrupts switched on
  2044. * so interrupts will automatically get re-enabled when the first task
  2045. * starts to run. */
  2046. portDISABLE_INTERRUPTS();
  2047. /* For SMP, we need to take the kernel lock here as we are about to
  2048. * access kernel data structures. */
  2049. taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
  2050. {
  2051. #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) )
  2052. {
  2053. /* Switch C-Runtime's TLS Block to point to the TLS
  2054. * block specific to the task that will run first. */
  2055. configSET_TLS_BLOCK( pxCurrentTCBs[ portGET_CORE_ID() ]->xTLSBlock );
  2056. }
  2057. #endif
  2058. xNextTaskUnblockTime = portMAX_DELAY;
  2059. xSchedulerRunning = pdTRUE;
  2060. xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;
  2061. }
  2062. /* Release the previously taken kernel lock. */
  2063. taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
  2064. /* If configGENERATE_RUN_TIME_STATS is defined then the following
  2065. * macro must be defined to configure the timer/counter used to generate
  2066. * the run time counter time base. NOTE: If configGENERATE_RUN_TIME_STATS
  2067. * is set to 0 and the following line fails to build then ensure you do not
  2068. * have portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() defined in your
  2069. * FreeRTOSConfig.h file. */
  2070. portCONFIGURE_TIMER_FOR_RUN_TIME_STATS();
  2071. traceTASK_SWITCHED_IN();
  2072. /* Setting up the timer tick is hardware specific and thus in the
  2073. * portable interface. */
  2074. xPortStartScheduler();
  2075. /* In most cases, xPortStartScheduler() will not return. If it
  2076. * returns pdTRUE then there was not enough heap memory available
  2077. * to create either the Idle or the Timer task. If it returned
  2078. * pdFALSE, then the application called xTaskEndScheduler().
  2079. * Most ports don't implement xTaskEndScheduler() as there is
  2080. * nothing to return to. */
  2081. }
  2082. else
  2083. {
  2084. /* This line will only be reached if the kernel could not be started,
  2085. * because there was not enough FreeRTOS heap to create the idle task
  2086. * or the timer task. */
  2087. configASSERT( xReturn != errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY );
  2088. }
  2089. /* Prevent compiler warnings if INCLUDE_xTaskGetIdleTaskHandle is set to 0,
  2090. * meaning xIdleTaskHandle is not used anywhere else. */
  2091. ( void ) xIdleTaskHandle[ 0 ];
  2092. /* OpenOCD makes use of uxTopUsedPriority for thread debugging. Prevent uxTopUsedPriority
  2093. * from getting optimized out as it is no longer used by the kernel. */
  2094. ( void ) uxTopUsedPriority;
  2095. }
  2096. /*-----------------------------------------------------------*/
/* Stop the scheduler: interrupts are disabled, xSchedulerRunning is cleared
 * under the kernel lock (SMP), and the port layer is asked to undo whatever
 * xPortStartScheduler() set up. Only meaningful on ports that implement
 * vPortEndScheduler(). */
void vTaskEndScheduler( void )
{
    /* Stop the scheduler interrupts and call the portable scheduler end
     * routine so the original ISRs can be restored if necessary. The port
     * layer must ensure interrupts enable bit is left in the correct state. */
    portDISABLE_INTERRUPTS();

    /* For SMP, we need to take the kernel lock here as we are about to access
     * kernel data structures. */
    taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
    {
        xSchedulerRunning = pdFALSE;
    }
    /* Release the previously taken kernel lock. */
    taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );

    vPortEndScheduler();
}
  2113. /*----------------------------------------------------------*/
/* Suspend the scheduler on the calling core. Calls nest: each call increments
 * this core's uxSchedulerSuspended counter and must be balanced by a call to
 * xTaskResumeAll(). While suspended, context switches are held off but
 * interrupts remain enabled. */
void vTaskSuspendAll( void )
{
    /* A critical section is not required as the variable is of type
     * BaseType_t. Please read Richard Barry's reply in the following link to a
     * post in the FreeRTOS support forum before reporting this as a bug! -
     * https://goo.gl/wu4acr */

    /* portSOFTWARE_BARRIER() is only implemented for emulated/simulated ports that
     * do not otherwise exhibit real time behaviour. */
    portSOFTWARE_BARRIER();

    /* The scheduler is suspended if uxSchedulerSuspended is non-zero. An increment
     * is used to allow calls to vTaskSuspendAll() to nest. */
    ++uxSchedulerSuspended[ portGET_CORE_ID() ];

    /* Enforces ordering for ports and optimised compilers that may otherwise place
     * the above increment elsewhere. */
    portMEMORY_BARRIER();
}
  2130. /*----------------------------------------------------------*/
  2131. #if ( configUSE_TICKLESS_IDLE != 0 )
  2132. static TickType_t prvGetExpectedIdleTime( void )
  2133. {
  2134. TickType_t xReturn;
  2135. UBaseType_t uxHigherPriorityReadyTasks = pdFALSE;
  2136. /* uxHigherPriorityReadyTasks takes care of the case where
  2137. * configUSE_PREEMPTION is 0, so there may be tasks above the idle priority
  2138. * task that are in the Ready state, even though the idle task is
  2139. * running. */
  2140. #if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )
  2141. {
  2142. if( uxTopReadyPriority > tskIDLE_PRIORITY )
  2143. {
  2144. uxHigherPriorityReadyTasks = pdTRUE;
  2145. }
  2146. }
  2147. #else
  2148. {
  2149. const UBaseType_t uxLeastSignificantBit = ( UBaseType_t ) 0x01;
  2150. /* When port optimised task selection is used the uxTopReadyPriority
  2151. * variable is used as a bit map. If bits other than the least
  2152. * significant bit are set then there are tasks that have a priority
  2153. * above the idle priority that are in the Ready state. This takes
  2154. * care of the case where the co-operative scheduler is in use. */
  2155. if( uxTopReadyPriority > uxLeastSignificantBit )
  2156. {
  2157. uxHigherPriorityReadyTasks = pdTRUE;
  2158. }
  2159. }
  2160. #endif /* if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 ) */
  2161. if( pxCurrentTCBs[ portGET_CORE_ID() ]->uxPriority > tskIDLE_PRIORITY )
  2162. {
  2163. xReturn = 0;
  2164. }
  2165. else if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > configNUMBER_OF_CORES )
  2166. {
  2167. /* There are other idle priority tasks in the ready state. If
  2168. * time slicing is used then the very next tick interrupt must be
  2169. * processed. */
  2170. xReturn = 0;
  2171. }
  2172. else if( uxHigherPriorityReadyTasks != pdFALSE )
  2173. {
  2174. /* There are tasks in the Ready state that have a priority above the
  2175. * idle priority. This path can only be reached if
  2176. * configUSE_PREEMPTION is 0. */
  2177. xReturn = 0;
  2178. }
  2179. else
  2180. {
  2181. xReturn = xNextTaskUnblockTime - xTickCount;
  2182. }
  2183. return xReturn;
  2184. }
  2185. #endif /* configUSE_TICKLESS_IDLE */
  2186. /*----------------------------------------------------------*/
/* Undo one level of vTaskSuspendAll() nesting on the calling core. When the
 * count reaches zero: drain this core's pending ready list into the real
 * ready lists, re-arm the next unblock time if needed, unwind any ticks that
 * were pended while suspended (core 0 only on SMP, as it owns the tick
 * count), and yield if anything readied should preempt. Returns pdTRUE if a
 * yield was already performed here, pdFALSE otherwise. */
BaseType_t xTaskResumeAll( void )
{
    TCB_t * pxTCB = NULL;
    BaseType_t xAlreadyYielded = pdFALSE;

    /* If uxSchedulerSuspended is zero then this function does not match a
     * previous call to vTaskSuspendAll(). */
    configASSERT( taskIS_SCHEDULER_SUSPENDED() == pdTRUE );

    /* It is possible that an ISR caused a task to be removed from an event
     * list while the scheduler was suspended. If this was the case then the
     * removed task will have been added to the xPendingReadyList. Once the
     * scheduler has been resumed it is safe to move all the pending ready
     * tasks from this list into their appropriate ready list. */
    taskENTER_CRITICAL( &xKernelLock );
    {
        /* Get current core ID as we can no longer be preempted. */
        const BaseType_t xCurCoreID = portGET_CORE_ID();

        --uxSchedulerSuspended[ xCurCoreID ];

        if( uxSchedulerSuspended[ xCurCoreID ] == ( UBaseType_t ) pdFALSE )
        {
            if( uxCurrentNumberOfTasks > ( UBaseType_t ) 0U )
            {
                /* Move any readied tasks from the pending list into the
                 * appropriate ready list. */
                while( listLIST_IS_EMPTY( &xPendingReadyList[ xCurCoreID ] ) == pdFALSE )
                {
                    /* Note: Add volatile cast (IDF-8361) */
                    pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( volatile List_t * ) ( &xPendingReadyList[ xCurCoreID ] ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
                    listREMOVE_ITEM( &( pxTCB->xEventListItem ) );
                    portMEMORY_BARRIER();
                    listREMOVE_ITEM( &( pxTCB->xStateListItem ) );
                    prvAddTaskToReadyList( pxTCB );

                    /* If the moved task has a priority higher than or equal to
                     * the current task then a yield must be performed. */
                    if( taskIS_YIELD_REQUIRED( pxTCB, xCurCoreID, pdTRUE ) == pdTRUE )
                    {
                        xYieldPending[ xCurCoreID ] = pdTRUE;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }

                if( pxTCB != NULL )
                {
                    /* A task was unblocked while the scheduler was suspended,
                     * which may have prevented the next unblock time from being
                     * re-calculated, in which case re-calculate it now. Mainly
                     * important for low power tickless implementations, where
                     * this can prevent an unnecessary exit from low power
                     * state. */
                    prvResetNextTaskUnblockTime();
                }

                #if ( configNUMBER_OF_CORES > 1 )

                    /* Core 0 is solely responsible for managing tick count, thus it
                     * must be the only core to unwind the pended ticks */
                    if( xCurCoreID == 0 )
                #endif

                /* If any ticks occurred while the scheduler was suspended then
                 * they should be processed now. This ensures the tick count does
                 * not slip, and that any delayed tasks are resumed at the correct
                 * time. */
                {
                    TickType_t xPendedCounts = xPendedTicks; /* Non-volatile copy. */

                    if( xPendedCounts > ( TickType_t ) 0U )
                    {
                        do
                        {
                            if( xTaskIncrementTick() != pdFALSE )
                            {
                                xYieldPending[ xCurCoreID ] = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }

                            --xPendedCounts;
                        } while( xPendedCounts > ( TickType_t ) 0U );

                        xPendedTicks = 0;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }

                if( xYieldPending[ xCurCoreID ] != pdFALSE )
                {
                    #if ( configUSE_PREEMPTION != 0 )
                    {
                        xAlreadyYielded = pdTRUE;
                    }
                    #endif
                    taskYIELD_IF_USING_PREEMPTION();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
    taskEXIT_CRITICAL( &xKernelLock );

    return xAlreadyYielded;
}
  2294. /*-----------------------------------------------------------*/
  2295. TickType_t xTaskGetTickCount( void )
  2296. {
  2297. TickType_t xTicks;
  2298. /* Critical section required if running on a 16 bit processor. */
  2299. portTICK_TYPE_ENTER_CRITICAL();
  2300. {
  2301. xTicks = xTickCount;
  2302. }
  2303. portTICK_TYPE_EXIT_CRITICAL();
  2304. return xTicks;
  2305. }
  2306. /*-----------------------------------------------------------*/
  2307. TickType_t xTaskGetTickCountFromISR( void )
  2308. {
  2309. TickType_t xReturn;
  2310. UBaseType_t uxSavedInterruptStatus;
  2311. /* RTOS ports that support interrupt nesting have the concept of a maximum
  2312. * system call (or maximum API call) interrupt priority. Interrupts that are
  2313. * above the maximum system call priority are kept permanently enabled, even
  2314. * when the RTOS kernel is in a critical section, but cannot make any calls to
  2315. * FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
  2316. * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
  2317. * failure if a FreeRTOS API function is called from an interrupt that has been
  2318. * assigned a priority above the configured maximum system call priority.
  2319. * Only FreeRTOS functions that end in FromISR can be called from interrupts
  2320. * that have been assigned a priority at or (logically) below the maximum
  2321. * system call interrupt priority. FreeRTOS maintains a separate interrupt
  2322. * safe API to ensure interrupt entry is as fast and as simple as possible.
  2323. * More information (albeit Cortex-M specific) is provided on the following
  2324. * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
  2325. portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
  2326. /* For SMP, we need to take the kernel lock here as we are about to access
  2327. * kernel data structures. */
  2328. taskENTER_CRITICAL_ISR_SMP_ONLY( &xKernelLock );
  2329. {
  2330. uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR();
  2331. {
  2332. xReturn = xTickCount;
  2333. }
  2334. portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
  2335. }
  2336. /* Release the previously taken kernel lock. */
  2337. taskEXIT_CRITICAL_ISR_SMP_ONLY( &xKernelLock );
  2338. return xReturn;
  2339. }
  2340. /*-----------------------------------------------------------*/
/* Return the number of tasks currently known to the kernel (in any state,
 * including deleted tasks not yet cleaned up by the idle task). */
UBaseType_t uxTaskGetNumberOfTasks( void )
{
    /* A critical section is not required: uxCurrentNumberOfTasks is a single
     * word-sized (UBaseType_t) variable, which the port reads atomically. */
    return uxCurrentNumberOfTasks;
}
  2347. /*-----------------------------------------------------------*/
  2348. char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
  2349. {
  2350. TCB_t * pxTCB;
  2351. /* If null is passed in here then the name of the calling task is being
  2352. * queried. */
  2353. pxTCB = prvGetTCBFromHandle( xTaskToQuery );
  2354. configASSERT( pxTCB );
  2355. return &( pxTCB->pcTaskName[ 0 ] );
  2356. }
  2357. /*-----------------------------------------------------------*/
  2358. #if ( INCLUDE_xTaskGetHandle == 1 )
  2359. static TCB_t * prvSearchForNameWithinSingleList( List_t * pxList,
  2360. const char pcNameToQuery[] )
  2361. {
  2362. TCB_t * pxNextTCB;
  2363. TCB_t * pxFirstTCB;
  2364. TCB_t * pxReturn = NULL;
  2365. UBaseType_t x;
  2366. char cNextChar;
  2367. BaseType_t xBreakLoop;
  2368. /* This function is called with the scheduler suspended. */
  2369. if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
  2370. {
  2371. listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
  2372. do
  2373. {
  2374. listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
  2375. /* Check each character in the name looking for a match or
  2376. * mismatch. */
  2377. xBreakLoop = pdFALSE;
  2378. for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
  2379. {
  2380. cNextChar = pxNextTCB->pcTaskName[ x ];
  2381. if( cNextChar != pcNameToQuery[ x ] )
  2382. {
  2383. /* Characters didn't match. */
  2384. xBreakLoop = pdTRUE;
  2385. }
  2386. else if( cNextChar == ( char ) 0x00 )
  2387. {
  2388. /* Both strings terminated, a match must have been
  2389. * found. */
  2390. pxReturn = pxNextTCB;
  2391. xBreakLoop = pdTRUE;
  2392. }
  2393. else
  2394. {
  2395. mtCOVERAGE_TEST_MARKER();
  2396. }
  2397. if( xBreakLoop != pdFALSE )
  2398. {
  2399. break;
  2400. }
  2401. }
  2402. if( pxReturn != NULL )
  2403. {
  2404. /* The handle has been found. */
  2405. break;
  2406. }
  2407. } while( pxNextTCB != pxFirstTCB );
  2408. }
  2409. else
  2410. {
  2411. mtCOVERAGE_TEST_MARKER();
  2412. }
  2413. return pxReturn;
  2414. }
  2415. #endif /* INCLUDE_xTaskGetHandle */
  2416. /*-----------------------------------------------------------*/
  2417. #if ( INCLUDE_xTaskGetHandle == 1 )
  2418. TaskHandle_t xTaskGetHandle( const char * pcNameToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
  2419. {
  2420. UBaseType_t uxQueue = configMAX_PRIORITIES;
  2421. TCB_t * pxTCB;
  2422. /* Task names will be truncated to configMAX_TASK_NAME_LEN - 1 bytes. */
  2423. configASSERT( strlen( pcNameToQuery ) < configMAX_TASK_NAME_LEN );
  2424. prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock );
  2425. {
  2426. /* Search the ready lists. */
  2427. do
  2428. {
  2429. uxQueue--;
  2430. pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) &( pxReadyTasksLists[ uxQueue ] ), pcNameToQuery );
  2431. if( pxTCB != NULL )
  2432. {
  2433. /* Found the handle. */
  2434. break;
  2435. }
  2436. } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
  2437. /* Search the delayed lists. */
  2438. if( pxTCB == NULL )
  2439. {
  2440. pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxDelayedTaskList, pcNameToQuery );
  2441. }
  2442. if( pxTCB == NULL )
  2443. {
  2444. pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxOverflowDelayedTaskList, pcNameToQuery );
  2445. }
  2446. #if ( INCLUDE_vTaskSuspend == 1 )
  2447. {
  2448. if( pxTCB == NULL )
  2449. {
  2450. /* Search the suspended list. */
  2451. pxTCB = prvSearchForNameWithinSingleList( &xSuspendedTaskList, pcNameToQuery );
  2452. }
  2453. }
  2454. #endif
  2455. #if ( INCLUDE_vTaskDelete == 1 )
  2456. {
  2457. if( pxTCB == NULL )
  2458. {
  2459. /* Search the deleted list. */
  2460. pxTCB = prvSearchForNameWithinSingleList( &xTasksWaitingTermination, pcNameToQuery );
  2461. }
  2462. }
  2463. #endif
  2464. }
  2465. ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock );
  2466. return pxTCB;
  2467. }
  2468. #endif /* INCLUDE_xTaskGetHandle */
  2469. /*-----------------------------------------------------------*/
  2470. #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
  2471. BaseType_t xTaskGetStaticBuffers( TaskHandle_t xTask,
  2472. StackType_t ** ppuxStackBuffer,
  2473. StaticTask_t ** ppxTaskBuffer )
  2474. {
  2475. BaseType_t xReturn;
  2476. TCB_t * pxTCB;
  2477. configASSERT( ppuxStackBuffer != NULL );
  2478. configASSERT( ppxTaskBuffer != NULL );
  2479. pxTCB = prvGetTCBFromHandle( xTask );
  2480. #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE == 1 )
  2481. {
  2482. if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_AND_TCB )
  2483. {
  2484. *ppuxStackBuffer = pxTCB->pxStack;
  2485. *ppxTaskBuffer = ( StaticTask_t * ) pxTCB;
  2486. xReturn = pdTRUE;
  2487. }
  2488. else if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_ONLY )
  2489. {
  2490. *ppuxStackBuffer = pxTCB->pxStack;
  2491. *ppxTaskBuffer = NULL;
  2492. xReturn = pdTRUE;
  2493. }
  2494. else
  2495. {
  2496. xReturn = pdFALSE;
  2497. }
  2498. }
  2499. #else /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE == 1 */
  2500. {
  2501. *ppuxStackBuffer = pxTCB->pxStack;
  2502. *ppxTaskBuffer = ( StaticTask_t * ) pxTCB;
  2503. xReturn = pdTRUE;
  2504. }
  2505. #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE == 1 */
  2506. return xReturn;
  2507. }
  2508. #endif /* configSUPPORT_STATIC_ALLOCATION */
  2509. /*-----------------------------------------------------------*/
#if ( configUSE_TRACE_FACILITY == 1 )

    /* Populate pxTaskStatusArray with one TaskStatus_t per task in the system
     * (ready, blocked, suspended, and deleted-but-not-cleaned-up tasks).
     * Returns the number of entries filled in, which is 0 when uxArraySize is
     * smaller than the current number of tasks.  If pulTotalRunTime is not
     * NULL it receives the total run time counter value, or 0 when run time
     * stats are disabled. */
    UBaseType_t uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
                                      const UBaseType_t uxArraySize,
                                      configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime )
    {
        UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES;

        /* Freeze the task lists while they are walked: suspends the scheduler
         * in single-core builds, takes the kernel lock in SMP builds. */
        prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock );
        {
            /* Is there a space in the array for each task in the system? */
            if( uxArraySize >= uxCurrentNumberOfTasks )
            {
                /* Fill in an TaskStatus_t structure with information on each
                 * task in the Ready state, from the highest priority list
                 * downwards. */
                do
                {
                    uxQueue--;
                    uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &( pxReadyTasksLists[ uxQueue ] ), eReady );
                } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */

                /* Fill in an TaskStatus_t structure with information on each
                 * task in the Blocked state (both delayed lists, to cover the
                 * tick-count overflow case). */
                uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxDelayedTaskList, eBlocked );
                uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxOverflowDelayedTaskList, eBlocked );

                #if ( INCLUDE_vTaskDelete == 1 )
                {
                    /* Fill in an TaskStatus_t structure with information on
                     * each task that has been deleted but not yet cleaned up. */
                    uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xTasksWaitingTermination, eDeleted );
                }
                #endif

                #if ( INCLUDE_vTaskSuspend == 1 )
                {
                    /* Fill in an TaskStatus_t structure with information on
                     * each task in the Suspended state. */
                    uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xSuspendedTaskList, eSuspended );
                }
                #endif

                #if ( configGENERATE_RUN_TIME_STATS == 1 )
                {
                    if( pulTotalRunTime != NULL )
                    {
                        #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
                            portALT_GET_RUN_TIME_COUNTER_VALUE( ( *pulTotalRunTime ) );
                        #else
                            *pulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE();
                        #endif
                    }
                }
                #else /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
                {
                    if( pulTotalRunTime != NULL )
                    {
                        *pulTotalRunTime = 0;
                    }
                }
                #endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
            }
            else
            {
                /* Array too small for a complete snapshot - uxTask stays 0
                 * rather than returning a partial listing. */
                mtCOVERAGE_TEST_MARKER();
            }
        }
        ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock );

        return uxTask;
    }

#endif /* configUSE_TRACE_FACILITY */
  2575. /*----------------------------------------------------------*/
#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )

    /* Return the idle task handle for the core this function is called on,
     * by delegating to the per-core variant. */
    TaskHandle_t xTaskGetIdleTaskHandle( void )
    {
        return xTaskGetIdleTaskHandleForCore( portGET_CORE_ID() );
    }

#endif /* INCLUDE_xTaskGetIdleTaskHandle */
  2582. /*----------------------------------------------------------*/
  2583. /* This conditional compilation should use inequality to 0, not equality to 1.
  2584. * This is to ensure vTaskStepTick() is available when user defined low power mode
  2585. * implementations require configUSE_TICKLESS_IDLE to be set to a value other than
  2586. * 1. */
#if ( configUSE_TICKLESS_IDLE != 0 )

    /* Advance xTickCount by xTicksToJump in a single step, to account for
     * ticks that were suppressed while the device was in a tickless low
     * power state.  The tick hook is NOT called for the skipped ticks. */
    void vTaskStepTick( TickType_t xTicksToJump )
    {
        /* SINGLE-CORE MODIFICATION: Expanded critical section so that SMP
         * accesses xTickCount inside a critical section. */
        taskENTER_CRITICAL( &xKernelLock );
        {
            /* Correct the tick count value after a period during which the tick
             * was suppressed. Note this does *not* call the tick hook function for
             * each stepped tick. */
            configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime );

            if( ( xTickCount + xTicksToJump ) == xNextTaskUnblockTime )
            {
                /* Arrange for xTickCount to reach xNextTaskUnblockTime in
                 * xTaskIncrementTick() when the scheduler resumes. This ensures
                 * that any delayed tasks are resumed at the correct time. */
                #if ( configNUMBER_OF_CORES > 1 )
                {
                    /* In SMP, the entire tickless idle handling block
                     * is replaced with a critical section, taking the kernel lock. */
                    configASSERT( taskIS_SCHEDULER_SUSPENDED() == pdFALSE );
                }
                #else /* configNUMBER_OF_CORES > 1 */
                {
                    /* In single-core, the entire tickless idle handling block
                     * is done with scheduler suspended. */
                    configASSERT( taskIS_SCHEDULER_SUSPENDED() == pdTRUE );
                }
                #endif /* configNUMBER_OF_CORES > 1 */
                configASSERT( xTicksToJump != ( TickType_t ) 0 );

                /* Defer the final tick to the pended tick count so that the
                 * unblock processing happens in xTaskIncrementTick(). */
                xPendedTicks++;
                xTicksToJump--;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            xTickCount += xTicksToJump;
            traceINCREASE_TICK_COUNT( xTicksToJump );
        }
        /* SINGLE-CORE MODIFICATION: Expanded critical section */
        taskEXIT_CRITICAL( &xKernelLock );
    }

#endif /* configUSE_TICKLESS_IDLE */
  2631. /*----------------------------------------------------------*/
  2632. BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
  2633. {
  2634. BaseType_t xYieldOccurred;
  2635. /* Must not be called with the scheduler suspended as the implementation
  2636. * relies on xPendedTicks being wound down to 0 in xTaskResumeAll(). */
  2637. configASSERT( taskIS_SCHEDULER_SUSPENDED() == pdFALSE );
  2638. /* Use xPendedTicks to mimic xTicksToCatchUp number of ticks occurring when
  2639. * the scheduler is suspended so the ticks are executed in xTaskResumeAll(). */
  2640. vTaskSuspendAll();
  2641. /* Prevent the tick interrupt modifying xPendedTicks simultaneously. */
  2642. taskENTER_CRITICAL( &xKernelLock );
  2643. {
  2644. xPendedTicks += xTicksToCatchUp;
  2645. }
  2646. taskEXIT_CRITICAL( &xKernelLock );
  2647. xYieldOccurred = xTaskResumeAll();
  2648. return xYieldOccurred;
  2649. }
  2650. /*----------------------------------------------------------*/
#if ( INCLUDE_xTaskAbortDelay == 1 )

    /* Force xTask out of the Blocked state before its timeout expires.
     * Returns pdPASS if the task was blocked and has been made ready,
     * pdFAIL if the task was not in the Blocked state. */
    BaseType_t xTaskAbortDelay( TaskHandle_t xTask )
    {
        TCB_t * pxTCB = xTask;
        BaseType_t xReturn;

        configASSERT( pxTCB );

        prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock );
        {
            /* A task can only be prematurely removed from the Blocked state if
             * it is actually in the Blocked state. */
            if( eTaskGetState( xTask ) == eBlocked )
            {
                xReturn = pdPASS;

                /* Remove the reference to the task from the blocked list. An
                 * interrupt won't touch the xStateListItem because the
                 * scheduler is suspended. */
                ( void ) uxListRemove( &( pxTCB->xStateListItem ) );

                /* Is the task waiting on an event also? If so remove it from
                 * the event list too. Interrupts can touch the event list item,
                 * even though the scheduler is suspended, so a critical section
                 * is used. */
                taskENTER_CRITICAL_SC_ONLY( &xKernelLock );
                {
                    if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
                    {
                        ( void ) uxListRemove( &( pxTCB->xEventListItem ) );

                        /* This lets the task know it was forcibly removed from the
                         * blocked state so it should not re-evaluate its block time and
                         * then block again. */
                        pxTCB->ucDelayAborted = pdTRUE;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                taskEXIT_CRITICAL_SC_ONLY( &xKernelLock );

                /* Place the unblocked task into the appropriate ready list. */
                prvAddTaskToReadyList( pxTCB );

                /* A task being unblocked cannot cause an immediate context
                 * switch if preemption is turned off. */
                #if ( configUSE_PREEMPTION == 1 )
                {
                    /* Get current core ID as we can no longer be preempted. */
                    const BaseType_t xCurCoreID = portGET_CORE_ID();

                    /* Preemption is on, but a context switch should only be
                     * performed if the unblocked task has a priority that is
                     * higher than the currently executing task. */
                    if( taskIS_YIELD_REQUIRED( pxTCB, xCurCoreID, pdFALSE ) == pdTRUE )
                    {
                        /* Pend the yield to be performed when the scheduler
                         * is unsuspended. */
                        xYieldPending[ xCurCoreID ] = pdTRUE;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_PREEMPTION */
            }
            else
            {
                xReturn = pdFAIL;
            }
        }
        ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock );

        return xReturn;
    }

#endif /* INCLUDE_xTaskAbortDelay */
  2721. /*----------------------------------------------------------*/
  2722. BaseType_t xTaskIncrementTick( void )
  2723. {
  2724. #if ( configNUMBER_OF_CORES > 1 )
  2725. /* Only Core 0 should ever call this function. */
  2726. configASSERT( portGET_CORE_ID() == 0 );
  2727. #endif /* configNUMBER_OF_CORES > 1 */
  2728. TCB_t * pxTCB;
  2729. TickType_t xItemValue;
  2730. BaseType_t xSwitchRequired = pdFALSE;
  2731. #if ( configUSE_TICK_HOOK == 1 )
  2732. BaseType_t xCallTickHook;
  2733. #endif /* configUSE_TICK_HOOK == 1 */
  2734. /* Called by the portable layer each time a tick interrupt occurs.
  2735. * Increments the tick then checks to see if the new tick value will cause any
  2736. * tasks to be unblocked. */
  2737. traceTASK_INCREMENT_TICK( xTickCount );
  2738. /* For SMP, we need to take the kernel lock here as we are about to access
  2739. * kernel data structures (unlike single core which calls this function with
  2740. * interrupts disabled). */
  2741. taskENTER_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
  2742. {
  2743. if( uxSchedulerSuspended[ 0 ] == ( UBaseType_t ) pdFALSE )
  2744. {
  2745. /* Minor optimisation. The tick count cannot change in this
  2746. * block. */
  2747. const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1;
  2748. /* Increment the RTOS tick, switching the delayed and overflowed
  2749. * delayed lists if it wraps to 0. */
  2750. xTickCount = xConstTickCount;
  2751. if( xConstTickCount == ( TickType_t ) 0U ) /*lint !e774 'if' does not always evaluate to false as it is looking for an overflow. */
  2752. {
  2753. taskSWITCH_DELAYED_LISTS();
  2754. }
  2755. else
  2756. {
  2757. mtCOVERAGE_TEST_MARKER();
  2758. }
  2759. /* See if this tick has made a timeout expire. Tasks are stored in
  2760. * the queue in the order of their wake time - meaning once one task
  2761. * has been found whose block time has not expired there is no need to
  2762. * look any further down the list. */
  2763. if( xConstTickCount >= xNextTaskUnblockTime )
  2764. {
  2765. for( ; ; )
  2766. {
  2767. if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
  2768. {
  2769. /* The delayed list is empty. Set xNextTaskUnblockTime
  2770. * to the maximum possible value so it is extremely
  2771. * unlikely that the
  2772. * if( xTickCount >= xNextTaskUnblockTime ) test will pass
  2773. * next time through. */
  2774. xNextTaskUnblockTime = portMAX_DELAY; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
  2775. break;
  2776. }
  2777. else
  2778. {
  2779. /* The delayed list is not empty, get the value of the
  2780. * item at the head of the delayed list. This is the time
  2781. * at which the task at the head of the delayed list must
  2782. * be removed from the Blocked state. */
  2783. pxTCB = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
  2784. xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ) );
  2785. if( xConstTickCount < xItemValue )
  2786. {
  2787. /* It is not time to unblock this item yet, but the
  2788. * item value is the time at which the task at the head
  2789. * of the blocked list must be removed from the Blocked
  2790. * state - so record the item value in
  2791. * xNextTaskUnblockTime. */
  2792. xNextTaskUnblockTime = xItemValue;
  2793. break; /*lint !e9011 Code structure here is deemed easier to understand with multiple breaks. */
  2794. }
  2795. else
  2796. {
  2797. mtCOVERAGE_TEST_MARKER();
  2798. }
  2799. /* It is time to remove the item from the Blocked state. */
  2800. listREMOVE_ITEM( &( pxTCB->xStateListItem ) );
  2801. /* Is the task waiting on an event also? If so remove
  2802. * it from the event list. */
  2803. if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
  2804. {
  2805. listREMOVE_ITEM( &( pxTCB->xEventListItem ) );
  2806. }
  2807. else
  2808. {
  2809. mtCOVERAGE_TEST_MARKER();
  2810. }
  2811. /* Place the unblocked task into the appropriate ready
  2812. * list. */
  2813. prvAddTaskToReadyList( pxTCB );
  2814. /* A task being unblocked cannot cause an immediate
  2815. * context switch if preemption is turned off. */
  2816. #if ( configUSE_PREEMPTION == 1 )
  2817. {
  2818. /* Preemption is on, but a context switch should
  2819. * only be performed if the unblocked task has a
  2820. * priority that is equal to or higher than the
  2821. * currently executing task.
  2822. *
  2823. * For SMP, since this function is only run on core
  2824. * 0, we only need to context switch if the unblocked
  2825. * task can run on core 0 and has a higher priority
  2826. * than the current task. */
  2827. /* ">" changed to ">="" due to IDF incompatibility (IDF-8428) */
  2828. if( ( taskIS_AFFINITY_COMPATIBLE( 0, pxTCB->xCoreID ) == pdTRUE ) && ( pxTCB->uxPriority >= pxCurrentTCBs[ 0 ]->uxPriority ) )
  2829. {
  2830. xSwitchRequired = pdTRUE;
  2831. }
  2832. else
  2833. {
  2834. mtCOVERAGE_TEST_MARKER();
  2835. }
  2836. }
  2837. #endif /* configUSE_PREEMPTION */
  2838. }
  2839. }
  2840. }
  2841. /* Tasks of equal priority to the currently running task will share
  2842. * processing time (time slice) if preemption is on, and the application
  2843. * writer has not explicitly turned time slicing off. */
  2844. #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) )
  2845. {
  2846. if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCBs[ 0 ]->uxPriority ] ) ) > ( UBaseType_t ) 1 )
  2847. {
  2848. xSwitchRequired = pdTRUE;
  2849. }
  2850. else
  2851. {
  2852. mtCOVERAGE_TEST_MARKER();
  2853. }
  2854. }
  2855. #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
  2856. #if ( configUSE_TICK_HOOK == 1 )
  2857. {
  2858. /* Guard against the tick hook being called when the pended tick
  2859. * count is being unwound (when the scheduler is being unlocked). */
  2860. if( xPendedTicksTemp == ( TickType_t ) 0 )
  2861. {
  2862. xCallTickHook = pdTRUE;
  2863. }
  2864. else
  2865. {
  2866. xCallTickHook = pdFALSE;
  2867. }
  2868. }
  2869. #endif /* configUSE_TICK_HOOK */
  2870. #if ( configUSE_PREEMPTION == 1 )
  2871. {
  2872. if( xYieldPending[ 0 ] != pdFALSE )
  2873. {
  2874. xSwitchRequired = pdTRUE;
  2875. }
  2876. else
  2877. {
  2878. mtCOVERAGE_TEST_MARKER();
  2879. }
  2880. }
  2881. #endif /* configUSE_PREEMPTION */
  2882. }
  2883. else
  2884. {
  2885. ++xPendedTicks;
  2886. /* The tick hook gets called at regular intervals, even if the
  2887. * scheduler is locked. */
  2888. #if ( configUSE_TICK_HOOK == 1 )
  2889. {
  2890. xCallTickHook = pdTRUE;
  2891. }
  2892. #endif
  2893. }
  2894. }
  2895. /* Release the previously taken kernel lock as we have finished accessing
  2896. * the kernel data structures. */
  2897. taskEXIT_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
  2898. #if ( configUSE_TICK_HOOK == 1 )
  2899. {
  2900. if( xCallTickHook == pdTRUE )
  2901. {
  2902. vApplicationTickHook();
  2903. }
  2904. }
  2905. #endif
  2906. return xSwitchRequired;
  2907. }
  2908. /*-----------------------------------------------------------*/
  2909. #if ( configUSE_APPLICATION_TASK_TAG == 1 )
  2910. void vTaskSetApplicationTaskTag( TaskHandle_t xTask,
  2911. TaskHookFunction_t pxHookFunction )
  2912. {
  2913. TCB_t * xTCB;
  2914. /* If xTask is NULL then it is the task hook of the calling task that is
  2915. * getting set. */
  2916. if( xTask == NULL )
  2917. {
  2918. xTCB = ( TCB_t * ) xTaskGetCurrentTaskHandle();
  2919. }
  2920. else
  2921. {
  2922. xTCB = xTask;
  2923. }
  2924. /* Save the hook function in the TCB. A critical section is required as
  2925. * the value can be accessed from an interrupt. */
  2926. taskENTER_CRITICAL( &xKernelLock );
  2927. {
  2928. xTCB->pxTaskTag = pxHookFunction;
  2929. }
  2930. taskEXIT_CRITICAL( &xKernelLock );
  2931. }
  2932. #endif /* configUSE_APPLICATION_TASK_TAG */
  2933. /*-----------------------------------------------------------*/
  2934. #if ( configUSE_APPLICATION_TASK_TAG == 1 )
  2935. TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask )
  2936. {
  2937. TCB_t * pxTCB;
  2938. TaskHookFunction_t xReturn;
  2939. /* If xTask is NULL then set the calling task's hook. */
  2940. pxTCB = prvGetTCBFromHandle( xTask );
  2941. /* Save the hook function in the TCB. A critical section is required as
  2942. * the value can be accessed from an interrupt. */
  2943. taskENTER_CRITICAL( &xKernelLock );
  2944. {
  2945. xReturn = pxTCB->pxTaskTag;
  2946. }
  2947. taskEXIT_CRITICAL( &xKernelLock );
  2948. return xReturn;
  2949. }
  2950. #endif /* configUSE_APPLICATION_TASK_TAG */
  2951. /*-----------------------------------------------------------*/
  2952. #if ( configUSE_APPLICATION_TASK_TAG == 1 )
  2953. TaskHookFunction_t xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask )
  2954. {
  2955. TCB_t * pxTCB;
  2956. TaskHookFunction_t xReturn;
  2957. UBaseType_t uxSavedInterruptStatus;
  2958. /* If xTask is NULL then set the calling task's hook. */
  2959. pxTCB = prvGetTCBFromHandle( xTask );
  2960. /* Save the hook function in the TCB. A critical section is required as
  2961. * the value can be accessed from an interrupt. */
  2962. prvENTER_CRITICAL_OR_MASK_ISR( &xKernelLock, uxSavedInterruptStatus );
  2963. {
  2964. xReturn = pxTCB->pxTaskTag;
  2965. }
  2966. prvEXIT_CRITICAL_OR_UNMASK_ISR( &xKernelLock, uxSavedInterruptStatus );
  2967. return xReturn;
  2968. }
  2969. #endif /* configUSE_APPLICATION_TASK_TAG */
  2970. /*-----------------------------------------------------------*/
  2971. #if ( configUSE_APPLICATION_TASK_TAG == 1 )
  2972. BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask,
  2973. void * pvParameter )
  2974. {
  2975. TCB_t * xTCB;
  2976. BaseType_t xReturn;
  2977. /* If xTask is NULL then we are calling our own task hook. */
  2978. if( xTask == NULL )
  2979. {
  2980. xTCB = xTaskGetCurrentTaskHandle();
  2981. }
  2982. else
  2983. {
  2984. xTCB = xTask;
  2985. }
  2986. if( xTCB->pxTaskTag != NULL )
  2987. {
  2988. xReturn = xTCB->pxTaskTag( pvParameter );
  2989. }
  2990. else
  2991. {
  2992. xReturn = pdFAIL;
  2993. }
  2994. return xReturn;
  2995. }
  2996. #endif /* configUSE_APPLICATION_TASK_TAG */
  2997. /*-----------------------------------------------------------*/
#if ( configNUMBER_OF_CORES > 1 )

    /* SMP scheduler core: select the highest priority ready task that this
     * core may run (compatible affinity and not currently executing on
     * another core) and install it as pxCurrentTCBs[ core ].  Also refreshes
     * uxTopReadyPriority and rotates the chosen task to the back of its
     * ready list for best-effort round robin.  Must be called from within a
     * critical section. */
    static void prvSelectHighestPriorityTaskSMP( void )
    {
        /* This function is called from a critical section. So some optimizations are made */
        BaseType_t uxCurPriority;
        BaseType_t xTaskScheduled = pdFALSE;
        BaseType_t xNewTopPrioritySet = pdFALSE;
        BaseType_t xCurCoreID = portGET_CORE_ID();

        /* Search for tasks, starting form the highest ready priority. If nothing is
         * found, we eventually default to the IDLE tasks at priority 0 */
        for( uxCurPriority = uxTopReadyPriority; uxCurPriority >= 0 && xTaskScheduled == pdFALSE; uxCurPriority-- )
        {
            /* Check if current priority has one or more ready tasks. Skip if none */
            if( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxCurPriority ] ) ) )
            {
                continue;
            }

            /* Save a copy of highest priority that has a ready state task */
            if( xNewTopPrioritySet == pdFALSE )
            {
                xNewTopPrioritySet = pdTRUE;
                uxTopReadyPriority = uxCurPriority;
            }

            /* We now search this priority's ready task list for a runnable task.
             * We always start searching from the head of the list, so we reset
             * pxIndex to point to the tail so that we start walking the list from
             * the first item */
            pxReadyTasksLists[ uxCurPriority ].pxIndex = ( ListItem_t * ) &( pxReadyTasksLists[ uxCurPriority ].xListEnd );

            /* Get the first item on the list */
            TCB_t * pxTCBCur;
            TCB_t * pxTCBFirst;
            listGET_OWNER_OF_NEXT_ENTRY( pxTCBCur, &( pxReadyTasksLists[ uxCurPriority ] ) );
            pxTCBFirst = pxTCBCur;

            do
            {
                /* Check if the current task is currently being executed. However, if
                 * it's being executed by the current core, we can still schedule it.
                 * Todo: Each task can store a xTaskRunState, instead of needing to
                 * check each core */
                UBaseType_t x;

                for( x = 0; x < configNUMBER_OF_CORES; x++ )
                {
                    if( x == xCurCoreID )
                    {
                        continue;
                    }
                    else if( pxCurrentTCBs[ x ] == pxTCBCur )
                    {
                        /* Current task is already being executed. Get the next task */
                        goto get_next_task;
                    }
                }

                /* Check if the current task has a compatible affinity */
                if( taskIS_AFFINITY_COMPATIBLE( xCurCoreID, pxTCBCur->xCoreID ) == pdFALSE )
                {
                    goto get_next_task;
                }

                /* The current task is runnable. Schedule it */
                pxCurrentTCBs[ xCurCoreID ] = pxTCBCur;
                xTaskScheduled = pdTRUE;

                /* Move the current tasks list item to the back of the list in order
                 * to implement best effort round robin. To do this, we need to reset
                 * the pxIndex to point to the tail again. */
                pxReadyTasksLists[ uxCurPriority ].pxIndex = ( ListItem_t * ) &( pxReadyTasksLists[ uxCurPriority ].xListEnd );
                listREMOVE_ITEM( &( pxTCBCur->xStateListItem ) );
                listINSERT_END( &( pxReadyTasksLists[ uxCurPriority ] ), &( pxTCBCur->xStateListItem ) );
                break;

get_next_task:
                /* The current task cannot be scheduled. Get the next task in the list */
                listGET_OWNER_OF_NEXT_ENTRY( pxTCBCur, &( pxReadyTasksLists[ uxCurPriority ] ) );
            } while( pxTCBCur != pxTCBFirst ); /* Check to see if we've walked the entire list */
        }

        configASSERT( xTaskScheduled == pdTRUE ); /* At this point, a task MUST have been scheduled */
    }

#endif /* configNUMBER_OF_CORES > 1 */
  3073. /*-----------------------------------------------------------*/
/**
 * @brief Select the next task to run on the current core and perform the
 *        bookkeeping associated with a context switch.
 *
 * If the current core's scheduler is suspended, no switch occurs and the
 * yield is recorded in xYieldPending[] so it can be performed on resume.
 * Otherwise run-time stats are updated, stack overflow is checked, errno is
 * saved/restored around the switch, and taskSELECT_HIGHEST_PRIORITY_TASK()
 * picks the new pxCurrentTCBs[ core ] entry.
 */
void vTaskSwitchContext( void )
{
    /* For SMP, we need to take the kernel lock here as we are about to access
     * kernel data structures (unlike single core which calls this function with
     * either interrupts disabled or when the scheduler hasn't started yet). */
    taskENTER_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
    {
        /* Get current core ID as we can no longer be preempted. */
        const BaseType_t xCurCoreID = portGET_CORE_ID();

        if( uxSchedulerSuspended[ xCurCoreID ] != ( UBaseType_t ) pdFALSE )
        {
            /* The scheduler is currently suspended - do not allow a context
             * switch. Latch the request so it is honoured when the scheduler
             * is resumed. */
            xYieldPending[ xCurCoreID ] = pdTRUE;
        }
        else
        {
            xYieldPending[ xCurCoreID ] = pdFALSE;
            traceTASK_SWITCHED_OUT();

            #if ( configGENERATE_RUN_TIME_STATS == 1 )
            {
                #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
                    portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime );
                #else
                    ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE();
                #endif

                /* Add the amount of time the task has been running to the
                 * accumulated time so far. The time the task started running was
                 * stored in ulTaskSwitchedInTime. Note that there is no overflow
                 * protection here so count values are only valid until the timer
                 * overflows. The guard against negative values is to protect
                 * against suspect run time stat counter implementations - which
                 * are provided by the application, not the kernel. */
                if( ulTotalRunTime > ulTaskSwitchedInTime[ xCurCoreID ] )
                {
                    pxCurrentTCBs[ xCurCoreID ]->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime[ xCurCoreID ] );
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                ulTaskSwitchedInTime[ xCurCoreID ] = ulTotalRunTime;
            }
            #endif /* configGENERATE_RUN_TIME_STATS */

            /* Check for stack overflow, if configured. */
            taskCHECK_FOR_STACK_OVERFLOW( xCurCoreID );

            /* Before the currently running task is switched out, save its errno. */
            #if ( configUSE_POSIX_ERRNO == 1 )
            {
                pxCurrentTCBs[ xCurCoreID ]->iTaskErrno = FreeRTOS_errno;
            }
            #endif

            /* Select a new task to run using either the generic C or port
             * optimised asm code. */
            taskSELECT_HIGHEST_PRIORITY_TASK(); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
            traceTASK_SWITCHED_IN();

            /* After the new task is switched in, update the global errno. */
            #if ( configUSE_POSIX_ERRNO == 1 )
            {
                FreeRTOS_errno = pxCurrentTCBs[ xCurCoreID ]->iTaskErrno;
            }
            #endif

            /* Wrap this call in a macro. IDF-8434 */
            #if CONFIG_FREERTOS_WATCHPOINT_END_OF_STACK
            {
                /* Re-arm the stack watchpoint for the incoming task's stack. */
                vPortSetStackWatchpoint( pxCurrentTCBs[ xCurCoreID ]->pxStack );
            }
            #endif /* CONFIG_FREERTOS_WATCHPOINT_END_OF_STACK */

            #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) )
            {
                /* Switch C-Runtime's TLS Block to point to the TLS
                 * Block specific to this task. */
                configSET_TLS_BLOCK( pxCurrentTCBs[ xCurCoreID ]->xTLSBlock );
            }
            #endif
        }
    }
    /* Release the previously taken kernel lock as we have finished accessing
     * the kernel data structures. */
    taskEXIT_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
}
  3155. /*-----------------------------------------------------------*/
/**
 * @brief Block the calling task on an event list, in priority order.
 *
 * Inserts the current task's event list item into pxEventList (sorted so the
 * highest priority task wakes first) and moves the task to the delayed list
 * for xTicksToWait ticks.
 */
void vTaskPlaceOnEventList( List_t * const pxEventList,
                            const TickType_t xTicksToWait )
{
    configASSERT( pxEventList );

    /* IN SINGLE-CORE THIS FUNCTION MUST BE CALLED WITH EITHER INTERRUPTS DISABLED
     * OR THE SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. IN SMP
     * THIS FUNCTION MUST BE CALLED WITH THE QUEUE'S xQueueLock TAKEN. */

    /* For SMP, we need to take the kernel lock here as we are about to access
     * kernel data structures. */
    taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
    {
        /* Place the event list item of the TCB in the appropriate event list.
         * This is placed in the list in priority order so the highest priority task
         * is the first to be woken by the event.
         *
         * Note: Lists are sorted in ascending order by ListItem_t.xItemValue.
         * Normally, the xItemValue of a TCB's ListItem_t members is:
         * xItemValue = ( configMAX_PRIORITIES - uxPriority )
         * Therefore, the event list is sorted in descending priority order.
         *
         * The queue that contains the event list is locked, preventing
         * simultaneous access from interrupts. */
        vListInsert( pxEventList, &( pxCurrentTCBs[ portGET_CORE_ID() ]->xEventListItem ) );

        prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
    }
    /* Release the previously taken kernel lock. */
    taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
}
  3184. /*-----------------------------------------------------------*/
/**
 * @brief Block the calling task on an unordered event list (event groups).
 *
 * Stores xItemValue (tagged with taskEVENT_LIST_ITEM_VALUE_IN_USE) in the
 * task's event list item, appends the item to pxEventList (insertion order,
 * not priority order), and delays the task for xTicksToWait ticks.
 */
void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
                                     const TickType_t xItemValue,
                                     const TickType_t xTicksToWait )
{
    /* Get current core ID as we can no longer be preempted. */
    const BaseType_t xCurCoreID = portGET_CORE_ID();

    configASSERT( pxEventList );

    #if ( configNUMBER_OF_CORES > 1 )
    {
        /* IN SMP, THIS FUNCTION MUST BE CALLED WITH THE EVENT GROUP'S
         * xEventGroupLock ALREADY TAKEN. */
    }
    #else /* configNUMBER_OF_CORES > 1 */
    {
        /* IN SINGLE-CORE, THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED.
         * It is used by the event groups implementation. */
        configASSERT( uxSchedulerSuspended[ xCurCoreID ] != 0 );
    }
    #endif /* configNUMBER_OF_CORES > 1 */

    /* For SMP, we need to take the kernel lock here as we are about to access
     * kernel data structures. */
    taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
    {
        /* Store the item value in the event list item. It is safe to access the
         * event list item here as interrupts won't access the event list item of a
         * task that is not in the Blocked state. */
        listSET_LIST_ITEM_VALUE( &( pxCurrentTCBs[ xCurCoreID ]->xEventListItem ), xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );

        /* Place the event list item of the TCB at the end of the appropriate event
         * list. It is safe to access the event list here because it is part of an
         * event group implementation - and interrupts don't access event groups
         * directly (instead they access them indirectly by pending function calls to
         * the task level). */
        listINSERT_END( pxEventList, &( pxCurrentTCBs[ xCurCoreID ]->xEventListItem ) );

        prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
    }
    /* Release the previously taken kernel lock. */
    taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
}
  3223. /*-----------------------------------------------------------*/
  3224. #if ( configUSE_TIMERS == 1 )
  3225. void vTaskPlaceOnEventListRestricted( List_t * const pxEventList,
  3226. TickType_t xTicksToWait,
  3227. const BaseType_t xWaitIndefinitely )
  3228. {
  3229. configASSERT( pxEventList );
  3230. /* This function should not be called by application code hence the
  3231. * 'Restricted' in its name. It is not part of the public API. It is
  3232. * designed for use by kernel code, and has special calling requirements -
  3233. * it should be called with the scheduler suspended in single-core, or
  3234. * with the queue's xQueueLock already taken in SMP. */
  3235. /* For SMP, we need to take the kernel lock here as we are about to access
  3236. * kernel data structures. */
  3237. taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
  3238. {
  3239. /* Place the event list item of the TCB in the appropriate event list.
  3240. * In this case it is assume that this is the only task that is going to
  3241. * be waiting on this event list, so the faster vListInsertEnd() function
  3242. * can be used in place of vListInsert. */
  3243. listINSERT_END( pxEventList, &( pxCurrentTCBs[ portGET_CORE_ID() ]->xEventListItem ) );
  3244. /* If the task should block indefinitely then set the block time to a
  3245. * value that will be recognised as an indefinite delay inside the
  3246. * prvAddCurrentTaskToDelayedList() function. */
  3247. if( xWaitIndefinitely != pdFALSE )
  3248. {
  3249. xTicksToWait = portMAX_DELAY;
  3250. }
  3251. traceTASK_DELAY_UNTIL( ( xTickCount + xTicksToWait ) );
  3252. prvAddCurrentTaskToDelayedList( xTicksToWait, xWaitIndefinitely );
  3253. }
  3254. /* Release the previously taken kernel lock. */
  3255. taskEXIT_CRITICAL( &xKernelLock );
  3256. }
  3257. #endif /* configUSE_TIMERS */
  3258. /*-----------------------------------------------------------*/
  3259. #if ( configNUMBER_OF_CORES > 1 )
/**
 * @brief Unblock the highest priority task waiting on pxEventList (SMP).
 *
 * Safe to call from both task and ISR context; the appropriate kernel lock
 * entry is chosen at run time. Returns pdTRUE if the unblocked task should
 * cause the calling core to yield.
 */
    BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
    {
        TCB_t * pxUnblockedTCB;
        BaseType_t xReturn;

        /* For SMP, we need to take the kernel lock here as we are about to access
         * kernel data structures.
         * This function can also be called from an ISR context, so we
         * need to check whether we are in an ISR.*/
        if( portCHECK_IF_IN_ISR() == pdFALSE )
        {
            taskENTER_CRITICAL( &xKernelLock );
        }
        else
        {
            taskENTER_CRITICAL_ISR( &xKernelLock );
        }
        {
            /* Before taking the kernel lock, another task/ISR could have already
             * emptied the pxEventList. So we insert a check here to see if
             * pxEventList is empty before attempting to remove an item from it. */
            if( listLIST_IS_EMPTY( pxEventList ) == pdFALSE )
            {
                /* Get current core ID as we can no longer be preempted. */
                const BaseType_t xCurCoreID = portGET_CORE_ID();

                /* Remove the task from its current event list. The head entry is
                 * the highest priority waiter because event lists are sorted. */
                pxUnblockedTCB = listGET_OWNER_OF_HEAD_ENTRY( pxEventList );
                configASSERT( pxUnblockedTCB );
                listREMOVE_ITEM( &( pxUnblockedTCB->xEventListItem ) );

                /* Add the task to the ready list if a core with compatible affinity
                 * has NOT suspended its scheduler. This occurs when:
                 * - The task is pinned, and the pinned core's scheduler is running
                 * - The task is unpinned, and at least one of the core's scheduler is running */
                if( taskCAN_BE_SCHEDULED( pxUnblockedTCB ) == pdTRUE )
                {
                    listREMOVE_ITEM( &( pxUnblockedTCB->xStateListItem ) );
                    prvAddTaskToReadyList( pxUnblockedTCB );

                    #if ( configUSE_TICKLESS_IDLE != 0 )
                    {
                        /* If a task is blocked on a kernel object then xNextTaskUnblockTime
                         * might be set to the blocked task's time out time. If the task is
                         * unblocked for a reason other than a timeout xNextTaskUnblockTime is
                         * normally left unchanged, because it is automatically reset to a new
                         * value when the tick count equals xNextTaskUnblockTime. However if
                         * tickless idling is used it might be more important to enter sleep mode
                         * at the earliest possible time - so reset xNextTaskUnblockTime here to
                         * ensure it is updated at the earliest possible time. */
                        prvResetNextTaskUnblockTime();
                    }
                    #endif
                }
                else
                {
                    /* We arrive here due to one of the following possibilities:
                     * - The task is pinned to core X and core X has suspended its scheduler
                     * - The task is unpinned and both cores have suspended their schedulers
                     * Therefore, we add the task to one of the pending lists:
                     * - If the task is pinned to core X, add it to core X's pending list
                     * - If the task is unpinned, add it to the current core's pending list */
                    UBaseType_t uxPendCore = ( ( pxUnblockedTCB->xCoreID == tskNO_AFFINITY ) ? xCurCoreID : pxUnblockedTCB->xCoreID );
                    configASSERT( uxSchedulerSuspended[ uxPendCore ] != ( UBaseType_t ) 0U );

                    /* Add the task to the selected core's pending ready list; it
                     * will be moved to the ready list when that scheduler resumes. */
                    listINSERT_END( &( xPendingReadyList[ uxPendCore ] ), &( pxUnblockedTCB->xEventListItem ) );
                }

                if( taskIS_YIELD_REQUIRED( pxUnblockedTCB, xCurCoreID, pdFALSE ) == pdTRUE )
                {
                    /* The unblocked task requires the current core to yield */
                    xReturn = pdTRUE;

                    /* Mark that a yield is pending in case the user is not using the
                     * "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
                    xYieldPending[ xCurCoreID ] = pdTRUE;
                }
                else
                {
                    xReturn = pdFALSE;
                }
            }
            else
            {
                /* The pxEventList was emptied before we entered the critical
                 * section. Nothing to do except return pdFALSE. */
                xReturn = pdFALSE;
            }
        }

        /* Release the previously taken kernel lock. */
        if( portCHECK_IF_IN_ISR() == pdFALSE )
        {
            taskEXIT_CRITICAL( &xKernelLock );
        }
        else
        {
            taskEXIT_CRITICAL_ISR( &xKernelLock );
        }

        return xReturn;
    }
  3354. #else /* configNUMBER_OF_CORES > 1 */
/**
 * @brief Unblock the highest priority task waiting on pxEventList (single-core).
 *
 * MUST be called from a critical section (task or ISR). Returns pdTRUE if the
 * unblocked task has a higher priority than the running task, i.e. the caller
 * should request a context switch.
 */
    BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
    {
        TCB_t * pxUnblockedTCB;
        BaseType_t xReturn;

        /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be
         * called from a critical section within an ISR. */

        /* The event list is sorted in priority order, so the first in the list can
         * be removed as it is known to be the highest priority. Remove the TCB from
         * the delayed list, and add it to the ready list.
         *
         * If an event is for a queue that is locked then this function will never
         * get called - the lock count on the queue will get modified instead. This
         * means exclusive access to the event list is guaranteed here.
         *
         * This function assumes that a check has already been made to ensure that
         * pxEventList is not empty. */
        pxUnblockedTCB = listGET_OWNER_OF_HEAD_ENTRY( pxEventList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
        configASSERT( pxUnblockedTCB );
        listREMOVE_ITEM( &( pxUnblockedTCB->xEventListItem ) );

        if( uxSchedulerSuspended[ 0 ] == ( UBaseType_t ) pdFALSE )
        {
            listREMOVE_ITEM( &( pxUnblockedTCB->xStateListItem ) );
            prvAddTaskToReadyList( pxUnblockedTCB );

            #if ( configUSE_TICKLESS_IDLE != 0 )
            {
                /* If a task is blocked on a kernel object then xNextTaskUnblockTime
                 * might be set to the blocked task's time out time. If the task is
                 * unblocked for a reason other than a timeout xNextTaskUnblockTime is
                 * normally left unchanged, because it is automatically reset to a new
                 * value when the tick count equals xNextTaskUnblockTime. However if
                 * tickless idling is used it might be more important to enter sleep mode
                 * at the earliest possible time - so reset xNextTaskUnblockTime here to
                 * ensure it is updated at the earliest possible time. */
                prvResetNextTaskUnblockTime();
            }
            #endif
        }
        else
        {
            /* The delayed and ready lists cannot be accessed, so hold this task
             * pending until the scheduler is resumed. */
            listINSERT_END( &( xPendingReadyList[ 0 ] ), &( pxUnblockedTCB->xEventListItem ) );
        }

        if( pxUnblockedTCB->uxPriority > pxCurrentTCBs[ 0 ]->uxPriority )
        {
            /* Return true if the task removed from the event list has a higher
             * priority than the calling task. This allows the calling task to know if
             * it should force a context switch now. */
            xReturn = pdTRUE;

            /* Mark that a yield is pending in case the user is not using the
             * "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
            xYieldPending[ 0 ] = pdTRUE;
        }
        else
        {
            xReturn = pdFALSE;
        }

        return xReturn;
    }
  3414. #endif /* configNUMBER_OF_CORES > 1 */
  3415. /*-----------------------------------------------------------*/
  3416. void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem,
  3417. const TickType_t xItemValue )
  3418. {
  3419. TCB_t * pxUnblockedTCB;
  3420. /* Get current core ID as we can no longer be preempted. */
  3421. const BaseType_t xCurCoreID = portGET_CORE_ID();
  3422. #if ( configNUM_CORES > 1 )
  3423. /* THIS FUNCTION MUST BE CALLED WITH THE KERNEL LOCK ALREADY TAKEN.
  3424. * It is used by the event flags implementation, thus those functions
  3425. * should call prvTakeKernelLock() before calling this function. */
  3426. #else /* configNUM_CORES > 1 */
  3427. /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
  3428. * the event flags implementation. */
  3429. configASSERT( uxSchedulerSuspended[ 0 ] != ( UBaseType_t ) 0U );
  3430. #endif /* configNUM_CORES > 1 */
  3431. /* Store the new item value in the event list. */
  3432. listSET_LIST_ITEM_VALUE( pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
  3433. /* Remove the event list form the event flag. Interrupts do not access
  3434. * event flags. */
  3435. pxUnblockedTCB = listGET_LIST_ITEM_OWNER( pxEventListItem ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
  3436. configASSERT( pxUnblockedTCB );
  3437. listREMOVE_ITEM( pxEventListItem );
  3438. #if ( configUSE_TICKLESS_IDLE != 0 )
  3439. {
  3440. /* If a task is blocked on a kernel object then xNextTaskUnblockTime
  3441. * might be set to the blocked task's time out time. If the task is
  3442. * unblocked for a reason other than a timeout xNextTaskUnblockTime is
  3443. * normally left unchanged, because it is automatically reset to a new
  3444. * value when the tick count equals xNextTaskUnblockTime. However if
  3445. * tickless idling is used it might be more important to enter sleep mode
  3446. * at the earliest possible time - so reset xNextTaskUnblockTime here to
  3447. * ensure it is updated at the earliest possible time. */
  3448. prvResetNextTaskUnblockTime();
  3449. }
  3450. #endif
  3451. #if ( configNUM_CORES > 1 )
  3452. /* Add the task to the ready list if a core with compatible affinity
  3453. * has NOT suspended its scheduler. This occurs when:
  3454. * - The task is pinned, and the pinned core's scheduler is running
  3455. * - The task is unpinned, and at least one of the core's scheduler is
  3456. * running */
  3457. if( taskCAN_BE_SCHEDULED( pxUnblockedTCB ) == pdFALSE )
  3458. {
  3459. /* We arrive here due to one of the following possibilities:
  3460. * - The task is pinned to core X and core X has suspended its scheduler
  3461. * - The task is unpinned and both cores have suspend their schedulers
  3462. * Therefore, we add the task to one of the pending lists:
  3463. * - If the task is pinned to core X, add it to core X's pending list
  3464. * - If the task is unpinned, add it to the current core's pending list */
  3465. BaseType_t xPendingListCore = ( ( pxUnblockedTCB->xCoreID == tskNO_AFFINITY ) ? xCurCoreID : pxUnblockedTCB->xCoreID );
  3466. configASSERT( uxSchedulerSuspended[ xPendingListCore ] != ( UBaseType_t ) 0U );
  3467. /* The delayed and ready lists cannot be accessed, so hold this task
  3468. * pending until the scheduler is resumed. */
  3469. listINSERT_END( &( xPendingReadyList[ xPendingListCore ] ), &( pxUnblockedTCB->xEventListItem ) );
  3470. }
  3471. else
  3472. #else /* configNUM_CORES > 1 */
  3473. /* In single core, the caller of this function has already suspended the
  3474. * scheduler, which means we have exclusive access to the ready list.
  3475. * We add the unblocked task to the ready list directly. */
  3476. #endif /* configNUM_CORES > 1 */
  3477. {
  3478. /* Remove the task from the delayed list and add it to the ready list. The
  3479. * scheduler is suspended so interrupts will not be accessing the ready
  3480. * lists. */
  3481. listREMOVE_ITEM( &( pxUnblockedTCB->xStateListItem ) );
  3482. prvAddTaskToReadyList( pxUnblockedTCB );
  3483. if( taskIS_YIELD_REQUIRED( pxUnblockedTCB, xCurCoreID, pdFALSE ) == pdTRUE )
  3484. {
  3485. /* The unblocked task has a priority above that of the calling task, so
  3486. * a context switch is required. This function is called with the
  3487. * scheduler suspended so xYieldPending is set so the context switch
  3488. * occurs immediately that the scheduler is resumed (unsuspended). */
  3489. xYieldPending[ xCurCoreID ] = pdTRUE;
  3490. }
  3491. }
  3492. }
  3493. /*-----------------------------------------------------------*/
  3494. void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut )
  3495. {
  3496. configASSERT( pxTimeOut );
  3497. taskENTER_CRITICAL( &xKernelLock );
  3498. {
  3499. pxTimeOut->xOverflowCount = xNumOfOverflows;
  3500. pxTimeOut->xTimeOnEntering = xTickCount;
  3501. }
  3502. taskEXIT_CRITICAL( &xKernelLock );
  3503. }
  3504. /*-----------------------------------------------------------*/
  3505. void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut )
  3506. {
  3507. /* For internal use only as it does not use a critical section. */
  3508. pxTimeOut->xOverflowCount = xNumOfOverflows;
  3509. pxTimeOut->xTimeOnEntering = xTickCount;
  3510. }
  3511. /*-----------------------------------------------------------*/
/**
 * @brief Determine whether a timeout recorded with vTaskSetTimeOutState()
 *        has expired, updating *pxTicksToWait with the time remaining.
 *
 * @return pdTRUE if the timeout has expired (or the delay was aborted),
 *         pdFALSE otherwise. When pdFALSE is returned, *pxTicksToWait is
 *         reduced by the elapsed time and pxTimeOut is re-armed.
 *
 * Note: the "if/else" chain below deliberately spans the preprocessor
 * blocks - each optional "#if" branch ends in a dangling "else" that
 * attaches to the next condition.
 */
BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
                                 TickType_t * const pxTicksToWait )
{
    BaseType_t xReturn;

    configASSERT( pxTimeOut );
    configASSERT( pxTicksToWait );

    taskENTER_CRITICAL( &xKernelLock );
    {
        /* Minor optimisation. The tick count cannot change in this block. */
        const TickType_t xConstTickCount = xTickCount;
        const TickType_t xElapsedTime = xConstTickCount - pxTimeOut->xTimeOnEntering;

        /* Get current core ID as we can no longer be preempted. */
        const BaseType_t xCurCoreID = portGET_CORE_ID();

        #if ( INCLUDE_xTaskAbortDelay == 1 )
            if( pxCurrentTCBs[ xCurCoreID ]->ucDelayAborted != ( uint8_t ) pdFALSE )
            {
                /* The delay was aborted, which is not the same as a time out,
                 * but has the same result. */
                pxCurrentTCBs[ xCurCoreID ]->ucDelayAborted = pdFALSE;
                xReturn = pdTRUE;
            }
            else
        #endif

        #if ( INCLUDE_vTaskSuspend == 1 )
            if( *pxTicksToWait == portMAX_DELAY )
            {
                /* If INCLUDE_vTaskSuspend is set to 1 and the block time
                 * specified is the maximum block time then the task should block
                 * indefinitely, and therefore never time out. */
                xReturn = pdFALSE;
            }
            else
        #endif

        if( ( xNumOfOverflows != pxTimeOut->xOverflowCount ) && ( xConstTickCount >= pxTimeOut->xTimeOnEntering ) ) /*lint !e525 Indentation preferred as is to make code within pre-processor directives clearer. */
        {
            /* The tick count has overflowed since vTaskSetTimeOut() was
             * called, yet is again greater than or equal to the recorded entry
             * time - so it must have wrapped all the way around and gone past
             * the entry time a second time. More than the requested number of
             * ticks has therefore passed. */
            xReturn = pdTRUE;
            *pxTicksToWait = ( TickType_t ) 0;
        }
        else if( xElapsedTime < *pxTicksToWait ) /*lint !e961 Explicit casting is only redundant with some compilers, whereas others require it to prevent integer conversion errors. */
        {
            /* Not a genuine timeout. Adjust parameters for time remaining. */
            *pxTicksToWait -= xElapsedTime;
            vTaskInternalSetTimeOutState( pxTimeOut );
            xReturn = pdFALSE;
        }
        else
        {
            /* Elapsed time meets or exceeds the requested block time. */
            *pxTicksToWait = ( TickType_t ) 0;
            xReturn = pdTRUE;
        }
    }
    taskEXIT_CRITICAL( &xKernelLock );

    return xReturn;
}
  3571. /*-----------------------------------------------------------*/
  3572. void vTaskMissedYield( void )
  3573. {
  3574. xYieldPending[ portGET_CORE_ID() ] = pdTRUE;
  3575. }
  3576. /*-----------------------------------------------------------*/
  3577. #if ( configUSE_TRACE_FACILITY == 1 )
  3578. UBaseType_t uxTaskGetTaskNumber( TaskHandle_t xTask )
  3579. {
  3580. UBaseType_t uxReturn;
  3581. TCB_t const * pxTCB;
  3582. if( xTask != NULL )
  3583. {
  3584. pxTCB = xTask;
  3585. uxReturn = pxTCB->uxTaskNumber;
  3586. }
  3587. else
  3588. {
  3589. uxReturn = 0U;
  3590. }
  3591. return uxReturn;
  3592. }
  3593. #endif /* configUSE_TRACE_FACILITY */
  3594. /*-----------------------------------------------------------*/
  3595. #if ( configUSE_TRACE_FACILITY == 1 )
  3596. void vTaskSetTaskNumber( TaskHandle_t xTask,
  3597. const UBaseType_t uxHandle )
  3598. {
  3599. TCB_t * pxTCB;
  3600. if( xTask != NULL )
  3601. {
  3602. pxTCB = xTask;
  3603. pxTCB->uxTaskNumber = uxHandle;
  3604. }
  3605. }
  3606. #endif /* configUSE_TRACE_FACILITY */
  3607. /*
  3608. * -----------------------------------------------------------
  3609. * The Idle task.
  3610. * ----------------------------------------------------------
  3611. *
  3612. * The portTASK_FUNCTION() macro is used to allow port/compiler specific
  3613. * language extensions. The equivalent prototype for this function is:
  3614. *
  3615. * void prvIdleTask( void *pvParameters );
  3616. *
  3617. */
static portTASK_FUNCTION( prvIdleTask, pvParameters )
{
    /* Stop warnings. */
    ( void ) pvParameters;

    /** THIS IS THE RTOS IDLE TASK - WHICH IS CREATED AUTOMATICALLY WHEN THE
     * SCHEDULER IS STARTED. **/

    /* In case a task that has a secure context deletes itself, in which case
     * the idle task is responsible for deleting the task's secure context, if
     * any. */
    portALLOCATE_SECURE_CONTEXT( configMINIMAL_SECURE_STACK_SIZE );

    for( ; ; )
    {
        /* See if any tasks have deleted themselves - if so then the idle task
         * is responsible for freeing the deleted task's TCB and stack. */
        prvCheckTasksWaitingTermination();

        #if ( configUSE_PREEMPTION == 0 )
        {
            /* If we are not using preemption we keep forcing a task switch to
             * see if any other task has become available. If we are using
             * preemption we don't need to do this as any task becoming available
             * will automatically get the processor anyway. */
            taskYIELD();
        }
        #endif /* configUSE_PREEMPTION */

        #if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) )
        {
            /* When using preemption tasks of equal priority will be
             * timesliced. If a task that is sharing the idle priority is ready
             * to run then the idle task should yield before the end of the
             * timeslice.
             *
             * A critical region is not required here as we are just reading from
             * the list, and an occasional incorrect value will not matter. If
             * the ready list at the idle priority contains more than one task
             * then a task other than the idle task is ready to execute. */
            if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) 1 )
            {
                taskYIELD();
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */

        #if ( configUSE_IDLE_HOOK == 1 )
        {
            extern void vApplicationIdleHook( void );

            /* Call the user defined function from within the idle task. This
             * allows the application designer to add background functionality
             * without the overhead of a separate task.
             * NOTE: vApplicationIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES,
             * CALL A FUNCTION THAT MIGHT BLOCK. */
            vApplicationIdleHook();
        }
        #endif /* configUSE_IDLE_HOOK */

        /* Call the esp-idf idle hook system (always enabled, independently of
         * the standard configUSE_IDLE_HOOK hook above). Todo IDF-8180 */
        extern void esp_vApplicationIdleHook( void );
        esp_vApplicationIdleHook();

        /* This conditional compilation should use inequality to 0, not equality
         * to 1. This is to ensure portSUPPRESS_TICKS_AND_SLEEP() is called when
         * user defined low power mode implementations require
         * configUSE_TICKLESS_IDLE to be set to a value other than 1. */
        #if ( configUSE_TICKLESS_IDLE != 0 )
        {
            TickType_t xExpectedIdleTime;

            /* It is not desirable to suspend then resume the scheduler on
             * each iteration of the idle task. Therefore, a preliminary
             * test of the expected idle time is performed without the
             * scheduler suspended. The result here is not necessarily
             * valid. */
            xExpectedIdleTime = prvGetExpectedIdleTime();

            if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
            {
                prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock );
                {
                    /* Now the scheduler is suspended, the expected idle
                     * time can be sampled again, and this time its value can
                     * be used. */
                    configASSERT( xNextTaskUnblockTime >= xTickCount );
                    xExpectedIdleTime = prvGetExpectedIdleTime();

                    /* Define the following macro to set xExpectedIdleTime to 0
                     * if the application does not want
                     * portSUPPRESS_TICKS_AND_SLEEP() to be called. */
                    configPRE_SUPPRESS_TICKS_AND_SLEEP_PROCESSING( xExpectedIdleTime );

                    if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
                    {
                        traceLOW_POWER_IDLE_BEGIN();
                        portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime );
                        traceLOW_POWER_IDLE_END();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        #endif /* configUSE_TICKLESS_IDLE */
    }
}
  3724. /*-----------------------------------------------------------*/
#if ( configUSE_TICKLESS_IDLE != 0 )

/*
 * Called by the port's portSUPPRESS_TICKS_AND_SLEEP() implementation, from
 * within a critical section, to confirm that it is still safe to enter a
 * low power state.
 *
 * Returns:
 *  - eAbortSleep            : an event occurred since the decision to sleep
 *                             was made (task readied, yield pended, or - on
 *                             single core - a tick held pending), so sleep
 *                             must be aborted.
 *  - eStandardSleep         : sleep, but wake in time for the next timeout.
 *  - eNoTasksWaitingTimeout : no task has a timeout pending, so all clocks
 *                             may be stopped indefinitely.
 */
    eSleepModeStatus eTaskConfirmSleepModeStatus( void )
    {
        #if ( INCLUDE_vTaskSuspend == 1 )
            /* The idle task exists in addition to the application tasks. */
            const UBaseType_t uxNonApplicationTasks = 1;
        #endif /* INCLUDE_vTaskSuspend */

        eSleepModeStatus eReturn = eStandardSleep;

        /* This function must be called from a critical section. */

        /* Get current core ID as we can no longer be preempted. */
        const BaseType_t xCurCoreID = portGET_CORE_ID();

        if( listCURRENT_LIST_LENGTH( &xPendingReadyList[ xCurCoreID ] ) != 0 )
        {
            /* A task was made ready while the scheduler was suspended. */
            eReturn = eAbortSleep;
        }
        else if( xYieldPending[ xCurCoreID ] != pdFALSE )
        {
            /* A yield was pended while the scheduler was suspended. */
            eReturn = eAbortSleep;
        }

        #if ( configNUMBER_OF_CORES == 1 )
            else if( xPendedTicks != 0 )
            {
                /* A tick interrupt has already occurred but was held pending
                 * because the scheduler is suspended. */
                eReturn = eAbortSleep;
            }
        #endif /* configNUMBER_OF_CORES == 1 */

        #if ( INCLUDE_vTaskSuspend == 1 )
            else if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == ( uxCurrentNumberOfTasks - uxNonApplicationTasks ) )
            {
                /* If all the tasks are in the suspended list (which might mean they
                 * have an infinite block time rather than actually being suspended)
                 * then it is safe to turn all clocks off and just wait for external
                 * interrupts. */
                eReturn = eNoTasksWaitingTimeout;
            }
        #endif /* INCLUDE_vTaskSuspend */

        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        return eReturn;
    }

#endif /* configUSE_TICKLESS_IDLE */
  3771. /*-----------------------------------------------------------*/
  3772. #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
  3773. void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
  3774. BaseType_t xIndex,
  3775. void * pvValue )
  3776. {
  3777. TCB_t * pxTCB;
  3778. if( ( xIndex >= 0 ) &&
  3779. ( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS ) )
  3780. {
  3781. pxTCB = prvGetTCBFromHandle( xTaskToSet );
  3782. configASSERT( pxTCB != NULL );
  3783. pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
  3784. }
  3785. }
  3786. #endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
  3787. /*-----------------------------------------------------------*/
  3788. #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
  3789. void * pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
  3790. BaseType_t xIndex )
  3791. {
  3792. void * pvReturn = NULL;
  3793. TCB_t * pxTCB;
  3794. if( ( xIndex >= 0 ) &&
  3795. ( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS ) )
  3796. {
  3797. pxTCB = prvGetTCBFromHandle( xTaskToQuery );
  3798. pvReturn = pxTCB->pvThreadLocalStoragePointers[ xIndex ];
  3799. }
  3800. else
  3801. {
  3802. pvReturn = NULL;
  3803. }
  3804. return pvReturn;
  3805. }
  3806. #endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
  3807. /*-----------------------------------------------------------*/
  3808. #if ( portUSING_MPU_WRAPPERS == 1 )
  3809. void vTaskAllocateMPURegions( TaskHandle_t xTaskToModify,
  3810. const MemoryRegion_t * const xRegions )
  3811. {
  3812. TCB_t * pxTCB;
  3813. /* If null is passed in here then we are modifying the MPU settings of
  3814. * the calling task. */
  3815. pxTCB = prvGetTCBFromHandle( xTaskToModify );
  3816. vPortStoreTaskMPUSettings( &( pxTCB->xMPUSettings ), xRegions, NULL, 0 );
  3817. }
  3818. #endif /* portUSING_MPU_WRAPPERS */
  3819. /*-----------------------------------------------------------*/
  3820. static void prvInitialiseTaskLists( void )
  3821. {
  3822. UBaseType_t uxPriority;
  3823. UBaseType_t x;
  3824. for( uxPriority = ( UBaseType_t ) 0U; uxPriority < ( UBaseType_t ) configMAX_PRIORITIES; uxPriority++ )
  3825. {
  3826. vListInitialise( &( pxReadyTasksLists[ uxPriority ] ) );
  3827. }
  3828. vListInitialise( &xDelayedTaskList1 );
  3829. vListInitialise( &xDelayedTaskList2 );
  3830. for( x = 0; x < configNUMBER_OF_CORES; x++ )
  3831. {
  3832. vListInitialise( &xPendingReadyList[ x ] );
  3833. }
  3834. #if ( INCLUDE_vTaskDelete == 1 )
  3835. {
  3836. vListInitialise( &xTasksWaitingTermination );
  3837. }
  3838. #endif /* INCLUDE_vTaskDelete */
  3839. #if ( INCLUDE_vTaskSuspend == 1 )
  3840. {
  3841. vListInitialise( &xSuspendedTaskList );
  3842. }
  3843. #endif /* INCLUDE_vTaskSuspend */
  3844. /* Start with pxDelayedTaskList using list1 and the pxOverflowDelayedTaskList
  3845. * using list2. */
  3846. pxDelayedTaskList = &xDelayedTaskList1;
  3847. pxOverflowDelayedTaskList = &xDelayedTaskList2;
  3848. }
  3849. /*-----------------------------------------------------------*/
/*
 * Reclaim the TCBs and stacks of tasks that deleted themselves (or were
 * deleted by another task) and are parked on xTasksWaitingTermination.
 *
 * On multi-core builds a task still running on the other core cannot be
 * freed yet, so the list is walked for the first non-running candidate;
 * the actual prvDeleteTCB() call is made outside the critical section.
 */
static void prvCheckTasksWaitingTermination( void )
{
    /** THIS FUNCTION IS CALLED FROM THE RTOS IDLE TASK **/

    #if ( INCLUDE_vTaskDelete == 1 )
    {
        TCB_t * pxTCB;

        /* uxDeletedTasksWaitingCleanUp is used to prevent taskENTER_CRITICAL()
         * being called too often in the idle task. */
        while( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U )
        {
            #if ( configNUMBER_OF_CORES > 1 )
            {
                pxTCB = NULL;

                taskENTER_CRITICAL( &xKernelLock );
                {
                    /* List may have already been cleared by the other core. Check again */
                    if( listLIST_IS_EMPTY( &xTasksWaitingTermination ) == pdFALSE )
                    {
                        /* We can't delete a task if it is still running on
                         * the other core. Keep walking the list until we
                         * find a task we can free, or until we walk the
                         * entire list. */
                        ListItem_t * xEntry;

                        for( xEntry = listGET_HEAD_ENTRY( &xTasksWaitingTermination ); xEntry != listGET_END_MARKER( &xTasksWaitingTermination ); xEntry = listGET_NEXT( xEntry ) )
                        {
                            if( taskIS_CURRENTLY_RUNNING( ( ( TCB_t * ) listGET_LIST_ITEM_OWNER( xEntry ) ) ) == pdFALSE )
                            {
                                /* Candidate found: unlink it and update the
                                 * bookkeeping counters while still inside the
                                 * critical section. */
                                pxTCB = ( TCB_t * ) listGET_LIST_ITEM_OWNER( xEntry );
                                ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
                                --uxCurrentNumberOfTasks;
                                --uxDeletedTasksWaitingCleanUp;
                                break;
                            }
                        }
                    }
                }
                taskEXIT_CRITICAL( &xKernelLock );

                if( pxTCB != NULL )
                {
                    /* Free the TCB/stack outside the critical section. */
                    prvDeleteTCB( pxTCB );
                }
                else
                {
                    /* No task found to delete, break out of loop */
                    break;
                }
            }
            #else /* configNUMBER_OF_CORES > 1 */
            {
                /* Single core: the head entry can always be freed because a
                 * self-deleting task is no longer running by the time the
                 * idle task executes. */
                taskENTER_CRITICAL( &xKernelLock );
                {
                    pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
                    ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
                    --uxCurrentNumberOfTasks;
                    --uxDeletedTasksWaitingCleanUp;
                }
                taskEXIT_CRITICAL( &xKernelLock );

                prvDeleteTCB( pxTCB );
            }
            #endif /* configNUMBER_OF_CORES > 1 */
        }
    }
    #endif /* INCLUDE_vTaskDelete */
}
  3914. /*-----------------------------------------------------------*/
  3915. #if ( configUSE_TRACE_FACILITY == 1 )
  3916. void vTaskGetInfo( TaskHandle_t xTask,
  3917. TaskStatus_t * pxTaskStatus,
  3918. BaseType_t xGetFreeStackSpace,
  3919. eTaskState eState )
  3920. {
  3921. TCB_t * pxTCB;
  3922. /* xTask is NULL then get the state of the calling task. */
  3923. pxTCB = prvGetTCBFromHandle( xTask );
  3924. /* A critical section is required for SMP in case another core modifies
  3925. * the task simultaneously. */
  3926. taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
  3927. {
  3928. pxTaskStatus->xHandle = ( TaskHandle_t ) pxTCB;
  3929. pxTaskStatus->pcTaskName = ( const char * ) &( pxTCB->pcTaskName[ 0 ] );
  3930. pxTaskStatus->uxCurrentPriority = pxTCB->uxPriority;
  3931. pxTaskStatus->pxStackBase = pxTCB->pxStack;
  3932. #if ( ( portSTACK_GROWTH > 0 ) && ( configRECORD_STACK_HIGH_ADDRESS == 1 ) )
  3933. pxTaskStatus->pxTopOfStack = pxTCB->pxTopOfStack;
  3934. pxTaskStatus->pxEndOfStack = pxTCB->pxEndOfStack;
  3935. #endif
  3936. pxTaskStatus->xTaskNumber = pxTCB->uxTCBNumber;
  3937. /* Todo: Remove xCoreID for single core builds (IDF-7894) */
  3938. pxTaskStatus->xCoreID = pxTCB->xCoreID;
  3939. #if ( configUSE_MUTEXES == 1 )
  3940. {
  3941. pxTaskStatus->uxBasePriority = pxTCB->uxBasePriority;
  3942. }
  3943. #else
  3944. {
  3945. pxTaskStatus->uxBasePriority = 0;
  3946. }
  3947. #endif
  3948. #if ( configGENERATE_RUN_TIME_STATS == 1 )
  3949. {
  3950. pxTaskStatus->ulRunTimeCounter = pxTCB->ulRunTimeCounter;
  3951. }
  3952. #else
  3953. {
  3954. pxTaskStatus->ulRunTimeCounter = ( configRUN_TIME_COUNTER_TYPE ) 0;
  3955. }
  3956. #endif
  3957. /* Obtaining the task state is a little fiddly, so is only done if the
  3958. * value of eState passed into this function is eInvalid - otherwise the
  3959. * state is just set to whatever is passed in. */
  3960. if( eState != eInvalid )
  3961. {
  3962. if( pxTCB == pxCurrentTCBs[ portGET_CORE_ID() ] )
  3963. {
  3964. pxTaskStatus->eCurrentState = eRunning;
  3965. }
  3966. else
  3967. {
  3968. pxTaskStatus->eCurrentState = eState;
  3969. #if ( INCLUDE_vTaskSuspend == 1 )
  3970. {
  3971. /* If the task is in the suspended list then there is a
  3972. * chance it is actually just blocked indefinitely - so really
  3973. * it should be reported as being in the Blocked state. */
  3974. if( eState == eSuspended )
  3975. {
  3976. #if ( configNUMBER_OF_CORES == 1 )
  3977. {
  3978. /* Single core uses a scheduler suspension to
  3979. * atomically check if the task task is blocked. */
  3980. vTaskSuspendAll();
  3981. }
  3982. #endif /* configNUMBER_OF_CORES == 1 */
  3983. {
  3984. if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
  3985. {
  3986. pxTaskStatus->eCurrentState = eBlocked;
  3987. }
  3988. }
  3989. #if ( configNUMBER_OF_CORES == 1 )
  3990. {
  3991. ( void ) xTaskResumeAll();
  3992. }
  3993. #endif /* configNUMBER_OF_CORES == 1 */
  3994. }
  3995. }
  3996. #endif /* INCLUDE_vTaskSuspend */
  3997. }
  3998. }
  3999. else
  4000. {
  4001. pxTaskStatus->eCurrentState = eTaskGetState( pxTCB );
  4002. }
  4003. /* Obtaining the stack space takes some time, so the xGetFreeStackSpace
  4004. * parameter is provided to allow it to be skipped. */
  4005. if( xGetFreeStackSpace != pdFALSE )
  4006. {
  4007. #if ( portSTACK_GROWTH > 0 )
  4008. {
  4009. pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxEndOfStack );
  4010. }
  4011. #else
  4012. {
  4013. pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxStack );
  4014. }
  4015. #endif
  4016. }
  4017. else
  4018. {
  4019. pxTaskStatus->usStackHighWaterMark = 0;
  4020. }
  4021. }
  4022. /* Exit the previously entered critical section. */
  4023. taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
  4024. }
  4025. #endif /* configUSE_TRACE_FACILITY */
  4026. /*-----------------------------------------------------------*/
#if ( configUSE_TRACE_FACILITY == 1 )

/*
 * Fill consecutive entries of pxTaskStatusArray with the status of every
 * task in pxList, tagging each with eState.  Returns the number of entries
 * written.  The caller guarantees the array is large enough for all tasks
 * in the list.  Note: listGET_OWNER_OF_NEXT_ENTRY advances the list's
 * internal index, so the do/while runs until the walk wraps back to the
 * first task seen.
 */
    static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t * pxTaskStatusArray,
                                                     List_t * pxList,
                                                     eTaskState eState )
    {
        configLIST_VOLATILE TCB_t * pxNextTCB;
        configLIST_VOLATILE TCB_t * pxFirstTCB;
        UBaseType_t uxTask = 0;

        if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
        {
            /* Remember the first task so we know when the walk has wrapped. */
            listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */

            /* Populate an TaskStatus_t structure within the
             * pxTaskStatusArray array for each task that is referenced from
             * pxList. See the definition of TaskStatus_t in task.h for the
             * meaning of each TaskStatus_t structure member. */
            do
            {
                listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
                vTaskGetInfo( ( TaskHandle_t ) pxNextTCB, &( pxTaskStatusArray[ uxTask ] ), pdTRUE, eState );
                uxTask++;
            } while( pxNextTCB != pxFirstTCB );
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        return uxTask;
    }

#endif /* configUSE_TRACE_FACILITY */
  4056. /*-----------------------------------------------------------*/
  4057. #if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
  4058. static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte )
  4059. {
  4060. uint32_t ulCount = 0U;
  4061. while( *pucStackByte == ( uint8_t ) tskSTACK_FILL_BYTE )
  4062. {
  4063. pucStackByte -= portSTACK_GROWTH;
  4064. ulCount++;
  4065. }
  4066. ulCount /= ( uint32_t ) sizeof( StackType_t ); /*lint !e961 Casting is not redundant on smaller architectures. */
  4067. return ( configSTACK_DEPTH_TYPE ) ulCount;
  4068. }
  4069. #endif /* ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) ) */
  4070. /*-----------------------------------------------------------*/
  4071. #if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
  4072. /* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are the
  4073. * same except for their return type. Using configSTACK_DEPTH_TYPE allows the
  4074. * user to determine the return type. It gets around the problem of the value
  4075. * overflowing on 8-bit types without breaking backward compatibility for
  4076. * applications that expect an 8-bit return type. */
  4077. configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask )
  4078. {
  4079. TCB_t * pxTCB;
  4080. uint8_t * pucEndOfStack;
  4081. configSTACK_DEPTH_TYPE uxReturn;
  4082. /* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are
  4083. * the same except for their return type. Using configSTACK_DEPTH_TYPE
  4084. * allows the user to determine the return type. It gets around the
  4085. * problem of the value overflowing on 8-bit types without breaking
  4086. * backward compatibility for applications that expect an 8-bit return
  4087. * type. */
  4088. pxTCB = prvGetTCBFromHandle( xTask );
  4089. #if portSTACK_GROWTH < 0
  4090. {
  4091. pucEndOfStack = ( uint8_t * ) pxTCB->pxStack;
  4092. }
  4093. #else
  4094. {
  4095. pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack;
  4096. }
  4097. #endif
  4098. uxReturn = prvTaskCheckFreeStackSpace( pucEndOfStack );
  4099. return uxReturn;
  4100. }
  4101. #endif /* INCLUDE_uxTaskGetStackHighWaterMark2 */
  4102. /*-----------------------------------------------------------*/
  4103. #if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
  4104. UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask )
  4105. {
  4106. TCB_t * pxTCB;
  4107. uint8_t * pucEndOfStack;
  4108. UBaseType_t uxReturn;
  4109. pxTCB = prvGetTCBFromHandle( xTask );
  4110. #if portSTACK_GROWTH < 0
  4111. {
  4112. pucEndOfStack = ( uint8_t * ) pxTCB->pxStack;
  4113. }
  4114. #else
  4115. {
  4116. pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack;
  4117. }
  4118. #endif
  4119. uxReturn = ( UBaseType_t ) prvTaskCheckFreeStackSpace( pucEndOfStack );
  4120. return uxReturn;
  4121. }
  4122. #endif /* INCLUDE_uxTaskGetStackHighWaterMark */
  4123. /*-----------------------------------------------------------*/
#if ( INCLUDE_vTaskDelete == 1 )

/*
 * Release all memory owned by a task that has been removed from every
 * kernel list: the port-specific TCB cleanup, the TLS block (if the C
 * runtime needs one), and then the stack/TCB themselves - but only the
 * parts that were dynamically allocated.
 */
    static void prvDeleteTCB( TCB_t * pxTCB )
    {
        /* This call is required specifically for the TriCore port. It must be
         * above the vPortFree() calls. The call is also used by ports/demos that
         * want to allocate and clean RAM statically. */
        portCLEAN_UP_TCB( pxTCB );

        #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) )
        {
            /* Free up the memory allocated for the task's TLS Block. */
            /* Note: Fixed bug in upstream. Free TLS block of pxTCB, NOT pxCurrentTCBs */
            configDEINIT_TLS_BLOCK( pxTCB->xTLSBlock );
        }
        #endif

        #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) && ( portUSING_MPU_WRAPPERS == 0 ) )
        {
            /* The task can only have been allocated dynamically - free both
             * the stack and TCB. */
            vPortFreeStack( pxTCB->pxStack );
            vPortFree( pxTCB );
        }
        #elif ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
        {
            /* The task could have been allocated statically or dynamically, so
             * check what was statically allocated before trying to free the
             * memory. */
            if( pxTCB->ucStaticallyAllocated == tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB )
            {
                /* Both the stack and TCB were allocated dynamically, so both
                 * must be freed. */
                vPortFreeStack( pxTCB->pxStack );
                vPortFree( pxTCB );
            }
            else if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_ONLY )
            {
                /* Only the stack was statically allocated, so the TCB is the
                 * only memory that must be freed. */
                vPortFree( pxTCB );
            }
            else
            {
                /* Neither the stack nor the TCB were allocated dynamically, so
                 * nothing needs to be freed. */
                configASSERT( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_AND_TCB );
                mtCOVERAGE_TEST_MARKER();
            }
        }
        #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
    }

#endif /* INCLUDE_vTaskDelete */
  4174. /*-----------------------------------------------------------*/
  4175. static void prvResetNextTaskUnblockTime( void )
  4176. {
  4177. if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
  4178. {
  4179. /* The new current delayed list is empty. Set xNextTaskUnblockTime to
  4180. * the maximum possible value so it is extremely unlikely that the
  4181. * if( xTickCount >= xNextTaskUnblockTime ) test will pass until
  4182. * there is an item in the delayed list. */
  4183. xNextTaskUnblockTime = portMAX_DELAY;
  4184. }
  4185. else
  4186. {
  4187. /* The new current delayed list is not empty, get the value of
  4188. * the item at the head of the delayed list. This is the time at
  4189. * which the task at the head of the delayed list should be removed
  4190. * from the Blocked state. */
  4191. xNextTaskUnblockTime = listGET_ITEM_VALUE_OF_HEAD_ENTRY( pxDelayedTaskList );
  4192. }
  4193. }
  4194. /*-----------------------------------------------------------*/
  4195. #if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
  4196. TaskHandle_t xTaskGetCurrentTaskHandle( void )
  4197. {
  4198. TaskHandle_t xReturn;
  4199. UBaseType_t uxSavedInterruptStatus;
  4200. /* For SMP, we need to disable interrupts to ensure the caller does not
  4201. * switch cores in between portGET_CORE_ID() and fetching the current
  4202. * core's TCB. We use the ISR versions of interrupt macros as this
  4203. * function could be called inside critical sections.
  4204. *
  4205. * For single-core a critical section is not required as this is not
  4206. * called from an interrupt and the current TCB will always be the same
  4207. * for any individual execution thread. */
  4208. uxSavedInterruptStatus = taskDISABLE_INTERRUPTS_ISR_SMP_ONLY();
  4209. {
  4210. xReturn = pxCurrentTCBs[ portGET_CORE_ID() ];
  4211. }
  4212. taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxSavedInterruptStatus );
  4213. return xReturn;
  4214. }
  4215. #endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
  4216. /*-----------------------------------------------------------*/
  4217. #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
  4218. BaseType_t xTaskGetSchedulerState( void )
  4219. {
  4220. BaseType_t xReturn;
  4221. UBaseType_t uxSavedInterruptStatus;
  4222. /* For SMP, we need to disable interrupts here to ensure we don't switch
  4223. * cores midway. We forego taking the kernel lock here as a minor
  4224. * optimization as it is not required.
  4225. *
  4226. * - xSchedulerRunning is only ever set by core 0 atomically
  4227. * - Each core will only ever update its own copy of uxSchedulerSuspended.
  4228. *
  4229. * We use the ISR versions of interrupt macros as this function could be
  4230. * called inside critical sections. */
  4231. uxSavedInterruptStatus = taskDISABLE_INTERRUPTS_ISR_SMP_ONLY();
  4232. {
  4233. if( xSchedulerRunning == pdFALSE )
  4234. {
  4235. xReturn = taskSCHEDULER_NOT_STARTED;
  4236. }
  4237. else
  4238. {
  4239. if( uxSchedulerSuspended[ portGET_CORE_ID() ] == ( UBaseType_t ) pdFALSE )
  4240. {
  4241. xReturn = taskSCHEDULER_RUNNING;
  4242. }
  4243. else
  4244. {
  4245. xReturn = taskSCHEDULER_SUSPENDED;
  4246. }
  4247. }
  4248. }
  4249. taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxSavedInterruptStatus );
  4250. return xReturn;
  4251. }
  4252. #endif /* ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) */
  4253. /*-----------------------------------------------------------*/
#if ( configUSE_MUTEXES == 1 )

/*
 * Called when the current task fails to obtain a mutex: raise the mutex
 * holder's priority to the current task's priority so the holder can run
 * and release the mutex sooner (priority inheritance).
 *
 * Returns pdTRUE if inheritance took place (or had already taken place via
 * this mutex holder), otherwise pdFALSE.
 */
    BaseType_t xTaskPriorityInherit( TaskHandle_t const pxMutexHolder )
    {
        TCB_t * const pxMutexHolderTCB = pxMutexHolder;
        BaseType_t xReturn = pdFALSE;

        /* For SMP, we need to take the kernel lock here as we are about to
         * access kernel data structures. */
        taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
        {
            /* Get current core ID as we can no longer be preempted. */
            const BaseType_t xCurCoreID = portGET_CORE_ID();

            /* If the mutex was given back by an interrupt while the queue was
             * locked then the mutex holder might now be NULL. _RB_ Is this still
             * needed as interrupts can no longer use mutexes? */
            if( pxMutexHolder != NULL )
            {
                /* If the holder of the mutex has a priority below the priority of
                 * the task attempting to obtain the mutex then it will temporarily
                 * inherit the priority of the task attempting to obtain the mutex. */
                if( pxMutexHolderTCB->uxPriority < pxCurrentTCBs[ xCurCoreID ]->uxPriority )
                {
                    /* Adjust the mutex holder state to account for its new
                     * priority. Only reset the event list item value if the value is
                     * not being used for anything else. */
                    if( ( listGET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
                    {
                        listSET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCBs[ xCurCoreID ]->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* If the task being modified is in the ready state it will need
                     * to be moved into a new list. */
                    if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxMutexHolderTCB->uxPriority ] ), &( pxMutexHolderTCB->xStateListItem ) ) != pdFALSE )
                    {
                        if( uxListRemove( &( pxMutexHolderTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
                        {
                            /* It is known that the task is in its ready list so
                             * there is no need to check again and the port level
                             * reset macro can be called directly. */
                            portRESET_READY_PRIORITY( pxMutexHolderTCB->uxPriority, uxTopReadyPriority );
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }

                        /* Inherit the priority before being moved into the new list. */
                        pxMutexHolderTCB->uxPriority = pxCurrentTCBs[ xCurCoreID ]->uxPriority;
                        prvAddTaskToReadyList( pxMutexHolderTCB );
                    }
                    else
                    {
                        /* Just inherit the priority. */
                        pxMutexHolderTCB->uxPriority = pxCurrentTCBs[ xCurCoreID ]->uxPriority;
                    }

                    traceTASK_PRIORITY_INHERIT( pxMutexHolderTCB, pxCurrentTCBs[ xCurCoreID ]->uxPriority );

                    /* Inheritance occurred. */
                    xReturn = pdTRUE;
                }
                else
                {
                    if( pxMutexHolderTCB->uxBasePriority < pxCurrentTCBs[ xCurCoreID ]->uxPriority )
                    {
                        /* The base priority of the mutex holder is lower than the
                         * priority of the task attempting to take the mutex, but the
                         * current priority of the mutex holder is not lower than the
                         * priority of the task attempting to take the mutex.
                         * Therefore the mutex holder must have already inherited a
                         * priority, but inheritance would have occurred if that had
                         * not been the case. */
                        xReturn = pdTRUE;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        /* Release the previously taken kernel lock. */
        taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );

        return xReturn;
    }

#endif /* configUSE_MUTEXES */
  4343. /*-----------------------------------------------------------*/
#if ( configUSE_MUTEXES == 1 )

/*
 * Called when the (running) mutex holder gives a mutex back: drop its
 * priority back to its base priority, but only once it holds no other
 * mutexes.  Returns pdTRUE when a context switch is required because the
 * holder's priority was lowered.
 */
    BaseType_t xTaskPriorityDisinherit( TaskHandle_t const pxMutexHolder )
    {
        TCB_t * const pxTCB = pxMutexHolder;
        BaseType_t xReturn = pdFALSE;

        /* For SMP, we need to take the kernel lock here as we are about to
         * access kernel data structures. */
        taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
        {
            if( pxMutexHolder != NULL )
            {
                /* A task can only have an inherited priority if it holds the mutex.
                 * If the mutex is held by a task then it cannot be given from an
                 * interrupt, and if a mutex is given by the holding task then it must
                 * be the running state task. */
                configASSERT( pxTCB == pxCurrentTCBs[ portGET_CORE_ID() ] );
                configASSERT( pxTCB->uxMutexesHeld );
                ( pxTCB->uxMutexesHeld )--;

                /* Has the holder of the mutex inherited the priority of another
                 * task? */
                if( pxTCB->uxPriority != pxTCB->uxBasePriority )
                {
                    /* Only disinherit if no other mutexes are held. */
                    if( pxTCB->uxMutexesHeld == ( UBaseType_t ) 0 )
                    {
                        /* A task can only have an inherited priority if it holds
                         * the mutex. If the mutex is held by a task then it cannot be
                         * given from an interrupt, and if a mutex is given by the
                         * holding task then it must be the running state task. Remove
                         * the holding task from the ready list. */
                        if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
                        {
                            portRESET_READY_PRIORITY( pxTCB->uxPriority, uxTopReadyPriority );
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }

                        /* Disinherit the priority before adding the task into the
                         * new ready list. */
                        traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority );
                        pxTCB->uxPriority = pxTCB->uxBasePriority;

                        /* Reset the event list item value. It cannot be in use for
                         * any other purpose if this task is running, and it must be
                         * running to give back the mutex. */
                        listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
                        prvAddTaskToReadyList( pxTCB );

                        /* Return true to indicate that a context switch is required.
                         * This is only actually required in the corner case whereby
                         * multiple mutexes were held and the mutexes were given back
                         * in an order different to that in which they were taken.
                         * If a context switch did not occur when the first mutex was
                         * returned, even if a task was waiting on it, then a context
                         * switch should occur when the last mutex is returned whether
                         * a task is waiting on it or not. */
                        xReturn = pdTRUE;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        /* Release the previously taken kernel lock. */
        taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );

        return xReturn;
    }

#endif /* configUSE_MUTEXES */
  4421. /*-----------------------------------------------------------*/
#if ( configUSE_MUTEXES == 1 )

/*
 * Called when a task waiting on a mutex times out: recompute the mutex
 * holder's priority as the greater of its base priority and the priority
 * of the highest-priority task still waiting for the mutex, and move the
 * holder between ready lists if its priority changes.  Unlike
 * xTaskPriorityDisinherit(), the holder here is NOT the running task.
 */
    void vTaskPriorityDisinheritAfterTimeout( TaskHandle_t const pxMutexHolder,
                                              UBaseType_t uxHighestPriorityWaitingTask )
    {
        TCB_t * const pxTCB = pxMutexHolder;
        UBaseType_t uxPriorityUsedOnEntry, uxPriorityToUse;
        const UBaseType_t uxOnlyOneMutexHeld = ( UBaseType_t ) 1;

        /* For SMP, we need to take the kernel lock here as we are about to
         * access kernel data structures. */
        taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
        {
            if( pxMutexHolder != NULL )
            {
                /* If pxMutexHolder is not NULL then the holder must hold at least
                 * one mutex. */
                configASSERT( pxTCB->uxMutexesHeld );

                /* Determine the priority to which the priority of the task that
                 * holds the mutex should be set. This will be the greater of the
                 * holding task's base priority and the priority of the highest
                 * priority task that is waiting to obtain the mutex. */
                if( pxTCB->uxBasePriority < uxHighestPriorityWaitingTask )
                {
                    uxPriorityToUse = uxHighestPriorityWaitingTask;
                }
                else
                {
                    uxPriorityToUse = pxTCB->uxBasePriority;
                }

                /* Does the priority need to change? */
                if( pxTCB->uxPriority != uxPriorityToUse )
                {
                    /* Only disinherit if no other mutexes are held. This is a
                     * simplification in the priority inheritance implementation. If
                     * the task that holds the mutex is also holding other mutexes then
                     * the other mutexes may have caused the priority inheritance. */
                    if( pxTCB->uxMutexesHeld == uxOnlyOneMutexHeld )
                    {
                        /* If a task has timed out because it already holds the
                         * mutex it was trying to obtain then it cannot have inherited
                         * its own priority. */
                        configASSERT( pxTCB != pxCurrentTCBs[ portGET_CORE_ID() ] );

                        /* Disinherit the priority, remembering the previous
                         * priority to facilitate determining the subject task's
                         * state. */
                        traceTASK_PRIORITY_DISINHERIT( pxTCB, uxPriorityToUse );
                        uxPriorityUsedOnEntry = pxTCB->uxPriority;
                        pxTCB->uxPriority = uxPriorityToUse;

                        /* Only reset the event list item value if the value is not
                         * being used for anything else. */
                        if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
                        {
                            listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriorityToUse ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }

                        /* If the running task is not the task that holds the mutex
                         * then the task that holds the mutex could be in either the
                         * Ready, Blocked or Suspended states. Only remove the task
                         * from its current state list if it is in the Ready state as
                         * the task's priority is going to change and there is one
                         * Ready list per priority. */
                        if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE )
                        {
                            if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
                            {
                                /* It is known that the task is in its ready list so
                                 * there is no need to check again and the port level
                                 * reset macro can be called directly. */
                                portRESET_READY_PRIORITY( pxTCB->uxPriority, uxTopReadyPriority );
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }

                            prvAddTaskToReadyList( pxTCB );
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        /* Release the previously taken kernel lock. */
        taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
    }

#endif /* configUSE_MUTEXES */
  4524. /*-----------------------------------------------------------*/
  4525. #if ( portCRITICAL_NESTING_IN_TCB == 1 )
  4526. void vTaskEnterCritical( void )
  4527. {
  4528. portDISABLE_INTERRUPTS();
  4529. if( xSchedulerRunning != pdFALSE )
  4530. {
  4531. /* Get current core ID as we can no longer be preempted. */
  4532. const BaseType_t xCurCoreID = portGET_CORE_ID();
  4533. ( pxCurrentTCBs[ xCurCoreID ]->uxCriticalNesting )++;
  4534. /* This is not the interrupt safe version of the enter critical
  4535. * function so assert() if it is being called from an interrupt
  4536. * context. Only API functions that end in "FromISR" can be used in an
  4537. * interrupt. Only assert if the critical nesting count is 1 to
  4538. * protect against recursive calls if the assert function also uses a
  4539. * critical section. */
  4540. if( pxCurrentTCBs[ xCurCoreID ]->uxCriticalNesting == 1 )
  4541. {
  4542. portASSERT_IF_IN_ISR();
  4543. }
  4544. }
  4545. else
  4546. {
  4547. mtCOVERAGE_TEST_MARKER();
  4548. }
  4549. }
  4550. #endif /* portCRITICAL_NESTING_IN_TCB */
  4551. /*-----------------------------------------------------------*/
  4552. #if ( portCRITICAL_NESTING_IN_TCB == 1 )
  4553. void vTaskExitCritical( void )
  4554. {
  4555. if( xSchedulerRunning != pdFALSE )
  4556. {
  4557. /* Get current core ID as we can no longer be preempted. */
  4558. const BaseType_t xCurCoreID = portGET_CORE_ID();
  4559. if( pxCurrentTCBs[ xCurCoreID ]->uxCriticalNesting > 0U )
  4560. {
  4561. ( pxCurrentTCBs[ xCurCoreID ]->uxCriticalNesting )--;
  4562. if( pxCurrentTCBs[ xCurCoreID ]->uxCriticalNesting == 0U )
  4563. {
  4564. portENABLE_INTERRUPTS();
  4565. }
  4566. else
  4567. {
  4568. mtCOVERAGE_TEST_MARKER();
  4569. }
  4570. }
  4571. else
  4572. {
  4573. mtCOVERAGE_TEST_MARKER();
  4574. }
  4575. }
  4576. else
  4577. {
  4578. mtCOVERAGE_TEST_MARKER();
  4579. }
  4580. }
  4581. #endif /* portCRITICAL_NESTING_IN_TCB */
  4582. /*-----------------------------------------------------------*/
  4583. #if ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 )
  4584. static char * prvWriteNameToBuffer( char * pcBuffer,
  4585. const char * pcTaskName )
  4586. {
  4587. size_t x;
  4588. /* Start by copying the entire string. */
  4589. strcpy( pcBuffer, pcTaskName );
  4590. /* Pad the end of the string with spaces to ensure columns line up when
  4591. * printed out. */
  4592. for( x = strlen( pcBuffer ); x < ( size_t ) ( configMAX_TASK_NAME_LEN - 1 ); x++ )
  4593. {
  4594. pcBuffer[ x ] = ' ';
  4595. }
  4596. /* Terminate. */
  4597. pcBuffer[ x ] = ( char ) 0x00;
  4598. /* Return the new end of string. */
  4599. return &( pcBuffer[ x ] );
  4600. }
  4601. #endif /* ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) */
  4602. /*-----------------------------------------------------------*/
  4603. #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
  4604. void vTaskList( char * pcWriteBuffer )
  4605. {
  4606. TaskStatus_t * pxTaskStatusArray;
  4607. UBaseType_t uxArraySize, x;
  4608. char cStatus;
  4609. /*
  4610. * PLEASE NOTE:
  4611. *
  4612. * This function is provided for convenience only, and is used by many
  4613. * of the demo applications. Do not consider it to be part of the
  4614. * scheduler.
  4615. *
  4616. * vTaskList() calls uxTaskGetSystemState(), then formats part of the
  4617. * uxTaskGetSystemState() output into a human readable table that
  4618. * displays task: names, states, priority, stack usage and task number.
  4619. * Stack usage specified as the number of unused StackType_t words stack can hold
  4620. * on top of stack - not the number of bytes.
  4621. *
  4622. * vTaskList() has a dependency on the sprintf() C library function that
  4623. * might bloat the code size, use a lot of stack, and provide different
  4624. * results on different platforms. An alternative, tiny, third party,
  4625. * and limited functionality implementation of sprintf() is provided in
  4626. * many of the FreeRTOS/Demo sub-directories in a file called
  4627. * printf-stdarg.c (note printf-stdarg.c does not provide a full
  4628. * snprintf() implementation!).
  4629. *
  4630. * It is recommended that production systems call uxTaskGetSystemState()
  4631. * directly to get access to raw stats data, rather than indirectly
  4632. * through a call to vTaskList().
  4633. */
  4634. /* Make sure the write buffer does not contain a string. */
  4635. *pcWriteBuffer = ( char ) 0x00;
  4636. /* Take a snapshot of the number of tasks in case it changes while this
  4637. * function is executing. */
  4638. uxArraySize = uxCurrentNumberOfTasks;
  4639. /* Allocate an array index for each task. NOTE! if
  4640. * configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
  4641. * equate to NULL. */
  4642. pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation allocates a struct that has the alignment requirements of a pointer. */
  4643. if( pxTaskStatusArray != NULL )
  4644. {
  4645. /* Generate the (binary) data. */
  4646. uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, NULL );
  4647. /* Create a human readable table from the binary data. */
  4648. for( x = 0; x < uxArraySize; x++ )
  4649. {
  4650. switch( pxTaskStatusArray[ x ].eCurrentState )
  4651. {
  4652. case eRunning:
  4653. cStatus = tskRUNNING_CHAR;
  4654. break;
  4655. case eReady:
  4656. cStatus = tskREADY_CHAR;
  4657. break;
  4658. case eBlocked:
  4659. cStatus = tskBLOCKED_CHAR;
  4660. break;
  4661. case eSuspended:
  4662. cStatus = tskSUSPENDED_CHAR;
  4663. break;
  4664. case eDeleted:
  4665. cStatus = tskDELETED_CHAR;
  4666. break;
  4667. case eInvalid: /* Fall through. */
  4668. default: /* Should not get here, but it is included
  4669. * to prevent static checking errors. */
  4670. cStatus = ( char ) 0x00;
  4671. break;
  4672. }
  4673. /* Write the task name to the string, padding with spaces so it
  4674. * can be printed in tabular form more easily. */
  4675. pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );
  4676. /* Write the rest of the string. */
  4677. sprintf( pcWriteBuffer, "\t%c\t%u\t%d\t%u\t%u\r\n", cStatus, ( unsigned int ) pxTaskStatusArray[ x ].uxCurrentPriority, pxTaskStatusArray[ x ].xCoreID, ( unsigned int ) pxTaskStatusArray[ x ].usStackHighWaterMark, ( unsigned int ) pxTaskStatusArray[ x ].xTaskNumber ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */
  4678. pcWriteBuffer += strlen( pcWriteBuffer ); /*lint !e9016 Pointer arithmetic ok on char pointers especially as in this case where it best denotes the intent of the code. */
  4679. }
  4680. /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION
  4681. * is 0 then vPortFree() will be #defined to nothing. */
  4682. vPortFree( pxTaskStatusArray );
  4683. }
  4684. else
  4685. {
  4686. mtCOVERAGE_TEST_MARKER();
  4687. }
  4688. }
  4689. #endif /* ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) */
  4690. /*----------------------------------------------------------*/
#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configUSE_TRACE_FACILITY == 1 ) )

/*
 * Write a human readable table into pcWriteBuffer showing how much time
 * each task has spent in the Running state, in absolute run-time counter
 * units and as a whole percentage of total run time. The caller must
 * supply a buffer large enough to hold one line per task.
 */
    void vTaskGetRunTimeStats( char * pcWriteBuffer )
    {
        TaskStatus_t * pxTaskStatusArray;
        UBaseType_t uxArraySize, x;
        configRUN_TIME_COUNTER_TYPE ulTotalTime, ulStatsAsPercentage;

        /*
         * PLEASE NOTE:
         *
         * This function is provided for convenience only, and is used by many
         * of the demo applications. Do not consider it to be part of the
         * scheduler.
         *
         * vTaskGetRunTimeStats() calls uxTaskGetSystemState(), then formats part
         * of the uxTaskGetSystemState() output into a human readable table that
         * displays the amount of time each task has spent in the Running state
         * in both absolute and percentage terms.
         *
         * vTaskGetRunTimeStats() has a dependency on the sprintf() C library
         * function that might bloat the code size, use a lot of stack, and
         * provide different results on different platforms. An alternative,
         * tiny, third party, and limited functionality implementation of
         * sprintf() is provided in many of the FreeRTOS/Demo sub-directories in
         * a file called printf-stdarg.c (note printf-stdarg.c does not provide
         * a full snprintf() implementation!).
         *
         * It is recommended that production systems call uxTaskGetSystemState()
         * directly to get access to raw stats data, rather than indirectly
         * through a call to vTaskGetRunTimeStats().
         */

        /* Make sure the write buffer does not contain a string. */
        *pcWriteBuffer = ( char ) 0x00;

        /* Take a snapshot of the number of tasks in case it changes while this
         * function is executing. */
        uxArraySize = uxCurrentNumberOfTasks;

        /* Allocate an array index for each task. NOTE! If
         * configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
         * equate to NULL. */
        pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation allocates a struct that has the alignment requirements of a pointer. */

        if( pxTaskStatusArray != NULL )
        {
            /* Generate the (binary) data, and the total elapsed run time. */
            uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, &ulTotalTime );

            /* For percentage calculations - each task's counter divided by
             * this value yields a whole percentage directly. */
            ulTotalTime /= 100UL;

            /* Avoid divide by zero errors. */
            if( ulTotalTime > 0UL )
            {
                /* Create a human readable table from the binary data. */
                for( x = 0; x < uxArraySize; x++ )
                {
                    /* What percentage of the total run time has the task used?
                     * This will always be rounded down to the nearest integer.
                     * ulTotalRunTime has already been divided by 100. */
                    ulStatsAsPercentage = pxTaskStatusArray[ x ].ulRunTimeCounter / ulTotalTime;

                    /* Write the task name to the string, padding with
                     * spaces so it can be printed in tabular form more
                     * easily. */
                    pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );

                    if( ulStatsAsPercentage > 0UL )
                    {
                        /* Ports whose printf library distinguishes int from
                         * long must use the %lu specifier. */
                        #ifdef portLU_PRINTF_SPECIFIER_REQUIRED
                        {
                            sprintf( pcWriteBuffer, "\t%lu\t\t%lu%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter, ulStatsAsPercentage );
                        }
                        #else
                        {
                            /* sizeof( int ) == sizeof( long ) so a smaller
                             * printf() library can be used. */
                            sprintf( pcWriteBuffer, "\t%u\t\t%u%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter, ( unsigned int ) ulStatsAsPercentage ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */
                        }
                        #endif
                    }
                    else
                    {
                        /* If the percentage is zero here then the task has
                         * consumed less than 1% of the total run time. */
                        #ifdef portLU_PRINTF_SPECIFIER_REQUIRED
                        {
                            sprintf( pcWriteBuffer, "\t%lu\t\t<1%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter );
                        }
                        #else
                        {
                            /* sizeof( int ) == sizeof( long ) so a smaller
                             * printf() library can be used. */
                            sprintf( pcWriteBuffer, "\t%u\t\t<1%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */
                        }
                        #endif
                    }

                    /* Advance past the line just written. */
                    pcWriteBuffer += strlen( pcWriteBuffer ); /*lint !e9016 Pointer arithmetic ok on char pointers especially as in this case where it best denotes the intent of the code. */
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION
             * is 0 then vPortFree() will be #defined to nothing. */
            vPortFree( pxTaskStatusArray );
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }

#endif /* ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configUSE_TRACE_FACILITY == 1 ) ) */
  4797. /*-----------------------------------------------------------*/
  4798. TickType_t uxTaskResetEventItemValue( void )
  4799. {
  4800. TickType_t uxReturn;
  4801. /* For SMP, we need to take the kernel lock here to ensure nothing else
  4802. * modifies the task's event item value simultaneously. */
  4803. taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
  4804. {
  4805. /* Get current core ID as we can no longer be preempted. */
  4806. const BaseType_t xCurCoreID = portGET_CORE_ID();
  4807. uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCBs[ xCurCoreID ]->xEventListItem ) );
  4808. /* Reset the event list item to its normal value - so it can be used with
  4809. * queues and semaphores. */
  4810. listSET_LIST_ITEM_VALUE( &( pxCurrentTCBs[ xCurCoreID ]->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCBs[ xCurCoreID ]->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
  4811. }
  4812. taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
  4813. /* Release the previously taken kernel lock. */
  4814. return uxReturn;
  4815. }
  4816. /*-----------------------------------------------------------*/
  4817. #if ( configUSE_MUTEXES == 1 )
  4818. TaskHandle_t pvTaskIncrementMutexHeldCount( void )
  4819. {
  4820. TaskHandle_t xReturn;
  4821. /* For SMP, we need to take the kernel lock here as we are about to
  4822. * access kernel data structures. */
  4823. taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
  4824. {
  4825. /* Get current core ID as we can no longer be preempted. */
  4826. const BaseType_t xCurCoreID = portGET_CORE_ID();
  4827. /* If xSemaphoreCreateMutex() is called before any tasks have been created
  4828. * then pxCurrentTCBs will be NULL. */
  4829. if( pxCurrentTCBs[ xCurCoreID ] != NULL )
  4830. {
  4831. ( pxCurrentTCBs[ xCurCoreID ]->uxMutexesHeld )++;
  4832. }
  4833. xReturn = pxCurrentTCBs[ xCurCoreID ];
  4834. }
  4835. /* Release the previously taken kernel lock. */
  4836. taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
  4837. return xReturn;
  4838. }
  4839. #endif /* configUSE_MUTEXES */
  4840. /*-----------------------------------------------------------*/
#if ( configUSE_TASK_NOTIFICATIONS == 1 )

/*
 * Lightweight "take" on the calling task's notification at index
 * uxIndexToWait, using the 32-bit notification value as a counting
 * semaphore. If the value is zero the task optionally blocks for up to
 * xTicksToWait ticks. Returns the notification value as it was before it
 * was cleared (xClearCountOnExit != pdFALSE) or decremented by one.
 */
    uint32_t ulTaskGenericNotifyTake( UBaseType_t uxIndexToWait,
                                      BaseType_t xClearCountOnExit,
                                      TickType_t xTicksToWait )
    {
        uint32_t ulReturn;

        configASSERT( uxIndexToWait < configTASK_NOTIFICATION_ARRAY_ENTRIES );

        taskENTER_CRITICAL( &xKernelLock );
        {
            /* Get current core ID as we can no longer be preempted. */
            const BaseType_t xCurCoreID = portGET_CORE_ID();

            /* Only block if the notification count is not already non-zero. */
            if( pxCurrentTCBs[ xCurCoreID ]->ulNotifiedValue[ uxIndexToWait ] == 0UL )
            {
                /* Mark this task as waiting for a notification. */
                pxCurrentTCBs[ xCurCoreID ]->ucNotifyState[ uxIndexToWait ] = taskWAITING_NOTIFICATION;

                if( xTicksToWait > ( TickType_t ) 0 )
                {
                    prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
                    traceTASK_NOTIFY_TAKE_BLOCK( uxIndexToWait );

                    /* All ports are written to allow a yield in a critical
                     * section (some will yield immediately, others wait until the
                     * critical section exits) - but it is not something that
                     * application code should ever do. */
                    portYIELD_WITHIN_API();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        taskEXIT_CRITICAL( &xKernelLock );

        /* The critical section is re-entered rather than held across the
         * yield above, because the task may have blocked and only just been
         * unblocked (by a notification or a timeout). */
        taskENTER_CRITICAL( &xKernelLock );
        {
            /* Get current core ID as we can no longer be preempted. */
            const BaseType_t xCurCoreID = portGET_CORE_ID();

            traceTASK_NOTIFY_TAKE( uxIndexToWait );

            /* Capture the pre-consume value - this is what gets returned. */
            ulReturn = pxCurrentTCBs[ xCurCoreID ]->ulNotifiedValue[ uxIndexToWait ];

            if( ulReturn != 0UL )
            {
                if( xClearCountOnExit != pdFALSE )
                {
                    /* Binary-semaphore style: consume the whole count. */
                    pxCurrentTCBs[ xCurCoreID ]->ulNotifiedValue[ uxIndexToWait ] = 0UL;
                }
                else
                {
                    /* Counting-semaphore style: consume a single count. */
                    pxCurrentTCBs[ xCurCoreID ]->ulNotifiedValue[ uxIndexToWait ] = ulReturn - ( uint32_t ) 1;
                }
            }
            else
            {
                /* Timed out without a notification arriving - return 0. */
                mtCOVERAGE_TEST_MARKER();
            }

            pxCurrentTCBs[ xCurCoreID ]->ucNotifyState[ uxIndexToWait ] = taskNOT_WAITING_NOTIFICATION;
        }
        taskEXIT_CRITICAL( &xKernelLock );

        return ulReturn;
    }

#endif /* configUSE_TASK_NOTIFICATIONS */
  4905. /*-----------------------------------------------------------*/
#if ( configUSE_TASK_NOTIFICATIONS == 1 )

/*
 * Wait for a notification on the calling task's notification at index
 * uxIndexToWait, optionally blocking for up to xTicksToWait ticks.
 * ulBitsToClearOnEntry is cleared from the notification value before
 * waiting; ulBitsToClearOnExit is cleared after a notification has been
 * received. The (possibly updated) value is reported through
 * pulNotificationValue when that pointer is non-NULL. Returns pdTRUE if a
 * notification was received (or was already pending), pdFALSE on timeout.
 */
    BaseType_t xTaskGenericNotifyWait( UBaseType_t uxIndexToWait,
                                       uint32_t ulBitsToClearOnEntry,
                                       uint32_t ulBitsToClearOnExit,
                                       uint32_t * pulNotificationValue,
                                       TickType_t xTicksToWait )
    {
        BaseType_t xReturn;

        configASSERT( uxIndexToWait < configTASK_NOTIFICATION_ARRAY_ENTRIES );

        taskENTER_CRITICAL( &xKernelLock );
        {
            /* Get current core ID as we can no longer be preempted. */
            const BaseType_t xCurCoreID = portGET_CORE_ID();

            /* Only block if a notification is not already pending. */
            if( pxCurrentTCBs[ xCurCoreID ]->ucNotifyState[ uxIndexToWait ] != taskNOTIFICATION_RECEIVED )
            {
                /* Clear bits in the task's notification value as bits may get
                 * set by the notifying task or interrupt. This can be used to
                 * clear the value to zero. */
                pxCurrentTCBs[ xCurCoreID ]->ulNotifiedValue[ uxIndexToWait ] &= ~ulBitsToClearOnEntry;

                /* Mark this task as waiting for a notification. */
                pxCurrentTCBs[ xCurCoreID ]->ucNotifyState[ uxIndexToWait ] = taskWAITING_NOTIFICATION;

                if( xTicksToWait > ( TickType_t ) 0 )
                {
                    prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
                    traceTASK_NOTIFY_WAIT_BLOCK( uxIndexToWait );

                    /* All ports are written to allow a yield in a critical
                     * section (some will yield immediately, others wait until the
                     * critical section exits) - but it is not something that
                     * application code should ever do. */
                    portYIELD_WITHIN_API();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        taskEXIT_CRITICAL( &xKernelLock );

        /* The critical section is re-entered rather than held across the
         * yield above, because the task may have blocked and only just been
         * unblocked (by a notification or a timeout). */
        taskENTER_CRITICAL( &xKernelLock );
        {
            /* Get current core ID as we can no longer be preempted. */
            const BaseType_t xCurCoreID = portGET_CORE_ID();

            traceTASK_NOTIFY_WAIT( uxIndexToWait );

            if( pulNotificationValue != NULL )
            {
                /* Output the current notification value, which may or may not
                 * have changed. */
                *pulNotificationValue = pxCurrentTCBs[ xCurCoreID ]->ulNotifiedValue[ uxIndexToWait ];
            }

            /* If ucNotifyValue is set then either the task never entered the
             * blocked state (because a notification was already pending) or the
             * task unblocked because of a notification. Otherwise the task
             * unblocked because of a timeout. */
            if( pxCurrentTCBs[ xCurCoreID ]->ucNotifyState[ uxIndexToWait ] != taskNOTIFICATION_RECEIVED )
            {
                /* A notification was not received. */
                xReturn = pdFALSE;
            }
            else
            {
                /* A notification was already pending or a notification was
                 * received while the task was waiting. Clear the requested
                 * bits before returning success. */
                pxCurrentTCBs[ xCurCoreID ]->ulNotifiedValue[ uxIndexToWait ] &= ~ulBitsToClearOnExit;
                xReturn = pdTRUE;
            }

            pxCurrentTCBs[ xCurCoreID ]->ucNotifyState[ uxIndexToWait ] = taskNOT_WAITING_NOTIFICATION;
        }
        taskEXIT_CRITICAL( &xKernelLock );

        return xReturn;
    }

#endif /* configUSE_TASK_NOTIFICATIONS */
  4982. /*-----------------------------------------------------------*/
#if ( configUSE_TASK_NOTIFICATIONS == 1 )

/*
 * Send a notification to xTaskToNotify at index uxIndexToNotify, updating
 * the target's notification value according to eAction (set bits,
 * increment, overwrite, conditional overwrite, or no update). If the
 * target was blocked waiting for this notification it is made ready, and
 * a yield is requested when that task should preempt. The value prior to
 * the update is reported through pulPreviousNotificationValue when that
 * pointer is non-NULL. Returns pdPASS, except pdFAIL when
 * eSetValueWithoutOverwrite could not write because a notification was
 * already pending.
 */
    BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify,
                                   UBaseType_t uxIndexToNotify,
                                   uint32_t ulValue,
                                   eNotifyAction eAction,
                                   uint32_t * pulPreviousNotificationValue )
    {
        TCB_t * pxTCB;
        BaseType_t xReturn = pdPASS;
        uint8_t ucOriginalNotifyState;

        configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES );
        configASSERT( xTaskToNotify );
        pxTCB = xTaskToNotify;

        taskENTER_CRITICAL( &xKernelLock );
        {
            if( pulPreviousNotificationValue != NULL )
            {
                /* Report the value before this notification modifies it. */
                *pulPreviousNotificationValue = pxTCB->ulNotifiedValue[ uxIndexToNotify ];
            }

            /* Remember whether the target was already notified/waiting before
             * the state is overwritten - this drives the unblock logic below. */
            ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ];

            pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED;

            switch( eAction )
            {
                case eSetBits:
                    pxTCB->ulNotifiedValue[ uxIndexToNotify ] |= ulValue;
                    break;

                case eIncrement:
                    ( pxTCB->ulNotifiedValue[ uxIndexToNotify ] )++;
                    break;

                case eSetValueWithOverwrite:
                    pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue;
                    break;

                case eSetValueWithoutOverwrite:

                    if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED )
                    {
                        pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue;
                    }
                    else
                    {
                        /* The value could not be written to the task. */
                        xReturn = pdFAIL;
                    }
                    break;

                case eNoAction:

                    /* The task is being notified without its notify value being
                     * updated. */
                    break;

                default:

                    /* Should not get here if all enums are handled.
                     * Artificially force an assert by testing a value the
                     * compiler can't assume is const. */
                    configASSERT( xTickCount == ( TickType_t ) 0 );
                    break;
            }

            traceTASK_NOTIFY( uxIndexToNotify );

            /* If the task is in the blocked state specifically to wait for a
             * notification then unblock it now. */
            if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
            {
                listREMOVE_ITEM( &( pxTCB->xStateListItem ) );
                prvAddTaskToReadyList( pxTCB );

                /* The task should not have been on an event list. */
                configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );

                #if ( configUSE_TICKLESS_IDLE != 0 )
                {
                    /* If a task is blocked waiting for a notification then
                     * xNextTaskUnblockTime might be set to the blocked task's time
                     * out time. If the task is unblocked for a reason other than
                     * a timeout xNextTaskUnblockTime is normally left unchanged,
                     * because it will automatically get reset to a new value when
                     * the tick count equals xNextTaskUnblockTime. However if
                     * tickless idling is used it might be more important to enter
                     * sleep mode at the earliest possible time - so reset
                     * xNextTaskUnblockTime here to ensure it is updated at the
                     * earliest possible time. */
                    prvResetNextTaskUnblockTime();
                }
                #endif

                if( taskIS_YIELD_REQUIRED( pxTCB, portGET_CORE_ID(), pdFALSE ) == pdTRUE )
                {
                    /* The notified task has a priority above the currently
                     * executing task so a yield is required. */
                    taskYIELD_IF_USING_PREEMPTION();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        taskEXIT_CRITICAL( &xKernelLock );

        return xReturn;
    }

#endif /* configUSE_TASK_NOTIFICATIONS */
  5081. /*-----------------------------------------------------------*/
  5082. #if ( configUSE_TASK_NOTIFICATIONS == 1 )
  5083. BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify,
  5084. UBaseType_t uxIndexToNotify,
  5085. uint32_t ulValue,
  5086. eNotifyAction eAction,
  5087. uint32_t * pulPreviousNotificationValue,
  5088. BaseType_t * pxHigherPriorityTaskWoken )
  5089. {
  5090. TCB_t * pxTCB;
  5091. uint8_t ucOriginalNotifyState;
  5092. BaseType_t xReturn = pdPASS;
  5093. UBaseType_t uxSavedInterruptStatus;
  5094. configASSERT( xTaskToNotify );
  5095. configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES );
  5096. /* RTOS ports that support interrupt nesting have the concept of a
  5097. * maximum system call (or maximum API call) interrupt priority.
  5098. * Interrupts that are above the maximum system call priority are keep
  5099. * permanently enabled, even when the RTOS kernel is in a critical section,
  5100. * but cannot make any calls to FreeRTOS API functions. If configASSERT()
  5101. * is defined in FreeRTOSConfig.h then
  5102. * portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
  5103. * failure if a FreeRTOS API function is called from an interrupt that has
  5104. * been assigned a priority above the configured maximum system call
  5105. * priority. Only FreeRTOS functions that end in FromISR can be called
  5106. * from interrupts that have been assigned a priority at or (logically)
  5107. * below the maximum system call interrupt priority. FreeRTOS maintains a
  5108. * separate interrupt safe API to ensure interrupt entry is as fast and as
  5109. * simple as possible. More information (albeit Cortex-M specific) is
  5110. * provided on the following link:
  5111. * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
  5112. portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
  5113. pxTCB = xTaskToNotify;
  5114. prvENTER_CRITICAL_OR_MASK_ISR( &xKernelLock, uxSavedInterruptStatus );
  5115. {
  5116. /* Get current core ID as we can no longer be preempted. */
  5117. const BaseType_t xCurCoreID = portGET_CORE_ID();
  5118. if( pulPreviousNotificationValue != NULL )
  5119. {
  5120. *pulPreviousNotificationValue = pxTCB->ulNotifiedValue[ uxIndexToNotify ];
  5121. }
  5122. ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ];
  5123. pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED;
  5124. switch( eAction )
  5125. {
  5126. case eSetBits:
  5127. pxTCB->ulNotifiedValue[ uxIndexToNotify ] |= ulValue;
  5128. break;
  5129. case eIncrement:
  5130. ( pxTCB->ulNotifiedValue[ uxIndexToNotify ] )++;
  5131. break;
  5132. case eSetValueWithOverwrite:
  5133. pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue;
  5134. break;
  5135. case eSetValueWithoutOverwrite:
  5136. if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED )
  5137. {
  5138. pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue;
  5139. }
  5140. else
  5141. {
  5142. /* The value could not be written to the task. */
  5143. xReturn = pdFAIL;
  5144. }
  5145. break;
  5146. case eNoAction:
  5147. /* The task is being notified without its notify value being
  5148. * updated. */
  5149. break;
  5150. default:
  5151. /* Should not get here if all enums are handled.
  5152. * Artificially force an assert by testing a value the
  5153. * compiler can't assume is const. */
  5154. configASSERT( xTickCount == ( TickType_t ) 0 );
  5155. break;
  5156. }
  5157. traceTASK_NOTIFY_FROM_ISR( uxIndexToNotify );
  5158. /* If the task is in the blocked state specifically to wait for a
  5159. * notification then unblock it now. */
  5160. if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
  5161. {
  5162. /* The task should not have been on an event list. */
  5163. configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );
  5164. if( taskCAN_BE_SCHEDULED( pxTCB ) == pdTRUE )
  5165. {
  5166. listREMOVE_ITEM( &( pxTCB->xStateListItem ) );
  5167. prvAddTaskToReadyList( pxTCB );
  5168. }
  5169. else
  5170. {
  5171. /* The delayed and ready lists cannot be accessed, so hold
  5172. * this task pending until the scheduler is resumed. */
  5173. listINSERT_END( &( xPendingReadyList[ xCurCoreID ] ), &( pxTCB->xEventListItem ) );
  5174. }
  5175. if( taskIS_YIELD_REQUIRED( pxTCB, xCurCoreID, pdFALSE ) == pdTRUE )
  5176. {
  5177. /* The notified task has a priority above the currently
  5178. * executing task so a yield is required. */
  5179. if( pxHigherPriorityTaskWoken != NULL )
  5180. {
  5181. *pxHigherPriorityTaskWoken = pdTRUE;
  5182. }
  5183. /* Mark that a yield is pending in case the user is not
  5184. * using the "xHigherPriorityTaskWoken" parameter to an ISR
  5185. * safe FreeRTOS function. */
  5186. xYieldPending[ xCurCoreID ] = pdTRUE;
  5187. }
  5188. else
  5189. {
  5190. mtCOVERAGE_TEST_MARKER();
  5191. }
  5192. }
  5193. }
  5194. prvEXIT_CRITICAL_OR_UNMASK_ISR( &xKernelLock, uxSavedInterruptStatus );
  5195. return xReturn;
  5196. }
  5197. #endif /* configUSE_TASK_NOTIFICATIONS */
  5198. /*-----------------------------------------------------------*/
  5199. #if ( configUSE_TASK_NOTIFICATIONS == 1 )
/**
 * ISR-safe 'give' on a task notification: increments the notification value
 * at the given index (counting-semaphore semantics) and unblocks the target
 * task if it was waiting on that notification.
 *
 * @param xTaskToNotify            Handle of the task to notify (must not be NULL).
 * @param uxIndexToNotify          Notification array index to act on; must be
 *                                 below configTASK_NOTIFICATION_ARRAY_ENTRIES.
 * @param pxHigherPriorityTaskWoken Optional out-parameter set to pdTRUE when
 *                                 the notified task should preempt the task
 *                                 interrupted by this ISR. May be NULL.
 */
void vTaskGenericNotifyGiveFromISR( TaskHandle_t xTaskToNotify,
                                    UBaseType_t uxIndexToNotify,
                                    BaseType_t * pxHigherPriorityTaskWoken )
{
    TCB_t * pxTCB;
    uint8_t ucOriginalNotifyState;
    UBaseType_t uxSavedInterruptStatus;

    configASSERT( xTaskToNotify );
    configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES );

    /* RTOS ports that support interrupt nesting have the concept of a
     * maximum system call (or maximum API call) interrupt priority.
     * Interrupts that are above the maximum system call priority are keep
     * permanently enabled, even when the RTOS kernel is in a critical section,
     * but cannot make any calls to FreeRTOS API functions. If configASSERT()
     * is defined in FreeRTOSConfig.h then
     * portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
     * failure if a FreeRTOS API function is called from an interrupt that has
     * been assigned a priority above the configured maximum system call
     * priority. Only FreeRTOS functions that end in FromISR can be called
     * from interrupts that have been assigned a priority at or (logically)
     * below the maximum system call interrupt priority. FreeRTOS maintains a
     * separate interrupt safe API to ensure interrupt entry is as fast and as
     * simple as possible. More information (albeit Cortex-M specific) is
     * provided on the following link:
     * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    pxTCB = xTaskToNotify;

    prvENTER_CRITICAL_OR_MASK_ISR( &xKernelLock, uxSavedInterruptStatus );
    {
        /* Get current core ID as we can no longer be preempted. */
        const BaseType_t xCurCoreID = portGET_CORE_ID();

        /* Snapshot the previous state before marking the notification as
         * received; the snapshot decides below whether an unblock is needed. */
        ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ];
        pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED;

        /* 'Giving' is equivalent to incrementing a count in a counting
         * semaphore. */
        ( pxTCB->ulNotifiedValue[ uxIndexToNotify ] )++;

        traceTASK_NOTIFY_GIVE_FROM_ISR( uxIndexToNotify );

        /* If the task is in the blocked state specifically to wait for a
         * notification then unblock it now. */
        if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
        {
            /* The task should not have been on an event list. */
            configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );

            if( taskCAN_BE_SCHEDULED( pxTCB ) == pdTRUE )
            {
                /* Move the task from the blocked list straight to the
                 * ready list. */
                listREMOVE_ITEM( &( pxTCB->xStateListItem ) );
                prvAddTaskToReadyList( pxTCB );
            }
            else
            {
                /* The delayed and ready lists cannot be accessed, so hold
                 * this task pending until the scheduler is resumed. */
                listINSERT_END( &( xPendingReadyList[ xCurCoreID ] ), &( pxTCB->xEventListItem ) );
            }

            if( taskIS_YIELD_REQUIRED( pxTCB, xCurCoreID, pdFALSE ) == pdTRUE )
            {
                /* The notified task has a priority above the currently
                 * executing task so a yield is required. */
                if( pxHigherPriorityTaskWoken != NULL )
                {
                    *pxHigherPriorityTaskWoken = pdTRUE;
                }

                /* Mark that a yield is pending in case the user is not
                 * using the "xHigherPriorityTaskWoken" parameter in an ISR
                 * safe FreeRTOS function. */
                xYieldPending[ xCurCoreID ] = pdTRUE;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
    }
    prvEXIT_CRITICAL_OR_UNMASK_ISR( &xKernelLock, uxSavedInterruptStatus );
}
  5275. #endif /* configUSE_TASK_NOTIFICATIONS */
  5276. /*-----------------------------------------------------------*/
  5277. #if ( configUSE_TASK_NOTIFICATIONS == 1 )
  5278. BaseType_t xTaskGenericNotifyStateClear( TaskHandle_t xTask,
  5279. UBaseType_t uxIndexToClear )
  5280. {
  5281. TCB_t * pxTCB;
  5282. BaseType_t xReturn;
  5283. configASSERT( uxIndexToClear < configTASK_NOTIFICATION_ARRAY_ENTRIES );
  5284. /* If null is passed in here then it is the calling task that is having
  5285. * its notification state cleared. */
  5286. pxTCB = prvGetTCBFromHandle( xTask );
  5287. taskENTER_CRITICAL( &xKernelLock );
  5288. {
  5289. if( pxTCB->ucNotifyState[ uxIndexToClear ] == taskNOTIFICATION_RECEIVED )
  5290. {
  5291. pxTCB->ucNotifyState[ uxIndexToClear ] = taskNOT_WAITING_NOTIFICATION;
  5292. xReturn = pdPASS;
  5293. }
  5294. else
  5295. {
  5296. xReturn = pdFAIL;
  5297. }
  5298. }
  5299. taskEXIT_CRITICAL( &xKernelLock );
  5300. return xReturn;
  5301. }
  5302. #endif /* configUSE_TASK_NOTIFICATIONS */
  5303. /*-----------------------------------------------------------*/
  5304. #if ( configUSE_TASK_NOTIFICATIONS == 1 )
  5305. uint32_t ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
  5306. UBaseType_t uxIndexToClear,
  5307. uint32_t ulBitsToClear )
  5308. {
  5309. TCB_t * pxTCB;
  5310. uint32_t ulReturn;
  5311. /* If null is passed in here then it is the calling task that is having
  5312. * its notification state cleared. */
  5313. pxTCB = prvGetTCBFromHandle( xTask );
  5314. taskENTER_CRITICAL( &xKernelLock );
  5315. {
  5316. /* Return the notification as it was before the bits were cleared,
  5317. * then clear the bit mask. */
  5318. ulReturn = pxTCB->ulNotifiedValue[ uxIndexToClear ];
  5319. pxTCB->ulNotifiedValue[ uxIndexToClear ] &= ~ulBitsToClear;
  5320. }
  5321. taskEXIT_CRITICAL( &xKernelLock );
  5322. return ulReturn;
  5323. }
  5324. #endif /* configUSE_TASK_NOTIFICATIONS */
  5325. /*-----------------------------------------------------------*/
  5326. #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
  5327. configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimeCounter( void )
  5328. {
  5329. return ulTaskGetIdleRunTimeCounterForCore( portGET_CORE_ID() );
  5330. }
  5331. #endif
  5332. /*-----------------------------------------------------------*/
  5333. #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
  5334. configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimePercent( void )
  5335. {
  5336. return ulTaskGetIdleRunTimePercentForCore( portGET_CORE_ID() );
  5337. }
  5338. #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
  5339. /*-----------------------------------------------------------*/
/**
 * Moves the currently running task from the ready list to the appropriate
 * blocked list: the suspended list for an indefinite block, or one of the two
 * delayed lists (normal / tick-overflow) ordered by wake time.
 *
 * Must be called with the kernel locked / scheduler protected — the body
 * assumes the current task cannot be preempted while the lists are modified.
 *
 * @param xTicksToWait          Number of ticks to block for; portMAX_DELAY
 *                              combined with xCanBlockIndefinitely requests an
 *                              indefinite block.
 * @param xCanBlockIndefinitely When not pdFALSE, a portMAX_DELAY wait places
 *                              the task on the suspended list so no timing
 *                              event ever wakes it.
 */
static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait,
                                            const BaseType_t xCanBlockIndefinitely )
{
    TickType_t xTimeToWake;
    /* Sample the tick count once so wake-time arithmetic and the overflow
     * check below use a consistent value. */
    const TickType_t xConstTickCount = xTickCount;

    /* Get current core ID as we can no longer be preempted. */
    const BaseType_t xCurCoreID = portGET_CORE_ID();

    #if ( configNUMBER_OF_CORES > 1 )
    {
        if( listIS_CONTAINED_WITHIN( &xTasksWaitingTermination, &( pxCurrentTCBs[ xCurCoreID ]->xStateListItem ) ) == pdTRUE )
        {
            /* In SMP, it is possible that another core has already deleted the
             * current task (via vTaskDelete()) which will result in the current
             * task being placed on the waiting termination list. In this case,
             * we do nothing and return, the current task will yield as soon
             * as it re-enables interrupts. */
            return;
        }
    }
    #endif /* configNUMBER_OF_CORES > 1 */

    #if ( INCLUDE_xTaskAbortDelay == 1 )
    {
        /* About to enter a delayed list, so ensure the ucDelayAborted flag is
         * reset to pdFALSE so it can be detected as having been set to pdTRUE
         * when the task leaves the Blocked state. */
        pxCurrentTCBs[ xCurCoreID ]->ucDelayAborted = pdFALSE;
    }
    #endif

    /* Remove the task from the ready list before adding it to the blocked list
     * as the same list item is used for both lists. */
    if( uxListRemove( &( pxCurrentTCBs[ xCurCoreID ]->xStateListItem ) ) == ( UBaseType_t ) 0 )
    {
        /* The current task must be in a ready list, so there is no need to
         * check, and the port reset macro can be called directly. */
        portRESET_READY_PRIORITY( pxCurrentTCBs[ xCurCoreID ]->uxPriority, uxTopReadyPriority ); /*lint !e931 pxCurrentTCBs cannot change as it is the calling task. pxCurrentTCBs->uxPriority and uxTopReadyPriority cannot change as called with scheduler suspended or in a critical section. */
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }

    #if ( INCLUDE_vTaskSuspend == 1 )
    {
        if( ( xTicksToWait == portMAX_DELAY ) && ( xCanBlockIndefinitely != pdFALSE ) )
        {
            /* Add the task to the suspended task list instead of a delayed task
             * list to ensure it is not woken by a timing event. It will block
             * indefinitely. */
            listINSERT_END( &xSuspendedTaskList, &( pxCurrentTCBs[ xCurCoreID ]->xStateListItem ) );
        }
        else
        {
            /* Calculate the time at which the task should be woken if the event
             * does not occur. This may overflow but this doesn't matter, the
             * kernel will manage it correctly. */
            xTimeToWake = xConstTickCount + xTicksToWait;

            /* The list item will be inserted in wake time order. */
            listSET_LIST_ITEM_VALUE( &( pxCurrentTCBs[ xCurCoreID ]->xStateListItem ), xTimeToWake );

            if( xTimeToWake < xConstTickCount )
            {
                /* Wake time has overflowed. Place this item in the overflow
                 * list. */
                vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCBs[ xCurCoreID ]->xStateListItem ) );
            }
            else
            {
                /* The wake time has not overflowed, so the current block list
                 * is used. */
                vListInsert( pxDelayedTaskList, &( pxCurrentTCBs[ xCurCoreID ]->xStateListItem ) );

                /* If the task entering the blocked state was placed at the
                 * head of the list of blocked tasks then xNextTaskUnblockTime
                 * needs to be updated too. */
                if( xTimeToWake < xNextTaskUnblockTime )
                {
                    xNextTaskUnblockTime = xTimeToWake;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
    }
    #else /* INCLUDE_vTaskSuspend */
    {
        /* Calculate the time at which the task should be woken if the event
         * does not occur. This may overflow but this doesn't matter, the kernel
         * will manage it correctly. */
        xTimeToWake = xConstTickCount + xTicksToWait;

        /* The list item will be inserted in wake time order. */
        listSET_LIST_ITEM_VALUE( &( pxCurrentTCBs[ xCurCoreID ]->xStateListItem ), xTimeToWake );

        if( xTimeToWake < xConstTickCount )
        {
            /* Wake time has overflowed. Place this item in the overflow list. */
            vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCBs[ xCurCoreID ]->xStateListItem ) );
        }
        else
        {
            /* The wake time has not overflowed, so the current block list is used. */
            vListInsert( pxDelayedTaskList, &( pxCurrentTCBs[ xCurCoreID ]->xStateListItem ) );

            /* If the task entering the blocked state was placed at the head of the
             * list of blocked tasks then xNextTaskUnblockTime needs to be updated
             * too. */
            if( xTimeToWake < xNextTaskUnblockTime )
            {
                xNextTaskUnblockTime = xTimeToWake;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }

        /* Avoid compiler warning when INCLUDE_vTaskSuspend is not 1. */
        ( void ) xCanBlockIndefinitely;
    }
    #endif /* INCLUDE_vTaskSuspend */
}
  5456. /* Code below here allows additional code to be inserted into this source file,
  5457. * especially where access to file scope functions and data is needed (for example
  5458. * when performing module tests). */
  5459. #ifdef FREERTOS_MODULE_TEST
  5460. #include "tasks_test_access_functions.h"
  5461. #endif
  5462. #if ( configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H == 1 )
  5463. #include "freertos_tasks_c_additions.h"
  5464. #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
/* Wrapper that expands the user-supplied FREERTOS_TASKS_C_ADDITIONS_INIT()
 * macro, giving externally provided additions a hook with access to this
 * file's private (file-scope) functions and data. */
static void freertos_tasks_c_additions_init( void )
{
    FREERTOS_TASKS_C_ADDITIONS_INIT();
}
  5469. #endif
  5470. #endif /* if ( configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H == 1 ) */