tx_thread.h 57 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681
  1. /***************************************************************************
  2. * Copyright (c) 2024 Microsoft Corporation
  3. *
  4. * This program and the accompanying materials are made available under the
  5. * terms of the MIT License which is available at
  6. * https://opensource.org/licenses/MIT.
  7. *
  8. * SPDX-License-Identifier: MIT
  9. **************************************************************************/
  10. /**************************************************************************/
  11. /**************************************************************************/
  12. /** */
  13. /** ThreadX Component */
  14. /** */
  15. /** Thread */
  16. /** */
  17. /**************************************************************************/
  18. /**************************************************************************/
  19. /**************************************************************************/
  20. /* */
  21. /* COMPONENT DEFINITION RELEASE */
  22. /* */
  23. /* tx_thread.h PORTABLE SMP */
  24. /* 6.3.0 */
  25. /* AUTHOR */
  26. /* */
  27. /* William E. Lamie, Microsoft Corporation */
  28. /* */
  29. /* DESCRIPTION */
  30. /* */
  31. /* This file defines the ThreadX thread control component, including */
  32. /* data types and external references. It is assumed that tx_api.h */
  33. /* and tx_port.h have already been included. */
  34. /* */
  35. /* RELEASE HISTORY */
  36. /* */
  37. /* DATE NAME DESCRIPTION */
  38. /* */
  39. /* 09-30-2020 William E. Lamie Initial Version 6.1 */
  40. /* 10-31-2023 Tiejun Zhou Fixed MISRA2012 rule 8.3, */
  41. /* resulting in version 6.3.0 */
  42. /* */
  43. /**************************************************************************/
#ifndef TX_THREAD_H
#define TX_THREAD_H

/* Add include files needed for in-line macros. */
#include "tx_initialize.h"

/* Define thread control specific data definitions. */

#define TX_THREAD_ID ((ULONG) 0x54485244)                        /* ASCII "THRD": validates a TX_THREAD control block. */
#define TX_THREAD_MAX_BYTE_VALUES 256
#define TX_THREAD_PRIORITY_GROUP_MASK ((ULONG) 0xFF)             /* Mask for one 8-priority group. */
#define TX_THREAD_PRIORITY_GROUP_SIZE 8
#define TX_THREAD_EXECUTE_LOG_SIZE ((UINT) 8)
#define TX_THREAD_SMP_PROTECT_WAIT_LIST_SIZE (TX_THREAD_SMP_MAX_CORES + 1)   /* One extra slot beyond the core count. */

/* Define the default thread stack checking. This can be overridden by
   a particular port, which is necessary if the stack growth is from
   low address to high address (the default logic is for stacks that
   grow from high address to low address). */
#ifndef TX_THREAD_STACK_CHECK
#define TX_THREAD_STACK_CHECK(thread_ptr) \
{ \
TX_INTERRUPT_SAVE_AREA \
    TX_DISABLE \
    /* Only examine threads with a valid control block ID. */ \
    if (((thread_ptr)) && ((thread_ptr) -> tx_thread_id == TX_THREAD_ID)) \
    { \
        /* Record a new highest (lowest-address) stack point when the current SP is below it. */ \
        if (((ULONG *) (thread_ptr) -> tx_thread_stack_ptr) < ((ULONG *) (thread_ptr) -> tx_thread_stack_highest_ptr)) \
        { \
            (thread_ptr) -> tx_thread_stack_highest_ptr = (thread_ptr) -> tx_thread_stack_ptr; \
        } \
        /* An overwritten fill word at either stack boundary, or a highest pointer below the stack start, signals a stack error. */ \
        if ((*((ULONG *) (thread_ptr) -> tx_thread_stack_start) != TX_STACK_FILL) || \
            (*((ULONG *) (((UCHAR *) (thread_ptr) -> tx_thread_stack_end) + 1)) != TX_STACK_FILL) || \
            (((ULONG *) (thread_ptr) -> tx_thread_stack_highest_ptr) < ((ULONG *) (thread_ptr) -> tx_thread_stack_start))) \
        { \
            /* Re-enable interrupts around the error handler call. */ \
            TX_RESTORE \
            _tx_thread_stack_error_handler((thread_ptr)); \
            TX_DISABLE \
        } \
        /* Fill word just below the recorded highest point consumed: re-analyze actual usage. */ \
        if (*(((ULONG *) (thread_ptr) -> tx_thread_stack_highest_ptr) - 1) != TX_STACK_FILL) \
        { \
            TX_RESTORE \
            _tx_thread_stack_analyze((thread_ptr)); \
            TX_DISABLE \
        } \
    } \
    TX_RESTORE \
}
#endif
/* Define default post thread delete macro to whitespace, if it hasn't been
   defined previously (typically in tx_port.h). */

#ifndef TX_THREAD_DELETE_PORT_COMPLETION
#define TX_THREAD_DELETE_PORT_COMPLETION(t)
#endif

/* Define default post thread reset macro to whitespace, if it hasn't been
   defined previously (typically in tx_port.h). */

#ifndef TX_THREAD_RESET_PORT_COMPLETION
#define TX_THREAD_RESET_PORT_COMPLETION(t)
#endif

/* Define the thread create internal extension macro to whitespace, if it hasn't
   been defined previously (typically in tx_port.h). */

#ifndef TX_THREAD_CREATE_INTERNAL_EXTENSION
#define TX_THREAD_CREATE_INTERNAL_EXTENSION(t)
#endif
/* Define internal thread control function prototypes. */

VOID _tx_thread_initialize(VOID);
VOID _tx_thread_schedule(VOID);
VOID _tx_thread_shell_entry(VOID);
VOID _tx_thread_stack_analyze(TX_THREAD *thread_ptr);
VOID _tx_thread_stack_build(TX_THREAD *thread_ptr, VOID (*function_ptr)(VOID));
VOID _tx_thread_stack_error(TX_THREAD *thread_ptr);
VOID _tx_thread_stack_error_handler(TX_THREAD *thread_ptr);
VOID _tx_thread_system_preempt_check(VOID);
VOID _tx_thread_system_resume(TX_THREAD *thread_ptr);
VOID _tx_thread_system_ni_resume(TX_THREAD *thread_ptr);
VOID _tx_thread_system_return(VOID);
VOID _tx_thread_system_suspend(TX_THREAD *thread_ptr);
VOID _tx_thread_system_ni_suspend(TX_THREAD *thread_ptr, ULONG timeout);
VOID _tx_thread_time_slice(VOID);
VOID _tx_thread_timeout(ULONG timeout_input);

/* Define all internal SMP prototypes.
   NOTE(review): the mix of VOID and lower-case void below follows the MISRA 2012
   rule 8.3 cleanup recorded in the release history; keep each declaration
   consistent with its definition. */

void _tx_thread_smp_current_state_set(ULONG new_state);
UINT _tx_thread_smp_find_next_priority(UINT priority);
void _tx_thread_smp_high_level_initialize(void);
void _tx_thread_smp_rebalance_execute_list(UINT core_index);

/* Define all internal ThreadX SMP low-level assembly routines. */

VOID _tx_thread_smp_core_wait(void);
void _tx_thread_smp_initialize_wait(void);
void _tx_thread_smp_low_level_initialize(UINT number_of_cores);
void _tx_thread_smp_core_preempt(UINT core);
/* Thread control component external data declarations follow. */

#define THREAD_DECLARE extern

/* Define the pointer that contains the system stack pointer. This is
   utilized when control returns from a thread to the system to reset the
   current stack. This is setup in the low-level initialization function. */

THREAD_DECLARE VOID * _tx_thread_system_stack_ptr[TX_THREAD_SMP_MAX_CORES];

/* Define the current thread pointer. This variable points to the currently
   executing thread. If this variable is NULL, no thread is executing. */

THREAD_DECLARE TX_THREAD * _tx_thread_current_ptr[TX_THREAD_SMP_MAX_CORES];

/* Define the variable that holds the next thread to execute. It is important
   to remember that this is not necessarily equal to the current thread
   pointer. */

THREAD_DECLARE TX_THREAD * _tx_thread_execute_ptr[TX_THREAD_SMP_MAX_CORES];

/* Define the ThreadX SMP scheduling and mapping data structures. */

THREAD_DECLARE TX_THREAD * _tx_thread_smp_schedule_list[TX_THREAD_SMP_MAX_CORES];
THREAD_DECLARE ULONG _tx_thread_smp_reschedule_pending;
THREAD_DECLARE TX_THREAD_SMP_PROTECT _tx_thread_smp_protection;
THREAD_DECLARE volatile ULONG _tx_thread_smp_release_cores_flag;
THREAD_DECLARE ULONG _tx_thread_smp_system_error;
THREAD_DECLARE ULONG _tx_thread_smp_inter_core_interrupts[TX_THREAD_SMP_MAX_CORES];

/* Protection wait list: sized TX_THREAD_SMP_MAX_CORES + 1 (see
   TX_THREAD_SMP_PROTECT_WAIT_LIST_SIZE above). */

THREAD_DECLARE ULONG _tx_thread_smp_protect_wait_list_size;
THREAD_DECLARE ULONG _tx_thread_smp_protect_wait_list[TX_THREAD_SMP_PROTECT_WAIT_LIST_SIZE];
THREAD_DECLARE ULONG _tx_thread_smp_protect_wait_counts[TX_THREAD_SMP_MAX_CORES];
THREAD_DECLARE ULONG _tx_thread_smp_protect_wait_list_lock_protect_in_force;
THREAD_DECLARE ULONG _tx_thread_smp_protect_wait_list_tail;
THREAD_DECLARE ULONG _tx_thread_smp_protect_wait_list_head;

/* Define logic for conditional dynamic maximum number of cores. */

#ifdef TX_THREAD_SMP_DYNAMIC_CORE_MAX
THREAD_DECLARE ULONG _tx_thread_smp_max_cores;
THREAD_DECLARE ULONG _tx_thread_smp_detected_cores;
#endif

/* Define the head pointer of the created thread list. */

THREAD_DECLARE TX_THREAD * _tx_thread_created_ptr;

/* Define the variable that holds the number of created threads. */

THREAD_DECLARE ULONG _tx_thread_created_count;

/* Define the current state variable. When this value is 0, a thread
   is executing or the system is idle. Other values indicate that
   interrupt or initialization processing is active. This variable is
   initialized to TX_INITIALIZE_IN_PROGRESS to indicate initialization is
   active. */

THREAD_DECLARE volatile ULONG _tx_thread_system_state[TX_THREAD_SMP_MAX_CORES];

/* Determine if we need to remap system state to a function call. */

#ifndef TX_THREAD_SMP_SOURCE_CODE

/* Yes, remap system state to a function call so we can get the system state for the current core. */
#define _tx_thread_system_state _tx_thread_smp_current_state_get()

/* Yes, remap get current thread to a function call so we can get the current thread for the current core. */
#define _tx_thread_current_ptr _tx_thread_smp_current_thread_get()

#endif
/* Define the 32-bit priority bit-maps. There is one priority bit map for each
   32 priority levels supported. If only 32 priorities are supported there is
   only one bit map. Each bit within a priority bit map represents that one
   or more threads at the associated thread priority are ready. */

THREAD_DECLARE ULONG _tx_thread_priority_maps[TX_MAX_PRIORITIES/32];

/* Define the priority map active bit map that specifies which of the previously
   defined priority maps have something set. This is only necessary if more than
   32 priorities are supported. */

#if TX_MAX_PRIORITIES > 32
THREAD_DECLARE ULONG _tx_thread_priority_map_active;
#endif

#ifndef TX_DISABLE_PREEMPTION_THRESHOLD

/* Define the 32-bit preempt priority bit maps. There is one preempt bit map
   for each 32 priority levels supported. If only 32 priorities are supported
   there is only one bit map. Each set bit corresponds to a preempted priority
   level that had preemption-threshold active to protect against preemption of a
   range of relatively higher priority threads. */

THREAD_DECLARE ULONG _tx_thread_preempted_maps[TX_MAX_PRIORITIES/32];

/* Define the preempt map active bit map that specifies which of the previously
   defined preempt maps have something set. This is only necessary if more than
   32 priorities are supported. */

#if TX_MAX_PRIORITIES > 32
THREAD_DECLARE ULONG _tx_thread_preempted_map_active;
#endif

/* Define the array that contains the thread at each priority level that was scheduled with
   preemption-threshold enabled. This will be useful when returning from a nested
   preemption-threshold condition. */

THREAD_DECLARE TX_THREAD *_tx_thread_preemption_threshold_list[TX_MAX_PRIORITIES];

#endif

/* Define the last thread scheduled with preemption-threshold. When preemption-threshold is
   disabled, a thread with preemption-threshold set disables all other threads from running.
   Effectively, its preemption-threshold is 0.
   NOTE(review): the double underscore in the name below appears intentional -- verify
   against the corresponding .c sources before renaming, since they must agree. */

THREAD_DECLARE TX_THREAD *_tx_thread_preemption__threshold_scheduled;

/* Define the array of thread pointers. Each entry represents the threads that
   are ready at that priority group. For example, index 10 in this array
   represents the first thread ready at priority 10. If this entry is NULL,
   no threads are ready at that priority. */

THREAD_DECLARE TX_THREAD * _tx_thread_priority_list[TX_MAX_PRIORITIES];

/* Define the global preempt disable variable. If this is non-zero, preemption is
   disabled. It is used internally by ThreadX to prevent preemption of a thread in
   the middle of a service that is resuming or suspending another thread. */

THREAD_DECLARE volatile UINT _tx_thread_preempt_disable;

/* Define the global function pointer for mutex cleanup on thread completion or
   termination. This pointer is setup during mutex initialization. */

THREAD_DECLARE VOID (*_tx_thread_mutex_release)(TX_THREAD *thread_ptr);

/* Define the global build options variable. This contains a bit map representing
   how the ThreadX library was built. The following are the bit field definitions:

                    Bit(s)                   Meaning

                    31                  Reserved
                    30                  TX_NOT_INTERRUPTABLE defined
                    29-24               Priority groups 1  -> 32 priorities
                                                        2  -> 64 priorities
                                                        3  -> 96 priorities
                                                        ...
                                                        32 -> 1024 priorities
                    23                  TX_TIMER_PROCESS_IN_ISR defined
                    22                  TX_REACTIVATE_INLINE defined
                    21                  TX_DISABLE_STACK_FILLING defined
                    20                  TX_ENABLE_STACK_CHECKING defined
                    19                  TX_DISABLE_PREEMPTION_THRESHOLD defined
                    18                  TX_DISABLE_REDUNDANT_CLEARING defined
                    17                  TX_DISABLE_NOTIFY_CALLBACKS defined
                    16                  TX_BLOCK_POOL_ENABLE_PERFORMANCE_INFO defined
                    15                  TX_BYTE_POOL_ENABLE_PERFORMANCE_INFO defined
                    14                  TX_EVENT_FLAGS_ENABLE_PERFORMANCE_INFO defined
                    13                  TX_MUTEX_ENABLE_PERFORMANCE_INFO defined
                    12                  TX_QUEUE_ENABLE_PERFORMANCE_INFO defined
                    11                  TX_SEMAPHORE_ENABLE_PERFORMANCE_INFO defined
                    10                  TX_THREAD_ENABLE_PERFORMANCE_INFO defined
                    9                   TX_TIMER_ENABLE_PERFORMANCE_INFO defined
                    8                   TX_ENABLE_EVENT_TRACE | TX_ENABLE_EVENT_LOGGING defined
                    7                   Reserved
                    6                   Reserved
                    5                   Reserved
                    4                   Reserved
                    3                   Reserved
                    2                   Reserved
                    1                   64-bit FPU Enabled
                    0                   Reserved  */

THREAD_DECLARE ULONG _tx_build_options;
#ifdef TX_ENABLE_STACK_CHECKING

/* Define the global function pointer for stack error handling. If a stack error is
   detected and the application has registered a stack error handler, it will be
   called via this function pointer. */

THREAD_DECLARE VOID (*_tx_thread_application_stack_error_handler)(TX_THREAD *thread_ptr);

#endif

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

/* Define the total number of thread resumptions. Each time a thread enters the
   ready state this variable is incremented. */

THREAD_DECLARE ULONG _tx_thread_performance_resume_count;

/* Define the total number of thread suspensions. Each time a thread enters a
   suspended state this variable is incremented. */

THREAD_DECLARE ULONG _tx_thread_performance_suspend_count;

/* Define the total number of solicited thread preemptions. Each time a thread is
   preempted by directly calling a ThreadX service, this variable is incremented. */

THREAD_DECLARE ULONG _tx_thread_performance_solicited_preemption_count;

/* Define the total number of interrupt thread preemptions. Each time a thread is
   preempted as a result of an ISR calling a ThreadX service, this variable is
   incremented. */

THREAD_DECLARE ULONG _tx_thread_performance_interrupt_preemption_count;

/* Define the total number of priority inversions. Each time a thread is blocked by
   a mutex owned by a lower-priority thread, this variable is incremented. */

THREAD_DECLARE ULONG _tx_thread_performance_priority_inversion_count;

/* Define the total number of time-slices. Each time a time-slice operation is
   actually performed (another thread is setup for running) this variable is
   incremented. */

THREAD_DECLARE ULONG _tx_thread_performance_time_slice_count;

/* Define the total number of thread relinquish operations. Each time a thread
   relinquish operation is actually performed (another thread is setup for running)
   this variable is incremented. */

THREAD_DECLARE ULONG _tx_thread_performance_relinquish_count;

/* Define the total number of thread timeouts. Each time a thread has a
   timeout this variable is incremented. */

THREAD_DECLARE ULONG _tx_thread_performance_timeout_count;

/* Define the total number of thread wait aborts. Each time a thread's suspension
   is lifted by the tx_thread_wait_abort call this variable is incremented. */

THREAD_DECLARE ULONG _tx_thread_performance_wait_abort_count;

/* Define the total number of idle system thread returns. Each time a thread returns to
   an idle system (no other thread is ready to run) this variable is incremented. */

THREAD_DECLARE ULONG _tx_thread_performance_idle_return_count;

/* Define the total number of non-idle system thread returns. Each time a thread returns to
   a non-idle system (another thread is ready to run) this variable is incremented. */

THREAD_DECLARE ULONG _tx_thread_performance_non_idle_return_count;

#endif
/* Define macros and helper functions. */

/* Define the MOD32 bit set macro that is used to set/clear a priority bit within a specific
   priority group. */

#if TX_MAX_PRIORITIES > 32
#define MAP_INDEX (map_index)
#ifndef TX_MOD32_BIT_SET
#define TX_MOD32_BIT_SET(a,b) (b) = (((ULONG) 1) << ((a)%((UINT) 32)));
#endif
#else
#define MAP_INDEX (0)
#ifndef TX_MOD32_BIT_SET
#define TX_MOD32_BIT_SET(a,b) (b) = (((ULONG) 1) << ((a)));
#endif
#endif

/* Define the DIV32 bit set macro that is used to set/clear a priority group bit and is
   only necessary when using priorities greater than 32. */

#if TX_MAX_PRIORITIES > 32
#ifndef TX_DIV32_BIT_SET
#define TX_DIV32_BIT_SET(a,b) (b) = (((ULONG) 1) << ((a)/((UINT) 32)));
#endif
#endif

/* Define state change macro that can be used by run-mode debug agents to keep track of thread
   state changes. By default, it is mapped to white space. */

#ifndef TX_THREAD_STATE_CHANGE
#define TX_THREAD_STATE_CHANGE(a, b)
#endif

/* Define the macro to set the current thread pointer. This is particularly useful in SMP
   versions of ThreadX to add additional processing. The default implementation is to simply
   access the global current thread pointer directly. */

#ifndef TX_THREAD_SET_CURRENT
#define TX_THREAD_SET_CURRENT(a) TX_MEMSET(&_tx_thread_current_ptr[0], (a), sizeof(_tx_thread_current_ptr));
#endif

/* Define the get system state macro. By default, it maps to the helper that returns
   the system state of the current core. */

#ifndef TX_THREAD_GET_SYSTEM_STATE
#define TX_THREAD_GET_SYSTEM_STATE() _tx_thread_smp_current_state_get()
#endif

/* Define the check for whether or not to call the _tx_thread_system_return function. A non-zero value
   indicates that _tx_thread_system_return should not be called. Note: this macro expands to two
   statements, so it must only be used where a statement sequence is valid. */

#ifndef TX_THREAD_SYSTEM_RETURN_CHECK
#define TX_THREAD_SYSTEM_RETURN_CHECK(c) (c) = (ULONG) _tx_thread_preempt_disable; (c) = (c) | TX_THREAD_GET_SYSTEM_STATE();
#endif

/* Define the timeout setup macro used in _tx_thread_create. */

#ifndef TX_THREAD_CREATE_TIMEOUT_SETUP
#define TX_THREAD_CREATE_TIMEOUT_SETUP(t) (t) -> tx_thread_timer.tx_timer_internal_timeout_function = &(_tx_thread_timeout); \
                                          (t) -> tx_thread_timer.tx_timer_internal_timeout_param = TX_POINTER_TO_ULONG_CONVERT((t));
#endif

/* Define the thread timeout pointer setup macro used in _tx_thread_timeout. */

#ifndef TX_THREAD_TIMEOUT_POINTER_SETUP
#define TX_THREAD_TIMEOUT_POINTER_SETUP(t) (t) = TX_ULONG_TO_THREAD_POINTER_CONVERT(timeout_input);
#endif
#ifdef TX_THREAD_SMP_SOURCE_CODE

/* Determine if the in-line capability has been disabled. */

#ifndef TX_DISABLE_INLINE

/* Define the inline option, which is compiler specific. If not defined, it will be
   resolved as "inline". */

#ifndef INLINE_DECLARE
#define INLINE_DECLARE inline
#endif

/* Define the lowest bit set macro. Note, that this may be overridden
   by a port specific definition if there is supporting assembly language
   instructions in the architecture. */

#ifndef TX_LOWEST_SET_BIT_CALCULATE
  360. static INLINE_DECLARE UINT _tx_thread_lowest_set_bit_calculate(ULONG map)
  361. {
  362. UINT bit_set;
  363. if ((map & ((ULONG) 0x1)) != ((ULONG) 0))
  364. {
  365. bit_set = ((UINT) 0);
  366. }
  367. else
  368. {
  369. map = map & (ULONG) ((~map) + ((ULONG) 1));
  370. if (map < ((ULONG) 0x100))
  371. {
  372. bit_set = ((UINT) 1);
  373. }
  374. else if (map < ((ULONG) 0x10000))
  375. {
  376. bit_set = ((UINT) 9);
  377. map = map >> ((UINT) 8);
  378. }
  379. else if (map < ((ULONG) 0x01000000))
  380. {
  381. bit_set = ((UINT) 17);
  382. map = map >> ((UINT) 16);
  383. }
  384. else
  385. {
  386. bit_set = ((UINT) 25);
  387. map = map >> ((UINT) 24);
  388. }
  389. if (map >= ((ULONG) 0x10))
  390. {
  391. map = map >> ((UINT) 4);
  392. bit_set = bit_set + ((UINT) 4);
  393. }
  394. if (map >= ((ULONG) 0x4))
  395. {
  396. map = map >> ((UINT) 2);
  397. bit_set = bit_set + ((UINT) 2);
  398. }
  399. bit_set = bit_set - (UINT) (map & (ULONG) 0x1);
  400. }
  401. return(bit_set);
  402. }
/* Map the macro onto the helper above: assigns the lowest set bit of (m) to (b). */
#define TX_LOWEST_SET_BIT_CALCULATE(m, b) (b) = _tx_thread_lowest_set_bit_calculate((m));
#endif

/* Define the next priority macro. Note, that this may be overridden
   by a port specific definition. */

#ifndef TX_NEXT_PRIORITY_FIND
#if TX_MAX_PRIORITIES > 32
  409. static INLINE_DECLARE UINT _tx_thread_smp_next_priority_find(UINT priority)
  410. {
  411. ULONG map_index;
  412. ULONG local_priority_map_active;
  413. ULONG local_priority_map;
  414. ULONG priority_bit;
  415. ULONG first_bit_set;
  416. ULONG found_priority;
  417. found_priority = ((UINT) TX_MAX_PRIORITIES);
  418. if (priority < ((UINT) TX_MAX_PRIORITIES))
  419. {
  420. map_index = priority/((UINT) 32);
  421. local_priority_map = _tx_thread_priority_maps[map_index];
  422. priority_bit = (((ULONG) 1) << (priority % ((UINT) 32)));
  423. local_priority_map = local_priority_map & ~(priority_bit - ((UINT)1));
  424. if (local_priority_map != ((ULONG) 0))
  425. {
  426. TX_LOWEST_SET_BIT_CALCULATE(local_priority_map, first_bit_set)
  427. found_priority = (map_index * ((UINT) 32)) + first_bit_set;
  428. }
  429. else
  430. {
  431. /* Move to next map index. */
  432. map_index++;
  433. if (map_index < (((UINT) TX_MAX_PRIORITIES)/((UINT) 32)))
  434. {
  435. priority_bit = (((ULONG) 1) << (map_index));
  436. local_priority_map_active = _tx_thread_priority_map_active & ~(priority_bit - ((UINT) 1));
  437. if (local_priority_map_active != ((ULONG) 0))
  438. {
  439. TX_LOWEST_SET_BIT_CALCULATE(local_priority_map_active, map_index)
  440. local_priority_map = _tx_thread_priority_maps[map_index];
  441. TX_LOWEST_SET_BIT_CALCULATE(local_priority_map, first_bit_set)
  442. found_priority = (map_index * ((UINT) 32)) + first_bit_set;
  443. }
  444. }
  445. }
  446. }
  447. return(found_priority);
  448. }
  449. #else
  450. static INLINE_DECLARE UINT _tx_thread_smp_next_priority_find(UINT priority)
  451. {
  452. UINT first_bit_set;
  453. ULONG local_priority_map;
  454. UINT next_priority;
  455. local_priority_map = _tx_thread_priority_maps[0];
  456. local_priority_map = local_priority_map >> priority;
  457. next_priority = priority;
  458. if (local_priority_map == ((ULONG) 0))
  459. {
  460. next_priority = ((UINT) TX_MAX_PRIORITIES);
  461. }
  462. else
  463. {
  464. if (next_priority >= ((UINT) TX_MAX_PRIORITIES))
  465. {
  466. next_priority = ((UINT) TX_MAX_PRIORITIES);
  467. }
  468. else
  469. {
  470. TX_LOWEST_SET_BIT_CALCULATE(local_priority_map, first_bit_set)
  471. next_priority = priority + first_bit_set;
  472. }
  473. }
  474. return(next_priority);
  475. }
  476. #endif
  477. #endif
  478. static INLINE_DECLARE void _tx_thread_smp_schedule_list_clear(void)
  479. {
  480. #if TX_THREAD_SMP_MAX_CORES > 6
  481. UINT i;
  482. #endif
  483. /* Clear the schedule list. */
  484. _tx_thread_smp_schedule_list[0] = TX_NULL;
  485. #if TX_THREAD_SMP_MAX_CORES > 1
  486. _tx_thread_smp_schedule_list[1] = TX_NULL;
  487. #if TX_THREAD_SMP_MAX_CORES > 2
  488. _tx_thread_smp_schedule_list[2] = TX_NULL;
  489. #if TX_THREAD_SMP_MAX_CORES > 3
  490. _tx_thread_smp_schedule_list[3] = TX_NULL;
  491. #if TX_THREAD_SMP_MAX_CORES > 4
  492. _tx_thread_smp_schedule_list[4] = TX_NULL;
  493. #if TX_THREAD_SMP_MAX_CORES > 5
  494. _tx_thread_smp_schedule_list[5] = TX_NULL;
  495. #if TX_THREAD_SMP_MAX_CORES > 6
  496. /* Loop to clear the remainder of the schedule list. */
  497. i = ((UINT) 6);
  498. #ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX
  499. while (i < ((UINT) TX_THREAD_SMP_MAX_CORES))
  500. #else
  501. while (i < _tx_thread_smp_max_cores)
  502. #endif
  503. {
  504. /* Clear entry in schedule list. */
  505. _tx_thread_smp_schedule_list[i] = TX_NULL;
  506. /* Move to next index. */
  507. i++;
  508. }
  509. #endif
  510. #endif
  511. #endif
  512. #endif
  513. #endif
  514. #endif
  515. }
/* Clear every entry of the execute list (_tx_thread_execute_ptr), the per-core
   array of threads selected to run.  Entries 0-5 are cleared with unrolled
   assignments for speed; cores beyond six, when configured, are cleared in a
   loop.  Note that with TX_THREAD_SMP_DYNAMIC_CORE_MAX defined, the first six
   entries are still cleared unconditionally - only the loop honors the dynamic
   core count.  */
static INLINE_DECLARE VOID _tx_thread_smp_execute_list_clear(void)
{
#if TX_THREAD_SMP_MAX_CORES > 6
UINT j;     /* Index for cores beyond the unrolled first six.  */
#endif

    /* Clear the execute list.  */
    _tx_thread_execute_ptr[0] = TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 1
    _tx_thread_execute_ptr[1] = TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 2
    _tx_thread_execute_ptr[2] = TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 3
    _tx_thread_execute_ptr[3] = TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 4
    _tx_thread_execute_ptr[4] = TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 5
    _tx_thread_execute_ptr[5] = TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 6

    /* Loop to clear the remainder of the execute list.  */
    j = ((UINT) 6);

    /* Static builds bound the loop by the compile-time core count; dynamic
       builds bound it by the run-time core count.  */
#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX
    while (j < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else
    while (j < _tx_thread_smp_max_cores)
#endif
    {
        /* Clear entry in execute list.  */
        _tx_thread_execute_ptr[j] = TX_NULL;

        /* Move to next index.  */
        j++;
    }
#endif
#endif
#endif
#endif
#endif
#endif
}
/* Copy the execute list (_tx_thread_execute_ptr) into the schedule list
   (_tx_thread_smp_schedule_list), giving the scheduler a working copy it can
   rearrange before committing.  Entries 0-5 are copied with unrolled
   assignments; cores beyond six, when configured, are copied in a loop.  */
static INLINE_DECLARE VOID _tx_thread_smp_schedule_list_setup(void)
{
#if TX_THREAD_SMP_MAX_CORES > 6
UINT j;     /* Index for cores beyond the unrolled first six.  */
#endif

    _tx_thread_smp_schedule_list[0] = _tx_thread_execute_ptr[0];
#if TX_THREAD_SMP_MAX_CORES > 1
    _tx_thread_smp_schedule_list[1] = _tx_thread_execute_ptr[1];
#if TX_THREAD_SMP_MAX_CORES > 2
    _tx_thread_smp_schedule_list[2] = _tx_thread_execute_ptr[2];
#if TX_THREAD_SMP_MAX_CORES > 3
    _tx_thread_smp_schedule_list[3] = _tx_thread_execute_ptr[3];
#if TX_THREAD_SMP_MAX_CORES > 4
    _tx_thread_smp_schedule_list[4] = _tx_thread_execute_ptr[4];
#if TX_THREAD_SMP_MAX_CORES > 5
    _tx_thread_smp_schedule_list[5] = _tx_thread_execute_ptr[5];
#if TX_THREAD_SMP_MAX_CORES > 6

    /* Loop to setup the remainder of the schedule list.  */
    j = ((UINT) 6);

    /* Static builds bound the loop by the compile-time core count; dynamic
       builds bound it by the run-time core count.  */
#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX
    while (j < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else
    while (j < _tx_thread_smp_max_cores)
#endif
    {
        /* Setup entry in schedule list.  */
        _tx_thread_smp_schedule_list[j] = _tx_thread_execute_ptr[j];

        /* Move to next index.  */
        j++;
    }
#endif
#endif
#endif
#endif
#endif
#endif
}
  591. #ifdef TX_THREAD_SMP_INTER_CORE_INTERRUPT
  592. static INLINE_DECLARE VOID _tx_thread_smp_core_interrupt(TX_THREAD *thread_ptr, UINT current_core, UINT target_core)
  593. {
  594. TX_THREAD *current_thread;
  595. /* Make sure this is a different core, since there is no need to interrupt the current core for
  596. a scheduling change. */
  597. if (current_core != target_core)
  598. {
  599. /* Yes, a different core is present. */
  600. /* Pickup the currently executing thread. */
  601. current_thread = _tx_thread_current_ptr[target_core];
  602. /* Determine if they are the same. */
  603. if ((current_thread != TX_NULL) && (thread_ptr != current_thread))
  604. {
  605. /* Not the same and not NULL... determine if the core is running at thread level. */
  606. if (_tx_thread_system_state[target_core] < TX_INITIALIZE_IN_PROGRESS)
  607. {
  608. /* Preempt the mapped thread. */
  609. _tx_thread_smp_core_preempt(target_core);
  610. }
  611. }
  612. }
  613. }
  614. #else
  615. /* Define to whitespace. */
  616. #define _tx_thread_smp_core_interrupt(a,b,c)
  617. #endif
  618. #ifdef TX_THREAD_SMP_WAKEUP_LOGIC
  619. static INLINE_DECLARE VOID _tx_thread_smp_core_wakeup(UINT current_core, UINT target_core)
  620. {
  621. /* Determine if the core specified is not the current core - no need to wakeup the
  622. current core. */
  623. if (target_core != current_core)
  624. {
  625. /* Wakeup based on application's macro. */
  626. TX_THREAD_SMP_WAKEUP(target_core);
  627. }
  628. }
  629. #else
  630. /* Define to whitespace. */
  631. #define _tx_thread_smp_core_wakeup(a,b)
  632. #endif
  633. static INLINE_DECLARE VOID _tx_thread_smp_execute_list_setup(UINT core_index)
  634. {
  635. TX_THREAD *schedule_thread;
  636. UINT i;
  637. /* Loop to copy the schedule list into the execution list. */
  638. i = ((UINT) 0);
  639. #ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX
  640. while (i < ((UINT) TX_THREAD_SMP_MAX_CORES))
  641. #else
  642. while (i < _tx_thread_smp_max_cores)
  643. #endif
  644. {
  645. /* Pickup the thread to schedule. */
  646. schedule_thread = _tx_thread_smp_schedule_list[i];
  647. /* Copy the schedule list into the execution list. */
  648. _tx_thread_execute_ptr[i] = schedule_thread;
  649. /* If necessary, interrupt the core with the new thread to schedule. */
  650. _tx_thread_smp_core_interrupt(schedule_thread, core_index, i);
  651. #ifdef TX_THREAD_SMP_WAKEUP_LOGIC
  652. /* Does this need to be waked up? */
  653. if ((i != core_index) && (schedule_thread != TX_NULL))
  654. {
  655. /* Wakeup based on application's macro. */
  656. TX_THREAD_SMP_WAKEUP(i);
  657. }
  658. #endif
  659. /* Move to next index. */
  660. i++;
  661. }
  662. }
/* Return a bit map of the cores that currently have no thread in the execute
   list: bit n set means core n is available.  Cores 0-5 are tested with
   unrolled code; cores beyond six, when configured, are tested in a loop.  */
static INLINE_DECLARE ULONG _tx_thread_smp_available_cores_get(void)
{
#if TX_THREAD_SMP_MAX_CORES > 6
UINT j;     /* Index for cores beyond the unrolled first six.  */
#endif
ULONG available_cores;

    /* Start with no cores available.  */
    available_cores = ((ULONG) 0);

    /* Core n is available when nothing is scheduled to execute on it.  */
    if (_tx_thread_execute_ptr[0] == TX_NULL)
    {
        available_cores = ((ULONG) 1);
    }
#if TX_THREAD_SMP_MAX_CORES > 1
    if (_tx_thread_execute_ptr[1] == TX_NULL)
    {
        available_cores = available_cores | ((ULONG) 2);
    }
#if TX_THREAD_SMP_MAX_CORES > 2
    if (_tx_thread_execute_ptr[2] == TX_NULL)
    {
        available_cores = available_cores | ((ULONG) 4);
    }
#if TX_THREAD_SMP_MAX_CORES > 3
    if (_tx_thread_execute_ptr[3] == TX_NULL)
    {
        available_cores = available_cores | ((ULONG) 8);
    }
#if TX_THREAD_SMP_MAX_CORES > 4
    if (_tx_thread_execute_ptr[4] == TX_NULL)
    {
        available_cores = available_cores | ((ULONG) 0x10);
    }
#if TX_THREAD_SMP_MAX_CORES > 5
    if (_tx_thread_execute_ptr[5] == TX_NULL)
    {
        available_cores = available_cores | ((ULONG) 0x20);
    }
#if TX_THREAD_SMP_MAX_CORES > 6

    /* Loop to test the remainder of the cores.  */
    j = ((UINT) 6);

#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX
    while (j < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else
    while (j < _tx_thread_smp_max_cores)
#endif
    {
        /* Determine if this core is available.  */
        if (_tx_thread_execute_ptr[j] == TX_NULL)
        {
            available_cores = available_cores | (((ULONG) 1) << j);
        }

        /* Move to next core.  */
        j++;
    }
#endif
#endif
#endif
#endif
#endif
#endif

    /* Return the bit map of available cores.  */
    return(available_cores);
}
/* Return the OR of the tx_thread_smp_cores_allowed masks of every thread in
   the execute list, i.e. the set of cores on which at least one currently
   scheduled thread is permitted to run.  Cores 0-5 are examined with
   unrolled code; cores beyond six, when configured, are examined in a loop.  */
static INLINE_DECLARE ULONG _tx_thread_smp_possible_cores_get(void)
{
#if TX_THREAD_SMP_MAX_CORES > 6
UINT j;     /* Index for cores beyond the unrolled first six.  */
#endif
ULONG possible_cores;
TX_THREAD *thread_ptr;

    /* Start with an empty mask.  */
    possible_cores = ((ULONG) 0);

    /* Accumulate the allowed-cores mask of each scheduled thread; idle
       cores (NULL entries) contribute nothing.  */
    thread_ptr = _tx_thread_execute_ptr[0];
    if (thread_ptr != TX_NULL)
    {
        possible_cores = thread_ptr -> tx_thread_smp_cores_allowed;
    }
#if TX_THREAD_SMP_MAX_CORES > 1
    thread_ptr = _tx_thread_execute_ptr[1];
    if (thread_ptr != TX_NULL)
    {
        possible_cores = possible_cores | thread_ptr -> tx_thread_smp_cores_allowed;
    }
#if TX_THREAD_SMP_MAX_CORES > 2
    thread_ptr = _tx_thread_execute_ptr[2];
    if (thread_ptr != TX_NULL)
    {
        possible_cores = possible_cores | thread_ptr -> tx_thread_smp_cores_allowed;
    }
#if TX_THREAD_SMP_MAX_CORES > 3
    thread_ptr = _tx_thread_execute_ptr[3];
    if (thread_ptr != TX_NULL)
    {
        possible_cores = possible_cores | thread_ptr -> tx_thread_smp_cores_allowed;
    }
#if TX_THREAD_SMP_MAX_CORES > 4
    thread_ptr = _tx_thread_execute_ptr[4];
    if (thread_ptr != TX_NULL)
    {
        possible_cores = possible_cores | thread_ptr -> tx_thread_smp_cores_allowed;
    }
#if TX_THREAD_SMP_MAX_CORES > 5
    thread_ptr = _tx_thread_execute_ptr[5];
    if (thread_ptr != TX_NULL)
    {
        possible_cores = possible_cores | thread_ptr -> tx_thread_smp_cores_allowed;
    }
#if TX_THREAD_SMP_MAX_CORES > 6

    /* Loop to examine the remainder of the cores.  */
    j = ((UINT) 6);

#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX
    while (j < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else
    while (j < _tx_thread_smp_max_cores)
#endif
    {
        /* Fold in this core's scheduled thread, if any.  */
        thread_ptr = _tx_thread_execute_ptr[j];
        if (thread_ptr != TX_NULL)
        {
            possible_cores = possible_cores | thread_ptr -> tx_thread_smp_cores_allowed;
        }

        /* Move to next core.  */
        j++;
    }
#endif
#endif
#endif
#endif
#endif
#endif

    /* Return the union of allowed-core masks.  */
    return(possible_cores);
}
/* Return the numerically largest tx_thread_priority among all threads in the
   execute list (in ThreadX a larger number means a lower priority).  Returns
   0 - the highest priority - when the execute list is empty.  Cores 0-5 are
   examined with unrolled code; cores beyond six, when configured, are
   examined in a loop.  */
static INLINE_DECLARE UINT _tx_thread_smp_lowest_priority_get(void)
{
#if TX_THREAD_SMP_MAX_CORES > 6
UINT j;     /* Index for cores beyond the unrolled first six.  */
#endif
TX_THREAD *thread_ptr;
UINT lowest_priority;

    /* Start at priority 0 (highest); the running maximum only grows.  */
    lowest_priority = ((UINT) 0);

    thread_ptr = _tx_thread_execute_ptr[0];
    if (thread_ptr != TX_NULL)
    {
        if (thread_ptr -> tx_thread_priority > lowest_priority)
        {
            lowest_priority = thread_ptr -> tx_thread_priority;
        }
    }
#if TX_THREAD_SMP_MAX_CORES > 1
    thread_ptr = _tx_thread_execute_ptr[1];
    if (thread_ptr != TX_NULL)
    {
        if (thread_ptr -> tx_thread_priority > lowest_priority)
        {
            lowest_priority = thread_ptr -> tx_thread_priority;
        }
    }
#if TX_THREAD_SMP_MAX_CORES > 2
    thread_ptr = _tx_thread_execute_ptr[2];
    if (thread_ptr != TX_NULL)
    {
        if (thread_ptr -> tx_thread_priority > lowest_priority)
        {
            lowest_priority = thread_ptr -> tx_thread_priority;
        }
    }
#if TX_THREAD_SMP_MAX_CORES > 3
    thread_ptr = _tx_thread_execute_ptr[3];
    if (thread_ptr != TX_NULL)
    {
        if (thread_ptr -> tx_thread_priority > lowest_priority)
        {
            lowest_priority = thread_ptr -> tx_thread_priority;
        }
    }
#if TX_THREAD_SMP_MAX_CORES > 4
    thread_ptr = _tx_thread_execute_ptr[4];
    if (thread_ptr != TX_NULL)
    {
        if (thread_ptr -> tx_thread_priority > lowest_priority)
        {
            lowest_priority = thread_ptr -> tx_thread_priority;
        }
    }
#if TX_THREAD_SMP_MAX_CORES > 5
    thread_ptr = _tx_thread_execute_ptr[5];
    if (thread_ptr != TX_NULL)
    {
        if (thread_ptr -> tx_thread_priority > lowest_priority)
        {
            lowest_priority = thread_ptr -> tx_thread_priority;
        }
    }
#if TX_THREAD_SMP_MAX_CORES > 6

    /* Loop to examine the remainder of the cores.  */
    j = ((UINT) 6);

#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX
    while (j < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else
    while (j < _tx_thread_smp_max_cores)
#endif
    {
        /* Determine if this core has a thread scheduled.  */
        thread_ptr = _tx_thread_execute_ptr[j];
        if (thread_ptr != TX_NULL)
        {
            /* Is this the new lowest priority?  */
            if (thread_ptr -> tx_thread_priority > lowest_priority)
            {
                lowest_priority = thread_ptr -> tx_thread_priority;
            }
        }

        /* Move to next core.  */
        j++;
    }
#endif
#endif
#endif
#endif
#endif
#endif

    /* Return the lowest (numerically largest) scheduled priority.  */
    return(lowest_priority);
}
/* Attempt to free a core for schedule_thread by moving already-mapped threads
   between cores.  A breadth-first search is performed: starting from the
   cores schedule_thread may use, each candidate core's currently mapped
   thread is examined to see whether it could itself move to an available
   core, either directly or through a further chain of moves.  When a chain
   is found, the schedule list and each moved thread's
   tx_thread_smp_core_mapped are updated to realize it.

   schedule_thread       - thread that needs a core.
   available_cores       - bit map of cores with no thread scheduled.
   thread_possible_cores - bit map of cores schedule_thread may run on.
   test_possible_cores   - bit map of cores still eligible for exploration
                           (shrinks as cores are enqueued).

   Returns the available core consumed by the final move of the remapping
   chain, or TX_THREAD_SMP_MAX_CORES when no remapping solution exists.  */
static INLINE_DECLARE UINT _tx_thread_smp_remap_solution_find(TX_THREAD *schedule_thread, ULONG available_cores, ULONG thread_possible_cores, ULONG test_possible_cores)
{
UINT core;
UINT previous_core;
ULONG test_cores;
ULONG last_thread_cores;
UINT queue_first, queue_last;
UINT core_queue[TX_THREAD_SMP_MAX_CORES-1];   /* FIFO of cores to explore.  */
TX_THREAD *thread_ptr;
TX_THREAD *last_thread;
TX_THREAD *thread_remap_list[TX_THREAD_SMP_MAX_CORES];   /* Per-core: thread that would move to that core.  */

    /* Clear the last thread cores in the search.  */
    last_thread_cores = ((ULONG) 0);

    /* Set the last thread pointer to NULL.  */
    last_thread = TX_NULL;

    /* Setup the core queue indices.  */
    queue_first = ((UINT) 0);
    queue_last = ((UINT) 0);

    /* Build a list of possible cores for this thread to execute on, starting
       with the previously mapped core (preferred to minimize migration).  */
    core = schedule_thread -> tx_thread_smp_core_mapped;
    if ((thread_possible_cores & (((ULONG) 1) << core)) != ((ULONG) 0))
    {
        /* Remember this potential mapping.  */
        thread_remap_list[core] = schedule_thread;
        core_queue[queue_last] = core;

        /* Move to next slot.  */
        queue_last++;

        /* Clear this core.  */
        thread_possible_cores = thread_possible_cores & ~(((ULONG) 1) << core);
    }

    /* Loop to add additional possible cores.  */
    while (thread_possible_cores != ((ULONG) 0))
    {
        /* Determine the first possible core.  */
        test_cores = thread_possible_cores;
        TX_LOWEST_SET_BIT_CALCULATE(test_cores, core)

        /* Clear this core.  */
        thread_possible_cores = thread_possible_cores & ~(((ULONG) 1) << core);

        /* Remember this potential mapping.  */
        thread_remap_list[core] = schedule_thread;
        core_queue[queue_last] = core;

        /* Move to next slot.  */
        queue_last++;
    }

    /* Loop to evaluate the potential thread mappings, against what is already
       mapped - classic BFS until the queue is drained or a free core is
       reachable.  */
    do
    {
        /* Pickup the next entry.  */
        core = core_queue[queue_first];

        /* Move to next slot.  */
        queue_first++;

        /* Retrieve the thread from the current mapping.  */
        thread_ptr = _tx_thread_smp_schedule_list[core];

        /* Determine if there is a thread currently mapped to this core.  */
        if (thread_ptr != TX_NULL)
        {
            /* Determine the cores available for this thread.  */
            thread_possible_cores = thread_ptr -> tx_thread_smp_cores_allowed;
            thread_possible_cores = test_possible_cores & thread_possible_cores;

            /* Are there any possible cores for this thread?  */
            if (thread_possible_cores != ((ULONG) 0))
            {
                /* Determine if there are cores available for this thread.  */
                if ((thread_possible_cores & available_cores) != ((ULONG) 0))
                {
                    /* Yes, remember the final thread and cores that are valid
                       for this thread - the chain terminates here.  */
                    last_thread_cores = thread_possible_cores & available_cores;
                    last_thread = thread_ptr;

                    /* We are done - get out of the loop!  */
                    break;
                }
                else
                {
                    /* Remove cores that will be added to the list, so each
                       core is explored at most once.  */
                    test_possible_cores = test_possible_cores & ~(thread_possible_cores);

                    /* Loop to add this thread to the potential mapping list.  */
                    do
                    {
                        /* Calculate the core.  */
                        test_cores = thread_possible_cores;
                        TX_LOWEST_SET_BIT_CALCULATE(test_cores, core)

                        /* Clear this core.  */
                        thread_possible_cores = thread_possible_cores & ~(((ULONG) 1) << core);

                        /* Remember this thread for remapping.  */
                        thread_remap_list[core] = thread_ptr;

                        /* Remember this core.  */
                        core_queue[queue_last] = core;

                        /* Move to next slot.  */
                        queue_last++;
                    } while (thread_possible_cores != ((ULONG) 0));
                }
            }
        }
    } while (queue_first != queue_last);

    /* Was a remapping solution found?  */
    if (last_thread != TX_NULL)
    {
        /* Walk the chain backwards from the displaced last thread to
           schedule_thread, shifting each thread to its new core.  */

        /* Pickup the core of the last thread to remap.  */
        core = last_thread -> tx_thread_smp_core_mapped;

        /* Pickup the thread from the remapping list.  */
        thread_ptr = thread_remap_list[core];

        /* Loop until we arrive at the thread we have been trying to map.  */
        while (thread_ptr != schedule_thread)
        {
            /* Move this thread in the schedule list.  */
            _tx_thread_smp_schedule_list[core] = thread_ptr;

            /* Remember the previous core.  */
            previous_core = core;

            /* Pickup the core of thread to remap.  */
            core = thread_ptr -> tx_thread_smp_core_mapped;

            /* Save the new core mapping for this thread.  */
            thread_ptr -> tx_thread_smp_core_mapped = previous_core;

            /* Move the next thread.  */
            thread_ptr = thread_remap_list[core];
        }

        /* Save the remaining thread (schedule_thread) in the updated schedule
           list.  */
        _tx_thread_smp_schedule_list[core] = thread_ptr;

        /* Update this thread's core mapping.  */
        thread_ptr -> tx_thread_smp_core_mapped = core;

        /* Finally, setup the last thread in the remapping solution on the
           lowest-numbered available core it can use.  */
        test_cores = last_thread_cores;
        TX_LOWEST_SET_BIT_CALCULATE(test_cores, core)

        /* Setup the last thread.  */
        _tx_thread_smp_schedule_list[core] = last_thread;

        /* Remember the core mapping for this thread.  */
        last_thread -> tx_thread_smp_core_mapped = core;
    }
    else
    {
        /* Set core to the maximum value in order to signal a remapping
           solution was not found.  */
        core = ((UINT) TX_THREAD_SMP_MAX_CORES);
    }

    /* Return core to the caller.  */
    return(core);
}
/* Build possible_preemption_list with every scheduled thread whose priority
   value is numerically greater than `priority` (i.e. lower priority, hence
   preemptable by a thread of `priority`), sorted lowest-priority first.
   Equal-priority entries keep their ready-list (FIFO) order so the thread
   that became ready last is preempted first.  The unrolled/looped clearing
   mirrors the other per-core helpers above.

   priority                 - priority of the thread wanting to preempt.
   possible_preemption_list - output array; trailing entries remain TX_NULL.

   Returns the OR of tx_thread_smp_cores_allowed over ALL scheduled threads
   (not just the preemptable ones).  */
static INLINE_DECLARE ULONG _tx_thread_smp_preemptable_threads_get(UINT priority, TX_THREAD *possible_preemption_list[TX_THREAD_SMP_MAX_CORES])
{
UINT i, j, k;
TX_THREAD *thread_ptr;
TX_THREAD *next_thread;
TX_THREAD *search_thread;
TX_THREAD *list_head;
ULONG possible_cores = ((ULONG) 0);

    /* Clear the possible preemption list.  */
    possible_preemption_list[0] = TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 1
    possible_preemption_list[1] = TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 2
    possible_preemption_list[2] = TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 3
    possible_preemption_list[3] = TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 4
    possible_preemption_list[4] = TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 5
    possible_preemption_list[5] = TX_NULL;
#if TX_THREAD_SMP_MAX_CORES > 6

    /* Loop to clear the remainder of the possible preemption list.  */
    j = ((UINT) 6);

#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX
    while (j < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else
    while (j < _tx_thread_smp_max_cores)
#endif
    {
        /* Clear entry in possible preemption list.  */
        possible_preemption_list[j] = TX_NULL;

        /* Move to next core.  */
        j++;
    }
#endif
#endif
#endif
#endif
#endif
#endif

    /* Loop to build a list of threads of less priority.  */
    i = ((UINT) 0);
    j = ((UINT) 0);      /* j counts entries added to the preemption list.  */
#ifndef TX_THREAD_SMP_DYNAMIC_CORE_MAX
    while (i < ((UINT) TX_THREAD_SMP_MAX_CORES))
#else
    while (i < _tx_thread_smp_max_cores)
#endif
    {
        /* Pickup the currently mapped thread.  */
        thread_ptr = _tx_thread_execute_ptr[i];

        /* Is there a thread scheduled for this core?  */
        if (thread_ptr != TX_NULL)
        {
            /* Update the possible cores bit map.  */
            possible_cores = possible_cores | thread_ptr -> tx_thread_smp_cores_allowed;

            /* Can this thread be preempted?  */
            if (priority < thread_ptr -> tx_thread_priority)
            {
                /* Yes, this scheduled thread is lower priority, so add it to
                   the preemption possible list.  */
                possible_preemption_list[j] = thread_ptr;

                /* Move to next entry in preemption possible list.  */
                j++;
            }
        }

        /* Move to next core.  */
        i++;
    }

    /* Check to see if there is more than one thread that can be preempted -
       only then is sorting needed.  */
    if (j > ((UINT) 1))
    {
        /* Yes, loop through the preemption possible list and sort by
           priority (selection-sort style, lowest priority to the front).  */
        i = ((UINT) 0);
        do
        {
            /* Pickup preemptable thread.  */
            thread_ptr = possible_preemption_list[i];

            /* Initialize the search index.  */
            k = i + ((UINT) 1);

            /* Loop to get the lowest priority thread at the front of the list.  */
            while (k < j)
            {
                /* Pickup the next thread to evaluate.  */
                next_thread = possible_preemption_list[k];

                /* Is this thread lower priority?  */
                if (next_thread -> tx_thread_priority > thread_ptr -> tx_thread_priority)
                {
                    /* Yes, swap the threads.  */
                    possible_preemption_list[i] = next_thread;
                    possible_preemption_list[k] = thread_ptr;
                    thread_ptr = next_thread;
                }
                else
                {
                    /* Compare the thread priorities.  */
                    if (next_thread -> tx_thread_priority == thread_ptr -> tx_thread_priority)
                    {
                        /* Equal priority threads... see which is in the ready
                           list first, to preserve FIFO fairness.  */
                        search_thread = thread_ptr -> tx_thread_ready_next;

                        /* Pickup the list head.  */
                        list_head = _tx_thread_priority_list[thread_ptr -> tx_thread_priority];

                        /* Now loop to see if the next thread is after the
                           current thread preemption.  */
                        while (search_thread != list_head)
                        {
                            /* Have we found the next thread?  */
                            if (search_thread == next_thread)
                            {
                                /* Yes, swap the threads.  */
                                possible_preemption_list[i] = next_thread;
                                possible_preemption_list[k] = thread_ptr;
                                thread_ptr = next_thread;
                                break;
                            }

                            /* Move to the next thread.  */
                            search_thread = search_thread -> tx_thread_ready_next;
                        }
                    }

                    /* Move to examine the next possible preemptable thread.  */
                    k++;
                }
            }

            /* We have found the lowest priority thread to preempt, now find
               the next lowest.  */
            i++;
        }
        while (i < (j-((UINT) 1)));
    }

    /* Return the possible cores.  */
    return(possible_cores);
}
/* Move thread_ptr from its current priority's ready list to the new_priority
   ready list, maintaining the ready-priority bit map(s).  The thread's actual
   priority/preempt-threshold become new_priority unless priority inheritance
   is in effect (tx_thread_inherit_priority is numerically smaller, i.e.
   higher priority), in which case the inherited priority is used.
   NOTE(review): MAP_INDEX is presumably a macro defined elsewhere in this
   file as map_index when TX_MAX_PRIORITIES > 32 and 0 otherwise - confirm
   against the file's macro definitions.  */
static INLINE_DECLARE VOID _tx_thread_smp_simple_priority_change(TX_THREAD *thread_ptr, UINT new_priority)
{
UINT priority;
ULONG priority_bit;
TX_THREAD *head_ptr;
TX_THREAD *tail_ptr;
#if TX_MAX_PRIORITIES > 32
UINT map_index;
#endif

    /* Pickup the thread's current priority.  */
    priority = thread_ptr -> tx_thread_priority;

    /* Determine if there are other threads at this priority that are
       ready.  */
    if (thread_ptr -> tx_thread_ready_next != thread_ptr)
    {
        /* Yes, there are other threads at this priority ready.  */

        /* Just remove this thread from the (circular doubly-linked) priority
           list.  */
        (thread_ptr -> tx_thread_ready_next) -> tx_thread_ready_previous = thread_ptr -> tx_thread_ready_previous;
        (thread_ptr -> tx_thread_ready_previous) -> tx_thread_ready_next = thread_ptr -> tx_thread_ready_next;

        /* Determine if this is the head of the priority list.  */
        if (_tx_thread_priority_list[priority] == thread_ptr)
        {
            /* Update the head pointer of this priority list.  */
            _tx_thread_priority_list[priority] = thread_ptr -> tx_thread_ready_next;
        }
    }
    else
    {
        /* This is the only thread at this priority ready to run.  Set the head
           pointer to NULL.  */
        _tx_thread_priority_list[priority] = TX_NULL;

#if TX_MAX_PRIORITIES > 32
        /* Calculate the index into the bit map array.  */
        map_index = priority/((UINT) 32);
#endif

        /* Clear this priority bit in the ready priority bit map.  */
        TX_MOD32_BIT_SET(priority, priority_bit)
        _tx_thread_priority_maps[MAP_INDEX] = _tx_thread_priority_maps[MAP_INDEX] & (~(priority_bit));

#if TX_MAX_PRIORITIES > 32
        /* Determine if there are any other bits set in this priority map.  */
        if (_tx_thread_priority_maps[MAP_INDEX] == ((ULONG) 0))
        {
            /* No, clear the active bit to signify this priority map has
               nothing set.  */
            TX_DIV32_BIT_SET(priority, priority_bit)
            _tx_thread_priority_map_active = _tx_thread_priority_map_active & (~(priority_bit));
        }
#endif
    }

    /* Determine if the actual thread priority should be setup, which is the
       case if the new priority is higher than the priority inheritance.  */
    if (new_priority < thread_ptr -> tx_thread_inherit_priority)
    {
        /* Change thread priority to the new user's priority.  */
        thread_ptr -> tx_thread_priority = new_priority;
        thread_ptr -> tx_thread_preempt_threshold = new_priority;
    }
    else
    {
        /* Change thread priority to the priority inheritance.  */
        thread_ptr -> tx_thread_priority = thread_ptr -> tx_thread_inherit_priority;
        thread_ptr -> tx_thread_preempt_threshold = thread_ptr -> tx_thread_inherit_priority;
    }

    /* Now, place the thread at the new priority level.  */

    /* Determine if there are other threads at this priority that are
       ready.  */
    head_ptr = _tx_thread_priority_list[new_priority];
    if (head_ptr != TX_NULL)
    {
        /* Yes, there are other threads at this priority already ready.  */

        /* Just add this thread to the end of the priority list.  */
        tail_ptr = head_ptr -> tx_thread_ready_previous;
        tail_ptr -> tx_thread_ready_next = thread_ptr;
        head_ptr -> tx_thread_ready_previous = thread_ptr;
        thread_ptr -> tx_thread_ready_previous = tail_ptr;
        thread_ptr -> tx_thread_ready_next = head_ptr;
    }
    else
    {
        /* First thread at this priority ready.  Add to the front of the list
           (list becomes a single-element circular list).  */
        _tx_thread_priority_list[new_priority] = thread_ptr;
        thread_ptr -> tx_thread_ready_next = thread_ptr;
        thread_ptr -> tx_thread_ready_previous = thread_ptr;

#if TX_MAX_PRIORITIES > 32
        /* Calculate the index into the bit map array.  */
        map_index = new_priority/((UINT) 32);

        /* Set the active bit to remember that the priority map has something
           set.  */
        TX_DIV32_BIT_SET(new_priority, priority_bit)
        _tx_thread_priority_map_active = _tx_thread_priority_map_active | priority_bit;
#endif

        /* Or in the thread's priority bit.  */
        TX_MOD32_BIT_SET(new_priority, priority_bit)
        _tx_thread_priority_maps[MAP_INDEX] = _tx_thread_priority_maps[MAP_INDEX] | priority_bit;
    }
}
  1244. #else
  1245. /* In-line was disabled. All of the above helper fuctions must be defined as actual functions. */
  1246. UINT _tx_thread_lowest_set_bit_calculate(ULONG map);
  1247. #define TX_LOWEST_SET_BIT_CALCULATE(m, b) (b) = _tx_thread_lowest_set_bit_calculate((m));
  1248. UINT _tx_thread_smp_next_priority_find(UINT priority);
  1249. VOID _tx_thread_smp_schedule_list_clear(void);
  1250. VOID _tx_thread_smp_execute_list_clear(void);
  1251. VOID _tx_thread_smp_schedule_list_setup(void);
  1252. #ifdef TX_THREAD_SMP_INTER_CORE_INTERRUPT
  1253. VOID _tx_thread_smp_core_interrupt(TX_THREAD *thread_ptr, UINT current_core, UINT target_core);
  1254. #else
  1255. /* Define to whitespace. */
  1256. #define _tx_thread_smp_core_interrupt(a,b,c)
  1257. #endif
  1258. #ifdef TX_THREAD_SMP_WAKEUP_LOGIC
  1259. VOID _tx_thread_smp_core_wakeup(UINT current_core, UINT target_core);
  1260. #else
  1261. /* Define to whitespace. */
  1262. #define _tx_thread_smp_core_wakeup(a,b)
  1263. #endif
  1264. VOID _tx_thread_smp_execute_list_setup(UINT core_index);
  1265. ULONG _tx_thread_smp_available_cores_get(void);
  1266. ULONG _tx_thread_smp_possible_cores_get(void);
  1267. UINT _tx_thread_smp_lowest_priority_get(void);
  1268. UINT _tx_thread_smp_remap_solution_find(TX_THREAD *schedule_thread, ULONG available_cores, ULONG thread_possible_cores, ULONG test_possible_cores);
  1269. ULONG _tx_thread_smp_preemptable_threads_get(UINT priority, TX_THREAD *possible_preemption_list[TX_THREAD_SMP_MAX_CORES]);
  1270. VOID _tx_thread_smp_simple_priority_change(TX_THREAD *thread_ptr, UINT new_priority);
  1271. #endif
  1272. #endif
  1273. #endif