  1. /*
  2. * Copyright (c) 2006-2022, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2006-05-27 Bernard implement memory pool
  9. * 2006-06-03 Bernard fix the thread timer init bug
  10. * 2006-06-30 Bernard fix the allocate/free block bug
  11. * 2006-08-04 Bernard add hook support
  12. * 2006-08-10 Bernard fix interrupt bug in rt_mp_alloc
  13. * 2010-07-13 Bernard fix RT_ALIGN issue found by kuronca
  14. * 2010-10-26 yi.qiu add module support in rt_mp_delete
  15. * 2011-01-24 Bernard add object allocation check.
  16. * 2012-03-22 Bernard fix align issue in rt_mp_init and rt_mp_create.
  17. * 2022-01-07 Gabriel Moving __on_rt_xxxxx_hook to mempool.c
  18. * 2023-09-15 xqyjlj perf rt_hw_interrupt_disable/enable
  19. * 2023-12-10 xqyjlj fix spinlock assert
  20. */
  21. #include <rthw.h>
  22. #include <rtthread.h>
  23. #ifdef RT_USING_MEMPOOL
  24. #ifndef __on_rt_mp_alloc_hook
  25. #define __on_rt_mp_alloc_hook(mp, block) __ON_HOOK_ARGS(rt_mp_alloc_hook, (mp, block))
  26. #endif
  27. #ifndef __on_rt_mp_free_hook
  28. #define __on_rt_mp_free_hook(mp, block) __ON_HOOK_ARGS(rt_mp_free_hook, (mp, block))
  29. #endif
  30. #if defined(RT_USING_HOOK) && defined(RT_HOOK_USING_FUNC_PTR)
  31. static void (*rt_mp_alloc_hook)(struct rt_mempool *mp, void *block);
  32. static void (*rt_mp_free_hook)(struct rt_mempool *mp, void *block);
  33. /**
  34. * @addtogroup Hook
  35. */
  36. /**@{*/
  37. /**
  38. * @brief This function will set a hook function, which will be invoked when a memory
  39. * block is allocated from the memory pool.
  40. *
  41. * @param hook the hook function
  42. */
  43. void rt_mp_alloc_sethook(void (*hook)(struct rt_mempool *mp, void *block))
  44. {
  45. rt_mp_alloc_hook = hook;
  46. }
  47. /**
  48. * @brief This function will set a hook function, which will be invoked when a memory
  49. * block is released to the memory pool.
  50. *
  51. * @param hook the hook function
  52. */
  53. void rt_mp_free_sethook(void (*hook)(struct rt_mempool *mp, void *block))
  54. {
  55. rt_mp_free_hook = hook;
  56. }
  57. /**@}*/
  58. #endif /* RT_USING_HOOK */
  59. /**
  60. * @addtogroup MM
  61. */
  62. /**@{*/
  63. /**
  64. * @brief This function will initialize a memory pool object, normally which is used
  65. * for static object.
  66. *
  67. * @param mp is the memory pool object.
  68. *
  69. * @param name is the name of the memory pool.
  70. *
  71. * @param start is the start address of the memory pool.
  72. *
  73. * @param size is the total size of the memory pool.
  74. *
  75. * @param block_size is the size for each block..
  76. *
  77. * @return RT_EOK
  78. */
  79. rt_err_t rt_mp_init(struct rt_mempool *mp,
  80. const char *name,
  81. void *start,
  82. rt_size_t size,
  83. rt_size_t block_size)
  84. {
  85. rt_uint8_t *block_ptr;
  86. rt_size_t offset;
  87. /* parameter check */
  88. RT_ASSERT(mp != RT_NULL);
  89. RT_ASSERT(name != RT_NULL);
  90. RT_ASSERT(start != RT_NULL);
  91. RT_ASSERT(size > 0 && block_size > 0);
  92. /* initialize object */
  93. rt_object_init(&(mp->parent), RT_Object_Class_MemPool, name);
  94. /* initialize memory pool */
  95. mp->start_address = start;
  96. mp->size = RT_ALIGN_DOWN(size, RT_ALIGN_SIZE);
  97. /* align the block size */
  98. block_size = RT_ALIGN(block_size, RT_ALIGN_SIZE);
  99. mp->block_size = block_size;
  100. /* align to align size byte */
  101. mp->block_total_count = mp->size / (mp->block_size + sizeof(rt_uint8_t *));
  102. mp->block_free_count = mp->block_total_count;
  103. /* initialize suspended thread list */
  104. rt_list_init(&(mp->suspend_thread));
  105. /* initialize free block list */
  106. block_ptr = (rt_uint8_t *)mp->start_address;
  107. for (offset = 0; offset < mp->block_total_count; offset ++)
  108. {
  109. *(rt_uint8_t **)(block_ptr + offset * (block_size + sizeof(rt_uint8_t *))) =
  110. (rt_uint8_t *)(block_ptr + (offset + 1) * (block_size + sizeof(rt_uint8_t *)));
  111. }
  112. *(rt_uint8_t **)(block_ptr + (offset - 1) * (block_size + sizeof(rt_uint8_t *))) =
  113. RT_NULL;
  114. mp->block_list = block_ptr;
  115. rt_spin_lock_init(&(mp->spinlock));
  116. return RT_EOK;
  117. }
  118. RTM_EXPORT(rt_mp_init);
  119. /**
  120. * @brief This function will detach a memory pool from system object management.
  121. *
  122. * @param mp is the memory pool object.
  123. *
  124. * @return RT_EOK
  125. */
  126. rt_err_t rt_mp_detach(struct rt_mempool *mp)
  127. {
  128. struct rt_thread *thread;
  129. rt_base_t level;
  130. /* parameter check */
  131. RT_ASSERT(mp != RT_NULL);
  132. RT_ASSERT(rt_object_get_type(&mp->parent) == RT_Object_Class_MemPool);
  133. RT_ASSERT(rt_object_is_systemobject(&mp->parent));
  134. level = rt_spin_lock_irqsave(&(mp->spinlock));
  135. /* wake up all suspended threads */
  136. while (!rt_list_isempty(&(mp->suspend_thread)))
  137. {
  138. /* get next suspend thread */
  139. thread = rt_list_entry(mp->suspend_thread.next, struct rt_thread, tlist);
  140. /* set error code to -RT_ERROR */
  141. thread->error = -RT_ERROR;
  142. /*
  143. * resume thread
  144. * In rt_thread_resume function, it will remove current thread from
  145. * suspend list
  146. */
  147. rt_thread_resume(thread);
  148. }
  149. /* detach object */
  150. rt_object_detach(&(mp->parent));
  151. rt_spin_unlock_irqrestore(&(mp->spinlock), level);
  152. return RT_EOK;
  153. }
  154. RTM_EXPORT(rt_mp_detach);
  155. #ifdef RT_USING_HEAP
  156. /**
  157. * @brief This function will create a mempool object and allocate the memory pool from
  158. * heap.
  159. *
  160. * @param name is the name of memory pool.
  161. *
  162. * @param block_count is the count of blocks in memory pool.
  163. *
  164. * @param block_size is the size for each block.
  165. *
  166. * @return the created mempool object
  167. */
  168. rt_mp_t rt_mp_create(const char *name,
  169. rt_size_t block_count,
  170. rt_size_t block_size)
  171. {
  172. rt_uint8_t *block_ptr;
  173. struct rt_mempool *mp;
  174. rt_size_t offset;
  175. RT_DEBUG_NOT_IN_INTERRUPT;
  176. /* parameter check */
  177. RT_ASSERT(name != RT_NULL);
  178. RT_ASSERT(block_count > 0 && block_size > 0);
  179. /* allocate object */
  180. mp = (struct rt_mempool *)rt_object_allocate(RT_Object_Class_MemPool, name);
  181. /* allocate object failed */
  182. if (mp == RT_NULL)
  183. return RT_NULL;
  184. /* initialize memory pool */
  185. block_size = RT_ALIGN(block_size, RT_ALIGN_SIZE);
  186. mp->block_size = block_size;
  187. mp->size = (block_size + sizeof(rt_uint8_t *)) * block_count;
  188. /* allocate memory */
  189. mp->start_address = rt_malloc((block_size + sizeof(rt_uint8_t *)) *
  190. block_count);
  191. if (mp->start_address == RT_NULL)
  192. {
  193. /* no memory, delete memory pool object */
  194. rt_object_delete(&(mp->parent));
  195. return RT_NULL;
  196. }
  197. mp->block_total_count = block_count;
  198. mp->block_free_count = mp->block_total_count;
  199. /* initialize suspended thread list */
  200. rt_list_init(&(mp->suspend_thread));
  201. /* initialize free block list */
  202. block_ptr = (rt_uint8_t *)mp->start_address;
  203. for (offset = 0; offset < mp->block_total_count; offset ++)
  204. {
  205. *(rt_uint8_t **)(block_ptr + offset * (block_size + sizeof(rt_uint8_t *)))
  206. = block_ptr + (offset + 1) * (block_size + sizeof(rt_uint8_t *));
  207. }
  208. *(rt_uint8_t **)(block_ptr + (offset - 1) * (block_size + sizeof(rt_uint8_t *)))
  209. = RT_NULL;
  210. mp->block_list = block_ptr;
  211. rt_spin_lock_init(&(mp->spinlock));
  212. return mp;
  213. }
  214. RTM_EXPORT(rt_mp_create);
  215. /**
  216. * @brief This function will delete a memory pool and release the object memory.
  217. *
  218. * @param mp is the memory pool object.
  219. *
  220. * @return RT_EOK
  221. */
  222. rt_err_t rt_mp_delete(rt_mp_t mp)
  223. {
  224. struct rt_thread *thread;
  225. rt_base_t level;
  226. RT_DEBUG_NOT_IN_INTERRUPT;
  227. /* parameter check */
  228. RT_ASSERT(mp != RT_NULL);
  229. RT_ASSERT(rt_object_get_type(&mp->parent) == RT_Object_Class_MemPool);
  230. RT_ASSERT(rt_object_is_systemobject(&mp->parent) == RT_FALSE);
  231. level = rt_spin_lock_irqsave(&(mp->spinlock));
  232. /* wake up all suspended threads */
  233. while (!rt_list_isempty(&(mp->suspend_thread)))
  234. {
  235. /* get next suspend thread */
  236. thread = rt_list_entry(mp->suspend_thread.next, struct rt_thread, tlist);
  237. /* set error code to -RT_ERROR */
  238. thread->error = -RT_ERROR;
  239. /*
  240. * resume thread
  241. * In rt_thread_resume function, it will remove current thread from
  242. * suspend list
  243. */
  244. rt_thread_resume(thread);
  245. }
  246. rt_spin_unlock_irqrestore(&(mp->spinlock), level);
  247. /* release allocated room */
  248. rt_free(mp->start_address);
  249. /* detach object */
  250. rt_object_delete(&(mp->parent));
  251. return RT_EOK;
  252. }
  253. RTM_EXPORT(rt_mp_delete);
  254. #endif /* RT_USING_HEAP */
/**
 * @brief This function will allocate a block from memory pool.
 *
 * @param mp is the memory pool object.
 *
 * @param time is the maximum waiting time (in ticks) for allocating memory.
 *             - 0 for not waiting, allocating memory immediately.
 *             - a negative value waits indefinitely (no timeout timer is
 *               started, see the `time > 0` branch below).
 *
 * @return the allocated memory block or RT_NULL on allocated failed.
 */
void *rt_mp_alloc(rt_mp_t mp, rt_int32_t time)
{
    rt_uint8_t *block_ptr;
    rt_base_t level;
    struct rt_thread *thread;
    rt_uint32_t before_sleep = 0;

    /* parameter check */
    RT_ASSERT(mp != RT_NULL);

    /* get current thread */
    thread = rt_thread_self();

    level = rt_spin_lock_irqsave(&(mp->spinlock));

    /* loop until a free block appears; re-checked after every wake-up
     * because another thread may have taken the block in the meantime */
    while (mp->block_free_count == 0)
    {
        /* memory block is unavailable. */
        if (time == 0)
        {
            /* non-blocking call (or timeout budget exhausted): fail now */
            rt_spin_unlock_irqrestore(&(mp->spinlock), level);

            rt_set_errno(-RT_ETIMEOUT);

            return RT_NULL;
        }

        RT_DEBUG_NOT_IN_INTERRUPT;

        /* clear the per-thread error; a timeout or pool deletion will
         * overwrite it while we sleep */
        thread->error = RT_EOK;

        /* need suspend thread */
        rt_thread_suspend(thread);
        rt_list_insert_after(&(mp->suspend_thread), &(thread->tlist));

        if (time > 0)
        {
            /* get the start tick of timer */
            before_sleep = rt_tick_get();

            /* init thread timer and start it */
            rt_timer_control(&(thread->thread_timer),
                             RT_TIMER_CTRL_SET_TIME,
                             &time);
            rt_timer_start(&(thread->thread_timer));
        }

        /* enable interrupt */
        rt_spin_unlock_irqrestore(&(mp->spinlock), level);

        /* do a schedule; execution resumes here after rt_mp_free() or the
         * timeout timer wakes this thread */
        rt_schedule();

        /* non-RT_EOK means the wait was aborted (timeout or pool removed) */
        if (thread->error != RT_EOK)
            return RT_NULL;

        if (time > 0)
        {
            /* charge the ticks spent sleeping against the remaining budget;
             * clamped to 0 so the next iteration fails fast on timeout */
            time -= rt_tick_get() - before_sleep;
            if (time < 0)
                time = 0;
        }

        level = rt_spin_lock_irqsave(&(mp->spinlock));
    }

    /* memory block is available. decrease the free block counter */
    mp->block_free_count--;

    /* get block from block list */
    block_ptr = mp->block_list;
    RT_ASSERT(block_ptr != RT_NULL);

    /* Setup the next free node. */
    mp->block_list = *(rt_uint8_t **)block_ptr;

    /* repurpose the block's hidden header as a back-pointer to the pool,
     * so rt_mp_free() can find the owning pool from the block alone */
    *(rt_uint8_t **)block_ptr = (rt_uint8_t *)mp;

    rt_spin_unlock_irqrestore(&(mp->spinlock), level);

    /* hook runs outside the spinlock; the caller-visible address is just
     * past the pointer-sized header */
    RT_OBJECT_HOOK_CALL(rt_mp_alloc_hook,
                        (mp, (rt_uint8_t *)(block_ptr + sizeof(rt_uint8_t *))));

    return (rt_uint8_t *)(block_ptr + sizeof(rt_uint8_t *));
}
RTM_EXPORT(rt_mp_alloc);
  329. /**
  330. * @brief This function will release a memory block.
  331. *
  332. * @param block the address of memory block to be released.
  333. */
  334. void rt_mp_free(void *block)
  335. {
  336. rt_uint8_t **block_ptr;
  337. struct rt_mempool *mp;
  338. struct rt_thread *thread;
  339. rt_base_t level;
  340. /* parameter check */
  341. if (block == RT_NULL) return;
  342. /* get the control block of pool which the block belongs to */
  343. block_ptr = (rt_uint8_t **)((rt_uint8_t *)block - sizeof(rt_uint8_t *));
  344. mp = (struct rt_mempool *)*block_ptr;
  345. RT_OBJECT_HOOK_CALL(rt_mp_free_hook, (mp, block));
  346. level = rt_spin_lock_irqsave(&(mp->spinlock));
  347. /* increase the free block count */
  348. mp->block_free_count ++;
  349. /* link the block into the block list */
  350. *block_ptr = mp->block_list;
  351. mp->block_list = (rt_uint8_t *)block_ptr;
  352. if (!rt_list_isempty(&(mp->suspend_thread)))
  353. {
  354. /* get the suspended thread */
  355. thread = rt_list_entry(mp->suspend_thread.next,
  356. struct rt_thread,
  357. tlist);
  358. /* set error */
  359. thread->error = RT_EOK;
  360. /* resume thread */
  361. rt_thread_resume(thread);
  362. rt_spin_unlock_irqrestore(&(mp->spinlock), level);
  363. /* do a schedule */
  364. rt_schedule();
  365. return;
  366. }
  367. rt_spin_unlock_irqrestore(&(mp->spinlock), level);
  368. }
  369. RTM_EXPORT(rt_mp_free);
  370. /**@}*/
  371. #endif /* RT_USING_MEMPOOL */