/* mempool.c */
/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2006-05-27     Bernard      implement memory pool
 * 2006-06-03     Bernard      fix the thread timer init bug
 * 2006-06-30     Bernard      fix the allocate/free block bug
 * 2006-08-04     Bernard      add hook support
 * 2006-08-10     Bernard      fix interrupt bug in rt_mp_alloc
 * 2010-07-13     Bernard      fix RT_ALIGN issue found by kuronca
 * 2010-10-26     yi.qiu       add module support in rt_mp_delete
 * 2011-01-24     Bernard      add object allocation check.
 * 2012-03-22     Bernard      fix align issue in rt_mp_init and rt_mp_create.
 * 2022-01-07     Gabriel      Moving __on_rt_xxxxx_hook to mempool.c
 * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
 * 2023-12-10     xqyjlj       fix spinlock assert
 */
  21. #include <rthw.h>
  22. #include <rtthread.h>
  23. #ifdef RT_USING_MEMPOOL
  24. #if defined(RT_USING_HOOK) && defined(RT_HOOK_USING_FUNC_PTR)
  25. static void (*rt_mp_alloc_hook)(struct rt_mempool *mp, void *block);
  26. static void (*rt_mp_free_hook)(struct rt_mempool *mp, void *block);
  27. /**
  28. * @addtogroup Hook
  29. */
  30. /**@{*/
  31. /**
  32. * @brief This function will set a hook function, which will be invoked when a memory
  33. * block is allocated from the memory pool.
  34. *
  35. * @param hook the hook function
  36. */
  37. void rt_mp_alloc_sethook(void (*hook)(struct rt_mempool *mp, void *block))
  38. {
  39. rt_mp_alloc_hook = hook;
  40. }
  41. /**
  42. * @brief This function will set a hook function, which will be invoked when a memory
  43. * block is released to the memory pool.
  44. *
  45. * @param hook the hook function
  46. */
  47. void rt_mp_free_sethook(void (*hook)(struct rt_mempool *mp, void *block))
  48. {
  49. rt_mp_free_hook = hook;
  50. }
  51. /**@}*/
  52. #endif /* RT_USING_HOOK */
  53. /**
  54. * @addtogroup MM
  55. */
  56. /**@{*/
  57. /**
  58. * @brief This function will initialize a memory pool object, normally which is used
  59. * for static object.
  60. *
  61. * @param mp is the memory pool object.
  62. *
  63. * @param name is the name of the memory pool.
  64. *
  65. * @param start is the start address of the memory pool.
  66. *
  67. * @param size is the total size of the memory pool.
  68. *
  69. * @param block_size is the size for each block..
  70. *
  71. * @return RT_EOK
  72. */
  73. rt_err_t rt_mp_init(struct rt_mempool *mp,
  74. const char *name,
  75. void *start,
  76. rt_size_t size,
  77. rt_size_t block_size)
  78. {
  79. rt_uint8_t *block_ptr;
  80. rt_size_t offset;
  81. /* parameter check */
  82. RT_ASSERT(mp != RT_NULL);
  83. RT_ASSERT(name != RT_NULL);
  84. RT_ASSERT(start != RT_NULL);
  85. RT_ASSERT(size > 0 && block_size > 0);
  86. /* initialize object */
  87. rt_object_init(&(mp->parent), RT_Object_Class_MemPool, name);
  88. /* initialize memory pool */
  89. mp->start_address = start;
  90. mp->size = RT_ALIGN_DOWN(size, RT_ALIGN_SIZE);
  91. /* align the block size */
  92. block_size = RT_ALIGN(block_size, RT_ALIGN_SIZE);
  93. mp->block_size = block_size;
  94. /* align to align size byte */
  95. mp->block_total_count = mp->size / (mp->block_size + sizeof(rt_uint8_t *));
  96. mp->block_free_count = mp->block_total_count;
  97. /* initialize suspended thread list */
  98. rt_list_init(&(mp->suspend_thread));
  99. /* initialize free block list */
  100. block_ptr = (rt_uint8_t *)mp->start_address;
  101. for (offset = 0; offset < mp->block_total_count; offset ++)
  102. {
  103. *(rt_uint8_t **)(block_ptr + offset * (block_size + sizeof(rt_uint8_t *))) =
  104. (rt_uint8_t *)(block_ptr + (offset + 1) * (block_size + sizeof(rt_uint8_t *)));
  105. }
  106. *(rt_uint8_t **)(block_ptr + (offset - 1) * (block_size + sizeof(rt_uint8_t *))) =
  107. RT_NULL;
  108. mp->block_list = block_ptr;
  109. rt_spin_lock_init(&(mp->spinlock));
  110. return RT_EOK;
  111. }
  112. RTM_EXPORT(rt_mp_init);
  113. /**
  114. * @brief This function will detach a memory pool from system object management.
  115. *
  116. * @param mp is the memory pool object.
  117. *
  118. * @return RT_EOK
  119. */
  120. rt_err_t rt_mp_detach(struct rt_mempool *mp)
  121. {
  122. struct rt_thread *thread;
  123. rt_base_t level;
  124. /* parameter check */
  125. RT_ASSERT(mp != RT_NULL);
  126. RT_ASSERT(rt_object_get_type(&mp->parent) == RT_Object_Class_MemPool);
  127. RT_ASSERT(rt_object_is_systemobject(&mp->parent));
  128. level = rt_spin_lock_irqsave(&(mp->spinlock));
  129. /* wake up all suspended threads */
  130. while (!rt_list_isempty(&(mp->suspend_thread)))
  131. {
  132. /* get next suspend thread */
  133. thread = rt_list_entry(mp->suspend_thread.next, struct rt_thread, tlist);
  134. /* set error code to -RT_ERROR */
  135. thread->error = -RT_ERROR;
  136. /*
  137. * resume thread
  138. * In rt_thread_resume function, it will remove current thread from
  139. * suspend list
  140. */
  141. rt_thread_resume(thread);
  142. }
  143. /* detach object */
  144. rt_object_detach(&(mp->parent));
  145. rt_spin_unlock_irqrestore(&(mp->spinlock), level);
  146. return RT_EOK;
  147. }
  148. RTM_EXPORT(rt_mp_detach);
  149. #ifdef RT_USING_HEAP
  150. /**
  151. * @brief This function will create a mempool object and allocate the memory pool from
  152. * heap.
  153. *
  154. * @param name is the name of memory pool.
  155. *
  156. * @param block_count is the count of blocks in memory pool.
  157. *
  158. * @param block_size is the size for each block.
  159. *
  160. * @return the created mempool object
  161. */
  162. rt_mp_t rt_mp_create(const char *name,
  163. rt_size_t block_count,
  164. rt_size_t block_size)
  165. {
  166. rt_uint8_t *block_ptr;
  167. struct rt_mempool *mp;
  168. rt_size_t offset;
  169. RT_DEBUG_NOT_IN_INTERRUPT;
  170. /* parameter check */
  171. RT_ASSERT(name != RT_NULL);
  172. RT_ASSERT(block_count > 0 && block_size > 0);
  173. /* allocate object */
  174. mp = (struct rt_mempool *)rt_object_allocate(RT_Object_Class_MemPool, name);
  175. /* allocate object failed */
  176. if (mp == RT_NULL)
  177. return RT_NULL;
  178. /* initialize memory pool */
  179. block_size = RT_ALIGN(block_size, RT_ALIGN_SIZE);
  180. mp->block_size = block_size;
  181. mp->size = (block_size + sizeof(rt_uint8_t *)) * block_count;
  182. /* allocate memory */
  183. mp->start_address = rt_malloc((block_size + sizeof(rt_uint8_t *)) *
  184. block_count);
  185. if (mp->start_address == RT_NULL)
  186. {
  187. /* no memory, delete memory pool object */
  188. rt_object_delete(&(mp->parent));
  189. return RT_NULL;
  190. }
  191. mp->block_total_count = block_count;
  192. mp->block_free_count = mp->block_total_count;
  193. /* initialize suspended thread list */
  194. rt_list_init(&(mp->suspend_thread));
  195. /* initialize free block list */
  196. block_ptr = (rt_uint8_t *)mp->start_address;
  197. for (offset = 0; offset < mp->block_total_count; offset ++)
  198. {
  199. *(rt_uint8_t **)(block_ptr + offset * (block_size + sizeof(rt_uint8_t *)))
  200. = block_ptr + (offset + 1) * (block_size + sizeof(rt_uint8_t *));
  201. }
  202. *(rt_uint8_t **)(block_ptr + (offset - 1) * (block_size + sizeof(rt_uint8_t *)))
  203. = RT_NULL;
  204. mp->block_list = block_ptr;
  205. rt_spin_lock_init(&(mp->spinlock));
  206. return mp;
  207. }
  208. RTM_EXPORT(rt_mp_create);
  209. /**
  210. * @brief This function will delete a memory pool and release the object memory.
  211. *
  212. * @param mp is the memory pool object.
  213. *
  214. * @return RT_EOK
  215. */
  216. rt_err_t rt_mp_delete(rt_mp_t mp)
  217. {
  218. struct rt_thread *thread;
  219. rt_base_t level;
  220. RT_DEBUG_NOT_IN_INTERRUPT;
  221. /* parameter check */
  222. RT_ASSERT(mp != RT_NULL);
  223. RT_ASSERT(rt_object_get_type(&mp->parent) == RT_Object_Class_MemPool);
  224. RT_ASSERT(rt_object_is_systemobject(&mp->parent) == RT_FALSE);
  225. level = rt_spin_lock_irqsave(&(mp->spinlock));
  226. /* wake up all suspended threads */
  227. while (!rt_list_isempty(&(mp->suspend_thread)))
  228. {
  229. /* get next suspend thread */
  230. thread = rt_list_entry(mp->suspend_thread.next, struct rt_thread, tlist);
  231. /* set error code to -RT_ERROR */
  232. thread->error = -RT_ERROR;
  233. /*
  234. * resume thread
  235. * In rt_thread_resume function, it will remove current thread from
  236. * suspend list
  237. */
  238. rt_thread_resume(thread);
  239. }
  240. rt_spin_unlock_irqrestore(&(mp->spinlock), level);
  241. /* release allocated room */
  242. rt_free(mp->start_address);
  243. /* detach object */
  244. rt_object_delete(&(mp->parent));
  245. return RT_EOK;
  246. }
  247. RTM_EXPORT(rt_mp_delete);
  248. #endif /* RT_USING_HEAP */
/**
 * @brief This function will allocate a block from memory pool.
 *
 * @param mp is the memory pool object.
 *
 * @param time is the maximum waiting time for allocating memory.
 *             - 0 for not waiting, allocating memory immediately;
 *             - a positive value waits at most that many ticks;
 *             - a negative value waits with no timeout (the timeout
 *               timer is only armed when time > 0).
 *
 * @return the allocated memory block or RT_NULL on allocated failed
 *         (errno is set to -RT_ETIMEOUT when time == 0 and no block
 *         is available).
 */
void *rt_mp_alloc(rt_mp_t mp, rt_int32_t time)
{
    rt_uint8_t *block_ptr;
    rt_base_t level;
    struct rt_thread *thread;
    rt_uint32_t before_sleep = 0;

    /* parameter check */
    RT_ASSERT(mp != RT_NULL);

    /* get current thread */
    thread = rt_thread_self();

    level = rt_spin_lock_irqsave(&(mp->spinlock));

    /* a loop, not an if: after being resumed by rt_mp_free() this thread
     * may lose the race for the freed block, so re-check the counter */
    while (mp->block_free_count == 0)
    {
        /* memory block is unavailable. */
        if (time == 0)
        {
            rt_spin_unlock_irqrestore(&(mp->spinlock), level);

            rt_set_errno(-RT_ETIMEOUT);

            return RT_NULL;
        }

        RT_DEBUG_NOT_IN_INTERRUPT;

        /* cleared here; a timeout or pool deletion will overwrite it */
        thread->error = RT_EOK;

        /* need suspend thread */
        rt_thread_suspend(thread);
        rt_list_insert_after(&(mp->suspend_thread), &(thread->tlist));

        if (time > 0)
        {
            /* get the start tick of timer */
            before_sleep = rt_tick_get();

            /* init thread timer and start it */
            rt_timer_control(&(thread->thread_timer),
                             RT_TIMER_CTRL_SET_TIME,
                             &time);
            rt_timer_start(&(thread->thread_timer));
        }

        /* enable interrupt */
        rt_spin_unlock_irqrestore(&(mp->spinlock), level);

        /* do a schedule; execution resumes here after wake-up */
        rt_schedule();

        /* non-RT_EOK means the wait was aborted (timeout or pool gone) */
        if (thread->error != RT_EOK)
            return RT_NULL;

        if (time > 0)
        {
            /* charge the ticks actually slept against the remaining timeout */
            time -= rt_tick_get() - before_sleep;
            if (time < 0)
                time = 0;
        }
        level = rt_spin_lock_irqsave(&(mp->spinlock));
    }

    /* memory block is available. decrease the free block counter */
    mp->block_free_count--;

    /* get block from block list */
    block_ptr = mp->block_list;
    RT_ASSERT(block_ptr != RT_NULL);

    /* Setup the next free node. */
    mp->block_list = *(rt_uint8_t **)block_ptr;

    /* stash the owning pool in the hidden link slot so rt_mp_free()
     * can recover the pool from the block address alone */
    *(rt_uint8_t **)block_ptr = (rt_uint8_t *)mp;

    rt_spin_unlock_irqrestore(&(mp->spinlock), level);

    RT_OBJECT_HOOK_CALL(rt_mp_alloc_hook,
                        (mp, (rt_uint8_t *)(block_ptr + sizeof(rt_uint8_t *))));

    /* the caller's payload starts just past the hidden link pointer */
    return (rt_uint8_t *)(block_ptr + sizeof(rt_uint8_t *));
}
RTM_EXPORT(rt_mp_alloc);
  323. /**
  324. * @brief This function will release a memory block.
  325. *
  326. * @param block the address of memory block to be released.
  327. */
  328. void rt_mp_free(void *block)
  329. {
  330. rt_uint8_t **block_ptr;
  331. struct rt_mempool *mp;
  332. struct rt_thread *thread;
  333. rt_base_t level;
  334. /* parameter check */
  335. if (block == RT_NULL) return;
  336. /* get the control block of pool which the block belongs to */
  337. block_ptr = (rt_uint8_t **)((rt_uint8_t *)block - sizeof(rt_uint8_t *));
  338. mp = (struct rt_mempool *)*block_ptr;
  339. RT_OBJECT_HOOK_CALL(rt_mp_free_hook, (mp, block));
  340. level = rt_spin_lock_irqsave(&(mp->spinlock));
  341. /* increase the free block count */
  342. mp->block_free_count ++;
  343. /* link the block into the block list */
  344. *block_ptr = mp->block_list;
  345. mp->block_list = (rt_uint8_t *)block_ptr;
  346. if (!rt_list_isempty(&(mp->suspend_thread)))
  347. {
  348. /* get the suspended thread */
  349. thread = rt_list_entry(mp->suspend_thread.next,
  350. struct rt_thread,
  351. tlist);
  352. /* set error */
  353. thread->error = RT_EOK;
  354. /* resume thread */
  355. rt_thread_resume(thread);
  356. rt_spin_unlock_irqrestore(&(mp->spinlock), level);
  357. /* do a schedule */
  358. rt_schedule();
  359. return;
  360. }
  361. rt_spin_unlock_irqrestore(&(mp->spinlock), level);
  362. }
  363. RTM_EXPORT(rt_mp_free);
  364. /**@}*/
  365. #endif /* RT_USING_MEMPOOL */