/*
 * Copyright (c) 2006-2024 RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date         Author       Notes
 * 2024/10/28   Shell        Added smp.smoke
 * 2025/12/3    ChuanN-sudo  add standardized utest documentation block
 * 2025/12/9    ChuanN-sudo  fix: initialize current_mask variable
 */
/**
 * Test Case Name: SMP Call Smoke 004 Test
 *
 * Test Objectives:
 * - Validate that the SMP call mechanism is re-entrant.
 * - Test the robustness of rt_smp_call_request() when issued from interrupt context.
 * - Ensure system stability under nested inter-processor interrupts (IPIs) issued from multiple cores.
 * - Exercise the core APIs: rt_smp_call_request(), rt_smp_call_req_init(), rt_smp_request_wait_freed(), rt_smp_call_cpu_mask()
 *
 * Test Scenarios:
 * - A 2D array of rt_smp_call_req objects is pre-initialized, one row per worker.
 * - Each worker thread triggers a primary IPI to another CPU.
 * - The primary handler fires secondary IPIs to all other CPUs.
 * - The secondary handlers update a shared bitmask.
 * - The worker thread polls the bitmask until all secondary IPIs have completed.
 *
 * Verification Metrics:
 * - The bitmask must match the expected value.
 * - Callbacks must execute with interrupts disabled.
 * - rt_smp_call_request() issued from an ISR must return no error.
 *
 * Dependencies:
 * - Hardware requirements: QEMU emulator or any multi-core hardware platform that supports RT-Thread.
 * - Software configuration:
 *   - RT_USING_UTEST must be enabled (select "RT-Thread Utestcases" in menuconfig).
 *   - RT_UTEST_SMP_CALL_FUNC must be enabled (enable via: RT-Thread Utestcases -> Kernel Components -> Drivers -> SMP-Call Test -> SMP-Call Smoke Test).
 * - Environmental Assumptions: The system scheduler and SMP services are working normally.
 *
 * Expected Results:
 * - Progress logs: a stream of digit characters (0-N) indicating which worker's callbacks are executing.
 * - Final output: "[ PASSED ] [ result ] testcase (components.drivers.smp_call.smoke_004)"
 */
#include <rtdevice.h>
#include <utest.h>
#include <utest_assert.h>
#include <smp_call.h>

#define PERCPU_TEST_COUNT 10000
#define NEWLINE_ON        80
#define MAX_RETRIES       (RT_TICK_PER_SECOND)
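
/* Shared test state: one worker thread, entry counter, result-mask word and a
 * row of call requests per CPU; _test_data_lock serializes every access to
 * _masks_data between threads and the IPI callbacks. */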
static struct rt_semaphore _utestd_exited;
static rt_thread_t _utestd[RT_CPUS_NR];
static rt_atomic_t _entry_counts[RT_CPUS_NR];
static struct rt_smp_call_req _callreq_data[RT_CPUS_NR][RT_CPUS_NR];
static rt_ubase_t _masks_data[RT_CPUS_NR];
static RT_DEFINE_SPINLOCK(_test_data_lock);
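
/* Print one progress character per callback, breaking the line every
 * NEWLINE_ON characters so the console output stays readable. */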
static void _logging_progress(char id)
{
    static rt_atomic_t counts;
    rt_ubase_t old;

    rt_kprintf("%c", id);
    old = rt_atomic_add(&counts, 1);
    if (old % NEWLINE_ON == 0)
    {
        rt_kputs("\n");
    }
}
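
/* Secondary IPI callback: must run with interrupts disabled; records the
 * executing core in the worker's shared bitmask under _test_data_lock. */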
static void _reentr_isr_cb(void *param)
{
    rt_ubase_t *maskp;
    int oncpu;

    if (!rt_hw_interrupt_is_disabled())
    {
        /* SYNC.004 */
        uassert_true(0);
    }

    rt_spin_lock(&_test_data_lock);
    oncpu = rt_hw_cpu_id();
    maskp = (rt_ubase_t *)param;
    *maskp |= (1ul << oncpu);
    rt_spin_unlock(&_test_data_lock);

    _logging_progress('0' + (maskp - _masks_data));
}
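
/* Primary IPI callback: verifies it runs on the targeted core with interrupts
 * disabled, then re-enters the SMP call layer by requesting secondary IPIs on
 * every other core. */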
static void _test_smp_call_isr(void *param)
{
    rt_err_t error;
    rt_ubase_t iter, oncpu = (rt_ubase_t)param;
    struct rt_smp_call_req *callreqp = _callreq_data[oncpu];

    if (rt_hw_cpu_id() != oncpu)
    {
        /* SYNC.004 */
        uassert_true(0);
    }
    if (!rt_hw_interrupt_is_disabled())
    {
        /* SYNC.004, PRIV.001 */
        uassert_true(0);
    }

    rt_smp_for_each_remote_cpu(iter, oncpu)
    {
        error = rt_smp_call_request(iter, SMP_CALL_NO_LOCAL, &callreqp[iter]);
        if (error)
        {
            /* SYNC.002 */
            uassert_false(error);
        }
    }
}
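
/* Migrate the calling thread across the remaining cores, then poll the shared
 * bitmask (for up to MAX_RETRIES ticks) until it reaches the expected value.
 * Returns the last mask value fetched. */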
static rt_ubase_t _wait_for_update(rt_ubase_t *maskp, rt_ubase_t exp, int cpuid, rt_thread_t curthr)
{
    rt_ubase_t level, current_mask = 0;

    for (size_t i = cpuid; i < RT_CPUS_NR; i++)
    {
        /* migrate across the remaining cores while the IPIs are in flight */
        rt_thread_control(curthr, RT_THREAD_CTRL_BIND_CPU, (void *)(i % RT_CPUS_NR));
    }

    for (size_t i = 0; i < MAX_RETRIES; i++)
    {
        level = rt_spin_lock_irqsave(&_test_data_lock);
        current_mask = *maskp;
        rt_spin_unlock_irqrestore(&_test_data_lock, level);

        if (current_mask == exp)
        {
            break;
        }
        rt_thread_delay(1);
    }
    return current_mask;
}
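
/* Worker thread entry: repeatedly fires the primary IPI at the next core,
 * waits until all secondary callbacks have reported in, then resets the
 * bitmask and waits for the call requests to be freed before the next round. */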
static void _utestd_entry(void *oncpu_param)
{
    rt_thread_t curthr = rt_thread_self();
    rt_ubase_t oncpu = (rt_ubase_t)oncpu_param;
    rt_ubase_t worker_id = (oncpu + 1) % RT_CPUS_NR;
    rt_ubase_t cpu_mask = 1ul << worker_id;
    rt_ubase_t req_cpus_mask = ~cpu_mask & RT_ALL_CPU;
    rt_ubase_t *mask_data = &_masks_data[worker_id];
    rt_ubase_t current_mask;
    rt_ubase_t level;

    for (size_t i = 0; i < PERCPU_TEST_COUNT; i++)
    {
        rt_smp_call_cpu_mask(cpu_mask, _test_smp_call_isr, (void *)worker_id, 0);

        current_mask = _wait_for_update(mask_data, req_cpus_mask, worker_id, curthr);
        if (current_mask != req_cpus_mask)
        {
            LOG_I("current mask 0x%x, last fetch 0x%x", *mask_data, current_mask);
            /* MP.002, TARG.001 */
            uassert_true(0);
            break;
        }
        else
        {
            rt_ubase_t iter;

            level = rt_spin_lock_irqsave(&_test_data_lock);
            *mask_data = 0;
            rt_spin_unlock_irqrestore(&_test_data_lock, level);

            rt_smp_for_each_remote_cpu(iter, worker_id)
            {
                rt_smp_request_wait_freed(&_callreq_data[worker_id][iter]);
            }
        }
    }
    rt_sem_release(&_utestd_exited);
}
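
/* Start one worker per CPU, then block until every worker has signaled exit. */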
static void _test_reentr_isr_main(void)
{
    for (size_t i = 0; i < RT_CPUS_NR; i++)
    {
        rt_thread_startup(_utestd[i]);
    }

    for (size_t i = 0; i < RT_CPUS_NR; i++)
    {
        rt_sem_take(&_utestd_exited, RT_WAITING_FOREVER);
    }
}
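
/* Set up fixtures: initialize every call request with the re-entrant callback,
 * clear the per-CPU state, and create one bound worker thread per CPU. */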
static rt_err_t utest_tc_init(void)
{
    size_t iter_x, iter_y;

    rt_smp_for_each_cpu(iter_x)
    {
        rt_smp_for_each_cpu(iter_y)
        {
            rt_smp_call_req_init(&_callreq_data[iter_x][iter_y],
                                 _reentr_isr_cb, &_masks_data[iter_x]);
        }
    }

    for (size_t i = 0; i < RT_CPUS_NR; i++)
    {
        _masks_data[i] = 0;
        rt_atomic_store(&_entry_counts[i], 0);
        _utestd[i] = rt_thread_create("utestd", _utestd_entry, (void *)i,
                                      UTEST_THR_STACK_SIZE, UTEST_THR_PRIORITY + 1,
                                      20);
        uassert_true(_utestd[i] != RT_NULL);
        rt_thread_control(_utestd[i], RT_THREAD_CTRL_BIND_CPU, (void *)i);
    }
    rt_sem_init(&_utestd_exited, "utestd", 0, RT_IPC_FLAG_PRIO);
    srand(rt_tick_get());

    return RT_EOK;
}
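
/* Tear down: the workers exit on their own, so only the semaphore needs
 * detaching. */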
static rt_err_t utest_tc_cleanup(void)
{
    rt_sem_detach(&_utestd_exited);
    return RT_EOK;
}

static void _testcase(void)
{
    UTEST_UNIT_RUN(_test_reentr_isr_main);
}
UTEST_TC_EXPORT(_testcase, "components.drivers.smp_call.smoke_004", utest_tc_init, utest_tc_cleanup, 10);