zephyr_thread.c

/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-FileCopyrightText: 2024 Siemens AG (For Zephyr usermode changes)
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "platform_api_vmcore.h"
#include "platform_api_extension.h"

/* clang-format off */
#define bh_assert(v) do {                                  \
    if (!(v)) {                                            \
        printf("\nASSERTION FAILED: %s, at %s, line %d\n", \
               #v, __FILE__, __LINE__);                    \
        abort();                                           \
    }                                                      \
} while (0)
/* clang-format on */

#if defined(CONFIG_ARM_MPU) || defined(CONFIG_ARC_MPU) \
    || KERNEL_VERSION_NUMBER > 0x020300 /* version 2.3.0 */
#define BH_ENABLE_ZEPHYR_MPU_STACK 1
#elif !defined(BH_ENABLE_ZEPHYR_MPU_STACK)
#define BH_ENABLE_ZEPHYR_MPU_STACK 0
#endif

#if !defined(BH_ZEPHYR_MPU_STACK_SIZE)
#define BH_ZEPHYR_MPU_STACK_SIZE APP_THREAD_STACK_SIZE_MIN
#endif
#if !defined(BH_ZEPHYR_MPU_STACK_COUNT)
#define BH_ZEPHYR_MPU_STACK_COUNT 4
#endif
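
/*
 * On MPU-enabled targets Zephyr requires thread stacks to be declared
 * with K_THREAD_STACK_ARRAY_DEFINE so they get the alignment and guard
 * regions the MPU expects; a heap-allocated buffer will not do. The pool
 * below therefore pre-allocates BH_ZEPHYR_MPU_STACK_COUNT fixed-size
 * stacks and hands them out one at a time under mpu_stack_lock.
 */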
#if BH_ENABLE_ZEPHYR_MPU_STACK != 0
static K_THREAD_STACK_ARRAY_DEFINE(mpu_stacks, BH_ZEPHYR_MPU_STACK_COUNT,
                                   BH_ZEPHYR_MPU_STACK_SIZE);
static bool mpu_stack_allocated[BH_ZEPHYR_MPU_STACK_COUNT];
static zmutex_t mpu_stack_lock;

static char *
mpu_stack_alloc(void)
{
    int i;

    zmutex_lock(&mpu_stack_lock, K_FOREVER);
    for (i = 0; i < BH_ZEPHYR_MPU_STACK_COUNT; i++) {
        if (!mpu_stack_allocated[i]) {
            mpu_stack_allocated[i] = true;
            zmutex_unlock(&mpu_stack_lock);
            return (char *)mpu_stacks[i];
        }
    }
    zmutex_unlock(&mpu_stack_lock);
    return NULL;
}

static void
mpu_stack_free(char *stack)
{
    int i;

    zmutex_lock(&mpu_stack_lock, K_FOREVER);
    for (i = 0; i < BH_ZEPHYR_MPU_STACK_COUNT; i++) {
        if ((char *)mpu_stacks[i] == stack)
            mpu_stack_allocated[i] = false;
    }
    zmutex_unlock(&mpu_stack_lock);
}
#endif
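
/*
 * Per-thread bookkeeping. Each created thread owns an os_thread_data
 * record; joining and condition waiting are both emulated with linked
 * lists of wait nodes, each carrying a private semaphore that is given
 * to wake the waiting thread.
 */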
typedef struct os_thread_wait_node {
    zsem_t sem;
    os_thread_wait_list next;
} os_thread_wait_node;

typedef struct os_thread_data {
    /* Next thread data */
    struct os_thread_data *next;
    /* Zephyr thread handle */
    korp_tid tid;
    /* Jeff thread local root */
    void *tlr;
    /* Lock for waiting list */
    zmutex_t wait_list_lock;
    /* Waiting list of other threads who are joining this thread */
    os_thread_wait_list thread_wait_list;
    /* Thread stack size */
    unsigned stack_size;
#if BH_ENABLE_ZEPHYR_MPU_STACK == 0
    /* Thread stack, allocated inline at the end of this struct */
    char stack[1];
#else
    /* Thread stack, taken from the static MPU stack pool */
    char *stack;
#endif
} os_thread_data;

typedef struct os_thread_obj {
    struct k_thread thread;
    /* Whether the thread is terminated and this thread object is to
       be freed in the future. */
    bool to_be_freed;
    struct os_thread_obj *next;
} os_thread_obj;

static bool is_thread_sys_inited = false;

/* Thread data of supervisor thread */
static os_thread_data supervisor_thread_data;

/* Lock for thread data list */
static zmutex_t thread_data_lock;

/* Thread data list */
static os_thread_data *thread_data_list = NULL;

/* Lock for thread object list */
static zmutex_t thread_obj_lock;

/* Thread object list */
static os_thread_obj *thread_obj_list = NULL;
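
/*
 * List helpers. Both global lists are plain singly linked lists guarded
 * by their own mutex; new elements are linked in at the head.
 */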
static void
thread_data_list_add(os_thread_data *thread_data)
{
    zmutex_lock(&thread_data_lock, K_FOREVER);
    if (!thread_data_list)
        thread_data_list = thread_data;
    else {
        /* If already in list, just return */
        os_thread_data *p = thread_data_list;
        while (p) {
            if (p == thread_data) {
                zmutex_unlock(&thread_data_lock);
                return;
            }
            p = p->next;
        }

        /* Set as head of list */
        thread_data->next = thread_data_list;
        thread_data_list = thread_data;
    }
    zmutex_unlock(&thread_data_lock);
}

static void
thread_data_list_remove(os_thread_data *thread_data)
{
    zmutex_lock(&thread_data_lock, K_FOREVER);
    if (thread_data_list) {
        if (thread_data_list == thread_data)
            thread_data_list = thread_data_list->next;
        else {
            /* Search and remove it from list */
            os_thread_data *p = thread_data_list;
            while (p && p->next != thread_data)
                p = p->next;
            if (p && p->next == thread_data)
                p->next = p->next->next;
        }
    }
    zmutex_unlock(&thread_data_lock);
}

static os_thread_data *
thread_data_list_lookup(k_tid_t tid)
{
    zmutex_lock(&thread_data_lock, K_FOREVER);
    if (thread_data_list) {
        os_thread_data *p = thread_data_list;
        while (p) {
            if (p->tid == tid) {
                /* Found */
                zmutex_unlock(&thread_data_lock);
                return p;
            }
            p = p->next;
        }
    }
    zmutex_unlock(&thread_data_lock);
    return NULL;
}

static void
thread_obj_list_add(os_thread_obj *thread_obj)
{
    zmutex_lock(&thread_obj_lock, K_FOREVER);
    if (!thread_obj_list)
        thread_obj_list = thread_obj;
    else {
        /* Set as head of list */
        thread_obj->next = thread_obj_list;
        thread_obj_list = thread_obj;
    }
    zmutex_unlock(&thread_obj_lock);
}
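
/*
 * A terminated thread cannot free its own struct k_thread while the
 * kernel may still reference it, so os_thread_cleanup() only marks the
 * object with to_be_freed. The memory is actually released here, on the
 * next call to os_thread_create(), once the owner is long gone.
 */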
static void
thread_obj_list_reclaim(void)
{
    os_thread_obj *p, *p_prev;

    zmutex_lock(&thread_obj_lock, K_FOREVER);
    p_prev = NULL;
    p = thread_obj_list;
    while (p) {
        if (p->to_be_freed) {
            if (p_prev == NULL) { /* p is the head of list */
                thread_obj_list = p->next;
                BH_FREE(p);
                p = thread_obj_list;
            }
            else { /* p is not the head of list */
                p_prev->next = p->next;
                BH_FREE(p);
                p = p_prev->next;
            }
        }
        else {
            p_prev = p;
            p = p->next;
        }
    }
    zmutex_unlock(&thread_obj_lock);
}

int
os_thread_sys_init(void)
{
    if (is_thread_sys_inited)
        return BHT_OK;

#if BH_ENABLE_ZEPHYR_MPU_STACK != 0
    zmutex_init(&mpu_stack_lock);
#endif
    zmutex_init(&thread_data_lock);
    zmutex_init(&thread_obj_lock);

    /* Initialize supervisor thread data */
    memset(&supervisor_thread_data, 0, sizeof(supervisor_thread_data));
    supervisor_thread_data.tid = k_current_get();
    /* Set as head of thread data list */
    thread_data_list = &supervisor_thread_data;

    is_thread_sys_inited = true;
    return BHT_OK;
}

void
os_thread_sys_destroy(void)
{
    if (is_thread_sys_inited) {
        is_thread_sys_inited = false;
    }
}

static os_thread_data *
thread_data_current(void)
{
    k_tid_t tid = k_current_get();
    return thread_data_list_lookup(tid);
}
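
/*
 * Runs in the context of the exiting thread: wakes every joiner parked
 * on thread_wait_list, unlinks the thread data, and marks the thread
 * object for deferred reclamation by thread_obj_list_reclaim().
 */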
static void
os_thread_cleanup(void)
{
    os_thread_data *thread_data = thread_data_current();

    bh_assert(thread_data != NULL);

    zmutex_lock(&thread_data->wait_list_lock, K_FOREVER);
    if (thread_data->thread_wait_list) {
        /* Signal each joining thread */
        os_thread_wait_list head = thread_data->thread_wait_list;
        while (head) {
            os_thread_wait_list next = head->next;
            zsem_give(&head->sem);
            /* head will be freed by joining thread */
            head = next;
        }
        thread_data->thread_wait_list = NULL;
    }
    zmutex_unlock(&thread_data->wait_list_lock);

    thread_data_list_remove(thread_data);
    /* Set flag to true so that the next thread creation
       frees the thread object */
    ((os_thread_obj *)thread_data->tid)->to_be_freed = true;
#if BH_ENABLE_ZEPHYR_MPU_STACK != 0
    mpu_stack_free(thread_data->stack);
#endif
    BH_FREE(thread_data);
}
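
/*
 * Zephyr thread entry functions receive three void * parameters; they
 * are used here to carry the user start routine, its argument and the
 * os_thread_data record, with cleanup running after the routine returns.
 */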
static void
os_thread_wrapper(void *start, void *arg, void *thread_data)
{
    /* Record the real Zephyr thread id in the thread data */
    ((os_thread_data *)thread_data)->tid = k_current_get();
    thread_data_list_add(thread_data);

    ((thread_start_routine_t)start)(arg);
    os_thread_cleanup();
}

int
os_thread_create(korp_tid *p_tid, thread_start_routine_t start, void *arg,
                 unsigned int stack_size)
{
    return os_thread_create_with_prio(p_tid, start, arg, stack_size,
                                      BH_THREAD_DEFAULT_PRIORITY);
}
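
/*
 * Typical call site (illustrative only; my_entry and the 16 KiB stack
 * request are hypothetical, not values this port prescribes):
 *
 *     static void *my_entry(void *arg) { ... }
 *
 *     korp_tid tid;
 *     if (os_thread_create(&tid, my_entry, NULL, 16 * 1024) != BHT_OK)
 *         return BHT_ERROR;
 *     os_thread_join(tid, NULL);
 *
 * Note that when BH_ENABLE_ZEPHYR_MPU_STACK is enabled, the requested
 * stack size is ignored and BH_ZEPHYR_MPU_STACK_SIZE is used instead.
 */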
int
os_thread_create_with_prio(korp_tid *p_tid, thread_start_routine_t start,
                           void *arg, unsigned int stack_size, int prio)
{
    korp_tid tid;
    os_thread_data *thread_data;
    unsigned thread_data_size;

    if (!p_tid || !stack_size)
        return BHT_ERROR;

    /* Free the thread objects of terminated threads */
    thread_obj_list_reclaim();

    /* Create and initialize thread object */
    if (!(tid = BH_MALLOC(sizeof(os_thread_obj))))
        return BHT_ERROR;

    memset(tid, 0, sizeof(os_thread_obj));

    /* Create and initialize thread data */
#if BH_ENABLE_ZEPHYR_MPU_STACK == 0
    if (stack_size < APP_THREAD_STACK_SIZE_MIN)
        stack_size = APP_THREAD_STACK_SIZE_MIN;
    thread_data_size = offsetof(os_thread_data, stack) + stack_size;
#else
    /* MPU stacks come from the static pool and have a fixed size */
    stack_size = BH_ZEPHYR_MPU_STACK_SIZE;
    thread_data_size = sizeof(os_thread_data);
#endif
    if (!(thread_data = BH_MALLOC(thread_data_size))) {
        goto fail1;
    }

    memset(thread_data, 0, thread_data_size);
    zmutex_init(&thread_data->wait_list_lock);
    thread_data->stack_size = stack_size;
    thread_data->tid = tid;

#if BH_ENABLE_ZEPHYR_MPU_STACK != 0
    if (!(thread_data->stack = mpu_stack_alloc())) {
        goto fail2;
    }
#endif

    /* Create the thread */
    if (!(tid = k_thread_create(tid, (k_thread_stack_t *)thread_data->stack,
                                stack_size, os_thread_wrapper, start, arg,
                                thread_data, prio, 0, K_NO_WAIT))) {
        goto fail3;
    }

    bh_assert(tid == thread_data->tid);
    k_thread_name_set(tid, "wasm-zephyr");

    /* Add the new thread to the bookkeeping lists */
    thread_data_list_add(thread_data);
    thread_obj_list_add((os_thread_obj *)tid);
    *p_tid = tid;
    return BHT_OK;

fail3:
#if BH_ENABLE_ZEPHYR_MPU_STACK != 0
    mpu_stack_free(thread_data->stack);
fail2:
#endif
    BH_FREE(thread_data);
fail1:
    BH_FREE(tid);
    return BHT_ERROR;
}

korp_tid
os_self_thread(void)
{
    return (korp_tid)k_current_get();
}
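
/*
 * Join is emulated rather than delegated to the kernel: the joiner
 * appends a wait node with a private semaphore to the target thread's
 * wait list and blocks until os_thread_cleanup() gives that semaphore.
 */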
int
os_thread_join(korp_tid thread, void **value_ptr)
{
    (void)value_ptr;
    os_thread_data *thread_data;
    os_thread_wait_node *node;

    /* Get thread data */
    thread_data = thread_data_list_lookup(thread);
    if (thread_data == NULL) {
        os_printf("Can't join thread %p, probably already exited "
                  "or does not exist\n",
                  thread);
        return BHT_OK;
    }

    /* Create wait node and append it to wait list */
    if (!(node = BH_MALLOC(sizeof(os_thread_wait_node))))
        return BHT_ERROR;

    zsem_init(&node->sem, 0, 1);
    node->next = NULL;

    zmutex_lock(&thread_data->wait_list_lock, K_FOREVER);
    if (!thread_data->thread_wait_list)
        thread_data->thread_wait_list = node;
    else {
        /* Add to end of waiting list */
        os_thread_wait_node *p = thread_data->thread_wait_list;
        while (p->next)
            p = p->next;
        p->next = node;
    }
    zmutex_unlock(&thread_data->wait_list_lock);

    /* Wait on the sem */
    zsem_take(&node->sem, K_FOREVER);
    /* Wait some time for the thread to be actually terminated */
    k_sleep(Z_TIMEOUT_MS(100));

    /* Destroy resource */
    BH_FREE(node);
    return BHT_OK;
}
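
/*
 * Mutex wrappers. A Zephyr k_mutex already lets its owner lock it
 * recursively (the kernel keeps a lock count), so the plain and the
 * recursive initializer can share the same implementation.
 */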
int
os_mutex_init(korp_mutex *mutex)
{
    zmutex_init(mutex);
    return BHT_OK;
}

int
os_recursive_mutex_init(korp_mutex *mutex)
{
    zmutex_init(mutex);
    return BHT_OK;
}

int
os_mutex_destroy(korp_mutex *mutex)
{
    (void)mutex;
    return BHT_OK;
}

int
os_mutex_lock(korp_mutex *mutex)
{
    return zmutex_lock(mutex, K_FOREVER);
}

int
os_mutex_unlock(korp_mutex *mutex)
{
#if KERNEL_VERSION_NUMBER >= 0x020200 /* version 2.2.0 */
    return zmutex_unlock(mutex);
#else
    /* Older kernels have no return value here */
    zmutex_unlock(mutex);
    return 0;
#endif
}

int
os_cond_init(korp_cond *cond)
{
    zmutex_init(&cond->wait_list_lock);
    cond->thread_wait_list = NULL;
    return BHT_OK;
}

int
os_cond_destroy(korp_cond *cond)
{
    (void)cond;
    return BHT_OK;
}
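
/*
 * Condition variables are emulated like join: each waiter appends a
 * semaphore node to the condvar's FIFO wait list, releases the caller's
 * mutex, blocks on its own semaphore, then re-takes the mutex and
 * unlinks its node. os_cond_signal() wakes only the head of the list;
 * os_cond_broadcast() walks the entire list.
 */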
static int
os_cond_wait_internal(korp_cond *cond, korp_mutex *mutex, bool timed, int mills)
{
    os_thread_wait_node *node;

    /* Create wait node and append it to wait list */
    if (!(node = BH_MALLOC(sizeof(os_thread_wait_node))))
        return BHT_ERROR;

    zsem_init(&node->sem, 0, 1);
    node->next = NULL;

    zmutex_lock(&cond->wait_list_lock, K_FOREVER);
    if (!cond->thread_wait_list)
        cond->thread_wait_list = node;
    else {
        /* Add to end of wait list */
        os_thread_wait_node *p = cond->thread_wait_list;
        while (p->next)
            p = p->next;
        p->next = node;
    }
    zmutex_unlock(&cond->wait_list_lock);

    /* Unlock mutex, wait sem and lock mutex again */
    zmutex_unlock(mutex);
    zsem_take(&node->sem, timed ? Z_TIMEOUT_MS(mills) : K_FOREVER);
    zmutex_lock(mutex, K_FOREVER);

    /* Remove wait node from wait list */
    zmutex_lock(&cond->wait_list_lock, K_FOREVER);
    if (cond->thread_wait_list == node)
        cond->thread_wait_list = node->next;
    else {
        /* Remove from the wait list */
        os_thread_wait_node *p = cond->thread_wait_list;
        while (p->next != node)
            p = p->next;
        p->next = node->next;
    }
    BH_FREE(node);
    zmutex_unlock(&cond->wait_list_lock);

    return BHT_OK;
}

int
os_cond_wait(korp_cond *cond, korp_mutex *mutex)
{
    return os_cond_wait_internal(cond, mutex, false, 0);
}

int
os_cond_reltimedwait(korp_cond *cond, korp_mutex *mutex, uint64 useconds)
{
    if (useconds == BHT_WAIT_FOREVER) {
        return os_cond_wait_internal(cond, mutex, false, 0);
    }
    else {
        uint64 mills_64 = useconds / 1000;
        int32 mills;

        if (mills_64 < (uint64)INT32_MAX) {
            mills = (int32)mills_64;
        }
        else {
            mills = INT32_MAX;
            os_printf("Warning: os_cond_reltimedwait exceeds limit, "
                      "set to max timeout instead\n");
        }
        return os_cond_wait_internal(cond, mutex, true, mills);
    }
}
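
/*
 * Sketch of the intended usage, mirroring the POSIX condvar pattern
 * (lock, cond and ready are illustrative names, not part of this API).
 * The predicate is re-checked in a loop because a waiter can be woken
 * after the state has already changed again:
 *
 *     os_mutex_lock(&lock);
 *     while (!ready)
 *         os_cond_wait(&cond, &lock);
 *     ... consume ...
 *     os_mutex_unlock(&lock);
 *
 * Producer side:
 *
 *     os_mutex_lock(&lock);
 *     ready = true;
 *     os_cond_signal(&cond);
 *     os_mutex_unlock(&lock);
 */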
int
os_cond_signal(korp_cond *cond)
{
    /* Signal the head wait node of wait list */
    zmutex_lock(&cond->wait_list_lock, K_FOREVER);
    if (cond->thread_wait_list)
        zsem_give(&cond->thread_wait_list->sem);
    zmutex_unlock(&cond->wait_list_lock);

    return BHT_OK;
}
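
/*
 * The stack base is only known when Zephyr exposes it through
 * CONFIG_THREAD_STACK_INFO. The guard also excludes CONFIG_USERSPACE
 * builds, presumably because a user-mode thread cannot safely read the
 * kernel's thread struct; in both excluded cases NULL means "unknown".
 */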
uint8 *
os_thread_get_stack_boundary(void)
{
#if defined(CONFIG_THREAD_STACK_INFO) && !defined(CONFIG_USERSPACE)
    korp_tid thread = k_current_get();
    return (uint8 *)thread->stack_info.start;
#else
    return NULL;
#endif
}

void
os_thread_jit_write_protect_np(bool enabled)
{
    /* JIT write protection is not applicable on this platform */
    (void)enabled;
}

int
os_rwlock_init(korp_rwlock *lock)
{
    if (!lock) {
        return BHT_ERROR;
    }

    k_mutex_init(&lock->mtx);
    k_sem_init(&lock->sem, 0, K_SEM_MAX_LIMIT);
    lock->read_count = 0;

    return BHT_OK;
}
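
/*
 * Read-write lock emulation built from a k_mutex plus a reader count.
 * Only the writer path is implemented: it takes the mutex and then polls
 * (sleeping 1 ms per round) until read_count drops to zero. Since
 * os_rwlock_rdlock() is not implemented, read_count never rises above
 * zero in practice, and the semaphore initialized above is unused.
 */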
int
os_rwlock_rdlock(korp_rwlock *lock)
{
    /* Not implemented */
    return BHT_ERROR;
}

int
os_rwlock_wrlock(korp_rwlock *lock)
{
    /* Acquire the mutex to ensure exclusive access */
    if (k_mutex_lock(&lock->mtx, K_FOREVER) != 0) {
        return BHT_ERROR;
    }

    /* Wait until there are no readers */
    while (lock->read_count > 0) {
        /* Release the mutex while we're waiting */
        k_mutex_unlock(&lock->mtx);
        /* Wait for a short time */
        k_sleep(K_MSEC(1));
        /* Re-acquire the mutex */
        if (k_mutex_lock(&lock->mtx, K_FOREVER) != 0) {
            return BHT_ERROR;
        }
    }

    /* At this point we hold the mutex and there are no readers,
       so we have the write lock */
    return BHT_OK;
}

int
os_rwlock_unlock(korp_rwlock *lock)
{
    k_mutex_unlock(&lock->mtx);
    return BHT_OK;
}

int
os_rwlock_destroy(korp_rwlock *lock)
{
    /* Not implemented */
    return BHT_ERROR;
}

int
os_thread_detach(korp_tid thread)
{
    (void)thread;
    return BHT_OK;
}

void
os_thread_exit(void *retval)
{
    (void)retval;
    os_thread_cleanup();
    k_thread_abort(k_current_get());
}

int
os_cond_broadcast(korp_cond *cond)
{
    os_thread_wait_node *node;

    /* Signal every wait node of the wait list */
    zmutex_lock(&cond->wait_list_lock, K_FOREVER);
    node = cond->thread_wait_list;
    while (node) {
        os_thread_wait_node *next = node->next;
        zsem_give(&node->sem);
        node = next;
    }
    zmutex_unlock(&cond->wait_list_lock);

    return BHT_OK;
}
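
/*
 * The remaining entry points (POSIX-style named semaphores and the
 * blocking-op hooks) are part of the shared platform API but have no
 * Zephyr implementation in this port; each stub simply reports failure.
 */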
korp_sem *
os_sem_open(const char *name, int oflags, int mode, int val)
{
    /* Not implemented */
    return NULL;
}

int
os_sem_close(korp_sem *sem)
{
    /* Not implemented */
    return BHT_ERROR;
}

int
os_sem_wait(korp_sem *sem)
{
    /* Not implemented */
    return BHT_ERROR;
}

int
os_sem_trywait(korp_sem *sem)
{
    /* Not implemented */
    return BHT_ERROR;
}

int
os_sem_post(korp_sem *sem)
{
    /* Not implemented */
    return BHT_ERROR;
}

int
os_sem_getvalue(korp_sem *sem, int *sval)
{
    /* Not implemented */
    return BHT_ERROR;
}

int
os_sem_unlink(const char *name)
{
    /* Not implemented */
    return BHT_ERROR;
}

int
os_blocking_op_init(void)
{
    /* Not implemented */
    return BHT_ERROR;
}

void
os_begin_blocking_op(void)
{
    /* Not implemented */
}

void
os_end_blocking_op(void)
{
    /* Not implemented */
}

int
os_wakeup_blocking_op(korp_tid tid)
{
    /* Not implemented */
    return BHT_ERROR;
}