nimble.c

/* hci middle header */
#include "hm_hci_transport_h4.h"
#include "hm_hci_transport_h4_uart.h"
#include "hm_chipset.h"
#include "hm_dump.h"
/* nimble header */
#include "nimble/ble.h"
#include "nimble/ble_hci_trans.h"
#include "nimble/hci_common.h"
#include "os/os_mempool.h"
#include "os/os_mbuf.h"
/* rt-thread header */
#include <rtthread.h>

#define NIMBLE_DEBUG 0

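/* Host receive callbacks registered via ble_hci_trans_cfg_hs(). */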
static ble_hci_trans_rx_cmd_fn *ble_hci_uart_rx_cmd_cb;
static void *ble_hci_uart_rx_cmd_arg;
static ble_hci_trans_rx_acl_fn *ble_hci_uart_rx_acl_cb;
static void *ble_hci_uart_rx_acl_arg;

/* Memory pool backing inbound ACL data mbufs. */
static struct os_mbuf_pool ble_hci_uart_acl_mbuf_pool;
static struct os_mempool_ext ble_hci_uart_acl_pool;

#define ACL_BLOCK_SIZE OS_ALIGN(MYNEWT_VAL(BLE_ACL_BUF_SIZE) \
                                + BLE_MBUF_MEMBLOCK_OVERHEAD \
                                + BLE_HCI_DATA_HDR_SZ, OS_ALIGNMENT)

static os_membuf_t ble_hci_uart_acl_buf[
    OS_MEMPOOL_SIZE(MYNEWT_VAL(BLE_ACL_BUF_COUNT),
                    ACL_BLOCK_SIZE) /* Currently only one block in this pool. */
];

static rt_thread_t nimble_tid;

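/*
 * Host-to-controller HCI command transmit over the H4 transport. The command
 * buffer is released here on both the success and error paths.
 */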
int ble_hci_trans_hs_cmd_tx(uint8_t *cmd)
{
#if NIMBLE_DEBUG
    hm_dump_out(1, cmd);
#endif
    if (hci_trans_h4_send(HCI_TRANS_H4_TYPE_CMD, cmd)) {
        hci_trans_h4_send_free(cmd);
        return BLE_ERR_UNKNOWN_HCI_CMD;
    }
    hci_trans_h4_send_free(cmd);
    return BLE_ERR_SUCCESS;
}

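/*
 * Host-to-controller ACL transmit: send the H4 packet-type byte, then walk
 * the mbuf chain, transmitting and freeing each fragment in turn.
 */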
int ble_hci_trans_hs_acl_tx(struct os_mbuf *om)
{
    uint8_t pkt_type = HCI_TRANS_H4_TYPE_ACL;
    struct os_mbuf *om_next;

    hci_trans_h4_uart_send(&pkt_type, sizeof(pkt_type));
    while (om) {
        om_next = SLIST_NEXT(om, om_next);
        hci_trans_h4_uart_send(om->om_data, om->om_len);
        os_mbuf_free(om);
        om = om_next;
    }
    return 0;
}

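/*
 * Allocate a transport buffer for the host. Only HCI command buffers are
 * handled here; any other buffer type trips an assert.
 */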
uint8_t *ble_hci_trans_buf_alloc(int type)
{
    /* In this port, NimBLE only supports the Host role. */
    uint8_t *buf = NULL;

    switch (type) {
    case BLE_HCI_TRANS_BUF_CMD:
        buf = hci_trans_h4_send_alloc(HCI_TRANS_H4_TYPE_CMD);
        break;
    default:
        RT_ASSERT(0);
        buf = NULL;
        break;
    }
    return buf;
}

/*
 * Called by the NimBLE host to free a buffer allocated for an HCI Event packet.
 * Called by the HCI transport to free a buffer allocated for an HCI Command packet.
 */
void ble_hci_trans_buf_free(uint8_t *buf)
{
    /* In this port, only called by the NimBLE host to free a buffer allocated for an HCI Event packet. */
    hci_trans_h4_recv_free(buf);
}

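/* Registering an ACL free callback is not supported by this transport. */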
int ble_hci_trans_set_acl_free_cb(os_mempool_put_fn *cb, void *arg)
{
    return BLE_ERR_UNSUPPORTED;
}

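/*
 * Register the host's HCI event and ACL receive callbacks; they are invoked
 * from the receive thread (hm_nimble_thread_entry).
 */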
void ble_hci_trans_cfg_hs(ble_hci_trans_rx_cmd_fn *cmd_cb,
                          void *cmd_arg,
                          ble_hci_trans_rx_acl_fn *acl_cb,
                          void *acl_arg)
{
    ble_hci_uart_rx_cmd_cb = cmd_cb;
    ble_hci_uart_rx_cmd_arg = cmd_arg;
    ble_hci_uart_rx_acl_cb = acl_cb;
    ble_hci_uart_rx_acl_arg = acl_arg;
}

int ble_hci_trans_reset(void)
{
    return BLE_ERR_SUCCESS;
}

/* The controller-side (LL) interfaces below are not supported in this host-only port. */
int ble_hci_trans_ll_evt_tx(uint8_t *hci_ev)
{
    RT_ASSERT(0);
    return HM_NOT_SUPPORT;
}

int ble_hci_trans_ll_acl_tx(struct os_mbuf *om)
{
    RT_ASSERT(0);
    return HM_NOT_SUPPORT;
}

void ble_hci_trans_cfg_ll(ble_hci_trans_rx_cmd_fn *cmd_cb,
                          void *cmd_arg,
                          ble_hci_trans_rx_acl_fn *acl_cb,
                          void *acl_arg)
{
    RT_ASSERT(0);
}

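/*
 * Allocate a packet-header mbuf from the ACL pool for an inbound ACL packet,
 * reserving user-header space according to the host configuration.
 */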
static struct os_mbuf *ble_hci_trans_acl_buf_alloc(void)
{
    struct os_mbuf *m;
    uint8_t usrhdr_len;

#if MYNEWT_VAL(BLE_DEVICE)
    usrhdr_len = sizeof(struct ble_mbuf_hdr);
#elif MYNEWT_VAL(BLE_HS_FLOW_CTRL)
    usrhdr_len = BLE_MBUF_HS_HDR_LEN;
#else
    usrhdr_len = 0;
#endif
    m = os_mbuf_get_pkthdr(&ble_hci_uart_acl_mbuf_pool, usrhdr_len);
    return m;
}

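/*
 * Receive thread: block until the H4 UART delivers a complete packet, then
 * dispatch HCI events to the host's event callback and copy ACL data into an
 * mbuf for the ACL callback.
 */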
static void hm_nimble_thread_entry(void *args)
{
    uint8_t *recv = NULL;
    uint8_t type;

    while (1) {
        hci_trans_h4_recv_all(&recv, RT_WAITING_FOREVER, &type);
        switch (type) {
        case HCI_TRANS_H4_TYPE_EVT: {
#if NIMBLE_DEBUG
            hm_dump_in(4, recv);
#endif
            ble_hci_uart_rx_cmd_cb(recv, ble_hci_uart_rx_cmd_arg);
            break;
        }
        case HCI_TRANS_H4_TYPE_ACL: {
#if NIMBLE_DEBUG
            hm_dump_in(2, recv);
#endif
            struct os_mbuf *om = ble_hci_trans_acl_buf_alloc();
            /* Total length = 4-byte ACL header + little-endian data length at offset 2. */
            uint16_t packet_len = 4 + ((uint16_t)recv[2] | (uint16_t)recv[3] << 8);
            RT_ASSERT(packet_len <= 255);
            rt_memcpy(om->om_data, recv, packet_len);
            hci_trans_h4_recv_free(recv);
            om->om_len = packet_len;
            OS_MBUF_PKTLEN(om) = packet_len;
            ble_hci_uart_rx_acl_cb(om, ble_hci_uart_rx_acl_arg);
            break;
        }
        default:
            break;
        }
    }
}

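/*
 * Initialize the ACL memory and mbuf pools and start the receive thread.
 */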
int hm_nimble_init(void)
{
    int rc;

    rc = os_mempool_ext_init(&ble_hci_uart_acl_pool,
                             MYNEWT_VAL(BLE_ACL_BUF_COUNT),
                             ACL_BLOCK_SIZE,
                             ble_hci_uart_acl_buf,
                             "ble_hci_uart_acl_pool");
    RT_ASSERT(rc == 0);

    rc = os_mbuf_pool_init(&ble_hci_uart_acl_mbuf_pool,
                           &ble_hci_uart_acl_pool.mpe_mp,
                           ACL_BLOCK_SIZE,
                           MYNEWT_VAL(BLE_ACL_BUF_COUNT));
    RT_ASSERT(rc == 0);

    nimble_tid = rt_thread_create("hm.nimble", hm_nimble_thread_entry, NULL,
                                  512, 10, 10);
    RT_ASSERT(nimble_tid != NULL);
    rt_thread_startup(nimble_tid);
    return RT_EOK;
}