/*
 * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include "sdkconfig.h"
#include <stdint.h>
#include <string.h>
#include <assert.h>
#include <sys/queue.h>
#include "freertos/FreeRTOS.h"
#include "freertos/portmacro.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "esp_err.h"
#include "esp_log.h"
#include "esp_heap_caps.h"
#include "hcd.h"
#include "usbh.h"
#include "usb/usb_helpers.h"
#include "usb/usb_types_ch9.h"

#define EP_NUM_MIN              1                           // The smallest possible non-default endpoint number
#define EP_NUM_MAX              16                          // The largest possible non-default endpoint number
#define NUM_NON_DEFAULT_EP      ((EP_NUM_MAX - 1) * 2)      // The total number of non-default endpoints a device can have.

// Device action flags. LISTED IN THE ORDER THEY SHOULD BE HANDLED IN within usbh_process(). Some actions are mutually exclusive
typedef enum {
    DEV_ACTION_EPn_HALT_FLUSH   = (1 << 0),     // Halt all non-default endpoints then flush them (called after a device is gone)
    DEV_ACTION_EP0_FLUSH        = (1 << 1),     // Retire all URBs submitted to EP0
    DEV_ACTION_EP0_DEQUEUE      = (1 << 2),     // Dequeue all URBs from EP0
    DEV_ACTION_EP0_CLEAR        = (1 << 3),     // Move EP0 to the active state
    DEV_ACTION_PROP_GONE_EVT    = (1 << 4),     // Propagate a USBH_EVENT_DEV_GONE event
    DEV_ACTION_FREE_AND_RECOVER = (1 << 5),     // Free the device object, but send a USBH_HUB_REQ_PORT_RECOVER request afterwards.
    DEV_ACTION_FREE             = (1 << 6),     // Free the device object
    DEV_ACTION_PORT_DISABLE     = (1 << 7),     // Request the hub driver to disable the port of the device
    DEV_ACTION_PROP_NEW         = (1 << 8),     // Propagate a USBH_EVENT_DEV_NEW event
} dev_action_t;

typedef struct device_s device_t;

typedef struct {
    struct {
        usbh_ep_cb_t ep_cb;
        void *ep_cb_arg;
        hcd_pipe_handle_t pipe_hdl;
        device_t *dev;                  // Pointer to the device object that this endpoint is contained in
        const usb_ep_desc_t *ep_desc;   // This just stores a pointer to the endpoint descriptor inside the device's "config_desc"
    } constant;
} endpoint_t;

struct device_s {
    // Dynamic members require a critical section
    struct {
        TAILQ_ENTRY(device_s) tailq_entry;
        union {
            struct {
                uint32_t in_pending_list: 1;
                uint32_t is_gone: 1;
                uint32_t waiting_close: 1;
                uint32_t waiting_port_disable: 1;
                uint32_t waiting_free: 1;
                uint32_t reserved27: 27;
            };
            uint32_t val;
        } flags;
        uint32_t action_flags;
        int num_ctrl_xfers_inflight;
        usb_device_state_t state;
        uint32_t ref_count;
    } dynamic;
    // Mux protected members must be protected by the USBH mux_lock when accessed
    struct {
        /*
        - Endpoint object pointers for each possible non-default endpoint
        - All OUT EPs are listed before IN EPs (i.e., EP_NUM_MIN OUT ... EP_NUM_MAX OUT ... EP_NUM_MIN IN ... EP_NUM_MAX IN)
        */
        endpoint_t *endpoints[NUM_NON_DEFAULT_EP];
    } mux_protected;
    // Constant members do not change after device allocation and enumeration thus do not require a critical section
    struct {
        hcd_pipe_handle_t default_pipe;
        hcd_port_handle_t port_hdl;
        uint8_t address;
        usb_speed_t speed;
        const usb_device_desc_t *desc;
        const usb_config_desc_t *config_desc;
        const usb_str_desc_t *str_desc_manu;
        const usb_str_desc_t *str_desc_product;
        const usb_str_desc_t *str_desc_ser_num;
    } constant;
};

typedef struct {
    // Dynamic members require a critical section
    struct {
        TAILQ_HEAD(tailhead_devs, device_s) devs_idle_tailq;        // Tailq of all enumerated and configured devices
        TAILQ_HEAD(tailhead_devs_cb, device_s) devs_pending_tailq;  // Tailq of devices that need to have their cb called
    } dynamic;
    // Mux protected members must be protected by the USBH mux_lock when accessed
    struct {
        uint8_t num_device;     // Number of enumerated devices
    } mux_protected;
    // Constant members do not change after installation thus do not require a critical section
    struct {
        usb_proc_req_cb_t proc_req_cb;
        void *proc_req_cb_arg;
        usbh_hub_req_cb_t hub_req_cb;
        void *hub_req_cb_arg;
        usbh_event_cb_t event_cb;
        void *event_cb_arg;
        usbh_ctrl_xfer_cb_t ctrl_xfer_cb;
        void *ctrl_xfer_cb_arg;
        SemaphoreHandle_t mux_lock;
    } constant;
} usbh_t;

static usbh_t *p_usbh_obj = NULL;

static portMUX_TYPE usbh_lock = portMUX_INITIALIZER_UNLOCKED;

const char *USBH_TAG = "USBH";

#define USBH_ENTER_CRITICAL_ISR()       portENTER_CRITICAL_ISR(&usbh_lock)
#define USBH_EXIT_CRITICAL_ISR()        portEXIT_CRITICAL_ISR(&usbh_lock)
#define USBH_ENTER_CRITICAL()           portENTER_CRITICAL(&usbh_lock)
#define USBH_EXIT_CRITICAL()            portEXIT_CRITICAL(&usbh_lock)
#define USBH_ENTER_CRITICAL_SAFE()      portENTER_CRITICAL_SAFE(&usbh_lock)
#define USBH_EXIT_CRITICAL_SAFE()       portEXIT_CRITICAL_SAFE(&usbh_lock)

#define USBH_CHECK(cond, ret_val) ({            \
    if (!(cond)) {                              \
        return (ret_val);                       \
    }                                           \
})
#define USBH_CHECK_FROM_CRIT(cond, ret_val) ({  \
    if (!(cond)) {                              \
        USBH_EXIT_CRITICAL();                   \
        return ret_val;                         \
    }                                           \
})

// ------------------------------------------------- Forward Declare ---------------------------------------------------

static bool ep0_pipe_callback(hcd_pipe_handle_t pipe_hdl, hcd_pipe_event_t pipe_event, void *user_arg, bool in_isr);

static bool epN_pipe_callback(hcd_pipe_handle_t pipe_hdl, hcd_pipe_event_t pipe_event, void *user_arg, bool in_isr);

static bool _dev_set_actions(device_t *dev_obj, uint32_t action_flags);

// ----------------------------------------------------- Helpers -------------------------------------------------------

static inline bool check_ep_addr(uint8_t bEndpointAddress)
{
    /*
    Check that the bEndpointAddress is valid
    - Must be <= EP_NUM_MAX (e.g., 16)
    - Must be >= EP_NUM_MIN (e.g., 1)
    - EP0 is owned/managed by USBH, thus must never be directly addressed by users (see USB 2.0 section 10.5.1.2)
    */
    uint8_t addr = bEndpointAddress & USB_B_ENDPOINT_ADDRESS_EP_NUM_MASK;
    return (addr >= EP_NUM_MIN) && (addr <= EP_NUM_MAX);
}

static endpoint_t *get_ep_from_addr(device_t *dev_obj, uint8_t bEndpointAddress)
{
    /*
    CALLER IS RESPONSIBLE FOR TAKING THE mux_lock
    */
    // Calculate index to the device's endpoint object list
    int index;
    // EP_NUM_MIN should map to an index of 0
    index = (bEndpointAddress & USB_B_ENDPOINT_ADDRESS_EP_NUM_MASK) - EP_NUM_MIN;
    assert(index >= 0); // Endpoint address is not supported
    if (bEndpointAddress & USB_B_ENDPOINT_ADDRESS_EP_DIR_MASK) {
        // OUT EPs are listed before IN EPs, so add an offset
        index += (EP_NUM_MAX - EP_NUM_MIN);
    }
    return dev_obj->mux_protected.endpoints[index];
}

static inline void set_ep_from_addr(device_t *dev_obj, uint8_t bEndpointAddress, endpoint_t *ep_obj)
{
    /*
    CALLER IS RESPONSIBLE FOR TAKING THE mux_lock
    */
    // Calculate index to the device's endpoint object list
    int index;
    // EP_NUM_MIN should map to an index of 0
    index = (bEndpointAddress & USB_B_ENDPOINT_ADDRESS_EP_NUM_MASK) - EP_NUM_MIN;
    assert(index >= 0); // Endpoint address is not supported
    if (bEndpointAddress & USB_B_ENDPOINT_ADDRESS_EP_DIR_MASK) {
        // OUT EPs are listed before IN EPs, so add an offset
        index += (EP_NUM_MAX - EP_NUM_MIN);
    }
    dev_obj->mux_protected.endpoints[index] = ep_obj;
}
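
/*
Illustrative example (not part of the driver): how bEndpointAddress maps to an index in
dev_obj->mux_protected.endpoints[] with EP_NUM_MIN = 1 and EP_NUM_MAX = 16:

    0x01 (EP1 OUT)  -> index 0
    0x0F (EP15 OUT) -> index 14
    0x81 (EP1 IN)   -> index 0 + (EP_NUM_MAX - EP_NUM_MIN) = 15
    0x8F (EP15 IN)  -> index 14 + (EP_NUM_MAX - EP_NUM_MIN) = 29

i.e., OUT endpoints occupy indices [0, 14] and IN endpoints occupy indices [15, 29] of the
NUM_NON_DEFAULT_EP (30) entry array.
*/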

static bool urb_check_args(urb_t *urb)
{
    if (urb->transfer.callback == NULL) {
        ESP_LOGE(USBH_TAG, "usb_transfer_t callback is NULL");
        return false;
    }
    if (urb->transfer.num_bytes > urb->transfer.data_buffer_size) {
        ESP_LOGE(USBH_TAG, "usb_transfer_t num_bytes > data_buffer_size");
        return false;
    }
    return true;
}

static bool transfer_check_usb_compliance(usb_transfer_t *transfer, usb_transfer_type_t type, int mps, bool is_in)
{
    if (type == USB_TRANSFER_TYPE_CTRL) {
        // Check that num_bytes and wLength are set correctly
        usb_setup_packet_t *setup_pkt = (usb_setup_packet_t *)transfer->data_buffer;
        if (transfer->num_bytes != sizeof(usb_setup_packet_t) + setup_pkt->wLength) {
            ESP_LOGE(USBH_TAG, "usb_transfer_t num_bytes and usb_setup_packet_t wLength mismatch");
            return false;
        }
    } else if (type == USB_TRANSFER_TYPE_ISOCHRONOUS) {
        // Check that there is at least one isochronous packet descriptor
        if (transfer->num_isoc_packets <= 0) {
            ESP_LOGE(USBH_TAG, "usb_transfer_t num_isoc_packets is 0");
            return false;
        }
        // Check that the sum of all packet lengths adds up to the transfer length
        // If IN, check that each packet length is an integer multiple of MPS
        int total_num_bytes = 0;
        bool mod_mps_all_zero = true;
        for (int i = 0; i < transfer->num_isoc_packets; i++) {
            total_num_bytes += transfer->isoc_packet_desc[i].num_bytes;
            if (transfer->isoc_packet_desc[i].num_bytes % mps != 0) {
                mod_mps_all_zero = false;
            }
        }
        if (transfer->num_bytes != total_num_bytes) {
            ESP_LOGE(USBH_TAG, "ISOC transfer num_bytes != num_bytes of all packets");
            return false;
        }
        if (is_in && !mod_mps_all_zero) {
            ESP_LOGE(USBH_TAG, "ISOC IN num_bytes not integer multiple of MPS");
            return false;
        }
    } else {
        // Check that IN transfers are an integer multiple of MPS
        if (is_in && (transfer->num_bytes % mps != 0)) {
            ESP_LOGE(USBH_TAG, "IN transfer num_bytes not integer multiple of MPS");
            return false;
        }
    }
    return true;
}
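
/*
Illustrative sketch (not part of the driver): a control IN transfer that satisfies the checks
above must have num_bytes == sizeof(usb_setup_packet_t) + wLength. Assuming the caller already
has an allocated urb_t "urb" with a large enough data buffer, and with a hypothetical completion
callback "my_ctrl_xfer_done_cb" (constant names taken from usb/usb_types_ch9.h):

    usb_setup_packet_t *setup = (usb_setup_packet_t *)urb->transfer.data_buffer;
    setup->bmRequestType = USB_BM_REQUEST_TYPE_DIR_IN | USB_BM_REQUEST_TYPE_TYPE_STANDARD | USB_BM_REQUEST_TYPE_RECIP_DEVICE;
    setup->bRequest = USB_B_REQUEST_GET_DESCRIPTOR;
    setup->wValue = (USB_B_DESCRIPTOR_TYPE_DEVICE << 8);
    setup->wIndex = 0;
    setup->wLength = sizeof(usb_device_desc_t);
    urb->transfer.num_bytes = sizeof(usb_setup_packet_t) + setup->wLength;  // Must match wLength (checked above)
    urb->transfer.callback = my_ctrl_xfer_done_cb;                          // Must be non-NULL (see urb_check_args())
*/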

// --------------------------------------------------- Allocation ------------------------------------------------------

static esp_err_t endpoint_alloc(device_t *dev_obj, const usb_ep_desc_t *ep_desc, usbh_ep_config_t *ep_config, endpoint_t **ep_obj_ret)
{
    esp_err_t ret;
    endpoint_t *ep_obj;
    hcd_pipe_handle_t pipe_hdl;

    ep_obj = heap_caps_calloc(1, sizeof(endpoint_t), MALLOC_CAP_DEFAULT);
    if (ep_obj == NULL) {
        return ESP_ERR_NO_MEM;
    }
    // Allocate the EP's underlying pipe
    hcd_pipe_config_t pipe_config = {
        .callback = epN_pipe_callback,
        .callback_arg = (void *)ep_obj,
        .context = ep_config->context,
        .ep_desc = ep_desc,
        .dev_speed = dev_obj->constant.speed,
        .dev_addr = dev_obj->constant.address,
    };
    ret = hcd_pipe_alloc(dev_obj->constant.port_hdl, &pipe_config, &pipe_hdl);
    if (ret != ESP_OK) {
        goto pipe_err;
    }
    // Initialize the endpoint object
    ep_obj->constant.pipe_hdl = pipe_hdl;
    ep_obj->constant.ep_cb = ep_config->ep_cb;
    ep_obj->constant.ep_cb_arg = ep_config->ep_cb_arg;
    ep_obj->constant.dev = dev_obj;
    ep_obj->constant.ep_desc = ep_desc;
    // Return the endpoint object
    *ep_obj_ret = ep_obj;
    ret = ESP_OK;
    return ret;

pipe_err:
    heap_caps_free(ep_obj);
    return ret;
}

static void endpoint_free(endpoint_t *ep_obj)
{
    if (ep_obj == NULL) {
        return;
    }
    // Deallocate the EP's underlying pipe
    ESP_ERROR_CHECK(hcd_pipe_free(ep_obj->constant.pipe_hdl));
    // Free the heap object
    heap_caps_free(ep_obj);
}

static esp_err_t device_alloc(hcd_port_handle_t port_hdl, usb_speed_t speed, device_t **dev_obj_ret)
{
    esp_err_t ret;
    device_t *dev_obj = heap_caps_calloc(1, sizeof(device_t), MALLOC_CAP_DEFAULT);
    usb_device_desc_t *dev_desc = heap_caps_calloc(1, sizeof(usb_device_desc_t), MALLOC_CAP_DEFAULT);
    if (dev_obj == NULL || dev_desc == NULL) {
        ret = ESP_ERR_NO_MEM;
        goto err;
    }
    // Allocate a pipe for EP0. We set the pipe callback to NULL for now
    hcd_pipe_config_t pipe_config = {
        .callback = NULL,
        .callback_arg = NULL,
        .context = (void *)dev_obj,
        .ep_desc = NULL,    // No endpoint descriptor means we're allocating a pipe for EP0
        .dev_speed = speed,
        .dev_addr = 0,
    };
    hcd_pipe_handle_t default_pipe_hdl;
    ret = hcd_pipe_alloc(port_hdl, &pipe_config, &default_pipe_hdl);
    if (ret != ESP_OK) {
        goto err;
    }
    // Initialize device object
    dev_obj->dynamic.state = USB_DEVICE_STATE_DEFAULT;
    dev_obj->constant.default_pipe = default_pipe_hdl;
    dev_obj->constant.port_hdl = port_hdl;
    // Note: dev_obj->constant.address is assigned later during enumeration
    dev_obj->constant.speed = speed;
    dev_obj->constant.desc = dev_desc;
    *dev_obj_ret = dev_obj;
    ret = ESP_OK;
    return ret;

err:
    heap_caps_free(dev_desc);
    heap_caps_free(dev_obj);
    return ret;
}

static void device_free(device_t *dev_obj)
{
    if (dev_obj == NULL) {
        return;
    }
    // Configuration might not have been allocated (in case of early enumeration failure)
    if (dev_obj->constant.config_desc) {
        heap_caps_free((usb_config_desc_t *)dev_obj->constant.config_desc);
    }
    // String descriptors might not have been allocated (in case of early enumeration failure)
    if (dev_obj->constant.str_desc_manu) {
        heap_caps_free((usb_str_desc_t *)dev_obj->constant.str_desc_manu);
    }
    if (dev_obj->constant.str_desc_product) {
        heap_caps_free((usb_str_desc_t *)dev_obj->constant.str_desc_product);
    }
    if (dev_obj->constant.str_desc_ser_num) {
        heap_caps_free((usb_str_desc_t *)dev_obj->constant.str_desc_ser_num);
    }
    heap_caps_free((usb_device_desc_t *)dev_obj->constant.desc);
    ESP_ERROR_CHECK(hcd_pipe_free(dev_obj->constant.default_pipe));
    heap_caps_free(dev_obj);
}

// ---------------------------------------------------- Callbacks ------------------------------------------------------

static bool ep0_pipe_callback(hcd_pipe_handle_t pipe_hdl, hcd_pipe_event_t pipe_event, void *user_arg, bool in_isr)
{
    uint32_t action_flags;
    device_t *dev_obj = (device_t *)user_arg;
    switch (pipe_event) {
    case HCD_PIPE_EVENT_URB_DONE:
        // A control transfer completed on EP0's pipe. We need to dequeue it
        action_flags = DEV_ACTION_EP0_DEQUEUE;
        break;
    case HCD_PIPE_EVENT_ERROR_XFER:
    case HCD_PIPE_EVENT_ERROR_URB_NOT_AVAIL:
    case HCD_PIPE_EVENT_ERROR_OVERFLOW:
        // EP0's pipe has encountered an error. We need to retire all URBs, dequeue them, then make the pipe active again
        action_flags = DEV_ACTION_EP0_FLUSH |
                       DEV_ACTION_EP0_DEQUEUE |
                       DEV_ACTION_EP0_CLEAR;
        if (in_isr) {
            ESP_EARLY_LOGE(USBH_TAG, "Dev %d EP 0 Error", dev_obj->constant.address);
        } else {
            ESP_LOGE(USBH_TAG, "Dev %d EP 0 Error", dev_obj->constant.address);
        }
        break;
    case HCD_PIPE_EVENT_ERROR_STALL:
        // EP0's pipe encountered a "protocol stall". We just need to dequeue URBs then make the pipe active again
        action_flags = DEV_ACTION_EP0_DEQUEUE | DEV_ACTION_EP0_CLEAR;
        if (in_isr) {
            ESP_EARLY_LOGE(USBH_TAG, "Dev %d EP 0 STALL", dev_obj->constant.address);
        } else {
            ESP_LOGE(USBH_TAG, "Dev %d EP 0 STALL", dev_obj->constant.address);
        }
        break;
    default:
        action_flags = 0;
        break;
    }

    USBH_ENTER_CRITICAL_SAFE();
    bool call_proc_req_cb = _dev_set_actions(dev_obj, action_flags);
    USBH_EXIT_CRITICAL_SAFE();

    bool yield = false;
    if (call_proc_req_cb) {
        yield = p_usbh_obj->constant.proc_req_cb(USB_PROC_REQ_SOURCE_USBH, in_isr, p_usbh_obj->constant.proc_req_cb_arg);
    }
    return yield;
}

static bool epN_pipe_callback(hcd_pipe_handle_t pipe_hdl, hcd_pipe_event_t pipe_event, void *user_arg, bool in_isr)
{
    endpoint_t *ep_obj = (endpoint_t *)user_arg;
    return ep_obj->constant.ep_cb((usbh_ep_handle_t)ep_obj,
                                  (usbh_ep_event_t)pipe_event,
                                  ep_obj->constant.ep_cb_arg,
                                  in_isr);
}
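
/*
Illustrative sketch (not part of the driver): a client endpoint callback with the usbh_ep_cb_t
signature that epN_pipe_callback() forwards to. It runs in the same context as the underlying
HCD pipe callback (possibly an ISR), so it should only do minimal work, e.g. notify a task that
a URB can be dequeued. The callback name, the task handle passed via ep_cb_arg, and the
USBH_EP_EVENT_URB_DONE value (assumed to mirror HCD_PIPE_EVENT_URB_DONE in usbh.h) are
assumptions for the sake of the example.

    static bool my_ep_cb(usbh_ep_handle_t ep_hdl, usbh_ep_event_t ep_event, void *arg, bool in_isr)
    {
        BaseType_t task_woken = pdFALSE;
        if (ep_event == USBH_EP_EVENT_URB_DONE) {
            if (in_isr) {
                vTaskNotifyGiveFromISR((TaskHandle_t)arg, &task_woken);
            } else {
                xTaskNotifyGive((TaskHandle_t)arg);
            }
        }
        return (task_woken == pdTRUE);  // Request a yield if a higher priority task was woken
    }
*/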

// -------------------------------------------------- Event Related ----------------------------------------------------

static bool _dev_set_actions(device_t *dev_obj, uint32_t action_flags)
{
    if (action_flags == 0) {
        return false;
    }
    bool call_proc_req_cb;
    // Check if device is already on the callback list
    if (!dev_obj->dynamic.flags.in_pending_list) {
        // Move device from the idle device list to the callback (pending) device list
        TAILQ_REMOVE(&p_usbh_obj->dynamic.devs_idle_tailq, dev_obj, dynamic.tailq_entry);
        TAILQ_INSERT_TAIL(&p_usbh_obj->dynamic.devs_pending_tailq, dev_obj, dynamic.tailq_entry);
        dev_obj->dynamic.action_flags |= action_flags;
        dev_obj->dynamic.flags.in_pending_list = 1;
        call_proc_req_cb = true;
    } else {
        call_proc_req_cb = false;
    }
    return call_proc_req_cb;
}
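
/*
Note on the event flow implemented by _dev_set_actions() and usbh_process():
1. A pipe callback (e.g. ep0_pipe_callback()) or a hub event (usbh_hub_pass_event()) calls
   _dev_set_actions() from within a critical section to record the required actions and move
   the device onto devs_pending_tailq.
2. If the device was newly added to the pending list, the caller invokes proc_req_cb to request
   processing from the task that drives this layer.
3. That task then calls usbh_process(), which moves each pending device back to the idle list
   and handles its action flags in the order they are listed in dev_action_t.
*/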

static inline void handle_epn_halt_flush(device_t *dev_obj)
{
    // We need to take the mux_lock to access mux_protected members
    xSemaphoreTake(p_usbh_obj->constant.mux_lock, portMAX_DELAY);
    // Halt then flush all non-default EPs
    for (int i = 0; i < NUM_NON_DEFAULT_EP; i++) {
        if (dev_obj->mux_protected.endpoints[i] != NULL) {
            ESP_ERROR_CHECK(hcd_pipe_command(dev_obj->mux_protected.endpoints[i]->constant.pipe_hdl, HCD_PIPE_CMD_HALT));
            ESP_ERROR_CHECK(hcd_pipe_command(dev_obj->mux_protected.endpoints[i]->constant.pipe_hdl, HCD_PIPE_CMD_FLUSH));
        }
    }
    xSemaphoreGive(p_usbh_obj->constant.mux_lock);
}

static inline void handle_ep0_flush(device_t *dev_obj)
{
    ESP_ERROR_CHECK(hcd_pipe_command(dev_obj->constant.default_pipe, HCD_PIPE_CMD_HALT));
    ESP_ERROR_CHECK(hcd_pipe_command(dev_obj->constant.default_pipe, HCD_PIPE_CMD_FLUSH));
}

static inline void handle_ep0_dequeue(device_t *dev_obj)
{
    // Empty URBs from EP0's pipe and call the control transfer callback
    ESP_LOGD(USBH_TAG, "Default pipe device %d", dev_obj->constant.address);
    int num_urbs = 0;
    urb_t *urb = hcd_urb_dequeue(dev_obj->constant.default_pipe);
    while (urb != NULL) {
        num_urbs++;
        p_usbh_obj->constant.ctrl_xfer_cb((usb_device_handle_t)dev_obj, urb, p_usbh_obj->constant.ctrl_xfer_cb_arg);
        urb = hcd_urb_dequeue(dev_obj->constant.default_pipe);
    }
    USBH_ENTER_CRITICAL();
    dev_obj->dynamic.num_ctrl_xfers_inflight -= num_urbs;
    USBH_EXIT_CRITICAL();
}

static inline void handle_ep0_clear(device_t *dev_obj)
{
    // We allow the pipe command to fail just in case the pipe becomes invalid mid command
    hcd_pipe_command(dev_obj->constant.default_pipe, HCD_PIPE_CMD_CLEAR);
}

static inline void handle_prop_gone_evt(device_t *dev_obj)
{
    // Propagate a USBH_EVENT_DEV_GONE event
    ESP_LOGE(USBH_TAG, "Device %d gone", dev_obj->constant.address);
    p_usbh_obj->constant.event_cb((usb_device_handle_t)dev_obj, USBH_EVENT_DEV_GONE, p_usbh_obj->constant.event_cb_arg);
}

static void handle_free_and_recover(device_t *dev_obj, bool recover_port)
{
    // Cache a copy of the port handle as we are about to free the device object
    bool all_free;
    hcd_port_handle_t port_hdl = dev_obj->constant.port_hdl;
    ESP_LOGD(USBH_TAG, "Freeing device %d", dev_obj->constant.address);
    // We need to take the mux_lock to access mux_protected members
    xSemaphoreTake(p_usbh_obj->constant.mux_lock, portMAX_DELAY);
    USBH_ENTER_CRITICAL();
    // Remove the device object from its containing list
    if (dev_obj->dynamic.flags.in_pending_list) {
        dev_obj->dynamic.flags.in_pending_list = 0;
        TAILQ_REMOVE(&p_usbh_obj->dynamic.devs_pending_tailq, dev_obj, dynamic.tailq_entry);
    } else {
        TAILQ_REMOVE(&p_usbh_obj->dynamic.devs_idle_tailq, dev_obj, dynamic.tailq_entry);
    }
    USBH_EXIT_CRITICAL();
    p_usbh_obj->mux_protected.num_device--;
    all_free = (p_usbh_obj->mux_protected.num_device == 0);
    xSemaphoreGive(p_usbh_obj->constant.mux_lock);
    device_free(dev_obj);
    // If all devices have been freed, propagate a USBH_EVENT_DEV_ALL_FREE event
    if (all_free) {
        ESP_LOGD(USBH_TAG, "Device all free");
        p_usbh_obj->constant.event_cb((usb_device_handle_t)NULL, USBH_EVENT_DEV_ALL_FREE, p_usbh_obj->constant.event_cb_arg);
    }
    // Check if we need to recover the device's port
    if (recover_port) {
        p_usbh_obj->constant.hub_req_cb(port_hdl, USBH_HUB_REQ_PORT_RECOVER, p_usbh_obj->constant.hub_req_cb_arg);
    }
}

static inline void handle_port_disable(device_t *dev_obj)
{
    // Request that the HUB disables this device's port
    ESP_LOGD(USBH_TAG, "Disable device port %d", dev_obj->constant.address);
    p_usbh_obj->constant.hub_req_cb(dev_obj->constant.port_hdl, USBH_HUB_REQ_PORT_DISABLE, p_usbh_obj->constant.hub_req_cb_arg);
}

static inline void handle_prop_new_evt(device_t *dev_obj)
{
    ESP_LOGD(USBH_TAG, "New device %d", dev_obj->constant.address);
    p_usbh_obj->constant.event_cb((usb_device_handle_t)dev_obj, USBH_EVENT_DEV_NEW, p_usbh_obj->constant.event_cb_arg);
}

// ------------------------------------------------- USBH Functions ----------------------------------------------------

esp_err_t usbh_install(const usbh_config_t *usbh_config)
{
    USBH_CHECK(usbh_config != NULL, ESP_ERR_INVALID_ARG);
    USBH_ENTER_CRITICAL();
    USBH_CHECK_FROM_CRIT(p_usbh_obj == NULL, ESP_ERR_INVALID_STATE);
    USBH_EXIT_CRITICAL();

    esp_err_t ret;
    usbh_t *usbh_obj = heap_caps_calloc(1, sizeof(usbh_t), MALLOC_CAP_DEFAULT);
    SemaphoreHandle_t mux_lock = xSemaphoreCreateMutex();
    if (usbh_obj == NULL || mux_lock == NULL) {
        ret = ESP_ERR_NO_MEM;
        goto err;
    }
    // Initialize USBH object
    TAILQ_INIT(&usbh_obj->dynamic.devs_idle_tailq);
    TAILQ_INIT(&usbh_obj->dynamic.devs_pending_tailq);
    usbh_obj->constant.proc_req_cb = usbh_config->proc_req_cb;
    usbh_obj->constant.proc_req_cb_arg = usbh_config->proc_req_cb_arg;
    usbh_obj->constant.event_cb = usbh_config->event_cb;
    usbh_obj->constant.event_cb_arg = usbh_config->event_cb_arg;
    usbh_obj->constant.ctrl_xfer_cb = usbh_config->ctrl_xfer_cb;
    usbh_obj->constant.ctrl_xfer_cb_arg = usbh_config->ctrl_xfer_cb_arg;
    usbh_obj->constant.mux_lock = mux_lock;
    // Assign USBH object pointer
    USBH_ENTER_CRITICAL();
    if (p_usbh_obj != NULL) {
        USBH_EXIT_CRITICAL();
        ret = ESP_ERR_INVALID_STATE;
        goto err;
    }
    p_usbh_obj = usbh_obj;
    USBH_EXIT_CRITICAL();
    ret = ESP_OK;
    return ret;

err:
    if (mux_lock != NULL) {
        vSemaphoreDelete(mux_lock);
    }
    heap_caps_free(usbh_obj);
    return ret;
}
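
/*
Illustrative sketch (not part of the driver): installing the USBH layer. The callback
implementations and argument values are hypothetical; only the usbh_config_t fields read by
usbh_install() above are shown.

    usbh_config_t usbh_config = {
        .proc_req_cb = my_proc_req_cb,          // Signals the task that calls usbh_process()
        .proc_req_cb_arg = NULL,
        .event_cb = my_usbh_event_cb,           // Receives USBH_EVENT_DEV_NEW / _DEV_GONE / _DEV_ALL_FREE
        .event_cb_arg = NULL,
        .ctrl_xfer_cb = my_ctrl_xfer_done_cb,   // Receives completed EP0 URBs from handle_ep0_dequeue()
        .ctrl_xfer_cb_arg = NULL,
    };
    ESP_ERROR_CHECK(usbh_install(&usbh_config));
*/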

esp_err_t usbh_uninstall(void)
{
    // Check that USBH is in a state to be uninstalled
    USBH_ENTER_CRITICAL();
    USBH_CHECK_FROM_CRIT(p_usbh_obj != NULL, ESP_ERR_INVALID_STATE);
    usbh_t *usbh_obj = p_usbh_obj;
    USBH_EXIT_CRITICAL();

    esp_err_t ret;
    // We need to take the mux_lock to access mux_protected members
    xSemaphoreTake(usbh_obj->constant.mux_lock, portMAX_DELAY);
    if (p_usbh_obj->mux_protected.num_device > 0) {
        // There are still devices allocated. Can't uninstall right now.
        ret = ESP_ERR_INVALID_STATE;
        goto exit;
    }
    // Check again if we can uninstall
    USBH_ENTER_CRITICAL();
    assert(p_usbh_obj == usbh_obj);
    p_usbh_obj = NULL;
    USBH_EXIT_CRITICAL();
    xSemaphoreGive(usbh_obj->constant.mux_lock);

    // Free resources
    vSemaphoreDelete(usbh_obj->constant.mux_lock);
    heap_caps_free(usbh_obj);
    ret = ESP_OK;
    return ret;

exit:
    xSemaphoreGive(p_usbh_obj->constant.mux_lock);
    return ret;
}

esp_err_t usbh_process(void)
{
    USBH_ENTER_CRITICAL();
    USBH_CHECK_FROM_CRIT(p_usbh_obj != NULL, ESP_ERR_INVALID_STATE);
    // Keep processing until all devices with pending events have been handled
    while (!TAILQ_EMPTY(&p_usbh_obj->dynamic.devs_pending_tailq)) {
        // Move the device back into the idle device list
        device_t *dev_obj = TAILQ_FIRST(&p_usbh_obj->dynamic.devs_pending_tailq);
        TAILQ_REMOVE(&p_usbh_obj->dynamic.devs_pending_tailq, dev_obj, dynamic.tailq_entry);
        TAILQ_INSERT_TAIL(&p_usbh_obj->dynamic.devs_idle_tailq, dev_obj, dynamic.tailq_entry);
        // Clear the device's flags
        uint32_t action_flags = dev_obj->dynamic.action_flags;
        dev_obj->dynamic.action_flags = 0;
        dev_obj->dynamic.flags.in_pending_list = 0;

        /* ---------------------------------------------------------------------
        Exit critical section to handle device action flags in their listed order
        --------------------------------------------------------------------- */
        USBH_EXIT_CRITICAL();
        ESP_LOGD(USBH_TAG, "Processing actions 0x%"PRIx32"", action_flags);
        // Sanity check. If the device is being freed, there must not be any other action flags set
        assert(!(action_flags & DEV_ACTION_FREE) || action_flags == DEV_ACTION_FREE);

        if (action_flags & DEV_ACTION_EPn_HALT_FLUSH) {
            handle_epn_halt_flush(dev_obj);
        }
        if (action_flags & DEV_ACTION_EP0_FLUSH) {
            handle_ep0_flush(dev_obj);
        }
        if (action_flags & DEV_ACTION_EP0_DEQUEUE) {
            handle_ep0_dequeue(dev_obj);
        }
        if (action_flags & DEV_ACTION_EP0_CLEAR) {
            handle_ep0_clear(dev_obj);
        }
        if (action_flags & DEV_ACTION_PROP_GONE_EVT) {
            handle_prop_gone_evt(dev_obj);
        }
        /*
        Note: We make these action flags mutually exclusive in case they happen in rapid succession. They are handled
        in order of precedence.
        For example:
        - A new device event is requested followed immediately by a disconnection
        - A port disable is requested followed immediately by a disconnection
        */
        if (action_flags & DEV_ACTION_FREE_AND_RECOVER) {
            handle_free_and_recover(dev_obj, true);
        } else if (action_flags & DEV_ACTION_FREE) {
            handle_free_and_recover(dev_obj, false);
        } else if (action_flags & DEV_ACTION_PORT_DISABLE) {
            handle_port_disable(dev_obj);
        } else if (action_flags & DEV_ACTION_PROP_NEW) {
            handle_prop_new_evt(dev_obj);
        }

        USBH_ENTER_CRITICAL();
        /* ---------------------------------------------------------------------
        Re-enter critical section. All device action flags should have been handled.
        --------------------------------------------------------------------- */
    }
    USBH_EXIT_CRITICAL();
    return ESP_OK;
}
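
/*
Illustrative sketch (not part of the driver): usbh_process() is expected to be called from a
single host task whenever the proc_req_cb registered via usbh_install() has signalled that USBH
needs processing. The task and notification mechanism below are hypothetical; the registered
proc_req_cb would give the notification, e.g. with xTaskNotifyGive()/vTaskNotifyGiveFromISR()
depending on its in_isr argument.

    static void host_task(void *arg)
    {
        while (1) {
            ulTaskNotifyTake(pdTRUE, portMAX_DELAY);    // Wait for a processing request
            ESP_ERROR_CHECK(usbh_process());            // Handle all pending device actions
        }
    }
*/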

esp_err_t usbh_num_devs(int *num_devs_ret)
{
    USBH_CHECK(num_devs_ret != NULL, ESP_ERR_INVALID_ARG);
    xSemaphoreTake(p_usbh_obj->constant.mux_lock, portMAX_DELAY);
    *num_devs_ret = p_usbh_obj->mux_protected.num_device;
    xSemaphoreGive(p_usbh_obj->constant.mux_lock);
    return ESP_OK;
}

// ------------------------------------------------ Device Functions ---------------------------------------------------

// --------------------- Device Pool -----------------------

esp_err_t usbh_dev_addr_list_fill(int list_len, uint8_t *dev_addr_list, int *num_dev_ret)
{
    USBH_CHECK(dev_addr_list != NULL && num_dev_ret != NULL, ESP_ERR_INVALID_ARG);
    USBH_ENTER_CRITICAL();
    int num_filled = 0;
    device_t *dev_obj;
    // Fill list with devices from idle tailq
    TAILQ_FOREACH(dev_obj, &p_usbh_obj->dynamic.devs_idle_tailq, dynamic.tailq_entry) {
        if (num_filled < list_len) {
            dev_addr_list[num_filled] = dev_obj->constant.address;
            num_filled++;
        } else {
            break;
        }
    }
    // Fill list with devices from pending tailq
    TAILQ_FOREACH(dev_obj, &p_usbh_obj->dynamic.devs_pending_tailq, dynamic.tailq_entry) {
        if (num_filled < list_len) {
            dev_addr_list[num_filled] = dev_obj->constant.address;
            num_filled++;
        } else {
            break;
        }
    }
    USBH_EXIT_CRITICAL();
    // Write back the number of devices filled
    *num_dev_ret = num_filled;
    return ESP_OK;
}
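
/*
Illustrative sketch (not part of the driver): enumerating currently connected devices and
opening each one by address. The list size is an arbitrary example value.

    uint8_t addr_list[8];
    int num_devs;
    ESP_ERROR_CHECK(usbh_dev_addr_list_fill(sizeof(addr_list), addr_list, &num_devs));
    for (int i = 0; i < num_devs; i++) {
        usb_device_handle_t dev_hdl;
        if (usbh_dev_open(addr_list[i], &dev_hdl) == ESP_OK) {
            // ... use the device, then release the reference
            ESP_ERROR_CHECK(usbh_dev_close(dev_hdl));
        }
    }
*/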

esp_err_t usbh_dev_open(uint8_t dev_addr, usb_device_handle_t *dev_hdl)
{
    USBH_CHECK(dev_hdl != NULL, ESP_ERR_INVALID_ARG);
    esp_err_t ret;
    USBH_ENTER_CRITICAL();
    // Go through the device lists to find the device with the specified address
    device_t *found_dev_obj = NULL;
    device_t *dev_obj;
    TAILQ_FOREACH(dev_obj, &p_usbh_obj->dynamic.devs_idle_tailq, dynamic.tailq_entry) {
        if (dev_obj->constant.address == dev_addr) {
            found_dev_obj = dev_obj;
            goto exit;
        }
    }
    TAILQ_FOREACH(dev_obj, &p_usbh_obj->dynamic.devs_pending_tailq, dynamic.tailq_entry) {
        if (dev_obj->constant.address == dev_addr) {
            found_dev_obj = dev_obj;
            goto exit;
        }
    }
exit:
    if (found_dev_obj != NULL) {
        // Check that the device is in a state to be referenced (i.e., not gone and not waiting to be disabled or freed)
        if (dev_obj->dynamic.flags.is_gone || dev_obj->dynamic.flags.waiting_port_disable || dev_obj->dynamic.flags.waiting_free) {
            ret = ESP_ERR_INVALID_STATE;
        } else {
            dev_obj->dynamic.ref_count++;
            *dev_hdl = (usb_device_handle_t)found_dev_obj;
            ret = ESP_OK;
        }
    } else {
        ret = ESP_ERR_NOT_FOUND;
    }
    USBH_EXIT_CRITICAL();
    return ret;
}

esp_err_t usbh_dev_close(usb_device_handle_t dev_hdl)
{
    USBH_CHECK(dev_hdl != NULL, ESP_ERR_INVALID_ARG);
    device_t *dev_obj = (device_t *)dev_hdl;
    USBH_ENTER_CRITICAL();
    dev_obj->dynamic.ref_count--;
    bool call_proc_req_cb = false;
    if (dev_obj->dynamic.ref_count == 0) {
        // Sanity check
        assert(dev_obj->dynamic.num_ctrl_xfers_inflight == 0);  // There cannot be any control transfer in-flight
        assert(!dev_obj->dynamic.flags.waiting_free);           // This can only be set when ref count reaches 0
        if (dev_obj->dynamic.flags.is_gone) {
            // Device is already gone so its port is already disabled. Trigger the USBH process to free the device
            dev_obj->dynamic.flags.waiting_free = 1;
            call_proc_req_cb = _dev_set_actions(dev_obj, DEV_ACTION_FREE_AND_RECOVER);  // Port error occurred so we need to recover it
        } else if (dev_obj->dynamic.flags.waiting_close) {
            // Device is still connected but is no longer needed. Trigger the USBH process to request that the device's port be disabled
            dev_obj->dynamic.flags.waiting_port_disable = 1;
            call_proc_req_cb = _dev_set_actions(dev_obj, DEV_ACTION_PORT_DISABLE);
        }
        // Else, there's nothing to do. Leave the device allocated
    }
    USBH_EXIT_CRITICAL();

    if (call_proc_req_cb) {
        p_usbh_obj->constant.proc_req_cb(USB_PROC_REQ_SOURCE_USBH, false, p_usbh_obj->constant.proc_req_cb_arg);
    }
    return ESP_OK;
}

esp_err_t usbh_dev_mark_all_free(void)
{
    USBH_ENTER_CRITICAL();
    /*
    Go through the device list and mark each device as waiting to be closed. If the device is not opened at all, we can
    disable it immediately.
    Note: We manually traverse the list because we need to add/remove items while traversing
    */
    bool call_proc_req_cb = false;
    bool wait_for_free = false;
    for (int i = 0; i < 2; i++) {
        device_t *dev_obj_cur;
        device_t *dev_obj_next;
        // Go through the pending list first as it's more efficient
        if (i == 0) {
            dev_obj_cur = TAILQ_FIRST(&p_usbh_obj->dynamic.devs_pending_tailq);
        } else {
            dev_obj_cur = TAILQ_FIRST(&p_usbh_obj->dynamic.devs_idle_tailq);
        }
        while (dev_obj_cur != NULL) {
            assert(!dev_obj_cur->dynamic.flags.waiting_close);  // Sanity check
            // Keep a copy of the next item first in case we remove the current item
            dev_obj_next = TAILQ_NEXT(dev_obj_cur, dynamic.tailq_entry);
            if (dev_obj_cur->dynamic.ref_count == 0 && !dev_obj_cur->dynamic.flags.is_gone) {
                // Device is not opened and is not gone, so we can disable it now
                dev_obj_cur->dynamic.flags.waiting_port_disable = 1;
                call_proc_req_cb |= _dev_set_actions(dev_obj_cur, DEV_ACTION_PORT_DISABLE);
            } else {
                // Device is still opened. Just mark it as waiting to be closed
                dev_obj_cur->dynamic.flags.waiting_close = 1;
            }
            wait_for_free = true;   // As long as there is still a device, we need to wait for an event indicating it is freed
            dev_obj_cur = dev_obj_next;
        }
    }
    USBH_EXIT_CRITICAL();

    if (call_proc_req_cb) {
        p_usbh_obj->constant.proc_req_cb(USB_PROC_REQ_SOURCE_USBH, false, p_usbh_obj->constant.proc_req_cb_arg);
    }
    return (wait_for_free) ? ESP_ERR_NOT_FINISHED : ESP_OK;
}

// ------------------- Single Device ----------------------

esp_err_t usbh_dev_get_addr(usb_device_handle_t dev_hdl, uint8_t *dev_addr)
{
    USBH_CHECK(dev_hdl != NULL && dev_addr != NULL, ESP_ERR_INVALID_ARG);
    device_t *dev_obj = (device_t *)dev_hdl;
    USBH_ENTER_CRITICAL();
    USBH_CHECK_FROM_CRIT(dev_obj->constant.address > 0, ESP_ERR_INVALID_STATE);
    *dev_addr = dev_obj->constant.address;
    USBH_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t usbh_dev_get_info(usb_device_handle_t dev_hdl, usb_device_info_t *dev_info)
{
    USBH_CHECK(dev_hdl != NULL && dev_info != NULL, ESP_ERR_INVALID_ARG);
    device_t *dev_obj = (device_t *)dev_hdl;
    esp_err_t ret;
    // Device must be configured, or not attached (if it suddenly disconnected)
    USBH_ENTER_CRITICAL();
    if (!(dev_obj->dynamic.state == USB_DEVICE_STATE_CONFIGURED || dev_obj->dynamic.state == USB_DEVICE_STATE_NOT_ATTACHED)) {
        USBH_EXIT_CRITICAL();
        ret = ESP_ERR_INVALID_STATE;
        goto exit;
    }
    // Critical section for the dynamic members
    dev_info->speed = dev_obj->constant.speed;
    dev_info->dev_addr = dev_obj->constant.address;
    dev_info->bMaxPacketSize0 = dev_obj->constant.desc->bMaxPacketSize0;
    USBH_EXIT_CRITICAL();
    assert(dev_obj->constant.config_desc);
    dev_info->bConfigurationValue = dev_obj->constant.config_desc->bConfigurationValue;
    // String descriptors are allowed to be NULL as not all devices support them
    dev_info->str_desc_manufacturer = dev_obj->constant.str_desc_manu;
    dev_info->str_desc_product = dev_obj->constant.str_desc_product;
    dev_info->str_desc_serial_num = dev_obj->constant.str_desc_ser_num;
    ret = ESP_OK;
exit:
    return ret;
}

esp_err_t usbh_dev_get_desc(usb_device_handle_t dev_hdl, const usb_device_desc_t **dev_desc_ret)
{
    USBH_CHECK(dev_hdl != NULL && dev_desc_ret != NULL, ESP_ERR_INVALID_ARG);
    device_t *dev_obj = (device_t *)dev_hdl;
    USBH_ENTER_CRITICAL();
    USBH_CHECK_FROM_CRIT(dev_obj->dynamic.state == USB_DEVICE_STATE_CONFIGURED, ESP_ERR_INVALID_STATE);
    USBH_EXIT_CRITICAL();
    *dev_desc_ret = dev_obj->constant.desc;
    return ESP_OK;
}

esp_err_t usbh_dev_get_config_desc(usb_device_handle_t dev_hdl, const usb_config_desc_t **config_desc_ret)
{
    USBH_CHECK(dev_hdl != NULL && config_desc_ret != NULL, ESP_ERR_INVALID_ARG);
    device_t *dev_obj = (device_t *)dev_hdl;
    esp_err_t ret;
    // Device must be in the configured state
    USBH_ENTER_CRITICAL();
    if (dev_obj->dynamic.state != USB_DEVICE_STATE_CONFIGURED) {
        USBH_EXIT_CRITICAL();
        ret = ESP_ERR_INVALID_STATE;
        goto exit;
    }
    USBH_EXIT_CRITICAL();
    assert(dev_obj->constant.config_desc);
    *config_desc_ret = dev_obj->constant.config_desc;
    ret = ESP_OK;
exit:
    return ret;
}

esp_err_t usbh_dev_submit_ctrl_urb(usb_device_handle_t dev_hdl, urb_t *urb)
{
    USBH_CHECK(dev_hdl != NULL && urb != NULL, ESP_ERR_INVALID_ARG);
    device_t *dev_obj = (device_t *)dev_hdl;
    USBH_CHECK(urb_check_args(urb), ESP_ERR_INVALID_ARG);
    bool xfer_is_in = ((usb_setup_packet_t *)urb->transfer.data_buffer)->bmRequestType & USB_BM_REQUEST_TYPE_DIR_IN;
    USBH_CHECK(transfer_check_usb_compliance(&(urb->transfer), USB_TRANSFER_TYPE_CTRL, dev_obj->constant.desc->bMaxPacketSize0, xfer_is_in), ESP_ERR_INVALID_ARG);
    USBH_ENTER_CRITICAL();
    USBH_CHECK_FROM_CRIT(dev_obj->dynamic.state == USB_DEVICE_STATE_CONFIGURED, ESP_ERR_INVALID_STATE);
    // Increment the control transfer count first
    dev_obj->dynamic.num_ctrl_xfers_inflight++;
    USBH_EXIT_CRITICAL();

    esp_err_t ret;
    if (hcd_pipe_get_state(dev_obj->constant.default_pipe) != HCD_PIPE_STATE_ACTIVE) {
        ret = ESP_ERR_INVALID_STATE;
        goto hcd_err;
    }
    ret = hcd_urb_enqueue(dev_obj->constant.default_pipe, urb);
    if (ret != ESP_OK) {
        goto hcd_err;
    }
    ret = ESP_OK;
    return ret;

hcd_err:
    USBH_ENTER_CRITICAL();
    dev_obj->dynamic.num_ctrl_xfers_inflight--;
    USBH_EXIT_CRITICAL();
    return ret;
}

// ----------------------------------------------- Interface Functions -------------------------------------------------

esp_err_t usbh_ep_alloc(usb_device_handle_t dev_hdl, usbh_ep_config_t *ep_config, usbh_ep_handle_t *ep_hdl_ret)
{
    USBH_CHECK(dev_hdl != NULL && ep_config != NULL && ep_hdl_ret != NULL, ESP_ERR_INVALID_ARG);
    uint8_t bEndpointAddress = ep_config->bEndpointAddress;
    USBH_CHECK(check_ep_addr(bEndpointAddress), ESP_ERR_INVALID_ARG);

    esp_err_t ret;
    device_t *dev_obj = (device_t *)dev_hdl;
    endpoint_t *ep_obj;
    // Find the endpoint descriptor from the device's current configuration descriptor
    const usb_ep_desc_t *ep_desc = usb_parse_endpoint_descriptor_by_address(dev_obj->constant.config_desc, ep_config->bInterfaceNumber, ep_config->bAlternateSetting, ep_config->bEndpointAddress, NULL);
    if (ep_desc == NULL) {
        return ESP_ERR_NOT_FOUND;
    }
    // Allocate the endpoint object
    ret = endpoint_alloc(dev_obj, ep_desc, ep_config, &ep_obj);
    if (ret != ESP_OK) {
        goto alloc_err;
    }
    // We need to take the mux_lock to access mux_protected members
    xSemaphoreTake(p_usbh_obj->constant.mux_lock, portMAX_DELAY);
    USBH_ENTER_CRITICAL();
    // Check the device's state before we assign a pipe to the allocated endpoint
    if (dev_obj->dynamic.state != USB_DEVICE_STATE_CONFIGURED) {
        USBH_EXIT_CRITICAL();
        ret = ESP_ERR_INVALID_STATE;
        goto dev_state_err;
    }
    USBH_EXIT_CRITICAL();
    // Check if the endpoint has already been allocated
    if (get_ep_from_addr(dev_obj, bEndpointAddress) == NULL) {
        set_ep_from_addr(dev_obj, bEndpointAddress, ep_obj);
        // Write back the endpoint handle
        *ep_hdl_ret = (usbh_ep_handle_t)ep_obj;
        ret = ESP_OK;
    } else {
        // Endpoint is already allocated
        ret = ESP_ERR_INVALID_STATE;
    }
dev_state_err:
    xSemaphoreGive(p_usbh_obj->constant.mux_lock);
    // If the endpoint was not assigned, free it
    if (ret != ESP_OK) {
        endpoint_free(ep_obj);
    }
alloc_err:
    return ret;
}
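
/*
Illustrative sketch (not part of the driver): allocating a non-default endpoint once a device is
open and configured. The endpoint address, interface number, and callback below are hypothetical
and must match an endpoint in the device's active configuration descriptor.

    usbh_ep_config_t ep_config = {
        .bInterfaceNumber = 0,
        .bAlternateSetting = 0,
        .bEndpointAddress = 0x81,   // EP1 IN
        .ep_cb = my_ep_cb,          // Called via epN_pipe_callback() on pipe events
        .ep_cb_arg = NULL,
        .context = NULL,            // Retrievable later via usbh_ep_get_context()
    };
    usbh_ep_handle_t ep_hdl;
    ESP_ERROR_CHECK(usbh_ep_alloc(dev_hdl, &ep_config, &ep_hdl));
*/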

esp_err_t usbh_ep_free(usbh_ep_handle_t ep_hdl)
{
    USBH_CHECK(ep_hdl != NULL, ESP_ERR_INVALID_ARG);
    esp_err_t ret;
    endpoint_t *ep_obj = (endpoint_t *)ep_hdl;
    device_t *dev_obj = (device_t *)ep_obj->constant.dev;
    uint8_t bEndpointAddress = ep_obj->constant.ep_desc->bEndpointAddress;
    // Todo: Check that the EP's underlying pipe is halted before allowing the EP to be freed (IDF-7273)
    // Check that the EP's underlying pipe has no more in-flight URBs
    if (hcd_pipe_get_num_urbs(ep_obj->constant.pipe_hdl) != 0) {
        ret = ESP_ERR_INVALID_STATE;
        goto exit;
    }
    // We need to take the mux_lock to access mux_protected members
    xSemaphoreTake(p_usbh_obj->constant.mux_lock, portMAX_DELAY);
    // Check if the endpoint was allocated on this device
    if (ep_obj == get_ep_from_addr(dev_obj, bEndpointAddress)) {
        // Clear the endpoint from the device's endpoint object list
        set_ep_from_addr(dev_obj, bEndpointAddress, NULL);
        ret = ESP_OK;
    } else {
        ret = ESP_ERR_NOT_FOUND;
    }
    xSemaphoreGive(p_usbh_obj->constant.mux_lock);
    // Finally, we free the endpoint object
    if (ret == ESP_OK) {
        endpoint_free(ep_obj);
    }
exit:
    return ret;
}

esp_err_t usbh_ep_get_handle(usb_device_handle_t dev_hdl, uint8_t bEndpointAddress, usbh_ep_handle_t *ep_hdl_ret)
{
    USBH_CHECK(dev_hdl != NULL && ep_hdl_ret != NULL, ESP_ERR_INVALID_ARG);
    USBH_CHECK(check_ep_addr(bEndpointAddress), ESP_ERR_INVALID_ARG);

    esp_err_t ret;
    device_t *dev_obj = (device_t *)dev_hdl;
    endpoint_t *ep_obj;
    // We need to take the mux_lock to access mux_protected members
    xSemaphoreTake(p_usbh_obj->constant.mux_lock, portMAX_DELAY);
    ep_obj = get_ep_from_addr(dev_obj, bEndpointAddress);
    xSemaphoreGive(p_usbh_obj->constant.mux_lock);
    if (ep_obj) {
        *ep_hdl_ret = (usbh_ep_handle_t)ep_obj;
        ret = ESP_OK;
    } else {
        ret = ESP_ERR_NOT_FOUND;
    }
    return ret;
}

esp_err_t usbh_ep_enqueue_urb(usbh_ep_handle_t ep_hdl, urb_t *urb)
{
    USBH_CHECK(ep_hdl != NULL && urb != NULL, ESP_ERR_INVALID_ARG);
    USBH_CHECK(urb_check_args(urb), ESP_ERR_INVALID_ARG);
    endpoint_t *ep_obj = (endpoint_t *)ep_hdl;
    USBH_CHECK(transfer_check_usb_compliance(&(urb->transfer),
                                             USB_EP_DESC_GET_XFERTYPE(ep_obj->constant.ep_desc),
                                             USB_EP_DESC_GET_MPS(ep_obj->constant.ep_desc),
                                             USB_EP_DESC_GET_EP_DIR(ep_obj->constant.ep_desc)),
               ESP_ERR_INVALID_ARG);
    // Check that the EP's underlying pipe is in the active state before submitting the URB
    if (hcd_pipe_get_state(ep_obj->constant.pipe_hdl) != HCD_PIPE_STATE_ACTIVE) {
        return ESP_ERR_INVALID_STATE;
    }
    // Enqueue the URB to the EP's underlying pipe
    return hcd_urb_enqueue(ep_obj->constant.pipe_hdl, urb);
}

esp_err_t usbh_ep_dequeue_urb(usbh_ep_handle_t ep_hdl, urb_t **urb_ret)
{
    USBH_CHECK(ep_hdl != NULL && urb_ret != NULL, ESP_ERR_INVALID_ARG);
    endpoint_t *ep_obj = (endpoint_t *)ep_hdl;
    // Dequeue a URB from the EP's underlying pipe
    *urb_ret = hcd_urb_dequeue(ep_obj->constant.pipe_hdl);
    return ESP_OK;
}
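
/*
Illustrative sketch (not part of the driver): submitting a URB to an allocated endpoint and
collecting it after the endpoint callback reports completion. The callback wiring is
hypothetical; urb->transfer must satisfy urb_check_args() and transfer_check_usb_compliance()
above, and the usb_transfer_t status/actual_num_bytes fields are assumed from the usb_host
transfer definitions.

    // Submit
    ESP_ERROR_CHECK(usbh_ep_enqueue_urb(ep_hdl, urb));
    // ... later, after my_ep_cb() reported a URB-done event for this endpoint:
    urb_t *done_urb;
    ESP_ERROR_CHECK(usbh_ep_dequeue_urb(ep_hdl, &done_urb));
    if (done_urb != NULL && done_urb->transfer.status == USB_TRANSFER_STATUS_COMPLETED) {
        // done_urb->transfer.actual_num_bytes bytes were transferred
    }
*/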

esp_err_t usbh_ep_command(usbh_ep_handle_t ep_hdl, usbh_ep_cmd_t command)
{
    USBH_CHECK(ep_hdl != NULL, ESP_ERR_INVALID_ARG);
    endpoint_t *ep_obj = (endpoint_t *)ep_hdl;
    // Send the command to the EP's underlying pipe
    return hcd_pipe_command(ep_obj->constant.pipe_hdl, (hcd_pipe_cmd_t)command);
}

void *usbh_ep_get_context(usbh_ep_handle_t ep_hdl)
{
    assert(ep_hdl);
    endpoint_t *ep_obj = (endpoint_t *)ep_hdl;
    return hcd_pipe_get_context(ep_obj->constant.pipe_hdl);
}

// -------------------------------------------------- Hub Functions ----------------------------------------------------

// ------------------- Device Related ----------------------

esp_err_t usbh_hub_is_installed(usbh_hub_req_cb_t hub_req_callback, void *callback_arg)
{
    USBH_CHECK(hub_req_callback != NULL, ESP_ERR_INVALID_ARG);
    USBH_ENTER_CRITICAL();
    // Check that USBH is already installed
    USBH_CHECK_FROM_CRIT(p_usbh_obj != NULL, ESP_ERR_INVALID_STATE);
    // Check that the Hub driver has not been installed yet
    USBH_CHECK_FROM_CRIT(p_usbh_obj->constant.hub_req_cb == NULL, ESP_ERR_INVALID_STATE);
    p_usbh_obj->constant.hub_req_cb = hub_req_callback;
    p_usbh_obj->constant.hub_req_cb_arg = callback_arg;
    USBH_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t usbh_hub_add_dev(hcd_port_handle_t port_hdl, usb_speed_t dev_speed, usb_device_handle_t *new_dev_hdl, hcd_pipe_handle_t *default_pipe_hdl)
{
    // Note: Parent device handle can be NULL if it's connected to the root hub
    USBH_CHECK(new_dev_hdl != NULL, ESP_ERR_INVALID_ARG);
    esp_err_t ret;
    device_t *dev_obj;
    ret = device_alloc(port_hdl, dev_speed, &dev_obj);
    if (ret != ESP_OK) {
        return ret;
    }
    // Write back the device handle
    *new_dev_hdl = (usb_device_handle_t)dev_obj;
    *default_pipe_hdl = dev_obj->constant.default_pipe;
    ret = ESP_OK;
    return ret;
}

esp_err_t usbh_hub_pass_event(usb_device_handle_t dev_hdl, usbh_hub_event_t hub_event)
{
    USBH_CHECK(dev_hdl != NULL, ESP_ERR_INVALID_ARG);
    device_t *dev_obj = (device_t *)dev_hdl;

    bool call_proc_req_cb;
    switch (hub_event) {
    case USBH_HUB_EVENT_PORT_ERROR: {
        USBH_ENTER_CRITICAL();
        dev_obj->dynamic.flags.is_gone = 1;
        // Check if the device can be freed now
        if (dev_obj->dynamic.ref_count == 0) {
            dev_obj->dynamic.flags.waiting_free = 1;
            // Device is already waiting to be freed so none of its EPs will be in use. Can free immediately.
            call_proc_req_cb = _dev_set_actions(dev_obj, DEV_ACTION_FREE_AND_RECOVER);  // Port error occurred so we need to recover it
        } else {
            call_proc_req_cb = _dev_set_actions(dev_obj,
                                                DEV_ACTION_EPn_HALT_FLUSH |
                                                DEV_ACTION_EP0_FLUSH |
                                                DEV_ACTION_EP0_DEQUEUE |
                                                DEV_ACTION_PROP_GONE_EVT);
        }
        USBH_EXIT_CRITICAL();
        break;
    }
    case USBH_HUB_EVENT_PORT_DISABLED: {
        USBH_ENTER_CRITICAL();
        assert(dev_obj->dynamic.ref_count == 0);    // At this stage, the device should have been closed by all users
        dev_obj->dynamic.flags.waiting_free = 1;
        call_proc_req_cb = _dev_set_actions(dev_obj, DEV_ACTION_FREE);
        USBH_EXIT_CRITICAL();
        break;
    }
    default:
        return ESP_ERR_INVALID_ARG;
    }
    if (call_proc_req_cb) {
        p_usbh_obj->constant.proc_req_cb(USB_PROC_REQ_SOURCE_USBH, false, p_usbh_obj->constant.proc_req_cb_arg);
    }
    return ESP_OK;
}

// ----------------- Enumeration Related -------------------

esp_err_t usbh_hub_enum_fill_dev_addr(usb_device_handle_t dev_hdl, uint8_t dev_addr)
{
    USBH_CHECK(dev_hdl != NULL, ESP_ERR_INVALID_ARG);
    device_t *dev_obj = (device_t *)dev_hdl;
    USBH_ENTER_CRITICAL();
    dev_obj->dynamic.state = USB_DEVICE_STATE_ADDRESS;
    USBH_EXIT_CRITICAL();
    // We can modify the info members outside the critical section
    dev_obj->constant.address = dev_addr;
    return ESP_OK;
}

esp_err_t usbh_hub_enum_fill_dev_desc(usb_device_handle_t dev_hdl, const usb_device_desc_t *device_desc)
{
    USBH_CHECK(dev_hdl != NULL && device_desc != NULL, ESP_ERR_INVALID_ARG);
    device_t *dev_obj = (device_t *)dev_hdl;
    // We can modify the info members outside the critical section
    memcpy((usb_device_desc_t *)dev_obj->constant.desc, device_desc, sizeof(usb_device_desc_t));
    return ESP_OK;
}

esp_err_t usbh_hub_enum_fill_config_desc(usb_device_handle_t dev_hdl, const usb_config_desc_t *config_desc_full)
{
    USBH_CHECK(dev_hdl != NULL && config_desc_full != NULL, ESP_ERR_INVALID_ARG);
    device_t *dev_obj = (device_t *)dev_hdl;
    // Allocate memory to store the configuration descriptor
    usb_config_desc_t *config_desc = heap_caps_malloc(config_desc_full->wTotalLength, MALLOC_CAP_DEFAULT);  // Buffer to copy over the full configuration descriptor (wTotalLength)
    if (config_desc == NULL) {
        return ESP_ERR_NO_MEM;
    }
    // Copy the configuration descriptor
    memcpy(config_desc, config_desc_full, config_desc_full->wTotalLength);
    // Assign the config desc to the device object
    assert(dev_obj->constant.config_desc == NULL);
    dev_obj->constant.config_desc = config_desc;
    return ESP_OK;
}

esp_err_t usbh_hub_enum_fill_str_desc(usb_device_handle_t dev_hdl, const usb_str_desc_t *str_desc, int select)
{
    USBH_CHECK(dev_hdl != NULL && str_desc != NULL && (select >= 0 && select < 3), ESP_ERR_INVALID_ARG);
    device_t *dev_obj = (device_t *)dev_hdl;
    // Allocate memory to store the string descriptor
    usb_str_desc_t *str_desc_fill = heap_caps_malloc(str_desc->bLength, MALLOC_CAP_DEFAULT);
    if (str_desc_fill == NULL) {
        return ESP_ERR_NO_MEM;
    }
    // Copy the string descriptor
    memcpy(str_desc_fill, str_desc, str_desc->bLength);
    // Assign the filled string descriptor to the device object
    switch (select) {
    case 0:
        assert(dev_obj->constant.str_desc_manu == NULL);
        dev_obj->constant.str_desc_manu = str_desc_fill;
        break;
    case 1:
        assert(dev_obj->constant.str_desc_product == NULL);
        dev_obj->constant.str_desc_product = str_desc_fill;
        break;
    default:    // 2
        assert(dev_obj->constant.str_desc_ser_num == NULL);
        dev_obj->constant.str_desc_ser_num = str_desc_fill;
        break;
    }
    return ESP_OK;
}

esp_err_t usbh_hub_enum_done(usb_device_handle_t dev_hdl)
{
    USBH_CHECK(dev_hdl != NULL, ESP_ERR_INVALID_ARG);
    device_t *dev_obj = (device_t *)dev_hdl;
    // We need to take the mux_lock to access mux_protected members
    xSemaphoreTake(p_usbh_obj->constant.mux_lock, portMAX_DELAY);
    USBH_ENTER_CRITICAL();
    dev_obj->dynamic.state = USB_DEVICE_STATE_CONFIGURED;
    // Add the device to the list of devices, then trigger a device event
    TAILQ_INSERT_TAIL(&p_usbh_obj->dynamic.devs_idle_tailq, dev_obj, dynamic.tailq_entry);  // Add it to the idle device list first
    bool call_proc_req_cb = _dev_set_actions(dev_obj, DEV_ACTION_PROP_NEW);
    USBH_EXIT_CRITICAL();
    p_usbh_obj->mux_protected.num_device++;
    xSemaphoreGive(p_usbh_obj->constant.mux_lock);
    // Update the EP0's underlying pipe's callback
    ESP_ERROR_CHECK(hcd_pipe_update_callback(dev_obj->constant.default_pipe, ep0_pipe_callback, (void *)dev_obj));
    // Call the processing request callback
    if (call_proc_req_cb) {
        p_usbh_obj->constant.proc_req_cb(USB_PROC_REQ_SOURCE_USBH, false, p_usbh_obj->constant.proc_req_cb_arg);
    }
    return ESP_OK;
}

esp_err_t usbh_hub_enum_failed(usb_device_handle_t dev_hdl)
{
    USBH_CHECK(dev_hdl != NULL, ESP_ERR_INVALID_ARG);
    device_t *dev_obj = (device_t *)dev_hdl;
    device_free(dev_obj);
    return ESP_OK;
}