usbh.c

  1. /*
  2. * SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. #include "sdkconfig.h"
  7. #include <stdint.h>
  8. #include <string.h>
  9. #include <assert.h>
  10. #include <sys/queue.h>
  11. #include "freertos/FreeRTOS.h"
  12. #include "freertos/portmacro.h"
  13. #include "freertos/task.h"
  14. #include "freertos/semphr.h"
  15. #include "esp_err.h"
  16. #include "esp_log.h"
  17. #include "esp_heap_caps.h"
  18. #include "hcd.h"
  19. #include "usbh.h"
  20. #include "usb/usb_helpers.h"
  21. #include "usb/usb_types_ch9.h"
  22. #define EP_NUM_MIN 1 // The smallest possible non-default endpoint number
  23. #define EP_NUM_MAX 16 // The largest possible non-default endpoint number
  24. #define NUM_NON_DEFAULT_EP ((EP_NUM_MAX - 1) * 2) // The total number of non-default endpoints a device can have.
  25. // Device action flags. LISTED IN THE ORDER THEY SHOULD BE HANDLED within usbh_process(). Some actions are mutually exclusive
  26. typedef enum {
  27. DEV_ACTION_EPn_HALT_FLUSH = (1 << 0), // Halt all non-default endpoints then flush them (called after a device is gone)
  28. DEV_ACTION_EP0_FLUSH = (1 << 1), // Retire all URBs submitted to EP0
  29. DEV_ACTION_EP0_DEQUEUE = (1 << 2), // Dequeue all URBs from EP0
  30. DEV_ACTION_EP0_CLEAR = (1 << 3), // Move EP0 to the active state
  31. DEV_ACTION_PROP_GONE_EVT = (1 << 4), // Propagate a USBH_EVENT_DEV_GONE event
  32. DEV_ACTION_FREE_AND_RECOVER = (1 << 5), // Free the device object, but send a USBH_HUB_REQ_PORT_RECOVER request afterwards.
  33. DEV_ACTION_FREE = (1 << 6), // Free the device object
  34. DEV_ACTION_PORT_DISABLE = (1 << 7), // Request the hub driver to disable the port of the device
  35. DEV_ACTION_PROP_NEW = (1 << 8), // Propagate a USBH_EVENT_DEV_NEW event
  36. } dev_action_t;
  37. typedef struct device_s device_t;
  38. typedef struct {
  39. struct {
  40. usbh_ep_cb_t ep_cb;
  41. void *ep_cb_arg;
  42. hcd_pipe_handle_t pipe_hdl;
  43. device_t *dev; // Pointer to the device object that this endpoint is contained in
  44. const usb_ep_desc_t *ep_desc; // This just stores a pointer to an endpoint descriptor inside the device's "config_desc"
  45. } constant;
  46. } endpoint_t;
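/*
Note on locking: each device object groups its members by the synchronization they require.
- "dynamic" members can be accessed from task and ISR context and require the USBH spinlock (USBH_ENTER_CRITICAL*)
- "mux_protected" members are accessed from task context only and require the USBH mux_lock mutex
- "constant" members are set once (at allocation or during enumeration) and can then be read without locking
*/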
  47. struct device_s {
  48. // Dynamic members require a critical section
  49. struct {
  50. TAILQ_ENTRY(device_s) tailq_entry;
  51. union {
  52. struct {
  53. uint32_t in_pending_list: 1;
  54. uint32_t is_gone: 1;
  55. uint32_t waiting_close: 1;
  56. uint32_t waiting_port_disable: 1;
  57. uint32_t waiting_free: 1;
  58. uint32_t reserved27: 27;
  59. };
  60. uint32_t val;
  61. } flags;
  62. uint32_t action_flags;
  63. int num_ctrl_xfers_inflight;
  64. usb_device_state_t state;
  65. uint32_t ref_count;
  66. } dynamic;
  67. // Mux protected members must be protected by the USBH mux_lock when accessed
  68. struct {
  69. /*
  70. - Endpoint object pointers for each possible non-default endpoint
  71. - All OUT EPs are listed before IN EPs (i.e., EP_NUM_MIN OUT ... EP_NUM_MAX OUT ... EP_NUM_MIN IN ... EP_NUM_MAX IN)
  72. */
  73. endpoint_t *endpoints[NUM_NON_DEFAULT_EP];
  74. } mux_protected;
  75. // Constant members do not change after device allocation and enumeration thus do not require a critical section
  76. struct {
  77. hcd_pipe_handle_t default_pipe;
  78. hcd_port_handle_t port_hdl;
  79. uint8_t address;
  80. usb_speed_t speed;
  81. const usb_device_desc_t *desc;
  82. const usb_config_desc_t *config_desc;
  83. const usb_str_desc_t *str_desc_manu;
  84. const usb_str_desc_t *str_desc_product;
  85. const usb_str_desc_t *str_desc_ser_num;
  86. } constant;
  87. };
  88. typedef struct {
  89. // Dynamic members require a critical section
  90. struct {
  91. TAILQ_HEAD(tailhead_devs, device_s) devs_idle_tailq; // Tailq of all enumerated and configured devices
  92. TAILQ_HEAD(tailhead_devs_cb, device_s) devs_pending_tailq; // Tailq of devices that need to have their cb called
  93. } dynamic;
  94. // Mux protected members must be protected by the USBH mux_lock when accessed
  95. struct {
  96. uint8_t num_device; // Number of enumerated devices
  97. } mux_protected;
  98. // Constant members do not change after installation thus do not require a critical section
  99. struct {
  100. usb_proc_req_cb_t proc_req_cb;
  101. void *proc_req_cb_arg;
  102. usbh_hub_req_cb_t hub_req_cb;
  103. void *hub_req_cb_arg;
  104. usbh_event_cb_t event_cb;
  105. void *event_cb_arg;
  106. usbh_ctrl_xfer_cb_t ctrl_xfer_cb;
  107. void *ctrl_xfer_cb_arg;
  108. SemaphoreHandle_t mux_lock;
  109. } constant;
  110. } usbh_t;
  111. static usbh_t *p_usbh_obj = NULL;
  112. static portMUX_TYPE usbh_lock = portMUX_INITIALIZER_UNLOCKED;
  113. const char *USBH_TAG = "USBH";
  114. #define USBH_ENTER_CRITICAL_ISR() portENTER_CRITICAL_ISR(&usbh_lock)
  115. #define USBH_EXIT_CRITICAL_ISR() portEXIT_CRITICAL_ISR(&usbh_lock)
  116. #define USBH_ENTER_CRITICAL() portENTER_CRITICAL(&usbh_lock)
  117. #define USBH_EXIT_CRITICAL() portEXIT_CRITICAL(&usbh_lock)
  118. #define USBH_ENTER_CRITICAL_SAFE() portENTER_CRITICAL_SAFE(&usbh_lock)
  119. #define USBH_EXIT_CRITICAL_SAFE() portEXIT_CRITICAL_SAFE(&usbh_lock)
  120. #define USBH_CHECK(cond, ret_val) ({ \
  121. if (!(cond)) { \
  122. return (ret_val); \
  123. } \
  124. })
  125. #define USBH_CHECK_FROM_CRIT(cond, ret_val) ({ \
  126. if (!(cond)) { \
  127. USBH_EXIT_CRITICAL(); \
  128. return ret_val; \
  129. } \
  130. })
  131. // ------------------------------------------------- Forward Declare ---------------------------------------------------
  132. static bool ep0_pipe_callback(hcd_pipe_handle_t pipe_hdl, hcd_pipe_event_t pipe_event, void *user_arg, bool in_isr);
  133. static bool epN_pipe_callback(hcd_pipe_handle_t pipe_hdl, hcd_pipe_event_t pipe_event, void *user_arg, bool in_isr);
  134. static bool _dev_set_actions(device_t *dev_obj, uint32_t action_flags);
  135. // ----------------------------------------------------- Helpers -------------------------------------------------------
  136. static inline bool check_ep_addr(uint8_t bEndpointAddress)
  137. {
  138. /*
  139. Check that the bEndpointAddress is valid
  140. - Must be <= EP_NUM_MAX (e.g., 16)
  141. - Must be >= EP_NUM_MIN (e.g., 1).
  142. - EP0 is owned/managed by USBH, thus must never be directly addressed by users (see USB 2.0 section 10.5.1.2)
  143. */
  144. uint8_t addr = bEndpointAddress & USB_B_ENDPOINT_ADDRESS_EP_NUM_MASK;
  145. return (addr >= EP_NUM_MIN) && (addr <= EP_NUM_MAX);
  146. }
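/*
Endpoint list index mapping (with EP_NUM_MIN = 1 and EP_NUM_MAX = 16 as defined above):
- OUT endpoints occupy indexes 0 to (EP_NUM_MAX - EP_NUM_MIN - 1), e.g., EP 0x02 (EP2 OUT) maps to index 1
- IN endpoints follow, e.g., EP 0x81 (EP1 IN) maps to index 0 + (EP_NUM_MAX - EP_NUM_MIN) = 15
*/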
  147. static endpoint_t *get_ep_from_addr(device_t *dev_obj, uint8_t bEndpointAddress)
  148. {
  149. /*
  150. CALLER IS RESPONSIBLE FOR TAKING THE mux_lock
  151. */
  152. // Calculate index to the device's endpoint object list
  153. int index;
  154. // EP_NUM_MIN should map to an index of 0
  155. index = (bEndpointAddress & USB_B_ENDPOINT_ADDRESS_EP_NUM_MASK) - EP_NUM_MIN;
  156. assert(index >= 0); // Endpoint address is not supported
  157. if (bEndpointAddress & USB_B_ENDPOINT_ADDRESS_EP_DIR_MASK) {
  158. // OUT EPs are listed before IN EPs, so add an offset
  159. index += (EP_NUM_MAX - EP_NUM_MIN);
  160. }
  161. return dev_obj->mux_protected.endpoints[index];
  162. }
  163. static inline void set_ep_from_addr(device_t *dev_obj, uint8_t bEndpointAddress, endpoint_t *ep_obj)
  164. {
  165. /*
  166. CALLER IS RESPONSIBLE FOR TAKING THE mux_lock
  167. */
  168. // Calculate index to the device's endpoint object list
  169. int index;
  170. // EP_NUM_MIN should map to an index of 0
  171. index = (bEndpointAddress & USB_B_ENDPOINT_ADDRESS_EP_NUM_MASK) - EP_NUM_MIN;
  172. assert(index >= 0); // Endpoint address is not supported
  173. if (bEndpointAddress & USB_B_ENDPOINT_ADDRESS_EP_DIR_MASK) {
  174. // OUT EPs are listed before IN EPs, so add an offset
  175. index += (EP_NUM_MAX - EP_NUM_MIN);
  176. }
  177. dev_obj->mux_protected.endpoints[index] = ep_obj;
  178. }
  179. static bool urb_check_args(urb_t *urb)
  180. {
  181. if (urb->transfer.callback == NULL) {
  182. ESP_LOGE(USBH_TAG, "usb_transfer_t callback is NULL");
  183. return false;
  184. }
  185. if (urb->transfer.num_bytes > urb->transfer.data_buffer_size) {
  186. ESP_LOGE(USBH_TAG, "usb_transfer_t num_bytes > data_buffer_size");
  187. return false;
  188. }
  189. return true;
  190. }
  191. static bool transfer_check_usb_compliance(usb_transfer_t *transfer, usb_transfer_type_t type, int mps, bool is_in)
  192. {
  193. if (type == USB_TRANSFER_TYPE_CTRL) {
  194. // Check that num_bytes and wLength are set correctly
  195. usb_setup_packet_t *setup_pkt = (usb_setup_packet_t *)transfer->data_buffer;
  196. if (transfer->num_bytes != sizeof(usb_setup_packet_t) + setup_pkt->wLength) {
  197. ESP_LOGE(USBH_TAG, "usb_transfer_t num_bytes and usb_setup_packet_t wLength mismatch");
  198. return false;
  199. }
  200. } else if (type == USB_TRANSFER_TYPE_ISOCHRONOUS) {
  201. // Check that there is at least one isochronous packet descriptor
  202. if (transfer->num_isoc_packets <= 0) {
  203. ESP_LOGE(USBH_TAG, "usb_transfer_t num_isoc_packets is 0");
  204. return false;
  205. }
  206. // Check that the sum of all packet lengths adds up to the transfer length
  207. // If IN, check that each packet length is integer multiple of MPS
  208. int total_num_bytes = 0;
  209. bool mod_mps_all_zero = true;
  210. for (int i = 0; i < transfer->num_isoc_packets; i++) {
  211. total_num_bytes += transfer->isoc_packet_desc[i].num_bytes;
  212. if (transfer->isoc_packet_desc[i].num_bytes % mps != 0) {
  213. mod_mps_all_zero = false;
  214. }
  215. }
  216. if (transfer->num_bytes != total_num_bytes) {
  217. ESP_LOGE(USBH_TAG, "ISOC transfer num_bytes != num_bytes of all packets");
  218. return false;
  219. }
  220. if (is_in && !mod_mps_all_zero) {
  221. ESP_LOGE(USBH_TAG, "ISOC IN num_bytes not integer multiple of MPS");
  222. return false;
  223. }
  224. } else {
  225. // Check that IN transfers are integer multiple of MPS
  226. if (is_in && (transfer->num_bytes % mps != 0)) {
  227. ESP_LOGE(USBH_TAG, "IN transfer num_bytes not integer multiple of MPS");
  228. return false;
  229. }
  230. }
  231. return true;
  232. }
  233. // --------------------------------------------------- Allocation ------------------------------------------------------
  234. static esp_err_t endpoint_alloc(device_t *dev_obj, const usb_ep_desc_t *ep_desc, usbh_ep_config_t *ep_config, endpoint_t **ep_obj_ret)
  235. {
  236. esp_err_t ret;
  237. endpoint_t *ep_obj;
  238. hcd_pipe_handle_t pipe_hdl;
  239. ep_obj = heap_caps_calloc(1, sizeof(endpoint_t), MALLOC_CAP_DEFAULT);
  240. if (ep_obj == NULL) {
  241. return ESP_ERR_NO_MEM;
  242. }
  243. // Allocate the EP's underlying pipe
  244. hcd_pipe_config_t pipe_config = {
  245. .callback = epN_pipe_callback,
  246. .callback_arg = (void *)ep_obj,
  247. .context = ep_config->context,
  248. .ep_desc = ep_desc,
  249. .dev_speed = dev_obj->constant.speed,
  250. .dev_addr = dev_obj->constant.address,
  251. };
  252. ret = hcd_pipe_alloc(dev_obj->constant.port_hdl, &pipe_config, &pipe_hdl);
  253. if (ret != ESP_OK) {
  254. goto pipe_err;
  255. }
  256. // Initialize the endpoint object
  257. ep_obj->constant.pipe_hdl = pipe_hdl;
  258. ep_obj->constant.ep_cb = ep_config->ep_cb;
  259. ep_obj->constant.ep_cb_arg = ep_config->ep_cb_arg;
  260. ep_obj->constant.dev = dev_obj;
  261. ep_obj->constant.ep_desc = ep_desc;
  262. // Return the endpoint object
  263. *ep_obj_ret = ep_obj;
  264. ret = ESP_OK;
  265. return ret;
  266. pipe_err:
  267. heap_caps_free(ep_obj);
  268. return ret;
  269. }
  270. static void endpoint_free(endpoint_t *ep_obj)
  271. {
  272. if (ep_obj == NULL) {
  273. return;
  274. }
  275. // Deallocate the EP's underlying pipe
  276. ESP_ERROR_CHECK(hcd_pipe_free(ep_obj->constant.pipe_hdl));
  277. // Free the heap object
  278. heap_caps_free(ep_obj);
  279. }
  280. static esp_err_t device_alloc(hcd_port_handle_t port_hdl, usb_speed_t speed, device_t **dev_obj_ret)
  281. {
  282. esp_err_t ret;
  283. device_t *dev_obj = heap_caps_calloc(1, sizeof(device_t), MALLOC_CAP_DEFAULT);
  284. usb_device_desc_t *dev_desc = heap_caps_calloc(1, sizeof(usb_device_desc_t), MALLOC_CAP_DEFAULT);
  285. if (dev_obj == NULL || dev_desc == NULL) {
  286. ret = ESP_ERR_NO_MEM;
  287. goto err;
  288. }
  289. // Allocate a pipe for EP0. We set the pipe callback to NULL for now
  290. hcd_pipe_config_t pipe_config = {
  291. .callback = NULL,
  292. .callback_arg = NULL,
  293. .context = (void *)dev_obj,
  294. .ep_desc = NULL, // No endpoint descriptor means we're allocating a pipe for EP0
  295. .dev_speed = speed,
  296. .dev_addr = 0,
  297. };
  298. hcd_pipe_handle_t default_pipe_hdl;
  299. ret = hcd_pipe_alloc(port_hdl, &pipe_config, &default_pipe_hdl);
  300. if (ret != ESP_OK) {
  301. goto err;
  302. }
  303. // Initialize device object
  304. dev_obj->dynamic.state = USB_DEVICE_STATE_DEFAULT;
  305. dev_obj->constant.default_pipe = default_pipe_hdl;
  306. dev_obj->constant.port_hdl = port_hdl;
  307. // Note: dev_obj->constant.address is assigned later during enumeration
  308. dev_obj->constant.speed = speed;
  309. dev_obj->constant.desc = dev_desc;
  310. *dev_obj_ret = dev_obj;
  311. ret = ESP_OK;
  312. return ret;
  313. err:
  314. heap_caps_free(dev_desc);
  315. heap_caps_free(dev_obj);
  316. return ret;
  317. }
  318. static void device_free(device_t *dev_obj)
  319. {
  320. if (dev_obj == NULL) {
  321. return;
  322. }
  323. // Configuration might not have been allocated (in case of early enumeration failure)
  324. if (dev_obj->constant.config_desc) {
  325. heap_caps_free((usb_config_desc_t *)dev_obj->constant.config_desc);
  326. }
  327. // String descriptors might not have been allocated (in case of early enumeration failure)
  328. if (dev_obj->constant.str_desc_manu) {
  329. heap_caps_free((usb_str_desc_t *)dev_obj->constant.str_desc_manu);
  330. }
  331. if (dev_obj->constant.str_desc_product) {
  332. heap_caps_free((usb_str_desc_t *)dev_obj->constant.str_desc_product);
  333. }
  334. if (dev_obj->constant.str_desc_ser_num) {
  335. heap_caps_free((usb_str_desc_t *)dev_obj->constant.str_desc_ser_num);
  336. }
  337. heap_caps_free((usb_device_desc_t *)dev_obj->constant.desc);
  338. ESP_ERROR_CHECK(hcd_pipe_free(dev_obj->constant.default_pipe));
  339. heap_caps_free(dev_obj);
  340. }
  341. // ---------------------------------------------------- Callbacks ------------------------------------------------------
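/*
EP0 pipe event callback. Note: this may be invoked from the HCD's interrupt context (in_isr == true),
so it only records action flags via _dev_set_actions() and defers the actual handling to usbh_process().
*/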
  342. static bool ep0_pipe_callback(hcd_pipe_handle_t pipe_hdl, hcd_pipe_event_t pipe_event, void *user_arg, bool in_isr)
  343. {
  344. uint32_t action_flags;
  345. device_t *dev_obj = (device_t *)user_arg;
  346. switch (pipe_event) {
  347. case HCD_PIPE_EVENT_URB_DONE:
  348. // A control transfer completed on EP0's pipe. We need to dequeue it
  349. action_flags = DEV_ACTION_EP0_DEQUEUE;
  350. break;
  351. case HCD_PIPE_EVENT_ERROR_XFER:
  352. case HCD_PIPE_EVENT_ERROR_URB_NOT_AVAIL:
  353. case HCD_PIPE_EVENT_ERROR_OVERFLOW:
  354. // EP0's pipe has encountered an error. We need to retire all URBs, dequeue them, then make the pipe active again
  355. action_flags = DEV_ACTION_EP0_FLUSH |
  356. DEV_ACTION_EP0_DEQUEUE |
  357. DEV_ACTION_EP0_CLEAR;
  358. if (in_isr) {
  359. ESP_EARLY_LOGE(USBH_TAG, "Dev %d EP 0 Error", dev_obj->constant.address);
  360. } else {
  361. ESP_LOGE(USBH_TAG, "Dev %d EP 0 Error", dev_obj->constant.address);
  362. }
  363. break;
  364. case HCD_PIPE_EVENT_ERROR_STALL:
  365. // EP0's pipe encountered a "protocol stall". We just need to dequeue URBs then make the pipe active again
  366. action_flags = DEV_ACTION_EP0_DEQUEUE | DEV_ACTION_EP0_CLEAR;
  367. if (in_isr) {
  368. ESP_EARLY_LOGE(USBH_TAG, "Dev %d EP 0 STALL", dev_obj->constant.address);
  369. } else {
  370. ESP_LOGE(USBH_TAG, "Dev %d EP 0 STALL", dev_obj->constant.address);
  371. }
  372. break;
  373. default:
  374. action_flags = 0;
  375. break;
  376. }
  377. USBH_ENTER_CRITICAL_SAFE();
  378. bool call_proc_req_cb = _dev_set_actions(dev_obj, action_flags);
  379. USBH_EXIT_CRITICAL_SAFE();
  380. bool yield = false;
  381. if (call_proc_req_cb) {
  382. yield = p_usbh_obj->constant.proc_req_cb(USB_PROC_REQ_SOURCE_USBH, in_isr, p_usbh_obj->constant.proc_req_cb_arg);
  383. }
  384. return yield;
  385. }
  386. static bool epN_pipe_callback(hcd_pipe_handle_t pipe_hdl, hcd_pipe_event_t pipe_event, void *user_arg, bool in_isr)
  387. {
  388. endpoint_t *ep_obj = (endpoint_t *)user_arg;
  389. return ep_obj->constant.ep_cb((usbh_ep_handle_t)ep_obj,
  390. (usbh_ep_event_t)pipe_event,
  391. ep_obj->constant.ep_cb_arg,
  392. in_isr);
  393. }
  394. // -------------------------------------------------- Event Related ----------------------------------------------------
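/*
Sets a device's action flags and moves the device from the idle list to the pending list if necessary.
Caller is responsible for entering the USBH critical section before calling this function.
Returns true if the processing request callback needs to be called.
*/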
  395. static bool _dev_set_actions(device_t *dev_obj, uint32_t action_flags)
  396. {
  397. if (action_flags == 0) {
  398. return false;
  399. }
  400. bool call_proc_req_cb;
  401. // Check if device is already on the callback list
  402. if (!dev_obj->dynamic.flags.in_pending_list) {
  403. // Move device from idle device list to callback device list
  404. TAILQ_REMOVE(&p_usbh_obj->dynamic.devs_idle_tailq, dev_obj, dynamic.tailq_entry);
  405. TAILQ_INSERT_TAIL(&p_usbh_obj->dynamic.devs_pending_tailq, dev_obj, dynamic.tailq_entry);
  406. dev_obj->dynamic.action_flags |= action_flags;
  407. dev_obj->dynamic.flags.in_pending_list = 1;
  408. call_proc_req_cb = true;
  409. } else {
  410. call_proc_req_cb = false;
  411. }
  412. return call_proc_req_cb;
  413. }
  414. static inline void handle_epn_halt_flush(device_t *dev_obj)
  415. {
  416. // We need to take the mux_lock to access mux_protected members
  417. xSemaphoreTake(p_usbh_obj->constant.mux_lock, portMAX_DELAY);
  418. // Halt then flush all non-default EPs
  419. for (int i = 0; i < NUM_NON_DEFAULT_EP; i++) {
  420. if (dev_obj->mux_protected.endpoints[i] != NULL) {
  421. ESP_ERROR_CHECK(hcd_pipe_command(dev_obj->mux_protected.endpoints[i]->constant.pipe_hdl, HCD_PIPE_CMD_HALT));
  422. ESP_ERROR_CHECK(hcd_pipe_command(dev_obj->mux_protected.endpoints[i]->constant.pipe_hdl, HCD_PIPE_CMD_FLUSH));
  423. }
  424. }
  425. xSemaphoreGive(p_usbh_obj->constant.mux_lock);
  426. }
  427. static inline void handle_ep0_flush(device_t *dev_obj)
  428. {
  429. ESP_ERROR_CHECK(hcd_pipe_command(dev_obj->constant.default_pipe, HCD_PIPE_CMD_HALT));
  430. ESP_ERROR_CHECK(hcd_pipe_command(dev_obj->constant.default_pipe, HCD_PIPE_CMD_FLUSH));
  431. }
  432. static inline void handle_ep0_dequeue(device_t *dev_obj)
  433. {
  434. // Empty URBs from EP0's pipe and call the control transfer callback
  435. ESP_LOGD(USBH_TAG, "Default pipe device %d", dev_obj->constant.address);
  436. int num_urbs = 0;
  437. urb_t *urb = hcd_urb_dequeue(dev_obj->constant.default_pipe);
  438. while (urb != NULL) {
  439. num_urbs++;
  440. p_usbh_obj->constant.ctrl_xfer_cb((usb_device_handle_t)dev_obj, urb, p_usbh_obj->constant.ctrl_xfer_cb_arg);
  441. urb = hcd_urb_dequeue(dev_obj->constant.default_pipe);
  442. }
  443. USBH_ENTER_CRITICAL();
  444. dev_obj->dynamic.num_ctrl_xfers_inflight -= num_urbs;
  445. USBH_EXIT_CRITICAL();
  446. }
  447. static inline void handle_ep0_clear(device_t *dev_obj)
  448. {
  449. // We allow the pipe command to fail just in case the pipe becomes invalid mid command
  450. hcd_pipe_command(dev_obj->constant.default_pipe, HCD_PIPE_CMD_CLEAR);
  451. }
  452. static inline void handle_prop_gone_evt(device_t *dev_obj)
  453. {
  454. // Flush EP0's pipe. Then propagate a USBH_EVENT_DEV_GONE event
  455. ESP_LOGE(USBH_TAG, "Device %d gone", dev_obj->constant.address);
  456. p_usbh_obj->constant.event_cb((usb_device_handle_t)dev_obj, USBH_EVENT_DEV_GONE, p_usbh_obj->constant.event_cb_arg);
  457. }
  458. static void handle_free_and_recover(device_t *dev_obj, bool recover_port)
  459. {
  460. // Cache a copy of the port handle as we are about to free the device object
  461. bool all_free;
  462. hcd_port_handle_t port_hdl = dev_obj->constant.port_hdl;
  463. ESP_LOGD(USBH_TAG, "Freeing device %d", dev_obj->constant.address);
  464. // We need to take the mux_lock to access mux_protected members
  465. xSemaphoreTake(p_usbh_obj->constant.mux_lock, portMAX_DELAY);
  466. USBH_ENTER_CRITICAL();
  467. // Remove the device object from its containing list
  468. if (dev_obj->dynamic.flags.in_pending_list) {
  469. dev_obj->dynamic.flags.in_pending_list = 0;
  470. TAILQ_REMOVE(&p_usbh_obj->dynamic.devs_pending_tailq, dev_obj, dynamic.tailq_entry);
  471. } else {
  472. TAILQ_REMOVE(&p_usbh_obj->dynamic.devs_idle_tailq, dev_obj, dynamic.tailq_entry);
  473. }
  474. USBH_EXIT_CRITICAL();
  475. p_usbh_obj->mux_protected.num_device--;
  476. all_free = (p_usbh_obj->mux_protected.num_device == 0);
  477. xSemaphoreGive(p_usbh_obj->constant.mux_lock);
  478. device_free(dev_obj);
  479. // If all devices have been freed, propagate a USBH_EVENT_DEV_ALL_FREE event
  480. if (all_free) {
  481. ESP_LOGD(USBH_TAG, "Device all free");
  482. p_usbh_obj->constant.event_cb((usb_device_handle_t)NULL, USBH_EVENT_DEV_ALL_FREE, p_usbh_obj->constant.event_cb_arg);
  483. }
  484. // Check if we need to recover the device's port
  485. if (recover_port) {
  486. p_usbh_obj->constant.hub_req_cb(port_hdl, USBH_HUB_REQ_PORT_RECOVER, p_usbh_obj->constant.hub_req_cb_arg);
  487. }
  488. }
  489. static inline void handle_port_disable(device_t *dev_obj)
  490. {
  491. // Request that the HUB disables this device's port
  492. ESP_LOGD(USBH_TAG, "Disable device port %d", dev_obj->constant.address);
  493. p_usbh_obj->constant.hub_req_cb(dev_obj->constant.port_hdl, USBH_HUB_REQ_PORT_DISABLE, p_usbh_obj->constant.hub_req_cb_arg);
  494. }
  495. static inline void handle_prop_new_evt(device_t *dev_obj)
  496. {
  497. ESP_LOGD(USBH_TAG, "New device %d", dev_obj->constant.address);
  498. p_usbh_obj->constant.event_cb((usb_device_handle_t)dev_obj, USBH_EVENT_DEV_NEW, p_usbh_obj->constant.event_cb_arg);
  499. }
  500. // ------------------------------------------------- USBH Functions ----------------------------------------------------
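/*
Installs the USBH driver: allocates the USBH object, stores the user provided callbacks, and publishes
the object pointer. Must be called before any other USBH function is used.
*/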
  501. esp_err_t usbh_install(const usbh_config_t *usbh_config)
  502. {
  503. USBH_CHECK(usbh_config != NULL, ESP_ERR_INVALID_ARG);
  504. USBH_ENTER_CRITICAL();
  505. USBH_CHECK_FROM_CRIT(p_usbh_obj == NULL, ESP_ERR_INVALID_STATE);
  506. USBH_EXIT_CRITICAL();
  507. esp_err_t ret;
  508. usbh_t *usbh_obj = heap_caps_calloc(1, sizeof(usbh_t), MALLOC_CAP_DEFAULT);
  509. SemaphoreHandle_t mux_lock = xSemaphoreCreateMutex();
  510. if (usbh_obj == NULL || mux_lock == NULL) {
  511. ret = ESP_ERR_NO_MEM;
  512. goto err;
  513. }
  514. // Initialize USBH object
  515. TAILQ_INIT(&usbh_obj->dynamic.devs_idle_tailq);
  516. TAILQ_INIT(&usbh_obj->dynamic.devs_pending_tailq);
  517. usbh_obj->constant.proc_req_cb = usbh_config->proc_req_cb;
  518. usbh_obj->constant.proc_req_cb_arg = usbh_config->proc_req_cb_arg;
  519. usbh_obj->constant.event_cb = usbh_config->event_cb;
  520. usbh_obj->constant.event_cb_arg = usbh_config->event_cb_arg;
  521. usbh_obj->constant.ctrl_xfer_cb = usbh_config->ctrl_xfer_cb;
  522. usbh_obj->constant.ctrl_xfer_cb_arg = usbh_config->ctrl_xfer_cb_arg;
  523. usbh_obj->constant.mux_lock = mux_lock;
  524. // Assign USBH object pointer
  525. USBH_ENTER_CRITICAL();
  526. if (p_usbh_obj != NULL) {
  527. USBH_EXIT_CRITICAL();
  528. ret = ESP_ERR_INVALID_STATE;
  529. goto err;
  530. }
  531. p_usbh_obj = usbh_obj;
  532. USBH_EXIT_CRITICAL();
  533. ret = ESP_OK;
  534. return ret;
  535. err:
  536. if (mux_lock != NULL) {
  537. vSemaphoreDelete(mux_lock);
  538. }
  539. heap_caps_free(usbh_obj);
  540. return ret;
  541. }
  542. esp_err_t usbh_uninstall(void)
  543. {
  544. // Check that USBH is in a state to be uninstalled
  545. USBH_ENTER_CRITICAL();
  546. USBH_CHECK_FROM_CRIT(p_usbh_obj != NULL, ESP_ERR_INVALID_STATE);
  547. usbh_t *usbh_obj = p_usbh_obj;
  548. USBH_EXIT_CRITICAL();
  549. esp_err_t ret;
  550. // We need to take the mux_lock to access mux_protected members
  551. xSemaphoreTake(usbh_obj->constant.mux_lock, portMAX_DELAY);
  552. if (p_usbh_obj->mux_protected.num_device > 0) {
  553. // There are still devices allocated. Can't uninstall right now.
  554. ret = ESP_ERR_INVALID_STATE;
  555. goto exit;
  556. }
  557. // Check again if we can uninstall
  558. USBH_ENTER_CRITICAL();
  559. assert(p_usbh_obj == usbh_obj);
  560. p_usbh_obj = NULL;
  561. USBH_EXIT_CRITICAL();
  562. xSemaphoreGive(usbh_obj->constant.mux_lock);
  563. // Free resources
  564. vSemaphoreDelete(usbh_obj->constant.mux_lock);
  565. heap_caps_free(usbh_obj);
  566. ret = ESP_OK;
  567. return ret;
  568. exit:
  569. xSemaphoreGive(p_usbh_obj->constant.mux_lock);
  570. return ret;
  571. }
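/*
Processes all pending device actions. This function is expected to be called (typically from the USB Host
Library's processing task) whenever the proc_req_cb requests processing on behalf of USBH.
*/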
  572. esp_err_t usbh_process(void)
  573. {
  574. USBH_ENTER_CRITICAL();
  575. USBH_CHECK_FROM_CRIT(p_usbh_obj != NULL, ESP_ERR_INVALID_STATE);
  576. // Keep processing until all devices with pending events have been handled
  577. while (!TAILQ_EMPTY(&p_usbh_obj->dynamic.devs_pending_tailq)) {
  578. // Move the device back into the idle device list,
  579. device_t *dev_obj = TAILQ_FIRST(&p_usbh_obj->dynamic.devs_pending_tailq);
  580. TAILQ_REMOVE(&p_usbh_obj->dynamic.devs_pending_tailq, dev_obj, dynamic.tailq_entry);
  581. TAILQ_INSERT_TAIL(&p_usbh_obj->dynamic.devs_idle_tailq, dev_obj, dynamic.tailq_entry);
  582. // Clear the device's flags
  583. uint32_t action_flags = dev_obj->dynamic.action_flags;
  584. dev_obj->dynamic.action_flags = 0;
  585. dev_obj->dynamic.flags.in_pending_list = 0;
  586. /* ---------------------------------------------------------------------
  587. Exit critical section to handle device action flags in their listed order
  588. --------------------------------------------------------------------- */
  589. USBH_EXIT_CRITICAL();
  590. ESP_LOGD(USBH_TAG, "Processing actions 0x%"PRIx32"", action_flags);
  591. // Sanity check. If the device is being freed, there must not be any other action flags set
  592. assert(!(action_flags & DEV_ACTION_FREE) || action_flags == DEV_ACTION_FREE);
  593. if (action_flags & DEV_ACTION_EPn_HALT_FLUSH) {
  594. handle_epn_halt_flush(dev_obj);
  595. }
  596. if (action_flags & DEV_ACTION_EP0_FLUSH) {
  597. handle_ep0_flush(dev_obj);
  598. }
  599. if (action_flags & DEV_ACTION_EP0_DEQUEUE) {
  600. handle_ep0_dequeue(dev_obj);
  601. }
  602. if (action_flags & DEV_ACTION_EP0_CLEAR) {
  603. handle_ep0_clear(dev_obj);
  604. }
  605. if (action_flags & DEV_ACTION_PROP_GONE_EVT) {
  606. handle_prop_gone_evt(dev_obj);
  607. }
  608. /*
  609. Note: We make these action flags mutually exclusive in case they happen in rapid succession. They are handled
  610. in the order of precedence
  611. For example
  612. - New device event is requested followed immediately by a disconnection
  613. - Port disable requested followed immediately by a disconnection
  614. */
  615. if (action_flags & DEV_ACTION_FREE_AND_RECOVER) {
  616. handle_free_and_recover(dev_obj, true);
  617. } else if (action_flags & DEV_ACTION_FREE) {
  618. handle_free_and_recover(dev_obj, false);
  619. } else if (action_flags & DEV_ACTION_PORT_DISABLE) {
  620. handle_port_disable(dev_obj);
  621. } else if (action_flags & DEV_ACTION_PROP_NEW) {
  622. handle_prop_new_evt(dev_obj);
  623. }
  624. USBH_ENTER_CRITICAL();
  625. /* ---------------------------------------------------------------------
  626. Re-enter critical section. All device action flags should have been handled.
  627. --------------------------------------------------------------------- */
  628. }
  629. USBH_EXIT_CRITICAL();
  630. return ESP_OK;
  631. }
  632. esp_err_t usbh_num_devs(int *num_devs_ret)
  633. {
  634. USBH_CHECK(num_devs_ret != NULL, ESP_ERR_INVALID_ARG);
  635. xSemaphoreTake(p_usbh_obj->constant.mux_lock, portMAX_DELAY);
  636. *num_devs_ret = p_usbh_obj->mux_protected.num_device;
  637. xSemaphoreGive(p_usbh_obj->constant.mux_lock);
  638. return ESP_OK;
  639. }
  640. // ------------------------------------------------ Device Functions ---------------------------------------------------
  641. // --------------------- Device Pool -----------------------
  642. esp_err_t usbh_dev_addr_list_fill(int list_len, uint8_t *dev_addr_list, int *num_dev_ret)
  643. {
  644. USBH_CHECK(dev_addr_list != NULL && num_dev_ret != NULL, ESP_ERR_INVALID_ARG);
  645. USBH_ENTER_CRITICAL();
  646. int num_filled = 0;
  647. device_t *dev_obj;
  648. // Fill list with devices from idle tailq
  649. TAILQ_FOREACH(dev_obj, &p_usbh_obj->dynamic.devs_idle_tailq, dynamic.tailq_entry) {
  650. if (num_filled < list_len) {
  651. dev_addr_list[num_filled] = dev_obj->constant.address;
  652. num_filled++;
  653. } else {
  654. break;
  655. }
  656. }
  657. // Fill list with devices from pending tailq
  658. TAILQ_FOREACH(dev_obj, &p_usbh_obj->dynamic.devs_pending_tailq, dynamic.tailq_entry) {
  659. if (num_filled < list_len) {
  660. dev_addr_list[num_filled] = dev_obj->constant.address;
  661. num_filled++;
  662. } else {
  663. break;
  664. }
  665. }
  666. USBH_EXIT_CRITICAL();
  667. // Write back number of devices filled
  668. *num_dev_ret = num_filled;
  669. return ESP_OK;
  670. }
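/*
Opens a device by address and increments its reference count. A device with a non-zero reference count
will not be freed; usbh_dev_close() should be called once per successful open.
*/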
  671. esp_err_t usbh_dev_open(uint8_t dev_addr, usb_device_handle_t *dev_hdl)
  672. {
  673. USBH_CHECK(dev_hdl != NULL, ESP_ERR_INVALID_ARG);
  674. esp_err_t ret;
  675. USBH_ENTER_CRITICAL();
  676. // Go through the device lists to find the device with the specified address
  677. device_t *found_dev_obj = NULL;
  678. device_t *dev_obj;
  679. TAILQ_FOREACH(dev_obj, &p_usbh_obj->dynamic.devs_idle_tailq, dynamic.tailq_entry) {
  680. if (dev_obj->constant.address == dev_addr) {
  681. found_dev_obj = dev_obj;
  682. goto exit;
  683. }
  684. }
  685. TAILQ_FOREACH(dev_obj, &p_usbh_obj->dynamic.devs_pending_tailq, dynamic.tailq_entry) {
  686. if (dev_obj->constant.address == dev_addr) {
  687. found_dev_obj = dev_obj;
  688. goto exit;
  689. }
  690. }
  691. exit:
  692. if (found_dev_obj != NULL) {
  693. // The device is not in a state to be referenced
  694. if (dev_obj->dynamic.flags.is_gone || dev_obj->dynamic.flags.waiting_port_disable || dev_obj->dynamic.flags.waiting_free) {
  695. ret = ESP_ERR_INVALID_STATE;
  696. } else {
  697. dev_obj->dynamic.ref_count++;
  698. *dev_hdl = (usb_device_handle_t)found_dev_obj;
  699. ret = ESP_OK;
  700. }
  701. } else {
  702. ret = ESP_ERR_NOT_FOUND;
  703. }
  704. USBH_EXIT_CRITICAL();
  705. return ret;
  706. }
  707. esp_err_t usbh_dev_close(usb_device_handle_t dev_hdl)
  708. {
  709. USBH_CHECK(dev_hdl != NULL, ESP_ERR_INVALID_ARG);
  710. device_t *dev_obj = (device_t *)dev_hdl;
  711. USBH_ENTER_CRITICAL();
  712. dev_obj->dynamic.ref_count--;
  713. bool call_proc_req_cb = false;
  714. if (dev_obj->dynamic.ref_count == 0) {
  715. // Sanity check.
  716. assert(dev_obj->dynamic.num_ctrl_xfers_inflight == 0); // There cannot be any control transfer in-flight
  717. assert(!dev_obj->dynamic.flags.waiting_free); // This can only be set when ref count reaches 0
  718. if (dev_obj->dynamic.flags.is_gone) {
  719. // Device is already gone so its port is already disabled. Trigger the USBH process to free the device
  720. dev_obj->dynamic.flags.waiting_free = 1;
  721. call_proc_req_cb = _dev_set_actions(dev_obj, DEV_ACTION_FREE_AND_RECOVER); // Port error occurred so we need to recover it
  722. } else if (dev_obj->dynamic.flags.waiting_close) {
  723. // Device is still connected but is no longer needed. Trigger the USBH process to request device's port be disabled
  724. dev_obj->dynamic.flags.waiting_port_disable = 1;
  725. call_proc_req_cb = _dev_set_actions(dev_obj, DEV_ACTION_PORT_DISABLE);
  726. }
  727. // Else, there's nothing to do. Leave the device allocated
  728. }
  729. USBH_EXIT_CRITICAL();
  730. if (call_proc_req_cb) {
  731. p_usbh_obj->constant.proc_req_cb(USB_PROC_REQ_SOURCE_USBH, false, p_usbh_obj->constant.proc_req_cb_arg);
  732. }
  733. return ESP_OK;
  734. }
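/*
Marks every remaining device as waiting to be closed or freed (e.g., prior to uninstalling USBH). Returns
ESP_ERR_NOT_FINISHED if devices are still allocated and the caller should wait for USBH_EVENT_DEV_ALL_FREE.
*/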
  735. esp_err_t usbh_dev_mark_all_free(void)
  736. {
  737. USBH_ENTER_CRITICAL();
  738. /*
  739. Go through the device list and mark each device as waiting to be closed. If the device is not opened at all, we can
  740. disable it immediately.
  741. Note: We manually traverse the list because we need to add/remove items while traversing
  742. */
  743. bool call_proc_req_cb = false;
  744. bool wait_for_free = false;
  745. for (int i = 0; i < 2; i++) {
  746. device_t *dev_obj_cur;
  747. device_t *dev_obj_next;
  748. // Go through pending list first as it's more efficient
  749. if (i == 0) {
  750. dev_obj_cur = TAILQ_FIRST(&p_usbh_obj->dynamic.devs_pending_tailq);
  751. } else {
  752. dev_obj_cur = TAILQ_FIRST(&p_usbh_obj->dynamic.devs_idle_tailq);
  753. }
  754. while (dev_obj_cur != NULL) {
  755. assert(!dev_obj_cur->dynamic.flags.waiting_close); // Sanity check
  756. // Keep a copy of the next item first in case we remove the current item
  757. dev_obj_next = TAILQ_NEXT(dev_obj_cur, dynamic.tailq_entry);
  758. if (dev_obj_cur->dynamic.ref_count == 0 && !dev_obj_cur->dynamic.flags.is_gone) {
  759. // Device is not opened and is not gone, so we can disable it now
  760. dev_obj_cur->dynamic.flags.waiting_port_disable = 1;
  761. call_proc_req_cb |= _dev_set_actions(dev_obj_cur, DEV_ACTION_PORT_DISABLE);
  762. } else {
  763. // Device is still opened. Just mark it as waiting to be closed
  764. dev_obj_cur->dynamic.flags.waiting_close = 1;
  765. }
  766. wait_for_free = true; // As long as there is still a device, we need to wait for an event indicating it is freed
  767. dev_obj_cur = dev_obj_next;
  768. }
  769. }
  770. USBH_EXIT_CRITICAL();
  771. if (call_proc_req_cb) {
  772. p_usbh_obj->constant.proc_req_cb(USB_PROC_REQ_SOURCE_USBH, false, p_usbh_obj->constant.proc_req_cb_arg);
  773. }
  774. return (wait_for_free) ? ESP_ERR_NOT_FINISHED : ESP_OK;
  775. }
  776. // ------------------- Single Device ----------------------
  777. esp_err_t usbh_dev_get_addr(usb_device_handle_t dev_hdl, uint8_t *dev_addr)
  778. {
  779. USBH_CHECK(dev_hdl != NULL && dev_addr != NULL, ESP_ERR_INVALID_ARG);
  780. device_t *dev_obj = (device_t *)dev_hdl;
  781. USBH_ENTER_CRITICAL();
  782. USBH_CHECK_FROM_CRIT(dev_obj->constant.address > 0, ESP_ERR_INVALID_STATE);
  783. *dev_addr = dev_obj->constant.address;
  784. USBH_EXIT_CRITICAL();
  785. return ESP_OK;
  786. }
  787. esp_err_t usbh_dev_get_info(usb_device_handle_t dev_hdl, usb_device_info_t *dev_info)
  788. {
  789. USBH_CHECK(dev_hdl != NULL && dev_info != NULL, ESP_ERR_INVALID_ARG);
  790. device_t *dev_obj = (device_t *)dev_hdl;
  791. esp_err_t ret;
  792. // Device must be configured, or not attached (if it suddenly disconnected)
  793. USBH_ENTER_CRITICAL();
  794. if (!(dev_obj->dynamic.state == USB_DEVICE_STATE_CONFIGURED || dev_obj->dynamic.state == USB_DEVICE_STATE_NOT_ATTACHED)) {
  795. USBH_EXIT_CRITICAL();
  796. ret = ESP_ERR_INVALID_STATE;
  797. goto exit;
  798. }
  799. // Critical section for the dynamic members
  800. dev_info->speed = dev_obj->constant.speed;
  801. dev_info->dev_addr = dev_obj->constant.address;
  802. dev_info->bMaxPacketSize0 = dev_obj->constant.desc->bMaxPacketSize0;
  803. USBH_EXIT_CRITICAL();
  804. assert(dev_obj->constant.config_desc);
  805. dev_info->bConfigurationValue = dev_obj->constant.config_desc->bConfigurationValue;
  806. // String descriptors are allowed to be NULL as not all devices support them
  807. dev_info->str_desc_manufacturer = dev_obj->constant.str_desc_manu;
  808. dev_info->str_desc_product = dev_obj->constant.str_desc_product;
  809. dev_info->str_desc_serial_num = dev_obj->constant.str_desc_ser_num;
  810. ret = ESP_OK;
  811. exit:
  812. return ret;
  813. }
  814. esp_err_t usbh_dev_get_desc(usb_device_handle_t dev_hdl, const usb_device_desc_t **dev_desc_ret)
  815. {
  816. USBH_CHECK(dev_hdl != NULL && dev_desc_ret != NULL, ESP_ERR_INVALID_ARG);
  817. device_t *dev_obj = (device_t *)dev_hdl;
  818. USBH_ENTER_CRITICAL();
  819. USBH_CHECK_FROM_CRIT(dev_obj->dynamic.state == USB_DEVICE_STATE_CONFIGURED, ESP_ERR_INVALID_STATE);
  820. USBH_EXIT_CRITICAL();
  821. *dev_desc_ret = dev_obj->constant.desc;
  822. return ESP_OK;
  823. }
  824. esp_err_t usbh_dev_get_config_desc(usb_device_handle_t dev_hdl, const usb_config_desc_t **config_desc_ret)
  825. {
  826. USBH_CHECK(dev_hdl != NULL && config_desc_ret != NULL, ESP_ERR_INVALID_ARG);
  827. device_t *dev_obj = (device_t *)dev_hdl;
  828. esp_err_t ret;
  829. // Device must be in the configured state
  830. USBH_ENTER_CRITICAL();
  831. if (dev_obj->dynamic.state != USB_DEVICE_STATE_CONFIGURED) {
  832. USBH_EXIT_CRITICAL();
  833. ret = ESP_ERR_INVALID_STATE;
  834. goto exit;
  835. }
  836. USBH_EXIT_CRITICAL();
  837. assert(dev_obj->constant.config_desc);
  838. *config_desc_ret = dev_obj->constant.config_desc;
  839. ret = ESP_OK;
  840. exit:
  841. return ret;
  842. }
  843. esp_err_t usbh_dev_submit_ctrl_urb(usb_device_handle_t dev_hdl, urb_t *urb)
  844. {
  845. USBH_CHECK(dev_hdl != NULL && urb != NULL, ESP_ERR_INVALID_ARG);
  846. device_t *dev_obj = (device_t *)dev_hdl;
  847. USBH_CHECK(urb_check_args(urb), ESP_ERR_INVALID_ARG);
  848. bool xfer_is_in = ((usb_setup_packet_t *)urb->transfer.data_buffer)->bmRequestType & USB_BM_REQUEST_TYPE_DIR_IN;
  849. USBH_CHECK(transfer_check_usb_compliance(&(urb->transfer), USB_TRANSFER_TYPE_CTRL, dev_obj->constant.desc->bMaxPacketSize0, xfer_is_in), ESP_ERR_INVALID_ARG);
  850. USBH_ENTER_CRITICAL();
  851. USBH_CHECK_FROM_CRIT(dev_obj->dynamic.state == USB_DEVICE_STATE_CONFIGURED, ESP_ERR_INVALID_STATE);
  852. // Increment the control transfer count first
  853. dev_obj->dynamic.num_ctrl_xfers_inflight++;
  854. USBH_EXIT_CRITICAL();
  855. esp_err_t ret;
  856. if (hcd_pipe_get_state(dev_obj->constant.default_pipe) != HCD_PIPE_STATE_ACTIVE) {
  857. ret = ESP_ERR_INVALID_STATE;
  858. goto hcd_err;
  859. }
  860. ret = hcd_urb_enqueue(dev_obj->constant.default_pipe, urb);
  861. if (ret != ESP_OK) {
  862. goto hcd_err;
  863. }
  864. ret = ESP_OK;
  865. return ret;
  866. hcd_err:
  867. USBH_ENTER_CRITICAL();
  868. dev_obj->dynamic.num_ctrl_xfers_inflight--;
  869. USBH_EXIT_CRITICAL();
  870. return ret;
  871. }
  872. // ----------------------------------------------- Interface Functions -------------------------------------------------
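/*
Allocates an endpoint object (and its underlying HCD pipe) for an endpoint address found in the device's
active configuration descriptor. Each non-default endpoint address can only be allocated once per device.
*/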
  873. esp_err_t usbh_ep_alloc(usb_device_handle_t dev_hdl, usbh_ep_config_t *ep_config, usbh_ep_handle_t *ep_hdl_ret)
  874. {
  875. USBH_CHECK(dev_hdl != NULL && ep_config != NULL && ep_hdl_ret != NULL, ESP_ERR_INVALID_ARG);
  876. uint8_t bEndpointAddress = ep_config->bEndpointAddress;
  877. USBH_CHECK(check_ep_addr(bEndpointAddress), ESP_ERR_INVALID_ARG);
  878. esp_err_t ret;
  879. device_t *dev_obj = (device_t *)dev_hdl;
  880. endpoint_t *ep_obj;
  881. // Find the endpoint descriptor from the device's current configuration descriptor
  882. const usb_ep_desc_t *ep_desc = usb_parse_endpoint_descriptor_by_address(dev_obj->constant.config_desc, ep_config->bInterfaceNumber, ep_config->bAlternateSetting, ep_config->bEndpointAddress, NULL);
  883. if (ep_desc == NULL) {
  884. return ESP_ERR_NOT_FOUND;
  885. }
  886. // Allocate the endpoint object
  887. ret = endpoint_alloc(dev_obj, ep_desc, ep_config, &ep_obj);
  888. if (ret != ESP_OK) {
  889. goto alloc_err;
  890. }
  891. // We need to take the mux_lock to access mux_protected members
  892. xSemaphoreTake(p_usbh_obj->constant.mux_lock, portMAX_DELAY);
  893. USBH_ENTER_CRITICAL();
  894. // Check the device's state before we assign a pipe to the allocated endpoint
  895. if (dev_obj->dynamic.state != USB_DEVICE_STATE_CONFIGURED) {
  896. USBH_EXIT_CRITICAL();
  897. ret = ESP_ERR_INVALID_STATE;
  898. goto dev_state_err;
  899. }
  900. USBH_EXIT_CRITICAL();
  901. // Check if the endpoint has already been allocated
  902. if (get_ep_from_addr(dev_obj, bEndpointAddress) == NULL) {
  903. set_ep_from_addr(dev_obj, bEndpointAddress, ep_obj);
  904. // Write back the endpoint handle
  905. *ep_hdl_ret = (usbh_ep_handle_t)ep_obj;
  906. ret = ESP_OK;
  907. } else {
  908. // Endpoint is already allocated
  909. ret = ESP_ERR_INVALID_STATE;
  910. }
  911. dev_state_err:
  912. xSemaphoreGive(p_usbh_obj->constant.mux_lock);
  913. // If the endpoint was not assigned, free it
  914. if (ret != ESP_OK) {
  915. endpoint_free(ep_obj);
  916. }
  917. alloc_err:
  918. return ret;
  919. }
  920. esp_err_t usbh_ep_free(usbh_ep_handle_t ep_hdl)
  921. {
  922. USBH_CHECK(ep_hdl != NULL, ESP_ERR_INVALID_ARG);
  923. esp_err_t ret;
  924. endpoint_t *ep_obj = (endpoint_t *)ep_hdl;
  925. device_t *dev_obj = (device_t *)ep_obj->constant.dev;
  926. uint8_t bEndpointAddress = ep_obj->constant.ep_desc->bEndpointAddress;
  927. // Todo: Check that the EP's underlying pipe is halted before allowing the EP to be freed (IDF-7273)
  928. // Check that the EP's underlying pipe has no more in-flight URBs
  929. if (hcd_pipe_get_num_urbs(ep_obj->constant.pipe_hdl) != 0) {
  930. ret = ESP_ERR_INVALID_STATE;
  931. goto exit;
  932. }
  933. // We need to take the mux_lock to access mux_protected members
  934. xSemaphoreTake(p_usbh_obj->constant.mux_lock, portMAX_DELAY);
  935. // Check if the endpoint was allocated on this device
  936. if (ep_obj == get_ep_from_addr(dev_obj, bEndpointAddress)) {
  937. // Clear the endpoint from the device's endpoint object list
  938. set_ep_from_addr(dev_obj, bEndpointAddress, NULL);
  939. ret = ESP_OK;
  940. } else {
  941. ret = ESP_ERR_NOT_FOUND;
  942. }
  943. xSemaphoreGive(p_usbh_obj->constant.mux_lock);
  944. // Finally, we free the endpoint object
  945. if (ret == ESP_OK) {
  946. endpoint_free(ep_obj);
  947. }
  948. exit:
  949. return ret;
  950. }
  951. esp_err_t usbh_ep_get_handle(usb_device_handle_t dev_hdl, uint8_t bEndpointAddress, usbh_ep_handle_t *ep_hdl_ret)
  952. {
  953. USBH_CHECK(dev_hdl != NULL && ep_hdl_ret != NULL, ESP_ERR_INVALID_ARG);
  954. USBH_CHECK(check_ep_addr(bEndpointAddress), ESP_ERR_INVALID_ARG);
  955. esp_err_t ret;
  956. device_t *dev_obj = (device_t *)dev_hdl;
  957. endpoint_t *ep_obj;
  958. // We need to take the mux_lock to access mux_protected members
  959. xSemaphoreTake(p_usbh_obj->constant.mux_lock, portMAX_DELAY);
  960. ep_obj = get_ep_from_addr(dev_obj, bEndpointAddress);
  961. xSemaphoreGive(p_usbh_obj->constant.mux_lock);
  962. if (ep_obj) {
  963. *ep_hdl_ret = (usbh_ep_handle_t)ep_obj;
  964. ret = ESP_OK;
  965. } else {
  966. ret = ESP_ERR_NOT_FOUND;
  967. }
  968. return ret;
  969. }
  970. esp_err_t usbh_ep_enqueue_urb(usbh_ep_handle_t ep_hdl, urb_t *urb)
  971. {
  972. USBH_CHECK(ep_hdl != NULL && urb != NULL, ESP_ERR_INVALID_ARG);
  973. USBH_CHECK(urb_check_args(urb), ESP_ERR_INVALID_ARG);
  974. endpoint_t *ep_obj = (endpoint_t *)ep_hdl;
  975. bool xfer_is_in = ep_obj->constant.ep_desc->bEndpointAddress & USB_B_ENDPOINT_ADDRESS_EP_DIR_MASK; // Direction comes from the EP descriptor
  976. USBH_CHECK(transfer_check_usb_compliance(&(urb->transfer),
  977. USB_EP_DESC_GET_XFERTYPE(ep_obj->constant.ep_desc),
  978. USB_EP_DESC_GET_MPS(ep_obj->constant.ep_desc),
  979. xfer_is_in),
  980. ESP_ERR_INVALID_ARG);
  981. // Check that the EP's underlying pipe is in the active state before submitting the URB
  982. if (hcd_pipe_get_state(ep_obj->constant.pipe_hdl) != HCD_PIPE_STATE_ACTIVE) {
  983. return ESP_ERR_INVALID_STATE;
  984. }
  985. // Enqueue the URB to the EP's underlying pipe
  986. return hcd_urb_enqueue(ep_obj->constant.pipe_hdl, urb);
  987. }
  988. esp_err_t usbh_ep_dequeue_urb(usbh_ep_handle_t ep_hdl, urb_t **urb_ret)
  989. {
  990. USBH_CHECK(ep_hdl != NULL && urb_ret != NULL, ESP_ERR_INVALID_ARG);
  991. endpoint_t *ep_obj = (endpoint_t *)ep_hdl;
  992. // Dequeue a URB from the EP's underlying pipe
  993. *urb_ret = hcd_urb_dequeue(ep_obj->constant.pipe_hdl);
  994. return ESP_OK;
  995. }
  996. esp_err_t usbh_ep_command(usbh_ep_handle_t ep_hdl, usbh_ep_cmd_t command)
  997. {
  998. USBH_CHECK(ep_hdl != NULL, ESP_ERR_INVALID_ARG);
  999. endpoint_t *ep_obj = (endpoint_t *)ep_hdl;
  1000. // Send the command to the EP's underlying pipe
  1001. return hcd_pipe_command(ep_obj->constant.pipe_hdl, (hcd_pipe_cmd_t)command);
  1002. }
  1003. void *usbh_ep_get_context(usbh_ep_handle_t ep_hdl)
  1004. {
  1005. assert(ep_hdl);
  1006. endpoint_t *ep_obj = (endpoint_t *)ep_hdl;
  1007. return hcd_pipe_get_context(ep_obj->constant.pipe_hdl);
  1008. }
  1009. // -------------------------------------------------- Hub Functions ----------------------------------------------------
  1010. // ------------------- Device Related ----------------------
  1011. esp_err_t usbh_hub_is_installed(usbh_hub_req_cb_t hub_req_callback, void *callback_arg)
  1012. {
  1013. USBH_CHECK(hub_req_callback != NULL, ESP_ERR_INVALID_ARG);
  1014. USBH_ENTER_CRITICAL();
  1015. // Check that USBH is already installed
  1016. USBH_CHECK_FROM_CRIT(p_usbh_obj != NULL, ESP_ERR_INVALID_STATE);
  1017. // Check that the Hub driver has not been installed yet
  1018. USBH_CHECK_FROM_CRIT(p_usbh_obj->constant.hub_req_cb == NULL, ESP_ERR_INVALID_STATE);
  1019. p_usbh_obj->constant.hub_req_cb = hub_req_callback;
  1020. p_usbh_obj->constant.hub_req_cb_arg = callback_arg;
  1021. USBH_EXIT_CRITICAL();
  1022. return ESP_OK;
  1023. }
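/*
Called by the Hub driver when a new device connects. Allocates an unenumerated device object in the default
state and returns its handle along with the handle of its default (EP0) pipe for enumeration.
*/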
  1024. esp_err_t usbh_hub_add_dev(hcd_port_handle_t port_hdl, usb_speed_t dev_speed, usb_device_handle_t *new_dev_hdl, hcd_pipe_handle_t *default_pipe_hdl)
  1025. {
  1026. // Note: Parent device handle can be NULL if it's connected to the root hub
  1027. USBH_CHECK(new_dev_hdl != NULL, ESP_ERR_INVALID_ARG);
  1028. esp_err_t ret;
  1029. device_t *dev_obj;
  1030. ret = device_alloc(port_hdl, dev_speed, &dev_obj);
  1031. if (ret != ESP_OK) {
  1032. return ret;
  1033. }
  1034. // Write-back device handle
  1035. *new_dev_hdl = (usb_device_handle_t)dev_obj;
  1036. *default_pipe_hdl = dev_obj->constant.default_pipe;
  1037. ret = ESP_OK;
  1038. return ret;
  1039. }
  1040. esp_err_t usbh_hub_pass_event(usb_device_handle_t dev_hdl, usbh_hub_event_t hub_event)
  1041. {
  1042. USBH_CHECK(dev_hdl != NULL, ESP_ERR_INVALID_ARG);
  1043. device_t *dev_obj = (device_t *)dev_hdl;
  1044. bool call_proc_req_cb;
  1045. switch (hub_event) {
  1046. case USBH_HUB_EVENT_PORT_ERROR: {
  1047. USBH_ENTER_CRITICAL();
  1048. dev_obj->dynamic.flags.is_gone = 1;
  1049. // Check if the device can be freed now
  1050. if (dev_obj->dynamic.ref_count == 0) {
  1051. dev_obj->dynamic.flags.waiting_free = 1;
  1052. // Device is already waiting free so none of its EPs will be in use. Can free immediately.
  1053. call_proc_req_cb = _dev_set_actions(dev_obj, DEV_ACTION_FREE_AND_RECOVER); // Port error occurred so we need to recover it
  1054. } else {
  1055. call_proc_req_cb = _dev_set_actions(dev_obj,
  1056. DEV_ACTION_EPn_HALT_FLUSH |
  1057. DEV_ACTION_EP0_FLUSH |
  1058. DEV_ACTION_EP0_DEQUEUE |
  1059. DEV_ACTION_PROP_GONE_EVT);
  1060. }
  1061. USBH_EXIT_CRITICAL();
  1062. break;
  1063. }
  1064. case USBH_HUB_EVENT_PORT_DISABLED: {
  1065. USBH_ENTER_CRITICAL();
  1066. assert(dev_obj->dynamic.ref_count == 0); // At this stage, the device should have been closed by all users
  1067. dev_obj->dynamic.flags.waiting_free = 1;
  1068. call_proc_req_cb = _dev_set_actions(dev_obj, DEV_ACTION_FREE);
  1069. USBH_EXIT_CRITICAL();
  1070. break;
  1071. }
  1072. default:
  1073. return ESP_ERR_INVALID_ARG;
  1074. }
  1075. if (call_proc_req_cb) {
  1076. p_usbh_obj->constant.proc_req_cb(USB_PROC_REQ_SOURCE_USBH, false, p_usbh_obj->constant.proc_req_cb_arg);
  1077. }
  1078. return ESP_OK;
  1079. }
  1080. // ----------------- Enumeration Related -------------------
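/*
The usbh_hub_enum_fill_*() functions below are called by the Hub driver during enumeration to fill in the
device's constant information (address and descriptors) before the device is added to the device list.
*/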
  1081. esp_err_t usbh_hub_enum_fill_dev_addr(usb_device_handle_t dev_hdl, uint8_t dev_addr)
  1082. {
  1083. USBH_CHECK(dev_hdl != NULL, ESP_ERR_INVALID_ARG);
  1084. device_t *dev_obj = (device_t *)dev_hdl;
  1085. USBH_ENTER_CRITICAL();
  1086. dev_obj->dynamic.state = USB_DEVICE_STATE_ADDRESS;
  1087. USBH_EXIT_CRITICAL();
  1088. // We can modify the info members outside the critical section
  1089. dev_obj->constant.address = dev_addr;
  1090. return ESP_OK;
  1091. }
  1092. esp_err_t usbh_hub_enum_fill_dev_desc(usb_device_handle_t dev_hdl, const usb_device_desc_t *device_desc)
  1093. {
  1094. USBH_CHECK(dev_hdl != NULL && device_desc != NULL, ESP_ERR_INVALID_ARG);
  1095. device_t *dev_obj = (device_t *)dev_hdl;
  1096. // We can modify the info members outside the critical section
  1097. memcpy((usb_device_desc_t *)dev_obj->constant.desc, device_desc, sizeof(usb_device_desc_t));
  1098. return ESP_OK;
  1099. }
  1100. esp_err_t usbh_hub_enum_fill_config_desc(usb_device_handle_t dev_hdl, const usb_config_desc_t *config_desc_full)
  1101. {
  1102. USBH_CHECK(dev_hdl != NULL && config_desc_full != NULL, ESP_ERR_INVALID_ARG);
  1103. device_t *dev_obj = (device_t *)dev_hdl;
  1104. // Allocate memory to store the configuration descriptor
  1105. usb_config_desc_t *config_desc = heap_caps_malloc(config_desc_full->wTotalLength, MALLOC_CAP_DEFAULT); // Buffer to copy over full configuration descriptor (wTotalLength)
  1106. if (config_desc == NULL) {
  1107. return ESP_ERR_NO_MEM;
  1108. }
  1109. // Copy the configuration descriptor
  1110. memcpy(config_desc, config_desc_full, config_desc_full->wTotalLength);
  1111. // Assign the config desc to the device object
  1112. assert(dev_obj->constant.config_desc == NULL);
  1113. dev_obj->constant.config_desc = config_desc;
  1114. return ESP_OK;
  1115. }
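/*
Stores one of the device's string descriptors. 'select' chooses the target:
0 = manufacturer, 1 = product, 2 = serial number.
*/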
  1116. esp_err_t usbh_hub_enum_fill_str_desc(usb_device_handle_t dev_hdl, const usb_str_desc_t *str_desc, int select)
  1117. {
  1118. USBH_CHECK(dev_hdl != NULL && str_desc != NULL && (select >= 0 && select < 3), ESP_ERR_INVALID_ARG);
  1119. device_t *dev_obj = (device_t *)dev_hdl;
  1120. // Allocate memory to store the string descriptor
  1121. usb_str_desc_t *str_desc_fill = heap_caps_malloc(str_desc->bLength, MALLOC_CAP_DEFAULT);
  1122. if (str_desc_fill == NULL) {
  1123. return ESP_ERR_NO_MEM;
  1124. }
  1125. // Copy the string descriptor
  1126. memcpy(str_desc_fill, str_desc, str_desc->bLength);
  1127. // Assign filled string descriptor to the device object
  1128. switch (select) {
  1129. case 0:
  1130. assert(dev_obj->constant.str_desc_manu == NULL);
  1131. dev_obj->constant.str_desc_manu = str_desc_fill;
  1132. break;
  1133. case 1:
  1134. assert(dev_obj->constant.str_desc_product == NULL);
  1135. dev_obj->constant.str_desc_product = str_desc_fill;
  1136. break;
  1137. default: // 2
  1138. assert(dev_obj->constant.str_desc_ser_num == NULL);
  1139. dev_obj->constant.str_desc_ser_num = str_desc_fill;
  1140. break;
  1141. }
  1142. return ESP_OK;
  1143. }
  1144. esp_err_t usbh_hub_enum_done(usb_device_handle_t dev_hdl)
  1145. {
  1146. USBH_CHECK(dev_hdl != NULL, ESP_ERR_INVALID_ARG);
  1147. device_t *dev_obj = (device_t *)dev_hdl;
  1148. // We need to take the mux_lock to access mux_protected members
  1149. xSemaphoreTake(p_usbh_obj->constant.mux_lock, portMAX_DELAY);
  1150. USBH_ENTER_CRITICAL();
  1151. dev_obj->dynamic.state = USB_DEVICE_STATE_CONFIGURED;
  1152. // Add the device to list of devices, then trigger a device event
  1153. TAILQ_INSERT_TAIL(&p_usbh_obj->dynamic.devs_idle_tailq, dev_obj, dynamic.tailq_entry); // Add it to the idle device list first
  1154. bool call_proc_req_cb = _dev_set_actions(dev_obj, DEV_ACTION_PROP_NEW);
  1155. USBH_EXIT_CRITICAL();
  1156. p_usbh_obj->mux_protected.num_device++;
  1157. xSemaphoreGive(p_usbh_obj->constant.mux_lock);
  1158. // Update the EP0's underlying pipe's callback
  1159. ESP_ERROR_CHECK(hcd_pipe_update_callback(dev_obj->constant.default_pipe, ep0_pipe_callback, (void *)dev_obj));
  1160. // Call the processing request callback
  1161. if (call_proc_req_cb) {
  1162. p_usbh_obj->constant.proc_req_cb(USB_PROC_REQ_SOURCE_USBH, false, p_usbh_obj->constant.proc_req_cb_arg);
  1163. }
  1164. return ESP_OK;
  1165. }
  1166. esp_err_t usbh_hub_enum_failed(usb_device_handle_t dev_hdl)
  1167. {
  1168. USBH_CHECK(dev_hdl != NULL, ESP_ERR_INVALID_ARG);
  1169. device_t *dev_obj = (device_t *)dev_hdl;
  1170. device_free(dev_obj);
  1171. return ESP_OK;
  1172. }