/* hcd.c — USB Host Controller Driver (HCD) implementation */
  1. // Copyright 2015-2020 Espressif Systems (Shanghai) PTE LTD
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. // http://www.apache.org/licenses/LICENSE-2.0
  7. //
  8. // Unless required by applicable law or agreed to in writing, software
  9. // distributed under the License is distributed on an "AS IS" BASIS,
  10. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  11. // See the License for the specific language governing permissions and
  12. // limitations under the License.
  13. #include <string.h>
  14. #include "sys/queue.h"
  15. #include "esp_heap_caps.h"
  16. #include "esp_intr_alloc.h"
  17. #include "esp_timer.h"
  18. #include "esp_err.h"
  19. #include "esp_rom_gpio.h"
  20. #include "hal/usbh_hal.h"
  21. #include "soc/gpio_pins.h"
  22. #include "soc/gpio_sig_map.h"
  23. #include "driver/periph_ctrl.h"
  24. #include "freertos/FreeRTOS.h"
  25. #include "freertos/task.h"
  26. #include "freertos/semphr.h"
  27. #include "hcd.h"
  28. // ----------------------------------------------------- Macros --------------------------------------------------------
  29. // --------------------- Constants -------------------------
  30. /**
  31. * @brief Number of transfer descriptors per transfer for various transfer types
  32. *
  33. * Control: Requires 3 transfer descriptors for a single transfer
  34. * corresponding to each stage of a control transfer
  35. * Bulk: Requires 1 transfer descriptor for each transfer
  36. */
  37. #define NUM_DESC_PER_XFER_CTRL 3
  38. #define NUM_DESC_PER_XFER_BULK 1
  39. #define XFER_LIST_LEN_CTRL 1
  40. #define XFER_LIST_LEN_BULK 1
  41. #define INIT_DELAY_MS 30 //A delay of at least 25ms to enter Host mode. Make it 30ms to be safe
  42. #define DEBOUNCE_DELAY_MS 250 //A debounce delay of 250ms
  43. #define RESET_HOLD_MS 30 //Spec requires at least 10ms. Make it 30ms to be safe
  44. #define RESET_RECOVERY_MS 30 //Reset recovery delay of 10ms (make it 30 ms to be safe) to allow for connected device to recover (and for port enabled interrupt to occur)
  45. #define RESUME_HOLD_MS 30 //Spec requires at least 20ms, Make it 30ms to be safe
  46. #define RESUME_RECOVERY_MS 20 //Resume recovery of at least 10ms. Make it 20 ms to be safe. This will include the 3 LS bit times of the EOP
  47. #define CTRL_EP_MAX_MPS_LS 8 //Largest Maximum Packet Size for Low Speed control endpoints
  48. #define CTRL_EP_MAX_MPS_FS 64 //Largest Maximum Packet Size for Full Speed control endpoints
  49. #define NUM_PORTS 1 //The controller only has one port.
  50. typedef enum {
  51. XFER_REQ_STATE_IDLE, //The transfer request is not enqueued
  52. XFER_REQ_STATE_PENDING, //The transfer request is enqueued and pending execution
  53. XFER_REQ_STATE_INFLIGHT, //The transfer request is currently being executed
  54. XFER_REQ_STATE_DONE, //The transfer request has completed executed or is retired, and is waiting to be dequeued
  55. } xfer_req_state_t;
  56. // -------------------- Convenience ------------------------
  57. #define HCD_ENTER_CRITICAL_ISR() portENTER_CRITICAL_ISR(&hcd_lock)
  58. #define HCD_EXIT_CRITICAL_ISR() portEXIT_CRITICAL_ISR(&hcd_lock)
  59. #define HCD_ENTER_CRITICAL() portENTER_CRITICAL(&hcd_lock)
  60. #define HCD_EXIT_CRITICAL() portEXIT_CRITICAL(&hcd_lock)
  61. #define HCD_CHECK(cond, ret_val) ({ \
  62. if (!(cond)) { \
  63. return (ret_val); \
  64. } \
  65. })
  66. #define HCD_CHECK_FROM_CRIT(cond, ret_val) ({ \
  67. if (!(cond)) { \
  68. HCD_EXIT_CRITICAL(); \
  69. return ret_val; \
  70. } \
  71. })
  72. // ------------------------------------------------------ Types --------------------------------------------------------
  73. typedef struct xfer_req_obj xfer_req_t;
  74. typedef struct pipe_obj pipe_t;
  75. typedef struct port_obj port_t;
  76. /**
  77. * @brief Object representing an HCD transfer request
  78. */
  79. struct xfer_req_obj {
  80. TAILQ_ENTRY(xfer_req_obj) tailq_entry; //TailQ entry for pending or done tailq in pipe object
  81. pipe_t *pipe; //Target pipe of transfer request
  82. usb_irp_t *irp; //Target IRP
  83. void *context; //Context variable of transfer request
  84. xfer_req_state_t state; //Current state of the transfer request
  85. };
  86. /**
  87. * @brief Object representing a pipe in the HCD layer
  88. */
  89. struct pipe_obj {
  90. //Transfer requests related
  91. TAILQ_HEAD(tailhead_xfer_req_pend, xfer_req_obj) pend_xfer_req_tailq;
  92. TAILQ_HEAD(tailhead_xfer_req_done, xfer_req_obj) done_xfer_req_tailq;
  93. int num_xfer_req_pending;
  94. int num_xfer_req_done;
  95. xfer_req_t *inflight_xfer_req; //Pointer to the current transfer request being executed by the pipe. NULL if none.
  96. //Port related
  97. port_t *port; //The port to which this pipe is routed through
  98. TAILQ_ENTRY(pipe_obj) tailq_entry; //TailQ entry for port's list of pipes
  99. //HAl channel related
  100. void *xfer_desc_list;
  101. usbh_hal_chan_t *chan_obj;
  102. usbh_hal_ep_char_t ep_char;
  103. //Pipe status, state, and events
  104. hcd_pipe_state_t state;
  105. hcd_pipe_event_t last_event;
  106. TaskHandle_t task_waiting_pipe_notif; //Task handle used for internal pipe events
  107. union {
  108. struct {
  109. uint32_t waiting_xfer_done: 1;
  110. uint32_t paused: 1;
  111. uint32_t pipe_cmd_processing: 1;
  112. //Flags only used by control transfers
  113. uint32_t ctrl_data_stg_in: 1;
  114. uint32_t ctrl_data_stg_skip: 1;
  115. uint32_t reserved3: 3;
  116. uint32_t xfer_desc_list_len: 8;
  117. uint32_t reserved16: 16;
  118. };
  119. uint32_t val;
  120. } flags;
  121. //Pipe callback and context
  122. hcd_pipe_isr_callback_t callback;
  123. void *callback_arg;
  124. void *context;
  125. };
  126. /**
  127. * @brief Object representing a port in the HCD layer
  128. */
  129. struct port_obj {
  130. usbh_hal_context_t *hal;
  131. //Pipes routed through this port
  132. TAILQ_HEAD(tailhead_pipes_idle, pipe_obj) pipes_idle_tailq;
  133. TAILQ_HEAD(tailhead_pipes_queued, pipe_obj) pipes_queued_tailq;
  134. int num_pipes_idle;
  135. int num_pipes_queued;
  136. //Port status, state, and events
  137. hcd_port_state_t state;
  138. usb_speed_t speed;
  139. hcd_port_event_t last_event;
  140. TaskHandle_t task_waiting_port_notif; //Task handle used for internal port events
  141. union {
  142. struct {
  143. uint32_t event_pending: 1; //The port has an event that needs to be handled
  144. uint32_t event_processing: 1; //The port is current processing (handling) an event
  145. uint32_t cmd_processing: 1; //Used to indicate command handling is ongoing
  146. uint32_t waiting_all_pipes_pause: 1; //Waiting for all pipes routed through this port to be paused
  147. uint32_t disable_requested: 1;
  148. uint32_t conn_devc_ena: 1; //Used to indicate the port is connected to a device that has been reset
  149. uint32_t reserved10: 10;
  150. uint32_t num_pipes_waiting_pause: 16;
  151. };
  152. uint32_t val;
  153. } flags;
  154. bool initialized;
  155. //Port callback and context
  156. hcd_port_isr_callback_t callback;
  157. void *callback_arg;
  158. SemaphoreHandle_t port_mux;
  159. void *context;
  160. };
  161. /**
  162. * @brief Object representing the HCD
  163. */
  164. typedef struct {
  165. //Ports (Hardware only has one)
  166. port_t *port_obj;
  167. intr_handle_t isr_hdl;
  168. } hcd_obj_t;
  169. static portMUX_TYPE hcd_lock = portMUX_INITIALIZER_UNLOCKED;
  170. static hcd_obj_t *s_hcd_obj = NULL; //Note: "s_" is for the static pointer
  171. // ------------------------------------------------- Forward Declare ---------------------------------------------------
  172. // ----------------------- Events --------------------------
  173. /**
  174. * @brief Wait for an internal event from a port
  175. *
  176. * @note For each port, there can only be one thread/task waiting for an internal port event
  177. * @note This function is blocking (will exit and re-enter the critical section to do so)
  178. *
  179. * @param port Port object
  180. */
  181. static void _internal_port_event_wait(port_t *port);
  182. /**
  183. * @brief Notify (from an ISR context) the thread/task waiting for the internal port event
  184. *
  185. * @param port Port object
  186. * @return true A yield is required
  188. * @return false A yield is not required
  188. */
  189. static bool _internal_port_event_notify_from_isr(port_t *port);
  190. /**
  191. * @brief Wait for an internal event from a particular pipe
  192. *
  193. * @note For each pipe, there can only be one thread/task waiting for an internal port event
  194. * @note This function is blocking (will exit and re-enter the critical section to do so)
  195. *
  196. * @param pipe Pipe object
  197. */
  198. static void _internal_pipe_event_wait(pipe_t *pipe);
  199. /**
  200. * @brief Notify (from an ISR context) the thread/task waiting for an internal pipe event
  201. *
  202. * @param pipe Pipe object
  203. * @param from_isr Whether this is called from an ISR or not
  204. * @return true A yield is required
  205. * @return false A yield is not required. Always false when from_isr is also false
  206. */
  207. static bool _internal_pipe_event_notify(pipe_t *pipe, bool from_isr);
  208. // ------------------------ Port ---------------------------
  209. /**
  210. * @brief Invalidates all the pipes routed through a port
  211. *
  212. * This should be called when port or its connected device is no longer valid (e.g., the port is suddenly reset/disabled
  213. * or the device suddenly disconnects)
  214. *
  215. * @note This function may run one or more callbacks, and will exit and enter the critical section to do so
  216. *
  217. * Entry:
  218. * - The port or its connected device is no longer valid. This guarantees that none of the pipes will be transferring
  219. * Exit:
  220. * - Each pipe will have any pending transfer request moved to their respective done tailq
  221. * - Each pipe will be put into the invalid state
  222. * - Generate a HCD_PIPE_EVENT_INVALID event on each pipe and run their respective callbacks
  223. *
  224. * @param port Port object
  225. */
  226. static void _port_invalidate_all_pipes(port_t *port);
  227. /**
  228. * @brief Pause all pipes routed through a port
  229. *
  230. * Call this before attempting to reset or suspend a port
  231. *
  232. * Entry:
  233. * - The port is in the HCD_PORT_STATE_ENABLED state (i.e., there is a connected device which has been reset)
  234. * Exit:
  235. * - All pipes of the port have either paused, or are waiting to complete their inflight transfer request to pause
  236. * - If waiting for one or more pipes, _internal_port_event_wait() must be called after this function returns
  237. *
  238. * @param port Port object
  239. * @return true All pipes have been paused
  240. * @return false Need to wait for one or more pipes to pause. Call _internal_port_event_wait() afterwards
  241. */
  242. static bool _port_pause_all_pipes(port_t *port);
  243. /**
  244. * @brief Un-pause all pipes routed through a port
  245. *
  246. * Call this after coming out of a port reset or resume.
  247. *
  248. * Entry:
  249. * - The port is in the HCD_PORT_STATE_ENABLED state
  250. * - All pipes are paused
  251. * Exit:
  252. * - All pipes un-paused. If those pipes have pending transfer requests, they will be started.
  253. *
  254. * @param port Port object
  255. */
  256. static void _port_unpause_all_pipes(port_t *port);
  257. /**
  258. * @brief Send a reset condition on a port's bus
  259. *
  260. * Entry:
  261. * - The port must be in the HCD_PORT_STATE_ENABLED or HCD_PORT_STATE_DISABLED state
  262. * Exit:
  263. * - Reset condition sent on the port's bus
  264. *
  265. * @note This function is blocking (will exit and re-enter the critical section to do so)
  266. *
  267. * @param port Port object
  268. * @return true Reset condition successfully sent
  269. * @return false Failed to send reset condition due to unexpected port state
  270. */
  271. static bool _port_bus_reset(port_t *port);
  272. /**
  273. * @brief Send a suspend condition on a port's bus
  274. *
  275. * This function will first pause pipes routed through a port, and then send a suspend condition.
  276. *
  277. * Entry:
  278. * - The port must be in the HCD_PORT_STATE_ENABLED state
  279. * Exit:
  280. * - All pipes paused and the port is put into the suspended state
  281. *
  282. * @note This function is blocking (will exit and re-enter the critical section to do so)
  283. *
  284. * @param port Port object
  285. * @return true Suspend condition successfully sent. Port is now in the HCD_PORT_STATE_SUSPENDED state
  286. * @return false Failed to send a suspend condition due to unexpected port state
  287. */
  288. static bool _port_bus_suspend(port_t *port);
  289. /**
  290. * @brief Send a resume condition on a port's bus
  291. *
  292. * This function will send a resume condition, and then un-pause all the pipes routed through a port
  293. *
  294. * Entry:
  295. * - The port must be in the HCD_PORT_STATE_SUSPENDED state
  296. * Exit:
  297. * - The port is put into the enabled state and all pipes un-paused
  298. *
  299. * @note This function is blocking (will exit and re-enter the critical section to do so)
  300. *
  301. * @param port Port object
  302. * @return true Resume condition successfully sent. Port is now in the HCD_PORT_STATE_ENABLED state
  303. * @return false Failed to send a resume condition due to unexpected port state.
  304. */
  305. static bool _port_bus_resume(port_t *port);
  306. /**
  307. * @brief Disable a port
  308. *
  309. * Entry:
  310. * - The port must be in the HCD_PORT_STATE_ENABLED or HCD_PORT_STATE_SUSPENDED state
  311. * Exit:
  312. * - All pipes paused (should already be paused if port was suspended), and the port is put into the disabled state.
  313. *
  314. * @note This function is blocking (will exit and re-enter the critical section to do so)
  315. *
  316. * @param port Port object
  317. * @return true Port successfully disabled
  318. * @return false Failed to disable port due to unexpected port state
  319. */
  320. static bool _port_disable(port_t *port);
  321. /**
  322. * @brief Debounce port after a connection or disconnection event
  323. *
  324. * This function should be called after a port connection or disconnect event. This function will execute a debounce
  325. * delay then check the actual connection/disconnections state.
  326. *
  327. * @param port Port object
  328. * @return true A device is connected
  329. * @return false No device connected
  330. */
  331. static bool _port_debounce(port_t *port);
  332. // ------------------------ Pipe ---------------------------
  333. /**
  334. * @brief Get the next pending transfer request from the pending tailq
  335. *
  336. * Entry:
  337. * - The inflight transfer request must be set to NULL (indicating the pipe currently has no inflight transfer request)
  338. * Exit:
  339. * - If (num_xfer_req_pending > 0), the first transfer request is removed from pend_xfer_req_tailq and
  340. * inflight_xfer_req is set to that transfer request.
  341. * - If there are no more queued transfer requests, inflight_xfer_req is left as NULL
  342. *
  343. * @param pipe Pipe object
  344. * @return true A pending transfer request is now set as the inflight transfer request
  345. * @return false No more pending transfer requests
  346. */
  347. static bool _pipe_get_next_xfer_req(pipe_t *pipe);
  348. /**
  349. * @brief Return the inflight transfer request to the done tailq
  350. *
  351. * Entry:
  352. * - The inflight transfer request must already have been parsed (i.e., results have been checked)
  353. * Exit:
  354. * - The inflight transfer request is returned to the done tailq and inflight_xfer_req is set to NULL
  355. *
  356. * @param pipe Pipe object
  357. */
  358. static void _pipe_ret_cur_xfer_req(pipe_t *pipe);
  359. /**
  360. * @brief Wait until a pipe's inflight transfer request is done
  361. *
  362. * If the pipe has an inflight transfer request, this function will block until it is done (via a internal pipe event).
  363. * If the pipe has no inflight transfer request, this function does nothing and returns immediately.
  364. * If the pipe's state changes unexpectedly, this function will return false.
  365. *
  366. * @note This function is blocking (will exit and re-enter the critical section to do so)
  367. *
  368. * @param pipe Pipe object
  369. * @return true Pipes inflight transfer request is done
  370. * @return false Pipes state unexpectedly changed
  371. */
  372. static bool _pipe_wait_done(pipe_t *pipe);
  373. /**
  374. * @brief Retires all transfer requests (those that were previously inflight or pending)
  375. *
  376. * Retiring all transfer requests will result in any pending transfer request being moved to the done tailq. This
  377. * function will update the IRP status of each transfer request.
  378. * - If the retiring is self-initiated (i.e., due to a pipe command), the IRP status will be set to USB_TRANSFER_STATUS_CANCELLED.
  379. * - If the retiring is NOT self-initiated (i.e., the pipe is no longer valid), the IRP status will be set to USB_TRANSFER_STATUS_NO_DEVICE
  380. *
  381. * Entry:
  382. * - There can be no inflight transfer request (must already be parsed and returned to done queue)
  383. * Exit:
  384. * - If there was an inflight transfer request, it is parsed and returned to the done queue
  385. * - If there are any pending transfer requests:
  386. * - They are moved to the done tailq
  387. *
  388. * @param pipe Pipe object
  389. * @param self_initiated Whether the retire was initiated by the user (e.g., via a pipe command), in which case the
  390. * pending transfer requests are actively cancelled
  391. */
  392. static void _pipe_retire(pipe_t *pipe, bool self_initiated);
  393. /**
  394. * @brief Decode a HAL channel error to the corresponding pipe event
  395. *
  396. * @param chan_error The HAL channel error
  397. * @return hcd_pipe_event_t The corresponding pipe error event
  398. */
  399. static inline hcd_pipe_event_t pipe_decode_error_event(usbh_hal_chan_error_t chan_error);
  400. // ------------------ Transfer Requests --------------------
  401. /**
  402. * @brief Fill a transfer request into the pipe's transfer descriptor list
  403. *
  404. * Entry:
  405. * - The pipe's inflight_xfer_req must be set to the next transfer request
  406. * Exit:
  407. * - inflight_xfer_req filled into the pipe's transfer descriptor list
  408. * - Starting PIDs and directions set
  409. * - Channel slot acquired. Will need to call usbh_hal_chan_activate() to actually start execution
  410. *
  411. * @param pipe Pipe where inflight_xfer_req is already set to the next transfer request
  412. */
  413. static void _xfer_req_fill(pipe_t *pipe);
  414. /**
  415. * @brief Continue a transfer request
  416. *
  417. * @note This is currently only used for control transfers
  418. *
  419. * @param pipe Pipe where inflight_xfer_req contains the transfer request to continue
  420. */
  421. static void _xfer_req_continue(pipe_t *pipe);
  422. /**
  423. * @brief Parse the results of a pipe's transfer descriptor list into a transfer request
  424. *
  425. * Entry:
  426. * - The pipe must have stop transferring either due a channel event or a port disconnection.
  427. * - The pipe's state and last_event must be updated before parsing the transfer request as
  428. * they will be used to determine the result of the transfer request
  429. * Exit:
  430. * - The pipe's inflight_xfer_req is filled with result of the transfer request (i.e., the underlying IRP has its status set)
  431. *
  432. * @param pipe Pipe where inflight_xfer_req contains the completed transfer request
  433. * @param error_occurred Are we parsing after the pipe had an error (or has become invalid)
  434. */
  435. static void _xfer_req_parse(pipe_t *pipe, bool error_occurred);
  436. // ----------------------------------------------- Interrupt Handling --------------------------------------------------
  437. // ------------------- Internal Event ----------------------
  438. static void _internal_port_event_wait(port_t *port)
  439. {
  440. //There must NOT be another thread/task already waiting for an internal event
  441. assert(port->task_waiting_port_notif == NULL);
  442. port->task_waiting_port_notif = xTaskGetCurrentTaskHandle();
  443. HCD_EXIT_CRITICAL();
  444. //Wait to be notified from ISR
  445. ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
  446. HCD_ENTER_CRITICAL();
  447. port->task_waiting_port_notif = NULL;
  448. }
  449. static bool _internal_port_event_notify_from_isr(port_t *port)
  450. {
  451. //There must be a thread/task waiting for an internal event
  452. assert(port->task_waiting_port_notif != NULL);
  453. BaseType_t xTaskWoken = pdFALSE;
  454. //Unblock the thread/task waiting for the notification
  455. HCD_EXIT_CRITICAL_ISR();
  456. vTaskNotifyGiveFromISR(port->task_waiting_port_notif, &xTaskWoken);
  457. HCD_ENTER_CRITICAL_ISR();
  458. return (xTaskWoken == pdTRUE);
  459. }
  460. static void _internal_pipe_event_wait(pipe_t *pipe)
  461. {
  462. //There must NOT be another thread/task already waiting for an internal event
  463. assert(pipe->task_waiting_pipe_notif == NULL);
  464. pipe->task_waiting_pipe_notif = xTaskGetCurrentTaskHandle();
  465. HCD_EXIT_CRITICAL();
  466. //Wait to be notified from ISR
  467. ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
  468. HCD_ENTER_CRITICAL();
  469. pipe->task_waiting_pipe_notif = NULL;
  470. }
  471. static bool _internal_pipe_event_notify(pipe_t *pipe, bool from_isr)
  472. {
  473. //There must be a thread/task waiting for an internal event
  474. assert(pipe->task_waiting_pipe_notif != NULL);
  475. bool ret;
  476. if (from_isr) {
  477. BaseType_t xTaskWoken = pdFALSE;
  478. HCD_EXIT_CRITICAL_ISR();
  479. //Unblock the thread/task waiting for the pipe notification
  480. vTaskNotifyGiveFromISR(pipe->task_waiting_pipe_notif, &xTaskWoken);
  481. HCD_ENTER_CRITICAL_ISR();
  482. ret = (xTaskWoken == pdTRUE);
  483. } else {
  484. HCD_EXIT_CRITICAL();
  485. xTaskNotifyGive(pipe->task_waiting_pipe_notif);
  486. HCD_ENTER_CRITICAL();
  487. ret = false;
  488. }
  489. return ret;
  490. }
  491. // ----------------- Interrupt Handlers --------------------
  492. /**
  493. * @brief Handle a HAL port interrupt and obtain the corresponding port event
  494. *
  495. * @param[in] port Port object
  496. * @param[in] hal_port_event The HAL port event
  497. * @param[out] yield Set to true if a yield is required as a result of handling the interrupt
  498. * @return hcd_port_event_t Returns a port event, or HCD_PORT_EVENT_NONE if no port event occurred
  499. */
  500. static hcd_port_event_t _intr_hdlr_hprt(port_t *port, usbh_hal_port_event_t hal_port_event, bool *yield)
  501. {
  502. hcd_port_event_t port_event = HCD_PORT_EVENT_NONE;
  503. switch (hal_port_event) {
  504. case USBH_HAL_PORT_EVENT_CONN: {
  505. //Don't update state immediately, we still need to debounce.
  506. port_event = HCD_PORT_EVENT_CONNECTION;
  507. break;
  508. }
  509. case USBH_HAL_PORT_EVENT_DISCONN: {
  510. if (port->flags.conn_devc_ena) {
  511. //The port was previously enabled, so this is a sudden disconenction
  512. port->state = HCD_PORT_STATE_RECOVERY;
  513. port_event = HCD_PORT_EVENT_SUDDEN_DISCONN;
  514. } else {
  515. //For normal disconnections, don't update state immediately as we still need to debounce.
  516. port_event = HCD_PORT_EVENT_DISCONNECTION;
  517. }
  518. port->flags.conn_devc_ena = 0;
  519. break;
  520. }
  521. case USBH_HAL_PORT_EVENT_ENABLED: {
  522. usbh_hal_port_enable(port->hal); //Initialize remaining host port registers
  523. port->speed = usbh_hal_port_get_conn_speed(port->hal);
  524. port->state = HCD_PORT_STATE_ENABLED;
  525. port->flags.conn_devc_ena = 1;
  526. //This was triggered by a command, so no event needs to be propagated.
  527. break;
  528. }
  529. case USBH_HAL_PORT_EVENT_DISABLED: {
  530. port->flags.conn_devc_ena = 0;
  531. //Disabled could be due to a disable request or reset request, or due to a port error
  532. if (port->state != HCD_PORT_STATE_RESETTING) { //Ignore the disable event if it's due to a reset request
  533. port->state = HCD_PORT_STATE_DISABLED;
  534. if (port->flags.disable_requested) {
  535. //Disabled by request (i.e. by port command). Generate an internal event
  536. port->flags.disable_requested = 0;
  537. *yield |= _internal_port_event_notify_from_isr(port);
  538. } else {
  539. //Disabled due to a port error
  540. port_event = HCD_PORT_EVENT_ERROR;
  541. }
  542. }
  543. break;
  544. }
  545. case USBH_HAL_PORT_EVENT_OVRCUR:
  546. case USBH_HAL_PORT_EVENT_OVRCUR_CLR: { //Could occur if a quick overcurrent then clear happens
  547. if (port->state != HCD_PORT_STATE_NOT_POWERED) {
  548. //We need to power OFF the port to protect it
  549. usbh_hal_port_toggle_power(port->hal, false);
  550. port->state = HCD_PORT_STATE_NOT_POWERED;
  551. port_event = HCD_PORT_EVENT_OVERCURRENT;
  552. }
  553. port->flags.conn_devc_ena = 0;
  554. break;
  555. }
  556. default: {
  557. abort();
  558. break;
  559. }
  560. }
  561. return port_event;
  562. }
  563. /**
  564. * @brief Handles a HAL channel interrupt
  565. *
  566. * This function should be called on a HAL channel when it has an interrupt. Most HAL channel events will correspond to
  567. * to a pipe event, but not always. This function will store the pipe event and return a pipe object pointer if a pipe
  568. * event occurred, or return NULL otherwise.
  569. *
  570. * @param[in] chan_obj Pointer to HAL channel object with interrupt
  571. * @param[out] yield Set to true if a yield is required as a result of handling the interrupt
  572. * @return hcd_pipe_event_t The pipe event
  573. */
  574. static hcd_pipe_event_t _intr_hdlr_chan(pipe_t *pipe, usbh_hal_chan_t *chan_obj, bool *yield)
  575. {
  576. usbh_hal_chan_event_t chan_event = usbh_hal_chan_decode_intr(chan_obj);
  577. hcd_pipe_event_t event = HCD_PIPE_EVENT_NONE;
  578. //Check the the pipe's port still has a connected and enabled device before processing the interrupt
  579. if (!pipe->port->flags.conn_devc_ena) {
  580. return event; //Treat as a no event.
  581. }
  582. switch (chan_event) {
  583. case USBH_HAL_CHAN_EVENT_SLOT_DONE: {
  584. //An entire transfer descriptor list has completed execution
  585. pipe->last_event = HCD_PIPE_EVENT_XFER_REQ_DONE;
  586. event = HCD_PIPE_EVENT_XFER_REQ_DONE;
  587. _xfer_req_parse(pipe, false); //Parse results of transfer request
  588. _pipe_ret_cur_xfer_req(pipe); //Return the transfer request to the pipe's done tailq
  589. if (pipe->flags.waiting_xfer_done) {
  590. //A port/pipe command is waiting for this pipe to complete its transfer. So don't load the next transfer
  591. pipe->flags.waiting_xfer_done = 0;
  592. if (pipe->port->flags.waiting_all_pipes_pause) {
  593. //Port command is waiting for all pipes to be paused
  594. pipe->flags.paused = 1;
  595. pipe->port->flags.num_pipes_waiting_pause--;
  596. if (pipe->port->flags.num_pipes_waiting_pause == 0) {
  597. //All pipes have finished pausing, Notify the blocked port command
  598. pipe->port->flags.waiting_all_pipes_pause = 0;
  599. *yield |= _internal_port_event_notify_from_isr(pipe->port);
  600. }
  601. } else {
  602. //Pipe command is waiting for transfer to complete
  603. *yield |= _internal_pipe_event_notify(pipe, true);
  604. }
  605. } else if (_pipe_get_next_xfer_req(pipe)) {
  606. //Fill the descriptor list with the transfer request and start the transfer
  607. _xfer_req_fill(pipe);
  608. usbh_hal_chan_activate(chan_obj, 0); //Start with the first descriptor
  609. }
  610. break;
  611. }
  612. case USBH_HAL_CHAN_EVENT_SLOT_HALT: {
  613. //A transfer descriptor list has partially completed. This currently only happens on control pipes
  614. assert(pipe->ep_char.type == USB_XFER_TYPE_CTRL);
  615. _xfer_req_continue(pipe); //Continue the transfer request.
  616. //We are continuing a transfer, so no event has occurred
  617. break;
  618. }
  619. case USBH_HAL_CHAN_EVENT_ERROR: {
  620. //Get and store the pipe error event
  621. usbh_hal_chan_error_t chan_error = usbh_hal_chan_get_error(chan_obj);
  622. usbh_hal_chan_clear_error(chan_obj);
  623. pipe->last_event = pipe_decode_error_event(chan_error);
  624. event = pipe->last_event;
  625. pipe->state = HCD_PIPE_STATE_HALTED;
  626. //Parse the failed transfer request and update it's IRP status
  627. _xfer_req_parse(pipe, true);
  628. _pipe_ret_cur_xfer_req(pipe); //Return the transfer request to the pipe's done tailq
  629. break;
  630. }
  631. case USBH_HAL_CHAN_EVENT_HALT_REQ: //We currently don't halt request so this event should never occur
  632. default:
  633. abort();
  634. break;
  635. }
  636. return event;
  637. }
  638. /**
  639. * @brief Main interrupt handler
  640. *
  641. * - Handle all HPRT (Host Port) related interrupts first as they may change the
  642. * state of the driver (e.g., a disconnect event)
  643. * - If any channels (pipes) have pending interrupts, handle them one by one
  644. * - The HCD has not blocking functions, so the user's ISR callback is run to
  645. * allow the users to send whatever OS primitives they need.
  646. * @param arg
  647. */
  648. static void intr_hdlr_main(void *arg)
  649. {
  650. port_t *port = (port_t *)arg;
  651. bool yield = false;
  652. HCD_ENTER_CRITICAL_ISR();
  653. usbh_hal_port_event_t hal_port_evt = usbh_hal_decode_intr(port->hal);
  654. if (hal_port_evt == USBH_HAL_PORT_EVENT_CHAN) {
  655. //Channel event. Cycle through each pending channel
  656. usbh_hal_chan_t *chan_obj = usbh_hal_get_chan_pending_intr(port->hal);
  657. while (chan_obj != NULL) {
  658. pipe_t *pipe = (pipe_t *)usbh_hal_chan_get_context(chan_obj);
  659. hcd_pipe_event_t event = _intr_hdlr_chan(pipe, chan_obj, &yield);
  660. //Run callback if a pipe event has occurred and the pipe also has a callback
  661. if (event != HCD_PIPE_EVENT_NONE && pipe->callback != NULL) {
  662. HCD_EXIT_CRITICAL_ISR();
  663. yield |= pipe->callback((hcd_pipe_handle_t)pipe, event, pipe->callback_arg, true);
  664. HCD_ENTER_CRITICAL_ISR();
  665. }
  666. //Check for more channels with pending interrupts. Returns NULL if there are no more
  667. chan_obj = usbh_hal_get_chan_pending_intr(port->hal);
  668. }
  669. } else if (hal_port_evt != USBH_HAL_PORT_EVENT_NONE) { //Port event
  670. hcd_port_event_t port_event = _intr_hdlr_hprt(port, hal_port_evt, &yield);
  671. if (port_event != HCD_PORT_EVENT_NONE) {
  672. port->last_event = port_event;
  673. port->flags.event_pending = 1;
  674. if (port->callback != NULL) {
  675. HCD_EXIT_CRITICAL_ISR();
  676. yield |= port->callback((hcd_port_handle_t)port, port_event, port->callback_arg, true);
  677. HCD_ENTER_CRITICAL_ISR();
  678. }
  679. }
  680. }
  681. HCD_EXIT_CRITICAL_ISR();
  682. if (yield) {
  683. portYIELD_FROM_ISR();
  684. }
  685. }
  686. // --------------------------------------------- Host Controller Driver ------------------------------------------------
  687. static port_t *port_obj_alloc(void)
  688. {
  689. port_t *port = calloc(1, sizeof(port_t));
  690. usbh_hal_context_t *hal = malloc(sizeof(usbh_hal_context_t));
  691. SemaphoreHandle_t port_mux = xSemaphoreCreateMutex();
  692. if (port == NULL || hal == NULL || port_mux == NULL) {
  693. free(port);
  694. free(hal);
  695. if (port_mux != NULL) {
  696. vSemaphoreDelete(port_mux);
  697. }
  698. return NULL;
  699. }
  700. port->hal = hal;
  701. port->port_mux = port_mux;
  702. return port;
  703. }
  704. static void port_obj_free(port_t *port)
  705. {
  706. if (port == NULL) {
  707. return;
  708. }
  709. vSemaphoreDelete(port->port_mux);
  710. free(port->hal);
  711. free(port);
  712. }
  713. // ----------------------- Public --------------------------
  714. esp_err_t hcd_install(const hcd_config_t *config)
  715. {
  716. HCD_ENTER_CRITICAL();
  717. HCD_CHECK_FROM_CRIT(s_hcd_obj == NULL, ESP_ERR_INVALID_STATE);
  718. HCD_EXIT_CRITICAL();
  719. esp_err_t err_ret;
  720. //Allocate memory and resources for driver object and all port objects
  721. hcd_obj_t *p_hcd_obj_dmy = calloc(1, sizeof(hcd_obj_t));
  722. if (p_hcd_obj_dmy == NULL) {
  723. return ESP_ERR_NO_MEM;
  724. }
  725. //Allocate resources for each port (there's only one)
  726. p_hcd_obj_dmy->port_obj = port_obj_alloc();
  727. esp_err_t intr_alloc_ret = esp_intr_alloc(ETS_USB_INTR_SOURCE,
  728. config->intr_flags | ESP_INTR_FLAG_INTRDISABLED, //The interruupt must be disabled until the port is initialized
  729. intr_hdlr_main,
  730. (void *)p_hcd_obj_dmy->port_obj,
  731. &p_hcd_obj_dmy->isr_hdl);
  732. if (p_hcd_obj_dmy->port_obj == NULL) {
  733. err_ret = ESP_ERR_NO_MEM;
  734. }
  735. if (intr_alloc_ret != ESP_OK) {
  736. err_ret = intr_alloc_ret;
  737. goto err;
  738. }
  739. HCD_ENTER_CRITICAL();
  740. if (s_hcd_obj != NULL) {
  741. HCD_EXIT_CRITICAL();
  742. err_ret = ESP_ERR_INVALID_STATE;
  743. goto err;
  744. }
  745. s_hcd_obj = p_hcd_obj_dmy;
  746. //Set HW prereqs for each port (there's only one)
  747. periph_module_enable(PERIPH_USB_MODULE);
  748. periph_module_reset(PERIPH_USB_MODULE);
  749. /*
  750. Configure GPIOS for Host mode operation using internal PHY
  751. - Forces ID to GND for A side
  752. - Forces B Valid to GND as we are A side host
  753. - Forces VBUS Valid to HIGH
  754. - Froces A Valid to HIGH
  755. */
  756. esp_rom_gpio_connect_in_signal(GPIO_MATRIX_CONST_ZERO_INPUT, USB_OTG_IDDIG_IN_IDX, false);
  757. esp_rom_gpio_connect_in_signal(GPIO_MATRIX_CONST_ZERO_INPUT, USB_SRP_BVALID_IN_IDX, false);
  758. esp_rom_gpio_connect_in_signal(GPIO_MATRIX_CONST_ONE_INPUT, USB_OTG_VBUSVALID_IN_IDX, false);
  759. esp_rom_gpio_connect_in_signal(GPIO_MATRIX_CONST_ONE_INPUT, USB_OTG_AVALID_IN_IDX, false);
  760. HCD_EXIT_CRITICAL();
  761. return ESP_OK;
  762. err:
  763. if (intr_alloc_ret == ESP_OK) {
  764. esp_intr_free(p_hcd_obj_dmy->isr_hdl);
  765. }
  766. port_obj_free(p_hcd_obj_dmy->port_obj);
  767. free(p_hcd_obj_dmy);
  768. return err_ret;
  769. }
  770. esp_err_t hcd_uninstall(void)
  771. {
  772. HCD_ENTER_CRITICAL();
  773. //Check that all ports have been disabled (theres only one)
  774. if (s_hcd_obj == NULL || s_hcd_obj->port_obj->initialized) {
  775. HCD_EXIT_CRITICAL();
  776. return ESP_ERR_INVALID_STATE;
  777. }
  778. periph_module_disable(PERIPH_USB_MODULE);
  779. hcd_obj_t *p_hcd_obj_dmy = s_hcd_obj;
  780. s_hcd_obj = NULL;
  781. HCD_EXIT_CRITICAL();
  782. //Free resources
  783. port_obj_free(p_hcd_obj_dmy->port_obj);
  784. esp_intr_free(p_hcd_obj_dmy->isr_hdl);
  785. free(p_hcd_obj_dmy);
  786. return ESP_OK;
  787. }
  788. // ------------------------------------------------------ Port ---------------------------------------------------------
  789. // ----------------------- Private -------------------------
/**
 * @brief Mark all of a port's pipes as invalid and flush their transfer requests
 *
 * Called from within a critical section once the port is no longer valid (e.g., sudden
 * disconnection, port error, overcurrent). Every pipe is put into HCD_PIPE_STATE_INVALID,
 * any inflight/pending transfer requests are moved to the done queue, blocked tasks are
 * unblocked, and each pipe's callback is invoked with HCD_PIPE_EVENT_INVALID.
 *
 * @note The critical section is temporarily exited to run pipe callbacks (non-ISR context).
 *       NOTE(review): the TAILQs are traversed across those critical-section gaps — assumes
 *       no other task mutates the pipe lists while the port is invalid; confirm with callers.
 *
 * @param port Port object whose pipes should be invalidated
 */
static void _port_invalidate_all_pipes(port_t *port)
{
    //This function should only be called when the port is invalid
    assert(!port->flags.conn_devc_ena);
    pipe_t *pipe;
    //Process all pipes that have queued transfer requests
    TAILQ_FOREACH(pipe, &port->pipes_queued_tailq, tailq_entry) {
        //Mark the pipe as invalid and set an invalid event
        pipe->state = HCD_PIPE_STATE_INVALID;
        pipe->last_event = HCD_PIPE_EVENT_INVALID;
        //If the pipe had an inflight transfer, parse and return it
        if (pipe->inflight_xfer_req != NULL) {
            _xfer_req_parse(pipe, true);  //Parse as a failed transfer
            _pipe_ret_cur_xfer_req(pipe); //Move it onto the pipe's done tailq
        }
        //Retire any remaining transfer requests (not self-initiated, so their IRPs get NO_DEVICE status)
        _pipe_retire(pipe, false);
        if (pipe->task_waiting_pipe_notif != NULL) {
            //Unblock the thread/task waiting for a notification from the pipe as the pipe is no longer valid.
            _internal_pipe_event_notify(pipe, false);
        }
        if (pipe->callback != NULL) {
            //Run the callback outside the critical section
            HCD_EXIT_CRITICAL();
            (void) pipe->callback((hcd_pipe_handle_t)pipe, HCD_PIPE_EVENT_INVALID, pipe->callback_arg, false);
            HCD_ENTER_CRITICAL();
        }
    }
    //Process all idle pipes
    TAILQ_FOREACH(pipe, &port->pipes_idle_tailq, tailq_entry) {
        //Mark pipe as invalid and call its callback
        pipe->state = HCD_PIPE_STATE_INVALID;
        pipe->last_event = HCD_PIPE_EVENT_INVALID;
        if (pipe->callback != NULL) {
            HCD_EXIT_CRITICAL();
            (void) pipe->callback((hcd_pipe_handle_t)pipe, HCD_PIPE_EVENT_INVALID, pipe->callback_arg, false);
            HCD_ENTER_CRITICAL();
        }
    }
}
  829. static bool _port_pause_all_pipes(port_t *port)
  830. {
  831. assert(port->state == HCD_PORT_STATE_ENABLED);
  832. pipe_t *pipe;
  833. int num_pipes_waiting_done = 0;
  834. //Process all pipes that have queued transfer requests
  835. TAILQ_FOREACH(pipe, &port->pipes_queued_tailq, tailq_entry) {
  836. if (pipe->inflight_xfer_req != NULL) {
  837. //Pipe has an inflight transfer. Indicate to the pipe we are waiting the transfer to complete
  838. pipe->flags.waiting_xfer_done = 1;
  839. num_pipes_waiting_done++;
  840. } else {
  841. //No inflight transfer so no need to wait
  842. pipe->flags.paused = 1;
  843. }
  844. }
  845. //Process all idle pipes. They don't have queue transfer so just mark them as paused
  846. TAILQ_FOREACH(pipe, &port->pipes_idle_tailq, tailq_entry) {
  847. pipe->flags.paused = 1;
  848. }
  849. if (num_pipes_waiting_done > 0) {
  850. //Indicate we need to wait for one or more pipes to complete their transfers
  851. port->flags.num_pipes_waiting_pause = num_pipes_waiting_done;
  852. port->flags.waiting_all_pipes_pause = 1;
  853. return false;
  854. }
  855. return true;
  856. }
  857. static void _port_unpause_all_pipes(port_t *port)
  858. {
  859. assert(port->state == HCD_PORT_STATE_ENABLED);
  860. pipe_t *pipe;
  861. //Process all idle pipes. They don't have queue transfer so just mark them as un-paused
  862. TAILQ_FOREACH(pipe, &port->pipes_idle_tailq, tailq_entry) {
  863. pipe->flags.paused = 0;
  864. }
  865. //Process all pipes that have queued transfer requests
  866. TAILQ_FOREACH(pipe, &port->pipes_queued_tailq, tailq_entry) {
  867. pipe->flags.paused = 0;
  868. //If the pipe has more pending transfer request, start them.
  869. if (_pipe_get_next_xfer_req(pipe)) {
  870. _xfer_req_fill(pipe);
  871. usbh_hal_chan_activate(pipe->chan_obj, 0);
  872. }
  873. }
  874. }
  875. static bool _port_bus_reset(port_t *port)
  876. {
  877. assert(port->state == HCD_PORT_STATE_ENABLED || port->state == HCD_PORT_STATE_DISABLED);
  878. //Put and hold the bus in the reset state. If the port was previously enabled, a disabled event will occur after this
  879. port->state = HCD_PORT_STATE_RESETTING;
  880. usbh_hal_port_toggle_reset(port->hal, true);
  881. HCD_EXIT_CRITICAL();
  882. vTaskDelay(pdMS_TO_TICKS(RESET_HOLD_MS));
  883. HCD_ENTER_CRITICAL();
  884. if (port->state != HCD_PORT_STATE_RESETTING) {
  885. //The port state has unexpectedly changed
  886. goto bailout;
  887. }
  888. //Return the bus to the idle state and hold it for the required reset recovery time. Port enabled event should occur
  889. usbh_hal_port_toggle_reset(port->hal, false);
  890. HCD_EXIT_CRITICAL();
  891. vTaskDelay(pdMS_TO_TICKS(RESET_RECOVERY_MS));
  892. HCD_ENTER_CRITICAL();
  893. if (port->state != HCD_PORT_STATE_ENABLED || !port->flags.conn_devc_ena) {
  894. //The port state has unexpectedly changed
  895. goto bailout;
  896. }
  897. return true;
  898. bailout:
  899. return false;
  900. }
  901. static bool _port_bus_suspend(port_t *port)
  902. {
  903. assert(port->state == HCD_PORT_STATE_ENABLED);
  904. //Pause all pipes before suspending the bus
  905. if (!_port_pause_all_pipes(port)) {
  906. //Need to wait for some pipes to pause. Wait for notification from ISR
  907. _internal_port_event_wait(port);
  908. if (port->state != HCD_PORT_STATE_ENABLED || !port->flags.conn_devc_ena) {
  909. //Port state unexpectedley changed
  910. goto bailout;
  911. }
  912. }
  913. //All pipes are guaranteed paused at this point. Proceed to suspend the port
  914. usbh_hal_port_suspend(port->hal);
  915. port->state = HCD_PORT_STATE_SUSPENDED;
  916. return true;
  917. bailout:
  918. return false;
  919. }
  920. static bool _port_bus_resume(port_t *port)
  921. {
  922. assert(port->state == HCD_PORT_STATE_SUSPENDED);
  923. //Put and hold the bus in the K state.
  924. usbh_hal_port_toggle_resume(port->hal, true);
  925. port->state = HCD_PORT_STATE_RESUMING;
  926. HCD_EXIT_CRITICAL();
  927. vTaskDelay(pdMS_TO_TICKS(RESUME_HOLD_MS));
  928. HCD_ENTER_CRITICAL();
  929. //Return and hold the bus to the J state (as port of the LS EOP)
  930. usbh_hal_port_toggle_resume(port->hal, false);
  931. if (port->state != HCD_PORT_STATE_RESUMING || !port->flags.conn_devc_ena) {
  932. //Port state unexpectedley changed
  933. goto bailout;
  934. }
  935. HCD_EXIT_CRITICAL();
  936. vTaskDelay(pdMS_TO_TICKS(RESUME_RECOVERY_MS));
  937. HCD_ENTER_CRITICAL();
  938. if (port->state != HCD_PORT_STATE_RESUMING || !port->flags.conn_devc_ena) {
  939. //Port state unexpectedley changed
  940. goto bailout;
  941. }
  942. port->state = HCD_PORT_STATE_ENABLED;
  943. _port_unpause_all_pipes(port);
  944. return true;
  945. bailout:
  946. return false;
  947. }
  948. static bool _port_disable(port_t *port)
  949. {
  950. assert(port->state == HCD_PORT_STATE_ENABLED || port->state == HCD_PORT_STATE_SUSPENDED);
  951. if (port->state == HCD_PORT_STATE_ENABLED) {
  952. //There may be pipes that are still transferring, so pause them.
  953. if (!_port_pause_all_pipes(port)) {
  954. //Need to wait for some pipes to pause. Wait for notification from ISR
  955. _internal_port_event_wait(port);
  956. if (port->state != HCD_PORT_STATE_ENABLED || !port->flags.conn_devc_ena) {
  957. //Port state unexpectedley changed
  958. goto bailout;
  959. }
  960. }
  961. }
  962. //All pipes are guaranteed paused at this point. Proceed to suspend the port. This should trigger an internal event
  963. port->flags.disable_requested = 1;
  964. usbh_hal_port_disable(port->hal);
  965. _internal_port_event_wait(port);
  966. if (port->state != HCD_PORT_STATE_DISABLED) {
  967. //Port state unexpectedley changed
  968. goto bailout;
  969. }
  970. _port_invalidate_all_pipes(port);
  971. return true;
  972. bailout:
  973. return false;
  974. }
  975. static bool _port_debounce(port_t *port)
  976. {
  977. if (port->state == HCD_PORT_STATE_NOT_POWERED) {
  978. //Disconnect event due to power off, no need to debounce or update port state.
  979. return false;
  980. }
  981. HCD_EXIT_CRITICAL();
  982. vTaskDelay(pdMS_TO_TICKS(DEBOUNCE_DELAY_MS));
  983. HCD_ENTER_CRITICAL();
  984. //Check the post-debounce state of the bus (i.e., whether it's actually connected/disconnected)
  985. bool is_connected = usbh_hal_port_check_if_connected(port->hal);
  986. if (is_connected) {
  987. port->state = HCD_PORT_STATE_DISABLED;
  988. } else {
  989. port->state = HCD_PORT_STATE_DISCONNECTED;
  990. }
  991. //Disable debounce lock
  992. usbh_hal_disable_debounce_lock(port->hal);
  993. return is_connected;
  994. }
  995. // ----------------------- Public --------------------------
  996. esp_err_t hcd_port_init(int port_number, hcd_port_config_t *port_config, hcd_port_handle_t *port_hdl)
  997. {
  998. HCD_CHECK(port_number > 0 && port_config != NULL && port_hdl != NULL, ESP_ERR_INVALID_ARG);
  999. HCD_CHECK(port_number <= NUM_PORTS, ESP_ERR_NOT_FOUND);
  1000. HCD_ENTER_CRITICAL();
  1001. HCD_CHECK_FROM_CRIT(s_hcd_obj != NULL && !s_hcd_obj->port_obj->initialized, ESP_ERR_INVALID_STATE);
  1002. //Port object memory and resources (such as mutex) already be allocated. Just need to initialize necessary fields only
  1003. port_t *port_obj = s_hcd_obj->port_obj;
  1004. TAILQ_INIT(&port_obj->pipes_idle_tailq);
  1005. TAILQ_INIT(&port_obj->pipes_queued_tailq);
  1006. port_obj->state = HCD_PORT_STATE_NOT_POWERED;
  1007. port_obj->last_event = HCD_PORT_EVENT_NONE;
  1008. port_obj->callback = port_config->callback;
  1009. port_obj->callback_arg = port_config->callback_arg;
  1010. port_obj->context = port_config->context;
  1011. usbh_hal_init(port_obj->hal);
  1012. port_obj->initialized = true;
  1013. esp_intr_enable(s_hcd_obj->isr_hdl);
  1014. *port_hdl = (hcd_port_handle_t)port_obj;
  1015. HCD_EXIT_CRITICAL();
  1016. vTaskDelay(pdMS_TO_TICKS(INIT_DELAY_MS)); //Need a short delay before host mode takes effect
  1017. return ESP_OK;
  1018. }
  1019. esp_err_t hcd_port_deinit(hcd_port_handle_t port_hdl)
  1020. {
  1021. port_t *port = (port_t *)port_hdl;
  1022. HCD_ENTER_CRITICAL();
  1023. HCD_CHECK_FROM_CRIT(s_hcd_obj != NULL && port->initialized
  1024. && port->num_pipes_idle == 0 && port->num_pipes_queued == 0
  1025. && (port->state == HCD_PORT_STATE_NOT_POWERED || port->state == HCD_PORT_STATE_RECOVERY)
  1026. && port->flags.val == 0 && port->task_waiting_port_notif == NULL,
  1027. ESP_ERR_INVALID_STATE);
  1028. port->initialized = false;
  1029. esp_intr_disable(s_hcd_obj->isr_hdl);
  1030. usbh_hal_deinit(port->hal);
  1031. HCD_EXIT_CRITICAL();
  1032. return ESP_OK;
  1033. }
/**
 * @brief Execute a command on the port
 *
 * Serialized by the port mutex. Commands are rejected (ESP_ERR_INVALID_STATE) while a port event
 * is pending — events must be handled via hcd_port_handle_event() first — or when the port is in
 * a state the command does not apply to.
 *
 * @note Several commands (reset/suspend/resume/disable) call helpers that block; those helpers
 *       release and re-take the critical section internally while delaying/waiting.
 *
 * @param port_hdl Port handle
 * @param command Command to execute
 * @return ESP_OK on success, ESP_ERR_INVALID_STATE if the command is not allowed in the current
 *         state, ESP_ERR_INVALID_RESPONSE if the port state changed unexpectedly mid-command
 */
esp_err_t hcd_port_command(hcd_port_handle_t port_hdl, hcd_port_cmd_t command)
{
    esp_err_t ret = ESP_ERR_INVALID_STATE;
    port_t *port = (port_t *)port_hdl;
    xSemaphoreTake(port->port_mux, portMAX_DELAY);
    HCD_ENTER_CRITICAL();
    if (port->initialized && !port->flags.event_pending) { //Port events need to be handled first before issuing a command
        port->flags.cmd_processing = 1;
        switch (command) {
        case HCD_PORT_CMD_POWER_ON: {
            //Port can only be powered on if currently unpowered
            if (port->state == HCD_PORT_STATE_NOT_POWERED) {
                port->state = HCD_PORT_STATE_DISCONNECTED;
                usbh_hal_port_start(port->hal);
                usbh_hal_port_toggle_power(port->hal, true);
                ret = ESP_OK;
            }
            break;
        }
        case HCD_PORT_CMD_POWER_OFF: {
            //Port can only be unpowered if already powered
            if (port->state != HCD_PORT_STATE_NOT_POWERED) {
                port->state = HCD_PORT_STATE_NOT_POWERED;
                usbh_hal_port_stop(port->hal);
                usbh_hal_port_toggle_power(port->hal, false);
                //If a device is currently connected, this should trigger a disconnect event
                ret = ESP_OK;
            }
            break;
        }
        case HCD_PORT_CMD_RESET: {
            //Port can only do a reset when it is in the enabled or disabled states (in case of new connection)
            if (port->state == HCD_PORT_STATE_ENABLED || port->state == HCD_PORT_STATE_DISABLED) {
                ret = (_port_bus_reset(port)) ? ESP_OK : ESP_ERR_INVALID_RESPONSE;
            }
            break;
        }
        case HCD_PORT_CMD_SUSPEND: {
            //Port can only be suspended if already in the enabled state
            if (port->state == HCD_PORT_STATE_ENABLED) {
                ret = (_port_bus_suspend(port)) ? ESP_OK : ESP_ERR_INVALID_RESPONSE;
            }
            break;
        }
        case HCD_PORT_CMD_RESUME: {
            //Port can only be resumed if already suspended
            if (port->state == HCD_PORT_STATE_SUSPENDED) {
                ret = (_port_bus_resume(port)) ? ESP_OK : ESP_ERR_INVALID_RESPONSE;
            }
            break;
        }
        case HCD_PORT_CMD_DISABLE: {
            //Can only disable the port when already enabled or suspended
            if (port->state == HCD_PORT_STATE_ENABLED || port->state == HCD_PORT_STATE_SUSPENDED) {
                ret = (_port_disable(port)) ? ESP_OK : ESP_ERR_INVALID_RESPONSE;
            }
            break;
        }
        }
        port->flags.cmd_processing = 0;
    }
    HCD_EXIT_CRITICAL();
    xSemaphoreGive(port->port_mux);
    return ret;
}
  1099. hcd_port_state_t hcd_port_get_state(hcd_port_handle_t port_hdl)
  1100. {
  1101. port_t *port = (port_t *)port_hdl;
  1102. hcd_port_state_t ret;
  1103. HCD_ENTER_CRITICAL();
  1104. ret = port->state;
  1105. HCD_EXIT_CRITICAL();
  1106. return ret;
  1107. }
  1108. esp_err_t hcd_port_get_speed(hcd_port_handle_t port_hdl, usb_speed_t *speed)
  1109. {
  1110. port_t *port = (port_t *)port_hdl;
  1111. HCD_CHECK(speed != NULL, ESP_ERR_INVALID_ARG);
  1112. HCD_ENTER_CRITICAL();
  1113. //Device speed is only valid if there is a resetted device connected to the port
  1114. HCD_CHECK_FROM_CRIT(port->flags.conn_devc_ena, ESP_ERR_INVALID_STATE);
  1115. *speed = usbh_hal_port_get_conn_speed(port->hal);
  1116. HCD_EXIT_CRITICAL();
  1117. return ESP_OK;
  1118. }
  1119. hcd_port_event_t hcd_port_handle_event(hcd_port_handle_t port_hdl)
  1120. {
  1121. port_t *port = (port_t *)port_hdl;
  1122. hcd_port_event_t ret = HCD_PORT_EVENT_NONE;
  1123. xSemaphoreTake(port->port_mux, portMAX_DELAY);
  1124. HCD_ENTER_CRITICAL();
  1125. if (port->initialized && port->flags.event_pending) {
  1126. port->flags.event_pending = 0;
  1127. port->flags.event_processing = 1;
  1128. ret = port->last_event;
  1129. switch (ret) {
  1130. case HCD_PORT_EVENT_CONNECTION: {
  1131. if (_port_debounce(port)) {
  1132. ret = HCD_PORT_EVENT_CONNECTION;
  1133. }
  1134. break;
  1135. }
  1136. case HCD_PORT_EVENT_DISCONNECTION:
  1137. if (_port_debounce(port)) {
  1138. //A device is still connected, so it was just a debounce
  1139. port->state = HCD_PORT_STATE_DISABLED;
  1140. ret = HCD_PORT_EVENT_NONE;
  1141. } else {
  1142. //No device conencted after debounce delay. This is an actual disconenction
  1143. port->state = HCD_PORT_STATE_DISCONNECTED;
  1144. ret = HCD_PORT_EVENT_DISCONNECTION;
  1145. }
  1146. break;
  1147. case HCD_PORT_EVENT_ERROR:
  1148. case HCD_PORT_EVENT_OVERCURRENT:
  1149. case HCD_PORT_EVENT_SUDDEN_DISCONN: {
  1150. _port_invalidate_all_pipes(port);
  1151. break;
  1152. }
  1153. default: {
  1154. break;
  1155. }
  1156. }
  1157. port->flags.event_processing = 0;
  1158. } else {
  1159. ret = HCD_PORT_EVENT_NONE;
  1160. }
  1161. HCD_EXIT_CRITICAL();
  1162. xSemaphoreGive(port->port_mux);
  1163. return ret;
  1164. }
  1165. esp_err_t hcd_port_recover(hcd_port_handle_t port_hdl)
  1166. {
  1167. port_t *port = (port_t *)port_hdl;
  1168. HCD_ENTER_CRITICAL();
  1169. HCD_CHECK_FROM_CRIT(s_hcd_obj != NULL && port->initialized && port->state == HCD_PORT_STATE_RECOVERY
  1170. && port->num_pipes_idle == 0 && port->num_pipes_queued == 0
  1171. && port->flags.val == 0 && port->task_waiting_port_notif == NULL,
  1172. ESP_ERR_INVALID_STATE);
  1173. //We are about to do a soft reset on the peripheral. Disable the peripheral throughout
  1174. esp_intr_disable(s_hcd_obj->isr_hdl);
  1175. usbh_hal_core_soft_reset(port->hal);
  1176. port->state = HCD_PORT_STATE_NOT_POWERED;
  1177. port->last_event = HCD_PORT_EVENT_NONE;
  1178. port->flags.val = 0;
  1179. esp_intr_enable(s_hcd_obj->isr_hdl);
  1180. HCD_EXIT_CRITICAL();
  1181. return ESP_OK;
  1182. }
  1183. void *hcd_port_get_ctx(hcd_port_handle_t port_hdl)
  1184. {
  1185. port_t *port = (port_t *)port_hdl;
  1186. void *ret;
  1187. HCD_ENTER_CRITICAL();
  1188. ret = port->context;
  1189. HCD_EXIT_CRITICAL();
  1190. return ret;
  1191. }
  1192. // --------------------------------------------------- HCD Pipes -------------------------------------------------------
  1193. // ----------------------- Private -------------------------
  1194. static bool _pipe_get_next_xfer_req(pipe_t *pipe)
  1195. {
  1196. assert(pipe->inflight_xfer_req == NULL);
  1197. bool ret;
  1198. //This function assigns the next pending transfer request to the inflight_xfer_req
  1199. if (pipe->num_xfer_req_pending > 0) {
  1200. //Set inflight_xfer_req to the next pending transfer request
  1201. pipe->inflight_xfer_req = TAILQ_FIRST(&pipe->pend_xfer_req_tailq);
  1202. TAILQ_REMOVE(&pipe->pend_xfer_req_tailq, pipe->inflight_xfer_req, tailq_entry);
  1203. pipe->inflight_xfer_req->state = XFER_REQ_STATE_INFLIGHT;
  1204. pipe->num_xfer_req_pending--;
  1205. ret = true;
  1206. } else {
  1207. ret = false;
  1208. }
  1209. return ret;
  1210. }
  1211. static void _pipe_ret_cur_xfer_req(pipe_t *pipe)
  1212. {
  1213. assert(pipe->inflight_xfer_req != NULL);
  1214. //Add the transfer request to the pipe's done tailq
  1215. TAILQ_INSERT_TAIL(&pipe->done_xfer_req_tailq, pipe->inflight_xfer_req, tailq_entry);
  1216. pipe->inflight_xfer_req->state = XFER_REQ_STATE_DONE;
  1217. pipe->inflight_xfer_req = NULL;
  1218. pipe->num_xfer_req_done++;
  1219. }
  1220. static bool _pipe_wait_done(pipe_t *pipe)
  1221. {
  1222. //Check if there is a currently inflight transfer request
  1223. if (pipe->inflight_xfer_req != NULL) {
  1224. //Wait for pipe to complete its transfer
  1225. pipe->flags.waiting_xfer_done = 1;
  1226. _internal_pipe_event_wait(pipe);
  1227. if (pipe->state == HCD_PIPE_STATE_INVALID) {
  1228. //The pipe become invalid whilst waiting for its internal event
  1229. pipe->flags.waiting_xfer_done = 0; //Need to manually reset this bit in this case
  1230. return false;
  1231. }
  1232. bool chan_halted = usbh_hal_chan_slot_request_halt(pipe->chan_obj);
  1233. assert(chan_halted);
  1234. (void) chan_halted;
  1235. }
  1236. return true;
  1237. }
  1238. static void _pipe_retire(pipe_t *pipe, bool self_initiated)
  1239. {
  1240. //Cannot have any inflight transfer request
  1241. assert(pipe->inflight_xfer_req == NULL);
  1242. if (pipe->num_xfer_req_pending > 0) {
  1243. //Process all remaining pending transfer requests
  1244. xfer_req_t *xfer_req;
  1245. TAILQ_FOREACH(xfer_req, &pipe->pend_xfer_req_tailq, tailq_entry) {
  1246. xfer_req->state = XFER_REQ_STATE_DONE;
  1247. //If we are initiating the retire, mark the transfer request as cancelled
  1248. xfer_req->irp->status = (self_initiated) ? USB_TRANSFER_STATUS_CANCELLED : USB_TRANSFER_STATUS_NO_DEVICE;
  1249. }
  1250. //Concatenated pending tailq to the done tailq
  1251. TAILQ_CONCAT(&pipe->done_xfer_req_tailq, &pipe->pend_xfer_req_tailq, tailq_entry);
  1252. pipe->num_xfer_req_done += pipe->num_xfer_req_pending;
  1253. pipe->num_xfer_req_pending = 0;
  1254. }
  1255. }
  1256. static inline hcd_pipe_event_t pipe_decode_error_event(usbh_hal_chan_error_t chan_error)
  1257. {
  1258. hcd_pipe_event_t event = HCD_PIPE_EVENT_NONE;
  1259. switch (chan_error) {
  1260. case USBH_HAL_CHAN_ERROR_XCS_XACT:
  1261. event = HCD_PIPE_EVENT_ERROR_XFER;
  1262. break;
  1263. case USBH_HAL_CHAN_ERROR_BNA:
  1264. event = HCD_PIPE_EVENT_ERROR_XFER_NOT_AVAIL;
  1265. break;
  1266. case USBH_HAL_CHAN_ERROR_PKT_BBL:
  1267. event = HCD_PIPE_EVENT_ERROR_OVERFLOW;
  1268. break;
  1269. case USBH_HAL_CHAN_ERROR_STALL:
  1270. event = HCD_PIPE_EVENT_ERROR_STALL;
  1271. break;
  1272. }
  1273. return event;
  1274. }
  1275. // ----------------------- Public --------------------------
  1276. esp_err_t hcd_pipe_alloc(hcd_port_handle_t port_hdl, const hcd_pipe_config_t *pipe_config, hcd_pipe_handle_t *pipe_hdl)
  1277. {
  1278. HCD_CHECK(port_hdl != NULL && pipe_config != NULL && pipe_hdl != NULL, ESP_ERR_INVALID_ARG);
  1279. port_t *port = (port_t *)port_hdl;
  1280. HCD_ENTER_CRITICAL();
  1281. //Can only allocate a pipe if the targetted port is initialized and conencted to an enabled device
  1282. HCD_CHECK_FROM_CRIT(port->initialized && port->flags.conn_devc_ena, ESP_ERR_INVALID_STATE);
  1283. usb_speed_t port_speed = port->speed;
  1284. HCD_EXIT_CRITICAL();
  1285. //Cannot connect to a FS device if the port is LS
  1286. HCD_CHECK(port_speed == USB_SPEED_FULL || (port_speed == USB_SPEED_LOW && pipe_config->dev_speed == USB_SPEED_LOW), ESP_ERR_NOT_SUPPORTED);
  1287. esp_err_t ret = ESP_OK;
  1288. //Get the type of pipe to allocate
  1289. usb_xfer_type_t type;
  1290. bool is_default_pipe;
  1291. if (pipe_config->ep_desc == NULL) { //A NULL ep_desc indicates we are allocating a default pipe
  1292. type = USB_XFER_TYPE_CTRL;
  1293. is_default_pipe = true;
  1294. } else {
  1295. type = USB_DESC_EP_GET_XFERTYPE(pipe_config->ep_desc);
  1296. is_default_pipe = false;
  1297. }
  1298. size_t num_xfer_desc = 0;
  1299. switch (type) {
  1300. case USB_XFER_TYPE_CTRL: {
  1301. num_xfer_desc = XFER_LIST_LEN_CTRL * NUM_DESC_PER_XFER_CTRL;
  1302. break;
  1303. }
  1304. case USB_XFER_TYPE_BULK: {
  1305. if (pipe_config->dev_speed == USB_SPEED_LOW) {
  1306. return ESP_ERR_NOT_SUPPORTED; //Low speed devices do not support bulk transfers
  1307. }
  1308. num_xfer_desc = XFER_LIST_LEN_BULK * NUM_DESC_PER_XFER_BULK;
  1309. break;
  1310. }
  1311. default: {
  1312. //Isochronous and Interrupt pipes currently not supported
  1313. return ESP_ERR_NOT_SUPPORTED;
  1314. }
  1315. }
  1316. //Allocate the pipe resources
  1317. pipe_t *pipe = calloc(1, sizeof(pipe_t));
  1318. usbh_hal_chan_t *chan_obj = malloc(sizeof(usbh_hal_chan_t));
  1319. void *xfer_desc_list = heap_caps_aligned_calloc(USBH_HAL_DMA_MEM_ALIGN, num_xfer_desc, USBH_HAL_XFER_DESC_SIZE, MALLOC_CAP_DMA);
  1320. if (pipe == NULL|| chan_obj == NULL || xfer_desc_list == NULL) {
  1321. ret = ESP_ERR_NO_MEM;
  1322. goto err;
  1323. }
  1324. //Initialize pipe object
  1325. TAILQ_INIT(&pipe->pend_xfer_req_tailq);
  1326. TAILQ_INIT(&pipe->done_xfer_req_tailq);
  1327. pipe->port = port;
  1328. pipe->xfer_desc_list = xfer_desc_list;
  1329. pipe->flags.xfer_desc_list_len = num_xfer_desc;
  1330. pipe->chan_obj = chan_obj;
  1331. pipe->ep_char.type = type;
  1332. if (is_default_pipe) {
  1333. pipe->ep_char.bEndpointAddress = 0;
  1334. //Set the default pipe's MPS to the worst case MPS for the device's speed
  1335. pipe->ep_char.mps = (pipe_config->dev_speed == USB_SPEED_FULL) ? CTRL_EP_MAX_MPS_FS : CTRL_EP_MAX_MPS_LS;
  1336. } else {
  1337. pipe->ep_char.bEndpointAddress = pipe_config->ep_desc->bEndpointAddress;
  1338. pipe->ep_char.mps = pipe_config->ep_desc->wMaxPacketSize;
  1339. }
  1340. pipe->ep_char.dev_addr = pipe_config->dev_addr;
  1341. pipe->ep_char.ls_via_fs_hub = (port_speed == USB_SPEED_FULL && pipe_config->dev_speed == USB_SPEED_LOW);
  1342. pipe->state = HCD_PIPE_STATE_ACTIVE;
  1343. pipe->callback = pipe_config->callback;
  1344. pipe->callback_arg = pipe_config->callback_arg;
  1345. pipe->context = pipe_config->context;
  1346. //Allocate channel
  1347. HCD_ENTER_CRITICAL();
  1348. if (!port->initialized || !port->flags.conn_devc_ena) {
  1349. HCD_EXIT_CRITICAL();
  1350. ret = ESP_ERR_INVALID_STATE;
  1351. goto err;
  1352. }
  1353. bool chan_allocated = usbh_hal_chan_alloc(port->hal, pipe->chan_obj, (void *) pipe);
  1354. if (!chan_allocated) {
  1355. HCD_EXIT_CRITICAL();
  1356. ret = ESP_ERR_NOT_SUPPORTED;
  1357. goto err;
  1358. }
  1359. usbh_hal_chan_set_ep_char(pipe->chan_obj, &pipe->ep_char);
  1360. //Add the pipe to the list of idle pipes in the port object
  1361. TAILQ_INSERT_TAIL(&port->pipes_idle_tailq, pipe, tailq_entry);
  1362. port->num_pipes_idle++;
  1363. HCD_EXIT_CRITICAL();
  1364. *pipe_hdl = (hcd_pipe_handle_t)pipe;
  1365. return ret;
  1366. err:
  1367. free(xfer_desc_list);
  1368. free(chan_obj);
  1369. free(pipe);
  1370. return ret;
  1371. }
  1372. esp_err_t hcd_pipe_free(hcd_pipe_handle_t pipe_hdl)
  1373. {
  1374. pipe_t *pipe = (pipe_t *)pipe_hdl;
  1375. HCD_ENTER_CRITICAL();
  1376. //Check that all transfer requests have been removed and pipe has no pending events
  1377. HCD_CHECK_FROM_CRIT(pipe->inflight_xfer_req == NULL
  1378. && pipe->num_xfer_req_pending == 0
  1379. && pipe->num_xfer_req_done == 0,
  1380. ESP_ERR_INVALID_STATE);
  1381. //Remove pipe from the list of idle pipes (it must be in the idle list because it should have no queued transfer requests)
  1382. TAILQ_REMOVE(&pipe->port->pipes_idle_tailq, pipe, tailq_entry);
  1383. pipe->port->num_pipes_idle--;
  1384. usbh_hal_chan_free(pipe->port->hal, pipe->chan_obj);
  1385. HCD_EXIT_CRITICAL();
  1386. //Free pipe resources
  1387. free(pipe->xfer_desc_list);
  1388. free(pipe->chan_obj);
  1389. free(pipe);
  1390. return ESP_OK;
  1391. }
  1392. esp_err_t hcd_pipe_update(hcd_pipe_handle_t pipe_hdl, uint8_t dev_addr, int mps)
  1393. {
  1394. pipe_t *pipe = (pipe_t *)pipe_hdl;
  1395. HCD_ENTER_CRITICAL();
  1396. //Check if pipe is in the correct state to be updated
  1397. HCD_CHECK_FROM_CRIT(pipe->state != HCD_PIPE_STATE_INVALID
  1398. && !pipe->flags.pipe_cmd_processing
  1399. && pipe->num_xfer_req_pending == 0
  1400. && pipe->num_xfer_req_done == 0,
  1401. ESP_ERR_INVALID_STATE);
  1402. //Check that all transfer requests have been removed and pipe has no pending events
  1403. pipe->ep_char.dev_addr = dev_addr;
  1404. pipe->ep_char.mps = mps;
  1405. usbh_hal_chan_set_ep_char(pipe->chan_obj, &pipe->ep_char);
  1406. HCD_EXIT_CRITICAL();
  1407. return ESP_OK;
  1408. }
  1409. void *hcd_pipe_get_ctx(hcd_pipe_handle_t pipe_hdl)
  1410. {
  1411. pipe_t *pipe = (pipe_t *) pipe_hdl;
  1412. void *ret;
  1413. HCD_ENTER_CRITICAL();
  1414. ret = pipe->context;
  1415. HCD_EXIT_CRITICAL();
  1416. return ret;
  1417. }
  1418. hcd_pipe_state_t hcd_pipe_get_state(hcd_pipe_handle_t pipe_hdl)
  1419. {
  1420. hcd_pipe_state_t ret;
  1421. pipe_t *pipe = (pipe_t *) pipe_hdl;
  1422. HCD_ENTER_CRITICAL();
  1423. //If there is no enabled device, all existing pipes are invalid.
  1424. if (pipe->port->state != HCD_PORT_STATE_ENABLED
  1425. && pipe->port->state != HCD_PORT_STATE_SUSPENDED
  1426. && pipe->port->state != HCD_PORT_STATE_RESUMING) {
  1427. ret = HCD_PIPE_STATE_INVALID;
  1428. } else {
  1429. ret = pipe->state;
  1430. }
  1431. HCD_EXIT_CRITICAL();
  1432. return ret;
  1433. }
  1434. esp_err_t hcd_pipe_command(hcd_pipe_handle_t pipe_hdl, hcd_pipe_cmd_t command)
  1435. {
  1436. pipe_t *pipe = (pipe_t *) pipe_hdl;
  1437. bool ret = ESP_OK;
  1438. HCD_ENTER_CRITICAL();
  1439. //Cannot execute pipe commands the pipe is already executing a command, or if the pipe or its port are no longer valid
  1440. if (pipe->flags.pipe_cmd_processing || !pipe->port->flags.conn_devc_ena || pipe->state == HCD_PIPE_STATE_INVALID) {
  1441. ret = ESP_ERR_INVALID_STATE;
  1442. } else {
  1443. pipe->flags.pipe_cmd_processing = 1;
  1444. switch (command) {
  1445. case HCD_PIPE_CMD_ABORT: {
  1446. //Retire all scheduled transfer requests. Pipe's state remains unchanged
  1447. if (!_pipe_wait_done(pipe)) { //Stop any on going transfers
  1448. ret = ESP_ERR_INVALID_RESPONSE;
  1449. break;
  1450. }
  1451. _pipe_retire(pipe, true); //Retire any pending transfers
  1452. break;
  1453. }
  1454. case HCD_PIPE_CMD_RESET: {
  1455. //Retire all scheduled transfer requests. Pipe's state moves to active
  1456. if (!_pipe_wait_done(pipe)) { //Stop any on going transfers
  1457. ret = ESP_ERR_INVALID_RESPONSE;
  1458. break;
  1459. }
  1460. _pipe_retire(pipe, true); //Retire any pending transfers
  1461. pipe->state = HCD_PIPE_STATE_ACTIVE;
  1462. break;
  1463. }
  1464. case HCD_PIPE_CMD_CLEAR: { //Can only do this if port is still active
  1465. //Pipe's state moves from halted to active
  1466. if (pipe->state == HCD_PIPE_STATE_HALTED) {
  1467. pipe->state = HCD_PIPE_STATE_ACTIVE;
  1468. //Start the next pending transfer if it exists
  1469. if (_pipe_get_next_xfer_req(pipe)) {
  1470. //Fill the descriptor list with the transfer request and start the transfer
  1471. _xfer_req_fill(pipe);
  1472. usbh_hal_chan_activate(pipe->chan_obj, 0); //Start with the first descriptor
  1473. }
  1474. }
  1475. break;
  1476. }
  1477. case HCD_PIPE_CMD_HALT: {
  1478. //Pipe's state moves to halted
  1479. if (!_pipe_wait_done(pipe)) { //Stop any on going transfers
  1480. ret = ESP_ERR_INVALID_RESPONSE;
  1481. break;
  1482. }
  1483. pipe->state = HCD_PIPE_STATE_HALTED;
  1484. break;
  1485. }
  1486. }
  1487. pipe->flags.pipe_cmd_processing = 0;
  1488. }
  1489. HCD_EXIT_CRITICAL();
  1490. return ret;
  1491. }
  1492. hcd_pipe_event_t hcd_pipe_get_event(hcd_pipe_handle_t pipe_hdl)
  1493. {
  1494. pipe_t *pipe = (pipe_t *) pipe_hdl;
  1495. hcd_pipe_event_t ret;
  1496. HCD_ENTER_CRITICAL();
  1497. ret = pipe->last_event;
  1498. pipe->last_event = HCD_PIPE_EVENT_NONE;
  1499. HCD_EXIT_CRITICAL();
  1500. return ret;
  1501. }
  1502. // ----------------------------------------------- HCD Transfer Requests -----------------------------------------------
  1503. // ----------------------- Private -------------------------
/**
 * @brief Fill the pipe's transfer descriptor list with its current in-flight transfer request
 *
 * For control pipes, three descriptors are filled (setup, data, status stages); the data stage
 * descriptor becomes a NULL packet when the IRP carries no data. For bulk pipes, a single
 * descriptor covering the whole IRP buffer is filled. Finally, the descriptor list is bound to
 * the channel by acquiring the channel's slot.
 *
 * @note pipe->inflight_xfer_req must already point at the target transfer request
 * @note Isochronous and interrupt pipes are not supported yet (descriptor list left unfilled)
 */
static void _xfer_req_fill(pipe_t *pipe)
{
    //inflight_xfer_req of the pipe must already set to the target transfer request
    assert(pipe->inflight_xfer_req != NULL);
    //Fill transfer descriptor list with a single transfer request
    usb_irp_t *usb_irp = pipe->inflight_xfer_req->irp;
    switch (pipe->ep_char.type) {
        case USB_XFER_TYPE_CTRL: {
            //Get information about the control transfer by analyzing the setup packet (the first 8 bytes of the IRP's data buffer)
            usb_ctrl_req_t *ctrl_req = (usb_ctrl_req_t *)usb_irp->data_buffer;
            //Record the data stage direction and whether the data stage is skipped; _xfer_req_continue()/_xfer_req_parse() rely on these flags
            pipe->flags.ctrl_data_stg_in = ((ctrl_req->bRequestType & USB_B_REQUEST_TYPE_DIR_IN) != 0);
            pipe->flags.ctrl_data_stg_skip = (usb_irp->num_bytes == 0);
            //Fill setup stage (descriptor 0). HALT flag stops the channel after this stage so direction/PID can be switched
            usbh_hal_xfer_desc_fill(pipe->xfer_desc_list, 0, usb_irp->data_buffer, sizeof(usb_ctrl_req_t),
                                    USBH_HAL_XFER_DESC_FLAG_SETUP | USBH_HAL_XFER_DESC_FLAG_HALT);
            if (pipe->flags.ctrl_data_stg_skip) {
                //Fill a NULL packet if there is no data stage (keeps descriptor indices fixed: status stage is always index 2)
                usbh_hal_xfer_desc_fill(pipe->xfer_desc_list, 1, NULL, 0, USBH_HAL_XFER_DESC_FLAG_NULL);
            } else {
                //Fill data stage (descriptor 1) with the payload following the 8-byte setup packet
                usbh_hal_xfer_desc_fill(pipe->xfer_desc_list, 1, usb_irp->data_buffer + sizeof(usb_ctrl_req_t), usb_irp->num_bytes,
                                        ((pipe->flags.ctrl_data_stg_in) ? USBH_HAL_XFER_DESC_FLAG_IN : 0) | USBH_HAL_XFER_DESC_FLAG_HALT);
            }
            //Fill status stage (descriptor 2, a zero length packet). The status stage runs opposite to the
            //data stage's direction; if the data stage is skipped, the status stage is always IN.
            usbh_hal_xfer_desc_fill(pipe->xfer_desc_list, 2, NULL, 0,
                                    ((pipe->flags.ctrl_data_stg_in && !pipe->flags.ctrl_data_stg_skip) ? 0 : USBH_HAL_XFER_DESC_FLAG_IN) | USBH_HAL_XFER_DESC_FLAG_HALT);
            //Set the channel's direction to OUT and PID to 0 respectively for the setup stage
            usbh_hal_chan_set_dir(pipe->chan_obj, false);   //Setup stage is always OUT
            usbh_hal_chan_set_pid(pipe->chan_obj, 0);   //Setup stage always has a PID of DATA0
            break;
        }
        case USB_XFER_TYPE_BULK: {
            //Endpoint address MSB encodes the direction (IN when set)
            bool is_in = pipe->ep_char.bEndpointAddress & USB_B_ENDPOINT_ADDRESS_EP_DIR_MASK;
            //Bulk transfers use a single descriptor covering the entire IRP buffer
            usbh_hal_xfer_desc_fill(pipe->xfer_desc_list, 0, usb_irp->data_buffer, usb_irp->num_bytes,
                                    ((is_in) ? USBH_HAL_XFER_DESC_FLAG_IN : 0) | USBH_HAL_XFER_DESC_FLAG_HALT);
            break;
        }
        default: {
            break;  //Isoc and Interrupt transfers not supported yet
        }
    }
    //Claim slot: bind the filled descriptor list to the channel so it can be executed
    usbh_hal_chan_slot_acquire(pipe->chan_obj, pipe->xfer_desc_list, pipe->flags.xfer_desc_list_len, (void *)pipe);
}
/**
 * @brief Advance an in-progress control transfer to its next stage
 *
 * Called after the channel halts at the end of a stage. Determines the next stage from the
 * channel's next descriptor index (1 = setup just finished, 2 = data just finished), sets the
 * channel's direction and PID for that stage, then re-activates the channel.
 *
 * @note Only meaningful for control pipes; the stage flags were set in _xfer_req_fill()
 */
static void _xfer_req_continue(pipe_t *pipe)
{
    int next_idx = usbh_hal_chan_get_next_desc_index(pipe->chan_obj);
    bool next_dir_is_in;    //Next descriptor direction is IN
    int next_pid;           //Next PID (DATA0 or DATA 1)
    int num_to_skip;        //Number of descriptors to skip
    if (next_idx == 1) {
        //Just finished setup stage
        if (pipe->flags.ctrl_data_stg_skip) {
            //Skipping data stage. Go straight to status stage
            next_dir_is_in = true;  //With no data stage, status stage must be IN
            next_pid = 1;   //Status stage always has a PID of DATA1
            num_to_skip = 1;    //Skip over the null descriptor representing the skipped data stage
        } else {
            //Go to data stage
            next_dir_is_in = pipe->flags.ctrl_data_stg_in;
            next_pid = 1;   //Data stage always starts with a PID of DATA1
            num_to_skip = 0;
        }
    } else {    //next_idx == 2
        //Going to status stage from data stage
        next_dir_is_in = !pipe->flags.ctrl_data_stg_in; //Status stage is opposite direction of data stage
        next_pid = 1;   //Status stage always has a PID of DATA1
        num_to_skip = 0;
    }
    usbh_hal_chan_set_dir(pipe->chan_obj, next_dir_is_in);
    usbh_hal_chan_set_pid(pipe->chan_obj, next_pid);
    usbh_hal_chan_activate(pipe->chan_obj, num_to_skip);    //Start the next stage
}
/**
 * @brief Parse the result of a finished (or failed) transfer and write it back to the IRP
 *
 * Releases the channel slot, translates either the pipe's error state/last event (on error) or
 * the transfer descriptor results (on success) into a usb_transfer_status_t and a byte count,
 * and stores both in the in-flight transfer request's IRP.
 *
 * @param pipe Pipe whose inflight_xfer_req has just finished
 * @param error_occurred True if the transfer ended due to a pipe error or pipe invalidation;
 *                       in that case zero bytes are assumed transferred
 */
static void _xfer_req_parse(pipe_t *pipe, bool error_occurred)
{
    assert(pipe->inflight_xfer_req != NULL);
    //Release the slot (unbinds the descriptor list from the channel)
    void *xfer_desc_list;
    int xfer_desc_len;
    usbh_hal_chan_slot_release(pipe->chan_obj, &xfer_desc_list, &xfer_desc_len);
    assert(xfer_desc_list == pipe->xfer_desc_list);
    (void) xfer_desc_len;
    //Parse the transfer descriptor list for the result of the transfer
    usb_irp_t *usb_irp = pipe->inflight_xfer_req->irp;
    usb_transfer_status_t xfer_status;
    int xfer_rem_len;   //Number of bytes that were NOT transferred
    if (error_occurred) {
        //Either a pipe error has occurred or the pipe is no longer valid
        if (pipe->state == HCD_PIPE_STATE_INVALID) {
            xfer_status = USB_TRANSFER_STATUS_NO_DEVICE;
        } else {
            //Must have been a pipe error event. Map the pipe's last event to a transfer status
            switch (pipe->last_event) {
                case HCD_PIPE_EVENT_ERROR_XFER: //Excessive transaction error
                    xfer_status = USB_TRANSFER_STATUS_ERROR;
                    break;
                case HCD_PIPE_EVENT_ERROR_OVERFLOW:
                    xfer_status = USB_TRANSFER_STATUS_OVERFLOW;
                    break;
                case HCD_PIPE_EVENT_ERROR_STALL:
                    xfer_status = USB_TRANSFER_STATUS_STALL;
                    break;
                default:
                    //HCD_PIPE_EVENT_ERROR_XFER_NOT_AVAIL should never occur
                    abort();
                    break;
            }
        }
        //We assume no bytes transmitted because of an error.
        xfer_rem_len = usb_irp->num_bytes;
    } else {
        int desc_status;
        switch (pipe->ep_char.type) {
            case USB_XFER_TYPE_CTRL: {
                if (pipe->flags.ctrl_data_stg_skip) {
                    //There was no data stage. Just set it as successful
                    desc_status = USBH_HAL_XFER_DESC_STS_SUCCESS;
                    xfer_rem_len = 0;
                } else {
                    //Check the data stage (index 1); setup and status stages carry no payload to account for
                    usbh_hal_xfer_desc_parse(pipe->xfer_desc_list, 1, &xfer_rem_len, &desc_status);
                }
                break;
            }
            case USB_XFER_TYPE_BULK: {
                //Bulk transfers use a single descriptor (index 0)
                usbh_hal_xfer_desc_parse(pipe->xfer_desc_list, 0, &xfer_rem_len, &desc_status);
                break;
            }
            default: {
                //We don't support ISOC and INTR pipes yet; reaching here is a programming error
                desc_status = USBH_HAL_XFER_DESC_STS_NOT_EXECUTED;
                xfer_rem_len = 0;
                xfer_status = USB_TRANSFER_STATUS_ERROR;
                abort();
                break;
            }
        }
        xfer_status = USB_TRANSFER_STATUS_COMPLETED;
        assert(desc_status == USBH_HAL_XFER_DESC_STS_SUCCESS);
    }
    //Write back results to IRP
    usb_irp->actual_num_bytes = usb_irp->num_bytes - xfer_rem_len;
    usb_irp->status = xfer_status;
}
  1648. // ----------------------- Public --------------------------
  1649. hcd_xfer_req_handle_t hcd_xfer_req_alloc()
  1650. {
  1651. xfer_req_t *xfer_req = calloc(1, sizeof(xfer_req_t));
  1652. xfer_req->state = XFER_REQ_STATE_IDLE;
  1653. return (hcd_xfer_req_handle_t) xfer_req;
  1654. }
  1655. void hcd_xfer_req_free(hcd_xfer_req_handle_t req_hdl)
  1656. {
  1657. if (req_hdl == NULL) {
  1658. return;
  1659. }
  1660. xfer_req_t *xfer_req = (xfer_req_t *) req_hdl;
  1661. //Cannot free a transfer request that is still being used
  1662. assert(xfer_req->state == XFER_REQ_STATE_IDLE);
  1663. free(xfer_req);
  1664. }
  1665. void hcd_xfer_req_set_target(hcd_xfer_req_handle_t req_hdl, hcd_pipe_handle_t pipe_hdl, usb_irp_t *irp, void *context)
  1666. {
  1667. xfer_req_t *xfer_req = (xfer_req_t *) req_hdl;
  1668. //Can only set an transfer request's target when the transfer request is idl
  1669. assert(xfer_req->state == XFER_REQ_STATE_IDLE);
  1670. xfer_req->pipe = (pipe_t *) pipe_hdl;
  1671. xfer_req->irp = irp;
  1672. xfer_req->context = context;
  1673. }
  1674. void hcd_xfer_req_get_target(hcd_xfer_req_handle_t req_hdl, hcd_pipe_handle_t *pipe_hdl, usb_irp_t **irp, void **context)
  1675. {
  1676. xfer_req_t *xfer_req = (xfer_req_t *) req_hdl;
  1677. *pipe_hdl = (hcd_pipe_handle_t) xfer_req->pipe;
  1678. *irp = xfer_req->irp;
  1679. *context = xfer_req->context;
  1680. }
/**
 * @brief Enqueue a transfer request for execution on its target pipe
 *
 * If the pipe is idle (not paused, nothing pending or in flight), the transfer request is
 * started immediately on the channel; otherwise it is appended to the pipe's pending queue.
 * The first enqueue on an otherwise empty pipe also moves the pipe from the port's idle list
 * to its queued list.
 *
 * @return ESP_OK on success; ESP_ERR_INVALID_STATE if the target is unset, the request is
 *         already enqueued, or the pipe/port is not in a state that accepts transfers
 */
esp_err_t hcd_xfer_req_enqueue(hcd_xfer_req_handle_t req_hdl)
{
    xfer_req_t *xfer_req = (xfer_req_t *) req_hdl;
    HCD_CHECK(xfer_req->pipe != NULL && xfer_req->irp != NULL   //The transfer request's target must be set
              && xfer_req->state == XFER_REQ_STATE_IDLE,    //The transfer request cannot be already enqueued
              ESP_ERR_INVALID_STATE);
    pipe_t *pipe = xfer_req->pipe;
    HCD_ENTER_CRITICAL();
    HCD_CHECK_FROM_CRIT(pipe->port->state == HCD_PORT_STATE_ENABLED //The pipe's port must be in the correct state
                        && pipe->state == HCD_PIPE_STATE_ACTIVE //The pipe must be in the correct state
                        && !pipe->flags.pipe_cmd_processing,    //Pipe cannot currently be processing a pipe command
                        ESP_ERR_INVALID_STATE);
    //Check if we can start execution on the pipe immediately
    if (!pipe->flags.paused && pipe->num_xfer_req_pending == 0 && pipe->inflight_xfer_req == NULL) {
        //Pipe isn't executing any transfers. Start immediately
        pipe->inflight_xfer_req = xfer_req;
        _xfer_req_fill(pipe);
        usbh_hal_chan_activate(pipe->chan_obj, 0);  //Start with the first descriptor
        xfer_req->state = XFER_REQ_STATE_INFLIGHT;
        if (pipe->num_xfer_req_done == 0) {
            //This is the first transfer request to be enqueued into the pipe. Move the pipe to the list of queued pipes
            TAILQ_REMOVE(&pipe->port->pipes_idle_tailq, pipe, tailq_entry);
            TAILQ_INSERT_TAIL(&pipe->port->pipes_queued_tailq, pipe, tailq_entry);
            pipe->port->num_pipes_idle--;
            pipe->port->num_pipes_queued++;
        }
    } else {
        //Pipe is busy (or paused). Add the transfer request to the pipe's pending tailq
        TAILQ_INSERT_TAIL(&pipe->pend_xfer_req_tailq, xfer_req, tailq_entry);
        pipe->num_xfer_req_pending++;
        xfer_req->state = XFER_REQ_STATE_PENDING;
    }
    HCD_EXIT_CRITICAL();
    return ESP_OK;
}
/**
 * @brief Dequeue one completed transfer request from a pipe
 *
 * Pops the oldest transfer request from the pipe's done queue and returns it to the idle
 * state. When the pipe becomes fully drained (no done and no pending requests), the pipe
 * is moved back from the port's queued list to its idle list.
 *
 * @return Handle of a completed transfer request, or NULL if the done queue is empty
 */
hcd_xfer_req_handle_t hcd_xfer_req_dequeue(hcd_pipe_handle_t pipe_hdl)
{
    pipe_t *pipe = (pipe_t *)pipe_hdl;
    hcd_xfer_req_handle_t ret;
    HCD_ENTER_CRITICAL();
    if (pipe->num_xfer_req_done > 0) {
        //Pop the oldest completed transfer request off the done queue
        xfer_req_t *xfer_req = TAILQ_FIRST(&pipe->done_xfer_req_tailq);
        TAILQ_REMOVE(&pipe->done_xfer_req_tailq, xfer_req, tailq_entry);
        pipe->num_xfer_req_done--;
        assert(xfer_req->state == XFER_REQ_STATE_DONE);
        xfer_req->state = XFER_REQ_STATE_IDLE;  //Request can now be re-targeted or freed
        ret = (hcd_xfer_req_handle_t) xfer_req;
        if (pipe->num_xfer_req_done == 0 && pipe->num_xfer_req_pending == 0) {
            //This pipe has no more enqueued transfers. Move the pipe to the list of idle pipes
            TAILQ_REMOVE(&pipe->port->pipes_queued_tailq, pipe, tailq_entry);
            TAILQ_INSERT_TAIL(&pipe->port->pipes_idle_tailq, pipe, tailq_entry);
            pipe->port->num_pipes_idle++;
            pipe->port->num_pipes_queued--;
        }
    } else {
        //Nothing completed yet
        ret = NULL;
    }
    HCD_EXIT_CRITICAL();
    return ret;
}
  1741. esp_err_t hcd_xfer_req_abort(hcd_xfer_req_handle_t req_hdl)
  1742. {
  1743. xfer_req_t *xfer_req = (xfer_req_t *) req_hdl;
  1744. esp_err_t ret;
  1745. HCD_ENTER_CRITICAL();
  1746. switch (xfer_req->state) {
  1747. case XFER_REQ_STATE_PENDING: {
  1748. //Transfer request has not been executed so it can be aborted
  1749. pipe_t *pipe = xfer_req->pipe;
  1750. //Remove it form the pending queue
  1751. TAILQ_REMOVE(&pipe->pend_xfer_req_tailq, xfer_req, tailq_entry);
  1752. pipe->num_xfer_req_pending--;
  1753. //Add it to the done queue
  1754. TAILQ_INSERT_TAIL(&pipe->done_xfer_req_tailq, xfer_req, tailq_entry);
  1755. pipe->num_xfer_req_done++;
  1756. //Update the transfer request and associated IRP's status
  1757. xfer_req->state = XFER_REQ_STATE_DONE;
  1758. xfer_req->irp->status = USB_TRANSFER_STATUS_CANCELLED;
  1759. ret = ESP_OK;
  1760. break;
  1761. }
  1762. case XFER_REQ_STATE_IDLE: {
  1763. //Cannot abort a transfer request that was never enqueued
  1764. ret = ESP_ERR_INVALID_STATE;
  1765. break;
  1766. }
  1767. default :{
  1768. //Transfer request is currently or has already been executed. Nothing to do.
  1769. ret = ESP_OK;
  1770. break;
  1771. }
  1772. }
  1773. HCD_EXIT_CRITICAL();
  1774. return ret;
  1775. }