sdio-dw.c

/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-12-06     GuEe-GUI     first version
 * 2023-02-25     GuEe-GUI     add EDMA support
 */

#include <rtthread.h>

#define DBG_TAG "sdio.dw"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <mmu.h>
#include <cpuport.h>

#include "sdio-dw.h"
  18. /* Common flag combinations */
  19. #define PINT(x) SDIO_DW_INT_##x
  20. #define SDIO_DW_DATA_ERROR_FLAGS (PINT(DRTO) | PINT(DCRC) | PINT(HTO) | PINT(SBE) | PINT(EBE) | PINT(HLE))
  21. #define SDIO_DW_CMD_ERROR_FLAGS (PINT(RTO) | PINT(RCRC) | PINT(RESP_ERR) | PINT(HLE))
  22. #define SDIO_DW_ERROR_FLAGS (SDIO_DW_DATA_ERROR_FLAGS | SDIO_DW_CMD_ERROR_FLAGS)
  23. #define SDIO_DW_SEND_STATUS 1
  24. #define SDIO_DW_RECV_STATUS 2
  25. #define SDIO_DW_DMA_THRESHOLD 16
  26. #define SDIO_DW_FREQ_HZ_MAX 200000000
  27. #define SDIO_DW_FREQ_HZ_MIN 100000
  28. #define PINTC(x) SDIO_DW_IDMAC_INT_##x
  29. #define SDIO_DW_IDMAC_INT_CLR (PINTC(AI) | PINTC(NI) | PINTC(CES) | PINTC(DU) | PINTC(FBE) | PINTC(RI) | PINTC(TI))
  30. #define DESC_RING_BUF_SZ ARCH_PAGE_SIZE
  31. #define NSEC_PER_SEC 1000000000L
  32. #define USEC_PER_MSEC 1000L
  33. #define MSEC_PER_SEC 1000L
  34. struct idmac_desc64
  35. {
  36. rt_uint32_t des0; /* Control descriptor */
  37. #define IDMAC_OWN_CLR64(x) !((x) & rt_cpu_to_le32(IDMAC_DES0_OWN))
  38. rt_uint32_t des1; /* Reserved */
  39. rt_uint32_t des2; /* Buffer sizes */
  40. #define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
  41. ((d)->des2 = ((d)->des2 & rt_cpu_to_le32(0x03ffe000)) | ((rt_cpu_to_le32(s)) & rt_cpu_to_le32(0x1fff)))
  42. rt_uint32_t des3; /* Reserved */
  43. rt_uint32_t des4; /* Lower 32-bits of buffer address pointer 1 */
  44. rt_uint32_t des5; /* Upper 32-bits of buffer address pointer 1 */
  45. rt_uint32_t des6; /* Lower 32-bits of next descriptor address */
  46. rt_uint32_t des7; /* Upper 32-bits of next descriptor address */
  47. };
  48. struct idmac_desc32
  49. {
  50. rt_le32_t des0; /* Control Descriptor */
  51. #define IDMAC_DES0_DIC RT_BIT(1)
  52. #define IDMAC_DES0_LD RT_BIT(2)
  53. #define IDMAC_DES0_FD RT_BIT(3)
  54. #define IDMAC_DES0_CH RT_BIT(4)
  55. #define IDMAC_DES0_ER RT_BIT(5)
  56. #define IDMAC_DES0_CES RT_BIT(30)
  57. #define IDMAC_DES0_OWN RT_BIT(31)
  58. rt_le32_t des1; /* Buffer sizes */
  59. #define IDMAC_32ADDR_SET_BUFFER1_SIZE(d, s) \
  60. ((d)->des1 = ((d)->des1 & rt_cpu_to_le32(0x03ffe000)) | (rt_cpu_to_le32((s) & 0x1fff)))
  61. rt_le32_t des2; /* Buffer 1 physical address */
  62. rt_le32_t des3; /* Buffer 2 physical address */
  63. };
  64. /* Each descriptor can transfer up to 4KB of data in chained mode */
  65. #define DW_MCI_DESC_DATA_LENGTH 0x1000
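/*
 * Illustrative capacity (assuming a 4 KiB ARCH_PAGE_SIZE): the one-page
 * descriptor ring holds 4096 / 32 = 128 idmac_desc64 entries or
 * 4096 / 16 = 256 idmac_desc32 entries, i.e. up to 512 KiB or 1 MiB of data
 * per request at 4 KiB per chained descriptor.
 */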
  66. static rt_bool_t sdio_dw_ctrl_reset(struct sdio_dw *sd, rt_uint32_t reset)
  67. {
  68. rt_uint32_t ctrl;
  69. rt_tick_t start;
  70. int timeout = rt_tick_from_millisecond(500);
  71. ctrl = sdio_dw_readl(sd, CTRL);
  72. ctrl |= reset;
  73. sdio_dw_writel(sd, CTRL, ctrl);
  74. start = rt_tick_get();
  75. while ((sdio_dw_readl(sd, CTRL) & reset))
  76. {
  77. if ((rt_tick_get() - start) > timeout)
  78. {
  79. LOG_E("Timeout resetting block (ctrl reset 0x%x)", ctrl & reset);
  80. return RT_FALSE;
  81. }
  82. rt_hw_cpu_relax();
  83. }
  84. return RT_TRUE;
  85. }
  86. static void sdio_dw_wait_while_busy(struct sdio_dw *sd, rt_uint32_t cmd_flags)
  87. {
  88. if ((cmd_flags & SDIO_DW_CMD_PRV_DAT_WAIT) && !(cmd_flags & SDIO_DW_CMD_VOLT_SWITCH))
  89. {
  90. rt_tick_t start = rt_tick_get();
  91. int timeout = rt_tick_from_millisecond(500);
  92. while ((sdio_dw_readl(sd, STATUS) & SDIO_DW_STATUS_BUSY))
  93. {
  94. if ((rt_tick_get() - start) > timeout)
  95. {
  96. LOG_E("Wait busy fail");
  97. break;
  98. }
  99. rt_hw_cpu_relax();
  100. }
  101. }
  102. }
  103. static void sdio_dw_send_cmd(struct sdio_dw_slot *slot, rt_uint32_t cmd, rt_uint32_t arg)
  104. {
  105. rt_tick_t start;
  106. struct sdio_dw *sd = slot->sd;
  107. int timeout = rt_tick_from_millisecond(500);
  108. sdio_dw_writel(sd, CMDARG, arg);
  109. rt_hw_wmb();
  110. sdio_dw_wait_while_busy(sd, cmd);
  111. sdio_dw_writel(sd, CMD, SDIO_DW_CMD_START | cmd);
  112. start = rt_tick_get();
  113. while ((sdio_dw_readl(sd, CMD) & SDIO_DW_CMD_START))
  114. {
  115. if ((rt_tick_get() - start) > timeout)
  116. {
  117. LOG_E("Wait command start fail");
  118. break;
  119. }
  120. rt_hw_cpu_relax();
  121. }
  122. }
  123. rt_inline void sdio_dw_set_cto(struct sdio_dw *sd)
  124. {
  125. rt_ubase_t level;
  126. rt_uint32_t cto_clks, cto_div, cto_ms;
  127. cto_clks = sdio_dw_readl(sd, TMOUT) & 0xff;
  128. cto_div = (sdio_dw_readl(sd, CLKDIV) & 0xff) * 2;
  129. if (cto_div == 0)
  130. {
  131. cto_div = 1;
  132. }
    cto_ms = RT_DIV_ROUND_UP_ULL((rt_uint64_t)MSEC_PER_SEC * cto_clks * cto_div, sd->bus_hz);

    /* Add a bit of spare time */
    cto_ms += 10;
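    /*
     * Worked example (illustrative values only): with bus_hz = 50 MHz,
     * cto_clks = 0xff and CLKDIV = 1 (so cto_div = 2), the raw timeout is
     * ceil(1000 * 255 * 2 / 50000000) = 1 ms, plus the 10 ms of slack added above.
     */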
  137. /*
  138. * The durations we're working with are fairly short so we have to be extra
  139. * careful about synchronization here. Specifically in hardware a command
  140. * timeout is _at most_ 5.1 ms, so that means we expect an interrupt
  141. * (either command done or timeout) to come rather quickly after the
  142. * sdio_dw_writel. ...but just in case we have a long interrupt latency
  143. * let's add a bit of paranoia.
  144. *
  145. * In general we'll assume that at least an interrupt will be asserted in
  146. * hardware by the time the cto_timer runs. ...and if it hasn't been
  147. * asserted in hardware by that time then we'll assume it'll never come.
  148. */
  149. level = rt_spin_lock_irqsave(&sd->irq_lock);
  150. if (!rt_bitmap_test_bit(&sd->pending_events, EVENT_CMD_COMPLETE))
  151. {
  152. rt_tick_t tick = rt_tick_from_millisecond(cto_ms) + 1;
  153. rt_timer_control(&sd->cto_timer, RT_TIMER_CTRL_SET_TIME, &tick);
  154. rt_timer_start(&sd->cto_timer);
  155. }
  156. rt_spin_unlock_irqrestore(&sd->irq_lock, level);
  157. }
  158. static void sdio_dw_start_cmd(struct sdio_dw_slot *slot, rt_uint32_t cmd,
  159. rt_uint32_t arg)
  160. {
  161. struct sdio_dw *sd = slot->sd;
  162. sdio_dw_writel(sd, CMDARG, arg);
  163. rt_hw_wmb();
  164. sdio_dw_wait_while_busy(sd, cmd);
  165. sdio_dw_writel(sd, CMD, SDIO_DW_CMD_START | cmd);
  166. /* Response expected command only */
  167. if ((cmd & SDIO_DW_CMD_RESP_EXP))
  168. {
  169. sdio_dw_set_cto(sd);
  170. }
  171. }
  172. rt_inline void send_stop_abort(struct sdio_dw *sd, struct rt_mmcsd_data *data)
  173. {
  174. struct rt_mmcsd_cmd *stop = &sd->stop_abort;
  175. sdio_dw_start_cmd(sd->slot, sd->stop_cmdr, stop->arg);
  176. }
  177. /* DMA interface functions */
  178. static void sdio_dw_stop_dma(struct sdio_dw *sd)
  179. {
  180. if (sd->using_dma)
  181. {
  182. sd->dma_ops->stop(sd);
  183. sd->dma_ops->cleanup(sd);
  184. }
  185. /* Data transfer was stopped by the interrupt handler */
  186. rt_bitmap_set_bit(&sd->pending_events, EVENT_XFER_COMPLETE);
  187. }
  188. static rt_uint32_t sdio_dw_prep_stop_abort(struct sdio_dw *sd, struct rt_mmcsd_cmd *cmd)
  189. {
  190. rt_uint32_t cmdr;
  191. struct rt_mmcsd_cmd *stop;
  192. if (!cmd->data)
  193. {
  194. return 0;
  195. }
  196. stop = &sd->stop_abort;
  197. cmdr = cmd->cmd_code;
  198. rt_memset(stop, 0, sizeof(*stop));
  199. if (cmdr == READ_SINGLE_BLOCK ||
  200. cmdr == READ_MULTIPLE_BLOCK ||
  201. cmdr == WRITE_BLOCK ||
  202. cmdr == WRITE_MULTIPLE_BLOCK ||
  203. cmdr == SEND_TUNING_BLOCK ||
  204. cmdr == SEND_TUNING_BLOCK_HS200 ||
  205. cmdr == GEN_CMD)
  206. {
  207. stop->cmd_code = STOP_TRANSMISSION;
  208. stop->arg = 0;
  209. stop->flags = CMD_AC;
  210. }
  211. else if (cmdr == SD_IO_RW_EXTENDED)
  212. {
  213. stop->cmd_code = SD_IO_RW_DIRECT;
        stop->arg |= (1 << 31) | (0 << 28) | (SDIO_REG_CCCR_IO_ABORT << 9) | ((cmd->arg >> 28) & 0x7);
  215. stop->flags = RESP_SPI_R5 | CMD_AC;
  216. }
  217. else
  218. {
  219. return 0;
  220. }
  221. cmdr = stop->cmd_code | SDIO_DW_CMD_STOP | SDIO_DW_CMD_RESP_CRC | SDIO_DW_CMD_RESP_EXP;
  222. if (!rt_bitmap_test_bit(&sd->slot->flags, DW_MMC_CARD_NO_USE_HOLD))
  223. {
  224. cmdr |= SDIO_DW_CMD_USE_HOLD_REG;
  225. }
  226. return cmdr;
  227. }
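/*
 * In short: for block read/write/tuning data commands the prepared abort is
 * CMD12 (STOP_TRANSMISSION); for a CMD53 (SD_IO_RW_EXTENDED) transfer it is a
 * CMD52 write aimed at the CCCR I/O Abort register, carrying the function
 * number of the failed transfer in its low bits.
 */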
  228. static void sdio_dw_idmac_reset(struct sdio_dw *sd)
  229. {
  230. /* Software reset of DMA */
  231. sdio_dw_writel(sd, BMOD, sdio_dw_readl(sd, BMOD) | SDIO_DW_IDMAC_SWRESET);
  232. }
  233. static rt_err_t sdio_dw_idmac_init(struct sdio_dw *sd)
  234. {
  235. int i;
  236. rt_err_t err = RT_EOK;
  237. if (sd->dma_64bit_address)
  238. {
  239. struct idmac_desc64 *p;
  240. /* Number of descriptors in the ring buffer */
  241. sd->ring_size = DESC_RING_BUF_SZ / sizeof(struct idmac_desc64);
  242. /* Forward link the descriptor list */
  243. for (i = 0, p = sd->dma_buf; i < sd->ring_size - 1; ++i, ++p)
  244. {
  245. p->des6 = (sd->dma_buf_phy + (sizeof(struct idmac_desc64) * (i + 1))) & 0xffffffff;
  246. p->des7 = (rt_uint64_t)(sd->dma_buf_phy + (sizeof(struct idmac_desc64) * (i + 1))) >> 32;
  247. /* Initialize reserved and buffer size fields to "0" */
  248. p->des0 = 0;
  249. p->des1 = 0;
  250. p->des2 = 0;
  251. p->des3 = 0;
  252. }
  253. /* Set the last descriptor as the end-of-ring descriptor */
  254. p->des6 = sd->dma_buf_phy & 0xffffffff;
  255. p->des7 = (rt_uint64_t)sd->dma_buf_phy >> 32;
  256. p->des0 = IDMAC_DES0_ER;
  257. }
  258. else
  259. {
  260. struct idmac_desc32 *p;
  261. /* Number of descriptors in the ring buffer */
  262. sd->ring_size = DESC_RING_BUF_SZ / sizeof(struct idmac_desc32);
  263. /* Forward link the descriptor list */
  264. for (i = 0, p = sd->dma_buf; i < sd->ring_size - 1; ++i, ++p)
  265. {
  266. p->des3 = rt_cpu_to_le32(sd->dma_buf_phy + (sizeof(struct idmac_desc32) * (i + 1)));
  267. p->des0 = 0;
  268. p->des1 = 0;
  269. }
  270. /* Set the last descriptor as the end-of-ring descriptor */
  271. p->des3 = rt_cpu_to_le32(sd->dma_buf_phy);
  272. p->des0 = rt_cpu_to_le32(IDMAC_DES0_ER);
  273. }
  274. sdio_dw_idmac_reset(sd);
  275. if (sd->dma_64bit_address)
  276. {
  277. /* Mask out interrupts - get Tx & Rx complete only */
  278. sdio_dw_writel(sd, IDSTS64, SDIO_DW_IDMAC_INT_CLR);
  279. sdio_dw_writel(sd, IDINTEN64, PINTC(NI) | PINTC(RI) | PINTC(TI));
  280. /* Set the descriptor base address */
  281. sdio_dw_writel(sd, DBADDRL, sd->dma_buf_phy & 0xffffffff);
  282. sdio_dw_writel(sd, DBADDRU, (rt_uint64_t)sd->dma_buf_phy >> 32);
  283. }
  284. else
  285. {
  286. /* Mask out interrupts - get Tx & Rx complete only */
  287. sdio_dw_writel(sd, IDSTS, SDIO_DW_IDMAC_INT_CLR);
  288. sdio_dw_writel(sd, IDINTEN, PINTC(NI) | PINTC(RI) | PINTC(TI));
  289. /* Set the descriptor base address */
  290. sdio_dw_writel(sd, DBADDR, sd->dma_buf_phy);
  291. }
  292. return err;
  293. }
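/*
 * Resulting layout (for reference): the ring is a chain of descriptors in
 * which entry i points at the physical address of entry i + 1 (des6/des7 for
 * the 64-bit variant, des3 for the 32-bit one), while the last entry points
 * back at entry 0 with IDMAC_DES0_ER set, so the IDMAC wraps around instead
 * of running off the end of the page.
 */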
  294. rt_inline rt_err_t sdio_dw_prepare_desc64(struct sdio_dw *sd, struct rt_mmcsd_data *data)
  295. {
  296. rt_uint32_t desc_len;
  297. rt_uint64_t mem_addr;
  298. int timeout = rt_tick_from_millisecond(100);
  299. struct idmac_desc64 *desc_first, *desc_last, *desc;
  300. desc_first = desc_last = desc = sd->dma_buf;
  301. mem_addr = (rt_uint64_t)rt_kmem_v2p(sd->last_buf);
  302. for (rt_uint32_t length = sd->last_remain; length; ++desc)
  303. {
  304. rt_tick_t start = rt_tick_get();
  305. desc_len = rt_min_t(rt_uint32_t, length, DW_MCI_DESC_DATA_LENGTH);
  306. length -= desc_len;
  307. /*
  308. * Wait for the former clear OWN bit operation of IDMAC to make sure
  309. * that this descriptor isn't still owned by IDMAC as IDMAC's write ops
  310. * and CPU's read ops are asynchronous.
  311. */
  312. while ((HWREG32(&desc->des0) & IDMAC_DES0_OWN))
  313. {
  314. if ((rt_tick_get() - start) > timeout)
  315. {
  316. goto _err_own_bit;
  317. }
  318. rt_hw_cpu_relax();
  319. }
  320. /* Set the OWN bit and disable interrupts for this descriptor */
  321. desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
  322. /* Buffer length */
  323. IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);
  324. /* Physical address to DMA to/from */
  325. desc->des4 = mem_addr & 0xffffffff;
  326. desc->des5 = mem_addr >> 32;
  327. /* Update physical address for the next desc */
  328. mem_addr += desc_len;
  329. /* Save pointer to the last descriptor */
  330. desc_last = desc;
  331. }
  332. /* Set first descriptor */
  333. desc_first->des0 |= IDMAC_DES0_FD;
  334. /* Set last descriptor */
  335. desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
  336. desc_last->des0 |= IDMAC_DES0_LD;
  337. return RT_EOK;
  338. _err_own_bit:
  339. /* restore the descriptor chain as it's polluted */
  340. LOG_D("Descriptor is still owned by IDMAC");
  341. rt_memset(sd->dma_buf, 0, DESC_RING_BUF_SZ);
  342. sdio_dw_idmac_init(sd);
  343. return -RT_EINVAL;
  344. }
  345. rt_inline rt_err_t sdio_dw_prepare_desc32(struct sdio_dw *sd, struct rt_mmcsd_data *data)
  346. {
  347. rt_uint32_t desc_len, mem_addr;
  348. int timeout = rt_tick_from_millisecond(100);
  349. struct idmac_desc32 *desc_first, *desc_last, *desc;
  350. desc_first = desc_last = desc = sd->dma_buf;
  351. mem_addr = (rt_ubase_t)rt_kmem_v2p(sd->last_buf);
  352. for (rt_uint32_t length = sd->last_remain; length; ++desc)
  353. {
  354. rt_tick_t start = rt_tick_get();
  355. desc_len = rt_min_t(rt_uint32_t, length, DW_MCI_DESC_DATA_LENGTH);
  356. length -= desc_len;
  357. /*
  358. * Wait for the former clear OWN bit operation of IDMAC to make sure
  359. * that this descriptor isn't still owned by IDMAC as IDMAC's write ops
  360. * and CPU's read ops are asynchronous.
  361. */
  362. while (!IDMAC_OWN_CLR64(HWREG32(&desc->des0)))
  363. {
  364. if ((rt_tick_get() - start) > timeout)
  365. {
  366. goto _err_own_bit;
  367. }
  368. rt_hw_cpu_relax();
  369. }
  370. /* Set the OWN bit and disable interrupts for this descriptor */
  371. desc->des0 = rt_cpu_to_le32(IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH);
  372. /* Buffer length */
  373. IDMAC_32ADDR_SET_BUFFER1_SIZE(desc, desc_len);
  374. /* Physical address to DMA to/from */
  375. desc->des2 = rt_cpu_to_le32(mem_addr);
  376. /* Update physical address for the next desc */
  377. mem_addr += desc_len;
  378. /* Save pointer to the last descriptor */
  379. desc_last = desc;
  380. }
  381. /* Set first descriptor */
  382. desc_first->des0 |= rt_cpu_to_le32(IDMAC_DES0_FD);
  383. /* Set last descriptor */
  384. desc_last->des0 &= rt_cpu_to_le32(~(IDMAC_DES0_CH | IDMAC_DES0_DIC));
  385. desc_last->des0 |= rt_cpu_to_le32(IDMAC_DES0_LD);
  386. return RT_EOK;
  387. _err_own_bit:
  388. /* restore the descriptor chain as it's polluted */
  389. LOG_D("Descriptor is still owned by IDMAC");
  390. rt_memset(sd->dma_buf, 0, DESC_RING_BUF_SZ);
  391. sdio_dw_idmac_init(sd);
  392. return -RT_EINVAL;
  393. }
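/*
 * Illustrative mapping: a 3000-byte request fits in a single descriptor that
 * ends up carrying both IDMAC_DES0_FD and IDMAC_DES0_LD; a 9000-byte request
 * is split into 4096 + 4096 + 808 bytes over three chained descriptors, with
 * FD on the first and LD (plus CH/DIC cleared) on the last, as both prepare
 * helpers above do.
 */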
  394. static rt_err_t sdio_dw_idmac_start(struct sdio_dw *sd)
  395. {
  396. rt_err_t err = RT_EOK;
  397. if (sd->dma_64bit_address)
  398. {
  399. err = sdio_dw_prepare_desc64(sd, sd->data);
  400. }
  401. else
  402. {
  403. err = sdio_dw_prepare_desc32(sd, sd->data);
  404. }
  405. if (err)
  406. {
  407. goto _out;
  408. }
  409. /* Drain writebuffer */
  410. rt_hw_wmb();
  411. /* Make sure to reset DMA in case we did PIO before this */
  412. sdio_dw_ctrl_reset(sd, SDIO_DW_CTRL_DMA_RESET);
  413. sdio_dw_idmac_reset(sd);
  414. /* Select IDMAC interface */
  415. sdio_dw_writel(sd, CTRL, sdio_dw_readl(sd, CTRL) | SDIO_DW_CTRL_USE_IDMAC);
  416. /* Drain writebuffer */
  417. rt_hw_wmb();
  418. /* Enable the IDMAC */
  419. sdio_dw_writel(sd, BMOD, sdio_dw_readl(sd, BMOD) | SDIO_DW_IDMAC_ENABLE | SDIO_DW_IDMAC_FB);
  420. /* Start it running */
  421. sdio_dw_writel(sd, PLDMND, 1);
  422. _out:
  423. return err;
  424. }
  425. static rt_err_t sdio_dw_idmac_complete(struct sdio_dw *sd)
  426. {
  427. rt_err_t err = RT_EOK;
  428. struct rt_mmcsd_data *data = sd->data;
  429. sd->dma_ops->cleanup(sd);
  430. if (data)
  431. {
  432. rt_hw_cpu_dcache_ops(RT_HW_CACHE_INVALIDATE, data->buf, data->blks * data->blksize);
  433. rt_bitmap_set_bit(&sd->pending_events, EVENT_XFER_COMPLETE);
  434. rt_workqueue_urgent_work(sd->state_wq, &sd->state_work);
  435. }
  436. return err;
  437. }
  438. static rt_err_t sdio_dw_idmac_stop(struct sdio_dw *sd)
  439. {
  440. rt_uint32_t reg;
  441. /* Disable and reset the IDMAC interface */
  442. reg = sdio_dw_readl(sd, CTRL);
  443. reg &= ~SDIO_DW_CTRL_USE_IDMAC;
  444. reg |= SDIO_DW_CTRL_DMA_RESET;
  445. sdio_dw_writel(sd, CTRL, reg);
  446. /* Stop the IDMAC running */
  447. reg = sdio_dw_readl(sd, BMOD);
  448. reg &= ~(SDIO_DW_IDMAC_ENABLE | SDIO_DW_IDMAC_FB);
  449. reg |= SDIO_DW_IDMAC_SWRESET;
  450. sdio_dw_writel(sd, BMOD, reg);
  451. return RT_EOK;
  452. }
  453. static rt_err_t sdio_dw_idmac_cleanup(struct sdio_dw *sd)
  454. {
  455. return RT_EOK;
  456. }
  457. static const struct sdio_dw_dma_ops sdio_dw_idmac_ops =
  458. {
  459. .init = sdio_dw_idmac_init,
  460. .start = sdio_dw_idmac_start,
  461. .complete = sdio_dw_idmac_complete,
  462. .stop = sdio_dw_idmac_stop,
  463. .cleanup = sdio_dw_idmac_cleanup,
  464. };
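/*
 * Note on the ops contract as used in this file: init() runs once from
 * sdio_dw_init_dma(), start() from sdio_dw_submit_data_dma() for every data
 * request, stop()/cleanup() from sdio_dw_stop_dma() on errors, and complete()
 * is expected to be called from the DMA completion path (as edma_callback()
 * does for the external DMA backend below).
 */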
  465. static void edma_callback(struct rt_dma_chan *chan, rt_size_t size)
  466. {
  467. struct sdio_dw *sd = chan->priv;
  468. sd->dma_ops->complete(sd);
  469. }
  470. static rt_err_t sdio_dw_edmac_init(struct sdio_dw *sd)
  471. {
  472. rt_err_t err = RT_EOK;
  473. sd->edma_chan = rt_dma_chan_request(sd->bus_dev, "rx-tx");
  474. if (rt_is_err(sd->edma_chan))
  475. {
  476. err = rt_ptr_err(sd->edma_chan);
  477. LOG_E("Get external DMA channel error = %s", rt_strerror(err));
  478. sd->edma_chan = RT_NULL;
  479. }
  480. else if (!sd->edma_chan)
  481. {
  482. err = -RT_ERROR;
  483. }
  484. else
  485. {
  486. sd->edma_chan->callback = edma_callback;
  487. sd->edma_chan->priv = sd;
  488. }
  489. return err;
  490. }
  491. static rt_err_t sdio_dw_edmac_start(struct sdio_dw *sd)
  492. {
  493. rt_err_t err;
  494. struct rt_dma_slave_config config;
  495. struct rt_dma_slave_transfer transfer;
  496. rt_memset(&config, 0, sizeof(config));
  497. config.src_addr_width = RT_DMA_SLAVE_BUSWIDTH_4_BYTES;
  498. config.dst_addr_width = RT_DMA_SLAVE_BUSWIDTH_4_BYTES;
  499. config.src_addr = (rt_ubase_t)rt_kmem_v2p(sd->last_buf);
  500. config.dst_addr = (rt_ubase_t)rt_kmem_v2p(sd->fifo_base);
  501. config.dst_maxburst = 1 << (((sdio_dw_readl(sd, FIFOTH) >> 28) & 0x7) + 1);
  502. config.dst_maxburst = config.dst_maxburst == 2 ? 1 : config.dst_maxburst;
  503. config.src_maxburst = config.dst_maxburst;
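    /*
     * FIFOTH[30:28] holds the DW_DMA_Multiple_Transaction_Size index into
     * {1, 4, 8, 16, ...}; 1 << (index + 1) recovers the burst length for
     * index >= 1 (e.g. index 2 -> 8), and the special case above maps the
     * index-0 result of 2 back to a burst of 1.
     */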
  504. if (sd->data->flags & DATA_DIR_READ)
  505. {
  506. config.direction = RT_DMA_DEV_TO_MEM;
  507. }
  508. else
  509. {
  510. config.direction = RT_DMA_MEM_TO_DEV;
  511. }
  512. if ((err = rt_dma_chan_config(sd->edma_chan, &config)))
  513. {
  514. LOG_E("Config EDMAC error = %s", rt_strerror(err));
  515. return err;
  516. }
  517. rt_memset(&transfer, 0, sizeof(transfer));
  518. transfer.src_addr = config.src_addr;
  519. transfer.dst_addr = config.dst_addr;
  520. transfer.buffer_len = sd->last_remain;
  521. if ((err = rt_dma_prep_single(sd->edma_chan, &transfer)))
  522. {
  523. LOG_E("Prepare EDMAC error = %s", rt_strerror(err));
  524. return err;
  525. }
  526. return rt_dma_chan_start(sd->edma_chan);
  527. }
  528. static rt_err_t sdio_dw_edmac_complete(struct sdio_dw *sd)
  529. {
  530. rt_err_t err = RT_EOK;
  531. struct rt_mmcsd_data *data = sd->data;
  532. sd->dma_ops->cleanup(sd);
  533. if (data)
  534. {
  535. rt_hw_cpu_dcache_ops(RT_HW_CACHE_INVALIDATE, data->buf, data->blks * data->blksize);
  536. rt_bitmap_set_bit(&sd->pending_events, EVENT_XFER_COMPLETE);
  537. rt_workqueue_urgent_work(sd->state_wq, &sd->state_work);
  538. }
  539. return err;
  540. }
  541. static rt_err_t sdio_dw_edmac_stop(struct sdio_dw *sd)
  542. {
  543. return rt_dma_chan_stop(sd->edma_chan);
  544. }
  545. static rt_err_t sdio_dw_edmac_cleanup(struct sdio_dw *sd)
  546. {
  547. return RT_EOK;
  548. }
  549. static rt_err_t sdio_dw_edmac_exit(struct sdio_dw *sd)
  550. {
  551. if (sd->edma_chan)
  552. {
  553. rt_dma_chan_release(sd->edma_chan);
  554. sd->edma_chan = RT_NULL;
  555. }
  556. return RT_EOK;
  557. }
  558. static const struct sdio_dw_dma_ops sdio_dw_edmac_ops =
  559. {
  560. .init = sdio_dw_edmac_init,
  561. .start = sdio_dw_edmac_start,
  562. .complete = sdio_dw_edmac_complete,
  563. .stop = sdio_dw_edmac_stop,
  564. .cleanup = sdio_dw_edmac_cleanup,
  565. .exit = sdio_dw_edmac_exit,
  566. };
  567. static rt_bool_t sdio_dw_get_cd(struct sdio_dw_slot *slot)
  568. {
  569. rt_bool_t present;
  570. struct sdio_dw *sd = slot->sd;
  571. if (!controller_is_removable(slot->host))
  572. {
  573. present = RT_TRUE;
  574. }
  575. else
  576. {
  577. present = (sdio_dw_readl(sd, CDETECT) & (1 << slot->id)) == 0;
  578. }
  579. return present;
  580. }
static void sdio_dw_adjust_fifoth(struct sdio_dw *sd, struct rt_mmcsd_data *data)
{
    static const rt_uint32_t mszs[] = { 1, 4, 8, 16, 32, 64, 128, 256 };
    rt_uint32_t blksz = data->blksize;
    rt_uint32_t fifo_width = 1 << sd->data_shift;
    rt_uint32_t blksz_depth = blksz / fifo_width, fifoth_val;
    rt_uint32_t msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
    int idx = RT_ARRAY_SIZE(mszs) - 1;

    /* PIO should skip this scenario */
    if (!sd->use_dma)
    {
        return;
    }

    tx_wmark = (sd->fifo_depth) / 2;
    tx_wmark_invers = sd->fifo_depth - tx_wmark;

    /* MSIZE is '1' if blksz is not a multiple of the FIFO width */
    if (blksz % fifo_width)
    {
        goto _done;
    }

    do {
        if (!((blksz_depth % mszs[idx]) || (tx_wmark_invers % mszs[idx])))
        {
            msize = idx;
            rx_wmark = mszs[idx] - 1;
            break;
        }
    } while (--idx > 0);

    /* If idx is '0', it won't be tried. Thus, the initial values are used. */
_done:
    fifoth_val = SDIO_DW_SET_FIFOTH(msize, rx_wmark, tx_wmark);
    sdio_dw_writel(sd, FIFOTH, fifoth_val);
}
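/*
 * Worked example (illustrative, assuming a 32-bit wide FIFO and
 * fifo_depth = 256): blksz = 512 gives blksz_depth = 128, tx_wmark = 128 and
 * tx_wmark_invers = 128. The largest entry of mszs[] dividing both is 128
 * (idx = 6), so MSIZE = 6, RX_WMark = 127 and TX_WMark = 128 end up in FIFOTH.
 */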
  614. static void sdio_dw_ctrl_thld(struct sdio_dw *sd, struct rt_mmcsd_data *data)
  615. {
  616. rt_uint8_t enable;
  617. rt_uint16_t thld_size;
  618. rt_uint32_t blksz_depth, fifo_depth;
  619. rt_uint32_t blksz = data->blksize;
  620. /*
  621. * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
  622. * in the FIFO region, so we really shouldn't access it).
  623. */
  624. if (sd->verid < SDIO_DW_240A ||
  625. (sd->verid < SDIO_DW_280A && (data->flags & DATA_DIR_WRITE)))
  626. {
  627. return;
  628. }
  629. /*
  630. * Card write Threshold is introduced since 2.80a
  631. * It's used when HS400 mode is enabled.
  632. */
  633. if ((data->flags & DATA_DIR_WRITE) && sd->timing != MMCSD_TIMING_MMC_HS400)
  634. {
  635. goto _disable;
  636. }
  637. if ((data->flags & DATA_DIR_WRITE))
  638. {
  639. enable = SDIO_DW_CARD_WR_THR_EN;
  640. }
  641. else
  642. {
  643. enable = SDIO_DW_CARD_RD_THR_EN;
  644. }
  645. if (sd->timing != MMCSD_TIMING_MMC_HS200 &&
  646. sd->timing != MMCSD_TIMING_UHS_SDR104 &&
  647. sd->timing != MMCSD_TIMING_MMC_HS400)
  648. {
  649. goto _disable;
  650. }
  651. blksz_depth = blksz / (1 << sd->data_shift);
  652. fifo_depth = sd->fifo_depth;
  653. if (blksz_depth > fifo_depth)
  654. {
  655. goto _disable;
  656. }
  657. /*
  658. * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
  659. * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
  660. * Currently just choose blksz.
  661. */
  662. thld_size = blksz;
  663. sdio_dw_writel(sd, CDTHRCTL, SDIO_DW_SET_THLD(thld_size, enable));
  664. return;
  665. _disable:
  666. sdio_dw_writel(sd, CDTHRCTL, 0);
  667. }
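/*
 * Worked example (illustrative, assuming verid >= 240A, a 32-bit FIFO and
 * fifo_depth = 256): an HS200 read with blksz = 512 has blksz_depth = 128,
 * which fits in the FIFO, so CDTHRCTL is programmed with the read threshold
 * enabled and thld_size = 512; any other timing simply writes 0 to disable it.
 */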
  668. static rt_err_t sdio_dw_submit_data_dma(struct sdio_dw *sd, struct rt_mmcsd_data *data)
  669. {
  670. rt_uint32_t temp;
  671. rt_ubase_t level;
  672. sd->using_dma = RT_FALSE;
  673. /* If we don't have a channel, we can't do DMA */
  674. if (!sd->use_dma)
  675. {
  676. return -RT_ENOSYS;
  677. }
  678. sd->using_dma = RT_TRUE;
  679. /*
  680. * Decide the MSIZE and RX/TX Watermark. If current block size is same with
  681. * previous size, no need to update fifoth.
  682. */
  683. if (sd->prev_blksz != data->blksize)
  684. {
  685. sdio_dw_adjust_fifoth(sd, data);
  686. }
  687. /* Enable the DMA interface */
  688. temp = sdio_dw_readl(sd, CTRL);
  689. temp |= SDIO_DW_CTRL_DMA_ENABLE;
  690. sdio_dw_writel(sd, CTRL, temp);
  691. /* Disable RX/TX IRQs, let DMA handle it */
  692. level = rt_spin_lock_irqsave(&sd->irq_lock);
  693. temp = sdio_dw_readl(sd, INTMASK);
  694. temp &= ~(PINT(RXDR) | PINT(TXDR));
  695. sdio_dw_writel(sd, INTMASK, temp);
  696. rt_spin_unlock_irqrestore(&sd->irq_lock, level);
  697. /* Flush data to memory */
  698. rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, sd->last_buf, sd->last_remain);
  699. if (sd->dma_ops->start(sd))
  700. {
  701. /* We can't do DMA, try PIO for this one */
  702. sd->dma_ops->stop(sd);
  703. return -RT_ENOSYS;
  704. }
  705. return RT_EOK;
  706. }
  707. static void sdio_dw_submit_data(struct sdio_dw *sd, struct rt_mmcsd_data *data)
  708. {
  709. rt_ubase_t level;
  710. rt_uint32_t temp;
  711. data->err = -RT_ERROR;
  712. sd->data = data;
  713. sd->last_buf = data->buf;
  714. sd->last_remain = data->blks * data->blksize;
  715. if ((data->flags & DATA_DIR_READ))
  716. {
  717. sd->dir_status = SDIO_DW_RECV_STATUS;
  718. }
  719. else
  720. {
  721. sd->dir_status = SDIO_DW_SEND_STATUS;
  722. }
  723. sdio_dw_ctrl_thld(sd, data);
  724. if (sdio_dw_submit_data_dma(sd, data))
  725. {
  726. sd->part_buf_start = 0;
  727. sd->part_buf_count = 0;
  728. sdio_dw_writel(sd, RINTSTS, PINT(TXDR) | PINT(RXDR));
  729. level = rt_spin_lock_irqsave(&sd->irq_lock);
  730. temp = sdio_dw_readl(sd, INTMASK);
  731. temp |= PINT(TXDR) | PINT(RXDR);
  732. sdio_dw_writel(sd, INTMASK, temp);
  733. rt_spin_unlock_irqrestore(&sd->irq_lock, level);
  734. temp = sdio_dw_readl(sd, CTRL);
  735. temp &= ~SDIO_DW_CTRL_DMA_ENABLE;
  736. sdio_dw_writel(sd, CTRL, temp);
        /*
         * Use the initial fifoth_val for PIO mode. If wm_aligned is set, we set
         * the watermark to the same value as the data size. If the next issued
         * data may be transferred by DMA mode, prev_blksz should be invalidated.
         */
  742. if (sd->wm_aligned)
  743. {
  744. sdio_dw_adjust_fifoth(sd, data);
  745. }
  746. else
  747. {
  748. sdio_dw_writel(sd, FIFOTH, sd->fifoth_val);
  749. }
  750. sd->prev_blksz = 0;
  751. }
  752. else
  753. {
  754. /*
  755. * Keep the current block size.
  756. * It will be used to decide whether to update fifoth register next time.
  757. */
  758. sd->prev_blksz = data->blksize;
  759. }
  760. }
  761. static void sdio_dw_setup_bus(struct sdio_dw_slot *slot, rt_bool_t force_clkinit)
  762. {
  763. struct sdio_dw *sd = slot->sd;
  764. rt_uint32_t clock = slot->clock;
  765. rt_uint32_t cmd_bits = SDIO_DW_CMD_UPD_CLK | SDIO_DW_CMD_PRV_DAT_WAIT;
  766. /* We must continue to set bit 28 in CMD until the change is complete */
  767. if (sd->state == STATE_WAITING_CMD11_DONE)
  768. {
  769. cmd_bits |= SDIO_DW_CMD_VOLT_SWITCH;
  770. }
  771. if (!clock)
  772. {
  773. sdio_dw_writel(sd, CLKENA, 0);
  774. sdio_dw_send_cmd(slot, cmd_bits, 0);
  775. }
  776. else if (clock != sd->current_speed || force_clkinit)
  777. {
  778. rt_uint32_t clk_en_a, div = sd->bus_hz / clock;
  779. if (sd->bus_hz % clock && sd->bus_hz > clock)
  780. {
  781. /* Move the + 1 after the divide to prevent over-clocking the card */
  782. div += 1;
  783. }
  784. div = (sd->bus_hz != clock) ? RT_DIV_ROUND_UP(div, 2) : 0;
  785. if (clock != slot->clk_old &&
  786. !rt_bitmap_test_bit(&slot->flags, DW_MMC_CARD_NEEDS_POLL) &&
  787. !force_clkinit)
  788. {
  789. LOG_D("Bus speed (slot %d) = %uHz (slot req %uHz, actual %uHZ div = %d)",
  790. slot->id, sd->bus_hz, clock,
  791. div ? ((sd->bus_hz / div) >> 1) : sd->bus_hz, div);
  792. }
  793. /* Disable clock */
  794. sdio_dw_writel(sd, CLKENA, 0);
  795. sdio_dw_writel(sd, CLKSRC, 0);
  796. /* Inform CIU */
  797. sdio_dw_send_cmd(slot, cmd_bits, 0);
  798. /* Set clock to desired speed */
  799. sdio_dw_writel(sd, CLKDIV, div);
  800. /* Inform CIU */
  801. sdio_dw_send_cmd(slot, cmd_bits, 0);
  802. /* Enable clock; only low power if no SDIO */
  803. clk_en_a = SDIO_DW_CLKEN_ENABLE << slot->id;
  804. if (!rt_bitmap_test_bit(&slot->flags, DW_MMC_CARD_NO_LOW_PWR))
  805. {
  806. clk_en_a |= SDIO_DW_CLKEN_LOW_PWR << slot->id;
  807. }
  808. sdio_dw_writel(sd, CLKENA, clk_en_a);
  809. /* Inform CIU */
  810. sdio_dw_send_cmd(slot, cmd_bits, 0);
  811. /* Keep the last clock value that was requested from core */
  812. slot->clk_old = clock;
  813. }
  814. sd->current_speed = clock;
  815. /* Set the current slot bus width */
  816. sdio_dw_writel(sd, CTYPE, (slot->ctype << slot->id));
  817. }
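/*
 * Worked example (illustrative): with bus_hz = 100 MHz and a requested clock
 * of 400 kHz, div = 100000000 / 400000 = 250 (no remainder, so no extra +1),
 * CLKDIV = DIV_ROUND_UP(250, 2) = 125 and the card clock becomes
 * bus_hz / (2 * 125) = 400 kHz. Requesting exactly bus_hz programs CLKDIV = 0,
 * i.e. the divider is bypassed.
 */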
  818. static void sdio_dw_set_data_timeout(struct sdio_dw *sd, rt_uint32_t timeout_ns)
  819. {
  820. rt_uint64_t tmp;
  821. rt_uint32_t clk_div, tmout;
  822. const struct sdio_dw_drv_data *drv_data = sd->drv_data;
  823. if (drv_data && drv_data->set_data_timeout)
  824. {
  825. drv_data->set_data_timeout(sd, timeout_ns);
  826. return;
  827. }
  828. clk_div = (sdio_dw_readl(sd, CLKDIV) & 0xff) * 2;
  829. if (clk_div == 0)
  830. {
  831. clk_div = 1;
  832. }
  833. tmp = RT_DIV_ROUND_UP_ULL((rt_uint64_t)timeout_ns * sd->bus_hz, NSEC_PER_SEC);
  834. tmp = RT_DIV_ROUND_UP_ULL(tmp, clk_div);
  835. /* TMOUT[7:0] (RESPONSE_TIMEOUT): Set maximum */
  836. tmout = 0xff;
  837. /* TMOUT[31:8] (DATA_TIMEOUT) */
  838. if (!tmp || tmp > 0xffffff)
  839. {
  840. tmout |= (0xffffff << 8);
  841. }
  842. else
  843. {
  844. tmout |= (tmp & 0xffffff) << 8;
  845. }
  846. sdio_dw_writel(sd, TMOUT, tmout);
  847. }
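/*
 * Worked example (illustrative): timeout_ns = 100 ms, bus_hz = 50 MHz and
 * CLKDIV = 1 (clk_div = 2) give tmp = 100000000 * 50000000 / 1000000000
 * = 5000000 controller clocks, then 5000000 / 2 = 2500000 card clocks, which
 * fits in TMOUT[31:8]; RESPONSE_TIMEOUT (TMOUT[7:0]) always gets its maximum
 * of 0xff.
 */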
  848. /* Push final bytes to part_buf, only use during push */
  849. static void sdio_dw_set_part_bytes(struct sdio_dw *sd, void *buf, int cnt)
  850. {
  851. rt_memcpy((void *)&sd->part_buf, buf, cnt);
  852. sd->part_buf_count = cnt;
  853. }
  854. /* Append bytes to part_buf, only use during push */
  855. static int sdio_dw_push_part_bytes(struct sdio_dw *sd, void *buf, int cnt)
  856. {
  857. cnt = rt_min(cnt, (1 << sd->data_shift) - sd->part_buf_count);
  858. rt_memcpy((void *)&sd->part_buf + sd->part_buf_count, buf, cnt);
  859. sd->part_buf_count += cnt;
  860. return cnt;
  861. }
  862. /* Pull first bytes from part_buf, only use during pull */
  863. static int sdio_dw_pull_part_bytes(struct sdio_dw *sd, void *buf, int cnt)
  864. {
  865. cnt = rt_min_t(int, cnt, sd->part_buf_count);
  866. if (cnt)
  867. {
  868. rt_memcpy(buf, (void *)&sd->part_buf + sd->part_buf_start, cnt);
  869. sd->part_buf_count -= cnt;
  870. sd->part_buf_start += cnt;
  871. }
  872. return cnt;
  873. }
  874. /* Pull final bytes from the part_buf, assuming it's just been filled */
  875. static void sdio_dw_pull_final_bytes(struct sdio_dw *sd, void *buf, int cnt)
  876. {
  877. rt_memcpy(buf, &sd->part_buf, cnt);
  878. sd->part_buf_start = cnt;
  879. sd->part_buf_count = (1 << sd->data_shift) - cnt;
  880. }
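/*
 * Illustrative part_buf flow (assuming a 32-bit FIFO, i.e. data_shift = 2):
 * pushing a 514-byte buffer writes 128 full words through the FIFO and parks
 * the trailing 2 bytes in part_buf; they are only flushed as a final FIFO
 * word once bytes_xfered reaches blksize * blks, which is exactly what the
 * push helpers below check for.
 */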
  881. static void sdio_dw_push_data16(struct sdio_dw *sd, void *buf, int cnt)
  882. {
  883. struct rt_mmcsd_data *data = sd->data;
  884. int init_cnt = cnt;
  885. /* Try and push anything in the part_buf */
  886. if ((sd->part_buf_count))
  887. {
  888. int len = sdio_dw_push_part_bytes(sd, buf, cnt);
  889. buf += len;
  890. cnt -= len;
  891. if (sd->part_buf_count == 2)
  892. {
  893. sdio_dw_fifo_writew(sd, sd->part_buf16);
  894. sd->part_buf_count = 0;
  895. }
  896. }
  897. #ifndef ARCH_HAVE_EFFICIENT_UNALIGNED_ACCESS
  898. if (((rt_ubase_t)buf & 0x1))
  899. {
  900. while (cnt >= 2)
  901. {
  902. rt_uint16_t aligned_buf[64];
  903. int len = rt_min(cnt & -2, (int)sizeof(aligned_buf));
  904. int items = len >> 1;
  905. /* rt_memcpy from input buffer into aligned buffer */
  906. rt_memcpy(aligned_buf, buf, len);
  907. buf += len;
  908. cnt -= len;
  909. /* Push data from aligned buffer into fifo */
  910. for (int i = 0; i < items; ++i)
  911. {
  912. sdio_dw_fifo_writew(sd, aligned_buf[i]);
  913. }
  914. }
  915. }
  916. else
  917. #endif /* !ARCH_HAVE_EFFICIENT_UNALIGNED_ACCESS */
  918. {
  919. rt_uint16_t *pdata = buf;
  920. for (; cnt >= 2; cnt -= 2)
  921. {
  922. sdio_dw_fifo_writew(sd, *pdata++);
  923. }
  924. buf = pdata;
  925. }
  926. /* Put anything remaining in the part_buf */
  927. if (cnt)
  928. {
  929. sdio_dw_set_part_bytes(sd, buf, cnt);
  930. /* Push data if we have reached the expected data length */
  931. if ((data->bytes_xfered + init_cnt) == (data->blksize * data->blks))
  932. {
  933. sdio_dw_fifo_writew(sd, sd->part_buf16);
  934. }
  935. }
  936. }
  937. static void sdio_dw_pull_data16(struct sdio_dw *sd, void *buf, int cnt)
  938. {
  939. #ifndef ARCH_HAVE_EFFICIENT_UNALIGNED_ACCESS
  940. if (((rt_ubase_t)buf & 0x1))
  941. {
  942. while (cnt >= 2)
  943. {
  944. /* Pull data from fifo into aligned buffer */
  945. rt_uint16_t aligned_buf[64];
  946. int len = rt_min(cnt & -2, (int)sizeof(aligned_buf));
  947. int items = len >> 1;
  948. for (int i = 0; i < items; ++i)
  949. {
  950. aligned_buf[i] = sdio_dw_fifo_readw(sd);
  951. }
  952. /* rt_memcpy from aligned buffer into output buffer */
  953. rt_memcpy(buf, aligned_buf, len);
  954. buf += len;
  955. cnt -= len;
  956. }
  957. }
  958. else
  959. #endif /* !ARCH_HAVE_EFFICIENT_UNALIGNED_ACCESS */
  960. {
  961. rt_uint16_t *pdata = buf;
  962. for (; cnt >= 2; cnt -= 2)
  963. {
  964. *pdata++ = sdio_dw_fifo_readw(sd);
  965. }
  966. buf = pdata;
  967. }
  968. if (cnt)
  969. {
  970. sd->part_buf16 = sdio_dw_fifo_readw(sd);
  971. sdio_dw_pull_final_bytes(sd, buf, cnt);
  972. }
  973. }
  974. static void sdio_dw_push_data32(struct sdio_dw *sd, void *buf, int cnt)
  975. {
  976. struct rt_mmcsd_data *data = sd->data;
  977. int init_cnt = cnt;
  978. /* Try and push anything in the part_buf */
  979. if ((sd->part_buf_count))
  980. {
  981. int len = sdio_dw_push_part_bytes(sd, buf, cnt);
  982. buf += len;
  983. cnt -= len;
  984. if (sd->part_buf_count == 4)
  985. {
  986. sdio_dw_fifo_writel(sd, sd->part_buf32);
  987. sd->part_buf_count = 0;
  988. }
  989. }
  990. #ifndef ARCH_HAVE_EFFICIENT_UNALIGNED_ACCESS
  991. if (((rt_ubase_t)buf & 0x3))
  992. {
  993. while (cnt >= 4)
  994. {
  995. rt_uint32_t aligned_buf[32];
  996. int len = rt_min(cnt & -4, (int)sizeof(aligned_buf));
  997. int items = len >> 2;
  998. /* rt_memcpy from input buffer into aligned buffer */
  999. rt_memcpy(aligned_buf, buf, len);
  1000. buf += len;
  1001. cnt -= len;
  1002. /* Push data from aligned buffer into fifo */
  1003. for (int i = 0; i < items; ++i)
  1004. {
  1005. sdio_dw_fifo_writel(sd, aligned_buf[i]);
  1006. }
  1007. }
  1008. }
  1009. else
  1010. #endif /* !ARCH_HAVE_EFFICIENT_UNALIGNED_ACCESS */
  1011. {
  1012. rt_uint32_t *pdata = buf;
  1013. for (; cnt >= 4; cnt -= 4)
  1014. {
  1015. sdio_dw_fifo_writel(sd, *pdata++);
  1016. }
  1017. buf = pdata;
  1018. }
  1019. /* Put anything remaining in the part_buf */
  1020. if (cnt)
  1021. {
  1022. sdio_dw_set_part_bytes(sd, buf, cnt);
  1023. /* Push data if we have reached the expected data length */
  1024. if ((data->bytes_xfered + init_cnt) == (data->blksize * data->blks))
  1025. {
  1026. sdio_dw_fifo_writel(sd, sd->part_buf32);
  1027. }
  1028. }
  1029. }
  1030. static void sdio_dw_pull_data32(struct sdio_dw *sd, void *buf, int cnt)
  1031. {
  1032. #ifndef ARCH_HAVE_EFFICIENT_UNALIGNED_ACCESS
  1033. if (((rt_ubase_t)buf & 0x3))
  1034. {
  1035. while (cnt >= 4)
  1036. {
  1037. /* Pull data from fifo into aligned buffer */
  1038. rt_uint32_t aligned_buf[32];
  1039. int len = rt_min(cnt & -4, (int)sizeof(aligned_buf));
  1040. int items = len >> 2;
  1041. for (int i = 0; i < items; ++i)
  1042. {
  1043. aligned_buf[i] = sdio_dw_fifo_readl(sd);
  1044. }
  1045. /* rt_memcpy from aligned buffer into output buffer */
  1046. rt_memcpy(buf, aligned_buf, len);
  1047. buf += len;
  1048. cnt -= len;
  1049. }
  1050. }
  1051. else
  1052. #endif /* !ARCH_HAVE_EFFICIENT_UNALIGNED_ACCESS */
  1053. {
  1054. rt_uint32_t *pdata = buf;
  1055. for (; cnt >= 4; cnt -= 4)
  1056. {
  1057. *pdata++ = sdio_dw_fifo_readl(sd);
  1058. }
  1059. buf = pdata;
  1060. }
  1061. if (cnt)
  1062. {
  1063. sd->part_buf32 = sdio_dw_fifo_readl(sd);
  1064. sdio_dw_pull_final_bytes(sd, buf, cnt);
  1065. }
  1066. }
  1067. static void sdio_dw_push_data64(struct sdio_dw *sd, void *buf, int cnt)
  1068. {
  1069. struct rt_mmcsd_data *data = sd->data;
  1070. int init_cnt = cnt;
  1071. /* Try and push anything in the part_buf */
  1072. if ((sd->part_buf_count))
  1073. {
  1074. int len = sdio_dw_push_part_bytes(sd, buf, cnt);
  1075. buf += len;
  1076. cnt -= len;
  1077. if (sd->part_buf_count == 8)
  1078. {
  1079. sdio_dw_fifo_writeq(sd, sd->part_buf64);
  1080. sd->part_buf_count = 0;
  1081. }
  1082. }
  1083. #ifndef ARCH_HAVE_EFFICIENT_UNALIGNED_ACCESS
  1084. if (((rt_ubase_t)buf & 0x7))
  1085. {
  1086. while (cnt >= 8)
  1087. {
  1088. rt_uint64_t aligned_buf[16];
  1089. int len = rt_min(cnt & -8, (int)sizeof(aligned_buf));
  1090. int items = len >> 3;
  1091. /* rt_memcpy from input buffer into aligned buffer */
  1092. rt_memcpy(aligned_buf, buf, len);
  1093. buf += len;
  1094. cnt -= len;
  1095. /* Push data from aligned buffer into fifo */
  1096. for (int i = 0; i < items; ++i)
  1097. {
  1098. sdio_dw_fifo_writeq(sd, aligned_buf[i]);
  1099. }
  1100. }
  1101. }
  1102. else
  1103. #endif /* !ARCH_HAVE_EFFICIENT_UNALIGNED_ACCESS */
  1104. {
  1105. rt_uint64_t *pdata = buf;
  1106. for (; cnt >= 8; cnt -= 8)
  1107. {
  1108. sdio_dw_fifo_writeq(sd, *pdata++);
  1109. }
  1110. buf = pdata;
  1111. }
  1112. /* Put anything remaining in the part_buf */
  1113. if (cnt)
  1114. {
  1115. sdio_dw_set_part_bytes(sd, buf, cnt);
  1116. /* Push data if we have reached the expected data length */
  1117. if ((data->bytes_xfered + init_cnt) == (data->blksize * data->blks))
  1118. {
  1119. sdio_dw_fifo_writeq(sd, sd->part_buf64);
  1120. }
  1121. }
  1122. }
  1123. static void sdio_dw_pull_data64(struct sdio_dw *sd, void *buf, int cnt)
  1124. {
  1125. #ifndef ARCH_HAVE_EFFICIENT_UNALIGNED_ACCESS
  1126. if (((rt_ubase_t)buf & 0x7))
  1127. {
  1128. while (cnt >= 8)
  1129. {
  1130. /* Pull data from fifo into aligned buffer */
  1131. rt_uint64_t aligned_buf[16];
  1132. int len = rt_min(cnt & -8, (int)sizeof(aligned_buf));
  1133. int items = len >> 3;
  1134. for (int i = 0; i < items; ++i)
  1135. {
  1136. aligned_buf[i] = sdio_dw_fifo_readq(sd);
  1137. }
  1138. /* rt_memcpy from aligned buffer into output buffer */
  1139. rt_memcpy(buf, aligned_buf, len);
  1140. buf += len;
  1141. cnt -= len;
  1142. }
  1143. }
  1144. else
  1145. #endif /* !ARCH_HAVE_EFFICIENT_UNALIGNED_ACCESS */
  1146. {
  1147. rt_uint64_t *pdata = buf;
  1148. for (; cnt >= 8; cnt -= 8)
  1149. {
  1150. *pdata++ = sdio_dw_fifo_readq(sd);
  1151. }
  1152. buf = pdata;
  1153. }
  1154. if (cnt)
  1155. {
  1156. sd->part_buf64 = sdio_dw_fifo_readq(sd);
  1157. sdio_dw_pull_final_bytes(sd, buf, cnt);
  1158. }
  1159. }
  1160. static void sdio_dw_pull_data(struct sdio_dw *sd, void *buf, int cnt)
  1161. {
  1162. /* Get remaining partial bytes */
  1163. int len = sdio_dw_pull_part_bytes(sd, buf, cnt);
  1164. if (len != cnt)
  1165. {
  1166. buf += len;
  1167. cnt -= len;
  1168. /* Get the rest of the data */
  1169. sd->pull_data(sd, buf, cnt);
  1170. }
  1171. }
  1172. static void sdio_dw_read_data_pio(struct sdio_dw *sd, rt_bool_t dto)
  1173. {
  1174. void *buf;
  1175. int shift = sd->data_shift;
  1176. struct rt_mmcsd_data *data = sd->data;
  1177. rt_uint32_t status, remain, fcnt, len;
  1178. buf = sd->last_buf;
  1179. remain = sd->last_remain;
  1180. do {
  1181. if (!remain)
  1182. {
  1183. break;
  1184. }
  1185. do {
  1186. fcnt = (SDIO_DW_GET_FCNT(sdio_dw_readl(sd, STATUS)) << shift) + sd->part_buf_count;
  1187. len = rt_min(remain, fcnt);
  1188. if (!len)
  1189. {
  1190. break;
  1191. }
  1192. sdio_dw_pull_data(sd, buf, len);
  1193. data->bytes_xfered += len;
  1194. buf += len;
  1195. remain -= len;
  1196. } while (remain);
  1197. status = sdio_dw_readl(sd, MINTSTS);
  1198. sdio_dw_writel(sd, RINTSTS, PINT(RXDR));
  1199. /* If the RXDR is ready read again */
  1200. } while ((status & PINT(RXDR)) || (dto && SDIO_DW_GET_FCNT(sdio_dw_readl(sd, STATUS))));
  1201. sd->last_buf = remain ? buf : RT_NULL;
  1202. sd->last_remain = remain;
  1203. rt_hw_wmb();
  1204. rt_bitmap_set_bit(&sd->pending_events, EVENT_XFER_COMPLETE);
  1205. }
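/*
 * Worked example (illustrative, 32-bit FIFO so shift = 2): if
 * SDIO_DW_GET_FCNT() reports 16 words, fcnt = (16 << 2) + part_buf_count
 * bytes, so each RXDR pass above drains up to 64 bytes (plus any parked
 * partial bytes) from the FIFO into the request buffer.
 */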
  1206. static void sdio_dw_write_data_pio(struct sdio_dw *sd)
  1207. {
  1208. void *buf;
  1209. int shift = sd->data_shift;
  1210. struct rt_mmcsd_data *data = sd->data;
  1211. rt_uint32_t status, remain, fcnt, len, fifo_depth;
  1212. buf = sd->last_buf;
  1213. remain = sd->last_remain;
  1214. fifo_depth = sd->fifo_depth;
  1215. do {
  1216. if (!remain)
  1217. {
  1218. break;
  1219. }
  1220. do {
  1221. fcnt = ((fifo_depth - SDIO_DW_GET_FCNT(sdio_dw_readl(sd, STATUS))) << shift) - sd->part_buf_count;
  1222. len = rt_min(remain, fcnt);
  1223. if (!len)
  1224. {
  1225. break;
  1226. }
  1227. sd->push_data(sd, buf, len);
  1228. data->bytes_xfered += len;
  1229. buf += len;
  1230. remain -= len;
  1231. } while (remain);
  1232. status = sdio_dw_readl(sd, MINTSTS);
  1233. sdio_dw_writel(sd, RINTSTS, PINT(TXDR));
  1234. /* If TXDR write again */
  1235. } while ((status & PINT(TXDR)));
  1236. sd->last_buf = remain ? buf : RT_NULL;
  1237. sd->last_remain = remain;
  1238. rt_hw_wmb();
  1239. rt_bitmap_set_bit(&sd->pending_events, EVENT_XFER_COMPLETE);
  1240. }
  1241. static void sdio_dw_init_dma(struct sdio_dw *sd)
  1242. {
  1243. int addr_config;
    /*
     * Check the transfer mode from HCON[17:16].
     * To clear up the ambiguous description in the dw_mmc databook:
     * 2b'00: No DMA Interface         -> actually means using the internal DMA block
     * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
     * 2b'10: Generic DMA Interface    -> non-Synopsys generic DMA block
     * 2b'11: Non DW DMA Interface     -> PIO only
     * Compared to the DesignWare DMA Interface, the Generic DMA Interface has a
     * simpler request/acknowledge handshake mechanism and both of them are
     * regarded as external DMA masters for dw_mmc.
     */
  1255. sd->use_dma = SDIO_DW_GET_TRANS_MODE(sdio_dw_readl(sd, HCON));
  1256. if (sd->use_dma == DMA_INTERFACE_IDMA)
  1257. {
  1258. sd->use_dma = TRANS_MODE_IDMAC;
  1259. }
  1260. else if (sd->use_dma == DMA_INTERFACE_DWDMA || sd->use_dma == DMA_INTERFACE_GDMA)
  1261. {
  1262. sd->use_dma = TRANS_MODE_EDMAC;
  1263. }
  1264. else
  1265. {
  1266. goto _no_dma;
  1267. }
  1268. /* Determine which DMA interface to use */
  1269. if (sd->use_dma == TRANS_MODE_IDMAC)
  1270. {
  1271. /* Check ADDR_CONFIG bit in HCON to find IDMAC address bus width */
  1272. addr_config = SDIO_DW_GET_ADDR_CONFIG(sdio_dw_readl(sd, HCON));
  1273. /* Supports IDMAC in 64/32-bit address mode */
  1274. sd->dma_64bit_address = (addr_config == 1);
  1275. LOG_D("IDMAC supports %s-bit address mode", sd->dma_64bit_address ? "64" : "32");
  1276. /* Alloc memory for translation */
  1277. sd->dma_buf = rt_dma_alloc_coherent(sd->bus_dev, DESC_RING_BUF_SZ, &sd->dma_buf_phy);
  1278. if (!sd->dma_buf)
  1279. {
  1280. LOG_E("Could not alloc DMA memory witch cache");
  1281. goto _no_dma;
  1282. }
  1283. sd->dma_ops = &sdio_dw_idmac_ops;
  1284. LOG_D("Using internal DMA controller");
  1285. }
  1286. else
  1287. {
  1288. if (!rt_dm_dev_prop_read_bool(&sd->parent, "dma-names") ||
  1289. !rt_dm_dev_prop_read_bool(&sd->parent, "dmas"))
  1290. {
  1291. goto _no_dma;
  1292. }
  1293. sd->dma_ops = &sdio_dw_edmac_ops;
  1294. LOG_D("Using external DMA controller");
  1295. }
  1296. if (sd->dma_ops->init && sd->dma_ops->start && sd->dma_ops->stop && sd->dma_ops->cleanup)
  1297. {
  1298. if (sd->dma_ops->init(sd))
  1299. {
  1300. LOG_E("Unable to initialize DMA Controller");
  1301. goto _no_dma;
  1302. }
  1303. }
  1304. else
  1305. {
  1306. LOG_E("DMA initialization not found");
  1307. goto _no_dma;
  1308. }
  1309. return;
  1310. _no_dma:
  1311. LOG_D("Using PIO mode");
  1312. sd->use_dma = TRANS_MODE_PIO;
  1313. }
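/*
 * Illustrative outcome: a controller whose HCON reports DMA_INTERFACE_IDMA
 * with ADDR_CONFIG = 1 ends up with use_dma = TRANS_MODE_IDMAC, 64-bit
 * descriptors and the one-page ring allocated above; one that reports a
 * DW-DMA/generic DMA interface but has no "dmas"/"dma-names" properties in
 * the device tree quietly falls back to TRANS_MODE_PIO.
 */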
static rt_bool_t sdio_dw_reset(struct sdio_dw *sd)
{
    rt_bool_t res = RT_FALSE;
    rt_uint32_t flags = SDIO_DW_CTRL_RESET | SDIO_DW_CTRL_FIFO_RESET;

    if (sd->use_dma)
    {
        flags |= SDIO_DW_CTRL_DMA_RESET;
    }

    if (sdio_dw_ctrl_reset(sd, flags))
    {
        int timeout = 500 * USEC_PER_MSEC;

        /* In all cases we clear the RAWINTS register to clear any interrupts */
        sdio_dw_writel(sd, RINTSTS, 0xffffffff);

        if (!sd->use_dma)
        {
            res = RT_TRUE;
            goto _ciu_out;
        }

        /* Wait for dma_req to be cleared */
        while ((sdio_dw_readl(sd, STATUS) & SDIO_DW_STATUS_DMA_REQ) && timeout--)
        {
            rt_hw_cpu_relax();
        }

        if (timeout <= 0)
        {
            LOG_E("Timeout waiting for dma_req to be cleared in reset");
            goto _ciu_out;
        }

        /* When using DMA next we reset the fifo again */
        if (!sdio_dw_ctrl_reset(sd, SDIO_DW_CTRL_FIFO_RESET))
        {
            goto _ciu_out;
        }
    }
    else
    {
        /* If the controller reset bit did clear, then set clock regs */
        if (!(sdio_dw_readl(sd, CTRL) & SDIO_DW_CTRL_RESET))
        {
            LOG_E("FIFO/DMA reset bits didn't clear but ciu was reset, doing clock update");
            goto _ciu_out;
        }
    }

    if (sd->use_dma == TRANS_MODE_IDMAC)
    {
        /* It is also required that we reinit idmac */
        sdio_dw_idmac_init(sd);
    }

    res = RT_TRUE;

_ciu_out:
    /* After a CTRL reset we need to have CIU set clock registers */
    sdio_dw_send_cmd(sd->slot, SDIO_DW_CMD_UPD_CLK, 0);

    return res;
}

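/*
 * Program one command (and its data phase, if any) into the controller.
 * Called with sd->lock held; CMD11 voltage switching gets special handling.
 */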
static void sdio_dw_start_request(struct sdio_dw *sd, struct rt_mmcsd_cmd *cmd)
{
    rt_uint32_t cmd_flags;
    struct sdio_dw_slot *slot = sd->slot;
    struct rt_mmcsd_data *data = cmd->data;

    if (sd->state == STATE_WAITING_CMD11_DONE)
    {
        sd->state = STATE_IDLE;
    }

    if (sd->state == STATE_IDLE)
    {
        sd->state = STATE_SENDING_CMD;
    }

    sd->req = sd->slot->req;
    sd->cmd = cmd;
    sd->pending_events = 0;
    sd->cmd_status = 0;
    sd->data_status = 0;
    sd->dir_status = 0;

    if (data)
    {
        sdio_dw_set_data_timeout(sd, data->timeout_ns);
        sdio_dw_writel(sd, BYTCNT, data->blksize * data->blks);
        sdio_dw_writel(sd, BLKSIZ, data->blksize);
    }

    cmd_flags = cmd->cmd_code;

    if (cmd->cmd_code == STOP_TRANSMISSION ||
        cmd->cmd_code == GO_IDLE_STATE ||
        cmd->cmd_code == GO_INACTIVE_STATE ||
        (cmd->cmd_code == SD_IO_RW_DIRECT && ((cmd->arg >> 9) & 0x1ffff) == SDIO_REG_CCCR_IO_ABORT))
    {
        cmd_flags |= SDIO_DW_CMD_STOP;
    }
    else if (cmd->cmd_code != SEND_STATUS && data)
    {
        cmd_flags |= SDIO_DW_CMD_PRV_DAT_WAIT;
    }

    if (cmd->cmd_code == VOLTAGE_SWITCH)
    {
        rt_uint32_t clk_en_a;

        /* Special bit makes CMD11 not die */
        cmd_flags |= SDIO_DW_CMD_VOLT_SWITCH;

        /* Change state to continue to handle CMD11 weirdness */
        sd->state = STATE_SENDING_CMD11;

        /*
         * We need to disable low power mode (automatic clock stop) while
         * doing voltage switch so we don't confuse the card, since stopping
         * the clock is a specific part of the UHS voltage change dance.
         *
         * Note that low power mode (SDIO_DW_CLKEN_LOW_PWR) will be
         * unconditionally turned back on in sdio_dw_setup_bus() if it's ever
         * called with a non-zero clock. That shouldn't happen until the
         * voltage change is all done.
         */
        clk_en_a = sdio_dw_readl(sd, CLKENA);
        clk_en_a &= ~(SDIO_DW_CLKEN_LOW_PWR << slot->id);
        sdio_dw_writel(sd, CLKENA, clk_en_a);

        sdio_dw_send_cmd(sd->slot, SDIO_DW_CMD_UPD_CLK | SDIO_DW_CMD_PRV_DAT_WAIT, 0);
    }

    switch (resp_type(cmd))
    {
    case RESP_NONE:
        break;

    case RESP_R1:
    case RESP_R5:
    case RESP_R6:
    case RESP_R7:
    case RESP_R1B:
        cmd_flags |= SDIO_DW_CMD_RESP_EXP;
        cmd_flags |= SDIO_DW_CMD_RESP_CRC;
        break;

    case RESP_R2:
        cmd_flags |= SDIO_DW_CMD_RESP_EXP;
        cmd_flags |= SDIO_DW_CMD_RESP_CRC;
        cmd_flags |= SDIO_DW_CMD_RESP_LONG;
        break;

    case RESP_R3:
    case RESP_R4:
        cmd_flags |= SDIO_DW_CMD_RESP_EXP;
        break;

    default:
        LOG_D("Unsupported cmd type = %x", resp_type(cmd));
        break;
    }

    if (data)
    {
        cmd_flags |= SDIO_DW_CMD_DAT_EXP;

        if ((data->flags & DATA_DIR_WRITE))
        {
            cmd_flags |= SDIO_DW_CMD_DAT_WR;
        }
    }

    if (!rt_bitmap_test_bit(&slot->flags, DW_MMC_CARD_NO_USE_HOLD))
    {
        cmd_flags |= SDIO_DW_CMD_USE_HOLD_REG;
    }

    if (rt_bitmap_test_bit(&slot->flags, DW_MMC_CARD_NEED_INIT))
    {
        cmd_flags |= SDIO_DW_CMD_INIT;
        rt_bitmap_clear_bit(&slot->flags, DW_MMC_CARD_NEED_INIT);
    }

    if (data)
    {
        sdio_dw_submit_data(sd, data);

        /* Drain writebuffer */
        rt_hw_wmb();
    }

    sdio_dw_start_cmd(slot, cmd_flags, cmd->arg);

    if (cmd->cmd_code == VOLTAGE_SWITCH)
    {
        rt_ubase_t level = rt_spin_lock_irqsave(&sd->irq_lock);

        if (!rt_bitmap_test_bit(&sd->pending_events, EVENT_CMD_COMPLETE))
        {
            rt_tick_t tick = rt_tick_from_millisecond(500) + 1;

            rt_timer_control(&sd->cmd11_timer, RT_TIMER_CTRL_SET_TIME, &tick);
            rt_timer_start(&sd->cmd11_timer);
        }

        rt_spin_unlock_irqrestore(&sd->irq_lock, level);
    }

    sd->stop_cmdr = sdio_dw_prep_stop_abort(sd, cmd);
}

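/*
 * Finish the current request and hand completion back to the MMC/SD core;
 * sd->lock is dropped around mmcsd_req_complete().
 */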
static void sdio_dw_end_request(struct sdio_dw *sd)
{
    sd->slot->req = RT_NULL;
    sd->req = RT_NULL;

    if (sd->state == STATE_SENDING_CMD11)
    {
        sd->state = STATE_WAITING_CMD11_DONE;
    }
    else
    {
        sd->state = STATE_IDLE;
    }

    rt_hw_spin_unlock(&sd->lock.lock);
    mmcsd_req_complete(sd->slot->host);
    rt_hw_spin_lock(&sd->lock.lock);
}

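/* Fetch the response registers and translate command-phase errors for the core */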
static rt_err_t sdio_dw_cmd_complete(struct sdio_dw *sd, struct rt_mmcsd_cmd *cmd)
{
    rt_uint32_t status = sd->cmd_status;

    sd->cmd_status = 0;

    /* Read the response from the card (up to 16 bytes) */
    if (resp_type(cmd) == RESP_R2)
    {
        cmd->resp[0] = sdio_dw_readl(sd, RESP3);
        cmd->resp[1] = sdio_dw_readl(sd, RESP2);
        cmd->resp[2] = sdio_dw_readl(sd, RESP1);
        cmd->resp[3] = sdio_dw_readl(sd, RESP0);
    }
    else
    {
        cmd->resp[0] = sdio_dw_readl(sd, RESP0);
    }

    if ((status & PINT(RTO)))
    {
        cmd->err = -RT_ETIMEOUT;
    }
    else if ((resp_type(cmd) & (RESP_R1 | RESP_R5 | RESP_R6 | RESP_R7 | RESP_R1B)) &&
             (status & PINT(RCRC)))
    {
        cmd->err = -RT_EIO;
    }
    else if ((status & PINT(RESP_ERR)))
    {
        cmd->err = -RT_EIO;
    }
    else
    {
        cmd->err = RT_EOK;
    }

    return cmd->err;
}

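/* Translate the latched data-phase status into data->err and bytes_xfered */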
static int sdio_dw_data_complete(struct sdio_dw *sd, struct rt_mmcsd_data *data)
{
    rt_uint32_t status = sd->data_status;

    if (status & SDIO_DW_DATA_ERROR_FLAGS)
    {
        if (status & PINT(DRTO))
        {
            data->err = -RT_ETIMEOUT;
        }
        else if (status & PINT(DCRC))
        {
            data->err = -RT_EIO;
        }
        else if (status & PINT(EBE))
        {
            if (sd->dir_status == SDIO_DW_SEND_STATUS)
            {
                /*
                 * No data CRC status was returned. The number of bytes
                 * transferred will be exaggerated in PIO mode.
                 */
                data->bytes_xfered = 0;
                data->err = -RT_ETIMEOUT;
            }
            else if (sd->dir_status == SDIO_DW_RECV_STATUS)
            {
                data->err = -RT_EIO;
            }
        }
        else
        {
            /* PINT(SBE) is included */
            data->err = -RT_EIO;
        }

        LOG_D("Data error, status 0x%x", status);

        /* After an error, there may be data lingering in the FIFO */
        sdio_dw_reset(sd);
    }
    else
    {
        data->bytes_xfered = data->blks * data->blksize;
        data->err = RT_EOK;
    }

    return data->err;
}

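/* rt_mmcsd_host_ops: queue a request from the MMC/SD core */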
static void sdio_dw_mmc_request(struct rt_mmcsd_host *host, struct rt_mmcsd_req *req)
{
    struct sdio_dw_slot *slot = host->private_data;
    struct sdio_dw *sd = slot->sd;

    /*
     * The check for card presence and queueing of the request must be atomic,
     * otherwise the card could be removed in between and the request wouldn't
     * fail until another card was inserted.
     */
    if (!sdio_dw_get_cd(slot))
    {
        req->cmd->err = -RT_EIO;
        mmcsd_req_complete(host);

        return;
    }

    rt_hw_spin_lock(&sd->lock.lock);

    sd->slot->req = req;
    sdio_dw_start_request(sd, req->sbc ? : req->cmd);

    rt_hw_spin_unlock(&sd->lock.lock);
}

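/* rt_mmcsd_host_ops: apply bus width, timing, clock and power settings */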
static void sdio_dw_mmc_set_iocfg(struct rt_mmcsd_host *host, struct rt_mmcsd_io_cfg *ios)
{
    rt_err_t err;
    rt_uint32_t regs;
    struct sdio_dw_slot *slot = host->private_data;
    struct sdio_dw *sd = slot->sd;
    const struct sdio_dw_drv_data *drv_data = sd->drv_data;

    /* Bus */
    switch (ios->bus_width)
    {
    case MMCSD_BUS_WIDTH_4:
        slot->ctype = SDIO_DW_CTYPE_4BIT;
        break;

    case MMCSD_BUS_WIDTH_8:
        slot->ctype = SDIO_DW_CTYPE_8BIT;
        break;

    default:
        slot->ctype = SDIO_DW_CTYPE_1BIT;
        break;
    }

    regs = sdio_dw_readl(sd, UHS_REG);

    /* DDR mode set */
    if (ios->timing == MMCSD_TIMING_MMC_DDR52 ||
        ios->timing == MMCSD_TIMING_UHS_DDR50 ||
        ios->timing == MMCSD_TIMING_MMC_HS400)
    {
        regs |= ((0x1 << slot->id) << 16);
    }
    else
    {
        regs &= ~((0x1 << slot->id) << 16);
    }

    sdio_dw_writel(sd, UHS_REG, regs);
    sd->timing = ios->timing;

    /*
     * Use mirror of ios->clock to prevent race with mmc core ios update when
     * finding the minimum.
     */
    slot->clock = ios->clock;

    if (drv_data && drv_data->set_iocfg)
    {
        drv_data->set_iocfg(sd, ios);
    }

    /* Power */
    switch (ios->power_mode)
    {
    case MMCSD_POWER_UP:
        if (host->supply.vmmc)
        {
            err = sdio_regulator_set_ocr(host, host->supply.vmmc, ios->vdd);

            if (err)
            {
                LOG_E("Failed to enable vmmc regulator error = %s", rt_strerror(err));

                return;
            }
        }

        rt_bitmap_set_bit(&slot->flags, DW_MMC_CARD_NEED_INIT);

        regs = sdio_dw_readl(sd, PWREN);
        regs |= (1 << slot->id);
        sdio_dw_writel(sd, PWREN, regs);
        break;

    case MMCSD_POWER_ON:
        if (!sd->vqmmc_enabled)
        {
            if (host->supply.vqmmc)
            {
                err = rt_regulator_enable(host->supply.vqmmc);

                if (err)
                {
                    LOG_E("Failed to enable vqmmc error = %s", rt_strerror(err));
                }
                else
                {
                    sd->vqmmc_enabled = RT_TRUE;
                }
            }
            else
            {
                sd->vqmmc_enabled = RT_TRUE;
            }

            sdio_dw_ctrl_reset(sd, SDIO_DW_CTRL_ALL_RESET_FLAGS);
        }

        /* Adjust clock / bus width after power is up */
        sdio_dw_setup_bus(slot, RT_FALSE);
        break;

    case MMCSD_POWER_OFF:
        /* Turn clock off before power goes down */
        sdio_dw_setup_bus(slot, RT_FALSE);

        if (host->supply.vmmc)
        {
            sdio_regulator_set_ocr(host, host->supply.vmmc, 0);
        }

        if (host->supply.vqmmc && sd->vqmmc_enabled)
        {
            rt_regulator_disable(host->supply.vqmmc);
        }
        sd->vqmmc_enabled = RT_FALSE;

        regs = sdio_dw_readl(sd, PWREN);
        regs &= ~(1 << slot->id);
        sdio_dw_writel(sd, PWREN, regs);
        break;

    default:
        LOG_E("Invalid power_mode value %x", ios->power_mode);
        break;
    }

    if (sd->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
    {
        sd->state = STATE_IDLE;
    }
}

static rt_int32_t sdio_dw_mmc_get_card_status(struct rt_mmcsd_host *host)
{
    return 0;
}

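/*
 * rt_mmcsd_host_ops: (un)mask the per-slot SDIO interrupt. Low power clock
 * gating is disabled while SDIO interrupts are enabled, see CLKENA below.
 */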
static void sdio_dw_mmc_enable_sdio_irq(struct rt_mmcsd_host *host, rt_int32_t enable)
{
    rt_ubase_t level;
    rt_uint32_t int_mask, clk_en_a_old, clk_en_a;
    struct sdio_dw_slot *slot = host->private_data;
    struct sdio_dw *sd = slot->sd;
    const rt_uint32_t clken_low_pwr = SDIO_DW_CLKEN_LOW_PWR << slot->id;

    /*
     * Low power mode will stop the card clock when idle. According to the
     * description of the CLKENA register we should disable low power mode for
     * SDIO cards if we need SDIO interrupts to work.
     */
    clk_en_a_old = sdio_dw_readl(sd, CLKENA);

    if (enable)
    {
        rt_bitmap_set_bit(&slot->flags, DW_MMC_CARD_NO_LOW_PWR);
        clk_en_a = clk_en_a_old & ~clken_low_pwr;
    }
    else
    {
        rt_bitmap_clear_bit(&slot->flags, DW_MMC_CARD_NO_LOW_PWR);
        clk_en_a = clk_en_a_old | clken_low_pwr;
    }

    if (clk_en_a != clk_en_a_old)
    {
        sdio_dw_writel(sd, CLKENA, clk_en_a);
        sdio_dw_send_cmd(slot, SDIO_DW_CMD_UPD_CLK | SDIO_DW_CMD_PRV_DAT_WAIT, 0);
    }

    level = rt_spin_lock_irqsave(&sd->irq_lock);

    /* Enable/disable Slot Specific SDIO interrupt */
    int_mask = sdio_dw_readl(sd, INTMASK);

    if (enable)
    {
        int_mask |= SDIO_DW_INT_SDIO(slot->sdio_id);
    }
    else
    {
        int_mask &= ~SDIO_DW_INT_SDIO(slot->sdio_id);
    }

    sdio_dw_writel(sd, INTMASK, int_mask);

    rt_spin_unlock_irqrestore(&sd->irq_lock, level);
}

static rt_int32_t sdio_dw_mmc_execute_tuning(struct rt_mmcsd_host *host, rt_int32_t opcode)
{
    struct sdio_dw_slot *slot = host->private_data;
    struct sdio_dw *sd = slot->sd;
    const struct sdio_dw_drv_data *drv_data = sd->drv_data;

    if (drv_data && drv_data->execute_tuning)
    {
        return drv_data->execute_tuning(slot, opcode);
    }

    return -RT_EINVAL;
}

static rt_bool_t sdio_dw_mmc_card_busy(struct rt_mmcsd_host *host)
{
    rt_uint32_t status;
    struct sdio_dw_slot *slot = host->private_data;

    /* Check the busy bit which is low when DAT[3:0] (the data lines) are 0000 */
    status = sdio_dw_readl(slot->sd, STATUS);

    return !!(status & SDIO_DW_STATUS_BUSY);
}

static rt_err_t sdio_dw_mmc_signal_voltage_switch(struct rt_mmcsd_host *host,
        struct rt_mmcsd_io_cfg *ios)
{
    rt_uint32_t uhs, v18;
    struct sdio_dw_slot *slot = host->private_data;
    struct sdio_dw *sd = slot->sd;
    const struct sdio_dw_drv_data *drv_data = sd->drv_data;

    v18 = SDIO_DW_UHS_18V << slot->id;

    if (drv_data && drv_data->switch_voltage)
    {
        return drv_data->switch_voltage(host, ios);
    }

    /*
     * Program the voltage. Note that some instances of dw_mmc may use
     * the UHS_REG for this. For other instances (like exynos) the UHS_REG
     * does no harm but you need to set the regulator directly. Try both.
     */
    uhs = sdio_dw_readl(sd, UHS_REG);

    if (ios->signal_voltage == MMCSD_SIGNAL_VOLTAGE_330)
    {
        uhs &= ~v18;
    }
    else
    {
        uhs |= v18;
    }

    if (host->supply.vqmmc)
    {
        rt_err_t err = sdio_regulator_set_vqmmc(host, ios);

        if (err < 0)
        {
            LOG_D("Regulator set error %s to %s V", rt_strerror(err),
                    uhs & v18 ? "1.8" : "3.3");

            return err;
        }
    }

    sdio_dw_writel(sd, UHS_REG, uhs);

    return RT_EOK;
}

static const struct rt_mmcsd_host_ops sdio_dw_mmc_ops =
{
    .request = sdio_dw_mmc_request,
    .set_iocfg = sdio_dw_mmc_set_iocfg,
    .get_card_status = sdio_dw_mmc_get_card_status,
    .enable_sdio_irq = sdio_dw_mmc_enable_sdio_irq,
    .execute_tuning = sdio_dw_mmc_execute_tuning,
    .card_busy = sdio_dw_mmc_card_busy,
    .signal_voltage_switch = sdio_dw_mmc_signal_voltage_switch,
};

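/*
 * Arm the data read timeout (DRTO) software timer from the TMOUT/CLKDIV
 * settings, as a safety net in case the data-over interrupt never arrives.
 */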
static void sdio_dw_set_drto(struct sdio_dw *sd)
{
    rt_ubase_t level;
    rt_uint32_t drto_clks, drto_div, drto_ms;
    const struct sdio_dw_drv_data *drv_data = sd->drv_data;

    if (drv_data && drv_data->get_drto_clks)
    {
        drto_clks = drv_data->get_drto_clks(sd);
    }
    else
    {
        drto_clks = sdio_dw_readl(sd, TMOUT) >> 8;
    }

    drto_div = (sdio_dw_readl(sd, CLKDIV) & 0xff) * 2;

    if (drto_div == 0)
    {
        drto_div = 1;
    }

    drto_ms = RT_DIV_ROUND_UP_ULL((rt_uint64_t)MSEC_PER_SEC * drto_clks * drto_div,
            sd->bus_hz);

    /* Add a bit of spare time */
    drto_ms += 10;

    level = rt_spin_lock_irqsave(&sd->irq_lock);

    if (!rt_bitmap_test_bit(&sd->pending_events, EVENT_DATA_COMPLETE))
    {
        rt_tick_t tick = rt_tick_from_millisecond(drto_ms);

        rt_timer_control(&sd->dto_timer, RT_TIMER_CTRL_SET_TIME, &tick);
        rt_timer_start(&sd->dto_timer);
    }

    rt_spin_unlock_irqrestore(&sd->irq_lock, level);
}

static rt_bool_t sdio_dw_clear_pending_cmd_complete(struct sdio_dw *sd)
{
    if (!rt_bitmap_test_bit(&sd->pending_events, EVENT_CMD_COMPLETE))
    {
        return RT_FALSE;
    }

    rt_timer_stop(&sd->cto_timer);
    rt_bitmap_clear_bit(&sd->pending_events, EVENT_CMD_COMPLETE);

    return RT_TRUE;
}

static rt_bool_t sdio_dw_clear_pending_data_complete(struct sdio_dw *sd)
{
    if (!rt_bitmap_test_bit(&sd->pending_events, EVENT_DATA_COMPLETE))
    {
        return RT_FALSE;
    }

    rt_timer_stop(&sd->dto_timer);
    rt_bitmap_clear_bit(&sd->pending_events, EVENT_DATA_COMPLETE);

    return RT_TRUE;
}

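/*
 * Request state machine, run from the state workqueue. It walks
 * STATE_SENDING_CMD -> STATE_SENDING_DATA -> STATE_DATA_BUSY ->
 * STATE_SENDING_STOP, consuming the pending_events bits set by the ISR
 * and the timeout timers.
 */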
static void sdio_dw_state_change(struct rt_work *work, void *work_data)
{
    rt_err_t err;
    rt_uint32_t state, prev_state;
    struct rt_mmcsd_cmd *cmd;
    struct rt_mmcsd_req *req;
    struct rt_mmcsd_data *data;
    struct sdio_dw *sd = rt_container_of(work, struct sdio_dw, state_work);

    rt_hw_spin_lock(&sd->lock.lock);

    state = sd->state;
    req = sd->req;
    data = sd->data;

_next_status:
    prev_state = state;

    switch (state)
    {
    case STATE_IDLE:
    case STATE_WAITING_CMD11_DONE:
        break;

    case STATE_SENDING_CMD11:
    case STATE_SENDING_CMD:
        if (!sdio_dw_clear_pending_cmd_complete(sd))
        {
            break;
        }

        cmd = sd->cmd;
        sd->cmd = RT_NULL;

        err = sdio_dw_cmd_complete(sd, cmd);

        if (cmd == req->sbc && !err)
        {
            sdio_dw_start_request(sd, req->cmd);
            goto _unlock;
        }

        if (cmd->data && err)
        {
            if (err != -RT_ETIMEOUT && sd->dir_status == SDIO_DW_RECV_STATUS)
            {
                state = STATE_SENDING_DATA;
                goto _check_status;
            }

            send_stop_abort(sd, data);
            sdio_dw_stop_dma(sd);
            state = STATE_SENDING_STOP;
            break;
        }

        if (!cmd->data || err)
        {
            sdio_dw_end_request(sd);
            goto _unlock;
        }

        prev_state = state = STATE_SENDING_DATA;
        /* Fall through */

    case STATE_SENDING_DATA:
        if (rt_bitmap_test_bit(&sd->pending_events, EVENT_DATA_ERROR))
        {
            rt_bitmap_clear_bit(&sd->pending_events, EVENT_DATA_ERROR);

            if (!(sd->data_status & (PINT(DRTO) | PINT(EBE))))
            {
                send_stop_abort(sd, data);
            }

            sdio_dw_stop_dma(sd);
            state = STATE_DATA_ERROR;
            break;
        }

        if (!rt_bitmap_test_bit(&sd->pending_events, EVENT_XFER_COMPLETE))
        {
            /*
             * If all data-related interrupts don't come within the given time
             * while in the reading data state, arm the data read timeout.
             */
            if (sd->dir_status == SDIO_DW_RECV_STATUS)
            {
                sdio_dw_set_drto(sd);
            }

            break;
        }

        rt_bitmap_clear_bit(&sd->pending_events, EVENT_XFER_COMPLETE);

        /*
         * Handle an EVENT_DATA_ERROR that might have shown up before the
         * transfer completed. This might not have been caught by the check
         * above because the interrupt could have gone off between the previous
         * check and the check for transfer complete.
         *
         * Technically this ought not be needed assuming we get a DATA_COMPLETE
         * eventually (we'll notice the error and end the request), but it
         * shouldn't hurt.
         *
         * This has the advantage of sending the stop command.
         */
        if (rt_bitmap_test_bit(&sd->pending_events, EVENT_DATA_ERROR))
        {
            rt_bitmap_clear_bit(&sd->pending_events, EVENT_DATA_ERROR);

            if (!(sd->data_status & (PINT(DRTO) | PINT(EBE))))
            {
                send_stop_abort(sd, data);
            }

            sdio_dw_stop_dma(sd);
            state = STATE_DATA_ERROR;
            break;
        }

        prev_state = state = STATE_DATA_BUSY;
        /* Fall through */

    case STATE_DATA_BUSY:
        if (!sdio_dw_clear_pending_data_complete(sd))
        {
            /*
             * If the data error interrupt comes but the data over interrupt
             * doesn't come within the given time while in the reading data
             * state, arm the data read timeout.
             */
            if (sd->dir_status == SDIO_DW_RECV_STATUS)
            {
                sdio_dw_set_drto(sd);
            }

            break;
        }

        sd->data = RT_NULL;

        err = sdio_dw_data_complete(sd, data);

        if (!err)
        {
            if (!data->stop || req->sbc)
            {
                if (req->sbc && data->stop)
                {
                    data->stop->err = RT_EOK;
                }

                sdio_dw_end_request(sd);
                goto _unlock;
            }

            /* Stop command for open-ended transfer */
            if (data->stop)
            {
                send_stop_abort(sd, data);
            }
        }
        else
        {
            /*
             * If we don't have a command complete now we'll never get one since
             * we just reset everything; better end the request.
             *
             * If we do have a command complete we'll fall through to the
             * STATE_SENDING_STOP command and everything will be peachy keen.
             */
            if (!rt_bitmap_test_bit(&sd->pending_events, EVENT_CMD_COMPLETE))
            {
                sd->cmd = RT_NULL;
                sdio_dw_end_request(sd);
                goto _unlock;
            }
        }

        /* If err is non-zero, the stop-abort command has already been issued */
        prev_state = state = STATE_SENDING_STOP;
        /* Fall through */

    case STATE_SENDING_STOP:
        if (!sdio_dw_clear_pending_cmd_complete(sd))
        {
            break;
        }

        /* CMD error in data command */
        if (req->cmd->err && req->data)
        {
            sdio_dw_reset(sd);
        }

        sd->cmd = RT_NULL;
        sd->data = RT_NULL;

        if (!req->sbc && req->stop)
        {
            sdio_dw_cmd_complete(sd, req->stop);
        }
        else
        {
            sd->cmd_status = 0;
        }

        sdio_dw_end_request(sd);
        goto _unlock;

    case STATE_DATA_ERROR:
        if (!rt_bitmap_test_bit(&sd->pending_events, EVENT_XFER_COMPLETE))
        {
            break;
        }

        rt_bitmap_clear_bit(&sd->pending_events, EVENT_XFER_COMPLETE);

        state = STATE_DATA_BUSY;
        break;

    default:
        break;
    }

_check_status:
    if (state != prev_state)
    {
        goto _next_status;
    }

    sd->state = state;

_unlock:
    rt_hw_spin_unlock(&sd->lock.lock);
}

static void sdio_dw_cmd11_timer(void *param)
{
    struct sdio_dw *sd = param;

    if (sd->state != STATE_SENDING_CMD11)
    {
        LOG_W("Unexpected CMD11 timeout");

        return;
    }

    sd->cmd_status = PINT(RTO);
    rt_bitmap_set_bit(&sd->pending_events, EVENT_CMD_COMPLETE);
    rt_workqueue_urgent_work(sd->state_wq, &sd->state_work);
}

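/* Command timeout (CTO) timer: fires when CMD_DONE never arrives */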
static void sdio_dw_cto_timer(void *param)
{
    rt_ubase_t level;
    rt_uint32_t pending;
    struct sdio_dw *sd = param;

    level = rt_spin_lock_irqsave(&sd->irq_lock);

    /*
     * If somehow we have very bad interrupt latency it's remotely possible that
     * the timer could fire while the interrupt is still pending or while the
     * interrupt is midway through running. Let's be paranoid and detect those
     * two cases. Note that this paranoia is somewhat justified because in
     * this function we don't actually cancel the pending command in the
     * controller, we just assume it will never come.
     */
    /* Read-only mask reg */
    pending = sdio_dw_readl(sd, MINTSTS);

    if ((pending & (SDIO_DW_CMD_ERROR_FLAGS | PINT(CMD_DONE))))
    {
        /* The interrupt should fire; no need to act but we can warn */
        LOG_W("Unexpected interrupt latency");
        goto _unlock;
    }

    if (rt_bitmap_test_bit(&sd->pending_events, EVENT_CMD_COMPLETE))
    {
        /* Presumably the interrupt handler couldn't delete the timer */
        LOG_W("CTO timeout when already completed");
        goto _unlock;
    }

    /*
     * Continued paranoia to make sure we're in the state we expect.
     * This paranoia isn't really justified but it seems good to be safe.
     */
    switch (sd->state)
    {
    case STATE_SENDING_CMD11:
    case STATE_SENDING_CMD:
    case STATE_SENDING_STOP:
        /*
         * If the CMD_DONE interrupt does NOT come in sending command state, we
         * should notify the driver to terminate the current transfer and
         * report a command timeout to the core.
         */
        sd->cmd_status = PINT(RTO);
        rt_bitmap_set_bit(&sd->pending_events, EVENT_CMD_COMPLETE);
        rt_workqueue_urgent_work(sd->state_wq, &sd->state_work);
        break;

    default:
        LOG_W("Unexpected command timeout, state %d", sd->state);
        break;
    }

_unlock:
    rt_spin_unlock_irqrestore(&sd->irq_lock, level);
}

static void sdio_dw_dto_timer(void *param)
{
    rt_ubase_t level;
    rt_uint32_t pending;
    struct sdio_dw *sd = param;

    level = rt_spin_lock_irqsave(&sd->irq_lock);

    /*
     * The DTO timer is much longer than the CTO timer, so it's even less likely
     * that we'll hit these cases, but it pays to be paranoid.
     */
    /* Read-only mask reg */
    pending = sdio_dw_readl(sd, MINTSTS);

    if ((pending & PINT(DATA_OVER)))
    {
        /* The interrupt should fire; no need to act but we can warn */
        LOG_W("Unexpected data interrupt latency");
        goto _unlock;
    }

    if (rt_bitmap_test_bit(&sd->pending_events, EVENT_DATA_COMPLETE))
    {
        /* Presumably the interrupt handler couldn't delete the timer */
        LOG_W("DTO timeout when already completed");
        goto _unlock;
    }

    /*
     * Continued paranoia to make sure we're in the state we expect.
     * This paranoia isn't really justified but it seems good to be safe.
     */
    switch (sd->state)
    {
    case STATE_SENDING_DATA:
    case STATE_DATA_BUSY:
        /*
         * If the DTO interrupt does NOT come in sending data state, we should
         * notify the driver to terminate the current transfer and report a
         * data timeout to the core.
         */
        sd->data_status = PINT(DRTO);
        rt_bitmap_set_bit(&sd->pending_events, EVENT_DATA_ERROR);
        rt_bitmap_set_bit(&sd->pending_events, EVENT_DATA_COMPLETE);
        rt_workqueue_urgent_work(sd->state_wq, &sd->state_work);
        break;

    default:
        LOG_W("Unexpected data timeout, state %d", sd->state);
        break;
    }

_unlock:
    rt_spin_unlock_irqrestore(&sd->irq_lock, level);
}

static void sdio_dw_cmd_interrupt(struct sdio_dw *sd, rt_uint32_t status)
{
    rt_timer_stop(&sd->cto_timer);

    if (!sd->cmd_status)
    {
        sd->cmd_status = status;
    }

    /* Drain writebuffer */
    rt_hw_wmb();

    rt_bitmap_set_bit(&sd->pending_events, EVENT_CMD_COMPLETE);
    rt_workqueue_urgent_work(sd->state_wq, &sd->state_work);
}

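/*
 * Main interrupt handler: acknowledges each source in RINTSTS, latches the
 * command/data status for the state machine, services PIO FIFO thresholds
 * and, when the IDMAC is in use, its own interrupt status register.
 */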
static void sdio_dw_isr(int irqno, void *param)
{
    rt_uint32_t pending;
    struct sdio_dw *sd = (struct sdio_dw *)param;
    struct sdio_dw_slot *slot = sd->slot;

    /* Read-only mask reg */
    pending = sdio_dw_readl(sd, MINTSTS);

    if (pending)
    {
        if (sd->state == STATE_SENDING_CMD11 && (pending & PINT(VOLT_SWITCH)))
        {
            sdio_dw_writel(sd, RINTSTS, PINT(VOLT_SWITCH));
            pending &= ~PINT(VOLT_SWITCH);

            rt_hw_spin_lock(&sd->irq_lock.lock);
            sdio_dw_cmd_interrupt(sd, pending);
            rt_hw_spin_unlock(&sd->irq_lock.lock);

            rt_timer_stop(&sd->cmd11_timer);
        }

        if ((pending & SDIO_DW_CMD_ERROR_FLAGS))
        {
            rt_hw_spin_lock(&sd->irq_lock.lock);

            rt_timer_stop(&sd->cto_timer);
            sdio_dw_writel(sd, RINTSTS, SDIO_DW_CMD_ERROR_FLAGS);
            sd->cmd_status = pending;
            rt_hw_wmb();
            rt_bitmap_set_bit(&sd->pending_events, EVENT_CMD_COMPLETE);

            rt_hw_spin_unlock(&sd->irq_lock.lock);
        }

        if ((pending & SDIO_DW_DATA_ERROR_FLAGS))
        {
            rt_hw_spin_lock(&sd->irq_lock.lock);

            if ((sd->quirks & SDIO_DW_QUIRK_EXTENDED_TMOUT))
            {
                rt_timer_stop(&sd->dto_timer);
            }

            sdio_dw_writel(sd, RINTSTS, SDIO_DW_DATA_ERROR_FLAGS);
            sd->data_status = pending;
            rt_hw_wmb();
            rt_bitmap_set_bit(&sd->pending_events, EVENT_DATA_ERROR);

            if ((sd->quirks & SDIO_DW_QUIRK_EXTENDED_TMOUT))
            {
                /* In case of error, we cannot expect a DTO */
                rt_bitmap_set_bit(&sd->pending_events, EVENT_DATA_COMPLETE);
            }

            rt_workqueue_urgent_work(sd->state_wq, &sd->state_work);

            rt_hw_spin_unlock(&sd->irq_lock.lock);
        }

        if ((pending & PINT(DATA_OVER)))
        {
            rt_hw_spin_lock(&sd->irq_lock.lock);

            rt_timer_stop(&sd->dto_timer);
            sdio_dw_writel(sd, RINTSTS, PINT(DATA_OVER));

            if (!sd->data_status)
            {
                sd->data_status = pending;
            }

            rt_hw_wmb();

            if (sd->dir_status == SDIO_DW_RECV_STATUS && sd->data && sd->data->buf)
            {
                sdio_dw_read_data_pio(sd, RT_TRUE);
            }

            rt_bitmap_set_bit(&sd->pending_events, EVENT_DATA_COMPLETE);
            rt_workqueue_urgent_work(sd->state_wq, &sd->state_work);

            rt_hw_spin_unlock(&sd->irq_lock.lock);
        }

        if ((pending & PINT(RXDR)))
        {
            sdio_dw_writel(sd, RINTSTS, PINT(RXDR));

            if (sd->dir_status == SDIO_DW_RECV_STATUS && sd->data && sd->data->buf)
            {
                sdio_dw_read_data_pio(sd, RT_FALSE);
            }
        }

        if ((pending & PINT(TXDR)))
        {
            sdio_dw_writel(sd, RINTSTS, PINT(TXDR));

            if (sd->dir_status == SDIO_DW_SEND_STATUS && sd->data && sd->data->buf)
            {
                sdio_dw_write_data_pio(sd);
            }
        }

        if ((pending & PINT(CMD_DONE)))
        {
            rt_hw_spin_lock(&sd->irq_lock.lock);

            sdio_dw_writel(sd, RINTSTS, PINT(CMD_DONE));
            sdio_dw_cmd_interrupt(sd, pending);

            rt_hw_spin_unlock(&sd->irq_lock.lock);
        }

        if ((pending & PINT(CD)))
        {
            sdio_dw_writel(sd, RINTSTS, PINT(CD));
            mmcsd_change(slot->host);
        }

        if ((pending & SDIO_DW_INT_SDIO(slot->sdio_id)))
        {
            sdio_dw_writel(sd, RINTSTS, SDIO_DW_INT_SDIO(slot->sdio_id));
            sdio_dw_mmc_enable_sdio_irq(slot->host, RT_FALSE);
            sdio_irq_wakeup(slot->host);
        }
    }

    if (sd->use_dma != TRANS_MODE_IDMAC)
    {
        return;
    }

    /* Handle IDMA interrupts */
    pending = sd->dma_64bit_address ? sdio_dw_readl(sd, IDSTS64) : sdio_dw_readl(sd, IDSTS);

    if ((pending & (PINTC(TI) | PINTC(RI))))
    {
        if (sd->dma_64bit_address)
        {
            sdio_dw_writel(sd, IDSTS64, PINTC(TI) | PINTC(RI));
            sdio_dw_writel(sd, IDSTS64, PINTC(NI));
        }
        else
        {
            sdio_dw_writel(sd, IDSTS, PINTC(TI) | PINTC(RI));
            sdio_dw_writel(sd, IDSTS, PINTC(NI));
        }

        if (!rt_bitmap_test_bit(&sd->pending_events, EVENT_DATA_ERROR))
        {
            sd->dma_ops->complete(sd);
        }
    }
}

#ifdef RT_USING_OFW
static rt_err_t sdio_dw_parse_ofw(struct sdio_dw *sd)
{
    struct rt_ofw_node *np = sd->parent.ofw_node;
    const struct sdio_dw_drv_data *drv_data = sd->drv_data;

    rt_ofw_prop_read_u32(np, "fifo-depth", &sd->fifo_depth);
    rt_ofw_prop_read_u32(np, "card-detect-delay", &sd->detect_delay_ms);
    rt_ofw_prop_read_u32(np, "data-addr", &sd->data_addr_override);

    if (rt_ofw_prop_read_bool(np, "fifo-watermark-aligned"))
    {
        sd->wm_aligned = RT_TRUE;
    }

    rt_ofw_prop_read_u32(np, "clock-frequency", &sd->bus_hz);

    if (drv_data && drv_data->parse_ofw)
    {
        return drv_data->parse_ofw(sd);
    }

    return RT_EOK;
}
#else
rt_inline rt_err_t sdio_dw_parse_ofw(struct sdio_dw *sd)
{
    return -RT_ENOSYS;
}
#endif /* RT_USING_OFW */

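/*
 * Allocate and register the single slot/host, fill in OCR and frequency
 * defaults, and size the transfer limits according to the DMA mode.
 */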
static rt_err_t sdio_dw_init_slot(struct sdio_dw *sd)
{
    rt_err_t err;
    struct sdio_dw_slot *slot;
    struct rt_mmcsd_host *host = mmcsd_alloc_host();

    if (!host)
    {
        return -RT_ENOMEM;
    }

    slot = rt_calloc(1, sizeof(*slot));

    if (!slot)
    {
        err = -RT_ENOMEM;
        goto _free;
    }

    err = sdio_regulator_get_supply(&sd->parent, host);

    if (err)
    {
        goto _free;
    }

    host->ops = &sdio_dw_mmc_ops;
    host->private_data = slot;

    slot->host = host;
    slot->sd = sd;
    sd->slot = slot;

    slot->id = 0;
    slot->sdio_id = sd->sdio_id0 + slot->id;

    err = sdio_ofw_parse(sd->parent.ofw_node, host);

    if (err)
    {
        goto _free;
    }

    if (!host->valid_ocr)
    {
        host->valid_ocr = VDD_32_33 | VDD_33_34;
    }

    if (sd->minimum_speed)
    {
        host->freq_min = sd->minimum_speed;
    }
    else
    {
        host->freq_min = SDIO_DW_FREQ_HZ_MIN;
    }

    if (!host->freq_max)
    {
        host->freq_max = SDIO_DW_FREQ_HZ_MAX;
    }

    /* Useful defaults if platform data is unset. */
    if (sd->use_dma == TRANS_MODE_IDMAC)
    {
        host->max_dma_segs = sd->ring_size;
        host->max_blk_size = 65535;
        host->max_seg_size = DESC_RING_BUF_SZ;
        host->max_blk_count = (host->max_seg_size * sd->ring_size) / 512;
    }
    else if (sd->use_dma == TRANS_MODE_EDMAC)
    {
        host->max_dma_segs = 64;
        host->max_blk_size = 65535;
        host->max_blk_count = 65535;
        host->max_seg_size = host->max_blk_size * host->max_blk_count;
    }
    else
    {
        /* TRANS_MODE_PIO */
        host->max_dma_segs = 64;
        host->max_blk_size = 65535;
        host->max_blk_count = 512;
        host->max_seg_size = host->max_blk_size * host->max_blk_count;
    }

    return RT_EOK;

_free:
    if (host)
    {
        mmcsd_free_host(host);
    }

    if (slot)
    {
        rt_free(slot);
    }

    return err;
}

static void sdio_dw_free(struct sdio_dw *sd)
{
    if (!rt_is_err_or_null(sd->rstc))
    {
        rt_reset_control_assert(sd->rstc);
        rt_reset_control_put(sd->rstc);
    }

    if (!rt_is_err_or_null(sd->ciu_clk))
    {
        rt_clk_disable_unprepare(sd->ciu_clk);
        rt_clk_put(sd->ciu_clk);
    }

    if (!rt_is_err_or_null(sd->biu_clk))
    {
        rt_clk_disable_unprepare(sd->biu_clk);
        rt_clk_put(sd->biu_clk);
    }

    if (sd->use_dma && sd->dma_ops->exit)
    {
        sd->dma_ops->exit(sd);
    }

    if (sd->dma_buf)
    {
        rt_dma_free_coherent(sd->bus_dev,
                DESC_RING_BUF_SZ, sd->dma_buf, sd->dma_buf_phy);
    }
}

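/*
 * Common probe path shared by the platform glue: parse firmware properties,
 * bring up resets/clocks, detect the host data width, reset the controller,
 * set up DMA/FIFO/interrupts, create the slot, and start the state workqueue
 * and timeout timers.
 */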
rt_err_t sdio_dw_probe(struct sdio_dw *sd)
{
    int i, len;
    rt_err_t err = RT_EOK;
    char dev_name[RT_NAME_MAX];
    const struct sdio_dw_drv_data *drv_data = sd->drv_data;

    err = sdio_dw_parse_ofw(sd);

    if (err && err != -RT_ENOSYS)
    {
        goto _free_res;
    }

    sd->rstc = rt_reset_control_get_by_name(&sd->parent, "reset");

    if (rt_is_err(sd->rstc))
    {
        LOG_E("Reset controller not found");
        err = rt_ptr_err(sd->rstc);

        goto _free_res;
    }

    if (sd->rstc)
    {
        rt_reset_control_assert(sd->rstc);
        rt_hw_us_delay(20);
        rt_reset_control_deassert(sd->rstc);
    }

    sd->biu_clk = rt_clk_get_by_name(&sd->parent, "biu");
    sd->ciu_clk = rt_clk_get_by_name(&sd->parent, "ciu");

    if (rt_is_err(sd->biu_clk) || rt_is_err(sd->ciu_clk))
    {
        /* The board has an already-initialized clock */
        if (sd->bus_hz)
        {
            goto _out_clk;
        }

        err = rt_is_err(sd->biu_clk) ? rt_ptr_err(sd->biu_clk) : rt_ptr_err(sd->ciu_clk);

        goto _free_res;
    }

    err = rt_clk_prepare_enable(sd->ciu_clk);

    if (err)
    {
        goto _free_res;
    }

    if (sd->bus_hz)
    {
        rt_clk_set_rate(sd->ciu_clk, sd->bus_hz);
    }
    sd->bus_hz = rt_clk_get_rate(sd->ciu_clk);

    if (!sd->bus_hz)
    {
        err = -RT_EIO;
        LOG_E("Bus speed not found");

        goto _free_res;
    }

_out_clk:
    if (drv_data && drv_data->init)
    {
        err = drv_data->init(sd);

        if (err)
        {
            goto _free_res;
        }
    }

    rt_spin_lock_init(&sd->lock);
    rt_spin_lock_init(&sd->irq_lock);

    /*
     * Get the host data width - this assumes that HCON has been set with the
     * correct values.
     */
    i = SDIO_DW_GET_HDATA_WIDTH(sdio_dw_readl(sd, HCON));

    if (!i)
    {
        sd->push_data = sdio_dw_push_data16;
        sd->pull_data = sdio_dw_pull_data16;
        sd->data_shift = 1;
    }
    else if (i == 2)
    {
        sd->push_data = sdio_dw_push_data64;
        sd->pull_data = sdio_dw_pull_data64;
        sd->data_shift = 3;
    }
    else
    {
        /* Check for a reserved value, and warn if it is */
        if (i != 1)
        {
            LOG_W("HCON reports a reserved host data width, defaulting to 32-bit access");
        }

        sd->push_data = sdio_dw_push_data32;
        sd->pull_data = sdio_dw_pull_data32;
        sd->data_shift = 2;
    }

    /* Reset all blocks */
    if (!sdio_dw_ctrl_reset(sd, SDIO_DW_CTRL_ALL_RESET_FLAGS))
    {
        err = -RT_EIO;

        goto _free_res;
    }

    sdio_dw_init_dma(sd);

    /* Clear the interrupts for the host controller */
    sdio_dw_writel(sd, RINTSTS, 0xffffffff);
    /* Disable all mmc interrupt first */
    sdio_dw_writel(sd, INTMASK, 0);

    /* Put in max timeout */
    sdio_dw_writel(sd, TMOUT, 0xffffffff);

    /*
     * FIFO threshold settings:
     * Rx Mark = fifo_size / 2 - 1,
     * Tx Mark = fifo_size / 2
     * DMA Size = 8
     */
    if (!sd->fifo_depth)
    {
        /*
         * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may have been
         * overwritten by the bootloader, just like we're about to do, so if you
         * know the value for your hardware, you should put it in the platform
         * data.
         */
        sd->fifo_depth = sdio_dw_readl(sd, FIFOTH);
        sd->fifo_depth = 1 + ((sd->fifo_depth >> 16) & 0xfff);
    }

    sd->fifoth_val = SDIO_DW_SET_FIFOTH(0x2, sd->fifo_depth / 2 - 1, sd->fifo_depth / 2);
    sdio_dw_writel(sd, FIFOTH, sd->fifoth_val);

    /* Disable clock to CIU */
    sdio_dw_writel(sd, CLKENA, 0);
    sdio_dw_writel(sd, CLKSRC, 0);

    /*
     * In 2.40a spec, Data offset is changed.
     * Need to check the version-id and set data-offset for DATA register.
     */
    sd->verid = SDIO_DW_GET_VERID(sdio_dw_readl(sd, VERID));
    LOG_D("Version ID is %04x", sd->verid);

    if (sd->data_addr_override)
    {
        sd->fifo_base = sd->base + sd->data_addr_override;
    }
    else if (sd->verid < SDIO_DW_240A)
    {
        sd->fifo_base = sd->base + DATA_OFFSET;
    }
    else
    {
        sd->fifo_base = sd->base + DATA_240A_OFFSET;
    }

    /*
     * Enable interrupts for command done, data over, data empty, receive ready
     * and error such as transmit, receive timeout, crc error
     */
    sdio_dw_writel(sd, INTMASK, PINT(CMD_DONE) | PINT(DATA_OVER) | PINT(TXDR) | PINT(RXDR) | SDIO_DW_ERROR_FLAGS);
    /* Enable mci interrupt */
    sdio_dw_writel(sd, CTRL, SDIO_DW_CTRL_INT_ENABLE);
    /* Enable GPIO interrupt */
    sdio_dw_writel(sd, INTMASK, sdio_dw_readl(sd, INTMASK) | PINT(CD));

    /* We need at least one slot to succeed */
    err = sdio_dw_init_slot(sd);

    if (err)
    {
        goto _free_res;
    }

    /* Now that slots are all setup, we can enable card detect */
    sdio_dw_writel(sd, INTMASK, sdio_dw_readl(sd, INTMASK) | PINT(CD));

    len = sdio_host_set_name(sd->slot->host, dev_name);
    sd->state_wq = rt_workqueue_create(dev_name, RT_SYSTEM_WORKQUEUE_STACKSIZE,
            RT_MMCSD_THREAD_PRIORITY);

    if (!sd->state_wq)
    {
        err = -RT_ENOMEM;

        goto _free_res;
    }

    rt_work_init(&sd->state_work, sdio_dw_state_change, sd);

    rt_hw_interrupt_install(sd->irq, sdio_dw_isr, sd, dev_name);
    rt_hw_interrupt_umask(sd->irq);

    rt_strncpy(&dev_name[len], "-cmd11", sizeof(dev_name) - len);
    rt_timer_init(&sd->cmd11_timer, dev_name, sdio_dw_cmd11_timer, sd,
            0, RT_TIMER_FLAG_PERIODIC);

    rt_strncpy(&dev_name[len], "-cto", sizeof(dev_name) - len);
    rt_timer_init(&sd->cto_timer, dev_name, sdio_dw_cto_timer, sd,
            0, RT_TIMER_FLAG_PERIODIC);

    rt_strncpy(&dev_name[len], "-dto", sizeof(dev_name) - len);
    rt_timer_init(&sd->dto_timer, dev_name, sdio_dw_dto_timer, sd,
            0, RT_TIMER_FLAG_PERIODIC);

    mmcsd_change(sd->slot->host);

    return err;

_free_res:
    sdio_dw_free(sd);

    return err;
}

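/* Tear down in reverse order of probe: host, interrupts, timers, resources */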
rt_err_t sdio_dw_remove(struct sdio_dw *sd)
{
    if (sd->slot)
    {
        mmcsd_free_host(sd->slot->host);
    }

    sdio_dw_writel(sd, RINTSTS, 0xffffffff);
    /* Disable all mmc interrupt first */
    sdio_dw_writel(sd, INTMASK, 0);

    /* Disable clock to CIU */
    sdio_dw_writel(sd, CLKENA, 0);
    sdio_dw_writel(sd, CLKSRC, 0);

    rt_hw_interrupt_mask(sd->irq);
    rt_pic_detach_irq(sd->irq, sd);

    rt_timer_detach(&sd->cmd11_timer);
    rt_timer_detach(&sd->cto_timer);
    rt_timer_detach(&sd->dto_timer);

    sdio_dw_free(sd);

    return RT_EOK;
}