Kaynağa Gözat

[dm][ufs] support Universal Flash Storage (UFS)

Support UFS over PCI, too.

Signed-off-by: GuEe-GUI <2991707448@qq.com>
GuEe-GUI 1 ay önce
ebeveyn
işleme
ea13820c2c

+ 1 - 0
components/drivers/Kconfig

@@ -30,6 +30,7 @@ rsource "ata/Kconfig"
 rsource "nvme/Kconfig"
 rsource "block/Kconfig"
 rsource "scsi/Kconfig"
+rsource "ufs/Kconfig"
 rsource "firmware/Kconfig"
 rsource "hwcache/Kconfig"
 rsource "regulator/Kconfig"

+ 662 - 0
components/drivers/include/drivers/ufs.h

@@ -0,0 +1,662 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-02-25     GuEe-GUI     the first version
+ */
+
+#ifndef __UFS_H__
+#define __UFS_H__
+
+#include <rthw.h>
+#include <rtthread.h>
+#include <drivers/blk.h>
+#include <drivers/misc.h>
+#include <drivers/scsi.h>
+#include <drivers/byteorder.h>
+
+/* Extract bit field [h:l] of v. NOTE(review): the double-underscore name is reserved by the C standard; kept as-is because it is used throughout this header. */
+#define __BIT_FIELD(v, h, l)                        (((v) & RT_GENMASK(h, l)) >> (l))
+
+/* Host Capabilities */
+#define RT_UFS_REG_CAP                              0x00 /* Host Controller Capabilities */
+#define   RT_UFS_REG_CAP_NUTRS(v)                   __BIT_FIELD(v, 4, 0) /* Number of UTP Transfer Request Slots */
+#define   RT_UFS_REG_CAP_NORTT(v)                   __BIT_FIELD(v, 15, 8) /* Number of outstanding READY TO TRANSFER requests supported */
+#define   RT_UFS_REG_CAP_NUTMRS(v)                  __BIT_FIELD(v, 18, 16) /* Number of UTP Task Management Request Slots */
+#define   RT_UFS_REG_CAP_AUTOH8                     RT_BIT(23) /* Auto-Hibernation Support */
+#define   RT_UFS_REG_CAP_64AS                       RT_BIT(24) /* 64-bit addressing supported */
+#define   RT_UFS_REG_CAP_OODDS                      RT_BIT(25) /* Out of order data delivery supported */
+#define   RT_UFS_REG_CAP_UICDMETMS                  RT_BIT(26) /* UIC DME_TEST_MODE command supported */
+#define   RT_UFS_REG_CAP_UME                        RT_BIT(27) /* Reserved for Unified Memory Extension */
+#define RT_UFS_REG_VER                              0x08 /* UFS Version */
+#define   RT_UFS_REG_VER_VS(v)                      __BIT_FIELD(v, 3, 0) /* Version Suffix (VS) */
+#define   RT_UFS_REG_VER_MNR(v)                     __BIT_FIELD(v, 7, 4) /* Minor Version Number (MNR) */
+#define   RT_UFS_REG_VER_MJR(v)                     __BIT_FIELD(v, 15, 8) /* Major Version Number (MJR) */
+#define RT_UFS_REG_HCPID                            0x10 /* Host Controller Identification Descriptor – Product ID */
+#define RT_UFS_REG_HCMID                            0x14 /* Host Controller Identification Descriptor – Manufacturer ID */
+#define   RT_UFS_REG_HCMID_MIC(v)                   __BIT_FIELD(v, 7, 0) /* Manufacturer Identification Code */
+#define   RT_UFS_REG_HCMID_BI(v)                    __BIT_FIELD(v, 15, 8) /* Bank Index */
+#define RT_UFS_REG_AHIT                             0x18 /* Auto-Hibernate Idle Timer */
+#define   RT_UFS_REG_AHIT_AH8ITV(v)                 __BIT_FIELD(v, 9, 0) /* Auto-Hibern8 Idle Timer Value */
+#define   RT_UFS_REG_AHIT_TS(v)                     __BIT_FIELD(v, 12, 10) /* Timer scale */
+/* Operation and Runtime */
+#define RT_UFS_REG_IS                               0x20  /* Interrupt Status */
+#define   RT_UFS_REG_IS_UTRCS                       RT_BIT(0)  /* UTP Transfer Request Completion Status */
+#define   RT_UFS_REG_IS_UDEPRI                      RT_BIT(1)  /* UIC DME_ENDPOINTRESET Indication */
+#define   RT_UFS_REG_IS_UE                          RT_BIT(2)  /* UIC Error */
+#define   RT_UFS_REG_IS_UTMS                        RT_BIT(3)  /* UIC Test Mode Status */
+#define   RT_UFS_REG_IS_UPMS                        RT_BIT(4)  /* UIC Power Mode Status */
+#define   RT_UFS_REG_IS_UHXS                        RT_BIT(5)  /* UIC Hibernate Exit Status */
+#define   RT_UFS_REG_IS_UHES                        RT_BIT(6)  /* UIC Hibernate Enter Status */
+#define   RT_UFS_REG_IS_ULLS                        RT_BIT(7)  /* UIC Link Lost Status */
+#define   RT_UFS_REG_IS_ULSS                        RT_BIT(8)  /* UIC Link Startup Status */
+#define   RT_UFS_REG_IS_UTMRCS                      RT_BIT(9)  /* UTP Task Management Request Completion Status */
+#define   RT_UFS_REG_IS_UCCS                        RT_BIT(10) /* UIC Command Completion Status */
+#define   RT_UFS_REG_IS_DFES                        RT_BIT(11) /* Device Fatal Error Status */
+#define   RT_UFS_REG_IS_UTPES                       RT_BIT(12) /* UTP Error Status */
+#define   RT_UFS_REG_IS_HCFES                       RT_BIT(16) /* Host Controller Fatal Error Status */
+#define   RT_UFS_REG_IS_SBFES                       RT_BIT(17) /* System Bus Fatal Error Status */
+#define RT_UFS_REG_IE                               0x24  /* Interrupt Enable */
+#define   RT_UFS_REG_IE_UTRCE                       RT_BIT(0)  /* UTP Transfer Request Completion Enable */
+#define   RT_UFS_REG_IE_UDEPRIE                     RT_BIT(1)  /* UIC DME_ENDPOINTRESET */
+#define   RT_UFS_REG_IE_UEE                         RT_BIT(2)  /* UIC Error Enable */
+#define   RT_UFS_REG_IE_UTMSE                       RT_BIT(3)  /* UIC Test Mode Status Enable */
+#define   RT_UFS_REG_IE_UPMSE                       RT_BIT(4)  /* UIC Power Mode Status Enable */
+#define   RT_UFS_REG_IE_UHXSE                       RT_BIT(5)  /* UIC Hibernate Exit Status Enable */
+#define   RT_UFS_REG_IE_UHESE                       RT_BIT(6)  /* UIC Hibernate Enter Status Enable */
+#define   RT_UFS_REG_IE_ULLSE                       RT_BIT(7)  /* UIC Link Lost Status Enable */
+#define   RT_UFS_REG_IE_ULSSE                       RT_BIT(8)  /* UIC Link Startup Status Enable */
+#define   RT_UFS_REG_IE_UTMRCE                      RT_BIT(9)  /* UTP Task Management Request Completion Enable */
+#define   RT_UFS_REG_IE_UCCE                        RT_BIT(10) /* UIC COMMAND Completion Enable */
+#define   RT_UFS_REG_IE_DFEE                        RT_BIT(11) /* Device Fatal Error Enable */
+#define   RT_UFS_REG_IE_UTPEE                       RT_BIT(12) /* UTP Error Enable */
+#define   RT_UFS_REG_IE_HCFEE                       RT_BIT(16) /* Host Controller Fatal Error Enable */
+#define   RT_UFS_REG_IE_SBFEE                       RT_BIT(17) /* System Bus Fatal Error Enable */
+#define RT_UFS_REG_HCS                              0x30  /* Host Controller Status */
+#define   RT_UFS_REG_HCS_DP                         RT_BIT(0) /* Device Present */
+#define   RT_UFS_REG_HCS_UTRLRDY                    RT_BIT(1) /* UTP Transfer Request List Ready */
+#define   RT_UFS_REG_HCS_UTMRLRDY                   RT_BIT(2) /* UTP Task Management Request List Ready */
+#define   RT_UFS_REG_HCS_UCRDY                      RT_BIT(3) /* UIC COMMAND Ready */
+#define   RT_UFS_REG_HCS_UPMCRS(v)                  __BIT_FIELD(v, 10, 8) /* UIC Power Mode Change Request Status */
+#define   RT_UFS_REG_HCS_UTPEC(v)                   __BIT_FIELD(v, 15, 12) /* UTP Error Code */
+#define   RT_UFS_REG_HCS_TTAGUTPE(v)                __BIT_FIELD(v, 23, 16) /* Task Tag of UTP error */
+#define   RT_UFS_REG_HCS_TLUNUTPE(v)                __BIT_FIELD(v, 31, 24) /* Target LUN of UTP error */
+#define RT_UFS_REG_HCE                              0x34  /* Host Controller Enable */
+#define RT_UFS_REG_UECPA                            0x38  /* Host UIC Error Code PHY Adapter Layer */
+#define   RT_UFS_REG_UECPA_EC(v)                    __BIT_FIELD(v, 4, 0) /* UIC PHY Adapter Layer Error Code */
+#define   RT_UFS_REG_UECPA_ERR                      RT_BIT(31) /* UIC PHY Adapter Layer Error */
+#define RT_UFS_REG_UECDL                            0x3c  /* Host UIC Error Code Data Link Layer */
+#define   RT_UFS_REG_UECDL_EC(v)                    __BIT_FIELD(v, 14, 0) /* UIC Data Link Layer Error Code */
+#define   RT_UFS_REG_UECDL_ERR                      RT_BIT(31) /* UIC Data Link Layer Error */
+#define RT_UFS_REG_UECN                             0x40  /* Host UIC Error Code Network Layer */
+#define   RT_UFS_REG_UECN_EC(v)                     __BIT_FIELD(v, 2, 0) /* UIC Network Layer Error Code */
+#define   RT_UFS_REG_UECN_ERR                       RT_BIT(31) /* UIC Network Layer Error */
+#define RT_UFS_REG_UECT                             0x44  /* Host UIC Error Code Transport Layer */
+#define   RT_UFS_REG_UECT_EC(v)                     __BIT_FIELD(v, 6, 0) /* UIC Transport Layer Error Code */
+#define   RT_UFS_REG_UECT_ERR                       RT_BIT(31) /* UIC Transport Layer Error */
+#define RT_UFS_REG_UECDME                           0x48  /* Host UIC Error Code DME */
+#define   RT_UFS_REG_UECDME_EC(v)                   __BIT_FIELD(v, 0, 0) /* UIC DME Error Code */
+#define   RT_UFS_REG_UECDME_ERR                     RT_BIT(31) /* UIC DME Error */
+#define RT_UFS_REG_UTRIACR                          0x4c  /* UTP Transfer Request Interrupt Aggregation Control Register */
+#define   RT_UFS_REG_UTRIACR_IAEN                   RT_BIT(31) /* Interrupt Aggregation Enable/Disable */
+#define   RT_UFS_REG_UTRIACR_IAPWEN                 RT_BIT(24) /* Interrupt aggregation parameter write enable */
+#define   RT_UFS_REG_UTRIACR_IASB                   RT_BIT(20) /* Interrupt aggregation status bit */
+#define   RT_UFS_REG_UTRIACR_CTR                    RT_BIT(16) /* Counter and Timer Reset */
+#define   RT_UFS_REG_UTRIACR_IACTH(v)               __BIT_FIELD(v, 12, 8) /* Interrupt aggregation counter threshold */
+#define   RT_UFS_REG_UTRIACR_IATOVAL(v)             __BIT_FIELD(v, 7, 0) /* Interrupt aggregation timeout (40us units) */
+/* UTRIACR compose */
+#define RT_UFS_UTRIACR_TIMEOUT_MASK                 RT_GENMASK(7, 0)
+#define RT_UFS_UTRIACR_COUNTER_THLD_MASK            RT_GENMASK(12, 8)
+#define RT_UFS_UTRIACR_CTR                          RT_BIT(16)
+#define RT_UFS_UTRIACR_IASB                         RT_BIT(20)
+#define RT_UFS_UTRIACR_IAPWEN                       RT_BIT(24)
+#define RT_UFS_UTRIACR_IAEN                         RT_BIT(31)
+#define RT_UFS_UTRIACR_COUNTER_THLD(c)              ((((rt_uint32_t)(c)) & 0x1fu) << 8)
+#define RT_UFS_UTRIACR_TIMEOUT(t)                   (((rt_uint32_t)(t)) & 0xffu)
+#define RT_UFS_UTRIACR_ENABLE_PARAMS(cnt, to)       (RT_UFS_UTRIACR_IAEN | RT_UFS_UTRIACR_IAPWEN | \
+                                                            RT_UFS_UTRIACR_COUNTER_THLD(cnt) | RT_UFS_UTRIACR_TIMEOUT(to))
+#define RT_UFS_UTRIACR_RESET                        (RT_UFS_UTRIACR_IAEN | RT_UFS_UTRIACR_CTR)
+/* UTP Transfer */
+#define RT_UFS_REG_UTRLBA                           0x50 /* UTP Transfer Request List Base Address */
+#define   RT_UFS_REG_UTRLBA_MASK(v)                 __BIT_FIELD(v, 31, 10)
+#define RT_UFS_REG_UTRLBAU                          0x54 /* UTP Transfer Request List Base Address Upper 32-Bits */
+#define   RT_UFS_REG_UTRLBAU_MASK(v)                __BIT_FIELD(v, 31, 0)
+#define RT_UFS_REG_UTRLDBR                          0x58 /* UTP Transfer Request List Door Bell Register */
+#define RT_UFS_REG_UTRLCLR                          0x5c /* UTP Transfer Request List Clear Register */
+#define RT_UFS_REG_UTRLRSR                          0x60 /* UTP Transfer Request Run-Stop Register */
+/* UTP Task Management */
+#define RT_UFS_REG_UTMRLBA                          0x70 /* UTP Task Management Request List Base Address */
+#define   RT_UFS_REG_UTMRLBA_MASK(v)                __BIT_FIELD(v, 31, 10)
+#define RT_UFS_REG_UTMRLBAU                         0x74 /* UTP Task Management Request List Base Address Upper 32-Bits */
+#define   RT_UFS_REG_UTMRLBAU_MASK(v)               __BIT_FIELD(v, 31, 0)
+#define RT_UFS_REG_UTMRLDBR                         0x78 /* UTP Task Management Request List Door Bell Register */
+#define RT_UFS_REG_UTMRLCLR                         0x7c /* UTP Task Management Request List Clear Register */
+#define RT_UFS_REG_UTMRLRSR                         0x80 /* UTP Task Management Run-Stop Register */
+/* UIC Command */
+#define RT_UFS_REG_UICCMD                           0x90 /* UIC Command Register */
+#define   RT_UFS_REG_UICCMD_CMDOP(v)                __BIT_FIELD(v, 7, 0) /* Command Opcode */
+#define RT_UFS_REG_UCMDARG1                         0x94 /* UIC Command Argument 1 */
+#define RT_UFS_REG_UCMDARG2                         0x98 /* UIC Command Argument 2 */
+#define RT_UFS_REG_UCMDARG3                         0x9c /* UIC Command Argument 3 */
+/* UMA */
+#define RT_UFS_REG_UMA_EXT                          0xb0 /* Reserved for Unified Memory Extension */
+/* Vendor Specific */
+#define RT_UFS_REG_VS                               0xc0 /* Vendor Specific Registers */
+
+/* UTP Error Code */
+enum
+{
+    RT_UFS_UTPEC_ERR_INV_TYPE                       = 1,
+};
+
+/* UIC Power Mode Change Request Status */
+enum
+{
+    RT_UFS_UPMCRS_PWR_OK                            = 0x0,
+    RT_UFS_UPMCRS_PWR_LOCAL                         = 0x1,
+    RT_UFS_UPMCRS_PWR_REMOTE                        = 0x2,
+    RT_UFS_UPMCRS_PWR_BUSY                          = 0x3,
+    RT_UFS_UPMCRS_PWR_ERROR_CAP                     = 0x4,
+    RT_UFS_UPMCRS_PWR_FATAL_ERROR                   = 0x5,
+};
+
+/* UIC PHY Adapter Layer Error Code */
+enum
+{
+    RT_UFS_UECPA_EC_LANE0                           = RT_BIT(0),
+    RT_UFS_UECPA_EC_LANE1                           = RT_BIT(1),
+    RT_UFS_UECPA_EC_LANE2                           = RT_BIT(2),
+    RT_UFS_UECPA_EC_LANE3                           = RT_BIT(3),
+    RT_UFS_UECPA_EC_GENERIC                         = RT_BIT(4),
+};
+
+/* UIC Data Link Layer Error Code */
+enum
+{
+    RT_UFS_UECDL_EC_NAC_RECEIVED                    = RT_BIT(0),
+    RT_UFS_UECDL_EC_TCx_REPLAY_TIMER_EXPIRED        = RT_BIT(1),
+    RT_UFS_UECDL_EC_AFCx_REQUEST_TIMER_EXPIRED      = RT_BIT(2),
+    RT_UFS_UECDL_EC_FCx_PROTECTION_TIMER_EXPIRED    = RT_BIT(3),
+    RT_UFS_UECDL_EC_CRC_ERROR                       = RT_BIT(4),
+    RT_UFS_UECDL_EC_RX_BUFFER_OVERFLOW              = RT_BIT(5),
+    RT_UFS_UECDL_EC_MAX_FRAME_LENGTH_EXCEEDED       = RT_BIT(6),
+    RT_UFS_UECDL_EC_WRONG_SEQUENCE_NUMBER           = RT_BIT(7),
+    RT_UFS_UECDL_EC_AFC_FRAME_SYNTAX_ERROR          = RT_BIT(8),
+    RT_UFS_UECDL_EC_NAC_FRAME_SYNTAX_ERROR          = RT_BIT(9),
+    RT_UFS_UECDL_EC_EOF_SYNTAX_ERROR                = RT_BIT(10),
+    RT_UFS_UECDL_EC_FRAME_SYNTAX_ERROR              = RT_BIT(11),
+    RT_UFS_UECDL_EC_BAD_CTRL_SYMBOL_TYPE            = RT_BIT(12),
+    RT_UFS_UECDL_EC_PA_INIT_ERROR                   = RT_BIT(13),
+    RT_UFS_UECDL_EC_PA_ERROR_IND_RECEIVED           = RT_BIT(14),
+};
+
+/* UIC Network Layer Error Code */
+enum
+{
+    RT_UFS_UECN_EC_UNSUPPORTED_HEADER_TYPE          = RT_BIT(0),
+    RT_UFS_UECN_EC_BAD_DEVICEID_ENC                 = RT_BIT(1),
+    RT_UFS_UECN_EC_LHDR_TRAP_PACKET_DROPPING        = RT_BIT(2),
+};
+
+/* UIC Transport Layer Error Code */
+enum
+{
+    RT_UFS_UECT_EC_UNSUPPORTED_HEADER_TYPE          = RT_BIT(0),
+    RT_UFS_UECT_EC_UNKNOWN_CPORTID                  = RT_BIT(1),
+    RT_UFS_UECT_EC_NO_CONNECTION_RX                 = RT_BIT(2),
+    RT_UFS_UECT_EC_CONTROLLED_SEGMENT_DROPPING      = RT_BIT(3),
+    RT_UFS_UECT_EC_BAD_TC                           = RT_BIT(4),
+    RT_UFS_UECT_EC_E2E_CREDIT_OVERFLOW              = RT_BIT(5),
+    RT_UFS_UECT_EC_SAFETY_VALVE_DROPPING            = RT_BIT(6),
+};
+
+/* UIC DME Error Code */
+enum
+{
+    RT_UFS_UECDME_EC_GENERIC                        = RT_BIT(0),
+};
+
+/* UIC Command Opcode */
+enum
+{
+    /* Configuration */
+    RT_UFS_CMDOP_DME_GET                            = 0x1,
+    RT_UFS_CMDOP_DME_SET                            = 0x2,
+    RT_UFS_CMDOP_DME_PEER_GET                       = 0x3,
+    RT_UFS_CMDOP_DME_PEER_SET                       = 0x4,
+    /* Control */
+    RT_UFS_CMDOP_DME_POWERON                        = 0x10,
+    RT_UFS_CMDOP_DME_POWEROFF                       = 0x11,
+    RT_UFS_CMDOP_DME_ENABLE                         = 0x12,
+    RT_UFS_CMDOP_DME_RESET                          = 0x14,
+    RT_UFS_CMDOP_DME_ENDPOINTRESET                  = 0x15,
+    RT_UFS_CMDOP_DME_LINKSTARTUP                    = 0x16,
+    RT_UFS_CMDOP_DME_HIBERNATE_ENTER                = 0x17,
+    RT_UFS_CMDOP_DME_HIBERNATE_EXIT                 = 0x18,
+    RT_UFS_CMDOP_DME_TEST_MODE                      = 0x1a,
+};
+
+/* UIC Config result code / Generic error code */
+enum
+{
+    RT_UFS_CMDRES_SUCCESS                           = 0x0,
+    RT_UFS_CMDRES_INVALID_MIB_ATTRIBUTE             = 0x1,
+    RT_UFS_CMDRES_INVALID_MIB_ATTRIBUTE_VALUE       = 0x2,
+    RT_UFS_CMDRES_READ_ONLY_MIB_ATTRIBUTE           = 0x3,
+    RT_UFS_CMDRES_WRITE_ONLY_MIB_ATTRIBUTE          = 0x4,
+    RT_UFS_CMDRES_BAD_INDEX                         = 0x5,
+    RT_UFS_CMDRES_LOCKED_MIB_ATTRIBUTE              = 0x6,
+    RT_UFS_CMDRES_BAD_TEST_FEATURE_INDEX            = 0x7,
+    RT_UFS_CMDRES_PEER_COMMUNICATION_FAILURE        = 0x8,
+    RT_UFS_CMDRES_BUSY                              = 0x9,
+    RT_UFS_CMDRES_DME_FAILURE                       = 0xa,
+    RT_UFS_CMDRES_MASK                              = 0xff,
+};
+
+/*
+ * UIC DME attribute encoding (JEDEC UFS / UniPro PA)
+ */
+#define RT_UFS_UIC_ARG_MIB_SEL(attr, sel)           ((((rt_uint32_t)(attr) & 0xffffU) << 16) | ((rt_uint32_t)(sel) & 0xffffU))
+#define RT_UFS_UIC_ARG_MIB(attr)                    RT_UFS_UIC_ARG_MIB_SEL(attr, 0)
+
+/* PHY Adapter — subset used for power / link configuration */
+#define RT_UFS_PA_RXGEAR                            0x1583
+#define RT_UFS_PA_TXGEAR                            0x1568
+#define RT_UFS_PA_ACTIVERXDATALANES                 0x1580
+#define RT_UFS_PA_ACTIVETXDATALANES                 0x1560
+#define RT_UFS_PA_RXTERMINATION                     0x1584
+#define RT_UFS_PA_TXTERMINATION                     0x1569
+#define RT_UFS_PA_HSSERIES                          0x156a
+#define RT_UFS_PA_PWRMODE                           0x1571
+#define RT_UFS_PA_PWRMODEUSERDATA0                  0x15b0
+#define RT_UFS_PA_PWRMODEUSERDATA1                  0x15b1
+#define RT_UFS_PA_PWRMODEUSERDATA2                  0x15b2
+#define RT_UFS_PA_PWRMODEUSERDATA3                  0x15b3
+#define RT_UFS_PA_PWRMODEUSERDATA4                  0x15b4
+#define RT_UFS_PA_PWRMODEUSERDATA5                  0x15b5
+#define RT_UFS_DME_LOCAL_FC0_PROT_TO                0xd041
+#define RT_UFS_DME_LOCAL_TC0_REPLAY_TO              0xd042
+#define RT_UFS_DME_LOCAL_AFC0_REQ_TO                0xd043
+
+#define RT_UFS_DL_FC0_PROT_TO_DEFAULT               8191U
+#define RT_UFS_DL_TC0_REPLAY_TO_DEFAULT             65535U
+#define RT_UFS_DL_AFC0_REQ_TO_DEFAULT               32767U
+#define RT_UFS_DL_FC1_PROT_TO_DEFAULT               8191U
+#define RT_UFS_DL_TC1_REPLAY_TO_DEFAULT             65535U
+#define RT_UFS_DL_AFC1_REQ_TO_DEFAULT               32767U
+
+enum rt_ufs_pa_pwr_mode
+{
+    RT_UFS_PA_FAST_MODE                             = 1,
+    RT_UFS_PA_SLOW_MODE                             = 2,
+    RT_UFS_PA_FASTAUTO_MODE                         = 4,
+    RT_UFS_PA_SLOWAUTO_MODE                         = 5,
+    RT_UFS_PA_UNCHANGED_MODE                        = 7,
+};
+
+enum rt_ufs_pa_hs_rate
+{
+    RT_UFS_PA_HS_MODE_A                             = 1,
+    RT_UFS_PA_HS_MODE_B                             = 2,
+};
+
+/* Auto-Hibernate Idle Timer (AHIT) — REG 0x18 */
+#define RT_UFS_AHIT_TIMER_MASK                      RT_GENMASK(9, 0)
+#define RT_UFS_AHIT_SCALE_MASK                      RT_GENMASK(12, 10)
+
+/* Compose a raw AHIT register value from an idle-timer count and a timer scale */
+rt_inline rt_uint32_t rt_ufs_ahit_encode(rt_uint16_t timer, rt_uint8_t scale)
+{
+    rt_uint32_t ahit;
+
+    ahit  = (rt_uint32_t)timer & RT_UFS_AHIT_TIMER_MASK;
+    ahit |= ((rt_uint32_t)scale << 10) & RT_UFS_AHIT_SCALE_MASK;
+
+    return ahit;
+}
+
+/* 150 * 10^3 us idle before auto-Hibernate8 */
+#define RT_UFS_AHIT_DEFAULT                         rt_ufs_ahit_encode(150, 3)
+
+/*
+ * UTP / UPIU (refer to JEDEC UFS & UFSHCI)
+ */
+#define RT_UFS_CDB_SIZE                             16
+#define RT_UFS_ALIGNED_UPIU_SIZE                    512
+#define RT_UFS_SENSE_SIZE                           18
+
+/* UTP Transfer Request Command Type */
+#define RT_UTP_CMD_TYPE_SCSI                        0x0
+#define RT_UTP_CMD_TYPE_UFS_STORAGE                 0x1
+#define RT_UTP_CMD_TYPE_DEV_MANAGE                  0x2
+
+/* UTP Data Direction */
+#define RT_UTP_NO_DATA_TRANSFER                     0
+#define RT_UTP_HOST_TO_DEVICE                       1
+#define RT_UTP_DEVICE_TO_HOST                       2
+
+/* Overall Command Status (OCS) in UTRD */
+enum rt_ufs_ocs
+{
+    RT_UFS_OCS_SUCCESS                              = 0x0,
+    RT_UFS_OCS_INVALID_CMD_TABLE_ATTR               = 0x1,
+    RT_UFS_OCS_INVALID_PRDT_ATTR                    = 0x2,
+    RT_UFS_OCS_MISMATCH_DATA_BUF_SIZE               = 0x3,
+    RT_UFS_OCS_MISMATCH_RESP_UPIU_SIZE              = 0x4,
+    RT_UFS_OCS_PEER_COMM_FAILURE                    = 0x5,
+    RT_UFS_OCS_ABORTED                              = 0x6,
+    RT_UFS_OCS_FATAL_ERROR                          = 0x7,
+    RT_UFS_OCS_DEVICE_FATAL_ERROR                   = 0x8,
+    RT_UFS_OCS_INVALID_COMMAND_STATUS               = 0xf,
+};
+
+/* UPIU Transaction Codes (Initiator to Target) */
+#define RT_UPIU_TRANSACTION_NOP_OUT                 0x00
+#define RT_UPIU_TRANSACTION_COMMAND                 0x01
+#define RT_UPIU_TRANSACTION_DATA_OUT                0x02
+#define RT_UPIU_TRANSACTION_TASK_REQ                0x04
+#define RT_UPIU_TRANSACTION_QUERY_REQ               0x16
+
+/* UPIU Transaction Codes (Target to Initiator) */
+#define RT_UPIU_TRANSACTION_NOP_IN                  0x20
+#define RT_UPIU_TRANSACTION_RESPONSE                0x21
+#define RT_UPIU_TRANSACTION_DATA_IN                 0x22
+#define RT_UPIU_TRANSACTION_TASK_RSP                0x24
+#define RT_UPIU_TRANSACTION_QUERY_RSP               0x36
+
+/* UPIU Command Set Type */
+#define RT_UPIU_COMMAND_SET_TYPE_SCSI               0x0
+
+/* UPIU Command flags */
+#define RT_UPIU_CMD_FLAGS_NONE                      0x00
+#define RT_UPIU_CMD_FLAGS_WRITE                     0x20
+#define RT_UPIU_CMD_FLAGS_READ                      0x40
+
+/* PRDT: max single entry size 256KB, granularity 4 bytes */
+#define RT_UFS_PRDT_BYTE_COUNT_MAX                  (256 * 1024)
+
+/* UPIU header - 12 bytes, big-endian */
+rt_packed(struct rt_utp_upiu_header
+{
+    rt_uint8_t transaction_code;
+    rt_uint8_t flags;
+    rt_uint8_t lun;
+    rt_uint8_t task_tag;
+    rt_uint8_t command_set_type;    /* 4:0, iid 7:5 in same byte on LE */
+    rt_uint8_t query_function;      /* or tm_function */
+    rt_uint8_t response;
+    rt_uint8_t status;
+    rt_uint8_t ehs_length;
+    rt_uint8_t device_information;
+    rt_be16_t data_segment_length;
+});
+
+/* Command UPIU - SCSI CDB */
+rt_packed(struct rt_utp_upiu_cmd
+{
+    rt_be32_t exp_data_transfer_len;
+    rt_uint8_t cdb[RT_UFS_CDB_SIZE];
+});
+
+/* Request UPIU - command type */
+rt_packed(struct rt_utp_upiu_req
+{
+    struct rt_utp_upiu_header header;
+    struct rt_utp_upiu_cmd sc;
+});
+
+/* Response UPIU - SCSI response (cmd_rsp) */
+rt_packed(struct rt_utp_cmd_rsp
+{
+    rt_be32_t residual_transfer_count;
+    rt_be32_t reserved[4];
+    rt_be16_t sense_data_len;
+    rt_uint8_t sense_data[RT_UFS_SENSE_SIZE];
+});
+
+rt_packed(struct rt_utp_upiu_rsp
+{
+    struct rt_utp_upiu_header header;
+    struct rt_utp_cmd_rsp sr;
+});
+
+/* Physical Region Descriptor Table entry (LE, used by controller) */
+rt_packed(struct rt_ufs_sg_entry
+{
+    rt_uint64_t addr;   /* LE in memory for UFSHCI */
+    rt_uint32_t reserved;
+    rt_uint32_t size;   /* LE */
+});
+
+/* Request Descriptor Header - common to UTRD/UTMRD (4DW) */
+rt_packed(struct rt_ufs_request_desc_header
+{
+    rt_uint32_t dword_0;
+    rt_uint32_t dword_1;
+    rt_uint32_t dword_2;
+    rt_uint32_t dword_3;
+});
+
+/* UTP Transfer Request Descriptor bits (dword_0) */
+#define RT_UFS_UPIU_COMMAND_TYPE_OFFSET 28
+#define RT_UFS_UTP_REQ_DESC_INT_CMD     0x01000000
+
+/* UTP Transfer Request Descriptor (UTRD) - 32 bytes, host byte order for HC (LE in spec) */
+rt_packed(struct rt_utp_transfer_req_desc
+{
+    struct rt_ufs_request_desc_header header;
+    /* DW 4-5: UCD base address low/high dwords */
+    rt_uint32_t command_desc_base_addr_lo; /* 128-byte aligned, HC reads as LE */
+    rt_uint32_t command_desc_base_addr_hi; /* 128-byte aligned, HC reads as LE */
+    rt_uint16_t response_upiu_length;
+    rt_uint16_t response_upiu_offset;
+    rt_uint16_t prd_table_length;
+    rt_uint16_t prd_table_offset;
+});
+
+/* UTP Transfer Command Descriptor (UCD) - command_upiu + response_upiu + prd_table */
+struct rt_utp_transfer_cmd_desc
+{
+    rt_uint8_t command_upiu[RT_UFS_ALIGNED_UPIU_SIZE];
+    rt_uint8_t response_upiu[RT_UFS_ALIGNED_UPIU_SIZE];
+    struct rt_ufs_sg_entry prd_table[];
+};
+
+/* One slot: UCD size = 512 + 512 + N*16. N=1 for single buffer. */
+/*
+ * PRDT entries per slot.
+ * Partition probing may read multiple blocks with buffers not page-aligned,
+ * so we keep a generous entry count to safely split by page boundaries.
+ */
+#define RT_UFS_PRDT_ENTRIES_PER_SLOT    32
+#define RT_UFS_UCD_SIZE                 (RT_UFS_ALIGNED_UPIU_SIZE * 2 + sizeof(struct rt_ufs_sg_entry) * RT_UFS_PRDT_ENTRIES_PER_SLOT)
+
+struct rt_ufs_ops;
+struct rt_ufs_host;
+
+enum rt_ufs_notify_change_status
+{
+    RT_UFS_NOTIFY_CHANGE_STATUS_PRE,
+    RT_UFS_NOTIFY_CHANGE_STATUS_POST,
+};
+
+/* PA layer attributes for link gear / power mode (ufshcd_change_power_mode) */
+struct rt_ufs_pa_layer_attr
+{
+    rt_uint32_t gear_rx;
+    rt_uint32_t gear_tx;
+    rt_uint32_t lane_rx;
+    rt_uint32_t lane_tx;
+    rt_uint32_t pwr_rx;   /* enum rt_ufs_pa_pwr_mode */
+    rt_uint32_t pwr_tx;
+    rt_uint32_t hs_rate;  /* enum rt_ufs_pa_hs_rate when in FAST* mode */
+};
+
+struct rt_ufs_host
+{
+    struct rt_scsi_host parent;
+
+    void *regs;
+    int irq;
+
+    rt_uint32_t cap;
+    rt_uint32_t nutrs;                          /* Number of UTP transfer request slots, 1..32 */
+
+    const struct rt_ufs_ops *ops;
+
+    /* UTRL (UTRD list) base (must be 1KB aligned); we use slot0 only */
+    struct rt_utp_transfer_req_desc *utrd;      /* Points to utrl_base[0] */
+    void *utrl_base;                            /* Full UTRL base */
+    rt_size_t utrl_size;                        /* Bytes */
+    rt_ubase_t utrl_handle;
+
+    /* UTMRL (UTMRD list) base (must be 1KB aligned); placeholder for now */
+    void *utmrl_base;
+    rt_size_t utmrl_size;
+    rt_ubase_t utmrl_handle;
+    rt_uint8_t utmrl_coherent;
+
+    rt_uint8_t *ucd_base;                       /* One UCD: command_upiu + response_upiu + prd_table */
+    rt_size_t ucd_size;                         /* RT_UFS_UCD_SIZE or platform value */
+    rt_ubase_t ucd_handle;
+    rt_uint8_t ucd_coherent;
+
+    /* Bounce buffer for small DMA I/O (avoid stack/unmapped buffers) */
+    rt_uint8_t *bounce;
+    rt_size_t bounce_size;
+    rt_ubase_t bounce_handle;
+
+    /* IRQ status snapshot (written by ISR, read by transfer thread) */
+    volatile rt_uint32_t irq_status;
+
+    struct rt_completion done;
+    struct rt_spinlock lock;
+
+    struct rt_ufs_pa_layer_attr pwr_active;
+    rt_uint8_t pwr_active_valid;
+    /**
+     * Raw AHIT register value. 0 = use RT_UFS_AHIT_DEFAULT when CAP reports Auto-H8 support.
+     * Write 0 via rt_ufs_auto_hibern8_set() to turn off auto-hibernate.
+     */
+    rt_uint32_t ahit;
+};
+
+/*
+ * Platform/vendor hooks invoked by the UFS core.
+ * NOTE(review): whether individual hooks may be RT_NULL is decided by ufs.c — confirm before leaving one unset.
+ */
+struct rt_ufs_ops
+{
+    rt_err_t (*init)(struct rt_ufs_host *ufs);  /* Controller-specific initialization */
+    rt_err_t (*exit)(struct rt_ufs_host *ufs);  /* Controller-specific teardown */
+    rt_err_t (*reset)(struct rt_ufs_host *ufs); /* Controller-specific reset */
+    rt_err_t (*link_startup_notify)(struct rt_ufs_host *ufs, enum rt_ufs_notify_change_status status); /* Called with PRE/POST around DME link startup */
+};
+
+rt_err_t rt_ufs_host_register(struct rt_ufs_host *ufs);
+rt_err_t rt_ufs_host_unregister(struct rt_ufs_host *ufs);
+
+/**
+ * @brief Send a UIC command and wait for completion
+ *
+ * Used during controller bring-up and in the link_startup_notify hook (e.g. DME_GET/SET,
+ * DME_LINK_STARTUP).
+ *
+ * @param ufs   UFS host controller
+ * @param cmd   UIC opcode (e.g. RT_UFS_CMDOP_DME_LINKSTARTUP)
+ * @param arg1  UIC command argument 1
+ * @param arg2  UIC argument 2; on return, low 8 bits contain the UIC result code
+ * @param arg3  UIC command argument 3
+ *
+ * @return RT_EOK on success, -RT_ETIMEOUT on timeout, -RT_ERROR on failure
+ */
+rt_err_t rt_ufs_uic_cmd_send(struct rt_ufs_host *ufs, rt_uint32_t cmd,
+        rt_uint32_t arg1, rt_uint32_t *arg2, rt_uint32_t arg3);
+
+/**
+ * @brief Apply default power/performance settings after the link is up
+ *
+ * Programs interrupt aggregation (UTRIACR) and, if the host supports it (CAP Auto-H8), the
+ * Auto-Hibernate idle timer (AHIT). Called from rt_ufs_host_register(); BSP code may call
+ * again after custom link training if needed.
+ *
+ * @param ufs UFS host controller
+ */
+void rt_ufs_pm_post_linkup(struct rt_ufs_host *ufs);
+
+/**
+ * @brief UIC DME_SET with UIC result check
+ *
+ * @param ufs       UFS host controller
+ * @param attr_sel  MIB selector (e.g. RT_UFS_UIC_ARG_MIB)
+ * @param value     Attribute value written to UIC argument 3
+ *
+ * @return RT_EOK on success, otherwise an error code
+ */
+rt_err_t rt_ufs_dme_set(struct rt_ufs_host *ufs, rt_uint32_t attr_sel, rt_uint32_t value);
+
+/**
+ * @brief UIC DME_GET
+ *
+ * @param ufs       UFS host controller
+ * @param attr_sel  MIB selector
+ * @param value     Storage for the attribute value read from UIC argument 3
+ *
+ * @return RT_EOK on success, otherwise an error code
+ */
+rt_err_t rt_ufs_dme_get(struct rt_ufs_host *ufs, rt_uint32_t attr_sel, rt_uint32_t *value);
+
+/**
+ * @brief Set PHY power mode via PA_PWRMODE
+ *
+ * @param ufs  UFS host controller
+ * @param mode Encoded as (rx_mode << 4) | tx_mode (see UniPro PA power modes)
+ *
+ * @return RT_EOK on success, otherwise an error code
+ */
+rt_err_t rt_ufs_uic_pa_pwrmode(struct rt_ufs_host *ufs, rt_uint8_t mode);
+
+/**
+ * @brief Change PA layer gear, lanes, termination, HS series, and trigger power mode
+ *
+ * @param ufs   UFS host controller
+ * @param attr  Desired PA layer parameters
+ * @param force If true, apply even when attr matches the last successful configuration
+ *
+ * @return RT_EOK on success, otherwise an error code
+ */
+rt_err_t rt_ufs_pa_power_mode_set(struct rt_ufs_host *ufs, const struct rt_ufs_pa_layer_attr *attr, rt_bool_t force);
+
+/**
+ * @brief Request link Hibern8 (hibernate) enter (UIC DME_HIBERNATE_ENTER)
+ *
+ * @param ufs UFS host controller
+ *
+ * @return RT_EOK on success, otherwise an error code
+ */
+rt_err_t rt_ufs_hibern8_enter(struct rt_ufs_host *ufs);
+
+/**
+ * @brief Request link Hibern8 (hibernate) exit (UIC DME_HIBERNATE_EXIT)
+ *
+ * @param ufs UFS host controller
+ *
+ * @return RT_EOK on success, otherwise an error code
+ */
+rt_err_t rt_ufs_hibern8_exit(struct rt_ufs_host *ufs);
+
+/**
+ * @brief Program the Auto-Hibernate idle timer register (AHIT)
+ *
+ * Requires host capability RT_UFS_REG_CAP_AUTOH8. Pass 0 to disable auto-hibernate.
+ *
+ * @param ufs      UFS host controller
+ * @param reg_val  Raw AHIT register value
+ *
+ * @return RT_EOK on success, -RT_ENOSYS if Auto-H8 is not supported, otherwise an error code
+ */
+rt_err_t rt_ufs_auto_hibern8_set(struct rt_ufs_host *ufs, rt_uint32_t reg_val);
+
+/**
+ * @brief Configure UTP transfer completion interrupt aggregation (UTRIACR, offset 0x4C)
+ *
+ * @param ufs     UFS host controller
+ * @param enable  RT_TRUE to enable aggregation; RT_FALSE clears the register
+ * @param cnt     Completion counter threshold before an interrupt (0–31)
+ * @param timeout Aggregation timeout in 40 µs steps (UFSHCI)
+ */
+void rt_ufs_intr_aggr_configure(struct rt_ufs_host *ufs, rt_bool_t enable, rt_uint8_t cnt, rt_uint8_t timeout);
+
+#endif /* __UFS_H__ */

+ 4 - 0
components/drivers/include/rtdevice.h

@@ -135,6 +135,10 @@ extern "C" {
 #include "drivers/thermal.h"
 #include "drivers/thermal.h"
 #endif /* RT_USING_THERMAL */
 #endif /* RT_USING_THERMAL */
 
 
+#ifdef RT_USING_UFS
+#include "drivers/ufs.h"
+#endif /* RT_USING_UFS */
+
 #ifdef RT_USING_FIRMWARE
 #ifdef RT_USING_FIRMWARE
 #ifdef RT_FIRMWARE_ARM_SCMI
 #ifdef RT_FIRMWARE_ARM_SCMI
 #include "drivers/scmi.h"
 #include "drivers/scmi.h"

+ 16 - 0
components/drivers/ufs/Kconfig

@@ -0,0 +1,16 @@
+menuconfig RT_USING_UFS
+    bool "Using Universal Flash Storage (UFS) device drivers"
+    depends on RT_USING_DM
+    depends on RT_USING_DMA
+    depends on RT_SCSI_SD
+    default n
+
+config RT_UFS_PCI
+    bool "UFS support on PCI bus"
+    depends on RT_USING_UFS
+    depends on RT_USING_PCI
+    default n
+
+if RT_USING_UFS
+    osource "$(SOC_DM_UFS_DIR)/Kconfig"
+endif

+ 18 - 0
components/drivers/ufs/SConscript

@@ -0,0 +1,18 @@
+from building import *
+
+group = []
+
+if not GetDepend(['RT_USING_UFS']):
+    Return('group')
+
+cwd = GetCurrentDir()
+CPPPATH = [cwd + '/../include']
+
+src = ['ufs.c', 'ufs_pm.c']
+
+if GetDepend(['RT_UFS_PCI']):
+    src += ['ufs-pci.c']
+
+group = DefineGroup('DeviceDrivers', src, depend = [''], CPPPATH = CPPPATH)
+
+Return('group')

+ 116 - 0
components/drivers/ufs/ufs-pci.c

@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-02-25     GuEe-GUI     the first version
+ */
+
+#include <rtthread.h>
+#include <rtdevice.h>
+
+#define UFS_REG_BAR 0
+
+/* Per-device quirk table entry: selects a vendor-specific ops vtable. */
+struct pci_ufs_quirk
+{
+    const struct rt_ufs_ops *ops;
+};
+
+/* PCI wrapper around the generic UFS host; quirk is looked up from the id table. */
+struct pci_ufs_host
+{
+    struct rt_ufs_host parent;
+    const struct pci_ufs_quirk *quirk;
+};
+
+/* Default (empty) ops for standards-compliant controllers with no quirks. */
+static const struct rt_ufs_ops pci_ufs_std_ops =
+{
+};
+
+/*
+ * Probe a PCI UFS controller: map BAR0, enable IRQ/bus-mastering and register
+ * the generic UFS host. On any failure all acquired resources are released in
+ * reverse order (the original leaked the iomapped BAR and left bus-mastering
+ * and the IRQ enabled when rt_ufs_host_register() failed).
+ *
+ * @param pdev the PCI device being bound
+ * @return RT_EOK on success, otherwise a negative error code
+ */
+static rt_err_t pci_ufs_probe(struct rt_pci_device *pdev)
+{
+    rt_err_t err;
+    struct rt_ufs_host *ufs;
+    struct pci_ufs_host *pci_ufs = rt_calloc(1, sizeof(*pci_ufs));
+    const struct pci_ufs_quirk *quirk = pdev->id->data;
+
+    if (!pci_ufs)
+    {
+        return -RT_ENOMEM;
+    }
+
+    pci_ufs->quirk = quirk;
+    ufs = &pci_ufs->parent;
+    ufs->parent.dev = &pdev->parent;
+    ufs->regs = rt_pci_iomap(pdev, UFS_REG_BAR);
+    ufs->irq = pdev->irq;
+
+    if (!ufs->regs)
+    {
+        err = -RT_EIO;
+        goto _free;
+    }
+
+    /* Fall back to the empty standard ops when no quirk matched */
+    ufs->ops = quirk ? quirk->ops : &pci_ufs_std_ops;
+
+    rt_pci_irq_unmask(pdev);
+    rt_pci_set_master(pdev);
+
+    if ((err = rt_ufs_host_register(ufs)))
+    {
+        goto _disable;
+    }
+
+    pdev->parent.user_data = pci_ufs;
+
+    return RT_EOK;
+
+_disable:
+    /* Undo DMA/IRQ enables and unmap BAR0 before freeing */
+    rt_pci_clear_master(pdev);
+    rt_pci_irq_mask(pdev);
+    rt_iounmap(ufs->regs);
+
+_free:
+    rt_free(pci_ufs);
+
+    return err;
+}
+
+/*
+ * Unbind a PCI UFS controller: unregister the host, quiesce the device and
+ * release wrapper resources.
+ */
+static rt_err_t pci_ufs_remove(struct rt_pci_device *pdev)
+{
+    struct rt_ufs_host *ufs;
+    struct pci_ufs_host *pci_ufs = pdev->parent.user_data;
+
+    ufs = &pci_ufs->parent;
+
+    rt_ufs_host_unregister(ufs);
+
+    /* INTx is shared, don't mask all */
+    /*
+     * NOTE(review): this calls rt_hw_interrupt_umask() (UNmask) during remove,
+     * which re-enables the line right before masking it at PCI level below.
+     * If the intent is "leave the shared line usable for other devices", that
+     * should be confirmed; otherwise this looks inverted.
+     */
+    rt_hw_interrupt_umask(pdev->irq);
+    rt_pci_irq_mask(pdev);
+    rt_pci_clear_master(pdev);
+
+    rt_iounmap(ufs->regs);
+    rt_free(pci_ufs);
+
+    return RT_EOK;
+}
+
+/* Shutdown hook: a full remove is sufficient to quiesce the controller. */
+static rt_err_t pci_ufs_shutdown(struct rt_pci_device *pdev)
+{
+    return pci_ufs_remove(pdev);
+}
+
+/* Supported devices: QEMU/Red Hat UFS (0x0013) and a Samsung controller (0xc00c). */
+static const struct rt_pci_device_id pci_ufs_ids[] =
+{
+    { RT_PCI_DEVICE_ID(PCI_VENDOR_ID_REDHAT, 0x0013), },
+    { RT_PCI_DEVICE_ID(PCI_VENDOR_ID_SAMSUNG, 0xc00c), },
+    { /* sentinel */ }
+};
+
+static struct rt_pci_driver pci_ufs_driver =
+{
+    .name = "ufs-pci",
+
+    .ids = pci_ufs_ids,
+    .probe = pci_ufs_probe,
+    .remove = pci_ufs_remove,
+    .shutdown = pci_ufs_shutdown,
+};
+RT_PCI_DRIVER_EXPORT(pci_ufs_driver);

+ 783 - 0
components/drivers/ufs/ufs.c

@@ -0,0 +1,783 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-02-25     GuEe-GUI     the first version
+ */
+
+#include <rthw.h>
+#include <rtthread.h>
+#include <rtdevice.h>
+
+#include <mm_page.h>
+#include <mm_aspace.h>
+
+#define DBG_TAG "rtdm.ufs"
+#define DBG_LVL DBG_INFO
+#include <rtdbg.h>
+
+/* UTP slot index used for all SCSI commands (single-threaded transfer) */
+#define RT_UFS_SLOT_ID                  0
+/* Timeout for UTP transfer completion (ms) */
+#define RT_UFS_UTP_TIMEOUT_MS           5000
+/* UIC command timeout (ms) */
+#define RT_UFS_UIC_TIMEOUT_MS           500
+/* UTRL/UTMRL base address must be 1KB aligned (UTRLBA/UTMRLBA bits[31:10]). */
+#define RT_UFS_UTR_LIST_ALIGN           1024
+/* Byte size of a UTP Transfer Request Descriptor list holding n slots */
+#define RT_UFS_UTR_LIST_BYTES(n)        ((rt_size_t)(n) * sizeof(struct rt_utp_transfer_req_desc))
+/* UTRD dword_0 data direction bits */
+#define RT_UFS_UTRD_DD_NONE             0x00000000U
+#define RT_UFS_UTRD_DD_HOST_TO_DEVICE   0x02000000U
+#define RT_UFS_UTRD_DD_DEVICE_TO_HOST   0x04000000U
+/* UTRD dword_0 command type bits */
+#define RT_UFS_UTRD_CMD_TYPE_SCSI       (1U << RT_UFS_UPIU_COMMAND_TYPE_OFFSET)
+
+/* Map a SCSI opcode to its UTP data direction (device-to-host, host-to-device, or none). */
+static rt_uint8_t ufs_scsi_cmd_data_dir(struct rt_scsi_cmd *cmd)
+{
+    rt_uint8_t dir;
+
+    switch (cmd->op.unknow.opcode)
+    {
+    /* Data flows from the UFS device into host memory */
+    case RT_SCSI_CMD_READ10:
+    case RT_SCSI_CMD_READ12:
+    case RT_SCSI_CMD_READ16:
+    case RT_SCSI_CMD_READ_CAPACITY10:
+    case RT_SCSI_CMD_READ_CAPACITY16:
+    case RT_SCSI_CMD_INQUIRY:
+    case RT_SCSI_CMD_REQUEST_SENSE:
+        dir = RT_UTP_DEVICE_TO_HOST;
+        break;
+
+    /* Data flows from host memory out to the UFS device */
+    case RT_SCSI_CMD_WRITE10:
+    case RT_SCSI_CMD_WRITE12:
+    case RT_SCSI_CMD_WRITE16:
+        dir = RT_UTP_HOST_TO_DEVICE;
+        break;
+
+    /* Everything else (TEST UNIT READY, SYNCHRONIZE CACHE, ...) moves no data */
+    default:
+        dir = RT_UTP_NO_DATA_TRANSFER;
+        break;
+    }
+
+    return dir;
+}
+
+/*
+ * Build the Command UPIU for a SCSI command in the UCD, and clear the response
+ * UPIU area.
+ *
+ * @param ufs  UFS host controller (unused here, kept for symmetry with siblings)
+ * @param req  command UPIU to fill (inside the UCD)
+ * @param rsp  response UPIU to clear (controller-written on completion)
+ * @param sdev SCSI device the command targets (provides the LUN)
+ * @param cmd  SCSI command with CDB and data buffer description
+ */
+static void ufs_prepare_upiu_cmd(struct rt_ufs_host *ufs, struct rt_utp_upiu_req *req, struct rt_utp_upiu_rsp *rsp,
+        struct rt_scsi_device *sdev, struct rt_scsi_cmd *cmd)
+{
+    rt_uint16_t cdb_len;
+    rt_size_t data_len = cmd->data.size;
+    rt_uint8_t data_dir = ufs_scsi_cmd_data_dir(cmd);
+    rt_uint8_t flags = RT_UPIU_CMD_FLAGS_NONE;
+
+    if (data_dir == RT_UTP_DEVICE_TO_HOST)
+    {
+        flags = RT_UPIU_CMD_FLAGS_READ;
+    }
+    else if (data_dir == RT_UTP_HOST_TO_DEVICE)
+    {
+        flags = RT_UPIU_CMD_FLAGS_WRITE;
+    }
+
+    rt_memset(req, 0, sizeof(*req));
+    req->header.transaction_code = RT_UPIU_TRANSACTION_COMMAND;
+    req->header.flags = flags;
+    req->header.lun = (rt_uint8_t)(sdev->lun & 0x7f);
+    req->header.task_tag = RT_UFS_SLOT_ID;
+    req->header.command_set_type = RT_UPIU_COMMAND_SET_TYPE_SCSI;
+    /*
+     * A COMMAND UPIU carries no data segment: the payload is transferred
+     * through the PRDT, so the header "Data Segment Length" must be 0
+     * (JEDEC UFS; Linux ufshcd does the same). The transfer size belongs
+     * only in exp_data_transfer_len below.
+     */
+    req->header.data_segment_length = 0;
+
+    req->sc.exp_data_transfer_len = rt_cpu_to_be32((rt_uint32_t)data_len);
+    /* Clamp the CDB copy to the UPIU CDB field size */
+    cdb_len = cmd->op_size < RT_UFS_CDB_SIZE ? (rt_uint16_t)cmd->op_size : RT_UFS_CDB_SIZE;
+    rt_memcpy(req->sc.cdb, &cmd->op, cdb_len);
+
+    /* Clear the response area so stale data from a previous command is obvious */
+    rt_memset(rsp, 0, sizeof(*rsp));
+}
+
+/* Fill UTRD header and link to UCD; PRDT filled separately. */
+static rt_err_t ufs_prepare_utrd(struct rt_ufs_host *ufs, rt_uint8_t data_dir, rt_size_t data_len)
+{
+    rt_uint64_t ucd_phys = ufs->ucd_handle;
+    rt_uint32_t utp_dd_dword = RT_UFS_UTRD_DD_NONE;
+    rt_uint16_t resp_off_dw, resp_len_dw, prdt_off_dw, prdt_len_entries;
+    struct rt_utp_transfer_req_desc *utrd = ufs->utrd;
+
+    /* Clear whole descriptor to avoid any leftover fields */
+    rt_memset(utrd, 0, sizeof(*utrd));
+
+    /* UFSHCI: offset/length in DWORDs (4 bytes) */
+    resp_off_dw = (rt_uint16_t)(RT_UFS_ALIGNED_UPIU_SIZE >> 2);
+    prdt_off_dw = (rt_uint16_t)((RT_UFS_ALIGNED_UPIU_SIZE * 2) >> 2);
+    resp_len_dw = (rt_uint16_t)(RT_UFS_ALIGNED_UPIU_SIZE >> 2);
+    prdt_len_entries = (data_len > 0) ? 1 : 0;
+
+    /* RequestDescHeader.dword_0..dword_3 */
+    if (data_dir == RT_UTP_HOST_TO_DEVICE)
+    {
+        utp_dd_dword = RT_UFS_UTRD_DD_HOST_TO_DEVICE;
+    }
+    else if (data_dir == RT_UTP_DEVICE_TO_HOST)
+    {
+        utp_dd_dword = RT_UFS_UTRD_DD_DEVICE_TO_HOST;
+    }
+
+    /* Always set INT_CMD to ensure UTRCS gets asserted */
+    utrd->header.dword_0 = rt_cpu_to_le32(RT_UFS_UTRD_CMD_TYPE_SCSI | utp_dd_dword | RT_UFS_UTP_REQ_DESC_INT_CMD);
+    utrd->header.dword_1 = 0;
+    utrd->header.dword_2 = rt_cpu_to_le32(RT_UFS_OCS_INVALID_COMMAND_STATUS);
+    utrd->header.dword_3 = 0;
+
+    /* UFSHCI expects LE for descriptor fields */
+    utrd->command_desc_base_addr_lo = rt_cpu_to_le32(rt_lower_32_bits(ucd_phys));
+    utrd->command_desc_base_addr_hi = rt_cpu_to_le32(rt_upper_32_bits(ucd_phys));
+    utrd->response_upiu_offset = rt_cpu_to_le16(resp_off_dw);
+    utrd->response_upiu_length = rt_cpu_to_le16(resp_len_dw);
+    utrd->prd_table_offset = rt_cpu_to_le16(prdt_off_dw);
+    utrd->prd_table_length = rt_cpu_to_le16(prdt_len_entries);
+
+    return RT_EOK;
+}
+
+/* Fill PRDT with entries for cmd->data.ptr (split by page boundary). */
+static rt_err_t ufs_prepare_prdt(struct rt_ufs_host *ufs, const void *data_ptr, rt_size_t len, rt_uint16_t *out_entries)
+{
+    rt_size_t left;
+    rt_uint8_t *ptr;
+    rt_uint16_t n = 0;
+    struct rt_utp_transfer_cmd_desc *ucd = (struct rt_utp_transfer_cmd_desc *)ufs->ucd_base;
+    struct rt_ufs_sg_entry *prd = ucd->prd_table;
+
+    if (out_entries)
+    {
+        *out_entries = 0;
+    }
+
+    if (len == 0)
+    {
+        return RT_EOK;
+    }
+
+    left = len;
+    ptr = (rt_uint8_t *)(rt_ubase_t)data_ptr;
+
+    while (left)
+    {
+        rt_uint64_t addr;
+        rt_size_t chunk, page_off;
+
+        if (n >= RT_UFS_PRDT_ENTRIES_PER_SLOT)
+        {
+            LOG_E("%s: UFS: PRDT entries overflow (len=%u left=%u n=%u)", rt_dm_dev_get_name(ufs->parent.dev),
+                    len, left, n);
+            return -RT_EIO;
+        }
+
+        page_off = ((rt_ubase_t)ptr) & (ARCH_PAGE_SIZE - 1);
+        chunk = ARCH_PAGE_SIZE - page_off;
+        if (chunk > left)
+        {
+            chunk = left;
+        }
+
+        if (ptr >= ufs->bounce && ptr < (ufs->bounce + ufs->bounce_size))
+        {
+            addr = (rt_uint64_t)ufs->bounce_handle + (rt_uint64_t)(ptr - ufs->bounce);
+        }
+        else
+        {
+            addr = (rt_uint64_t)rt_kmem_v2p(ptr);
+        }
+
+        prd[n].addr = rt_cpu_to_le64(addr);
+        prd[n].reserved = 0;
+        prd[n].size = rt_cpu_to_le32((rt_uint32_t)(chunk - 1)); /* 0-based byte count */
+
+        ptr += chunk;
+        left -= chunk;
+        ++n;
+    }
+
+    if (out_entries)
+    {
+        *out_entries = n;
+    }
+
+    return RT_EOK;
+}
+
+/*
+ * Execute one SCSI command through UTP slot 0: build UPIU/UTRD/PRDT, manage
+ * cache maintenance and the optional bounce buffer, ring the doorbell, wait
+ * for the ISR completion, then decode OCS and the response UPIU.
+ */
+static rt_err_t ufs_utp_transfer(struct rt_ufs_host *ufs, struct rt_scsi_device *sdev, struct rt_scsi_cmd *cmd)
+{
+    struct rt_utp_transfer_cmd_desc *ucd = (struct rt_utp_transfer_cmd_desc *)ufs->ucd_base;
+    struct rt_utp_upiu_req *req = (struct rt_utp_upiu_req *)ucd->command_upiu;
+    struct rt_utp_upiu_rsp *rsp = (struct rt_utp_upiu_rsp *)ucd->response_upiu;
+    struct rt_utp_transfer_req_desc *utrd = ufs->utrd;
+    void *regs = ufs->regs;
+    rt_uint8_t data_dir;
+    rt_uint32_t is;
+    rt_err_t err = RT_EOK;
+    rt_uint16_t prdt_entries = 0;
+    rt_uint8_t ocs;
+    rt_bool_t use_bounce = RT_FALSE;
+    void *orig_ptr = cmd->data.ptr, *resp_buf;
+    rt_size_t orig_size = cmd->data.size;
+    void *dma_ptr = cmd->data.ptr;
+    rt_size_t dma_size = cmd->data.size;
+    rt_bool_t bounce_copied = RT_FALSE;
+
+    data_dir = ufs_scsi_cmd_data_dir(cmd);
+    /* Basic controller readiness checks to avoid hard timeouts */
+    if (regs)
+    {
+        rt_uint32_t hcs = HWREG32(regs + RT_UFS_REG_HCS);
+        rt_uint32_t hce = HWREG32(regs + RT_UFS_REG_HCE);
+
+        if (!(hce & 0x1) || !(hcs & RT_UFS_REG_HCS_UTRLRDY) || !(hcs & RT_UFS_REG_HCS_UCRDY))
+        {
+            LOG_E("%s: UFS not ready for UTP: HCE=%#08x HCS=%#08x (UTRLRDY=%u UCRDY=%u)",
+                  rt_dm_dev_get_name(ufs->parent.dev),
+                  hce, hcs,
+                  (hcs & RT_UFS_REG_HCS_UTRLRDY) ? 1 : 0,
+                  (hcs & RT_UFS_REG_HCS_UCRDY) ? 1 : 0);
+
+            return -RT_EIO;
+        }
+    }
+
+    /*
+     * Bounce small I/O buffers.
+     * Many SCSI helper commands use stack-allocated buffers (embedded in rt_scsi_cmd),
+     * which are unsafe for DMA in Smart/MMU environments. Use a pre-allocated
+     * DMA-friendly bounce buffer for small transfers.
+     */
+    if (cmd->data.ptr && cmd->data.size && ufs->bounce && ufs->bounce_size && cmd->data.size <= ufs->bounce_size)
+    {
+        use_bounce = RT_TRUE;
+
+        if (data_dir == RT_UTP_HOST_TO_DEVICE)
+        {
+            rt_memcpy(ufs->bounce, orig_ptr, orig_size);
+        }
+        dma_ptr = ufs->bounce;
+        dma_size = orig_size;
+    }
+
+    ufs_prepare_upiu_cmd(ufs, req, rsp, sdev, cmd);
+    if ((err = ufs_prepare_utrd(ufs, data_dir, cmd->data.size)))
+    {
+        goto _end;
+    }
+
+    if ((err = ufs_prepare_prdt(ufs, dma_ptr, dma_size, &prdt_entries)))
+    {
+        goto _end;
+    }
+    /* Overwrite the count seeded by ufs_prepare_utrd with the real entry count */
+    utrd->prd_table_length = rt_cpu_to_le16(prdt_entries);
+
+    /* Basic debug information before submitting UTP */
+    LOG_D("%s: UTP submit: opcode=%#02x dir=%u data_len=%u slot=%u",
+            rt_dm_dev_get_name(ufs->parent.dev),
+            cmd->op.unknow.opcode, data_dir,
+            cmd->data.size, RT_UFS_SLOT_ID);
+    LOG_D("%s: UTP ctrl regs: HCS=%#08x IE=%#08x HCE=%#08x",
+            rt_dm_dev_get_name(ufs->parent.dev),
+            HWREG32(regs + RT_UFS_REG_HCS),
+            HWREG32(regs + RT_UFS_REG_IE),
+            HWREG32(regs + RT_UFS_REG_HCE));
+    LOG_D("%s: UTP ctrl ready bits: UTRLRDY=%u UTMRLRDY=%u UICRDY=%u",
+            rt_dm_dev_get_name(ufs->parent.dev),
+            (HWREG32(regs + RT_UFS_REG_HCS) & RT_UFS_REG_HCS_UTRLRDY) ? 1 : 0,
+            (HWREG32(regs + RT_UFS_REG_HCS) & RT_UFS_REG_HCS_UTMRLRDY) ? 1 : 0,
+            (HWREG32(regs + RT_UFS_REG_HCS) & RT_UFS_REG_HCS_UCRDY) ? 1 : 0);
+    LOG_D("%s: UTP UTRD: dword_0=%#08x dword_2(ocs)=%#08x",
+            rt_dm_dev_get_name(ufs->parent.dev),
+            rt_le32_to_cpu(utrd->header.dword_0),
+            rt_le32_to_cpu(utrd->header.dword_2));
+    LOG_D("%s: UTP UTRD: ucd_base_lo=%#08x ucd_base_hi=%#08x resp_off_dw=%u resp_len_dw=%u prd_off_dw=%u prd_len=%u",
+            rt_dm_dev_get_name(ufs->parent.dev),
+            rt_le32_to_cpu(utrd->command_desc_base_addr_lo),
+            rt_le32_to_cpu(utrd->command_desc_base_addr_hi),
+            rt_le16_to_cpu(utrd->response_upiu_offset),
+            rt_le16_to_cpu(utrd->response_upiu_length),
+            rt_le16_to_cpu(utrd->prd_table_offset),
+            rt_le16_to_cpu(utrd->prd_table_length));
+
+    /*
+     * DMA descriptors/data must be visible to controller.
+     * Clean dcache for UTRD + UCD (command/response/PRDT) before ringing doorbell.
+     */
+    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, ufs->utrd, sizeof(*utrd));
+    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, ufs->ucd_base, ufs->ucd_size);
+
+    if (dma_ptr && dma_size && data_dir == RT_UTP_HOST_TO_DEVICE)
+    {
+        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, dma_ptr, dma_size);
+    }
+
+    /* Clear interrupt status for transfer complete */
+    /* NOTE(review): irq_status is written by the ISR without holding ufs->lock;
+     * safe only while commands are strictly serialized — confirm vs. parallel_io. */
+    ufs->irq_status = 0;
+    /* Ack any stale pending IRQ bits before kicking */
+    HWREG32(regs + RT_UFS_REG_IS) = RT_UINT32_MAX;
+    /* Enable UTP completion/error interrupts */
+    HWREG32(regs + RT_UFS_REG_IE) = RT_UFS_REG_IE_UTRCE | RT_UFS_REG_IE_UTPEE | RT_UFS_REG_IE_DFEE | RT_UFS_REG_IE_UEE;
+
+    /* Ring door bell for slot 0 */
+    rt_spin_lock(&ufs->lock);
+    HWREG32(regs + RT_UFS_REG_UTRLDBR) = RT_BIT(RT_UFS_SLOT_ID);
+    rt_spin_unlock(&ufs->lock);
+
+    /* Interrupt mode: wait for completion signaled by ISR */
+    if ((err = rt_completion_wait(&ufs->done, rt_tick_from_millisecond(RT_UFS_UTP_TIMEOUT_MS))))
+    {
+        LOG_E("%s: UFS UTP wait timeout: IS=%#08x irq_status=%#08x",
+                rt_dm_dev_get_name(ufs->parent.dev),
+                HWREG32(regs + RT_UFS_REG_IS), ufs->irq_status);
+
+        /* Dump UPIU header and PRDT entry for post-mortem */
+        LOG_E("%s: UTP UPIU: tx=%u flags=%#02x lun=%u tag=%u seg_len(be16)=%u",
+                rt_dm_dev_get_name(ufs->parent.dev),
+                req->header.transaction_code, req->header.flags,
+                req->header.lun, req->header.task_tag,
+                rt_be16_to_cpu(req->header.data_segment_length));
+
+        if (cmd->data.size > 0)
+        {
+            LOG_E("%s: UTP PRDT[0]: addr=%#llx size(le32)=%#08x",
+                    rt_dm_dev_get_name(ufs->parent.dev),
+                    (rt_uint64_t)(&((struct rt_utp_transfer_cmd_desc *)ufs->ucd_base)->prd_table[0])->addr,
+                    (&((struct rt_utp_transfer_cmd_desc *)ufs->ucd_base)->prd_table[0])->size);
+        }
+
+        goto _end;
+    }
+
+    is = ufs->irq_status | HWREG32(regs + RT_UFS_REG_IS);
+    if (is & (RT_UFS_REG_IS_UTPES | RT_UFS_REG_IS_DFES | RT_UFS_REG_IS_UE))
+    {
+        err = -RT_ERROR;
+        goto _end;
+    }
+
+    /* Ensure controller-written status/data are seen */
+    /* UTRL is 1KB aligned; invalidate a full slot region to avoid cacheline/alias issues */
+    rt_hw_cpu_dcache_ops(RT_HW_CACHE_INVALIDATE, ufs->utrl_base, RT_UFS_UTR_LIST_ALIGN);
+    rt_hw_cpu_dcache_ops(RT_HW_CACHE_INVALIDATE, ufs->ucd_base, ufs->ucd_size);
+    if (dma_ptr && dma_size && data_dir == RT_UTP_DEVICE_TO_HOST)
+    {
+        rt_hw_cpu_dcache_ops(RT_HW_CACHE_INVALIDATE, dma_ptr, dma_size);
+    }
+
+    /*
+     * If bounce buffer is used for device->host, copy it back immediately after
+     * invalidation so that any post-processing (e.g. READ CAPACITY emulation)
+     * operates on the latest data. Otherwise, the late copy at out_restore would
+     * overwrite any modifications.
+     */
+    if (use_bounce && data_dir == RT_UTP_DEVICE_TO_HOST && orig_ptr && orig_size)
+    {
+        rt_memcpy(orig_ptr, ufs->bounce, orig_size);
+        bounce_copied = RT_TRUE;
+    }
+
+    LOG_D("%s: UFS UTRD complete: dword_2=%#08x (ocs=%#x)",
+            rt_dm_dev_get_name(ufs->parent.dev),
+            rt_le32_to_cpu(utrd->header.dword_2),
+            (rt_le32_to_cpu(utrd->header.dword_2) & 0x0f));
+
+    /*
+     * Post-process SCSI response in resp_buf (INQUIRY etc.).
+     * READ CAPACITY: pass through device-reported block_size / last_block.
+     * Use resp_buf: after bounce_copied, cmd->data.ptr may alias response memory.
+     */
+    resp_buf = (bounce_copied && orig_ptr) ? orig_ptr : cmd->data.ptr;
+
+    if (!resp_buf)
+    {
+        goto _no_resp_buf;
+    }
+
+    if (cmd->op.unknow.opcode == RT_SCSI_CMD_INQUIRY &&
+        cmd->data.size >= sizeof(struct rt_scsi_inquiry_data))
+    {
+        struct rt_scsi_inquiry_data *inq = (struct rt_scsi_inquiry_data *)resp_buf;
+
+        /* Align with AHCI behavior: always expose direct-access block device */
+        inq->devtype = SCSI_DEVICE_TYPE_DIRECT;
+        inq->rmb = 0;
+        inq->length = 95 - 4;
+    }
+
+_no_resp_buf:
+    /* Log response header for troubleshooting */
+    LOG_D("%s: UFS RSP: tc=%#02x rsp=%#02x sts=%#02x seg_len=%u sense_len=%u",
+          rt_dm_dev_get_name(ufs->parent.dev),
+          rsp->header.transaction_code,
+          rsp->header.response,
+          rsp->header.status,
+          rt_be16_to_cpu(rsp->header.data_segment_length),
+          rt_be16_to_cpu(rsp->sr.sense_data_len));
+
+    ocs = rt_le32_to_cpu(utrd->header.dword_2) & 0xf;
+    if (ocs != RT_UFS_OCS_SUCCESS)
+    {
+        LOG_E("%s: UFS OCS %#x", rt_dm_dev_get_name(ufs->parent.dev), ocs);
+        err = -RT_ERROR;
+        goto _end;
+    }
+
+    if (rsp->header.transaction_code != RT_UPIU_TRANSACTION_RESPONSE)
+    {
+        err = -RT_ERROR;
+        goto _end;
+    }
+
+    if (rsp->header.status != 0)
+    {
+        err = -RT_ERROR;
+        goto _end;
+    }
+
+    /* Sense data: copy to cmd when check condition (BSP may use cmd->data.ptr for sense) */
+    /*
+     * NOTE(review): dead code — every path that sets err != RT_EOK jumps to
+     * _end before reaching here, so this sense copy can never execute. The
+     * copy likely belongs before the "status != 0" early-out above; confirm
+     * the intended CHECK CONDITION handling.
+     */
+    if (err != RT_EOK && rsp->sr.sense_data_len != 0 && cmd->data.ptr && cmd->data.size >= RT_UFS_SENSE_SIZE)
+    {
+        rt_uint16_t sense_len = rt_be16_to_cpu(rsp->sr.sense_data_len);
+
+        sense_len = rt_min_t(rt_uint16_t, sense_len, RT_UFS_SENSE_SIZE);
+        rt_memcpy(cmd->data.ptr, rsp->sr.sense_data, sense_len);
+    }
+
+_end:
+    /* Late copy-back for host-side consumers when no post-processing path ran */
+    if (use_bounce && !bounce_copied && err == RT_EOK && data_dir == RT_UTP_DEVICE_TO_HOST && orig_ptr && orig_size)
+    {
+        rt_memcpy(orig_ptr, ufs->bounce, orig_size);
+    }
+
+    return err;
+}
+
+/* SCSI host reset hook: delegate to the variant's reset op when one is provided. */
+static rt_err_t ufs_host_reset(struct rt_scsi_device *sdev)
+{
+    struct rt_ufs_host *ufs = rt_container_of(sdev->host, struct rt_ufs_host, parent);
+
+    return ufs->ops->reset ? ufs->ops->reset(ufs) : RT_EOK;
+}
+
+/*
+ * SCSI host transfer hook: dispatch supported opcodes to the UTP path,
+ * report mode/write-same commands as unimplemented, reject the rest.
+ */
+static rt_err_t ufs_host_transfer(struct rt_scsi_device *sdev, struct rt_scsi_cmd *cmd)
+{
+    struct rt_ufs_host *ufs = rt_container_of(sdev->host, struct rt_ufs_host, parent);
+
+    switch (cmd->op.unknow.opcode)
+    {
+    /* Opcodes handled by the UTP transfer engine */
+    case RT_SCSI_CMD_REQUEST_SENSE:
+    case RT_SCSI_CMD_READ10:
+    case RT_SCSI_CMD_READ12:
+    case RT_SCSI_CMD_READ16:
+    case RT_SCSI_CMD_WRITE10:
+    case RT_SCSI_CMD_WRITE12:
+    case RT_SCSI_CMD_WRITE16:
+    case RT_SCSI_CMD_SYNCHRONIZE_CACHE10:
+    case RT_SCSI_CMD_SYNCHRONIZE_CACHE16:
+    case RT_SCSI_CMD_READ_CAPACITY10:
+    case RT_SCSI_CMD_READ_CAPACITY16:
+    case RT_SCSI_CMD_TEST_UNIT_READY:
+    case RT_SCSI_CMD_INQUIRY:
+        return ufs_utp_transfer(ufs, sdev, cmd);
+
+    /* Recognized but not implemented by this driver */
+    case RT_SCSI_CMD_WRITE_SAME10:
+    case RT_SCSI_CMD_WRITE_SAME16:
+    case RT_SCSI_CMD_MODE_SENSE:
+    case RT_SCSI_CMD_MODE_SENSE10:
+    case RT_SCSI_CMD_MODE_SELECT:
+    case RT_SCSI_CMD_MODE_SELECT10:
+        return -RT_ENOSYS;
+
+    default:
+        return -RT_EINVAL;
+    }
+}
+
+/* SCSI host operations exposed to the generic SCSI layer */
+static struct rt_scsi_ops ufs_host_ops =
+{
+    .reset = ufs_host_reset,
+    .transfer = ufs_host_transfer,
+};
+
+/*
+ * Interrupt handler: latch IS bits into irq_status, ack them in hardware,
+ * and wake the waiter on transfer completion or any error condition.
+ */
+static void ufs_isr(int irqno, void *param)
+{
+    rt_uint32_t is;
+    struct rt_ufs_host *ufs = param;
+
+    /* Nothing pending (possibly a shared-line interrupt for another device) */
+    if (!(is = HWREG32(ufs->regs + RT_UFS_REG_IS)))
+    {
+        return;
+    }
+
+    /* Accumulate for the waiter, then write-1-to-clear in hardware */
+    ufs->irq_status |= is;
+    HWREG32(ufs->regs + RT_UFS_REG_IS) = is;
+
+    if (is & (RT_UFS_REG_IS_UTRCS | RT_UFS_REG_IS_UTPES | RT_UFS_REG_IS_DFES | RT_UFS_REG_IS_UE))
+    {
+        rt_completion_done(&ufs->done);
+    }
+}
+
+/*
+ * Register a UFS host: allocate DMA descriptors (UTRL/UTMRL/UCD) and a bounce
+ * buffer, enable the controller, perform link startup, install the ISR, and
+ * register with the SCSI layer.
+ *
+ * @param ufs host with ops/regs/irq and parent.dev filled by the bus glue
+ * @return RT_EOK on success, otherwise a negative error code (all DMA
+ *         allocations are freed on failure)
+ */
+rt_err_t rt_ufs_host_register(struct rt_ufs_host *ufs)
+{
+    rt_err_t err;
+    rt_uint32_t value;
+    char dev_name[RT_NAME_MAX];
+    struct rt_scsi_host *scsi;
+
+    if (!ufs || !ufs->ops || !ufs->regs)
+    {
+        return -RT_EINVAL;
+    }
+
+    /* NUTRS is 0-based in CAP; clamp to the 32-slot register width */
+    ufs->cap = HWREG32(ufs->regs + RT_UFS_REG_CAP);
+    ufs->nutrs = RT_UFS_REG_CAP_NUTRS(ufs->cap) + 1;
+    ufs->nutrs = rt_min_t(rt_uint32_t, 32, ufs->nutrs);
+
+    /* Allocate UTRL (UTRD list) with required 1KB alignment */
+    ufs->utrl_size = RT_UFS_UTR_LIST_BYTES(ufs->nutrs);
+    ufs->utrl_size = rt_max_t(rt_size_t, ufs->utrl_size, RT_UFS_UTR_LIST_ALIGN);
+    ufs->utrl_base = rt_dma_alloc_coherent(ufs->parent.dev, ufs->utrl_size, &ufs->utrl_handle);
+
+    if (!ufs->utrl_base)
+    {
+        err = -RT_ENOMEM;
+        goto _fail;
+    }
+
+    rt_memset(ufs->utrl_base, 0, ufs->utrl_size);
+    ufs->utrd = (struct rt_utp_transfer_req_desc *)ufs->utrl_base; /* Slot0 */
+
+    /* Allocate UCD (Command Descriptor) */
+    ufs->ucd_size = ufs->ucd_size ? : RT_UFS_UCD_SIZE;
+    ufs->ucd_base = rt_dma_alloc_coherent(ufs->parent.dev, ufs->ucd_size, &ufs->ucd_handle);
+
+    if (!ufs->ucd_base)
+    {
+        err = -RT_ENOMEM;
+        goto _fail;
+    }
+
+    rt_memset(ufs->ucd_base, 0, ufs->ucd_size);
+
+    /* Allocate a 4KB bounce buffer for small DMA transfers (inquiry/sense/capacity etc.) */
+    ufs->bounce_size = 4096;
+    ufs->bounce_handle = 0;
+    ufs->bounce = rt_dma_alloc_coherent(ufs->parent.dev, ufs->bounce_size, &ufs->bounce_handle);
+
+    if (!ufs->bounce)
+    {
+        err = -RT_ENOMEM;
+        goto _fail;
+    }
+
+    rt_memset(ufs->bounce, 0, ufs->bounce_size);
+
+    /* Variant/board specific init (clocks/reset/phy) */
+    if (ufs->ops->init && (err = ufs->ops->init(ufs)))
+    {
+        goto _fail;
+    }
+
+    /* Enable controller (HCE 1) */
+    HWREG32(ufs->regs + RT_UFS_REG_HCE) = 0;
+    rt_thread_mdelay(1);
+    HWREG32(ufs->regs + RT_UFS_REG_HCE) = 0x1;
+    rt_thread_mdelay(1);
+
+    /* Program UTRD/UTMRD list base addresses */
+    HWREG32(ufs->regs + RT_UFS_REG_UTRLBA) = rt_lower_32_bits(ufs->utrl_handle);
+    HWREG32(ufs->regs + RT_UFS_REG_UTRLBAU) = rt_upper_32_bits(ufs->utrl_handle);
+
+    /* UTMRL: allocate minimal aligned list if not provided */
+    /* NOTE(review): if this allocation fails, UTMRLBA is programmed with
+     * handle 0 below — confirm whether that is acceptable for the hardware. */
+    ufs->utmrl_size = RT_UFS_UTR_LIST_ALIGN;
+    ufs->utmrl_handle = 0;
+    ufs->utmrl_base = rt_dma_alloc_coherent(ufs->parent.dev, ufs->utmrl_size, &ufs->utmrl_handle);
+    if (ufs->utmrl_base)
+    {
+        rt_memset(ufs->utmrl_base, 0, ufs->utmrl_size);
+    }
+
+    HWREG32(ufs->regs + RT_UFS_REG_UTMRLBA) = rt_lower_32_bits(ufs->utmrl_handle);
+    HWREG32(ufs->regs + RT_UFS_REG_UTMRLBAU) = rt_upper_32_bits(ufs->utmrl_handle);
+
+    /* Start run/stop */
+    HWREG32(ufs->regs + RT_UFS_REG_UTRLRSR) = 0x1;
+    HWREG32(ufs->regs + RT_UFS_REG_UTMRLRSR) = 0x1;
+
+    /* Link startup: set UTRLRDY/UTMRLRDY */
+    value = HWREG32(ufs->regs + RT_UFS_REG_HCS);
+    if (!(value & RT_UFS_REG_HCS_UTRLRDY) || !(value & RT_UFS_REG_HCS_UTMRLRDY) || !(value & RT_UFS_REG_HCS_UCRDY))
+    {
+        if (ufs->ops->link_startup_notify)
+        {
+            ufs->ops->link_startup_notify(ufs, RT_UFS_NOTIFY_CHANGE_STATUS_PRE);
+        }
+
+        if ((err = rt_ufs_uic_cmd_send(ufs, RT_UFS_CMDOP_DME_LINKSTARTUP, 0, &value, 0)))
+        {
+            goto _fail;
+        }
+
+        if (ufs->ops->link_startup_notify)
+        {
+            ufs->ops->link_startup_notify(ufs, RT_UFS_NOTIFY_CHANGE_STATUS_POST);
+        }
+    }
+
+    ufs->pwr_active_valid = 0;
+    /* ahit==0 lets rt_ufs_pm_post_linkup apply RT_UFS_AHIT_DEFAULT when CAP_AUTOH8 */
+    rt_ufs_pm_post_linkup(ufs);
+
+    rt_completion_init(&ufs->done);
+    rt_spin_lock_init(&ufs->lock);
+
+    rt_snprintf(dev_name, sizeof(dev_name), "ufs-%s", rt_dm_dev_get_name(ufs->parent.dev));
+
+    rt_hw_interrupt_install(ufs->irq, ufs_isr, ufs, dev_name);
+    rt_hw_interrupt_umask(ufs->irq);
+
+    scsi = &ufs->parent;
+    scsi->ops = &ufs_host_ops;
+    scsi->max_id = rt_max_t(rt_size_t, scsi->max_id, 1);
+    scsi->max_lun = rt_max_t(rt_size_t, scsi->max_lun, 1);
+    /* NOTE(review): the transfer path uses a single UTP slot and one shared
+     * completion; advertising parallel_io may allow concurrent commands to
+     * race on slot 0 — confirm the SCSI layer serializes per-host. */
+    scsi->parallel_io = RT_TRUE;
+
+    if ((err = rt_scsi_host_register(scsi)))
+    {
+        goto _free_irq;
+    }
+
+    return RT_EOK;
+
+_free_irq:
+    rt_hw_interrupt_mask(ufs->irq);
+    rt_pic_detach_irq(ufs->irq, ufs);
+
+_fail:
+    if (ufs->utrl_base)
+    {
+        rt_dma_free_coherent(ufs->parent.dev, ufs->utrl_size, ufs->utrl_base, ufs->utrl_handle);
+        ufs->utrl_base = RT_NULL;
+    }
+    if (ufs->utmrl_base)
+    {
+        rt_dma_free_coherent(ufs->parent.dev, ufs->utmrl_size, ufs->utmrl_base, ufs->utmrl_handle);
+        ufs->utmrl_base = RT_NULL;
+    }
+    if (ufs->ucd_base)
+    {
+        rt_dma_free_coherent(ufs->parent.dev, ufs->ucd_size, ufs->ucd_base, ufs->ucd_handle);
+        ufs->ucd_base = RT_NULL;
+    }
+    if (ufs->bounce)
+    {
+        rt_dma_free_coherent(ufs->parent.dev, ufs->bounce_size, ufs->bounce, ufs->bounce_handle);
+        ufs->bounce = RT_NULL;
+    }
+
+    return err;
+}
+
+/*
+ * Unregister a UFS host and free its DMA allocations.
+ *
+ * NOTE(review): the ISR installed and unmasked in rt_ufs_host_register() is
+ * not masked/detached here — confirm the bus glue (e.g. pci_ufs_remove) is
+ * expected to handle the IRQ line, otherwise the handler dangles after free.
+ *
+ * @return the variant exit-op result (RT_EOK when no exit op is provided)
+ */
+rt_err_t rt_ufs_host_unregister(struct rt_ufs_host *ufs)
+{
+    rt_err_t err = RT_EOK;
+
+    rt_scsi_host_unregister(&ufs->parent);
+
+    /* Variant exit failure is reported but does not stop resource teardown */
+    if (ufs->ops->exit && (err = ufs->ops->exit(ufs)))
+    {
+        LOG_W("%s: UFS ops->exit failed: %s", rt_dm_dev_get_name(ufs->parent.dev), rt_strerror(err));
+    }
+
+    if (ufs->utrl_base)
+    {
+        rt_dma_free_coherent(ufs->parent.dev, ufs->utrl_size, ufs->utrl_base, ufs->utrl_handle);
+        ufs->utrl_base = RT_NULL;
+    }
+    if (ufs->utmrl_base)
+    {
+        rt_dma_free_coherent(ufs->parent.dev, ufs->utmrl_size, ufs->utmrl_base, ufs->utmrl_handle);
+        ufs->utmrl_base = RT_NULL;
+    }
+    if (ufs->ucd_base)
+    {
+        rt_dma_free_coherent(ufs->parent.dev, ufs->ucd_size, ufs->ucd_base, ufs->ucd_handle);
+        ufs->ucd_base = RT_NULL;
+    }
+    if (ufs->bounce)
+    {
+        rt_dma_free_coherent(ufs->parent.dev, ufs->bounce_size, ufs->bounce, ufs->bounce_handle);
+        ufs->bounce = RT_NULL;
+    }
+
+    return err;
+}
+
+/*
+ * Issue a UIC (DME) command and poll for completion.
+ *
+ * Timeouts use unsigned elapsed-tick arithmetic ((now - start) >= budget),
+ * which stays correct across rt_tick counter wraparound; the original
+ * compared absolute deadlines in a signed rt_int32_t, which misbehaves
+ * near wraparound.
+ *
+ * @param ufs  UFS host controller
+ * @param cmd  UIC command opcode (low 8 bits are written to UICCMD)
+ * @param arg1 value for UCMDARG1 (e.g. MIB attribute selector)
+ * @param arg2 in: value for UCMDARG2; out: UCMDARG2 read-back (may be RT_NULL)
+ * @param arg3 value for UCMDARG3
+ * @return RT_EOK, -RT_ETIMEOUT on UCRDY/UCCS timeout, -RT_ERROR on UIC/fatal error
+ */
+rt_err_t rt_ufs_uic_cmd_send(struct rt_ufs_host *ufs, rt_uint32_t cmd,
+        rt_uint32_t arg1, rt_uint32_t *arg2, rt_uint32_t arg3)
+{
+    rt_uint32_t is;
+    rt_tick_t start, budget;
+    void *regs = ufs->regs;
+
+    if (!regs)
+    {
+        return -RT_EINVAL;
+    }
+
+    budget = rt_tick_from_millisecond(RT_UFS_UIC_TIMEOUT_MS);
+
+    /* Wait for the UIC command interface to become ready */
+    start = rt_tick_get();
+    while (!(HWREG32(regs + RT_UFS_REG_HCS) & RT_UFS_REG_HCS_UCRDY))
+    {
+        if ((rt_tick_t)(rt_tick_get() - start) >= budget)
+        {
+            LOG_E("%s: UFS UIC not ready", rt_dm_dev_get_name(ufs->parent.dev));
+            return -RT_ETIMEOUT;
+        }
+
+        rt_thread_mdelay(1);
+    }
+
+    /* Arguments must be programmed before the command register kicks the DME */
+    HWREG32(regs + RT_UFS_REG_UCMDARG1) = arg1;
+    HWREG32(regs + RT_UFS_REG_UCMDARG2) = arg2 ? *arg2 : 0;
+    HWREG32(regs + RT_UFS_REG_UCMDARG3) = arg3;
+    HWREG32(regs + RT_UFS_REG_UICCMD) = cmd & 0xff;
+
+    /* Poll for UIC Command Completion Status, bailing out on fatal errors */
+    start = rt_tick_get();
+    do {
+        is = HWREG32(regs + RT_UFS_REG_IS);
+        if (is & RT_UFS_REG_IS_UCCS)
+        {
+            break;
+        }
+        if (is & (RT_UFS_REG_IS_UE | RT_UFS_REG_IS_DFES))
+        {
+            return -RT_ERROR;
+        }
+        rt_thread_mdelay(1);
+    } while ((rt_tick_t)(rt_tick_get() - start) < budget);
+
+    if (!(is & RT_UFS_REG_IS_UCCS))
+    {
+        return -RT_ETIMEOUT;
+    }
+
+    /* Ack UCCS (write-1-to-clear) */
+    HWREG32(regs + RT_UFS_REG_IS) = RT_UFS_REG_IS_UCCS;
+
+    /* Return the command result (e.g. GenericErrorCode) to the caller */
+    if (arg2)
+    {
+        *arg2 = HWREG32(regs + RT_UFS_REG_UCMDARG2);
+    }
+
+    return RT_EOK;
+}

+ 312 - 0
components/drivers/ufs/ufs_pm.c

@@ -0,0 +1,312 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-02-25     GuEe-GUI     the first version
+ */
+
+#include <rthw.h>
+#include <rtthread.h>
+#include <rtdevice.h>
+
+#include <drivers/ufs.h>
+
+#define DBG_TAG "rtdm.ufs.pm"
+#define DBG_LVL DBG_INFO
+#include <rtdbg.h>
+
+#define RT_UFS_DME_ATTR_SET_NOR         0
+
+/* Place an 8-bit attribute-set type into bits [23:16] of a UIC ARG2 word */
+rt_inline rt_uint32_t ufs_uic_arg_attr_type(rt_uint8_t t)
+{
+    rt_uint32_t attr_type = (rt_uint32_t)t & 0xffu;
+
+    return attr_type << 16;
+}
+
+/*
+ * DME_SET: write @value into the local UniPro attribute selected by
+ * @attr_sel.
+ *
+ * @return RT_EOK on success, the UIC send error on failure, or
+ *         -RT_ERROR when the command result code is not SUCCESS.
+ */
+rt_err_t rt_ufs_dme_set(struct rt_ufs_host *ufs, rt_uint32_t attr_sel, rt_uint32_t value)
+{
+    rt_err_t err;
+    rt_uint32_t result = ufs_uic_arg_attr_type(RT_UFS_DME_ATTR_SET_NOR);
+
+    err = rt_ufs_uic_cmd_send(ufs, RT_UFS_CMDOP_DME_SET, attr_sel, &result, value);
+    if (err)
+    {
+        return err;
+    }
+
+    /* The low bits of ARG2 carry the command result code */
+    return (result & RT_UFS_CMDRES_MASK) == RT_UFS_CMDRES_SUCCESS ? RT_EOK : -RT_ERROR;
+}
+
+/*
+ * DME_GET: read the local UniPro attribute selected by @attr_sel into
+ * @value.
+ *
+ * @return RT_EOK on success (*value is updated), -RT_EINVAL when @value
+ *         is RT_NULL, the UIC send error on failure, or -RT_ERROR when
+ *         the command result code is not SUCCESS.
+ */
+rt_err_t rt_ufs_dme_get(struct rt_ufs_host *ufs, rt_uint32_t attr_sel, rt_uint32_t *value)
+{
+    rt_err_t err;
+    rt_uint32_t result = 0;
+
+    if (value == RT_NULL)
+    {
+        return -RT_EINVAL;
+    }
+
+    err = rt_ufs_uic_cmd_send(ufs, RT_UFS_CMDOP_DME_GET, attr_sel, &result, 0);
+    if (err)
+    {
+        return err;
+    }
+
+    if ((result & RT_UFS_CMDRES_MASK) == RT_UFS_CMDRES_SUCCESS)
+    {
+        /* On DME_GET the attribute value is returned in UCMDARG3 */
+        *value = HWREG32(ufs->regs + RT_UFS_REG_UCMDARG3);
+
+        return RT_EOK;
+    }
+
+    return -RT_ERROR;
+}
+
+/* Trigger a PA-layer power mode change by writing the PA_PWRMode attribute */
+rt_err_t rt_ufs_uic_pa_pwrmode(struct rt_ufs_host *ufs, rt_uint8_t mode)
+{
+    rt_uint32_t pwr_mode = mode;
+
+    return rt_ufs_dme_set(ufs, RT_UFS_UIC_ARG_MIB(RT_UFS_PA_PWRMODE), pwr_mode);
+}
+
+/*
+ * Program the PA (physical adapter) layer power mode described by @attr
+ * and trigger the mode change via PA_PWRMode.
+ *
+ * @param ufs   host controller instance
+ * @param attr  requested gear/lane/mode/rate settings
+ * @param force reprogram even when @attr matches the cached active mode
+ *
+ * @return RT_EOK on success, -RT_EINVAL on invalid arguments, otherwise
+ *         the error of the failing DME_SET or power mode change.
+ */
+rt_err_t rt_ufs_pa_power_mode_set(struct rt_ufs_host *ufs, const struct rt_ufs_pa_layer_attr *attr, rt_bool_t force)
+{
+    rt_err_t err;
+    rt_uint8_t mode;
+
+    if (!ufs || !attr || !ufs->regs)
+    {
+        return -RT_EINVAL;
+    }
+
+    /* Nothing to do when the requested mode equals the cached active mode */
+    if (!force && ufs->pwr_active_valid &&
+        attr->gear_rx == ufs->pwr_active.gear_rx &&
+        attr->gear_tx == ufs->pwr_active.gear_tx &&
+        attr->lane_rx == ufs->pwr_active.lane_rx &&
+        attr->lane_tx == ufs->pwr_active.lane_tx &&
+        attr->pwr_rx == ufs->pwr_active.pwr_rx &&
+        attr->pwr_tx == ufs->pwr_active.pwr_tx &&
+        attr->hs_rate == ufs->pwr_active.hs_rate)
+    {
+        return RT_EOK;
+    }
+
+    /* RX side: gear, active data lanes, and HS termination */
+    if ((err = rt_ufs_dme_set(ufs, RT_UFS_UIC_ARG_MIB(RT_UFS_PA_RXGEAR), attr->gear_rx)))
+    {
+        return err;
+    }
+    if ((err = rt_ufs_dme_set(ufs, RT_UFS_UIC_ARG_MIB(RT_UFS_PA_ACTIVERXDATALANES), attr->lane_rx)))
+    {
+        return err;
+    }
+    /* Termination is only enabled for the HS (FAST/FASTAUTO) modes */
+    if (attr->pwr_rx == RT_UFS_PA_FASTAUTO_MODE || attr->pwr_rx == RT_UFS_PA_FAST_MODE)
+    {
+        err = rt_ufs_dme_set(ufs, RT_UFS_UIC_ARG_MIB(RT_UFS_PA_RXTERMINATION), 1);
+    }
+    else
+    {
+        err = rt_ufs_dme_set(ufs, RT_UFS_UIC_ARG_MIB(RT_UFS_PA_RXTERMINATION), 0);
+    }
+    if (err)
+    {
+        return err;
+    }
+
+    /* TX side: gear, active data lanes, and HS termination */
+    if ((err = rt_ufs_dme_set(ufs, RT_UFS_UIC_ARG_MIB(RT_UFS_PA_TXGEAR), attr->gear_tx)))
+    {
+        return err;
+    }
+    if ((err = rt_ufs_dme_set(ufs, RT_UFS_UIC_ARG_MIB(RT_UFS_PA_ACTIVETXDATALANES), attr->lane_tx)))
+    {
+        return err;
+    }
+    if (attr->pwr_tx == RT_UFS_PA_FASTAUTO_MODE || attr->pwr_tx == RT_UFS_PA_FAST_MODE)
+    {
+        err = rt_ufs_dme_set(ufs, RT_UFS_UIC_ARG_MIB(RT_UFS_PA_TXTERMINATION), 1);
+    }
+    else
+    {
+        err = rt_ufs_dme_set(ufs, RT_UFS_UIC_ARG_MIB(RT_UFS_PA_TXTERMINATION), 0);
+    }
+    if (err)
+    {
+        return err;
+    }
+
+    /* The HS rate series only matters if either direction runs in a HS mode */
+    if (attr->pwr_rx == RT_UFS_PA_FASTAUTO_MODE || attr->pwr_tx == RT_UFS_PA_FASTAUTO_MODE ||
+        attr->pwr_rx == RT_UFS_PA_FAST_MODE || attr->pwr_tx == RT_UFS_PA_FAST_MODE)
+    {
+        if ((err = rt_ufs_dme_set(ufs, RT_UFS_UIC_ARG_MIB(RT_UFS_PA_HSSERIES), attr->hs_rate)))
+        {
+            return err;
+        }
+    }
+
+    /* Data-link layer timeout values handed over with the mode change
+     * (PA_PWRModeUserData0..5); defaults appear to mirror Linux ufshcd —
+     * confirm against the UniPro spec if these are tuned. */
+    if ((err = rt_ufs_dme_set(ufs, RT_UFS_UIC_ARG_MIB(RT_UFS_PA_PWRMODEUSERDATA0), RT_UFS_DL_FC0_PROT_TO_DEFAULT)))
+    {
+        return err;
+    }
+    if ((err = rt_ufs_dme_set(ufs, RT_UFS_UIC_ARG_MIB(RT_UFS_PA_PWRMODEUSERDATA1), RT_UFS_DL_TC0_REPLAY_TO_DEFAULT)))
+    {
+        return err;
+    }
+    if ((err = rt_ufs_dme_set(ufs, RT_UFS_UIC_ARG_MIB(RT_UFS_PA_PWRMODEUSERDATA2), RT_UFS_DL_AFC0_REQ_TO_DEFAULT)))
+    {
+        return err;
+    }
+    if ((err = rt_ufs_dme_set(ufs, RT_UFS_UIC_ARG_MIB(RT_UFS_PA_PWRMODEUSERDATA3), RT_UFS_DL_FC1_PROT_TO_DEFAULT)))
+    {
+        return err;
+    }
+    if ((err = rt_ufs_dme_set(ufs, RT_UFS_UIC_ARG_MIB(RT_UFS_PA_PWRMODEUSERDATA4), RT_UFS_DL_TC1_REPLAY_TO_DEFAULT)))
+    {
+        return err;
+    }
+    if ((err = rt_ufs_dme_set(ufs, RT_UFS_UIC_ARG_MIB(RT_UFS_PA_PWRMODEUSERDATA5), RT_UFS_DL_AFC1_REQ_TO_DEFAULT)))
+    {
+        return err;
+    }
+
+    /* Keep the local DME timer attributes in sync with the user data above */
+    if ((err = rt_ufs_dme_set(ufs, RT_UFS_UIC_ARG_MIB(RT_UFS_DME_LOCAL_FC0_PROT_TO), RT_UFS_DL_FC0_PROT_TO_DEFAULT)))
+    {
+        return err;
+    }
+    if ((err = rt_ufs_dme_set(ufs, RT_UFS_UIC_ARG_MIB(RT_UFS_DME_LOCAL_TC0_REPLAY_TO), RT_UFS_DL_TC0_REPLAY_TO_DEFAULT)))
+    {
+        return err;
+    }
+    if ((err = rt_ufs_dme_set(ufs, RT_UFS_UIC_ARG_MIB(RT_UFS_DME_LOCAL_AFC0_REQ_TO), RT_UFS_DL_AFC0_REQ_TO_DEFAULT)))
+    {
+        return err;
+    }
+
+    /* PA_PWRMode layout: RX mode in bits [7:4], TX mode in bits [3:0] */
+    mode = (rt_uint8_t)((attr->pwr_rx << 4) | (attr->pwr_tx & 0xfu));
+
+    if ((err = rt_ufs_uic_pa_pwrmode(ufs, mode)))
+    {
+        LOG_E("%s: PA power mode change failed: %s", rt_dm_dev_get_name(ufs->parent.dev), rt_strerror(err));
+        return err;
+    }
+
+    /* Cache the active mode so identical requests can be skipped later */
+    ufs->pwr_active = *attr;
+    ufs->pwr_active_valid = 1;
+
+    return RT_EOK;
+}
+
+/*
+ * Request the link to enter the Hibernate8 power-saving state via the
+ * DME_HIBERNATE_ENTER UIC command.
+ *
+ * @param ufs host controller instance
+ *
+ * @return RT_EOK on success, -RT_EINVAL on an invalid host, the UIC send
+ *         error (e.g. -RT_ETIMEOUT) when the command failed, or -RT_ERROR
+ *         when the command result code is not SUCCESS.
+ */
+rt_err_t rt_ufs_hibern8_enter(struct rt_ufs_host *ufs)
+{
+    rt_err_t err;
+    rt_uint32_t arg2 = 0;
+
+    if (!ufs || !ufs->regs)
+    {
+        return -RT_EINVAL;
+    }
+
+    /* Propagate the concrete UIC error (timeout vs. fatal) instead of
+     * flattening it to -RT_ERROR, consistent with rt_ufs_dme_set/get */
+    if ((err = rt_ufs_uic_cmd_send(ufs, RT_UFS_CMDOP_DME_HIBERNATE_ENTER, 0, &arg2, 0)))
+    {
+        return err;
+    }
+
+    if ((arg2 & RT_UFS_CMDRES_MASK) != RT_UFS_CMDRES_SUCCESS)
+    {
+        return -RT_ERROR;
+    }
+
+    return RT_EOK;
+}
+
+/*
+ * Request the link to leave the Hibernate8 power-saving state via the
+ * DME_HIBERNATE_EXIT UIC command.
+ *
+ * @param ufs host controller instance
+ *
+ * @return RT_EOK on success, -RT_EINVAL on an invalid host, the UIC send
+ *         error (e.g. -RT_ETIMEOUT) when the command failed, or -RT_ERROR
+ *         when the command result code is not SUCCESS.
+ */
+rt_err_t rt_ufs_hibern8_exit(struct rt_ufs_host *ufs)
+{
+    rt_err_t err;
+    rt_uint32_t arg2 = 0;
+
+    if (!ufs || !ufs->regs)
+    {
+        return -RT_EINVAL;
+    }
+
+    /* Propagate the concrete UIC error (timeout vs. fatal) instead of
+     * flattening it to -RT_ERROR, consistent with rt_ufs_dme_set/get */
+    if ((err = rt_ufs_uic_cmd_send(ufs, RT_UFS_CMDOP_DME_HIBERNATE_EXIT, 0, &arg2, 0)))
+    {
+        return err;
+    }
+
+    if ((arg2 & RT_UFS_CMDRES_MASK) != RT_UFS_CMDRES_SUCCESS)
+    {
+        return -RT_ERROR;
+    }
+
+    return RT_EOK;
+}
+
+/*
+ * Program the Auto-Hibernate Idle Timer (AHIT) register and cache the
+ * value in the host structure.
+ *
+ * @return RT_EOK on success, -RT_EINVAL on an invalid host, or
+ *         -RT_ENOSYS when the controller does not advertise
+ *         auto-hibernation support (CAP.AUTOH8).
+ */
+rt_err_t rt_ufs_auto_hibern8_set(struct rt_ufs_host *ufs, rt_uint32_t reg_val)
+{
+    if (ufs == RT_NULL || ufs->regs == RT_NULL)
+    {
+        return -RT_EINVAL;
+    }
+
+    if (ufs->cap & RT_UFS_REG_CAP_AUTOH8)
+    {
+        HWREG32(ufs->regs + RT_UFS_REG_AHIT) = reg_val;
+        ufs->ahit = reg_val;
+
+        return RT_EOK;
+    }
+
+    return -RT_ENOSYS;
+}
+
+/*
+ * Configure UTP Transfer Request interrupt aggregation.
+ *
+ * When @enable is false the aggregation control register is cleared;
+ * otherwise the counter threshold @cnt and @timeout are programmed in a
+ * single write.
+ */
+void rt_ufs_intr_aggr_configure(struct rt_ufs_host *ufs, rt_bool_t enable, rt_uint8_t cnt, rt_uint8_t timeout)
+{
+    void *regs = ufs ? ufs->regs : RT_NULL;
+    rt_uint32_t val = 0;
+
+    if (!regs)
+    {
+        return;
+    }
+
+    if (enable)
+    {
+        /* Same layout as Linux ufshcd_config_intr_aggr */
+        val = RT_UFS_UTRIACR_ENABLE_PARAMS(cnt, timeout);
+    }
+
+    HWREG32(regs + RT_UFS_REG_UTRIACR) = val;
+}
+
+/*
+ * Post link-up power-management setup: enable interrupt aggregation and,
+ * when the controller supports it (CAP.AUTOH8), program Auto-Hibernate8.
+ */
+void rt_ufs_pm_post_linkup(struct rt_ufs_host *ufs)
+{
+    rt_uint8_t aggr_cnt = 0;
+    rt_uint32_t ahit;
+
+    if (!ufs || !ufs->regs)
+    {
+        return;
+    }
+
+    /* Linux INT_AGGR_DEF_TO = 2 (~80µs), counter = nutrs - 1 */
+    if (ufs->nutrs > 0)
+    {
+        aggr_cnt = (rt_uint8_t)(ufs->nutrs - 1);
+    }
+    if (aggr_cnt > 31)
+    {
+        /* The aggregation counter field holds at most 31 */
+        aggr_cnt = 31;
+    }
+    rt_ufs_intr_aggr_configure(ufs, RT_TRUE, aggr_cnt, 2);
+
+    if (!(ufs->cap & RT_UFS_REG_CAP_AUTOH8))
+    {
+        return;
+    }
+
+    /* Reuse a previously cached AHIT value, else fall back to the default */
+    ahit = ufs->ahit ? ufs->ahit : RT_UFS_AHIT_DEFAULT;
+
+    if (rt_ufs_auto_hibern8_set(ufs, ahit))
+    {
+        LOG_W("%s: Auto-Hibernate8 (AHIT) not applied", rt_dm_dev_get_name(ufs->parent.dev));
+    }
+    else
+    {
+        LOG_D("%s: AHIT=%#08x (auto-Hibernate8)", rt_dm_dev_get_name(ufs->parent.dev), ahit);
+    }
+}