
[DM/DMA] Update DMA #10987

* Add the WT (write-through) attribute for DMA buffer mapping.
* Change rt_dma_pool_extract to take only the pool sizes.
* Add an address mask for DMA controllers.
* Change the DMA lock to a mutex taken only in thread context.
* Add a pause callback for DMA engine drivers.
* Add a DMA engine memcpy test.
* Add an ARM PL330 DMA engine driver.
GUI 1 month ago
parent
commit
e5e0ad29bf

+ 7 - 0
components/drivers/dma/Kconfig

@@ -5,6 +5,13 @@ menuconfig RT_USING_DMA
     select RT_USING_ADT_BITMAP
     default n
 
+config RT_DMA_PL330
+    bool "ARM PL330"
+    depends on RT_USING_DMA
+    depends on RT_USING_CLK
+    depends on RT_USING_RESET
+    default n
+
 if RT_USING_DMA
     osource "$(SOC_DM_DMA_DIR)/Kconfig"
 endif
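
Once RT_USING_DMA, RT_USING_CLK and RT_USING_RESET are already enabled and the new "ARM PL330" option is selected, the generated rtconfig.h carries the matching macros, which is what the SConscript check in the next file keys on. A minimal sketch of the expected rtconfig.h fragment (macro names follow the Kconfig symbols above; surrounding content omitted):

/* rtconfig.h fragment (illustrative) */
#define RT_USING_DMA
#define RT_USING_CLK
#define RT_USING_RESET
#define RT_DMA_PL330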

+ 3 - 0
components/drivers/dma/SConscript

@@ -10,6 +10,9 @@ CPPPATH = [cwd + '/../include']
 
 src = ['dma.c', 'dma_pool.c']
 
+if GetDepend(['RT_DMA_PL330']):
+    src += ['dma-pl330.c']
+
 group = DefineGroup('DeviceDrivers', src, depend = [''], CPPPATH = CPPPATH)
 
 Return('group')

+ 1045 - 0
components/drivers/dma/dma-pl330.c

@@ -0,0 +1,1045 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-02-25     GuEe-GUI     the first version
+ */
+
+#include <rthw.h>
+#include <rtthread.h>
+#include <rtdevice.h>
+#include <bitmap.h>
+
+#define DBG_TAG "dma.pl330"
+#define DBG_LVL DBG_INFO
+#include <rtdbg.h>
+
+#define _FIELD_READ(h, l, x)                ((RT_GENMASK(h, l) & (x)) >> (l))
+
+#define PL330_REG_DSR                       0x000                   /* DMA Manager Status Register */
+#define PL330_REG_DPC                       0x004                   /* DMA Program Counter Register */
+#define PL330_REG_INTEN                     0x020                   /* Interrupt Enable Register */
+#define PL330_REG_INT_EVENT_RIS             0x024                   /* Event-Interrupt Raw Status Register */
+#define PL330_REG_INTMIS                    0x028                   /* Interrupt Status Register */
+#define PL330_REG_INTCLR                    0x02c                   /* Interrupt Clear Register */
+#define PL330_REG_FSRD                      0x030                   /* Fault Status DMA Manager Register */
+#define PL330_REG_FSRC                      0x034                   /* Fault Status DMA Channel Register */
+#define PL330_REG_FTRD                      0x038                   /* Fault Type DMA Manager Register */
+#define PL330_REG_FTR(n)                    (0x040 + (n) * 0x4)     /* Fault Type DMA Channel [n] */
+#define   PL330_FT_UNDEF_INSTR              RT_BIT(0)
+#define   PL330_FT_OPERAND_INVALID          RT_BIT(1)
+#define   PL330_FT_DMAGO_ERR                RT_BIT(4)
+#define   PL330_FT_EVENT_ERR                RT_BIT(5)
+#define   PL330_FT_CH_PERIPH_ERR            RT_BIT(6)
+#define   PL330_FT_CH_RDWR_ERR              RT_BIT(7)
+#define   PL330_FT_ST_DATA_UNAVAILABLE      RT_BIT(12)
+#define   PL330_FT_FIFOEMPTY_ERR            RT_BIT(13)
+#define   PL330_FT_INSTR_FETCH_ERR          RT_BIT(16)
+#define   PL330_FT_DATA_WRITE_ERR           RT_BIT(17)
+#define   PL330_FT_DATA_READ_ERR            RT_BIT(18)
+#define   PL330_FT_DBG_INSTR                RT_BIT(30)
+#define   PL330_FT_LOCKUP_ERR               RT_BIT(31)
+#define PL330_REG_CSR(n)                    (0x100 + (n) * 0x8)     /* Channel [n] Status Register */
+#define   PL330_CS_STOP                     0x0
+#define   PL330_CS_EXEC                     0x1
+#define   PL330_CS_CMISS                    0x2
+#define   PL330_CS_UPDTPC                   0x3
+#define   PL330_CS_WFE                      0x4
+#define   PL330_CS_ATBRR                    0x5
+#define   PL330_CS_QBUSY                    0x6
+#define   PL330_CS_WFP                      0x7
+#define   PL330_CS_KILL                     0x8
+#define   PL330_CS_CMPLT                    0x9
+#define   PL330_CS_FLTCMP                   0xe
+#define   PL330_CS_FAULT                    0xf
+#define PL330_REG_CPC(n)                    (0x104 + (n) * 0x8)     /* Channel [n] Program Counter Register */
+#define PL330_REG_SAR(n)                    (0x0400 + (n) * 0x20)   /* Channel [n] Source Address Register */
+#define PL330_REG_DAR(n)                    (0x0404 + (n) * 0x20)   /* Channel [n] Destination Address Register */
+#define PL330_REG_CCR(n)                    (0x0408 + (n) * 0x20)   /* Channel [n] Channel Control Register */
+#define PL330_REG_LC0(n)                    (0x040c + (n) * 0x20)   /* Channel [n] Loop Counter 0 Register */
+#define PL330_REG_LC1(n)                    (0x0410 + (n) * 0x20)   /* Channel [n] Loop Counter 1 Register */
+#define PL330_REG_DBGSTATUS                 0xd00                   /* Debug Status Register */
+#define   PL330_DBGSTATUS_IDLE              0
+#define   PL330_DBGSTATUS_BUSY              RT_BIT(0)
+#define PL330_REG_DBGCMD                    0xd04                   /* Debug Command Register */
+#define PL330_REG_DBGINST0                  0xd08                   /* Debug Instruction-0 Register */
+#define PL330_REG_DBGINST1                  0xd0c                   /* Debug Instruction-1 Register */
+#define PL330_REG_CR(n)                     (0xe00 + (n) * 0x4)     /* Configuration Register [n] */
+#define   PL330_CR0_PERIPH_REQ_SET          RT_BIT(0)
+#define   PL330_CR0_MGR_NS_AT_RST(x)        _FIELD_READ(2, 2, x)
+#define   PL330_CR0_NUM_CHNLS(x)            _FIELD_READ(6, 4, x)
+#define   PL330_CR0_NUM_PERIPH(x)           _FIELD_READ(16, 12, x)
+#define   PL330_CR0_NUM_EVENTS(x)           _FIELD_READ(21, 17, x)
+#define PL330_REG_CRD                       0x0e14                  /* Configuration Register */
+#define   PL330_CRD_DATA_WIDTH(x)           _FIELD_READ(2, 0, x)
+#define   PL330_CRD_WR_CAP(x)               _FIELD_READ(6, 4, x)
+#define   PL330_CRD_WR_Q_DEP(x)             _FIELD_READ(11, 8, x)
+#define   PL330_CRD_RD_CAP(x)               _FIELD_READ(14, 12, x)
+#define   PL330_CRD_RD_Q_DEP(x)             _FIELD_READ(19, 16, x)
+#define   PL330_CRD_DATA_BUFFER_DEP(x)      _FIELD_READ(29, 20, x)
+#define PL330_REG_WDT                       0x0e80                  /* DMA Watchdog Register */
+#define PL330_REG_PERIPH_ID                 0x0fe0                  /* Periph ID Register */
+#define   PL330_PERIPH_REV(x)               _FIELD_READ(23, 20, x)
+#define   PL330_PERIPH_REV_R0P0             0
+#define   PL330_PERIPH_REV_R1P0             1
+#define   PL330_PERIPH_REV_R1P1             2
+
+#define PL330_CMD_DMAADDH                   0x54
+#define PL330_CMD_DMAEND                    0x00
+#define PL330_CMD_DMAFLUSHP                 0x35
+#define PL330_CMD_DMAGO                     0xa0
+#define PL330_CMD_DMALD                     0x04
+#define PL330_CMD_DMALDP                    0x25
+#define PL330_CMD_DMALP                     0x20
+#define PL330_CMD_DMALPEND                  0x28
+#define PL330_CMD_DMAKILL                   0x01
+#define PL330_CMD_DMAMOV                    0xbc
+#define PL330_CMD_DMANOP                    0x18
+#define PL330_CMD_DMARMB                    0x12
+#define PL330_CMD_DMASEV                    0x34
+#define PL330_CMD_DMAST                     0x08
+#define PL330_CMD_DMASTP                    0x29
+#define PL330_CMD_DMASTZ                    0x0c
+#define PL330_CMD_DMAWFE                    0x36
+#define PL330_CMD_DMAWFP                    0x30
+#define PL330_CMD_DMAWMB                    0x13
+
+#define PL330_SIZE_DMAADDH                  3
+#define PL330_SIZE_DMAEND                   1
+#define PL330_SIZE_DMAFLUSHP                2
+#define PL330_SIZE_DMALD                    1
+#define PL330_SIZE_DMALDP                   2
+#define PL330_SIZE_DMALP                    2
+#define PL330_SIZE_DMALPEND                 2
+#define PL330_SIZE_DMAKILL                  1
+#define PL330_SIZE_DMAMOV                   6
+#define PL330_SIZE_DMANOP                   1
+#define PL330_SIZE_DMARMB                   1
+#define PL330_SIZE_DMASEV                   2
+#define PL330_SIZE_DMAST                    1
+#define PL330_SIZE_DMASTP                   2
+#define PL330_SIZE_DMASTZ                   1
+#define PL330_SIZE_DMAWFE                   2
+#define PL330_SIZE_DMAWFP                   2
+#define PL330_SIZE_DMAWMB                   1
+#define PL330_SIZE_DMAGO                    6
+
+#define PL330_DIR_SAR                       0
+#define PL330_DIR_CCR                       1
+#define PL330_DIR_DAR                       2
+
+#define PL330_SRC_INC                       RT_BIT(0)
+#define PL330_SRC_BURST_SIZE_SHIFT          1
+#define PL330_SRC_BURST_LEN_SHIFT           4
+#define PL330_DST_INC                       RT_BIT(14)
+#define PL330_DST_BURST_SIZE_SHIFT          15
+#define PL330_DST_BURST_LEN_SHIFT           18
+
+#define PL330_COND_SINGLE                   0
+#define PL330_COND_BURST                    1
+#define PL330_COND_ALWAYS                   2
+
+#define PL330_MICROCODE_SIZE                128
+#define AMBA_NR_IRQS                        9
+
+struct pl330_chan
+{
+    struct rt_dma_chan parent;
+
+    rt_bool_t enabled;
+    rt_size_t size;
+
+    void *microcode;
+    rt_size_t microcode_len;
+    rt_ubase_t microcode_dma;
+    rt_uint8_t microcode_raw[PL330_MICROCODE_SIZE + 4]; /* For alignment */
+};
+
+struct pl330
+{
+    struct rt_dma_controller parent;
+
+    void *regs;
+    int irqs_nr;
+    int irqs[AMBA_NR_IRQS];
+
+#define PL330_QUIRK_BROKEN_NO_FLUSHP    RT_BIT(0)
+#define PL330_QUIRK_PERIPH_BURST        RT_BIT(1)
+    rt_uint32_t quirk;
+
+    rt_uint32_t mode_ns;
+    rt_uint32_t num_chan;
+    rt_uint32_t num_peri;
+    rt_uint32_t num_events;
+    rt_uint32_t data_width;
+    rt_uint32_t data_buffer_dep;
+    rt_uint32_t ins;
+
+    struct pl330_chan *chans;
+
+    struct rt_clk *pclk;
+    struct rt_reset_control *rstc;
+    struct rt_reset_control *rstc_ocp;
+};
+#define raw_to_pl330(raw)   rt_container_of(raw, struct pl330, parent)
+
+static void pl330_read_config(struct pl330 *pl330)
+{
+    rt_uint32_t value;
+    void *regs = pl330->regs;
+
+    value = HWREG32(regs + PL330_REG_CR(0));
+    pl330->mode_ns = !!PL330_CR0_MGR_NS_AT_RST(value);
+    pl330->num_chan = PL330_CR0_NUM_CHNLS(value) + 1;
+    pl330->num_events = PL330_CR0_NUM_EVENTS(value) + 1;
+
+    if (value & PL330_CR0_PERIPH_REQ_SET)
+    {
+        pl330->num_peri = PL330_CR0_NUM_PERIPH(value) + 1;
+    }
+
+    value = HWREG32(regs + PL330_REG_CRD);
+    pl330->data_width = 8 * (1 << PL330_CRD_DATA_WIDTH(value));
+    pl330->data_buffer_dep = PL330_CRD_DATA_BUFFER_DEP(value) + 1;
+
+    pl330->ins = HWREG32(regs + PL330_REG_CR(3));
+}
+
+static rt_err_t pl330_ccr_config(struct rt_dma_slave_config *conf, rt_uint32_t *ccr)
+{
+    *ccr = PL330_SRC_INC | PL330_DST_INC;
+    *ccr |= conf->src_maxburst << PL330_SRC_BURST_LEN_SHIFT;
+    *ccr |= conf->dst_maxburst << PL330_DST_BURST_LEN_SHIFT;
+
+    switch (conf->src_addr_width)
+    {
+    case RT_DMA_SLAVE_BUSWIDTH_1_BYTE:
+        *ccr |= 0 << PL330_SRC_BURST_SIZE_SHIFT;
+        break;
+    case RT_DMA_SLAVE_BUSWIDTH_2_BYTES:
+        *ccr |= 1 << PL330_SRC_BURST_SIZE_SHIFT;
+        break;
+    case RT_DMA_SLAVE_BUSWIDTH_4_BYTES:
+        *ccr |= 2 << PL330_SRC_BURST_SIZE_SHIFT;
+        break;
+
+    default:
+        return -RT_EINVAL;
+    }
+
+    switch (conf->dst_addr_width)
+    {
+    case RT_DMA_SLAVE_BUSWIDTH_1_BYTE:
+        *ccr |= 0 << PL330_DST_BURST_SIZE_SHIFT;
+        break;
+    case RT_DMA_SLAVE_BUSWIDTH_2_BYTES:
+        *ccr |= 1 << PL330_DST_BURST_SIZE_SHIFT;
+        break;
+    case RT_DMA_SLAVE_BUSWIDTH_4_BYTES:
+        *ccr |= 2 << PL330_DST_BURST_SIZE_SHIFT;
+        break;
+
+    default:
+        return -RT_EINVAL;
+    }
+
+    return RT_EOK;
+}
+
+static int pl330_cmd_dmamov(rt_uint8_t *microcode, rt_uint8_t rd, rt_uint32_t imm)
+{
+    /*
+     * DMAMOV encoding
+     * 15 14 13 12 11 10 ... 8 7 6 5 4 3 2 1 0
+     *  0  0  0  0  0 |rd[2:0]|1 0 1 1 1 1 0 0
+     *
+     * 47 ... 16
+     *  imm[31:0]
+     *
+     * rd: b000 for SAR, b001 for CCR, b010 for DAR
+     */
+
+    *microcode++ = PL330_CMD_DMAMOV;
+    *microcode++ = rd;
+    *microcode++ = imm;
+    *microcode++ = imm >> 8;
+    *microcode++ = imm >> 16;
+    *microcode++ = imm >> 24;
+
+    LOG_D("DMAMOV %s, %#x", ((const char *const []){
+            [PL330_DIR_SAR] = "SAR",
+            [PL330_DIR_CCR] = "CCR",
+            [PL330_DIR_DAR] = "DAR"
+        })[rd], imm);
+
+    return PL330_SIZE_DMAMOV;
+}
+
+static int pl330_cmd_dmald(rt_uint8_t *microcode, rt_uint32_t cond)
+{
+    /*
+     * DMALD encoding
+     * 7 6 5 4 3 2 1  0
+     * 0 0 0 0 0 1 bs x
+     */
+    *microcode = PL330_CMD_DMALD;
+
+    if (cond == PL330_COND_SINGLE)
+    {
+        *microcode |= (0 << 1) | (1 << 0);
+    }
+    else if (cond == PL330_COND_BURST)
+    {
+        *microcode |= (1 << 1) | (1 << 0);
+    }
+
+    LOG_D("DMALD %c", ((char []){
+            [PL330_COND_SINGLE] = 'S',
+            [PL330_COND_BURST] = 'B',
+            [PL330_COND_ALWAYS] = 'A'
+        })[cond]);
+
+    return PL330_SIZE_DMALD;
+}
+
+static int pl330_cmd_dmast(rt_uint8_t *microcode, rt_uint32_t cond)
+{
+    /*
+     * DMAST encoding
+     * 7 6 5 4 3 2 1  0
+     * 0 0 0 0 1 0 bs x
+     */
+    *microcode = PL330_CMD_DMAST;
+
+    if (cond == PL330_COND_SINGLE)
+    {
+        *microcode |= (0 << 1) | (1 << 0);
+    }
+    else if (cond == PL330_COND_BURST)
+    {
+        *microcode |= (1 << 1) | (1 << 0);
+    }
+
+    LOG_D("DMAST %c", ((char []){
+            [PL330_COND_SINGLE] = 'S',
+            [PL330_COND_BURST] = 'B',
+            [PL330_COND_ALWAYS] = 'A'
+        })[cond]);
+
+    return PL330_SIZE_DMAST;
+}
+
+static int pl330_cmd_dmalp(rt_uint8_t *microcode, rt_uint8_t lc, rt_uint16_t loops)
+{
+    /*
+     * DMALP encoding
+     * 15   ...   8 7 6 5 4 3 2 1  0
+     * | iter[7:0] |0 0 1 0 0 0 lc 0
+     */
+    *microcode++ = PL330_CMD_DMALP | ((lc & 1) << 1);
+    *microcode = loops - 1;
+
+    LOG_D("DMALP %u (lc: %c)", loops - 1, lc ? '1' : '0');
+
+    return PL330_SIZE_DMALP;
+}
+
+static int pl330_cmd_dmalpend(rt_uint8_t *microcode, rt_uint32_t cond,
+        rt_bool_t forever, rt_uint32_t loop, rt_uint8_t bjump)
+{
+    /*
+     * DMALPEND encoding
+     * 15       ...        8 7 6 5 4  3 2  1  0
+     * | backward_jump[7:0] |0 0 1 nf 1 lc bs x
+     */
+    *microcode = PL330_CMD_DMALPEND;
+
+    if (loop)
+    {
+        *microcode |= (1 << 2);
+    }
+
+    if (!forever)
+    {
+        *microcode |= (1 << 4);
+    }
+
+    if (cond == PL330_COND_SINGLE)
+    {
+        *microcode |= (0 << 1) | (1 << 0);
+    }
+    else if (cond == PL330_COND_BURST)
+    {
+        *microcode |= (1 << 1) | (1 << 0);
+    }
+
+    ++microcode;
+
+    *microcode = bjump;
+
+    LOG_D("DMALPEND %c (%sloop: %c bjump: %d)", ((char []){
+            [PL330_COND_SINGLE] = 'S',
+            [PL330_COND_BURST] = 'B',
+            [PL330_COND_ALWAYS] = 'A'
+        })[cond], forever ? "FE, " : "", loop ? 'Y' : 'N', bjump);
+
+    return PL330_SIZE_DMALPEND;
+}
+
+static int pl330_cmd_dmasev(rt_uint8_t *microcode, int event)
+{
+    *microcode++ = PL330_CMD_DMASEV;
+    *microcode++ = (event & 0x1f) << 3;
+
+    LOG_D("DMASEV %u", event & 0x1f);
+
+    return PL330_SIZE_DMASEV;
+}
+
+static int pl330_cmd_dmaend(rt_uint8_t *microcode)
+{
+    /*
+     * DMAEND encoding:
+     * 7 6 5 4 3 2 1 0
+     * 0 0 0 0 0 0 0 0
+     */
+    *microcode = PL330_CMD_DMAEND;
+
+    LOG_D("DMAEND");
+
+    return PL330_SIZE_DMAEND;
+}
+
+static rt_uint32_t pl330_chan_id(struct pl330 *pl330, struct pl330_chan *pc)
+{
+    return pc - pl330->chans;
+}
+
+static struct rt_dma_chan *pl330_dma_request_chan(struct rt_dma_controller *ctrl,
+        struct rt_device *slave, void *fw_data)
+{
+    int idx = -1;
+    struct pl330_chan *pc;
+    struct pl330 *pl330 = raw_to_pl330(ctrl);
+    struct rt_ofw_cell_args *args = fw_data;
+
+    if (args && (idx = args->args[0]) >= pl330->num_chan)
+    {
+        return rt_err_ptr(-RT_EINVAL);
+    }
+
+    if (idx >= 0)
+    {
+        pc = &pl330->chans[idx];
+
+        if (pc->enabled)
+        {
+            return rt_err_ptr(-RT_EBUSY);
+        }
+    }
+    else
+    {
+        /*
+         * Memory-to-memory requests usually arrive after the DM core has been
+         * initialized, so just return an unused channel.
+         */
+        for (int i = 0; i < pl330->num_chan; ++i)
+        {
+            pc = &pl330->chans[i];
+
+            if (!pc->enabled)
+            {
+                goto _found;
+            }
+        }
+
+        return RT_NULL;
+    }
+
+_found:
+    pc->enabled = RT_TRUE;
+    HWREG32(pl330->regs + PL330_REG_INTEN) |= RT_BIT(pl330_chan_id(pl330, pc));
+
+    return &pc->parent;
+}
+
+static rt_err_t pl330_dma_release_chan(struct rt_dma_chan *chan)
+{
+    struct pl330_chan *pc = rt_container_of(chan, struct pl330_chan, parent);
+
+    pc->enabled = RT_FALSE;
+
+    return RT_EOK;
+}
+
+static rt_err_t pl330_dma_start(struct rt_dma_chan *chan)
+{
+    struct pl330_chan *pc = rt_container_of(chan, struct pl330_chan, parent);
+    struct pl330 *pl330 = raw_to_pl330(chan->ctrl);
+
+    while (HWREG32(pl330->regs + PL330_REG_DBGSTATUS) & PL330_DBGSTATUS_BUSY)
+    {
+        rt_hw_cpu_relax();
+    }
+
+    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, pc->microcode, pc->microcode_len);
+
+    HWREG32(pl330->regs + PL330_REG_DBGINST0) = ((pl330_chan_id(pl330, pc) + 1) << 8) | (PL330_CMD_DMAGO << 16);
+    HWREG32(pl330->regs + PL330_REG_DBGINST1) = pc->microcode_dma;
+    HWREG32(pl330->regs + PL330_REG_DBGCMD) = 0;
+
+    return RT_EOK;
+}
+
+static rt_err_t pl330_dma_stop(struct rt_dma_chan *chan)
+{
+    struct pl330_chan *pc = rt_container_of(chan, struct pl330_chan, parent);
+    struct pl330 *pl330 = raw_to_pl330(chan->ctrl);
+
+    while (HWREG32(pl330->regs + PL330_REG_DBGSTATUS) & PL330_DBGSTATUS_BUSY)
+    {
+        rt_hw_cpu_relax();
+    }
+
+    HWREG32(pl330->regs + PL330_REG_DBGINST0) = ((pl330_chan_id(pl330, pc) + 1) << 8) | PL330_CMD_DMAKILL;
+    HWREG32(pl330->regs + PL330_REG_DBGINST1) = 0;
+    HWREG32(pl330->regs + PL330_REG_DBGCMD) = 0;
+
+    return RT_EOK;
+}
+
+static rt_err_t pl330_dma_config(struct rt_dma_chan *chan,
+        struct rt_dma_slave_config *conf)
+{
+    return RT_EOK;
+}
+
+static rt_err_t pl330_dma_prep_memcpy(struct rt_dma_chan *chan,
+        rt_ubase_t dma_addr_src, rt_ubase_t dma_addr_dst, rt_size_t len)
+{
+    void *mc;
+    rt_err_t err;
+    rt_uint32_t value;
+    rt_size_t burst_bytes;
+    rt_uint8_t ljmp_inner, ljmp_outer;
+    rt_uint16_t loop, outer, rem, inner_first;
+    struct pl330_chan *pc = rt_container_of(chan, struct pl330_chan, parent);
+    struct pl330 *pl330 = raw_to_pl330(chan->ctrl);
+    struct rt_dma_slave_config *conf = &chan->conf;
+
+    mc = pc->microcode;
+
+    if ((err = pl330_ccr_config(conf, &value)))
+    {
+        return err;
+    }
+    mc += pl330_cmd_dmamov(mc, PL330_DIR_CCR, value);
+
+    mc += pl330_cmd_dmamov(mc, PL330_DIR_SAR, dma_addr_src);
+
+    mc += pl330_cmd_dmamov(mc, PL330_DIR_DAR, dma_addr_dst);
+
+    burst_bytes = conf->src_addr_width * conf->src_maxburst;
+
+    if (!burst_bytes || !len)
+    {
+        return -RT_EINVAL;
+    }
+
+    if (len % burst_bytes)
+    {
+        LOG_E("Memcpy len(%lu) must be multiple of burst(%lu)", len, burst_bytes);
+        return -RT_EINVAL;
+    }
+
+    /* Total iterations */
+    loop = len / burst_bytes;
+    if (loop == 0)
+    {
+        return -RT_EINVAL;
+    }
+
+    outer = loop / 256;
+    rem = loop % 256;
+
+    if (outer == 0)
+    {
+        outer = 1;
+    }
+
+    inner_first = (loop < 256) ? loop : 256;
+
+    mc += pl330_cmd_dmalp(mc, 1, outer);
+    ljmp_outer = mc - pc->microcode;
+
+    mc += pl330_cmd_dmalp(mc, 0, inner_first);
+    ljmp_inner = mc - pc->microcode;
+
+    mc += pl330_cmd_dmald(mc, PL330_COND_ALWAYS);
+    mc += pl330_cmd_dmast(mc, PL330_COND_ALWAYS);
+
+    mc += pl330_cmd_dmalpend(mc, PL330_COND_ALWAYS, RT_FALSE, 0,
+                             mc - pc->microcode - ljmp_inner);
+
+    if (rem && loop > 256)
+    {
+        rt_uint8_t ljmp_inner2;
+
+        mc += pl330_cmd_dmalp(mc, 0, rem);
+        ljmp_inner2 = mc - pc->microcode;
+
+        mc += pl330_cmd_dmald(mc, PL330_COND_ALWAYS);
+        mc += pl330_cmd_dmast(mc, PL330_COND_ALWAYS);
+
+        mc += pl330_cmd_dmalpend(mc, PL330_COND_ALWAYS, RT_FALSE, 0,
+                                 mc - pc->microcode - ljmp_inner2);
+    }
+
+    mc += pl330_cmd_dmalpend(mc, PL330_COND_ALWAYS, RT_FALSE, 1,
+                             mc - pc->microcode - ljmp_outer);
+
+    mc += pl330_cmd_dmasev(mc, pl330_chan_id(pl330, pc));
+
+    mc += pl330_cmd_dmaend(mc);
+
+    pc->size = len;
+    pc->microcode_len = mc - pc->microcode;
+
+    return RT_EOK;
+}
+
+static rt_err_t pl330_dma_prep_cyclic(struct rt_dma_chan *chan,
+        rt_ubase_t dma_buf_addr, rt_size_t buf_len, rt_size_t period_len,
+        enum rt_dma_transfer_direction dir)
+{
+    void *mc;
+    rt_err_t err;
+    rt_uint32_t ccr_val;
+    rt_size_t burst_bytes;
+    rt_uint16_t period_loop, total_periods;
+    struct pl330_chan *pc = rt_container_of(chan, struct pl330_chan, parent);
+    struct pl330 *pl330 = raw_to_pl330(chan->ctrl);
+    struct rt_dma_slave_config *conf = &chan->conf;
+
+    mc = pc->microcode;
+
+    if ((err = pl330_ccr_config(conf, &ccr_val)))
+    {
+        return err;
+    }
+
+    mc += pl330_cmd_dmamov(mc, PL330_DIR_CCR, ccr_val);
+
+    if (dir == RT_DMA_MEM_TO_DEV)
+    {
+        mc += pl330_cmd_dmamov(mc, PL330_DIR_SAR, dma_buf_addr);
+        mc += pl330_cmd_dmamov(mc, PL330_DIR_DAR, conf->dst_addr);
+    }
+    else if (dir == RT_DMA_DEV_TO_MEM)
+    {
+        mc += pl330_cmd_dmamov(mc, PL330_DIR_SAR, conf->src_addr);
+        mc += pl330_cmd_dmamov(mc, PL330_DIR_DAR, dma_buf_addr);
+    }
+
+    burst_bytes = conf->src_addr_width * conf->src_maxburst;
+    if (!burst_bytes)
+    {
+        return -RT_EINVAL;
+    }
+
+    if (period_len % burst_bytes)
+    {
+        return -RT_EINVAL;
+    }
+
+    if (buf_len % period_len)
+    {
+        return -RT_EINVAL;
+    }
+
+    period_loop = period_len / burst_bytes;
+
+    if (period_loop == 0 || period_loop > 255)
+    {
+        return -RT_EINVAL;
+    }
+
+    total_periods = buf_len / period_len;
+    if (total_periods == 0)
+    {
+        return -RT_EINVAL;
+    }
+
+    /* Outer Loop */
+    mc += pl330_cmd_dmalp(mc, 1 /* LC1 */, 0 /* Infinite loop */);
+    rt_uint8_t outer_ljmp = mc - pc->microcode;
+
+    /* Inner Loop */
+    mc += pl330_cmd_dmalp(mc, 0 /* LC0 */, period_loop);
+    rt_uint8_t inner_ljmp = mc - pc->microcode;
+
+    mc += pl330_cmd_dmald(mc, PL330_COND_ALWAYS);
+    mc += pl330_cmd_dmast(mc, PL330_COND_ALWAYS);
+
+    mc += pl330_cmd_dmalpend(mc, PL330_COND_ALWAYS,
+                             RT_FALSE, 0,
+                             mc - pc->microcode - inner_ljmp);
+
+    mc += pl330_cmd_dmasev(mc, pl330_chan_id(pl330, pc));
+
+    /* Outer loop end */
+    mc += pl330_cmd_dmalpend(mc, PL330_COND_ALWAYS,
+                             RT_FALSE, 1,  /* LC1 */
+                             mc - pc->microcode - outer_ljmp);
+
+    pc->microcode_len = mc - pc->microcode;
+
+    return RT_EOK;
+}
+
+static rt_err_t pl330_dma_prep_single(struct rt_dma_chan *chan,
+        rt_ubase_t dma_buf_addr, rt_size_t buf_len,
+        enum rt_dma_transfer_direction dir)
+{
+    void *mc;
+    rt_err_t err;
+    rt_uint16_t loop;
+    rt_uint32_t ccr_val;
+    rt_size_t burst_bytes;
+    struct pl330_chan *pc = rt_container_of(chan, struct pl330_chan, parent);
+    struct pl330 *pl330 = raw_to_pl330(chan->ctrl);
+    struct rt_dma_slave_config *conf = &chan->conf;
+
+    mc = pc->microcode;
+
+    if ((err = pl330_ccr_config(conf, &ccr_val)))
+    {
+        return err;
+    }
+
+    mc += pl330_cmd_dmamov(mc, PL330_DIR_CCR, ccr_val);
+
+    if (dir == RT_DMA_MEM_TO_DEV)
+    {
+        mc += pl330_cmd_dmamov(mc, PL330_DIR_SAR, dma_buf_addr);
+        mc += pl330_cmd_dmamov(mc, PL330_DIR_DAR, conf->dst_addr);
+    }
+    else if (dir == RT_DMA_DEV_TO_MEM)
+    {
+        mc += pl330_cmd_dmamov(mc, PL330_DIR_SAR, conf->src_addr);
+        mc += pl330_cmd_dmamov(mc, PL330_DIR_DAR, dma_buf_addr);
+    }
+
+    burst_bytes = conf->src_addr_width * conf->src_maxburst;
+    if (!burst_bytes || !buf_len)
+    {
+        return -RT_EINVAL;
+    }
+
+    if (buf_len % burst_bytes)
+    {
+        return -RT_EINVAL;
+    }
+
+    loop = buf_len / burst_bytes;
+    if (loop == 0 || loop > 255)
+    {
+        return -RT_EINVAL;
+    }
+
+    mc += pl330_cmd_dmalp(mc, 0 /* LC0 */, loop);
+    rt_uint8_t ljmp = mc - pc->microcode;
+
+    mc += pl330_cmd_dmald(mc, PL330_COND_ALWAYS);
+    mc += pl330_cmd_dmast(mc, PL330_COND_ALWAYS);
+
+    mc += pl330_cmd_dmalpend(mc,
+            PL330_COND_ALWAYS,
+            RT_FALSE,
+            0, /* LC0 */
+            mc - pc->microcode - ljmp);
+
+    mc += pl330_cmd_dmasev(mc, pl330_chan_id(pl330, pc));
+
+    mc += pl330_cmd_dmaend(mc);
+
+    pc->microcode_len = mc - pc->microcode;
+    pc->size = buf_len;
+
+    return RT_EOK;
+}
+
+static const struct rt_dma_controller_ops pl330_dma_ops =
+{
+    .request_chan = pl330_dma_request_chan,
+    .release_chan = pl330_dma_release_chan,
+    .start = pl330_dma_start,
+    .stop = pl330_dma_stop,
+    .config = pl330_dma_config,
+    .prep_memcpy = pl330_dma_prep_memcpy,
+    .prep_cyclic = pl330_dma_prep_cyclic,
+    .prep_single = pl330_dma_prep_single,
+};
+
+static void pl330_isr(int irqno, void *params)
+{
+    rt_uint32_t isr, csr;
+    struct pl330_chan *pc;
+    struct pl330 *pl330 = params;
+
+    isr = HWREG32(pl330->regs + PL330_REG_INTMIS);
+
+    for (int i = 0; i < pl330->num_chan; ++i)
+    {
+        if (!(isr & RT_BIT(i)))
+        {
+            continue;
+        }
+
+        pc = &pl330->chans[i];
+
+        HWREG32(pl330->regs + PL330_REG_INTCLR) = RT_BIT(i);
+
+        csr = HWREG32(pl330->regs + PL330_REG_CSR(i)) & 0xf;
+
+        switch (csr)
+        {
+        case PL330_CS_CMPLT:
+        case PL330_CS_STOP:
+            rt_dma_chan_done(&pc->parent, pc->size);
+            break;
+
+        case PL330_CS_FAULT:
+        case PL330_CS_FLTCMP:
+            LOG_E("Channel[%d] fault", i);
+            rt_dma_chan_done(&pc->parent, 0);
+            break;
+
+        default:
+            LOG_E("Unhandle CSR = %x", csr);
+            break;
+        }
+    }
+}
+
+static void pl330_free(struct pl330 *pl330)
+{
+    if (pl330->regs)
+    {
+        rt_iounmap(pl330->regs);
+    }
+
+    if (!rt_is_err_or_null(pl330->pclk))
+    {
+        rt_clk_disable_unprepare(pl330->pclk);
+        rt_clk_put(pl330->pclk);
+    }
+
+    if (!rt_is_err_or_null(pl330->rstc))
+    {
+        rt_reset_control_assert(pl330->rstc);
+        rt_reset_control_put(pl330->rstc);
+    }
+
+    if (!rt_is_err_or_null(pl330->rstc_ocp))
+    {
+        rt_reset_control_assert(pl330->rstc_ocp);
+        rt_reset_control_put(pl330->rstc_ocp);
+    }
+
+    if (pl330->chans)
+    {
+        rt_free(pl330->chans);
+    }
+
+    rt_free(pl330);
+}
+
+static rt_err_t pl330_probe(struct rt_platform_device *pdev)
+{
+    rt_err_t err;
+    char isr_name[RT_NAME_MAX];
+    struct rt_device *dev = &pdev->parent;
+    struct pl330 *pl330 = rt_calloc(1, sizeof(*pl330));
+
+    if (!pl330)
+    {
+        return -RT_ENOMEM;
+    }
+
+    pl330->regs = rt_dm_dev_iomap(dev, 0);
+
+    if (!pl330->regs)
+    {
+        err = -RT_EIO;
+        goto _fail;
+    }
+
+    pl330->irqs_nr = rt_dm_dev_get_irq_count(dev);
+
+    if (pl330->irqs_nr < 0)
+    {
+        err = pl330->irqs_nr;
+        goto _fail;
+    }
+    else if (pl330->irqs_nr == 0 || pl330->irqs_nr > AMBA_NR_IRQS)
+    {
+        err = -RT_EINVAL;
+        goto _fail;
+    }
+
+    for (int i = 0; i < pl330->irqs_nr; ++i)
+    {
+        int irq = rt_dm_dev_get_irq(dev, i);
+
+        if (irq < 0)
+        {
+            err = irq;
+            goto _fail;
+        }
+
+        pl330->irqs[i] = irq;
+    }
+
+    pl330->pclk = rt_clk_get_by_name(dev, "apb_pclk");
+
+    if (rt_is_err(pl330->pclk))
+    {
+        err = rt_ptr_err(pl330->pclk);
+        goto _fail;
+    }
+
+    if ((err = rt_clk_prepare_enable(pl330->pclk)))
+    {
+        goto _fail;
+    }
+
+    pl330->rstc = rt_reset_control_get_by_name(dev, "dma");
+
+    if (rt_is_err(pl330->rstc))
+    {
+        err = rt_ptr_err(pl330->rstc);
+        goto _fail;
+    }
+
+    if (pl330->rstc && (err = rt_reset_control_deassert(pl330->rstc)))
+    {
+        goto _fail;
+    }
+
+    pl330->rstc_ocp = rt_reset_control_get_by_name(dev, "dma-ocp");
+
+    if (rt_is_err(pl330->rstc_ocp))
+    {
+        err = rt_ptr_err(pl330->rstc_ocp);
+        goto _fail;
+    }
+
+    if (pl330->rstc_ocp && (err = rt_reset_control_deassert(pl330->rstc_ocp)))
+    {
+        goto _fail;
+    }
+
+    if (rt_dm_dev_prop_read_bool(dev, "arm,pl330-broken-no-flushp"))
+    {
+        pl330->quirk |= PL330_QUIRK_BROKEN_NO_FLUSHP;
+    }
+
+    if (rt_dm_dev_prop_read_bool(dev, "arm,pl330-periph-burst"))
+    {
+        pl330->quirk |= PL330_QUIRK_PERIPH_BURST;
+    }
+
+    pl330_read_config(pl330);
+
+    pl330->chans = rt_calloc(pl330->num_chan, sizeof(pl330->chans[0]));
+
+    if (!pl330->chans)
+    {
+        err = -RT_ENOMEM;
+        goto _fail;
+    }
+
+    for (int i = 0; i < pl330->num_chan; ++i)
+    {
+        int offset = 0;
+        struct pl330_chan *chan = &pl330->chans[i];
+
+        chan->microcode_dma = (rt_ubase_t)rt_kmem_v2p(chan->microcode_raw);
+        offset = RT_ALIGN(chan->microcode_dma, 4) - chan->microcode_dma;
+
+        chan->microcode = chan->microcode_raw + offset;
+        chan->microcode_dma += offset;
+    }
+
+    pl330->parent.dev = dev;
+    pl330->parent.ops = &pl330_dma_ops;
+    rt_dma_controller_add_direction(&pl330->parent, RT_DMA_MEM_TO_MEM);
+    rt_dma_controller_add_direction(&pl330->parent, RT_DMA_MEM_TO_DEV);
+    rt_dma_controller_add_direction(&pl330->parent, RT_DMA_DEV_TO_MEM);
+    rt_dma_controller_set_addr_mask(&pl330->parent, RT_DMA_ADDR_MASK(32));
+
+    if ((err = rt_dma_controller_register(&pl330->parent)))
+    {
+        goto _fail;
+    }
+
+    dev->user_data = pl330;
+
+    for (int i = 0; i < pl330->irqs_nr; ++i)
+    {
+        rt_snprintf(isr_name, RT_NAME_MAX, "%s-%u", rt_dm_dev_get_name(dev), i);
+        rt_hw_interrupt_install(pl330->irqs[i], pl330_isr, pl330, isr_name);
+        rt_hw_interrupt_umask(pl330->irqs[i]);
+    }
+
+    return RT_EOK;
+
+_fail:
+    pl330_free(pl330);
+
+    return err;
+}
+
+static rt_err_t pl330_remove(struct rt_platform_device *pdev)
+{
+    struct pl330 *pl330 = pdev->parent.user_data;
+
+    for (int i = 0; i < pl330->irqs_nr; ++i)
+    {
+        rt_hw_interrupt_mask(pl330->irqs[i]);
+        rt_pic_detach_irq(pl330->irqs[i], pl330);
+    }
+
+    rt_dma_controller_unregister(&pl330->parent);
+
+    pl330_free(pl330);
+
+    return RT_EOK;
+}
+
+static const struct rt_ofw_node_id pl330_ofw_ids[] =
+{
+    { .compatible = "arm,pl330" },
+    { /* sentinel */ }
+};
+
+static struct rt_platform_driver pl330_driver =
+{
+    .name = "dma-pl330",
+    .ids = pl330_ofw_ids,
+
+    .probe = pl330_probe,
+    .remove = pl330_remove,
+};
+
+static int pl330_drv_register(void)
+{
+    rt_platform_driver_register(&pl330_driver);
+
+    return 0;
+}
+INIT_SUBSYS_EXPORT(pl330_drv_register);

+ 116 - 17
components/drivers/dma/dma.c

@@ -19,6 +19,22 @@
 static rt_list_t dmac_nodes = RT_LIST_OBJECT_INIT(dmac_nodes);
 static RT_DEFINE_SPINLOCK(dmac_nodes_lock);
 
+static void dma_lock(struct rt_dma_controller *ctrl)
+{
+    if (rt_thread_self())
+    {
+        rt_mutex_take(&ctrl->mutex, RT_WAITING_FOREVER);
+    }
+}
+
+static void dma_unlock(struct rt_dma_controller *ctrl)
+{
+    if (rt_thread_self())
+    {
+        rt_mutex_release(&ctrl->mutex);
+    }
+}
+
 rt_err_t rt_dma_controller_register(struct rt_dma_controller *ctrl)
 {
     const char *dev_name;
@@ -64,11 +80,11 @@ rt_err_t rt_dma_controller_unregister(struct rt_dma_controller *ctrl)
         return -RT_EINVAL;
     }
 
-    rt_mutex_take(&ctrl->mutex, RT_WAITING_FOREVER);
+    dma_lock(ctrl);
 
     if (!rt_list_isempty(&ctrl->channels_nodes))
     {
-        rt_mutex_release(&ctrl->mutex);
+        dma_unlock(ctrl);
         return -RT_EBUSY;
     }
 
@@ -77,7 +93,7 @@ rt_err_t rt_dma_controller_unregister(struct rt_dma_controller *ctrl)
         rt_dm_dev_unbind_fwdata(ctrl->dev, RT_NULL);
     }
 
-    rt_mutex_release(&ctrl->mutex);
+    dma_unlock(ctrl);
     rt_mutex_detach(&ctrl->mutex);
 
     rt_spin_lock(&dmac_nodes_lock);
@@ -106,11 +122,45 @@ rt_err_t rt_dma_chan_start(struct rt_dma_chan *chan)
 
     ctrl = chan->ctrl;
 
-    rt_mutex_take(&ctrl->mutex, RT_WAITING_FOREVER);
+    dma_lock(ctrl);
 
     err = ctrl->ops->start(chan);
 
-    rt_mutex_release(&ctrl->mutex);
+    dma_unlock(ctrl);
+
+    return err;
+}
+
+rt_err_t rt_dma_chan_pause(struct rt_dma_chan *chan)
+{
+    rt_err_t err;
+    struct rt_dma_controller *ctrl;
+
+    if (!chan)
+    {
+        return -RT_EINVAL;
+    }
+
+    if (!chan->ctrl->ops->pause)
+    {
+        LOG_D("%s: No pause, try stop", rt_dm_dev_get_name(chan->ctrl->dev));
+        return rt_dma_chan_stop(chan);
+    }
+
+    if (chan->prep_err)
+    {
+        LOG_D("%s: Not config done", rt_dm_dev_get_name(chan->slave));
+
+        return chan->prep_err;
+    }
+
+    ctrl = chan->ctrl;
+
+    dma_lock(ctrl);
+
+    err = ctrl->ops->pause(chan);
+
+    dma_unlock(ctrl);
 
     return err;
 }
@@ -134,11 +184,11 @@ rt_err_t rt_dma_chan_stop(struct rt_dma_chan *chan)
 
     ctrl = chan->ctrl;
 
-    rt_mutex_take(&ctrl->mutex, RT_WAITING_FOREVER);
+    dma_lock(ctrl);
 
     err = ctrl->ops->stop(chan);
 
-    rt_mutex_release(&ctrl->mutex);
+    dma_unlock(ctrl);
 
     return err;
 }
@@ -188,11 +238,11 @@ rt_err_t rt_dma_chan_config(struct rt_dma_chan *chan,
         goto _end;
     }
 
-    rt_mutex_take(&ctrl->mutex, RT_WAITING_FOREVER);
+    dma_lock(ctrl);
 
     err = ctrl->ops->config(chan, conf);
 
-    rt_mutex_release(&ctrl->mutex);
+    dma_unlock(ctrl);
 
     if (!err)
     {
@@ -233,6 +283,19 @@ static rt_bool_t range_is_illegal(const char *name, const char *desc,
     return illegal;
 }
 
+static rt_bool_t addr_is_supported(const char *name, const char *desc,
+        rt_uint64_t mask, rt_ubase_t addr)
+{
+    rt_bool_t illegal = !!(addr & ~mask);
+
+    if (illegal)
+    {
+        LOG_E("%s: %s %p is out of mask %p", name, desc, addr, mask);
+    }
+
+    return illegal;
+}
+
 rt_err_t rt_dma_prep_memcpy(struct rt_dma_chan *chan,
         struct rt_dma_slave_transfer *transfer)
 {
@@ -262,6 +325,18 @@ rt_err_t rt_dma_prep_memcpy(struct rt_dma_chan *chan,
     dma_addr_dst = transfer->dst_addr;
     len = transfer->buffer_len;
 
+    if (addr_is_supported(rt_dm_dev_get_name(ctrl->dev), "source",
+        ctrl->addr_mask, conf->src_addr))
+    {
+        return -RT_ENOSYS;
+    }
+
+    if (addr_is_supported(rt_dm_dev_get_name(ctrl->dev), "dest",
+        ctrl->addr_mask, conf->dst_addr))
+    {
+        return -RT_ENOSYS;
+    }
+
     if (range_is_illegal(rt_dm_dev_get_name(ctrl->dev), "source",
         dma_addr_src, conf->src_addr))
     {
@@ -276,11 +351,11 @@ rt_err_t rt_dma_prep_memcpy(struct rt_dma_chan *chan,
 
     if (ctrl->ops->prep_memcpy)
     {
-        rt_mutex_take(&ctrl->mutex, RT_WAITING_FOREVER);
+        dma_lock(ctrl);
 
         err = ctrl->ops->prep_memcpy(chan, dma_addr_src, dma_addr_dst, len);
 
-        rt_mutex_release(&ctrl->mutex);
+        dma_unlock(ctrl);
     }
     else
     {
@@ -327,6 +402,12 @@ rt_err_t rt_dma_prep_cyclic(struct rt_dma_chan *chan,
     {
         dma_buf_addr = transfer->src_addr;
 
+        if (addr_is_supported(rt_dm_dev_get_name(ctrl->dev), "source",
+            ctrl->addr_mask, conf->src_addr))
+        {
+            return -RT_ENOSYS;
+        }
+
         if (range_is_illegal(rt_dm_dev_get_name(ctrl->dev), "source",
             dma_buf_addr, conf->src_addr))
         {
@@ -337,6 +418,12 @@ rt_err_t rt_dma_prep_cyclic(struct rt_dma_chan *chan,
     {
         dma_buf_addr = transfer->dst_addr;
 
+        if (addr_is_supported(rt_dm_dev_get_name(ctrl->dev), "dest",
+            ctrl->addr_mask, conf->dst_addr))
+        {
+            return -RT_ENOSYS;
+        }
+
         if (range_is_illegal(rt_dm_dev_get_name(ctrl->dev), "dest",
             dma_buf_addr, conf->dst_addr))
         {
@@ -350,12 +437,12 @@ rt_err_t rt_dma_prep_cyclic(struct rt_dma_chan *chan,
 
     if (ctrl->ops->prep_cyclic)
     {
-        rt_mutex_take(&ctrl->mutex, RT_WAITING_FOREVER);
+        dma_lock(ctrl);
 
         err = ctrl->ops->prep_cyclic(chan, dma_buf_addr,
                 transfer->buffer_len, transfer->period_len, dir);
 
-        rt_mutex_release(&ctrl->mutex);
+        dma_unlock(ctrl);
     }
     else
     {
@@ -402,6 +489,12 @@ rt_err_t rt_dma_prep_single(struct rt_dma_chan *chan,
     {
         dma_buf_addr = transfer->src_addr;
 
+        if (addr_is_supported(rt_dm_dev_get_name(ctrl->dev), "source",
+            ctrl->addr_mask, conf->src_addr))
+        {
+            return -RT_ENOSYS;
+        }
+
         if (range_is_illegal(rt_dm_dev_get_name(ctrl->dev), "source",
             dma_buf_addr, conf->src_addr))
         {
@@ -412,6 +505,12 @@ rt_err_t rt_dma_prep_single(struct rt_dma_chan *chan,
     {
         dma_buf_addr = transfer->dst_addr;
 
+        if (addr_is_supported(rt_dm_dev_get_name(ctrl->dev), "dest",
+            ctrl->addr_mask, conf->dst_addr))
+        {
+            return -RT_ENOSYS;
+        }
+
         if (range_is_illegal(rt_dm_dev_get_name(ctrl->dev), "dest",
             dma_buf_addr, conf->dst_addr))
         {
@@ -425,12 +524,12 @@ rt_err_t rt_dma_prep_single(struct rt_dma_chan *chan,
 
     if (ctrl->ops->prep_single)
     {
-        rt_mutex_take(&ctrl->mutex, RT_WAITING_FOREVER);
+        dma_lock(ctrl);
 
         err = ctrl->ops->prep_single(chan, dma_buf_addr,
                 transfer->buffer_len, dir);
 
-        rt_mutex_release(&ctrl->mutex);
+        dma_unlock(ctrl);
     }
     else
     {
@@ -556,9 +655,9 @@ struct rt_dma_chan *rt_dma_chan_request(struct rt_device *dev, const char *name)
     chan->conf_err = -RT_ERROR;
     chan->prep_err = -RT_ERROR;
 
-    rt_mutex_take(&ctrl->mutex, RT_WAITING_FOREVER);
+    dma_lock(ctrl);
     rt_list_insert_before(&ctrl->channels_nodes, &chan->list);
-    rt_mutex_release(&ctrl->mutex);
+    dma_unlock(ctrl);
 
     return chan;
 }
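
A minimal consumer-side sketch of the new pause call; the slave device, channel name and the elided config/prep steps are illustrative, and when a controller provides no pause op the core falls back to stop, as implemented above:

#include <rtthread.h>
#include <rtdevice.h>

static void dma_pause_example(struct rt_device *slave)
{
    struct rt_dma_chan *chan = rt_dma_chan_request(slave, RT_NULL);

    if (rt_is_err_or_null(chan))
    {
        return;
    }

    /* ... rt_dma_chan_config() and rt_dma_prep_*() as usual ... */

    rt_dma_chan_start(chan);

    /* Suspend the transfer; controllers without ops->pause get stopped instead */
    rt_dma_chan_pause(chan);

    rt_dma_chan_stop(chan);
    rt_dma_chan_release(chan);
}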

+ 34 - 14
components/drivers/dma/dma_pool.c

@@ -17,6 +17,7 @@
 #include <rtdbg.h>
 
 #include <mm_aspace.h>
+#include <mm_memblock.h>
 #include <dt-bindings/size.h>
 
 static RT_DEFINE_SPINLOCK(dma_pools_lock);
@@ -291,9 +292,13 @@ static rt_ubase_t dma_pool_alloc(struct rt_dma_pool *pool, rt_size_t size)
                 rt_bitmap_set_bit(pool->map, next_bit);
             }
 
+            LOG_D("%s offset = %p, pages = %d", "Alloc",
+                    pool->start + bit * ARCH_PAGE_SIZE, size);
+
             return pool->start + bit * ARCH_PAGE_SIZE;
         }
     _next:
+        ;
     }
 
     return RT_NULL;
@@ -310,6 +315,8 @@ static void dma_pool_free(struct rt_dma_pool *pool, rt_ubase_t offset, rt_size_t
     {
         rt_bitmap_clear_bit(pool->map, bit);
     }
+
+    LOG_D("%s offset = %p, pages = %d", "Free", offset, size);
 }
 
 static void *dma_alloc(struct rt_device *dev, rt_size_t size,
@@ -344,11 +351,6 @@ static void *dma_alloc(struct rt_device *dev, rt_size_t size,
             continue;
         }
 
-        if ((flags & RT_DMA_F_LINEAR) && !((pool->flags & RT_DMA_F_LINEAR)))
-        {
-            continue;
-        }
-
         *dma_handle = dma_pool_alloc(pool, size);
 
         if (*dma_handle && !(flags & RT_DMA_F_NOMAP))
@@ -357,6 +359,10 @@ static void *dma_alloc(struct rt_device *dev, rt_size_t size,
             {
                 dma_buffer = rt_ioremap_nocache((void *)*dma_handle, size);
             }
+            else if (flags & RT_DMA_F_WT)
+            {
+                dma_buffer = rt_ioremap_wt((void *)*dma_handle, size);
+            }
             else
             {
                 dma_buffer = rt_ioremap_cached((void *)*dma_handle, size);
@@ -584,20 +590,33 @@ struct rt_dma_pool *rt_dma_pool_install(rt_region_t *region)
     return pool;
 }
 
-rt_err_t rt_dma_pool_extract(rt_region_t *region_list, rt_size_t list_len,
-        rt_size_t cma_size, rt_size_t coherent_pool_size)
+rt_err_t rt_dma_pool_extract(rt_size_t cma_size, rt_size_t coherent_pool_size)
 {
     struct rt_dma_pool *pool;
-    rt_region_t *region = region_list, *region_high = RT_NULL, cma, coherent_pool;
+    struct rt_mmblk_reg *reg, *reg_high = RT_NULL;
+    struct rt_memblock *memblock = rt_memblock_get_reserved();
+    rt_region_t *region, *region_high = RT_NULL, cma, coherent_pool;
 
-    if (!region_list || !list_len || cma_size < coherent_pool_size)
+    if (!memblock)
+    {
+        return -RT_ENOSYS;
+    }
+
+    /* Coherent pool is included in CMA */
+    if (cma_size < coherent_pool_size)
     {
         return -RT_EINVAL;
     }
 
-    for (rt_size_t i = 0; i < list_len; ++i, ++region)
+    rt_slist_for_each_entry(reg, &memblock->reg_list, node)
     {
-        if (!region->name)
+        if (!reg->alloc || (reg->flags & MEMBLOCK_HOTPLUG))
+        {
+            continue;
+        }
+
+        region = &reg->memreg;
+        if (!region->name || rt_strcmp(region->name, "dma-pool"))
         {
             continue;
         }
@@ -608,6 +627,7 @@ rt_err_t rt_dma_pool_extract(rt_region_t *region_list, rt_size_t list_len,
             if ((rt_ssize_t)((4UL * SIZE_GB) - region->start) < cma_size)
             {
                 region_high = region;
+                reg_high = reg;
                 continue;
             }
 
@@ -618,6 +638,7 @@ rt_err_t rt_dma_pool_extract(rt_region_t *region_list, rt_size_t list_len,
     if (region_high)
     {
         region = region_high;
+        reg = reg_high;
         LOG_W("No available DMA zone in 4G");
 
         goto _found;
@@ -630,9 +651,6 @@ _found:
     {
         cma.start = region->start;
         cma.end = cma.start + cma_size;
-
-        /* Update input region */
-        region->start += cma_size;
     }
     else
     {
@@ -657,6 +675,8 @@ _found:
         return -RT_ENOMEM;
     }
 
+    reg->alloc = RT_FALSE;
+
     return RT_EOK;
 }
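
With the region-list parameters gone, callers now pass only the two sizes and the reserved memblock region named "dma-pool" is discovered internally. A board bring-up sketch under those assumptions (the function name, sizes and init level are illustrative):

#include <rtthread.h>
#include <rtdevice.h>

static int board_dma_pool_setup(void)
{
    /* 16 MiB of CMA, of which 2 MiB is carved out as the coherent pool */
    rt_err_t err = rt_dma_pool_extract(16UL << 20, 2UL << 20);

    if (err)
    {
        rt_kprintf("dma-pool extract failed: %d\n", (int)err);
    }

    return 0;
}
INIT_PREV_EXPORT(board_dma_pool_setup);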
 

+ 14 - 2
components/drivers/include/drivers/dma.h

@@ -81,6 +81,8 @@ struct rt_dma_controller
 
     struct rt_device *dev;
 
+#define RT_DMA_ADDR_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
+    rt_uint64_t addr_mask;
     RT_BITMAP_DECLARE(dir_cap, RT_DMA_DIR_MAX);
     const struct rt_dma_controller_ops *ops;
 
@@ -95,6 +97,7 @@ struct rt_dma_controller_ops
     rt_err_t (*release_chan)(struct rt_dma_chan *chan);
 
     rt_err_t (*start)(struct rt_dma_chan *chan);
+    rt_err_t (*pause)(struct rt_dma_chan *chan);
     rt_err_t (*stop)(struct rt_dma_chan *chan);
     rt_err_t (*config)(struct rt_dma_chan *chan, struct rt_dma_slave_config *conf);
 
@@ -164,10 +167,19 @@ rt_inline void rt_dma_controller_add_direction(struct rt_dma_controller *ctrl,
     rt_bitmap_set_bit(ctrl->dir_cap, dir);
 }
 
+rt_inline void rt_dma_controller_set_addr_mask(struct rt_dma_controller *ctrl,
+        rt_uint64_t mask)
+{
+    RT_ASSERT(ctrl != RT_NULL);
+
+    ctrl->addr_mask = mask;
+}
+
 rt_err_t rt_dma_controller_register(struct rt_dma_controller *ctrl);
 rt_err_t rt_dma_controller_unregister(struct rt_dma_controller *ctrl);
 
 rt_err_t rt_dma_chan_start(struct rt_dma_chan *chan);
+rt_err_t rt_dma_chan_pause(struct rt_dma_chan *chan);
 rt_err_t rt_dma_chan_stop(struct rt_dma_chan *chan);
 rt_err_t rt_dma_chan_config(struct rt_dma_chan *chan,
         struct rt_dma_slave_config *conf);
@@ -188,6 +200,7 @@ rt_err_t rt_dma_chan_release(struct rt_dma_chan *chan);
 #define RT_DMA_F_NOCACHE    RT_BIT(2)
 #define RT_DMA_F_DEVICE     RT_BIT(3)
 #define RT_DMA_F_NOMAP      RT_BIT(4)
+#define RT_DMA_F_WT         RT_BIT(5)
 
 #define RT_DMA_PAGE_SIZE    ARCH_PAGE_SIZE
 
@@ -228,7 +241,6 @@ rt_inline void rt_dma_device_set_ops(struct rt_device *dev,
 }
 
 struct rt_dma_pool *rt_dma_pool_install(rt_region_t *region);
-rt_err_t rt_dma_pool_extract(rt_region_t *region_list, rt_size_t list_len,
-        rt_size_t cma_size, rt_size_t coherent_pool_size);
+rt_err_t rt_dma_pool_extract(rt_size_t cma_size, rt_size_t coherent_pool_size);
 
 #endif /* __DMA_H__ */
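
On the provider side, the two additions are the optional pause op and the address mask. A hypothetical controller fragment wiring them up (the demo_* names are placeholders; the remaining mandatory ops and the ctrl->dev assignment are assumed to be filled in elsewhere):

#include <rtdevice.h>

static rt_err_t demo_dma_pause(struct rt_dma_chan *chan)
{
    /* Program the hardware to suspend this channel */
    return RT_EOK;
}

static const struct rt_dma_controller_ops demo_dma_ops =
{
    /* .request_chan, .release_chan, .start, .stop, .config, .prep_* ... */
    .pause = demo_dma_pause,
};

static rt_err_t demo_dma_register(struct rt_dma_controller *ctrl)
{
    ctrl->ops = &demo_dma_ops;

    /* A controller limited to 32-bit bus addresses declares it here so the
       core can reject out-of-range buffers before preparing a transfer */
    rt_dma_controller_set_addr_mask(ctrl, RT_DMA_ADDR_MASK(32));

    return rt_dma_controller_register(ctrl);
}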

+ 146 - 0
examples/test/dma_test.c

@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2006-2021, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-02-25     GuEe-GUI     the first version
+ */
+
+#include <rthw.h>
+#include <rtthread.h>
+#include <rtdevice.h>
+
+#if defined(RT_USING_DMA) && defined(RT_USING_FINSH)
+#include <stdlib.h>
+
+static void test_dma_callback(struct rt_dma_chan *chan, rt_size_t size)
+{
+    rt_bool_t *done_ptr = chan->priv;
+
+    *done_ptr = RT_TRUE;
+    rt_hw_wmb();
+}
+
+static int dma_memcpy_test(int argc, char **argv)
+{
+    rt_bool_t done;
+    int dma_sz = 64;
+    rt_ubase_t dma_addr;
+    rt_uint8_t *src_addr, *dst_addr;
+    struct rt_device dev = {};
+    struct rt_dma_slave_config config = {};
+    struct rt_dma_slave_transfer transfer;
+    struct rt_dma_chan *chn = rt_dma_chan_request(&dev, RT_NULL);
+
+    if (rt_is_err_or_null(chn))
+    {
+        rt_kputs("Alloc DMA channel fail");
+        return 0;
+    }
+
+    if (argc > 1)
+    {
+        dma_sz = atoi(argv[1]);
+    }
+
+    if (dma_sz % sizeof(rt_uint32_t))
+    {
+        dma_sz = RT_ALIGN_DOWN(dma_sz, sizeof(rt_uint32_t));
+        rt_kprintf("DMA size align to %d\n", dma_sz);
+    }
+
+    if (!(src_addr = rt_dma_alloc_coherent(&dev, dma_sz, &dma_addr)))
+    {
+        rt_kprintf("Alloc DMA %s buffer(size = %d) fail\n", "SRC", dma_sz);
+        goto _free_dma_chan;
+    }
+    config.src_addr = dma_addr;
+
+    if (!(dst_addr = rt_dma_alloc_coherent(&dev, dma_sz, &dma_addr)))
+    {
+        rt_kprintf("Alloc DMA %s buffer(size = %d) fail\n", "DST", dma_sz);
+        goto _free_src_addr;
+    }
+    config.dst_addr = dma_addr;
+
+    config.direction = RT_DMA_MEM_TO_MEM;
+    config.src_addr_width = sizeof(rt_uint32_t);
+    config.src_maxburst = sizeof(rt_uint32_t);
+    config.dst_addr_width = sizeof(rt_uint32_t);
+    config.dst_maxburst = sizeof(rt_uint32_t);
+
+    chn->callback = test_dma_callback;
+    chn->priv = &done;
+    if (rt_dma_chan_config(chn, &config))
+    {
+        rt_kprintf("DMA channel %s fail\n", "config");
+        goto _free_dst_addr;
+    }
+
+    rt_memset(&transfer, 0, sizeof(transfer));
+    transfer.src_addr = config.src_addr;
+    transfer.dst_addr = config.dst_addr;
+    transfer.buffer_len = dma_sz;
+
+    if (rt_dma_prep_memcpy(chn, &transfer))
+    {
+        rt_kprintf("DMA channel %s fail\n", "prep");
+        goto _free_dst_addr;
+    }
+
+    rt_memset(src_addr, 0xff, dma_sz);
+    rt_memset(dst_addr, 0, dma_sz);
+
+    rt_kprintf("%s %s:\n", "SRC", "start");
+    for (int i = 0; i < dma_sz; ++i)
+    {
+        rt_kprintf("%02x ", src_addr[i]);
+    }
+    rt_kputs("\n");
+
+    rt_kprintf("%s %s:\n", "DST", "start");
+    for (int i = 0; i < dma_sz; ++i)
+    {
+        rt_kprintf("%02x ", dst_addr[i]);
+    }
+    rt_kputs("\n");
+
+    done = RT_FALSE;
+    if (rt_dma_chan_start(chn))
+    {
+        rt_kprintf("DMA channel %s fail\n", "start");
+        goto _free_dst_addr;
+    }
+
+    while (!done)
+    {
+        rt_hw_cpu_relax();
+    }
+
+    rt_kprintf("%s %s:\n", "SRC", "end");
+    for (int i = 0; i < dma_sz; ++i)
+    {
+        rt_kprintf("%02x ", src_addr[i]);
+    }
+    rt_kputs("\n");
+
+    rt_kprintf("%s %s:\n", "DST", "end");
+    for (int i = 0; i < dma_sz; ++i)
+    {
+        rt_kprintf("%02x ", dst_addr[i]);
+    }
+    rt_kputs("\n");
+
+_free_dst_addr:
+    rt_dma_free_coherent(&dev, dma_sz, dst_addr, config.dst_addr);
+_free_src_addr:
+    rt_dma_free_coherent(&dev, dma_sz, src_addr, config.src_addr);
+_free_dma_chan:
+    rt_dma_chan_release(chn);
+
+    return 0;
+}
+MSH_CMD_EXPORT(dma_memcpy_test, test dma memcpy e.g: dma_memcpy_test(64));
+#endif /* RT_USING_DMA && RT_USING_FINSH */