dmaengine: ptdma: Extend ptdma to support multi-channel and version
author: Basavaraj Natikar <Basavaraj.Natikar@amd.com>
Fri, 25 Oct 2024 09:59:28 +0000 (15:29 +0530)
committer: Vinod Koul <vkoul@kernel.org>
Mon, 2 Dec 2024 17:25:27 +0000 (22:55 +0530)
To support multi-channel functionality with the AE4DMA engine, extend the
PTDMA code with reusable components.

Reviewed-by: Raju Rangoju <Raju.Rangoju@amd.com>
Signed-off-by: Basavaraj Natikar <Basavaraj.Natikar@amd.com>
Link: https://lore.kernel.org/r/20241025095931.726018-4-Basavaraj.Natikar@amd.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
drivers/dma/amd/ae4dma/ae4dma.h
drivers/dma/amd/ptdma/ptdma-dmaengine.c
drivers/dma/amd/ptdma/ptdma.h

index 4a1dfcf620c177a6266a3c6a1b55a9a79da83ef3..92cb8c379c182dbb7a0d972c7b50a9f838382b90 100644 (file)
@@ -34,6 +34,8 @@
 #define AE4_Q_BASE_H_OFF               0x1c
 #define AE4_Q_SZ                       0x20
 
+#define AE4_DMA_VERSION                        4
+
 struct ae4_msix {
        int msix_count;
        struct msix_entry msix_entry[MAX_AE4_HW_QUEUES];
index 77fe709fb32782a071208bf6179153a468799f22..e2d4bc8aa1dec075ff1c1b336ee84ae2d21c78df 100644 (file)
@@ -93,7 +93,24 @@ static void pt_do_cleanup(struct virt_dma_desc *vd)
        kmem_cache_free(pt->dma_desc_cache, desc);
 }
 
-static int pt_dma_start_desc(struct pt_dma_desc *desc)
+static struct pt_cmd_queue *pt_get_cmd_queue(struct pt_device *pt, struct pt_dma_chan *chan)
+{
+       struct ae4_cmd_queue *ae4cmd_q;
+       struct pt_cmd_queue *cmd_q;
+       struct ae4_device *ae4;
+
+       if (pt->ver == AE4_DMA_VERSION) {
+               ae4 = container_of(pt, struct ae4_device, pt);
+               ae4cmd_q = &ae4->ae4cmd_q[chan->id];
+               cmd_q = &ae4cmd_q->cmd_q;
+       } else {
+               cmd_q = &pt->cmd_q;
+       }
+
+       return cmd_q;
+}
+
+static int pt_dma_start_desc(struct pt_dma_desc *desc, struct pt_dma_chan *chan)
 {
        struct pt_passthru_engine *pt_engine;
        struct pt_device *pt;
@@ -104,7 +121,9 @@ static int pt_dma_start_desc(struct pt_dma_desc *desc)
 
        pt_cmd = &desc->pt_cmd;
        pt = pt_cmd->pt;
-       cmd_q = &pt->cmd_q;
+
+       cmd_q = pt_get_cmd_queue(pt, chan);
+
        pt_engine = &pt_cmd->passthru;
 
        pt->tdata.cmd = pt_cmd;
@@ -199,7 +218,7 @@ static void pt_cmd_callback(void *data, int err)
                if (!desc)
                        break;
 
-               ret = pt_dma_start_desc(desc);
+               ret = pt_dma_start_desc(desc, chan);
                if (!ret)
                        break;
 
@@ -234,7 +253,10 @@ static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
 {
        struct pt_dma_chan *chan = to_pt_chan(dma_chan);
        struct pt_passthru_engine *pt_engine;
+       struct pt_device *pt = chan->pt;
+       struct ae4_cmd_queue *ae4cmd_q;
        struct pt_dma_desc *desc;
+       struct ae4_device *ae4;
        struct pt_cmd *pt_cmd;
 
        desc = pt_alloc_dma_desc(chan, flags);
@@ -242,7 +264,7 @@ static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
                return NULL;
 
        pt_cmd = &desc->pt_cmd;
-       pt_cmd->pt = chan->pt;
+       pt_cmd->pt = pt;
        pt_engine = &pt_cmd->passthru;
        pt_cmd->engine = PT_ENGINE_PASSTHRU;
        pt_engine->src_dma = src;
@@ -253,6 +275,14 @@ static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
 
        desc->len = len;
 
+       if (pt->ver == AE4_DMA_VERSION) {
+               ae4 = container_of(pt, struct ae4_device, pt);
+               ae4cmd_q = &ae4->ae4cmd_q[chan->id];
+               mutex_lock(&ae4cmd_q->cmd_lock);
+               list_add_tail(&pt_cmd->entry, &ae4cmd_q->cmd);
+               mutex_unlock(&ae4cmd_q->cmd_lock);
+       }
+
        return desc;
 }
 
@@ -310,8 +340,11 @@ static enum dma_status
 pt_tx_status(struct dma_chan *c, dma_cookie_t cookie,
                struct dma_tx_state *txstate)
 {
-       struct pt_device *pt = to_pt_chan(c)->pt;
-       struct pt_cmd_queue *cmd_q = &pt->cmd_q;
+       struct pt_dma_chan *chan = to_pt_chan(c);
+       struct pt_device *pt = chan->pt;
+       struct pt_cmd_queue *cmd_q;
+
+       cmd_q = pt_get_cmd_queue(pt, chan);
 
        pt_check_status_trans(pt, cmd_q);
        return dma_cookie_status(c, cookie, txstate);
@@ -320,10 +353,13 @@ pt_tx_status(struct dma_chan *c, dma_cookie_t cookie,
 static int pt_pause(struct dma_chan *dma_chan)
 {
        struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+       struct pt_device *pt = chan->pt;
+       struct pt_cmd_queue *cmd_q;
        unsigned long flags;
 
        spin_lock_irqsave(&chan->vc.lock, flags);
-       pt_stop_queue(&chan->pt->cmd_q);
+       cmd_q = pt_get_cmd_queue(pt, chan);
+       pt_stop_queue(cmd_q);
        spin_unlock_irqrestore(&chan->vc.lock, flags);
 
        return 0;
@@ -333,10 +369,13 @@ static int pt_resume(struct dma_chan *dma_chan)
 {
        struct pt_dma_chan *chan = to_pt_chan(dma_chan);
        struct pt_dma_desc *desc = NULL;
+       struct pt_device *pt = chan->pt;
+       struct pt_cmd_queue *cmd_q;
        unsigned long flags;
 
        spin_lock_irqsave(&chan->vc.lock, flags);
-       pt_start_queue(&chan->pt->cmd_q);
+       cmd_q = pt_get_cmd_queue(pt, chan);
+       pt_start_queue(cmd_q);
        desc = pt_next_dma_desc(chan);
        spin_unlock_irqrestore(&chan->vc.lock, flags);
 
@@ -350,11 +389,17 @@ static int pt_resume(struct dma_chan *dma_chan)
 static int pt_terminate_all(struct dma_chan *dma_chan)
 {
        struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+       struct pt_device *pt = chan->pt;
+       struct pt_cmd_queue *cmd_q;
        unsigned long flags;
-       struct pt_cmd_queue *cmd_q = &chan->pt->cmd_q;
        LIST_HEAD(head);
 
-       iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
+       cmd_q = pt_get_cmd_queue(pt, chan);
+       if (pt->ver == AE4_DMA_VERSION)
+               pt_stop_queue(cmd_q);
+       else
+               iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
+
        spin_lock_irqsave(&chan->vc.lock, flags);
        vchan_get_all_descriptors(&chan->vc, &head);
        spin_unlock_irqrestore(&chan->vc.lock, flags);
@@ -367,14 +412,24 @@ static int pt_terminate_all(struct dma_chan *dma_chan)
 
 int pt_dmaengine_register(struct pt_device *pt)
 {
-       struct pt_dma_chan *chan;
        struct dma_device *dma_dev = &pt->dma_dev;
-       char *cmd_cache_name;
+       struct ae4_cmd_queue *ae4cmd_q = NULL;
+       struct ae4_device *ae4 = NULL;
+       struct pt_dma_chan *chan;
        char *desc_cache_name;
-       int ret;
+       char *cmd_cache_name;
+       int ret, i;
+
+       if (pt->ver == AE4_DMA_VERSION)
+               ae4 = container_of(pt, struct ae4_device, pt);
+
+       if (ae4)
+               pt->pt_dma_chan = devm_kcalloc(pt->dev, ae4->cmd_q_count,
+                                              sizeof(*pt->pt_dma_chan), GFP_KERNEL);
+       else
+               pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan),
+                                              GFP_KERNEL);
 
-       pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan),
-                                      GFP_KERNEL);
        if (!pt->pt_dma_chan)
                return -ENOMEM;
 
@@ -416,9 +471,6 @@ int pt_dmaengine_register(struct pt_device *pt)
 
        INIT_LIST_HEAD(&dma_dev->channels);
 
-       chan = pt->pt_dma_chan;
-       chan->pt = pt;
-
        /* Set base and prep routines */
        dma_dev->device_free_chan_resources = pt_free_chan_resources;
        dma_dev->device_prep_dma_memcpy = pt_prep_dma_memcpy;
@@ -430,8 +482,21 @@ int pt_dmaengine_register(struct pt_device *pt)
        dma_dev->device_terminate_all = pt_terminate_all;
        dma_dev->device_synchronize = pt_synchronize;
 
-       chan->vc.desc_free = pt_do_cleanup;
-       vchan_init(&chan->vc, dma_dev);
+       if (ae4) {
+               for (i = 0; i < ae4->cmd_q_count; i++) {
+                       chan = pt->pt_dma_chan + i;
+                       ae4cmd_q = &ae4->ae4cmd_q[i];
+                       chan->id = ae4cmd_q->id;
+                       chan->pt = pt;
+                       chan->vc.desc_free = pt_do_cleanup;
+                       vchan_init(&chan->vc, dma_dev);
+               }
+       } else {
+               chan = pt->pt_dma_chan;
+               chan->pt = pt;
+               chan->vc.desc_free = pt_do_cleanup;
+               vchan_init(&chan->vc, dma_dev);
+       }
 
        ret = dma_async_device_register(dma_dev);
        if (ret)
index 7a8ca8e239e006f4d3f8826a25a68d4f133cb367..0a7939105e512239c78499e672b7007b6b6491b9 100644 (file)
@@ -184,6 +184,7 @@ struct pt_dma_desc {
 struct pt_dma_chan {
        struct virt_dma_chan vc;
        struct pt_device *pt;
+       u32 id;
 };
 
 struct pt_cmd_queue {
@@ -262,6 +263,7 @@ struct pt_device {
        unsigned long total_interrupts;
 
        struct pt_tasklet_data tdata;
+       int ver;
 };
 
 /*