usb: dwc2: host: avoid usage of dma_alloc_coherent with irqs disabled
author		Gregory Herrero <gregory.herrero@intel.com>	Fri, 20 Nov 2015 10:49:29 +0000 (11:49 +0100)
committer	Felipe Balbi <balbi@ti.com>	Tue, 15 Dec 2015 15:12:41 +0000 (09:12 -0600)
Use streaming DMA mappings to handle cache coherency of the frame list and
the descriptor lists. The lists are always synced for the device before the
controller accesses them and synced for the CPU before the CPU accesses them.
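
For reference, the streaming-DMA pattern the patch switches to looks roughly
like the sketch below. The dwc2_example_* helpers and their buf/buf_dma/buf_sz
parameters are illustrative only and are not part of the driver; they just
show the kzalloc() + dma_map_single() + dma_sync_single_for_*() sequence that
replaces dma_alloc_coherent() here.

	/*
	 * Minimal sketch of the streaming DMA pattern adopted by this
	 * patch. All names below are hypothetical.
	 */
	#include <linux/device.h>
	#include <linux/dma-mapping.h>
	#include <linux/slab.h>

	static int dwc2_example_alloc(struct device *dev, void **buf,
				      dma_addr_t *buf_dma, size_t buf_sz)
	{
		/*
		 * kmalloc-backed memory can be allocated with IRQs
		 * disabled using GFP_ATOMIC, which lets this path avoid
		 * calling dma_alloc_coherent() in atomic context.
		 */
		*buf = kzalloc(buf_sz, GFP_ATOMIC | GFP_DMA);
		if (!*buf)
			return -ENOMEM;

		*buf_dma = dma_map_single(dev, *buf, buf_sz, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *buf_dma)) {
			kfree(*buf);
			*buf = NULL;
			return -ENOMEM;
		}
		return 0;
	}

	static void dwc2_example_cpu_to_device(struct device *dev,
					       dma_addr_t buf_dma,
					       size_t buf_sz)
	{
		/* CPU wrote the buffer: flush before the controller reads it. */
		dma_sync_single_for_device(dev, buf_dma, buf_sz, DMA_TO_DEVICE);
	}

	static void dwc2_example_device_to_cpu(struct device *dev,
					       dma_addr_t buf_dma,
					       size_t buf_sz)
	{
		/* Controller wrote the buffer: invalidate before the CPU reads it. */
		dma_sync_single_for_cpu(dev, buf_dma, buf_sz, DMA_FROM_DEVICE);
	}

	static void dwc2_example_free(struct device *dev, void *buf,
				      dma_addr_t buf_dma, size_t buf_sz)
	{
		dma_unmap_single(dev, buf_dma, buf_sz, DMA_TO_DEVICE);
		kfree(buf);
	}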

Acked-by: John Youn <johnyoun@synopsys.com>
Signed-off-by: Gregory Herrero <gregory.herrero@intel.com>
Signed-off-by: Felipe Balbi <balbi@ti.com>
drivers/usb/dwc2/core.c
drivers/usb/dwc2/core.h
drivers/usb/dwc2/hcd.c
drivers/usb/dwc2/hcd.h
drivers/usb/dwc2/hcd_ddma.c

diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index 5568d9c8e213c932992e9f4c04211d54aa5c4428..542c9e6d95db6b434584029b398cdb01bd347412 100644
@@ -1934,6 +1934,9 @@ void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
 
        dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
 
+       dma_sync_single_for_device(hsotg->dev, chan->desc_list_addr,
+                                  chan->desc_list_sz, DMA_TO_DEVICE);
+
        hc_dma = (u32)chan->desc_list_addr & HCDMA_DMA_ADDR_MASK;
 
        /* Always start from first descriptor */
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index fd4c2365069ae92c080988063c42ccd2689fe20c..e7cc54268f2f537078c19ab68098f3f34543b7b9 100644
@@ -685,6 +685,7 @@ struct dwc2_hregs_backup {
  * @otg_port:           OTG port number
  * @frame_list:         Frame list
  * @frame_list_dma:     Frame list DMA address
+ * @frame_list_sz:      Frame list size
  *
  * These are for peripheral mode:
  *
@@ -804,6 +805,7 @@ struct dwc2_hsotg {
        u8 otg_port;
        u32 *frame_list;
        dma_addr_t frame_list_dma;
+       u32 frame_list_sz;
 
 #ifdef DEBUG
        u32 frrem_samples;
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 32c84e7a81ab3d58c1e1c2f887a873ab4ce8a30a..7fd4f41e12a793cd41862c2f95aca7ddf36927bc 100644
@@ -881,8 +881,10 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
                 */
                chan->multi_count = dwc2_hb_mult(qh->maxp);
 
-       if (hsotg->core_params->dma_desc_enable > 0)
+       if (hsotg->core_params->dma_desc_enable > 0) {
                chan->desc_list_addr = qh->desc_list_dma;
+               chan->desc_list_sz = qh->desc_list_sz;
+       }
 
        dwc2_hc_init(hsotg, chan);
        chan->qh = qh;
diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
index 2e5e9d9a00161f220f005a0f97f3928cd4d16e24..6e822661a69e3263307ce96d440e59ff6c8fc6d5 100644
@@ -107,6 +107,7 @@ struct dwc2_qh;
  * @qh:                 QH for the transfer being processed by this channel
  * @hc_list_entry:      For linking to list of host channels
  * @desc_list_addr:     Current QH's descriptor list DMA address
+ * @desc_list_sz:       Current QH's descriptor list size
  *
  * This structure represents the state of a single host channel when acting in
  * host mode. It contains the data items needed to transfer packets to an
@@ -159,6 +160,7 @@ struct dwc2_host_chan {
        struct dwc2_qh *qh;
        struct list_head hc_list_entry;
        dma_addr_t desc_list_addr;
+       u32 desc_list_sz;
 };
 
 struct dwc2_hcd_pipe_info {
@@ -251,6 +253,7 @@ enum dwc2_transaction_type {
  *                      schedule
  * @desc_list:          List of transfer descriptors
  * @desc_list_dma:      Physical address of desc_list
+ * @desc_list_sz:       Size of descriptors list
  * @n_bytes:            Xfer Bytes array. Each element corresponds to a transfer
  *                      descriptor and indicates original XferSize value for the
  *                      descriptor
@@ -284,6 +287,7 @@ struct dwc2_qh {
        struct list_head qh_list_entry;
        struct dwc2_hcd_dma_desc *desc_list;
        dma_addr_t desc_list_dma;
+       u32 desc_list_sz;
        u32 *n_bytes;
        unsigned tt_buffer_dirty:1;
 };
diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c
index f98c7e91f6bcd2251785975289291a62dc9bef57..85d7816d29f1d71c5cfdceb18f3401e4690a1af5 100644
@@ -87,22 +87,23 @@ static u16 dwc2_frame_incr_val(struct dwc2_qh *qh)
 static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
                                gfp_t flags)
 {
-       qh->desc_list = dma_alloc_coherent(hsotg->dev,
-                               sizeof(struct dwc2_hcd_dma_desc) *
-                               dwc2_max_desc_num(qh), &qh->desc_list_dma,
-                               flags);
+       qh->desc_list_sz = sizeof(struct dwc2_hcd_dma_desc) *
+                                               dwc2_max_desc_num(qh);
 
+       qh->desc_list = kzalloc(qh->desc_list_sz, flags | GFP_DMA);
        if (!qh->desc_list)
                return -ENOMEM;
 
-       memset(qh->desc_list, 0,
-              sizeof(struct dwc2_hcd_dma_desc) * dwc2_max_desc_num(qh));
+       qh->desc_list_dma = dma_map_single(hsotg->dev, qh->desc_list,
+                                          qh->desc_list_sz,
+                                          DMA_TO_DEVICE);
 
        qh->n_bytes = kzalloc(sizeof(u32) * dwc2_max_desc_num(qh), flags);
        if (!qh->n_bytes) {
-               dma_free_coherent(hsotg->dev, sizeof(struct dwc2_hcd_dma_desc)
-                                 * dwc2_max_desc_num(qh), qh->desc_list,
-                                 qh->desc_list_dma);
+               dma_unmap_single(hsotg->dev, qh->desc_list_dma,
+                                qh->desc_list_sz,
+                                DMA_FROM_DEVICE);
+               kfree(qh->desc_list);
                qh->desc_list = NULL;
                return -ENOMEM;
        }
@@ -113,9 +114,9 @@ static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
 static void dwc2_desc_list_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
 {
        if (qh->desc_list) {
-               dma_free_coherent(hsotg->dev, sizeof(struct dwc2_hcd_dma_desc)
-                                 * dwc2_max_desc_num(qh), qh->desc_list,
-                                 qh->desc_list_dma);
+               dma_unmap_single(hsotg->dev, qh->desc_list_dma,
+                                qh->desc_list_sz, DMA_FROM_DEVICE);
+               kfree(qh->desc_list);
                qh->desc_list = NULL;
        }
 
@@ -128,21 +129,20 @@ static int dwc2_frame_list_alloc(struct dwc2_hsotg *hsotg, gfp_t mem_flags)
        if (hsotg->frame_list)
                return 0;
 
-       hsotg->frame_list = dma_alloc_coherent(hsotg->dev,
-                                              4 * FRLISTEN_64_SIZE,
-                                              &hsotg->frame_list_dma,
-                                              mem_flags);
+       hsotg->frame_list_sz = 4 * FRLISTEN_64_SIZE;
+       hsotg->frame_list = kzalloc(hsotg->frame_list_sz, GFP_ATOMIC | GFP_DMA);
        if (!hsotg->frame_list)
                return -ENOMEM;
 
-       memset(hsotg->frame_list, 0, 4 * FRLISTEN_64_SIZE);
+       hsotg->frame_list_dma = dma_map_single(hsotg->dev, hsotg->frame_list,
+                                              hsotg->frame_list_sz,
+                                              DMA_TO_DEVICE);
+
        return 0;
 }
 
 static void dwc2_frame_list_free(struct dwc2_hsotg *hsotg)
 {
-       u32 *frame_list;
-       dma_addr_t frame_list_dma;
        unsigned long flags;
 
        spin_lock_irqsave(&hsotg->lock, flags);
@@ -152,14 +152,14 @@ static void dwc2_frame_list_free(struct dwc2_hsotg *hsotg)
                return;
        }
 
-       frame_list = hsotg->frame_list;
-       frame_list_dma = hsotg->frame_list_dma;
+       dma_unmap_single(hsotg->dev, hsotg->frame_list_dma,
+                        hsotg->frame_list_sz, DMA_FROM_DEVICE);
+
+       kfree(hsotg->frame_list);
        hsotg->frame_list = NULL;
 
        spin_unlock_irqrestore(&hsotg->lock, flags);
 
-       dma_free_coherent(hsotg->dev, 4 * FRLISTEN_64_SIZE, frame_list,
-                         frame_list_dma);
 }
 
 static void dwc2_per_sched_enable(struct dwc2_hsotg *hsotg, u32 fr_list_en)
@@ -249,6 +249,15 @@ static void dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
                j = (j + inc) & (FRLISTEN_64_SIZE - 1);
        } while (j != i);
 
+       /*
+        * Sync frame list since controller will access it if periodic
+        * channel is currently enabled.
+        */
+       dma_sync_single_for_device(hsotg->dev,
+                                  hsotg->frame_list_dma,
+                                  hsotg->frame_list_sz,
+                                  DMA_TO_DEVICE);
+
        if (!enable)
                return;
 
@@ -541,6 +550,11 @@ static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
                dma_desc->status |= HOST_DMA_IOC;
 #endif
 
+       dma_sync_single_for_device(hsotg->dev,
+                       qh->desc_list_dma +
+                       (idx * sizeof(struct dwc2_hcd_dma_desc)),
+                       sizeof(struct dwc2_hcd_dma_desc),
+                       DMA_TO_DEVICE);
 }
 
 static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
@@ -610,6 +624,11 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
        if (qh->ntd == ntd_max) {
                idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
                qh->desc_list[idx].status |= HOST_DMA_IOC;
+               dma_sync_single_for_device(hsotg->dev,
+                                          qh->desc_list_dma + (idx *
+                                          sizeof(struct dwc2_hcd_dma_desc)),
+                                          sizeof(struct dwc2_hcd_dma_desc),
+                                          DMA_TO_DEVICE);
        }
 #else
        /*
@@ -639,6 +658,11 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
                idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
 
        qh->desc_list[idx].status |= HOST_DMA_IOC;
+       dma_sync_single_for_device(hsotg->dev,
+                                  qh->desc_list_dma +
+                                  (idx * sizeof(struct dwc2_hcd_dma_desc)),
+                                  sizeof(struct dwc2_hcd_dma_desc),
+                                  DMA_TO_DEVICE);
 #endif
 }
 
@@ -676,6 +700,12 @@ static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
 
        dma_desc->buf = (u32)chan->xfer_dma;
 
+       dma_sync_single_for_device(hsotg->dev,
+                                  qh->desc_list_dma +
+                                  (n_desc * sizeof(struct dwc2_hcd_dma_desc)),
+                                  sizeof(struct dwc2_hcd_dma_desc),
+                                  DMA_TO_DEVICE);
+
        /*
         * Last (or only) descriptor of IN transfer with actual size less
         * than MaxPacket
@@ -726,6 +756,12 @@ static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
                                         "set A bit in desc %d (%p)\n",
                                         n_desc - 1,
                                         &qh->desc_list[n_desc - 1]);
+                               dma_sync_single_for_device(hsotg->dev,
+                                       qh->desc_list_dma +
+                                       ((n_desc - 1) *
+                                       sizeof(struct dwc2_hcd_dma_desc)),
+                                       sizeof(struct dwc2_hcd_dma_desc),
+                                       DMA_TO_DEVICE);
                        }
                        dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc);
                        dev_vdbg(hsotg->dev,
@@ -751,10 +787,19 @@ static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
                                HOST_DMA_IOC | HOST_DMA_EOL | HOST_DMA_A;
                dev_vdbg(hsotg->dev, "set IOC/EOL/A bits in desc %d (%p)\n",
                         n_desc - 1, &qh->desc_list[n_desc - 1]);
+               dma_sync_single_for_device(hsotg->dev,
+                                          qh->desc_list_dma + (n_desc - 1) *
+                                          sizeof(struct dwc2_hcd_dma_desc),
+                                          sizeof(struct dwc2_hcd_dma_desc),
+                                          DMA_TO_DEVICE);
                if (n_desc > 1) {
                        qh->desc_list[0].status |= HOST_DMA_A;
                        dev_vdbg(hsotg->dev, "set A bit in desc 0 (%p)\n",
                                 &qh->desc_list[0]);
+                       dma_sync_single_for_device(hsotg->dev,
+                                       qh->desc_list_dma,
+                                       sizeof(struct dwc2_hcd_dma_desc),
+                                       DMA_TO_DEVICE);
                }
                chan->ntd = n_desc;
        }
@@ -829,7 +874,7 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
                                        struct dwc2_qtd *qtd,
                                        struct dwc2_qh *qh, u16 idx)
 {
-       struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[idx];
+       struct dwc2_hcd_dma_desc *dma_desc;
        struct dwc2_hcd_iso_packet_desc *frame_desc;
        u16 remain = 0;
        int rc = 0;
@@ -837,6 +882,13 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
        if (!qtd->urb)
                return -EINVAL;
 
+       dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx *
+                               sizeof(struct dwc2_hcd_dma_desc)),
+                               sizeof(struct dwc2_hcd_dma_desc),
+                               DMA_FROM_DEVICE);
+
+       dma_desc = &qh->desc_list[idx];
+
        frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
        dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
        if (chan->ep_is_in)
@@ -1092,6 +1144,12 @@ static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
        if (!urb)
                return -EINVAL;
 
+       dma_sync_single_for_cpu(hsotg->dev,
+                               qh->desc_list_dma + (desc_num *
+                               sizeof(struct dwc2_hcd_dma_desc)),
+                               sizeof(struct dwc2_hcd_dma_desc),
+                               DMA_FROM_DEVICE);
+
        dma_desc = &qh->desc_list[desc_num];
        n_bytes = qh->n_bytes[desc_num];
        dev_vdbg(hsotg->dev,