Merge tag 'f2fs-for-5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk...
[linux-2.6-block.git] / drivers / dma / tegra20-apb-dma.c
index 9a558e30c461c4f5b9d26ecf4b8602e05a3a2e85..cf462b1abc0bb3eb57083438071e3bf4dfc2e129 100644 (file)
@@ -38,6 +38,9 @@
 
 #include "dmaengine.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/tegra_apb_dma.h>
+
 #define TEGRA_APBDMA_GENERAL                   0x0
 #define TEGRA_APBDMA_GENERAL_ENABLE            BIT(31)
 
@@ -146,7 +149,7 @@ struct tegra_dma_channel_regs {
 };
 
 /*
- * tegra_dma_sg_req: Dma request details to configure hardware. This
+ * tegra_dma_sg_req: DMA request details to configure hardware. This
  * contains the details for one transfer to configure DMA hw.
  * The client's request for data transfer can be broken into multiple
  * sub-transfer as per requester details and hw support.
@@ -155,7 +158,7 @@ struct tegra_dma_channel_regs {
  */
 struct tegra_dma_sg_req {
        struct tegra_dma_channel_regs   ch_regs;
-       int                             req_len;
+       unsigned int                    req_len;
        bool                            configured;
        bool                            last_sg;
        struct list_head                node;
@@ -169,8 +172,8 @@ struct tegra_dma_sg_req {
  */
 struct tegra_dma_desc {
        struct dma_async_tx_descriptor  txd;
-       int                             bytes_requested;
-       int                             bytes_transferred;
+       unsigned int                    bytes_requested;
+       unsigned int                    bytes_transferred;
        enum dma_status                 dma_status;
        struct list_head                node;
        struct list_head                tx_list;
@@ -186,7 +189,7 @@ typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
 /* tegra_dma_channel: Channel specific information */
 struct tegra_dma_channel {
        struct dma_chan         dma_chan;
-       char                    name[30];
+       char                    name[12];
        bool                    config_init;
        int                     id;
        int                     irq;
@@ -574,7 +577,7 @@ static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
        struct tegra_dma_sg_req *hsgreq = NULL;
 
        if (list_empty(&tdc->pending_sg_req)) {
-               dev_err(tdc2dev(tdc), "Dma is running without req\n");
+               dev_err(tdc2dev(tdc), "DMA is running without req\n");
                tegra_dma_stop(tdc);
                return false;
        }
@@ -587,7 +590,7 @@ static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
        hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
        if (!hsgreq->configured) {
                tegra_dma_stop(tdc);
-               dev_err(tdc2dev(tdc), "Error in dma transfer, aborting dma\n");
+               dev_err(tdc2dev(tdc), "Error in DMA transfer, aborting DMA\n");
                tegra_dma_abort_all(tdc);
                return false;
        }
@@ -636,7 +639,10 @@ static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
 
        sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
        dma_desc = sgreq->dma_desc;
-       dma_desc->bytes_transferred += sgreq->req_len;
+       /* If we DMA for long enough, the transfer count will wrap */
+       dma_desc->bytes_transferred =
+               (dma_desc->bytes_transferred + sgreq->req_len) %
+               dma_desc->bytes_requested;
 
        /* Callback need to be call */
        if (!dma_desc->cb_count)
@@ -669,6 +675,8 @@ static void tegra_dma_tasklet(unsigned long data)
                dmaengine_desc_get_callback(&dma_desc->txd, &cb);
                cb_count = dma_desc->cb_count;
                dma_desc->cb_count = 0;
+               trace_tegra_dma_complete_cb(&tdc->dma_chan, cb_count,
+                                           cb.callback);
                spin_unlock_irqrestore(&tdc->lock, flags);
                while (cb_count--)
                        dmaengine_desc_callback_invoke(&cb, NULL);
@@ -685,6 +693,7 @@ static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
 
        spin_lock_irqsave(&tdc->lock, flags);
 
+       trace_tegra_dma_isr(&tdc->dma_chan, irq);
        status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
        if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
                tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
@@ -843,6 +852,7 @@ found:
                dma_set_residue(txstate, residual);
        }
 
+       trace_tegra_dma_tx_status(&tdc->dma_chan, cookie, txstate);
        spin_unlock_irqrestore(&tdc->lock, flags);
        return ret;
 }
@@ -919,7 +929,7 @@ static int get_transfer_param(struct tegra_dma_channel *tdc,
                return 0;
 
        default:
-               dev_err(tdc2dev(tdc), "Dma direction is not supported\n");
+               dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
                return -EINVAL;
        }
        return -EINVAL;
@@ -952,7 +962,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
        enum dma_slave_buswidth slave_bw;
 
        if (!tdc->config_init) {
-               dev_err(tdc2dev(tdc), "dma channel is not configured\n");
+               dev_err(tdc2dev(tdc), "DMA channel is not configured\n");
                return NULL;
        }
        if (sg_len < 1) {
@@ -985,7 +995,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
 
        dma_desc = tegra_dma_desc_get(tdc);
        if (!dma_desc) {
-               dev_err(tdc2dev(tdc), "Dma descriptors not available\n");
+               dev_err(tdc2dev(tdc), "DMA descriptors not available\n");
                return NULL;
        }
        INIT_LIST_HEAD(&dma_desc->tx_list);
@@ -1005,14 +1015,14 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
                if ((len & 3) || (mem & 3) ||
                                (len > tdc->tdma->chip_data->max_dma_count)) {
                        dev_err(tdc2dev(tdc),
-                               "Dma length/memory address is not supported\n");
+                               "DMA length/memory address is not supported\n");
                        tegra_dma_desc_put(tdc, dma_desc);
                        return NULL;
                }
 
                sg_req = tegra_dma_sg_req_get(tdc);
                if (!sg_req) {
-                       dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
+                       dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
                        tegra_dma_desc_put(tdc, dma_desc);
                        return NULL;
                }
@@ -1087,7 +1097,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
         * terminating the DMA.
         */
        if (tdc->busy) {
-               dev_err(tdc2dev(tdc), "Request not allowed when dma running\n");
+               dev_err(tdc2dev(tdc), "Request not allowed when DMA running\n");
                return NULL;
        }
 
@@ -1144,7 +1154,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
        while (remain_len) {
                sg_req = tegra_dma_sg_req_get(tdc);
                if (!sg_req) {
-                       dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
+                       dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
                        tegra_dma_desc_put(tdc, dma_desc);
                        return NULL;
                }
@@ -1319,8 +1329,9 @@ static int tegra_dma_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
-                       sizeof(struct tegra_dma_channel), GFP_KERNEL);
+       tdma = devm_kzalloc(&pdev->dev,
+                           struct_size(tdma, channels, cdata->nr_channels),
+                           GFP_KERNEL);
        if (!tdma)
                return -ENOMEM;