/*
* imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
*/
-static inline int imxdma_sg_next(struct imxdma_desc *d)
+static inline void imxdma_sg_next(struct imxdma_desc *d)
{
struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
struct imxdma_engine *imxdma = imxdmac->imxdma;
struct scatterlist *sg = d->sg;
- unsigned long now;
+ size_t now;
- now = min(d->len, sg_dma_len(sg));
+ now = min_t(size_t, d->len, sg_dma_len(sg));
if (d->len != IMX_DMA_LENGTH_LOOP)
d->len -= now;
	/* ... DAR/SAR and CNTR are programmed for the next chunk, then logged: */
	dev_dbg(imxdma->dev, "%s channel: %d dst 0x%08x, src 0x%08x, size 0x%08x\n",
		__func__, imxdmac->channel,
		imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)),
		imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)),
		imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel)));
-
- return now;
}
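/*
 * Illustrative sketch, not part of the patch: the helper below is
 * hypothetical, but it mirrors how the existing call sites use
 * imxdma_sg_next() - the returned length was never consumed, which is why
 * the return type can safely become void.
 */
static void example_advance_sg(struct imxdma_desc *d)
{
	if (d->sg)
		imxdma_sg_next(d);	/* old return value was always ignored */
}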
static void imxdma_enable_hw(struct imxdma_desc *d)
unsigned long watermark_level;
u32 shp_addr, per_addr;
enum dma_status status;
+ bool context_loaded;
struct imx_dma_data data;
struct work_struct terminate_worker;
};
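/*
 * Note (added for context): the fields above appear to belong to the
 * per-channel state, struct sdma_channel in this driver. The new
 * 'context_loaded' flag records whether the channel context has already
 * been written to the SDMA engine, so sdma_load_context() can skip the
 * reload on every transfer (see the hunks below).
 */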
int ret;
unsigned long flags;
- buf_virt = dma_alloc_coherent(NULL, size, &buf_phys, GFP_KERNEL);
+ buf_virt = dma_alloc_coherent(sdma->dev, size, &buf_phys, GFP_KERNEL);
if (!buf_virt) {
return -ENOMEM;
}
spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
- dma_free_coherent(NULL, size, buf_virt, buf_phys);
+ dma_free_coherent(sdma->dev, size, buf_virt, buf_phys);
return ret;
}
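/*
 * Minimal sketch of the allocation pattern used above (the helper name is
 * hypothetical): coherent DMA memory is allocated and freed against the
 * device that performs the DMA, so its dma_mask, coherent mask and DMA ops
 * are honoured; a NULL device cannot provide any of that.
 */
static int example_bounce_buffer(struct sdma_engine *sdma, size_t size)
{
	dma_addr_t phys;
	void *virt;

	virt = dma_alloc_coherent(sdma->dev, size, &phys, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	/* ... fill the buffer and hand 'phys' to the SDMA engine ... */

	dma_free_coherent(sdma->dev, size, virt, phys);
	return 0;
}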
int ret;
unsigned long flags;
+ if (sdmac->context_loaded)
+ return 0;
+
if (sdmac->direction == DMA_DEV_TO_MEM)
load_address = sdmac->pc_from_device;
else if (sdmac->direction == DMA_DEV_TO_DEV)
spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
+ sdmac->context_loaded = true;
+
return ret;
}
sdmac->desc = NULL;
spin_unlock_irqrestore(&sdmac->vc.lock, flags);
vchan_dma_desc_free_list(&sdmac->vc, &head);
+ sdmac->context_loaded = false;
}
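/*
 * Lifecycle of the context_loaded flag, condensed from the hunks above:
 *
 *	sdma_load_context():	if (sdmac->context_loaded)
 *					return 0;	// already programmed
 *				... program the context via channel 0 ...
 *				sdmac->context_loaded = true;
 *
 *	terminate worker:	sdmac->context_loaded = false;
 *				// the next use of the channel loads a fresh context
 */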
static int sdma_disable_channel_async(struct dma_chan *chan)
{
int ret = -EBUSY;
- sdma->bd0 = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys,
+ sdma->bd0 = dma_zalloc_coherent(sdma->dev, PAGE_SIZE, &sdma->bd0_phys,
GFP_NOWAIT);
if (!sdma->bd0) {
ret = -ENOMEM;
u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
int ret = 0;
- desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys,
- GFP_NOWAIT);
+ desc->bd = dma_zalloc_coherent(desc->sdmac->sdma->dev, bd_size,
+ &desc->bd_phys, GFP_NOWAIT);
if (!desc->bd) {
ret = -ENOMEM;
goto out;
{
u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
- dma_free_coherent(NULL, bd_size, desc->bd, desc->bd_phys);
+ dma_free_coherent(desc->sdmac->sdma->dev, bd_size, desc->bd,
+ desc->bd_phys);
}
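/*
 * Sketch of the matched alloc/free pair for a buffer-descriptor ring,
 * condensed from the two hunks above (helper name hypothetical): both
 * calls must name the same struct device so the coherent mapping is
 * created and torn down consistently. (dma_zalloc_coherent() is
 * dma_alloc_coherent() plus zeroing; later kernels zero unconditionally
 * and dropped the _z variant.)
 */
static int example_bd_ring(struct sdma_desc *desc)
{
	struct device *dev = desc->sdmac->sdma->dev;
	u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);

	desc->bd = dma_zalloc_coherent(dev, bd_size, &desc->bd_phys,
				       GFP_NOWAIT);
	if (!desc->bd)
		return -ENOMEM;

	/* ... descriptors are filled and the transfer runs ... */

	dma_free_coherent(dev, bd_size, desc->bd, desc->bd_phys);
	return 0;
}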
static void sdma_desc_free(struct virt_dma_desc *vd)
/* Be sure SDMA has not started yet */
writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
- sdma->channel_control = dma_alloc_coherent(NULL,
+ sdma->channel_control = dma_alloc_coherent(sdma->dev,
MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
sizeof(struct sdma_context_data),
&ccb_phys, GFP_KERNEL);
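/*
 * Layout note (sketch, offsets follow from the size computation above):
 * this single coherent buffer holds the channel control blocks for all
 * MAX_DMA_CHANNELS channels, immediately followed by one sdma_context_data
 * area that the driver uses as its context staging buffer, e.g.:
 *
 *	sdma->context      = (void *)sdma->channel_control +
 *			     MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control);
 *	sdma->context_phys = ccb_phys +
 *			     MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control);
 */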