/*
 * Driver for the Atmel Extensible DMA Controller (aka XDMAC on AT91 systems)
 *
 * Copyright (C) 2014 Atmel Corporation
 *
 * Author: Ludovic Desroches <ludovic.desroches@atmel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <asm/barrier.h>
#include <dt-bindings/dma/at91.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

#include "dmaengine.h"
/* Global registers */
#define AT_XDMAC_GTYPE		0x00	/* Global Type Register */
#define AT_XDMAC_NB_CH(i)	(((i) & 0x1F) + 1)	/* Number of Channels Minus One */
#define AT_XDMAC_FIFO_SZ(i)	(((i) >> 5) & 0x7FF)	/* Number of Bytes */
#define AT_XDMAC_NB_REQ(i)	((((i) >> 16) & 0x3F) + 1)	/* Number of Peripheral Requests Minus One */
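/*
 * Example decode of the Global Type Register (illustrative value, not taken
 * from a specific datasheet): if AT_XDMAC_GTYPE reads 0x0031400F, then
 * AT_XDMAC_NB_CH() yields (0x0F & 0x1F) + 1 = 16 channels, AT_XDMAC_FIFO_SZ()
 * yields (0x0031400F >> 5) & 0x7FF = 512 bytes of FIFO, and AT_XDMAC_NB_REQ()
 * yields ((0x0031400F >> 16) & 0x3F) + 1 = 50 peripheral requests.
 */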
#define AT_XDMAC_GCFG		0x04	/* Global Configuration Register */
#define AT_XDMAC_GWAC		0x08	/* Global Weighted Arbiter Configuration Register */
#define AT_XDMAC_GIE		0x0C	/* Global Interrupt Enable Register */
#define AT_XDMAC_GID		0x10	/* Global Interrupt Disable Register */
#define AT_XDMAC_GIM		0x14	/* Global Interrupt Mask Register */
#define AT_XDMAC_GIS		0x18	/* Global Interrupt Status Register */
#define AT_XDMAC_GE		0x1C	/* Global Channel Enable Register */
#define AT_XDMAC_GD		0x20	/* Global Channel Disable Register */
#define AT_XDMAC_GS		0x24	/* Global Channel Status Register */
#define AT_XDMAC_GRS		0x28	/* Global Channel Read Suspend Register */
#define AT_XDMAC_GWS		0x2C	/* Global Write Suspend Register */
#define AT_XDMAC_GRWS		0x30	/* Global Channel Read Write Suspend Register */
#define AT_XDMAC_GRWR		0x34	/* Global Channel Read Write Resume Register */
#define AT_XDMAC_GSWR		0x38	/* Global Channel Software Request Register */
#define AT_XDMAC_GSWS		0x3C	/* Global Channel Software Request Status Register */
#define AT_XDMAC_GSWF		0x40	/* Global Channel Software Flush Request Register */
#define AT_XDMAC_VERSION	0xFFC	/* XDMAC Version Register */
/* Channel relative registers offsets */
#define AT_XDMAC_CIE		0x00	/* Channel Interrupt Enable Register */
#define AT_XDMAC_CIE_BIE	BIT(0)	/* End of Block Interrupt Enable Bit */
#define AT_XDMAC_CIE_LIE	BIT(1)	/* End of Linked List Interrupt Enable Bit */
#define AT_XDMAC_CIE_DIE	BIT(2)	/* End of Disable Interrupt Enable Bit */
#define AT_XDMAC_CIE_FIE	BIT(3)	/* End of Flush Interrupt Enable Bit */
#define AT_XDMAC_CIE_RBEIE	BIT(4)	/* Read Bus Error Interrupt Enable Bit */
#define AT_XDMAC_CIE_WBEIE	BIT(5)	/* Write Bus Error Interrupt Enable Bit */
#define AT_XDMAC_CIE_ROIE	BIT(6)	/* Request Overflow Interrupt Enable Bit */
#define AT_XDMAC_CID		0x04	/* Channel Interrupt Disable Register */
#define AT_XDMAC_CID_BID	BIT(0)	/* End of Block Interrupt Disable Bit */
#define AT_XDMAC_CID_LID	BIT(1)	/* End of Linked List Interrupt Disable Bit */
#define AT_XDMAC_CID_DID	BIT(2)	/* End of Disable Interrupt Disable Bit */
#define AT_XDMAC_CID_FID	BIT(3)	/* End of Flush Interrupt Disable Bit */
#define AT_XDMAC_CID_RBEID	BIT(4)	/* Read Bus Error Interrupt Disable Bit */
#define AT_XDMAC_CID_WBEID	BIT(5)	/* Write Bus Error Interrupt Disable Bit */
#define AT_XDMAC_CID_ROID	BIT(6)	/* Request Overflow Interrupt Disable Bit */
#define AT_XDMAC_CIM		0x08	/* Channel Interrupt Mask Register */
#define AT_XDMAC_CIM_BIM	BIT(0)	/* End of Block Interrupt Mask Bit */
#define AT_XDMAC_CIM_LIM	BIT(1)	/* End of Linked List Interrupt Mask Bit */
#define AT_XDMAC_CIM_DIM	BIT(2)	/* End of Disable Interrupt Mask Bit */
#define AT_XDMAC_CIM_FIM	BIT(3)	/* End of Flush Interrupt Mask Bit */
#define AT_XDMAC_CIM_RBEIM	BIT(4)	/* Read Bus Error Interrupt Mask Bit */
#define AT_XDMAC_CIM_WBEIM	BIT(5)	/* Write Bus Error Interrupt Mask Bit */
#define AT_XDMAC_CIM_ROIM	BIT(6)	/* Request Overflow Interrupt Mask Bit */
#define AT_XDMAC_CIS		0x0C	/* Channel Interrupt Status Register */
#define AT_XDMAC_CIS_BIS	BIT(0)	/* End of Block Interrupt Status Bit */
#define AT_XDMAC_CIS_LIS	BIT(1)	/* End of Linked List Interrupt Status Bit */
#define AT_XDMAC_CIS_DIS	BIT(2)	/* End of Disable Interrupt Status Bit */
#define AT_XDMAC_CIS_FIS	BIT(3)	/* End of Flush Interrupt Status Bit */
#define AT_XDMAC_CIS_RBEIS	BIT(4)	/* Read Bus Error Interrupt Status Bit */
#define AT_XDMAC_CIS_WBEIS	BIT(5)	/* Write Bus Error Interrupt Status Bit */
#define AT_XDMAC_CIS_ROIS	BIT(6)	/* Request Overflow Interrupt Status Bit */
#define AT_XDMAC_CSA		0x10	/* Channel Source Address Register */
#define AT_XDMAC_CDA		0x14	/* Channel Destination Address Register */
#define AT_XDMAC_CNDA		0x18	/* Channel Next Descriptor Address Register */
#define AT_XDMAC_CNDA_NDAIF(i)	((i) & 0x1)		/* Channel x Next Descriptor Interface */
#define AT_XDMAC_CNDA_NDA(i)	((i) & 0xfffffffc)	/* Channel x Next Descriptor Address */
#define AT_XDMAC_CNDC		0x1C	/* Channel Next Descriptor Control Register */
#define AT_XDMAC_CNDC_NDE		(0x1 << 0)	/* Channel x Next Descriptor Enable */
#define AT_XDMAC_CNDC_NDSUP		(0x1 << 1)	/* Channel x Next Descriptor Source Update */
#define AT_XDMAC_CNDC_NDDUP		(0x1 << 2)	/* Channel x Next Descriptor Destination Update */
#define AT_XDMAC_CNDC_NDVIEW_NDV0	(0x0 << 3)	/* Channel x Next Descriptor View 0 */
#define AT_XDMAC_CNDC_NDVIEW_NDV1	(0x1 << 3)	/* Channel x Next Descriptor View 1 */
#define AT_XDMAC_CNDC_NDVIEW_NDV2	(0x2 << 3)	/* Channel x Next Descriptor View 2 */
#define AT_XDMAC_CNDC_NDVIEW_NDV3	(0x3 << 3)	/* Channel x Next Descriptor View 3 */
#define AT_XDMAC_CUBC		0x20	/* Channel Microblock Control Register */
#define AT_XDMAC_CBC		0x24	/* Channel Block Control Register */
#define AT_XDMAC_CC		0x28	/* Channel Configuration Register */
#define AT_XDMAC_CC_TYPE	(0x1 << 0)	/* Channel Transfer Type */
#define AT_XDMAC_CC_TYPE_MEM_TRAN	(0x0 << 0)	/* Memory to Memory Transfer */
#define AT_XDMAC_CC_TYPE_PER_TRAN	(0x1 << 0)	/* Peripheral to Memory or Memory to Peripheral Transfer */
#define AT_XDMAC_CC_MBSIZE_MASK		(0x3 << 1)
#define AT_XDMAC_CC_MBSIZE_SINGLE	(0x0 << 1)
#define AT_XDMAC_CC_MBSIZE_FOUR		(0x1 << 1)
#define AT_XDMAC_CC_MBSIZE_EIGHT	(0x2 << 1)
#define AT_XDMAC_CC_MBSIZE_SIXTEEN	(0x3 << 1)
#define AT_XDMAC_CC_DSYNC	(0x1 << 4)	/* Channel Synchronization */
#define AT_XDMAC_CC_DSYNC_PER2MEM	(0x0 << 4)
#define AT_XDMAC_CC_DSYNC_MEM2PER	(0x1 << 4)
#define AT_XDMAC_CC_PROT	(0x1 << 5)	/* Channel Protection */
#define AT_XDMAC_CC_PROT_SEC		(0x0 << 5)
#define AT_XDMAC_CC_PROT_UNSEC		(0x1 << 5)
#define AT_XDMAC_CC_SWREQ	(0x1 << 6)	/* Channel Software Request Trigger */
#define AT_XDMAC_CC_SWREQ_HWR_CONNECTED	(0x0 << 6)
#define AT_XDMAC_CC_SWREQ_SWR_CONNECTED	(0x1 << 6)
#define AT_XDMAC_CC_MEMSET	(0x1 << 7)	/* Channel Fill Block of memory */
#define AT_XDMAC_CC_MEMSET_NORMAL_MODE	(0x0 << 7)
#define AT_XDMAC_CC_MEMSET_HW_MODE	(0x1 << 7)
#define AT_XDMAC_CC_CSIZE(i)	((0x7 & (i)) << 8)	/* Channel Chunk Size */
#define AT_XDMAC_CC_DWIDTH_OFFSET	11
#define AT_XDMAC_CC_DWIDTH_MASK	(0x3 << AT_XDMAC_CC_DWIDTH_OFFSET)
#define AT_XDMAC_CC_DWIDTH(i)	((0x3 & (i)) << AT_XDMAC_CC_DWIDTH_OFFSET)	/* Channel Data Width */
#define AT_XDMAC_CC_DWIDTH_BYTE		0x0
#define AT_XDMAC_CC_DWIDTH_HALFWORD	0x1
#define AT_XDMAC_CC_DWIDTH_WORD		0x2
#define AT_XDMAC_CC_DWIDTH_DWORD	0x3
#define AT_XDMAC_CC_SIF(i)	((0x1 & (i)) << 13)	/* Channel Source Interface Identifier */
#define AT_XDMAC_CC_DIF(i)	((0x1 & (i)) << 14)	/* Channel Destination Interface Identifier */
#define AT_XDMAC_CC_SAM_MASK	(0x3 << 16)	/* Channel Source Addressing Mode */
#define AT_XDMAC_CC_SAM_FIXED_AM	(0x0 << 16)
#define AT_XDMAC_CC_SAM_INCREMENTED_AM	(0x1 << 16)
#define AT_XDMAC_CC_SAM_UBS_AM		(0x2 << 16)
#define AT_XDMAC_CC_SAM_UBS_DS_AM	(0x3 << 16)
#define AT_XDMAC_CC_DAM_MASK	(0x3 << 18)	/* Channel Destination Addressing Mode */
#define AT_XDMAC_CC_DAM_FIXED_AM	(0x0 << 18)
#define AT_XDMAC_CC_DAM_INCREMENTED_AM	(0x1 << 18)
#define AT_XDMAC_CC_DAM_UBS_AM		(0x2 << 18)
#define AT_XDMAC_CC_DAM_UBS_DS_AM	(0x3 << 18)
#define AT_XDMAC_CC_INITD	(0x1 << 21)	/* Channel Initialization Terminated (read only) */
#define AT_XDMAC_CC_INITD_TERMINATED	(0x0 << 21)
#define AT_XDMAC_CC_INITD_IN_PROGRESS	(0x1 << 21)
#define AT_XDMAC_CC_RDIP	(0x1 << 22)	/* Read in Progress (read only) */
#define AT_XDMAC_CC_RDIP_DONE		(0x0 << 22)
#define AT_XDMAC_CC_RDIP_IN_PROGRESS	(0x1 << 22)
#define AT_XDMAC_CC_WRIP	(0x1 << 23)	/* Write in Progress (read only) */
#define AT_XDMAC_CC_WRIP_DONE		(0x0 << 23)
#define AT_XDMAC_CC_WRIP_IN_PROGRESS	(0x1 << 23)
#define AT_XDMAC_CC_PERID(i)	((0x7f & (i)) << 24)	/* Channel Peripheral Identifier */
#define AT_XDMAC_CDS_MSP	0x2C	/* Channel Data Stride Memory Set Pattern */
#define AT_XDMAC_CSUS		0x30	/* Channel Source Microblock Stride */
#define AT_XDMAC_CDUS		0x34	/* Channel Destination Microblock Stride */
#define AT_XDMAC_CHAN_REG_BASE	0x50	/* Channel registers base address */

/* Microblock control members */
#define AT_XDMAC_MBR_UBC_UBLEN_MAX	0xFFFFFFUL	/* Maximum Microblock Length */
#define AT_XDMAC_MBR_UBC_NDE		(0x1 << 24)	/* Next Descriptor Enable */
#define AT_XDMAC_MBR_UBC_NSEN		(0x1 << 25)	/* Next Descriptor Source Update */
#define AT_XDMAC_MBR_UBC_NDEN		(0x1 << 26)	/* Next Descriptor Destination Update */
#define AT_XDMAC_MBR_UBC_NDV0		(0x0 << 27)	/* Next Descriptor View 0 */
#define AT_XDMAC_MBR_UBC_NDV1		(0x1 << 27)	/* Next Descriptor View 1 */
#define AT_XDMAC_MBR_UBC_NDV2		(0x2 << 27)	/* Next Descriptor View 2 */
#define AT_XDMAC_MBR_UBC_NDV3		(0x3 << 27)	/* Next Descriptor View 3 */

#define AT_XDMAC_MAX_CHAN	0x20

#define AT_XDMAC_DMA_BUSWIDTHS \
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
enum atc_status {
	AT_XDMAC_CHAN_IS_CYCLIC = 0,
	AT_XDMAC_CHAN_IS_PAUSED,
};
/* ----- Channels ----- */
struct at_xdmac_chan {
	struct dma_chan			chan;
	void __iomem			*ch_regs;
	u32				mask;		/* Channel Mask */
	u32				cfg[2];		/* Channel Configuration Register */
#define	AT_XDMAC_DEV_TO_MEM_CFG	0	/* Predefined dev to mem channel conf */
#define	AT_XDMAC_MEM_TO_DEV_CFG	1	/* Predefined mem to dev channel conf */
	u8				perid;		/* Peripheral ID */
	u8				perif;		/* Peripheral Interface */
	u8				memif;		/* Memory Interface */
	u32				per_src_addr;
	u32				per_dst_addr;
	u32				save_cc;
	u32				save_cim;
	u32				save_cnda;
	u32				save_cndc;
	unsigned long			status;
	struct tasklet_struct		tasklet;

	spinlock_t			lock;

	struct list_head		xfers_list;
	struct list_head		free_descs_list;
};

/* ----- Controller ----- */
struct at_xdmac {
	struct dma_device	dma;
	void __iomem		*regs;
	int			irq;
	struct clk		*clk;
	u32			save_gim;
	u32			save_gs;
	struct dma_pool		*at_xdmac_desc_pool;
	struct at_xdmac_chan	chan[0];
};
/* ----- Descriptors ----- */

/* Linked List Descriptor */
struct at_xdmac_lld {
	dma_addr_t	mbr_nda;	/* Next Descriptor Member */
	u32		mbr_ubc;	/* Microblock Control Member */
	dma_addr_t	mbr_sa;		/* Source Address Member */
	dma_addr_t	mbr_da;		/* Destination Address Member */
	u32		mbr_cfg;	/* Configuration Register */
	u32		mbr_bc;		/* Block Control Register */
	u32		mbr_ds;		/* Data Stride Register */
	u32		mbr_sus;	/* Source Microblock Stride Register */
	u32		mbr_dus;	/* Destination Microblock Stride Register */
};
struct at_xdmac_desc {
	struct at_xdmac_lld		lld;
	enum dma_transfer_direction	direction;
	struct dma_async_tx_descriptor	tx_dma_desc;
	struct list_head		desc_node;
	/* Following members are only used by the first descriptor */
	bool				active_xfer;
	unsigned int			xfer_size;
	struct list_head		descs_list;
	struct list_head		xfer_node;
};
static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
{
	return atxdmac->regs + (AT_XDMAC_CHAN_REG_BASE + chan_nb * 0x40);
}

#define at_xdmac_read(atxdmac, reg) readl_relaxed((atxdmac)->regs + (reg))
#define at_xdmac_write(atxdmac, reg, value) \
	writel_relaxed((value), (atxdmac)->regs + (reg))

#define at_xdmac_chan_read(atchan, reg) readl_relaxed((atchan)->ch_regs + (reg))
#define at_xdmac_chan_write(atchan, reg, value) writel_relaxed((value), (atchan)->ch_regs + (reg))
static inline struct at_xdmac_chan *to_at_xdmac_chan(struct dma_chan *dchan)
{
	return container_of(dchan, struct at_xdmac_chan, chan);
}

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct at_xdmac *to_at_xdmac(struct dma_device *ddev)
{
	return container_of(ddev, struct at_xdmac, dma);
}

static inline struct at_xdmac_desc *txd_to_at_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct at_xdmac_desc, tx_dma_desc);
}

static inline int at_xdmac_chan_is_cyclic(struct at_xdmac_chan *atchan)
{
	return test_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
}

static inline int at_xdmac_chan_is_paused(struct at_xdmac_chan *atchan)
{
	return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
}
static inline int at_xdmac_csize(u32 maxburst)
{
	int csize;

	csize = ffs(maxburst) - 1;
	if (csize > 4)
		csize = -EINVAL;

	return csize;
}
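/*
 * Example: maxburst is expected to be a power of two, so a maxburst of 16
 * gives csize = ffs(16) - 1 = 4 (the CSIZE encoding for 16-data chunks),
 * while a maxburst of 1 gives csize = 0 (single data). Bursts larger than
 * 16 yield csize > 4 and are rejected with -EINVAL.
 */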
static inline u8 at_xdmac_get_dwidth(u32 cfg)
{
	return (cfg & AT_XDMAC_CC_DWIDTH_MASK) >> AT_XDMAC_CC_DWIDTH_OFFSET;
}

static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");
static bool at_xdmac_chan_is_enabled(struct at_xdmac_chan *atchan)
{
	return at_xdmac_chan_read(atchan, AT_XDMAC_GS) & atchan->mask;
}

static void at_xdmac_off(struct at_xdmac *atxdmac)
{
	at_xdmac_write(atxdmac, AT_XDMAC_GD, -1L);

	/* Wait until all channels are disabled. */
	while (at_xdmac_read(atxdmac, AT_XDMAC_GS))
		cpu_relax();

	at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L);
}
/* Call with lock held. */
static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
				struct at_xdmac_desc *first)
{
	struct at_xdmac	*atxdmac = to_at_xdmac(atchan->chan.device);
	u32		reg;

	dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first);

	if (at_xdmac_chan_is_enabled(atchan))
		return;

	/* Mark the transfer active so we do not try to start it again. */
	first->active_xfer = true;

	/* Tell xdmac where to get the first descriptor. */
	reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys)
	      | AT_XDMAC_CNDA_NDAIF(atchan->memif);
	at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg);

	/*
	 * When doing a non-cyclic transfer, we need to use the next
	 * descriptor view 2 since some fields of the configuration register
	 * depend on the transfer size and the src/dest addresses.
	 */
	if (at_xdmac_chan_is_cyclic(atchan)) {
		reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
		at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);
	} else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3) {
		reg = AT_XDMAC_CNDC_NDVIEW_NDV3;
	} else {
		/*
		 * No need to write the AT_XDMAC_CC reg, it will be done when
		 * the descriptor is fetched.
		 */
		reg = AT_XDMAC_CNDC_NDVIEW_NDV2;
	}

	reg |= AT_XDMAC_CNDC_NDDUP
	       | AT_XDMAC_CNDC_NDSUP
	       | AT_XDMAC_CNDC_NDE;
	at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, reg);

	dev_vdbg(chan2dev(&atchan->chan),
		 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
		 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));

	at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff);
	reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE | AT_XDMAC_CIE_ROIE;
	/*
	 * There is no end of list when doing cyclic DMA, so we need to get
	 * an interrupt after each period.
	 */
	if (at_xdmac_chan_is_cyclic(atchan))
		at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
				    reg | AT_XDMAC_CIE_BIE);
	else
		at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
				    reg | AT_XDMAC_CIE_LIE);
	at_xdmac_write(atxdmac, AT_XDMAC_GIE, atchan->mask);
	dev_vdbg(chan2dev(&atchan->chan),
		 "%s: enable channel (0x%08x)\n", __func__, atchan->mask);
	wmb();
	at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);

	dev_vdbg(chan2dev(&atchan->chan),
		 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
		 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
}
static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_xdmac_desc	*desc = txd_to_at_desc(tx);
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(tx->chan);
	dma_cookie_t		cookie;

	spin_lock_bh(&atchan->lock);
	cookie = dma_cookie_assign(tx);

	dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
		 __func__, atchan, desc);
	list_add_tail(&desc->xfer_node, &atchan->xfers_list);
	if (list_is_singular(&atchan->xfers_list))
		at_xdmac_start_xfer(atchan, desc);

	spin_unlock_bh(&atchan->lock);
	return cookie;
}
static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
						 gfp_t gfp_flags)
{
	struct at_xdmac_desc	*desc;
	struct at_xdmac		*atxdmac = to_at_xdmac(chan->device);
	dma_addr_t		phys;

	desc = dma_pool_alloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
	if (desc) {
		memset(desc, 0, sizeof(*desc));
		INIT_LIST_HEAD(&desc->descs_list);
		dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan);
		desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit;
		desc->tx_dma_desc.phys = phys;
	}

	return desc;
}
/* Call must be protected by lock. */
static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
{
	struct at_xdmac_desc *desc;

	if (list_empty(&atchan->free_descs_list)) {
		desc = at_xdmac_alloc_desc(&atchan->chan, GFP_NOWAIT);
	} else {
		desc = list_first_entry(&atchan->free_descs_list,
					struct at_xdmac_desc, desc_node);
		list_del(&desc->desc_node);
		desc->active_xfer = false;
	}

	return desc;
}
static void at_xdmac_queue_desc(struct dma_chan *chan,
				struct at_xdmac_desc *prev,
				struct at_xdmac_desc *desc)
{
	if (!prev || !desc)
		return;

	prev->lld.mbr_nda = desc->tx_dma_desc.phys;
	prev->lld.mbr_ubc |= AT_XDMAC_MBR_UBC_NDE;

	dev_dbg(chan2dev(chan), "%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
		__func__, prev, &prev->lld.mbr_nda);
}
static inline void at_xdmac_increment_block_count(struct dma_chan *chan,
						  struct at_xdmac_desc *desc)
{
	if (!desc)
		return;

	desc->lld.mbr_bc++;
	dev_dbg(chan2dev(chan),
		"%s: incrementing the block count of the desc 0x%p\n",
		__func__, desc);
}
static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec,
				       struct of_dma *of_dma)
{
	struct at_xdmac		*atxdmac = of_dma->of_dma_data;
	struct at_xdmac_chan	*atchan;
	struct dma_chan		*chan;
	struct device		*dev = atxdmac->dma.dev;

	if (dma_spec->args_count != 1) {
		dev_err(dev, "dma phandle args: bad number of args\n");
		return NULL;
	}

	chan = dma_get_any_slave_channel(&atxdmac->dma);
	if (!chan) {
		dev_err(dev, "can't get a dma channel\n");
		return NULL;
	}

	atchan = to_at_xdmac_chan(chan);
	atchan->memif = AT91_XDMAC_DT_GET_MEM_IF(dma_spec->args[0]);
	atchan->perif = AT91_XDMAC_DT_GET_PER_IF(dma_spec->args[0]);
	atchan->perid = AT91_XDMAC_DT_GET_PERID(dma_spec->args[0]);
	dev_dbg(dev, "chan dt cfg: memif=%u perif=%u perid=%u\n",
		atchan->memif, atchan->perif, atchan->perid);

	return chan;
}
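/*
 * Illustrative device-tree usage of this xlate callback; the single cell is
 * built with the macros from dt-bindings/dma/at91.h, and the peripheral ID
 * (17) and interface numbers below are placeholders, not values mandated by
 * this driver:
 *
 *	uart0: serial@f8004000 {
 *		...
 *		dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0)
 *			| AT91_XDMAC_DT_PER_IF(1)
 *			| AT91_XDMAC_DT_PERID(17))>;
 *		dma-names = "tx";
 *	};
 */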
static int at_xdmac_set_slave_config(struct dma_chan *chan,
				     struct dma_slave_config *sconfig)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	u8			dwidth;
	int			csize;

	atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] =
		AT91_XDMAC_DT_PERID(atchan->perid)
		| AT_XDMAC_CC_DAM_INCREMENTED_AM
		| AT_XDMAC_CC_SAM_FIXED_AM
		| AT_XDMAC_CC_DIF(atchan->memif)
		| AT_XDMAC_CC_SIF(atchan->perif)
		| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
		| AT_XDMAC_CC_DSYNC_PER2MEM
		| AT_XDMAC_CC_MBSIZE_SIXTEEN
		| AT_XDMAC_CC_TYPE_PER_TRAN;
	csize = at_xdmac_csize(sconfig->src_maxburst);
	if (csize < 0) {
		dev_err(chan2dev(chan), "invalid src maxburst value\n");
		return -EINVAL;
	}
	atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_CSIZE(csize);
	dwidth = ffs(sconfig->src_addr_width) - 1;
	atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth);

	atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] =
		AT91_XDMAC_DT_PERID(atchan->perid)
		| AT_XDMAC_CC_DAM_FIXED_AM
		| AT_XDMAC_CC_SAM_INCREMENTED_AM
		| AT_XDMAC_CC_DIF(atchan->perif)
		| AT_XDMAC_CC_SIF(atchan->memif)
		| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
		| AT_XDMAC_CC_DSYNC_MEM2PER
		| AT_XDMAC_CC_MBSIZE_SIXTEEN
		| AT_XDMAC_CC_TYPE_PER_TRAN;
	csize = at_xdmac_csize(sconfig->dst_maxburst);
	if (csize < 0) {
		dev_err(chan2dev(chan), "invalid dst maxburst value\n");
		return -EINVAL;
	}
	atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_CSIZE(csize);
	dwidth = ffs(sconfig->dst_addr_width) - 1;
	atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth);

	/* Src and dst addr are needed to configure the link list descriptor. */
	atchan->per_src_addr = sconfig->src_addr;
	atchan->per_dst_addr = sconfig->dst_addr;

	dev_dbg(chan2dev(chan),
		"%s: cfg[dev2mem]=0x%08x, cfg[mem2dev]=0x%08x, per_src_addr=0x%08x, per_dst_addr=0x%08x\n",
		__func__, atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG],
		atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG],
		atchan->per_src_addr, atchan->per_dst_addr);

	return 0;
}
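/*
 * Minimal sketch of how a client driver reaches this function through the
 * generic dmaengine API (the FIFO address, width and burst below are
 * placeholders, not values this driver mandates):
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys_addr,	// peripheral FIFO (assumed)
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 16,
 *	};
 *	dmaengine_slave_config(chan, &cfg);	// ends up in this callback
 */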
static struct dma_async_tx_descriptor *
at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		       unsigned int sg_len, enum dma_transfer_direction direction,
		       unsigned long flags, void *context)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc	*first = NULL, *prev = NULL;
	struct scatterlist	*sg;
	int			i;
	unsigned int		xfer_size = 0;

	if (!sgl)
		return NULL;

	if (!is_slave_direction(direction)) {
		dev_err(chan2dev(chan), "invalid DMA direction\n");
		return NULL;
	}

	dev_dbg(chan2dev(chan), "%s: sg_len=%d, dir=%s, flags=0x%lx\n",
		__func__, sg_len,
		direction == DMA_MEM_TO_DEV ? "to device" : "from device",
		flags);

	/* Protect the dma_sconfig field that can be modified by set_slave_config. */
	spin_lock_bh(&atchan->lock);

	/* Prepare descriptors. */
	for_each_sg(sgl, sg, sg_len, i) {
		struct at_xdmac_desc	*desc = NULL;
		u32			len, mem, dwidth, fixed_dwidth;

		len = sg_dma_len(sg);
		mem = sg_dma_address(sg);
		if (unlikely(!len)) {
			dev_err(chan2dev(chan), "sg data length is zero\n");
			spin_unlock_bh(&atchan->lock);
			return NULL;
		}
		dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
			__func__, i, len, mem);

		desc = at_xdmac_get_desc(atchan);
		if (!desc) {
			dev_err(chan2dev(chan), "can't get descriptor\n");
			if (first)
				list_splice_init(&first->descs_list, &atchan->free_descs_list);
			spin_unlock_bh(&atchan->lock);
			return NULL;
		}

		/* Linked list descriptor setup. */
		if (direction == DMA_DEV_TO_MEM) {
			desc->lld.mbr_sa = atchan->per_src_addr;
			desc->lld.mbr_da = mem;
			desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
		} else {
			desc->lld.mbr_sa = mem;
			desc->lld.mbr_da = atchan->per_dst_addr;
			desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
		}
		dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
		fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
			       ? at_xdmac_get_dwidth(desc->lld.mbr_cfg)
			       : AT_XDMAC_CC_DWIDTH_BYTE;
		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2	/* next descriptor view */
			| AT_XDMAC_MBR_UBC_NDEN			/* next descriptor dst parameter update */
			| AT_XDMAC_MBR_UBC_NSEN			/* next descriptor src parameter update */
			| (len >> fixed_dwidth);		/* microblock length */
		dev_dbg(chan2dev(chan),
			"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
			__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);

		/* Chain lld. */
		if (prev)
			at_xdmac_queue_desc(chan, prev, desc);

		prev = desc;
		if (!first)
			first = desc;

		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
			__func__, desc, first);
		list_add_tail(&desc->desc_node, &first->descs_list);
		xfer_size += len;
	}

	spin_unlock_bh(&atchan->lock);

	first->tx_dma_desc.flags = flags;
	first->xfer_size = xfer_size;
	first->direction = direction;

	return &first->tx_dma_desc;
}
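/*
 * Sketch of typical client-side use of the slave_sg path above (error
 * handling elided; 'sgt' is an assumed, already DMA-mapped scatterlist and
 * my_xfer_done() a hypothetical completion callback):
 *
 *	struct dma_async_tx_descriptor *txd;
 *
 *	txd = dmaengine_prep_slave_sg(chan, sgt->sgl, sgt->nents,
 *				      DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	txd->callback = my_xfer_done;
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */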
static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
			 size_t buf_len, size_t period_len,
			 enum dma_transfer_direction direction,
			 unsigned long flags)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc	*first = NULL, *prev = NULL;
	unsigned int		periods = buf_len / period_len;
	int			i;

	dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
		__func__, &buf_addr, buf_len, period_len,
		direction == DMA_MEM_TO_DEV ? "mem2per" : "per2mem", flags);

	if (!is_slave_direction(direction)) {
		dev_err(chan2dev(chan), "invalid DMA direction\n");
		return NULL;
	}

	if (test_and_set_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status)) {
		dev_err(chan2dev(chan), "channel currently used\n");
		return NULL;
	}

	for (i = 0; i < periods; i++) {
		struct at_xdmac_desc	*desc = NULL;

		spin_lock_bh(&atchan->lock);
		desc = at_xdmac_get_desc(atchan);
		if (!desc) {
			dev_err(chan2dev(chan), "can't get descriptor\n");
			if (first)
				list_splice_init(&first->descs_list, &atchan->free_descs_list);
			spin_unlock_bh(&atchan->lock);
			return NULL;
		}
		spin_unlock_bh(&atchan->lock);
		dev_dbg(chan2dev(chan),
			"%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
			__func__, desc, &desc->tx_dma_desc.phys);

		if (direction == DMA_DEV_TO_MEM) {
			desc->lld.mbr_sa = atchan->per_src_addr;
			desc->lld.mbr_da = buf_addr + i * period_len;
			desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
		} else {
			desc->lld.mbr_sa = buf_addr + i * period_len;
			desc->lld.mbr_da = atchan->per_dst_addr;
			desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
		}
		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
			| AT_XDMAC_MBR_UBC_NDEN
			| AT_XDMAC_MBR_UBC_NSEN
			| period_len >> at_xdmac_get_dwidth(desc->lld.mbr_cfg);

		dev_dbg(chan2dev(chan),
			"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
			__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);

		/* Chain lld. */
		if (prev)
			at_xdmac_queue_desc(chan, prev, desc);

		prev = desc;
		if (!first)
			first = desc;

		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
			__func__, desc, first);
		list_add_tail(&desc->desc_node, &first->descs_list);
	}

	/* Loop back from the last descriptor to the first one. */
	prev->lld.mbr_nda = first->tx_dma_desc.phys;
	dev_dbg(chan2dev(chan),
		"%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
		__func__, prev, &prev->lld.mbr_nda);
	first->tx_dma_desc.flags = flags;
	first->xfer_size = buf_len;
	first->direction = direction;

	return &first->tx_dma_desc;
}
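/*
 * Cyclic transfers are the typical audio use case. A hedged sketch, where
 * buf_phys points to an assumed DMA-mapped ring buffer of four periods and
 * my_period_elapsed() is a hypothetical callback invoked once per period
 * (thanks to the block interrupt enabled in at_xdmac_start_xfer):
 *
 *	txd = dmaengine_prep_dma_cyclic(chan, buf_phys, 4 * period_bytes,
 *					period_bytes, DMA_DEV_TO_MEM,
 *					DMA_PREP_INTERRUPT);
 *	txd->callback = my_period_elapsed;
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */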
static inline u32 at_xdmac_align_width(struct dma_chan *chan, dma_addr_t addr)
{
	u32 width;

	/*
	 * Check address alignment to select the greater data width we
	 * can use.
	 *
	 * Some XDMAC implementations don't provide dword transfer, in
	 * this case selecting dword has the same behavior as
	 * selecting word transfers.
	 */
	if (!(addr & 7)) {
		width = AT_XDMAC_CC_DWIDTH_DWORD;
		dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__);
	} else if (!(addr & 3)) {
		width = AT_XDMAC_CC_DWIDTH_WORD;
		dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__);
	} else if (!(addr & 1)) {
		width = AT_XDMAC_CC_DWIDTH_HALFWORD;
		dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__);
	} else {
		width = AT_XDMAC_CC_DWIDTH_BYTE;
		dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__);
	}

	return width;
}
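/*
 * Example: an address (or length) of 0x1006 has bit 1 set but bit 0 clear,
 * so the widest usable access is a halfword; 0x1008 is 8-byte aligned and
 * allows double-word accesses. OR-ing src, dst and length before calling
 * this helper therefore picks the widest width valid for all of them.
 */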
static struct at_xdmac_desc *
at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
				struct at_xdmac_chan *atchan,
				struct at_xdmac_desc *prev,
				dma_addr_t src, dma_addr_t dst,
				struct dma_interleaved_template *xt,
				struct data_chunk *chunk)
{
	struct at_xdmac_desc	*desc;
	unsigned long		flags;
	size_t			ublen;
	u32			dwidth;
	/*
	 * WARNING: The channel configuration is set here since there is no
	 * dmaengine_slave_config call in this case. Moreover, we don't know
	 * the direction, which means we can't dynamically set the source and
	 * dest interfaces, so we have to use the same one. Only interface 0
	 * allows EBI access. Hopefully we can access DDR through both ports
	 * (at least on SAMA5D4x), so we can use the same interface for source
	 * and dest, which solves the fact that we don't know the direction.
	 */
	u32			chan_cc = AT_XDMAC_CC_DIF(0)
					| AT_XDMAC_CC_SIF(0)
					| AT_XDMAC_CC_MBSIZE_SIXTEEN
					| AT_XDMAC_CC_TYPE_MEM_TRAN;

	dwidth = at_xdmac_align_width(chan, src | dst | chunk->size);
	if (chunk->size >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
		dev_dbg(chan2dev(chan),
			"%s: chunk too big (%zu, max size %lu)...\n",
			__func__, chunk->size,
			AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth);
		return NULL;
	}

	if (prev)
		dev_dbg(chan2dev(chan),
			"Adding items at the end of desc 0x%p\n", prev);

	if (xt->src_inc) {
		if (xt->src_sgl)
			chan_cc |= AT_XDMAC_CC_SAM_UBS_DS_AM;
		else
			chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM;
	}

	if (xt->dst_inc) {
		if (xt->dst_sgl)
			chan_cc |= AT_XDMAC_CC_DAM_UBS_DS_AM;
		else
			chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM;
	}

	spin_lock_irqsave(&atchan->lock, flags);
	desc = at_xdmac_get_desc(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
	if (!desc) {
		dev_err(chan2dev(chan), "can't get descriptor\n");
		return NULL;
	}

	chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);

	ublen = chunk->size >> dwidth;

	desc->lld.mbr_sa = src;
	desc->lld.mbr_da = dst;

	if (xt->src_inc && xt->src_sgl) {
		if (chunk->src_icg)
			desc->lld.mbr_sus = chunk->src_icg;
		else
			desc->lld.mbr_sus = chunk->icg;
	}

	if (xt->dst_inc && xt->dst_sgl) {
		if (chunk->dst_icg)
			desc->lld.mbr_dus = chunk->dst_icg;
		else
			desc->lld.mbr_dus = chunk->icg;
	}

	desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
		| AT_XDMAC_MBR_UBC_NDEN
		| AT_XDMAC_MBR_UBC_NSEN
		| ublen;
	desc->lld.mbr_cfg = chan_cc;

	dev_dbg(chan2dev(chan),
		"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
		__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da,
		desc->lld.mbr_ubc, desc->lld.mbr_cfg);

	/* Chain lld. */
	if (prev)
		at_xdmac_queue_desc(chan, prev, desc);

	return desc;
}
static size_t at_xdmac_get_icg(bool inc, bool sgl, size_t icg, size_t dir_icg)
{
	if (inc) {
		if (dir_icg)
			return dir_icg;
		else if (sgl)
			return icg;
	}

	return 0;
}
static size_t at_xdmac_get_dst_icg(struct dma_interleaved_template *xt,
				   struct data_chunk *chunk)
{
	return at_xdmac_get_icg(xt->dst_inc, xt->dst_sgl,
				chunk->icg, chunk->dst_icg);
}

static size_t at_xdmac_get_src_icg(struct dma_interleaved_template *xt,
				   struct data_chunk *chunk)
{
	return at_xdmac_get_icg(xt->src_inc, xt->src_sgl,
				chunk->icg, chunk->src_icg);
}
static struct dma_async_tx_descriptor *
at_xdmac_prep_interleaved(struct dma_chan *chan,
			  struct dma_interleaved_template *xt,
			  unsigned long flags)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc	*prev = NULL, *first = NULL;
	struct data_chunk	*chunk, *prev_chunk = NULL;
	dma_addr_t		dst_addr, src_addr;
	size_t			dst_skip, src_skip, len = 0;
	size_t			prev_dst_icg = 0, prev_src_icg = 0;
	int			i;

	if (!xt || (xt->numf != 1) || (xt->dir != DMA_MEM_TO_MEM))
		return NULL;

	dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%zu, frame_size=%zu, flags=0x%lx\n",
		__func__, &xt->src_start, &xt->dst_start, xt->numf,
		xt->frame_size, flags);

	src_addr = xt->src_start;
	dst_addr = xt->dst_start;

	for (i = 0; i < xt->frame_size; i++) {
		struct at_xdmac_desc	*desc;
		size_t			src_icg, dst_icg;

		chunk = xt->sgl + i;

		dst_icg = at_xdmac_get_dst_icg(xt, chunk);
		src_icg = at_xdmac_get_src_icg(xt, chunk);

		src_skip = chunk->size + src_icg;
		dst_skip = chunk->size + dst_icg;

		dev_dbg(chan2dev(chan),
			"%s: chunk size=%zu, src icg=%zu, dst icg=%zu\n",
			__func__, chunk->size, src_icg, dst_icg);

		/*
		 * Handle the case where we just have the same
		 * transfer to set up: we can just increase the
		 * block number and reuse the same descriptor.
		 */
		if (prev_chunk && prev &&
		    (prev_chunk->size == chunk->size) &&
		    (prev_src_icg == src_icg) &&
		    (prev_dst_icg == dst_icg)) {
			dev_dbg(chan2dev(chan),
				"%s: same configuration as the previous chunk, merging the descriptors...\n",
				__func__);
			at_xdmac_increment_block_count(chan, prev);
			continue;
		}

		desc = at_xdmac_interleaved_queue_desc(chan, atchan,
						       prev,
						       src_addr, dst_addr,
						       xt, chunk);
		if (!desc) {
			list_splice_init(&first->descs_list,
					 &atchan->free_descs_list);
			return NULL;
		}

		if (!first)
			first = desc;

		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
			__func__, desc, first);
		list_add_tail(&desc->desc_node, &first->descs_list);

		if (xt->src_inc)
			src_addr += src_skip;

		if (xt->dst_inc)
			dst_addr += dst_skip;

		len += chunk->size;
		prev_chunk = chunk;
		prev_dst_icg = dst_icg;
		prev_src_icg = src_icg;
		prev = desc;
	}

	first->tx_dma_desc.cookie = -EBUSY;
	first->tx_dma_desc.flags = flags;
	first->xfer_size = len;

	return &first->tx_dma_desc;
}
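/*
 * Sketch of a caller-side template accepted by the function above (one
 * frame of two chunks; the sizes, gaps and addresses are illustrative):
 *
 *	struct dma_interleaved_template *xt;
 *
 *	xt = kzalloc(sizeof(*xt) + 2 * sizeof(struct data_chunk), GFP_KERNEL);
 *	xt->src_start = src_phys;
 *	xt->dst_start = dst_phys;
 *	xt->dir = DMA_MEM_TO_MEM;
 *	xt->numf = 1;
 *	xt->frame_size = 2;
 *	xt->src_inc = xt->dst_inc = true;
 *	xt->sgl[0].size = 64;	xt->sgl[0].icg = 16;
 *	xt->sgl[1].size = 64;	xt->sgl[1].icg = 16;
 *	txd = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
 *
 * Because both chunks are identical here, the merge path above would fold
 * them into a single descriptor with an incremented block count.
 */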
static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
			 size_t len, unsigned long flags)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc	*first = NULL, *prev = NULL;
	size_t			remaining_size = len, xfer_size = 0, ublen;
	dma_addr_t		src_addr = src, dst_addr = dest;
	u32			dwidth;
	/*
	 * WARNING: We don't know the direction, which means we can't
	 * dynamically set the source and dest interfaces, so we have to use
	 * the same one. Only interface 0 allows EBI access. Hopefully we can
	 * access DDR through both ports (at least on SAMA5D4x), so we can use
	 * the same interface for source and dest, which solves the fact that
	 * we don't know the direction.
	 */
	u32			chan_cc = AT_XDMAC_CC_DAM_INCREMENTED_AM
					| AT_XDMAC_CC_SAM_INCREMENTED_AM
					| AT_XDMAC_CC_DIF(0)
					| AT_XDMAC_CC_SIF(0)
					| AT_XDMAC_CC_MBSIZE_SIXTEEN
					| AT_XDMAC_CC_TYPE_MEM_TRAN;

	dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
		__func__, &src, &dest, len, flags);

	if (unlikely(!len))
		return NULL;

	dwidth = at_xdmac_align_width(chan, src_addr | dst_addr);

	/* Prepare descriptors. */
	while (remaining_size) {
		struct at_xdmac_desc	*desc = NULL;

		dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);

		spin_lock_bh(&atchan->lock);
		desc = at_xdmac_get_desc(atchan);
		spin_unlock_bh(&atchan->lock);
		if (!desc) {
			dev_err(chan2dev(chan), "can't get descriptor\n");
			if (first)
				list_splice_init(&first->descs_list, &atchan->free_descs_list);
			return NULL;
		}

		/* Update src and dest addresses. */
		src_addr += xfer_size;
		dst_addr += xfer_size;

		if (remaining_size >= AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)
			xfer_size = AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth;
		else
			xfer_size = remaining_size;

		dev_dbg(chan2dev(chan), "%s: xfer_size=%zu\n", __func__, xfer_size);

		/* Check remaining length and change data width if needed. */
		dwidth = at_xdmac_align_width(chan,
					      src_addr | dst_addr | xfer_size);
		chan_cc &= ~AT_XDMAC_CC_DWIDTH_MASK;
		chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);

		ublen = xfer_size >> dwidth;
		remaining_size -= xfer_size;

		desc->lld.mbr_sa = src_addr;
		desc->lld.mbr_da = dst_addr;
		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2
			| AT_XDMAC_MBR_UBC_NDEN
			| AT_XDMAC_MBR_UBC_NSEN
			| ublen;
		desc->lld.mbr_cfg = chan_cc;

		dev_dbg(chan2dev(chan),
			"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
			__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg);

		/* Chain lld. */
		if (prev)
			at_xdmac_queue_desc(chan, prev, desc);

		prev = desc;
		if (!first)
			first = desc;

		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
			__func__, desc, first);
		list_add_tail(&desc->desc_node, &first->descs_list);
	}

	first->tx_dma_desc.flags = flags;
	first->xfer_size = len;

	return &first->tx_dma_desc;
}
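/*
 * Client-side sketch of the memcpy offload path (src_phys/dst_phys are
 * assumed DMA-mapped buffers; error handling elided):
 *
 *	dma_cookie_t cookie;
 *
 *	txd = dmaengine_prep_dma_memcpy(chan, dst_phys, src_phys, len,
 *					DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 *	...
 *	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 */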
static enum dma_status
at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
	struct at_xdmac_desc	*desc, *_desc;
	struct list_head	*descs_list;
	enum dma_status		ret;
	int			residue;
	u32			cur_nda, mask, value;
	u8			dwidth = 0;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (!txstate)
		return ret;

	spin_lock_bh(&atchan->lock);

	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);

	/*
	 * If the transfer has not been started yet, there is no need to
	 * compute the residue: it is simply the transfer length.
	 */
	if (!desc->active_xfer) {
		dma_set_residue(txstate, desc->xfer_size);
		spin_unlock_bh(&atchan->lock);
		return ret;
	}

	residue = desc->xfer_size;
	/*
	 * Flush FIFO: only relevant when the transfer is source peripheral
	 * synchronized.
	 */
	mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
	value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
	if ((desc->lld.mbr_cfg & mask) == value) {
		at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
		while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
			cpu_relax();
	}

	cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
	/*
	 * Remove the size of all microblocks already transferred and of the
	 * current one. Then add back the remaining size of the current
	 * microblock.
	 */
	descs_list = &desc->descs_list;
	list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
		dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
		residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
		if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
			break;
	}
	residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth;

	spin_unlock_bh(&atchan->lock);

	dma_set_residue(txstate, residue);

	dev_dbg(chan2dev(chan),
		"%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
		__func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);

	return ret;
}
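/*
 * Worked example of the residue computation above, assuming a transfer of
 * three microblocks of 0x400 bytes each (0x100 words, dwidth = 2), with the
 * channel currently executing the second microblock (so CNDA points at the
 * third descriptor) and CUBC reading 0x80 remaining words:
 *
 *	residue = 0xc00 - 0x400 - 0x400 + (0x80 << 2) = 0x600 bytes
 *
 * i.e. the microblocks already walked are subtracted and the untransferred
 * part of the current one is added back.
 */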
/* Call must be protected by lock. */
static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
				 struct at_xdmac_desc *desc)
{
	dev_dbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);

	/*
	 * Remove the transfer from the transfer list, then move the transfer
	 * descriptors into the free descriptors list.
	 */
	list_del(&desc->xfer_node);
	list_splice_init(&desc->descs_list, &atchan->free_descs_list);
}
static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
{
	struct at_xdmac_desc *desc;

	spin_lock_bh(&atchan->lock);

	/*
	 * If the channel is enabled, do nothing: advance_work will be
	 * triggered again after the interrupt.
	 */
	if (!at_xdmac_chan_is_enabled(atchan) && !list_empty(&atchan->xfers_list)) {
		desc = list_first_entry(&atchan->xfers_list,
					struct at_xdmac_desc,
					xfer_node);
		dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
		if (!desc->active_xfer)
			at_xdmac_start_xfer(atchan, desc);
	}

	spin_unlock_bh(&atchan->lock);
}
static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
{
	struct at_xdmac_desc		*desc;
	struct dma_async_tx_descriptor	*txd;

	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
	txd = &desc->tx_dma_desc;

	if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT))
		txd->callback(txd->callback_param);
}
static void at_xdmac_tasklet(unsigned long data)
{
	struct at_xdmac_chan	*atchan = (struct at_xdmac_chan *)data;
	struct at_xdmac_desc	*desc;
	u32			error_mask;

	dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n",
		__func__, atchan->status);

	error_mask = AT_XDMAC_CIS_RBEIS
		     | AT_XDMAC_CIS_WBEIS
		     | AT_XDMAC_CIS_ROIS;

	if (at_xdmac_chan_is_cyclic(atchan)) {
		at_xdmac_handle_cyclic(atchan);
	} else if ((atchan->status & AT_XDMAC_CIS_LIS)
		   || (atchan->status & error_mask)) {
		struct dma_async_tx_descriptor *txd;

		if (atchan->status & AT_XDMAC_CIS_RBEIS)
			dev_err(chan2dev(&atchan->chan), "read bus error!!!");
		if (atchan->status & AT_XDMAC_CIS_WBEIS)
			dev_err(chan2dev(&atchan->chan), "write bus error!!!");
		if (atchan->status & AT_XDMAC_CIS_ROIS)
			dev_err(chan2dev(&atchan->chan), "request overflow error!!!");

		spin_lock_bh(&atchan->lock);
		desc = list_first_entry(&atchan->xfers_list,
					struct at_xdmac_desc,
					xfer_node);
		dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
		BUG_ON(!desc->active_xfer);

		txd = &desc->tx_dma_desc;

		at_xdmac_remove_xfer(atchan, desc);
		spin_unlock_bh(&atchan->lock);

		if (!at_xdmac_chan_is_cyclic(atchan)) {
			dma_cookie_complete(txd);
			if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT))
				txd->callback(txd->callback_param);
		}

		dma_run_dependencies(txd);

		at_xdmac_advance_work(atchan);
	}
}
static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
{
	struct at_xdmac		*atxdmac = (struct at_xdmac *)dev_id;
	struct at_xdmac_chan	*atchan;
	u32			imr, status, pending;
	u32			chan_imr, chan_status;
	int			i, ret = IRQ_NONE;

	do {
		imr = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
		status = at_xdmac_read(atxdmac, AT_XDMAC_GIS);
		pending = status & imr;

		dev_vdbg(atxdmac->dma.dev,
			 "%s: status=0x%08x, imr=0x%08x, pending=0x%08x\n",
			 __func__, status, imr, pending);

		if (!pending)
			break;

		/* We have to find which channel has generated the interrupt. */
		for (i = 0; i < atxdmac->dma.chancnt; i++) {
			if (!((1 << i) & pending))
				continue;

			atchan = &atxdmac->chan[i];
			chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
			chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
			atchan->status = chan_status & chan_imr;
			dev_vdbg(atxdmac->dma.dev,
				 "%s: chan%d: imr=0x%x, status=0x%x\n",
				 __func__, i, chan_imr, chan_status);
			dev_vdbg(chan2dev(&atchan->chan),
				 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
				 __func__,
				 at_xdmac_chan_read(atchan, AT_XDMAC_CC),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));

			if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
				at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);

			tasklet_schedule(&atchan->tasklet);
			ret = IRQ_HANDLED;
		}
	} while (pending);

	return ret;
}
static void at_xdmac_issue_pending(struct dma_chan *chan)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);

	dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__);

	if (!at_xdmac_chan_is_cyclic(atchan))
		at_xdmac_advance_work(atchan);
}
static int at_xdmac_device_config(struct dma_chan *chan,
				  struct dma_slave_config *config)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	int			ret;

	dev_dbg(chan2dev(chan), "%s\n", __func__);

	spin_lock_bh(&atchan->lock);
	ret = at_xdmac_set_slave_config(chan, config);
	spin_unlock_bh(&atchan->lock);

	return ret;
}
static int at_xdmac_device_pause(struct dma_chan *chan)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);

	dev_dbg(chan2dev(chan), "%s\n", __func__);

	if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
		return 0;

	spin_lock_bh(&atchan->lock);
	at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
	while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
	       & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
		cpu_relax();
	spin_unlock_bh(&atchan->lock);

	return 0;
}
static int at_xdmac_device_resume(struct dma_chan *chan)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);

	dev_dbg(chan2dev(chan), "%s\n", __func__);

	spin_lock_bh(&atchan->lock);
	if (!at_xdmac_chan_is_paused(atchan)) {
		spin_unlock_bh(&atchan->lock);
		return 0;
	}

	at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
	spin_unlock_bh(&atchan->lock);

	return 0;
}
static int at_xdmac_device_terminate_all(struct dma_chan *chan)
{
	struct at_xdmac_desc	*desc, *_desc;
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);

	dev_dbg(chan2dev(chan), "%s\n", __func__);

	spin_lock_bh(&atchan->lock);
	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
	while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
		cpu_relax();

	/* Cancel all pending transfers. */
	list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
		at_xdmac_remove_xfer(atchan, desc);

	clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
	spin_unlock_bh(&atchan->lock);

	return 0;
}
static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc	*desc;
	int			i;
	int			ret = 0;

	spin_lock_bh(&atchan->lock);

	if (at_xdmac_chan_is_enabled(atchan)) {
		dev_err(chan2dev(chan),
			"can't allocate channel resources (channel enabled)\n");
		ret = -EIO;
		goto spin_unlock;
	}

	if (!list_empty(&atchan->free_descs_list)) {
		dev_err(chan2dev(chan),
			"can't allocate channel resources (channel not free from a previous use)\n");
		ret = -EIO;
		goto spin_unlock;
	}

	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = at_xdmac_alloc_desc(chan, GFP_ATOMIC);
		if (!desc) {
			dev_warn(chan2dev(chan),
				 "only %d descriptors have been allocated\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &atchan->free_descs_list);
	}

	dma_cookie_init(chan);

	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);

	ret = i;

spin_unlock:
	spin_unlock_bh(&atchan->lock);
	return ret;
}
static void at_xdmac_free_chan_resources(struct dma_chan *chan)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac		*atxdmac = to_at_xdmac(chan->device);
	struct at_xdmac_desc	*desc, *_desc;

	list_for_each_entry_safe(desc, _desc, &atchan->free_descs_list, desc_node) {
		dev_dbg(chan2dev(chan), "%s: freeing descriptor %p\n", __func__, desc);
		list_del(&desc->desc_node);
		dma_pool_free(atxdmac->at_xdmac_desc_pool, desc, desc->tx_dma_desc.phys);
	}
}
#ifdef CONFIG_PM
static int atmel_xdmac_prepare(struct device *dev)
{
	struct platform_device	*pdev = to_platform_device(dev);
	struct at_xdmac		*atxdmac = platform_get_drvdata(pdev);
	struct dma_chan		*chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
		struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);

		/* Wait for transfer completion, except in cyclic case. */
		if (at_xdmac_chan_is_enabled(atchan) && !at_xdmac_chan_is_cyclic(atchan))
			return -EAGAIN;
	}
	return 0;
}
#else
#	define atmel_xdmac_prepare NULL
#endif
#ifdef CONFIG_PM_SLEEP
static int atmel_xdmac_suspend(struct device *dev)
{
	struct platform_device	*pdev = to_platform_device(dev);
	struct at_xdmac		*atxdmac = platform_get_drvdata(pdev);
	struct dma_chan		*chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
		struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);

		atchan->save_cc = at_xdmac_chan_read(atchan, AT_XDMAC_CC);
		if (at_xdmac_chan_is_cyclic(atchan)) {
			if (!at_xdmac_chan_is_paused(atchan))
				at_xdmac_device_pause(chan);
			atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
			atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA);
			atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC);
		}
	}
	atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
	atxdmac->save_gs = at_xdmac_read(atxdmac, AT_XDMAC_GS);

	at_xdmac_off(atxdmac);
	clk_disable_unprepare(atxdmac->clk);
	return 0;
}
static int atmel_xdmac_resume(struct device *dev)
{
	struct platform_device	*pdev = to_platform_device(dev);
	struct at_xdmac		*atxdmac = platform_get_drvdata(pdev);
	struct at_xdmac_chan	*atchan;
	struct dma_chan		*chan, *_chan;
	int			i;

	clk_prepare_enable(atxdmac->clk);

	/* Clear pending interrupts. */
	for (i = 0; i < atxdmac->dma.chancnt; i++) {
		atchan = &atxdmac->chan[i];
		while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
			cpu_relax();
	}

	at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim);
	at_xdmac_write(atxdmac, AT_XDMAC_GE, atxdmac->save_gs);
	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
		atchan = to_at_xdmac_chan(chan);
		at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
		if (at_xdmac_chan_is_cyclic(atchan)) {
			at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
			at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
			at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
			wmb();
			at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
		}
	}
	return 0;
}
#endif /* CONFIG_PM_SLEEP */
static int at_xdmac_probe(struct platform_device *pdev)
{
	struct resource	*res;
	struct at_xdmac	*atxdmac;
	int		irq, size, nr_channels, i, ret;
	void __iomem	*base;
	u32		reg;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/*
	 * Read the number of xdmac channels. The read helper function can't
	 * be used since atxdmac is not yet allocated and we need to know the
	 * number of channels to do the allocation.
	 */
	reg = readl_relaxed(base + AT_XDMAC_GTYPE);
	nr_channels = AT_XDMAC_NB_CH(reg);
	if (nr_channels > AT_XDMAC_MAX_CHAN) {
		dev_err(&pdev->dev, "invalid number of channels (%u)\n",
			nr_channels);
		return -EINVAL;
	}

	size = sizeof(*atxdmac);
	size += nr_channels * sizeof(struct at_xdmac_chan);
	atxdmac = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!atxdmac) {
		dev_err(&pdev->dev, "can't allocate at_xdmac structure\n");
		return -ENOMEM;
	}

	atxdmac->regs = base;
	atxdmac->irq = irq;

	atxdmac->clk = devm_clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atxdmac->clk)) {
		dev_err(&pdev->dev, "can't get dma_clk\n");
		return PTR_ERR(atxdmac->clk);
	}

	/* Do not use a device-managed IRQ here, to prevent races with the tasklet. */
	ret = request_irq(atxdmac->irq, at_xdmac_interrupt, 0, "at_xdmac", atxdmac);
	if (ret) {
		dev_err(&pdev->dev, "can't request irq\n");
		return ret;
	}

	ret = clk_prepare_enable(atxdmac->clk);
	if (ret) {
		dev_err(&pdev->dev, "can't prepare or enable clock\n");
		goto err_free_irq;
	}

	atxdmac->at_xdmac_desc_pool =
		dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
				 sizeof(struct at_xdmac_desc), 4, 0);
	if (!atxdmac->at_xdmac_desc_pool) {
		dev_err(&pdev->dev, "no memory for descriptors dma pool\n");
		ret = -ENOMEM;
		goto err_clk_disable;
	}

	dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask);
	/*
	 * Without DMA_PRIVATE the driver is not able to allocate more than
	 * one channel: the second allocation fails in private_candidate.
	 */
	dma_cap_set(DMA_PRIVATE, atxdmac->dma.cap_mask);
	atxdmac->dma.dev				= &pdev->dev;
	atxdmac->dma.device_alloc_chan_resources	= at_xdmac_alloc_chan_resources;
	atxdmac->dma.device_free_chan_resources		= at_xdmac_free_chan_resources;
	atxdmac->dma.device_tx_status			= at_xdmac_tx_status;
	atxdmac->dma.device_issue_pending		= at_xdmac_issue_pending;
	atxdmac->dma.device_prep_dma_cyclic		= at_xdmac_prep_dma_cyclic;
	atxdmac->dma.device_prep_interleaved_dma	= at_xdmac_prep_interleaved;
	atxdmac->dma.device_prep_dma_memcpy		= at_xdmac_prep_dma_memcpy;
	atxdmac->dma.device_prep_slave_sg		= at_xdmac_prep_slave_sg;
	atxdmac->dma.device_config			= at_xdmac_device_config;
	atxdmac->dma.device_pause			= at_xdmac_device_pause;
	atxdmac->dma.device_resume			= at_xdmac_device_resume;
	atxdmac->dma.device_terminate_all		= at_xdmac_device_terminate_all;
	atxdmac->dma.src_addr_widths			= AT_XDMAC_DMA_BUSWIDTHS;
	atxdmac->dma.dst_addr_widths			= AT_XDMAC_DMA_BUSWIDTHS;
	atxdmac->dma.directions				= BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	atxdmac->dma.residue_granularity		= DMA_RESIDUE_GRANULARITY_BURST;

	/* Disable all chans and interrupts. */
	at_xdmac_off(atxdmac);

	/* Init channels. */
	INIT_LIST_HEAD(&atxdmac->dma.channels);
	for (i = 0; i < nr_channels; i++) {
		struct at_xdmac_chan *atchan = &atxdmac->chan[i];

		atchan->chan.device = &atxdmac->dma;
		list_add_tail(&atchan->chan.device_node,
			      &atxdmac->dma.channels);

		atchan->ch_regs = at_xdmac_chan_reg_base(atxdmac, i);
		atchan->mask = 1 << i;

		spin_lock_init(&atchan->lock);
		INIT_LIST_HEAD(&atchan->xfers_list);
		INIT_LIST_HEAD(&atchan->free_descs_list);
		tasklet_init(&atchan->tasklet, at_xdmac_tasklet,
			     (unsigned long)atchan);

		/* Clear pending interrupts. */
		while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
			cpu_relax();
	}
	platform_set_drvdata(pdev, atxdmac);

	ret = dma_async_device_register(&atxdmac->dma);
	if (ret) {
		dev_err(&pdev->dev, "fail to register DMA engine device\n");
		goto err_clk_disable;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 at_xdmac_xlate, atxdmac);
	if (ret) {
		dev_err(&pdev->dev, "could not register of dma controller\n");
		goto err_dma_unregister;
	}

	dev_info(&pdev->dev, "%d channels, mapped at 0x%p\n",
		 nr_channels, atxdmac->regs);

	return 0;

err_dma_unregister:
	dma_async_device_unregister(&atxdmac->dma);
err_clk_disable:
	clk_disable_unprepare(atxdmac->clk);
err_free_irq:
	free_irq(atxdmac->irq, atxdmac);
	return ret;
}
static int at_xdmac_remove(struct platform_device *pdev)
{
	struct at_xdmac	*atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
	int		i;

	at_xdmac_off(atxdmac);
	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&atxdmac->dma);
	clk_disable_unprepare(atxdmac->clk);

	synchronize_irq(atxdmac->irq);

	free_irq(atxdmac->irq, atxdmac);

	for (i = 0; i < atxdmac->dma.chancnt; i++) {
		struct at_xdmac_chan *atchan = &atxdmac->chan[i];

		tasklet_kill(&atchan->tasklet);
		at_xdmac_free_chan_resources(&atchan->chan);
	}

	return 0;
}
static const struct dev_pm_ops atmel_xdmac_dev_pm_ops = {
	.prepare	= atmel_xdmac_prepare,
	SET_LATE_SYSTEM_SLEEP_PM_OPS(atmel_xdmac_suspend, atmel_xdmac_resume)
};

static const struct of_device_id atmel_xdmac_dt_ids[] = {
	{
		.compatible = "atmel,sama5d4-dma",
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, atmel_xdmac_dt_ids);
static struct platform_driver at_xdmac_driver = {
	.probe		= at_xdmac_probe,
	.remove		= at_xdmac_remove,
	.driver = {
		.name		= "at_xdmac",
		.of_match_table	= of_match_ptr(atmel_xdmac_dt_ids),
		.pm		= &atmel_xdmac_dev_pm_ops,
	}
};

static int __init at_xdmac_init(void)
{
	return platform_driver_probe(&at_xdmac_driver, at_xdmac_probe);
}
subsys_initcall(at_xdmac_init);

MODULE_DESCRIPTION("Atmel Extended DMA Controller driver");
MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>");
MODULE_LICENSE("GPL");