// SPDX-License-Identifier: GPL-2.0+
//
// Actions Semi Owl SoCs DMA driver
//
// Copyright (c) 2014 Actions Semi Inc.
// Author: David Liu <liuwei@actions-semi.com>
//
// Copyright (c) 2018 Linaro Ltd.
// Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include "virt-dma.h"

#define OWL_DMA_FRAME_MAX_LENGTH                0xfffff

/* Global DMA Controller Registers */
#define OWL_DMA_IRQ_PD0                         0x00
#define OWL_DMA_IRQ_PD1                         0x04
#define OWL_DMA_IRQ_PD2                         0x08
#define OWL_DMA_IRQ_PD3                         0x0C
#define OWL_DMA_IRQ_EN0                         0x10
#define OWL_DMA_IRQ_EN1                         0x14
#define OWL_DMA_IRQ_EN2                         0x18
#define OWL_DMA_IRQ_EN3                         0x1C
#define OWL_DMA_SECURE_ACCESS_CTL               0x20
#define OWL_DMA_NIC_QOS                         0x24
#define OWL_DMA_DBGSEL                          0x28
#define OWL_DMA_IDLE_STAT                       0x2C

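/*
 * The four IRQ_PD/IRQ_EN register pairs above correspond to the
 * controller's four interrupt lines; this driver only ever uses
 * line 0 (PD0/EN0), see the comment in owl_dma_probe().
 */
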
/* Channel Registers */
#define OWL_DMA_CHAN_BASE(i)                    (0x100 + (i) * 0x100)
#define OWL_DMAX_MODE                           0x00
#define OWL_DMAX_SOURCE                         0x04
#define OWL_DMAX_DESTINATION                    0x08
#define OWL_DMAX_FRAME_LEN                      0x0C
#define OWL_DMAX_FRAME_CNT                      0x10
#define OWL_DMAX_REMAIN_FRAME_CNT               0x14
#define OWL_DMAX_REMAIN_CNT                     0x18
#define OWL_DMAX_SOURCE_STRIDE                  0x1C
#define OWL_DMAX_DESTINATION_STRIDE             0x20
#define OWL_DMAX_START                          0x24
#define OWL_DMAX_PAUSE                          0x28
#define OWL_DMAX_CHAINED_CTL                    0x2C
#define OWL_DMAX_CONSTANT                       0x30
#define OWL_DMAX_LINKLIST_CTL                   0x34
#define OWL_DMAX_NEXT_DESCRIPTOR                0x38
#define OWL_DMAX_CURRENT_DESCRIPTOR_NUM         0x3C
#define OWL_DMAX_INT_CTL                        0x40
#define OWL_DMAX_INT_STATUS                     0x44
#define OWL_DMAX_CURRENT_SOURCE_POINTER         0x48
#define OWL_DMAX_CURRENT_DESTINATION_POINTER    0x4C

/* OWL_DMAX_MODE Bits */
#define OWL_DMA_MODE_TS(x)                      (((x) & GENMASK(5, 0)) << 0)
#define OWL_DMA_MODE_ST(x)                      (((x) & GENMASK(1, 0)) << 8)
#define OWL_DMA_MODE_ST_DEV                     OWL_DMA_MODE_ST(0)
#define OWL_DMA_MODE_ST_DCU                     OWL_DMA_MODE_ST(2)
#define OWL_DMA_MODE_ST_SRAM                    OWL_DMA_MODE_ST(3)
#define OWL_DMA_MODE_DT(x)                      (((x) & GENMASK(1, 0)) << 10)
#define OWL_DMA_MODE_DT_DEV                     OWL_DMA_MODE_DT(0)
#define OWL_DMA_MODE_DT_DCU                     OWL_DMA_MODE_DT(2)
#define OWL_DMA_MODE_DT_SRAM                    OWL_DMA_MODE_DT(3)
#define OWL_DMA_MODE_SAM(x)                     (((x) & GENMASK(1, 0)) << 16)
#define OWL_DMA_MODE_SAM_CONST                  OWL_DMA_MODE_SAM(0)
#define OWL_DMA_MODE_SAM_INC                    OWL_DMA_MODE_SAM(1)
#define OWL_DMA_MODE_SAM_STRIDE                 OWL_DMA_MODE_SAM(2)
#define OWL_DMA_MODE_DAM(x)                     (((x) & GENMASK(1, 0)) << 18)
#define OWL_DMA_MODE_DAM_CONST                  OWL_DMA_MODE_DAM(0)
#define OWL_DMA_MODE_DAM_INC                    OWL_DMA_MODE_DAM(1)
#define OWL_DMA_MODE_DAM_STRIDE                 OWL_DMA_MODE_DAM(2)
#define OWL_DMA_MODE_PW(x)                      (((x) & GENMASK(2, 0)) << 20)
#define OWL_DMA_MODE_CB                         BIT(23)
#define OWL_DMA_MODE_NDDBW(x)                   (((x) & 0x1) << 28)
#define OWL_DMA_MODE_NDDBW_32BIT                OWL_DMA_MODE_NDDBW(0)
#define OWL_DMA_MODE_NDDBW_8BIT                 OWL_DMA_MODE_NDDBW(1)
#define OWL_DMA_MODE_CFE                        BIT(29)
#define OWL_DMA_MODE_LME                        BIT(30)
#define OWL_DMA_MODE_CME                        BIT(31)

/* OWL_DMAX_LINKLIST_CTL Bits */
#define OWL_DMA_LLC_SAV(x)                      (((x) & GENMASK(1, 0)) << 8)
#define OWL_DMA_LLC_SAV_INC                     OWL_DMA_LLC_SAV(0)
#define OWL_DMA_LLC_SAV_LOAD_NEXT               OWL_DMA_LLC_SAV(1)
#define OWL_DMA_LLC_SAV_LOAD_PREV               OWL_DMA_LLC_SAV(2)
#define OWL_DMA_LLC_DAV(x)                      (((x) & GENMASK(1, 0)) << 10)
#define OWL_DMA_LLC_DAV_INC                     OWL_DMA_LLC_DAV(0)
#define OWL_DMA_LLC_DAV_LOAD_NEXT               OWL_DMA_LLC_DAV(1)
#define OWL_DMA_LLC_DAV_LOAD_PREV               OWL_DMA_LLC_DAV(2)
#define OWL_DMA_LLC_SUSPEND                     BIT(16)

/* OWL_DMAX_INT_CTL Bits */
#define OWL_DMA_INTCTL_BLOCK                    BIT(0)
#define OWL_DMA_INTCTL_SUPER_BLOCK              BIT(1)
#define OWL_DMA_INTCTL_FRAME                    BIT(2)
#define OWL_DMA_INTCTL_HALF_FRAME               BIT(3)
#define OWL_DMA_INTCTL_LAST_FRAME               BIT(4)

/* OWL_DMAX_INT_STATUS Bits */
#define OWL_DMA_INTSTAT_BLOCK                   BIT(0)
#define OWL_DMA_INTSTAT_SUPER_BLOCK             BIT(1)
#define OWL_DMA_INTSTAT_FRAME                   BIT(2)
#define OWL_DMA_INTSTAT_HALF_FRAME              BIT(3)
#define OWL_DMA_INTSTAT_LAST_FRAME              BIT(4)

/*
 * Extract the @width-bit field starting at bit @shift of @val and place
 * it at bit @newshift, e.g. BIT_FIELD(mode, 2, 16, 6) moves bits [17:16]
 * down to bits [7:6].
 */
#define BIT_FIELD(val, width, shift, newshift)  \
                ((((val) >> (shift)) & ((BIT(width)) - 1)) << (newshift))

/**
 * struct owl_dma_lli_hw - Hardware link list for dma transfer
 * @next_lli: physical address of the next link list
 * @saddr: source physical address
 * @daddr: destination physical address
 * @flen: frame length
 * @fcnt: frame count
 * @src_stride: source stride
 * @dst_stride: destination stride
 * @ctrla: dma_mode and linklist ctrl config
 * @ctrlb: interrupt config
 * @const_num: data for constant fill
 */
struct owl_dma_lli_hw {
        u32     next_lli;
        u32     saddr;
        u32     daddr;
        u32     flen:20;
        u32     fcnt:12;
        u32     src_stride;
        u32     dst_stride;
        u32     ctrla;
        u32     ctrlb;
        u32     const_num;
};

/**
 * struct owl_dma_lli - Link list for dma transfer
 * @hw: hardware link list
 * @phys: physical address of hardware link list
 * @node: node for txd's lli_list
 */
struct owl_dma_lli {
        struct owl_dma_lli_hw   hw;
        dma_addr_t              phys;
        struct list_head        node;
};

/**
 * struct owl_dma_txd - Wrapper for struct dma_async_tx_descriptor
 * @vd: virtual DMA descriptor
 * @lli_list: link list of lli nodes
 */
struct owl_dma_txd {
        struct virt_dma_desc    vd;
        struct list_head        lli_list;
};

/**
 * struct owl_dma_pchan - Holder for the physical channels
 * @id: physical index to this channel
 * @base: virtual memory base for the dma channel
 * @vchan: the virtual channel currently being served by this physical channel
 * @lock: a lock to use when altering an instance of this struct
 */
struct owl_dma_pchan {
        u32                     id;
        void __iomem            *base;
        struct owl_dma_vchan    *vchan;
        spinlock_t              lock;
};

/**
 * struct owl_dma_vchan - Wrapper for DMA ENGINE channel
 * @vc: wrapped virtual channel
 * @pchan: the physical channel utilized by this channel
 * @txd: active transaction on this channel
 */
struct owl_dma_vchan {
        struct virt_dma_chan    vc;
        struct owl_dma_pchan    *pchan;
        struct owl_dma_txd      *txd;
};

/**
 * struct owl_dma - Holder for the Owl DMA controller
 * @dma: dma engine for this instance
 * @base: virtual memory base for the DMA controller
 * @clk: clock for the DMA controller
 * @lock: a lock to use when changing DMA controller global registers
 * @lli_pool: a pool for the LLI descriptors
 * @irq: interrupt line for the DMA controller
 * @nr_pchans: the number of physical channels
 * @pchans: array of data for the physical channels
 * @nr_vchans: the number of virtual channels
 * @vchans: array of data for the virtual channels
 */
struct owl_dma {
        struct dma_device       dma;
        void __iomem            *base;
        struct clk              *clk;
        spinlock_t              lock;
        struct dma_pool         *lli_pool;
        int                     irq;

        unsigned int            nr_pchans;
        struct owl_dma_pchan    *pchans;

        unsigned int            nr_vchans;
        struct owl_dma_vchan    *vchans;
};

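/*
 * Register update helpers: read-modify-write the @val bits of a channel
 * or global register, setting them when @state is true and clearing them
 * otherwise.
 */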
static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
                         u32 val, bool state)
{
        u32 regval;

        regval = readl(pchan->base + reg);

        if (state)
                regval |= val;
        else
                regval &= ~val;

        writel(regval, pchan->base + reg);
}

static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data)
{
        writel(data, pchan->base + reg);
}

static u32 pchan_readl(struct owl_dma_pchan *pchan, u32 reg)
{
        return readl(pchan->base + reg);
}

static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state)
{
        u32 regval;

        regval = readl(od->base + reg);

        if (state)
                regval |= val;
        else
                regval &= ~val;

        writel(regval, od->base + reg);
}

static void dma_writel(struct owl_dma *od, u32 reg, u32 data)
{
        writel(data, od->base + reg);
}

static u32 dma_readl(struct owl_dma *od, u32 reg)
{
        return readl(od->base + reg);
}

static inline struct owl_dma *to_owl_dma(struct dma_device *dd)
{
        return container_of(dd, struct owl_dma, dma);
}

static struct device *chan2dev(struct dma_chan *chan)
{
        return &chan->dev->device;
}

static inline struct owl_dma_vchan *to_owl_vchan(struct dma_chan *chan)
{
        return container_of(chan, struct owl_dma_vchan, vc.chan);
}

static inline struct owl_dma_txd *to_owl_txd(struct dma_async_tx_descriptor *tx)
{
        return container_of(tx, struct owl_dma_txd, vd.tx);
}

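/*
 * The hardware descriptor's ctrla word packs fields of the OWL_DMAX_MODE
 * and OWL_DMAX_LINKLIST_CTL registers into one compact word: mode bits
 * [31:28] stay at [31:28], [23:16] move to [27:20], [11:8] to [19:16]
 * and [5:0] to [15:10], while the link-list DAV bits [11:10] move to
 * [9:8] and the SAV bits [9:8] to [7:6].
 */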
static inline u32 llc_hw_ctrla(u32 mode, u32 llc_ctl)
{
        u32 ctl;

        ctl = BIT_FIELD(mode, 4, 28, 28) |
              BIT_FIELD(mode, 8, 16, 20) |
              BIT_FIELD(mode, 4, 8, 16) |
              BIT_FIELD(mode, 6, 0, 10) |
              BIT_FIELD(llc_ctl, 2, 10, 8) |
              BIT_FIELD(llc_ctl, 2, 8, 6);

        return ctl;
}

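/* ctrlb carries the OWL_DMAX_INT_CTL bits [6:0], shifted up to [24:18]. */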
static inline u32 llc_hw_ctrlb(u32 int_ctl)
{
        u32 ctl;

        ctl = BIT_FIELD(int_ctl, 7, 0, 18);

        return ctl;
}

static void owl_dma_free_lli(struct owl_dma *od,
                             struct owl_dma_lli *lli)
{
        list_del(&lli->node);
        dma_pool_free(od->lli_pool, lli, lli->phys);
}

static struct owl_dma_lli *owl_dma_alloc_lli(struct owl_dma *od)
{
        struct owl_dma_lli *lli;
        dma_addr_t phys;

        lli = dma_pool_alloc(od->lli_pool, GFP_NOWAIT, &phys);
        if (!lli)
                return NULL;

        INIT_LIST_HEAD(&lli->node);
        lli->phys = phys;

        return lli;
}

static struct owl_dma_lli *owl_dma_add_lli(struct owl_dma_txd *txd,
                                           struct owl_dma_lli *prev,
                                           struct owl_dma_lli *next)
{
        list_add_tail(&next->node, &txd->lli_list);

        if (prev) {
                prev->hw.next_lli = next->phys;
                prev->hw.ctrla |= llc_hw_ctrla(OWL_DMA_MODE_LME, 0);
        }

        return next;
}

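/*
 * Fill one hardware LLI for a single frame. Only memory-to-memory is
 * supported so far: both source and destination are routed through the
 * DCU with incrementing addresses, and the frame count is fixed at one.
 */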
static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,
                                  struct owl_dma_lli *lli,
                                  dma_addr_t src, dma_addr_t dst,
                                  u32 len, enum dma_transfer_direction dir)
{
        struct owl_dma_lli_hw *hw = &lli->hw;
        u32 mode;

        mode = OWL_DMA_MODE_PW(0);

        switch (dir) {
        case DMA_MEM_TO_MEM:
                mode |= OWL_DMA_MODE_TS(0) | OWL_DMA_MODE_ST_DCU |
                        OWL_DMA_MODE_DT_DCU | OWL_DMA_MODE_SAM_INC |
                        OWL_DMA_MODE_DAM_INC;

                break;
        default:
                return -EINVAL;
        }

        hw->next_lli = 0; /* One link list by default */
        hw->saddr = src;
        hw->daddr = dst;

        hw->fcnt = 1; /* Frame count fixed as 1 */
        hw->flen = len; /* Max frame length is 1MB - 1 */
        hw->src_stride = 0;
        hw->dst_stride = 0;
        hw->ctrla = llc_hw_ctrla(mode,
                                 OWL_DMA_LLC_SAV_LOAD_NEXT |
                                 OWL_DMA_LLC_DAV_LOAD_NEXT);

        hw->ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_SUPER_BLOCK);

        return 0;
}

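/*
 * Claim the first idle physical channel for @vchan. Each pchan carries
 * its own lock, so the scan does not have to take the global controller
 * lock; NULL is returned when every physical channel is already in use.
 */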
static struct owl_dma_pchan *owl_dma_get_pchan(struct owl_dma *od,
                                               struct owl_dma_vchan *vchan)
{
        struct owl_dma_pchan *pchan;
        unsigned long flags;
        int i;

        for (i = 0; i < od->nr_pchans; i++) {
                pchan = &od->pchans[i];

                spin_lock_irqsave(&pchan->lock, flags);
                if (!pchan->vchan) {
                        pchan->vchan = vchan;
                        spin_unlock_irqrestore(&pchan->lock, flags);
                        return pchan;
                }

                spin_unlock_irqrestore(&pchan->lock, flags);
        }

        return NULL;
}

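/* A physical channel is busy while its bit in OWL_DMA_IDLE_STAT is clear. */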
static int owl_dma_pchan_busy(struct owl_dma *od, struct owl_dma_pchan *pchan)
{
        unsigned int val;

        val = dma_readl(od, OWL_DMA_IDLE_STAT);

        return !(val & (1 << pchan->id));
}

static void owl_dma_terminate_pchan(struct owl_dma *od,
                                    struct owl_dma_pchan *pchan)
{
        unsigned long flags;
        u32 irq_pd;

        pchan_writel(pchan, OWL_DMAX_START, 0);
        pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);

        spin_lock_irqsave(&od->lock, flags);
        dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), false);

        irq_pd = dma_readl(od, OWL_DMA_IRQ_PD0);
        if (irq_pd & (1 << pchan->id)) {
                dev_warn(od->dma.dev,
                         "terminating pchan %d that still has pending irq\n",
                         pchan->id);
                dma_writel(od, OWL_DMA_IRQ_PD0, (1 << pchan->id));
        }

        pchan->vchan = NULL;

        spin_unlock_irqrestore(&od->lock, flags);
}

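/*
 * Program the next queued descriptor of @vchan onto its physical
 * channel: enable link-list mode, point the channel at the first LLI,
 * unmask its interrupt, and then kick the transfer via OWL_DMAX_START.
 */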
static int owl_dma_start_next_txd(struct owl_dma_vchan *vchan)
{
        struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
        struct virt_dma_desc *vd = vchan_next_desc(&vchan->vc);
        struct owl_dma_pchan *pchan = vchan->pchan;
        struct owl_dma_txd *txd = to_owl_txd(&vd->tx);
        struct owl_dma_lli *lli;
        unsigned long flags;
        u32 int_ctl;

        list_del(&vd->node);

        vchan->txd = txd;

        /* Wait for the channel to become inactive */
        while (owl_dma_pchan_busy(od, pchan))
                cpu_relax();

        lli = list_first_entry(&txd->lli_list,
                               struct owl_dma_lli, node);

        int_ctl = OWL_DMA_INTCTL_SUPER_BLOCK;

        pchan_writel(pchan, OWL_DMAX_MODE, OWL_DMA_MODE_LME);
        pchan_writel(pchan, OWL_DMAX_LINKLIST_CTL,
                     OWL_DMA_LLC_SAV_LOAD_NEXT | OWL_DMA_LLC_DAV_LOAD_NEXT);
        pchan_writel(pchan, OWL_DMAX_NEXT_DESCRIPTOR, lli->phys);
        pchan_writel(pchan, OWL_DMAX_INT_CTL, int_ctl);

        /* Clear IRQ status for this pchan */
        pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);

        spin_lock_irqsave(&od->lock, flags);

        dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), true);

        spin_unlock_irqrestore(&od->lock, flags);

        dev_dbg(chan2dev(&vchan->vc.chan), "starting pchan %d\n", pchan->id);

        /* Start DMA transfer for this pchan */
        pchan_writel(pchan, OWL_DMAX_START, 0x1);

        return 0;
}

static void owl_dma_phy_free(struct owl_dma *od, struct owl_dma_vchan *vchan)
{
        /* Ensure that the physical channel is stopped */
        owl_dma_terminate_pchan(od, vchan->pchan);

        vchan->pchan = NULL;
}

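/*
 * Interrupt handler for IRQ line 0. Pending channels are collected from
 * OWL_DMA_IRQ_PD0 and their per-channel status bits cleared; any
 * completion that raced with the global pending read is picked up by
 * re-checking each channel's INT_CTL/INT_STATUS pair. Completed
 * descriptors are then retired and the next queued one is started.
 */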
static irqreturn_t owl_dma_interrupt(int irq, void *dev_id)
{
        struct owl_dma *od = dev_id;
        struct owl_dma_vchan *vchan;
        struct owl_dma_pchan *pchan;
        unsigned long pending;
        int i;
        unsigned int global_irq_pending, chan_irq_pending;

        spin_lock(&od->lock);

        pending = dma_readl(od, OWL_DMA_IRQ_PD0);

        /* Clear IRQ status for each pchan */
        for_each_set_bit(i, &pending, od->nr_pchans) {
                pchan = &od->pchans[i];
                pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);
        }

        /* Clear pending IRQ */
        dma_writel(od, OWL_DMA_IRQ_PD0, pending);

        /* Check missed pending IRQ */
        for (i = 0; i < od->nr_pchans; i++) {
                pchan = &od->pchans[i];
                chan_irq_pending = pchan_readl(pchan, OWL_DMAX_INT_CTL) &
                                   pchan_readl(pchan, OWL_DMAX_INT_STATUS);

                /* Dummy read to ensure OWL_DMA_IRQ_PD0 value is updated */
                dma_readl(od, OWL_DMA_IRQ_PD0);

                global_irq_pending = dma_readl(od, OWL_DMA_IRQ_PD0);

                if (chan_irq_pending && !(global_irq_pending & BIT(i))) {
                        dev_dbg(od->dma.dev,
                                "global and channel IRQ pending match err\n");

                        /* Clear IRQ status for this pchan */
                        pchan_update(pchan, OWL_DMAX_INT_STATUS,
                                     0xff, false);

                        /* Update global IRQ pending */
                        pending |= BIT(i);
                }
        }

        spin_unlock(&od->lock);

        for_each_set_bit(i, &pending, od->nr_pchans) {
                struct owl_dma_txd *txd;

                pchan = &od->pchans[i];

                vchan = pchan->vchan;
                if (!vchan) {
                        dev_warn(od->dma.dev, "no vchan attached on pchan %d\n",
                                 pchan->id);
                        continue;
                }

                spin_lock(&vchan->vc.lock);

                txd = vchan->txd;
                if (txd) {
                        vchan->txd = NULL;

                        vchan_cookie_complete(&txd->vd);

                        /*
                         * Start the next descriptor (if any),
                         * otherwise free this channel.
                         */
                        if (vchan_next_desc(&vchan->vc))
                                owl_dma_start_next_txd(vchan);
                        else
                                owl_dma_phy_free(od, vchan);
                }

                spin_unlock(&vchan->vc.lock);
        }

        return IRQ_HANDLED;
}

static void owl_dma_free_txd(struct owl_dma *od, struct owl_dma_txd *txd)
{
        struct owl_dma_lli *lli, *_lli;

        if (unlikely(!txd))
                return;

        list_for_each_entry_safe(lli, _lli, &txd->lli_list, node)
                owl_dma_free_lli(od, lli);

        kfree(txd);
}

static void owl_dma_desc_free(struct virt_dma_desc *vd)
{
        struct owl_dma *od = to_owl_dma(vd->tx.chan->device);
        struct owl_dma_txd *txd = to_owl_txd(&vd->tx);

        owl_dma_free_txd(od, txd);
}

static int owl_dma_terminate_all(struct dma_chan *chan)
{
        struct owl_dma *od = to_owl_dma(chan->device);
        struct owl_dma_vchan *vchan = to_owl_vchan(chan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&vchan->vc.lock, flags);

        if (vchan->pchan)
                owl_dma_phy_free(od, vchan);

        if (vchan->txd) {
                owl_dma_desc_free(&vchan->txd->vd);
                vchan->txd = NULL;
        }

        vchan_get_all_descriptors(&vchan->vc, &head);
        vchan_dma_desc_free_list(&vchan->vc, &head);

        spin_unlock_irqrestore(&vchan->vc.lock, flags);

        return 0;
}

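/*
 * Compute the residue of the transaction currently running on @vchan:
 * the byte count left in the active LLI node plus the frame lengths of
 * every node from the one the hardware will load next to the end of
 * the list.
 */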
static u32 owl_dma_getbytes_chan(struct owl_dma_vchan *vchan)
{
        struct owl_dma_pchan *pchan;
        struct owl_dma_txd *txd;
        struct owl_dma_lli *lli;
        unsigned int next_lli_phy;
        size_t bytes;

        pchan = vchan->pchan;
        txd = vchan->txd;

        if (!pchan || !txd)
                return 0;

        /* Get the remaining byte count of the current node in the link list */
        bytes = pchan_readl(pchan, OWL_DMAX_REMAIN_CNT);

        /* Loop through the remaining nodes to get the total remaining bytes */
        if (pchan_readl(pchan, OWL_DMAX_MODE) & OWL_DMA_MODE_LME) {
                next_lli_phy = pchan_readl(pchan, OWL_DMAX_NEXT_DESCRIPTOR);
                list_for_each_entry(lli, &txd->lli_list, node) {
                        /* Start from the next active node */
                        if (lli->phys == next_lli_phy) {
                                list_for_each_entry_from(lli, &txd->lli_list,
                                                         node)
                                        bytes += lli->hw.flen;
                                break;
                        }
                }
        }

        return bytes;
}

static enum dma_status owl_dma_tx_status(struct dma_chan *chan,
                                         dma_cookie_t cookie,
                                         struct dma_tx_state *state)
{
        struct owl_dma_vchan *vchan = to_owl_vchan(chan);
        struct owl_dma_lli *lli;
        struct virt_dma_desc *vd;
        struct owl_dma_txd *txd;
        enum dma_status ret;
        unsigned long flags;
        size_t bytes = 0;

        ret = dma_cookie_status(chan, cookie, state);
        if (ret == DMA_COMPLETE || !state)
                return ret;

        spin_lock_irqsave(&vchan->vc.lock, flags);

        vd = vchan_find_desc(&vchan->vc, cookie);
        if (vd) {
                txd = to_owl_txd(&vd->tx);
                list_for_each_entry(lli, &txd->lli_list, node)
                        bytes += lli->hw.flen;
        } else {
                bytes = owl_dma_getbytes_chan(vchan);
        }

        spin_unlock_irqrestore(&vchan->vc.lock, flags);

        dma_set_residue(state, bytes);

        return ret;
}

static void owl_dma_phy_alloc_and_start(struct owl_dma_vchan *vchan)
{
        struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
        struct owl_dma_pchan *pchan;

        pchan = owl_dma_get_pchan(od, vchan);
        if (!pchan)
                return;

        dev_dbg(od->dma.dev, "allocated pchan %d\n", pchan->id);

        vchan->pchan = pchan;
        owl_dma_start_next_txd(vchan);
}

static void owl_dma_issue_pending(struct dma_chan *chan)
{
        struct owl_dma_vchan *vchan = to_owl_vchan(chan);
        unsigned long flags;

        spin_lock_irqsave(&vchan->vc.lock, flags);
        if (vchan_issue_pending(&vchan->vc)) {
                if (!vchan->pchan)
                        owl_dma_phy_alloc_and_start(vchan);
        }
        spin_unlock_irqrestore(&vchan->vc.lock, flags);
}

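/*
 * Prepare a memory-to-memory transaction, splitting the copy into
 * hardware frames of at most OWL_DMA_FRAME_MAX_LENGTH bytes chained
 * through the LLI pool. A client would typically reach this through
 * the generic dmaengine API, roughly (sketch, error handling omitted):
 *
 *      dma_cap_mask_t mask;
 *
 *      dma_cap_zero(mask);
 *      dma_cap_set(DMA_MEMCPY, mask);
 *      chan = dma_request_chan_by_mask(&mask);
 *      tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, flags);
 *      cookie = dmaengine_submit(tx);
 *      dma_async_issue_pending(chan);
 */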
static struct dma_async_tx_descriptor
                *owl_dma_prep_memcpy(struct dma_chan *chan,
                                     dma_addr_t dst, dma_addr_t src,
                                     size_t len, unsigned long flags)
{
        struct owl_dma *od = to_owl_dma(chan->device);
        struct owl_dma_vchan *vchan = to_owl_vchan(chan);
        struct owl_dma_txd *txd;
        struct owl_dma_lli *lli, *prev = NULL;
        size_t offset, bytes;
        int ret;

        if (!len)
                return NULL;

        txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
        if (!txd)
                return NULL;

        INIT_LIST_HEAD(&txd->lli_list);

        /* Process the transfer frame by frame */
        for (offset = 0; offset < len; offset += bytes) {
                lli = owl_dma_alloc_lli(od);
                if (!lli) {
                        dev_warn(chan2dev(chan), "failed to allocate lli\n");
                        goto err_txd_free;
                }

                bytes = min_t(size_t, (len - offset), OWL_DMA_FRAME_MAX_LENGTH);

                ret = owl_dma_cfg_lli(vchan, lli, src + offset, dst + offset,
                                      bytes, DMA_MEM_TO_MEM);
                if (ret) {
                        dev_warn(chan2dev(chan), "failed to config lli\n");
                        goto err_txd_free;
                }

                prev = owl_dma_add_lli(txd, prev, lli);
        }

        return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

err_txd_free:
        owl_dma_free_txd(od, txd);
        return NULL;
}

static void owl_dma_free_chan_resources(struct dma_chan *chan)
{
        struct owl_dma_vchan *vchan = to_owl_vchan(chan);

        /* Ensure all queued descriptors are freed */
        vchan_free_chan_resources(&vchan->vc);
}

static inline void owl_dma_free(struct owl_dma *od)
{
        struct owl_dma_vchan *vchan = NULL;
        struct owl_dma_vchan *next;

        list_for_each_entry_safe(vchan,
                                 next, &od->dma.channels, vc.chan.device_node) {
                list_del(&vchan->vc.chan.device_node);
                tasklet_kill(&vchan->vc.task);
        }
}

static int owl_dma_probe(struct platform_device *pdev)
{
        struct device_node *np = pdev->dev.of_node;
        struct owl_dma *od;
        struct resource *res;
        int ret, i, nr_channels, nr_requests;

        od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
        if (!od)
                return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -EINVAL;

        od->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(od->base))
                return PTR_ERR(od->base);

        ret = of_property_read_u32(np, "dma-channels", &nr_channels);
        if (ret) {
                dev_err(&pdev->dev, "can't get dma-channels\n");
                return ret;
        }

        ret = of_property_read_u32(np, "dma-requests", &nr_requests);
        if (ret) {
                dev_err(&pdev->dev, "can't get dma-requests\n");
                return ret;
        }

        dev_info(&pdev->dev, "dma-channels %d, dma-requests %d\n",
                 nr_channels, nr_requests);

        od->nr_pchans = nr_channels;
        od->nr_vchans = nr_requests;

        pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);

        platform_set_drvdata(pdev, od);
        spin_lock_init(&od->lock);

        dma_cap_set(DMA_MEMCPY, od->dma.cap_mask);

        od->dma.dev = &pdev->dev;
        od->dma.device_free_chan_resources = owl_dma_free_chan_resources;
        od->dma.device_tx_status = owl_dma_tx_status;
        od->dma.device_issue_pending = owl_dma_issue_pending;
        od->dma.device_prep_dma_memcpy = owl_dma_prep_memcpy;
        od->dma.device_terminate_all = owl_dma_terminate_all;
        od->dma.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
        od->dma.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
        od->dma.directions = BIT(DMA_MEM_TO_MEM);
        od->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

        INIT_LIST_HEAD(&od->dma.channels);

        od->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(od->clk)) {
                dev_err(&pdev->dev, "unable to get clock\n");
                return PTR_ERR(od->clk);
        }

        /*
         * Even though the DMA controller is capable of generating 4
         * IRQs for DMA priority feature, we only use 1 IRQ for
         * simplicity.
         */
        od->irq = platform_get_irq(pdev, 0);
        if (od->irq < 0)
                return od->irq;

        ret = devm_request_irq(&pdev->dev, od->irq, owl_dma_interrupt, 0,
                               dev_name(&pdev->dev), od);
        if (ret) {
                dev_err(&pdev->dev, "unable to request IRQ\n");
                return ret;
        }

        /* Init physical channels */
        od->pchans = devm_kcalloc(&pdev->dev, od->nr_pchans,
                                  sizeof(struct owl_dma_pchan), GFP_KERNEL);
        if (!od->pchans)
                return -ENOMEM;

        for (i = 0; i < od->nr_pchans; i++) {
                struct owl_dma_pchan *pchan = &od->pchans[i];

                pchan->id = i;
                pchan->base = od->base + OWL_DMA_CHAN_BASE(i);
        }

        /* Init virtual channels */
        od->vchans = devm_kcalloc(&pdev->dev, od->nr_vchans,
                                  sizeof(struct owl_dma_vchan), GFP_KERNEL);
        if (!od->vchans)
                return -ENOMEM;

        for (i = 0; i < od->nr_vchans; i++) {
                struct owl_dma_vchan *vchan = &od->vchans[i];

                vchan->vc.desc_free = owl_dma_desc_free;
                vchan_init(&vchan->vc, &od->dma);
        }

        /* Create a pool of consistent memory blocks for hardware descriptors */
        od->lli_pool = dma_pool_create(dev_name(od->dma.dev), od->dma.dev,
                                       sizeof(struct owl_dma_lli),
                                       __alignof__(struct owl_dma_lli),
                                       0);
        if (!od->lli_pool) {
                dev_err(&pdev->dev, "unable to allocate DMA descriptor pool\n");
                return -ENOMEM;
        }

        ret = clk_prepare_enable(od->clk);
        if (ret) {
                dev_err(&pdev->dev, "unable to enable clock\n");
                dma_pool_destroy(od->lli_pool);
                return ret;
        }

        ret = dma_async_device_register(&od->dma);
        if (ret) {
                dev_err(&pdev->dev, "failed to register DMA engine device\n");
                goto err_pool_free;
        }

        return 0;

err_pool_free:
        clk_disable_unprepare(od->clk);
        dma_pool_destroy(od->lli_pool);

        return ret;
}

static int owl_dma_remove(struct platform_device *pdev)
{
        struct owl_dma *od = platform_get_drvdata(pdev);

        dma_async_device_unregister(&od->dma);

        /* Mask all interrupts for this execution environment */
        dma_writel(od, OWL_DMA_IRQ_EN0, 0x0);

        /* Make sure we won't have any further interrupts */
        devm_free_irq(od->dma.dev, od->irq, od);

        owl_dma_free(od);

        clk_disable_unprepare(od->clk);

        return 0;
}

static const struct of_device_id owl_dma_match[] = {
        { .compatible = "actions,s900-dma", },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, owl_dma_match);

static struct platform_driver owl_dma_driver = {
        .probe  = owl_dma_probe,
        .remove = owl_dma_remove,
        .driver = {
                .name = "dma-owl",
                .of_match_table = of_match_ptr(owl_dma_match),
        },
};

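/*
 * Registered at subsys_initcall time (rather than module_init) so the
 * DMA controller comes up before the client drivers that may want to
 * use it start probing.
 */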
static int owl_dma_init(void)
{
        return platform_driver_register(&owl_dma_driver);
}
subsys_initcall(owl_dma_init);

static void __exit owl_dma_exit(void)
{
        platform_driver_unregister(&owl_dma_driver);
}
module_exit(owl_dma_exit);

MODULE_AUTHOR("David Liu <liuwei@actions-semi.com>");
MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
MODULE_DESCRIPTION("Actions Semi Owl SoCs DMA driver");
MODULE_LICENSE("GPL");