// SPDX-License-Identifier: GPL-2.0+
//
// Actions Semi Owl SoCs DMA driver
//
// Copyright (c) 2014 Actions Semi Inc.
// Author: David Liu <liuwei@actions-semi.com>
//
// Copyright (c) 2018 Linaro Ltd.
// Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/slab.h>
#include "virt-dma.h"

#define OWL_DMA_FRAME_MAX_LENGTH 0xfffff
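/*
 * A single frame carries at most 0xfffff (1 MiB - 1) bytes; longer
 * transfers are split into multiple link-list nodes of at most this
 * size (see owl_dma_prep_memcpy() below).
 */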

/* Global DMA Controller Registers */
#define OWL_DMA_IRQ_PD0 0x00
#define OWL_DMA_IRQ_PD1 0x04
#define OWL_DMA_IRQ_PD2 0x08
#define OWL_DMA_IRQ_PD3 0x0C
#define OWL_DMA_IRQ_EN0 0x10
#define OWL_DMA_IRQ_EN1 0x14
#define OWL_DMA_IRQ_EN2 0x18
#define OWL_DMA_IRQ_EN3 0x1C
#define OWL_DMA_SECURE_ACCESS_CTL 0x20
#define OWL_DMA_NIC_QOS 0x24
#define OWL_DMA_DBGSEL 0x28
#define OWL_DMA_IDLE_STAT 0x2C

/* Channel Registers */
#define OWL_DMA_CHAN_BASE(i) (0x100 + (i) * 0x100)
#define OWL_DMAX_MODE 0x00
#define OWL_DMAX_SOURCE 0x04
#define OWL_DMAX_DESTINATION 0x08
#define OWL_DMAX_FRAME_LEN 0x0C
#define OWL_DMAX_FRAME_CNT 0x10
#define OWL_DMAX_REMAIN_FRAME_CNT 0x14
#define OWL_DMAX_REMAIN_CNT 0x18
#define OWL_DMAX_SOURCE_STRIDE 0x1C
#define OWL_DMAX_DESTINATION_STRIDE 0x20
#define OWL_DMAX_START 0x24
#define OWL_DMAX_PAUSE 0x28
#define OWL_DMAX_CHAINED_CTL 0x2C
#define OWL_DMAX_CONSTANT 0x30
#define OWL_DMAX_LINKLIST_CTL 0x34
#define OWL_DMAX_NEXT_DESCRIPTOR 0x38
#define OWL_DMAX_CURRENT_DESCRIPTOR_NUM 0x3C
#define OWL_DMAX_INT_CTL 0x40
#define OWL_DMAX_INT_STATUS 0x44
#define OWL_DMAX_CURRENT_SOURCE_POINTER 0x48
#define OWL_DMAX_CURRENT_DESTINATION_POINTER 0x4C

/* OWL_DMAX_MODE Bits */
#define OWL_DMA_MODE_TS(x) (((x) & GENMASK(5, 0)) << 0)
#define OWL_DMA_MODE_ST(x) (((x) & GENMASK(1, 0)) << 8)
#define OWL_DMA_MODE_ST_DEV OWL_DMA_MODE_ST(0)
#define OWL_DMA_MODE_ST_DCU OWL_DMA_MODE_ST(2)
#define OWL_DMA_MODE_ST_SRAM OWL_DMA_MODE_ST(3)
#define OWL_DMA_MODE_DT(x) (((x) & GENMASK(1, 0)) << 10)
#define OWL_DMA_MODE_DT_DEV OWL_DMA_MODE_DT(0)
#define OWL_DMA_MODE_DT_DCU OWL_DMA_MODE_DT(2)
#define OWL_DMA_MODE_DT_SRAM OWL_DMA_MODE_DT(3)
#define OWL_DMA_MODE_SAM(x) (((x) & GENMASK(1, 0)) << 16)
#define OWL_DMA_MODE_SAM_CONST OWL_DMA_MODE_SAM(0)
#define OWL_DMA_MODE_SAM_INC OWL_DMA_MODE_SAM(1)
#define OWL_DMA_MODE_SAM_STRIDE OWL_DMA_MODE_SAM(2)
#define OWL_DMA_MODE_DAM(x) (((x) & GENMASK(1, 0)) << 18)
#define OWL_DMA_MODE_DAM_CONST OWL_DMA_MODE_DAM(0)
#define OWL_DMA_MODE_DAM_INC OWL_DMA_MODE_DAM(1)
#define OWL_DMA_MODE_DAM_STRIDE OWL_DMA_MODE_DAM(2)
#define OWL_DMA_MODE_PW(x) (((x) & GENMASK(2, 0)) << 20)
#define OWL_DMA_MODE_CB BIT(23)
#define OWL_DMA_MODE_NDDBW(x) (((x) & 0x1) << 28)
#define OWL_DMA_MODE_NDDBW_32BIT OWL_DMA_MODE_NDDBW(0)
#define OWL_DMA_MODE_NDDBW_8BIT OWL_DMA_MODE_NDDBW(1)
#define OWL_DMA_MODE_CFE BIT(29)
#define OWL_DMA_MODE_LME BIT(30)
#define OWL_DMA_MODE_CME BIT(31)

/* OWL_DMAX_LINKLIST_CTL Bits */
#define OWL_DMA_LLC_SAV(x) (((x) & GENMASK(1, 0)) << 8)
#define OWL_DMA_LLC_SAV_INC OWL_DMA_LLC_SAV(0)
#define OWL_DMA_LLC_SAV_LOAD_NEXT OWL_DMA_LLC_SAV(1)
#define OWL_DMA_LLC_SAV_LOAD_PREV OWL_DMA_LLC_SAV(2)
#define OWL_DMA_LLC_DAV(x) (((x) & GENMASK(1, 0)) << 10)
#define OWL_DMA_LLC_DAV_INC OWL_DMA_LLC_DAV(0)
#define OWL_DMA_LLC_DAV_LOAD_NEXT OWL_DMA_LLC_DAV(1)
#define OWL_DMA_LLC_DAV_LOAD_PREV OWL_DMA_LLC_DAV(2)
#define OWL_DMA_LLC_SUSPEND BIT(16)

/* OWL_DMAX_INT_CTL Bits */
#define OWL_DMA_INTCTL_BLOCK BIT(0)
#define OWL_DMA_INTCTL_SUPER_BLOCK BIT(1)
#define OWL_DMA_INTCTL_FRAME BIT(2)
#define OWL_DMA_INTCTL_HALF_FRAME BIT(3)
#define OWL_DMA_INTCTL_LAST_FRAME BIT(4)

/* OWL_DMAX_INT_STATUS Bits */
#define OWL_DMA_INTSTAT_BLOCK BIT(0)
#define OWL_DMA_INTSTAT_SUPER_BLOCK BIT(1)
#define OWL_DMA_INTSTAT_FRAME BIT(2)
#define OWL_DMA_INTSTAT_HALF_FRAME BIT(3)
#define OWL_DMA_INTSTAT_LAST_FRAME BIT(4)

/* Extract 'width' bits of 'val' at 'shift' and reposition them at 'newshift' */
#define BIT_FIELD(val, width, shift, newshift)	\
		((((val) >> (shift)) & ((BIT(width)) - 1)) << (newshift))
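/*
 * For example, BIT_FIELD(0x30000, 2, 16, 8) extracts the two bits at
 * position 16 of 0x30000 (value 0x3) and repositions them at bit 8,
 * giving 0x300.
 */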

/**
 * struct owl_dma_lli_hw - Hardware link list for dma transfer
 * @next_lli: physical address of the next link list
 * @saddr: source physical address
 * @daddr: destination physical address
 * @flen: frame length
 * @fcnt: frame count
 * @src_stride: source stride
 * @dst_stride: destination stride
 * @ctrla: dma_mode and linklist ctrl config
 * @ctrlb: interrupt config
 * @const_num: data for constant fill
 */
struct owl_dma_lli_hw {
	u32 next_lli;
	u32 saddr;
	u32 daddr;
	u32 flen:20;
	u32 fcnt:12;
	u32 src_stride;
	u32 dst_stride;
	u32 ctrla;
	u32 ctrlb;
	u32 const_num;
};
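/*
 * Note that @flen and @fcnt share a single 32-bit word (20 + 12 bits),
 * matching OWL_DMA_FRAME_MAX_LENGTH (0xfffff) being a 20-bit value.
 */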

/**
 * struct owl_dma_lli - Link list for dma transfer
 * @hw: hardware link list
 * @phys: physical address of hardware link list
 * @node: node for txd's lli_list
 */
struct owl_dma_lli {
	struct owl_dma_lli_hw hw;
	dma_addr_t phys;
	struct list_head node;
};

/**
 * struct owl_dma_txd - Wrapper for struct dma_async_tx_descriptor
 * @vd: virtual DMA descriptor
 * @lli_list: link list of lli nodes
 * @cyclic: flag to indicate cyclic transfers
 */
struct owl_dma_txd {
	struct virt_dma_desc vd;
	struct list_head lli_list;
	bool cyclic;
};

/**
 * struct owl_dma_pchan - Holder for the physical channels
 * @id: physical index to this channel
 * @base: virtual memory base for the dma channel
 * @vchan: the virtual channel currently being served by this physical channel
 * @lock: a lock to use when altering an instance of this struct
 */
struct owl_dma_pchan {
	u32 id;
	void __iomem *base;
	struct owl_dma_vchan *vchan;
	spinlock_t lock;
};

/**
 * struct owl_dma_vchan - Wrapper for DMA ENGINE channel
 * @vc: wrapped virtual channel
 * @pchan: the physical channel utilized by this channel
 * @txd: active transaction on this channel
 * @cfg: slave configuration for this channel
 * @drq: physical DMA request ID for this channel
 */
struct owl_dma_vchan {
	struct virt_dma_chan vc;
	struct owl_dma_pchan *pchan;
	struct owl_dma_txd *txd;
	struct dma_slave_config cfg;
	u8 drq;
};

/**
 * struct owl_dma - Holder for the Owl DMA controller
 * @dma: dma engine for this instance
 * @base: virtual memory base for the DMA controller
 * @clk: clock for the DMA controller
 * @lock: a lock to use when changing DMA controller global registers
 * @lli_pool: a pool for the LLI descriptors
 * @irq: interrupt ID for the DMA controller
 * @nr_pchans: the number of physical channels
 * @pchans: array of data for the physical channels
 * @nr_vchans: the number of virtual channels
 * @vchans: array of data for the virtual channels
 */
struct owl_dma {
	struct dma_device dma;
	void __iomem *base;
	struct clk *clk;
	spinlock_t lock;
	struct dma_pool *lli_pool;
	int irq;

	unsigned int nr_pchans;
	struct owl_dma_pchan *pchans;

	unsigned int nr_vchans;
	struct owl_dma_vchan *vchans;
};

static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
			 u32 val, bool state)
{
	u32 regval;

	regval = readl(pchan->base + reg);

	if (state)
		regval |= val;
	else
		regval &= ~val;

	writel(regval, pchan->base + reg);
}

static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data)
{
	writel(data, pchan->base + reg);
}

static u32 pchan_readl(struct owl_dma_pchan *pchan, u32 reg)
{
	return readl(pchan->base + reg);
}

static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state)
{
	u32 regval;

	regval = readl(od->base + reg);

	if (state)
		regval |= val;
	else
		regval &= ~val;

	writel(regval, od->base + reg);
}

static void dma_writel(struct owl_dma *od, u32 reg, u32 data)
{
	writel(data, od->base + reg);
}

static u32 dma_readl(struct owl_dma *od, u32 reg)
{
	return readl(od->base + reg);
}

static inline struct owl_dma *to_owl_dma(struct dma_device *dd)
{
	return container_of(dd, struct owl_dma, dma);
}

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct owl_dma_vchan *to_owl_vchan(struct dma_chan *chan)
{
	return container_of(chan, struct owl_dma_vchan, vc.chan);
}

static inline struct owl_dma_txd *to_owl_txd(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct owl_dma_txd, vd.tx);
}

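/*
 * The hardware descriptor's ctrla word repacks the OWL_DMAX_MODE and
 * OWL_DMAX_LINKLIST_CTL register fields into one 32-bit value: mode
 * bits [31:28] stay at [31:28], mode [23:16] move to [27:20], mode
 * [11:8] to [19:16] and mode [5:0] to [15:10]; linklist-ctl bits
 * [11:10] and [9:8] land at [9:8] and [7:6]. llc_hw_ctrlb() likewise
 * moves the interrupt-control bits [6:0] up to [24:18].
 */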
static inline u32 llc_hw_ctrla(u32 mode, u32 llc_ctl)
{
	u32 ctl;

	ctl = BIT_FIELD(mode, 4, 28, 28) |
	      BIT_FIELD(mode, 8, 16, 20) |
	      BIT_FIELD(mode, 4, 8, 16) |
	      BIT_FIELD(mode, 6, 0, 10) |
	      BIT_FIELD(llc_ctl, 2, 10, 8) |
	      BIT_FIELD(llc_ctl, 2, 8, 6);

	return ctl;
}

static inline u32 llc_hw_ctrlb(u32 int_ctl)
{
	u32 ctl;

	ctl = BIT_FIELD(int_ctl, 7, 0, 18);

	return ctl;
}

static void owl_dma_free_lli(struct owl_dma *od,
			     struct owl_dma_lli *lli)
{
	list_del(&lli->node);
	dma_pool_free(od->lli_pool, lli, lli->phys);
}

static struct owl_dma_lli *owl_dma_alloc_lli(struct owl_dma *od)
{
	struct owl_dma_lli *lli;
	dma_addr_t phys;

	lli = dma_pool_alloc(od->lli_pool, GFP_NOWAIT, &phys);
	if (!lli)
		return NULL;

	INIT_LIST_HEAD(&lli->node);
	lli->phys = phys;

	return lli;
}

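/*
 * Append @next to @txd's lli_list and chain it after @prev. When
 * @is_cyclic is set the node is not added to the list again; this lets
 * owl_prep_dma_cyclic() link the final node back to the first one
 * without duplicating it in lli_list.
 */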
static struct owl_dma_lli *owl_dma_add_lli(struct owl_dma_txd *txd,
					   struct owl_dma_lli *prev,
					   struct owl_dma_lli *next,
					   bool is_cyclic)
{
	if (!is_cyclic)
		list_add_tail(&next->node, &txd->lli_list);

	if (prev) {
		prev->hw.next_lli = next->phys;
		prev->hw.ctrla |= llc_hw_ctrla(OWL_DMA_MODE_LME, 0);
	}

	return next;
}

static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,
				  struct owl_dma_lli *lli,
				  dma_addr_t src, dma_addr_t dst,
				  u32 len, enum dma_transfer_direction dir,
				  struct dma_slave_config *sconfig,
				  bool is_cyclic)
{
	struct owl_dma_lli_hw *hw = &lli->hw;
	u32 mode;

	mode = OWL_DMA_MODE_PW(0);

	switch (dir) {
	case DMA_MEM_TO_MEM:
		mode |= OWL_DMA_MODE_TS(0) | OWL_DMA_MODE_ST_DCU |
			OWL_DMA_MODE_DT_DCU | OWL_DMA_MODE_SAM_INC |
			OWL_DMA_MODE_DAM_INC;

		break;
	case DMA_MEM_TO_DEV:
		mode |= OWL_DMA_MODE_TS(vchan->drq)
			| OWL_DMA_MODE_ST_DCU | OWL_DMA_MODE_DT_DEV
			| OWL_DMA_MODE_SAM_INC | OWL_DMA_MODE_DAM_CONST;

		/*
		 * Hardware only supports 32bit and 8bit buswidth. Since the
		 * default is 32bit, select 8bit only when requested.
		 */
		if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE)
			mode |= OWL_DMA_MODE_NDDBW_8BIT;

		break;
	case DMA_DEV_TO_MEM:
		mode |= OWL_DMA_MODE_TS(vchan->drq)
			| OWL_DMA_MODE_ST_DEV | OWL_DMA_MODE_DT_DCU
			| OWL_DMA_MODE_SAM_CONST | OWL_DMA_MODE_DAM_INC;

		/*
		 * Hardware only supports 32bit and 8bit buswidth. Since the
		 * default is 32bit, select 8bit only when requested.
		 */
		if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE)
			mode |= OWL_DMA_MODE_NDDBW_8BIT;

		break;
	default:
		return -EINVAL;
	}

	hw->next_lli = 0; /* One link list by default */
	hw->saddr = src;
	hw->daddr = dst;

	hw->fcnt = 1; /* Frame count fixed as 1 */
	hw->flen = len; /* Max frame length is 1MB */
	hw->src_stride = 0;
	hw->dst_stride = 0;
	hw->ctrla = llc_hw_ctrla(mode,
				 OWL_DMA_LLC_SAV_LOAD_NEXT |
				 OWL_DMA_LLC_DAV_LOAD_NEXT);

	if (is_cyclic)
		hw->ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_BLOCK);
	else
		hw->ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_SUPER_BLOCK);

	return 0;
}

static struct owl_dma_pchan *owl_dma_get_pchan(struct owl_dma *od,
					       struct owl_dma_vchan *vchan)
{
	struct owl_dma_pchan *pchan = NULL;
	unsigned long flags;
	int i;

	for (i = 0; i < od->nr_pchans; i++) {
		pchan = &od->pchans[i];

		spin_lock_irqsave(&pchan->lock, flags);
		if (!pchan->vchan) {
			pchan->vchan = vchan;
			spin_unlock_irqrestore(&pchan->lock, flags);
			break;
		}

		spin_unlock_irqrestore(&pchan->lock, flags);
	}

	return pchan;
}

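/*
 * OWL_DMA_IDLE_STAT carries one status bit per physical channel; the
 * bit is set while the channel is idle, hence the inversion below to
 * report "busy".
 */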
static int owl_dma_pchan_busy(struct owl_dma *od, struct owl_dma_pchan *pchan)
{
	unsigned int val;

	val = dma_readl(od, OWL_DMA_IDLE_STAT);

	return !(val & (1 << pchan->id));
}

static void owl_dma_terminate_pchan(struct owl_dma *od,
				    struct owl_dma_pchan *pchan)
{
	unsigned long flags;
	u32 irq_pd;

	pchan_writel(pchan, OWL_DMAX_START, 0);
	pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);

	spin_lock_irqsave(&od->lock, flags);
	dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), false);

	irq_pd = dma_readl(od, OWL_DMA_IRQ_PD0);
	if (irq_pd & (1 << pchan->id)) {
		dev_warn(od->dma.dev,
			 "terminating pchan %d that still has pending irq\n",
			 pchan->id);
		dma_writel(od, OWL_DMA_IRQ_PD0, (1 << pchan->id));
	}

	pchan->vchan = NULL;

	spin_unlock_irqrestore(&od->lock, flags);
}

static void owl_dma_pause_pchan(struct owl_dma_pchan *pchan)
{
	pchan_writel(pchan, OWL_DMAX_PAUSE, 1);
}

static void owl_dma_resume_pchan(struct owl_dma_pchan *pchan)
{
	pchan_writel(pchan, OWL_DMAX_PAUSE, 0);
}

static int owl_dma_start_next_txd(struct owl_dma_vchan *vchan)
{
	struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
	struct virt_dma_desc *vd = vchan_next_desc(&vchan->vc);
	struct owl_dma_pchan *pchan = vchan->pchan;
	struct owl_dma_txd *txd = to_owl_txd(&vd->tx);
	struct owl_dma_lli *lli;
	unsigned long flags;
	u32 int_ctl;

	list_del(&vd->node);

	vchan->txd = txd;

	/* Wait for channel inactive */
	while (owl_dma_pchan_busy(od, pchan))
		cpu_relax();

	lli = list_first_entry(&txd->lli_list,
			       struct owl_dma_lli, node);

	if (txd->cyclic)
		int_ctl = OWL_DMA_INTCTL_BLOCK;
	else
		int_ctl = OWL_DMA_INTCTL_SUPER_BLOCK;

	pchan_writel(pchan, OWL_DMAX_MODE, OWL_DMA_MODE_LME);
	pchan_writel(pchan, OWL_DMAX_LINKLIST_CTL,
		     OWL_DMA_LLC_SAV_LOAD_NEXT | OWL_DMA_LLC_DAV_LOAD_NEXT);
	pchan_writel(pchan, OWL_DMAX_NEXT_DESCRIPTOR, lli->phys);
	pchan_writel(pchan, OWL_DMAX_INT_CTL, int_ctl);

	/* Clear IRQ status for this pchan */
	pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);

	spin_lock_irqsave(&od->lock, flags);

	dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), true);

	spin_unlock_irqrestore(&od->lock, flags);

	dev_dbg(chan2dev(&vchan->vc.chan), "starting pchan %d\n", pchan->id);

	/* Start DMA transfer for this pchan */
	pchan_writel(pchan, OWL_DMAX_START, 0x1);

	return 0;
}

static void owl_dma_phy_free(struct owl_dma *od, struct owl_dma_vchan *vchan)
{
	/* Ensure that the physical channel is stopped */
	owl_dma_terminate_pchan(od, vchan->pchan);

	vchan->pchan = NULL;
}

static irqreturn_t owl_dma_interrupt(int irq, void *dev_id)
{
	struct owl_dma *od = dev_id;
	struct owl_dma_vchan *vchan;
	struct owl_dma_pchan *pchan;
	unsigned long pending;
	int i;
	unsigned int global_irq_pending, chan_irq_pending;

	spin_lock(&od->lock);

	pending = dma_readl(od, OWL_DMA_IRQ_PD0);

	/* Clear IRQ status for each pchan */
	for_each_set_bit(i, &pending, od->nr_pchans) {
		pchan = &od->pchans[i];
		pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);
	}

	/* Clear pending IRQ */
	dma_writel(od, OWL_DMA_IRQ_PD0, pending);

	/* Check for channel-level IRQs missed in the global pending register */
	for (i = 0; i < od->nr_pchans; i++) {
		pchan = &od->pchans[i];
		chan_irq_pending = pchan_readl(pchan, OWL_DMAX_INT_CTL) &
				   pchan_readl(pchan, OWL_DMAX_INT_STATUS);

		/* Dummy read to ensure OWL_DMA_IRQ_PD0 value is updated */
		dma_readl(od, OWL_DMA_IRQ_PD0);

		global_irq_pending = dma_readl(od, OWL_DMA_IRQ_PD0);

		if (chan_irq_pending && !(global_irq_pending & BIT(i))) {
			dev_dbg(od->dma.dev,
				"global and channel IRQ pending match err\n");

			/* Clear IRQ status for this pchan */
			pchan_update(pchan, OWL_DMAX_INT_STATUS,
				     0xff, false);

			/* Update global IRQ pending */
			pending |= BIT(i);
		}
	}

	spin_unlock(&od->lock);

	for_each_set_bit(i, &pending, od->nr_pchans) {
		struct owl_dma_txd *txd;

		pchan = &od->pchans[i];

		vchan = pchan->vchan;
		if (!vchan) {
			dev_warn(od->dma.dev, "no vchan attached on pchan %d\n",
				 pchan->id);
			continue;
		}

		spin_lock(&vchan->vc.lock);

		txd = vchan->txd;
		if (txd) {
			vchan->txd = NULL;

			vchan_cookie_complete(&txd->vd);

			/*
			 * Start the next descriptor (if any),
			 * otherwise free this channel.
			 */
			if (vchan_next_desc(&vchan->vc))
				owl_dma_start_next_txd(vchan);
			else
				owl_dma_phy_free(od, vchan);
		}

		spin_unlock(&vchan->vc.lock);
	}

	return IRQ_HANDLED;
}

static void owl_dma_free_txd(struct owl_dma *od, struct owl_dma_txd *txd)
{
	struct owl_dma_lli *lli, *_lli;

	if (unlikely(!txd))
		return;

	list_for_each_entry_safe(lli, _lli, &txd->lli_list, node)
		owl_dma_free_lli(od, lli);

	kfree(txd);
}

static void owl_dma_desc_free(struct virt_dma_desc *vd)
{
	struct owl_dma *od = to_owl_dma(vd->tx.chan->device);
	struct owl_dma_txd *txd = to_owl_txd(&vd->tx);

	owl_dma_free_txd(od, txd);
}

static int owl_dma_terminate_all(struct dma_chan *chan)
{
	struct owl_dma *od = to_owl_dma(chan->device);
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&vchan->vc.lock, flags);

	if (vchan->pchan)
		owl_dma_phy_free(od, vchan);

	if (vchan->txd) {
		owl_dma_desc_free(&vchan->txd->vd);
		vchan->txd = NULL;
	}

	vchan_get_all_descriptors(&vchan->vc, &head);
	vchan_dma_desc_free_list(&vchan->vc, &head);

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	return 0;
}

static int owl_dma_config(struct dma_chan *chan,
			  struct dma_slave_config *config)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);

	/* Reject definitely invalid configurations */
	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&vchan->cfg, config, sizeof(struct dma_slave_config));

	return 0;
}

static int owl_dma_pause(struct dma_chan *chan)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&vchan->vc.lock, flags);

	owl_dma_pause_pchan(vchan->pchan);

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	return 0;
}

static int owl_dma_resume(struct dma_chan *chan)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	unsigned long flags;

	if (!vchan->pchan && !vchan->txd)
		return 0;

	dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc);

	spin_lock_irqsave(&vchan->vc.lock, flags);

	owl_dma_resume_pchan(vchan->pchan);

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	return 0;
}

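/*
 * Residue is computed as the remaining byte count of the link-list node
 * the hardware is currently processing (OWL_DMAX_REMAIN_CNT) plus the
 * full frame lengths of all nodes that have not been started yet.
 */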
static u32 owl_dma_getbytes_chan(struct owl_dma_vchan *vchan)
{
	struct owl_dma_pchan *pchan;
	struct owl_dma_txd *txd;
	struct owl_dma_lli *lli;
	unsigned int next_lli_phy;
	size_t bytes;

	pchan = vchan->pchan;
	txd = vchan->txd;

	if (!pchan || !txd)
		return 0;

	/* Get the remaining byte count of the current node in the link list */
	bytes = pchan_readl(pchan, OWL_DMAX_REMAIN_CNT);

	/* Loop through the remaining nodes to get the total pending bytes */
	if (pchan_readl(pchan, OWL_DMAX_MODE) & OWL_DMA_MODE_LME) {
		next_lli_phy = pchan_readl(pchan, OWL_DMAX_NEXT_DESCRIPTOR);
		list_for_each_entry(lli, &txd->lli_list, node) {
			/* Start from the next active node */
			if (lli->phys == next_lli_phy) {
				/*
				 * Continue the walk from this node; restarting
				 * from the list head would also count frames
				 * that have already completed.
				 */
				list_for_each_entry_from(lli, &txd->lli_list,
							 node)
					bytes += lli->hw.flen;
				break;
			}
		}
	}

	return bytes;
}

static enum dma_status owl_dma_tx_status(struct dma_chan *chan,
					 dma_cookie_t cookie,
					 struct dma_tx_state *state)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	struct owl_dma_lli *lli;
	struct virt_dma_desc *vd;
	struct owl_dma_txd *txd;
	enum dma_status ret;
	unsigned long flags;
	size_t bytes = 0;

	ret = dma_cookie_status(chan, cookie, state);
	if (ret == DMA_COMPLETE || !state)
		return ret;

	spin_lock_irqsave(&vchan->vc.lock, flags);

	vd = vchan_find_desc(&vchan->vc, cookie);
	if (vd) {
		txd = to_owl_txd(&vd->tx);
		list_for_each_entry(lli, &txd->lli_list, node)
			bytes += lli->hw.flen;
	} else {
		bytes = owl_dma_getbytes_chan(vchan);
	}

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	dma_set_residue(state, bytes);

	return ret;
}

static void owl_dma_phy_alloc_and_start(struct owl_dma_vchan *vchan)
{
	struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
	struct owl_dma_pchan *pchan;

	pchan = owl_dma_get_pchan(od, vchan);
	if (!pchan)
		return;

	dev_dbg(od->dma.dev, "allocated pchan %d\n", pchan->id);

	vchan->pchan = pchan;
	owl_dma_start_next_txd(vchan);
}

static void owl_dma_issue_pending(struct dma_chan *chan)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&vchan->vc.lock, flags);
	if (vchan_issue_pending(&vchan->vc)) {
		if (!vchan->pchan)
			owl_dma_phy_alloc_and_start(vchan);
	}
	spin_unlock_irqrestore(&vchan->vc.lock, flags);
}

static struct dma_async_tx_descriptor
		*owl_dma_prep_memcpy(struct dma_chan *chan,
				     dma_addr_t dst, dma_addr_t src,
				     size_t len, unsigned long flags)
{
	struct owl_dma *od = to_owl_dma(chan->device);
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	struct owl_dma_txd *txd;
	struct owl_dma_lli *lli, *prev = NULL;
	size_t offset, bytes;
	int ret;

	if (!len)
		return NULL;

	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
	if (!txd)
		return NULL;

	INIT_LIST_HEAD(&txd->lli_list);

	/* Process the transfer frame by frame */
	for (offset = 0; offset < len; offset += bytes) {
		lli = owl_dma_alloc_lli(od);
		if (!lli) {
			dev_warn(chan2dev(chan), "failed to allocate lli\n");
			goto err_txd_free;
		}

		bytes = min_t(size_t, (len - offset), OWL_DMA_FRAME_MAX_LENGTH);

		ret = owl_dma_cfg_lli(vchan, lli, src + offset, dst + offset,
				      bytes, DMA_MEM_TO_MEM,
				      &vchan->cfg, txd->cyclic);
		if (ret) {
			dev_warn(chan2dev(chan), "failed to config lli\n");
			goto err_txd_free;
		}

		prev = owl_dma_add_lli(txd, prev, lli, false);
	}

	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

err_txd_free:
	owl_dma_free_txd(od, txd);
	return NULL;
}

static struct dma_async_tx_descriptor
		*owl_dma_prep_slave_sg(struct dma_chan *chan,
				       struct scatterlist *sgl,
				       unsigned int sg_len,
				       enum dma_transfer_direction dir,
				       unsigned long flags, void *context)
{
	struct owl_dma *od = to_owl_dma(chan->device);
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct owl_dma_txd *txd;
	struct owl_dma_lli *lli, *prev = NULL;
	struct scatterlist *sg;
	dma_addr_t addr, src = 0, dst = 0;
	size_t len;
	int ret, i;

	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
	if (!txd)
		return NULL;

	INIT_LIST_HEAD(&txd->lli_list);

	for_each_sg(sgl, sg, sg_len, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if (len > OWL_DMA_FRAME_MAX_LENGTH) {
			dev_err(od->dma.dev,
				"frame length exceeds max supported length\n");
			goto err_txd_free;
		}

		lli = owl_dma_alloc_lli(od);
		if (!lli) {
			dev_err(chan2dev(chan), "failed to allocate lli\n");
			goto err_txd_free;
		}

		if (dir == DMA_MEM_TO_DEV) {
			src = addr;
			dst = sconfig->dst_addr;
		} else {
			src = sconfig->src_addr;
			dst = addr;
		}

		ret = owl_dma_cfg_lli(vchan, lli, src, dst, len, dir, sconfig,
				      txd->cyclic);
		if (ret) {
			dev_warn(chan2dev(chan), "failed to config lli\n");
			goto err_txd_free;
		}

		prev = owl_dma_add_lli(txd, prev, lli, false);
	}

	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

err_txd_free:
	owl_dma_free_txd(od, txd);

	return NULL;
}

static struct dma_async_tx_descriptor
		*owl_prep_dma_cyclic(struct dma_chan *chan,
				     dma_addr_t buf_addr, size_t buf_len,
				     size_t period_len,
				     enum dma_transfer_direction dir,
				     unsigned long flags)
{
	struct owl_dma *od = to_owl_dma(chan->device);
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct owl_dma_txd *txd;
	struct owl_dma_lli *lli, *prev = NULL, *first = NULL;
	dma_addr_t src = 0, dst = 0;
	unsigned int periods = buf_len / period_len;
	int ret, i;

	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
	if (!txd)
		return NULL;

	INIT_LIST_HEAD(&txd->lli_list);
	txd->cyclic = true;

	for (i = 0; i < periods; i++) {
		lli = owl_dma_alloc_lli(od);
		if (!lli) {
			dev_warn(chan2dev(chan), "failed to allocate lli\n");
			goto err_txd_free;
		}

		if (dir == DMA_MEM_TO_DEV) {
			src = buf_addr + (period_len * i);
			dst = sconfig->dst_addr;
		} else if (dir == DMA_DEV_TO_MEM) {
			src = sconfig->src_addr;
			dst = buf_addr + (period_len * i);
		}

		ret = owl_dma_cfg_lli(vchan, lli, src, dst, period_len,
				      dir, sconfig, txd->cyclic);
		if (ret) {
			dev_warn(chan2dev(chan), "failed to config lli\n");
			goto err_txd_free;
		}

		if (!first)
			first = lli;

		prev = owl_dma_add_lli(txd, prev, lli, false);
	}

	/* Close the cyclic list by linking the last node back to the first */
	owl_dma_add_lli(txd, prev, first, true);

	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

err_txd_free:
	owl_dma_free_txd(od, txd);

	return NULL;
}

static void owl_dma_free_chan_resources(struct dma_chan *chan)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);

	/* Ensure all queued descriptors are freed */
	vchan_free_chan_resources(&vchan->vc);
}

static inline void owl_dma_free(struct owl_dma *od)
{
	struct owl_dma_vchan *vchan = NULL;
	struct owl_dma_vchan *next;

	list_for_each_entry_safe(vchan,
				 next, &od->dma.channels, vc.chan.device_node) {
		list_del(&vchan->vc.chan.device_node);
		tasklet_kill(&vchan->vc.task);
	}
}

static struct dma_chan *owl_dma_of_xlate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	struct owl_dma *od = ofdma->of_dma_data;
	struct owl_dma_vchan *vchan;
	struct dma_chan *chan;
	u8 drq = dma_spec->args[0];

	if (drq > od->nr_vchans)
		return NULL;

	chan = dma_get_any_slave_channel(&od->dma);
	if (!chan)
		return NULL;

	vchan = to_owl_vchan(chan);
	vchan->drq = drq;

	return chan;
}

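/*
 * Devicetree consumer sketch (illustrative values only): since
 * owl_dma_of_xlate() reads a single specifier cell, a client node
 * passes its DRQ number directly, e.g.
 *
 *	uart5: serial@e012a000 {
 *		...
 *		dmas = <&dma 22>, <&dma 23>;
 *		dma-names = "tx", "rx";
 *	};
 *
 * dma_spec->args[0] above is that DRQ number.
 */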
static int owl_dma_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct owl_dma *od;
	struct resource *res;
	int ret, i, nr_channels, nr_requests;

	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	od->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(od->base))
		return PTR_ERR(od->base);

	ret = of_property_read_u32(np, "dma-channels", &nr_channels);
	if (ret) {
		dev_err(&pdev->dev, "can't get dma-channels\n");
		return ret;
	}

	ret = of_property_read_u32(np, "dma-requests", &nr_requests);
	if (ret) {
		dev_err(&pdev->dev, "can't get dma-requests\n");
		return ret;
	}

	dev_info(&pdev->dev, "dma-channels %d, dma-requests %d\n",
		 nr_channels, nr_requests);

	od->nr_pchans = nr_channels;
	od->nr_vchans = nr_requests;

	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);

	platform_set_drvdata(pdev, od);
	spin_lock_init(&od->lock);

	dma_cap_set(DMA_MEMCPY, od->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, od->dma.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->dma.cap_mask);

	od->dma.dev = &pdev->dev;
	od->dma.device_free_chan_resources = owl_dma_free_chan_resources;
	od->dma.device_tx_status = owl_dma_tx_status;
	od->dma.device_issue_pending = owl_dma_issue_pending;
	od->dma.device_prep_dma_memcpy = owl_dma_prep_memcpy;
	od->dma.device_prep_slave_sg = owl_dma_prep_slave_sg;
	od->dma.device_prep_dma_cyclic = owl_prep_dma_cyclic;
	od->dma.device_config = owl_dma_config;
	od->dma.device_pause = owl_dma_pause;
	od->dma.device_resume = owl_dma_resume;
	od->dma.device_terminate_all = owl_dma_terminate_all;
	od->dma.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	od->dma.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	od->dma.directions = BIT(DMA_MEM_TO_MEM);
	od->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	INIT_LIST_HEAD(&od->dma.channels);

	od->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(od->clk)) {
		dev_err(&pdev->dev, "unable to get clock\n");
		return PTR_ERR(od->clk);
	}

	/*
	 * Even though the DMA controller is capable of generating 4
	 * IRQs for DMA priority feature, we only use 1 IRQ for
	 * simplicity.
	 */
	od->irq = platform_get_irq(pdev, 0);
	ret = devm_request_irq(&pdev->dev, od->irq, owl_dma_interrupt, 0,
			       dev_name(&pdev->dev), od);
	if (ret) {
		dev_err(&pdev->dev, "unable to request IRQ\n");
		return ret;
	}

	/* Init physical channel */
	od->pchans = devm_kcalloc(&pdev->dev, od->nr_pchans,
				  sizeof(struct owl_dma_pchan), GFP_KERNEL);
	if (!od->pchans)
		return -ENOMEM;

	for (i = 0; i < od->nr_pchans; i++) {
		struct owl_dma_pchan *pchan = &od->pchans[i];

		pchan->id = i;
		pchan->base = od->base + OWL_DMA_CHAN_BASE(i);
	}

	/* Init virtual channel */
	od->vchans = devm_kcalloc(&pdev->dev, od->nr_vchans,
				  sizeof(struct owl_dma_vchan), GFP_KERNEL);
	if (!od->vchans)
		return -ENOMEM;

	for (i = 0; i < od->nr_vchans; i++) {
		struct owl_dma_vchan *vchan = &od->vchans[i];

		vchan->vc.desc_free = owl_dma_desc_free;
		vchan_init(&vchan->vc, &od->dma);
	}

	/* Create a pool of consistent memory blocks for hardware descriptors */
	od->lli_pool = dma_pool_create(dev_name(od->dma.dev), od->dma.dev,
				       sizeof(struct owl_dma_lli),
				       __alignof__(struct owl_dma_lli),
				       0);
	if (!od->lli_pool) {
		dev_err(&pdev->dev, "unable to allocate DMA descriptor pool\n");
		return -ENOMEM;
	}

	clk_prepare_enable(od->clk);

	ret = dma_async_device_register(&od->dma);
	if (ret) {
		dev_err(&pdev->dev, "failed to register DMA engine device\n");
		goto err_pool_free;
	}

	/* Device-tree DMA controller registration */
	ret = of_dma_controller_register(pdev->dev.of_node,
					 owl_dma_of_xlate, od);
	if (ret) {
		dev_err(&pdev->dev, "of_dma_controller_register failed\n");
		goto err_dma_unregister;
	}

	return 0;

err_dma_unregister:
	dma_async_device_unregister(&od->dma);
err_pool_free:
	clk_disable_unprepare(od->clk);
	dma_pool_destroy(od->lli_pool);

	return ret;
}

static int owl_dma_remove(struct platform_device *pdev)
{
	struct owl_dma *od = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&od->dma);

	/* Mask all interrupts for this execution environment */
	dma_writel(od, OWL_DMA_IRQ_EN0, 0x0);

	/* Make sure we won't have any further interrupts */
	devm_free_irq(od->dma.dev, od->irq, od);

	owl_dma_free(od);

	clk_disable_unprepare(od->clk);

	return 0;
}

static const struct of_device_id owl_dma_match[] = {
	{ .compatible = "actions,s900-dma", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, owl_dma_match);

static struct platform_driver owl_dma_driver = {
	.probe	= owl_dma_probe,
	.remove	= owl_dma_remove,
	.driver = {
		.name = "dma-owl",
		.of_match_table = of_match_ptr(owl_dma_match),
	},
};

static int owl_dma_init(void)
{
	return platform_driver_register(&owl_dma_driver);
}
subsys_initcall(owl_dma_init);

static void __exit owl_dma_exit(void)
{
	platform_driver_unregister(&owl_dma_driver);
}
module_exit(owl_dma_exit);

MODULE_AUTHOR("David Liu <liuwei@actions-semi.com>");
MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
MODULE_DESCRIPTION("Actions Semi Owl SoCs DMA driver");
MODULE_LICENSE("GPL");