// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2012 Marvell International Ltd.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of.h>

#include "dmaengine.h"

#define DCSR		0x0000
#define DALGN		0x00a0
#define DINT		0x00f0
#define DDADR		0x0200
#define DSADR(n)	(0x0204 + ((n) << 4))
#define DTADR(n)	(0x0208 + ((n) << 4))
#define DCMD		0x020c

#define DCSR_RUN	BIT(31)	/* Run Bit (read / write) */
#define DCSR_NODESC	BIT(30)	/* No-Descriptor Fetch (read / write) */
#define DCSR_STOPIRQEN	BIT(29)	/* Stop Interrupt Enable (read / write) */
#define DCSR_REQPEND	BIT(8)	/* Request Pending (read-only) */
#define DCSR_STOPSTATE	BIT(3)	/* Stop State (read-only) */
#define DCSR_ENDINTR	BIT(2)	/* End Interrupt (read / write) */
#define DCSR_STARTINTR	BIT(1)	/* Start Interrupt (read / write) */
#define DCSR_BUSERR	BIT(0)	/* Bus Error Interrupt (read / write) */

#define DCSR_EORIRQEN	BIT(28)	/* End of Receive Interrupt Enable (R/W) */
#define DCSR_EORJMPEN	BIT(27)	/* Jump to next descriptor on EOR */
#define DCSR_EORSTOPEN	BIT(26)	/* STOP on an EOR */
#define DCSR_SETCMPST	BIT(25)	/* Set Descriptor Compare Status */
#define DCSR_CLRCMPST	BIT(24)	/* Clear Descriptor Compare Status */
#define DCSR_CMPST	BIT(10)	/* The Descriptor Compare Status */
#define DCSR_EORINTR	BIT(9)	/* The end of Receive */

#define DRCMR(n)	((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2))
#define DRCMR_MAPVLD	BIT(7)	/* Map Valid (read / write) */
#define DRCMR_CHLNUM	0x1f	/* mask for Channel Number (read / write) */

#define DDADR_DESCADDR	0xfffffff0	/* Address of next descriptor (mask) */
#define DDADR_STOP	BIT(0)	/* Stop (read / write) */

#define DCMD_INCSRCADDR	BIT(31)	/* Source Address Increment Setting. */
#define DCMD_INCTRGADDR	BIT(30)	/* Target Address Increment Setting. */
#define DCMD_FLOWSRC	BIT(29)	/* Flow Control by the source. */
#define DCMD_FLOWTRG	BIT(28)	/* Flow Control by the target. */
#define DCMD_STARTIRQEN	BIT(22)	/* Start Interrupt Enable */
#define DCMD_ENDIRQEN	BIT(21)	/* End Interrupt Enable */
#define DCMD_ENDIAN	BIT(18)	/* Device Endian-ness. */
#define DCMD_BURST8	(1 << 16)	/* 8 byte burst */
#define DCMD_BURST16	(2 << 16)	/* 16 byte burst */
#define DCMD_BURST32	(3 << 16)	/* 32 byte burst */
#define DCMD_WIDTH1	(1 << 14)	/* 1 byte width */
#define DCMD_WIDTH2	(2 << 14)	/* 2 byte width (HalfWord) */
#define DCMD_WIDTH4	(3 << 14)	/* 4 byte width (Word) */
#define DCMD_LENGTH	0x01fff		/* length mask (max = 8K - 1) */

#define PDMA_MAX_DESC_BYTES	DCMD_LENGTH

struct mmp_pdma_desc_hw {
	u32 ddadr;	/* Points to the next descriptor + flags */
	u32 dsadr;	/* DSADR value for the current transfer */
	u32 dtadr;	/* DTADR value for the current transfer */
	u32 dcmd;	/* DCMD value for the current transfer */
} __aligned(32);

struct mmp_pdma_desc_sw {
	struct mmp_pdma_desc_hw desc;
	struct list_head node;
	struct list_head tx_list;
	struct dma_async_tx_descriptor async_tx;
};

struct mmp_pdma_phy;

struct mmp_pdma_chan {
	struct device *dev;
	struct dma_chan chan;
	struct dma_async_tx_descriptor desc;
	struct mmp_pdma_phy *phy;
	enum dma_transfer_direction dir;
	struct dma_slave_config slave_config;

	struct mmp_pdma_desc_sw *cyclic_first;	/* first desc_sw if channel
						 * is in cyclic mode */

	/* channel's basic info */
	struct tasklet_struct tasklet;
	u32 dcmd;
	u32 drcmr;
	u32 dev_addr;

	/* list for desc */
	spinlock_t desc_lock;		/* Descriptor list lock */
	struct list_head chain_pending;	/* Link descriptors queue for pending */
	struct list_head chain_running;	/* Link descriptors queue for running */
	bool idle;			/* channel state machine */
	bool byte_align;

	struct dma_pool *desc_pool;	/* Descriptors pool */
};

struct mmp_pdma_phy {
	int idx;
	void __iomem *base;
	struct mmp_pdma_chan *vchan;
};

struct mmp_pdma_device {
	int dma_channels;
	void __iomem *base;
	struct device *dev;
	struct dma_device device;
	struct mmp_pdma_phy *phy;
	spinlock_t phy_lock;	/* protect alloc/free phy channels */
};

#define tx_to_mmp_pdma_desc(tx)					\
	container_of(tx, struct mmp_pdma_desc_sw, async_tx)
#define to_mmp_pdma_desc(lh)					\
	container_of(lh, struct mmp_pdma_desc_sw, node)
#define to_mmp_pdma_chan(dchan)					\
	container_of(dchan, struct mmp_pdma_chan, chan)
#define to_mmp_pdma_dev(dmadev)					\
	container_of(dmadev, struct mmp_pdma_device, device)

static int mmp_pdma_config_write(struct dma_chan *dchan,
				 struct dma_slave_config *cfg,
				 enum dma_transfer_direction direction);

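/* Load the address of the first hw descriptor into the channel's DDADR. */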
static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
{
	u32 reg = (phy->idx << 4) + DDADR;

	writel(addr, phy->base + reg);
}

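/*
 * Map the peripheral request line onto this physical channel, program the
 * per-channel byte-alignment bit, and set DCSR_RUN to start the channel.
 */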
static void enable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg, dalgn;

	if (!phy->vchan)
		return;

	reg = DRCMR(phy->vchan->drcmr);
	writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);

	dalgn = readl(phy->base + DALGN);
	if (phy->vchan->byte_align)
		dalgn |= 1 << phy->idx;
	else
		dalgn &= ~(1 << phy->idx);
	writel(dalgn, phy->base + DALGN);

	reg = (phy->idx << 2) + DCSR;
	writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg);
}

static void disable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg;

	if (!phy)
		return;

	reg = (phy->idx << 2) + DCSR;
	writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg);
}

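/*
 * Acknowledge the channel interrupt by writing the DCSR status bits back,
 * warning on a bus error.  Returns -EAGAIN if the interrupt did not belong
 * to this channel.
 */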
static int clear_chan_irq(struct mmp_pdma_phy *phy)
{
	u32 dcsr;
	u32 dint = readl(phy->base + DINT);
	u32 reg = (phy->idx << 2) + DCSR;

	if (!(dint & BIT(phy->idx)))
		return -EAGAIN;

	/* clear irq */
	dcsr = readl(phy->base + reg);
	writel(dcsr, phy->base + reg);
	if ((dcsr & DCSR_BUSERR) && (phy->vchan))
		dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");

	return 0;
}

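/* Per-channel interrupt: acknowledge it and defer the work to the tasklet. */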
static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
{
	struct mmp_pdma_phy *phy = dev_id;

	if (clear_chan_irq(phy) != 0)
		return IRQ_NONE;

	tasklet_schedule(&phy->vchan->tasklet);
	return IRQ_HANDLED;
}

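/*
 * Shared interrupt: read DINT and demultiplex to the per-channel handler
 * for every channel whose status bit is set.
 */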
static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
{
	struct mmp_pdma_device *pdev = dev_id;
	struct mmp_pdma_phy *phy;
	u32 dint = readl(pdev->base + DINT);
	int i, ret;
	int irq_num = 0;

	while (dint) {
		i = __ffs(dint);
		/* only handle interrupts belonging to pdma driver */
		if (i >= pdev->dma_channels)
			break;
		dint &= (dint - 1);
		phy = &pdev->phy[i];
		ret = mmp_pdma_chan_handler(irq, phy);
		if (ret == IRQ_HANDLED)
			irq_num++;
	}

	if (irq_num)
		return IRQ_HANDLED;

	return IRQ_NONE;
}

/* lookup free phy channel as descending priority */
static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
{
	int prio, i;
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	struct mmp_pdma_phy *phy, *found = NULL;
	unsigned long flags;

	/*
	 * dma channel priorities
	 * ch 0 - 3,  16 - 19  <--> (0)
	 * ch 4 - 7,  20 - 23  <--> (1)
	 * ch 8 - 11, 24 - 27  <--> (2)
	 * ch 12 - 15, 28 - 31  <--> (3)
	 */

	spin_lock_irqsave(&pdev->phy_lock, flags);
	for (prio = 0; prio <= ((pdev->dma_channels - 1) & 0xf) >> 2; prio++) {
		for (i = 0; i < pdev->dma_channels; i++) {
			if (prio != (i & 0xf) >> 2)
				continue;
			phy = &pdev->phy[i];
			if (!phy->vchan) {
				phy->vchan = pchan;
				found = phy;
				goto out_unlock;
			}
		}
	}

out_unlock:
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
	return found;
}

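/*
 * Clear the DRCMR mapping and release the physical channel so that another
 * virtual channel may claim it.
 */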
static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
{
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	unsigned long flags;
	u32 reg;

	if (!pchan->phy)
		return;

	/* clear the channel mapping in DRCMR */
	reg = DRCMR(pchan->drcmr);
	writel(0, pchan->phy->base + reg);

	spin_lock_irqsave(&pdev->phy_lock, flags);
	pchan->phy->vchan = NULL;
	pchan->phy = NULL;
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
}

/*
 * start_pending_queue - transfer any pending transactions
 * pending list ==> running list
 */
static void start_pending_queue(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;

	/* still running, irq will start the pending list */
	if (!chan->idle) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		return;
	}

	if (list_empty(&chan->chain_pending)) {
		/* chance to re-fetch phy channel with higher prio */
		mmp_pdma_free_phy(chan);
		dev_dbg(chan->dev, "no pending list\n");
		return;
	}

	if (!chan->phy) {
		chan->phy = lookup_phy(chan);
		if (!chan->phy) {
			dev_dbg(chan->dev, "no free dma channel\n");
			return;
		}
	}

	/*
	 * pending -> running
	 * reinitialize pending list
	 */
	desc = list_first_entry(&chan->chain_pending,
				struct mmp_pdma_desc_sw, node);
	list_splice_tail_init(&chan->chain_pending, &chan->chain_running);

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_desc(chan->phy, desc->async_tx.phys);
	enable_chan(chan->phy);
	chan->idle = false;
}

/* desc->tx_list ==> pending list */
static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan);
	struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx);
	struct mmp_pdma_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie = -EBUSY;

	spin_lock_irqsave(&chan->desc_lock, flags);

	list_for_each_entry(child, &desc->tx_list, node) {
		cookie = dma_cookie_assign(&child->async_tx);
	}

	/* softly link to pending list - desc->tx_list ==> pending list */
	list_splice_tail_init(&desc->tx_list, &chan->chain_pending);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}

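/*
 * Allocate one hw descriptor from the channel's dma pool and initialize
 * its async_tx fields.  Uses GFP_ATOMIC, so it is safe to call from the
 * prep_* paths.
 */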
static struct mmp_pdma_desc_sw *
mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		dev_err(chan->dev, "out of memory for link descriptor\n");
		return NULL;
	}

	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
	/* each desc has submit */
	desc->async_tx.tx_submit = mmp_pdma_tx_submit;
	desc->async_tx.phys = pdesc;

	return desc;
}

/*
 * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel.
 *
 * This function creates a dma pool for descriptor allocation.
 *
 * Return - The number of allocated descriptors.
 */
static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);

	if (chan->desc_pool)
		return 1;

	chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device),
					  chan->dev,
					  sizeof(struct mmp_pdma_desc_sw),
					  __alignof__(struct mmp_pdma_desc_sw),
					  0);
	if (!chan->desc_pool) {
		dev_err(chan->dev, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	mmp_pdma_free_phy(chan);
	chan->idle = true;
	chan->dev_addr = 0;
	return 1;
}

static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
				    struct list_head *list)
{
	struct mmp_pdma_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	mmp_pdma_free_desc_list(chan, &chan->chain_pending);
	mmp_pdma_free_desc_list(chan, &chan->chain_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
	chan->idle = true;
	chan->dev_addr = 0;
	mmp_pdma_free_phy(chan);
}

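/*
 * Prepare a memory-to-memory transfer.  The copy is split into a chain of
 * descriptors of at most PDMA_MAX_DESC_BYTES each; only the last one gets
 * DCMD_ENDIRQEN and the DDADR_STOP terminator.
 */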
static struct dma_async_tx_descriptor *
mmp_pdma_prep_memcpy(struct dma_chan *dchan,
		     dma_addr_t dma_dst, dma_addr_t dma_src,
		     size_t len, unsigned long flags)
{
	struct mmp_pdma_chan *chan;
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy = 0;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_mmp_pdma_chan(dchan);
	chan->byte_align = false;

	if (!chan->dir) {
		chan->dir = DMA_MEM_TO_MEM;
		chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR;
		chan->dcmd |= DCMD_BURST32;
	}

	do {
		/* Allocate the link descriptor from DMA pool */
		new = mmp_pdma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev, "no memory for desc\n");
			goto fail;
		}

		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
		if (dma_src & 0x7 || dma_dst & 0x7)
			chan->byte_align = true;

		new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
		new->desc.dsadr = dma_src;
		new->desc.dtadr = dma_dst;

		if (!first)
			first = new;
		else
			prev->desc.ddadr = new->async_tx.phys;

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;

		if (chan->dir == DMA_MEM_TO_DEV) {
			dma_src += copy;
		} else if (chan->dir == DMA_DEV_TO_MEM) {
			dma_dst += copy;
		} else if (chan->dir == DMA_MEM_TO_MEM) {
			dma_src += copy;
			dma_dst += copy;
		}

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	first->async_tx.flags = flags; /* client is in control of this ack */
	first->async_tx.cookie = -EBUSY;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	chan->cyclic_first = NULL;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

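/*
 * Prepare a slave scatter/gather transfer.  Every sg entry is split into
 * descriptors of at most PDMA_MAX_DESC_BYTES, with the device-side address
 * taken from the channel's slave configuration.
 */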
static struct dma_async_tx_descriptor *
mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		       unsigned int sg_len, enum dma_transfer_direction dir,
		       unsigned long flags, void *context)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	size_t len, avail;
	struct scatterlist *sg;
	dma_addr_t addr;
	int i;

	if ((sgl == NULL) || (sg_len == 0))
		return NULL;

	chan->byte_align = false;

	mmp_pdma_config_write(dchan, &chan->slave_config, dir);

	for_each_sg(sgl, sg, sg_len, i) {
		addr = sg_dma_address(sg);
		avail = sg_dma_len(sg);

		do {
			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
			if (addr & 0x7)
				chan->byte_align = true;

			/* allocate and populate the descriptor */
			new = mmp_pdma_alloc_descriptor(chan);
			if (!new) {
				dev_err(chan->dev, "no memory for desc\n");
				goto fail;
			}

			new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
			if (dir == DMA_MEM_TO_DEV) {
				new->desc.dsadr = addr;
				new->desc.dtadr = chan->dev_addr;
			} else {
				new->desc.dsadr = chan->dev_addr;
				new->desc.dtadr = addr;
			}

			if (!first)
				first = new;
			else
				prev->desc.ddadr = new->async_tx.phys;

			new->async_tx.cookie = 0;
			async_tx_ack(&new->async_tx);
			prev = new;

			/* Insert the link descriptor to the LD ring */
			list_add_tail(&new->node, &first->tx_list);

			/* update metadata */
			addr += len;
			avail -= len;
		} while (avail);
	}

	first->async_tx.cookie = -EBUSY;
	first->async_tx.flags = flags;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	chan->dir = dir;
	chan->cyclic_first = NULL;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

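/*
 * Prepare a cyclic transfer: one descriptor per period, each raising an
 * interrupt, with the last descriptor linked back to the first.
 */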
static struct dma_async_tx_descriptor *
mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
			 dma_addr_t buf_addr, size_t len, size_t period_len,
			 enum dma_transfer_direction direction,
			 unsigned long flags)
{
	struct mmp_pdma_chan *chan;
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
	dma_addr_t dma_src, dma_dst;

	if (!dchan || !len || !period_len)
		return NULL;

	/* the buffer length must be a multiple of period_len */
	if (len % period_len != 0)
		return NULL;

	if (period_len > PDMA_MAX_DESC_BYTES)
		return NULL;

	chan = to_mmp_pdma_chan(dchan);
	mmp_pdma_config_write(dchan, &chan->slave_config, direction);

	switch (direction) {
	case DMA_MEM_TO_DEV:
		dma_src = buf_addr;
		dma_dst = chan->dev_addr;
		break;
	case DMA_DEV_TO_MEM:
		dma_dst = buf_addr;
		dma_src = chan->dev_addr;
		break;
	default:
		dev_err(chan->dev, "Unsupported direction for cyclic DMA\n");
		return NULL;
	}

	chan->dir = direction;

	do {
		/* Allocate the link descriptor from DMA pool */
		new = mmp_pdma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev, "no memory for desc\n");
			goto fail;
		}

		new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN |
				  (DCMD_LENGTH & period_len));
		new->desc.dsadr = dma_src;
		new->desc.dtadr = dma_dst;

		if (!first)
			first = new;
		else
			prev->desc.ddadr = new->async_tx.phys;

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= period_len;

		if (chan->dir == DMA_MEM_TO_DEV)
			dma_src += period_len;
		else
			dma_dst += period_len;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	first->async_tx.flags = flags; /* client is in control of this ack */
	first->async_tx.cookie = -EBUSY;

	/* make the cyclic link */
	new->desc.ddadr = first->async_tx.phys;
	chan->cyclic_first = first;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

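/*
 * Translate a dma_slave_config into the channel's DCMD flow-control,
 * width and burst bits, and latch the device-side address for the given
 * direction.
 */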
static int mmp_pdma_config_write(struct dma_chan *dchan,
				 struct dma_slave_config *cfg,
				 enum dma_transfer_direction direction)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	u32 maxburst = 0, addr = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

	if (!dchan)
		return -EINVAL;

	if (direction == DMA_DEV_TO_MEM) {
		chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
		maxburst = cfg->src_maxburst;
		width = cfg->src_addr_width;
		addr = cfg->src_addr;
	} else if (direction == DMA_MEM_TO_DEV) {
		chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
		maxburst = cfg->dst_maxburst;
		width = cfg->dst_addr_width;
		addr = cfg->dst_addr;
	}

	if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
		chan->dcmd |= DCMD_WIDTH1;
	else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		chan->dcmd |= DCMD_WIDTH2;
	else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
		chan->dcmd |= DCMD_WIDTH4;

	if (maxburst == 8)
		chan->dcmd |= DCMD_BURST8;
	else if (maxburst == 16)
		chan->dcmd |= DCMD_BURST16;
	else if (maxburst == 32)
		chan->dcmd |= DCMD_BURST32;

	chan->dir = direction;
	chan->dev_addr = addr;
	/* FIXME: drivers should be ported over to use the filter
	 * function. Once that's done, the following two lines can
	 * be removed.
	 */
	if (cfg->slave_id)
		chan->drcmr = cfg->slave_id;

	return 0;
}

static int mmp_pdma_config(struct dma_chan *dchan,
			   struct dma_slave_config *cfg)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);

	memcpy(&chan->slave_config, cfg, sizeof(*cfg));
	return 0;
}

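/* Stop the hardware channel, release it, and free all queued descriptors. */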
static int mmp_pdma_terminate_all(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	if (!dchan)
		return -EINVAL;

	disable_chan(chan->phy);
	mmp_pdma_free_phy(chan);
	spin_lock_irqsave(&chan->desc_lock, flags);
	mmp_pdma_free_desc_list(chan, &chan->chain_pending);
	mmp_pdma_free_desc_list(chan, &chan->chain_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
	chan->idle = true;

	return 0;
}

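/*
 * Compute the residue for a cookie by comparing the hardware's current
 * source/target address against the descriptors on the running chain.
 */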
static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
				     dma_cookie_t cookie)
{
	struct mmp_pdma_desc_sw *sw;
	u32 curr, residue = 0;
	bool passed = false;
	bool cyclic = chan->cyclic_first != NULL;

	/*
	 * If the channel does not have a phy pointer anymore, it has already
	 * been completed. Therefore, its residue is 0.
	 */
	if (!chan->phy)
		return 0;

	if (chan->dir == DMA_DEV_TO_MEM)
		curr = readl(chan->phy->base + DTADR(chan->phy->idx));
	else
		curr = readl(chan->phy->base + DSADR(chan->phy->idx));

	list_for_each_entry(sw, &chan->chain_running, node) {
		u32 start, end, len;

		if (chan->dir == DMA_DEV_TO_MEM)
			start = sw->desc.dtadr;
		else
			start = sw->desc.dsadr;

		len = sw->desc.dcmd & DCMD_LENGTH;
		end = start + len;

		/*
		 * 'passed' will be latched once we find the descriptor that
		 * lies inside the boundaries of the curr pointer. All
		 * descriptors that occur in the list _after_ that partially
		 * handled descriptor are still to be processed and are hence
		 * added to the residual bytes counter.
		 */

		if (passed) {
			residue += len;
		} else if (curr >= start && curr <= end) {
			residue += end - curr;
			passed = true;
		}

		/*
		 * Descriptors that have the ENDIRQEN bit set mark the end of a
		 * transaction chain, and the cookie assigned with it has been
		 * returned previously from mmp_pdma_tx_submit().
		 *
		 * In case we have multiple transactions in the running chain,
		 * and the cookie does not match the one the user asked us
		 * about, reset the state variables and start over.
		 *
		 * This logic does not apply to cyclic transactions, where all
		 * descriptors have the ENDIRQEN bit set, and for which we
		 * can't have multiple transactions on one channel anyway.
		 */
		if (cyclic || !(sw->desc.dcmd & DCMD_ENDIRQEN))
			continue;

		if (sw->async_tx.cookie == cookie) {
			return residue;
		} else {
			residue = 0;
			passed = false;
		}
	}

	/* We should only get here in case of cyclic transactions */
	return residue;
}

static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	enum dma_status ret;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (likely(ret != DMA_ERROR))
		dma_set_residue(txstate, mmp_pdma_residue(chan, cookie));

	return ret;
}

/*
 * mmp_pdma_issue_pending - Issue the DMA start command
 * pending list ==> running list
 */
static void mmp_pdma_issue_pending(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/*
 * dma_do_tasklet
 * Run the callback for each completed descriptor and
 * start the pending list
 */
static void dma_do_tasklet(struct tasklet_struct *t)
{
	struct mmp_pdma_chan *chan = from_tasklet(chan, t, tasklet);
	struct mmp_pdma_desc_sw *desc, *_desc;
	LIST_HEAD(chain_cleanup);
	unsigned long flags;
	struct dmaengine_desc_callback cb;

	if (chan->cyclic_first) {
		spin_lock_irqsave(&chan->desc_lock, flags);
		desc = chan->cyclic_first;
		dmaengine_desc_get_callback(&desc->async_tx, &cb);
		spin_unlock_irqrestore(&chan->desc_lock, flags);

		dmaengine_desc_callback_invoke(&cb, NULL);

		return;
	}

	/* submit pending list; callback for each desc; free desc */
	spin_lock_irqsave(&chan->desc_lock, flags);

	list_for_each_entry_safe(desc, _desc, &chan->chain_running, node) {
		/*
		 * move the descriptors to a temporary list so we can drop
		 * the lock during the entire cleanup operation
		 */
		list_move(&desc->node, &chain_cleanup);

		/*
		 * Look for the first list entry which has the ENDIRQEN flag
		 * set. That is the descriptor we got an interrupt for, so
		 * complete that transaction and its cookie.
		 */
		if (desc->desc.dcmd & DCMD_ENDIRQEN) {
			dma_cookie_t cookie = desc->async_tx.cookie;
			dma_cookie_complete(&desc->async_tx);
			dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
			break;
		}
	}

	/*
	 * The hardware is idle and ready for more when the
	 * chain_running list is empty.
	 */
	chan->idle = list_empty(&chan->chain_running);

	/* Start any pending transactions automatically */
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) {
		struct dma_async_tx_descriptor *txd = &desc->async_tx;

		/* Remove from the list of transactions */
		list_del(&desc->node);
		/* Run the link descriptor callback function */
		dmaengine_desc_get_callback(txd, &cb);
		dmaengine_desc_callback_invoke(&cb, NULL);

		dma_pool_free(chan->desc_pool, desc, txd->phys);
	}
}

static int mmp_pdma_remove(struct platform_device *op)
{
	struct mmp_pdma_device *pdev = platform_get_drvdata(op);
	struct mmp_pdma_phy *phy;
	int i, irq = 0, irq_num = 0;

	if (op->dev.of_node)
		of_dma_controller_free(op->dev.of_node);

	for (i = 0; i < pdev->dma_channels; i++) {
		if (platform_get_irq(op, i) > 0)
			irq_num++;
	}

	if (irq_num != pdev->dma_channels) {
		irq = platform_get_irq(op, 0);
		devm_free_irq(&op->dev, irq, pdev);
	} else {
		for (i = 0; i < pdev->dma_channels; i++) {
			phy = &pdev->phy[i];
			irq = platform_get_irq(op, i);
			devm_free_irq(&op->dev, irq, phy);
		}
	}

	dma_async_device_unregister(&pdev->device);
	return 0;
}

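/*
 * Set up one physical channel and its virtual channel wrapper: request the
 * per-channel IRQ if one is provided, initialize the tasklet and descriptor
 * lists, and add the channel to the dma_device's channel list.
 */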
static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
{
	struct mmp_pdma_phy *phy = &pdev->phy[idx];
	struct mmp_pdma_chan *chan;
	int ret;

	chan = devm_kzalloc(pdev->dev, sizeof(*chan), GFP_KERNEL);
	if (chan == NULL)
		return -ENOMEM;

	phy->idx = idx;
	phy->base = pdev->base;

	if (irq) {
		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler,
				       IRQF_SHARED, "pdma", phy);
		if (ret) {
			dev_err(pdev->dev, "channel request irq fail!\n");
			return ret;
		}
	}

	spin_lock_init(&chan->desc_lock);
	chan->dev = pdev->dev;
	chan->chan.device = &pdev->device;
	tasklet_setup(&chan->tasklet, dma_do_tasklet);
	INIT_LIST_HEAD(&chan->chain_pending);
	INIT_LIST_HEAD(&chan->chain_running);

	/* register virt channel to dma engine */
	list_add_tail(&chan->chan.device_node, &pdev->device.channels);

	return 0;
}

static const struct of_device_id mmp_pdma_dt_ids[] = {
	{ .compatible = "marvell,pdma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);

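/*
 * Translate a DT dma-spec into a channel.  A typical controller node looks
 * roughly like this (a sketch; the mmp-dma binding document has the
 * authoritative example):
 *
 *	pdma0: dma-controller@d4000000 {
 *		compatible = "marvell,pdma-1.0";
 *		reg = <0xd4000000 0x10000>;
 *		interrupts = <47>;
 *		#dma-channels = <16>;
 *	};
 *
 * The single cell in a consumer's "dmas" specifier selects the DRCMR
 * request line, stored in the channel's drcmr field below.
 */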
static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct mmp_pdma_device *d = ofdma->of_dma_data;
	struct dma_chan *chan;

	chan = dma_get_any_slave_channel(&d->device);
	if (!chan)
		return NULL;

	to_mmp_pdma_chan(chan)->drcmr = dma_spec->args[0];

	return chan;
}

static int mmp_pdma_probe(struct platform_device *op)
{
	struct mmp_pdma_device *pdev;
	const struct of_device_id *of_id;
	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
	struct resource *iores;
	int i, ret, irq = 0;
	int dma_channels = 0, irq_num = 0;
	const enum dma_slave_buswidth widths =
		DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
		DMA_SLAVE_BUSWIDTH_4_BYTES;

	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;

	pdev->dev = &op->dev;

	spin_lock_init(&pdev->phy_lock);

	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
	pdev->base = devm_ioremap_resource(pdev->dev, iores);
	if (IS_ERR(pdev->base))
		return PTR_ERR(pdev->base);

	of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
	if (of_id)
		of_property_read_u32(pdev->dev->of_node, "#dma-channels",
				     &dma_channels);
	else if (pdata && pdata->dma_channels)
		dma_channels = pdata->dma_channels;
	else
		dma_channels = 32;	/* default 32 channel */
	pdev->dma_channels = dma_channels;

	for (i = 0; i < dma_channels; i++) {
		if (platform_get_irq_optional(op, i) > 0)
			irq_num++;
	}

	pdev->phy = devm_kcalloc(pdev->dev, dma_channels, sizeof(*pdev->phy),
				 GFP_KERNEL);
	if (pdev->phy == NULL)
		return -ENOMEM;

	INIT_LIST_HEAD(&pdev->device.channels);

	if (irq_num != dma_channels) {
		/* all chan share one irq, demux inside */
		irq = platform_get_irq(op, 0);
		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler,
				       IRQF_SHARED, "pdma", pdev);
		if (ret)
			return ret;
	}

	for (i = 0; i < dma_channels; i++) {
		irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i);
		ret = mmp_pdma_chan_init(pdev, i, irq);
		if (ret)
			return ret;
	}

	dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
	dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
	dma_cap_set(DMA_CYCLIC, pdev->device.cap_mask);
	dma_cap_set(DMA_PRIVATE, pdev->device.cap_mask);
	pdev->device.dev = &op->dev;
	pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
	pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
	pdev->device.device_tx_status = mmp_pdma_tx_status;
	pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
	pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
	pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic;
	pdev->device.device_issue_pending = mmp_pdma_issue_pending;
	pdev->device.device_config = mmp_pdma_config;
	pdev->device.device_terminate_all = mmp_pdma_terminate_all;
	pdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES;
	pdev->device.src_addr_widths = widths;
	pdev->device.dst_addr_widths = widths;
	pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	if (pdev->dev->coherent_dma_mask)
		dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
	else
		dma_set_mask(pdev->dev, DMA_BIT_MASK(64));

	ret = dma_async_device_register(&pdev->device);
	if (ret) {
		dev_err(pdev->device.dev, "unable to register\n");
		return ret;
	}

	if (op->dev.of_node) {
		/* Device-tree DMA controller registration */
		ret = of_dma_controller_register(op->dev.of_node,
						 mmp_pdma_dma_xlate, pdev);
		if (ret < 0) {
			dev_err(&op->dev, "of_dma_controller_register failed\n");
			return ret;
		}
	}

	platform_set_drvdata(op, pdev);
	dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);
	return 0;
}

static const struct platform_device_id mmp_pdma_id_table[] = {
	{ "mmp-pdma", },
	{ },
};

static struct platform_driver mmp_pdma_driver = {
	.driver = {
		.name = "mmp-pdma",
		.of_match_table = mmp_pdma_dt_ids,
	},
	.id_table = mmp_pdma_id_table,
	.probe = mmp_pdma_probe,
	.remove = mmp_pdma_remove,
};

module_platform_driver(mmp_pdma_driver);

MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_LICENSE("GPL v2");