/*
 * Copyright (C) 2013, Lars-Peter Clausen <lars@metafoo.de>
 * GDMA4740 DMAC support
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/of_dma.h>
#include <linux/reset.h>
#include <linux/of_device.h>

#include "virt-dma.h"

#define GDMA_REG_SRC_ADDR(x)		(0x00 + (x) * 0x10)
#define GDMA_REG_DST_ADDR(x)		(0x04 + (x) * 0x10)

#define GDMA_REG_CTRL0(x)		(0x08 + (x) * 0x10)
#define GDMA_REG_CTRL0_TX_MASK		0xffff
#define GDMA_REG_CTRL0_TX_SHIFT		16
#define GDMA_REG_CTRL0_CURR_MASK	0xff
#define GDMA_REG_CTRL0_CURR_SHIFT	8
#define GDMA_REG_CTRL0_SRC_ADDR_FIXED	BIT(7)
#define GDMA_REG_CTRL0_DST_ADDR_FIXED	BIT(6)
#define GDMA_REG_CTRL0_BURST_MASK	0x7
#define GDMA_REG_CTRL0_BURST_SHIFT	3
#define GDMA_REG_CTRL0_DONE_INT		BIT(2)
#define GDMA_REG_CTRL0_ENABLE		BIT(1)
#define GDMA_REG_CTRL0_SW_MODE		BIT(0)

#define GDMA_REG_CTRL1(x)		(0x0c + (x) * 0x10)
#define GDMA_REG_CTRL1_SEG_MASK		0xf
#define GDMA_REG_CTRL1_SEG_SHIFT	22
#define GDMA_REG_CTRL1_REQ_MASK		0x3f
#define GDMA_REG_CTRL1_SRC_REQ_SHIFT	16
#define GDMA_REG_CTRL1_DST_REQ_SHIFT	8
#define GDMA_REG_CTRL1_NEXT_MASK	0x1f
#define GDMA_REG_CTRL1_NEXT_SHIFT	3
#define GDMA_REG_CTRL1_COHERENT		BIT(2)
#define GDMA_REG_CTRL1_FAIL		BIT(1)
#define GDMA_REG_CTRL1_MASK		BIT(0)

#define GDMA_REG_UNMASK_INT		0x200
#define GDMA_REG_DONE_INT		0x204

#define GDMA_REG_GCT			0x220
#define GDMA_REG_GCT_CHAN_MASK		0x3
#define GDMA_REG_GCT_CHAN_SHIFT		3
#define GDMA_REG_GCT_VER_MASK		0x3
#define GDMA_REG_GCT_VER_SHIFT		1
#define GDMA_REG_GCT_ARBIT_RR		BIT(0)

#define GDMA_REG_REQSTS			0x2a0
#define GDMA_REG_ACKSTS			0x2a4
#define GDMA_REG_FINSTS			0x2a8

/* for RT305X gdma registers */
#define GDMA_RT305X_CTRL0_REQ_MASK	0xf
#define GDMA_RT305X_CTRL0_SRC_REQ_SHIFT	12
#define GDMA_RT305X_CTRL0_DST_REQ_SHIFT	8

#define GDMA_RT305X_CTRL1_FAIL		BIT(4)
#define GDMA_RT305X_CTRL1_NEXT_MASK	0x7
#define GDMA_RT305X_CTRL1_NEXT_SHIFT	1

#define GDMA_RT305X_STATUS_INT		0x80
#define GDMA_RT305X_STATUS_SIGNAL	0x84
#define GDMA_RT305X_GCT			0x88

/* for MT7621 gdma registers */
#define GDMA_REG_PERF_START(x)		(0x230 + (x) * 0x8)
#define GDMA_REG_PERF_END(x)		(0x234 + (x) * 0x8)

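/* burst sizes as encoded in the CTRL0 BURST field */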
enum gdma_dma_transfer_size {
	GDMA_TRANSFER_SIZE_4BYTE	= 0,
	GDMA_TRANSFER_SIZE_8BYTE	= 1,
	GDMA_TRANSFER_SIZE_16BYTE	= 2,
	GDMA_TRANSFER_SIZE_32BYTE	= 3,
	GDMA_TRANSFER_SIZE_64BYTE	= 4,
};

struct gdma_dma_sg {
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	u32 len;
};

struct gdma_dma_desc {
	struct virt_dma_desc vdesc;

	enum dma_transfer_direction direction;
	bool cyclic;

	u32 residue;
	unsigned int num_sgs;
	struct gdma_dma_sg sg[];
};

struct gdma_dmaengine_chan {
	struct virt_dma_chan vchan;
	unsigned int id;
	unsigned int slave_id;

	dma_addr_t fifo_addr;
	enum gdma_dma_transfer_size burst_size;

	struct gdma_dma_desc *desc;
	unsigned int next_sg;
};

struct gdma_dma_dev {
	struct dma_device ddev;
	struct device_dma_parameters dma_parms;
	struct gdma_data *data;
	void __iomem *base;
	struct tasklet_struct task;
	volatile unsigned long chan_issued;
	atomic_t cnt;

	struct gdma_dmaengine_chan chan[];
};

struct gdma_data {
	int chancnt;
	u32 done_int_reg;
	void (*init)(struct gdma_dma_dev *dma_dev);
	int (*start_transfer)(struct gdma_dmaengine_chan *chan);
};

static struct gdma_dma_dev *gdma_dma_chan_get_dev(
	struct gdma_dmaengine_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct gdma_dma_dev,
		ddev);
}

static struct gdma_dmaengine_chan *to_gdma_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct gdma_dmaengine_chan, vchan.chan);
}

static struct gdma_dma_desc *to_gdma_dma_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct gdma_dma_desc, vdesc);
}

static inline uint32_t gdma_dma_read(struct gdma_dma_dev *dma_dev,
				     unsigned int reg)
{
	return readl(dma_dev->base + reg);
}

static inline void gdma_dma_write(struct gdma_dma_dev *dma_dev,
				  unsigned int reg, uint32_t val)
{
	writel(val, dma_dev->base + reg);
}

static struct gdma_dma_desc *gdma_dma_alloc_desc(unsigned int num_sgs)
{
	return kzalloc(sizeof(struct gdma_dma_desc) +
		sizeof(struct gdma_dma_sg) * num_sgs, GFP_ATOMIC);
}

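/*
 * Map a dma_slave_config maxburst value (in units of the 4-byte bus
 * width, the only width this controller supports) to the nearest burst
 * size the hardware can do.
 */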
static enum gdma_dma_transfer_size gdma_dma_maxburst(u32 maxburst)
{
	if (maxburst < 2)
		return GDMA_TRANSFER_SIZE_4BYTE;
	else if (maxburst < 4)
		return GDMA_TRANSFER_SIZE_8BYTE;
	else if (maxburst < 8)
		return GDMA_TRANSFER_SIZE_16BYTE;
	else if (maxburst < 16)
		return GDMA_TRANSFER_SIZE_32BYTE;
	else
		return GDMA_TRANSFER_SIZE_64BYTE;
}

static int gdma_dma_config(struct dma_chan *c,
			   struct dma_slave_config *config)
{
	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);

	if (config->device_fc) {
		dev_err(dma_dev->ddev.dev, "flow controller is not supported\n");
		return -EINVAL;
	}

	switch (config->direction) {
	case DMA_MEM_TO_DEV:
		if (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) {
			dev_err(dma_dev->ddev.dev, "only 4 byte buswidth is supported\n");
			return -EINVAL;
		}
		chan->slave_id = config->slave_id;
		chan->fifo_addr = config->dst_addr;
		chan->burst_size = gdma_dma_maxburst(config->dst_maxburst);
		break;
	case DMA_DEV_TO_MEM:
		if (config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) {
			dev_err(dma_dev->ddev.dev, "only 4 byte buswidth is supported\n");
			return -EINVAL;
		}
		chan->slave_id = config->slave_id;
		chan->fifo_addr = config->src_addr;
		chan->burst_size = gdma_dma_maxburst(config->src_maxburst);
		break;
	default:
		dev_err(dma_dev->ddev.dev, "invalid direction type %d\n",
			config->direction);
		return -EINVAL;
	}

	return 0;
}

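/*
 * Drop all queued descriptors and wait (up to 5 seconds) for the
 * hardware to clear the channel enable bit before reporting the
 * channel as terminated.
 */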
static int gdma_dma_terminate_all(struct dma_chan *c)
{
	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
	unsigned long flags, timeout;
	LIST_HEAD(head);
	int i = 0;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	chan->desc = NULL;
	clear_bit(chan->id, &dma_dev->chan_issued);
	vchan_get_all_descriptors(&chan->vchan, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&chan->vchan, &head);

	/* wait for the current transfer to complete */
	timeout = jiffies + msecs_to_jiffies(5000);
	while (gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id)) &
			GDMA_REG_CTRL0_ENABLE) {
		if (time_after_eq(jiffies, timeout)) {
			dev_err(dma_dev->ddev.dev, "chan %d wait timeout\n",
				chan->id);
			/* restore to init value */
			gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), 0);
			break;
		}
		cpu_relax();
		i++;
	}

	if (i)
		dev_dbg(dma_dev->ddev.dev, "terminate chan %d loops %d\n",
			chan->id, i);

	return 0;
}

static void rt305x_dump_reg(struct gdma_dma_dev *dma_dev, int id)
{
	dev_dbg(dma_dev->ddev.dev,
		"chan %d, src %08x, dst %08x, ctr0 %08x, ctr1 %08x, intr %08x, signal %08x\n",
		id,
		gdma_dma_read(dma_dev, GDMA_REG_SRC_ADDR(id)),
		gdma_dma_read(dma_dev, GDMA_REG_DST_ADDR(id)),
		gdma_dma_read(dma_dev, GDMA_REG_CTRL0(id)),
		gdma_dma_read(dma_dev, GDMA_REG_CTRL1(id)),
		gdma_dma_read(dma_dev, GDMA_RT305X_STATUS_INT),
		gdma_dma_read(dma_dev, GDMA_RT305X_STATUS_SIGNAL));
}

static int rt305x_gdma_start_transfer(struct gdma_dmaengine_chan *chan)
{
	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
	dma_addr_t src_addr, dst_addr;
	struct gdma_dma_sg *sg;
	uint32_t ctrl0, ctrl1;

	/* verify that the channel is stopped */
	ctrl0 = gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id));
	if (unlikely(ctrl0 & GDMA_REG_CTRL0_ENABLE)) {
		dev_err(dma_dev->ddev.dev, "chan %d is already started (%08x)\n",
			chan->id, ctrl0);
		rt305x_dump_reg(dma_dev, chan->id);
		return -EINVAL;
	}

	sg = &chan->desc->sg[chan->next_sg];
	if (chan->desc->direction == DMA_MEM_TO_DEV) {
		src_addr = sg->src_addr;
		dst_addr = chan->fifo_addr;
		ctrl0 = GDMA_REG_CTRL0_DST_ADDR_FIXED |
			(8 << GDMA_RT305X_CTRL0_SRC_REQ_SHIFT) |
			(chan->slave_id << GDMA_RT305X_CTRL0_DST_REQ_SHIFT);
	} else if (chan->desc->direction == DMA_DEV_TO_MEM) {
		src_addr = chan->fifo_addr;
		dst_addr = sg->dst_addr;
		ctrl0 = GDMA_REG_CTRL0_SRC_ADDR_FIXED |
			(chan->slave_id << GDMA_RT305X_CTRL0_SRC_REQ_SHIFT) |
			(8 << GDMA_RT305X_CTRL0_DST_REQ_SHIFT);
	} else if (chan->desc->direction == DMA_MEM_TO_MEM) {
		/*
		 * TODO: the memcpy function has bugs: sometimes it copies
		 * 8 bytes more than requested when verifying with dmatest.
		 */
		src_addr = sg->src_addr;
		dst_addr = sg->dst_addr;
		ctrl0 = GDMA_REG_CTRL0_SW_MODE |
			(8 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) |
			(8 << GDMA_REG_CTRL1_DST_REQ_SHIFT);
	} else {
		dev_err(dma_dev->ddev.dev, "invalid direction type %d\n",
			chan->desc->direction);
		return -EINVAL;
	}

	ctrl0 |= (sg->len << GDMA_REG_CTRL0_TX_SHIFT) |
		 (chan->burst_size << GDMA_REG_CTRL0_BURST_SHIFT) |
		 GDMA_REG_CTRL0_DONE_INT | GDMA_REG_CTRL0_ENABLE;
	ctrl1 = chan->id << GDMA_REG_CTRL1_NEXT_SHIFT;

	chan->next_sg++;
	gdma_dma_write(dma_dev, GDMA_REG_SRC_ADDR(chan->id), src_addr);
	gdma_dma_write(dma_dev, GDMA_REG_DST_ADDR(chan->id), dst_addr);
	gdma_dma_write(dma_dev, GDMA_REG_CTRL1(chan->id), ctrl1);

	/* make sure next_sg is updated before the channel is enabled */
	wmb();
	gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), ctrl0);

	return 0;
}

static void rt3883_dump_reg(struct gdma_dma_dev *dma_dev, int id)
{
	dev_dbg(dma_dev->ddev.dev,
		"chan %d, src %08x, dst %08x, ctr0 %08x, ctr1 %08x, unmask %08x, done %08x, req %08x, ack %08x, fin %08x\n",
		id,
		gdma_dma_read(dma_dev, GDMA_REG_SRC_ADDR(id)),
		gdma_dma_read(dma_dev, GDMA_REG_DST_ADDR(id)),
		gdma_dma_read(dma_dev, GDMA_REG_CTRL0(id)),
		gdma_dma_read(dma_dev, GDMA_REG_CTRL1(id)),
		gdma_dma_read(dma_dev, GDMA_REG_UNMASK_INT),
		gdma_dma_read(dma_dev, GDMA_REG_DONE_INT),
		gdma_dma_read(dma_dev, GDMA_REG_REQSTS),
		gdma_dma_read(dma_dev, GDMA_REG_ACKSTS),
		gdma_dma_read(dma_dev, GDMA_REG_FINSTS));
}

static int rt3883_gdma_start_transfer(struct gdma_dmaengine_chan *chan)
{
	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
	dma_addr_t src_addr, dst_addr;
	struct gdma_dma_sg *sg;
	uint32_t ctrl0, ctrl1;

	/* verify that the channel is stopped */
	ctrl0 = gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id));
	if (unlikely(ctrl0 & GDMA_REG_CTRL0_ENABLE)) {
		dev_err(dma_dev->ddev.dev, "chan %d is already started (%08x)\n",
			chan->id, ctrl0);
		rt3883_dump_reg(dma_dev, chan->id);
		return -EINVAL;
	}

	sg = &chan->desc->sg[chan->next_sg];
	if (chan->desc->direction == DMA_MEM_TO_DEV) {
		src_addr = sg->src_addr;
		dst_addr = chan->fifo_addr;
		ctrl0 = GDMA_REG_CTRL0_DST_ADDR_FIXED;
		ctrl1 = (32 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) |
			(chan->slave_id << GDMA_REG_CTRL1_DST_REQ_SHIFT);
	} else if (chan->desc->direction == DMA_DEV_TO_MEM) {
		src_addr = chan->fifo_addr;
		dst_addr = sg->dst_addr;
		ctrl0 = GDMA_REG_CTRL0_SRC_ADDR_FIXED;
		ctrl1 = (chan->slave_id << GDMA_REG_CTRL1_SRC_REQ_SHIFT) |
			(32 << GDMA_REG_CTRL1_DST_REQ_SHIFT) |
			GDMA_REG_CTRL1_COHERENT;
	} else if (chan->desc->direction == DMA_MEM_TO_MEM) {
		src_addr = sg->src_addr;
		dst_addr = sg->dst_addr;
		ctrl0 = GDMA_REG_CTRL0_SW_MODE;
		ctrl1 = (32 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) |
			(32 << GDMA_REG_CTRL1_DST_REQ_SHIFT) |
			GDMA_REG_CTRL1_COHERENT;
	} else {
		dev_err(dma_dev->ddev.dev, "invalid direction type %d\n",
			chan->desc->direction);
		return -EINVAL;
	}

	ctrl0 |= (sg->len << GDMA_REG_CTRL0_TX_SHIFT) |
		 (chan->burst_size << GDMA_REG_CTRL0_BURST_SHIFT) |
		 GDMA_REG_CTRL0_DONE_INT | GDMA_REG_CTRL0_ENABLE;
	ctrl1 |= chan->id << GDMA_REG_CTRL1_NEXT_SHIFT;

	chan->next_sg++;
	gdma_dma_write(dma_dev, GDMA_REG_SRC_ADDR(chan->id), src_addr);
	gdma_dma_write(dma_dev, GDMA_REG_DST_ADDR(chan->id), dst_addr);
	gdma_dma_write(dma_dev, GDMA_REG_CTRL1(chan->id), ctrl1);

	/* make sure next_sg is updated before the channel is enabled */
	wmb();
	gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), ctrl0);

	return 0;
}

static inline int gdma_start_transfer(struct gdma_dma_dev *dma_dev,
				      struct gdma_dmaengine_chan *chan)
{
	return dma_dev->data->start_transfer(chan);
}

static int gdma_next_desc(struct gdma_dmaengine_chan *chan)
{
	struct virt_dma_desc *vdesc;

	vdesc = vchan_next_desc(&chan->vchan);
	if (!vdesc) {
		chan->desc = NULL;
		return 0;
	}
	chan->desc = to_gdma_dma_desc(vdesc);
	chan->next_sg = 0;

	return 1;
}

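/*
 * Per-channel completion handling: update the residue, complete the
 * descriptor (or wrap around for cyclic transfers) and mark the channel
 * in chan_issued when another segment still has to be started.
 */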
static void gdma_dma_chan_irq(struct gdma_dma_dev *dma_dev,
			      struct gdma_dmaengine_chan *chan)
{
	struct gdma_dma_desc *desc;
	unsigned long flags;
	int chan_issued;

	chan_issued = 0;
	spin_lock_irqsave(&chan->vchan.lock, flags);
	desc = chan->desc;
	if (desc) {
		if (desc->cyclic) {
			vchan_cyclic_callback(&desc->vdesc);
			if (chan->next_sg == desc->num_sgs)
				chan->next_sg = 0;
			chan_issued = 1;
		} else {
			desc->residue -= desc->sg[chan->next_sg - 1].len;
			if (chan->next_sg == desc->num_sgs) {
				list_del(&desc->vdesc.node);
				vchan_cookie_complete(&desc->vdesc);
				chan_issued = gdma_next_desc(chan);
			} else {
				chan_issued = 1;
			}
		}
	} else {
		dev_dbg(dma_dev->ddev.dev, "chan %d no desc to complete\n",
			chan->id);
	}
	if (chan_issued)
		set_bit(chan->id, &dma_dev->chan_issued);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

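/*
 * Shared interrupt handler: acknowledge all pending done bits in one
 * write, run the per-channel completion handling and reschedule the
 * tasklet if any channel has more work queued.
 */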
static irqreturn_t gdma_dma_irq(int irq, void *devid)
{
	struct gdma_dma_dev *dma_dev = devid;
	u32 done, done_reg;
	unsigned int i;

	done_reg = dma_dev->data->done_int_reg;
	done = gdma_dma_read(dma_dev, done_reg);
	if (unlikely(!done))
		return IRQ_NONE;

	/* clear the done bits */
	gdma_dma_write(dma_dev, done_reg, done);

	i = 0;
	while (done) {
		if (done & 0x1) {
			gdma_dma_chan_irq(dma_dev, &dma_dev->chan[i]);
			atomic_dec(&dma_dev->cnt);
		}
		done >>= 1;
		i++;
	}

	/* start the tasklet only if there is work to do */
	if (dma_dev->chan_issued)
		tasklet_schedule(&dma_dev->task);

	return IRQ_HANDLED;
}

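/*
 * Move freshly submitted descriptors onto the channel and kick the
 * scheduling tasklet.
 */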
static void gdma_dma_issue_pending(struct dma_chan *c)
{
	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan) && !chan->desc) {
		if (gdma_next_desc(chan)) {
			set_bit(chan->id, &dma_dev->chan_issued);
			tasklet_schedule(&dma_dev->task);
		} else {
			dev_dbg(dma_dev->ddev.dev, "chan %d no desc to issue\n",
				chan->id);
		}
	}
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

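/*
 * Build a slave scatter/gather descriptor: one hardware segment per sg
 * entry; each entry has to fit into the 16-bit transfer count field.
 */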
static struct dma_async_tx_descriptor *gdma_dma_prep_slave_sg(
		struct dma_chan *c, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
	struct gdma_dma_desc *desc;
	struct scatterlist *sg;
	unsigned int i;

	desc = gdma_dma_alloc_desc(sg_len);
	if (!desc) {
		dev_err(c->device->dev, "alloc sg desc error\n");
		return NULL;
	}
	desc->residue = 0;

	for_each_sg(sgl, sg, sg_len, i) {
		if (direction == DMA_MEM_TO_DEV) {
			desc->sg[i].src_addr = sg_dma_address(sg);
		} else if (direction == DMA_DEV_TO_MEM) {
			desc->sg[i].dst_addr = sg_dma_address(sg);
		} else {
			dev_err(c->device->dev, "invalid direction type %d\n",
				direction);
			goto free_desc;
		}

		if (unlikely(sg_dma_len(sg) > GDMA_REG_CTRL0_TX_MASK)) {
			dev_err(c->device->dev, "sg len too large %d\n",
				sg_dma_len(sg));
			goto free_desc;
		}
		desc->sg[i].len = sg_dma_len(sg);
		desc->residue += sg_dma_len(sg);
	}

	desc->num_sgs = sg_len;
	desc->direction = direction;
	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

free_desc:
	kfree(desc);
	return NULL;
}

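/*
 * Split a memcpy request into segments of at most the 16-bit transfer
 * count; the burst size is derived from the total length in 4-byte
 * words.
 */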
static struct dma_async_tx_descriptor *gdma_dma_prep_dma_memcpy(
		struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
	struct gdma_dma_desc *desc;
	unsigned int num_periods, i;
	size_t xfer_count;

	if (!len)
		return NULL;

	chan->burst_size = gdma_dma_maxburst(len >> 2);

	xfer_count = GDMA_REG_CTRL0_TX_MASK;
	num_periods = DIV_ROUND_UP(len, xfer_count);

	desc = gdma_dma_alloc_desc(num_periods);
	if (!desc) {
		dev_err(c->device->dev, "alloc memcpy desc error\n");
		return NULL;
	}
	desc->residue = len;

	for (i = 0; i < num_periods; i++) {
		desc->sg[i].src_addr = src;
		desc->sg[i].dst_addr = dest;
		if (len > xfer_count)
			desc->sg[i].len = xfer_count;
		else
			desc->sg[i].len = len;
		src += desc->sg[i].len;
		dest += desc->sg[i].len;
		len -= desc->sg[i].len;
	}

	desc->num_sgs = num_periods;
	desc->direction = DMA_MEM_TO_MEM;
	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

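/*
 * Cyclic transfers (e.g. audio): one segment per period; the interrupt
 * handler wraps next_sg back to 0 instead of completing the descriptor.
 */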
static struct dma_async_tx_descriptor *gdma_dma_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
	struct gdma_dma_desc *desc;
	unsigned int num_periods, i;

	if (buf_len % period_len)
		return NULL;

	if (period_len > GDMA_REG_CTRL0_TX_MASK) {
		dev_err(c->device->dev, "cyclic len too large %zu\n",
			period_len);
		return NULL;
	}

	num_periods = buf_len / period_len;
	desc = gdma_dma_alloc_desc(num_periods);
	if (!desc) {
		dev_err(c->device->dev, "alloc cyclic desc error\n");
		return NULL;
	}
	desc->residue = buf_len;

	for (i = 0; i < num_periods; i++) {
		if (direction == DMA_MEM_TO_DEV) {
			desc->sg[i].src_addr = buf_addr;
		} else if (direction == DMA_DEV_TO_MEM) {
			desc->sg[i].dst_addr = buf_addr;
		} else {
			dev_err(c->device->dev, "invalid direction type %d\n",
				direction);
			goto free_desc;
		}
		desc->sg[i].len = period_len;
		buf_addr += period_len;
	}

	desc->num_sgs = num_periods;
	desc->direction = direction;
	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

free_desc:
	kfree(desc);
	return NULL;
}

static enum dma_status gdma_dma_tx_status(struct dma_chan *c,
					  dma_cookie_t cookie,
					  struct dma_tx_state *state)
{
	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;
	struct gdma_dma_desc *desc;

	status = dma_cookie_status(c, cookie, state);
	if (status == DMA_COMPLETE || !state)
		return status;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	desc = chan->desc;
	if (desc && (cookie == desc->vdesc.tx.cookie)) {
		/*
		 * We never update desc->residue in the cyclic case, so we
		 * can tell the remaining room to the end of the circular
		 * buffer.
		 */
		if (desc->cyclic)
			state->residue = desc->residue -
				((chan->next_sg - 1) * desc->sg[0].len);
		else
			state->residue = desc->residue;
	} else {
		vdesc = vchan_find_desc(&chan->vchan, cookie);
		if (vdesc)
			state->residue = to_gdma_dma_desc(vdesc)->residue;
	}
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	dev_dbg(c->device->dev, "tx residue %d bytes\n", state->residue);

	return status;
}

static void gdma_dma_free_chan_resources(struct dma_chan *c)
{
	vchan_free_chan_resources(to_virt_chan(c));
}

static void gdma_dma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct gdma_dma_desc, vdesc));
}

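/*
 * Scheduling tasklet: round-robin over all channels flagged in
 * chan_issued and start their next segment, keeping at most two
 * channels active at once.
 */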
static void gdma_dma_tasklet(unsigned long arg)
{
	struct gdma_dma_dev *dma_dev = (struct gdma_dma_dev *)arg;
	struct gdma_dmaengine_chan *chan;
	static unsigned int last_chan;
	unsigned int i, chan_mask;

	/* record the last chan to round-robin over all chans */
	i = last_chan;
	chan_mask = dma_dev->data->chancnt - 1;
	do {
		/*
		 * On MT7621, when verifying with dmatest with all
		 * channels enabled, only two channels may be working
		 * at the same time, otherwise the data gets corrupted.
		 */
		if (atomic_read(&dma_dev->cnt) >= 2) {
			last_chan = i;
			break;
		}

		if (test_and_clear_bit(i, &dma_dev->chan_issued)) {
			chan = &dma_dev->chan[i];
			if (chan->desc) {
				atomic_inc(&dma_dev->cnt);
				gdma_start_transfer(dma_dev, chan);
			} else {
				dev_dbg(dma_dev->ddev.dev,
					"chan %d no desc to issue\n",
					chan->id);
			}

			if (!dma_dev->chan_issued)
				break;
		}

		i = (i + 1) & chan_mask;
	} while (i != last_chan);
}

static void rt305x_gdma_init(struct gdma_dma_dev *dma_dev)
{
	uint32_t gct;

	/* all chans round robin */
	gdma_dma_write(dma_dev, GDMA_RT305X_GCT, GDMA_REG_GCT_ARBIT_RR);

	gct = gdma_dma_read(dma_dev, GDMA_RT305X_GCT);
	dev_info(dma_dev->ddev.dev, "revision: %d, channels: %d\n",
		 (gct >> GDMA_REG_GCT_VER_SHIFT) & GDMA_REG_GCT_VER_MASK,
		 8 << ((gct >> GDMA_REG_GCT_CHAN_SHIFT) &
			GDMA_REG_GCT_CHAN_MASK));
}

static void rt3883_gdma_init(struct gdma_dma_dev *dma_dev)
{
	uint32_t gct;

	/* all chans round robin */
	gdma_dma_write(dma_dev, GDMA_REG_GCT, GDMA_REG_GCT_ARBIT_RR);

	gct = gdma_dma_read(dma_dev, GDMA_REG_GCT);
	dev_info(dma_dev->ddev.dev, "revision: %d, channels: %d\n",
		 (gct >> GDMA_REG_GCT_VER_SHIFT) & GDMA_REG_GCT_VER_MASK,
		 8 << ((gct >> GDMA_REG_GCT_CHAN_SHIFT) &
			GDMA_REG_GCT_CHAN_MASK));
}

static struct gdma_data rt305x_gdma_data = {
	.chancnt = 8,
	.done_int_reg = GDMA_RT305X_STATUS_INT,
	.init = rt305x_gdma_init,
	.start_transfer = rt305x_gdma_start_transfer,
};

static struct gdma_data rt3883_gdma_data = {
	.chancnt = 16,
	.done_int_reg = GDMA_REG_DONE_INT,
	.init = rt3883_gdma_init,
	.start_transfer = rt3883_gdma_start_transfer,
};

static const struct of_device_id gdma_of_match_table[] = {
	{ .compatible = "ralink,rt305x-gdma", .data = &rt305x_gdma_data },
	{ .compatible = "ralink,rt3883-gdma", .data = &rt3883_gdma_data },
	{ },
};

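/*
 * Probe: map the register window, request the shared interrupt,
 * register the dmaengine device and expose the channels to the device
 * tree via of_dma_xlate_by_chan_id.
 */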
static int gdma_dma_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct gdma_dmaengine_chan *chan;
	struct gdma_dma_dev *dma_dev;
	struct dma_device *dd;
	unsigned int i;
	struct resource *res;
	int ret;
	int irq;
	void __iomem *base;
	struct gdma_data *data;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	match = of_match_device(gdma_of_match_table, &pdev->dev);
	if (!match)
		return -EINVAL;
	data = (struct gdma_data *)match->data;

	dma_dev = devm_kzalloc(&pdev->dev, sizeof(*dma_dev) +
		(sizeof(struct gdma_dmaengine_chan) * data->chancnt),
		GFP_KERNEL);
	if (!dma_dev) {
		dev_err(&pdev->dev, "alloc dma device failed\n");
		return -ENOMEM;
	}
	dma_dev->data = data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);
	dma_dev->base = base;
	tasklet_init(&dma_dev->task, gdma_dma_tasklet, (unsigned long)dma_dev);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get irq\n");
		return irq;
	}
	ret = devm_request_irq(&pdev->dev, irq, gdma_dma_irq,
			       0, dev_name(&pdev->dev), dma_dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to request irq\n");
		return ret;
	}

	device_reset(&pdev->dev);

	dd = &dma_dev->ddev;
	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
	dd->device_free_chan_resources = gdma_dma_free_chan_resources;
	dd->device_prep_dma_memcpy = gdma_dma_prep_dma_memcpy;
	dd->device_prep_slave_sg = gdma_dma_prep_slave_sg;
	dd->device_prep_dma_cyclic = gdma_dma_prep_dma_cyclic;
	dd->device_config = gdma_dma_config;
	dd->device_terminate_all = gdma_dma_terminate_all;
	dd->device_tx_status = gdma_dma_tx_status;
	dd->device_issue_pending = gdma_dma_issue_pending;

	dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;

	dd->dev = &pdev->dev;
	dd->dev->dma_parms = &dma_dev->dma_parms;
	dma_set_max_seg_size(dd->dev, GDMA_REG_CTRL0_TX_MASK);
	INIT_LIST_HEAD(&dd->channels);

	for (i = 0; i < data->chancnt; i++) {
		chan = &dma_dev->chan[i];
		chan->id = i;
		chan->vchan.desc_free = gdma_dma_desc_free;
		vchan_init(&chan->vchan, dd);
	}

	/* init hardware */
	data->init(dma_dev);

	ret = dma_async_device_register(dd);
	if (ret) {
		dev_err(&pdev->dev, "failed to register dma device\n");
		return ret;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 of_dma_xlate_by_chan_id, dma_dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register of dma controller\n");
		goto err_unregister;
	}

	platform_set_drvdata(pdev, dma_dev);

	return 0;

err_unregister:
	dma_async_device_unregister(dd);
	return ret;
}

static int gdma_dma_remove(struct platform_device *pdev)
{
	struct gdma_dma_dev *dma_dev = platform_get_drvdata(pdev);

	tasklet_kill(&dma_dev->task);
	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&dma_dev->ddev);

	return 0;
}

static struct platform_driver gdma_dma_driver = {
	.probe = gdma_dma_probe,
	.remove = gdma_dma_remove,
	.driver = {
		.name = "gdma-rt2880",
		.of_match_table = gdma_of_match_table,
	},
};
module_platform_driver(gdma_dma_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("Ralink/MTK DMA driver");
MODULE_LICENSE("GPL v2");