/*
 * Topcliff PCH DMA controller driver
 * Copyright (c) 2010 Intel Corporation
 * Copyright (C) 2011 OKI SEMICONDUCTOR CO., LTD.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
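
/*
 * Usage sketch (illustrative only, not part of this driver): a client
 * driver describes its device FIFO in a struct pch_dma_slave (declared
 * in linux/pch_dma.h; this file relies on its rx_reg, tx_reg and width
 * fields), hangs it off chan->private when its dma_filter callback
 * accepts a channel, and then drives the generic dmaengine slave API:
 *
 *	chan = dma_request_channel(mask, filter, &pd_slave);
 *	desc = chan->device->device_prep_slave_sg(chan, sgl, nents,
 *						  DMA_TO_DEVICE,
 *						  DMA_PREP_INTERRUPT);
 *	desc->callback = done_fn;
 *	cookie = desc->tx_submit(desc);
 *	dma_async_issue_pending(chan);
 */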
19 | ||
20 | #include <linux/dmaengine.h> | |
21 | #include <linux/dma-mapping.h> | |
22 | #include <linux/init.h> | |
23 | #include <linux/pci.h> | |
24 | #include <linux/interrupt.h> | |
25 | #include <linux/module.h> | |
26 | #include <linux/pch_dma.h> | |
27 | ||
28 | #define DRV_NAME "pch-dma" | |
29 | ||
#define DMA_CTL0_DISABLE 0x0
#define DMA_CTL0_SG 0x1
#define DMA_CTL0_ONESHOT 0x2
#define DMA_CTL0_MODE_MASK_BITS 0x3
#define DMA_CTL0_DIR_SHIFT_BITS 2
#define DMA_CTL0_BITS_PER_CH 4

#define DMA_CTL2_START_SHIFT_BITS 8
#define DMA_CTL2_IRQ_ENABLE_MASK ((1UL << DMA_CTL2_START_SHIFT_BITS) - 1)

#define DMA_STATUS_IDLE 0x0
#define DMA_STATUS_DESC_READ 0x1
#define DMA_STATUS_WAIT 0x2
#define DMA_STATUS_ACCESS 0x3
#define DMA_STATUS_BITS_PER_CH 2
#define DMA_STATUS_MASK_BITS 0x3
#define DMA_STATUS_SHIFT_BITS 16
#define DMA_STATUS_IRQ(x) (0x1 << (x))
#define DMA_STATUS_ERR(x) (0x1 << ((x) + 8))

#define DMA_DESC_WIDTH_SHIFT_BITS 12
#define DMA_DESC_WIDTH_1_BYTE (0x3 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_2_BYTES (0x2 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_4_BYTES (0x0 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_MAX_COUNT_1_BYTE 0x3FF
#define DMA_DESC_MAX_COUNT_2_BYTES 0x3FF
#define DMA_DESC_MAX_COUNT_4_BYTES 0x7FF
#define DMA_DESC_END_WITHOUT_IRQ 0x0
#define DMA_DESC_END_WITH_IRQ 0x1
#define DMA_DESC_FOLLOW_WITHOUT_IRQ 0x2
#define DMA_DESC_FOLLOW_WITH_IRQ 0x3

#define MAX_CHAN_NR 8

static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");

struct pch_dma_desc_regs {
	u32 dev_addr;
	u32 mem_addr;
	u32 size;
	u32 next;
};

struct pch_dma_regs {
	u32 dma_ctl0;
	u32 dma_ctl1;
	u32 dma_ctl2;
	u32 reserved1;
	u32 dma_sts0;
	u32 dma_sts1;
	u32 reserved2;
	u32 reserved3;
	struct pch_dma_desc_regs desc[0];
};

struct pch_dma_desc {
	struct pch_dma_desc_regs regs;
	struct dma_async_tx_descriptor txd;
	struct list_head desc_node;
	struct list_head tx_list;
};

struct pch_dma_chan {
	struct dma_chan chan;
	void __iomem *membase;
	enum dma_data_direction dir;
	struct tasklet_struct tasklet;
	unsigned long err_status;

	spinlock_t lock;

	dma_cookie_t completed_cookie;
	struct list_head active_list;
	struct list_head queue;
	struct list_head free_list;
	unsigned int descs_allocated;
};

#define PDC_DEV_ADDR 0x00
#define PDC_MEM_ADDR 0x04
#define PDC_SIZE 0x08
#define PDC_NEXT 0x0C

#define channel_readl(pdc, name) \
	readl((pdc)->membase + PDC_##name)
#define channel_writel(pdc, name, val) \
	writel((val), (pdc)->membase + PDC_##name)

struct pch_dma {
	struct dma_device dma;
	void __iomem *membase;
	struct pci_pool *pool;
	struct pch_dma_regs regs;
	struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
	struct pch_dma_chan channels[0];
};

#define PCH_DMA_CTL0 0x00
#define PCH_DMA_CTL1 0x04
#define PCH_DMA_CTL2 0x08
#define PCH_DMA_STS0 0x10
#define PCH_DMA_STS1 0x14

#define dma_readl(pd, name) \
	readl((pd)->membase + PCH_DMA_##name)
#define dma_writel(pd, name, val) \
	writel((val), (pd)->membase + PCH_DMA_##name)

static inline struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct pch_dma_desc, txd);
}

static inline struct pch_dma_chan *to_pd_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pch_dma_chan, chan);
}

static inline struct pch_dma *to_pd(struct dma_device *ddev)
{
	return container_of(ddev, struct pch_dma, dma);
}

static inline struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static inline struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
{
	return list_first_entry(&pd_chan->active_list,
				struct pch_dma_desc, desc_node);
}

static inline struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
{
	return list_first_entry(&pd_chan->queue,
				struct pch_dma_desc, desc_node);
}

static void pdc_enable_irq(struct dma_chan *chan, int enable)
{
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;

	val = dma_readl(pd, CTL2);

	if (enable)
		val |= 0x1 << chan->chan_id;
	else
		val &= ~(0x1 << chan->chan_id);

	dma_writel(pd, CTL2, val);

	dev_dbg(chan2dev(chan), "pdc_enable_irq: chan %d -> %x\n",
		chan->chan_id, val);
}

static void pdc_set_dir(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;

	val = dma_readl(pd, CTL0);

	if (pd_chan->dir == DMA_TO_DEVICE)
		val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
			       DMA_CTL0_DIR_SHIFT_BITS);
	else
		val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
				 DMA_CTL0_DIR_SHIFT_BITS));

	dma_writel(pd, CTL0, val);

	dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n",
		chan->chan_id, val);
}

static void pdc_set_mode(struct dma_chan *chan, u32 mode)
{
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;

	val = dma_readl(pd, CTL0);

	val &= ~(DMA_CTL0_MODE_MASK_BITS <<
		 (DMA_CTL0_BITS_PER_CH * chan->chan_id));
	val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);

	dma_writel(pd, CTL0, val);

	dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
		chan->chan_id, val);
}

static u32 pdc_get_status(struct pch_dma_chan *pd_chan)
{
	struct pch_dma *pd = to_pd(pd_chan->chan.device);
	u32 val;

	val = dma_readl(pd, STS0);
	return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
			DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id));
}

static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
{
	return pdc_get_status(pd_chan) == DMA_STATUS_IDLE;
}

static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc *desc)
{
	struct pch_dma *pd = to_pd(pd_chan->chan.device);
	u32 val;

	if (!pdc_is_idle(pd_chan)) {
		dev_err(chan2dev(&pd_chan->chan),
			"BUG: Attempt to start non-idle channel\n");
		return;
	}

	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x\n",
		pd_chan->chan.chan_id, desc->regs.dev_addr);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x\n",
		pd_chan->chan.chan_id, desc->regs.mem_addr);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> size: %x\n",
		pd_chan->chan.chan_id, desc->regs.size);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x\n",
		pd_chan->chan.chan_id, desc->regs.next);

	if (list_empty(&desc->tx_list)) {
		channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr);
		channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr);
		channel_writel(pd_chan, SIZE, desc->regs.size);
		channel_writel(pd_chan, NEXT, desc->regs.next);
		pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT);
	} else {
		channel_writel(pd_chan, NEXT, desc->txd.phys);
		pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG);
	}

	val = dma_readl(pd, CTL2);
	val |= 1 << (DMA_CTL2_START_SHIFT_BITS + pd_chan->chan.chan_id);
	dma_writel(pd, CTL2, val);
}

static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
			       struct pch_dma_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	dma_async_tx_callback callback = txd->callback;
	void *param = txd->callback_param;

	list_splice_init(&desc->tx_list, &pd_chan->free_list);
	list_move(&desc->desc_node, &pd_chan->free_list);

	if (callback)
		callback(param);
}

static void pdc_complete_all(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(list);

	BUG_ON(!pdc_is_idle(pd_chan));

	if (!list_empty(&pd_chan->queue))
		pdc_dostart(pd_chan, pdc_first_queued(pd_chan));

	list_splice_init(&pd_chan->active_list, &list);
	list_splice_init(&pd_chan->queue, &pd_chan->active_list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		pdc_chain_complete(pd_chan, desc);
}

static void pdc_handle_error(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *bad_desc;

	bad_desc = pdc_first_active(pd_chan);
	list_del(&bad_desc->desc_node);

	list_splice_init(&pd_chan->queue, pd_chan->active_list.prev);

	if (!list_empty(&pd_chan->active_list))
		pdc_dostart(pd_chan, pdc_first_active(pd_chan));

	dev_crit(chan2dev(&pd_chan->chan), "Bad descriptor submitted\n");
	dev_crit(chan2dev(&pd_chan->chan), "descriptor cookie: %d\n",
		 bad_desc->txd.cookie);

	pdc_chain_complete(pd_chan, bad_desc);
}

static void pdc_advance_work(struct pch_dma_chan *pd_chan)
{
	if (list_empty(&pd_chan->active_list) ||
	    list_is_singular(&pd_chan->active_list)) {
		pdc_complete_all(pd_chan);
	} else {
		pdc_chain_complete(pd_chan, pdc_first_active(pd_chan));
		pdc_dostart(pd_chan, pdc_first_active(pd_chan));
	}
}

static dma_cookie_t pdc_assign_cookie(struct pch_dma_chan *pd_chan,
				      struct pch_dma_desc *desc)
{
	dma_cookie_t cookie = pd_chan->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	pd_chan->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}

static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct pch_dma_desc *desc = to_pd_desc(txd);
	struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);
	dma_cookie_t cookie;

	spin_lock_bh(&pd_chan->lock);
	cookie = pdc_assign_cookie(pd_chan, desc);

	if (list_empty(&pd_chan->active_list)) {
		list_add_tail(&desc->desc_node, &pd_chan->active_list);
		pdc_dostart(pd_chan, desc);
	} else {
		list_add_tail(&desc->desc_node, &pd_chan->queue);
	}

	spin_unlock_bh(&pd_chan->lock);
	return cookie;
}

static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
{
	struct pch_dma_desc *desc = NULL;
	struct pch_dma *pd = to_pd(chan->device);
	dma_addr_t addr;

	desc = pci_pool_alloc(pd->pool, flags, &addr);
	if (desc) {
		memset(desc, 0, sizeof(struct pch_dma_desc));
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = pd_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = addr;
	}

	return desc;
}

static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *desc, *_d;
	struct pch_dma_desc *ret = NULL;
	int i = 0;

	spin_lock_bh(&pd_chan->lock);
	list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
	}
	spin_unlock_bh(&pd_chan->lock);
	dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);

	if (!ret) {
		ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO);
		if (ret) {
			spin_lock_bh(&pd_chan->lock);
			pd_chan->descs_allocated++;
			spin_unlock_bh(&pd_chan->lock);
		} else {
			dev_err(chan2dev(&pd_chan->chan),
				"failed to alloc desc\n");
		}
	}

	return ret;
}

static void pdc_desc_put(struct pch_dma_chan *pd_chan,
			 struct pch_dma_desc *desc)
{
	if (desc) {
		spin_lock_bh(&pd_chan->lock);
		list_splice_init(&desc->tx_list, &pd_chan->free_list);
		list_add(&desc->desc_node, &pd_chan->free_list);
		spin_unlock_bh(&pd_chan->lock);
	}
}

static int pd_alloc_chan_resources(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_desc *desc;
	LIST_HEAD(tmp_list);
	int i;

	if (!pdc_is_idle(pd_chan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	if (!list_empty(&pd_chan->free_list))
		return pd_chan->descs_allocated;

	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = pdc_alloc_desc(chan, GFP_KERNEL);

		if (!desc) {
			dev_warn(chan2dev(chan),
				 "Only allocated %d initial descriptors\n", i);
			break;
		}

		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_bh(&pd_chan->lock);
	list_splice(&tmp_list, &pd_chan->free_list);
	pd_chan->descs_allocated = i;
	pd_chan->completed_cookie = chan->cookie = 1;
	spin_unlock_bh(&pd_chan->lock);

	pdc_enable_irq(chan, 1);
	pdc_set_dir(chan);

	return pd_chan->descs_allocated;
}

static void pd_free_chan_resources(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma *pd = to_pd(chan->device);
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(tmp_list);

	BUG_ON(!pdc_is_idle(pd_chan));
	BUG_ON(!list_empty(&pd_chan->active_list));
	BUG_ON(!list_empty(&pd_chan->queue));

	spin_lock_bh(&pd_chan->lock);
	list_splice_init(&pd_chan->free_list, &tmp_list);
	pd_chan->descs_allocated = 0;
	spin_unlock_bh(&pd_chan->lock);

	list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
		pci_pool_free(pd->pool, desc, desc->txd.phys);

	pdc_enable_irq(chan, 0);
}

static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				    struct dma_tx_state *txstate)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_completed;
	enum dma_status ret;

	spin_lock_bh(&pd_chan->lock);
	last_completed = pd_chan->completed_cookie;
	last_used = chan->cookie;
	spin_unlock_bh(&pd_chan->lock);

	ret = dma_async_is_complete(cookie, last_completed, last_used);

	dma_set_tx_state(txstate, last_completed, last_used, 0);

	return ret;
}

static void pd_issue_pending(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);

	if (pdc_is_idle(pd_chan)) {
		spin_lock_bh(&pd_chan->lock);
		pdc_advance_work(pd_chan);
		spin_unlock_bh(&pd_chan->lock);
	}
}

static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
			struct scatterlist *sgl, unsigned int sg_len,
			enum dma_data_direction direction, unsigned long flags)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_slave *pd_slave = chan->private;
	struct pch_dma_desc *first = NULL;
	struct pch_dma_desc *prev = NULL;
	struct pch_dma_desc *desc = NULL;
	struct scatterlist *sg;
	dma_addr_t reg;
	int i;

	if (unlikely(!sg_len)) {
		dev_info(chan2dev(chan), "prep_slave_sg: length is zero!\n");
		return NULL;
	}

	if (direction == DMA_FROM_DEVICE)
		reg = pd_slave->rx_reg;
	else if (direction == DMA_TO_DEVICE)
		reg = pd_slave->tx_reg;
	else
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		desc = pdc_desc_get(pd_chan);

		if (!desc)
			goto err_desc_get;

		desc->regs.dev_addr = reg;
		desc->regs.mem_addr = sg_phys(sg);
		desc->regs.size = sg_dma_len(sg);
		desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ;

		switch (pd_slave->width) {
		case PCH_DMA_WIDTH_1_BYTE:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_1_BYTE)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_1_BYTE;
			break;
		case PCH_DMA_WIDTH_2_BYTES:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_2_BYTES)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_2_BYTES;
			break;
		case PCH_DMA_WIDTH_4_BYTES:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_4_BYTES)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_4_BYTES;
			break;
		default:
			goto err_desc_get;
		}

		if (!first) {
			first = desc;
		} else {
			prev->regs.next |= desc->txd.phys;
			list_add_tail(&desc->desc_node, &first->tx_list);
		}

		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		desc->regs.next = DMA_DESC_END_WITH_IRQ;
	else
		desc->regs.next = DMA_DESC_END_WITHOUT_IRQ;

	first->txd.cookie = -EBUSY;
	desc->txd.flags = flags;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "failed to get desc or wrong parameters\n");
	pdc_desc_put(pd_chan, first);
	return NULL;
}

static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			     unsigned long arg)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(list);

	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	spin_lock_bh(&pd_chan->lock);

	pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);

	list_splice_init(&pd_chan->active_list, &list);
	list_splice_init(&pd_chan->queue, &list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		pdc_chain_complete(pd_chan, desc);

	spin_unlock_bh(&pd_chan->lock);

	return 0;
}

static void pdc_tasklet(unsigned long data)
{
	struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data;

	if (!pdc_is_idle(pd_chan)) {
		dev_err(chan2dev(&pd_chan->chan),
			"BUG: handle non-idle channel in tasklet\n");
		return;
	}

	spin_lock_bh(&pd_chan->lock);
	if (test_and_clear_bit(0, &pd_chan->err_status))
		pdc_handle_error(pd_chan);
	else
		pdc_advance_work(pd_chan);
	spin_unlock_bh(&pd_chan->lock);
}

static irqreturn_t pd_irq(int irq, void *devid)
{
	struct pch_dma *pd = (struct pch_dma *)devid;
	struct pch_dma_chan *pd_chan;
	u32 sts0;
	int i;
	int ret = IRQ_NONE;

	sts0 = dma_readl(pd, STS0);

	dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0);

	for (i = 0; i < pd->dma.chancnt; i++) {
		pd_chan = &pd->channels[i];

		if (sts0 & DMA_STATUS_IRQ(i)) {
			if (sts0 & DMA_STATUS_ERR(i))
				set_bit(0, &pd_chan->err_status);

			tasklet_schedule(&pd_chan->tasklet);
			ret = IRQ_HANDLED;
		}
	}

	/* clear interrupt bits in status register */
	dma_writel(pd, STS0, sts0);

	return ret;
}

static void pch_dma_save_regs(struct pch_dma *pd)
{
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;
	int i = 0;

	pd->regs.dma_ctl0 = dma_readl(pd, CTL0);
	pd->regs.dma_ctl1 = dma_readl(pd, CTL1);
	pd->regs.dma_ctl2 = dma_readl(pd, CTL2);

	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
		pd_chan = to_pd_chan(chan);

		pd->ch_regs[i].dev_addr = channel_readl(pd_chan, DEV_ADDR);
		pd->ch_regs[i].mem_addr = channel_readl(pd_chan, MEM_ADDR);
		pd->ch_regs[i].size = channel_readl(pd_chan, SIZE);
		pd->ch_regs[i].next = channel_readl(pd_chan, NEXT);

		i++;
	}
}

static void pch_dma_restore_regs(struct pch_dma *pd)
{
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;
	int i = 0;

	dma_writel(pd, CTL0, pd->regs.dma_ctl0);
	dma_writel(pd, CTL1, pd->regs.dma_ctl1);
	dma_writel(pd, CTL2, pd->regs.dma_ctl2);

	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
		pd_chan = to_pd_chan(chan);

		channel_writel(pd_chan, DEV_ADDR, pd->ch_regs[i].dev_addr);
		channel_writel(pd_chan, MEM_ADDR, pd->ch_regs[i].mem_addr);
		channel_writel(pd_chan, SIZE, pd->ch_regs[i].size);
		channel_writel(pd_chan, NEXT, pd->ch_regs[i].next);

		i++;
	}
}

static int pch_dma_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);

	if (pd)
		pch_dma_save_regs(pd);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int pch_dma_resume(struct pci_dev *pdev)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device(pdev);
	if (err) {
		dev_dbg(&pdev->dev, "failed to enable device\n");
		return err;
	}

	if (pd)
		pch_dma_restore_regs(pd);

	return 0;
}

static int __devinit pch_dma_probe(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	struct pch_dma *pd;
	struct pch_dma_regs *regs;
	unsigned int nr_channels;
	int err;
	int i;

	nr_channels = id->driver_data;
	pd = kzalloc(sizeof(struct pch_dma) +
		     sizeof(struct pch_dma_chan) * nr_channels, GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pci_set_drvdata(pdev, pd);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device\n");
		goto err_free_mem;
	}

	if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Cannot find proper base address\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}
802 | ||
803 | err = pci_request_regions(pdev, DRV_NAME); | |
804 | if (err) { | |
805 | dev_err(&pdev->dev, "Cannot obtain PCI resources\n"); | |
806 | goto err_disable_pdev; | |
807 | } | |
808 | ||
809 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | |
810 | if (err) { | |
811 | dev_err(&pdev->dev, "Cannot set proper DMA config\n"); | |
812 | goto err_free_res; | |
813 | } | |
814 | ||
815 | regs = pd->membase = pci_iomap(pdev, 1, 0); | |
816 | if (!pd->membase) { | |
817 | dev_err(&pdev->dev, "Cannot map MMIO registers\n"); | |
818 | err = -ENOMEM; | |
819 | goto err_free_res; | |
820 | } | |
821 | ||
822 | pci_set_master(pdev); | |
823 | ||
824 | err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd); | |
825 | if (err) { | |
826 | dev_err(&pdev->dev, "Failed to request IRQ\n"); | |
827 | goto err_iounmap; | |
828 | } | |
829 | ||
830 | pd->pool = pci_pool_create("pch_dma_desc_pool", pdev, | |
831 | sizeof(struct pch_dma_desc), 4, 0); | |
832 | if (!pd->pool) { | |
833 | dev_err(&pdev->dev, "Failed to alloc DMA descriptors\n"); | |
834 | err = -ENOMEM; | |
835 | goto err_free_irq; | |
836 | } | |
837 | ||
838 | pd->dma.dev = &pdev->dev; | |
839 | pd->dma.chancnt = nr_channels; | |
840 | ||
841 | INIT_LIST_HEAD(&pd->dma.channels); | |
842 | ||
843 | for (i = 0; i < nr_channels; i++) { | |
844 | struct pch_dma_chan *pd_chan = &pd->channels[i]; | |
845 | ||
846 | pd_chan->chan.device = &pd->dma; | |
847 | pd_chan->chan.cookie = 1; | |
848 | pd_chan->chan.chan_id = i; | |
849 | ||
850 | pd_chan->membase = ®s->desc[i]; | |
851 | ||
		pd_chan->dir = (i % 2) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

		spin_lock_init(&pd_chan->lock);

		INIT_LIST_HEAD(&pd_chan->active_list);
		INIT_LIST_HEAD(&pd_chan->queue);
		INIT_LIST_HEAD(&pd_chan->free_list);

		tasklet_init(&pd_chan->tasklet, pdc_tasklet,
			     (unsigned long)pd_chan);
		list_add_tail(&pd_chan->chan.device_node, &pd->dma.channels);
	}

	dma_cap_zero(pd->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, pd->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, pd->dma.cap_mask);

	pd->dma.device_alloc_chan_resources = pd_alloc_chan_resources;
	pd->dma.device_free_chan_resources = pd_free_chan_resources;
	pd->dma.device_tx_status = pd_tx_status;
	pd->dma.device_issue_pending = pd_issue_pending;
	pd->dma.device_prep_slave_sg = pd_prep_slave_sg;
	pd->dma.device_control = pd_device_control;

	err = dma_async_device_register(&pd->dma);
	if (err) {
		dev_err(&pdev->dev, "Failed to register DMA device\n");
		goto err_free_pool;
	}

	return 0;

err_free_pool:
	pci_pool_destroy(pd->pool);
err_free_irq:
	free_irq(pdev->irq, pd);
err_iounmap:
	pci_iounmap(pdev, pd->membase);
err_free_res:
	pci_release_regions(pdev);
err_disable_pdev:
	pci_disable_device(pdev);
err_free_mem:
	kfree(pd);
	return err;
}
897 | ||
898 | static void __devexit pch_dma_remove(struct pci_dev *pdev) | |
899 | { | |
900 | struct pch_dma *pd = pci_get_drvdata(pdev); | |
901 | struct pch_dma_chan *pd_chan; | |
902 | struct dma_chan *chan, *_c; | |
903 | ||
904 | if (pd) { | |
905 | dma_async_device_unregister(&pd->dma); | |
906 | ||
907 | list_for_each_entry_safe(chan, _c, &pd->dma.channels, | |
908 | device_node) { | |
909 | pd_chan = to_pd_chan(chan); | |
910 | ||
911 | tasklet_disable(&pd_chan->tasklet); | |
912 | tasklet_kill(&pd_chan->tasklet); | |
913 | } | |
914 | ||
915 | pci_pool_destroy(pd->pool); | |
916 | free_irq(pdev->irq, pd); | |
917 | pci_iounmap(pdev, pd->membase); | |
918 | pci_release_regions(pdev); | |
919 | pci_disable_device(pdev); | |
920 | kfree(pd); | |
921 | } | |
922 | } | |
923 | ||
/* PCI Device ID of DMA device */
#define PCI_VENDOR_ID_ROHM 0x10DB
#define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH 0x8810
#define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH 0x8815
#define PCI_DEVICE_ID_ML7213_DMA1_8CH 0x8026
#define PCI_DEVICE_ID_ML7213_DMA2_8CH 0x802B
#define PCI_DEVICE_ID_ML7213_DMA3_4CH 0x8034

static const struct pci_device_id pch_dma_id_table[] = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8 }, /* UART Video */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8 }, /* PCMIF SPI */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4 }, /* FPGA */
	{ 0, },
};
940 | ||
941 | static struct pci_driver pch_dma_driver = { | |
942 | .name = DRV_NAME, | |
943 | .id_table = pch_dma_id_table, | |
944 | .probe = pch_dma_probe, | |
945 | .remove = __devexit_p(pch_dma_remove), | |
946 | #ifdef CONFIG_PM | |
947 | .suspend = pch_dma_suspend, | |
948 | .resume = pch_dma_resume, | |
949 | #endif | |
950 | }; | |
951 | ||
static int __init pch_dma_init(void)
{
	return pci_register_driver(&pch_dma_driver);
}

static void __exit pch_dma_exit(void)
{
	pci_unregister_driver(&pch_dma_driver);
}

module_init(pch_dma_init);
module_exit(pch_dma_exit);

MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH "
		   "DMA controller driver");
MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
MODULE_LICENSE("GPL v2");