Commit | Line | Data |
---|---|---|
9eaa3d9b AB |
1 | /* |
2 | * DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge | |
3 | * | |
50835e97 | 4 | * Copyright (c) 2011-2014 Integrated Device Technology, Inc. |
9eaa3d9b AB |
5 | * Alexandre Bounine <alexandre.bounine@idt.com> |
6 | * | |
7 | * This program is free software; you can redistribute it and/or modify it | |
8 | * under the terms of the GNU General Public License as published by the Free | |
9 | * Software Foundation; either version 2 of the License, or (at your option) | |
10 | * any later version. | |
11 | * | |
12 | * This program is distributed in the hope that it will be useful, but WITHOUT | |
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
15 | * more details. | |
16 | * | |
50835e97 AB |
17 | * The full GNU General Public License is included in this distribution in the |
18 | * file called COPYING. | |
9eaa3d9b AB |
19 | */ |
20 | ||
21 | #include <linux/io.h> | |
22 | #include <linux/errno.h> | |
23 | #include <linux/init.h> | |
24 | #include <linux/ioport.h> | |
25 | #include <linux/kernel.h> | |
26 | #include <linux/module.h> | |
27 | #include <linux/pci.h> | |
28 | #include <linux/rio.h> | |
29 | #include <linux/rio_drv.h> | |
30 | #include <linux/dma-mapping.h> | |
31 | #include <linux/interrupt.h> | |
32 | #include <linux/kfifo.h> | |
72d8a0d2 | 33 | #include <linux/sched.h> |
9eaa3d9b | 34 | #include <linux/delay.h> |
50835e97 | 35 | #include "../../dma/dmaengine.h" |
9eaa3d9b AB |
36 | |
37 | #include "tsi721.h" | |
38 | ||
50835e97 AB |
39 | #define TSI721_DMA_TX_QUEUE_SZ 16 /* number of transaction descriptors */ |
40 | ||
41 | #ifdef CONFIG_PCI_MSI | |
42 | static irqreturn_t tsi721_bdma_msix(int irq, void *ptr); | |
43 | #endif | |
44 | static int tsi721_submit_sg(struct tsi721_tx_desc *desc); | |
45 | ||
46 | static unsigned int dma_desc_per_channel = 128; | |
47 | module_param(dma_desc_per_channel, uint, S_IWUSR | S_IRUGO); | |
48 | MODULE_PARM_DESC(dma_desc_per_channel, | |
49 | "Number of DMA descriptors per channel (default: 128)"); | |
50 | ||
9eaa3d9b AB |
51 | static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan) |
52 | { | |
53 | return container_of(chan, struct tsi721_bdma_chan, dchan); | |
54 | } | |
55 | ||
56 | static inline struct tsi721_device *to_tsi721(struct dma_device *ddev) | |
57 | { | |
58 | return container_of(ddev, struct rio_mport, dma)->priv; | |
59 | } | |
60 | ||
61 | static inline | |
62 | struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd) | |
63 | { | |
64 | return container_of(txd, struct tsi721_tx_desc, txd); | |
65 | } | |
66 | ||
/*
 * tsi721_bdma_ch_init - initialize one Tsi721 BDMA channel
 * @bdma_chan: channel to initialize
 * @bd_num: number of hardware buffer descriptors to allocate
 *
 * Allocates the hardware descriptor ring (bd_num entries plus one extra
 * DTYPE3 "link" descriptor that points back to the ring base, making the
 * ring circular) and the descriptor status FIFO, programs both into the
 * channel registers, requests the two per-channel MSI-X vectors when the
 * device runs in MSI-X mode, and toggles the channel into init state.
 *
 * Returns 0 on success, -ENOMEM if either DMA-coherent allocation fails,
 * or -EIO if an MSI-X vector could not be requested.
 */
static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
{
	struct tsi721_dma_desc *bd_ptr;
	struct device *dev = bdma_chan->dchan.device->dev;
	u64 *sts_ptr;
	dma_addr_t bd_phys;
	dma_addr_t sts_phys;
	int sts_size;
#ifdef CONFIG_PCI_MSI
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif

	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d", bdma_chan->id);

	/*
	 * Allocate space for DMA descriptors
	 * (add an extra element for link descriptor)
	 */
	bd_ptr = dma_zalloc_coherent(dev,
				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
				&bd_phys, GFP_ATOMIC);
	if (!bd_ptr)
		return -ENOMEM;

	bdma_chan->bd_num = bd_num;
	bdma_chan->bd_phys = bd_phys;
	bdma_chan->bd_base = bd_ptr;

	tsi_debug(DMA, &bdma_chan->dchan.dev->device,
		  "DMAC%d descriptors @ %p (phys = %pad)",
		  bdma_chan->id, bd_ptr, &bd_phys);

	/*
	 * Allocate space for descriptor status FIFO.
	 * HW requires at least TSI721_DMA_MINSTSSZ entries, rounded up to a
	 * power of two.
	 */
	sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ?
					(bd_num + 1) : TSI721_DMA_MINSTSSZ;
	sts_size = roundup_pow_of_two(sts_size);
	sts_ptr = dma_zalloc_coherent(dev,
				     sts_size * sizeof(struct tsi721_dma_sts),
				     &sts_phys, GFP_ATOMIC);
	if (!sts_ptr) {
		/* Free space allocated for DMA descriptors */
		dma_free_coherent(dev,
				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
				bd_ptr, bd_phys);
		bdma_chan->bd_base = NULL;
		return -ENOMEM;
	}

	bdma_chan->sts_phys = sts_phys;
	bdma_chan->sts_base = sts_ptr;
	bdma_chan->sts_size = sts_size;

	tsi_debug(DMA, &bdma_chan->dchan.dev->device,
		  "DMAC%d desc status FIFO @ %p (phys = %pad) size=0x%x",
		  bdma_chan->id, sts_ptr, &sts_phys, sts_size);

	/* Initialize DMA descriptors ring using added link descriptor */
	bd_ptr[bd_num].type_id = cpu_to_le32(DTYPE3 << 29);
	bd_ptr[bd_num].next_lo = cpu_to_le32((u64)bd_phys &
						TSI721_DMAC_DPTRL_MASK);
	bd_ptr[bd_num].next_hi = cpu_to_le32((u64)bd_phys >> 32);

	/* Setup DMA descriptor pointers */
	iowrite32(((u64)bd_phys >> 32),
		bdma_chan->regs + TSI721_DMAC_DPTRH);
	iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
		bdma_chan->regs + TSI721_DMAC_DPTRL);

	/* Setup descriptor status FIFO */
	iowrite32(((u64)sts_phys >> 32),
		bdma_chan->regs + TSI721_DMAC_DSBH);
	iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
		bdma_chan->regs + TSI721_DMAC_DSBL);
	iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
		bdma_chan->regs + TSI721_DMAC_DSSZ);

	/* Clear interrupt bits */
	iowrite32(TSI721_DMAC_INT_ALL,
		bdma_chan->regs + TSI721_DMAC_INT);

	/* Read back to flush the posted writes above */
	ioread32(bdma_chan->regs + TSI721_DMAC_INT);

#ifdef CONFIG_PCI_MSI
	/* Request interrupt service if we are in MSI-X mode */
	if (priv->flags & TSI721_USING_MSIX) {
		int rc, idx;

		idx = TSI721_VECT_DMA0_DONE + bdma_chan->id;

		rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
				 priv->msix[idx].irq_name, (void *)bdma_chan);

		if (rc) {
			tsi_debug(DMA, &bdma_chan->dchan.dev->device,
				  "Unable to get MSI-X for DMAC%d-DONE",
				  bdma_chan->id);
			goto err_out;
		}

		idx = TSI721_VECT_DMA0_INT + bdma_chan->id;

		rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
				 priv->msix[idx].irq_name, (void *)bdma_chan);

		if (rc) {
			tsi_debug(DMA, &bdma_chan->dchan.dev->device,
				  "Unable to get MSI-X for DMAC%d-INT",
				  bdma_chan->id);
			/* Release the -DONE vector acquired above */
			free_irq(
				priv->msix[TSI721_VECT_DMA0_DONE +
					    bdma_chan->id].vector,
				(void *)bdma_chan);
		}

		/*
		 * Reached either by falling through after a second
		 * request_irq() failure or by the goto after the first one;
		 * rc is non-zero only on failure.
		 */
err_out:
		if (rc) {
			/* Free space allocated for DMA descriptors */
			dma_free_coherent(dev,
				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
				bd_ptr, bd_phys);
			bdma_chan->bd_base = NULL;

			/* Free space allocated for status descriptors */
			dma_free_coherent(dev,
				sts_size * sizeof(struct tsi721_dma_sts),
				sts_ptr, sts_phys);
			bdma_chan->sts_base = NULL;

			return -EIO;
		}
	}
#endif /* CONFIG_PCI_MSI */

	/* Toggle DMA channel initialization */
	iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
	ioread32(bdma_chan->regs + TSI721_DMAC_CTL);
	bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
	bdma_chan->sts_rdptr = 0;
	udelay(10);

	return 0;
}
209 | ||
/*
 * tsi721_bdma_ch_free - release a BDMA channel's hardware resources
 * @bdma_chan: channel to tear down
 *
 * Counterpart of tsi721_bdma_ch_init(): puts the channel back into init
 * state, releases its MSI-X vectors (if used) and frees the descriptor
 * ring and status FIFO.
 *
 * Returns 0 on success (including when the channel was never initialized),
 * or -EFAULT if the channel hardware is still running.
 */
static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
{
	u32 ch_stat;
#ifdef CONFIG_PCI_MSI
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif

	/* Nothing to do if the channel was never initialized */
	if (bdma_chan->bd_base == NULL)
		return 0;

	/* Check if DMA channel still running */
	ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
	if (ch_stat & TSI721_DMAC_STS_RUN)
		return -EFAULT;

	/* Put DMA channel into init state */
	iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
				    bdma_chan->id].vector, (void *)bdma_chan);
		free_irq(priv->msix[TSI721_VECT_DMA0_INT +
				    bdma_chan->id].vector, (void *)bdma_chan);
	}
#endif /* CONFIG_PCI_MSI */

	/* Free space allocated for DMA descriptors (ring + link descriptor) */
	dma_free_coherent(bdma_chan->dchan.device->dev,
		(bdma_chan->bd_num + 1) * sizeof(struct tsi721_dma_desc),
		bdma_chan->bd_base, bdma_chan->bd_phys);
	bdma_chan->bd_base = NULL;

	/* Free space allocated for status FIFO */
	dma_free_coherent(bdma_chan->dchan.device->dev,
		bdma_chan->sts_size * sizeof(struct tsi721_dma_sts),
		bdma_chan->sts_base, bdma_chan->sts_phys);
	bdma_chan->sts_base = NULL;
	return 0;
}
250 | ||
251 | static void | |
252 | tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable) | |
253 | { | |
254 | if (enable) { | |
255 | /* Clear pending BDMA channel interrupts */ | |
256 | iowrite32(TSI721_DMAC_INT_ALL, | |
257 | bdma_chan->regs + TSI721_DMAC_INT); | |
258 | ioread32(bdma_chan->regs + TSI721_DMAC_INT); | |
259 | /* Enable BDMA channel interrupts */ | |
260 | iowrite32(TSI721_DMAC_INT_ALL, | |
261 | bdma_chan->regs + TSI721_DMAC_INTE); | |
262 | } else { | |
263 | /* Disable BDMA channel interrupts */ | |
264 | iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE); | |
265 | /* Clear pending BDMA channel interrupts */ | |
266 | iowrite32(TSI721_DMAC_INT_ALL, | |
267 | bdma_chan->regs + TSI721_DMAC_INT); | |
268 | } | |
269 | ||
270 | } | |
271 | ||
272 | static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan) | |
273 | { | |
274 | u32 sts; | |
275 | ||
276 | sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS); | |
277 | return ((sts & TSI721_DMAC_STS_RUN) == 0); | |
278 | } | |
279 | ||
/*
 * tsi721_bdma_handler - top-half BDMA channel interrupt handler (INTx/MSI)
 * @bdma_chan: channel that raised the interrupt
 *
 * Masks the channel's interrupts and defers all processing to the channel
 * tasklet (which re-enables them when done). Scheduling is skipped when
 * the channel has been deactivated.
 */
void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
{
	/* Disable BDMA channel interrupts */
	iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
	if (bdma_chan->active)
		tasklet_hi_schedule(&bdma_chan->tasklet);
}
287 | ||
#ifdef CONFIG_PCI_MSI
/**
 * tsi721_bdma_msix - MSI-X interrupt handler for BDMA channels
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (BDMA channel structure)
 *
 * Handles BDMA channel interrupts signaled using MSI-X. All processing is
 * deferred to the channel tasklet; scheduling is skipped when the channel
 * has been deactivated.
 */
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
{
	struct tsi721_bdma_chan *bdma_chan = ptr;

	if (bdma_chan->active)
		tasklet_hi_schedule(&bdma_chan->tasklet);
	return IRQ_HANDLED;
}
#endif /* CONFIG_PCI_MSI */
305 | ||
/* Must be called with the spinlock held */
/*
 * Kick the hardware by publishing the new descriptor write count
 * (wr_count_next) to the DWRCNT register. Refuses to start if the channel
 * is not idle or if no new buffer descriptors have been queued since the
 * last start (wr_count == wr_count_next).
 */
static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
{
	if (!tsi721_dma_is_idle(bdma_chan)) {
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d Attempt to start non-idle channel",
			bdma_chan->id);
		return;
	}

	if (bdma_chan->wr_count == bdma_chan->wr_count_next) {
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d Attempt to start DMA with no BDs ready %d",
			bdma_chan->id, task_pid_nr(current));
		return;
	}

	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d (wrc=%d) %d",
		  bdma_chan->id, bdma_chan->wr_count_next,
		  task_pid_nr(current));

	iowrite32(bdma_chan->wr_count_next,
		bdma_chan->regs + TSI721_DMAC_DWRCNT);
	/* Read back to flush the posted write before updating wr_count */
	ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT);

	bdma_chan->wr_count = bdma_chan->wr_count_next;
}
333 | ||
/*
 * tsi721_desc_fill_init - fill all fields of a DTYPE1 hardware descriptor
 * except the byte count, which is completed later by
 * tsi721_desc_fill_end() once contiguous SG entries have been merged.
 * @desc: transaction this descriptor belongs to (destid, rtype, rio_addr)
 * @bd_ptr: hardware buffer descriptor to initialize
 * @sg: scatterlist entry providing the local bus address
 * @sys_size: RapidIO system size field packed into the descriptor
 *
 * Returns 0 on success or -EINVAL if @bd_ptr is NULL.
 */
static int
tsi721_desc_fill_init(struct tsi721_tx_desc *desc,
		      struct tsi721_dma_desc *bd_ptr,
		      struct scatterlist *sg, u32 sys_size)
{
	u64 rio_addr;

	if (bd_ptr == NULL)
		return -EINVAL;

	/* Initialize DMA descriptor */
	/* type_id packs descriptor type, transaction type and destination ID */
	bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
					(desc->rtype << 19) | desc->destid);
	/* bcount carries the low 2 bits of the RIO address and sys_size;
	 * the byte count itself is OR-ed in by tsi721_desc_fill_end() */
	bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
					(sys_size << 26));
	/* Remaining RIO address bits, shifted right by the 2 bits above */
	rio_addr = (desc->rio_addr >> 2) |
				((u64)(desc->rio_addr_u & 0x3) << 62);
	bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
	bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32);
	bd_ptr->t1.bufptr_lo = cpu_to_le32(
					(u64)sg_dma_address(sg) & 0xffffffff);
	bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32);
	bd_ptr->t1.s_dist = 0;
	bd_ptr->t1.s_size = 0;

	return 0;
}
361 | ||
/*
 * tsi721_desc_fill_end - finalize a hardware descriptor started by
 * tsi721_desc_fill_init(): OR in the total byte count and, optionally,
 * the interrupt-on-finish flag.
 * @bd_ptr: descriptor to finalize
 * @bcount: total byte count (masked to TSI721_DMAD_BCOUNT1)
 * @interrupt: when true, set TSI721_DMAD_IOF to raise an IRQ on completion
 *
 * Returns 0 on success or -EINVAL if @bd_ptr is NULL.
 */
static int
tsi721_desc_fill_end(struct tsi721_dma_desc *bd_ptr, u32 bcount, bool interrupt)
{
	if (bd_ptr == NULL)
		return -EINVAL;

	/* Update DMA descriptor */
	if (interrupt)
		bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
	bd_ptr->bcount |= cpu_to_le32(bcount & TSI721_DMAD_BCOUNT1);

	return 0;
}
375 | ||
50835e97 AB |
/*
 * tsi721_dma_tx_err - fail a transaction: return its descriptor to the
 * channel free list and invoke the client completion callback, if any.
 *
 * NOTE(review): callers invoke this with the channel spinlock held, so the
 * client callback also runs under that lock — verify callbacks never
 * resubmit on the same channel synchronously.
 */
static void tsi721_dma_tx_err(struct tsi721_bdma_chan *bdma_chan,
			      struct tsi721_tx_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	dma_async_tx_callback callback = txd->callback;
	void *param = txd->callback_param;

	list_move(&desc->desc_node, &bdma_chan->free_list);

	if (callback)
		callback(param);
}
388 | ||
9eaa3d9b AB |
/*
 * tsi721_clr_stat - drain the descriptor status FIFO
 * @bdma_chan: channel whose FIFO to drain
 *
 * Walks the status FIFO from the software read pointer, zeroing every
 * non-zero entry (eight u64 words per FIFO line), then publishes the new
 * read pointer to the DSRP register and caches it in sts_rdptr.
 */
static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
{
	u32 srd_ptr;
	u64 *sts_ptr;
	int i, j;

	/* Check and clear descriptor status FIFO entries */
	srd_ptr = bdma_chan->sts_rdptr;
	sts_ptr = bdma_chan->sts_base;
	j = srd_ptr * 8;
	while (sts_ptr[j]) {
		for (i = 0; i < 8 && sts_ptr[j]; i++, j++)
			sts_ptr[j] = 0;

		/* Advance read pointer, wrapping at FIFO size */
		++srd_ptr;
		srd_ptr %= bdma_chan->sts_size;
		j = srd_ptr * 8;
	}

	iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP);
	bdma_chan->sts_rdptr = srd_ptr;
}
411 | ||
50835e97 AB |
/* Must be called with the channel spinlock held */
/*
 * tsi721_submit_sg - program a transaction's scatterlist into the HW ring
 * @desc: transaction descriptor whose SG list is to be programmed
 *
 * Translates the transaction's SG entries into DTYPE1 hardware buffer
 * descriptors, merging physically contiguous entries into a single
 * descriptor where the combined length fits TSI721_BDMA_MAX_BCOUNT. If
 * the HW ring fills up before all entries are consumed, @desc->sg /
 * @desc->sg_len are updated so a later call resumes where this one left
 * off; sg_len == 0 marks the transaction fully programmed. On success the
 * channel's wr_count_next is advanced by the number of descriptors added
 * (tsi721_start_dma() publishes it to the hardware).
 *
 * Returns 0 on success, -EIO if the channel is not idle, or -EINVAL for
 * an oversized SG entry / descriptor build failure.
 */
static int tsi721_submit_sg(struct tsi721_tx_desc *desc)
{
	struct dma_chan *dchan = desc->txd.chan;
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	u32 sys_size;
	u64 rio_addr;
	dma_addr_t next_addr;
	u32 bcount;
	struct scatterlist *sg;
	unsigned int i;
	int err = 0;
	struct tsi721_dma_desc *bd_ptr = NULL;
	u32 idx, rd_idx;
	u32 add_count = 0;
	struct device *ch_dev = &dchan->dev->device;

	if (!tsi721_dma_is_idle(bdma_chan)) {
		tsi_err(ch_dev, "DMAC%d ERR: Attempt to use non-idle channel",
			bdma_chan->id);
		return -EIO;
	}

	/*
	 * Fill DMA channel's hardware buffer descriptors.
	 * (NOTE: RapidIO destination address is limited to 64 bits for now)
	 */
	rio_addr = desc->rio_addr;
	next_addr = -1;		/* sentinel: no previous entry to merge with */
	bcount = 0;
	sys_size = dma_to_mport(dchan->device)->sys_size;

	/* Hardware read index, modulo ring size (incl. link descriptor) */
	rd_idx = ioread32(bdma_chan->regs + TSI721_DMAC_DRDCNT);
	rd_idx %= (bdma_chan->bd_num + 1);

	idx = bdma_chan->wr_count_next % (bdma_chan->bd_num + 1);
	if (idx == bdma_chan->bd_num) {
		/* wrap around link descriptor */
		idx = 0;
		add_count++;
	}

	tsi_debug(DMA, ch_dev, "DMAC%d BD ring status: rdi=%d wri=%d",
		  bdma_chan->id, rd_idx, idx);

	for_each_sg(desc->sg, sg, desc->sg_len, i) {

		tsi_debug(DMAV, ch_dev, "DMAC%d sg%d/%d addr: 0x%llx len: %d",
			  bdma_chan->id, i, desc->sg_len,
			(unsigned long long)sg_dma_address(sg), sg_dma_len(sg));

		if (sg_dma_len(sg) > TSI721_BDMA_MAX_BCOUNT) {
			tsi_err(ch_dev, "DMAC%d SG entry %d is too large",
				bdma_chan->id, i);
			err = -EINVAL;
			break;
		}

		/*
		 * If this sg entry forms contiguous block with previous one,
		 * try to merge it into existing DMA descriptor
		 */
		if (next_addr == sg_dma_address(sg) &&
		    bcount + sg_dma_len(sg) <= TSI721_BDMA_MAX_BCOUNT) {
			/* Adjust byte count of the descriptor */
			bcount += sg_dma_len(sg);
			goto entry_done;
		} else if (next_addr != -1) {
			/* Finalize descriptor using total byte count value */
			tsi721_desc_fill_end(bd_ptr, bcount, 0);
			tsi_debug(DMAV, ch_dev, "DMAC%d prev desc final len: %d",
				  bdma_chan->id, bcount);
		}

		/* Remember resume point in case the ring fills up below */
		desc->rio_addr = rio_addr;

		if (i && idx == rd_idx) {
			tsi_debug(DMAV, ch_dev,
				  "DMAC%d HW descriptor ring is full @ %d",
				  bdma_chan->id, i);
			/* Save remaining SG entries for a later submit */
			desc->sg = sg;
			desc->sg_len -= i;
			break;
		}

		bd_ptr = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[idx];
		err = tsi721_desc_fill_init(desc, bd_ptr, sg, sys_size);
		if (err) {
			tsi_err(ch_dev, "Failed to build desc: err=%d", err);
			break;
		}

		tsi_debug(DMAV, ch_dev, "DMAC%d bd_ptr = %p did=%d raddr=0x%llx",
			  bdma_chan->id, bd_ptr, desc->destid, desc->rio_addr);

		next_addr = sg_dma_address(sg);
		bcount = sg_dma_len(sg);

		add_count++;
		if (++idx == bdma_chan->bd_num) {
			/* wrap around link descriptor */
			idx = 0;
			add_count++;
		}

entry_done:
		if (sg_is_last(sg)) {
			tsi721_desc_fill_end(bd_ptr, bcount, 0);
			tsi_debug(DMAV, ch_dev,
				  "DMAC%d last desc final len: %d",
				  bdma_chan->id, bcount);
			/* sg_len == 0 marks the transaction fully programmed */
			desc->sg_len = 0;
		} else {
			rio_addr += sg_dma_len(sg);
			next_addr += sg_dma_len(sg);
		}
	}

	if (!err)
		bdma_chan->wr_count_next += add_count;

	return err;
}
535 | ||
d2a321f3 AB |
/*
 * tsi721_advance_work - push the next chunk of work into the hardware
 * @bdma_chan: channel to advance (callers hold the channel spinlock)
 * @desc: transaction to continue, or NULL to fetch one from the queue
 *
 * Does nothing while a transfer is in flight. Otherwise, when @desc is
 * NULL and no transaction is active, dequeues the next pending transaction
 * and makes it active; then programs its SG list into the hardware ring
 * and starts the channel. On a programming error the transaction is
 * failed via tsi721_dma_tx_err().
 */
static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan,
				struct tsi721_tx_desc *desc)
{
	int err;

	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d", bdma_chan->id);

	if (!tsi721_dma_is_idle(bdma_chan))
		return;

	/*
	 * If there is no data transfer in progress, fetch new descriptor from
	 * the pending queue.
	 */

	if (desc == NULL && bdma_chan->active_tx == NULL &&
	    !list_empty(&bdma_chan->queue)) {
		desc = list_first_entry(&bdma_chan->queue,
					struct tsi721_tx_desc, desc_node);
		list_del_init((&desc->desc_node));
		bdma_chan->active_tx = desc;
	}

	if (desc) {
		err = tsi721_submit_sg(desc);
		if (!err)
			tsi721_start_dma(bdma_chan);
		else {
			tsi721_dma_tx_err(bdma_chan, desc);
			tsi_debug(DMA, &bdma_chan->dchan.dev->device,
				"DMAC%d ERR: tsi721_submit_sg failed with err=%d",
				bdma_chan->id, err);
		}
	}

	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d Exit",
		  bdma_chan->id);
}
574 | ||
575 | static void tsi721_dma_tasklet(unsigned long data) | |
576 | { | |
577 | struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data; | |
578 | u32 dmac_int, dmac_sts; | |
579 | ||
580 | dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT); | |
72d8a0d2 AB |
581 | tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d_INT = 0x%x", |
582 | bdma_chan->id, dmac_int); | |
9eaa3d9b AB |
583 | /* Clear channel interrupts */ |
584 | iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT); | |
585 | ||
586 | if (dmac_int & TSI721_DMAC_INT_ERR) { | |
458bdf6e AB |
587 | int i = 10000; |
588 | struct tsi721_tx_desc *desc; | |
589 | ||
590 | desc = bdma_chan->active_tx; | |
9eaa3d9b | 591 | dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS); |
72d8a0d2 | 592 | tsi_err(&bdma_chan->dchan.dev->device, |
458bdf6e AB |
593 | "DMAC%d_STS = 0x%x did=%d raddr=0x%llx", |
594 | bdma_chan->id, dmac_sts, desc->destid, desc->rio_addr); | |
595 | ||
596 | /* Re-initialize DMA channel if possible */ | |
597 | ||
598 | if ((dmac_sts & TSI721_DMAC_STS_ABORT) == 0) | |
599 | goto err_out; | |
600 | ||
601 | tsi721_clr_stat(bdma_chan); | |
d2a321f3 AB |
602 | |
603 | spin_lock(&bdma_chan->lock); | |
458bdf6e AB |
604 | |
605 | /* Put DMA channel into init state */ | |
606 | iowrite32(TSI721_DMAC_CTL_INIT, | |
607 | bdma_chan->regs + TSI721_DMAC_CTL); | |
608 | do { | |
609 | udelay(1); | |
610 | dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS); | |
611 | i--; | |
612 | } while ((dmac_sts & TSI721_DMAC_STS_ABORT) && i); | |
613 | ||
614 | if (dmac_sts & TSI721_DMAC_STS_ABORT) { | |
615 | tsi_err(&bdma_chan->dchan.dev->device, | |
616 | "Failed to re-initiate DMAC%d", bdma_chan->id); | |
617 | spin_unlock(&bdma_chan->lock); | |
618 | goto err_out; | |
619 | } | |
620 | ||
621 | /* Setup DMA descriptor pointers */ | |
622 | iowrite32(((u64)bdma_chan->bd_phys >> 32), | |
623 | bdma_chan->regs + TSI721_DMAC_DPTRH); | |
624 | iowrite32(((u64)bdma_chan->bd_phys & TSI721_DMAC_DPTRL_MASK), | |
625 | bdma_chan->regs + TSI721_DMAC_DPTRL); | |
626 | ||
627 | /* Setup descriptor status FIFO */ | |
628 | iowrite32(((u64)bdma_chan->sts_phys >> 32), | |
629 | bdma_chan->regs + TSI721_DMAC_DSBH); | |
630 | iowrite32(((u64)bdma_chan->sts_phys & TSI721_DMAC_DSBL_MASK), | |
631 | bdma_chan->regs + TSI721_DMAC_DSBL); | |
632 | iowrite32(TSI721_DMAC_DSSZ_SIZE(bdma_chan->sts_size), | |
633 | bdma_chan->regs + TSI721_DMAC_DSSZ); | |
634 | ||
635 | /* Clear interrupt bits */ | |
636 | iowrite32(TSI721_DMAC_INT_ALL, | |
637 | bdma_chan->regs + TSI721_DMAC_INT); | |
638 | ||
639 | ioread32(bdma_chan->regs + TSI721_DMAC_INT); | |
640 | ||
641 | bdma_chan->wr_count = bdma_chan->wr_count_next = 0; | |
642 | bdma_chan->sts_rdptr = 0; | |
643 | udelay(10); | |
644 | ||
645 | desc = bdma_chan->active_tx; | |
646 | desc->status = DMA_ERROR; | |
647 | dma_cookie_complete(&desc->txd); | |
648 | list_add(&desc->desc_node, &bdma_chan->free_list); | |
d2a321f3 | 649 | bdma_chan->active_tx = NULL; |
458bdf6e AB |
650 | if (bdma_chan->active) |
651 | tsi721_advance_work(bdma_chan, NULL); | |
d2a321f3 | 652 | spin_unlock(&bdma_chan->lock); |
9eaa3d9b AB |
653 | } |
654 | ||
655 | if (dmac_int & TSI721_DMAC_INT_STFULL) { | |
72d8a0d2 AB |
656 | tsi_err(&bdma_chan->dchan.dev->device, |
657 | "DMAC%d descriptor status FIFO is full", | |
658 | bdma_chan->id); | |
9eaa3d9b AB |
659 | } |
660 | ||
661 | if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) { | |
50835e97 AB |
662 | struct tsi721_tx_desc *desc; |
663 | ||
9eaa3d9b AB |
664 | tsi721_clr_stat(bdma_chan); |
665 | spin_lock(&bdma_chan->lock); | |
d2a321f3 | 666 | desc = bdma_chan->active_tx; |
50835e97 AB |
667 | |
668 | if (desc->sg_len == 0) { | |
669 | dma_async_tx_callback callback = NULL; | |
670 | void *param = NULL; | |
671 | ||
672 | desc->status = DMA_COMPLETE; | |
673 | dma_cookie_complete(&desc->txd); | |
674 | if (desc->txd.flags & DMA_PREP_INTERRUPT) { | |
675 | callback = desc->txd.callback; | |
676 | param = desc->txd.callback_param; | |
677 | } | |
d2a321f3 AB |
678 | list_add(&desc->desc_node, &bdma_chan->free_list); |
679 | bdma_chan->active_tx = NULL; | |
458bdf6e AB |
680 | if (bdma_chan->active) |
681 | tsi721_advance_work(bdma_chan, NULL); | |
50835e97 AB |
682 | spin_unlock(&bdma_chan->lock); |
683 | if (callback) | |
684 | callback(param); | |
e680b672 | 685 | } else { |
458bdf6e AB |
686 | if (bdma_chan->active) |
687 | tsi721_advance_work(bdma_chan, | |
688 | bdma_chan->active_tx); | |
e680b672 | 689 | spin_unlock(&bdma_chan->lock); |
50835e97 | 690 | } |
9eaa3d9b | 691 | } |
458bdf6e | 692 | err_out: |
9eaa3d9b AB |
693 | /* Re-Enable BDMA channel interrupts */ |
694 | iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE); | |
695 | } | |
696 | ||
697 | static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd) | |
698 | { | |
699 | struct tsi721_tx_desc *desc = to_tsi721_desc(txd); | |
700 | struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan); | |
701 | dma_cookie_t cookie; | |
702 | ||
50835e97 AB |
703 | /* Check if the descriptor is detached from any lists */ |
704 | if (!list_empty(&desc->desc_node)) { | |
72d8a0d2 AB |
705 | tsi_err(&bdma_chan->dchan.dev->device, |
706 | "DMAC%d wrong state of descriptor %p", | |
707 | bdma_chan->id, txd); | |
50835e97 AB |
708 | return -EIO; |
709 | } | |
9eaa3d9b | 710 | |
50835e97 | 711 | spin_lock_bh(&bdma_chan->lock); |
9eaa3d9b | 712 | |
50835e97 AB |
713 | if (!bdma_chan->active) { |
714 | spin_unlock_bh(&bdma_chan->lock); | |
715 | return -ENODEV; | |
9eaa3d9b AB |
716 | } |
717 | ||
50835e97 AB |
718 | cookie = dma_cookie_assign(txd); |
719 | desc->status = DMA_IN_PROGRESS; | |
720 | list_add_tail(&desc->desc_node, &bdma_chan->queue); | |
721 | ||
9eaa3d9b AB |
722 | spin_unlock_bh(&bdma_chan->lock); |
723 | return cookie; | |
724 | } | |
725 | ||
/*
 * tsi721_alloc_chan_resources - dmaengine device_alloc_chan_resources hook
 * @dchan: channel being opened by a client
 *
 * Initializes the BDMA hardware (descriptor ring sized by the
 * dma_desc_per_channel module parameter), allocates the fixed-size pool of
 * TSI721_DMA_TX_QUEUE_SZ software transaction descriptors, places them on
 * the free list, activates the channel and enables its interrupts.
 *
 * Returns the number of available transaction descriptors
 * (TSI721_DMA_TX_QUEUE_SZ) on success — also when the channel is already
 * initialized — or -ENODEV / -ENOMEM on failure.
 */
static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc = NULL;
	int i;

	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

	/* Already initialized — nothing to do */
	if (bdma_chan->bd_base)
		return TSI721_DMA_TX_QUEUE_SZ;

	/* Initialize BDMA channel */
	if (tsi721_bdma_ch_init(bdma_chan, dma_desc_per_channel)) {
		tsi_err(&dchan->dev->device, "Unable to initialize DMAC%d",
			bdma_chan->id);
		return -ENODEV;
	}

	/* Allocate queue of transaction descriptors */
	desc = kcalloc(TSI721_DMA_TX_QUEUE_SZ, sizeof(struct tsi721_tx_desc),
			GFP_ATOMIC);
	if (!desc) {
		tsi_err(&dchan->dev->device,
			"DMAC%d Failed to allocate logical descriptors",
			bdma_chan->id);
		tsi721_bdma_ch_free(bdma_chan);
		return -ENOMEM;
	}

	bdma_chan->tx_desc = desc;

	for (i = 0; i < TSI721_DMA_TX_QUEUE_SZ; i++) {
		dma_async_tx_descriptor_init(&desc[i].txd, dchan);
		desc[i].txd.tx_submit = tsi721_tx_submit;
		desc[i].txd.flags = DMA_CTRL_ACK;
		list_add(&desc[i].desc_node, &bdma_chan->free_list);
	}

	dma_cookie_init(dchan);

	bdma_chan->active = true;
	tsi721_bdma_interrupt_enable(bdma_chan, 1);

	return TSI721_DMA_TX_QUEUE_SZ;
}
771 | ||
/*
 * tsi721_sync_dma_irq - wait for any in-flight interrupt handler of this
 * channel to finish: both per-channel MSI-X vectors when MSI-X is used,
 * otherwise the device's shared legacy/MSI line.
 */
static void tsi721_sync_dma_irq(struct tsi721_bdma_chan *bdma_chan)
{
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE +
					   bdma_chan->id].vector);
		synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT +
					   bdma_chan->id].vector);
	} else
#endif
	synchronize_irq(priv->pdev->irq);
}
04379dff | 786 | |
50835e97 AB |
/*
 * tsi721_free_chan_resources - dmaengine device_free_chan_resources hook
 * @dchan: channel being released
 *
 * Teardown order matters: mask channel interrupts, deactivate the channel
 * (so handlers stop scheduling the tasklet), wait out in-flight IRQ
 * handlers, kill the tasklet, then release the software descriptor pool
 * and the hardware resources. A channel that was never initialized
 * (bd_base == NULL) is left untouched.
 */
static void tsi721_free_chan_resources(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

	if (bdma_chan->bd_base == NULL)
		return;

	tsi721_bdma_interrupt_enable(bdma_chan, 0);
	bdma_chan->active = false;
	tsi721_sync_dma_irq(bdma_chan);
	tasklet_kill(&bdma_chan->tasklet);
	/* Reset the free list before freeing the pool backing its nodes */
	INIT_LIST_HEAD(&bdma_chan->free_list);
	kfree(bdma_chan->tx_desc);
	tsi721_bdma_ch_free(bdma_chan);
}
804 | ||
805 | static | |
806 | enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie, | |
807 | struct dma_tx_state *txstate) | |
808 | { | |
e680b672 AB |
809 | struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); |
810 | enum dma_status status; | |
811 | ||
812 | spin_lock_bh(&bdma_chan->lock); | |
813 | status = dma_cookie_status(dchan, cookie, txstate); | |
814 | spin_unlock_bh(&bdma_chan->lock); | |
815 | return status; | |
9eaa3d9b AB |
816 | } |
817 | ||
/*
 * tsi721_issue_pending - dmaengine device_issue_pending hook
 * @dchan: channel whose queued transactions should start
 *
 * Under the channel lock, starts queued work only when the hardware is
 * idle and the channel is active. Note the condition order: the idle
 * check performs an MMIO status read before the cheap flag test is
 * evaluated — preserved as written.
 */
static void tsi721_issue_pending(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

	spin_lock_bh(&bdma_chan->lock);
	if (tsi721_dma_is_idle(bdma_chan) && bdma_chan->active) {
		tsi721_advance_work(bdma_chan, NULL);
	}
	spin_unlock_bh(&bdma_chan->lock);
}
830 | ||
831 | static | |
832 | struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan, | |
833 | struct scatterlist *sgl, unsigned int sg_len, | |
834 | enum dma_transfer_direction dir, unsigned long flags, | |
835 | void *tinfo) | |
836 | { | |
837 | struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); | |
83472457 | 838 | struct tsi721_tx_desc *desc; |
9eaa3d9b | 839 | struct rio_dma_ext *rext = tinfo; |
9eaa3d9b | 840 | enum dma_rtype rtype; |
50835e97 | 841 | struct dma_async_tx_descriptor *txd = NULL; |
9eaa3d9b AB |
842 | |
843 | if (!sgl || !sg_len) { | |
72d8a0d2 AB |
844 | tsi_err(&dchan->dev->device, "DMAC%d No SG list", |
845 | bdma_chan->id); | |
83472457 | 846 | return ERR_PTR(-EINVAL); |
9eaa3d9b AB |
847 | } |
848 | ||
72d8a0d2 AB |
849 | tsi_debug(DMA, &dchan->dev->device, "DMAC%d %s", bdma_chan->id, |
850 | (dir == DMA_DEV_TO_MEM)?"READ":"WRITE"); | |
50835e97 | 851 | |
9eaa3d9b AB |
852 | if (dir == DMA_DEV_TO_MEM) |
853 | rtype = NREAD; | |
854 | else if (dir == DMA_MEM_TO_DEV) { | |
855 | switch (rext->wr_type) { | |
856 | case RDW_ALL_NWRITE: | |
857 | rtype = ALL_NWRITE; | |
858 | break; | |
859 | case RDW_ALL_NWRITE_R: | |
860 | rtype = ALL_NWRITE_R; | |
861 | break; | |
862 | case RDW_LAST_NWRITE_R: | |
863 | default: | |
864 | rtype = LAST_NWRITE_R; | |
865 | break; | |
866 | } | |
867 | } else { | |
72d8a0d2 AB |
868 | tsi_err(&dchan->dev->device, |
869 | "DMAC%d Unsupported DMA direction option", | |
870 | bdma_chan->id); | |
83472457 | 871 | return ERR_PTR(-EINVAL); |
9eaa3d9b AB |
872 | } |
873 | ||
50835e97 | 874 | spin_lock_bh(&bdma_chan->lock); |
40f847ba | 875 | |
83472457 AB |
876 | if (!list_empty(&bdma_chan->free_list)) { |
877 | desc = list_first_entry(&bdma_chan->free_list, | |
878 | struct tsi721_tx_desc, desc_node); | |
879 | list_del_init(&desc->desc_node); | |
880 | desc->destid = rext->destid; | |
881 | desc->rio_addr = rext->rio_addr; | |
882 | desc->rio_addr_u = 0; | |
883 | desc->rtype = rtype; | |
884 | desc->sg_len = sg_len; | |
885 | desc->sg = sgl; | |
886 | txd = &desc->txd; | |
887 | txd->flags = flags; | |
9eaa3d9b AB |
888 | } |
889 | ||
50835e97 | 890 | spin_unlock_bh(&bdma_chan->lock); |
9eaa3d9b | 891 | |
83472457 AB |
892 | if (!txd) { |
893 | tsi_debug(DMA, &dchan->dev->device, | |
894 | "DMAC%d free TXD is not available", bdma_chan->id); | |
895 | return ERR_PTR(-EBUSY); | |
896 | } | |
897 | ||
50835e97 | 898 | return txd; |
9eaa3d9b AB |
899 | } |
900 | ||
7664cfe0 | 901 | static int tsi721_terminate_all(struct dma_chan *dchan) |
9eaa3d9b AB |
902 | { |
903 | struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); | |
904 | struct tsi721_tx_desc *desc, *_d; | |
905 | LIST_HEAD(list); | |
906 | ||
72d8a0d2 | 907 | tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id); |
9eaa3d9b | 908 | |
9eaa3d9b AB |
909 | spin_lock_bh(&bdma_chan->lock); |
910 | ||
50835e97 AB |
911 | bdma_chan->active = false; |
912 | ||
458bdf6e AB |
913 | while (!tsi721_dma_is_idle(bdma_chan)) { |
914 | ||
915 | udelay(5); | |
916 | #if (0) | |
50835e97 AB |
917 | /* make sure to stop the transfer */ |
918 | iowrite32(TSI721_DMAC_CTL_SUSP, | |
919 | bdma_chan->regs + TSI721_DMAC_CTL); | |
920 | ||
921 | /* Wait until DMA channel stops */ | |
922 | do { | |
923 | dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT); | |
924 | } while ((dmac_int & TSI721_DMAC_INT_SUSP) == 0); | |
458bdf6e | 925 | #endif |
50835e97 | 926 | } |
9eaa3d9b | 927 | |
d2a321f3 AB |
928 | if (bdma_chan->active_tx) |
929 | list_add(&bdma_chan->active_tx->desc_node, &list); | |
9eaa3d9b AB |
930 | list_splice_init(&bdma_chan->queue, &list); |
931 | ||
932 | list_for_each_entry_safe(desc, _d, &list, desc_node) | |
50835e97 | 933 | tsi721_dma_tx_err(bdma_chan, desc); |
9eaa3d9b AB |
934 | |
935 | spin_unlock_bh(&bdma_chan->lock); | |
936 | ||
937 | return 0; | |
938 | } | |
939 | ||
e3dd8cd4 AB |
940 | static void tsi721_dma_stop(struct tsi721_bdma_chan *bdma_chan) |
941 | { | |
942 | if (!bdma_chan->active) | |
943 | return; | |
944 | spin_lock_bh(&bdma_chan->lock); | |
945 | if (!tsi721_dma_is_idle(bdma_chan)) { | |
946 | int timeout = 100000; | |
947 | ||
948 | /* stop the transfer in progress */ | |
949 | iowrite32(TSI721_DMAC_CTL_SUSP, | |
950 | bdma_chan->regs + TSI721_DMAC_CTL); | |
951 | ||
952 | /* Wait until DMA channel stops */ | |
953 | while (!tsi721_dma_is_idle(bdma_chan) && --timeout) | |
954 | udelay(1); | |
955 | } | |
956 | ||
957 | spin_unlock_bh(&bdma_chan->lock); | |
958 | } | |
959 | ||
960 | void tsi721_dma_stop_all(struct tsi721_device *priv) | |
961 | { | |
962 | int i; | |
963 | ||
964 | for (i = 0; i < TSI721_DMA_MAXCH; i++) { | |
965 | if (i != TSI721_DMACH_MAINT) | |
966 | tsi721_dma_stop(&priv->bdma[i]); | |
967 | } | |
968 | } | |
969 | ||
305c891e | 970 | int tsi721_register_dma(struct tsi721_device *priv) |
9eaa3d9b AB |
971 | { |
972 | int i; | |
50835e97 | 973 | int nr_channels = 0; |
9eaa3d9b | 974 | int err; |
748353cc | 975 | struct rio_mport *mport = &priv->mport; |
9eaa3d9b | 976 | |
9eaa3d9b AB |
977 | INIT_LIST_HEAD(&mport->dma.channels); |
978 | ||
50835e97 | 979 | for (i = 0; i < TSI721_DMA_MAXCH; i++) { |
9eaa3d9b AB |
980 | struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i]; |
981 | ||
982 | if (i == TSI721_DMACH_MAINT) | |
983 | continue; | |
984 | ||
9eaa3d9b AB |
985 | bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i); |
986 | ||
987 | bdma_chan->dchan.device = &mport->dma; | |
988 | bdma_chan->dchan.cookie = 1; | |
989 | bdma_chan->dchan.chan_id = i; | |
990 | bdma_chan->id = i; | |
04379dff | 991 | bdma_chan->active = false; |
9eaa3d9b AB |
992 | |
993 | spin_lock_init(&bdma_chan->lock); | |
994 | ||
d2a321f3 | 995 | bdma_chan->active_tx = NULL; |
9eaa3d9b AB |
996 | INIT_LIST_HEAD(&bdma_chan->queue); |
997 | INIT_LIST_HEAD(&bdma_chan->free_list); | |
998 | ||
999 | tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet, | |
1000 | (unsigned long)bdma_chan); | |
9eaa3d9b AB |
1001 | list_add_tail(&bdma_chan->dchan.device_node, |
1002 | &mport->dma.channels); | |
50835e97 | 1003 | nr_channels++; |
9eaa3d9b AB |
1004 | } |
1005 | ||
50835e97 | 1006 | mport->dma.chancnt = nr_channels; |
9eaa3d9b AB |
1007 | dma_cap_zero(mport->dma.cap_mask); |
1008 | dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask); | |
1009 | dma_cap_set(DMA_SLAVE, mport->dma.cap_mask); | |
1010 | ||
50835e97 | 1011 | mport->dma.dev = &priv->pdev->dev; |
9eaa3d9b AB |
1012 | mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources; |
1013 | mport->dma.device_free_chan_resources = tsi721_free_chan_resources; | |
1014 | mport->dma.device_tx_status = tsi721_tx_status; | |
1015 | mport->dma.device_issue_pending = tsi721_issue_pending; | |
1016 | mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg; | |
7664cfe0 | 1017 | mport->dma.device_terminate_all = tsi721_terminate_all; |
9eaa3d9b AB |
1018 | |
1019 | err = dma_async_device_register(&mport->dma); | |
1020 | if (err) | |
72d8a0d2 | 1021 | tsi_err(&priv->pdev->dev, "Failed to register DMA device"); |
9eaa3d9b AB |
1022 | |
1023 | return err; | |
1024 | } | |
748353cc AB |
1025 | |
1026 | void tsi721_unregister_dma(struct tsi721_device *priv) | |
1027 | { | |
1028 | struct rio_mport *mport = &priv->mport; | |
1029 | struct dma_chan *chan, *_c; | |
1030 | struct tsi721_bdma_chan *bdma_chan; | |
1031 | ||
1032 | tsi721_dma_stop_all(priv); | |
1033 | dma_async_device_unregister(&mport->dma); | |
1034 | ||
1035 | list_for_each_entry_safe(chan, _c, &mport->dma.channels, | |
1036 | device_node) { | |
1037 | bdma_chan = to_tsi721_chan(chan); | |
1038 | if (bdma_chan->active) { | |
1039 | tsi721_bdma_interrupt_enable(bdma_chan, 0); | |
1040 | bdma_chan->active = false; | |
1041 | tsi721_sync_dma_irq(bdma_chan); | |
1042 | tasklet_kill(&bdma_chan->tasklet); | |
1043 | INIT_LIST_HEAD(&bdma_chan->free_list); | |
1044 | kfree(bdma_chan->tx_desc); | |
1045 | tsi721_bdma_ch_free(bdma_chan); | |
1046 | } | |
1047 | ||
1048 | list_del(&chan->device_node); | |
1049 | } | |
1050 | } |