Commit | Line | Data |
---|---|---|
9ab65aff | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
9eaa3d9b AB |
2 | /* |
3 | * DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge | |
4 | * | |
50835e97 | 5 | * Copyright (c) 2011-2014 Integrated Device Technology, Inc. |
9eaa3d9b | 6 | * Alexandre Bounine <alexandre.bounine@idt.com> |
9eaa3d9b AB |
7 | */ |
8 | ||
9 | #include <linux/io.h> | |
10 | #include <linux/errno.h> | |
11 | #include <linux/init.h> | |
12 | #include <linux/ioport.h> | |
13 | #include <linux/kernel.h> | |
14 | #include <linux/module.h> | |
15 | #include <linux/pci.h> | |
16 | #include <linux/rio.h> | |
17 | #include <linux/rio_drv.h> | |
18 | #include <linux/dma-mapping.h> | |
19 | #include <linux/interrupt.h> | |
20 | #include <linux/kfifo.h> | |
72d8a0d2 | 21 | #include <linux/sched.h> |
9eaa3d9b | 22 | #include <linux/delay.h> |
50835e97 | 23 | #include "../../dma/dmaengine.h" |
9eaa3d9b AB |
24 | |
25 | #include "tsi721.h" | |
26 | ||
50835e97 AB |
27 | #ifdef CONFIG_PCI_MSI |
28 | static irqreturn_t tsi721_bdma_msix(int irq, void *ptr); | |
29 | #endif | |
30 | static int tsi721_submit_sg(struct tsi721_tx_desc *desc); | |
31 | ||
/* Number of hardware buffer descriptors allocated per BDMA channel
 * (passed to tsi721_bdma_ch_init() from tsi721_alloc_chan_resources()). */
static unsigned int dma_desc_per_channel = 128;
module_param(dma_desc_per_channel, uint, S_IRUGO);
MODULE_PARM_DESC(dma_desc_per_channel,
		 "Number of DMA descriptors per channel (default: 128)");

/* Depth of the per-channel software transaction queue; also the value
 * returned by tsi721_alloc_chan_resources() as the descriptor count. */
static unsigned int dma_txqueue_sz = 16;
module_param(dma_txqueue_sz, uint, S_IRUGO);
MODULE_PARM_DESC(dma_txqueue_sz,
		 "DMA Transactions Queue Size (default: 16)");

/* Bitmask of BDMA channels this driver may use.
 * NOTE(review): not referenced in this part of the file — presumably
 * consumed during channel registration elsewhere; confirm against the
 * rest of the driver. */
static u8 dma_sel = 0x7f;
module_param(dma_sel, byte, S_IRUGO);
MODULE_PARM_DESC(dma_sel,
		 "DMA Channel Selection Mask (default: 0x7f = all)");
46 | ||
9eaa3d9b AB |
47 | static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan) |
48 | { | |
49 | return container_of(chan, struct tsi721_bdma_chan, dchan); | |
50 | } | |
51 | ||
52 | static inline struct tsi721_device *to_tsi721(struct dma_device *ddev) | |
53 | { | |
54 | return container_of(ddev, struct rio_mport, dma)->priv; | |
55 | } | |
56 | ||
57 | static inline | |
58 | struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd) | |
59 | { | |
60 | return container_of(txd, struct tsi721_tx_desc, txd); | |
61 | } | |
62 | ||
/*
 * tsi721_bdma_ch_init - allocate and program HW resources for one BDMA channel
 * @bdma_chan: channel to initialize
 * @bd_num: number of usable buffer descriptors (one extra DTYPE3 link
 *          descriptor is appended to close the ring)
 *
 * Allocates the buffer-descriptor ring and the descriptor status FIFO from
 * coherent DMA memory, writes their bus addresses into the channel registers,
 * and in MSI-X mode requests the per-channel DONE and INT vectors.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -EIO when MSI-X
 * interrupt setup fails.
 */
static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
{
	struct tsi721_dma_desc *bd_ptr;
	struct device *dev = bdma_chan->dchan.device->dev;
	u64		*sts_ptr;
	dma_addr_t	bd_phys;
	dma_addr_t	sts_phys;
	int		sts_size;
#ifdef CONFIG_PCI_MSI
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif

	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d", bdma_chan->id);

	/*
	 * Allocate space for DMA descriptors
	 * (add an extra element for link descriptor)
	 */
	bd_ptr = dma_alloc_coherent(dev,
				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
				&bd_phys, GFP_ATOMIC);
	if (!bd_ptr)
		return -ENOMEM;

	bdma_chan->bd_num = bd_num;
	bdma_chan->bd_phys = bd_phys;
	bdma_chan->bd_base = bd_ptr;

	tsi_debug(DMA, &bdma_chan->dchan.dev->device,
		  "DMAC%d descriptors @ %p (phys = %pad)",
		  bdma_chan->id, bd_ptr, &bd_phys);

	/* Allocate space for descriptor status FIFO */
	sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ?
					(bd_num + 1) : TSI721_DMA_MINSTSSZ;
	/* hardware requires a power-of-two status FIFO size */
	sts_size = roundup_pow_of_two(sts_size);
	sts_ptr = dma_alloc_coherent(dev,
				     sts_size * sizeof(struct tsi721_dma_sts),
				     &sts_phys, GFP_ATOMIC);
	if (!sts_ptr) {
		/* Free space allocated for DMA descriptors */
		dma_free_coherent(dev,
				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
				bd_ptr, bd_phys);
		bdma_chan->bd_base = NULL;
		return -ENOMEM;
	}

	bdma_chan->sts_phys = sts_phys;
	bdma_chan->sts_base = sts_ptr;
	bdma_chan->sts_size = sts_size;

	tsi_debug(DMA, &bdma_chan->dchan.dev->device,
		"DMAC%d desc status FIFO @ %p (phys = %pad) size=0x%x",
		bdma_chan->id, sts_ptr, &sts_phys, sts_size);

	/* Initialize DMA descriptors ring using added link descriptor */
	bd_ptr[bd_num].type_id = cpu_to_le32(DTYPE3 << 29);
	bd_ptr[bd_num].next_lo = cpu_to_le32((u64)bd_phys &
						 TSI721_DMAC_DPTRL_MASK);
	bd_ptr[bd_num].next_hi = cpu_to_le32((u64)bd_phys >> 32);

	/* Setup DMA descriptor pointers */
	iowrite32(((u64)bd_phys >> 32),
		bdma_chan->regs + TSI721_DMAC_DPTRH);
	iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
		bdma_chan->regs + TSI721_DMAC_DPTRL);

	/* Setup descriptor status FIFO */
	iowrite32(((u64)sts_phys >> 32),
		bdma_chan->regs + TSI721_DMAC_DSBH);
	iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
		bdma_chan->regs + TSI721_DMAC_DSBL);
	iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
		bdma_chan->regs + TSI721_DMAC_DSSZ);

	/* Clear interrupt bits */
	iowrite32(TSI721_DMAC_INT_ALL,
		bdma_chan->regs + TSI721_DMAC_INT);

	/* read back to flush the posted write */
	ioread32(bdma_chan->regs + TSI721_DMAC_INT);

#ifdef CONFIG_PCI_MSI
	/* Request interrupt service if we are in MSI-X mode */
	if (priv->flags & TSI721_USING_MSIX) {
		int rc, idx;

		idx = TSI721_VECT_DMA0_DONE + bdma_chan->id;

		rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
				 priv->msix[idx].irq_name, (void *)bdma_chan);

		if (rc) {
			tsi_debug(DMA, &bdma_chan->dchan.dev->device,
				  "Unable to get MSI-X for DMAC%d-DONE",
				  bdma_chan->id);
			goto err_out;
		}

		idx = TSI721_VECT_DMA0_INT + bdma_chan->id;

		rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
				priv->msix[idx].irq_name, (void *)bdma_chan);

		if (rc)	{
			tsi_debug(DMA, &bdma_chan->dchan.dev->device,
				  "Unable to get MSI-X for DMAC%d-INT",
				  bdma_chan->id);
			/* release the DONE vector obtained above; rc stays
			 * nonzero so the err_out path below cleans up memory */
			free_irq(
				priv->msix[TSI721_VECT_DMA0_DONE +
					    bdma_chan->id].vector,
				(void *)bdma_chan);
		}

err_out:
		if (rc) {
			/* Free space allocated for DMA descriptors */
			dma_free_coherent(dev,
				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
				bd_ptr, bd_phys);
			bdma_chan->bd_base = NULL;

			/* Free space allocated for status descriptors */
			dma_free_coherent(dev,
				sts_size * sizeof(struct tsi721_dma_sts),
				sts_ptr, sts_phys);
			bdma_chan->sts_base = NULL;

			return -EIO;
		}
	}
#endif /* CONFIG_PCI_MSI */

	/* Toggle DMA channel initialization */
	iowrite32(TSI721_DMAC_CTL_INIT,	bdma_chan->regs + TSI721_DMAC_CTL);
	ioread32(bdma_chan->regs + TSI721_DMAC_CTL);
	bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
	bdma_chan->sts_rdptr = 0;
	udelay(10);

	return 0;
}
205 | ||
/*
 * tsi721_bdma_ch_free - release HW resources held by a BDMA channel
 * @bdma_chan: channel to tear down
 *
 * Undoes tsi721_bdma_ch_init(): frees the MSI-X vectors (when used) and
 * the coherent BD-ring and status-FIFO allocations.
 *
 * Returns 0 on success (including when the channel was never initialized),
 * or -EFAULT if the channel is still running and cannot be freed.
 */
static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
{
	u32 ch_stat;
#ifdef CONFIG_PCI_MSI
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif

	/* nothing to do if the channel was never initialized */
	if (!bdma_chan->bd_base)
		return 0;

	/* Check if DMA channel still running */
	ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
	if (ch_stat & TSI721_DMAC_STS_RUN)
		return -EFAULT;

	/* Put DMA channel into init state */
	iowrite32(TSI721_DMAC_CTL_INIT,	bdma_chan->regs + TSI721_DMAC_CTL);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
				    bdma_chan->id].vector, (void *)bdma_chan);
		free_irq(priv->msix[TSI721_VECT_DMA0_INT +
				    bdma_chan->id].vector, (void *)bdma_chan);
	}
#endif /* CONFIG_PCI_MSI */

	/* Free space allocated for DMA descriptors */
	dma_free_coherent(bdma_chan->dchan.device->dev,
		(bdma_chan->bd_num + 1) * sizeof(struct tsi721_dma_desc),
		bdma_chan->bd_base, bdma_chan->bd_phys);
	bdma_chan->bd_base = NULL;

	/* Free space allocated for status FIFO */
	dma_free_coherent(bdma_chan->dchan.device->dev,
		bdma_chan->sts_size * sizeof(struct tsi721_dma_sts),
		bdma_chan->sts_base, bdma_chan->sts_phys);
	bdma_chan->sts_base = NULL;
	return 0;
}
246 | ||
247 | static void | |
248 | tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable) | |
249 | { | |
250 | if (enable) { | |
251 | /* Clear pending BDMA channel interrupts */ | |
252 | iowrite32(TSI721_DMAC_INT_ALL, | |
253 | bdma_chan->regs + TSI721_DMAC_INT); | |
254 | ioread32(bdma_chan->regs + TSI721_DMAC_INT); | |
255 | /* Enable BDMA channel interrupts */ | |
256 | iowrite32(TSI721_DMAC_INT_ALL, | |
257 | bdma_chan->regs + TSI721_DMAC_INTE); | |
258 | } else { | |
259 | /* Disable BDMA channel interrupts */ | |
260 | iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE); | |
261 | /* Clear pending BDMA channel interrupts */ | |
262 | iowrite32(TSI721_DMAC_INT_ALL, | |
263 | bdma_chan->regs + TSI721_DMAC_INT); | |
264 | } | |
265 | ||
266 | } | |
267 | ||
268 | static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan) | |
269 | { | |
270 | u32 sts; | |
271 | ||
272 | sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS); | |
273 | return ((sts & TSI721_DMAC_STS_RUN) == 0); | |
274 | } | |
275 | ||
/*
 * tsi721_bdma_handler - top-half dispatch for a BDMA channel interrupt
 * @bdma_chan: channel that raised the interrupt
 *
 * Masks further channel interrupts and defers processing to the channel
 * tasklet; interrupts are re-enabled at the end of tsi721_dma_tasklet().
 */
void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
{
	/* Disable BDMA channel interrupts */
	iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
	/* only schedule work while the channel is marked active */
	if (bdma_chan->active)
		tasklet_hi_schedule(&bdma_chan->tasklet);
}
283 | ||
#ifdef CONFIG_PCI_MSI
/**
 * tsi721_bdma_msix - MSI-X interrupt handler for BDMA channels
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (BDMA channel structure)
 *
 * Handles BDMA channel interrupts signaled using MSI-X.
 * (Original kernel-doc named this "tsi721_omsg_msix" — a copy-paste error;
 * corrected to match the function.)
 */
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
{
	struct tsi721_bdma_chan *bdma_chan = ptr;

	/* defer all processing to the channel tasklet */
	if (bdma_chan->active)
		tasklet_hi_schedule(&bdma_chan->tasklet);
	return IRQ_HANDLED;
}
#endif /* CONFIG_PCI_MSI */
301 | ||
/*
 * tsi721_start_dma - kick off hardware processing of queued descriptors.
 * Must be called with the channel spinlock held.
 *
 * Writing the updated descriptor write count (DWRCNT) tells the channel how
 * many BDs are ready; the subsequent read-back flushes the posted write.
 * Refuses to act on a non-idle channel or when no new BDs were added.
 */
static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
{
	if (!tsi721_dma_is_idle(bdma_chan)) {
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d Attempt to start non-idle channel",
			bdma_chan->id);
		return;
	}

	/* no new descriptors since the last start — nothing to do */
	if (bdma_chan->wr_count == bdma_chan->wr_count_next) {
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d Attempt to start DMA with no BDs ready %d",
			bdma_chan->id, task_pid_nr(current));
		return;
	}

	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d (wrc=%d) %d",
		  bdma_chan->id, bdma_chan->wr_count_next,
		  task_pid_nr(current));

	iowrite32(bdma_chan->wr_count_next,
		bdma_chan->regs + TSI721_DMAC_DWRCNT);
	ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT);

	bdma_chan->wr_count = bdma_chan->wr_count_next;
}
329 | ||
/*
 * tsi721_desc_fill_init - populate a type-1 hardware buffer descriptor
 * @desc: software TX descriptor supplying destid/rtype/RapidIO address
 * @bd_ptr: hardware descriptor to fill
 * @sg: scatterlist entry providing the local bus address
 * @sys_size: RapidIO system size bits for the bcount field
 *
 * The byte count itself is written later by tsi721_desc_fill_end(), once
 * contiguous SG entries have been merged.
 *
 * Returns 0 on success, -EINVAL if @bd_ptr is NULL.
 */
static int
tsi721_desc_fill_init(struct tsi721_tx_desc *desc,
		      struct tsi721_dma_desc *bd_ptr,
		      struct scatterlist *sg, u32 sys_size)
{
	u64 rio_addr;

	if (!bd_ptr)
		return -EINVAL;

	/* Initialize DMA descriptor */
	bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
				      (desc->rtype << 19) | desc->destid);
	/* low 2 bits of the RapidIO address ride in bits 31:30 of bcount */
	bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
				     (sys_size << 26));
	rio_addr = (desc->rio_addr >> 2) |
				((u64)(desc->rio_addr_u & 0x3) << 62);
	bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
	bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32);
	bd_ptr->t1.bufptr_lo = cpu_to_le32(
					(u64)sg_dma_address(sg) & 0xffffffff);
	bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32);
	bd_ptr->t1.s_dist = 0;
	bd_ptr->t1.s_size = 0;

	return 0;
}
357 | ||
/*
 * tsi721_desc_fill_end - finalize a hardware descriptor started by
 * tsi721_desc_fill_init(): set the merged byte count and, optionally,
 * the interrupt-on-finish flag.
 *
 * Returns 0 on success, -EINVAL if @bd_ptr is NULL.
 */
static int
tsi721_desc_fill_end(struct tsi721_dma_desc *bd_ptr, u32 bcount, bool interrupt)
{
	if (!bd_ptr)
		return -EINVAL;

	/* Update DMA descriptor */
	if (interrupt)
		bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
	bd_ptr->bcount |= cpu_to_le32(bcount & TSI721_DMAD_BCOUNT1);

	return 0;
}
371 | ||
/*
 * tsi721_dma_tx_err - handle a failed transaction submission: return the
 * descriptor to the channel free list and invoke the client's completion
 * callback (if one was set).
 *
 * NOTE(review): desc->status is not set to DMA_ERROR here and the cookie is
 * not completed — the caller sees failure via tsi721_submit_sg()'s return
 * value instead; confirm this matches the dmaengine contract expected by
 * clients.
 */
static void tsi721_dma_tx_err(struct tsi721_bdma_chan *bdma_chan,
			      struct tsi721_tx_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	dma_async_tx_callback callback = txd->callback;
	void *param = txd->callback_param;

	list_move(&desc->desc_node, &bdma_chan->free_list);

	if (callback)
		callback(param);
}
384 | ||
/*
 * tsi721_clr_stat - drain the descriptor status FIFO.
 *
 * Each FIFO slot is 8 u64 words; starting at the saved read pointer,
 * consume slots until an empty (zero) word is found, zeroing entries as we
 * go, then publish the new read pointer to the hardware (DSRP register).
 */
static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
{
	u32 srd_ptr;
	u64 *sts_ptr;
	int i, j;

	/* Check and clear descriptor status FIFO entries */
	srd_ptr = bdma_chan->sts_rdptr;
	sts_ptr = bdma_chan->sts_base;
	j = srd_ptr * 8;
	while (sts_ptr[j]) {
		for (i = 0; i < 8 && sts_ptr[j]; i++, j++)
			sts_ptr[j] = 0;

		/* advance to the next slot, wrapping at FIFO size */
		++srd_ptr;
		srd_ptr %= bdma_chan->sts_size;
		j = srd_ptr * 8;
	}

	iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP);
	bdma_chan->sts_rdptr = srd_ptr;
}
407 | ||
/*
 * tsi721_submit_sg - translate a scatterlist into hardware buffer
 * descriptors for one transaction.
 * Must be called with the channel spinlock held.
 *
 * Physically contiguous SG entries are merged into a single BD (up to
 * TSI721_BDMA_MAX_BCOUNT bytes). If the HW BD ring fills before the whole
 * scatterlist is consumed, desc->sg/sg_len are updated so the remainder can
 * be submitted later; desc->sg_len == 0 marks the transaction fully mapped.
 *
 * Returns 0 on success or a negative errno (-EIO busy channel, -EINVAL
 * oversized SG entry / bad descriptor).
 */
static int tsi721_submit_sg(struct tsi721_tx_desc *desc)
{
	struct dma_chan *dchan = desc->txd.chan;
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	u32 sys_size;
	u64 rio_addr;
	dma_addr_t next_addr;
	u32 bcount;
	struct scatterlist *sg;
	unsigned int i;
	int err = 0;
	struct tsi721_dma_desc *bd_ptr = NULL;
	u32 idx, rd_idx;
	u32 add_count = 0;
	struct device *ch_dev = &dchan->dev->device;

	if (!tsi721_dma_is_idle(bdma_chan)) {
		tsi_err(ch_dev, "DMAC%d ERR: Attempt to use non-idle channel",
			bdma_chan->id);
		return -EIO;
	}

	/*
	 * Fill DMA channel's hardware buffer descriptors.
	 * (NOTE: RapidIO destination address is limited to 64 bits for now)
	 */
	rio_addr = desc->rio_addr;
	next_addr = -1;
	bcount = 0;
	sys_size = dma_to_mport(dchan->device)->sys_size;

	/* current HW read index, modulo ring size (incl. link descriptor) */
	rd_idx = ioread32(bdma_chan->regs + TSI721_DMAC_DRDCNT);
	rd_idx %= (bdma_chan->bd_num + 1);

	idx = bdma_chan->wr_count_next % (bdma_chan->bd_num + 1);
	if (idx == bdma_chan->bd_num) {
		/* wrap around link descriptor */
		idx = 0;
		add_count++;
	}

	tsi_debug(DMA, ch_dev, "DMAC%d BD ring status: rdi=%d wri=%d",
		  bdma_chan->id, rd_idx, idx);

	for_each_sg(desc->sg, sg, desc->sg_len, i) {

		tsi_debug(DMAV, ch_dev, "DMAC%d sg%d/%d addr: 0x%llx len: %d",
			bdma_chan->id, i, desc->sg_len,
			(unsigned long long)sg_dma_address(sg), sg_dma_len(sg));

		if (sg_dma_len(sg) > TSI721_BDMA_MAX_BCOUNT) {
			tsi_err(ch_dev, "DMAC%d SG entry %d is too large",
				bdma_chan->id, i);
			err = -EINVAL;
			break;
		}

		/*
		 * If this sg entry forms contiguous block with previous one,
		 * try to merge it into existing DMA descriptor
		 */
		if (next_addr == sg_dma_address(sg) &&
		    bcount + sg_dma_len(sg) <= TSI721_BDMA_MAX_BCOUNT) {
			/* Adjust byte count of the descriptor */
			bcount += sg_dma_len(sg);
			goto entry_done;
		} else if (next_addr != -1) {
			/* Finalize descriptor using total byte count value */
			tsi721_desc_fill_end(bd_ptr, bcount, 0);
			tsi_debug(DMAV, ch_dev, "DMAC%d prev desc final len: %d",
				  bdma_chan->id, bcount);
		}

		/* remember resume point in case the ring fills below */
		desc->rio_addr = rio_addr;

		if (i && idx == rd_idx) {
			tsi_debug(DMAV, ch_dev,
				  "DMAC%d HW descriptor ring is full @ %d",
				  bdma_chan->id, i);
			/* save the unprocessed tail for a later submit */
			desc->sg = sg;
			desc->sg_len -= i;
			break;
		}

		bd_ptr = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[idx];
		err = tsi721_desc_fill_init(desc, bd_ptr, sg, sys_size);
		if (err) {
			tsi_err(ch_dev, "Failed to build desc: err=%d", err);
			break;
		}

		tsi_debug(DMAV, ch_dev, "DMAC%d bd_ptr = %p did=%d raddr=0x%llx",
			  bdma_chan->id, bd_ptr, desc->destid, desc->rio_addr);

		next_addr = sg_dma_address(sg);
		bcount = sg_dma_len(sg);

		add_count++;
		if (++idx == bdma_chan->bd_num) {
			/* wrap around link descriptor */
			idx = 0;
			add_count++;
		}

entry_done:
		if (sg_is_last(sg)) {
			tsi721_desc_fill_end(bd_ptr, bcount, 0);
			tsi_debug(DMAV, ch_dev,
				  "DMAC%d last desc final len: %d",
				  bdma_chan->id, bcount);
			/* whole scatterlist consumed */
			desc->sg_len = 0;
		} else {
			rio_addr += sg_dma_len(sg);
			next_addr += sg_dma_len(sg);
		}
	}

	if (!err)
		bdma_chan->wr_count_next += add_count;

	return err;
}
531 | ||
/*
 * tsi721_advance_work - push the next transaction to the hardware.
 * @desc: transaction to (re)submit, or NULL to pull one from the queue.
 * Must be called with the channel spinlock held.
 *
 * Does nothing while the channel is busy. With @desc == NULL and no
 * transaction in flight, dequeues the next pending transaction and makes it
 * active before submitting. On submit failure the descriptor is recycled
 * via tsi721_dma_tx_err().
 */
static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan,
				struct tsi721_tx_desc *desc)
{
	int err;

	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d", bdma_chan->id);

	if (!tsi721_dma_is_idle(bdma_chan))
		return;

	/*
	 * If there is no data transfer in progress, fetch new descriptor from
	 * the pending queue.
	 */
	if (!desc && !bdma_chan->active_tx && !list_empty(&bdma_chan->queue)) {
		desc = list_first_entry(&bdma_chan->queue,
					struct tsi721_tx_desc, desc_node);
		list_del_init((&desc->desc_node));
		bdma_chan->active_tx = desc;
	}

	if (desc) {
		err = tsi721_submit_sg(desc);
		if (!err)
			tsi721_start_dma(bdma_chan);
		else {
			tsi721_dma_tx_err(bdma_chan, desc);
			tsi_debug(DMA, &bdma_chan->dchan.dev->device,
				"DMAC%d ERR: tsi721_submit_sg failed with err=%d",
				bdma_chan->id, err);
		}
	}

	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d Exit",
		  bdma_chan->id);
}
568 | ||
/*
 * tsi721_dma_tasklet - bottom-half processing for BDMA channel interrupts.
 * @data: channel pointer cast to unsigned long (tasklet convention)
 *
 * Reads and clears the channel interrupt status, then handles in order:
 * transfer errors (abort + full channel re-initialization), status-FIFO
 * overflow (logged only), and completion events (complete the active
 * transaction, run its callback, and start the next one). Channel
 * interrupts, masked by the top half, are re-enabled on exit.
 */
static void tsi721_dma_tasklet(unsigned long data)
{
	struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data;
	u32 dmac_int, dmac_sts;

	dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d_INT = 0x%x",
		  bdma_chan->id, dmac_int);
	/* Clear channel interrupts */
	iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT);

	if (dmac_int & TSI721_DMAC_INT_ERR) {
		int i = 10000;	/* abort-completion poll budget (~10ms) */
		struct tsi721_tx_desc *desc;

		/* NOTE(review): active_tx is dereferenced without a NULL
		 * check — an error interrupt with no active transaction
		 * would oops; confirm this cannot occur. */
		desc = bdma_chan->active_tx;
		dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d_STS = 0x%x did=%d raddr=0x%llx",
			bdma_chan->id, dmac_sts, desc->destid, desc->rio_addr);

		/* Re-initialize DMA channel if possible */

		if ((dmac_sts & TSI721_DMAC_STS_ABORT) == 0)
			goto err_out;

		tsi721_clr_stat(bdma_chan);

		spin_lock(&bdma_chan->lock);

		/* Put DMA channel into init state */
		iowrite32(TSI721_DMAC_CTL_INIT,
			  bdma_chan->regs + TSI721_DMAC_CTL);
		/* wait for the ABORT condition to clear */
		do {
			udelay(1);
			dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
			i--;
		} while ((dmac_sts & TSI721_DMAC_STS_ABORT) && i);

		if (dmac_sts & TSI721_DMAC_STS_ABORT) {
			tsi_err(&bdma_chan->dchan.dev->device,
				"Failed to re-initiate DMAC%d", bdma_chan->id);
			spin_unlock(&bdma_chan->lock);
			goto err_out;
		}

		/* Setup DMA descriptor pointers */
		iowrite32(((u64)bdma_chan->bd_phys >> 32),
			bdma_chan->regs + TSI721_DMAC_DPTRH);
		iowrite32(((u64)bdma_chan->bd_phys & TSI721_DMAC_DPTRL_MASK),
			bdma_chan->regs + TSI721_DMAC_DPTRL);

		/* Setup descriptor status FIFO */
		iowrite32(((u64)bdma_chan->sts_phys >> 32),
			bdma_chan->regs + TSI721_DMAC_DSBH);
		iowrite32(((u64)bdma_chan->sts_phys & TSI721_DMAC_DSBL_MASK),
			bdma_chan->regs + TSI721_DMAC_DSBL);
		iowrite32(TSI721_DMAC_DSSZ_SIZE(bdma_chan->sts_size),
			bdma_chan->regs + TSI721_DMAC_DSSZ);

		/* Clear interrupt bits */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INT);

		ioread32(bdma_chan->regs + TSI721_DMAC_INT);

		bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
		bdma_chan->sts_rdptr = 0;
		udelay(10);

		/* fail the active transaction and recycle its descriptor */
		desc = bdma_chan->active_tx;
		desc->status = DMA_ERROR;
		dma_cookie_complete(&desc->txd);
		list_add(&desc->desc_node, &bdma_chan->free_list);
		bdma_chan->active_tx = NULL;
		if (bdma_chan->active)
			tsi721_advance_work(bdma_chan, NULL);
		spin_unlock(&bdma_chan->lock);
	}

	if (dmac_int & TSI721_DMAC_INT_STFULL) {
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d descriptor status FIFO is full",
			bdma_chan->id);
	}

	if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) {
		struct tsi721_tx_desc *desc;

		tsi721_clr_stat(bdma_chan);
		spin_lock(&bdma_chan->lock);
		desc = bdma_chan->active_tx;

		/* sg_len == 0 means the whole transaction was mapped */
		if (desc->sg_len == 0) {
			dma_async_tx_callback callback = NULL;
			void *param = NULL;

			desc->status = DMA_COMPLETE;
			dma_cookie_complete(&desc->txd);
			if (desc->txd.flags & DMA_PREP_INTERRUPT) {
				callback = desc->txd.callback;
				param = desc->txd.callback_param;
			}
			list_add(&desc->desc_node, &bdma_chan->free_list);
			bdma_chan->active_tx = NULL;
			if (bdma_chan->active)
				tsi721_advance_work(bdma_chan, NULL);
			/* run the client callback outside the lock */
			spin_unlock(&bdma_chan->lock);
			if (callback)
				callback(param);
		} else {
			/* partial transfer — submit the remaining SG tail */
			if (bdma_chan->active)
				tsi721_advance_work(bdma_chan,
						    bdma_chan->active_tx);
			spin_unlock(&bdma_chan->lock);
		}
	}
err_out:
	/* Re-Enable BDMA channel interrupts */
	iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE);
}
690 | ||
/*
 * tsi721_tx_submit - dmaengine tx_submit callback: queue a prepared
 * transaction on the channel and try to start it immediately.
 *
 * Returns the assigned cookie, -EIO if the descriptor is still linked to a
 * list (bad state), or -ENODEV if the channel has been shut down.
 */
static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tsi721_tx_desc *desc = to_tsi721_desc(txd);
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan);
	dma_cookie_t cookie;

	/* Check if the descriptor is detached from any lists */
	if (!list_empty(&desc->desc_node)) {
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d wrong state of descriptor %p",
			bdma_chan->id, txd);
		return -EIO;
	}

	spin_lock_bh(&bdma_chan->lock);

	if (!bdma_chan->active) {
		spin_unlock_bh(&bdma_chan->lock);
		return -ENODEV;
	}

	cookie = dma_cookie_assign(txd);
	desc->status = DMA_IN_PROGRESS;
	list_add_tail(&desc->desc_node, &bdma_chan->queue);
	/* start right away if the channel is idle */
	tsi721_advance_work(bdma_chan, NULL);

	spin_unlock_bh(&bdma_chan->lock);
	return cookie;
}
720 | ||
/*
 * tsi721_alloc_chan_resources - dmaengine callback: set up a channel for use.
 *
 * Initializes the hardware (BD ring, status FIFO, IRQs), allocates the
 * software TX-descriptor pool, enables channel interrupts, and marks the
 * channel active.
 *
 * Returns the number of allocated TX descriptors (dma_txqueue_sz), or a
 * negative errno on failure. Idempotent: returns early if already set up.
 */
static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc;
	int i;

	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

	/* already initialized — nothing to do */
	if (bdma_chan->bd_base)
		return dma_txqueue_sz;

	/* Initialize BDMA channel */
	if (tsi721_bdma_ch_init(bdma_chan, dma_desc_per_channel)) {
		tsi_err(&dchan->dev->device, "Unable to initialize DMAC%d",
			bdma_chan->id);
		return -ENODEV;
	}

	/* Allocate queue of transaction descriptors */
	desc = kcalloc(dma_txqueue_sz, sizeof(struct tsi721_tx_desc),
			GFP_ATOMIC);
	if (!desc) {
		tsi721_bdma_ch_free(bdma_chan);
		return -ENOMEM;
	}

	bdma_chan->tx_desc = desc;

	for (i = 0; i < dma_txqueue_sz; i++) {
		dma_async_tx_descriptor_init(&desc[i].txd, dchan);
		desc[i].txd.tx_submit = tsi721_tx_submit;
		desc[i].txd.flags = DMA_CTRL_ACK;
		list_add(&desc[i].desc_node, &bdma_chan->free_list);
	}

	dma_cookie_init(dchan);

	bdma_chan->active = true;
	tsi721_bdma_interrupt_enable(bdma_chan, 1);

	return dma_txqueue_sz;
}
763 | ||
/*
 * tsi721_sync_dma_irq - wait until any in-flight interrupt handler for this
 * channel has finished (both MSI-X vectors, or the shared legacy/MSI line).
 * Used during channel teardown before killing the tasklet.
 */
static void tsi721_sync_dma_irq(struct tsi721_bdma_chan *bdma_chan)
{
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE +
					   bdma_chan->id].vector);
		synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT +
					   bdma_chan->id].vector);
	} else
#endif
	synchronize_irq(priv->pdev->irq);
}
04379dff | 778 | |
/*
 * tsi721_free_chan_resources - dmaengine callback: tear down a channel.
 *
 * Disables interrupts, quiesces IRQ handlers and the tasklet, releases the
 * software TX-descriptor pool and the hardware resources. No-op if the
 * channel was never initialized.
 */
static void tsi721_free_chan_resources(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

	if (!bdma_chan->bd_base)
		return;

	/* stop new work, then wait out any handler already running */
	tsi721_bdma_interrupt_enable(bdma_chan, 0);
	bdma_chan->active = false;
	tsi721_sync_dma_irq(bdma_chan);
	tasklet_kill(&bdma_chan->tasklet);
	/* free_list nodes point into tx_desc — reset before freeing it */
	INIT_LIST_HEAD(&bdma_chan->free_list);
	kfree(bdma_chan->tx_desc);
	tsi721_bdma_ch_free(bdma_chan);
}
796 | ||
/*
 * tsi721_tx_status - dmaengine callback: report the completion status of a
 * cookie. Delegates to the generic dma_cookie_status() under the channel
 * lock.
 */
static
enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
				 struct dma_tx_state *txstate)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	enum dma_status	status;

	spin_lock_bh(&bdma_chan->lock);
	status = dma_cookie_status(dchan, cookie, txstate);
	spin_unlock_bh(&bdma_chan->lock);
	return status;
}
809 | ||
/*
 * tsi721_issue_pending - dmaengine callback: kick processing of queued
 * transactions if the channel is idle and still active.
 */
static void tsi721_issue_pending(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

	spin_lock_bh(&bdma_chan->lock);
	if (tsi721_dma_is_idle(bdma_chan) && bdma_chan->active) {
		tsi721_advance_work(bdma_chan, NULL);
	}
	spin_unlock_bh(&bdma_chan->lock);
}
822 | ||
/*
 * tsi721_prep_rio_sg - prepare a RapidIO scatter/gather transaction.
 * @dchan: target DMA channel
 * @sgl/@sg_len: already-mapped scatterlist describing local memory
 * @dir: DMA_DEV_TO_MEM (NREAD) or DMA_MEM_TO_DEV (NWRITE variants)
 * @flags: dmaengine descriptor flags
 * @tinfo: struct rio_dma_ext with destination ID, RapidIO address and
 *         write-type hint
 *
 * Takes a TX descriptor from the channel free list and fills in the
 * RapidIO-specific transfer parameters.
 *
 * Returns the async TX descriptor, or ERR_PTR(-EINVAL) for bad arguments /
 * unsupported direction, ERR_PTR(-EBUSY) when no free descriptor is
 * available.
 */
static
struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
			struct scatterlist *sgl, unsigned int sg_len,
			enum dma_transfer_direction dir, unsigned long flags,
			void *tinfo)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc;
	struct rio_dma_ext *rext = tinfo;
	enum dma_rtype rtype;
	struct dma_async_tx_descriptor *txd = NULL;

	if (!sgl || !sg_len) {
		tsi_err(&dchan->dev->device, "DMAC%d No SG list",
			bdma_chan->id);
		return ERR_PTR(-EINVAL);
	}

	tsi_debug(DMA, &dchan->dev->device, "DMAC%d %s", bdma_chan->id,
		  (dir == DMA_DEV_TO_MEM)?"READ":"WRITE");

	/* map dmaengine direction + caller hint to a RapidIO request type */
	if (dir == DMA_DEV_TO_MEM)
		rtype = NREAD;
	else if (dir == DMA_MEM_TO_DEV) {
		switch (rext->wr_type) {
		case RDW_ALL_NWRITE:
			rtype = ALL_NWRITE;
			break;
		case RDW_ALL_NWRITE_R:
			rtype = ALL_NWRITE_R;
			break;
		case RDW_LAST_NWRITE_R:
		default:
			rtype = LAST_NWRITE_R;
			break;
		}
	} else {
		tsi_err(&dchan->dev->device,
			"DMAC%d Unsupported DMA direction option",
			bdma_chan->id);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_bh(&bdma_chan->lock);

	if (!list_empty(&bdma_chan->free_list)) {
		desc = list_first_entry(&bdma_chan->free_list,
				struct tsi721_tx_desc, desc_node);
		list_del_init(&desc->desc_node);
		desc->destid = rext->destid;
		desc->rio_addr = rext->rio_addr;
		desc->rio_addr_u = 0;
		desc->rtype = rtype;
		desc->sg_len	= sg_len;
		desc->sg	= sgl;
		txd		= &desc->txd;
		txd->flags	= flags;
	}

	spin_unlock_bh(&bdma_chan->lock);

	if (!txd) {
		tsi_debug(DMA, &dchan->dev->device,
			  "DMAC%d free TXD is not available", bdma_chan->id);
		return ERR_PTR(-EBUSY);
	}

	return txd;
}
892 | ||
7664cfe0 | 893 | static int tsi721_terminate_all(struct dma_chan *dchan) |
9eaa3d9b AB |
894 | { |
895 | struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); | |
896 | struct tsi721_tx_desc *desc, *_d; | |
897 | LIST_HEAD(list); | |
898 | ||
72d8a0d2 | 899 | tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id); |
9eaa3d9b | 900 | |
9eaa3d9b AB |
901 | spin_lock_bh(&bdma_chan->lock); |
902 | ||
50835e97 AB |
903 | bdma_chan->active = false; |
904 | ||
458bdf6e AB |
905 | while (!tsi721_dma_is_idle(bdma_chan)) { |
906 | ||
907 | udelay(5); | |
908 | #if (0) | |
50835e97 AB |
909 | /* make sure to stop the transfer */ |
910 | iowrite32(TSI721_DMAC_CTL_SUSP, | |
911 | bdma_chan->regs + TSI721_DMAC_CTL); | |
912 | ||
913 | /* Wait until DMA channel stops */ | |
914 | do { | |
915 | dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT); | |
916 | } while ((dmac_int & TSI721_DMAC_INT_SUSP) == 0); | |
458bdf6e | 917 | #endif |
50835e97 | 918 | } |
9eaa3d9b | 919 | |
d2a321f3 AB |
920 | if (bdma_chan->active_tx) |
921 | list_add(&bdma_chan->active_tx->desc_node, &list); | |
9eaa3d9b AB |
922 | list_splice_init(&bdma_chan->queue, &list); |
923 | ||
924 | list_for_each_entry_safe(desc, _d, &list, desc_node) | |
50835e97 | 925 | tsi721_dma_tx_err(bdma_chan, desc); |
9eaa3d9b AB |
926 | |
927 | spin_unlock_bh(&bdma_chan->lock); | |
928 | ||
929 | return 0; | |
930 | } | |
931 | ||
/*
 * Suspend a single BDMA channel and wait (bounded) for it to go idle.
 *
 * Writes TSI721_DMAC_CTL_SUSP to the channel control register if a
 * transfer is in progress, then polls for idle for up to ~100 ms.
 * A channel that never started (!active) is left untouched.
 */
static void tsi721_dma_stop(struct tsi721_bdma_chan *bdma_chan)
{
	/* Unlocked fast-path check: inactive channels have nothing to stop */
	if (!bdma_chan->active)
		return;
	spin_lock_bh(&bdma_chan->lock);
	if (!tsi721_dma_is_idle(bdma_chan)) {
		int timeout = 100000;	/* 100000 * udelay(1) ≈ 100 ms cap */

		/* stop the transfer in progress */
		iowrite32(TSI721_DMAC_CTL_SUSP,
			  bdma_chan->regs + TSI721_DMAC_CTL);

		/* Wait until DMA channel stops (gives up after timeout) */
		while (!tsi721_dma_is_idle(bdma_chan) && --timeout)
			udelay(1);
	}

	spin_unlock_bh(&bdma_chan->lock);
}
951 | ||
952 | void tsi721_dma_stop_all(struct tsi721_device *priv) | |
953 | { | |
954 | int i; | |
955 | ||
956 | for (i = 0; i < TSI721_DMA_MAXCH; i++) { | |
4498c31a | 957 | if ((i != TSI721_DMACH_MAINT) && (dma_sel & (1 << i))) |
e3dd8cd4 AB |
958 | tsi721_dma_stop(&priv->bdma[i]); |
959 | } | |
960 | } | |
961 | ||
/*
 * Initialize and register the Tsi721 DMA engine with the dmaengine core.
 *
 * Sets up every BDMA channel selected by the dma_sel module parameter
 * (skipping the maintenance channel), fills in the dma_device callbacks
 * on priv->mport.dma and registers it via dma_async_device_register().
 *
 * Returns 0 on success or the negative error from registration.
 */
int tsi721_register_dma(struct tsi721_device *priv)
{
	int i;
	int nr_channels = 0;
	int err;
	struct rio_mport *mport = &priv->mport;

	INIT_LIST_HEAD(&mport->dma.channels);

	for (i = 0; i < TSI721_DMA_MAXCH; i++) {
		struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];

		/* Skip the maintenance channel and channels masked out
		 * by the dma_sel module parameter.
		 */
		if ((i == TSI721_DMACH_MAINT) || (dma_sel & (1 << i)) == 0)
			continue;

		bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);

		bdma_chan->dchan.device = &mport->dma;
		bdma_chan->dchan.cookie = 1;	/* initial dmaengine cookie */
		bdma_chan->dchan.chan_id = i;
		bdma_chan->id = i;
		bdma_chan->active = false;	/* activated on channel alloc */

		spin_lock_init(&bdma_chan->lock);

		bdma_chan->active_tx = NULL;
		INIT_LIST_HEAD(&bdma_chan->queue);
		INIT_LIST_HEAD(&bdma_chan->free_list);

		tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
			     (unsigned long)bdma_chan);
		list_add_tail(&bdma_chan->dchan.device_node,
			      &mport->dma.channels);
		nr_channels++;
	}

	mport->dma.chancnt = nr_channels;
	dma_cap_zero(mport->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, mport->dma.cap_mask);

	/* Wire up the dmaengine provider callbacks */
	mport->dma.dev = &priv->pdev->dev;
	mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources;
	mport->dma.device_free_chan_resources = tsi721_free_chan_resources;
	mport->dma.device_tx_status = tsi721_tx_status;
	mport->dma.device_issue_pending = tsi721_issue_pending;
	mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
	mport->dma.device_terminate_all = tsi721_terminate_all;

	err = dma_async_device_register(&mport->dma);
	if (err)
		tsi_err(&priv->pdev->dev, "Failed to register DMA device");

	return err;
}
/*
 * Tear down the Tsi721 DMA engine: stop all channels, unregister from
 * the dmaengine core, then release per-channel resources.
 *
 * Ordering matters: hardware is quiesced first (tsi721_dma_stop_all),
 * the dma_device is unregistered so no new clients can attach, and only
 * then are still-active channels' IRQs disabled, tasklets killed and
 * descriptor memory freed.
 */
void tsi721_unregister_dma(struct tsi721_device *priv)
{
	struct rio_mport *mport = &priv->mport;
	struct dma_chan *chan, *_c;
	struct tsi721_bdma_chan *bdma_chan;

	tsi721_dma_stop_all(priv);
	dma_async_device_unregister(&mport->dma);

	list_for_each_entry_safe(chan, _c, &mport->dma.channels,
					device_node) {
		bdma_chan = to_tsi721_chan(chan);
		if (bdma_chan->active) {
			/* Same release sequence as free_chan_resources:
			 * mask IRQs, mark inactive, sync outstanding IRQs,
			 * kill the tasklet, then free descriptors and the
			 * hardware descriptor ring.
			 */
			tsi721_bdma_interrupt_enable(bdma_chan, 0);
			bdma_chan->active = false;
			tsi721_sync_dma_irq(bdma_chan);
			tasklet_kill(&bdma_chan->tasklet);
			INIT_LIST_HEAD(&bdma_chan->free_list);
			kfree(bdma_chan->tx_desc);
			tsi721_bdma_ch_free(bdma_chan);
		}

		list_del(&chan->device_node);
	}
}