Commit | Line | Data |
---|---|---|
9cd4360d ST |
1 | /* |
2 | * DMA driver for Xilinx Video DMA Engine | |
3 | * | |
4 | * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved. | |
5 | * | |
6 | * Based on the Freescale DMA driver. | |
7 | * | |
8 | * Description: | |
9 | * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP | |
10 | * core that provides high-bandwidth direct memory access between memory | |
11 | * and AXI4-Stream type video target peripherals. The core provides efficient | |
12 | * two dimensional DMA operations with independent asynchronous read (S2MM) | |
13 | * and write (MM2S) channel operation. It can be configured to have either | |
14 | * one channel or two channels. If configured as two channels, one is to | |
15 | * transmit to the video device (MM2S) and another is to receive from the | |
16 | * video device (S2MM). Initialization, status, interrupt and management | |
17 | * registers are accessed through an AXI4-Lite slave interface. | |
18 | * | |
19 | * This program is free software: you can redistribute it and/or modify | |
20 | * it under the terms of the GNU General Public License as published by | |
21 | * the Free Software Foundation, either version 2 of the License, or | |
22 | * (at your option) any later version. | |
23 | */ | |
24 | ||
9cd4360d ST |
25 | #include <linux/bitops.h> |
26 | #include <linux/dmapool.h> | |
937abe88 | 27 | #include <linux/dma/xilinx_dma.h> |
9cd4360d ST |
28 | #include <linux/init.h> |
29 | #include <linux/interrupt.h> | |
30 | #include <linux/io.h> | |
9495f264 | 31 | #include <linux/iopoll.h> |
9cd4360d ST |
32 | #include <linux/module.h> |
33 | #include <linux/of_address.h> | |
34 | #include <linux/of_dma.h> | |
35 | #include <linux/of_platform.h> | |
36 | #include <linux/of_irq.h> | |
37 | #include <linux/slab.h> | |
38 | ||
39 | #include "../dmaengine.h" | |
40 | ||
41 | /* Register/Descriptor Offsets */ | |
42 | #define XILINX_VDMA_MM2S_CTRL_OFFSET 0x0000 | |
43 | #define XILINX_VDMA_S2MM_CTRL_OFFSET 0x0030 | |
44 | #define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050 | |
45 | #define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0 | |
46 | ||
47 | /* Control Registers */ | |
48 | #define XILINX_VDMA_REG_DMACR 0x0000 | |
49 | #define XILINX_VDMA_DMACR_DELAY_MAX 0xff | |
50 | #define XILINX_VDMA_DMACR_DELAY_SHIFT 24 | |
51 | #define XILINX_VDMA_DMACR_FRAME_COUNT_MAX 0xff | |
52 | #define XILINX_VDMA_DMACR_FRAME_COUNT_SHIFT 16 | |
53 | #define XILINX_VDMA_DMACR_ERR_IRQ BIT(14) | |
54 | #define XILINX_VDMA_DMACR_DLY_CNT_IRQ BIT(13) | |
55 | #define XILINX_VDMA_DMACR_FRM_CNT_IRQ BIT(12) | |
56 | #define XILINX_VDMA_DMACR_MASTER_SHIFT 8 | |
57 | #define XILINX_VDMA_DMACR_FSYNCSRC_SHIFT 5 | |
58 | #define XILINX_VDMA_DMACR_FRAMECNT_EN BIT(4) | |
59 | #define XILINX_VDMA_DMACR_GENLOCK_EN BIT(3) | |
60 | #define XILINX_VDMA_DMACR_RESET BIT(2) | |
61 | #define XILINX_VDMA_DMACR_CIRC_EN BIT(1) | |
62 | #define XILINX_VDMA_DMACR_RUNSTOP BIT(0) | |
63 | #define XILINX_VDMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5) | |
64 | ||
65 | #define XILINX_VDMA_REG_DMASR 0x0004 | |
66 | #define XILINX_VDMA_DMASR_EOL_LATE_ERR BIT(15) | |
67 | #define XILINX_VDMA_DMASR_ERR_IRQ BIT(14) | |
68 | #define XILINX_VDMA_DMASR_DLY_CNT_IRQ BIT(13) | |
69 | #define XILINX_VDMA_DMASR_FRM_CNT_IRQ BIT(12) | |
70 | #define XILINX_VDMA_DMASR_SOF_LATE_ERR BIT(11) | |
71 | #define XILINX_VDMA_DMASR_SG_DEC_ERR BIT(10) | |
72 | #define XILINX_VDMA_DMASR_SG_SLV_ERR BIT(9) | |
73 | #define XILINX_VDMA_DMASR_EOF_EARLY_ERR BIT(8) | |
74 | #define XILINX_VDMA_DMASR_SOF_EARLY_ERR BIT(7) | |
75 | #define XILINX_VDMA_DMASR_DMA_DEC_ERR BIT(6) | |
76 | #define XILINX_VDMA_DMASR_DMA_SLAVE_ERR BIT(5) | |
77 | #define XILINX_VDMA_DMASR_DMA_INT_ERR BIT(4) | |
78 | #define XILINX_VDMA_DMASR_IDLE BIT(1) | |
79 | #define XILINX_VDMA_DMASR_HALTED BIT(0) | |
80 | #define XILINX_VDMA_DMASR_DELAY_MASK GENMASK(31, 24) | |
81 | #define XILINX_VDMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16) | |
82 | ||
83 | #define XILINX_VDMA_REG_CURDESC 0x0008 | |
84 | #define XILINX_VDMA_REG_TAILDESC 0x0010 | |
85 | #define XILINX_VDMA_REG_REG_INDEX 0x0014 | |
86 | #define XILINX_VDMA_REG_FRMSTORE 0x0018 | |
87 | #define XILINX_VDMA_REG_THRESHOLD 0x001c | |
88 | #define XILINX_VDMA_REG_FRMPTR_STS 0x0024 | |
89 | #define XILINX_VDMA_REG_PARK_PTR 0x0028 | |
90 | #define XILINX_VDMA_PARK_PTR_WR_REF_SHIFT 8 | |
91 | #define XILINX_VDMA_PARK_PTR_RD_REF_SHIFT 0 | |
92 | #define XILINX_VDMA_REG_VDMA_VERSION 0x002c | |
93 | ||
94 | /* Register Direct Mode Registers */ | |
95 | #define XILINX_VDMA_REG_VSIZE 0x0000 | |
96 | #define XILINX_VDMA_REG_HSIZE 0x0004 | |
97 | ||
98 | #define XILINX_VDMA_REG_FRMDLY_STRIDE 0x0008 | |
99 | #define XILINX_VDMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24 | |
100 | #define XILINX_VDMA_FRMDLY_STRIDE_STRIDE_SHIFT 0 | |
101 | ||
102 | #define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n)) | |
103 | ||
104 | /* HW specific definitions */ | |
105 | #define XILINX_VDMA_MAX_CHANS_PER_DEVICE 0x2 | |
106 | ||
107 | #define XILINX_VDMA_DMAXR_ALL_IRQ_MASK \ | |
108 | (XILINX_VDMA_DMASR_FRM_CNT_IRQ | \ | |
109 | XILINX_VDMA_DMASR_DLY_CNT_IRQ | \ | |
110 | XILINX_VDMA_DMASR_ERR_IRQ) | |
111 | ||
112 | #define XILINX_VDMA_DMASR_ALL_ERR_MASK \ | |
113 | (XILINX_VDMA_DMASR_EOL_LATE_ERR | \ | |
114 | XILINX_VDMA_DMASR_SOF_LATE_ERR | \ | |
115 | XILINX_VDMA_DMASR_SG_DEC_ERR | \ | |
116 | XILINX_VDMA_DMASR_SG_SLV_ERR | \ | |
117 | XILINX_VDMA_DMASR_EOF_EARLY_ERR | \ | |
118 | XILINX_VDMA_DMASR_SOF_EARLY_ERR | \ | |
119 | XILINX_VDMA_DMASR_DMA_DEC_ERR | \ | |
120 | XILINX_VDMA_DMASR_DMA_SLAVE_ERR | \ | |
121 | XILINX_VDMA_DMASR_DMA_INT_ERR) | |
122 | ||
123 | /* | |
124 | * Recoverable errors are DMA Internal error, SOF Early, EOF Early | |
125 | * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC | |
126 | * is enabled in the h/w system. | |
127 | */ | |
128 | #define XILINX_VDMA_DMASR_ERR_RECOVER_MASK \ | |
129 | (XILINX_VDMA_DMASR_SOF_LATE_ERR | \ | |
130 | XILINX_VDMA_DMASR_EOF_EARLY_ERR | \ | |
131 | XILINX_VDMA_DMASR_SOF_EARLY_ERR | \ | |
132 | XILINX_VDMA_DMASR_DMA_INT_ERR) | |
133 | ||
134 | /* Axi VDMA Flush on Fsync bits */ | |
135 | #define XILINX_VDMA_FLUSH_S2MM 3 | |
136 | #define XILINX_VDMA_FLUSH_MM2S 2 | |
137 | #define XILINX_VDMA_FLUSH_BOTH 1 | |
138 | ||
139 | /* Delay loop counter to prevent hardware failure */ | |
140 | #define XILINX_VDMA_LOOP_COUNT 1000000 | |
141 | ||
/**
 * struct xilinx_vdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @pad1: Reserved @0x04
 * @buf_addr: Buffer address @0x08
 * @pad2: Reserved @0x0C
 * @vsize: Vertical Size @0x10
 * @hsize: Horizontal Size @0x14
 * @stride: Number of bytes between the first
 *	    pixels of each horizontal line @0x18
 *
 * This is the in-memory layout the VDMA engine fetches in SG mode; the
 * field order and the 64-byte alignment are hardware requirements (see
 * the pool alignment in xilinx_vdma_alloc_chan_resources()), so do not
 * reorder or repack this structure.
 */
struct xilinx_vdma_desc_hw {
	u32 next_desc;
	u32 pad1;
	u32 buf_addr;
	u32 pad2;
	u32 vsize;
	u32 hsize;
	u32 stride;
} __aligned(64);
162 | ||
/**
 * struct xilinx_vdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor (must stay first: the segment's @phys is
 *	handed to the engine as the descriptor address)
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment, as returned by dma_pool_alloc()
 */
struct xilinx_vdma_tx_segment {
	struct xilinx_vdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);
174 | ||
/**
 * struct xilinx_vdma_tx_descriptor - Per Transaction structure
 * @async_tx: Async transaction descriptor
 * @segments: TX segments list (struct xilinx_vdma_tx_segment entries,
 *	      freed together with the descriptor)
 * @node: Node in the channel descriptors list (pending/active/done)
 */
struct xilinx_vdma_tx_descriptor {
	struct dma_async_tx_descriptor async_tx;
	struct list_head segments;
	struct list_head node;
};
186 | ||
/**
 * struct xilinx_vdma_chan - Driver specific VDMA channel structure
 * @xdev: Driver specific device structure
 * @ctrl_offset: Control registers offset
 * @desc_offset: TX descriptor registers offset
 * @lock: Descriptor operation lock (protects the three lists below and
 *	  @desc_pendingcount)
 * @pending_list: Descriptors waiting
 * @active_list: Descriptors ready to submit
 * @done_list: Complete descriptors
 * @common: DMA common channel
 * @desc_pool: Descriptors pool
 * @dev: The dma device
 * @irq: Channel IRQ
 * @id: Channel ID
 * @direction: Transfer direction
 * @num_frms: Number of frames
 * @has_sg: Support scatter transfers
 * @genlock: Support genlock mode
 * @err: Channel has errors
 * @tasklet: Cleanup work after irq
 * @config: Device configuration info
 * @flush_on_fsync: Flush on Frame sync
 * @desc_pendingcount: Descriptor pending count (clamped to @num_frms)
 */
struct xilinx_vdma_chan {
	struct xilinx_vdma_device *xdev;
	u32 ctrl_offset;
	u32 desc_offset;
	spinlock_t lock;
	struct list_head pending_list;
	struct list_head active_list;
	struct list_head done_list;
	struct dma_chan common;
	struct dma_pool *desc_pool;
	struct device *dev;
	int irq;
	int id;
	enum dma_transfer_direction direction;
	int num_frms;
	bool has_sg;
	bool genlock;
	bool err;
	struct tasklet_struct tasklet;
	struct xilinx_vdma_config config;
	bool flush_on_fsync;
	u32 desc_pendingcount;
};
234 | ||
/**
 * struct xilinx_vdma_device - VDMA device structure
 * @regs: I/O mapped base address
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific VDMA channel (at most one MM2S and one S2MM)
 * @has_sg: Specifies whether Scatter-Gather is present or not
 * @flush_on_fsync: Flush on frame sync (XILINX_VDMA_FLUSH_* value)
 */
struct xilinx_vdma_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct xilinx_vdma_chan *chan[XILINX_VDMA_MAX_CHANS_PER_DEVICE];
	bool has_sg;
	u32 flush_on_fsync;
};
252 | ||
253 | /* Macros */ | |
254 | #define to_xilinx_chan(chan) \ | |
255 | container_of(chan, struct xilinx_vdma_chan, common) | |
256 | #define to_vdma_tx_descriptor(tx) \ | |
257 | container_of(tx, struct xilinx_vdma_tx_descriptor, async_tx) | |
9495f264 KA |
258 | #define xilinx_vdma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \ |
259 | readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \ | |
260 | cond, delay_us, timeout_us) | |
9cd4360d ST |
261 | |
/* IO accessors */

/* Read a 32-bit register at @reg, relative to the device register base */
static inline u32 vdma_read(struct xilinx_vdma_chan *chan, u32 reg)
{
	return ioread32(chan->xdev->regs + reg);
}

/* Write @value to the 32-bit register at @reg, relative to the device base */
static inline void vdma_write(struct xilinx_vdma_chan *chan, u32 reg, u32 value)
{
	iowrite32(value, chan->xdev->regs + reg);
}

/* Write a register in this channel's descriptor register space */
static inline void vdma_desc_write(struct xilinx_vdma_chan *chan, u32 reg,
				   u32 value)
{
	vdma_write(chan, chan->desc_offset + reg, value);
}

/* Read a register from this channel's control register space */
static inline u32 vdma_ctrl_read(struct xilinx_vdma_chan *chan, u32 reg)
{
	return vdma_read(chan, chan->ctrl_offset + reg);
}

/* Write a register in this channel's control register space */
static inline void vdma_ctrl_write(struct xilinx_vdma_chan *chan, u32 reg,
				   u32 value)
{
	vdma_write(chan, chan->ctrl_offset + reg, value);
}

/* Read-modify-write: clear the @clr bits in control register @reg */
static inline void vdma_ctrl_clr(struct xilinx_vdma_chan *chan, u32 reg,
				 u32 clr)
{
	vdma_ctrl_write(chan, reg, vdma_ctrl_read(chan, reg) & ~clr);
}

/* Read-modify-write: set the @set bits in control register @reg */
static inline void vdma_ctrl_set(struct xilinx_vdma_chan *chan, u32 reg,
				 u32 set)
{
	vdma_ctrl_write(chan, reg, vdma_ctrl_read(chan, reg) | set);
}
301 | ||
302 | /* ----------------------------------------------------------------------------- | |
303 | * Descriptors and segments alloc and free | |
304 | */ | |
305 | ||
/**
 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific VDMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_vdma_tx_segment *
xilinx_vdma_alloc_tx_segment(struct xilinx_vdma_chan *chan)
{
	struct xilinx_vdma_tx_segment *segment;
	dma_addr_t phys;

	/* GFP_ATOMIC: presumably callable from atomic context — confirm callers */
	segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	/* Pool memory is not zeroed; clear all hardware descriptor fields */
	memset(segment, 0, sizeof(*segment));
	segment->phys = phys;

	return segment;
}
327 | ||
/**
 * xilinx_vdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific VDMA channel
 * @segment: VDMA transaction segment (returned to @chan->desc_pool)
 */
static void xilinx_vdma_free_tx_segment(struct xilinx_vdma_chan *chan,
					struct xilinx_vdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}
338 | ||
/**
 * xilinx_vdma_alloc_tx_descriptor - Allocate transaction descriptor
 * @chan: Driver specific VDMA channel
 *
 * Return: The allocated descriptor on success and NULL on failure.
 */
static struct xilinx_vdma_tx_descriptor *
xilinx_vdma_alloc_tx_descriptor(struct xilinx_vdma_chan *chan)
{
	struct xilinx_vdma_tx_descriptor *desc;

	/* GFP_KERNEL: may sleep, so this must not run in atomic context */
	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	INIT_LIST_HEAD(&desc->segments);

	return desc;
}
358 | ||
359 | /** | |
360 | * xilinx_vdma_free_tx_descriptor - Free transaction descriptor | |
361 | * @chan: Driver specific VDMA channel | |
362 | * @desc: VDMA transaction descriptor | |
363 | */ | |
364 | static void | |
365 | xilinx_vdma_free_tx_descriptor(struct xilinx_vdma_chan *chan, | |
366 | struct xilinx_vdma_tx_descriptor *desc) | |
367 | { | |
368 | struct xilinx_vdma_tx_segment *segment, *next; | |
369 | ||
370 | if (!desc) | |
371 | return; | |
372 | ||
373 | list_for_each_entry_safe(segment, next, &desc->segments, node) { | |
374 | list_del(&segment->node); | |
375 | xilinx_vdma_free_tx_segment(chan, segment); | |
376 | } | |
377 | ||
378 | kfree(desc); | |
379 | } | |
380 | ||
381 | /* Required functions */ | |
382 | ||
383 | /** | |
384 | * xilinx_vdma_free_desc_list - Free descriptors list | |
385 | * @chan: Driver specific VDMA channel | |
386 | * @list: List to parse and delete the descriptor | |
387 | */ | |
388 | static void xilinx_vdma_free_desc_list(struct xilinx_vdma_chan *chan, | |
389 | struct list_head *list) | |
390 | { | |
391 | struct xilinx_vdma_tx_descriptor *desc, *next; | |
392 | ||
393 | list_for_each_entry_safe(desc, next, list, node) { | |
394 | list_del(&desc->node); | |
395 | xilinx_vdma_free_tx_descriptor(chan, desc); | |
396 | } | |
397 | } | |
398 | ||
/**
 * xilinx_vdma_free_descriptors - Free channel descriptors
 * @chan: Driver specific VDMA channel
 *
 * Empties the pending, done and active lists under @chan->lock.
 */
static void xilinx_vdma_free_descriptors(struct xilinx_vdma_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	xilinx_vdma_free_desc_list(chan, &chan->pending_list);
	xilinx_vdma_free_desc_list(chan, &chan->done_list);
	xilinx_vdma_free_desc_list(chan, &chan->active_list);

	spin_unlock_irqrestore(&chan->lock, flags);
}
415 | ||
/**
 * xilinx_vdma_free_chan_resources - Free channel resources
 * @dchan: DMA channel
 *
 * Frees all queued descriptors and destroys the segment pool; the
 * channel can be re-allocated later via device_alloc_chan_resources.
 */
static void xilinx_vdma_free_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);

	dev_dbg(chan->dev, "Free all channel resources.\n");

	xilinx_vdma_free_descriptors(chan);
	dma_pool_destroy(chan->desc_pool);
	/* NULL marks the channel as unallocated for alloc_chan_resources */
	chan->desc_pool = NULL;
}
430 | ||
/**
 * xilinx_vdma_chan_desc_cleanup - Clean channel descriptors
 * @chan: Driver specific VDMA channel
 *
 * Runs the completion callback for every descriptor on the done list
 * and frees it.  Called from tasklet context.
 */
static void xilinx_vdma_chan_desc_cleanup(struct xilinx_vdma_chan *chan)
{
	struct xilinx_vdma_tx_descriptor *desc, *next;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		/* Remove from the list of running transactions */
		list_del(&desc->node);

		/*
		 * Run the link descriptor callback function with the lock
		 * dropped, so the callback may itself call back into the
		 * driver (e.g. to submit new descriptors) without deadlock.
		 */
		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;
		if (callback) {
			spin_unlock_irqrestore(&chan->lock, flags);
			callback(callback_param);
			spin_lock_irqsave(&chan->lock, flags);
		}

		/* Run any dependencies, then free the descriptor */
		dma_run_dependencies(&desc->async_tx);
		xilinx_vdma_free_tx_descriptor(chan, desc);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
}
465 | ||
/**
 * xilinx_vdma_do_tasklet - Schedule completion tasklet
 * @data: Pointer to the Xilinx VDMA channel structure
 *
 * Tasklet body: drains the done list, scheduled from the IRQ handler.
 */
static void xilinx_vdma_do_tasklet(unsigned long data)
{
	struct xilinx_vdma_chan *chan = (struct xilinx_vdma_chan *)data;

	xilinx_vdma_chan_desc_cleanup(chan);
}
476 | ||
/**
 * xilinx_vdma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Creates the per-channel DMA pool used for hardware segments and
 * initializes the channel cookie.  Idempotent: a second call on an
 * already-allocated channel is a no-op.
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_vdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 0;

	/*
	 * We need the descriptor to be aligned to 64bytes
	 * for meeting Xilinx VDMA specification requirement.
	 */
	chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
				chan->dev,
				sizeof(struct xilinx_vdma_tx_segment),
				__alignof__(struct xilinx_vdma_tx_segment), 0);
	if (!chan->desc_pool) {
		dev_err(chan->dev,
			"unable to allocate channel %d descriptor pool\n",
			chan->id);
		return -ENOMEM;
	}

	dma_cookie_init(dchan);
	return 0;
}
509 | ||
/**
 * xilinx_vdma_tx_status - Get VDMA transaction status
 * @dchan: DMA channel
 * @cookie: Transaction identifier
 * @txstate: Transaction state
 *
 * Pure cookie lookup; no residue accounting is done for VDMA.
 *
 * Return: DMA transaction status
 */
static enum dma_status xilinx_vdma_tx_status(struct dma_chan *dchan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	return dma_cookie_status(dchan, cookie, txstate);
}
524 | ||
/**
 * xilinx_vdma_is_running - Check if VDMA channel is running
 * @chan: Driver specific VDMA channel
 *
 * Running means DMASR reports not-halted AND DMACR has RUNSTOP set.
 *
 * Return: '1' if running, '0' if not.
 */
static bool xilinx_vdma_is_running(struct xilinx_vdma_chan *chan)
{
	return !(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
		 XILINX_VDMA_DMASR_HALTED) &&
		(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
		 XILINX_VDMA_DMACR_RUNSTOP);
}
538 | ||
/**
 * xilinx_vdma_is_idle - Check if VDMA channel is idle
 * @chan: Driver specific VDMA channel
 *
 * Reads the IDLE bit from the channel status register.
 *
 * Return: '1' if idle, '0' if not.
 */
static bool xilinx_vdma_is_idle(struct xilinx_vdma_chan *chan)
{
	return vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
		XILINX_VDMA_DMASR_IDLE;
}
550 | ||
551 | /** | |
552 | * xilinx_vdma_halt - Halt VDMA channel | |
553 | * @chan: Driver specific VDMA channel | |
554 | */ | |
555 | static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan) | |
556 | { | |
69490634 | 557 | int err; |
9495f264 | 558 | u32 val; |
9cd4360d ST |
559 | |
560 | vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP); | |
561 | ||
562 | /* Wait for the hardware to halt */ | |
9495f264 KA |
563 | err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMASR, val, |
564 | (val & XILINX_VDMA_DMASR_HALTED), 0, | |
565 | XILINX_VDMA_LOOP_COUNT); | |
9cd4360d | 566 | |
9495f264 | 567 | if (err) { |
9cd4360d ST |
568 | dev_err(chan->dev, "Cannot stop channel %p: %x\n", |
569 | chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR)); | |
570 | chan->err = true; | |
571 | } | |
572 | ||
573 | return; | |
574 | } | |
575 | ||
576 | /** | |
577 | * xilinx_vdma_start - Start VDMA channel | |
578 | * @chan: Driver specific VDMA channel | |
579 | */ | |
580 | static void xilinx_vdma_start(struct xilinx_vdma_chan *chan) | |
581 | { | |
69490634 | 582 | int err; |
9495f264 | 583 | u32 val; |
9cd4360d ST |
584 | |
585 | vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP); | |
586 | ||
587 | /* Wait for the hardware to start */ | |
9495f264 KA |
588 | err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMASR, val, |
589 | !(val & XILINX_VDMA_DMASR_HALTED), 0, | |
590 | XILINX_VDMA_LOOP_COUNT); | |
9cd4360d | 591 | |
9495f264 | 592 | if (err) { |
9cd4360d ST |
593 | dev_err(chan->dev, "Cannot start channel %p: %x\n", |
594 | chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR)); | |
595 | ||
596 | chan->err = true; | |
597 | } | |
598 | ||
599 | return; | |
600 | } | |
601 | ||
/**
 * xilinx_vdma_start_transfer - Starts VDMA transfer
 * @chan: Driver specific channel struct pointer
 *
 * Programs the hardware from the pending list and, on success, moves
 * all pending descriptors to the active list.  Must be called with
 * chan->lock held.
 */
static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
{
	struct xilinx_vdma_config *config = &chan->config;
	struct xilinx_vdma_tx_descriptor *desc, *tail_desc;
	u32 reg;
	struct xilinx_vdma_tx_segment *tail_segment;

	/* This function was invoked with lock held */
	if (chan->err)
		return;

	if (list_empty(&chan->pending_list))
		return;

	desc = list_first_entry(&chan->pending_list,
				struct xilinx_vdma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_vdma_tx_descriptor, node);

	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_vdma_tx_segment, node);

	/* If it is SG mode and hardware is busy, cannot submit */
	if (chan->has_sg && xilinx_vdma_is_running(chan) &&
	    !xilinx_vdma_is_idle(chan)) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		return;
	}

	/*
	 * If hardware is idle, then all descriptors on the running lists are
	 * done, start new transfers
	 */
	if (chan->has_sg)
		vdma_ctrl_write(chan, XILINX_VDMA_REG_CURDESC,
				desc->async_tx.phys);

	/* Configure the hardware using info in the config structure */
	reg = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR);

	if (config->frm_cnt_en)
		reg |= XILINX_VDMA_DMACR_FRAMECNT_EN;
	else
		reg &= ~XILINX_VDMA_DMACR_FRAMECNT_EN;

	/* Configure channel to allow number frame buffers */
	vdma_ctrl_write(chan, XILINX_VDMA_REG_FRMSTORE,
			chan->desc_pendingcount);

	/*
	 * With SG, start with circular mode, so that BDs can be fetched.
	 * In direct register mode, if not parking, enable circular mode
	 */
	if (chan->has_sg || !config->park)
		reg |= XILINX_VDMA_DMACR_CIRC_EN;

	if (config->park)
		reg &= ~XILINX_VDMA_DMACR_CIRC_EN;

	vdma_ctrl_write(chan, XILINX_VDMA_REG_DMACR, reg);

	/* Select the frame to park on, read vs write side by direction */
	if (config->park && (config->park_frm >= 0) &&
	    (config->park_frm < chan->num_frms)) {
		if (chan->direction == DMA_MEM_TO_DEV)
			vdma_write(chan, XILINX_VDMA_REG_PARK_PTR,
				   config->park_frm <<
				   XILINX_VDMA_PARK_PTR_RD_REF_SHIFT);
		else
			vdma_write(chan, XILINX_VDMA_REG_PARK_PTR,
				   config->park_frm <<
				   XILINX_VDMA_PARK_PTR_WR_REF_SHIFT);
	}

	/* Start the hardware */
	xilinx_vdma_start(chan);

	if (chan->err)
		return;

	/* Start the transfer */
	if (chan->has_sg) {
		/* In SG mode the tail-descriptor write kicks off fetching */
		vdma_ctrl_write(chan, XILINX_VDMA_REG_TAILDESC,
				tail_segment->phys);
	} else {
		struct xilinx_vdma_tx_segment *segment, *last = NULL;
		int i = 0;

		/* Program one frame-buffer address per pending descriptor */
		list_for_each_entry(desc, &chan->pending_list, node) {
			segment = list_first_entry(&desc->segments,
					struct xilinx_vdma_tx_segment, node);
			vdma_desc_write(chan,
					XILINX_VDMA_REG_START_ADDRESS(i++),
					segment->hw.buf_addr);
			last = segment;
		}

		if (!last)
			return;

		/* HW expects these parameters to be same for one transaction */
		vdma_desc_write(chan, XILINX_VDMA_REG_HSIZE, last->hw.hsize);
		vdma_desc_write(chan, XILINX_VDMA_REG_FRMDLY_STRIDE,
				last->hw.stride);
		/*
		 * VSIZE is written last — presumably this is the write that
		 * triggers the transfer in register-direct mode; confirm
		 * against the AXI VDMA product guide before reordering.
		 */
		vdma_desc_write(chan, XILINX_VDMA_REG_VSIZE, last->hw.vsize);
	}

	/* Hardware accepted everything: pending descriptors become active */
	list_splice_tail_init(&chan->pending_list, &chan->active_list);
	chan->desc_pendingcount = 0;
}
715 | ||
/**
 * xilinx_vdma_issue_pending - Issue pending transactions
 * @dchan: DMA channel
 *
 * dmaengine hook: pushes queued descriptors to hardware under the
 * channel lock.
 */
static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
{
	struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	xilinx_vdma_start_transfer(chan);
	spin_unlock_irqrestore(&chan->lock, flags);
}
729 | ||
730 | /** | |
731 | * xilinx_vdma_complete_descriptor - Mark the active descriptor as complete | |
732 | * @chan : xilinx DMA channel | |
733 | * | |
734 | * CONTEXT: hardirq | |
735 | */ | |
736 | static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan) | |
737 | { | |
7096f36e | 738 | struct xilinx_vdma_tx_descriptor *desc, *next; |
9cd4360d | 739 | |
26c5e369 | 740 | /* This function was invoked with lock held */ |
7096f36e | 741 | if (list_empty(&chan->active_list)) |
26c5e369 | 742 | return; |
9cd4360d | 743 | |
7096f36e KA |
744 | list_for_each_entry_safe(desc, next, &chan->active_list, node) { |
745 | list_del(&desc->node); | |
746 | dma_cookie_complete(&desc->async_tx); | |
747 | list_add_tail(&desc->node, &chan->done_list); | |
748 | } | |
9cd4360d ST |
749 | } |
750 | ||
/**
 * xilinx_vdma_reset - Reset VDMA channel
 * @chan: Driver specific VDMA channel
 *
 * Triggers a soft reset via DMACR and polls (up to
 * XILINX_VDMA_LOOP_COUNT us) for the RESET bit to self-clear.
 * On success, also clears the channel's sticky error flag.
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan)
{
	int err;
	u32 tmp;

	vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RESET);

	/* Wait for the hardware to finish reset */
	err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMACR, tmp,
				      !(tmp & XILINX_VDMA_DMACR_RESET), 0,
				      XILINX_VDMA_LOOP_COUNT);

	if (err) {
		dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
			vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR),
			vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
		return -ETIMEDOUT;
	}

	chan->err = false;

	/* err is 0 here; returned for symmetry with the poll result */
	return err;
}
780 | ||
781 | /** | |
782 | * xilinx_vdma_chan_reset - Reset VDMA channel and enable interrupts | |
783 | * @chan: Driver specific VDMA channel | |
784 | * | |
785 | * Return: '0' on success and failure value on error | |
786 | */ | |
787 | static int xilinx_vdma_chan_reset(struct xilinx_vdma_chan *chan) | |
788 | { | |
789 | int err; | |
790 | ||
791 | /* Reset VDMA */ | |
792 | err = xilinx_vdma_reset(chan); | |
793 | if (err) | |
794 | return err; | |
795 | ||
796 | /* Enable interrupts */ | |
797 | vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, | |
798 | XILINX_VDMA_DMAXR_ALL_IRQ_MASK); | |
799 | ||
800 | return 0; | |
801 | } | |
802 | ||
/**
 * xilinx_vdma_irq_handler - VDMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Xilinx VDMA channel structure
 *
 * Acks the channel interrupts, records errors, and on frame-count IRQ
 * retires active descriptors and restarts the engine.  Descriptor
 * callbacks are deferred to the cleanup tasklet.
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */
static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data)
{
	struct xilinx_vdma_chan *chan = data;
	u32 status;

	/* Read the status and ack the interrupts. */
	status = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR);
	if (!(status & XILINX_VDMA_DMAXR_ALL_IRQ_MASK))
		return IRQ_NONE;

	/* Write-to-clear: ack only the IRQ bits we saw asserted */
	vdma_ctrl_write(chan, XILINX_VDMA_REG_DMASR,
			status & XILINX_VDMA_DMAXR_ALL_IRQ_MASK);

	if (status & XILINX_VDMA_DMASR_ERR_IRQ) {
		/*
		 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
		 * error is recoverable, ignore it. Otherwise flag the error.
		 *
		 * Only recoverable errors can be cleared in the DMASR register,
		 * make sure not to write to other error bits to 1.
		 */
		u32 errors = status & XILINX_VDMA_DMASR_ALL_ERR_MASK;
		vdma_ctrl_write(chan, XILINX_VDMA_REG_DMASR,
				errors & XILINX_VDMA_DMASR_ERR_RECOVER_MASK);

		if (!chan->flush_on_fsync ||
		    (errors & ~XILINX_VDMA_DMASR_ERR_RECOVER_MASK)) {
			dev_err(chan->dev,
				"Channel %p has errors %x, cdr %x tdr %x\n",
				chan, errors,
				vdma_ctrl_read(chan, XILINX_VDMA_REG_CURDESC),
				vdma_ctrl_read(chan, XILINX_VDMA_REG_TAILDESC));
			chan->err = true;
		}
	}

	if (status & XILINX_VDMA_DMASR_DLY_CNT_IRQ) {
		/*
		 * Device takes too long to do the transfer when user requires
		 * responsiveness.
		 */
		dev_dbg(chan->dev, "Inter-packet latency too long\n");
	}

	if (status & XILINX_VDMA_DMASR_FRM_CNT_IRQ) {
		/* Retire finished work and kick off anything still pending */
		spin_lock(&chan->lock);
		xilinx_vdma_complete_descriptor(chan);
		xilinx_vdma_start_transfer(chan);
		spin_unlock(&chan->lock);
	}

	/* Callbacks run later in tasklet context, not in hardirq */
	tasklet_schedule(&chan->tasklet);
	return IRQ_HANDLED;
}
864 | ||
7096f36e KA |
865 | /** |
866 | * append_desc_queue - Queuing descriptor | |
867 | * @chan: Driver specific dma channel | |
868 | * @desc: dma transaction descriptor | |
869 | */ | |
870 | static void append_desc_queue(struct xilinx_vdma_chan *chan, | |
871 | struct xilinx_vdma_tx_descriptor *desc) | |
872 | { | |
873 | struct xilinx_vdma_tx_segment *tail_segment; | |
874 | struct xilinx_vdma_tx_descriptor *tail_desc; | |
875 | ||
876 | if (list_empty(&chan->pending_list)) | |
877 | goto append; | |
878 | ||
879 | /* | |
880 | * Add the hardware descriptor to the chain of hardware descriptors | |
881 | * that already exists in memory. | |
882 | */ | |
883 | tail_desc = list_last_entry(&chan->pending_list, | |
884 | struct xilinx_vdma_tx_descriptor, node); | |
885 | tail_segment = list_last_entry(&tail_desc->segments, | |
886 | struct xilinx_vdma_tx_segment, node); | |
887 | tail_segment->hw.next_desc = (u32)desc->async_tx.phys; | |
888 | ||
889 | /* | |
890 | * Add the software descriptor and all children to the list | |
891 | * of pending transactions | |
892 | */ | |
893 | append: | |
894 | list_add_tail(&desc->node, &chan->pending_list); | |
895 | chan->desc_pendingcount++; | |
896 | ||
897 | if (unlikely(chan->desc_pendingcount > chan->num_frms)) { | |
898 | dev_dbg(chan->dev, "desc pendingcount is too high\n"); | |
899 | chan->desc_pendingcount = chan->num_frms; | |
900 | } | |
901 | } | |
902 | ||
9cd4360d ST |
903 | /** |
904 | * xilinx_vdma_tx_submit - Submit DMA transaction | |
905 | * @tx: Async transaction descriptor | |
906 | * | |
907 | * Return: cookie value on success and failure value on error | |
908 | */ | |
909 | static dma_cookie_t xilinx_vdma_tx_submit(struct dma_async_tx_descriptor *tx) | |
910 | { | |
911 | struct xilinx_vdma_tx_descriptor *desc = to_vdma_tx_descriptor(tx); | |
912 | struct xilinx_vdma_chan *chan = to_xilinx_chan(tx->chan); | |
913 | dma_cookie_t cookie; | |
914 | unsigned long flags; | |
915 | int err; | |
916 | ||
917 | if (chan->err) { | |
918 | /* | |
919 | * If reset fails, need to hard reset the system. | |
920 | * Channel is no longer functional | |
921 | */ | |
922 | err = xilinx_vdma_chan_reset(chan); | |
923 | if (err < 0) | |
924 | return err; | |
925 | } | |
926 | ||
927 | spin_lock_irqsave(&chan->lock, flags); | |
928 | ||
929 | cookie = dma_cookie_assign(tx); | |
930 | ||
7096f36e KA |
931 | /* Put this transaction onto the tail of the pending queue */ |
932 | append_desc_queue(chan, desc); | |
9cd4360d ST |
933 | |
934 | spin_unlock_irqrestore(&chan->lock, flags); | |
935 | ||
936 | return cookie; | |
937 | } | |
938 | ||
939 | /** | |
940 | * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a | |
941 | * DMA_SLAVE transaction | |
942 | * @dchan: DMA channel | |
943 | * @xt: Interleaved template pointer | |
944 | * @flags: transfer ack flags | |
945 | * | |
946 | * Return: Async transaction descriptor on success and NULL on failure | |
947 | */ | |
948 | static struct dma_async_tx_descriptor * | |
949 | xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan, | |
950 | struct dma_interleaved_template *xt, | |
951 | unsigned long flags) | |
952 | { | |
953 | struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); | |
954 | struct xilinx_vdma_tx_descriptor *desc; | |
955 | struct xilinx_vdma_tx_segment *segment, *prev = NULL; | |
956 | struct xilinx_vdma_desc_hw *hw; | |
957 | ||
958 | if (!is_slave_direction(xt->dir)) | |
959 | return NULL; | |
960 | ||
961 | if (!xt->numf || !xt->sgl[0].size) | |
962 | return NULL; | |
963 | ||
a5e48e24 ST |
964 | if (xt->frame_size != 1) |
965 | return NULL; | |
966 | ||
9cd4360d ST |
967 | /* Allocate a transaction descriptor. */ |
968 | desc = xilinx_vdma_alloc_tx_descriptor(chan); | |
969 | if (!desc) | |
970 | return NULL; | |
971 | ||
972 | dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); | |
973 | desc->async_tx.tx_submit = xilinx_vdma_tx_submit; | |
974 | async_tx_ack(&desc->async_tx); | |
975 | ||
976 | /* Allocate the link descriptor from DMA pool */ | |
977 | segment = xilinx_vdma_alloc_tx_segment(chan); | |
978 | if (!segment) | |
979 | goto error; | |
980 | ||
981 | /* Fill in the hardware descriptor */ | |
982 | hw = &segment->hw; | |
983 | hw->vsize = xt->numf; | |
984 | hw->hsize = xt->sgl[0].size; | |
6d80f45f | 985 | hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) << |
9cd4360d ST |
986 | XILINX_VDMA_FRMDLY_STRIDE_STRIDE_SHIFT; |
987 | hw->stride |= chan->config.frm_dly << | |
988 | XILINX_VDMA_FRMDLY_STRIDE_FRMDLY_SHIFT; | |
989 | ||
990 | if (xt->dir != DMA_MEM_TO_DEV) | |
991 | hw->buf_addr = xt->dst_start; | |
992 | else | |
993 | hw->buf_addr = xt->src_start; | |
994 | ||
9cd4360d ST |
995 | /* Insert the segment into the descriptor segments list. */ |
996 | list_add_tail(&segment->node, &desc->segments); | |
997 | ||
998 | prev = segment; | |
999 | ||
1000 | /* Link the last hardware descriptor with the first. */ | |
1001 | segment = list_first_entry(&desc->segments, | |
1002 | struct xilinx_vdma_tx_segment, node); | |
7096f36e | 1003 | desc->async_tx.phys = segment->phys; |
9cd4360d ST |
1004 | |
1005 | return &desc->async_tx; | |
1006 | ||
1007 | error: | |
1008 | xilinx_vdma_free_tx_descriptor(chan, desc); | |
1009 | return NULL; | |
1010 | } | |
1011 | ||
/**
 * xilinx_vdma_terminate_all - Halt the channel and free descriptors
 * @dchan: DMA channel pointer (converted to the driver-specific channel)
 *
 * dmaengine device_terminate_all callback: stops the hardware first so no
 * further frames are transferred, then releases every queued descriptor.
 *
 * Return: Always '0'
 */
static int xilinx_vdma_terminate_all(struct dma_chan *dchan)
{
	struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);

	/* Halt the DMA engine */
	xilinx_vdma_halt(chan);

	/* Remove and free all of the descriptors in the lists */
	xilinx_vdma_free_descriptors(chan);

	return 0;
}
1028 | ||
/**
 * xilinx_vdma_channel_set_config - Configure VDMA channel
 * Run-time configuration for Axi VDMA, supports:
 * . halt the channel
 * . configure interrupt coalescing and inter-packet delay threshold
 * . start/stop parking
 * . enable genlock
 *
 * @dchan: DMA channel
 * @cfg: VDMA device configuration pointer
 *
 * Return: '0' on success and failure value on error
 */
int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
					struct xilinx_vdma_config *cfg)
{
	struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
	u32 dmacr;

	/* A reset request short-circuits all other configuration. */
	if (cfg->reset)
		return xilinx_vdma_chan_reset(chan);

	dmacr = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR);

	chan->config.frm_dly = cfg->frm_dly;
	chan->config.park = cfg->park;

	/* genlock settings */
	chan->config.gen_lock = cfg->gen_lock;
	chan->config.master = cfg->master;

	/*
	 * NOTE(review): genlock enable and master bits are only ever OR'ed
	 * in here and never cleared by this function - confirm that
	 * disabling genlock at run time is not expected to work.
	 */
	if (cfg->gen_lock && chan->genlock) {
		dmacr |= XILINX_VDMA_DMACR_GENLOCK_EN;
		dmacr |= cfg->master << XILINX_VDMA_DMACR_MASTER_SHIFT;
	}

	chan->config.frm_cnt_en = cfg->frm_cnt_en;
	/* -1 marks "no park frame" when parking is disabled. */
	if (cfg->park)
		chan->config.park_frm = cfg->park_frm;
	else
		chan->config.park_frm = -1;

	/*
	 * NOTE(review): coalesc/delay are stored unconditionally here and
	 * then stored again inside the range checks below; an out-of-range
	 * value is therefore kept in chan->config even though it is never
	 * written to the hardware - verify this is intentional.
	 */
	chan->config.coalesc = cfg->coalesc;
	chan->config.delay = cfg->delay;

	if (cfg->coalesc <= XILINX_VDMA_DMACR_FRAME_COUNT_MAX) {
		dmacr |= cfg->coalesc << XILINX_VDMA_DMACR_FRAME_COUNT_SHIFT;
		chan->config.coalesc = cfg->coalesc;
	}

	if (cfg->delay <= XILINX_VDMA_DMACR_DELAY_MAX) {
		dmacr |= cfg->delay << XILINX_VDMA_DMACR_DELAY_SHIFT;
		chan->config.delay = cfg->delay;
	}

	/* FSync Source selection */
	dmacr &= ~XILINX_VDMA_DMACR_FSYNCSRC_MASK;
	dmacr |= cfg->ext_fsync << XILINX_VDMA_DMACR_FSYNCSRC_SHIFT;

	vdma_ctrl_write(chan, XILINX_VDMA_REG_DMACR, dmacr);

	return 0;
}
EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
1093 | ||
9cd4360d ST |
1094 | /* ----------------------------------------------------------------------------- |
1095 | * Probe and remove | |
1096 | */ | |
1097 | ||
/**
 * xilinx_vdma_chan_remove - Per Channel remove function
 * @chan: Driver specific VDMA channel
 *
 * Teardown order matters: interrupts are masked first so the handler can
 * no longer schedule the tasklet, the IRQ is freed, the tasklet is killed,
 * and only then is the channel unlinked from the dmaengine channel list.
 */
static void xilinx_vdma_chan_remove(struct xilinx_vdma_chan *chan)
{
	/* Disable all interrupts */
	vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR,
		      XILINX_VDMA_DMAXR_ALL_IRQ_MASK);

	/* irq may be 0 if mapping failed during probe. */
	if (chan->irq > 0)
		free_irq(chan->irq, chan);

	tasklet_kill(&chan->tasklet);

	list_del(&chan->common.device_node);
}
1115 | ||
1116 | /** | |
1117 | * xilinx_vdma_chan_probe - Per Channel Probing | |
1118 | * It get channel features from the device tree entry and | |
1119 | * initialize special channel handling routines | |
1120 | * | |
1121 | * @xdev: Driver specific device structure | |
1122 | * @node: Device node | |
1123 | * | |
1124 | * Return: '0' on success and failure value on error | |
1125 | */ | |
1126 | static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev, | |
1127 | struct device_node *node) | |
1128 | { | |
1129 | struct xilinx_vdma_chan *chan; | |
1130 | bool has_dre = false; | |
1131 | u32 value, width; | |
1132 | int err; | |
1133 | ||
1134 | /* Allocate and initialize the channel structure */ | |
1135 | chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL); | |
1136 | if (!chan) | |
1137 | return -ENOMEM; | |
1138 | ||
1139 | chan->dev = xdev->dev; | |
1140 | chan->xdev = xdev; | |
1141 | chan->has_sg = xdev->has_sg; | |
7096f36e | 1142 | chan->desc_pendingcount = 0x0; |
9cd4360d ST |
1143 | |
1144 | spin_lock_init(&chan->lock); | |
1145 | INIT_LIST_HEAD(&chan->pending_list); | |
1146 | INIT_LIST_HEAD(&chan->done_list); | |
7096f36e | 1147 | INIT_LIST_HEAD(&chan->active_list); |
9cd4360d ST |
1148 | |
1149 | /* Retrieve the channel properties from the device tree */ | |
1150 | has_dre = of_property_read_bool(node, "xlnx,include-dre"); | |
1151 | ||
1152 | chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode"); | |
1153 | ||
1154 | err = of_property_read_u32(node, "xlnx,datawidth", &value); | |
1155 | if (err) { | |
1156 | dev_err(xdev->dev, "missing xlnx,datawidth property\n"); | |
1157 | return err; | |
1158 | } | |
1159 | width = value >> 3; /* Convert bits to bytes */ | |
1160 | ||
1161 | /* If data width is greater than 8 bytes, DRE is not in hw */ | |
1162 | if (width > 8) | |
1163 | has_dre = false; | |
1164 | ||
1165 | if (!has_dre) | |
1166 | xdev->common.copy_align = fls(width - 1); | |
1167 | ||
1168 | if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel")) { | |
1169 | chan->direction = DMA_MEM_TO_DEV; | |
1170 | chan->id = 0; | |
1171 | ||
1172 | chan->ctrl_offset = XILINX_VDMA_MM2S_CTRL_OFFSET; | |
1173 | chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET; | |
1174 | ||
1175 | if (xdev->flush_on_fsync == XILINX_VDMA_FLUSH_BOTH || | |
1176 | xdev->flush_on_fsync == XILINX_VDMA_FLUSH_MM2S) | |
1177 | chan->flush_on_fsync = true; | |
1178 | } else if (of_device_is_compatible(node, | |
1179 | "xlnx,axi-vdma-s2mm-channel")) { | |
1180 | chan->direction = DMA_DEV_TO_MEM; | |
1181 | chan->id = 1; | |
1182 | ||
1183 | chan->ctrl_offset = XILINX_VDMA_S2MM_CTRL_OFFSET; | |
1184 | chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET; | |
1185 | ||
1186 | if (xdev->flush_on_fsync == XILINX_VDMA_FLUSH_BOTH || | |
1187 | xdev->flush_on_fsync == XILINX_VDMA_FLUSH_S2MM) | |
1188 | chan->flush_on_fsync = true; | |
1189 | } else { | |
1190 | dev_err(xdev->dev, "Invalid channel compatible node\n"); | |
1191 | return -EINVAL; | |
1192 | } | |
1193 | ||
1194 | /* Request the interrupt */ | |
1195 | chan->irq = irq_of_parse_and_map(node, 0); | |
1196 | err = request_irq(chan->irq, xilinx_vdma_irq_handler, IRQF_SHARED, | |
1197 | "xilinx-vdma-controller", chan); | |
1198 | if (err) { | |
1199 | dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq); | |
1200 | return err; | |
1201 | } | |
1202 | ||
1203 | /* Initialize the tasklet */ | |
1204 | tasklet_init(&chan->tasklet, xilinx_vdma_do_tasklet, | |
1205 | (unsigned long)chan); | |
1206 | ||
1207 | /* | |
1208 | * Initialize the DMA channel and add it to the DMA engine channels | |
1209 | * list. | |
1210 | */ | |
1211 | chan->common.device = &xdev->common; | |
1212 | ||
1213 | list_add_tail(&chan->common.device_node, &xdev->common.channels); | |
1214 | xdev->chan[chan->id] = chan; | |
1215 | ||
1216 | /* Reset the channel */ | |
1217 | err = xilinx_vdma_chan_reset(chan); | |
1218 | if (err < 0) { | |
1219 | dev_err(xdev->dev, "Reset channel failed\n"); | |
1220 | return err; | |
1221 | } | |
1222 | ||
1223 | return 0; | |
1224 | } | |
1225 | ||
1226 | /** | |
1227 | * of_dma_xilinx_xlate - Translation function | |
1228 | * @dma_spec: Pointer to DMA specifier as found in the device tree | |
1229 | * @ofdma: Pointer to DMA controller data | |
1230 | * | |
1231 | * Return: DMA channel pointer on success and NULL on error | |
1232 | */ | |
1233 | static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec, | |
1234 | struct of_dma *ofdma) | |
1235 | { | |
1236 | struct xilinx_vdma_device *xdev = ofdma->of_dma_data; | |
1237 | int chan_id = dma_spec->args[0]; | |
1238 | ||
330ed4da | 1239 | if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE || !xdev->chan[chan_id]) |
9cd4360d ST |
1240 | return NULL; |
1241 | ||
1242 | return dma_get_slave_channel(&xdev->chan[chan_id]->common); | |
1243 | } | |
1244 | ||
1245 | /** | |
1246 | * xilinx_vdma_probe - Driver probe function | |
1247 | * @pdev: Pointer to the platform_device structure | |
1248 | * | |
1249 | * Return: '0' on success and failure value on error | |
1250 | */ | |
1251 | static int xilinx_vdma_probe(struct platform_device *pdev) | |
1252 | { | |
1253 | struct device_node *node = pdev->dev.of_node; | |
1254 | struct xilinx_vdma_device *xdev; | |
1255 | struct device_node *child; | |
1256 | struct resource *io; | |
1257 | u32 num_frames; | |
1258 | int i, err; | |
1259 | ||
1260 | /* Allocate and initialize the DMA engine structure */ | |
1261 | xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL); | |
1262 | if (!xdev) | |
1263 | return -ENOMEM; | |
1264 | ||
1265 | xdev->dev = &pdev->dev; | |
1266 | ||
1267 | /* Request and map I/O memory */ | |
1268 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
1269 | xdev->regs = devm_ioremap_resource(&pdev->dev, io); | |
1270 | if (IS_ERR(xdev->regs)) | |
1271 | return PTR_ERR(xdev->regs); | |
1272 | ||
1273 | /* Retrieve the DMA engine properties from the device tree */ | |
1274 | xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg"); | |
1275 | ||
1276 | err = of_property_read_u32(node, "xlnx,num-fstores", &num_frames); | |
1277 | if (err < 0) { | |
1278 | dev_err(xdev->dev, "missing xlnx,num-fstores property\n"); | |
1279 | return err; | |
1280 | } | |
1281 | ||
1282 | err = of_property_read_u32(node, "xlnx,flush-fsync", | |
1283 | &xdev->flush_on_fsync); | |
1284 | if (err < 0) | |
1285 | dev_warn(xdev->dev, "missing xlnx,flush-fsync property\n"); | |
1286 | ||
1287 | /* Initialize the DMA engine */ | |
1288 | xdev->common.dev = &pdev->dev; | |
1289 | ||
1290 | INIT_LIST_HEAD(&xdev->common.channels); | |
1291 | dma_cap_set(DMA_SLAVE, xdev->common.cap_mask); | |
1292 | dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask); | |
1293 | ||
1294 | xdev->common.device_alloc_chan_resources = | |
1295 | xilinx_vdma_alloc_chan_resources; | |
1296 | xdev->common.device_free_chan_resources = | |
1297 | xilinx_vdma_free_chan_resources; | |
1298 | xdev->common.device_prep_interleaved_dma = | |
1299 | xilinx_vdma_dma_prep_interleaved; | |
ba714046 | 1300 | xdev->common.device_terminate_all = xilinx_vdma_terminate_all; |
9cd4360d ST |
1301 | xdev->common.device_tx_status = xilinx_vdma_tx_status; |
1302 | xdev->common.device_issue_pending = xilinx_vdma_issue_pending; | |
1303 | ||
1304 | platform_set_drvdata(pdev, xdev); | |
1305 | ||
1306 | /* Initialize the channels */ | |
1307 | for_each_child_of_node(node, child) { | |
1308 | err = xilinx_vdma_chan_probe(xdev, child); | |
1309 | if (err < 0) | |
1310 | goto error; | |
1311 | } | |
1312 | ||
1313 | for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++) | |
1314 | if (xdev->chan[i]) | |
1315 | xdev->chan[i]->num_frms = num_frames; | |
1316 | ||
1317 | /* Register the DMA engine with the core */ | |
1318 | dma_async_device_register(&xdev->common); | |
1319 | ||
1320 | err = of_dma_controller_register(node, of_dma_xilinx_xlate, | |
1321 | xdev); | |
1322 | if (err < 0) { | |
1323 | dev_err(&pdev->dev, "Unable to register DMA to DT\n"); | |
1324 | dma_async_device_unregister(&xdev->common); | |
1325 | goto error; | |
1326 | } | |
1327 | ||
1328 | dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n"); | |
1329 | ||
1330 | return 0; | |
1331 | ||
1332 | error: | |
1333 | for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++) | |
1334 | if (xdev->chan[i]) | |
1335 | xilinx_vdma_chan_remove(xdev->chan[i]); | |
1336 | ||
1337 | return err; | |
1338 | } | |
1339 | ||
1340 | /** | |
1341 | * xilinx_vdma_remove - Driver remove function | |
1342 | * @pdev: Pointer to the platform_device structure | |
1343 | * | |
1344 | * Return: Always '0' | |
1345 | */ | |
1346 | static int xilinx_vdma_remove(struct platform_device *pdev) | |
1347 | { | |
1348 | struct xilinx_vdma_device *xdev = platform_get_drvdata(pdev); | |
1349 | int i; | |
1350 | ||
1351 | of_dma_controller_free(pdev->dev.of_node); | |
1352 | ||
1353 | dma_async_device_unregister(&xdev->common); | |
1354 | ||
1355 | for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++) | |
1356 | if (xdev->chan[i]) | |
1357 | xilinx_vdma_chan_remove(xdev->chan[i]); | |
1358 | ||
1359 | return 0; | |
1360 | } | |
1361 | ||
/* Device-tree match table: one supported IP core revision. */
static const struct of_device_id xilinx_vdma_of_ids[] = {
	{ .compatible = "xlnx,axi-vdma-1.00.a",},
	{}
};
MODULE_DEVICE_TABLE(of, xilinx_vdma_of_ids);

/* Platform driver glue binding probe/remove to the match table. */
static struct platform_driver xilinx_vdma_driver = {
	.driver = {
		.name = "xilinx-vdma",
		.of_match_table = xilinx_vdma_of_ids,
	},
	.probe = xilinx_vdma_probe,
	.remove = xilinx_vdma_remove,
};

module_platform_driver(xilinx_vdma_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx VDMA driver");
MODULE_LICENSE("GPL v2");