// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015-2016 Marvell International Ltd.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>

#include "dmaengine.h"

/* DMA Engine Registers */
#define MV_XOR_V2_DMA_DESQ_BALR_OFF                     0x000
#define MV_XOR_V2_DMA_DESQ_BAHR_OFF                     0x004
#define MV_XOR_V2_DMA_DESQ_SIZE_OFF                     0x008
#define MV_XOR_V2_DMA_DESQ_DONE_OFF                     0x00C
#define   MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK          0x7FFF
#define   MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT         0
#define   MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_MASK         0x1FFF
#define   MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_SHIFT        16
#define MV_XOR_V2_DMA_DESQ_ARATTR_OFF                   0x010
#define   MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK            0x3F3F
#define   MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE       0x202
#define   MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE             0x3C3C
#define MV_XOR_V2_DMA_IMSG_CDAT_OFF                     0x014
#define MV_XOR_V2_DMA_IMSG_THRD_OFF                     0x018
#define   MV_XOR_V2_DMA_IMSG_THRD_MASK                  0x7FFF
#define   MV_XOR_V2_DMA_IMSG_TIMER_EN                   BIT(18)
#define MV_XOR_V2_DMA_DESQ_AWATTR_OFF                   0x01C
  /* Same flags as MV_XOR_V2_DMA_DESQ_ARATTR_OFF */
#define MV_XOR_V2_DMA_DESQ_ALLOC_OFF                    0x04C
#define   MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK           0xFFFF
#define   MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT          16
#define MV_XOR_V2_DMA_IMSG_BALR_OFF                     0x050
#define MV_XOR_V2_DMA_IMSG_BAHR_OFF                     0x054
#define MV_XOR_V2_DMA_DESQ_CTRL_OFF                     0x100
#define   MV_XOR_V2_DMA_DESQ_CTRL_32B                   1
#define   MV_XOR_V2_DMA_DESQ_CTRL_128B                  7
#define MV_XOR_V2_DMA_DESQ_STOP_OFF                     0x800
#define MV_XOR_V2_DMA_DESQ_DEALLOC_OFF                  0x804
#define MV_XOR_V2_DMA_DESQ_ADD_OFF                      0x808
#define MV_XOR_V2_DMA_IMSG_TMOT                         0x810
#define   MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK            0x1FFF

/* XOR Global registers */
#define MV_XOR_V2_GLOB_BW_CTRL                          0x4
#define   MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_SHIFT      0
#define   MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_VAL        64
#define   MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_SHIFT      8
#define   MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_VAL        8
#define   MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_SHIFT     12
#define   MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_VAL       4
#define   MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_SHIFT     16
#define   MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_VAL       4
#define MV_XOR_V2_GLOB_PAUSE                            0x014
#define   MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL         0x8
#define MV_XOR_V2_GLOB_SYS_INT_CAUSE                    0x200
#define MV_XOR_V2_GLOB_SYS_INT_MASK                     0x204
#define MV_XOR_V2_GLOB_MEM_INT_CAUSE                    0x220
#define MV_XOR_V2_GLOB_MEM_INT_MASK                     0x224

#define MV_XOR_V2_MIN_DESC_SIZE                         32
#define MV_XOR_V2_EXT_DESC_SIZE                         128

#define MV_XOR_V2_DESC_RESERVED_SIZE                    12
#define MV_XOR_V2_DESC_BUFF_D_ADDR_SIZE                 12

#define MV_XOR_V2_CMD_LINE_NUM_MAX_D_BUF                8

/*
 * Descriptors queue size. With 32 byte descriptors, up to 2^14
 * descriptors are allowed, with 128 byte descriptors, up to 2^12
 * descriptors are allowed. This driver uses 128 byte descriptors,
 * but experimentation has shown that a set of 1024 descriptors is
 * sufficient to reach a good level of performance.
 */
#define MV_XOR_V2_DESC_NUM                              1024

/*
 * Threshold values for descriptors and timeout, determined by
 * experimentation as giving a good level of performance.
 */
#define MV_XOR_V2_DONE_IMSG_THRD                        0x14
#define MV_XOR_V2_TIMER_THRD                            0xB0

/**
 * struct mv_xor_v2_descriptor - DMA HW descriptor
 * @desc_id: used by S/W and is not affected by H/W.
 * @flags: error and status flags
 * @crc32_result: CRC32 calculation result
 * @desc_ctrl: operation mode and control flags
 * @buff_size: number of bytes to be processed
 * @fill_pattern_src_addr: Fill-Pattern or Source-Address and
 * AW-Attributes
 * @data_buff_addr: Source (and might be RAID6 destination)
 * addresses of data buffers in RAID5 and RAID6
 * @reserved: reserved
 */
struct mv_xor_v2_descriptor {
        u16 desc_id;
        u16 flags;
        u32 crc32_result;
        u32 desc_ctrl;

        /* Definitions for desc_ctrl */
#define DESC_NUM_ACTIVE_D_BUF_SHIFT     22
#define DESC_OP_MODE_SHIFT              28
#define DESC_OP_MODE_NOP                0       /* Idle operation */
#define DESC_OP_MODE_MEMCPY             1       /* Pure-DMA operation */
#define DESC_OP_MODE_MEMSET             2       /* Mem-Fill operation */
#define DESC_OP_MODE_MEMINIT            3       /* Mem-Init operation */
#define DESC_OP_MODE_MEM_COMPARE        4       /* Mem-Compare operation */
#define DESC_OP_MODE_CRC32              5       /* CRC32 calculation */
#define DESC_OP_MODE_XOR                6       /* RAID5 (XOR) operation */
#define DESC_OP_MODE_RAID6              7       /* RAID6 P&Q-generation */
#define DESC_OP_MODE_RAID6_REC          8       /* RAID6 Recovery */
#define DESC_Q_BUFFER_ENABLE            BIT(16)
#define DESC_P_BUFFER_ENABLE            BIT(17)
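/*
 * DESC_IOD (presumably "Interrupt On Done") requests a completion
 * interrupt for this descriptor; the prep callbacks below set it
 * when DMA_PREP_INTERRUPT is passed.
 */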
#define DESC_IOD                        BIT(27)

        u32 buff_size;
        u32 fill_pattern_src_addr[4];
        u32 data_buff_addr[MV_XOR_V2_DESC_BUFF_D_ADDR_SIZE];
        u32 reserved[MV_XOR_V2_DESC_RESERVED_SIZE];
};

/**
 * struct mv_xor_v2_device - implements a xor device
 * @lock: lock for the engine
 * @clk: reference to the 'core' clock
 * @reg_clk: reference to the 'reg' clock
 * @dma_base: memory mapped DMA register base
 * @glob_base: memory mapped global register base
 * @irq_tasklet: tasklet used for IRQ handling call-backs
 * @free_sw_desc: linked list of free SW descriptors
 * @dmadev: dma device
 * @dmachan: dma channel
 * @hw_desq: HW descriptors queue
 * @hw_desq_virt: virtual address of DESCQ
 * @sw_desq: SW descriptors queue
 * @desc_size: HW descriptor size
 * @npendings: number of pending descriptors (for which tx_submit has
 * been called, but not yet issue_pending)
 * @hw_queue_idx: HW queue index
 * @irq: The Linux interrupt number
 */
struct mv_xor_v2_device {
        spinlock_t lock;
        void __iomem *dma_base;
        void __iomem *glob_base;
        struct clk *clk;
        struct clk *reg_clk;
        struct tasklet_struct irq_tasklet;
        struct list_head free_sw_desc;
        struct dma_device dmadev;
        struct dma_chan dmachan;
        dma_addr_t hw_desq;
        struct mv_xor_v2_descriptor *hw_desq_virt;
        struct mv_xor_v2_sw_desc *sw_desq;
        int desc_size;
        unsigned int npendings;
        unsigned int hw_queue_idx;
        unsigned int irq;
};

/**
 * struct mv_xor_v2_sw_desc - implements a xor SW descriptor
 * @idx: descriptor index
 * @async_tx: support for the async_tx api
 * @hw_desc: associated HW descriptor
 * @free_list: node of the free SW descriptors list
 */
struct mv_xor_v2_sw_desc {
        int idx;
        struct dma_async_tx_descriptor async_tx;
        struct mv_xor_v2_descriptor hw_desc;
        struct list_head free_list;
};

/*
 * Fill the data buffer addresses into a HW descriptor
 */
static void mv_xor_v2_set_data_buffers(struct mv_xor_v2_device *xor_dev,
                                       struct mv_xor_v2_descriptor *desc,
                                       dma_addr_t src, int index)
{
        int arr_index = ((index >> 1) * 3);

        /*
         * Fill the buffer addresses into the descriptor.
         *
         * The format of the buffer addresses for 2 sequential buffers
         * X and X + 1:
         *
         *  First word:  Buffer-DX-Address-Low[31:0]
         *  Second word: Buffer-DX+1-Address-Low[31:0]
         *  Third word:  DX+1-Buffer-Address-High[47:32] [31:16]
         *               DX-Buffer-Address-High[47:32] [15:0]
         */
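        /*
         * For example, source buffers 2 and 3 (index 2 and 3) both map
         * to arr_index (2 >> 1) * 3 = 3: word 3 gets D2's low 32 bits,
         * word 4 gets D3's low 32 bits, and word 5 packs D2's bits
         * [47:32] in its low half and D3's bits [47:32] in its high half.
         */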
        if ((index & 0x1) == 0) {
                desc->data_buff_addr[arr_index] = lower_32_bits(src);

                desc->data_buff_addr[arr_index + 2] &= ~0xFFFF;
                desc->data_buff_addr[arr_index + 2] |=
                        upper_32_bits(src) & 0xFFFF;
        } else {
                desc->data_buff_addr[arr_index + 1] =
                        lower_32_bits(src);

                desc->data_buff_addr[arr_index + 2] &= ~0xFFFF0000;
                desc->data_buff_addr[arr_index + 2] |=
                        (upper_32_bits(src) & 0xFFFF) << 16;
        }
}

/*
 * Notify the engine of new descriptors, and update the available index.
 */
static void mv_xor_v2_add_desc_to_desq(struct mv_xor_v2_device *xor_dev,
                                       int num_of_desc)
{
        /* write the number of new descriptors in the DESQ. */
        writel(num_of_desc, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ADD_OFF);
}

/*
 * Free HW descriptors
 */
static void mv_xor_v2_free_desc_from_desq(struct mv_xor_v2_device *xor_dev,
                                          int num_of_desc)
{
        /* write the number of processed descriptors to release from the DESQ */
        writel(num_of_desc, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DEALLOC_OFF);
}

/*
 * Set descriptor size
 * Return the HW descriptor size in bytes
 */
static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev)
{
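        /*
         * Always program the extended 128 byte descriptor format; see
         * the comment above MV_XOR_V2_DESC_NUM for the size trade-off.
         */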
        writel(MV_XOR_V2_DMA_DESQ_CTRL_128B,
               xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_CTRL_OFF);

        return MV_XOR_V2_EXT_DESC_SIZE;
}

/*
 * Set the IMSG threshold
 */
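/*
 * Interrupt coalescing: rather than interrupting per descriptor, the
 * engine raises an IMSG once the done-descriptor count reaches the
 * threshold, backed by a timer so stragglers still complete (this is
 * our reading of the threshold/timer pair configured below).
 */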
static inline
void mv_xor_v2_enable_imsg_thrd(struct mv_xor_v2_device *xor_dev)
{
        u32 reg;

        /* Configure threshold of number of descriptors, and enable timer */
        reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
        reg &= ~MV_XOR_V2_DMA_IMSG_THRD_MASK;
        reg |= MV_XOR_V2_DONE_IMSG_THRD;
        reg |= MV_XOR_V2_DMA_IMSG_TIMER_EN;
        writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);

        /* Configure Timer Threshold */
        reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_TMOT);
        reg &= ~MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK;
        reg |= MV_XOR_V2_TIMER_THRD;
        writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_TMOT);
}

static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
{
        struct mv_xor_v2_device *xor_dev = data;
        unsigned int ndescs;
        u32 reg;

        reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DONE_OFF);

        ndescs = ((reg >> MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT) &
                  MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK);

        /* No descriptors to process */
        if (!ndescs)
                return IRQ_NONE;

        /* schedule a tasklet to handle descriptor callbacks */
        tasklet_schedule(&xor_dev->irq_tasklet);

        return IRQ_HANDLED;
}

/*
 * submit a descriptor to the DMA engine
 */
static dma_cookie_t
mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
{
        void *dest_hw_desc;
        dma_cookie_t cookie;
        struct mv_xor_v2_sw_desc *sw_desc =
                container_of(tx, struct mv_xor_v2_sw_desc, async_tx);
        struct mv_xor_v2_device *xor_dev =
                container_of(tx->chan, struct mv_xor_v2_device, dmachan);

        dev_dbg(xor_dev->dmadev.dev,
                "%s sw_desc %p: async_tx %p\n",
                __func__, sw_desc, &sw_desc->async_tx);

        /* assign cookie */
        spin_lock_bh(&xor_dev->lock);
        cookie = dma_cookie_assign(tx);

        /* copy the HW descriptor from the SW descriptor to the DESQ */
        dest_hw_desc = xor_dev->hw_desq_virt + xor_dev->hw_queue_idx;

        memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size);

        xor_dev->npendings++;
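        /*
         * The DESQ is used as a ring buffer: advance the SW write
         * index and wrap around at the end of the queue.
         */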
        xor_dev->hw_queue_idx++;
        if (xor_dev->hw_queue_idx >= MV_XOR_V2_DESC_NUM)
                xor_dev->hw_queue_idx = 0;

        spin_unlock_bh(&xor_dev->lock);

        return cookie;
}

/*
 * Prepare a SW descriptor
 */
static struct mv_xor_v2_sw_desc *
mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
{
        struct mv_xor_v2_sw_desc *sw_desc;
        bool found = false;

        /* Lock the channel */
        spin_lock_bh(&xor_dev->lock);

        if (list_empty(&xor_dev->free_sw_desc)) {
                spin_unlock_bh(&xor_dev->lock);
                /* schedule tasklet to free some descriptors */
                tasklet_schedule(&xor_dev->irq_tasklet);
                return NULL;
        }

        list_for_each_entry(sw_desc, &xor_dev->free_sw_desc, free_list) {
                if (async_tx_test_ack(&sw_desc->async_tx)) {
                        found = true;
                        break;
                }
        }

        if (!found) {
                spin_unlock_bh(&xor_dev->lock);
                return NULL;
        }

        list_del(&sw_desc->free_list);

        /* Release the channel */
        spin_unlock_bh(&xor_dev->lock);

        return sw_desc;
}

/*
 * Prepare a HW descriptor for a memcpy operation
 */
static struct dma_async_tx_descriptor *
mv_xor_v2_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
                          dma_addr_t src, size_t len, unsigned long flags)
{
        struct mv_xor_v2_sw_desc *sw_desc;
        struct mv_xor_v2_descriptor *hw_descriptor;
        struct mv_xor_v2_device *xor_dev;

        xor_dev = container_of(chan, struct mv_xor_v2_device, dmachan);

        dev_dbg(xor_dev->dmadev.dev,
                "%s len: %zu src %pad dest %pad flags: %ld\n",
                __func__, len, &src, &dest, flags);

        sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
        if (!sw_desc)
                return NULL;

        sw_desc->async_tx.flags = flags;

        /* set the HW descriptor */
        hw_descriptor = &sw_desc->hw_desc;

        /* save the SW descriptor ID to restore when operation is done */
        hw_descriptor->desc_id = sw_desc->idx;

        /* Set the MEMCPY control word */
        hw_descriptor->desc_ctrl =
                DESC_OP_MODE_MEMCPY << DESC_OP_MODE_SHIFT;

        if (flags & DMA_PREP_INTERRUPT)
                hw_descriptor->desc_ctrl |= DESC_IOD;

        /* Set source address */
        hw_descriptor->fill_pattern_src_addr[0] = lower_32_bits(src);
        hw_descriptor->fill_pattern_src_addr[1] =
                upper_32_bits(src) & 0xFFFF;

        /* Set Destination address */
        hw_descriptor->fill_pattern_src_addr[2] = lower_32_bits(dest);
        hw_descriptor->fill_pattern_src_addr[3] =
                upper_32_bits(dest) & 0xFFFF;

        /* Set buffer size */
        hw_descriptor->buff_size = len;

        /* return the async tx descriptor */
        return &sw_desc->async_tx;
}

/*
 * Prepare a HW descriptor for a XOR operation
 */
static struct dma_async_tx_descriptor *
mv_xor_v2_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
                       unsigned int src_cnt, size_t len, unsigned long flags)
{
        struct mv_xor_v2_sw_desc *sw_desc;
        struct mv_xor_v2_descriptor *hw_descriptor;
        struct mv_xor_v2_device *xor_dev =
                container_of(chan, struct mv_xor_v2_device, dmachan);
        int i;

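        /*
         * A single descriptor can address at most
         * MV_XOR_V2_CMD_LINE_NUM_MAX_D_BUF (8) data buffers; this is
         * also what dma_dev->max_xor advertises in the probe routine.
         */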
        if (src_cnt > MV_XOR_V2_CMD_LINE_NUM_MAX_D_BUF || src_cnt < 1)
                return NULL;

        dev_dbg(xor_dev->dmadev.dev,
                "%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
                __func__, src_cnt, len, &dest, flags);

        sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
        if (!sw_desc)
                return NULL;

        sw_desc->async_tx.flags = flags;

        /* set the HW descriptor */
        hw_descriptor = &sw_desc->hw_desc;

        /* save the SW descriptor ID to restore when operation is done */
        hw_descriptor->desc_id = sw_desc->idx;

        /* Set the XOR control word */
        hw_descriptor->desc_ctrl =
                DESC_OP_MODE_XOR << DESC_OP_MODE_SHIFT;
        hw_descriptor->desc_ctrl |= DESC_P_BUFFER_ENABLE;

        if (flags & DMA_PREP_INTERRUPT)
                hw_descriptor->desc_ctrl |= DESC_IOD;

        /* Set the data buffers */
        for (i = 0; i < src_cnt; i++)
                mv_xor_v2_set_data_buffers(xor_dev, hw_descriptor, src[i], i);

        hw_descriptor->desc_ctrl |=
                src_cnt << DESC_NUM_ACTIVE_D_BUF_SHIFT;

        /* Set Destination address */
        hw_descriptor->fill_pattern_src_addr[2] = lower_32_bits(dest);
        hw_descriptor->fill_pattern_src_addr[3] =
                upper_32_bits(dest) & 0xFFFF;

        /* Set buffer size */
        hw_descriptor->buff_size = len;

        /* return the async tx descriptor */
        return &sw_desc->async_tx;
}

/*
 * Prepare a HW descriptor for an interrupt operation.
 */
static struct dma_async_tx_descriptor *
mv_xor_v2_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
        struct mv_xor_v2_sw_desc *sw_desc;
        struct mv_xor_v2_descriptor *hw_descriptor;
        struct mv_xor_v2_device *xor_dev =
                container_of(chan, struct mv_xor_v2_device, dmachan);

        sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
        if (!sw_desc)
                return NULL;

        /* set the HW descriptor */
        hw_descriptor = &sw_desc->hw_desc;

        /* save the SW descriptor ID to restore when operation is done */
        hw_descriptor->desc_id = sw_desc->idx;

        /* Set the INTERRUPT control word */
        hw_descriptor->desc_ctrl =
                DESC_OP_MODE_NOP << DESC_OP_MODE_SHIFT;
        hw_descriptor->desc_ctrl |= DESC_IOD;

        /* return the async tx descriptor */
        return &sw_desc->async_tx;
}

/*
 * push pending transactions to hardware
 */
static void mv_xor_v2_issue_pending(struct dma_chan *chan)
{
        struct mv_xor_v2_device *xor_dev =
                container_of(chan, struct mv_xor_v2_device, dmachan);

        spin_lock_bh(&xor_dev->lock);

        /*
         * update the engine with the number of descriptors to
         * process
         */
        mv_xor_v2_add_desc_to_desq(xor_dev, xor_dev->npendings);
        xor_dev->npendings = 0;

        spin_unlock_bh(&xor_dev->lock);
}

static inline
int mv_xor_v2_get_pending_params(struct mv_xor_v2_device *xor_dev,
                                 int *pending_ptr)
{
        u32 reg;

        reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DONE_OFF);

        /* get the next pending descriptor index */
        *pending_ptr = ((reg >> MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_SHIFT) &
                        MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_MASK);

        /* get the number of descriptors pending handling */
        return ((reg >> MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT) &
                MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK);
}

/*
 * handle the descriptors after HW processing
 */
static void mv_xor_v2_tasklet(struct tasklet_struct *t)
{
        struct mv_xor_v2_device *xor_dev = from_tasklet(xor_dev, t,
                                                        irq_tasklet);
        int pending_ptr, num_of_pending, i;
        struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL;

        dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__);

        /* get the pending descriptors parameters */
        num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr);

        /* loop over the descriptors that the HW has completed */
        for (i = 0; i < num_of_pending; i++) {
                struct mv_xor_v2_descriptor *next_pending_hw_desc =
                        xor_dev->hw_desq_virt + pending_ptr;

                /* get the SW descriptor related to the HW descriptor */
                next_pending_sw_desc =
                        &xor_dev->sw_desq[next_pending_hw_desc->desc_id];

                /* call the callback */
                if (next_pending_sw_desc->async_tx.cookie > 0) {
                        /*
                         * update the channel's completed cookie - no
                         * lock is required; the IMSG threshold provides
                         * the locking
                         */
                        dma_cookie_complete(&next_pending_sw_desc->async_tx);

                        dma_descriptor_unmap(&next_pending_sw_desc->async_tx);
                        dmaengine_desc_get_callback_invoke(
                                        &next_pending_sw_desc->async_tx, NULL);
                }

                dma_run_dependencies(&next_pending_sw_desc->async_tx);

                /* Lock the channel */
                spin_lock(&xor_dev->lock);

                /* add the SW descriptor to the free descriptors list */
                list_add(&next_pending_sw_desc->free_list,
                         &xor_dev->free_sw_desc);

                /* Release the channel */
                spin_unlock(&xor_dev->lock);

                /* increment the next descriptor */
                pending_ptr++;
                if (pending_ptr >= MV_XOR_V2_DESC_NUM)
                        pending_ptr = 0;
        }

        if (num_of_pending != 0) {
                /* free the descriptors */
                mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending);
        }
}

/*
 * Set DMA Interrupt-message (IMSG) parameters
 */
static void mv_xor_v2_set_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
        struct mv_xor_v2_device *xor_dev = dev_get_drvdata(desc->dev);

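        /*
         * Program the MSI doorbell address and payload into the IMSG
         * registers; as we understand the IMSG mechanism, the engine
         * performs this write itself to raise its completion interrupt.
         */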
        writel(msg->address_lo,
               xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_BALR_OFF);
        writel(msg->address_hi & 0xFFFF,
               xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_BAHR_OFF);
        writel(msg->data,
               xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_CDAT_OFF);
}

static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
{
        u32 reg;

        /* write the DESQ size to the DMA engine */
        writel(MV_XOR_V2_DESC_NUM,
               xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_SIZE_OFF);

        /* write the DESQ address to the DMA engine */
        writel(lower_32_bits(xor_dev->hw_desq),
               xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BALR_OFF);
        writel(upper_32_bits(xor_dev->hw_desq),
               xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF);

        /*
         * This is a temporary solution, until we activate the
         * SMMU. Set the attributes for reading & writing data buffers
         * & descriptors to:
         *
         *  - OuterShareable - Snoops will be performed on CPU caches
         *  - Enable cacheable - Bufferable, Modifiable, Other Allocate
         *    and Allocate
         */
        reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ARATTR_OFF);
        reg &= ~MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK;
        reg |= MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE |
               MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE;
        writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ARATTR_OFF);

        reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_AWATTR_OFF);
        reg &= ~MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK;
        reg |= MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE |
               MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE;
        writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_AWATTR_OFF);

        /*
         * BW CTRL - set values to optimize the XOR performance:
         *
         *  - Set WrBurstLen & RdBurstLen - the unit will issue a
         *    maximum of 256B write/read transactions.
         *  - Limit the number of outstanding write & read data
         *    (OBB/IBB) requests to the maximal value.
         */
        reg = ((MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_VAL <<
                MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_SHIFT) |
               (MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_VAL <<
                MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_SHIFT) |
               (MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_VAL <<
                MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_SHIFT) |
               (MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_VAL <<
                MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_SHIFT));
        writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_BW_CTRL);

        /* Disable the AXI timer feature */
        reg = readl(xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);
        reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL;
        writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);

        /* enable the DMA engine */
        writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);

        return 0;
}

static int mv_xor_v2_suspend(struct platform_device *dev, pm_message_t state)
{
        struct mv_xor_v2_device *xor_dev = platform_get_drvdata(dev);

        /* Set the DESQ stop bit to disable the XOR unit */
        writel(0x1, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);

        return 0;
}

static int mv_xor_v2_resume(struct platform_device *dev)
{
        struct mv_xor_v2_device *xor_dev = platform_get_drvdata(dev);

        mv_xor_v2_set_desc_size(xor_dev);
        mv_xor_v2_enable_imsg_thrd(xor_dev);
        mv_xor_v2_descq_init(xor_dev);

        return 0;
}

static int mv_xor_v2_probe(struct platform_device *pdev)
{
        struct mv_xor_v2_device *xor_dev;
        int i, ret = 0;
        struct dma_device *dma_dev;
        struct mv_xor_v2_sw_desc *sw_desc;

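        /*
         * The C struct layout must match the extended 128 byte HW
         * descriptor exactly, since tx_submit() memcpy()s descriptors
         * into the DESQ as-is.
         */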
        BUILD_BUG_ON(sizeof(struct mv_xor_v2_descriptor) !=
                     MV_XOR_V2_EXT_DESC_SIZE);

        xor_dev = devm_kzalloc(&pdev->dev, sizeof(*xor_dev), GFP_KERNEL);
        if (!xor_dev)
                return -ENOMEM;

        xor_dev->dma_base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(xor_dev->dma_base))
                return PTR_ERR(xor_dev->dma_base);

        xor_dev->glob_base = devm_platform_ioremap_resource(pdev, 1);
        if (IS_ERR(xor_dev->glob_base))
                return PTR_ERR(xor_dev->glob_base);

        platform_set_drvdata(pdev, xor_dev);

        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
        if (ret)
                return ret;

        xor_dev->reg_clk = devm_clk_get_optional_enabled(&pdev->dev, "reg");
        if (IS_ERR(xor_dev->reg_clk))
                return PTR_ERR(xor_dev->reg_clk);

        xor_dev->clk = devm_clk_get_enabled(&pdev->dev, NULL);
        if (IS_ERR(xor_dev->clk))
                return PTR_ERR(xor_dev->clk);

        ret = platform_device_msi_init_and_alloc_irqs(&pdev->dev, 1,
                                                      mv_xor_v2_set_msi_msg);
        if (ret)
                return ret;

        xor_dev->irq = msi_get_virq(&pdev->dev, 0);

        ret = devm_request_irq(&pdev->dev, xor_dev->irq,
                               mv_xor_v2_interrupt_handler, 0,
                               dev_name(&pdev->dev), xor_dev);
        if (ret)
                goto free_msi_irqs;

        tasklet_setup(&xor_dev->irq_tasklet, mv_xor_v2_tasklet);

        xor_dev->desc_size = mv_xor_v2_set_desc_size(xor_dev);

        dma_cookie_init(&xor_dev->dmachan);

        /*
         * allocate coherent memory for hardware descriptors
         * note: writecombine gives slightly better performance, but
         * requires that we explicitly flush the writes
         */
        xor_dev->hw_desq_virt =
                dma_alloc_coherent(&pdev->dev,
                                   xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
                                   &xor_dev->hw_desq, GFP_KERNEL);
        if (!xor_dev->hw_desq_virt) {
                ret = -ENOMEM;
                goto free_msi_irqs;
        }

        /* alloc memory for the SW descriptors */
        xor_dev->sw_desq = devm_kcalloc(&pdev->dev,
                                        MV_XOR_V2_DESC_NUM, sizeof(*sw_desc),
                                        GFP_KERNEL);
        if (!xor_dev->sw_desq) {
                ret = -ENOMEM;
                goto free_hw_desq;
        }

        spin_lock_init(&xor_dev->lock);

        /* init the free SW descriptors list */
        INIT_LIST_HEAD(&xor_dev->free_sw_desc);

        /* add all SW descriptors to the free list */
        for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) {
                struct mv_xor_v2_sw_desc *sw_desc =
                        xor_dev->sw_desq + i;
                sw_desc->idx = i;
                dma_async_tx_descriptor_init(&sw_desc->async_tx,
                                             &xor_dev->dmachan);
                sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
                async_tx_ack(&sw_desc->async_tx);

                list_add(&sw_desc->free_list,
                         &xor_dev->free_sw_desc);
        }

        dma_dev = &xor_dev->dmadev;

        /* set DMA capabilities */
        dma_cap_zero(dma_dev->cap_mask);
        dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
        dma_cap_set(DMA_XOR, dma_dev->cap_mask);
        dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

        /* init dma link list */
        INIT_LIST_HEAD(&dma_dev->channels);

        /* set base routines */
        dma_dev->device_tx_status = dma_cookie_status;
        dma_dev->device_issue_pending = mv_xor_v2_issue_pending;
        dma_dev->dev = &pdev->dev;

        dma_dev->device_prep_dma_memcpy = mv_xor_v2_prep_dma_memcpy;
        dma_dev->device_prep_dma_interrupt = mv_xor_v2_prep_dma_interrupt;
        dma_dev->max_xor = 8;
        dma_dev->device_prep_dma_xor = mv_xor_v2_prep_dma_xor;

        xor_dev->dmachan.device = dma_dev;

        list_add_tail(&xor_dev->dmachan.device_node,
                      &dma_dev->channels);

        mv_xor_v2_enable_imsg_thrd(xor_dev);

        mv_xor_v2_descq_init(xor_dev);

        ret = dma_async_device_register(dma_dev);
        if (ret)
                goto free_hw_desq;

        dev_notice(&pdev->dev, "Marvell Version 2 XOR driver\n");

        return 0;

free_hw_desq:
        dma_free_coherent(&pdev->dev,
                          xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
                          xor_dev->hw_desq_virt, xor_dev->hw_desq);
free_msi_irqs:
        platform_device_msi_free_irqs_all(&pdev->dev);
        return ret;
}

static void mv_xor_v2_remove(struct platform_device *pdev)
{
        struct mv_xor_v2_device *xor_dev = platform_get_drvdata(pdev);

        dma_async_device_unregister(&xor_dev->dmadev);

        dma_free_coherent(&pdev->dev,
                          xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
                          xor_dev->hw_desq_virt, xor_dev->hw_desq);

        devm_free_irq(&pdev->dev, xor_dev->irq, xor_dev);

        platform_device_msi_free_irqs_all(&pdev->dev);

        tasklet_kill(&xor_dev->irq_tasklet);
}

#ifdef CONFIG_OF
static const struct of_device_id mv_xor_v2_dt_ids[] = {
        { .compatible = "marvell,xor-v2", },
        {},
};
MODULE_DEVICE_TABLE(of, mv_xor_v2_dt_ids);
#endif

static struct platform_driver mv_xor_v2_driver = {
        .probe          = mv_xor_v2_probe,
        .suspend        = mv_xor_v2_suspend,
        .resume         = mv_xor_v2_resume,
        .remove_new     = mv_xor_v2_remove,
        .driver         = {
                .name   = "mv_xor_v2",
                .of_match_table = of_match_ptr(mv_xor_v2_dt_ids),
        },
};

module_platform_driver(mv_xor_v2_driver);

MODULE_DESCRIPTION("DMA engine driver for Marvell's Version 2 of XOR engine");