// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015-2016 Marvell International Ltd.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>

#include "dmaengine.h"

/* DMA Engine Registers */
#define MV_XOR_V2_DMA_DESQ_BALR_OFF 0x000
#define MV_XOR_V2_DMA_DESQ_BAHR_OFF 0x004
#define MV_XOR_V2_DMA_DESQ_SIZE_OFF 0x008
#define MV_XOR_V2_DMA_DESQ_DONE_OFF 0x00C
#define MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK 0x7FFF
#define MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT 0
#define MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_MASK 0x1FFF
#define MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_SHIFT 16
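/*
 * Per the masks and shifts above, the DESQ_DONE register packs the
 * count of completed, not-yet-deallocated descriptors in bits [14:0]
 * and the HW read pointer in bits [28:16].
 */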
#define MV_XOR_V2_DMA_DESQ_ARATTR_OFF 0x010
#define MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK 0x3F3F
#define MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE 0x202
#define MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE 0x3C3C
#define MV_XOR_V2_DMA_IMSG_CDAT_OFF 0x014
#define MV_XOR_V2_DMA_IMSG_THRD_OFF 0x018
#define MV_XOR_V2_DMA_IMSG_THRD_MASK 0x7FFF
#define MV_XOR_V2_DMA_IMSG_THRD_SHIFT 0x0
#define MV_XOR_V2_DMA_IMSG_TIMER_EN BIT(18)
#define MV_XOR_V2_DMA_DESQ_AWATTR_OFF 0x01C
	/* Same flags as MV_XOR_V2_DMA_DESQ_ARATTR_OFF */
#define MV_XOR_V2_DMA_DESQ_ALLOC_OFF 0x04C
#define MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK 0xFFFF
#define MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT 16
#define MV_XOR_V2_DMA_IMSG_BALR_OFF 0x050
#define MV_XOR_V2_DMA_IMSG_BAHR_OFF 0x054
#define MV_XOR_V2_DMA_DESQ_CTRL_OFF 0x100
#define MV_XOR_V2_DMA_DESQ_CTRL_32B 1
#define MV_XOR_V2_DMA_DESQ_CTRL_128B 7
#define MV_XOR_V2_DMA_DESQ_STOP_OFF 0x800
#define MV_XOR_V2_DMA_DESQ_DEALLOC_OFF 0x804
#define MV_XOR_V2_DMA_DESQ_ADD_OFF 0x808
#define MV_XOR_V2_DMA_IMSG_TMOT 0x810
#define MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK 0x1FFF
#define MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT 0

/* XOR Global registers */
#define MV_XOR_V2_GLOB_BW_CTRL 0x4
#define MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_SHIFT 0
#define MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_VAL 64
#define MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_SHIFT 8
#define MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_VAL 8
#define MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_SHIFT 12
#define MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_VAL 4
#define MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_SHIFT 16
#define MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_VAL 4
#define MV_XOR_V2_GLOB_PAUSE 0x014
#define MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL 0x8
#define MV_XOR_V2_GLOB_SYS_INT_CAUSE 0x200
#define MV_XOR_V2_GLOB_SYS_INT_MASK 0x204
#define MV_XOR_V2_GLOB_MEM_INT_CAUSE 0x220
#define MV_XOR_V2_GLOB_MEM_INT_MASK 0x224

#define MV_XOR_V2_MIN_DESC_SIZE 32
#define MV_XOR_V2_EXT_DESC_SIZE 128

#define MV_XOR_V2_DESC_RESERVED_SIZE 12
#define MV_XOR_V2_DESC_BUFF_D_ADDR_SIZE 12

#define MV_XOR_V2_CMD_LINE_NUM_MAX_D_BUF 8

/*
 * Descriptors queue size. With 32-byte descriptors, up to 2^14
 * descriptors are allowed; with 128-byte descriptors, up to 2^12
 * descriptors are allowed. This driver uses 128-byte descriptors,
 * but experimentation has shown that a set of 1024 descriptors is
 * sufficient to reach a good level of performance.
 */
#define MV_XOR_V2_DESC_NUM 1024

/*
 * Threshold values for descriptors and timeout, determined by
 * experimentation as giving a good level of performance.
 */
#define MV_XOR_V2_DONE_IMSG_THRD 0x14
#define MV_XOR_V2_TIMER_THRD 0xB0

/**
 * struct mv_xor_v2_descriptor - DMA HW descriptor
 * @desc_id: used by S/W and is not affected by H/W.
 * @flags: error and status flags
 * @crc32_result: CRC32 calculation result
 * @desc_ctrl: operation mode and control flags
 * @buff_size: number of bytes to be processed
 * @fill_pattern_src_addr: Fill-Pattern or Source-Address and
 * AW-Attributes
 * @data_buff_addr: Source (and might be RAID6 destination)
 * addresses of data buffers in RAID5 and RAID6
 * @reserved: reserved
 */
struct mv_xor_v2_descriptor {
	u16 desc_id;
	u16 flags;
	u32 crc32_result;
	u32 desc_ctrl;

	/* Definitions for desc_ctrl */
#define DESC_NUM_ACTIVE_D_BUF_SHIFT 22
#define DESC_OP_MODE_SHIFT 28
#define DESC_OP_MODE_NOP 0 /* Idle operation */
#define DESC_OP_MODE_MEMCPY 1 /* Pure-DMA operation */
#define DESC_OP_MODE_MEMSET 2 /* Mem-Fill operation */
#define DESC_OP_MODE_MEMINIT 3 /* Mem-Init operation */
#define DESC_OP_MODE_MEM_COMPARE 4 /* Mem-Compare operation */
#define DESC_OP_MODE_CRC32 5 /* CRC32 calculation */
#define DESC_OP_MODE_XOR 6 /* RAID5 (XOR) operation */
#define DESC_OP_MODE_RAID6 7 /* RAID6 P&Q-generation */
#define DESC_OP_MODE_RAID6_REC 8 /* RAID6 Recovery */
#define DESC_Q_BUFFER_ENABLE BIT(16)
#define DESC_P_BUFFER_ENABLE BIT(17)
#define DESC_IOD BIT(27)

	u32 buff_size;
	u32 fill_pattern_src_addr[4];
	u32 data_buff_addr[MV_XOR_V2_DESC_BUFF_D_ADDR_SIZE];
	u32 reserved[MV_XOR_V2_DESC_RESERVED_SIZE];
};

/**
 * struct mv_xor_v2_device - implements a xor device
 * @lock: lock for the engine
 * @dma_base: memory mapped DMA register base
 * @glob_base: memory mapped global register base
 * @clk: reference to the engine core clock
 * @reg_clk: reference to the optional register interface clock
 * @irq_tasklet: tasklet that runs descriptor completion callbacks
 * @free_sw_desc: linked list of free SW descriptors
 * @dmadev: dma device
 * @dmachan: dma channel
 * @hw_desq: HW descriptors queue
 * @hw_desq_virt: virtual address of DESCQ
 * @sw_desq: SW descriptors queue
 * @desc_size: HW descriptor size
 * @npendings: number of pending descriptors (for which tx_submit has
 * been called, but not yet issue_pending)
 * @hw_queue_idx: index of the next free slot in the HW descriptors queue
 * @msi_desc: MSI descriptor used to set up the IMSG interrupt
 */
struct mv_xor_v2_device {
	spinlock_t lock;
	void __iomem *dma_base;
	void __iomem *glob_base;
	struct clk *clk;
	struct clk *reg_clk;
	struct tasklet_struct irq_tasklet;
	struct list_head free_sw_desc;
	struct dma_device dmadev;
	struct dma_chan dmachan;
	dma_addr_t hw_desq;
	struct mv_xor_v2_descriptor *hw_desq_virt;
	struct mv_xor_v2_sw_desc *sw_desq;
	int desc_size;
	unsigned int npendings;
	unsigned int hw_queue_idx;
	struct msi_desc *msi_desc;
};

/**
 * struct mv_xor_v2_sw_desc - implements a xor SW descriptor
 * @idx: descriptor index
 * @async_tx: support for the async_tx api
 * @hw_desc: associated HW descriptor
 * @free_list: node of the free SW descriptors list
 */
struct mv_xor_v2_sw_desc {
	int idx;
	struct dma_async_tx_descriptor async_tx;
	struct mv_xor_v2_descriptor hw_desc;
	struct list_head free_list;
};

/*
 * Fill the data buffer addresses into a HW descriptor
 */
static void mv_xor_v2_set_data_buffers(struct mv_xor_v2_device *xor_dev,
					struct mv_xor_v2_descriptor *desc,
					dma_addr_t src, int index)
{
	int arr_index = ((index >> 1) * 3);

	/*
	 * Fill the buffer's addresses in the descriptor.
	 *
	 * The format of the buffers address for 2 sequential buffers
	 * X and X + 1:
	 *
	 *  First word:  Buffer-DX-Address-Low[31:0]
	 *  Second word: Buffer-DX+1-Address-Low[31:0]
	 *  Third word:  DX+1-Buffer-Address-High[47:32] [31:16]
	 *               DX-Buffer-Address-High[47:32] [15:0]
	 */
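	/*
	 * Illustrative example (hypothetical addresses within the 40-bit
	 * DMA mask set in probe): for D0 = 0xAB56789000 and
	 * D1 = 0xCD00002000 the three words hold 0x56789000, 0x00002000
	 * and 0x00CD00AB, respectively.
	 */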
	if ((index & 0x1) == 0) {
		desc->data_buff_addr[arr_index] = lower_32_bits(src);

		desc->data_buff_addr[arr_index + 2] &= ~0xFFFF;
		desc->data_buff_addr[arr_index + 2] |=
			upper_32_bits(src) & 0xFFFF;
	} else {
		desc->data_buff_addr[arr_index + 1] =
			lower_32_bits(src);

		desc->data_buff_addr[arr_index + 2] &= ~0xFFFF0000;
		desc->data_buff_addr[arr_index + 2] |=
			(upper_32_bits(src) & 0xFFFF) << 16;
	}
}

/*
 * notify the engine of new descriptors, and update the available index.
 */
static void mv_xor_v2_add_desc_to_desq(struct mv_xor_v2_device *xor_dev,
				       int num_of_desc)
{
	/* write the number of new descriptors in the DESQ. */
	writel(num_of_desc, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ADD_OFF);
}

/*
 * free HW descriptors
 */
static void mv_xor_v2_free_desc_from_desq(struct mv_xor_v2_device *xor_dev,
					  int num_of_desc)
{
	/* write the number of descriptors to free from the DESQ. */
	writel(num_of_desc, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DEALLOC_OFF);
}

/*
 * Set descriptor size
 * Return the HW descriptor size in bytes
 */
static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev)
{
	writel(MV_XOR_V2_DMA_DESQ_CTRL_128B,
	       xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_CTRL_OFF);

	return MV_XOR_V2_EXT_DESC_SIZE;
}

/*
 * Set the IMSG threshold
 */
static inline
void mv_xor_v2_enable_imsg_thrd(struct mv_xor_v2_device *xor_dev)
{
	u32 reg;

	/* Configure threshold of number of descriptors, and enable timer */
	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
	reg &= ~(MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
	reg |= (MV_XOR_V2_DONE_IMSG_THRD << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
	reg |= MV_XOR_V2_DMA_IMSG_TIMER_EN;
	writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);

	/* Configure Timer Threshold */
	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_TMOT);
	reg &= ~(MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK <<
		 MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT);
	reg |= (MV_XOR_V2_TIMER_THRD << MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT);
	writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_TMOT);
}

static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
{
	struct mv_xor_v2_device *xor_dev = data;
	unsigned int ndescs;
	u32 reg;

	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DONE_OFF);

	ndescs = ((reg >> MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT) &
		  MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK);

	/* No descriptors to process */
	if (!ndescs)
		return IRQ_NONE;

	/* schedule a tasklet to handle descriptors callbacks */
	tasklet_schedule(&xor_dev->irq_tasklet);

	return IRQ_HANDLED;
}

/*
 * submit a descriptor to the DMA engine
 */
static dma_cookie_t
mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
{
	void *dest_hw_desc;
	dma_cookie_t cookie;
	struct mv_xor_v2_sw_desc *sw_desc =
		container_of(tx, struct mv_xor_v2_sw_desc, async_tx);
	struct mv_xor_v2_device *xor_dev =
		container_of(tx->chan, struct mv_xor_v2_device, dmachan);

	dev_dbg(xor_dev->dmadev.dev,
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	/* assign cookie */
	spin_lock_bh(&xor_dev->lock);
	cookie = dma_cookie_assign(tx);

	/* copy the HW descriptor from the SW descriptor to the DESQ */
	dest_hw_desc = xor_dev->hw_desq_virt + xor_dev->hw_queue_idx;

	memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size);

	xor_dev->npendings++;
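	/* advance the write index, wrapping around the HW descriptor ring */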
	xor_dev->hw_queue_idx++;
	if (xor_dev->hw_queue_idx >= MV_XOR_V2_DESC_NUM)
		xor_dev->hw_queue_idx = 0;

	spin_unlock_bh(&xor_dev->lock);

	return cookie;
}

/*
 * Prepare a SW descriptor
 */
static struct mv_xor_v2_sw_desc *
mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
{
	struct mv_xor_v2_sw_desc *sw_desc;
	bool found = false;

	/* Lock the channel */
	spin_lock_bh(&xor_dev->lock);

	if (list_empty(&xor_dev->free_sw_desc)) {
		spin_unlock_bh(&xor_dev->lock);
		/* schedule tasklet to free some descriptors */
		tasklet_schedule(&xor_dev->irq_tasklet);
		return NULL;
	}

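	/* take the first free descriptor that the client has already acked */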
	list_for_each_entry(sw_desc, &xor_dev->free_sw_desc, free_list) {
		if (async_tx_test_ack(&sw_desc->async_tx)) {
			found = true;
			break;
		}
	}

	if (!found) {
		spin_unlock_bh(&xor_dev->lock);
		return NULL;
	}

	list_del(&sw_desc->free_list);

	/* Release the channel */
	spin_unlock_bh(&xor_dev->lock);

	return sw_desc;
}

/*
 * Prepare a HW descriptor for a memcpy operation
 */
static struct dma_async_tx_descriptor *
mv_xor_v2_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
			  dma_addr_t src, size_t len, unsigned long flags)
{
	struct mv_xor_v2_sw_desc *sw_desc;
	struct mv_xor_v2_descriptor *hw_descriptor;
	struct mv_xor_v2_device *xor_dev;

	xor_dev = container_of(chan, struct mv_xor_v2_device, dmachan);

	dev_dbg(xor_dev->dmadev.dev,
		"%s len: %zu src %pad dest %pad flags: %ld\n",
		__func__, len, &src, &dest, flags);

	sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
	if (!sw_desc)
		return NULL;

	sw_desc->async_tx.flags = flags;

	/* set the HW descriptor */
	hw_descriptor = &sw_desc->hw_desc;

	/* save the SW descriptor ID to restore when operation is done */
	hw_descriptor->desc_id = sw_desc->idx;

	/* Set the MEMCPY control word */
	hw_descriptor->desc_ctrl =
		DESC_OP_MODE_MEMCPY << DESC_OP_MODE_SHIFT;

	if (flags & DMA_PREP_INTERRUPT)
		hw_descriptor->desc_ctrl |= DESC_IOD;

	/* Set source address */
	hw_descriptor->fill_pattern_src_addr[0] = lower_32_bits(src);
	hw_descriptor->fill_pattern_src_addr[1] =
		upper_32_bits(src) & 0xFFFF;

	/* Set Destination address */
	hw_descriptor->fill_pattern_src_addr[2] = lower_32_bits(dest);
	hw_descriptor->fill_pattern_src_addr[3] =
		upper_32_bits(dest) & 0xFFFF;

	/* Set buffers size */
	hw_descriptor->buff_size = len;

	/* return the async tx descriptor */
	return &sw_desc->async_tx;
}

/*
 * Prepare a HW descriptor for a XOR operation
 */
static struct dma_async_tx_descriptor *
mv_xor_v2_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		       unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_v2_sw_desc *sw_desc;
	struct mv_xor_v2_descriptor *hw_descriptor;
	struct mv_xor_v2_device *xor_dev =
		container_of(chan, struct mv_xor_v2_device, dmachan);
	int i;

	if (src_cnt > MV_XOR_V2_CMD_LINE_NUM_MAX_D_BUF || src_cnt < 1)
		return NULL;

	dev_dbg(xor_dev->dmadev.dev,
		"%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
		__func__, src_cnt, len, &dest, flags);

	sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
	if (!sw_desc)
		return NULL;

	sw_desc->async_tx.flags = flags;

	/* set the HW descriptor */
	hw_descriptor = &sw_desc->hw_desc;

	/* save the SW descriptor ID to restore when operation is done */
	hw_descriptor->desc_id = sw_desc->idx;

	/* Set the XOR control word */
	hw_descriptor->desc_ctrl =
		DESC_OP_MODE_XOR << DESC_OP_MODE_SHIFT;
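	/* enable the P buffer: the XOR (RAID5 parity) destination */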
	hw_descriptor->desc_ctrl |= DESC_P_BUFFER_ENABLE;

	if (flags & DMA_PREP_INTERRUPT)
		hw_descriptor->desc_ctrl |= DESC_IOD;

	/* Set the data buffers */
	for (i = 0; i < src_cnt; i++)
		mv_xor_v2_set_data_buffers(xor_dev, hw_descriptor, src[i], i);

	hw_descriptor->desc_ctrl |=
		src_cnt << DESC_NUM_ACTIVE_D_BUF_SHIFT;

	/* Set Destination address */
	hw_descriptor->fill_pattern_src_addr[2] = lower_32_bits(dest);
	hw_descriptor->fill_pattern_src_addr[3] =
		upper_32_bits(dest) & 0xFFFF;

	/* Set buffers size */
	hw_descriptor->buff_size = len;

	/* return the async tx descriptor */
	return &sw_desc->async_tx;
}

/*
 * Prepare a HW descriptor for interrupt operation.
 */
static struct dma_async_tx_descriptor *
mv_xor_v2_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct mv_xor_v2_sw_desc *sw_desc;
	struct mv_xor_v2_descriptor *hw_descriptor;
	struct mv_xor_v2_device *xor_dev =
		container_of(chan, struct mv_xor_v2_device, dmachan);

	sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
	if (!sw_desc)
		return NULL;

	/* set the HW descriptor */
	hw_descriptor = &sw_desc->hw_desc;

	/* save the SW descriptor ID to restore when operation is done */
	hw_descriptor->desc_id = sw_desc->idx;

	/* Set the INTERRUPT control word */
	hw_descriptor->desc_ctrl =
		DESC_OP_MODE_NOP << DESC_OP_MODE_SHIFT;
	hw_descriptor->desc_ctrl |= DESC_IOD;

	/* return the async tx descriptor */
	return &sw_desc->async_tx;
}

/*
 * push pending transactions to hardware
 */
static void mv_xor_v2_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_v2_device *xor_dev =
		container_of(chan, struct mv_xor_v2_device, dmachan);

	spin_lock_bh(&xor_dev->lock);

	/*
	 * update the engine with the number of descriptors to
	 * process
	 */
	mv_xor_v2_add_desc_to_desq(xor_dev, xor_dev->npendings);
	xor_dev->npendings = 0;

	spin_unlock_bh(&xor_dev->lock);
}

static inline
int mv_xor_v2_get_pending_params(struct mv_xor_v2_device *xor_dev,
				 int *pending_ptr)
{
	u32 reg;

	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DONE_OFF);

	/* get the next pending descriptor index */
	*pending_ptr = ((reg >> MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_SHIFT) &
			MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_MASK);

	/* get the number of descriptors pending handling */
	return ((reg >> MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT) &
		MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK);
}

/*
 * handle the descriptors after the HW has processed them
 */
static void mv_xor_v2_tasklet(unsigned long data)
{
	struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data;
	int pending_ptr, num_of_pending, i;
	struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL;

	dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__);

	/* get the pending descriptors parameters */
	num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr);

	/* loop over the completed descriptors */
	for (i = 0; i < num_of_pending; i++) {
		struct mv_xor_v2_descriptor *next_pending_hw_desc =
			xor_dev->hw_desq_virt + pending_ptr;

		/* get the SW descriptor related to the HW descriptor */
		next_pending_sw_desc =
			&xor_dev->sw_desq[next_pending_hw_desc->desc_id];

		/* call the callback */
		if (next_pending_sw_desc->async_tx.cookie > 0) {
			/*
			 * update the channel's completed cookie - no
			 * lock is required, the IMSG threshold provides
			 * the locking
			 */
			dma_cookie_complete(&next_pending_sw_desc->async_tx);

			dma_descriptor_unmap(&next_pending_sw_desc->async_tx);
			dmaengine_desc_get_callback_invoke(
					&next_pending_sw_desc->async_tx, NULL);
		}

		dma_run_dependencies(&next_pending_sw_desc->async_tx);

		/* Lock the channel */
		spin_lock_bh(&xor_dev->lock);

		/* add the SW descriptor to the free descriptors list */
		list_add(&next_pending_sw_desc->free_list,
			 &xor_dev->free_sw_desc);

		/* Release the channel */
		spin_unlock_bh(&xor_dev->lock);

		/* increment the next descriptor */
		pending_ptr++;
		if (pending_ptr >= MV_XOR_V2_DESC_NUM)
			pending_ptr = 0;
	}

	if (num_of_pending != 0) {
		/* free the descriptors */
		mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending);
	}
}

/*
 * Set DMA Interrupt-message (IMSG) parameters
 */
static void mv_xor_v2_set_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	struct mv_xor_v2_device *xor_dev = dev_get_drvdata(desc->dev);

	writel(msg->address_lo,
	       xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_BALR_OFF);
	writel(msg->address_hi & 0xFFFF,
	       xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_BAHR_OFF);
	writel(msg->data,
	       xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_CDAT_OFF);
}

static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
{
	u32 reg;

	/* write the DESQ size to the DMA engine */
	writel(MV_XOR_V2_DESC_NUM,
	       xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_SIZE_OFF);

	/* write the DESQ address to the DMA engine */
	writel(lower_32_bits(xor_dev->hw_desq),
	       xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BALR_OFF);
	writel(upper_32_bits(xor_dev->hw_desq),
	       xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF);

	/*
	 * This is a temporary solution, until we activate the
	 * SMMU. Set the attributes for reading & writing data buffers
	 * & descriptors to:
	 *
	 *  - OuterShareable - Snoops will be performed on CPU caches
	 *  - Enable cacheable - Bufferable, Modifiable, Other Allocate
	 *    and Allocate
	 */
	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ARATTR_OFF);
	reg &= ~MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK;
	reg |= MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE |
	       MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE;
	writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ARATTR_OFF);

	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_AWATTR_OFF);
	reg &= ~MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK;
	reg |= MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE |
	       MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE;
	writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_AWATTR_OFF);

	/* BW CTRL - set values to optimize the XOR performance:
	 *
	 *  - Set WrBurstLen & RdBurstLen - the unit will issue a
	 *    maximum of 256B write/read transactions.
	 *  - Limit the number of outstanding write & read data
	 *    (OBB/IBB) requests to the maximal value.
	 */
	reg = ((MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_VAL <<
		MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_SHIFT) |
	       (MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_VAL <<
		MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_SHIFT) |
	       (MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_VAL <<
		MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_SHIFT) |
	       (MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_VAL <<
		MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_SHIFT));
	writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_BW_CTRL);

	/* Disable the AXI timer feature */
	reg = readl(xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);
	reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL;
	writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);

	/* enable the DMA engine */
	writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);

	return 0;
}

static int mv_xor_v2_suspend(struct platform_device *dev, pm_message_t state)
{
	struct mv_xor_v2_device *xor_dev = platform_get_drvdata(dev);

	/* Set the stop bit to halt the XOR unit */
	writel(0x1, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);

	return 0;
}

static int mv_xor_v2_resume(struct platform_device *dev)
{
	struct mv_xor_v2_device *xor_dev = platform_get_drvdata(dev);

	mv_xor_v2_set_desc_size(xor_dev);
	mv_xor_v2_enable_imsg_thrd(xor_dev);
	mv_xor_v2_descq_init(xor_dev);

	return 0;
}

static int mv_xor_v2_probe(struct platform_device *pdev)
{
	struct mv_xor_v2_device *xor_dev;
	struct resource *res;
	int i, ret = 0;
	struct dma_device *dma_dev;
	struct mv_xor_v2_sw_desc *sw_desc;
	struct msi_desc *msi_desc;

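	/* the HW descriptor layout must exactly fill its 128-byte DESQ slot */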
	BUILD_BUG_ON(sizeof(struct mv_xor_v2_descriptor) !=
		     MV_XOR_V2_EXT_DESC_SIZE);

	xor_dev = devm_kzalloc(&pdev->dev, sizeof(*xor_dev), GFP_KERNEL);
	if (!xor_dev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xor_dev->dma_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(xor_dev->dma_base))
		return PTR_ERR(xor_dev->dma_base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	xor_dev->glob_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(xor_dev->glob_base))
		return PTR_ERR(xor_dev->glob_base);

	platform_set_drvdata(pdev, xor_dev);

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
	if (ret)
		return ret;

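	/* the "reg" clock is optional: -ENOENT just means it is not provided */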
	xor_dev->reg_clk = devm_clk_get(&pdev->dev, "reg");
	if (PTR_ERR(xor_dev->reg_clk) != -ENOENT) {
		if (!IS_ERR(xor_dev->reg_clk)) {
			ret = clk_prepare_enable(xor_dev->reg_clk);
			if (ret)
				return ret;
		} else {
			return PTR_ERR(xor_dev->reg_clk);
		}
	}

	xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;
		goto disable_reg_clk;
	}
	if (!IS_ERR(xor_dev->clk)) {
		ret = clk_prepare_enable(xor_dev->clk);
		if (ret)
			goto disable_reg_clk;
	}

	ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1,
					     mv_xor_v2_set_msi_msg);
	if (ret)
		goto disable_clk;

	msi_desc = first_msi_entry(&pdev->dev);
	if (!msi_desc) {
		ret = -ENODEV;
		goto free_msi_irqs;
	}
	xor_dev->msi_desc = msi_desc;

	ret = devm_request_irq(&pdev->dev, msi_desc->irq,
			       mv_xor_v2_interrupt_handler, 0,
			       dev_name(&pdev->dev), xor_dev);
	if (ret)
		goto free_msi_irqs;

	tasklet_init(&xor_dev->irq_tasklet, mv_xor_v2_tasklet,
		     (unsigned long) xor_dev);

	xor_dev->desc_size = mv_xor_v2_set_desc_size(xor_dev);

	dma_cookie_init(&xor_dev->dmachan);

	/*
	 * allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	xor_dev->hw_desq_virt =
		dma_alloc_coherent(&pdev->dev,
				   xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
				   &xor_dev->hw_desq, GFP_KERNEL);
	if (!xor_dev->hw_desq_virt) {
		ret = -ENOMEM;
		goto free_msi_irqs;
	}

	/* alloc memory for the SW descriptors */
	xor_dev->sw_desq = devm_kcalloc(&pdev->dev,
					MV_XOR_V2_DESC_NUM, sizeof(*sw_desc),
					GFP_KERNEL);
	if (!xor_dev->sw_desq) {
		ret = -ENOMEM;
		goto free_hw_desq;
	}

	spin_lock_init(&xor_dev->lock);

	/* init the free SW descriptors list */
	INIT_LIST_HEAD(&xor_dev->free_sw_desc);

	/* add all SW descriptors to the free list */
	for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) {
		struct mv_xor_v2_sw_desc *sw_desc =
			xor_dev->sw_desq + i;
		sw_desc->idx = i;
		dma_async_tx_descriptor_init(&sw_desc->async_tx,
					     &xor_dev->dmachan);
		sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
		async_tx_ack(&sw_desc->async_tx);

		list_add(&sw_desc->free_list,
			 &xor_dev->free_sw_desc);
	}

	dma_dev = &xor_dev->dmadev;

	/* set DMA capabilities */
	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_XOR, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

	/* init dma link list */
	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_issue_pending = mv_xor_v2_issue_pending;
	dma_dev->dev = &pdev->dev;

	dma_dev->device_prep_dma_memcpy = mv_xor_v2_prep_dma_memcpy;
	dma_dev->device_prep_dma_interrupt = mv_xor_v2_prep_dma_interrupt;
	dma_dev->max_xor = 8;
	dma_dev->device_prep_dma_xor = mv_xor_v2_prep_dma_xor;

	xor_dev->dmachan.device = dma_dev;

	list_add_tail(&xor_dev->dmachan.device_node,
		      &dma_dev->channels);

	mv_xor_v2_enable_imsg_thrd(xor_dev);

	mv_xor_v2_descq_init(xor_dev);

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto free_hw_desq;

	dev_notice(&pdev->dev, "Marvell Version 2 XOR driver\n");

	return 0;

free_hw_desq:
	dma_free_coherent(&pdev->dev,
			  xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
			  xor_dev->hw_desq_virt, xor_dev->hw_desq);
free_msi_irqs:
	platform_msi_domain_free_irqs(&pdev->dev);
disable_clk:
	clk_disable_unprepare(xor_dev->clk);
disable_reg_clk:
	clk_disable_unprepare(xor_dev->reg_clk);
	return ret;
}

static int mv_xor_v2_remove(struct platform_device *pdev)
{
	struct mv_xor_v2_device *xor_dev = platform_get_drvdata(pdev);

	dma_async_device_unregister(&xor_dev->dmadev);

	dma_free_coherent(&pdev->dev,
			  xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
			  xor_dev->hw_desq_virt, xor_dev->hw_desq);

	devm_free_irq(&pdev->dev, xor_dev->msi_desc->irq, xor_dev);

	platform_msi_domain_free_irqs(&pdev->dev);

	tasklet_kill(&xor_dev->irq_tasklet);

	clk_disable_unprepare(xor_dev->clk);

	return 0;
}

#ifdef CONFIG_OF
static const struct of_device_id mv_xor_v2_dt_ids[] = {
	{ .compatible = "marvell,xor-v2", },
	{},
};
MODULE_DEVICE_TABLE(of, mv_xor_v2_dt_ids);
#endif

static struct platform_driver mv_xor_v2_driver = {
	.probe = mv_xor_v2_probe,
	.suspend = mv_xor_v2_suspend,
	.resume = mv_xor_v2_resume,
	.remove = mv_xor_v2_remove,
	.driver = {
		.name = "mv_xor_v2",
		.of_match_table = of_match_ptr(mv_xor_v2_dt_ids),
	},
};

module_platform_driver(mv_xor_v2_driver);

MODULE_DESCRIPTION("DMA engine driver for Marvell's Version 2 of XOR engine");
MODULE_LICENSE("GPL");