drivers/dma/mv_xor.c
/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/cpumask.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

enum mv_xor_type {
	XOR_ORION,
	XOR_ARMADA_38X,
	XOR_ARMADA_37XX,
};

enum mv_xor_mode {
	XOR_MODE_IN_REG,
	XOR_MODE_IN_DESC,
};

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)		\
	((chan)->dmadev.dev)

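/*
 * Software descriptor lifecycle (summarizing the list handling in this
 * file): slots move from free_slots to allocated_slots in
 * mv_chan_alloc_slot(), onto the channel's chain in mv_xor_tx_submit(),
 * and once the hardware reports success they are moved either to
 * completed_slots (until the client acks the transaction) or straight
 * back to free_slots by mv_desc_clean_slot().
 */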
static void mv_desc_init(struct mv_xor_desc_slot *desc,
			 dma_addr_t addr, u32 byte_count,
			 enum dma_ctrl_flags flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = XOR_DESC_DMA_OWNED;
	hw_desc->phy_next_desc = 0;
	/* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
	hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
				XOR_DESC_EOD_INT_EN : 0;
	hw_desc->phy_dest_addr = addr;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	switch (desc->type) {
	case DMA_XOR:
	case DMA_INTERRUPT:
		hw_desc->desc_command |= XOR_DESC_OPERATION_XOR;
		break;
	case DMA_MEMCPY:
		hw_desc->desc_command |= XOR_DESC_OPERATION_MEMCPY;
		break;
	default:
		BUG();
		return;
	}
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return readl_relaxed(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

static void mv_chan_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val;

	val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
	val = ~(val << (chan->idx * 16));
	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_chan_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_chan_set_mode(struct mv_xor_chan *chan,
			     u32 op_mode)
{
	u32 config = readl_relaxed(XOR_CONFIG(chan));

	config &= ~0x7;
	config |= op_mode;

#if defined(__BIG_ENDIAN)
	config |= XOR_DESCRIPTOR_SWAP;
#else
	config &= ~XOR_DESCRIPTOR_SWAP;
#endif

	writel_relaxed(config, XOR_CONFIG(chan));
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");

	/* writel ensures all descriptors are flushed before activation */
	writel(BIT(0), XOR_ACTIVATION(chan));
}

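/*
 * Channel-status note (inferred from the code below, not from the
 * datasheet): bits [5:4] of the activation register hold the channel
 * state, and the driver only distinguishes "active" (1) from all
 * other encodings.
 */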
static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = readl_relaxed(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

/*
 * mv_chan_start_new_chain - program the engine to operate on new
 * chain headed by sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_chan_start_new_chain(struct mv_xor_chan *mv_chan,
				    struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);

	/* set the hardware chain */
	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);

	mv_chan->pending++;
	mv_xor_issue_pending(&mv_chan->dmachan);
}

static dma_cookie_t
mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
				struct mv_xor_chan *mv_chan,
				dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		dma_descriptor_unmap(&desc->async_tx);
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 node) {

		if (async_tx_test_ack(&iter->async_tx))
			list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	return 0;
}

static int
mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
		   struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);

	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx))
		/* move this slot to the completed_slots */
		list_move_tail(&desc->node, &mv_chan->completed_slots);
	else
		list_move_tail(&desc->node, &mv_chan->free_slots);

	return 0;
}

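/*
 * Cleanup strategy (summary of the function below): walk the chain from
 * the oldest descriptor and retire every one the hardware has marked
 * XOR_DESC_SUCCESS. If the engine has gone idle while descriptors are
 * still queued, it is restarted: from the list head when the current
 * descriptor was itself retired, from the descriptor after the current
 * one when more work is queued behind it, or the tasklet is simply
 * rescheduled when only unfinished descriptors remain.
 */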
/* This function must be called with the mv_xor_chan spinlock held */
static void mv_chan_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int current_cleaned = 0;
	struct mv_xor_desc *hw_desc;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_chan_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 node) {

		/* clean finished descriptors */
		hw_desc = iter->hw_desc;
		if (hw_desc->status & XOR_DESC_SUCCESS) {
			cookie = mv_desc_run_tx_complete_actions(iter, mv_chan,
								 cookie);

			/* done processing desc, clean slot */
			mv_desc_clean_slot(iter, mv_chan);

			/* break if we just cleaned the current descriptor */
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 1;
				break;
			}
		} else {
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 0;
				break;
			}
		}
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		if (current_cleaned) {
			/*
			 * current descriptor cleaned and removed, run
			 * from list head
			 */
			iter = list_entry(mv_chan->chain.next,
					  struct mv_xor_desc_slot,
					  node);
			mv_chan_start_new_chain(mv_chan, iter);
		} else {
			if (!list_is_last(&iter->node, &mv_chan->chain)) {
				/*
				 * descriptors are still waiting after
				 * current, trigger them
				 */
				iter = list_entry(iter->node.next,
						  struct mv_xor_desc_slot,
						  node);
				mv_chan_start_new_chain(mv_chan, iter);
			} else {
				/*
				 * some descriptors are still waiting
				 * to be cleaned
				 */
				tasklet_schedule(&mv_chan->irq_tasklet);
			}
		}
	}

	if (cookie > 0)
		mv_chan->dmachan.completed_cookie = cookie;
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;

	spin_lock_bh(&chan->lock);
	mv_chan_slot_cleanup(chan);
	spin_unlock_bh(&chan->lock);
}

static struct mv_xor_desc_slot *
mv_chan_alloc_slot(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter;

	spin_lock_bh(&mv_chan->lock);

	if (!list_empty(&mv_chan->free_slots)) {
		iter = list_first_entry(&mv_chan->free_slots,
					struct mv_xor_desc_slot,
					node);

		list_move_tail(&iter->node, &mv_chan->allocated_slots);

		spin_unlock_bh(&mv_chan->lock);

		/* pre-ack descriptor */
		async_tx_ack(&iter->async_tx);
		iter->async_tx.cookie = -EBUSY;

		return iter;

	}

	spin_unlock_bh(&mv_chan->lock);

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

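/*
 * For reference, a minimal sketch of how a dmaengine client drives one
 * of these channels through the generic API (this is not part of the
 * driver; "chan" is assumed to come from dma_request_channel() with
 * DMA_MEMCPY capability, and src_dma/dest_dma/len are assumed to be
 * already DMA-mapped):
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *	enum dma_status status;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dest_dma, src_dma,
 *						   len, DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);		(lands in mv_xor_tx_submit)
 *	dma_async_issue_pending(chan);		(lands in mv_xor_issue_pending)
 *	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 */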
/************************ DMA engine API functions ****************************/
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_move_tail(&sw_desc->node, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    node);
		list_move_tail(&sw_desc->node, &mv_chan->chain);

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
			&old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_chan_start_new_chain(mv_chan, sw_desc);

	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}

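/*
 * Descriptor slots are carved out of the coherent pool allocated in
 * mv_xor_channel_add(): MV_XOR_POOL_SIZE bytes divided into
 * MV_XOR_POOL_SIZE / MV_XOR_SLOT_SIZE hardware descriptors, each
 * paired with a kzalloc'd software slot by the function below.
 */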
/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	void *virt_desc;
	dma_addr_t dma_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			dev_info(mv_chan_to_devp(mv_chan),
				 "channel only initialized %d descriptor slots",
				 idx);
			break;
		}
		virt_desc = mv_chan->dma_desc_pool_virt;
		slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->node);
		dma_desc = mv_chan->dma_desc_pool;
		slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->node, &mv_chan->free_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots\n",
		mv_chan->slots_allocated);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

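/*
 * Note on per-descriptor limits enforced below: the engine cannot handle
 * transfers shorter than MV_XOR_MIN_BYTE_COUNT or longer than
 * MV_XOR_MAX_BYTE_COUNT, and an XOR operation may combine at most eight
 * sources (dma_dev->max_xor is set to 8 at channel-add time).
 */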
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
		__func__, src_cnt, len, &dest, flags);

	sw_desc = mv_chan_alloc_slot(mv_chan);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		mv_desc_init(sw_desc, dest, len, flags);
		if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
			mv_desc_set_mode(sw_desc);
		while (src_cnt--)
			mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
	}

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		       size_t len, unsigned long flags)
{
	/*
	 * A MEMCPY operation is identical to an XOR operation with only
	 * a single source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	dma_addr_t src, dest;
	size_t len;

	src = mv_chan->dummy_src_addr;
	dest = mv_chan->dummy_dst_addr;
	len = MV_XOR_MIN_BYTE_COUNT;

	/*
	 * We implement the DMA_INTERRUPT operation as a minimum sized
	 * XOR operation with a single dummy source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	spin_lock_bh(&mv_chan->lock);

	mv_chan_slot_cleanup(mv_chan);

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->allocated_slots,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->free_slots, node) {
		list_del(&iter->node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transaction state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_bh(&mv_chan->lock);
	mv_chan_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);

	return dma_cookie_status(chan, cookie, txstate);
}

static void mv_chan_dump_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = readl_relaxed(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);

	val = readl_relaxed(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
}

static void mv_chan_err_interrupt_handler(struct mv_xor_chan *chan,
					  u32 intr_cause)
{
	if (intr_cause & XOR_INT_ERR_DECODE) {
		dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n",
		chan->idx, intr_cause);

	mv_chan_dump_regs(chan);
	WARN_ON(1);
}

static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (intr_cause & XOR_INTR_ERRORS)
		mv_chan_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_chan_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */

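/*
 * Outline of the memcpy self-test below: fill one page with a known
 * pattern, DMA-map source and destination, prepare and submit a single
 * memcpy descriptor, kick the channel, give it a millisecond, and then
 * compare the buffers after syncing the destination back to the CPU.
 */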
static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
	int i, ret;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	int err = 0;

	src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < PAGE_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src),
			       (size_t)src & ~PAGE_MASK, PAGE_SIZE,
			       DMA_TO_DEVICE);
	unmap->addr[0] = src_dma;

	ret = dma_mapping_error(dma_chan->device->dev, src_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->to_cnt = 1;

	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest),
				(size_t)dest & ~PAGE_MASK, PAGE_SIZE,
				DMA_FROM_DEVICE);
	unmap->addr[1] = dest_dma;

	ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, PAGE_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int
mv_chan_xor_self_test(struct mv_xor_chan *mv_chan)
{
	int i, src_idx, ret;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;
	int src_count = MV_XOR_NUM_SRC_TEST;

	for (src_idx = 0; src_idx < src_count; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < src_count; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < src_count; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
					 GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	/* test xor */
	for (i = 0; i < src_count; i++) {
		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					      0, PAGE_SIZE, DMA_TO_DEVICE);
		dma_srcs[i] = unmap->addr[i];
		ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
		if (ret) {
			err = -ENOMEM;
			goto free_resources;
		}
		unmap->to_cnt++;
	}

	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
					      DMA_FROM_DEVICE);
	dest_dma = unmap->addr[src_count];
	ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 src_count, PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
				i, ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = src_count;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
	struct dma_chan *chan, *_chan;
	struct device *dev = mv_chan->dmadev.dev;

	dma_async_device_unregister(&mv_chan->dmadev);

	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	dma_unmap_single(dev, mv_chan->dummy_src_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	dma_unmap_single(dev, mv_chan->dummy_dst_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
				 device_node) {
		list_del(&chan->device_node);
	}

	free_irq(mv_chan->irq, mv_chan);

	return 0;
}

static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
		   struct platform_device *pdev,
		   int idx, dma_cap_mask_t cap_mask, int irq)
{
	int ret = 0;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan)
		return ERR_PTR(-ENOMEM);

	mv_chan->idx = idx;
	mv_chan->irq = irq;
	if (xordev->xor_type == XOR_ORION)
		mv_chan->op_in_desc = XOR_MODE_IN_REG;
	else
		mv_chan->op_in_desc = XOR_MODE_IN_DESC;

	dma_dev = &mv_chan->dmadev;

	/*
	 * These source and destination dummy buffers are used to implement
	 * a DMA_INTERRUPT operation as a minimum-sized XOR operation.
	 * Hence, we only need to map the buffers at initialization-time.
	 */
	mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	mv_chan->dma_desc_pool_virt =
	  dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool,
		       GFP_KERNEL);
	if (!mv_chan->dma_desc_pool_virt)
		return ERR_PTR(-ENOMEM);

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = cap_mask;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan->mmr_base = xordev->xor_base;
	mv_chan->mmr_high_base = xordev->xor_high_base;
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_chan_clear_err_status(mv_chan);

	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
			  0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
		mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_IN_DESC);
	else
		mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_XOR);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->free_slots);
	INIT_LIST_HEAD(&mv_chan->allocated_slots);
	mv_chan->dmachan.device = dma_dev;
	dma_cookie_init(&mv_chan->dmachan);

	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_chan_memcpy_self_test(mv_chan);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_chan_xor_self_test(mv_chan);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
		 mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	return mv_chan;

err_free_irq:
	free_irq(mv_chan->irq, mv_chan);
err_free_dma:
	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	return ERR_PTR(ret);
}

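/*
 * Program the XOR engine's MBus address-decoding windows so that it
 * sees the same DRAM layout as the CPU: one window per DRAM chip
 * select, carrying that chip select's base, size and attributes. The
 * same enable/access-control word is written to both WINDOW_BAR_ENABLE
 * registers, which appear to correspond to the engine's two channels.
 */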
static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
			 const struct mbus_dram_target_info *dram)
{
	void __iomem *base = xordev->xor_high_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}

static void
mv_xor_conf_mbus_windows_a3700(struct mv_xor_device *xordev)
{
	void __iomem *base = xordev->xor_high_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}
	/*
	 * For Armada3700 open a default 4GB MBus window. The DRAM-related
	 * configuration is done at the AXIS level.
	 */
	writel(0xffff0000, base + WINDOW_SIZE(0));
	win_enable |= 1;
	win_enable |= 3 << 16;

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}

/*
 * Since this XOR driver is basically used only for RAID5, we don't
 * need to care about synchronizing ->suspend with DMA activity,
 * because the DMA engine will naturally be quiet due to the block
 * devices being suspended.
 */
static int mv_xor_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		struct mv_xor_chan *mv_chan = xordev->channels[i];

		if (!mv_chan)
			continue;

		mv_chan->saved_config_reg =
			readl_relaxed(XOR_CONFIG(mv_chan));
		mv_chan->saved_int_mask_reg =
			readl_relaxed(XOR_INTR_MASK(mv_chan));
	}

	return 0;
}

static int mv_xor_resume(struct platform_device *dev)
{
	struct mv_xor_device *xordev = platform_get_drvdata(dev);
	const struct mbus_dram_target_info *dram;
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		struct mv_xor_chan *mv_chan = xordev->channels[i];

		if (!mv_chan)
			continue;

		writel_relaxed(mv_chan->saved_config_reg,
			       XOR_CONFIG(mv_chan));
		writel_relaxed(mv_chan->saved_int_mask_reg,
			       XOR_INTR_MASK(mv_chan));
	}

	if (xordev->xor_type == XOR_ARMADA_37XX) {
		mv_xor_conf_mbus_windows_a3700(xordev);
		return 0;
	}

	dram = mv_mbus_dram_info();
	if (dram)
		mv_xor_conf_mbus_windows(xordev, dram);

	return 0;
}

static const struct of_device_id mv_xor_dt_ids[] = {
	{ .compatible = "marvell,orion-xor", .data = (void *)XOR_ORION },
	{ .compatible = "marvell,armada-380-xor", .data = (void *)XOR_ARMADA_38X },
	{ .compatible = "marvell,armada-3700-xor", .data = (void *)XOR_ARMADA_37XX },
	{},
};

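/*
 * A sketch of the corresponding device-tree node (based on the
 * compatible strings above and the child-node parsing in mv_xor_probe();
 * see the in-tree binding documents for the authoritative description,
 * and treat the interrupt numbers here as placeholders):
 *
 *	xor@60800 {
 *		compatible = "marvell,orion-xor";
 *		reg = <0x60800 0x100
 *		       0x60a00 0x100>;
 *
 *		xor00 {
 *			interrupts = <51>;
 *		};
 *		xor01 {
 *			interrupts = <52>;
 *		};
 *	};
 */
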
static unsigned int mv_xor_engine_count;

static int mv_xor_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram;
	struct mv_xor_device *xordev;
	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct resource *res;
	unsigned int max_engines, max_channels;
	int i, ret;

	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");

	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
	if (!xordev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
	if (!xordev->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
	if (!xordev->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, xordev);

	/*
	 * We need to know which type of XOR device we use before
	 * setting up. In non-dt case it can only be the legacy one.
	 */
	xordev->xor_type = XOR_ORION;
	if (pdev->dev.of_node) {
		const struct of_device_id *of_id =
			of_match_device(mv_xor_dt_ids,
					&pdev->dev);

		xordev->xor_type = (uintptr_t)of_id->data;
	}

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (xordev->xor_type == XOR_ARMADA_37XX) {
		mv_xor_conf_mbus_windows_a3700(xordev);
	} else {
		dram = mv_mbus_dram_info();
		if (dram)
			mv_xor_conf_mbus_windows(xordev, dram);
	}

	/* Not all platforms can gate the clock, so it is not
	 * an error if the clock does not exist.
	 */
	xordev->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(xordev->clk))
		clk_prepare_enable(xordev->clk);

	/*
	 * We don't want to have more than one channel per CPU in
	 * order for async_tx to perform well. So we limit the number
	 * of engines and channels so that we take into account this
	 * constraint. Note that we also want to use channels from
	 * separate engines when possible. For the dual-CPU Armada 3700
	 * SoC, which has a single XOR engine, allow using both of its
	 * channels.
	 */
	max_engines = num_present_cpus();
	if (xordev->xor_type == XOR_ARMADA_37XX)
		max_channels = num_present_cpus();
	else
		max_channels = min_t(unsigned int,
				     MV_XOR_MAX_CHANNELS,
				     DIV_ROUND_UP(num_present_cpus(), 2));

	if (mv_xor_engine_count >= max_engines)
		return 0;

	if (pdev->dev.of_node) {
		struct device_node *np;
		int i = 0;

		for_each_child_of_node(pdev->dev.of_node, np) {
			struct mv_xor_chan *chan;
			dma_cap_mask_t cap_mask;
			int irq;

			if (i >= max_channels)
				continue;

			dma_cap_zero(cap_mask);
			dma_cap_set(DMA_MEMCPY, cap_mask);
			dma_cap_set(DMA_XOR, cap_mask);
			dma_cap_set(DMA_INTERRUPT, cap_mask);

			irq = irq_of_parse_and_map(np, 0);
			if (!irq) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				irq_dispose_mapping(irq);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
			i++;
		}
	} else if (pdata && pdata->channels) {
		for (i = 0; i < max_channels; i++) {
			struct mv_xor_channel_data *cd;
			struct mv_xor_chan *chan;
			int irq;

			cd = &pdata->channels[i];
			if (!cd) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			irq = platform_get_irq(pdev, i);
			if (irq < 0) {
				ret = irq;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cd->cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
		}
	}

	return 0;

err_channel_add:
	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
		if (xordev->channels[i]) {
			mv_xor_channel_remove(xordev->channels[i]);
			if (pdev->dev.of_node)
				irq_dispose_mapping(xordev->channels[i]->irq);
		}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return ret;
}

static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.suspend	= mv_xor_suspend,
	.resume		= mv_xor_resume,
	.driver		= {
		.name		= MV_XOR_NAME,
		.of_match_table	= of_match_ptr(mv_xor_dt_ids),
	},
};


static int __init mv_xor_init(void)
{
	return platform_driver_register(&mv_xor_driver);
}
device_initcall(mv_xor_init);

/*
MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");
*/