dma: mv_xor: remove the pool_size from platform_data
linux-2.6-block.git: drivers/dma/mv_xor.c

/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)		\
	((chan)->dmadev.dev)

static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = (1 << 31);
	hw_desc->phy_next_desc = 0;
	hw_desc->desc_command = (1 << 31);
}

static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_dest_addr;
}

static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
				int src_idx)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_src_addr[src_idx];
}


static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
				   u32 byte_count)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_next_desc = 0;
}

static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val)
{
	desc->value = val;
}

static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
				  dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_dest_addr = addr;
}

static int mv_chan_memset_slot_count(size_t len)
{
	return 1;
}

#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[index] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return __raw_readl(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	__raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr)
{
	__raw_writel(desc_addr, XOR_DEST_POINTER(chan));
}

static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size)
{
	__raw_writel(block_size, XOR_BLOCK_SIZE(chan));
}

static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value)
{
	__raw_writel(value, XOR_INIT_VALUE_LOW(chan));
	__raw_writel(value, XOR_INIT_VALUE_HIGH(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = __raw_readl(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	__raw_writel(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

static int mv_is_err_intr(u32 intr_cause)
{
	if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
		return 1;

	return 0;
}

static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val = ~(1 << (chan->idx * 16));
	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	__raw_writel(val, XOR_INTR_CAUSE(chan));
}

static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	__raw_writel(val, XOR_INTR_CAUSE(chan));
}

static int mv_can_chain(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc_slot *chain_old_tail = list_entry(
		desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);

	if (chain_old_tail->type != desc->type)
		return 0;
	if (desc->type == DMA_MEMSET)
		return 0;

	return 1;
}

static void mv_set_mode(struct mv_xor_chan *chan,
			enum dma_transaction_type type)
{
	u32 op_mode;
	u32 config = __raw_readl(XOR_CONFIG(chan));

	switch (type) {
	case DMA_XOR:
		op_mode = XOR_OPERATION_MODE_XOR;
		break;
	case DMA_MEMCPY:
		op_mode = XOR_OPERATION_MODE_MEMCPY;
		break;
	case DMA_MEMSET:
		op_mode = XOR_OPERATION_MODE_MEMSET;
		break;
	default:
		dev_err(mv_chan_to_devp(chan),
			"error: unsupported operation %d.\n",
			type);
		BUG();
		return;
	}

	config &= ~0x7;
	config |= op_mode;
	__raw_writel(config, XOR_CONFIG(chan));
	chan->current_type = type;
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	u32 activation;

	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
	activation = __raw_readl(XOR_ACTIVATION(chan));
	activation |= 0x1;
	__raw_writel(activation, XOR_ACTIVATION(chan));
}

static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = __raw_readl(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

static int mv_chan_xor_slot_count(size_t len, int src_cnt)
{
	return 1;
}

/**
 * mv_xor_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
			      struct mv_xor_desc_slot *slot)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
		__func__, __LINE__, slot);

	slot->slots_per_op = 0;

}

/*
 * mv_xor_start_new_chain - program the engine to operate on new chain headed by
 * sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
				   struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);
	if (sw_desc->type != mv_chan->current_type)
		mv_set_mode(mv_chan, sw_desc->type);

	if (sw_desc->type == DMA_MEMSET) {
		/* for memset requests we need to program the engine, no
		 * descriptors used.
		 */
		struct mv_xor_desc *hw_desc = sw_desc->hw_desc;
		mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr);
		mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
		mv_chan_set_value(mv_chan, sw_desc->value);
	} else {
		/* set the hardware chain */
		mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
	}
	mv_chan->pending += sw_desc->slot_cnt;
	mv_xor_issue_pending(&mv_chan->dmachan);
}

static dma_cookie_t
mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
	struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		/* unmap dma addresses
		 * (unmap_single vs unmap_page?)
		 */
		if (desc->group_head && desc->unmap_len) {
			struct mv_xor_desc_slot *unmap = desc->group_head;
			struct device *dev = mv_chan_to_devp(mv_chan);
			u32 len = unmap->unmap_len;
			enum dma_ctrl_flags flags = desc->async_tx.flags;
			u32 src_cnt;
			dma_addr_t addr;
			dma_addr_t dest;

			src_cnt = unmap->unmap_src_cnt;
			dest = mv_desc_get_dest_addr(unmap);
			if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
				enum dma_data_direction dir;

				if (src_cnt > 1) /* is xor ? */
					dir = DMA_BIDIRECTIONAL;
				else
					dir = DMA_FROM_DEVICE;
				dma_unmap_page(dev, dest, len, dir);
			}

			if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
				while (src_cnt--) {
					addr = mv_desc_get_src_addr(unmap,
								    src_cnt);
					if (addr == dest)
						continue;
					dma_unmap_page(dev, addr, len,
						       DMA_TO_DEVICE);
				}
			}
			desc->group_head = NULL;
		}
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_del(&iter->completed_node);
			mv_xor_free_slots(mv_chan, iter);
		}
	}
	return 0;
}

static int
mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
	struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);
	list_del(&desc->chain_node);
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
		return 0;
	}

	mv_xor_free_slots(mv_chan, desc);
	return 0;
}

static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int seen_current = 0;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_xor_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (iter->async_tx.phys == current_desc) {
			seen_current = 1;
			if (busy)
				break;
		}

		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);

		if (mv_xor_clean_slot(iter, mv_chan))
			break;
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		struct mv_xor_desc_slot *chain_head;
		chain_head = list_entry(mv_chan->chain.next,
					struct mv_xor_desc_slot,
					chain_node);

		mv_xor_start_new_chain(mv_chan, chain_head);
	}

	if (cookie > 0)
		mv_chan->dmachan.completed_cookie = cookie;
}

static void
mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	spin_lock_bh(&mv_chan->lock);
	__mv_xor_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
	mv_xor_slot_cleanup(chan);
}

static struct mv_xor_desc_slot *
mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
		   int slots_per_op)
{
	struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
	LIST_HEAD(chain);
	int slots_found, retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation can not be found start searching
	 * from the beginning of the list
	 */
retry:
	slots_found = 0;
	if (retry == 0)
		iter = mv_chan->last_used;
	else
		iter = list_entry(&mv_chan->all_slots,
			struct mv_xor_desc_slot,
			slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slots_per_op) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;

			slots_found = 0;
			continue;
		}

		/* start the allocation if the slot is correctly aligned */
		if (!slots_found++)
			alloc_start = iter;

		if (slots_found == num_slots) {
			struct mv_xor_desc_slot *alloc_tail = NULL;
			struct mv_xor_desc_slot *last_used = NULL;
			iter = alloc_start;
			while (num_slots) {
				int i;

				/* pre-ack all but the last descriptor */
				async_tx_ack(&iter->async_tx);

				list_add_tail(&iter->chain_node, &chain);
				alloc_tail = iter;
				iter->async_tx.cookie = 0;
				iter->slot_cnt = num_slots;
				iter->xor_check_result = NULL;
				for (i = 0; i < slots_per_op; i++) {
					iter->slots_per_op = slots_per_op - i;
					last_used = iter;
					iter = list_entry(iter->slot_node.next,
						struct mv_xor_desc_slot,
						slot_node);
				}
				num_slots -= slots_per_op;
			}
			alloc_tail->group_head = alloc_start;
			alloc_tail->async_tx.cookie = -EBUSY;
			list_splice(&chain, &alloc_tail->tx_list);
			mv_chan->last_used = last_used;
			mv_desc_clear_next_desc(alloc_start);
			mv_desc_clear_next_desc(alloc_tail);
			return alloc_tail;
		}
	}
	if (!retry++)
		goto retry;

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

/************************ DMA engine API functions ****************************/
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *grp_start, *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	grp_start = sw_desc->group_head;

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    chain_node);
		list_splice_init(&grp_start->tx_list,
				 &old_chain_tail->chain_node);

		if (!mv_can_chain(grp_start))
			goto submit_done;

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n",
			old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_xor_start_new_chain(mv_chan, grp_start);

submit_done:
	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}

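/*
 * Illustrative only, not part of the original driver: the call sequence a
 * dmaengine client follows against the entry points above, mirroring what
 * mv_xor_memcpy_self_test() does later in this file. The variable names and
 * the surrounding DMA mapping, waiting and cleanup steps are assumed and
 * omitted here.
 *
 *	tx = mv_xor_prep_dma_memcpy(chan, dest_dma, src_dma, len, 0);
 *	cookie = mv_xor_tx_submit(tx);	  // normally reached via tx->tx_submit()
 *	mv_xor_issue_pending(chan);	  // kicks the engine once enough is queued
 *	async_tx_ack(tx);
 *	...				  // give the transfer time to complete
 *	status = mv_xor_status(chan, cookie, NULL);
 */
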
/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	char *hw_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			printk(KERN_INFO "MV XOR Channel only initialized"
				" %d descriptor slots", idx);
			break;
		}
		hw_desc = (char *) mv_chan->dma_desc_pool_virt;
		slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		INIT_LIST_HEAD(&slot->tx_list);
		hw_desc = (char *) mv_chan->dma_desc_pool;
		slot->async_tx.phys =
			(dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->slot_node, &mv_chan->all_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	if (mv_chan->slots_allocated && !mv_chan->last_used)
		mv_chan->last_used = list_entry(mv_chan->all_slots.next,
						struct mv_xor_desc_slot,
						slot_node);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots last_used: %p\n",
		mv_chan->slots_allocated, mv_chan->last_used);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s dest: %x src %x len: %u flags: %ld\n",
		__func__, dest, src, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memcpy_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMCPY;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_src_addr(grp_start, 0, src);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
		       size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s dest: %x len: %u flags: %ld\n",
		__func__, dest, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memset_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMSET;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_block_fill_val(grp_start, value);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p \n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: dest %x %u flags: %ld\n",
		__func__, src_cnt, len, dest, flags);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		/* the byte count field is the same as in memcpy desc*/
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		sw_desc->unmap_src_cnt = src_cnt;
		sw_desc->unmap_len = len;
		while (src_cnt--)
			mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p \n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	mv_xor_slot_cleanup(mv_chan);

	spin_lock_bh(&mv_chan->lock);
	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {
		in_use_descs++;
		list_del(&iter->completed_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}
	mv_chan->last_used = NULL;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transactions state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS) {
		mv_xor_clean_completed_slots(mv_chan);
		return ret;
	}
	mv_xor_slot_cleanup(mv_chan);

	return dma_cookie_status(chan, cookie, txstate);
}

static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = __raw_readl(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan),
		"config 0x%08x.\n", val);

	val = __raw_readl(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan),
		"activation 0x%08x.\n", val);

	val = __raw_readl(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan),
		"intr cause 0x%08x.\n", val);

	val = __raw_readl(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan),
		"intr mask 0x%08x.\n", val);

	val = __raw_readl(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan),
		"error cause 0x%08x.\n", val);

	val = __raw_readl(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan),
		"error addr 0x%08x.\n", val);
}

static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
					 u32 intr_cause)
{
	if (intr_cause & (1 << 4)) {
		dev_dbg(mv_chan_to_devp(chan),
			"ignore this error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan),
		"error on chan %d. intr cause 0x%08x.\n",
		chan->idx, intr_cause);

	mv_dump_xor_regs(chan);
	BUG();
}

static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (mv_is_err_intr(intr_cause))
		mv_xor_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_xor_device_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */
#define MV_XOR_TEST_SIZE 2000

static int __devinit mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
	int i;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	int err = 0;

	src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < MV_XOR_TEST_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	dest_dma = dma_map_single(dma_chan->device->dev, dest,
				  MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);

	src_dma = dma_map_single(dma_chan->device->dev, src,
				 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    MV_XOR_TEST_SIZE, 0);
	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_SUCCESS) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int __devinit
mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
				DMA_FROM_DEVICE);

	for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					   0, PAGE_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);

	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_SUCCESS) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling."
				" index %d, data %x, expected %x\n", i,
				ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = MV_XOR_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
	struct dma_chan *chan, *_chan;
	struct device *dev = mv_chan->dmadev.dev;

	dma_async_device_unregister(&mv_chan->dmadev);

	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);

	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
				 device_node) {
		list_del(&chan->device_node);
	}

	return 0;
}

static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
		   struct platform_device *pdev,
		   int idx, dma_cap_mask_t cap_mask, int irq)
{
	int ret = 0;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan) {
		ret = -ENOMEM;
		goto err_free_dma;
	}

	mv_chan->idx = idx;

	dma_dev = &mv_chan->dmadev;

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	mv_chan->dma_desc_pool_virt =
	  dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
				 &mv_chan->dma_desc_pool, GFP_KERNEL);
	if (!mv_chan->dma_desc_pool_virt)
		return ERR_PTR(-ENOMEM);

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = cap_mask;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan->mmr_base = xordev->xor_base;
	if (!mv_chan->mmr_base) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_xor_device_clear_err_status(mv_chan);

	ret = devm_request_irq(&pdev->dev, irq,
			       mv_xor_interrupt_handler,
			       0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	mv_set_mode(mv_chan, DMA_MEMCPY);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->all_slots);
	mv_chan->dmachan.device = dma_dev;
	dma_cookie_init(&mv_chan->dmachan);

	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_xor_memcpy_self_test(mv_chan);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_dma;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_xor_xor_self_test(mv_chan);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_dma;
	}

	dev_info(&pdev->dev, "Marvell XOR: "
		 "( %s%s%s%s)\n",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	return mv_chan;

err_free_dma:
	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	return ERR_PTR(ret);
}

static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
			 const struct mbus_dram_target_info *dram)
{
	void __iomem *base = xordev->xor_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
}

static int mv_xor_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram;
	struct mv_xor_device *xordev;
	struct mv_xor_platform_data *pdata = pdev->dev.platform_data;
	struct resource *res;
	int i, ret;

	dev_notice(&pdev->dev, "Marvell XOR driver\n");

	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
	if (!xordev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
	if (!xordev->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
	if (!xordev->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, xordev);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	dram = mv_mbus_dram_info();
	if (dram)
		mv_xor_conf_mbus_windows(xordev, dram);

	/* Not all platforms can gate the clock, so it is not
	 * an error if the clock does not exist.
	 */
	xordev->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(xordev->clk))
		clk_prepare_enable(xordev->clk);

	if (pdata && pdata->channels) {
		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
			struct mv_xor_channel_data *cd;
			int irq;

			cd = &pdata->channels[i];
			if (!cd) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			irq = platform_get_irq(pdev, i);
			if (irq < 0) {
				ret = irq;
				goto err_channel_add;
			}

			xordev->channels[i] =
				mv_xor_channel_add(xordev, pdev, i,
						   cd->cap_mask, irq);
			if (IS_ERR(xordev->channels[i])) {
				ret = PTR_ERR(xordev->channels[i]);
				goto err_channel_add;
			}
		}
	}

	return 0;

err_channel_add:
	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
		if (xordev->channels[i])
			mv_xor_channel_remove(xordev->channels[i]);

	clk_disable_unprepare(xordev->clk);
	clk_put(xordev->clk);
	return ret;
}

static int mv_xor_remove(struct platform_device *pdev)
{
	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		if (xordev->channels[i])
			mv_xor_channel_remove(xordev->channels[i]);
	}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return 0;
}

static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.remove		= mv_xor_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= MV_XOR_NAME,
	},
};


static int __init mv_xor_init(void)
{
	return platform_driver_register(&mv_xor_driver);
}
module_init(mv_xor_init);

/* it's currently unsafe to unload this module */
#if 0
static void __exit mv_xor_exit(void)
{
	platform_driver_unregister(&mv_xor_driver);
	return;
}

module_exit(mv_xor_exit);
#endif

MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");