dma: remove use of __devexit_p
drivers/dma/mv_xor.c
1/*
2 * offload engine driver for the Marvell XOR engine
3 * Copyright (C) 2007, 2008, Marvell International Ltd.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19#include <linux/init.h>
20#include <linux/module.h>
 21#include <linux/slab.h>
22#include <linux/delay.h>
23#include <linux/dma-mapping.h>
24#include <linux/spinlock.h>
25#include <linux/interrupt.h>
26#include <linux/platform_device.h>
27#include <linux/memory.h>
 28#include <linux/clk.h>
 29#include <linux/platform_data/dma-mv_xor.h>
 30
 31#include "dmaengine.h"
32#include "mv_xor.h"
33
34static void mv_xor_issue_pending(struct dma_chan *chan);
35
36#define to_mv_xor_chan(chan) \
37 container_of(chan, struct mv_xor_chan, common)
38
39#define to_mv_xor_device(dev) \
40 container_of(dev, struct mv_xor_device, common)
41
42#define to_mv_xor_slot(tx) \
43 container_of(tx, struct mv_xor_desc_slot, async_tx)
44
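/*
 * Initialize a hardware descriptor. Note (assumption, the driver only
 * uses raw magic values here): bit 31 of the status word marks the
 * descriptor as owned by the XOR engine, and bit 31 of the command
 * word requests an end-of-descriptor interrupt.
 */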
45static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
46{
47 struct mv_xor_desc *hw_desc = desc->hw_desc;
48
49 hw_desc->status = (1 << 31);
50 hw_desc->phy_next_desc = 0;
51 hw_desc->desc_command = (1 << 31);
52}
53
54static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
55{
56 struct mv_xor_desc *hw_desc = desc->hw_desc;
57 return hw_desc->phy_dest_addr;
58}
59
60static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
61 int src_idx)
62{
63 struct mv_xor_desc *hw_desc = desc->hw_desc;
64 return hw_desc->phy_src_addr[src_idx];
65}
66
67
68static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
69 u32 byte_count)
70{
71 struct mv_xor_desc *hw_desc = desc->hw_desc;
72 hw_desc->byte_count = byte_count;
73}
74
75static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
76 u32 next_desc_addr)
77{
78 struct mv_xor_desc *hw_desc = desc->hw_desc;
79 BUG_ON(hw_desc->phy_next_desc);
80 hw_desc->phy_next_desc = next_desc_addr;
81}
82
83static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
84{
85 struct mv_xor_desc *hw_desc = desc->hw_desc;
86 hw_desc->phy_next_desc = 0;
87}
88
89static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val)
90{
91 desc->value = val;
92}
93
94static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
95 dma_addr_t addr)
96{
97 struct mv_xor_desc *hw_desc = desc->hw_desc;
98 hw_desc->phy_dest_addr = addr;
99}
100
101static int mv_chan_memset_slot_count(size_t len)
102{
103 return 1;
104}
105
106#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)
107
108static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
109 int index, dma_addr_t addr)
110{
111 struct mv_xor_desc *hw_desc = desc->hw_desc;
112 hw_desc->phy_src_addr[index] = addr;
113 if (desc->type == DMA_XOR)
114 hw_desc->desc_command |= (1 << index);
115}
116
117static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
118{
119 return __raw_readl(XOR_CURR_DESC(chan));
120}
121
122static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
123 u32 next_desc_addr)
124{
125 __raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
126}
127
128static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr)
129{
130 __raw_writel(desc_addr, XOR_DEST_POINTER(chan));
131}
132
133static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size)
134{
135 __raw_writel(block_size, XOR_BLOCK_SIZE(chan));
136}
137
138static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value)
139{
140 __raw_writel(value, XOR_INIT_VALUE_LOW(chan));
141 __raw_writel(value, XOR_INIT_VALUE_HIGH(chan));
142}
143
144static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
145{
146 u32 val = __raw_readl(XOR_INTR_MASK(chan));
147 val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
148 __raw_writel(val, XOR_INTR_MASK(chan));
149}
150
151static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
152{
153 u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
154 intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
155 return intr_cause;
156}
157
158static int mv_is_err_intr(u32 intr_cause)
159{
160 if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
161 return 1;
162
163 return 0;
164}
165
166static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
167{
 168 u32 val = ~(1 << (chan->idx * 16));
169 dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
170 __raw_writel(val, XOR_INTR_CAUSE(chan));
171}
172
173static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
174{
175 u32 val = 0xFFFF0000 >> (chan->idx * 16);
176 __raw_writel(val, XOR_INTR_CAUSE(chan));
177}
178
179static int mv_can_chain(struct mv_xor_desc_slot *desc)
180{
181 struct mv_xor_desc_slot *chain_old_tail = list_entry(
182 desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);
183
184 if (chain_old_tail->type != desc->type)
185 return 0;
186 if (desc->type == DMA_MEMSET)
187 return 0;
188
189 return 1;
190}
191
192static void mv_set_mode(struct mv_xor_chan *chan,
193 enum dma_transaction_type type)
194{
195 u32 op_mode;
196 u32 config = __raw_readl(XOR_CONFIG(chan));
197
198 switch (type) {
199 case DMA_XOR:
200 op_mode = XOR_OPERATION_MODE_XOR;
201 break;
202 case DMA_MEMCPY:
203 op_mode = XOR_OPERATION_MODE_MEMCPY;
204 break;
205 case DMA_MEMSET:
206 op_mode = XOR_OPERATION_MODE_MEMSET;
207 break;
208 default:
209 dev_printk(KERN_ERR, chan->device->common.dev,
210 "error: unsupported operation %d.\n",
211 type);
212 BUG();
213 return;
214 }
215
216 config &= ~0x7;
217 config |= op_mode;
218 __raw_writel(config, XOR_CONFIG(chan));
219 chan->current_type = type;
220}
221
222static void mv_chan_activate(struct mv_xor_chan *chan)
223{
224 u32 activation;
225
226 dev_dbg(chan->device->common.dev, " activate chan.\n");
227 activation = __raw_readl(XOR_ACTIVATION(chan));
228 activation |= 0x1;
229 __raw_writel(activation, XOR_ACTIVATION(chan));
230}
231
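/* Bits [5:4] of the activation register hold the channel state;
 * a value of 1 is reported as "busy" here.
 */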
232static char mv_chan_is_busy(struct mv_xor_chan *chan)
233{
234 u32 state = __raw_readl(XOR_ACTIVATION(chan));
235
236 state = (state >> 4) & 0x3;
237
238 return (state == 1) ? 1 : 0;
239}
240
241static int mv_chan_xor_slot_count(size_t len, int src_cnt)
242{
243 return 1;
244}
245
246/**
247 * mv_xor_free_slots - flags descriptor slots for reuse
248 * @slot: Slot to free
249 * Caller must hold &mv_chan->lock while calling this function
250 */
251static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
252 struct mv_xor_desc_slot *slot)
253{
254 dev_dbg(mv_chan->device->common.dev, "%s %d slot %p\n",
255 __func__, __LINE__, slot);
256
257 slot->slots_per_op = 0;
258
259}
260
261/*
262 * mv_xor_start_new_chain - program the engine to operate on new chain headed by
263 * sw_desc
264 * Caller must hold &mv_chan->lock while calling this function
265 */
266static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
267 struct mv_xor_desc_slot *sw_desc)
268{
269 dev_dbg(mv_chan->device->common.dev, "%s %d: sw_desc %p\n",
270 __func__, __LINE__, sw_desc);
271 if (sw_desc->type != mv_chan->current_type)
272 mv_set_mode(mv_chan, sw_desc->type);
273
274 if (sw_desc->type == DMA_MEMSET) {
275 /* for memset requests we need to program the engine, no
276 * descriptors used.
277 */
278 struct mv_xor_desc *hw_desc = sw_desc->hw_desc;
279 mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr);
280 mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
281 mv_chan_set_value(mv_chan, sw_desc->value);
282 } else {
283 /* set the hardware chain */
284 mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
285 }
286 mv_chan->pending += sw_desc->slot_cnt;
287 mv_xor_issue_pending(&mv_chan->common);
288}
289
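/*
 * Run the dmaengine completion actions for a finished descriptor:
 * invoke the client callback and, unless the submitter asked to skip
 * it via DMA_COMPL_SKIP_{DEST,SRC}_UNMAP, unmap the destination and
 * source pages that were mapped for this transfer.
 */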
290static dma_cookie_t
291mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
292 struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
293{
294 BUG_ON(desc->async_tx.cookie < 0);
295
296 if (desc->async_tx.cookie > 0) {
297 cookie = desc->async_tx.cookie;
298
299 /* call the callback (must not sleep or submit new
300 * operations to this channel)
301 */
302 if (desc->async_tx.callback)
303 desc->async_tx.callback(
304 desc->async_tx.callback_param);
305
306 /* unmap dma addresses
307 * (unmap_single vs unmap_page?)
308 */
309 if (desc->group_head && desc->unmap_len) {
310 struct mv_xor_desc_slot *unmap = desc->group_head;
311 struct device *dev =
312 &mv_chan->device->pdev->dev;
313 u32 len = unmap->unmap_len;
314 enum dma_ctrl_flags flags = desc->async_tx.flags;
315 u32 src_cnt;
316 dma_addr_t addr;
 317 dma_addr_t dest;
 318
319 src_cnt = unmap->unmap_src_cnt;
320 dest = mv_desc_get_dest_addr(unmap);
 321 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
322 enum dma_data_direction dir;
323
324 if (src_cnt > 1) /* is xor ? */
325 dir = DMA_BIDIRECTIONAL;
326 else
327 dir = DMA_FROM_DEVICE;
328 dma_unmap_page(dev, dest, len, dir);
329 }
330
331 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
332 while (src_cnt--) {
333 addr = mv_desc_get_src_addr(unmap,
334 src_cnt);
335 if (addr == dest)
336 continue;
337 dma_unmap_page(dev, addr, len,
338 DMA_TO_DEVICE);
339 }
340 }
341 desc->group_head = NULL;
342 }
343 }
344
345 /* run dependent operations */
 346 dma_run_dependencies(&desc->async_tx);
347
348 return cookie;
349}
350
351static int
352mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
353{
354 struct mv_xor_desc_slot *iter, *_iter;
355
356 dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
357 list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
358 completed_node) {
359
360 if (async_tx_test_ack(&iter->async_tx)) {
361 list_del(&iter->completed_node);
362 mv_xor_free_slots(mv_chan, iter);
363 }
364 }
365 return 0;
366}
367
368static int
369mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
370 struct mv_xor_chan *mv_chan)
371{
372 dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n",
373 __func__, __LINE__, desc, desc->async_tx.flags);
374 list_del(&desc->chain_node);
375 /* the client is allowed to attach dependent operations
376 * until 'ack' is set
377 */
378 if (!async_tx_test_ack(&desc->async_tx)) {
379 /* move this slot to the completed_slots */
380 list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
381 return 0;
382 }
383
384 mv_xor_free_slots(mv_chan, desc);
385 return 0;
386}
387
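/*
 * Walk the descriptor chain from the oldest entry, running completion
 * actions for everything the hardware has finished. The walk stops at
 * the descriptor currently loaded in the channel; if the engine has
 * gone idle while work is still queued, the chain is restarted from
 * its new head.
 */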
388static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
389{
390 struct mv_xor_desc_slot *iter, *_iter;
391 dma_cookie_t cookie = 0;
392 int busy = mv_chan_is_busy(mv_chan);
393 u32 current_desc = mv_chan_get_current_desc(mv_chan);
394 int seen_current = 0;
395
396 dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
397 dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc);
398 mv_xor_clean_completed_slots(mv_chan);
399
400 /* free completed slots from the chain starting with
401 * the oldest descriptor
402 */
403
404 list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
405 chain_node) {
406 prefetch(_iter);
407 prefetch(&_iter->async_tx);
408
409 /* do not advance past the current descriptor loaded into the
410 * hardware channel, subsequent descriptors are either in
411 * process or have not been submitted
412 */
413 if (seen_current)
414 break;
415
416 /* stop the search if we reach the current descriptor and the
417 * channel is busy
418 */
419 if (iter->async_tx.phys == current_desc) {
420 seen_current = 1;
421 if (busy)
422 break;
423 }
424
425 cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);
426
427 if (mv_xor_clean_slot(iter, mv_chan))
428 break;
429 }
430
431 if ((busy == 0) && !list_empty(&mv_chan->chain)) {
432 struct mv_xor_desc_slot *chain_head;
433 chain_head = list_entry(mv_chan->chain.next,
434 struct mv_xor_desc_slot,
435 chain_node);
436
437 mv_xor_start_new_chain(mv_chan, chain_head);
438 }
439
440 if (cookie > 0)
 441 mv_chan->common.completed_cookie = cookie;
442}
443
444static void
445mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
446{
447 spin_lock_bh(&mv_chan->lock);
448 __mv_xor_slot_cleanup(mv_chan);
449 spin_unlock_bh(&mv_chan->lock);
450}
451
452static void mv_xor_tasklet(unsigned long data)
453{
454 struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
 455 mv_xor_slot_cleanup(chan);
456}
457
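/*
 * Carve num_slots contiguous software descriptors out of the slot
 * pool. The search starts at the last slot handed out and, if that
 * fails, retries once from the head of all_slots; when no run of free
 * slots can be found, the cleanup tasklet is kicked in the hope of
 * freeing some and the caller gets NULL.
 */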
458static struct mv_xor_desc_slot *
459mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
460 int slots_per_op)
461{
462 struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
463 LIST_HEAD(chain);
464 int slots_found, retry = 0;
465
 466 /* start search from the last allocated descriptor
 467 * if a contiguous allocation cannot be found, start searching
468 * from the beginning of the list
469 */
470retry:
471 slots_found = 0;
472 if (retry == 0)
473 iter = mv_chan->last_used;
474 else
475 iter = list_entry(&mv_chan->all_slots,
476 struct mv_xor_desc_slot,
477 slot_node);
478
479 list_for_each_entry_safe_continue(
480 iter, _iter, &mv_chan->all_slots, slot_node) {
481 prefetch(_iter);
482 prefetch(&_iter->async_tx);
483 if (iter->slots_per_op) {
484 /* give up after finding the first busy slot
485 * on the second pass through the list
486 */
487 if (retry)
488 break;
489
490 slots_found = 0;
491 continue;
492 }
493
494 /* start the allocation if the slot is correctly aligned */
495 if (!slots_found++)
496 alloc_start = iter;
497
498 if (slots_found == num_slots) {
499 struct mv_xor_desc_slot *alloc_tail = NULL;
500 struct mv_xor_desc_slot *last_used = NULL;
501 iter = alloc_start;
502 while (num_slots) {
503 int i;
504
505 /* pre-ack all but the last descriptor */
506 async_tx_ack(&iter->async_tx);
507
508 list_add_tail(&iter->chain_node, &chain);
509 alloc_tail = iter;
510 iter->async_tx.cookie = 0;
511 iter->slot_cnt = num_slots;
512 iter->xor_check_result = NULL;
513 for (i = 0; i < slots_per_op; i++) {
514 iter->slots_per_op = slots_per_op - i;
515 last_used = iter;
516 iter = list_entry(iter->slot_node.next,
517 struct mv_xor_desc_slot,
518 slot_node);
519 }
520 num_slots -= slots_per_op;
521 }
522 alloc_tail->group_head = alloc_start;
523 alloc_tail->async_tx.cookie = -EBUSY;
 524 list_splice(&chain, &alloc_tail->tx_list);
525 mv_chan->last_used = last_used;
526 mv_desc_clear_next_desc(alloc_start);
527 mv_desc_clear_next_desc(alloc_tail);
528 return alloc_tail;
529 }
530 }
531 if (!retry++)
532 goto retry;
533
534 /* try to free some slots if the allocation fails */
535 tasklet_schedule(&mv_chan->irq_tasklet);
536
537 return NULL;
538}
539
540/************************ DMA engine API functions ****************************/
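/*
 * Assign a cookie and append the descriptor chain built by the prep
 * routine to the software chain. If the channel was idle, or the
 * engine had already consumed the old chain tail, the hardware is
 * (re)started on the new chain; otherwise the new descriptors are
 * simply linked behind the current tail.
 */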
541static dma_cookie_t
542mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
543{
544 struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
545 struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
546 struct mv_xor_desc_slot *grp_start, *old_chain_tail;
547 dma_cookie_t cookie;
548 int new_hw_chain = 1;
549
550 dev_dbg(mv_chan->device->common.dev,
551 "%s sw_desc %p: async_tx %p\n",
552 __func__, sw_desc, &sw_desc->async_tx);
553
554 grp_start = sw_desc->group_head;
555
556 spin_lock_bh(&mv_chan->lock);
 557 cookie = dma_cookie_assign(tx);
558
559 if (list_empty(&mv_chan->chain))
 560 list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
561 else {
562 new_hw_chain = 0;
563
564 old_chain_tail = list_entry(mv_chan->chain.prev,
565 struct mv_xor_desc_slot,
566 chain_node);
 567 list_splice_init(&grp_start->tx_list,
568 &old_chain_tail->chain_node);
569
570 if (!mv_can_chain(grp_start))
571 goto submit_done;
572
573 dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n",
574 old_chain_tail->async_tx.phys);
575
576 /* fix up the hardware chain */
577 mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);
578
579 /* if the channel is not busy */
580 if (!mv_chan_is_busy(mv_chan)) {
581 u32 current_desc = mv_chan_get_current_desc(mv_chan);
582 /*
 583 * and the current desc is the end of the chain before
584 * the append, then we need to start the channel
585 */
586 if (current_desc == old_chain_tail->async_tx.phys)
587 new_hw_chain = 1;
588 }
589 }
590
591 if (new_hw_chain)
592 mv_xor_start_new_chain(mv_chan, grp_start);
593
594submit_done:
595 spin_unlock_bh(&mv_chan->lock);
596
597 return cookie;
598}
599
600/* returns the number of allocated descriptors */
 601static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
602{
603 char *hw_desc;
604 int idx;
605 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
606 struct mv_xor_desc_slot *slot = NULL;
607 struct mv_xor_platform_data *plat_data =
608 mv_chan->device->pdev->dev.platform_data;
609 int num_descs_in_pool = plat_data->pool_size/MV_XOR_SLOT_SIZE;
610
611 /* Allocate descriptor slots */
612 idx = mv_chan->slots_allocated;
613 while (idx < num_descs_in_pool) {
614 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
615 if (!slot) {
616 printk(KERN_INFO "MV XOR Channel only initialized"
617 " %d descriptor slots", idx);
618 break;
619 }
620 hw_desc = (char *) mv_chan->device->dma_desc_pool_virt;
621 slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];
622
623 dma_async_tx_descriptor_init(&slot->async_tx, chan);
624 slot->async_tx.tx_submit = mv_xor_tx_submit;
625 INIT_LIST_HEAD(&slot->chain_node);
626 INIT_LIST_HEAD(&slot->slot_node);
 627 INIT_LIST_HEAD(&slot->tx_list);
628 hw_desc = (char *) mv_chan->device->dma_desc_pool;
629 slot->async_tx.phys =
630 (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
631 slot->idx = idx++;
632
633 spin_lock_bh(&mv_chan->lock);
634 mv_chan->slots_allocated = idx;
635 list_add_tail(&slot->slot_node, &mv_chan->all_slots);
636 spin_unlock_bh(&mv_chan->lock);
637 }
638
639 if (mv_chan->slots_allocated && !mv_chan->last_used)
640 mv_chan->last_used = list_entry(mv_chan->all_slots.next,
641 struct mv_xor_desc_slot,
642 slot_node);
643
644 dev_dbg(mv_chan->device->common.dev,
645 "allocated %d descriptor slots last_used: %p\n",
646 mv_chan->slots_allocated, mv_chan->last_used);
647
648 return mv_chan->slots_allocated ? : -ENOMEM;
649}
650
651static struct dma_async_tx_descriptor *
652mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
653 size_t len, unsigned long flags)
654{
655 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
656 struct mv_xor_desc_slot *sw_desc, *grp_start;
657 int slot_cnt;
658
659 dev_dbg(mv_chan->device->common.dev,
660 "%s dest: %x src %x len: %u flags: %ld\n",
661 __func__, dest, src, len, flags);
662 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
663 return NULL;
664
 665 BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
666
667 spin_lock_bh(&mv_chan->lock);
668 slot_cnt = mv_chan_memcpy_slot_count(len);
669 sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
670 if (sw_desc) {
671 sw_desc->type = DMA_MEMCPY;
672 sw_desc->async_tx.flags = flags;
673 grp_start = sw_desc->group_head;
674 mv_desc_init(grp_start, flags);
675 mv_desc_set_byte_count(grp_start, len);
676 mv_desc_set_dest_addr(sw_desc->group_head, dest);
677 mv_desc_set_src_addr(grp_start, 0, src);
678 sw_desc->unmap_src_cnt = 1;
679 sw_desc->unmap_len = len;
680 }
681 spin_unlock_bh(&mv_chan->lock);
682
683 dev_dbg(mv_chan->device->common.dev,
684 "%s sw_desc %p async_tx %p\n",
685 __func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0);
686
687 return sw_desc ? &sw_desc->async_tx : NULL;
688}
689
690static struct dma_async_tx_descriptor *
691mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
692 size_t len, unsigned long flags)
693{
694 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
695 struct mv_xor_desc_slot *sw_desc, *grp_start;
696 int slot_cnt;
697
698 dev_dbg(mv_chan->device->common.dev,
699 "%s dest: %x len: %u flags: %ld\n",
700 __func__, dest, len, flags);
701 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
702 return NULL;
703
 704 BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
705
706 spin_lock_bh(&mv_chan->lock);
707 slot_cnt = mv_chan_memset_slot_count(len);
708 sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
709 if (sw_desc) {
710 sw_desc->type = DMA_MEMSET;
711 sw_desc->async_tx.flags = flags;
712 grp_start = sw_desc->group_head;
713 mv_desc_init(grp_start, flags);
714 mv_desc_set_byte_count(grp_start, len);
715 mv_desc_set_dest_addr(sw_desc->group_head, dest);
716 mv_desc_set_block_fill_val(grp_start, value);
717 sw_desc->unmap_src_cnt = 1;
718 sw_desc->unmap_len = len;
719 }
720 spin_unlock_bh(&mv_chan->lock);
721 dev_dbg(mv_chan->device->common.dev,
722 "%s sw_desc %p async_tx %p \n",
723 __func__, sw_desc, &sw_desc->async_tx);
724 return sw_desc ? &sw_desc->async_tx : NULL;
725}
726
727static struct dma_async_tx_descriptor *
728mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
729 unsigned int src_cnt, size_t len, unsigned long flags)
730{
731 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
732 struct mv_xor_desc_slot *sw_desc, *grp_start;
733 int slot_cnt;
734
735 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
736 return NULL;
737
 738 BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
739
740 dev_dbg(mv_chan->device->common.dev,
741 "%s src_cnt: %d len: dest %x %u flags: %ld\n",
742 __func__, src_cnt, len, dest, flags);
743
744 spin_lock_bh(&mv_chan->lock);
745 slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
746 sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
747 if (sw_desc) {
748 sw_desc->type = DMA_XOR;
749 sw_desc->async_tx.flags = flags;
750 grp_start = sw_desc->group_head;
751 mv_desc_init(grp_start, flags);
752 /* the byte count field is the same as in memcpy desc*/
753 mv_desc_set_byte_count(grp_start, len);
754 mv_desc_set_dest_addr(sw_desc->group_head, dest);
755 sw_desc->unmap_src_cnt = src_cnt;
756 sw_desc->unmap_len = len;
757 while (src_cnt--)
758 mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
759 }
760 spin_unlock_bh(&mv_chan->lock);
761 dev_dbg(mv_chan->device->common.dev,
762 "%s sw_desc %p async_tx %p \n",
763 __func__, sw_desc, &sw_desc->async_tx);
764 return sw_desc ? &sw_desc->async_tx : NULL;
765}
766
767static void mv_xor_free_chan_resources(struct dma_chan *chan)
768{
769 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
770 struct mv_xor_desc_slot *iter, *_iter;
771 int in_use_descs = 0;
772
773 mv_xor_slot_cleanup(mv_chan);
774
775 spin_lock_bh(&mv_chan->lock);
776 list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
777 chain_node) {
778 in_use_descs++;
779 list_del(&iter->chain_node);
780 }
781 list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
782 completed_node) {
783 in_use_descs++;
784 list_del(&iter->completed_node);
785 }
786 list_for_each_entry_safe_reverse(
787 iter, _iter, &mv_chan->all_slots, slot_node) {
788 list_del(&iter->slot_node);
789 kfree(iter);
790 mv_chan->slots_allocated--;
791 }
792 mv_chan->last_used = NULL;
793
794 dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n",
795 __func__, mv_chan->slots_allocated);
796 spin_unlock_bh(&mv_chan->lock);
797
798 if (in_use_descs)
799 dev_err(mv_chan->device->common.dev,
800 "freeing %d in use descriptors!\n", in_use_descs);
801}
802
803/**
 804 * mv_xor_status - poll the status of an XOR transaction
 805 * @chan: XOR channel handle
 806 * @cookie: XOR transaction identifier
 807 * @txstate: XOR transactions state holder (or NULL)
 808 */
 809static enum dma_status mv_xor_status(struct dma_chan *chan,
 810 dma_cookie_t cookie,
 811 struct dma_tx_state *txstate)
812{
813 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
814 enum dma_status ret;
815
 816 ret = dma_cookie_status(chan, cookie, txstate);
817 if (ret == DMA_SUCCESS) {
818 mv_xor_clean_completed_slots(mv_chan);
819 return ret;
820 }
821 mv_xor_slot_cleanup(mv_chan);
822
 823 return dma_cookie_status(chan, cookie, txstate);
824}
825
826static void mv_dump_xor_regs(struct mv_xor_chan *chan)
827{
828 u32 val;
829
830 val = __raw_readl(XOR_CONFIG(chan));
831 dev_printk(KERN_ERR, chan->device->common.dev,
832 "config 0x%08x.\n", val);
833
834 val = __raw_readl(XOR_ACTIVATION(chan));
835 dev_printk(KERN_ERR, chan->device->common.dev,
836 "activation 0x%08x.\n", val);
837
838 val = __raw_readl(XOR_INTR_CAUSE(chan));
839 dev_printk(KERN_ERR, chan->device->common.dev,
840 "intr cause 0x%08x.\n", val);
841
842 val = __raw_readl(XOR_INTR_MASK(chan));
843 dev_printk(KERN_ERR, chan->device->common.dev,
844 "intr mask 0x%08x.\n", val);
845
846 val = __raw_readl(XOR_ERROR_CAUSE(chan));
847 dev_printk(KERN_ERR, chan->device->common.dev,
848 "error cause 0x%08x.\n", val);
849
850 val = __raw_readl(XOR_ERROR_ADDR(chan));
851 dev_printk(KERN_ERR, chan->device->common.dev,
852 "error addr 0x%08x.\n", val);
853}
854
855static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
856 u32 intr_cause)
857{
858 if (intr_cause & (1 << 4)) {
859 dev_dbg(chan->device->common.dev,
860 "ignore this error\n");
861 return;
862 }
863
864 dev_printk(KERN_ERR, chan->device->common.dev,
865 "error on chan %d. intr cause 0x%08x.\n",
866 chan->idx, intr_cause);
867
868 mv_dump_xor_regs(chan);
869 BUG();
870}
871
872static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
873{
874 struct mv_xor_chan *chan = data;
875 u32 intr_cause = mv_chan_get_intr_cause(chan);
876
877 dev_dbg(chan->device->common.dev, "intr cause %x\n", intr_cause);
878
879 if (mv_is_err_intr(intr_cause))
880 mv_xor_err_interrupt_handler(chan, intr_cause);
881
882 tasklet_schedule(&chan->irq_tasklet);
883
884 mv_xor_device_clear_eoc_cause(chan);
885
886 return IRQ_HANDLED;
887}
888
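/*
 * Activation is batched: the channel is only kicked once at least
 * MV_XOR_THRESHOLD descriptors are pending, and the pending count is
 * reset when that happens.
 */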
889static void mv_xor_issue_pending(struct dma_chan *chan)
890{
891 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
892
893 if (mv_chan->pending >= MV_XOR_THRESHOLD) {
894 mv_chan->pending = 0;
895 mv_chan_activate(mv_chan);
896 }
897}
898
899/*
900 * Perform a transaction to verify the HW works.
901 */
902#define MV_XOR_TEST_SIZE 2000
903
904static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
905{
906 int i;
907 void *src, *dest;
908 dma_addr_t src_dma, dest_dma;
909 struct dma_chan *dma_chan;
910 dma_cookie_t cookie;
911 struct dma_async_tx_descriptor *tx;
912 int err = 0;
913 struct mv_xor_chan *mv_chan;
914
915 src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
916 if (!src)
917 return -ENOMEM;
918
919 dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
920 if (!dest) {
921 kfree(src);
922 return -ENOMEM;
923 }
924
925 /* Fill in src buffer */
926 for (i = 0; i < MV_XOR_TEST_SIZE; i++)
927 ((u8 *) src)[i] = (u8)i;
928
929 /* Start copy, using first DMA channel */
930 dma_chan = container_of(device->common.channels.next,
931 struct dma_chan,
932 device_node);
 933 if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
934 err = -ENODEV;
935 goto out;
936 }
937
938 dest_dma = dma_map_single(dma_chan->device->dev, dest,
939 MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
940
941 src_dma = dma_map_single(dma_chan->device->dev, src,
942 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);
943
944 tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
945 MV_XOR_TEST_SIZE, 0);
946 cookie = mv_xor_tx_submit(tx);
947 mv_xor_issue_pending(dma_chan);
948 async_tx_ack(tx);
949 msleep(1);
950
 951 if (mv_xor_status(dma_chan, cookie, NULL) !=
952 DMA_SUCCESS) {
953 dev_printk(KERN_ERR, dma_chan->device->dev,
954 "Self-test copy timed out, disabling\n");
955 err = -ENODEV;
956 goto free_resources;
957 }
958
959 mv_chan = to_mv_xor_chan(dma_chan);
960 dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
961 MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
962 if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
963 dev_printk(KERN_ERR, dma_chan->device->dev,
964 "Self-test copy failed compare, disabling\n");
965 err = -ENODEV;
966 goto free_resources;
967 }
968
969free_resources:
970 mv_xor_free_chan_resources(dma_chan);
971out:
972 kfree(src);
973 kfree(dest);
974 return err;
975}
976
977#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
978static int __devinit
979mv_xor_xor_self_test(struct mv_xor_device *device)
980{
981 int i, src_idx;
982 struct page *dest;
983 struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
984 dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
985 dma_addr_t dest_dma;
986 struct dma_async_tx_descriptor *tx;
987 struct dma_chan *dma_chan;
988 dma_cookie_t cookie;
989 u8 cmp_byte = 0;
990 u32 cmp_word;
991 int err = 0;
992 struct mv_xor_chan *mv_chan;
993
994 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
995 xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
996 if (!xor_srcs[src_idx]) {
997 while (src_idx--)
 998 __free_page(xor_srcs[src_idx]);
999 return -ENOMEM;
1000 }
1001 }
1002
1003 dest = alloc_page(GFP_KERNEL);
1004 if (!dest) {
1005 while (src_idx--)
 1006 __free_page(xor_srcs[src_idx]);
1007 return -ENOMEM;
1008 }
1009
1010 /* Fill in src buffers */
1011 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
1012 u8 *ptr = page_address(xor_srcs[src_idx]);
1013 for (i = 0; i < PAGE_SIZE; i++)
1014 ptr[i] = (1 << src_idx);
1015 }
1016
1017 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
1018 cmp_byte ^= (u8) (1 << src_idx);
1019
1020 cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
1021 (cmp_byte << 8) | cmp_byte;
1022
1023 memset(page_address(dest), 0, PAGE_SIZE);
1024
1025 dma_chan = container_of(device->common.channels.next,
1026 struct dma_chan,
1027 device_node);
 1028 if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
1029 err = -ENODEV;
1030 goto out;
1031 }
1032
1033 /* test xor */
1034 dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
1035 DMA_FROM_DEVICE);
1036
1037 for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
1038 dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
1039 0, PAGE_SIZE, DMA_TO_DEVICE);
1040
1041 tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
1042 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);
1043
1044 cookie = mv_xor_tx_submit(tx);
1045 mv_xor_issue_pending(dma_chan);
1046 async_tx_ack(tx);
1047 msleep(8);
1048
 1049 if (mv_xor_status(dma_chan, cookie, NULL) !=
1050 DMA_SUCCESS) {
1051 dev_printk(KERN_ERR, dma_chan->device->dev,
1052 "Self-test xor timed out, disabling\n");
1053 err = -ENODEV;
1054 goto free_resources;
1055 }
1056
1057 mv_chan = to_mv_xor_chan(dma_chan);
1058 dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
1059 PAGE_SIZE, DMA_FROM_DEVICE);
1060 for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
1061 u32 *ptr = page_address(dest);
1062 if (ptr[i] != cmp_word) {
1063 dev_printk(KERN_ERR, dma_chan->device->dev,
1064 "Self-test xor failed compare, disabling."
1065 " index %d, data %x, expected %x\n", i,
1066 ptr[i], cmp_word);
1067 err = -ENODEV;
1068 goto free_resources;
1069 }
1070 }
1071
1072free_resources:
1073 mv_xor_free_chan_resources(dma_chan);
1074out:
1075 src_idx = MV_XOR_NUM_SRC_TEST;
1076 while (src_idx--)
1077 __free_page(xor_srcs[src_idx]);
1078 __free_page(dest);
1079 return err;
1080}
1081
1082static int __devexit mv_xor_remove(struct platform_device *dev)
1083{
1084 struct mv_xor_device *device = platform_get_drvdata(dev);
1085 struct dma_chan *chan, *_chan;
1086 struct mv_xor_chan *mv_chan;
1087 struct mv_xor_platform_data *plat_data = dev->dev.platform_data;
1088
1089 dma_async_device_unregister(&device->common);
1090
1091 dma_free_coherent(&dev->dev, plat_data->pool_size,
1092 device->dma_desc_pool_virt, device->dma_desc_pool);
1093
1094 list_for_each_entry_safe(chan, _chan, &device->common.channels,
1095 device_node) {
1096 mv_chan = to_mv_xor_chan(chan);
1097 list_del(&chan->device_node);
1098 }
1099
1100 return 0;
1101}
1102
1103static int __devinit mv_xor_probe(struct platform_device *pdev)
1104{
1105 int ret = 0;
1106 int irq;
1107 struct mv_xor_device *adev;
1108 struct mv_xor_chan *mv_chan;
1109 struct dma_device *dma_dev;
1110 struct mv_xor_platform_data *plat_data = pdev->dev.platform_data;
1111
1112
1113 adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
1114 if (!adev)
1115 return -ENOMEM;
1116
1117 dma_dev = &adev->common;
1118
1119 /* allocate coherent memory for hardware descriptors
1120 * note: writecombine gives slightly better performance, but
1121 * requires that we explicitly flush the writes
1122 */
1123 adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
1124 plat_data->pool_size,
1125 &adev->dma_desc_pool,
1126 GFP_KERNEL);
1127 if (!adev->dma_desc_pool_virt)
1128 return -ENOMEM;
1129
1130 adev->id = plat_data->hw_id;
1131
 1132 /* discover transaction capabilities from the platform data */
1133 dma_dev->cap_mask = plat_data->cap_mask;
1134 adev->pdev = pdev;
1135 platform_set_drvdata(pdev, adev);
1136
1137 adev->shared = platform_get_drvdata(plat_data->shared);
1138
1139 INIT_LIST_HEAD(&dma_dev->channels);
1140
1141 /* set base routines */
1142 dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
1143 dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
 1144 dma_dev->device_tx_status = mv_xor_status;
1145 dma_dev->device_issue_pending = mv_xor_issue_pending;
1146 dma_dev->dev = &pdev->dev;
1147
1148 /* set prep routines based on capability */
1149 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1150 dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
1151 if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
1152 dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
1153 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
 1154 dma_dev->max_xor = 8;
1155 dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
1156 }
1157
1158 mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
1159 if (!mv_chan) {
1160 ret = -ENOMEM;
1161 goto err_free_dma;
1162 }
1163 mv_chan->device = adev;
1164 mv_chan->idx = plat_data->hw_id;
1165 mv_chan->mmr_base = adev->shared->xor_base;
1166
1167 if (!mv_chan->mmr_base) {
1168 ret = -ENOMEM;
1169 goto err_free_dma;
1170 }
1171 tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
1172 mv_chan);
1173
1174 /* clear errors before enabling interrupts */
1175 mv_xor_device_clear_err_status(mv_chan);
1176
1177 irq = platform_get_irq(pdev, 0);
1178 if (irq < 0) {
1179 ret = irq;
1180 goto err_free_dma;
1181 }
1182 ret = devm_request_irq(&pdev->dev, irq,
1183 mv_xor_interrupt_handler,
1184 0, dev_name(&pdev->dev), mv_chan);
1185 if (ret)
1186 goto err_free_dma;
1187
1188 mv_chan_unmask_interrupts(mv_chan);
1189
1190 mv_set_mode(mv_chan, DMA_MEMCPY);
1191
1192 spin_lock_init(&mv_chan->lock);
1193 INIT_LIST_HEAD(&mv_chan->chain);
1194 INIT_LIST_HEAD(&mv_chan->completed_slots);
1195 INIT_LIST_HEAD(&mv_chan->all_slots);
 1196 mv_chan->common.device = dma_dev;
 1197 dma_cookie_init(&mv_chan->common);
1198
1199 list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);
1200
1201 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
1202 ret = mv_xor_memcpy_self_test(adev);
1203 dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1204 if (ret)
1205 goto err_free_dma;
1206 }
1207
1208 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1209 ret = mv_xor_xor_self_test(adev);
1210 dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1211 if (ret)
1212 goto err_free_dma;
1213 }
1214
1215 dev_printk(KERN_INFO, &pdev->dev, "Marvell XOR: "
1216 "( %s%s%s%s)\n",
1217 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1218 dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
1219 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1220 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1221
1222 dma_async_device_register(dma_dev);
1223 goto out;
1224
1225 err_free_dma:
1226 dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
1227 adev->dma_desc_pool_virt, adev->dma_desc_pool);
1228 out:
1229 return ret;
1230}
1231
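/*
 * Program the XOR unit's eight address decoding windows to mirror the
 * CPU's MBUS DRAM layout. Note (assumption): the 3 << (16 + (2 * i))
 * bits in the bar-enable register appear to grant read/write access
 * to window i.
 */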
1232static void
1233mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
 1234 const struct mbus_dram_target_info *dram)
1235{
1236 void __iomem *base = msp->xor_base;
1237 u32 win_enable = 0;
1238 int i;
1239
1240 for (i = 0; i < 8; i++) {
1241 writel(0, base + WINDOW_BASE(i));
1242 writel(0, base + WINDOW_SIZE(i));
1243 if (i < 4)
1244 writel(0, base + WINDOW_REMAP_HIGH(i));
1245 }
1246
1247 for (i = 0; i < dram->num_cs; i++) {
 1248 const struct mbus_dram_window *cs = dram->cs + i;
1249
1250 writel((cs->base & 0xffff0000) |
1251 (cs->mbus_attr << 8) |
1252 dram->mbus_dram_target_id, base + WINDOW_BASE(i));
1253 writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
1254
1255 win_enable |= (1 << i);
1256 win_enable |= 3 << (16 + (2 * i));
1257 }
1258
1259 writel(win_enable, base + WINDOW_BAR_ENABLE(0));
1260 writel(win_enable, base + WINDOW_BAR_ENABLE(1));
1261}
1262
1263static struct platform_driver mv_xor_driver = {
1264 .probe = mv_xor_probe,
 1265 .remove = mv_xor_remove,
1266 .driver = {
1267 .owner = THIS_MODULE,
1268 .name = MV_XOR_NAME,
1269 },
1270};
1271
1272static int mv_xor_shared_probe(struct platform_device *pdev)
1273{
 1274 const struct mbus_dram_target_info *dram;
1275 struct mv_xor_shared_private *msp;
1276 struct resource *res;
1277
1278 dev_printk(KERN_NOTICE, &pdev->dev, "Marvell shared XOR driver\n");
1279
1280 msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
1281 if (!msp)
1282 return -ENOMEM;
1283
1284 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1285 if (!res)
1286 return -ENODEV;
1287
1288 msp->xor_base = devm_ioremap(&pdev->dev, res->start,
 1289 resource_size(res));
1290 if (!msp->xor_base)
1291 return -EBUSY;
1292
1293 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1294 if (!res)
1295 return -ENODEV;
1296
1297 msp->xor_high_base = devm_ioremap(&pdev->dev, res->start,
 1298 resource_size(res));
1299 if (!msp->xor_high_base)
1300 return -EBUSY;
1301
1302 platform_set_drvdata(pdev, msp);
1303
1304 /*
1305 * (Re-)program MBUS remapping windows if we are asked to.
1306 */
1307 dram = mv_mbus_dram_info();
1308 if (dram)
1309 mv_xor_conf_mbus_windows(msp, dram);
 1310
1311 /* Not all platforms can gate the clock, so it is not
 1312 * an error if the clock does not exist.
1313 */
1314 msp->clk = clk_get(&pdev->dev, NULL);
1315 if (!IS_ERR(msp->clk))
1316 clk_prepare_enable(msp->clk);
1317
1318 return 0;
1319}
1320
1321static int mv_xor_shared_remove(struct platform_device *pdev)
1322{
1323 struct mv_xor_shared_private *msp = platform_get_drvdata(pdev);
1324
1325 if (!IS_ERR(msp->clk)) {
1326 clk_disable_unprepare(msp->clk);
1327 clk_put(msp->clk);
1328 }
1329
1330 return 0;
1331}
1332
1333static struct platform_driver mv_xor_shared_driver = {
1334 .probe = mv_xor_shared_probe,
1335 .remove = mv_xor_shared_remove,
1336 .driver = {
1337 .owner = THIS_MODULE,
1338 .name = MV_XOR_SHARED_NAME,
1339 },
1340};
1341
1342
1343static int __init mv_xor_init(void)
1344{
1345 int rc;
1346
1347 rc = platform_driver_register(&mv_xor_shared_driver);
1348 if (!rc) {
1349 rc = platform_driver_register(&mv_xor_driver);
1350 if (rc)
1351 platform_driver_unregister(&mv_xor_shared_driver);
1352 }
1353 return rc;
1354}
1355module_init(mv_xor_init);
1356
1357/* it's currently unsafe to unload this module */
1358#if 0
1359static void __exit mv_xor_exit(void)
1360{
1361 platform_driver_unregister(&mv_xor_driver);
1362 platform_driver_unregister(&mv_xor_shared_driver);
1363 return;
1364}
1365
1366module_exit(mv_xor_exit);
1367#endif
1368
1369MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
1370MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
1371MODULE_LICENSE("GPL");