/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)		\
	((chan)->dmadev.dev)

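/*
 * The mv_desc_* helpers below operate on the in-memory hardware
 * descriptor (struct mv_xor_desc) attached to each software slot.
 * The engine fetches these descriptors from the DMA descriptor pool
 * set up in mv_xor_channel_add().
 */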
static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = (1 << 31);
	hw_desc->phy_next_desc = 0;
	hw_desc->desc_command = (1 << 31);
}

static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_dest_addr;
}

static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
				int src_idx)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_src_addr[mv_phy_src_idx(src_idx)];
}

static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
				   u32 byte_count)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_next_desc = 0;
}

static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
				  dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_dest_addr = addr;
}

static int mv_chan_memset_slot_count(size_t len)
{
	return 1;
}

#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return readl_relaxed(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

static int mv_is_err_intr(u32 intr_cause)
{
	if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
		return 1;

	return 0;
}

static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val = ~(1 << (chan->idx * 16));
	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static int mv_can_chain(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc_slot *chain_old_tail = list_entry(
		desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);

	if (chain_old_tail->type != desc->type)
		return 0;

	return 1;
}

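/*
 * Program the channel operation mode (XOR or MEMCPY) into the XOR
 * config register.  On big-endian kernels the descriptor swap bit is
 * set as well, so the engine byte-swaps the descriptors it fetches.
 */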
static void mv_set_mode(struct mv_xor_chan *chan,
			enum dma_transaction_type type)
{
	u32 op_mode;
	u32 config = readl_relaxed(XOR_CONFIG(chan));

	switch (type) {
	case DMA_XOR:
		op_mode = XOR_OPERATION_MODE_XOR;
		break;
	case DMA_MEMCPY:
		op_mode = XOR_OPERATION_MODE_MEMCPY;
		break;
	default:
		dev_err(mv_chan_to_devp(chan),
			"error: unsupported operation %d\n",
			type);
		BUG();
		return;
	}

	config &= ~0x7;
	config |= op_mode;

#if defined(__BIG_ENDIAN)
	config |= XOR_DESCRIPTOR_SWAP;
#else
	config &= ~XOR_DESCRIPTOR_SWAP;
#endif

	writel_relaxed(config, XOR_CONFIG(chan));
	chan->current_type = type;
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	u32 activation;

	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
	activation = readl_relaxed(XOR_ACTIVATION(chan));
	activation |= 0x1;
	writel_relaxed(activation, XOR_ACTIVATION(chan));
}

static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = readl_relaxed(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

static int mv_chan_xor_slot_count(size_t len, int src_cnt)
{
	return 1;
}

/**
 * mv_xor_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
			      struct mv_xor_desc_slot *slot)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
		__func__, __LINE__, slot);

	slot->slots_per_op = 0;
}

/**
 * mv_xor_start_new_chain - program the engine to operate on new chain headed by
 * sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
				   struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);
	if (sw_desc->type != mv_chan->current_type)
		mv_set_mode(mv_chan, sw_desc->type);

	/* set the hardware chain */
	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);

	mv_chan->pending += sw_desc->slot_cnt;
	mv_xor_issue_pending(&mv_chan->dmachan);
}

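/*
 * Complete one descriptor: invoke the client callback (if any), unmap
 * the DMA buffers unless the submitter asked to skip that, and run any
 * dependent async_tx operations.  Returns the descriptor's cookie, or
 * the cookie passed in when this descriptor carried none.
 */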
static dma_cookie_t
mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
	struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		dma_descriptor_unmap(&desc->async_tx);
		/* unmap dma addresses
		 * (unmap_single vs unmap_page?)
		 */
		if (desc->group_head && desc->unmap_len) {
			struct mv_xor_desc_slot *unmap = desc->group_head;
			struct device *dev = mv_chan_to_devp(mv_chan);
			u32 len = unmap->unmap_len;
			enum dma_ctrl_flags flags = desc->async_tx.flags;
			u32 src_cnt;
			dma_addr_t addr;
			dma_addr_t dest;

			src_cnt = unmap->unmap_src_cnt;
			dest = mv_desc_get_dest_addr(unmap);
			if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
				enum dma_data_direction dir;

				if (src_cnt > 1) /* is xor ? */
					dir = DMA_BIDIRECTIONAL;
				else
					dir = DMA_FROM_DEVICE;
				dma_unmap_page(dev, dest, len, dir);
			}

			if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
				while (src_cnt--) {
					addr = mv_desc_get_src_addr(unmap,
								    src_cnt);
					if (addr == dest)
						continue;
					dma_unmap_page(dev, addr, len,
						       DMA_TO_DEVICE);
				}
			}
			desc->group_head = NULL;
		}
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_del(&iter->completed_node);
			mv_xor_free_slots(mv_chan, iter);
		}
	}
	return 0;
}

static int
mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
	struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);
	list_del(&desc->chain_node);
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
		return 0;
	}

	mv_xor_free_slots(mv_chan, desc);
	return 0;
}

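/*
 * Walk the channel's descriptor chain and retire everything the
 * hardware has finished, stopping at the descriptor currently loaded
 * in the engine.  If the engine has gone idle while work is still
 * queued, restart it on the new chain head.  Caller must hold
 * mv_chan->lock.
 */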
static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int seen_current = 0;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_xor_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */
	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (iter->async_tx.phys == current_desc) {
			seen_current = 1;
			if (busy)
				break;
		}

		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);

		if (mv_xor_clean_slot(iter, mv_chan))
			break;
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		struct mv_xor_desc_slot *chain_head;
		chain_head = list_entry(mv_chan->chain.next,
					struct mv_xor_desc_slot,
					chain_node);

		mv_xor_start_new_chain(mv_chan, chain_head);
	}

	if (cookie > 0)
		mv_chan->dmachan.completed_cookie = cookie;
}

static void
mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	spin_lock_bh(&mv_chan->lock);
	__mv_xor_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
	mv_xor_slot_cleanup(chan);
}

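/*
 * Allocate a contiguous run of descriptor slots, searching all_slots
 * starting from last_used and retrying from the head of the list on a
 * second pass.  On failure the cleanup tasklet is scheduled in the
 * hope that completed slots can be reclaimed, and NULL is returned.
 */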
static struct mv_xor_desc_slot *
mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
		   int slots_per_op)
{
	struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
	LIST_HEAD(chain);
	int slots_found, retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation can not be found start searching
	 * from the beginning of the list
	 */
retry:
	slots_found = 0;
	if (retry == 0)
		iter = mv_chan->last_used;
	else
		iter = list_entry(&mv_chan->all_slots,
			struct mv_xor_desc_slot,
			slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slots_per_op) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;

			slots_found = 0;
			continue;
		}

		/* start the allocation if the slot is correctly aligned */
		if (!slots_found++)
			alloc_start = iter;

		if (slots_found == num_slots) {
			struct mv_xor_desc_slot *alloc_tail = NULL;
			struct mv_xor_desc_slot *last_used = NULL;
			iter = alloc_start;
			while (num_slots) {
				int i;

				/* pre-ack all but the last descriptor */
				async_tx_ack(&iter->async_tx);

				list_add_tail(&iter->chain_node, &chain);
				alloc_tail = iter;
				iter->async_tx.cookie = 0;
				iter->slot_cnt = num_slots;
				iter->xor_check_result = NULL;
				for (i = 0; i < slots_per_op; i++) {
					iter->slots_per_op = slots_per_op - i;
					last_used = iter;
					iter = list_entry(iter->slot_node.next,
						struct mv_xor_desc_slot,
						slot_node);
				}
				num_slots -= slots_per_op;
			}
			alloc_tail->group_head = alloc_start;
			alloc_tail->async_tx.cookie = -EBUSY;
			list_splice(&chain, &alloc_tail->tx_list);
			mv_chan->last_used = last_used;
			mv_desc_clear_next_desc(alloc_start);
			mv_desc_clear_next_desc(alloc_tail);
			return alloc_tail;
		}
	}
	if (!retry++)
		goto retry;

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

/************************ DMA engine API functions ****************************/

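/*
 * Submit a prepared descriptor: assign its cookie under the channel
 * lock, splice its slots onto the software chain, and either link it
 * into the running hardware chain or start a new hardware chain when
 * the channel list was empty or the engine has already run past the
 * old chain tail.
 */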
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *grp_start, *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	grp_start = sw_desc->group_head;

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    chain_node);
		list_splice_init(&grp_start->tx_list,
				 &old_chain_tail->chain_node);

		if (!mv_can_chain(grp_start))
			goto submit_done;

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n",
			old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_xor_start_new_chain(mv_chan, grp_start);

submit_done:
	spin_unlock_bh(&mv_chan->lock);
	return cookie;
}

/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	char *hw_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			printk(KERN_INFO "MV XOR Channel only initialized"
				" %d descriptor slots", idx);
			break;
		}
		hw_desc = (char *) mv_chan->dma_desc_pool_virt;
		slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		INIT_LIST_HEAD(&slot->tx_list);
		hw_desc = (char *) mv_chan->dma_desc_pool;
		slot->async_tx.phys =
			(dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->slot_node, &mv_chan->all_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	if (mv_chan->slots_allocated && !mv_chan->last_used)
		mv_chan->last_used = list_entry(mv_chan->all_slots.next,
						struct mv_xor_desc_slot,
						slot_node);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots last_used: %p\n",
		mv_chan->slots_allocated, mv_chan->last_used);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

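/*
 * Prepare a single-descriptor memcpy operation.  Lengths below
 * MV_XOR_MIN_BYTE_COUNT are rejected, and lengths above
 * MV_XOR_MAX_BYTE_COUNT are treated as a programming error.
 */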
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s dest: %x src %x len: %u flags: %ld\n",
		__func__, dest, src, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memcpy_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMCPY;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_src_addr(grp_start, 0, src);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

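/*
 * Prepare an XOR operation over src_cnt source buffers into dest.
 * The descriptor layout matches the memcpy case, with one source
 * address (and one command bit) per participating source.
 */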
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: dest %x %u flags: %ld\n",
		__func__, src_cnt, len, dest, flags);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		/* the byte count field is the same as in memcpy desc*/
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		sw_desc->unmap_src_cnt = src_cnt;
		sw_desc->unmap_len = len;
		while (src_cnt--)
			mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

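/*
 * Release everything mv_xor_alloc_chan_resources() set up: retire any
 * outstanding descriptors, then free every slot on all_slots.  Slots
 * still on the chain or completed lists are counted and reported,
 * since freeing them indicates a client bug.
 */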
static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	mv_xor_slot_cleanup(mv_chan);

	spin_lock_bh(&mv_chan->lock);
	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {
		in_use_descs++;
		list_del(&iter->completed_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}
	mv_chan->last_used = NULL;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transactions state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS) {
		mv_xor_clean_completed_slots(mv_chan);
		return ret;
	}
	mv_xor_slot_cleanup(mv_chan);

	return dma_cookie_status(chan, cookie, txstate);
}

static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = readl_relaxed(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);

	val = readl_relaxed(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
}

static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
					 u32 intr_cause)
{
	if (intr_cause & (1 << 4)) {
		dev_dbg(mv_chan_to_devp(chan),
			"ignore this error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan),
		"error on chan %d. intr cause 0x%08x\n",
		chan->idx, intr_cause);

	mv_dump_xor_regs(chan);
	BUG();
}

static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (mv_is_err_intr(intr_cause))
		mv_xor_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_xor_device_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

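/*
 * Kick the engine once enough work is queued: activation is deferred
 * until the channel's pending count reaches MV_XOR_THRESHOLD.
 */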
static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */
#define MV_XOR_TEST_SIZE 2000

static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
	int i;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	int err = 0;

	src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < MV_XOR_TEST_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	dest_dma = dma_map_single(dma_chan->device->dev, dest,
				  MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);

	src_dma = dma_map_single(dma_chan->device->dev, src,
				 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    MV_XOR_TEST_SIZE, 0);
	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_SUCCESS) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int
mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
				DMA_FROM_DEVICE);

	for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					   0, PAGE_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);

	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_SUCCESS) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
				i, ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = MV_XOR_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

/* This driver does not implement any of the optional DMA operations. */
static int
mv_xor_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	       unsigned long arg)
{
	return -ENOSYS;
}

static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
	struct dma_chan *chan, *_chan;
	struct device *dev = mv_chan->dmadev.dev;

	dma_async_device_unregister(&mv_chan->dmadev);

	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);

	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
				 device_node) {
		list_del(&chan->device_node);
	}

	free_irq(mv_chan->irq, mv_chan);

	return 0;
}

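/*
 * Set up one XOR channel: allocate its descriptor pool, fill in the
 * dmaengine callbacks according to cap_mask, hook up the interrupt,
 * run the memcpy/xor self-tests, and finally register the channel
 * with the dmaengine core.
 */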
static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
		   struct platform_device *pdev,
		   int idx, dma_cap_mask_t cap_mask, int irq)
{
	int ret = 0;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan)
		return ERR_PTR(-ENOMEM);

	mv_chan->idx = idx;
	mv_chan->irq = irq;

	dma_dev = &mv_chan->dmadev;

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	mv_chan->dma_desc_pool_virt =
	  dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
				 &mv_chan->dma_desc_pool, GFP_KERNEL);
	if (!mv_chan->dma_desc_pool_virt)
		return ERR_PTR(-ENOMEM);

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = cap_mask;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->device_control = mv_xor_control;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan->mmr_base = xordev->xor_base;
	if (!mv_chan->mmr_base) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_xor_device_clear_err_status(mv_chan);

	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
			  0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	mv_set_mode(mv_chan, DMA_MEMCPY);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->all_slots);
	mv_chan->dmachan.device = dma_dev;
	dma_cookie_init(&mv_chan->dmachan);

	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_xor_memcpy_self_test(mv_chan);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_xor_xor_self_test(mv_chan);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s)\n",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	return mv_chan;

err_free_irq:
	free_irq(mv_chan->irq, mv_chan);
err_free_dma:
	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	return ERR_PTR(ret);
}

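/*
 * Program the XOR engine's address decoding windows so that every
 * DRAM chip-select described by the mbus_dram_target_info is
 * reachable by the DMA engine.
 */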
static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
			 const struct mbus_dram_target_info *dram)
{
	void __iomem *base = xordev->xor_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}

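/*
 * Probe path: map the two register ranges, configure the MBus windows,
 * enable the (optional) clock, then create one channel per DT child
 * node (the "dmacap,*" properties select the capabilities) or, on
 * non-DT platforms, one channel per platform data entry.
 */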
static int mv_xor_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram;
	struct mv_xor_device *xordev;
	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct resource *res;
	int i, ret;

	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");

	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
	if (!xordev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
	if (!xordev->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
	if (!xordev->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, xordev);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	dram = mv_mbus_dram_info();
	if (dram)
		mv_xor_conf_mbus_windows(xordev, dram);

	/* Not all platforms can gate the clock, so it is not
	 * an error if the clock does not exist.
	 */
	xordev->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(xordev->clk))
		clk_prepare_enable(xordev->clk);

	if (pdev->dev.of_node) {
		struct device_node *np;
		int i = 0;

		for_each_child_of_node(pdev->dev.of_node, np) {
			dma_cap_mask_t cap_mask;
			int irq;

			dma_cap_zero(cap_mask);
			if (of_property_read_bool(np, "dmacap,memcpy"))
				dma_cap_set(DMA_MEMCPY, cap_mask);
			if (of_property_read_bool(np, "dmacap,xor"))
				dma_cap_set(DMA_XOR, cap_mask);
			if (of_property_read_bool(np, "dmacap,interrupt"))
				dma_cap_set(DMA_INTERRUPT, cap_mask);

			irq = irq_of_parse_and_map(np, 0);
			if (!irq) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			xordev->channels[i] =
				mv_xor_channel_add(xordev, pdev, i,
						   cap_mask, irq);
			if (IS_ERR(xordev->channels[i])) {
				ret = PTR_ERR(xordev->channels[i]);
				xordev->channels[i] = NULL;
				irq_dispose_mapping(irq);
				goto err_channel_add;
			}

			i++;
		}
	} else if (pdata && pdata->channels) {
		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
			struct mv_xor_channel_data *cd;
			int irq;

			cd = &pdata->channels[i];
			if (!cd) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			irq = platform_get_irq(pdev, i);
			if (irq < 0) {
				ret = irq;
				goto err_channel_add;
			}

			xordev->channels[i] =
				mv_xor_channel_add(xordev, pdev, i,
						   cd->cap_mask, irq);
			if (IS_ERR(xordev->channels[i])) {
				ret = PTR_ERR(xordev->channels[i]);
				goto err_channel_add;
			}
		}
	}

	return 0;

err_channel_add:
	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
		if (xordev->channels[i]) {
			mv_xor_channel_remove(xordev->channels[i]);
			if (pdev->dev.of_node)
				irq_dispose_mapping(xordev->channels[i]->irq);
		}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return ret;
}

static int mv_xor_remove(struct platform_device *pdev)
{
	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		if (xordev->channels[i])
			mv_xor_channel_remove(xordev->channels[i]);
	}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return 0;
}

#ifdef CONFIG_OF
static struct of_device_id mv_xor_dt_ids[] = {
	{ .compatible = "marvell,orion-xor", },
	{},
};
MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
#endif

static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.remove		= mv_xor_remove,
	.driver		= {
		.owner	        = THIS_MODULE,
		.name	        = MV_XOR_NAME,
		.of_match_table = of_match_ptr(mv_xor_dt_ids),
	},
};

static int __init mv_xor_init(void)
{
	return platform_driver_register(&mv_xor_driver);
}
module_init(mv_xor_init);

/* it's currently unsafe to unload this module */
#if 0
static void __exit mv_xor_exit(void)
{
	platform_driver_unregister(&mv_xor_driver);
	return;
}

module_exit(mv_xor_exit);
#endif

MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");