drivers/dma: fix error return code
[linux-2.6-block.git] drivers/dma/ppc4xx/adma.c
/*
 * Copyright (C) 2006-2009 DENX Software Engineering.
 *
 * Author: Yuri Tikhonov <yur@emcraft.com>
 *
 * Further porting to arch/powerpc by
 *	Anatolij Gustschin <agust@denx.de>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This driver supports the asynchronous DMA copy and RAID engines available
 * on the AMCC PPC440SPe Processors.
 * Based on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
 * ADMA driver written by D.Williams.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/async_tx.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include "adma.h"
#include "../dmaengine.h"

enum ppc_adma_init_code {
        PPC_ADMA_INIT_OK = 0,
        PPC_ADMA_INIT_MEMRES,
        PPC_ADMA_INIT_MEMREG,
        PPC_ADMA_INIT_ALLOC,
        PPC_ADMA_INIT_COHERENT,
        PPC_ADMA_INIT_CHANNEL,
        PPC_ADMA_INIT_IRQ1,
        PPC_ADMA_INIT_IRQ2,
        PPC_ADMA_INIT_REGISTER
};

static char *ppc_adma_errors[] = {
        [PPC_ADMA_INIT_OK] = "ok",
        [PPC_ADMA_INIT_MEMRES] = "failed to get memory resource",
        [PPC_ADMA_INIT_MEMREG] = "failed to request memory region",
        [PPC_ADMA_INIT_ALLOC] = "failed to allocate memory for adev structure",
        [PPC_ADMA_INIT_COHERENT] = "failed to allocate coherent memory for "
                                   "hardware descriptors",
        [PPC_ADMA_INIT_CHANNEL] = "failed to allocate memory for channel",
        [PPC_ADMA_INIT_IRQ1] = "failed to request first irq",
        [PPC_ADMA_INIT_IRQ2] = "failed to request second irq",
        [PPC_ADMA_INIT_REGISTER] = "failed to register dma async device",
};

static enum ppc_adma_init_code
ppc440spe_adma_devices[PPC440SPE_ADMA_ENGINES_NUM];

struct ppc_dma_chan_ref {
        struct dma_chan *chan;
        struct list_head node;
};

/* The list of channels exported by ppc440spe ADMA */
struct list_head
ppc440spe_adma_chan_list = LIST_HEAD_INIT(ppc440spe_adma_chan_list);

/* This flag is set when we want to refetch the xor chain in the
 * interrupt handler
 */
static u32 do_xor_refetch;

/* Pointer to DMA0, DMA1 CP/CS FIFO */
static void *ppc440spe_dma_fifo_buf;

/* Pointers to last submitted to DMA0, DMA1 CDBs */
static struct ppc440spe_adma_desc_slot *chan_last_sub[3];
static struct ppc440spe_adma_desc_slot *chan_first_cdb[3];

/* Pointer to last linked and submitted xor CB */
static struct ppc440spe_adma_desc_slot *xor_last_linked;
static struct ppc440spe_adma_desc_slot *xor_last_submit;

/* This array is used in data-check operations for storing a pattern */
static char ppc440spe_qword[16];

static atomic_t ppc440spe_adma_err_irq_ref;
static dcr_host_t ppc440spe_mq_dcr_host;
static unsigned int ppc440spe_mq_dcr_len;

/* Since RXOR operations use the common register (MQ0_CF2H) for setting up
 * the block size in transactions, we do not allow more than one RXOR
 * transaction to be active at a time. This variable records whether RXOR
 * is currently active (PPC440SPE_RXOR_RUN bit is set) or not (bit is clear).
 */
static unsigned long ppc440spe_rxor_state;

/* These are used in enable & check routines
 */
static u32 ppc440spe_r6_enabled;
static struct ppc440spe_adma_chan *ppc440spe_r6_tchan;
static struct completion ppc440spe_r6_test_comp;

static int ppc440spe_adma_dma2rxor_prep_src(
                struct ppc440spe_adma_desc_slot *desc,
                struct ppc440spe_rxor *cursor, int index,
                int src_cnt, u32 addr);
static void ppc440spe_adma_dma2rxor_set_src(
                struct ppc440spe_adma_desc_slot *desc,
                int index, dma_addr_t addr);
static void ppc440spe_adma_dma2rxor_set_mult(
                struct ppc440spe_adma_desc_slot *desc,
                int index, u8 mult);

#ifdef ADMA_LL_DEBUG
#define ADMA_LL_DBG(x) ({ if (1) x; 0; })
#else
#define ADMA_LL_DBG(x) ({ if (0) x; 0; })
#endif
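
/*
 * Annotation (not part of the original driver): the ({ if (0) x; 0; })
 * statement-expression above keeps the debug call 'x' visible to the
 * compiler, so it is always type-checked, while the dead branch is
 * optimized away when ADMA_LL_DEBUG is not defined.
 */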

static void print_cb(struct ppc440spe_adma_chan *chan, void *block)
{
        struct dma_cdb *cdb;
        struct xor_cb *cb;
        int i;

        switch (chan->device->id) {
        case 0:
        case 1:
                cdb = block;

                pr_debug("CDB at %p [%d]:\n"
                        "\t attr 0x%02x opc 0x%02x cnt 0x%08x\n"
                        "\t sg1u 0x%08x sg1l 0x%08x\n"
                        "\t sg2u 0x%08x sg2l 0x%08x\n"
                        "\t sg3u 0x%08x sg3l 0x%08x\n",
                        cdb, chan->device->id,
                        cdb->attr, cdb->opc, le32_to_cpu(cdb->cnt),
                        le32_to_cpu(cdb->sg1u), le32_to_cpu(cdb->sg1l),
                        le32_to_cpu(cdb->sg2u), le32_to_cpu(cdb->sg2l),
                        le32_to_cpu(cdb->sg3u), le32_to_cpu(cdb->sg3l)
                );
                break;
        case 2:
                cb = block;

                pr_debug("CB at %p [%d]:\n"
                        "\t cbc 0x%08x cbbc 0x%08x cbs 0x%08x\n"
                        "\t cbtah 0x%08x cbtal 0x%08x\n"
                        "\t cblah 0x%08x cblal 0x%08x\n",
                        cb, chan->device->id,
                        cb->cbc, cb->cbbc, cb->cbs,
                        cb->cbtah, cb->cbtal,
                        cb->cblah, cb->cblal);
                for (i = 0; i < 16; i++) {
                        if (i && !cb->ops[i].h && !cb->ops[i].l)
                                continue;
                        pr_debug("\t ops[%2d]: h 0x%08x l 0x%08x\n",
                                i, cb->ops[i].h, cb->ops[i].l);
                }
                break;
        }
}

static void print_cb_list(struct ppc440spe_adma_chan *chan,
                          struct ppc440spe_adma_desc_slot *iter)
{
        for (; iter; iter = iter->hw_next)
                print_cb(chan, iter->hw_desc);
}

static void prep_dma_xor_dbg(int id, dma_addr_t dst, dma_addr_t *src,
                             unsigned int src_cnt)
{
        int i;

        pr_debug("\n%s(%d):\nsrc: ", __func__, id);
        for (i = 0; i < src_cnt; i++)
                pr_debug("\t0x%016llx ", src[i]);
        pr_debug("dst:\n\t0x%016llx\n", dst);
}

static void prep_dma_pq_dbg(int id, dma_addr_t *dst, dma_addr_t *src,
                            unsigned int src_cnt)
{
        int i;

        pr_debug("\n%s(%d):\nsrc: ", __func__, id);
        for (i = 0; i < src_cnt; i++)
                pr_debug("\t0x%016llx ", src[i]);
        pr_debug("dst: ");
        for (i = 0; i < 2; i++)
                pr_debug("\t0x%016llx ", dst[i]);
}

static void prep_dma_pqzero_sum_dbg(int id, dma_addr_t *src,
                                    unsigned int src_cnt,
                                    const unsigned char *scf)
{
        int i;

        pr_debug("\n%s(%d):\nsrc(coef): ", __func__, id);
        if (scf) {
                for (i = 0; i < src_cnt; i++)
                        pr_debug("\t0x%016llx(0x%02x) ", src[i], scf[i]);
        } else {
                for (i = 0; i < src_cnt; i++)
                        pr_debug("\t0x%016llx(no) ", src[i]);
        }

        pr_debug("dst: ");
        for (i = 0; i < 2; i++)
                pr_debug("\t0x%016llx ", src[src_cnt + i]);
}

/******************************************************************************
 * Command (Descriptor) Blocks low-level routines
 ******************************************************************************/
/**
 * ppc440spe_desc_init_interrupt - initialize the descriptor for INTERRUPT
 * pseudo operation
 */
static void ppc440spe_desc_init_interrupt(struct ppc440spe_adma_desc_slot *desc,
                                          struct ppc440spe_adma_chan *chan)
{
        struct xor_cb *p;

        switch (chan->device->id) {
        case PPC440SPE_XOR_ID:
                p = desc->hw_desc;
                memset(desc->hw_desc, 0, sizeof(struct xor_cb));
                /* NOP with Command Block Complete Enable */
                p->cbc = XOR_CBCR_CBCE_BIT;
                break;
        case PPC440SPE_DMA0_ID:
        case PPC440SPE_DMA1_ID:
                memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
                /* NOP with interrupt */
                set_bit(PPC440SPE_DESC_INT, &desc->flags);
                break;
        default:
                printk(KERN_ERR "Unsupported id %d in %s\n", chan->device->id,
                       __func__);
                break;
        }
}

/**
 * ppc440spe_desc_init_null_xor - initialize the descriptor for NULL XOR
 * pseudo operation
 */
static void ppc440spe_desc_init_null_xor(struct ppc440spe_adma_desc_slot *desc)
{
        memset(desc->hw_desc, 0, sizeof(struct xor_cb));
        desc->hw_next = NULL;
        desc->src_cnt = 0;
        desc->dst_cnt = 1;
}

/**
 * ppc440spe_desc_init_xor - initialize the descriptor for XOR operation
 */
static void ppc440spe_desc_init_xor(struct ppc440spe_adma_desc_slot *desc,
                                    int src_cnt, unsigned long flags)
{
        struct xor_cb *hw_desc = desc->hw_desc;

        memset(desc->hw_desc, 0, sizeof(struct xor_cb));
        desc->hw_next = NULL;
        desc->src_cnt = src_cnt;
        desc->dst_cnt = 1;

        hw_desc->cbc = XOR_CBCR_TGT_BIT | src_cnt;
        if (flags & DMA_PREP_INTERRUPT)
                /* Enable interrupt on completion */
                hw_desc->cbc |= XOR_CBCR_CBCE_BIT;
}

/**
 * ppc440spe_desc_init_dma2pq - initialize the descriptor for PQ
 * operation in DMA2 controller
 */
static void ppc440spe_desc_init_dma2pq(struct ppc440spe_adma_desc_slot *desc,
                int dst_cnt, int src_cnt, unsigned long flags)
{
        struct xor_cb *hw_desc = desc->hw_desc;

        memset(desc->hw_desc, 0, sizeof(struct xor_cb));
        desc->hw_next = NULL;
        desc->src_cnt = src_cnt;
        desc->dst_cnt = dst_cnt;
        memset(desc->reverse_flags, 0, sizeof(desc->reverse_flags));
        desc->descs_per_op = 0;

        hw_desc->cbc = XOR_CBCR_TGT_BIT;
        if (flags & DMA_PREP_INTERRUPT)
                /* Enable interrupt on completion */
                hw_desc->cbc |= XOR_CBCR_CBCE_BIT;
}

#define DMA_CTRL_FLAGS_LAST	DMA_PREP_FENCE
#define DMA_PREP_ZERO_P		(DMA_CTRL_FLAGS_LAST << 1)
#define DMA_PREP_ZERO_Q		(DMA_PREP_ZERO_P << 1)
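
/*
 * Annotation (not part of the original driver): DMA_PREP_ZERO_P/Q are
 * driver-private prep flags.  They are allocated immediately above
 * DMA_PREP_FENCE, the last generic dmaengine control flag, so they cannot
 * collide with the public DMA_PREP_* bits passed in by clients.
 */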

/**
 * ppc440spe_desc_init_dma01pq - initialize the descriptors for PQ operation
 * with DMA0/1
 */
static void ppc440spe_desc_init_dma01pq(struct ppc440spe_adma_desc_slot *desc,
                int dst_cnt, int src_cnt, unsigned long flags,
                unsigned long op)
{
        struct dma_cdb *hw_desc;
        struct ppc440spe_adma_desc_slot *iter;
        u8 dopc;

        /* Common initialization of a PQ descriptors chain */
        set_bits(op, &desc->flags);
        desc->src_cnt = src_cnt;
        desc->dst_cnt = dst_cnt;

        /* WXOR MULTICAST if both P and Q are being computed
         * MV_SG1_SG2 if Q only
         */
        dopc = (desc->dst_cnt == DMA_DEST_MAX_NUM) ?
                DMA_CDB_OPC_MULTICAST : DMA_CDB_OPC_MV_SG1_SG2;

        list_for_each_entry(iter, &desc->group_list, chain_node) {
                hw_desc = iter->hw_desc;
                memset(iter->hw_desc, 0, sizeof(struct dma_cdb));

                if (likely(!list_is_last(&iter->chain_node,
                                &desc->group_list))) {
                        /* set 'next' pointer */
                        iter->hw_next = list_entry(iter->chain_node.next,
                                struct ppc440spe_adma_desc_slot, chain_node);
                        clear_bit(PPC440SPE_DESC_INT, &iter->flags);
                } else {
                        /* this is the last descriptor.
                         * this slot will be pasted from ADMA level
                         * each time it wants to configure parameters
                         * of the transaction (src, dst, ...)
                         */
                        iter->hw_next = NULL;
                        if (flags & DMA_PREP_INTERRUPT)
                                set_bit(PPC440SPE_DESC_INT, &iter->flags);
                        else
                                clear_bit(PPC440SPE_DESC_INT, &iter->flags);
                }
        }

        /* Set OPS depending on WXOR/RXOR type of operation */
        if (!test_bit(PPC440SPE_DESC_RXOR, &desc->flags)) {
                /* This is a WXOR-only chain:
                 * - the first descriptors are for zeroing destinations
                 *   if PPC440SPE_ZERO_P/Q is set;
                 * - the remaining descriptors are for GF-XOR operations.
                 */
                iter = list_first_entry(&desc->group_list,
                                        struct ppc440spe_adma_desc_slot,
                                        chain_node);

                if (test_bit(PPC440SPE_ZERO_P, &desc->flags)) {
                        hw_desc = iter->hw_desc;
                        hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
                        iter = list_first_entry(&iter->chain_node,
                                        struct ppc440spe_adma_desc_slot,
                                        chain_node);
                }

                if (test_bit(PPC440SPE_ZERO_Q, &desc->flags)) {
                        hw_desc = iter->hw_desc;
                        hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
                        iter = list_first_entry(&iter->chain_node,
                                        struct ppc440spe_adma_desc_slot,
                                        chain_node);
                }

                list_for_each_entry_from(iter, &desc->group_list, chain_node) {
                        hw_desc = iter->hw_desc;
                        hw_desc->opc = dopc;
                }
        } else {
                /* This is either RXOR-only or mixed RXOR/WXOR */

                /* The first 1 or 2 slots in the chain are always RXOR:
                 * if we need to calculate both P and Q, then there are two
                 * RXOR slots; if only P or only Q, then there is one.
                 */
                iter = list_first_entry(&desc->group_list,
                                        struct ppc440spe_adma_desc_slot,
                                        chain_node);
                hw_desc = iter->hw_desc;
                hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;

                if (desc->dst_cnt == DMA_DEST_MAX_NUM) {
                        iter = list_first_entry(&iter->chain_node,
                                        struct ppc440spe_adma_desc_slot,
                                        chain_node);
                        hw_desc = iter->hw_desc;
                        hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
                }

                /* The remaining descs (if any) are WXORs */
                if (test_bit(PPC440SPE_DESC_WXOR, &desc->flags)) {
                        iter = list_first_entry(&iter->chain_node,
                                        struct ppc440spe_adma_desc_slot,
                                        chain_node);
                        list_for_each_entry_from(iter, &desc->group_list,
                                        chain_node) {
                                hw_desc = iter->hw_desc;
                                hw_desc->opc = dopc;
                        }
                }
        }
}

/**
 * ppc440spe_desc_init_dma01pqzero_sum - initialize the descriptor
 * for PQ_ZERO_SUM operation
 */
static void ppc440spe_desc_init_dma01pqzero_sum(
                struct ppc440spe_adma_desc_slot *desc,
                int dst_cnt, int src_cnt)
{
        struct dma_cdb *hw_desc;
        struct ppc440spe_adma_desc_slot *iter;
        int i = 0;
        u8 dopc = (dst_cnt == 2) ? DMA_CDB_OPC_MULTICAST :
                                   DMA_CDB_OPC_MV_SG1_SG2;
        /*
         * Initialize starting from the 2nd or 3rd descriptor, depending
         * on dst_cnt. The first one or two slots are for cloning P
         * and/or Q to chan->pdest and/or chan->qdest as we have
         * to preserve the original P/Q.
         */
        iter = list_first_entry(&desc->group_list,
                                struct ppc440spe_adma_desc_slot, chain_node);
        iter = list_entry(iter->chain_node.next,
                          struct ppc440spe_adma_desc_slot, chain_node);

        if (dst_cnt > 1) {
                iter = list_entry(iter->chain_node.next,
                                  struct ppc440spe_adma_desc_slot, chain_node);
        }
        /* initialize each source descriptor in the chain */
        list_for_each_entry_from(iter, &desc->group_list, chain_node) {
                hw_desc = iter->hw_desc;
                memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
                iter->src_cnt = 0;
                iter->dst_cnt = 0;

                /* This is a ZERO_SUM operation:
                 * - <src_cnt> descriptors starting from the 2nd or 3rd
                 *   descriptor are for GF-XOR operations;
                 * - the remaining <dst_cnt> descriptors are for checking
                 *   the result.
                 */
                if (i++ < src_cnt)
                        /* MV_SG1_SG2 if only Q is being verified
                         * MULTICAST if both P and Q are being verified
                         */
                        hw_desc->opc = dopc;
                else
                        /* DMA_CDB_OPC_DCHECK128 operation */
                        hw_desc->opc = DMA_CDB_OPC_DCHECK128;

                if (likely(!list_is_last(&iter->chain_node,
                                         &desc->group_list))) {
                        /* set 'next' pointer */
                        iter->hw_next = list_entry(iter->chain_node.next,
                                        struct ppc440spe_adma_desc_slot,
                                        chain_node);
                } else {
                        /* this is the last descriptor.
                         * this slot will be pasted from ADMA level
                         * each time it wants to configure parameters
                         * of the transaction (src, dst, ...)
                         */
                        iter->hw_next = NULL;
                        /* always enable interrupt generation since we get
                         * the status of pqzero from the handler
                         */
                        set_bit(PPC440SPE_DESC_INT, &iter->flags);
                }
        }
        desc->src_cnt = src_cnt;
        desc->dst_cnt = dst_cnt;
}

/**
 * ppc440spe_desc_init_memcpy - initialize the descriptor for MEMCPY operation
 */
static void ppc440spe_desc_init_memcpy(struct ppc440spe_adma_desc_slot *desc,
                                       unsigned long flags)
{
        struct dma_cdb *hw_desc = desc->hw_desc;

        memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
        desc->hw_next = NULL;
        desc->src_cnt = 1;
        desc->dst_cnt = 1;

        if (flags & DMA_PREP_INTERRUPT)
                set_bit(PPC440SPE_DESC_INT, &desc->flags);
        else
                clear_bit(PPC440SPE_DESC_INT, &desc->flags);

        hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
}

/**
 * ppc440spe_desc_init_memset - initialize the descriptor for MEMSET operation
 */
static void ppc440spe_desc_init_memset(struct ppc440spe_adma_desc_slot *desc,
                                       int value, unsigned long flags)
{
        struct dma_cdb *hw_desc = desc->hw_desc;

        memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
        desc->hw_next = NULL;
        desc->src_cnt = 1;
        desc->dst_cnt = 1;

        if (flags & DMA_PREP_INTERRUPT)
                set_bit(PPC440SPE_DESC_INT, &desc->flags);
        else
                clear_bit(PPC440SPE_DESC_INT, &desc->flags);

        hw_desc->sg1u = hw_desc->sg1l = cpu_to_le32((u32)value);
        hw_desc->sg3u = hw_desc->sg3l = cpu_to_le32((u32)value);
        hw_desc->opc = DMA_CDB_OPC_DFILL128;
}

/**
 * ppc440spe_desc_set_src_addr - set source address into the descriptor
 */
static void ppc440spe_desc_set_src_addr(struct ppc440spe_adma_desc_slot *desc,
                                        struct ppc440spe_adma_chan *chan,
                                        int src_idx, dma_addr_t addrh,
                                        dma_addr_t addrl)
{
        struct dma_cdb *dma_hw_desc;
        struct xor_cb *xor_hw_desc;
        phys_addr_t addr64, tmplow, tmphi;

        switch (chan->device->id) {
        case PPC440SPE_DMA0_ID:
        case PPC440SPE_DMA1_ID:
                if (!addrh) {
                        addr64 = addrl;
                        tmphi = (addr64 >> 32);
                        tmplow = (addr64 & 0xFFFFFFFF);
                } else {
                        tmphi = addrh;
                        tmplow = addrl;
                }
                dma_hw_desc = desc->hw_desc;
                dma_hw_desc->sg1l = cpu_to_le32((u32)tmplow);
                dma_hw_desc->sg1u |= cpu_to_le32((u32)tmphi);
                break;
        case PPC440SPE_XOR_ID:
                xor_hw_desc = desc->hw_desc;
                xor_hw_desc->ops[src_idx].l = addrl;
                xor_hw_desc->ops[src_idx].h |= addrh;
                break;
        }
}

/**
 * ppc440spe_desc_set_src_mult - set source address mult into the descriptor
 */
static void ppc440spe_desc_set_src_mult(struct ppc440spe_adma_desc_slot *desc,
                struct ppc440spe_adma_chan *chan, u32 mult_index,
                int sg_index, unsigned char mult_value)
{
        struct dma_cdb *dma_hw_desc;
        struct xor_cb *xor_hw_desc;
        u32 *psgu;

        switch (chan->device->id) {
        case PPC440SPE_DMA0_ID:
        case PPC440SPE_DMA1_ID:
                dma_hw_desc = desc->hw_desc;

                switch (sg_index) {
                /* for RXOR operations set multiplier
                 * into source cued address
                 */
                case DMA_CDB_SG_SRC:
                        psgu = &dma_hw_desc->sg1u;
                        break;
                /* for WXOR operations set multiplier
                 * into destination cued address(es)
                 */
                case DMA_CDB_SG_DST1:
                        psgu = &dma_hw_desc->sg2u;
                        break;
                case DMA_CDB_SG_DST2:
                        psgu = &dma_hw_desc->sg3u;
                        break;
                default:
                        BUG();
                }

                *psgu |= cpu_to_le32(mult_value << mult_index);
                break;
        case PPC440SPE_XOR_ID:
                xor_hw_desc = desc->hw_desc;
                break;
        default:
                BUG();
        }
}

/**
 * ppc440spe_desc_set_dest_addr - set destination address into the descriptor
 */
static void ppc440spe_desc_set_dest_addr(struct ppc440spe_adma_desc_slot *desc,
                                         struct ppc440spe_adma_chan *chan,
                                         dma_addr_t addrh, dma_addr_t addrl,
                                         u32 dst_idx)
{
        struct dma_cdb *dma_hw_desc;
        struct xor_cb *xor_hw_desc;
        phys_addr_t addr64, tmphi, tmplow;
        u32 *psgu, *psgl;

        switch (chan->device->id) {
        case PPC440SPE_DMA0_ID:
        case PPC440SPE_DMA1_ID:
                if (!addrh) {
                        addr64 = addrl;
                        tmphi = (addr64 >> 32);
                        tmplow = (addr64 & 0xFFFFFFFF);
                } else {
                        tmphi = addrh;
                        tmplow = addrl;
                }
                dma_hw_desc = desc->hw_desc;

                psgu = dst_idx ? &dma_hw_desc->sg3u : &dma_hw_desc->sg2u;
                psgl = dst_idx ? &dma_hw_desc->sg3l : &dma_hw_desc->sg2l;

                *psgl = cpu_to_le32((u32)tmplow);
                *psgu |= cpu_to_le32((u32)tmphi);
                break;
        case PPC440SPE_XOR_ID:
                xor_hw_desc = desc->hw_desc;
                xor_hw_desc->cbtal = addrl;
                xor_hw_desc->cbtah |= addrh;
                break;
        }
}

/**
 * ppc440spe_desc_set_byte_count - set number of data bytes involved
 * into the operation
 */
static void ppc440spe_desc_set_byte_count(struct ppc440spe_adma_desc_slot *desc,
                                          struct ppc440spe_adma_chan *chan,
                                          u32 byte_count)
{
        struct dma_cdb *dma_hw_desc;
        struct xor_cb *xor_hw_desc;

        switch (chan->device->id) {
        case PPC440SPE_DMA0_ID:
        case PPC440SPE_DMA1_ID:
                dma_hw_desc = desc->hw_desc;
                dma_hw_desc->cnt = cpu_to_le32(byte_count);
                break;
        case PPC440SPE_XOR_ID:
                xor_hw_desc = desc->hw_desc;
                xor_hw_desc->cbbc = byte_count;
                break;
        }
}

/**
 * ppc440spe_desc_set_rxor_block_size - set RXOR block size
 */
static inline void ppc440spe_desc_set_rxor_block_size(u32 byte_count)
{
        /* assume that byte_count is aligned on a 512-byte boundary;
         * thus write it directly to the register (bits 23:31 are
         * reserved there).
         */
        dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CF2H, byte_count);
}
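
/*
 * Annotation (not part of the original driver): MQ0_CF2H is a single,
 * globally shared DCR, which is why only one RXOR transaction may be in
 * flight at a time; the PPC440SPE_RXOR_RUN bit in ppc440spe_rxor_state
 * (declared above) serializes its users.
 */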

/**
 * ppc440spe_desc_set_dcheck - set CHECK pattern
 */
static void ppc440spe_desc_set_dcheck(struct ppc440spe_adma_desc_slot *desc,
                struct ppc440spe_adma_chan *chan, u8 *qword)
{
        struct dma_cdb *dma_hw_desc;

        switch (chan->device->id) {
        case PPC440SPE_DMA0_ID:
        case PPC440SPE_DMA1_ID:
                dma_hw_desc = desc->hw_desc;
                iowrite32(qword[0], &dma_hw_desc->sg3l);
                iowrite32(qword[4], &dma_hw_desc->sg3u);
                iowrite32(qword[8], &dma_hw_desc->sg2l);
                iowrite32(qword[12], &dma_hw_desc->sg2u);
                break;
        default:
                BUG();
        }
}

/**
 * ppc440spe_xor_set_link - set link address in xor CB
 */
static void ppc440spe_xor_set_link(struct ppc440spe_adma_desc_slot *prev_desc,
                                   struct ppc440spe_adma_desc_slot *next_desc)
{
        struct xor_cb *xor_hw_desc = prev_desc->hw_desc;

        if (unlikely(!next_desc || !(next_desc->phys))) {
                printk(KERN_ERR "%s: next_desc=0x%p; next_desc->phys=0x%llx\n",
                       __func__, next_desc,
                       next_desc ? next_desc->phys : 0);
                BUG();
        }

        xor_hw_desc->cbs = 0;
        xor_hw_desc->cblal = next_desc->phys;
        xor_hw_desc->cblah = 0;
        xor_hw_desc->cbc |= XOR_CBCR_LNK_BIT;
}

/**
 * ppc440spe_desc_set_link - set the address of the descriptor following this
 * descriptor in the chain
 */
static void ppc440spe_desc_set_link(struct ppc440spe_adma_chan *chan,
                struct ppc440spe_adma_desc_slot *prev_desc,
                struct ppc440spe_adma_desc_slot *next_desc)
{
        unsigned long flags;
        struct ppc440spe_adma_desc_slot *tail = next_desc;

        if (unlikely(!prev_desc || !next_desc ||
                (prev_desc->hw_next && prev_desc->hw_next != next_desc))) {
                /* If the previous 'next' is overwritten something is wrong,
                 * though we may refetch from append to initiate list
                 * processing; in that case it's ok.
                 */
                printk(KERN_ERR "%s: prev_desc=0x%p; next_desc=0x%p; "
                       "prev->hw_next=0x%p\n", __func__, prev_desc,
                       next_desc, prev_desc ? prev_desc->hw_next : 0);
                BUG();
        }

        local_irq_save(flags);

        /* do s/w chaining both for DMA and XOR descriptors */
        prev_desc->hw_next = next_desc;

        switch (chan->device->id) {
        case PPC440SPE_DMA0_ID:
        case PPC440SPE_DMA1_ID:
                break;
        case PPC440SPE_XOR_ID:
                /* bind descriptor to the chain */
                while (tail->hw_next)
                        tail = tail->hw_next;
                xor_last_linked = tail;

                if (prev_desc == xor_last_submit)
                        /* do not link to the last submitted CB */
                        break;
                ppc440spe_xor_set_link(prev_desc, next_desc);
                break;
        }

        local_irq_restore(flags);
}

/**
 * ppc440spe_desc_get_link - get the address of the descriptor that
 * follows this one
 */
static inline u32 ppc440spe_desc_get_link(struct ppc440spe_adma_desc_slot *desc,
                                          struct ppc440spe_adma_chan *chan)
{
        if (!desc->hw_next)
                return 0;

        return desc->hw_next->phys;
}

/**
 * ppc440spe_desc_is_aligned - check alignment
 */
static inline int ppc440spe_desc_is_aligned(
        struct ppc440spe_adma_desc_slot *desc, int num_slots)
{
        return (desc->idx & (num_slots - 1)) ? 0 : 1;
}

/**
 * ppc440spe_chan_xor_slot_count - get the number of slots necessary for
 * XOR operation
 */
static int ppc440spe_chan_xor_slot_count(size_t len, int src_cnt,
                                         int *slots_per_op)
{
        int slot_cnt;

        /* each XOR descriptor provides up to 16 source operands */
        slot_cnt = *slots_per_op = (src_cnt + XOR_MAX_OPS - 1)/XOR_MAX_OPS;

        if (likely(len <= PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT))
                return slot_cnt;

        printk(KERN_ERR "%s: len %zu > max %d !!\n",
                __func__, len, PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
        BUG();
        return slot_cnt;
}

/**
 * ppc440spe_dma2_pq_slot_count - get the number of slots necessary for
 * DMA2 PQ operation
 */
static int ppc440spe_dma2_pq_slot_count(dma_addr_t *srcs,
                                        int src_cnt, size_t len)
{
        signed long long order = 0;
        int state = 0;
        int addr_count = 0;
        int i;
        for (i = 1; i < src_cnt; i++) {
                dma_addr_t cur_addr = srcs[i];
                dma_addr_t old_addr = srcs[i-1];
                switch (state) {
                case 0:
                        if (cur_addr == old_addr + len) {
                                /* direct RXOR */
                                order = 1;
                                state = 1;
                                if (i == src_cnt-1)
                                        addr_count++;
                        } else if (old_addr == cur_addr + len) {
                                /* reverse RXOR */
                                order = -1;
                                state = 1;
                                if (i == src_cnt-1)
                                        addr_count++;
                        } else {
                                state = 3;
                        }
                        break;
                case 1:
                        if (i == src_cnt-2 || (order == -1
                                && cur_addr != old_addr - len)) {
                                order = 0;
                                state = 0;
                                addr_count++;
                        } else if (cur_addr == old_addr + len*order) {
                                state = 2;
                                if (i == src_cnt-1)
                                        addr_count++;
                        } else if (cur_addr == old_addr + 2*len) {
                                state = 2;
                                if (i == src_cnt-1)
                                        addr_count++;
                        } else if (cur_addr == old_addr + 3*len) {
                                state = 2;
                                if (i == src_cnt-1)
                                        addr_count++;
                        } else {
                                order = 0;
                                state = 0;
                                addr_count++;
                        }
                        break;
                case 2:
                        order = 0;
                        state = 0;
                        addr_count++;
                        break;
                }
                if (state == 3)
                        break;
        }
        if (src_cnt <= 1 || (state != 1 && state != 2)) {
                pr_err("%s: src_cnt=%d, state=%d, addr_count=%d, order=%lld\n",
                        __func__, src_cnt, state, addr_count, order);
                for (i = 0; i < src_cnt; i++)
                        pr_err("\t[%d] 0x%llx\n", i, srcs[i]);
                BUG();
        }

        return (addr_count + XOR_MAX_OPS - 1) / XOR_MAX_OPS;
}
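
/*
 * Illustrative note (not part of the original driver): the state machine
 * above recognizes RXOR-friendly address patterns.  For example, with
 * len = 4096 and sources laid out back-to-back,
 *
 *	srcs[] = { B, B + 4096, B + 8192 }
 *
 * the walk sees only "direct RXOR" steps (each next address is the previous
 * one plus len), so the whole run collapses into a single RXOR address
 * slot instead of one operand per source.
 */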


/******************************************************************************
 * ADMA channel low-level routines
 ******************************************************************************/

static u32
ppc440spe_chan_get_current_descriptor(struct ppc440spe_adma_chan *chan);
static void ppc440spe_chan_append(struct ppc440spe_adma_chan *chan);

/**
 * ppc440spe_adma_device_clear_eot_status - interrupt ack to XOR or DMA engine
 */
static void ppc440spe_adma_device_clear_eot_status(
                                        struct ppc440spe_adma_chan *chan)
{
        struct dma_regs *dma_reg;
        struct xor_regs *xor_reg;
        u8 *p = chan->device->dma_desc_pool_virt;
        struct dma_cdb *cdb;
        u32 rv, i;

        switch (chan->device->id) {
        case PPC440SPE_DMA0_ID:
        case PPC440SPE_DMA1_ID:
                /* read FIFO to ack */
                dma_reg = chan->device->dma_reg;
                while ((rv = ioread32(&dma_reg->csfpl))) {
                        i = rv & DMA_CDB_ADDR_MSK;
                        cdb = (struct dma_cdb *)&p[i -
                            (u32)chan->device->dma_desc_pool];

                        /* Clear opcode to ack. This is necessary for
                         * ZeroSum operations only
                         */
                        cdb->opc = 0;

                        if (test_bit(PPC440SPE_RXOR_RUN,
                            &ppc440spe_rxor_state)) {
                                /* probably this is a completed RXOR op,
                                 * get pointer to CDB using the fact that
                                 * physical and virtual addresses of CDB
                                 * in pools have the same offsets
                                 */
                                if (le32_to_cpu(cdb->sg1u) &
                                    DMA_CUED_XOR_BASE) {
                                        /* this is a RXOR */
                                        clear_bit(PPC440SPE_RXOR_RUN,
                                                  &ppc440spe_rxor_state);
                                }
                        }

                        if (rv & DMA_CDB_STATUS_MSK) {
                                /* ZeroSum check failed
                                 */
                                struct ppc440spe_adma_desc_slot *iter;
                                dma_addr_t phys = rv & ~DMA_CDB_MSK;

                                /*
                                 * Update the status of corresponding
                                 * descriptor.
                                 */
                                list_for_each_entry(iter, &chan->chain,
                                    chain_node) {
                                        if (iter->phys == phys)
                                                break;
                                }
                                /*
                                 * if we cannot find the corresponding
                                 * slot, it's a bug
                                 */
                                BUG_ON(&iter->chain_node == &chan->chain);

                                if (iter->xor_check_result) {
                                        if (test_bit(PPC440SPE_DESC_PCHECK,
                                                     &iter->flags)) {
                                                *iter->xor_check_result |=
                                                        SUM_CHECK_P_RESULT;
                                        } else if (test_bit(
                                                        PPC440SPE_DESC_QCHECK,
                                                        &iter->flags)) {
                                                *iter->xor_check_result |=
                                                        SUM_CHECK_Q_RESULT;
                                        } else
                                                BUG();
                                }
                        }
                }

                rv = ioread32(&dma_reg->dsts);
                if (rv) {
                        pr_err("DMA%d err status: 0x%x\n",
                               chan->device->id, rv);
                        /* write back to clear */
                        iowrite32(rv, &dma_reg->dsts);
                }
                break;
        case PPC440SPE_XOR_ID:
                /* reset status bits to ack */
                xor_reg = chan->device->xor_reg;
                rv = ioread32be(&xor_reg->sr);
                iowrite32be(rv, &xor_reg->sr);

                if (rv & (XOR_IE_ICBIE_BIT|XOR_IE_ICIE_BIT|XOR_IE_RPTIE_BIT)) {
                        if (rv & XOR_IE_RPTIE_BIT) {
                                /* Read PLB Timeout Error.
                                 * Try to resubmit the CB
                                 */
                                u32 val = ioread32be(&xor_reg->ccbalr);

                                iowrite32be(val, &xor_reg->cblalr);

                                val = ioread32be(&xor_reg->crsr);
                                iowrite32be(val | XOR_CRSR_XAE_BIT,
                                            &xor_reg->crsr);
                        } else
                                pr_err("XOR ERR 0x%x status\n", rv);
                        break;
                }

                /* if the XORcore is idle, but there are unprocessed CBs
                 * then refetch the s/w chain here
                 */
                if (!(ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT) &&
                    do_xor_refetch)
                        ppc440spe_chan_append(chan);
                break;
        }
}

/**
 * ppc440spe_chan_is_busy - get the channel status
 */
static int ppc440spe_chan_is_busy(struct ppc440spe_adma_chan *chan)
{
        struct dma_regs *dma_reg;
        struct xor_regs *xor_reg;
        int busy = 0;

        switch (chan->device->id) {
        case PPC440SPE_DMA0_ID:
        case PPC440SPE_DMA1_ID:
                dma_reg = chan->device->dma_reg;
                /* if command FIFO's head and tail pointers are equal and
                 * status tail is the same as command, then channel is free
                 */
                if (ioread16(&dma_reg->cpfhp) != ioread16(&dma_reg->cpftp) ||
                    ioread16(&dma_reg->cpftp) != ioread16(&dma_reg->csftp))
                        busy = 1;
                break;
        case PPC440SPE_XOR_ID:
                /* use the special status bit for the XORcore
                 */
                xor_reg = chan->device->xor_reg;
                busy = (ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT) ? 1 : 0;
                break;
        }

        return busy;
}

/**
 * ppc440spe_chan_set_first_xor_descriptor - init XORcore chain
 */
static void ppc440spe_chan_set_first_xor_descriptor(
                                struct ppc440spe_adma_chan *chan,
                                struct ppc440spe_adma_desc_slot *next_desc)
{
        struct xor_regs *xor_reg = chan->device->xor_reg;

        if (ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT)
                printk(KERN_INFO "%s: Warn: XORcore is running "
                        "when trying to set the first CDB!\n",
                        __func__);

        xor_last_submit = xor_last_linked = next_desc;

        iowrite32be(XOR_CRSR_64BA_BIT, &xor_reg->crsr);

        iowrite32be(next_desc->phys, &xor_reg->cblalr);
        iowrite32be(0, &xor_reg->cblahr);
        iowrite32be(ioread32be(&xor_reg->cbcr) | XOR_CBCR_LNK_BIT,
                    &xor_reg->cbcr);

        chan->hw_chain_inited = 1;
}

/**
 * ppc440spe_dma_put_desc - put DMA0,1 descriptor to FIFO.
 * called with irqs disabled
 */
static void ppc440spe_dma_put_desc(struct ppc440spe_adma_chan *chan,
                                   struct ppc440spe_adma_desc_slot *desc)
{
        u32 pcdb;
        struct dma_regs *dma_reg = chan->device->dma_reg;

        pcdb = desc->phys;
        if (!test_bit(PPC440SPE_DESC_INT, &desc->flags))
                pcdb |= DMA_CDB_NO_INT;

        chan_last_sub[chan->device->id] = desc;

        ADMA_LL_DBG(print_cb(chan, desc->hw_desc));

        iowrite32(pcdb, &dma_reg->cpfpl);
}

/**
 * ppc440spe_chan_append - update the h/w chain in the channel
 */
static void ppc440spe_chan_append(struct ppc440spe_adma_chan *chan)
{
        struct xor_regs *xor_reg;
        struct ppc440spe_adma_desc_slot *iter;
        struct xor_cb *xcb;
        u32 cur_desc;
        unsigned long flags;

        local_irq_save(flags);

        switch (chan->device->id) {
        case PPC440SPE_DMA0_ID:
        case PPC440SPE_DMA1_ID:
                cur_desc = ppc440spe_chan_get_current_descriptor(chan);

                if (likely(cur_desc)) {
                        iter = chan_last_sub[chan->device->id];
                        BUG_ON(!iter);
                } else {
                        /* first peer */
                        iter = chan_first_cdb[chan->device->id];
                        BUG_ON(!iter);
                        ppc440spe_dma_put_desc(chan, iter);
                        chan->hw_chain_inited = 1;
                }

                /* is there something new to append */
                if (!iter->hw_next)
                        break;

                /* flush descriptors from the s/w queue to fifo */
                list_for_each_entry_continue(iter, &chan->chain, chain_node) {
                        ppc440spe_dma_put_desc(chan, iter);
                        if (!iter->hw_next)
                                break;
                }
                break;
        case PPC440SPE_XOR_ID:
                /* update h/w links and refetch */
                if (!xor_last_submit->hw_next)
                        break;

                xor_reg = chan->device->xor_reg;
                /* the last linked CDB has to generate an interrupt
                 * so that we are able to append the next lists to h/w
                 * regardless of the XOR engine state at the moment of
                 * appending these next lists
                 */
                xcb = xor_last_linked->hw_desc;
                xcb->cbc |= XOR_CBCR_CBCE_BIT;

                if (!(ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT)) {
                        /* XORcore is idle. Refetch now */
                        do_xor_refetch = 0;
                        ppc440spe_xor_set_link(xor_last_submit,
                                xor_last_submit->hw_next);

                        ADMA_LL_DBG(print_cb_list(chan,
                                xor_last_submit->hw_next));

                        xor_last_submit = xor_last_linked;
                        iowrite32be(ioread32be(&xor_reg->crsr) |
                                    XOR_CRSR_RCBE_BIT | XOR_CRSR_64BA_BIT,
                                    &xor_reg->crsr);
                } else {
                        /* XORcore is running. Refetch later in the handler */
                        do_xor_refetch = 1;
                }

                break;
        }

        local_irq_restore(flags);
}

/**
 * ppc440spe_chan_get_current_descriptor - get the currently executed descriptor
 */
static u32
ppc440spe_chan_get_current_descriptor(struct ppc440spe_adma_chan *chan)
{
        struct dma_regs *dma_reg;
        struct xor_regs *xor_reg;

        if (unlikely(!chan->hw_chain_inited))
                /* h/w descriptor chain is not initialized yet */
                return 0;

        switch (chan->device->id) {
        case PPC440SPE_DMA0_ID:
        case PPC440SPE_DMA1_ID:
                dma_reg = chan->device->dma_reg;
                return ioread32(&dma_reg->acpl) & (~DMA_CDB_MSK);
        case PPC440SPE_XOR_ID:
                xor_reg = chan->device->xor_reg;
                return ioread32be(&xor_reg->ccbalr);
        }
        return 0;
}

/**
 * ppc440spe_chan_run - enable the channel
 */
static void ppc440spe_chan_run(struct ppc440spe_adma_chan *chan)
{
        struct xor_regs *xor_reg;

        switch (chan->device->id) {
        case PPC440SPE_DMA0_ID:
        case PPC440SPE_DMA1_ID:
                /* DMAs are always enabled, do nothing */
                break;
        case PPC440SPE_XOR_ID:
                /* drain write buffer */
                xor_reg = chan->device->xor_reg;

                /* fetch descriptor pointed to in <link> */
                iowrite32be(XOR_CRSR_64BA_BIT | XOR_CRSR_XAE_BIT,
                            &xor_reg->crsr);
                break;
        }
}

/******************************************************************************
 * ADMA device level
 ******************************************************************************/

static void ppc440spe_chan_start_null_xor(struct ppc440spe_adma_chan *chan);
static int ppc440spe_adma_alloc_chan_resources(struct dma_chan *chan);

static dma_cookie_t
ppc440spe_adma_tx_submit(struct dma_async_tx_descriptor *tx);

static void ppc440spe_adma_set_dest(struct ppc440spe_adma_desc_slot *tx,
                                    dma_addr_t addr, int index);
static void
ppc440spe_adma_memcpy_xor_set_src(struct ppc440spe_adma_desc_slot *tx,
                                  dma_addr_t addr, int index);

static void
ppc440spe_adma_pq_set_dest(struct ppc440spe_adma_desc_slot *tx,
                           dma_addr_t *paddr, unsigned long flags);
static void
ppc440spe_adma_pq_set_src(struct ppc440spe_adma_desc_slot *tx,
                          dma_addr_t addr, int index);
static void
ppc440spe_adma_pq_set_src_mult(struct ppc440spe_adma_desc_slot *tx,
                               unsigned char mult, int index, int dst_pos);
static void
ppc440spe_adma_pqzero_sum_set_dest(struct ppc440spe_adma_desc_slot *tx,
                                   dma_addr_t paddr, dma_addr_t qaddr);

static struct page *ppc440spe_rxor_srcs[32];

/**
 * ppc440spe_can_rxor - check if the operands may be processed with RXOR
 */
static int ppc440spe_can_rxor(struct page **srcs, int src_cnt, size_t len)
{
        int i, order = 0, state = 0;
        int idx = 0;

        if (unlikely(!(src_cnt > 1)))
                return 0;

        BUG_ON(src_cnt > ARRAY_SIZE(ppc440spe_rxor_srcs));

        /* Skip holes in the source list before checking */
        for (i = 0; i < src_cnt; i++) {
                if (!srcs[i])
                        continue;
                ppc440spe_rxor_srcs[idx++] = srcs[i];
        }
        src_cnt = idx;

        for (i = 1; i < src_cnt; i++) {
                char *cur_addr = page_address(ppc440spe_rxor_srcs[i]);
                char *old_addr = page_address(ppc440spe_rxor_srcs[i - 1]);

                switch (state) {
                case 0:
                        if (cur_addr == old_addr + len) {
                                /* direct RXOR */
                                order = 1;
                                state = 1;
                        } else if (old_addr == cur_addr + len) {
                                /* reverse RXOR */
                                order = -1;
                                state = 1;
                        } else
                                goto out;
                        break;
                case 1:
                        if ((i == src_cnt - 2) ||
                            (order == -1 && cur_addr != old_addr - len)) {
                                order = 0;
                                state = 0;
                        } else if ((cur_addr == old_addr + len * order) ||
                                   (cur_addr == old_addr + 2 * len) ||
                                   (cur_addr == old_addr + 3 * len)) {
                                state = 2;
                        } else {
                                order = 0;
                                state = 0;
                        }
                        break;
                case 2:
                        order = 0;
                        state = 0;
                        break;
                }
        }

out:
        if (state == 1 || state == 2)
                return 1;

        return 0;
}
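
/*
 * Illustrative note (not part of the original driver): "direct RXOR" means
 * each source sits exactly len bytes above the previous one (B, B + len,
 * B + 2*len, ...), while "reverse RXOR" means each source sits len bytes
 * below it (B, B - len, B - 2*len, ...).  Only such regular layouts can be
 * described by a single RXOR region, which is what this predicate checks.
 */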

/**
 * ppc440spe_adma_estimate - estimate the efficiency of processing
 *	the operation given on this channel. It's assumed that 'chan' is
 *	capable of processing the 'cap' type of operation.
 * @chan: channel to use
 * @cap: type of transaction
 * @dst_lst: array of destination pointers
 * @dst_cnt: number of destination operands
 * @src_lst: array of source pointers
 * @src_cnt: number of source operands
 * @src_sz: size of each source operand
 */
static int ppc440spe_adma_estimate(struct dma_chan *chan,
        enum dma_transaction_type cap, struct page **dst_lst, int dst_cnt,
        struct page **src_lst, int src_cnt, size_t src_sz)
{
        int ef = 1;

        if (cap == DMA_PQ || cap == DMA_PQ_VAL) {
                /* If RAID-6 capabilities were not activated don't try
                 * to use them
                 */
                if (unlikely(!ppc440spe_r6_enabled))
                        return -1;
        }
        /* In the current implementation of the ppc440spe ADMA driver it
         * makes sense to pick out only the pq case, because it may be
         * processed:
         * (1) either using the Biskup method on DMA2;
         * (2) or on DMA0/1.
         * Thus we favour (1) if the sources are suitable; else let the
         * operation be processed on one of the DMA0/1 engines.
         * In the sum_product case, where the destination is also a
         * source, process it on DMA0/1 only.
         */
        if (cap == DMA_PQ && chan->chan_id == PPC440SPE_XOR_ID) {

                if (dst_cnt == 1 && src_cnt == 2 && dst_lst[0] == src_lst[1])
                        ef = 0; /* sum_product case, process on DMA0/1 */
                else if (ppc440spe_can_rxor(src_lst, src_cnt, src_sz))
                        ef = 3; /* override (DMA0/1 + idle) */
                else
                        ef = 0; /* can't process on DMA2 if !rxor */
        }

        /* channel idleness increases the priority */
        if (likely(ef) &&
            !ppc440spe_chan_is_busy(to_ppc440spe_adma_chan(chan)))
                ef++;

        return ef;
}

struct dma_chan *
ppc440spe_async_tx_find_best_channel(enum dma_transaction_type cap,
        struct page **dst_lst, int dst_cnt, struct page **src_lst,
        int src_cnt, size_t src_sz)
{
        struct dma_chan *best_chan = NULL;
        struct ppc_dma_chan_ref *ref;
        int best_rank = -1;

        if (unlikely(!src_sz))
                return NULL;
        if (src_sz > PAGE_SIZE) {
                /*
                 * should a user of the api ever pass > PAGE_SIZE requests
                 * we sort out cases where temporary page-sized buffers
                 * are used.
                 */
                switch (cap) {
                case DMA_PQ:
                        if (src_cnt == 1 && dst_lst[1] == src_lst[0])
                                return NULL;
                        if (src_cnt == 2 && dst_lst[1] == src_lst[1])
                                return NULL;
                        break;
                case DMA_PQ_VAL:
                case DMA_XOR_VAL:
                        return NULL;
                default:
                        break;
                }
        }

        list_for_each_entry(ref, &ppc440spe_adma_chan_list, node) {
                if (dma_has_cap(cap, ref->chan->device->cap_mask)) {
                        int rank;

                        rank = ppc440spe_adma_estimate(ref->chan, cap, dst_lst,
                                        dst_cnt, src_lst, src_cnt, src_sz);
                        if (rank > best_rank) {
                                best_rank = rank;
                                best_chan = ref->chan;
                        }
                }
        }

        return best_chan;
}
EXPORT_SYMBOL_GPL(ppc440spe_async_tx_find_best_channel);
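
/*
 * Usage sketch (annotation, not part of the original driver): a RAID
 * client would typically pick a channel here and then drive it through
 * the generic dmaengine/async_tx flow, e.g.:
 *
 *	struct dma_chan *chan;
 *
 *	chan = ppc440spe_async_tx_find_best_channel(DMA_PQ, dst_lst, 2,
 *						     src_lst, src_cnt,
 *						     PAGE_SIZE);
 *	if (chan) {
 *		// prepare and submit a PQ descriptor on the chosen channel
 *	}
 *
 * The ranking favours an idle DMA2 engine for RXOR-friendly source
 * layouts and falls back to DMA0/1 otherwise.
 */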

/**
 * ppc440spe_get_group_entry - get group entry with index entry_idx
 * @tdesc: is the last allocated slot in the group.
 */
static struct ppc440spe_adma_desc_slot *
ppc440spe_get_group_entry(struct ppc440spe_adma_desc_slot *tdesc, u32 entry_idx)
{
        struct ppc440spe_adma_desc_slot *iter = tdesc->group_head;
        int i = 0;

        if (entry_idx < 0 || entry_idx >= (tdesc->src_cnt + tdesc->dst_cnt)) {
                printk("%s: entry_idx %d, src_cnt %d, dst_cnt %d\n",
                        __func__, entry_idx, tdesc->src_cnt, tdesc->dst_cnt);
                BUG();
        }

        list_for_each_entry(iter, &tdesc->group_list, chain_node) {
                if (i++ == entry_idx)
                        break;
        }
        return iter;
}

/**
 * ppc440spe_adma_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &ppc440spe_chan->lock while calling this function
 */
static void ppc440spe_adma_free_slots(struct ppc440spe_adma_desc_slot *slot,
                                      struct ppc440spe_adma_chan *chan)
{
        int stride = slot->slots_per_op;

        while (stride--) {
                slot->slots_per_op = 0;
                slot = list_entry(slot->slot_node.next,
                                struct ppc440spe_adma_desc_slot,
                                slot_node);
        }
}

/**
 * ppc440spe_adma_run_tx_complete_actions - call functions to be called
 * upon completion
 */
static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
                struct ppc440spe_adma_desc_slot *desc,
                struct ppc440spe_adma_chan *chan,
                dma_cookie_t cookie)
{
        int i;

        BUG_ON(desc->async_tx.cookie < 0);
        if (desc->async_tx.cookie > 0) {
                cookie = desc->async_tx.cookie;
                desc->async_tx.cookie = 0;

                /* call the callback (must not sleep or submit new
                 * operations to this channel)
                 */
                if (desc->async_tx.callback)
                        desc->async_tx.callback(
                                desc->async_tx.callback_param);

                dma_descriptor_unmap(&desc->async_tx);
        }

        /* run dependent operations */
        dma_run_dependencies(&desc->async_tx);

        return cookie;
}

/**
 * ppc440spe_adma_clean_slot - clean up CDB slot (if ack is set)
 */
static int ppc440spe_adma_clean_slot(struct ppc440spe_adma_desc_slot *desc,
                                     struct ppc440spe_adma_chan *chan)
{
        /* the client is allowed to attach dependent operations
         * until 'ack' is set
         */
        if (!async_tx_test_ack(&desc->async_tx))
                return 0;

        /* leave the last descriptor in the chain
         * so we can append to it
         */
        if (list_is_last(&desc->chain_node, &chan->chain) ||
            desc->phys == ppc440spe_chan_get_current_descriptor(chan))
                return 1;

        if (chan->device->id != PPC440SPE_XOR_ID) {
                /* our DMA interrupt handler clears opc field of
                 * each processed descriptor. For all types of
                 * operations except for ZeroSum we do not actually
                 * need ack from the interrupt handler. ZeroSum is a
                 * special case since the result of this operation
                 * is available from the handler only, so if we see
                 * such type of descriptor (which is unprocessed yet)
                 * then leave it in chain.
                 */
                struct dma_cdb *cdb = desc->hw_desc;
                if (cdb->opc == DMA_CDB_OPC_DCHECK128)
                        return 1;
        }

        dev_dbg(chan->device->common.dev, "\tfree slot %llx: %d stride: %d\n",
                desc->phys, desc->idx, desc->slots_per_op);

        list_del(&desc->chain_node);
        ppc440spe_adma_free_slots(desc, chan);
        return 0;
}

/**
 * __ppc440spe_adma_slot_cleanup - this is the common clean-up routine,
 * which runs through the channel CDBs list until it reaches the descriptor
 * currently being processed. When the routine determines that all CDBs of a
 * group are completed, the corresponding callbacks (if any) are called and
 * the slots are freed.
 */
static void __ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan)
{
        struct ppc440spe_adma_desc_slot *iter, *_iter, *group_start = NULL;
        dma_cookie_t cookie = 0;
        u32 current_desc = ppc440spe_chan_get_current_descriptor(chan);
        int busy = ppc440spe_chan_is_busy(chan);
        int seen_current = 0, slot_cnt = 0, slots_per_op = 0;

        dev_dbg(chan->device->common.dev, "ppc440spe adma%d: %s\n",
                chan->device->id, __func__);

        if (!current_desc) {
                /* There were no transactions yet, so
                 * nothing to clean
                 */
                return;
        }

        /* free completed slots from the chain starting with
         * the oldest descriptor
         */
        list_for_each_entry_safe(iter, _iter, &chan->chain,
                                 chain_node) {
                dev_dbg(chan->device->common.dev, "\tcookie: %d slot: %d "
                        "busy: %d this_desc: %#llx next_desc: %#x "
                        "cur: %#x ack: %d\n",
                        iter->async_tx.cookie, iter->idx, busy, iter->phys,
                        ppc440spe_desc_get_link(iter, chan), current_desc,
                        async_tx_test_ack(&iter->async_tx));
                prefetch(_iter);
                prefetch(&_iter->async_tx);

                /* do not advance past the current descriptor loaded into the
                 * hardware channel; subsequent descriptors are either in
                 * process or have not been submitted
                 */
                if (seen_current)
                        break;

                /* stop the search if we reach the current descriptor and the
                 * channel is busy, or if it appears that the current descriptor
                 * needs to be re-read (i.e. has been appended to)
                 */
                if (iter->phys == current_desc) {
                        BUG_ON(seen_current++);
                        if (busy || ppc440spe_desc_get_link(iter, chan)) {
                                /* not all descriptors of the group have
                                 * been completed; exit.
                                 */
                                break;
                        }
                }

                /* detect the start of a group transaction */
                if (!slot_cnt && !slots_per_op) {
                        slot_cnt = iter->slot_cnt;
                        slots_per_op = iter->slots_per_op;
                        if (slot_cnt <= slots_per_op) {
                                slot_cnt = 0;
                                slots_per_op = 0;
                        }
                }

                if (slot_cnt) {
                        if (!group_start)
                                group_start = iter;
                        slot_cnt -= slots_per_op;
                }

                /* all the members of a group are complete */
                if (slots_per_op != 0 && slot_cnt == 0) {
                        struct ppc440spe_adma_desc_slot *grp_iter, *_grp_iter;
                        int end_of_chain = 0;

                        /* clean up the group */
                        slot_cnt = group_start->slot_cnt;
                        grp_iter = group_start;
                        list_for_each_entry_safe_from(grp_iter, _grp_iter,
                                &chan->chain, chain_node) {

                                cookie = ppc440spe_adma_run_tx_complete_actions(
                                        grp_iter, chan, cookie);

                                slot_cnt -= slots_per_op;
                                end_of_chain = ppc440spe_adma_clean_slot(
                                        grp_iter, chan);
                                if (end_of_chain && slot_cnt) {
                                        /* Should wait for ZeroSum completion */
                                        if (cookie > 0)
                                                chan->common.completed_cookie = cookie;
                                        return;
                                }

                                if (slot_cnt == 0 || end_of_chain)
                                        break;
                        }

                        /* the group should be complete at this point */
                        BUG_ON(slot_cnt);

                        slots_per_op = 0;
                        group_start = NULL;
                        if (end_of_chain)
                                break;
                        else
                                continue;
                } else if (slots_per_op) /* wait for group completion */
                        continue;

                cookie = ppc440spe_adma_run_tx_complete_actions(iter, chan,
                                cookie);

                if (ppc440spe_adma_clean_slot(iter, chan))
                        break;
        }

        BUG_ON(!seen_current);

        if (cookie > 0) {
                chan->common.completed_cookie = cookie;
                pr_debug("\tcompleted cookie %d\n", cookie);
        }

}

/**
 * ppc440spe_adma_tasklet - clean up watch-dog initiator
 */
static void ppc440spe_adma_tasklet(unsigned long data)
{
        struct ppc440spe_adma_chan *chan = (struct ppc440spe_adma_chan *) data;

        spin_lock_nested(&chan->lock, SINGLE_DEPTH_NESTING);
        __ppc440spe_adma_slot_cleanup(chan);
        spin_unlock(&chan->lock);
}

/**
 * ppc440spe_adma_slot_cleanup - clean up scheduled initiator
 */
static void ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan)
{
        spin_lock_bh(&chan->lock);
        __ppc440spe_adma_slot_cleanup(chan);
        spin_unlock_bh(&chan->lock);
}

/**
 * ppc440spe_adma_alloc_slots - allocate free slots (if any)
 */
static struct ppc440spe_adma_desc_slot *ppc440spe_adma_alloc_slots(
                struct ppc440spe_adma_chan *chan, int num_slots,
                int slots_per_op)
{
        struct ppc440spe_adma_desc_slot *iter = NULL, *_iter;
        struct ppc440spe_adma_desc_slot *alloc_start = NULL;
        struct list_head chain = LIST_HEAD_INIT(chain);
        int slots_found, retry = 0;


        BUG_ON(!num_slots || !slots_per_op);
        /* start the search from the last allocated descriptor;
         * if a contiguous allocation cannot be found, start searching
         * from the beginning of the list
         */
retry:
        slots_found = 0;
        if (retry == 0)
                iter = chan->last_used;
        else
                iter = list_entry(&chan->all_slots,
                                  struct ppc440spe_adma_desc_slot,
                                  slot_node);
        list_for_each_entry_safe_continue(iter, _iter, &chan->all_slots,
            slot_node) {
                prefetch(_iter);
                prefetch(&_iter->async_tx);
                if (iter->slots_per_op) {
                        slots_found = 0;
                        continue;
                }

                /* start the allocation if the slot is correctly aligned */
                if (!slots_found++)
                        alloc_start = iter;

                if (slots_found == num_slots) {
                        struct ppc440spe_adma_desc_slot *alloc_tail = NULL;
                        struct ppc440spe_adma_desc_slot *last_used = NULL;

                        iter = alloc_start;
                        while (num_slots) {
                                int i;
                                /* pre-ack all but the last descriptor */
                                if (num_slots != slots_per_op)
                                        async_tx_ack(&iter->async_tx);

                                list_add_tail(&iter->chain_node, &chain);
                                alloc_tail = iter;
                                iter->async_tx.cookie = 0;
                                iter->hw_next = NULL;
                                iter->flags = 0;
                                iter->slot_cnt = num_slots;
                                iter->xor_check_result = NULL;
                                for (i = 0; i < slots_per_op; i++) {
                                        iter->slots_per_op = slots_per_op - i;
                                        last_used = iter;
                                        iter = list_entry(iter->slot_node.next,
                                                struct ppc440spe_adma_desc_slot,
                                                slot_node);
                                }
                                num_slots -= slots_per_op;
                        }
                        alloc_tail->group_head = alloc_start;
                        alloc_tail->async_tx.cookie = -EBUSY;
                        list_splice(&chain, &alloc_tail->group_list);
                        chan->last_used = last_used;
                        return alloc_tail;
                }
        }
        if (!retry++)
                goto retry;

        /* try to free some slots if the allocation fails */
        tasklet_schedule(&chan->irq_tasklet);
        return NULL;
}
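
/*
 * Illustrative note (not part of the original driver): a request for
 * num_slots = 4 with slots_per_op = 2 carves two operations out of four
 * contiguous slots.  Within each operation the slots_per_op field counts
 * down (2, 1), so ppc440spe_adma_free_slots() can later walk exactly one
 * operation's worth of slots when flagging them for reuse.
 */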

/**
 * ppc440spe_adma_alloc_chan_resources - allocate pools for CDB slots
 */
static int ppc440spe_adma_alloc_chan_resources(struct dma_chan *chan)
{
        struct ppc440spe_adma_chan *ppc440spe_chan;
        struct ppc440spe_adma_desc_slot *slot = NULL;
        char *hw_desc;
        int i, db_sz;
        int init;

        ppc440spe_chan = to_ppc440spe_adma_chan(chan);
        init = ppc440spe_chan->slots_allocated ? 0 : 1;
        chan->chan_id = ppc440spe_chan->device->id;

        /* Allocate descriptor slots */
        i = ppc440spe_chan->slots_allocated;
        if (ppc440spe_chan->device->id != PPC440SPE_XOR_ID)
                db_sz = sizeof(struct dma_cdb);
        else
                db_sz = sizeof(struct xor_cb);

        for (; i < (ppc440spe_chan->device->pool_size / db_sz); i++) {
                slot = kzalloc(sizeof(struct ppc440spe_adma_desc_slot),
                               GFP_KERNEL);
                if (!slot) {
                        printk(KERN_INFO "SPE ADMA Channel only initialized"
                                " %d descriptor slots\n", i--);
                        break;
                }

                hw_desc = (char *) ppc440spe_chan->device->dma_desc_pool_virt;
                slot->hw_desc = (void *) &hw_desc[i * db_sz];
                dma_async_tx_descriptor_init(&slot->async_tx, chan);
                slot->async_tx.tx_submit = ppc440spe_adma_tx_submit;
                INIT_LIST_HEAD(&slot->chain_node);
                INIT_LIST_HEAD(&slot->slot_node);
                INIT_LIST_HEAD(&slot->group_list);
                slot->phys = ppc440spe_chan->device->dma_desc_pool + i * db_sz;
                slot->idx = i;

                spin_lock_bh(&ppc440spe_chan->lock);
                ppc440spe_chan->slots_allocated++;
                list_add_tail(&slot->slot_node, &ppc440spe_chan->all_slots);
                spin_unlock_bh(&ppc440spe_chan->lock);
        }

        if (i && !ppc440spe_chan->last_used) {
                ppc440spe_chan->last_used =
                        list_entry(ppc440spe_chan->all_slots.next,
                                   struct ppc440spe_adma_desc_slot,
                                   slot_node);
        }

        dev_dbg(ppc440spe_chan->device->common.dev,
                "ppc440spe adma%d: allocated %d descriptor slots\n",
                ppc440spe_chan->device->id, i);

        /* initialize the channel and the chain with a null operation */
        if (init) {
                switch (ppc440spe_chan->device->id) {
                case PPC440SPE_DMA0_ID:
                case PPC440SPE_DMA1_ID:
                        ppc440spe_chan->hw_chain_inited = 0;
                        /* Use WXOR for self-testing */
                        if (!ppc440spe_r6_tchan)
                                ppc440spe_r6_tchan = ppc440spe_chan;
                        break;
                case PPC440SPE_XOR_ID:
                        ppc440spe_chan_start_null_xor(ppc440spe_chan);
                        break;
                default:
                        BUG();
                }
                ppc440spe_chan->needs_unmap = 1;
        }

        return (i > 0) ? i : -ENOMEM;
}

/**
 * ppc440spe_rxor_set_region - set RXOR region mask in the xor CB operand
 */
static void ppc440spe_rxor_set_region(struct ppc440spe_adma_desc_slot *desc,
                                      u8 xor_arg_no, u32 mask)
{
        struct xor_cb *xcb = desc->hw_desc;

        xcb->ops[xor_arg_no].h |= mask;
}

/**
 * ppc440spe_rxor_set_src - set RXOR source address in the xor CB operand
 */
static void ppc440spe_rxor_set_src(struct ppc440spe_adma_desc_slot *desc,
                                   u8 xor_arg_no, dma_addr_t addr)
{
        struct xor_cb *xcb = desc->hw_desc;

        xcb->ops[xor_arg_no].h |= DMA_CUED_XOR_BASE;
        xcb->ops[xor_arg_no].l = addr;
}

/**
 * ppc440spe_rxor_set_mult - set RXOR multiplier in the xor CB operand
 */
static void ppc440spe_rxor_set_mult(struct ppc440spe_adma_desc_slot *desc,
                                    u8 xor_arg_no, u8 idx, u8 mult)
{
        struct xor_cb *xcb = desc->hw_desc;

        xcb->ops[xor_arg_no].h |= mult << (DMA_CUED_MULT1_OFF + idx * 8);
}

/**
 * ppc440spe_adma_check_threshold - append CDBs to h/w chain if threshold
 * has been achieved
 */
static void ppc440spe_adma_check_threshold(struct ppc440spe_adma_chan *chan)
{
        dev_dbg(chan->device->common.dev, "ppc440spe adma%d: pending: %d\n",
                chan->device->id, chan->pending);

        if (chan->pending >= PPC440SPE_ADMA_THRESHOLD) {
                chan->pending = 0;
                ppc440spe_chan_append(chan);
        }
}
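
/*
 * Annotation (not part of the original driver): submitted descriptors are
 * batched in software and only flushed to the hardware chain once
 * PPC440SPE_ADMA_THRESHOLD of them are pending, which amortizes the cost
 * of ppc440spe_chan_append() over several operations.  A client that needs
 * immediate execution relies on the standard dmaengine issue_pending path
 * instead.
 */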
1935
1936/**
1937 * ppc440spe_adma_tx_submit - submit new descriptor group to the channel
1938 * (the descriptors are not necessarily appended to the h/w
1939 * chain right away)
1940 */
1941static dma_cookie_t ppc440spe_adma_tx_submit(struct dma_async_tx_descriptor *tx)
1942{
1943 struct ppc440spe_adma_desc_slot *sw_desc;
1944 struct ppc440spe_adma_chan *chan = to_ppc440spe_adma_chan(tx->chan);
1945 struct ppc440spe_adma_desc_slot *group_start, *old_chain_tail;
1946 int slot_cnt;
1947 int slots_per_op;
1948 dma_cookie_t cookie;
1949
1950 sw_desc = tx_to_ppc440spe_adma_slot(tx);
1951
1952 group_start = sw_desc->group_head;
1953 slot_cnt = group_start->slot_cnt;
1954 slots_per_op = group_start->slots_per_op;
1955
1956 spin_lock_bh(&chan->lock);
1957 	cookie = dma_cookie_assign(tx);
1958
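	/* Splice the descriptor group into the s/w chain; if the chain was
	 * not empty, also link the new group after the old tail in the h/w
	 * chain so the engine processes it next.
	 */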
1959 if (unlikely(list_empty(&chan->chain))) {
1960 /* first peer */
1961 list_splice_init(&sw_desc->group_list, &chan->chain);
1962 chan_first_cdb[chan->device->id] = group_start;
1963 } else {
1964 /* isn't first peer, bind CDBs to chain */
1965 old_chain_tail = list_entry(chan->chain.prev,
1966 struct ppc440spe_adma_desc_slot,
1967 chain_node);
1968 list_splice_init(&sw_desc->group_list,
1969 &old_chain_tail->chain_node);
1970 /* fix up the hardware chain */
1971 ppc440spe_desc_set_link(chan, old_chain_tail, group_start);
1972 }
1973
1974 /* increment the pending count by the number of operations */
1975 chan->pending += slot_cnt / slots_per_op;
1976 ppc440spe_adma_check_threshold(chan);
1977 spin_unlock_bh(&chan->lock);
1978
1979 dev_dbg(chan->device->common.dev,
1980 "ppc440spe adma%d: %s cookie: %d slot: %d tx %p\n",
1981 chan->device->id, __func__,
1982 sw_desc->async_tx.cookie, sw_desc->idx, sw_desc);
1983
1984 return cookie;
1985}
1986
1987/**
1988 * ppc440spe_adma_prep_dma_interrupt - prepare CDB for a pseudo DMA operation
1989 */
1990static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_interrupt(
1991 struct dma_chan *chan, unsigned long flags)
1992{
1993 struct ppc440spe_adma_chan *ppc440spe_chan;
1994 struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
1995 int slot_cnt, slots_per_op;
1996
1997 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
1998
1999 dev_dbg(ppc440spe_chan->device->common.dev,
2000 "ppc440spe adma%d: %s\n", ppc440spe_chan->device->id,
2001 __func__);
2002
2003 spin_lock_bh(&ppc440spe_chan->lock);
2004 slot_cnt = slots_per_op = 1;
2005 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
2006 slots_per_op);
2007 if (sw_desc) {
2008 group_start = sw_desc->group_head;
2009 ppc440spe_desc_init_interrupt(group_start, ppc440spe_chan);
2010 group_start->unmap_len = 0;
2011 sw_desc->async_tx.flags = flags;
2012 }
2013 spin_unlock_bh(&ppc440spe_chan->lock);
2014
2015 return sw_desc ? &sw_desc->async_tx : NULL;
2016}
2017
2018/**
2019 * ppc440spe_adma_prep_dma_memcpy - prepare CDB for a MEMCPY operation
2020 */
2021static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memcpy(
2022 struct dma_chan *chan, dma_addr_t dma_dest,
2023 dma_addr_t dma_src, size_t len, unsigned long flags)
2024{
2025 struct ppc440spe_adma_chan *ppc440spe_chan;
2026 struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
2027 int slot_cnt, slots_per_op;
2028
2029 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
2030
2031 if (unlikely(!len))
2032 return NULL;
2033
2034 	BUG_ON(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT);
2035
2036 spin_lock_bh(&ppc440spe_chan->lock);
2037
2038 dev_dbg(ppc440spe_chan->device->common.dev,
2039 "ppc440spe adma%d: %s len: %u int_en %d\n",
2040 ppc440spe_chan->device->id, __func__, len,
2041 flags & DMA_PREP_INTERRUPT ? 1 : 0);
2042 slot_cnt = slots_per_op = 1;
2043 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
2044 slots_per_op);
2045 if (sw_desc) {
2046 group_start = sw_desc->group_head;
2047 ppc440spe_desc_init_memcpy(group_start, flags);
2048 ppc440spe_adma_set_dest(group_start, dma_dest, 0);
2049 ppc440spe_adma_memcpy_xor_set_src(group_start, dma_src, 0);
2050 ppc440spe_desc_set_byte_count(group_start, ppc440spe_chan, len);
2051 sw_desc->unmap_len = len;
2052 sw_desc->async_tx.flags = flags;
2053 }
2054 spin_unlock_bh(&ppc440spe_chan->lock);
2055
2056 return sw_desc ? &sw_desc->async_tx : NULL;
2057}
2058
2059/**
2060 * ppc440spe_adma_prep_dma_xor - prepare CDB for a XOR operation
2061 */
2062static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor(
2063 struct dma_chan *chan, dma_addr_t dma_dest,
2064 dma_addr_t *dma_src, u32 src_cnt, size_t len,
2065 unsigned long flags)
2066{
2067 struct ppc440spe_adma_chan *ppc440spe_chan;
2068 struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
2069 int slot_cnt, slots_per_op;
2070
2071 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
2072
2073 ADMA_LL_DBG(prep_dma_xor_dbg(ppc440spe_chan->device->id,
2074 dma_dest, dma_src, src_cnt));
2075 if (unlikely(!len))
2076 return NULL;
2077 	BUG_ON(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
2078
2079 dev_dbg(ppc440spe_chan->device->common.dev,
2080 "ppc440spe adma%d: %s src_cnt: %d len: %u int_en: %d\n",
2081 ppc440spe_chan->device->id, __func__, src_cnt, len,
2082 flags & DMA_PREP_INTERRUPT ? 1 : 0);
2083
2084 spin_lock_bh(&ppc440spe_chan->lock);
2085 slot_cnt = ppc440spe_chan_xor_slot_count(len, src_cnt, &slots_per_op);
2086 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
2087 slots_per_op);
2088 if (sw_desc) {
2089 group_start = sw_desc->group_head;
2090 ppc440spe_desc_init_xor(group_start, src_cnt, flags);
2091 ppc440spe_adma_set_dest(group_start, dma_dest, 0);
2092 while (src_cnt--)
2093 ppc440spe_adma_memcpy_xor_set_src(group_start,
2094 dma_src[src_cnt], src_cnt);
2095 ppc440spe_desc_set_byte_count(group_start, ppc440spe_chan, len);
2096 sw_desc->unmap_len = len;
2097 sw_desc->async_tx.flags = flags;
2098 }
2099 spin_unlock_bh(&ppc440spe_chan->lock);
2100
2101 return sw_desc ? &sw_desc->async_tx : NULL;
2102}
2103
2104static inline void
2105ppc440spe_desc_set_xor_src_cnt(struct ppc440spe_adma_desc_slot *desc,
2106 int src_cnt);
2107static void ppc440spe_init_rxor_cursor(struct ppc440spe_rxor *cursor);
2108
2109/**
2110 * ppc440spe_adma_init_dma2rxor_slot - initialize the CDB sources for a DMA2 RXOR operation
2111 */
2112static void ppc440spe_adma_init_dma2rxor_slot(
2113 struct ppc440spe_adma_desc_slot *desc,
2114 dma_addr_t *src, int src_cnt)
2115{
2116 int i;
2117
2118 /* initialize CDB */
2119 for (i = 0; i < src_cnt; i++) {
2120 ppc440spe_adma_dma2rxor_prep_src(desc, &desc->rxor_cursor, i,
2121 desc->src_cnt, (u32)src[i]);
2122 }
2123}
2124
2125/**
2126 * ppc440spe_dma01_prep_mult - prepare CDBs for a Q operation
2127 * where the destination is also the source
2128 */
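/*
 * Two CDBs are chained: the first MULTICASTs the source both to the
 * real destination and to a scratch q page; the second multiplies the
 * q page contents by scf[0] (MV_SG1_SG2) and stores the product back
 * into the real destination.
 */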
2129static struct ppc440spe_adma_desc_slot *ppc440spe_dma01_prep_mult(
2130 struct ppc440spe_adma_chan *ppc440spe_chan,
2131 dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt,
2132 const unsigned char *scf, size_t len, unsigned long flags)
2133{
2134 struct ppc440spe_adma_desc_slot *sw_desc = NULL;
2135 unsigned long op = 0;
2136 int slot_cnt;
2137
2138 set_bit(PPC440SPE_DESC_WXOR, &op);
2139 slot_cnt = 2;
2140
2141 spin_lock_bh(&ppc440spe_chan->lock);
2142
2143 /* use WXOR, each descriptor occupies one slot */
2144 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
2145 if (sw_desc) {
2146 struct ppc440spe_adma_chan *chan;
2147 struct ppc440spe_adma_desc_slot *iter;
2148 struct dma_cdb *hw_desc;
2149
2150 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
2151 set_bits(op, &sw_desc->flags);
2152 sw_desc->src_cnt = src_cnt;
2153 sw_desc->dst_cnt = dst_cnt;
2154 /* First descriptor, zero data in the destination and copy it
2155 * to q page using MULTICAST transfer.
2156 */
2157 iter = list_first_entry(&sw_desc->group_list,
2158 struct ppc440spe_adma_desc_slot,
2159 chain_node);
2160 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2161 /* set 'next' pointer */
2162 iter->hw_next = list_entry(iter->chain_node.next,
2163 struct ppc440spe_adma_desc_slot,
2164 chain_node);
2165 clear_bit(PPC440SPE_DESC_INT, &iter->flags);
2166 hw_desc = iter->hw_desc;
2167 hw_desc->opc = DMA_CDB_OPC_MULTICAST;
2168
2169 ppc440spe_desc_set_dest_addr(iter, chan,
2170 DMA_CUED_XOR_BASE, dst[0], 0);
2171 ppc440spe_desc_set_dest_addr(iter, chan, 0, dst[1], 1);
2172 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
2173 src[0]);
2174 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
2175 iter->unmap_len = len;
2176
2177 /*
2178 * Second descriptor, multiply data from the q page
2179 * and store the result in real destination.
2180 */
2181 iter = list_first_entry(&iter->chain_node,
2182 struct ppc440spe_adma_desc_slot,
2183 chain_node);
2184 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2185 iter->hw_next = NULL;
2186 if (flags & DMA_PREP_INTERRUPT)
2187 set_bit(PPC440SPE_DESC_INT, &iter->flags);
2188 else
2189 clear_bit(PPC440SPE_DESC_INT, &iter->flags);
2190
2191 hw_desc = iter->hw_desc;
2192 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
2193 ppc440spe_desc_set_src_addr(iter, chan, 0,
2194 DMA_CUED_XOR_HB, dst[1]);
2195 ppc440spe_desc_set_dest_addr(iter, chan,
2196 DMA_CUED_XOR_BASE, dst[0], 0);
2197
2198 ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
2199 DMA_CDB_SG_DST1, scf[0]);
2200 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
2201 iter->unmap_len = len;
2202 sw_desc->async_tx.flags = flags;
2203 }
2204
2205 spin_unlock_bh(&ppc440spe_chan->lock);
2206
2207 return sw_desc;
2208}
2209
2210/**
2211 * ppc440spe_dma01_prep_sum_product - prepare CDBs for a
2212 * Dx = A*(P+Pxy) + B*(Q+Qxy) operation where the destination is
2213 * also a source.
2214 */
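/*
 * Three chained CDBs implement this: (1) MULTICAST src[1] to the
 * destination and to the channel's scratch q page; (2) multiply the
 * q page by scf[1] into the destination; (3) multiply src[0] by
 * scf[0] and XOR the product into the destination.
 */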
2215static struct ppc440spe_adma_desc_slot *ppc440spe_dma01_prep_sum_product(
2216 struct ppc440spe_adma_chan *ppc440spe_chan,
2217 dma_addr_t *dst, dma_addr_t *src, int src_cnt,
2218 const unsigned char *scf, size_t len, unsigned long flags)
2219{
2220 struct ppc440spe_adma_desc_slot *sw_desc = NULL;
2221 unsigned long op = 0;
2222 int slot_cnt;
2223
2224 set_bit(PPC440SPE_DESC_WXOR, &op);
2225 slot_cnt = 3;
2226
2227 spin_lock_bh(&ppc440spe_chan->lock);
2228
2229 /* WXOR, each descriptor occupies one slot */
2230 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
2231 if (sw_desc) {
2232 struct ppc440spe_adma_chan *chan;
2233 struct ppc440spe_adma_desc_slot *iter;
2234 struct dma_cdb *hw_desc;
2235
2236 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
2237 set_bits(op, &sw_desc->flags);
2238 sw_desc->src_cnt = src_cnt;
2239 sw_desc->dst_cnt = 1;
2240 /* 1st descriptor, src[1] data to q page and zero destination */
2241 iter = list_first_entry(&sw_desc->group_list,
2242 struct ppc440spe_adma_desc_slot,
2243 chain_node);
2244 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2245 iter->hw_next = list_entry(iter->chain_node.next,
2246 struct ppc440spe_adma_desc_slot,
2247 chain_node);
2248 clear_bit(PPC440SPE_DESC_INT, &iter->flags);
2249 hw_desc = iter->hw_desc;
2250 hw_desc->opc = DMA_CDB_OPC_MULTICAST;
2251
2252 ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
2253 *dst, 0);
2254 ppc440spe_desc_set_dest_addr(iter, chan, 0,
2255 ppc440spe_chan->qdest, 1);
2256 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
2257 src[1]);
2258 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
2259 iter->unmap_len = len;
2260
2261 /* 2nd descriptor, multiply src[1] data and store the
2262 * result in destination */
2263 iter = list_first_entry(&iter->chain_node,
2264 struct ppc440spe_adma_desc_slot,
2265 chain_node);
2266 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2267 /* set 'next' pointer */
2268 iter->hw_next = list_entry(iter->chain_node.next,
2269 struct ppc440spe_adma_desc_slot,
2270 chain_node);
2271 if (flags & DMA_PREP_INTERRUPT)
2272 set_bit(PPC440SPE_DESC_INT, &iter->flags);
2273 else
2274 clear_bit(PPC440SPE_DESC_INT, &iter->flags);
2275
2276 hw_desc = iter->hw_desc;
2277 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
2278 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
2279 ppc440spe_chan->qdest);
2280 ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
2281 *dst, 0);
2282 ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
2283 DMA_CDB_SG_DST1, scf[1]);
2284 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
2285 iter->unmap_len = len;
2286
2287 /*
2288 * 3rd descriptor, multiply src[0] data and xor it
2289 * with destination
2290 */
2291 iter = list_first_entry(&iter->chain_node,
2292 struct ppc440spe_adma_desc_slot,
2293 chain_node);
2294 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2295 iter->hw_next = NULL;
2296 if (flags & DMA_PREP_INTERRUPT)
2297 set_bit(PPC440SPE_DESC_INT, &iter->flags);
2298 else
2299 clear_bit(PPC440SPE_DESC_INT, &iter->flags);
2300
2301 hw_desc = iter->hw_desc;
2302 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
2303 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
2304 src[0]);
2305 ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
2306 *dst, 0);
2307 ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
2308 DMA_CDB_SG_DST1, scf[0]);
2309 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
2310 iter->unmap_len = len;
2311 sw_desc->async_tx.flags = flags;
2312 }
2313
2314 spin_unlock_bh(&ppc440spe_chan->lock);
2315
2316 return sw_desc;
2317}
2318
2319static struct ppc440spe_adma_desc_slot *ppc440spe_dma01_prep_pq(
2320 struct ppc440spe_adma_chan *ppc440spe_chan,
2321 dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt,
2322 const unsigned char *scf, size_t len, unsigned long flags)
2323{
2324 int slot_cnt;
2325 struct ppc440spe_adma_desc_slot *sw_desc = NULL, *iter;
2326 unsigned long op = 0;
2327 unsigned char mult = 1;
2328
2329 pr_debug("%s: dst_cnt %d, src_cnt %d, len %d\n",
2330 __func__, dst_cnt, src_cnt, len);
2331 /* select operations WXOR/RXOR depending on the
2332 * source addresses of operators and the number
2333 * of destinations (RXOR support only Q-parity calculations)
2334 */
2335 set_bit(PPC440SPE_DESC_WXOR, &op);
2336 if (!test_and_set_bit(PPC440SPE_RXOR_RUN, &ppc440spe_rxor_state)) {
2337 /* no active RXOR;
2338 * do RXOR if:
2339 		 * - there is more than one source,
2340 		 * - len is aligned on a 512-byte boundary,
2341 		 * - the source addresses fit into one of 4 possible regions.
2342 */
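		/* e.g. with sources at B and B+len the engine can run
		 * RXOR R1 R2; a third source at B+2*len extends this to
		 * R1 R2 R3, at B+3*len to R1 R2 R4, at B+4*len to R1 R2 R5.
		 */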
2343 if (src_cnt > 1 &&
2344 !(len & MQ0_CF2H_RXOR_BS_MASK) &&
2345 (src[0] + len) == src[1]) {
2346 /* may do RXOR R1 R2 */
2347 set_bit(PPC440SPE_DESC_RXOR, &op);
2348 if (src_cnt != 2) {
2349 /* may try to enhance region of RXOR */
2350 if ((src[1] + len) == src[2]) {
2351 /* do RXOR R1 R2 R3 */
2352 set_bit(PPC440SPE_DESC_RXOR123,
2353 &op);
2354 } else if ((src[1] + len * 2) == src[2]) {
2355 /* do RXOR R1 R2 R4 */
2356 set_bit(PPC440SPE_DESC_RXOR124, &op);
2357 } else if ((src[1] + len * 3) == src[2]) {
2358 /* do RXOR R1 R2 R5 */
2359 set_bit(PPC440SPE_DESC_RXOR125,
2360 &op);
2361 } else {
2362 /* do RXOR R1 R2 */
2363 set_bit(PPC440SPE_DESC_RXOR12,
2364 &op);
2365 }
2366 } else {
2367 /* do RXOR R1 R2 */
2368 set_bit(PPC440SPE_DESC_RXOR12, &op);
2369 }
2370 }
2371
2372 if (!test_bit(PPC440SPE_DESC_RXOR, &op)) {
2373 /* can not do this operation with RXOR */
2374 clear_bit(PPC440SPE_RXOR_RUN,
2375 &ppc440spe_rxor_state);
2376 } else {
2377 /* can do; set block size right now */
2378 ppc440spe_desc_set_rxor_block_size(len);
2379 }
2380 }
2381
2382 /* Number of necessary slots depends on operation type selected */
2383 if (!test_bit(PPC440SPE_DESC_RXOR, &op)) {
2384 /* This is a WXOR only chain. Need descriptors for each
2385 * source to GF-XOR them with WXOR, and need descriptors
2386 * for each destination to zero them with WXOR
2387 */
2388 slot_cnt = src_cnt;
2389
2390 if (flags & DMA_PREP_ZERO_P) {
2391 slot_cnt++;
2392 set_bit(PPC440SPE_ZERO_P, &op);
2393 }
2394 if (flags & DMA_PREP_ZERO_Q) {
2395 slot_cnt++;
2396 set_bit(PPC440SPE_ZERO_Q, &op);
2397 }
2398 } else {
2399 		/* Need 1 or 2 descriptors for the RXOR operation, and
2400 		 * (src_cnt - (2 or 3)) descriptors for WXOR of the
2401 		 * remaining sources (if any)
2402 */
2403 slot_cnt = dst_cnt;
2404
2405 if (flags & DMA_PREP_ZERO_P)
2406 set_bit(PPC440SPE_ZERO_P, &op);
2407 if (flags & DMA_PREP_ZERO_Q)
2408 set_bit(PPC440SPE_ZERO_Q, &op);
2409
2410 if (test_bit(PPC440SPE_DESC_RXOR12, &op))
2411 slot_cnt += src_cnt - 2;
2412 else
2413 slot_cnt += src_cnt - 3;
2414
2415 		/* Thus we have either an RXOR-only chain or a
2416 		 * mixed RXOR/WXOR chain
2417 */
2418 if (slot_cnt == dst_cnt)
2419 /* RXOR only chain */
2420 clear_bit(PPC440SPE_DESC_WXOR, &op);
2421 }
2422
2423 spin_lock_bh(&ppc440spe_chan->lock);
2424 /* for both RXOR/WXOR each descriptor occupies one slot */
2425 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
2426 if (sw_desc) {
2427 ppc440spe_desc_init_dma01pq(sw_desc, dst_cnt, src_cnt,
2428 flags, op);
2429
2430 /* setup dst/src/mult */
2431 pr_debug("%s: set dst descriptor 0, 1: 0x%016llx, 0x%016llx\n",
2432 __func__, dst[0], dst[1]);
2433 ppc440spe_adma_pq_set_dest(sw_desc, dst, flags);
2434 while (src_cnt--) {
2435 ppc440spe_adma_pq_set_src(sw_desc, src[src_cnt],
2436 src_cnt);
2437
2438 /* NOTE: "Multi = 0 is equivalent to = 1" as it
2439 * stated in 440SPSPe_RAID6_Addendum_UM_1_17.pdf
2440 * doesn't work for RXOR with DMA0/1! Instead, multi=0
2441 * leads to zeroing source data after RXOR.
2442 * So, for P case set-up mult=1 explicitly.
2443 */
2444 if (!(flags & DMA_PREP_PQ_DISABLE_Q))
2445 mult = scf[src_cnt];
2446 ppc440spe_adma_pq_set_src_mult(sw_desc,
2447 mult, src_cnt, dst_cnt - 1);
2448 }
2449
2450 		/* Set up the byte count for each slot just allocated */
2451 sw_desc->async_tx.flags = flags;
2452 list_for_each_entry(iter, &sw_desc->group_list,
2453 chain_node) {
2454 ppc440spe_desc_set_byte_count(iter,
2455 ppc440spe_chan, len);
2456 iter->unmap_len = len;
2457 }
2458 }
2459 spin_unlock_bh(&ppc440spe_chan->lock);
2460
2461 return sw_desc;
2462}
2463
2464static struct ppc440spe_adma_desc_slot *ppc440spe_dma2_prep_pq(
2465 struct ppc440spe_adma_chan *ppc440spe_chan,
2466 dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt,
2467 const unsigned char *scf, size_t len, unsigned long flags)
2468{
2469 int slot_cnt, descs_per_op;
2470 struct ppc440spe_adma_desc_slot *sw_desc = NULL, *iter;
2471 unsigned long op = 0;
2472 unsigned char mult = 1;
2473
2474 BUG_ON(!dst_cnt);
2475 /*pr_debug("%s: dst_cnt %d, src_cnt %d, len %d\n",
2476 __func__, dst_cnt, src_cnt, len);*/
2477
2478 spin_lock_bh(&ppc440spe_chan->lock);
2479 descs_per_op = ppc440spe_dma2_pq_slot_count(src, src_cnt, len);
2480 if (descs_per_op < 0) {
2481 spin_unlock_bh(&ppc440spe_chan->lock);
2482 return NULL;
2483 }
2484
2485 	/* one RXOR chain per destination, so we have 1 or 2 chains */
2486 slot_cnt = descs_per_op * dst_cnt;
2487
2488 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
2489 if (sw_desc) {
2490 op = slot_cnt;
2491 sw_desc->async_tx.flags = flags;
2492 list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
2493 ppc440spe_desc_init_dma2pq(iter, dst_cnt, src_cnt,
2494 --op ? 0 : flags);
2495 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
2496 len);
2497 iter->unmap_len = len;
2498
2499 ppc440spe_init_rxor_cursor(&(iter->rxor_cursor));
2500 iter->rxor_cursor.len = len;
2501 iter->descs_per_op = descs_per_op;
2502 }
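		/* Second pass: program the RXOR sources into each chain
		 * and link consecutive CDBs via their 'next' pointers.
		 */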
2503 op = 0;
2504 list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
2505 op++;
2506 if (op % descs_per_op == 0)
2507 ppc440spe_adma_init_dma2rxor_slot(iter, src,
2508 src_cnt);
2509 if (likely(!list_is_last(&iter->chain_node,
2510 &sw_desc->group_list))) {
2511 /* set 'next' pointer */
2512 iter->hw_next =
2513 list_entry(iter->chain_node.next,
2514 struct ppc440spe_adma_desc_slot,
2515 chain_node);
2516 ppc440spe_xor_set_link(iter, iter->hw_next);
2517 } else {
2518 /* this is the last descriptor. */
2519 iter->hw_next = NULL;
2520 }
2521 }
2522
2523 /* fixup head descriptor */
2524 sw_desc->dst_cnt = dst_cnt;
2525 if (flags & DMA_PREP_ZERO_P)
2526 set_bit(PPC440SPE_ZERO_P, &sw_desc->flags);
2527 if (flags & DMA_PREP_ZERO_Q)
2528 set_bit(PPC440SPE_ZERO_Q, &sw_desc->flags);
2529
2530 /* setup dst/src/mult */
2531 ppc440spe_adma_pq_set_dest(sw_desc, dst, flags);
2532
2533 while (src_cnt--) {
2534 /* handle descriptors (if dst_cnt == 2) inside
2535 * the ppc440spe_adma_pq_set_srcxxx() functions
2536 */
2537 ppc440spe_adma_pq_set_src(sw_desc, src[src_cnt],
2538 src_cnt);
2539 if (!(flags & DMA_PREP_PQ_DISABLE_Q))
2540 mult = scf[src_cnt];
2541 ppc440spe_adma_pq_set_src_mult(sw_desc,
2542 mult, src_cnt, dst_cnt - 1);
2543 }
2544 }
2545 spin_unlock_bh(&ppc440spe_chan->lock);
2546 ppc440spe_desc_set_rxor_block_size(len);
2547 return sw_desc;
2548}
2549
2550/**
2551 * ppc440spe_adma_prep_dma_pq - prepare CDB (group) for a GF-XOR operation
2552 */
2553static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_pq(
2554 struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
2555 unsigned int src_cnt, const unsigned char *scf,
2556 size_t len, unsigned long flags)
2557{
2558 struct ppc440spe_adma_chan *ppc440spe_chan;
2559 struct ppc440spe_adma_desc_slot *sw_desc = NULL;
2560 int dst_cnt = 0;
2561
2562 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
2563
2564 ADMA_LL_DBG(prep_dma_pq_dbg(ppc440spe_chan->device->id,
2565 dst, src, src_cnt));
2566 BUG_ON(!len);
2567 	BUG_ON(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
2568 BUG_ON(!src_cnt);
2569
2570 if (src_cnt == 1 && dst[1] == src[0]) {
2571 dma_addr_t dest[2];
2572
2573 /* dst[1] is real destination (Q) */
2574 dest[0] = dst[1];
2575 /* this is the page to multicast source data to */
2576 dest[1] = ppc440spe_chan->qdest;
2577 sw_desc = ppc440spe_dma01_prep_mult(ppc440spe_chan,
2578 dest, 2, src, src_cnt, scf, len, flags);
2579 return sw_desc ? &sw_desc->async_tx : NULL;
2580 }
2581
2582 if (src_cnt == 2 && dst[1] == src[1]) {
2583 sw_desc = ppc440spe_dma01_prep_sum_product(ppc440spe_chan,
2584 &dst[1], src, 2, scf, len, flags);
2585 return sw_desc ? &sw_desc->async_tx : NULL;
2586 }
2587
2588 if (!(flags & DMA_PREP_PQ_DISABLE_P)) {
2589 BUG_ON(!dst[0]);
2590 dst_cnt++;
2591 flags |= DMA_PREP_ZERO_P;
2592 }
2593
2594 if (!(flags & DMA_PREP_PQ_DISABLE_Q)) {
2595 BUG_ON(!dst[1]);
2596 dst_cnt++;
2597 flags |= DMA_PREP_ZERO_Q;
2598 }
2599
2600 BUG_ON(!dst_cnt);
2601
2602 dev_dbg(ppc440spe_chan->device->common.dev,
2603 "ppc440spe adma%d: %s src_cnt: %d len: %u int_en: %d\n",
2604 ppc440spe_chan->device->id, __func__, src_cnt, len,
2605 flags & DMA_PREP_INTERRUPT ? 1 : 0);
2606
2607 switch (ppc440spe_chan->device->id) {
2608 case PPC440SPE_DMA0_ID:
2609 case PPC440SPE_DMA1_ID:
2610 sw_desc = ppc440spe_dma01_prep_pq(ppc440spe_chan,
2611 dst, dst_cnt, src, src_cnt, scf,
2612 len, flags);
2613 break;
2614
2615 case PPC440SPE_XOR_ID:
2616 sw_desc = ppc440spe_dma2_prep_pq(ppc440spe_chan,
2617 dst, dst_cnt, src, src_cnt, scf,
2618 len, flags);
2619 break;
2620 }
2621
2622 return sw_desc ? &sw_desc->async_tx : NULL;
2623}
2624
2625/**
2626 * ppc440spe_adma_prep_dma_pqzero_sum - prepare CDB group for
2627 * a PQ_ZERO_SUM operation
2628 */
2629static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_pqzero_sum(
2630 struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
2631 unsigned int src_cnt, const unsigned char *scf, size_t len,
2632 enum sum_check_flags *pqres, unsigned long flags)
2633{
2634 struct ppc440spe_adma_chan *ppc440spe_chan;
2635 struct ppc440spe_adma_desc_slot *sw_desc, *iter;
2636 dma_addr_t pdest, qdest;
2637 int slot_cnt, slots_per_op, idst, dst_cnt;
2638
2639 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
2640
2641 if (flags & DMA_PREP_PQ_DISABLE_P)
2642 pdest = 0;
2643 else
2644 pdest = pq[0];
2645
2646 if (flags & DMA_PREP_PQ_DISABLE_Q)
2647 qdest = 0;
2648 else
2649 qdest = pq[1];
2650
2651 ADMA_LL_DBG(prep_dma_pqzero_sum_dbg(ppc440spe_chan->device->id,
2652 src, src_cnt, scf));
2653
2654 /* Always use WXOR for P/Q calculations (two destinations).
2655 * Need 1 or 2 extra slots to verify results are zero.
2656 */
2657 idst = dst_cnt = (pdest && qdest) ? 2 : 1;
2658
2659 /* One additional slot per destination to clone P/Q
2660 * before calculation (we have to preserve destinations).
2661 */
2662 slot_cnt = src_cnt + dst_cnt * 2;
2663 slots_per_op = 1;
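	/* E.g. for src_cnt = 4 with both P and Q enabled this gives
	 * 4 WXOR slots + 2 clone slots + 2 DCHECK slots = 8 slots.
	 */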
2664
2665 spin_lock_bh(&ppc440spe_chan->lock);
2666 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
2667 slots_per_op);
2668 if (sw_desc) {
2669 ppc440spe_desc_init_dma01pqzero_sum(sw_desc, dst_cnt, src_cnt);
2670
2671 /* Setup byte count for each slot just allocated */
2672 sw_desc->async_tx.flags = flags;
2673 list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
2674 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
2675 len);
2676 iter->unmap_len = len;
2677 }
2678
2679 if (pdest) {
2680 struct dma_cdb *hw_desc;
2681 struct ppc440spe_adma_chan *chan;
2682
2683 iter = sw_desc->group_head;
2684 chan = to_ppc440spe_adma_chan(iter->async_tx.chan);
2685 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2686 iter->hw_next = list_entry(iter->chain_node.next,
2687 struct ppc440spe_adma_desc_slot,
2688 chain_node);
2689 hw_desc = iter->hw_desc;
2690 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
2691 iter->src_cnt = 0;
2692 iter->dst_cnt = 0;
2693 ppc440spe_desc_set_dest_addr(iter, chan, 0,
2694 ppc440spe_chan->pdest, 0);
2695 ppc440spe_desc_set_src_addr(iter, chan, 0, 0, pdest);
2696 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
2697 len);
2698 iter->unmap_len = 0;
2699 /* override pdest to preserve original P */
2700 pdest = ppc440spe_chan->pdest;
2701 }
2702 if (qdest) {
2703 struct dma_cdb *hw_desc;
2704 struct ppc440spe_adma_chan *chan;
2705
2706 iter = list_first_entry(&sw_desc->group_list,
2707 struct ppc440spe_adma_desc_slot,
2708 chain_node);
2709 chan = to_ppc440spe_adma_chan(iter->async_tx.chan);
2710
2711 if (pdest) {
2712 iter = list_entry(iter->chain_node.next,
2713 struct ppc440spe_adma_desc_slot,
2714 chain_node);
2715 }
2716
2717 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2718 iter->hw_next = list_entry(iter->chain_node.next,
2719 struct ppc440spe_adma_desc_slot,
2720 chain_node);
2721 hw_desc = iter->hw_desc;
2722 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
2723 iter->src_cnt = 0;
2724 iter->dst_cnt = 0;
2725 ppc440spe_desc_set_dest_addr(iter, chan, 0,
2726 ppc440spe_chan->qdest, 0);
2727 ppc440spe_desc_set_src_addr(iter, chan, 0, 0, qdest);
2728 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
2729 len);
2730 iter->unmap_len = 0;
2731 /* override qdest to preserve original Q */
2732 qdest = ppc440spe_chan->qdest;
2733 }
2734
2735 /* Setup destinations for P/Q ops */
2736 ppc440spe_adma_pqzero_sum_set_dest(sw_desc, pdest, qdest);
2737
2738 /* Setup zero QWORDs into DCHECK CDBs */
2739 idst = dst_cnt;
2740 list_for_each_entry_reverse(iter, &sw_desc->group_list,
2741 chain_node) {
2742 /*
2743 			 * The last CDB corresponds to the Q-parity check,
2744 			 * the one before last corresponds to the
2745 			 * P-parity check
2746 */
2747 if (idst == DMA_DEST_MAX_NUM) {
2748 if (idst == dst_cnt) {
2749 set_bit(PPC440SPE_DESC_QCHECK,
2750 &iter->flags);
2751 } else {
2752 set_bit(PPC440SPE_DESC_PCHECK,
2753 &iter->flags);
2754 }
2755 } else {
2756 if (qdest) {
2757 set_bit(PPC440SPE_DESC_QCHECK,
2758 &iter->flags);
2759 } else {
2760 set_bit(PPC440SPE_DESC_PCHECK,
2761 &iter->flags);
2762 }
2763 }
2764 iter->xor_check_result = pqres;
2765
2766 /*
2767 			 * set it to zero; if the check fails then the result
2768 			 * will be updated
2769 */
2770 *iter->xor_check_result = 0;
2771 ppc440spe_desc_set_dcheck(iter, ppc440spe_chan,
2772 ppc440spe_qword);
2773
2774 if (!(--dst_cnt))
2775 break;
2776 }
2777
2778 /* Setup sources and mults for P/Q ops */
2779 list_for_each_entry_continue_reverse(iter, &sw_desc->group_list,
2780 chain_node) {
2781 struct ppc440spe_adma_chan *chan;
2782 u32 mult_dst;
2783
2784 chan = to_ppc440spe_adma_chan(iter->async_tx.chan);
2785 ppc440spe_desc_set_src_addr(iter, chan, 0,
2786 DMA_CUED_XOR_HB,
2787 src[src_cnt - 1]);
2788 if (qdest) {
2789 mult_dst = (dst_cnt - 1) ? DMA_CDB_SG_DST2 :
2790 DMA_CDB_SG_DST1;
2791 ppc440spe_desc_set_src_mult(iter, chan,
2792 DMA_CUED_MULT1_OFF,
2793 mult_dst,
2794 scf[src_cnt - 1]);
2795 }
2796 if (!(--src_cnt))
2797 break;
2798 }
2799 }
2800 spin_unlock_bh(&ppc440spe_chan->lock);
2801 return sw_desc ? &sw_desc->async_tx : NULL;
2802}
2803
2804/**
2805 * ppc440spe_adma_prep_dma_xor_zero_sum - prepare CDB group for
2806 * XOR ZERO_SUM operation
2807 */
2808static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor_zero_sum(
2809 struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
2810 size_t len, enum sum_check_flags *result, unsigned long flags)
2811{
2812 struct dma_async_tx_descriptor *tx;
2813 dma_addr_t pq[2];
2814
2815 /* validate P, disable Q */
2816 pq[0] = src[0];
2817 pq[1] = 0;
2818 flags |= DMA_PREP_PQ_DISABLE_Q;
2819
2820 tx = ppc440spe_adma_prep_dma_pqzero_sum(chan, pq, &src[1],
2821 				src_cnt - 1, NULL, len,
2822 result, flags);
2823 return tx;
2824}
2825
2826/**
2827 * ppc440spe_adma_set_dest - set destination address into descriptor
2828 */
2829static void ppc440spe_adma_set_dest(struct ppc440spe_adma_desc_slot *sw_desc,
2830 dma_addr_t addr, int index)
2831{
2832 struct ppc440spe_adma_chan *chan;
2833
2834 BUG_ON(index >= sw_desc->dst_cnt);
2835
2836 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
2837
2838 switch (chan->device->id) {
2839 case PPC440SPE_DMA0_ID:
2840 case PPC440SPE_DMA1_ID:
2841 		/* TODO: support transfer lengths >
2842 * PPC440SPE_ADMA_DMA/XOR_MAX_BYTE_COUNT
2843 */
2844 ppc440spe_desc_set_dest_addr(sw_desc->group_head,
2845 chan, 0, addr, index);
2846 break;
2847 case PPC440SPE_XOR_ID:
2848 sw_desc = ppc440spe_get_group_entry(sw_desc, index);
2849 ppc440spe_desc_set_dest_addr(sw_desc,
2850 chan, 0, addr, index);
2851 break;
2852 }
2853}
2854
2855static void ppc440spe_adma_pq_zero_op(struct ppc440spe_adma_desc_slot *iter,
2856 struct ppc440spe_adma_chan *chan, dma_addr_t addr)
2857{
2858 	/* To clear the destination, update the descriptor
2859 	 * (P or Q depending on the index) as follows:
2860 	 * addr is the destination (0 corresponds to SG2):
2861 */
2862 ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE, addr, 0);
2863
2864 /* ... and the addr is source: */
2865 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB, addr);
2866
2867 	/* addr is always SG2, so the mult is always DST1 */
2868 ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
2869 DMA_CDB_SG_DST1, 1);
2870}
2871
2872/**
2873 * ppc440spe_adma_pq_set_dest - set destination address into descriptor
2874 * for the PQXOR operation
2875 */
2876static void ppc440spe_adma_pq_set_dest(struct ppc440spe_adma_desc_slot *sw_desc,
2877 dma_addr_t *addrs, unsigned long flags)
2878{
2879 struct ppc440spe_adma_desc_slot *iter;
2880 struct ppc440spe_adma_chan *chan;
2881 dma_addr_t paddr, qaddr;
2882 dma_addr_t addr = 0, ppath, qpath;
2883 int index = 0, i;
2884
2885 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
2886
2887 if (flags & DMA_PREP_PQ_DISABLE_P)
2888 paddr = 0;
2889 else
2890 paddr = addrs[0];
2891
2892 if (flags & DMA_PREP_PQ_DISABLE_Q)
2893 qaddr = 0;
2894 else
2895 qaddr = addrs[1];
2896
2897 if (!paddr || !qaddr)
2898 addr = paddr ? paddr : qaddr;
2899
2900 switch (chan->device->id) {
2901 case PPC440SPE_DMA0_ID:
2902 case PPC440SPE_DMA1_ID:
2903 /* walk through the WXOR source list and set P/Q-destinations
2904 * for each slot:
2905 */
2906 if (!test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) {
2907 			/* This is a WXOR-only chain; it may have 1 or 2 zeroing descs */
2908 if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags))
2909 index++;
2910 if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags))
2911 index++;
2912
2913 iter = ppc440spe_get_group_entry(sw_desc, index);
2914 if (addr) {
2915 /* one destination */
2916 list_for_each_entry_from(iter,
2917 &sw_desc->group_list, chain_node)
2918 ppc440spe_desc_set_dest_addr(iter, chan,
2919 DMA_CUED_XOR_BASE, addr, 0);
2920 } else {
2921 /* two destinations */
2922 list_for_each_entry_from(iter,
2923 &sw_desc->group_list, chain_node) {
2924 ppc440spe_desc_set_dest_addr(iter, chan,
2925 DMA_CUED_XOR_BASE, paddr, 0);
2926 ppc440spe_desc_set_dest_addr(iter, chan,
2927 DMA_CUED_XOR_BASE, qaddr, 1);
2928 }
2929 }
2930
2931 if (index) {
2932 /* To clear destinations update the descriptor
2933 				 * (1st, 2nd, or both depending on flags)
2934 */
2935 index = 0;
2936 if (test_bit(PPC440SPE_ZERO_P,
2937 &sw_desc->flags)) {
2938 iter = ppc440spe_get_group_entry(
2939 sw_desc, index++);
2940 ppc440spe_adma_pq_zero_op(iter, chan,
2941 paddr);
2942 }
2943
2944 if (test_bit(PPC440SPE_ZERO_Q,
2945 &sw_desc->flags)) {
2946 iter = ppc440spe_get_group_entry(
2947 sw_desc, index++);
2948 ppc440spe_adma_pq_zero_op(iter, chan,
2949 qaddr);
2950 }
2951
2952 return;
2953 }
2954 } else {
2955 /* This is RXOR-only or RXOR/WXOR mixed chain */
2956
2957 /* If we want to include destination into calculations,
2958 * then make dest addresses cued with mult=1 (XOR).
2959 */
2960 ppath = test_bit(PPC440SPE_ZERO_P, &sw_desc->flags) ?
2961 DMA_CUED_XOR_HB :
2962 DMA_CUED_XOR_BASE |
2963 (1 << DMA_CUED_MULT1_OFF);
2964 qpath = test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags) ?
2965 DMA_CUED_XOR_HB :
2966 DMA_CUED_XOR_BASE |
2967 (1 << DMA_CUED_MULT1_OFF);
2968
2969 /* Setup destination(s) in RXOR slot(s) */
2970 iter = ppc440spe_get_group_entry(sw_desc, index++);
2971 ppc440spe_desc_set_dest_addr(iter, chan,
2972 paddr ? ppath : qpath,
2973 paddr ? paddr : qaddr, 0);
2974 if (!addr) {
2975 /* two destinations */
2976 iter = ppc440spe_get_group_entry(sw_desc,
2977 index++);
2978 ppc440spe_desc_set_dest_addr(iter, chan,
2979 qpath, qaddr, 0);
2980 }
2981
2982 if (test_bit(PPC440SPE_DESC_WXOR, &sw_desc->flags)) {
2983 /* Setup destination(s) in remaining WXOR
2984 * slots
2985 */
2986 iter = ppc440spe_get_group_entry(sw_desc,
2987 index);
2988 if (addr) {
2989 /* one destination */
2990 list_for_each_entry_from(iter,
2991 &sw_desc->group_list,
2992 chain_node)
2993 ppc440spe_desc_set_dest_addr(
2994 iter, chan,
2995 DMA_CUED_XOR_BASE,
2996 addr, 0);
2997
2998 } else {
2999 /* two destinations */
3000 list_for_each_entry_from(iter,
3001 &sw_desc->group_list,
3002 chain_node) {
3003 ppc440spe_desc_set_dest_addr(
3004 iter, chan,
3005 DMA_CUED_XOR_BASE,
3006 paddr, 0);
3007 ppc440spe_desc_set_dest_addr(
3008 iter, chan,
3009 DMA_CUED_XOR_BASE,
3010 qaddr, 1);
3011 }
3012 }
3013 }
3014
3015 }
3016 break;
3017
3018 case PPC440SPE_XOR_ID:
3019 /* DMA2 descriptors have only 1 destination, so there are
3020 * two chains - one for each dest.
3021 * If we want to include destination into calculations,
3022 * then make dest addresses cued with mult=1 (XOR).
3023 */
3024 ppath = test_bit(PPC440SPE_ZERO_P, &sw_desc->flags) ?
3025 DMA_CUED_XOR_HB :
3026 DMA_CUED_XOR_BASE |
3027 (1 << DMA_CUED_MULT1_OFF);
3028
3029 qpath = test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags) ?
3030 DMA_CUED_XOR_HB :
3031 DMA_CUED_XOR_BASE |
3032 (1 << DMA_CUED_MULT1_OFF);
3033
3034 iter = ppc440spe_get_group_entry(sw_desc, 0);
3035 for (i = 0; i < sw_desc->descs_per_op; i++) {
3036 ppc440spe_desc_set_dest_addr(iter, chan,
3037 paddr ? ppath : qpath,
3038 paddr ? paddr : qaddr, 0);
3039 iter = list_entry(iter->chain_node.next,
3040 struct ppc440spe_adma_desc_slot,
3041 chain_node);
3042 }
3043
3044 if (!addr) {
3045 /* Two destinations; setup Q here */
3046 iter = ppc440spe_get_group_entry(sw_desc,
3047 sw_desc->descs_per_op);
3048 for (i = 0; i < sw_desc->descs_per_op; i++) {
3049 ppc440spe_desc_set_dest_addr(iter,
3050 chan, qpath, qaddr, 0);
3051 iter = list_entry(iter->chain_node.next,
3052 struct ppc440spe_adma_desc_slot,
3053 chain_node);
3054 }
3055 }
3056
3057 break;
3058 }
3059}
3060
3061/**
3062 * ppc440spe_adma_pqzero_sum_set_dest - set destination address into descriptor
3063 * for the PQ_ZERO_SUM operation
3064 */
3065static void ppc440spe_adma_pqzero_sum_set_dest(
3066 struct ppc440spe_adma_desc_slot *sw_desc,
3067 dma_addr_t paddr, dma_addr_t qaddr)
3068{
3069 struct ppc440spe_adma_desc_slot *iter, *end;
3070 struct ppc440spe_adma_chan *chan;
3071 dma_addr_t addr = 0;
3072 int idx;
3073
3074 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
3075
3076 /* walk through the WXOR source list and set P/Q-destinations
3077 * for each slot
3078 */
3079 idx = (paddr && qaddr) ? 2 : 1;
3080 /* set end */
3081 list_for_each_entry_reverse(end, &sw_desc->group_list,
3082 chain_node) {
3083 if (!(--idx))
3084 break;
3085 }
3086 /* set start */
3087 idx = (paddr && qaddr) ? 2 : 1;
3088 iter = ppc440spe_get_group_entry(sw_desc, idx);
3089
3090 if (paddr && qaddr) {
3091 /* two destinations */
3092 list_for_each_entry_from(iter, &sw_desc->group_list,
3093 chain_node) {
3094 if (unlikely(iter == end))
3095 break;
3096 ppc440spe_desc_set_dest_addr(iter, chan,
3097 DMA_CUED_XOR_BASE, paddr, 0);
3098 ppc440spe_desc_set_dest_addr(iter, chan,
3099 DMA_CUED_XOR_BASE, qaddr, 1);
3100 }
3101 } else {
3102 /* one destination */
3103 addr = paddr ? paddr : qaddr;
3104 list_for_each_entry_from(iter, &sw_desc->group_list,
3105 chain_node) {
3106 if (unlikely(iter == end))
3107 break;
3108 ppc440spe_desc_set_dest_addr(iter, chan,
3109 DMA_CUED_XOR_BASE, addr, 0);
3110 }
3111 }
3112
3113 	/* The remaining descriptors are DATACHECK ones. They do not
3114 	 * need a destination; their destinations are actually used
3115 	 * as sources for the check operation. So, set addr as the source.
3116 */
3117 ppc440spe_desc_set_src_addr(end, chan, 0, 0, addr ? addr : paddr);
3118
3119 if (!addr) {
3120 end = list_entry(end->chain_node.next,
3121 struct ppc440spe_adma_desc_slot, chain_node);
3122 ppc440spe_desc_set_src_addr(end, chan, 0, 0, qaddr);
3123 }
3124}
3125
3126/**
3127 * ppc440spe_desc_set_xor_src_cnt - set source count into descriptor
3128 */
3129static inline void ppc440spe_desc_set_xor_src_cnt(
3130 struct ppc440spe_adma_desc_slot *desc,
3131 int src_cnt)
3132{
3133 struct xor_cb *hw_desc = desc->hw_desc;
3134
3135 hw_desc->cbc &= ~XOR_CDCR_OAC_MSK;
3136 hw_desc->cbc |= src_cnt;
3137}
3138
3139/**
3140 * ppc440spe_adma_pq_set_src - set source address into descriptor
3141 */
3142static void ppc440spe_adma_pq_set_src(struct ppc440spe_adma_desc_slot *sw_desc,
3143 dma_addr_t addr, int index)
3144{
3145 struct ppc440spe_adma_chan *chan;
3146 dma_addr_t haddr = 0;
3147 struct ppc440spe_adma_desc_slot *iter = NULL;
3148
3149 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
3150
3151 switch (chan->device->id) {
3152 case PPC440SPE_DMA0_ID:
3153 case PPC440SPE_DMA1_ID:
3154 /* DMA0,1 may do: WXOR, RXOR, RXOR+WXORs chain
3155 */
3156 if (test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) {
3157 /* RXOR-only or RXOR/WXOR operation */
3158 int iskip = test_bit(PPC440SPE_DESC_RXOR12,
3159 &sw_desc->flags) ? 2 : 3;
3160
3161 if (index == 0) {
3162 /* 1st slot (RXOR) */
3163 /* setup sources region (R1-2-3, R1-2-4,
3164 * or R1-2-5)
3165 */
3166 if (test_bit(PPC440SPE_DESC_RXOR12,
3167 &sw_desc->flags))
3168 haddr = DMA_RXOR12 <<
3169 DMA_CUED_REGION_OFF;
3170 else if (test_bit(PPC440SPE_DESC_RXOR123,
3171 &sw_desc->flags))
3172 haddr = DMA_RXOR123 <<
3173 DMA_CUED_REGION_OFF;
3174 else if (test_bit(PPC440SPE_DESC_RXOR124,
3175 &sw_desc->flags))
3176 haddr = DMA_RXOR124 <<
3177 DMA_CUED_REGION_OFF;
3178 else if (test_bit(PPC440SPE_DESC_RXOR125,
3179 &sw_desc->flags))
3180 haddr = DMA_RXOR125 <<
3181 DMA_CUED_REGION_OFF;
3182 else
3183 BUG();
3184 haddr |= DMA_CUED_XOR_BASE;
3185 iter = ppc440spe_get_group_entry(sw_desc, 0);
3186 } else if (index < iskip) {
3187 				/* 1st slot (RXOR)
3188 				 * shall actually set the source address only once
3189 				 * instead of for each of the first <iskip> indexes
3190 */
3191 iter = NULL;
3192 } else {
3193 				/* 2nd/3rd and next slots (WXOR);
3194 				 * skip the first slot with RXOR
3195 */
3196 haddr = DMA_CUED_XOR_HB;
3197 iter = ppc440spe_get_group_entry(sw_desc,
3198 index - iskip + sw_desc->dst_cnt);
3199 }
3200 } else {
3201 int znum = 0;
3202
3203 /* WXOR-only operation; skip first slots with
3204 * zeroing destinations
3205 */
3206 if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags))
3207 znum++;
3208 if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags))
3209 znum++;
3210
3211 haddr = DMA_CUED_XOR_HB;
3212 iter = ppc440spe_get_group_entry(sw_desc,
3213 index + znum);
3214 }
3215
3216 if (likely(iter)) {
3217 ppc440spe_desc_set_src_addr(iter, chan, 0, haddr, addr);
3218
3219 if (!index &&
3220 test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags) &&
3221 sw_desc->dst_cnt == 2) {
3222 /* if we have two destinations for RXOR, then
3223 				 * set up the source in the second descriptor too
3224 */
3225 iter = ppc440spe_get_group_entry(sw_desc, 1);
3226 ppc440spe_desc_set_src_addr(iter, chan, 0,
3227 haddr, addr);
3228 }
3229 }
3230 break;
3231
3232 case PPC440SPE_XOR_ID:
3233 /* DMA2 may do Biskup */
3234 iter = sw_desc->group_head;
3235 if (iter->dst_cnt == 2) {
3236 /* both P & Q calculations required; set P src here */
3237 ppc440spe_adma_dma2rxor_set_src(iter, index, addr);
3238
3239 /* this is for Q */
3240 iter = ppc440spe_get_group_entry(sw_desc,
3241 sw_desc->descs_per_op);
3242 }
3243 ppc440spe_adma_dma2rxor_set_src(iter, index, addr);
3244 break;
3245 }
3246}
3247
3248/**
3249 * ppc440spe_adma_memcpy_xor_set_src - set source address into descriptor
3250 */
3251static void ppc440spe_adma_memcpy_xor_set_src(
3252 struct ppc440spe_adma_desc_slot *sw_desc,
3253 dma_addr_t addr, int index)
3254{
3255 struct ppc440spe_adma_chan *chan;
3256
3257 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
3258 sw_desc = sw_desc->group_head;
3259
3260 if (likely(sw_desc))
3261 ppc440spe_desc_set_src_addr(sw_desc, chan, index, 0, addr);
3262}
3263
3264/**
3265 * ppc440spe_adma_dma2rxor_inc_addr - advance the RXOR cursor to the next operand/CDB
3266 */
3267static void ppc440spe_adma_dma2rxor_inc_addr(
3268 struct ppc440spe_adma_desc_slot *desc,
3269 struct ppc440spe_rxor *cursor, int index, int src_cnt)
3270{
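	/* Each XOR CB holds at most XOR_MAX_OPS operands; once a CB is
	 * full (or the last source is reached), commit its source count
	 * and move the cursor on to the next CB in the chain.
	 */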
3271 cursor->addr_count++;
3272 if (index == src_cnt - 1) {
3273 ppc440spe_desc_set_xor_src_cnt(desc, cursor->addr_count);
3274 } else if (cursor->addr_count == XOR_MAX_OPS) {
3275 ppc440spe_desc_set_xor_src_cnt(desc, cursor->addr_count);
3276 cursor->addr_count = 0;
3277 cursor->desc_count++;
3278 }
3279}
3280
3281/**
3282 * ppc440spe_adma_dma2rxor_prep_src - setup RXOR types in DMA2 CDB
3283 */
3284static int ppc440spe_adma_dma2rxor_prep_src(
3285 struct ppc440spe_adma_desc_slot *hdesc,
3286 struct ppc440spe_rxor *cursor, int index,
3287 int src_cnt, u32 addr)
3288{
3289 int rval = 0;
3290 u32 sign;
3291 struct ppc440spe_adma_desc_slot *desc = hdesc;
3292 int i;
3293
3294 for (i = 0; i < cursor->desc_count; i++) {
3295 		desc = list_entry(desc->chain_node.next,
3296 struct ppc440spe_adma_desc_slot,
3297 chain_node);
3298 }
3299
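	/* Cursor state machine: state 2 - start a new region at 'addr';
	 * state 0 - have a base address, look for a second member (direct
	 * or reverse); state 1 - have two members, try to extend the
	 * region to a third one (RXOR123/124/125) or close it as RXOR12.
	 */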
3300 switch (cursor->state) {
3301 case 0:
3302 if (addr == cursor->addrl + cursor->len) {
3303 /* direct RXOR */
3304 cursor->state = 1;
3305 cursor->xor_count++;
3306 if (index == src_cnt-1) {
3307 ppc440spe_rxor_set_region(desc,
3308 cursor->addr_count,
3309 DMA_RXOR12 << DMA_CUED_REGION_OFF);
3310 ppc440spe_adma_dma2rxor_inc_addr(
3311 desc, cursor, index, src_cnt);
3312 }
3313 } else if (cursor->addrl == addr + cursor->len) {
3314 /* reverse RXOR */
3315 cursor->state = 1;
3316 cursor->xor_count++;
3317 set_bit(cursor->addr_count, &desc->reverse_flags[0]);
3318 if (index == src_cnt-1) {
3319 ppc440spe_rxor_set_region(desc,
3320 cursor->addr_count,
3321 DMA_RXOR12 << DMA_CUED_REGION_OFF);
3322 ppc440spe_adma_dma2rxor_inc_addr(
3323 desc, cursor, index, src_cnt);
3324 }
3325 } else {
3326 printk(KERN_ERR "Cannot build "
3327 "DMA2 RXOR command block.\n");
3328 BUG();
3329 }
3330 break;
3331 case 1:
3332 sign = test_bit(cursor->addr_count,
3333 desc->reverse_flags)
3334 ? -1 : 1;
3335 if (index == src_cnt-2 || (sign == -1
3336 && addr != cursor->addrl - 2*cursor->len)) {
3337 cursor->state = 0;
3338 cursor->xor_count = 1;
3339 cursor->addrl = addr;
3340 ppc440spe_rxor_set_region(desc,
3341 cursor->addr_count,
3342 DMA_RXOR12 << DMA_CUED_REGION_OFF);
3343 ppc440spe_adma_dma2rxor_inc_addr(
3344 desc, cursor, index, src_cnt);
3345 } else if (addr == cursor->addrl + 2*sign*cursor->len) {
3346 cursor->state = 2;
3347 cursor->xor_count = 0;
3348 ppc440spe_rxor_set_region(desc,
3349 cursor->addr_count,
3350 DMA_RXOR123 << DMA_CUED_REGION_OFF);
3351 if (index == src_cnt-1) {
3352 ppc440spe_adma_dma2rxor_inc_addr(
3353 desc, cursor, index, src_cnt);
3354 }
3355 } else if (addr == cursor->addrl + 3*cursor->len) {
3356 cursor->state = 2;
3357 cursor->xor_count = 0;
3358 ppc440spe_rxor_set_region(desc,
3359 cursor->addr_count,
3360 DMA_RXOR124 << DMA_CUED_REGION_OFF);
3361 if (index == src_cnt-1) {
3362 ppc440spe_adma_dma2rxor_inc_addr(
3363 desc, cursor, index, src_cnt);
3364 }
3365 } else if (addr == cursor->addrl + 4*cursor->len) {
3366 cursor->state = 2;
3367 cursor->xor_count = 0;
3368 ppc440spe_rxor_set_region(desc,
3369 cursor->addr_count,
3370 DMA_RXOR125 << DMA_CUED_REGION_OFF);
3371 if (index == src_cnt-1) {
3372 ppc440spe_adma_dma2rxor_inc_addr(
3373 desc, cursor, index, src_cnt);
3374 }
3375 } else {
3376 cursor->state = 0;
3377 cursor->xor_count = 1;
3378 cursor->addrl = addr;
3379 ppc440spe_rxor_set_region(desc,
3380 cursor->addr_count,
3381 DMA_RXOR12 << DMA_CUED_REGION_OFF);
3382 ppc440spe_adma_dma2rxor_inc_addr(
3383 desc, cursor, index, src_cnt);
3384 }
3385 break;
3386 case 2:
3387 cursor->state = 0;
3388 cursor->addrl = addr;
3389 cursor->xor_count++;
3390 if (index) {
3391 ppc440spe_adma_dma2rxor_inc_addr(
3392 desc, cursor, index, src_cnt);
3393 }
3394 break;
3395 }
3396
3397 return rval;
3398}
3399
3400/**
3401 * ppc440spe_adma_dma2rxor_set_src - set RXOR source address; it's assumed that
3402 * ppc440spe_adma_dma2rxor_prep_src() has already done prior this call
3403 */
3404static void ppc440spe_adma_dma2rxor_set_src(
3405 struct ppc440spe_adma_desc_slot *desc,
3406 int index, dma_addr_t addr)
3407{
3408 struct xor_cb *xcb = desc->hw_desc;
3409 int k = 0, op = 0, lop = 0;
3410
3411 /* get the RXOR operand which corresponds to index addr */
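	/* An RXOR12 region consumes 2 source indexes, RXOR123/124/125
	 * consume 3, hence the op += 2 / op += 3 walk below.
	 */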
3412 while (op <= index) {
3413 lop = op;
3414 if (k == XOR_MAX_OPS) {
3415 k = 0;
3416 desc = list_entry(desc->chain_node.next,
3417 struct ppc440spe_adma_desc_slot, chain_node);
3418 xcb = desc->hw_desc;
3419
3420 }
3421 if ((xcb->ops[k++].h & (DMA_RXOR12 << DMA_CUED_REGION_OFF)) ==
3422 (DMA_RXOR12 << DMA_CUED_REGION_OFF))
3423 op += 2;
3424 else
3425 op += 3;
3426 }
3427
3428 BUG_ON(k < 1);
3429
3430 if (test_bit(k-1, desc->reverse_flags)) {
3431 /* reverse operand order; put last op in RXOR group */
3432 if (index == op - 1)
3433 ppc440spe_rxor_set_src(desc, k - 1, addr);
3434 } else {
3435 /* direct operand order; put first op in RXOR group */
3436 if (index == lop)
3437 ppc440spe_rxor_set_src(desc, k - 1, addr);
3438 }
3439}
3440
3441/**
3442 * ppc440spe_adma_dma2rxor_set_mult - set RXOR multipliers; it is assumed
3443 * that ppc440spe_adma_dma2rxor_prep_src() has already been called
3444 */
3445static void ppc440spe_adma_dma2rxor_set_mult(
3446 struct ppc440spe_adma_desc_slot *desc,
3447 int index, u8 mult)
3448{
3449 struct xor_cb *xcb = desc->hw_desc;
3450 int k = 0, op = 0, lop = 0;
3451
3452 /* get the RXOR operand which corresponds to index mult */
3453 while (op <= index) {
3454 lop = op;
3455 if (k == XOR_MAX_OPS) {
3456 k = 0;
3457 desc = list_entry(desc->chain_node.next,
3458 struct ppc440spe_adma_desc_slot,
3459 chain_node);
3460 xcb = desc->hw_desc;
3461
3462 }
3463 if ((xcb->ops[k++].h & (DMA_RXOR12 << DMA_CUED_REGION_OFF)) ==
3464 (DMA_RXOR12 << DMA_CUED_REGION_OFF))
3465 op += 2;
3466 else
3467 op += 3;
3468 }
3469
3470 BUG_ON(k < 1);
3471 if (test_bit(k-1, desc->reverse_flags)) {
3472 /* reverse order */
3473 ppc440spe_rxor_set_mult(desc, k - 1, op - index - 1, mult);
3474 } else {
3475 /* direct order */
3476 ppc440spe_rxor_set_mult(desc, k - 1, index - lop, mult);
3477 }
3478}
3479
3480/**
3481 * ppc440spe_init_rxor_cursor - reset the RXOR cursor to its initial state
3482 */
3483static void ppc440spe_init_rxor_cursor(struct ppc440spe_rxor *cursor)
3484{
3485 memset(cursor, 0, sizeof(struct ppc440spe_rxor));
3486 cursor->state = 2;
3487}
3488
3489/**
3490 * ppc440spe_adma_pq_set_src_mult - set multiplication coefficient into
3491 * descriptor for the PQXOR operation
3492 */
3493static void ppc440spe_adma_pq_set_src_mult(
3494 struct ppc440spe_adma_desc_slot *sw_desc,
3495 unsigned char mult, int index, int dst_pos)
3496{
3497 struct ppc440spe_adma_chan *chan;
3498 u32 mult_idx, mult_dst;
3499 struct ppc440spe_adma_desc_slot *iter = NULL, *iter1 = NULL;
3500
3501 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
3502
3503 switch (chan->device->id) {
3504 case PPC440SPE_DMA0_ID:
3505 case PPC440SPE_DMA1_ID:
3506 if (test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) {
3507 int region = test_bit(PPC440SPE_DESC_RXOR12,
3508 &sw_desc->flags) ? 2 : 3;
3509
3510 if (index < region) {
3511 /* RXOR multipliers */
3512 iter = ppc440spe_get_group_entry(sw_desc,
3513 sw_desc->dst_cnt - 1);
3514 if (sw_desc->dst_cnt == 2)
3515 iter1 = ppc440spe_get_group_entry(
3516 sw_desc, 0);
3517
3518 mult_idx = DMA_CUED_MULT1_OFF + (index << 3);
3519 mult_dst = DMA_CDB_SG_SRC;
3520 } else {
3521 /* WXOR multiplier */
3522 iter = ppc440spe_get_group_entry(sw_desc,
3523 index - region +
3524 sw_desc->dst_cnt);
3525 mult_idx = DMA_CUED_MULT1_OFF;
3526 mult_dst = dst_pos ? DMA_CDB_SG_DST2 :
3527 DMA_CDB_SG_DST1;
3528 }
3529 } else {
3530 int znum = 0;
3531
3532 /* WXOR-only;
3533 			 * skip the first slots with destinations (if zeroing
3534 			 * of destinations takes place)
3535 */
3536 if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags))
3537 znum++;
3538 if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags))
3539 znum++;
3540
3541 iter = ppc440spe_get_group_entry(sw_desc, index + znum);
3542 mult_idx = DMA_CUED_MULT1_OFF;
3543 mult_dst = dst_pos ? DMA_CDB_SG_DST2 : DMA_CDB_SG_DST1;
3544 }
3545
3546 if (likely(iter)) {
3547 ppc440spe_desc_set_src_mult(iter, chan,
3548 mult_idx, mult_dst, mult);
3549
3550 if (unlikely(iter1)) {
3551 /* if we have two destinations for RXOR, then
3552 				 * we've just set the Q mult. Set up P now.
3553 */
3554 ppc440spe_desc_set_src_mult(iter1, chan,
3555 mult_idx, mult_dst, 1);
3556 }
3557
3558 }
3559 break;
3560
3561 case PPC440SPE_XOR_ID:
3562 iter = sw_desc->group_head;
3563 if (sw_desc->dst_cnt == 2) {
3564 /* both P & Q calculations required; set P mult here */
3565 ppc440spe_adma_dma2rxor_set_mult(iter, index, 1);
3566
3567 /* and then set Q mult */
3568 iter = ppc440spe_get_group_entry(sw_desc,
3569 sw_desc->descs_per_op);
3570 }
3571 ppc440spe_adma_dma2rxor_set_mult(iter, index, mult);
3572 break;
3573 }
3574}
3575
3576/**
3577 * ppc440spe_adma_free_chan_resources - free the resources allocated to the channel
3578 */
3579static void ppc440spe_adma_free_chan_resources(struct dma_chan *chan)
3580{
3581 struct ppc440spe_adma_chan *ppc440spe_chan;
3582 struct ppc440spe_adma_desc_slot *iter, *_iter;
3583 int in_use_descs = 0;
3584
3585 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
3586 ppc440spe_adma_slot_cleanup(ppc440spe_chan);
3587
3588 spin_lock_bh(&ppc440spe_chan->lock);
3589 list_for_each_entry_safe(iter, _iter, &ppc440spe_chan->chain,
3590 chain_node) {
3591 in_use_descs++;
3592 list_del(&iter->chain_node);
3593 }
3594 list_for_each_entry_safe_reverse(iter, _iter,
3595 &ppc440spe_chan->all_slots, slot_node) {
3596 list_del(&iter->slot_node);
3597 kfree(iter);
3598 ppc440spe_chan->slots_allocated--;
3599 }
3600 ppc440spe_chan->last_used = NULL;
3601
3602 dev_dbg(ppc440spe_chan->device->common.dev,
3603 "ppc440spe adma%d %s slots_allocated %d\n",
3604 ppc440spe_chan->device->id,
3605 __func__, ppc440spe_chan->slots_allocated);
3606 spin_unlock_bh(&ppc440spe_chan->lock);
3607
3608 /* one is ok since we left it on there on purpose */
3609 if (in_use_descs > 1)
3610 printk(KERN_ERR "SPE: Freeing %d in use descriptors!\n",
3611 in_use_descs - 1);
3612}
3613
3614/**
3615 * ppc440spe_adma_tx_status - poll the status of an ADMA transaction
3616 * @chan: ADMA channel handle
3617 * @cookie: ADMA transaction identifier
3618 * @txstate: a holder for the current state of the channel
3619 */
3620static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan,
3621 dma_cookie_t cookie, struct dma_tx_state *txstate)
3622{
3623 struct ppc440spe_adma_chan *ppc440spe_chan;
3624 enum dma_status ret;
3625
3626 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
3627 	ret = dma_cookie_status(chan, cookie, txstate);
3628 	if (ret == DMA_COMPLETE)
3629 return ret;
3630
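	/* Not complete yet: run descriptor cleanup inline and then
	 * re-read the cookie state, which may have advanced.
	 */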
3631 ppc440spe_adma_slot_cleanup(ppc440spe_chan);
3632
3633 	return dma_cookie_status(chan, cookie, txstate);
3634}
3635
3636/**
3637 * ppc440spe_adma_eot_handler - end of transfer interrupt handler
3638 */
3639static irqreturn_t ppc440spe_adma_eot_handler(int irq, void *data)
3640{
3641 struct ppc440spe_adma_chan *chan = data;
3642
3643 dev_dbg(chan->device->common.dev,
3644 "ppc440spe adma%d: %s\n", chan->device->id, __func__);
3645
3646 tasklet_schedule(&chan->irq_tasklet);
3647 ppc440spe_adma_device_clear_eot_status(chan);
3648
3649 return IRQ_HANDLED;
3650}
3651
3652/**
3653 * ppc440spe_adma_err_handler - DMA error interrupt handler;
3654 * does the same things as the EOT handler
3655 */
3656static irqreturn_t ppc440spe_adma_err_handler(int irq, void *data)
3657{
3658 struct ppc440spe_adma_chan *chan = data;
3659
3660 dev_dbg(chan->device->common.dev,
3661 "ppc440spe adma%d: %s\n", chan->device->id, __func__);
3662
3663 tasklet_schedule(&chan->irq_tasklet);
3664 ppc440spe_adma_device_clear_eot_status(chan);
3665
3666 return IRQ_HANDLED;
3667}
3668
3669/**
3670 * ppc440spe_test_callback - called when the test operation has completed
3671 */
3672static void ppc440spe_test_callback(void *unused)
3673{
3674 complete(&ppc440spe_r6_test_comp);
3675}
3676
3677/**
3678 * ppc440spe_adma_issue_pending - flush all pending descriptors to h/w
3679 */
3680static void ppc440spe_adma_issue_pending(struct dma_chan *chan)
3681{
3682 struct ppc440spe_adma_chan *ppc440spe_chan;
3683
3684 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
3685 dev_dbg(ppc440spe_chan->device->common.dev,
3686 "ppc440spe adma%d: %s %d \n", ppc440spe_chan->device->id,
3687 __func__, ppc440spe_chan->pending);
3688
3689 if (ppc440spe_chan->pending) {
3690 ppc440spe_chan->pending = 0;
3691 ppc440spe_chan_append(ppc440spe_chan);
3692 }
3693}
3694
3695/**
3696 * ppc440spe_chan_start_null_xor - initiate the first XOR operation (DMA
3697 * engines use FIFOs, as opposed to the chains used by XOR, so this is a
3698 * XOR-specific operation)
3699 */
3700static void ppc440spe_chan_start_null_xor(struct ppc440spe_adma_chan *chan)
3701{
3702 struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
3703 dma_cookie_t cookie;
3704 int slot_cnt, slots_per_op;
3705
3706 dev_dbg(chan->device->common.dev,
3707 "ppc440spe adma%d: %s\n", chan->device->id, __func__);
3708
3709 spin_lock_bh(&chan->lock);
3710 slot_cnt = ppc440spe_chan_xor_slot_count(0, 2, &slots_per_op);
3711 sw_desc = ppc440spe_adma_alloc_slots(chan, slot_cnt, slots_per_op);
3712 if (sw_desc) {
3713 group_start = sw_desc->group_head;
3714 list_splice_init(&sw_desc->group_list, &chan->chain);
3715 async_tx_ack(&sw_desc->async_tx);
3716 ppc440spe_desc_init_null_xor(group_start);
3717
3718 		cookie = dma_cookie_assign(&sw_desc->async_tx);
3719
3720 /* initialize the completed cookie to be less than
3721 * the most recently used cookie
3722 */
3723 		chan->common.completed_cookie = cookie - 1;
3724
3725 /* channel should not be busy */
3726 BUG_ON(ppc440spe_chan_is_busy(chan));
3727
3728 /* set the descriptor address */
3729 ppc440spe_chan_set_first_xor_descriptor(chan, sw_desc);
3730
3731 /* run the descriptor */
3732 ppc440spe_chan_run(chan);
3733 } else
3734 printk(KERN_ERR "ppc440spe adma%d"
3735 " failed to allocate null descriptor\n",
3736 chan->device->id);
3737 spin_unlock_bh(&chan->lock);
3738}
3739
3740/**
3741 * ppc440spe_test_raid6 - test whether RAID-6 capabilities were enabled
3742 * successfully. For this we just perform one WXOR operation with the same
3743 * source and destination addresses and a GF-multiplier of 1; if RAID-6
3744 * capabilities are enabled then we'll get src/dst filled with zeroes.
3745 */
3746static int ppc440spe_test_raid6(struct ppc440spe_adma_chan *chan)
3747{
3748 struct ppc440spe_adma_desc_slot *sw_desc, *iter;
3749 struct page *pg;
3750 char *a;
3751 dma_addr_t dma_addr, addrs[2];
3752 unsigned long op = 0;
3753 int rval = 0;
3754
3755 set_bit(PPC440SPE_DESC_WXOR, &op);
3756
3757 pg = alloc_page(GFP_KERNEL);
3758 if (!pg)
3759 return -ENOMEM;
3760
3761 spin_lock_bh(&chan->lock);
3762 sw_desc = ppc440spe_adma_alloc_slots(chan, 1, 1);
3763 if (sw_desc) {
3764 		/* 1 src, 1 dst, int_ena, WXOR */
3765 ppc440spe_desc_init_dma01pq(sw_desc, 1, 1, 1, op);
3766 list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
3767 ppc440spe_desc_set_byte_count(iter, chan, PAGE_SIZE);
3768 iter->unmap_len = PAGE_SIZE;
3769 }
3770 } else {
3771 		rval = -ENOMEM;	/* slot allocation failed */
3772 spin_unlock_bh(&chan->lock);
3773 goto exit;
3774 }
3775 spin_unlock_bh(&chan->lock);
3776
3777 /* Fill the test page with ones */
3778 memset(page_address(pg), 0xFF, PAGE_SIZE);
3779 dma_addr = dma_map_page(chan->device->dev, pg, 0,
3780 PAGE_SIZE, DMA_BIDIRECTIONAL);
3781
3782 /* Setup addresses */
3783 ppc440spe_adma_pq_set_src(sw_desc, dma_addr, 0);
3784 ppc440spe_adma_pq_set_src_mult(sw_desc, 1, 0, 0);
3785 addrs[0] = dma_addr;
3786 addrs[1] = 0;
3787 ppc440spe_adma_pq_set_dest(sw_desc, addrs, DMA_PREP_PQ_DISABLE_Q);
3788
3789 async_tx_ack(&sw_desc->async_tx);
3790 sw_desc->async_tx.callback = ppc440spe_test_callback;
3791 sw_desc->async_tx.callback_param = NULL;
3792
3793 init_completion(&ppc440spe_r6_test_comp);
3794
3795 ppc440spe_adma_tx_submit(&sw_desc->async_tx);
3796 ppc440spe_adma_issue_pending(&chan->common);
3797
3798 wait_for_completion(&ppc440spe_r6_test_comp);
3799
3800 /* Now check if the test page is zeroed */
3801 a = page_address(pg);
3802 if ((*(u32 *)a) == 0 && memcmp(a, a+4, PAGE_SIZE-4) == 0) {
3803 /* page is zero - RAID-6 enabled */
3804 rval = 0;
3805 } else {
3806 /* RAID-6 was not enabled */
3807 rval = -EINVAL;
3808 }
3809exit:
3810 __free_page(pg);
3811 return rval;
3812}
3813
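/*
 * Why the zero check above works (as this editor reads the test): WXOR
 * XORs the GF-scaled source into the destination, i.e. dst ^= g * src
 * over GF(2^8). With g = 1 and src == dst, the 0xFF-filled page is
 * XORed with itself and must read back as all zeroes -- but only if the
 * RAID-6 datapath was really unlocked by the key written to
 * DCRN_MQ0_XORBA.
 */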
3814static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
3815{
3816 switch (adev->id) {
3817 case PPC440SPE_DMA0_ID:
3818 case PPC440SPE_DMA1_ID:
3819 dma_cap_set(DMA_MEMCPY, adev->common.cap_mask);
3820 dma_cap_set(DMA_INTERRUPT, adev->common.cap_mask);
3821 dma_cap_set(DMA_PQ, adev->common.cap_mask);
3822 dma_cap_set(DMA_PQ_VAL, adev->common.cap_mask);
3823 dma_cap_set(DMA_XOR_VAL, adev->common.cap_mask);
3824 break;
3825 case PPC440SPE_XOR_ID:
3826 dma_cap_set(DMA_XOR, adev->common.cap_mask);
3827 dma_cap_set(DMA_PQ, adev->common.cap_mask);
3828 dma_cap_set(DMA_INTERRUPT, adev->common.cap_mask);
3830 break;
3831 }
3832
3833 /* Set base routines */
3834 adev->common.device_alloc_chan_resources =
3835 ppc440spe_adma_alloc_chan_resources;
3836 adev->common.device_free_chan_resources =
3837 ppc440spe_adma_free_chan_resources;
07934481 3838 adev->common.device_tx_status = ppc440spe_adma_tx_status;
3839 adev->common.device_issue_pending = ppc440spe_adma_issue_pending;
3840
3841 /* Set prep routines based on capability */
3842 if (dma_has_cap(DMA_MEMCPY, adev->common.cap_mask)) {
3843 adev->common.device_prep_dma_memcpy =
3844 ppc440spe_adma_prep_dma_memcpy;
3845 }
3846 if (dma_has_cap(DMA_XOR, adev->common.cap_mask)) {
3847 adev->common.max_xor = XOR_MAX_OPS;
3848 adev->common.device_prep_dma_xor =
3849 ppc440spe_adma_prep_dma_xor;
3850 }
3851 if (dma_has_cap(DMA_PQ, adev->common.cap_mask)) {
3852 switch (adev->id) {
3853 case PPC440SPE_DMA0_ID:
3854 dma_set_maxpq(&adev->common,
3855 DMA0_FIFO_SIZE / sizeof(struct dma_cdb), 0);
3856 break;
3857 case PPC440SPE_DMA1_ID:
3858 dma_set_maxpq(&adev->common,
3859 DMA1_FIFO_SIZE / sizeof(struct dma_cdb), 0);
3860 break;
3861 case PPC440SPE_XOR_ID:
3862 adev->common.max_pq = XOR_MAX_OPS * 3;
3863 break;
3864 }
3865 adev->common.device_prep_dma_pq =
3866 ppc440spe_adma_prep_dma_pq;
3867 }
3868 if (dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask)) {
3869 switch (adev->id) {
3870 case PPC440SPE_DMA0_ID:
3871 adev->common.max_pq = DMA0_FIFO_SIZE /
3872 sizeof(struct dma_cdb);
3873 break;
3874 case PPC440SPE_DMA1_ID:
3875 adev->common.max_pq = DMA1_FIFO_SIZE /
3876 sizeof(struct dma_cdb);
3877 break;
3878 }
3879 adev->common.device_prep_dma_pq_val =
3880 ppc440spe_adma_prep_dma_pqzero_sum;
3881 }
3882 if (dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask)) {
3883 switch (adev->id) {
3884 case PPC440SPE_DMA0_ID:
3885 adev->common.max_xor = DMA0_FIFO_SIZE /
3886 sizeof(struct dma_cdb);
3887 break;
3888 case PPC440SPE_DMA1_ID:
3889 adev->common.max_xor = DMA1_FIFO_SIZE /
3890 sizeof(struct dma_cdb);
3891 break;
3892 }
3893 adev->common.device_prep_dma_xor_val =
3894 ppc440spe_adma_prep_dma_xor_zero_sum;
3895 }
3896 if (dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask)) {
3897 adev->common.device_prep_dma_interrupt =
3898 ppc440spe_adma_prep_dma_interrupt;
3899 }
3900 pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: "
3901 "( %s%s%s%s%s%s%s)\n",
3902 dev_name(adev->dev),
3903 dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "",
3904 dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "",
3905 dma_has_cap(DMA_XOR, adev->common.cap_mask) ? "xor " : "",
3906 dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask) ? "xor_val " : "",
3907 dma_has_cap(DMA_MEMCPY, adev->common.cap_mask) ? "memcpy " : "",
3908 dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask) ? "intr " : "");
3909}
3910
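/*
 * Editor's sketch (illustrative only): consumers see the capabilities
 * set above through the standard dmaengine mask helpers. The helper
 * name below is an assumption made for the example.
 */
#if 0	/* example only, not compiled */
static struct dma_chan *example_request_pq_channel(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_PQ, mask);	/* want a P+Q (RAID-6) channel */

	/* no filter: any channel advertising DMA_PQ will do */
	return dma_request_channel(mask, NULL, NULL);
}
#endif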
3911static int ppc440spe_adma_setup_irqs(struct ppc440spe_adma_device *adev,
3912 struct ppc440spe_adma_chan *chan,
3913 int *initcode)
3914{
2dc11581 3915 struct platform_device *ofdev;
3916 struct device_node *np;
3917 int ret;
3918
2dc11581 3919 ofdev = container_of(adev->dev, struct platform_device, dev);
3e6b02d9 3920 np = ofdev->dev.of_node;
3921 if (adev->id != PPC440SPE_XOR_ID) {
3922 adev->err_irq = irq_of_parse_and_map(np, 1);
3923 if (adev->err_irq == NO_IRQ) {
3924 dev_warn(adev->dev, "no err irq resource?\n");
3925 *initcode = PPC_ADMA_INIT_IRQ2;
3926 adev->err_irq = -ENXIO;
3927 } else
3928 atomic_inc(&ppc440spe_adma_err_irq_ref);
3929 } else {
3930 adev->err_irq = -ENXIO;
3931 }
3932
3933 adev->irq = irq_of_parse_and_map(np, 0);
3934 if (adev->irq == NO_IRQ) {
3935 dev_err(adev->dev, "no irq resource\n");
3936 *initcode = PPC_ADMA_INIT_IRQ1;
3937 ret = -ENXIO;
3938 goto err_irq_map;
3939 }
3940 dev_dbg(adev->dev, "irq %d, err irq %d\n",
3941 adev->irq, adev->err_irq);
3942
3943 ret = request_irq(adev->irq, ppc440spe_adma_eot_handler,
3944 0, dev_driver_string(adev->dev), chan);
3945 if (ret) {
3946 dev_err(adev->dev, "can't request irq %d\n",
3947 adev->irq);
3948 *initcode = PPC_ADMA_INIT_IRQ1;
3949 ret = -EIO;
3950 goto err_req1;
3951 }
3952
3953 /* only DMA engines have a separate error IRQ
 3954 * so it's OK if err_irq < 0 in the XOR engine case.
3955 */
3956 if (adev->err_irq > 0) {
3957 /* both DMA engines share common error IRQ */
3958 ret = request_irq(adev->err_irq,
3959 ppc440spe_adma_err_handler,
3960 IRQF_SHARED,
3961 dev_driver_string(adev->dev),
3962 chan);
3963 if (ret) {
3964 dev_err(adev->dev, "can't request irq %d\n",
3965 adev->err_irq);
3966 *initcode = PPC_ADMA_INIT_IRQ2;
3967 ret = -EIO;
3968 goto err_req2;
3969 }
3970 }
3971
3972 if (adev->id == PPC440SPE_XOR_ID) {
3973 /* enable XOR engine interrupts */
3974 iowrite32be(XOR_IE_CBCIE_BIT | XOR_IE_ICBIE_BIT |
3975 XOR_IE_ICIE_BIT | XOR_IE_RPTIE_BIT,
3976 &adev->xor_reg->ier);
3977 } else {
3978 u32 mask, enable;
3979
3980 np = of_find_compatible_node(NULL, NULL, "ibm,i2o-440spe");
3981 if (!np) {
3982 pr_err("%s: can't find I2O device tree node\n",
3983 __func__);
3984 ret = -ENODEV;
3985 goto err_req2;
3986 }
3987 adev->i2o_reg = of_iomap(np, 0);
3988 if (!adev->i2o_reg) {
3989 pr_err("%s: failed to map I2O registers\n", __func__);
3990 of_node_put(np);
3991 ret = -EINVAL;
3992 goto err_req2;
3993 }
3994 of_node_put(np);
3995 /* Unmask 'CS FIFO Attention' interrupts and
3996 * enable generating interrupts on errors
3997 */
3998 enable = (adev->id == PPC440SPE_DMA0_ID) ?
3999 ~(I2O_IOPIM_P0SNE | I2O_IOPIM_P0EM) :
4000 ~(I2O_IOPIM_P1SNE | I2O_IOPIM_P1EM);
4001 mask = ioread32(&adev->i2o_reg->iopim) & enable;
4002 iowrite32(mask, &adev->i2o_reg->iopim);
4003 }
4004 return 0;
4005
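/*
 * Error unwind below runs in reverse order of acquisition. Note the
 * error IRQ is shared by DMA0 and DMA1, so its mapping is disposed
 * only when the last engine referencing it (see
 * ppc440spe_adma_err_irq_ref) goes away.
 */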
4006err_req2:
4007 free_irq(adev->irq, chan);
4008err_req1:
4009 irq_dispose_mapping(adev->irq);
4010err_irq_map:
4011 if (adev->err_irq > 0) {
4012 if (atomic_dec_and_test(&ppc440spe_adma_err_irq_ref))
4013 irq_dispose_mapping(adev->err_irq);
4014 }
4015 return ret;
4016}
4017
4018static void ppc440spe_adma_release_irqs(struct ppc440spe_adma_device *adev,
4019 struct ppc440spe_adma_chan *chan)
4020{
4021 u32 mask, disable;
4022
4023 if (adev->id == PPC440SPE_XOR_ID) {
4024 /* disable XOR engine interrupts */
4025 mask = ioread32be(&adev->xor_reg->ier);
4026 mask &= ~(XOR_IE_CBCIE_BIT | XOR_IE_ICBIE_BIT |
4027 XOR_IE_ICIE_BIT | XOR_IE_RPTIE_BIT);
4028 iowrite32be(mask, &adev->xor_reg->ier);
4029 } else {
4030 /* disable DMAx engine interrupts */
4031 disable = (adev->id == PPC440SPE_DMA0_ID) ?
4032 (I2O_IOPIM_P0SNE | I2O_IOPIM_P0EM) :
4033 (I2O_IOPIM_P1SNE | I2O_IOPIM_P1EM);
4034 mask = ioread32(&adev->i2o_reg->iopim) | disable;
4035 iowrite32(mask, &adev->i2o_reg->iopim);
4036 }
4037 free_irq(adev->irq, chan);
4038 irq_dispose_mapping(adev->irq);
4039 if (adev->err_irq > 0) {
4040 free_irq(adev->err_irq, chan);
4041 if (atomic_dec_and_test(&ppc440spe_adma_err_irq_ref)) {
4042 irq_dispose_mapping(adev->err_irq);
4043 iounmap(adev->i2o_reg);
4044 }
4045 }
4046}
4047
4048/**
 4049 * ppc440spe_adma_probe - probe the async device
4050 */
463a1f8b 4051static int ppc440spe_adma_probe(struct platform_device *ofdev)
12458ea0 4052{
05c02542 4053 struct device_node *np = ofdev->dev.of_node;
4054 struct resource res;
4055 struct ppc440spe_adma_device *adev;
4056 struct ppc440spe_adma_chan *chan;
4057 struct ppc_dma_chan_ref *ref, *_ref;
4058 int ret = 0, initcode = PPC_ADMA_INIT_OK;
4059 const u32 *idx;
4060 int len;
4061 void *regs;
4062 u32 id, pool_size;
4063
4064 if (of_device_is_compatible(np, "amcc,xor-accelerator")) {
4065 id = PPC440SPE_XOR_ID;
4066 /* As far as the XOR engine is concerned, it does not
 4067 * use FIFOs but a linked list, so there is no dependency
 4068 * between the pool size to allocate and the engine configuration.
4069 */
4070 pool_size = PAGE_SIZE << 1;
4071 } else {
4072 /* it is DMA0 or DMA1 */
4073 idx = of_get_property(np, "cell-index", &len);
4074 if (!idx || (len != sizeof(u32))) {
4075 dev_err(&ofdev->dev, "Device node %s has missing "
4076 "or invalid cell-index property\n",
4077 np->full_name);
4078 return -EINVAL;
4079 }
4080 id = *idx;
 4081 /* DMA0,1 engines use a FIFO to maintain CDBs, so we
 4082 * should allocate the pool according to the size of this
 4083 * FIFO. Thus, the pool size depends on the FIFO depth:
 4084 * the pool must provide as many CDBs as the FIFO can
 4085 * hold CDB pointers.
4086 * That is
4087 * CDB size = 32B;
4088 * CDBs number = (DMA0_FIFO_SIZE >> 3);
4089 * Pool size = CDBs number * CDB size =
4090 * = (DMA0_FIFO_SIZE >> 3) << 5 = DMA0_FIFO_SIZE << 2.
4091 */
4092 pool_size = (id == PPC440SPE_DMA0_ID) ?
4093 DMA0_FIFO_SIZE : DMA1_FIFO_SIZE;
4094 pool_size <<= 2;
4095 }
4096
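	/*
	 * Worked example of the pool sizing above (4 KiB is an assumed
	 * FIFO size, used only for illustration): a 4096-byte FIFO holds
	 * 4096 >> 3 = 512 CDB pointers, so the pool needs 512 CDBs of
	 * 32 bytes each: 512 * 32 = 16384 = 4096 << 2 bytes.
	 */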
4097 if (of_address_to_resource(np, 0, &res)) {
4098 dev_err(&ofdev->dev, "failed to get memory resource\n");
4099 initcode = PPC_ADMA_INIT_MEMRES;
4100 ret = -ENODEV;
4101 goto out;
4102 }
4103
4104 if (!request_mem_region(res.start, resource_size(&res),
4105 dev_driver_string(&ofdev->dev))) {
4106 dev_err(&ofdev->dev, "failed to request memory region %pR\n",
4107 &res);
4108 initcode = PPC_ADMA_INIT_MEMREG;
4109 ret = -EBUSY;
4110 goto out;
4111 }
4112
4113 /* create a device */
4114 adev = kzalloc(sizeof(*adev), GFP_KERNEL);
4115 if (!adev) {
4116 dev_err(&ofdev->dev, "failed to allocate device\n");
4117 initcode = PPC_ADMA_INIT_ALLOC;
4118 ret = -ENOMEM;
4119 goto err_adev_alloc;
4120 }
4121
4122 adev->id = id;
4123 adev->pool_size = pool_size;
4124 /* allocate coherent memory for hardware descriptors */
4125 adev->dma_desc_pool_virt = dma_alloc_coherent(&ofdev->dev,
4126 adev->pool_size, &adev->dma_desc_pool,
4127 GFP_KERNEL);
4128 if (adev->dma_desc_pool_virt == NULL) {
4129 dev_err(&ofdev->dev, "failed to allocate %d bytes of coherent "
4130 "memory for hardware descriptors\n",
4131 adev->pool_size);
4132 initcode = PPC_ADMA_INIT_COHERENT;
4133 ret = -ENOMEM;
4134 goto err_dma_alloc;
4135 }
d73111c6 4136 dev_dbg(&ofdev->dev, "allocated descriptor pool virt 0x%p phys 0x%llx\n",
4137 adev->dma_desc_pool_virt, (u64)adev->dma_desc_pool);
4138
4139 regs = ioremap(res.start, resource_size(&res));
4140 if (!regs) {
4141 dev_err(&ofdev->dev, "failed to ioremap regs!\n");
f3b77727 4142 ret = -ENOMEM;
4143 goto err_regs_alloc;
4144 }
4145
4146 if (adev->id == PPC440SPE_XOR_ID) {
4147 adev->xor_reg = regs;
4148 /* Reset XOR */
4149 iowrite32be(XOR_CRSR_XASR_BIT, &adev->xor_reg->crsr);
4150 iowrite32be(XOR_CRSR_64BA_BIT, &adev->xor_reg->crrr);
4151 } else {
4152 size_t fifo_size = (adev->id == PPC440SPE_DMA0_ID) ?
4153 DMA0_FIFO_SIZE : DMA1_FIFO_SIZE;
4154 adev->dma_reg = regs;
4155 /* DMAx_FIFO_SIZE is defined in bytes,
 4156 * <fsiz> is defined as a number of CDB pointers (8 bytes each).
4157 * DMA FIFO Length = CSlength + CPlength, where
4158 * CSlength = CPlength = (fsiz + 1) * 8.
4159 */
4160 iowrite32(DMA_FIFO_ENABLE | ((fifo_size >> 3) - 2),
4161 &adev->dma_reg->fsiz);
4162 /* Configure DMA engine */
4163 iowrite32(DMA_CFG_DXEPR_HP | DMA_CFG_DFMPP_HP | DMA_CFG_FALGN,
4164 &adev->dma_reg->cfg);
4165 /* Clear Status */
4166 iowrite32(~0, &adev->dma_reg->dsts);
4167 }
4168
4169 adev->dev = &ofdev->dev;
4170 adev->common.dev = &ofdev->dev;
4171 INIT_LIST_HEAD(&adev->common.channels);
dd3daca1 4172 platform_set_drvdata(ofdev, adev);
4173
4174 /* create a channel */
4175 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
4176 if (!chan) {
4177 dev_err(&ofdev->dev, "can't allocate channel structure\n");
4178 initcode = PPC_ADMA_INIT_CHANNEL;
4179 ret = -ENOMEM;
4180 goto err_chan_alloc;
4181 }
4182
4183 spin_lock_init(&chan->lock);
4184 INIT_LIST_HEAD(&chan->chain);
4185 INIT_LIST_HEAD(&chan->all_slots);
4186 chan->device = adev;
4187 chan->common.device = &adev->common;
8ac69546 4188 dma_cookie_init(&chan->common);
4189 list_add_tail(&chan->common.device_node, &adev->common.channels);
4190 tasklet_init(&chan->irq_tasklet, ppc440spe_adma_tasklet,
4191 (unsigned long)chan);
4192
4193 /* allocate and map helper pages for async validation or
4194 * async_mult/async_sum_product operations on DMA0/1.
4195 */
4196 if (adev->id != PPC440SPE_XOR_ID) {
4197 chan->pdest_page = alloc_page(GFP_KERNEL);
4198 chan->qdest_page = alloc_page(GFP_KERNEL);
4199 if (!chan->pdest_page ||
4200 !chan->qdest_page) {
4201 if (chan->pdest_page)
4202 __free_page(chan->pdest_page);
4203 if (chan->qdest_page)
4204 __free_page(chan->qdest_page);
4205 ret = -ENOMEM;
4206 goto err_page_alloc;
4207 }
4208 chan->pdest = dma_map_page(&ofdev->dev, chan->pdest_page, 0,
4209 PAGE_SIZE, DMA_BIDIRECTIONAL);
4210 chan->qdest = dma_map_page(&ofdev->dev, chan->qdest_page, 0,
4211 PAGE_SIZE, DMA_BIDIRECTIONAL);
4212 }
4213
4214 ref = kmalloc(sizeof(*ref), GFP_KERNEL);
4215 if (ref) {
4216 ref->chan = &chan->common;
4217 INIT_LIST_HEAD(&ref->node);
4218 list_add_tail(&ref->node, &ppc440spe_adma_chan_list);
4219 } else {
4220 dev_err(&ofdev->dev, "failed to allocate channel reference!\n");
4221 ret = -ENOMEM;
4222 goto err_ref_alloc;
4223 }
4224
4225 ret = ppc440spe_adma_setup_irqs(adev, chan, &initcode);
4226 if (ret)
4227 goto err_irq;
4228
4229 ppc440spe_adma_init_capabilities(adev);
4230
4231 ret = dma_async_device_register(&adev->common);
4232 if (ret) {
4233 initcode = PPC_ADMA_INIT_REGISTER;
4234 dev_err(&ofdev->dev, "failed to register dma device\n");
4235 goto err_dev_reg;
4236 }
4237
4238 goto out;
4239
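/*
 * Error unwind: each label below releases exactly what had been set up
 * before the corresponding failure point, in reverse order of
 * acquisition; "out" additionally records the init status for the
 * /sys "devices" file.
 */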
4240err_dev_reg:
4241 ppc440spe_adma_release_irqs(adev, chan);
4242err_irq:
4243 list_for_each_entry_safe(ref, _ref, &ppc440spe_adma_chan_list, node) {
4244 if (chan == to_ppc440spe_adma_chan(ref->chan)) {
4245 list_del(&ref->node);
4246 kfree(ref);
4247 }
4248 }
4249err_ref_alloc:
4250 if (adev->id != PPC440SPE_XOR_ID) {
4251 dma_unmap_page(&ofdev->dev, chan->pdest,
4252 PAGE_SIZE, DMA_BIDIRECTIONAL);
4253 dma_unmap_page(&ofdev->dev, chan->qdest,
4254 PAGE_SIZE, DMA_BIDIRECTIONAL);
4255 __free_page(chan->pdest_page);
4256 __free_page(chan->qdest_page);
4257 }
4258err_page_alloc:
4259 kfree(chan);
4260err_chan_alloc:
4261 if (adev->id == PPC440SPE_XOR_ID)
4262 iounmap(adev->xor_reg);
4263 else
4264 iounmap(adev->dma_reg);
4265err_regs_alloc:
4266 dma_free_coherent(adev->dev, adev->pool_size,
4267 adev->dma_desc_pool_virt,
4268 adev->dma_desc_pool);
4269err_dma_alloc:
4270 kfree(adev);
4271err_adev_alloc:
4272 release_mem_region(res.start, resource_size(&res));
4273out:
4274 if (id < PPC440SPE_ADMA_ENGINES_NUM)
4275 ppc440spe_adma_devices[id] = initcode;
4276
4277 return ret;
4278}
4279
4280/**
 4281 * ppc440spe_adma_remove - remove the async device
4282 */
4bf27b8b 4283static int ppc440spe_adma_remove(struct platform_device *ofdev)
12458ea0 4284{
dd3daca1 4285 struct ppc440spe_adma_device *adev = platform_get_drvdata(ofdev);
05c02542 4286 struct device_node *np = ofdev->dev.of_node;
12458ea0
AG
4287 struct resource res;
4288 struct dma_chan *chan, *_chan;
4289 struct ppc_dma_chan_ref *ref, *_ref;
4290 struct ppc440spe_adma_chan *ppc440spe_chan;
4291
4292 if (adev->id < PPC440SPE_ADMA_ENGINES_NUM)
4293 ppc440spe_adma_devices[adev->id] = -1;
4294
4295 dma_async_device_unregister(&adev->common);
4296
4297 list_for_each_entry_safe(chan, _chan, &adev->common.channels,
4298 device_node) {
4299 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
4300 ppc440spe_adma_release_irqs(adev, ppc440spe_chan);
4301 tasklet_kill(&ppc440spe_chan->irq_tasklet);
4302 if (adev->id != PPC440SPE_XOR_ID) {
4303 dma_unmap_page(&ofdev->dev, ppc440spe_chan->pdest,
4304 PAGE_SIZE, DMA_BIDIRECTIONAL);
4305 dma_unmap_page(&ofdev->dev, ppc440spe_chan->qdest,
4306 PAGE_SIZE, DMA_BIDIRECTIONAL);
4307 __free_page(ppc440spe_chan->pdest_page);
4308 __free_page(ppc440spe_chan->qdest_page);
4309 }
4310 list_for_each_entry_safe(ref, _ref, &ppc440spe_adma_chan_list,
4311 node) {
4312 if (ppc440spe_chan ==
4313 to_ppc440spe_adma_chan(ref->chan)) {
4314 list_del(&ref->node);
4315 kfree(ref);
4316 }
4317 }
4318 list_del(&chan->device_node);
4319 kfree(ppc440spe_chan);
4320 }
4321
4322 dma_free_coherent(adev->dev, adev->pool_size,
4323 adev->dma_desc_pool_virt, adev->dma_desc_pool);
4324 if (adev->id == PPC440SPE_XOR_ID)
4325 iounmap(adev->xor_reg);
4326 else
4327 iounmap(adev->dma_reg);
4328 of_address_to_resource(np, 0, &res);
4329 release_mem_region(res.start, resource_size(&res));
4330 kfree(adev);
4331 return 0;
4332}
4333
4334/*
4335 * /sys driver interface to enable h/w RAID-6 capabilities
4336 * Files created in e.g. /sys/devices/plb.0/400100100.dma0/driver/
4337 * directory are "devices", "enable" and "poly".
4338 * "devices" shows available engines.
4339 * "enable" is used to enable RAID-6 capabilities or to check
 4340 * whether they have been activated.
4341 * "poly" allows setting/checking used polynomial (for PPC440SPe only).
4342 */
4343
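/*
 * Example session (illustrative; the exact sysfs path depends on the
 * device tree, "400100100.dma0" is just the sample node name used in
 * the comment above):
 *
 *	# cat /sys/devices/plb.0/400100100.dma0/driver/devices
 *	# echo 0x<key> > /sys/devices/plb.0/400100100.dma0/driver/enable
 *	# cat /sys/devices/plb.0/400100100.dma0/driver/enable
 *	# cat /sys/devices/plb.0/400100100.dma0/driver/poly
 */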
4344static ssize_t show_ppc440spe_devices(struct device_driver *dev, char *buf)
4345{
4346 ssize_t size = 0;
4347 int i;
4348
4349 for (i = 0; i < PPC440SPE_ADMA_ENGINES_NUM; i++) {
4350 if (ppc440spe_adma_devices[i] == -1)
4351 continue;
4352 size += snprintf(buf + size, PAGE_SIZE - size,
4353 "PPC440SP(E)-ADMA.%d: %s\n", i,
4354 ppc_adma_errors[ppc440spe_adma_devices[i]]);
4355 }
4356 return size;
4357}
4358
4359static ssize_t show_ppc440spe_r6enable(struct device_driver *dev, char *buf)
4360{
4361 return snprintf(buf, PAGE_SIZE,
4362 "PPC440SP(e) RAID-6 capabilities are %sABLED.\n",
4363 ppc440spe_r6_enabled ? "EN" : "DIS");
4364}
4365
4366static ssize_t store_ppc440spe_r6enable(struct device_driver *dev,
4367 const char *buf, size_t count)
4368{
4369 unsigned long val;
4370
4371 if (!count || count > 11)
4372 return -EINVAL;
4373
4374 if (!ppc440spe_r6_tchan)
4375 return -EFAULT;
4376
4377 /* Write a key */
4378 sscanf(buf, "%lx", &val);
4379 dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_XORBA, val);
4380 isync();
4381
4382 /* Verify whether it really works now */
4383 if (ppc440spe_test_raid6(ppc440spe_r6_tchan) == 0) {
4384 pr_info("PPC440SP(e) RAID-6 has been activated "
4385 "successfully\n");
4386 ppc440spe_r6_enabled = 1;
4387 } else {
4388 pr_info("PPC440SP(e) RAID-6 hasn't been activated!"
4389 " Error key ?\n");
4390 ppc440spe_r6_enabled = 0;
4391 }
4392 return count;
4393}
4394
4395static ssize_t show_ppc440spe_r6poly(struct device_driver *dev, char *buf)
4396{
4397 ssize_t size = 0;
4398 u32 reg;
4399
4400#ifdef CONFIG_440SP
4401 /* 440SP has fixed polynomial */
4402 reg = 0x4d;
4403#else
4404 reg = dcr_read(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL);
4405 reg >>= MQ0_CFBHL_POLY;
4406 reg &= 0xFF;
4407#endif
4408
4409 size = snprintf(buf, PAGE_SIZE, "PPC440SP(e) RAID-6 driver "
4410 "uses 0x1%02x polynomial.\n", reg);
4411 return size;
4412}
4413
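/*
 * Encoding note: only the low 8 bits of the polynomial live in the
 * MQ0_CFBHL field; the leading ninth bit is implied, which is why the
 * show routine above prints "0x1%02x". E.g. writing 0x14D stores 0x4D,
 * and anything above 0x1FF is rejected below.
 */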
4414static ssize_t store_ppc440spe_r6poly(struct device_driver *dev,
4415 const char *buf, size_t count)
4416{
4417 unsigned long reg, val;
4418
4419#ifdef CONFIG_440SP
4420 /* 440SP uses default 0x14D polynomial only */
4421 return -EINVAL;
4422#endif
4423
4424 if (!count || count > 6)
4425 return -EINVAL;
4426
4427 /* e.g., 0x14D or 0x11D */
4428 sscanf(buf, "%lx", &val);
4429
4430 if (val & ~0x1FF)
4431 return -EINVAL;
4432
4433 val &= 0xFF;
4434 reg = dcr_read(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL);
4435 reg &= ~(0xFF << MQ0_CFBHL_POLY);
4436 reg |= val << MQ0_CFBHL_POLY;
4437 dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL, reg);
4438
4439 return count;
4440}
4441
4442static DRIVER_ATTR(devices, S_IRUGO, show_ppc440spe_devices, NULL);
4443static DRIVER_ATTR(enable, S_IRUGO | S_IWUSR, show_ppc440spe_r6enable,
4444 store_ppc440spe_r6enable);
4445static DRIVER_ATTR(poly, S_IRUGO | S_IWUSR, show_ppc440spe_r6poly,
4446 store_ppc440spe_r6poly);
4447
4448/*
4449 * Common initialisation for RAID engines; allocate memory for
4450 * DMAx FIFOs, perform configuration common for all DMA engines.
4451 * Further DMA engine specific configuration is done at probe time.
4452 */
4453static int ppc440spe_configure_raid_devices(void)
4454{
4455 struct device_node *np;
4456 struct resource i2o_res;
4457 struct i2o_regs __iomem *i2o_reg;
4458 dcr_host_t i2o_dcr_host;
4459 unsigned int dcr_base, dcr_len;
4460 int i, ret;
4461
4462 np = of_find_compatible_node(NULL, NULL, "ibm,i2o-440spe");
4463 if (!np) {
4464 pr_err("%s: can't find I2O device tree node\n",
4465 __func__);
4466 return -ENODEV;
4467 }
4468
4469 if (of_address_to_resource(np, 0, &i2o_res)) {
4470 of_node_put(np);
4471 return -EINVAL;
4472 }
4473
4474 i2o_reg = of_iomap(np, 0);
4475 if (!i2o_reg) {
4476 pr_err("%s: failed to map I2O registers\n", __func__);
4477 of_node_put(np);
4478 return -EINVAL;
4479 }
4480
4481 /* Get I2O DCRs base */
4482 dcr_base = dcr_resource_start(np, 0);
4483 dcr_len = dcr_resource_len(np, 0);
4484 if (!dcr_base && !dcr_len) {
4485 pr_err("%s: can't get DCR registers base/len!\n",
4486 np->full_name);
4487 of_node_put(np);
4488 iounmap(i2o_reg);
4489 return -ENODEV;
4490 }
4491
4492 i2o_dcr_host = dcr_map(np, dcr_base, dcr_len);
4493 if (!DCR_MAP_OK(i2o_dcr_host)) {
4494 pr_err("%s: failed to map DCRs!\n", np->full_name);
4495 of_node_put(np);
4496 iounmap(i2o_reg);
4497 return -ENODEV;
4498 }
4499 of_node_put(np);
4500
4501 /* Provide memory regions for DMA's FIFOs: I2O, DMA0 and DMA1 share
4502 * the base address of FIFO memory space.
 4503 * Actually we need twice as much physical memory as programmed in the
4504 * <fsiz> register (because there are two FIFOs for each DMA: CP and CS)
4505 */
4506 ppc440spe_dma_fifo_buf = kmalloc((DMA0_FIFO_SIZE + DMA1_FIFO_SIZE) << 1,
4507 GFP_KERNEL);
4508 if (!ppc440spe_dma_fifo_buf) {
4509 pr_err("%s: DMA FIFO buffer allocation failed.\n", __func__);
4510 iounmap(i2o_reg);
4511 dcr_unmap(i2o_dcr_host, dcr_len);
4512 return -ENOMEM;
4513 }
4514
4515 /*
4516 * Configure h/w
4517 */
4518 /* Reset I2O/DMA */
4519 mtdcri(SDR0, DCRN_SDR0_SRST, DCRN_SDR0_SRST_I2ODMA);
4520 mtdcri(SDR0, DCRN_SDR0_SRST, 0);
4521
4522 /* Setup the base address of mmaped registers */
4523 dcr_write(i2o_dcr_host, DCRN_I2O0_IBAH, (u32)(i2o_res.start >> 32));
4524 dcr_write(i2o_dcr_host, DCRN_I2O0_IBAL, (u32)(i2o_res.start) |
4525 I2O_REG_ENABLE);
4526 dcr_unmap(i2o_dcr_host, dcr_len);
4527
4528 /* Setup FIFO memory space base address */
4529 iowrite32(0, &i2o_reg->ifbah);
4530 iowrite32(((u32)__pa(ppc440spe_dma_fifo_buf)), &i2o_reg->ifbal);
4531
4532 /* set zero FIFO size for I2O, so the whole
4533 * ppc440spe_dma_fifo_buf is used by DMAs.
 4534 * DMAx FIFOs will be configured at probe time.
4535 */
4536 iowrite32(0, &i2o_reg->ifsiz);
4537 iounmap(i2o_reg);
4538
4539 /* To prepare WXOR/RXOR functionality we need access to
 4540 * Memory Queue Module DCRs (it will finally be enabled
 4541 * via the /sys interface of the ppc440spe ADMA driver).
4542 */
4543 np = of_find_compatible_node(NULL, NULL, "ibm,mq-440spe");
4544 if (!np) {
4545 pr_err("%s: can't find MQ device tree node\n",
4546 __func__);
4547 ret = -ENODEV;
4548 goto out_free;
4549 }
4550
4551 /* Get MQ DCRs base */
4552 dcr_base = dcr_resource_start(np, 0);
4553 dcr_len = dcr_resource_len(np, 0);
4554 if (!dcr_base && !dcr_len) {
4555 pr_err("%s: can't get DCR registers base/len!\n",
4556 np->full_name);
4557 ret = -ENODEV;
4558 goto out_mq;
4559 }
4560
4561 ppc440spe_mq_dcr_host = dcr_map(np, dcr_base, dcr_len);
4562 if (!DCR_MAP_OK(ppc440spe_mq_dcr_host)) {
4563 pr_err("%s: failed to map DCRs!\n", np->full_name);
4564 ret = -ENODEV;
4565 goto out_mq;
4566 }
4567 of_node_put(np);
4568 ppc440spe_mq_dcr_len = dcr_len;
4569
4570 /* Set HB alias */
4571 dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_BAUH, DMA_CUED_XOR_HB);
4572
4573 /* Set:
4574 * - LL transaction passing limit to 1;
4575 * - Memory controller cycle limit to 1;
4576 * - Galois Polynomial to 0x14d (default)
4577 */
4578 dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL,
4579 (1 << MQ0_CFBHL_TPLM) | (1 << MQ0_CFBHL_HBCL) |
4580 (PPC440SPE_DEFAULT_POLY << MQ0_CFBHL_POLY));
4581
4582 atomic_set(&ppc440spe_adma_err_irq_ref, 0);
4583 for (i = 0; i < PPC440SPE_ADMA_ENGINES_NUM; i++)
4584 ppc440spe_adma_devices[i] = -1;
4585
4586 return 0;
4587
4588out_mq:
4589 of_node_put(np);
4590out_free:
4591 kfree(ppc440spe_dma_fifo_buf);
4592 return ret;
4593}
4594
4bf27b8b 4595static const struct of_device_id ppc440spe_adma_of_match[] = {
4596 { .compatible = "ibm,dma-440spe", },
4597 { .compatible = "amcc,xor-accelerator", },
4598 {},
4599};
4600MODULE_DEVICE_TABLE(of, ppc440spe_adma_of_match);
4601
00006124 4602static struct platform_driver ppc440spe_adma_driver = {
12458ea0 4603 .probe = ppc440spe_adma_probe,
a7d6e3ec 4604 .remove = ppc440spe_adma_remove,
4605 .driver = {
4606 .name = "PPC440SP(E)-ADMA",
4607 .owner = THIS_MODULE,
4018294b 4608 .of_match_table = ppc440spe_adma_of_match,
4609 },
4610};
4611
4612static __init int ppc440spe_adma_init(void)
4613{
4614 int ret;
4615
4616 ret = ppc440spe_configure_raid_devices();
4617 if (ret)
4618 return ret;
4619
00006124 4620 ret = platform_driver_register(&ppc440spe_adma_driver);
4621 if (ret) {
4622 pr_err("%s: failed to register platform driver\n",
4623 __func__);
4624 goto out_reg;
4625 }
4626
4627 /* Initialization status */
4628 ret = driver_create_file(&ppc440spe_adma_driver.driver,
4629 &driver_attr_devices);
4630 if (ret)
4631 goto out_dev;
4632
4633 /* RAID-6 h/w enable entry */
4634 ret = driver_create_file(&ppc440spe_adma_driver.driver,
4635 &driver_attr_enable);
4636 if (ret)
4637 goto out_en;
4638
4639 /* GF polynomial to use */
4640 ret = driver_create_file(&ppc440spe_adma_driver.driver,
4641 &driver_attr_poly);
4642 if (!ret)
4643 return ret;
4644
4645 driver_remove_file(&ppc440spe_adma_driver.driver,
4646 &driver_attr_enable);
4647out_en:
4648 driver_remove_file(&ppc440spe_adma_driver.driver,
4649 &driver_attr_devices);
4650out_dev:
4651 /* User will not be able to enable h/w RAID-6 */
4652 pr_err("%s: failed to create RAID-6 driver interface\n",
4653 __func__);
00006124 4654 platform_driver_unregister(&ppc440spe_adma_driver);
4655out_reg:
4656 dcr_unmap(ppc440spe_mq_dcr_host, ppc440spe_mq_dcr_len);
4657 kfree(ppc440spe_dma_fifo_buf);
4658 return ret;
4659}
4660
4661static void __exit ppc440spe_adma_exit(void)
4662{
4663 driver_remove_file(&ppc440spe_adma_driver.driver,
4664 &driver_attr_poly);
4665 driver_remove_file(&ppc440spe_adma_driver.driver,
4666 &driver_attr_enable);
4667 driver_remove_file(&ppc440spe_adma_driver.driver,
4668 &driver_attr_devices);
00006124 4669 platform_driver_unregister(&ppc440spe_adma_driver);
4670 dcr_unmap(ppc440spe_mq_dcr_host, ppc440spe_mq_dcr_len);
4671 kfree(ppc440spe_dma_fifo_buf);
4672}
4673
4674arch_initcall(ppc440spe_adma_init);
4675module_exit(ppc440spe_adma_exit);
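/*
 * Note: arch_initcall() rather than module_init() -- presumably so that
 * the engines are registered early, before async_tx consumers such as
 * the RAID code go looking for offload channels.
 */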
4676
4677MODULE_AUTHOR("Yuri Tikhonov <yur@emcraft.com>");
4678MODULE_DESCRIPTION("PPC440SPE ADMA Engine Driver");
4679MODULE_LICENSE("GPL");