/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * BSD LICENSE
 *
 * Copyright(c) 2004-2009 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Support routines for v3+ hardware
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
#include "../dmaengine.h"
#include "registers.h"
#include "hw.h"
#include "dma.h"
#include "dma_v2.h"
extern struct kmem_cache *ioat3_sed_cache;

/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)
#define ndest_to_sw(x) ((x) + 1)
#define ndest_to_hw(x) ((x) - 1)
#define src16_cnt_to_sw(x) ((x) + 9)
#define src16_cnt_to_hw(x) ((x) - 9)
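
/*
 * Example (follows directly from the macros above): an 8 source xor/pq
 * operation is programmed with a hardware src_cnt of src_cnt_to_hw(8) == 6,
 * and a full 16 source pq operation with src16_cnt_to_hw(16) == 7; the
 * *_to_sw() macros invert the mapping when decoding a descriptor.
 */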
/* provide a lookup table for setting the source address in the base or
 * extended descriptor of an xor or pq descriptor
 */
static const u8 xor_idx_to_desc = 0xe0;
static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
static const u8 pq_idx_to_desc = 0xf8;
static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1,
				       2, 2, 2, 2, 2, 2, 2 };
static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
					0, 1, 2, 3, 4, 5, 6 };
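
/*
 * Reading the tables above: each bit of xor_idx_to_desc/pq_idx_to_desc
 * selects whether source 'idx' lives in the base descriptor (bit clear) or
 * the extended descriptor (bit set), so 0xe0 places xor sources 5-7 and
 * 0xf8 places pq sources 3-7 in the extended descriptor.  For the 16 source
 * pq format, pq16_idx_to_desc indexes directly into the descs[] array
 * (0 = base pq descriptor, 1/2 = first and second 64-byte half of the
 * super extended descriptor).
 */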
static void ioat3_eh(struct ioat2_dma_chan *ioat);

static void xor_set_src(struct ioat_raw_descriptor *descs[2],
			dma_addr_t addr, u32 offset, int idx)
{
	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];

	raw->field[xor_idx_to_field[idx]] = addr + offset;
}

static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

	return raw->field[pq_idx_to_field[idx]];
}

static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx)
{
	struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];

	return raw->field[pq16_idx_to_field[idx]];
}

static void pq_set_src(struct ioat_raw_descriptor *descs[2],
		       dma_addr_t addr, u32 offset, u8 coef, int idx)
{
	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

	raw->field[pq_idx_to_field[idx]] = addr + offset;
	pq->coef[idx] = coef;
}
static bool is_jf_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
		return true;
	default:
		return false;
	}
}

static bool is_snb_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
		return true;
	default:
		return false;
	}
}

static bool is_ivb_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
		return true;
	default:
		return false;
	}
}

static bool is_hsw_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
		return true;
	default:
		return false;
	}
}

static bool is_xeon_cb32(struct pci_dev *pdev)
{
	return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
		is_hsw_ioat(pdev);
}
static bool is_bwd_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
	/* even though not Atom, BDX-DE has same DMA silicon */
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
		return true;
	default:
		return false;
	}
}

static bool is_bwd_noraid(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
		return true;
	default:
		return false;
	}
}
static void pq16_set_src(struct ioat_raw_descriptor *desc[3],
			 dma_addr_t addr, u32 offset, u8 coef, unsigned idx)
{
	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
	struct ioat_pq16a_descriptor *pq16 =
		(struct ioat_pq16a_descriptor *)desc[1];
	struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];

	raw->field[pq16_idx_to_field[idx]] = addr + offset;

	if (idx < 8)
		pq->coef[idx] = coef;
	else
		pq16->coef[idx - 8] = coef;
}
static struct ioat_sed_ent *
ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool)
{
	struct ioat_sed_ent *sed;
	gfp_t flags = __GFP_ZERO | GFP_ATOMIC;

	sed = kmem_cache_alloc(ioat3_sed_cache, flags);
	if (!sed)
		return NULL;

	sed->hw_pool = hw_pool;
	sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool],
				 flags, &sed->dma);
	if (!sed->hw) {
		kmem_cache_free(ioat3_sed_cache, sed);
		return NULL;
	}

	return sed;
}

static void
ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *sed)
{
	if (!sed)
		return;

	dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
	kmem_cache_free(ioat3_sed_cache, sed);
}
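
/*
 * Note on SED pool selection: the hw_pool index chosen by callers (see
 * __ioat3_prep_pq16_lock(), which uses (src_cnt - 2) >> 3) corresponds to
 * the dmam_pool_create() calls in ioat3_dma_probe(), where pool i holds
 * super extended descriptor blocks of SED_SIZE * (i + 1) bytes.
 */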
static bool desc_has_ext(struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	if (hw->ctl_f.op == IOAT_OP_XOR ||
	    hw->ctl_f.op == IOAT_OP_XOR_VAL) {
		struct ioat_xor_descriptor *xor = desc->xor;

		if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
			return true;
	} else if (hw->ctl_f.op == IOAT_OP_PQ ||
		   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
		struct ioat_pq_descriptor *pq = desc->pq;

		if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
			return true;
	}

	return false;
}
static u64 ioat3_get_current_completion(struct ioat_chan_common *chan)
{
	u64 phys_complete;
	u64 completion;

	completion = *chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	return phys_complete;
}

static bool ioat3_cleanup_preamble(struct ioat_chan_common *chan,
				   u64 *phys_complete)
{
	*phys_complete = ioat3_get_current_completion(chan);
	if (*phys_complete == chan->last_completion)
		return false;

	clear_bit(IOAT_COMPLETION_ACK, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}
static void
desc_get_errstat(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	switch (hw->ctl_f.op) {
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ_VAL_16S:
	{
		struct ioat_pq_descriptor *pq = desc->pq;

		/* check if there's error written */
		if (!pq->dwbes_f.wbes)
			return;

		/* need to set a chanerr var for checking to clear later */

		if (pq->dwbes_f.p_val_err)
			*desc->result |= SUM_CHECK_P_RESULT;

		if (pq->dwbes_f.q_val_err)
			*desc->result |= SUM_CHECK_Q_RESULT;

		return;
	}
	default:
		return;
	}
}
/**
 * __cleanup - reclaim used descriptors
 * @ioat: channel (ring) to clean
 *
 * The difference from the dma_v2.c __cleanup() is that this routine
 * handles extended descriptors and dma-unmapping raid operations.
 */
static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *device = chan->device;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	int idx = ioat->tail, i;
	u16 active;

	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);

	/*
	 * At restart of the channel, the completion address and the
	 * channel status will be 0 due to starting a new chain. Since
	 * it's new chain and the first descriptor "fails", there is
	 * nothing to clean up. We do not want to reap the entire submitted
	 * chain due to this 0 address value and then BUG.
	 */
	if (!phys_complete)
		return;

	active = ioat2_ring_active(ioat);
	for (i = 0; i < active && !seen_current; i++) {
		struct dma_async_tx_descriptor *tx;

		smp_read_barrier_depends();
		prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
		desc = ioat2_get_ring_ent(ioat, idx + i);
		dump_desc_dbg(ioat, desc);

		/* set err stat if we are using dwbes */
		if (device->cap & IOAT_CAP_DWBES)
			desc_get_errstat(ioat, desc);

		tx = &desc->txd;
		if (tx->cookie) {
			dma_cookie_complete(tx);
			dma_descriptor_unmap(tx);
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys == phys_complete)
			seen_current = true;

		/* skip extended descriptors */
		if (desc_has_ext(desc)) {
			BUG_ON(i + 1 >= active);
			i++;
		}

		/* cleanup super extended descriptors */
		if (desc->sed) {
			ioat3_free_sed(device, desc->sed);
			desc->sed = NULL;
		}
	}
	smp_mb(); /* finish all descriptor reads before incrementing tail */
	ioat->tail = idx + i;
	BUG_ON(active && !seen_current); /* no active descs have written a completion? */
	chan->last_completion = phys_complete;

	if (active - i == 0) {
		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
			__func__);
		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
	/* 5 microsecond delay per pending descriptor */
	writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
	       chan->device->reg_base + IOAT_INTRDELAY_OFFSET);
}
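
/*
 * Interrupt coalescing example for the writew() above: if 10 descriptors
 * are still pending after a cleanup pass, INTRDELAY is programmed with
 * 5 * 10 = 50 microseconds, clamped to IOAT_INTRDELAY_MASK.
 */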
static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	u64 phys_complete;

	spin_lock_bh(&chan->cleanup_lock);

	if (ioat3_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	if (is_ioat_halted(*chan->completion)) {
		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);

		if (chanerr & IOAT_CHANERR_HANDLE_MASK) {
			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
			ioat3_eh(ioat);
		}
	}

	spin_unlock_bh(&chan->cleanup_lock);
}

static void ioat3_cleanup_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;

	ioat3_cleanup(ioat);
	if (!test_bit(IOAT_RUN, &chan->state))
		return;
	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	u64 phys_complete;

	ioat2_quiesce(chan, 0);
	if (ioat3_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	__ioat2_restart_chan(ioat);
}
static void ioat3_eh(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct pci_dev *pdev = to_pdev(chan);
	struct ioat_dma_descriptor *hw;
	struct dma_async_tx_descriptor *tx;
	u64 phys_complete;
	struct ioat_ring_ent *desc;
	u32 err_handled = 0;
	u32 chanerr_int;
	u32 chanerr;

	/* cleanup so tail points to descriptor that caused the error */
	if (ioat3_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);

	dev_dbg(to_dev(chan), "%s: error = %x:%x\n",
		__func__, chanerr, chanerr_int);

	desc = ioat2_get_ring_ent(ioat, ioat->tail);
	hw = desc->hw;
	dump_desc_dbg(ioat, desc);

	switch (hw->ctl_f.op) {
	case IOAT_OP_XOR_VAL:
		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
			*desc->result |= SUM_CHECK_P_RESULT;
			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
		}
		break;
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ_VAL_16S:
		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
			*desc->result |= SUM_CHECK_P_RESULT;
			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
		}
		if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
			*desc->result |= SUM_CHECK_Q_RESULT;
			err_handled |= IOAT_CHANERR_XOR_Q_ERR;
		}
		break;
	}

	/* fault on unhandled error or spurious halt */
	if (chanerr ^ err_handled || chanerr == 0) {
		dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n",
			__func__, chanerr, err_handled);
		BUG();
	} else { /* cleanup the faulty descriptor */
		tx = &desc->txd;
		if (tx->cookie) {
			dma_cookie_complete(tx);
			dma_descriptor_unmap(tx);
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}
	}

	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);

	/* mark faulting descriptor as complete */
	*chan->completion = desc->txd.phys;

	spin_lock_bh(&ioat->prep_lock);
	ioat3_restart_channel(ioat);
	spin_unlock_bh(&ioat->prep_lock);
}
static void check_active(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;

	if (ioat2_ring_active(ioat)) {
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		return;
	}

	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	else if (ioat->alloc_order > ioat_get_alloc_order()) {
		/* if the ring is idle, empty, and oversized try to step
		 * down the size
		 */
		reshape_ring(ioat, ioat->alloc_order - 1);

		/* keep shrinking until we get back to our minimum
		 * default size
		 */
		if (ioat->alloc_order > ioat_get_alloc_order())
			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
}
static void ioat3_timer_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;
	u64 status;

	status = ioat_chansts(chan);

	/* when halted due to errors check for channel
	 * programming errors before advancing the completion state
	 */
	if (is_ioat_halted(status)) {
		u32 chanerr;

		chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
			__func__, chanerr);
		if (test_bit(IOAT_RUN, &chan->state))
			BUG_ON(is_ioat_bug(chanerr));
		else /* we never got off the ground */
			return;
	}

	/* if we haven't made progress and we have already
	 * acknowledged a pending completion once, then be more
	 * forceful with a restart
	 */
	spin_lock_bh(&chan->cleanup_lock);
	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);
	else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
		spin_lock_bh(&ioat->prep_lock);
		ioat3_restart_channel(ioat);
		spin_unlock_bh(&ioat->prep_lock);
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	} else {
		set_bit(IOAT_COMPLETION_ACK, &chan->state);
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
	}

	if (ioat2_ring_active(ioat))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
	else {
		spin_lock_bh(&ioat->prep_lock);
		check_active(ioat);
		spin_unlock_bh(&ioat->prep_lock);
	}
	spin_unlock_bh(&chan->cleanup_lock);
}
static enum dma_status
ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	enum dma_status ret;

	ret = dma_cookie_status(c, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	ioat3_cleanup(ioat);

	return dma_cookie_status(c, cookie, txstate);
}
static struct dma_async_tx_descriptor *
__ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
		      dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt,
		      size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_ring_ent *compl_desc;
	struct ioat_ring_ent *desc;
	struct ioat_ring_ent *ext;
	size_t total_len = len;
	struct ioat_xor_descriptor *xor;
	struct ioat_xor_ext_descriptor *xor_ex = NULL;
	struct ioat_dma_descriptor *hw;
	int num_descs, with_ext, idx, i;
	u32 offset = 0;
	u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;

	BUG_ON(src_cnt < 2);

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	/* we need 2x the number of descriptors to cover greater than 5
	 * sources
	 */
	if (src_cnt > 5) {
		with_ext = 1;
		num_descs *= 2;
	} else
		with_ext = 0;

	/* completion writes from the raid engine may pass completion
	 * writes from the legacy engine, so we need one extra null
	 * (legacy) descriptor to ensure all completion writes arrive in
	 * order.
	 */
	if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs+1) == 0)
		idx = ioat->head;
	else
		return NULL;
	i = 0;
	do {
		struct ioat_raw_descriptor *descs[2];
		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
		int s;

		desc = ioat2_get_ring_ent(ioat, idx + i);
		xor = desc->xor;

		/* save a branch by unconditionally retrieving the
		 * extended descriptor xor_set_src() knows to not write
		 * to it in the single descriptor case
		 */
		ext = ioat2_get_ring_ent(ioat, idx + i + 1);
		xor_ex = ext->xor_ex;

		descs[0] = (struct ioat_raw_descriptor *) xor;
		descs[1] = (struct ioat_raw_descriptor *) xor_ex;
		for (s = 0; s < src_cnt; s++)
			xor_set_src(descs, src[s], offset, s);
		xor->size = xfer_size;
		xor->dst_addr = dest + offset;
		xor->ctl = 0;
		xor->ctl_f.op = op;
		xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt);

		len -= xfer_size;
		offset += xfer_size;
		dump_desc_dbg(ioat, desc);
	} while ((i += 1 + with_ext) < num_descs);

	/* last xor descriptor carries the unmap parameters and fence bit */
	desc->txd.flags = flags;
	desc->len = total_len;
	if (result)
		desc->result = result;
	xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE);

	/* completion descriptor carries interrupt bit */
	compl_desc = ioat2_get_ring_ent(ioat, idx + i);
	compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
	hw = compl_desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->size = NULL_DESC_BUFFER_SIZE;
	dump_desc_dbg(ioat, compl_desc);

	/* we leave the channel locked to ensure in order submission */
	return &compl_desc->txd;
}
static struct dma_async_tx_descriptor *
ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
	       unsigned int src_cnt, size_t len, unsigned long flags)
{
	return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
}

static struct dma_async_tx_descriptor *
ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
		   unsigned int src_cnt, size_t len,
		   enum sum_check_flags *result, unsigned long flags)
{
	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*result = 0;

	return __ioat3_prep_xor_lock(chan, result, src[0], &src[1],
				     src_cnt - 1, len, flags);
}
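
/*
 * Note how ioat3_prep_xor_val() reuses the xor prep path: src[0] is passed
 * in the destination slot and the remaining src_cnt - 1 addresses as
 * sources, and the non-NULL result pointer selects IOAT_OP_XOR_VAL instead
 * of IOAT_OP_XOR in __ioat3_prep_xor_lock().
 */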
static void
dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct ioat_ring_ent *ext)
{
	struct device *dev = to_dev(&ioat->base);
	struct ioat_pq_descriptor *pq = desc->pq;
	struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL;
	struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex };
	int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
	int i;

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
		" sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
		" src_cnt: %d)\n",
		desc_id(desc), (unsigned long long) desc->txd.phys,
		(unsigned long long) (pq_ex ? pq_ex->next : pq->next),
		desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en,
		pq->ctl_f.compl_write,
		pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
		pq->ctl_f.src_cnt);
	for (i = 0; i < src_cnt; i++)
		dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
			(unsigned long long) pq_get_src(descs, i), pq->coef[i]);
	dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
	dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
	dev_dbg(dev, "\tNEXT: %#llx\n", pq->next);
}
static void dump_pq16_desc_dbg(struct ioat2_dma_chan *ioat,
			       struct ioat_ring_ent *desc)
{
	struct device *dev = to_dev(&ioat->base);
	struct ioat_pq_descriptor *pq = desc->pq;
	struct ioat_raw_descriptor *descs[] = { (void *)pq,
						(void *)pq,
						(void *)pq };
	int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
	int i;

	if (desc->sed) {
		descs[1] = (void *)desc->sed->hw;
		descs[2] = (void *)desc->sed->hw + 64;
	}

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
		" sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
		" src_cnt: %d)\n",
		desc_id(desc), (unsigned long long) desc->txd.phys,
		(unsigned long long) pq->next,
		desc->txd.flags, pq->size, pq->ctl,
		pq->ctl_f.op, pq->ctl_f.int_en,
		pq->ctl_f.compl_write,
		pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
		pq->ctl_f.src_cnt);

	for (i = 0; i < src_cnt; i++) {
		dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
			(unsigned long long) pq16_get_src(descs, i),
			pq->coef[i]);
	}
	dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
	dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
}
static struct dma_async_tx_descriptor *
__ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
		     const dma_addr_t *dst, const dma_addr_t *src,
		     unsigned int src_cnt, const unsigned char *scf,
		     size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *device = chan->device;
	struct ioat_ring_ent *compl_desc;
	struct ioat_ring_ent *desc;
	struct ioat_ring_ent *ext;
	size_t total_len = len;
	struct ioat_pq_descriptor *pq;
	struct ioat_pq_ext_descriptor *pq_ex = NULL;
	struct ioat_dma_descriptor *hw;
	u32 offset = 0;
	u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
	int i, s, idx, with_ext, num_descs;
	int cb32 = (device->version < IOAT_VER_3_3) ? 1 : 0;

	dev_dbg(to_dev(chan), "%s\n", __func__);
	/* the engine requires at least two sources (we provide
	 * at least 1 implied source in the DMA_PREP_CONTINUE case)
	 */
	BUG_ON(src_cnt + dmaf_continue(flags) < 2);

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	/* we need 2x the number of descriptors to cover greater than 3
	 * sources (we need 1 extra source in the q-only continuation
	 * case and 3 extra sources in the p+q continuation case.
	 */
	if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
	    (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
		with_ext = 1;
		num_descs *= 2;
	} else
		with_ext = 0;

	/* completion writes from the raid engine may pass completion
	 * writes from the legacy engine, so we need one extra null
	 * (legacy) descriptor to ensure all completion writes arrive in
	 * order.
	 */
	if (likely(num_descs) &&
	    ioat2_check_space_lock(ioat, num_descs + cb32) == 0)
		idx = ioat->head;
	else
		return NULL;
	i = 0;
	do {
		struct ioat_raw_descriptor *descs[2];
		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		pq = desc->pq;

		/* save a branch by unconditionally retrieving the
		 * extended descriptor pq_set_src() knows to not write
		 * to it in the single descriptor case
		 */
		ext = ioat2_get_ring_ent(ioat, idx + i + with_ext);
		pq_ex = ext->pq_ex;

		descs[0] = (struct ioat_raw_descriptor *) pq;
		descs[1] = (struct ioat_raw_descriptor *) pq_ex;

		for (s = 0; s < src_cnt; s++)
			pq_set_src(descs, src[s], offset, scf[s], s);

		/* see the comment for dma_maxpq in include/linux/dmaengine.h */
		if (dmaf_p_disabled_continue(flags))
			pq_set_src(descs, dst[1], offset, 1, s++);
		else if (dmaf_continue(flags)) {
			pq_set_src(descs, dst[0], offset, 0, s++);
			pq_set_src(descs, dst[1], offset, 1, s++);
			pq_set_src(descs, dst[1], offset, 0, s++);
		}
		pq->size = xfer_size;
		pq->p_addr = dst[0] + offset;
		pq->q_addr = dst[1] + offset;
		pq->ctl = 0;
		pq->ctl_f.op = op;
		/* we turn on descriptor write back error status */
		if (device->cap & IOAT_CAP_DWBES)
			pq->ctl_f.wb_en = result ? 1 : 0;
		pq->ctl_f.src_cnt = src_cnt_to_hw(s);
		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
		pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);

		len -= xfer_size;
		offset += xfer_size;
	} while ((i += 1 + with_ext) < num_descs);

	/* last pq descriptor carries the unmap parameters and fence bit */
	desc->txd.flags = flags;
	desc->len = total_len;
	if (result)
		desc->result = result;
	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	dump_pq_desc_dbg(ioat, desc, ext);

	if (!cb32) {
		pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
		pq->ctl_f.compl_write = 1;
		compl_desc = desc;
	} else {
		/* completion descriptor carries interrupt bit */
		compl_desc = ioat2_get_ring_ent(ioat, idx + i);
		compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
		hw = compl_desc->hw;
		hw->ctl = 0;
		hw->ctl_f.null = 1;
		hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
		hw->ctl_f.compl_write = 1;
		hw->size = NULL_DESC_BUFFER_SIZE;
		dump_desc_dbg(ioat, compl_desc);
	}

	/* we leave the channel locked to ensure in order submission */
	return &compl_desc->txd;
}
static struct dma_async_tx_descriptor *
__ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
		       const dma_addr_t *dst, const dma_addr_t *src,
		       unsigned int src_cnt, const unsigned char *scf,
		       size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *device = chan->device;
	struct ioat_ring_ent *desc;
	size_t total_len = len;
	struct ioat_pq_descriptor *pq;
	u32 offset = 0;
	u8 op;
	int i, s, idx, num_descs;

	/* this function is only called with 9-16 sources */
	op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S;

	dev_dbg(to_dev(chan), "%s\n", __func__);

	num_descs = ioat2_xferlen_to_descs(ioat, len);

	/*
	 * 16 source pq is only available on cb3.3 and has no completion
	 * write hw bug workaround
	 */
	if (num_descs && ioat2_check_space_lock(ioat, num_descs) == 0)
		idx = ioat->head;
	else
		return NULL;

	i = 0;

	do {
		struct ioat_raw_descriptor *descs[4];
		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		pq = desc->pq;

		descs[0] = (struct ioat_raw_descriptor *) pq;

		desc->sed = ioat3_alloc_sed(device, (src_cnt-2) >> 3);
		if (!desc->sed) {
			dev_err(to_dev(chan),
				"%s: no free sed entries\n", __func__);
			return NULL;
		}

		pq->sed_addr = desc->sed->dma;
		desc->sed->parent = desc;

		descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw;
		descs[2] = (void *)descs[1] + 64;

		for (s = 0; s < src_cnt; s++)
			pq16_set_src(descs, src[s], offset, scf[s], s);

		/* see the comment for dma_maxpq in include/linux/dmaengine.h */
		if (dmaf_p_disabled_continue(flags))
			pq16_set_src(descs, dst[1], offset, 1, s++);
		else if (dmaf_continue(flags)) {
			pq16_set_src(descs, dst[0], offset, 0, s++);
			pq16_set_src(descs, dst[1], offset, 1, s++);
			pq16_set_src(descs, dst[1], offset, 0, s++);
		}

		pq->size = xfer_size;
		pq->p_addr = dst[0] + offset;
		pq->q_addr = dst[1] + offset;
		pq->ctl = 0;
		pq->ctl_f.op = op;
		pq->ctl_f.src_cnt = src16_cnt_to_hw(s);
		/* we turn on descriptor write back error status */
		if (device->cap & IOAT_CAP_DWBES)
			pq->ctl_f.wb_en = result ? 1 : 0;
		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
		pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);

		len -= xfer_size;
		offset += xfer_size;
	} while (++i < num_descs);

	/* last pq descriptor carries the unmap parameters and fence bit */
	desc->txd.flags = flags;
	desc->len = total_len;
	if (result)
		desc->result = result;
	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);

	/* with cb3.3 we should be able to do completion w/o a null desc */
	pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	pq->ctl_f.compl_write = 1;

	dump_pq16_desc_dbg(ioat, desc);

	/* we leave the channel locked to ensure in order submission */
	return &desc->txd;
}
static int src_cnt_flags(unsigned int src_cnt, unsigned long flags)
{
	if (dmaf_p_disabled_continue(flags))
		return src_cnt + 1;
	else if (dmaf_continue(flags))
		return src_cnt + 3;
	else
		return src_cnt;
}
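
/*
 * Example: a RAID6 continuation (DMA_PREP_CONTINUE without
 * DMA_PREP_PQ_DISABLE_P) with 6 explicit sources counts as 6 + 3 = 9
 * effective sources, so the callers below route it to the 16 source
 * descriptor format via src_cnt_flags() > 8.
 */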
static struct dma_async_tx_descriptor *
ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
	      unsigned int src_cnt, const unsigned char *scf, size_t len,
	      unsigned long flags)
{
	/* specify valid address for disabled result */
	if (flags & DMA_PREP_PQ_DISABLE_P)
		dst[0] = dst[1];
	if (flags & DMA_PREP_PQ_DISABLE_Q)
		dst[1] = dst[0];

	/* handle the single source multiply case from the raid6
	 * recovery path
	 */
	if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
		dma_addr_t single_source[2];
		unsigned char single_source_coef[2];

		BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q);
		single_source[0] = src[0];
		single_source[1] = src[0];
		single_source_coef[0] = scf[0];
		single_source_coef[1] = 0;

		return src_cnt_flags(src_cnt, flags) > 8 ?
			__ioat3_prep_pq16_lock(chan, NULL, dst, single_source,
					       2, single_source_coef, len,
					       flags) :
			__ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2,
					     single_source_coef, len, flags);

	} else {
		return src_cnt_flags(src_cnt, flags) > 8 ?
			__ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt,
					       scf, len, flags) :
			__ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt,
					     scf, len, flags);
	}
}
static struct dma_async_tx_descriptor *
ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		  unsigned int src_cnt, const unsigned char *scf, size_t len,
		  enum sum_check_flags *pqres, unsigned long flags)
{
	/* specify valid address for disabled result */
	if (flags & DMA_PREP_PQ_DISABLE_P)
		pq[0] = pq[1];
	if (flags & DMA_PREP_PQ_DISABLE_Q)
		pq[1] = pq[0];

	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*pqres = 0;

	return src_cnt_flags(src_cnt, flags) > 8 ?
		__ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len,
				       flags) :
		__ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
				     flags);
}
static struct dma_async_tx_descriptor *
ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		 unsigned int src_cnt, size_t len, unsigned long flags)
{
	unsigned char scf[src_cnt];
	dma_addr_t pq[2];

	memset(scf, 0, src_cnt);
	pq[0] = dst;
	flags |= DMA_PREP_PQ_DISABLE_Q;
	pq[1] = dst; /* specify valid address for disabled result */

	return src_cnt_flags(src_cnt, flags) > 8 ?
		__ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len,
				       flags) :
		__ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
				     flags);
}

static struct dma_async_tx_descriptor *
ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
		     unsigned int src_cnt, size_t len,
		     enum sum_check_flags *result, unsigned long flags)
{
	unsigned char scf[src_cnt];
	dma_addr_t pq[2];

	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*result = 0;

	memset(scf, 0, src_cnt);
	pq[0] = src[0];
	flags |= DMA_PREP_PQ_DISABLE_Q;
	pq[1] = pq[0]; /* specify valid address for disabled result */

	return src_cnt_flags(src_cnt, flags) > 8 ?
		__ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1,
				       scf, len, flags) :
		__ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1,
				     scf, len, flags);
}
static struct dma_async_tx_descriptor *
ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;

	if (ioat2_check_space_lock(ioat, 1) == 0)
		desc = ioat2_get_ring_ent(ioat, ioat->head);
	else
		return NULL;

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	hw->ctl_f.compl_write = 1;
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;

	desc->txd.flags = flags;
	desc->len = 1;

	dump_desc_dbg(ioat, desc);

	/* we leave the channel locked to ensure in order submission */
	return &desc->txd;
}
static void ioat3_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}
#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
static int ioat_xor_val_self_test(struct ioatdma_device *device)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[IOAT_NUM_SRC_TEST];
	struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	u32 xor_val_result;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;
	u8 op = 0;

	dev_dbg(dev, "%s\n", __func__);

	if (!dma_has_cap(DMA_XOR, dma->cap_mask))
		return 0;

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
			(cmp_byte << 8) | cmp_byte;
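
	/*
	 * With IOAT_NUM_SRC_TEST == 6 the expected xor of the test pages is
	 * 0x01 ^ 0x02 ^ ... ^ 0x20 == 0x3f per byte, i.e. cmp_word ends up
	 * as 0x3f3f3f3f.
	 */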

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	op = IOAT_OP_XOR;

	dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dest_dma))
		goto dma_unmap;

	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		dma_srcs[i] = DMA_ERROR_CODE;
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
		dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_srcs[i]))
			goto dma_unmap;
	}
	tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				      IOAT_NUM_SRC_TEST, PAGE_SIZE,
				      DMA_PREP_INTERRUPT);

	if (!tx) {
		dev_err(dev, "Self-test xor prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test xor setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test xor timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dev, "Self-test xor failed compare\n");
			err = -ENODEV;
			goto free_resources;
		}
	}
	dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);

	dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);

	/* skip validate if the capability is not present */
	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
		goto free_resources;

	op = IOAT_OP_XOR_VAL;

	/* validate the sources with the destination page */
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		xor_val_srcs[i] = xor_srcs[i];
	xor_val_srcs[i] = dest;

	xor_val_result = 1;

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = DMA_ERROR_CODE;
	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_srcs[i]))
			goto dma_unmap;
	}
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
					  &xor_val_result, DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(dev, "Self-test zero prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test zero setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test validate timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	if (xor_val_result != 0) {
		dev_err(dev, "Self-test validate failed compare\n");
		err = -ENODEV;
		goto free_resources;
	}

	memset(page_address(dest), 0, PAGE_SIZE);

	/* test for non-zero parity sum */
	op = IOAT_OP_XOR_VAL;

	xor_val_result = 0;
	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = DMA_ERROR_CODE;
	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_srcs[i]))
			goto dma_unmap;
	}
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
					  &xor_val_result, DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(dev, "Self-test 2nd zero prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test 2nd zero setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test 2nd validate timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	if (xor_val_result != SUM_CHECK_P_RESULT) {
		dev_err(dev, "Self-test validate failed compare\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	goto free_resources;
dma_unmap:
	if (op == IOAT_OP_XOR) {
		if (dest_dma != DMA_ERROR_CODE)
			dma_unmap_page(dev, dest_dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
		for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
			if (dma_srcs[i] != DMA_ERROR_CODE)
				dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
					       DMA_TO_DEVICE);
	} else if (op == IOAT_OP_XOR_VAL) {
		for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
			if (dma_srcs[i] != DMA_ERROR_CODE)
				dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
					       DMA_TO_DEVICE);
	}
free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	src_idx = IOAT_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}
static int ioat3_dma_self_test(struct ioatdma_device *device)
{
	int rc = ioat_dma_self_test(device);

	if (rc)
		return rc;

	rc = ioat_xor_val_self_test(device);
	if (rc)
		return rc;

	return 0;
}
static int ioat3_irq_reinit(struct ioatdma_device *device)
{
	struct pci_dev *pdev = device->pdev;
	int irq = pdev->irq, i;

	if (!is_bwd_ioat(pdev))
		return 0;

	switch (device->irq_mode) {
	case IOAT_MSIX:
		for (i = 0; i < device->common.chancnt; i++) {
			struct msix_entry *msix = &device->msix_entries[i];
			struct ioat_chan_common *chan;

			chan = ioat_chan_by_index(device, i);
			devm_free_irq(&pdev->dev, msix->vector, chan);
		}

		pci_disable_msix(pdev);
		break;
	case IOAT_MSI:
		pci_disable_msi(pdev);
		/* fall through */
	case IOAT_INTX:
		devm_free_irq(&pdev->dev, irq, device);
		break;
	default:
		return 0;
	}
	device->irq_mode = IOAT_NOIRQ;

	return ioat_dma_setup_interrupts(device);
}
static int ioat3_reset_hw(struct ioat_chan_common *chan)
{
	/* throw away whatever the channel was doing and get it
	 * initialized, with ioat3 specific workarounds
	 */
	struct ioatdma_device *device = chan->device;
	struct pci_dev *pdev = device->pdev;
	u32 chanerr;
	u16 dev_id;
	int err;

	ioat2_quiesce(chan, msecs_to_jiffies(100));

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);

	if (device->version < IOAT_VER_3_3) {
		/* clear any pending errors */
		err = pci_read_config_dword(pdev,
				IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
		if (err) {
			dev_err(&pdev->dev,
				"channel error register unreachable\n");
			return err;
		}
		pci_write_config_dword(pdev,
				IOAT_PCI_CHANERR_INT_OFFSET, chanerr);

		/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
		 * (workaround for spurious config parity error after restart)
		 */
		pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
		if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
			pci_write_config_dword(pdev,
					       IOAT_PCI_DMAUNCERRSTS_OFFSET,
					       0x10);
		}
	}

	err = ioat2_reset_sync(chan, msecs_to_jiffies(200));
	if (!err)
		err = ioat3_irq_reinit(device);

	if (err)
		dev_err(&pdev->dev, "Failed to reset: %d\n", err);

	return err;
}
static void ioat3_intr_quirk(struct ioatdma_device *device)
{
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioat_chan_common *chan;
	u32 errmask;

	dma = &device->common;

	/*
	 * if we have descriptor write back error status, we mask the
	 * error interrupts
	 */
	if (device->cap & IOAT_CAP_DWBES) {
		list_for_each_entry(c, &dma->channels, device_node) {
			chan = to_chan_common(c);
			errmask = readl(chan->reg_base +
					IOAT_CHANERR_MASK_OFFSET);
			errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR |
				   IOAT_CHANERR_XOR_Q_ERR;
			writel(errmask, chan->reg_base +
					IOAT_CHANERR_MASK_OFFSET);
		}
	}
}
int ioat3_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	int dca_en = system_has_dca_enabled(pdev);
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioat_chan_common *chan;
	bool is_raid_device = false;
	int err;

	device->enumerate_channels = ioat2_enumerate_channels;
	device->reset_hw = ioat3_reset_hw;
	device->self_test = ioat3_dma_self_test;
	device->intr_quirk = ioat3_intr_quirk;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat2_issue_pending;
	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
	dma->device_free_chan_resources = ioat2_free_chan_resources;

	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
	dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;

	device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);

	if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
		device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);

	/* dca is incompatible with raid operations */
	if (dca_en && (device->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
		device->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);

	if (device->cap & IOAT_CAP_XOR) {
		is_raid_device = true;
		dma->max_xor = 8;

		dma_cap_set(DMA_XOR, dma->cap_mask);
		dma->device_prep_dma_xor = ioat3_prep_xor;

		dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
		dma->device_prep_dma_xor_val = ioat3_prep_xor_val;
	}

	if (device->cap & IOAT_CAP_PQ) {
		is_raid_device = true;

		dma->device_prep_dma_pq = ioat3_prep_pq;
		dma->device_prep_dma_pq_val = ioat3_prep_pq_val;
		dma_cap_set(DMA_PQ, dma->cap_mask);
		dma_cap_set(DMA_PQ_VAL, dma->cap_mask);

		if (device->cap & IOAT_CAP_RAID16SS) {
			dma_set_maxpq(dma, 16, 0);
		} else {
			dma_set_maxpq(dma, 8, 0);
		}

		if (!(device->cap & IOAT_CAP_XOR)) {
			dma->device_prep_dma_xor = ioat3_prep_pqxor;
			dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val;
			dma_cap_set(DMA_XOR, dma->cap_mask);
			dma_cap_set(DMA_XOR_VAL, dma->cap_mask);

			if (device->cap & IOAT_CAP_RAID16SS) {
				dma->max_xor = 16;
			} else {
				dma->max_xor = 8;
			}
		}
	}

	dma->device_tx_status = ioat3_tx_status;
	device->cleanup_fn = ioat3_cleanup_event;
	device->timer_fn = ioat3_timer_event;

	/* starting with CB3.3 super extended descriptors are supported */
	if (device->cap & IOAT_CAP_RAID16SS) {
		char pool_name[14];
		int i;

		for (i = 0; i < MAX_SED_POOLS; i++) {
			snprintf(pool_name, 14, "ioat_hw%d_sed", i);

			/* allocate SED DMA pool */
			device->sed_hw_pool[i] = dmam_pool_create(pool_name,
					&pdev->dev,
					SED_SIZE * (i + 1), 64, 0);
			if (!device->sed_hw_pool[i])
				return -ENOMEM;
		}
	}

	err = ioat_probe(device);
	if (err)
		return err;

	list_for_each_entry(c, &dma->channels, device_node) {
		chan = to_chan_common(c);
		writel(IOAT_DMA_DCA_ANY_CPU,
		       chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(device);
	if (err)
		return err;

	ioat_kobject_add(device, &ioat2_ktype);

	if (dca)
		device->dca = ioat3_dca_init(pdev, device->reg_base);