/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2007 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * memory copy operations.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include "ioatdma.h"
#include "ioatdma_registers.h"
#include "ioatdma_hw.h"

#define INITIAL_IOAT_DESC_COUNT 128
#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)

/* internal functions */
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
static struct ioat_desc_sw *
ioat_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
static inline struct ioat_dma_chan *ioat_lookup_chan_by_index(
						struct ioatdma_device *device,
						int index)
{
	return device->idx[index];
}
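/*
 * Channels are stored in device->idx[] by channel number during enumeration,
 * so an MSI-X vector number or an attention-status bit can be translated
 * straight back to its channel by ioat_lookup_chan_by_index() above.
 */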
/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioat_dma_chan *ioat_chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
		ioat_chan = ioat_lookup_chan_by_index(instance, bit);
		tasklet_schedule(&ioat_chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}
/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioat_dma_chan *ioat_chan = data;

	tasklet_schedule(&ioat_chan->cleanup_task);

	return IRQ_HANDLED;
}

static void ioat_dma_cleanup_tasklet(unsigned long data);
/**
 * ioat_dma_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat_chan;

	device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
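	/*
	 * XFERCAP is a power-of-two exponent: a non-zero value n caps each
	 * hardware descriptor at 2^n bytes, while 0 is treated here as
	 * "no limit" (xfercap becomes all ones).
	 */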
	for (i = 0; i < device->common.chancnt; i++) {
		ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
		if (!ioat_chan) {
			device->common.chancnt = i;
			break;
		}

		ioat_chan->device = device;
		ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
		ioat_chan->xfercap = xfercap;
		spin_lock_init(&ioat_chan->cleanup_lock);
		spin_lock_init(&ioat_chan->desc_lock);
		INIT_LIST_HEAD(&ioat_chan->free_desc);
		INIT_LIST_HEAD(&ioat_chan->used_desc);
		/* This should be made common somewhere in dmaengine.c */
		ioat_chan->common.device = &device->common;
		list_add_tail(&ioat_chan->common.device_node,
			      &device->common.channels);
		device->idx[i] = ioat_chan;
		tasklet_init(&ioat_chan->cleanup_task,
			     ioat_dma_cleanup_tasklet,
			     (unsigned long) ioat_chan);
		tasklet_disable(&ioat_chan->cleanup_task);
	}
	return device->common.chancnt;
}
static void ioat_set_src(dma_addr_t addr,
			 struct dma_async_tx_descriptor *tx,
			 int index)
{
	tx_to_ioat_desc(tx)->src = addr;
}

static void ioat_set_dest(dma_addr_t addr,
			  struct dma_async_tx_descriptor *tx,
			  int index)
{
	tx_to_ioat_desc(tx)->dst = addr;
}
static dma_cookie_t ioat_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
	struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
	struct ioat_desc_sw *prev, *new;
	struct ioat_dma_descriptor *hw;
	int append = 0;
	dma_cookie_t cookie;
	LIST_HEAD(new_chain);
	u32 copy;
	size_t len;
	dma_addr_t src, dst;
	int orig_ack;
	unsigned int desc_count = 0;

	/* src and dest and len are stored in the initial descriptor */
	len = first->len;
	src = first->src;
	dst = first->dst;
	orig_ack = first->async_tx.ack;
	new = first;

	spin_lock_bh(&ioat_chan->desc_lock);
	prev = to_ioat_desc(ioat_chan->used_desc.prev);
	prefetch(prev->hw);
	do {
		copy = min((u32) len, ioat_chan->xfercap);

		new->async_tx.ack = 1;

		hw = new->hw;
		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dst;

		/* chain together the physical address list for the HW */
		prev->hw->next = (u64) new->async_tx.phys;

		len -= copy;
		dst += copy;
		src += copy;

		list_add_tail(&new->node, &new_chain);
		desc_count++;
		prev = new;
	} while (len && (new = ioat_dma_get_next_descriptor(ioat_chan)));

	hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
	new->tx_cnt = desc_count;
	new->async_tx.ack = orig_ack; /* client is in control of this ack */

	/* store the original values for use in later cleanup */
	if (new != first) {
		new->src = first->src;
		new->dst = first->dst;
		new->len = first->len;
	}

	/* cookie incr and addition to used_list must be atomic */
	cookie = ioat_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	ioat_chan->common.cookie = new->async_tx.cookie = cookie;

	/* write address into NextDescriptor field of last desc in chain */
	to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
							first->async_tx.phys;
	__list_splice(&new_chain, ioat_chan->used_desc.prev);

	ioat_chan->pending += desc_count;
	if (ioat_chan->pending >= 4) {
		append = 1;
		ioat_chan->pending = 0;
	}
	spin_unlock_bh(&ioat_chan->desc_lock);

	if (append)
		writeb(IOAT_CHANCMD_APPEND,
		       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);

	return cookie;
}
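/*
 * Note on the submit path above: a single memcpy request is split into as
 * many hardware descriptors as xfercap requires; only the final descriptor
 * keeps the client's cookie and ack state.  Descriptors are appended to the
 * chain lazily -- the CHANCMD_APPEND doorbell is only written once at least
 * four descriptors are pending, or when the client calls issue_pending.
 */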
static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
					struct ioat_dma_chan *ioat_chan,
					gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioatdma_device *ioatdma_device;
	dma_addr_t phys;

	ioatdma_device = to_ioatdma_device(ioat_chan->common.device);
	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
	desc_sw->async_tx.tx_set_src = ioat_set_src;
	desc_sw->async_tx.tx_set_dest = ioat_set_dest;
	desc_sw->async_tx.tx_submit = ioat_tx_submit;
	INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
	desc_sw->hw = desc;
	desc_sw->async_tx.phys = phys;

	return desc_sw;
}
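/*
 * Each software descriptor produced above pairs a hardware descriptor from
 * the DMA-coherent dma_pool (desc, at bus address phys) with the host-side
 * struct ioat_desc_sw used for list management, cookies and unmapping.
 */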
/* returns the actual number of allocated descriptors */
static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *desc = NULL;
	u16 chanctrl;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/* have we already been set up? */
	if (!list_empty(&ioat_chan->free_desc))
		return INITIAL_IOAT_DESC_COUNT;

	/* Setup register to interrupt and write completion status on error */
	chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
		   IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
		   IOAT_CHANCTRL_ERR_COMPLETION_EN;
	writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(&ioat_chan->device->pdev->dev,
			"CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors */
	for (i = 0; i < INITIAL_IOAT_DESC_COUNT; i++) {
		desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
		if (!desc) {
			dev_err(&ioat_chan->device->pdev->dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat_chan->desc_lock);
	list_splice(&tmp_list, &ioat_chan->free_desc);
	spin_unlock_bh(&ioat_chan->desc_lock);
	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	ioat_chan->completion_virt =
		pci_pool_alloc(ioat_chan->device->completion_pool,
			       GFP_KERNEL,
			       &ioat_chan->completion_addr);
	memset(ioat_chan->completion_virt, 0,
	       sizeof(*ioat_chan->completion_virt));
	writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) ioat_chan->completion_addr) >> 32,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
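	/*
	 * The hardware updates this per-channel area with the bus address of
	 * the last descriptor it completed; ioat_dma_memcpy_cleanup() reads
	 * it back to learn how far the chain has progressed.
	 */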
	tasklet_enable(&ioat_chan->cleanup_task);
	ioat_dma_start_null_desc(ioat_chan);
	return i;
}
static void ioat_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device);
	struct ioat_desc_sw *desc, *_desc;
	int in_use_descs = 0;

	tasklet_disable(&ioat_chan->cleanup_task);
	ioat_dma_memcpy_cleanup(ioat_chan);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
	mdelay(100);

	spin_lock_bh(&ioat_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {
		in_use_descs++;
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->async_tx.phys);
		kfree(desc);
	}
	list_for_each_entry_safe(desc, _desc, &ioat_chan->free_desc, node) {
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->async_tx.phys);
		kfree(desc);
	}
	spin_unlock_bh(&ioat_chan->desc_lock);

	pci_pool_free(ioatdma_device->completion_pool,
		      ioat_chan->completion_virt,
		      ioat_chan->completion_addr);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		dev_err(&ioat_chan->device->pdev->dev,
			"Freeing %d in use descriptors!\n",
			in_use_descs - 1);

	ioat_chan->last_completion = ioat_chan->completion_addr = 0;
	ioat_chan->pending = 0;
}
/**
 * ioat_dma_get_next_descriptor - return the next available descriptor
 * @ioat_chan: IOAT DMA channel handle
 *
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held.  Allocates more descriptors if the channel
 * has run out.
 */
static struct ioat_desc_sw *
ioat_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *new = NULL;

	if (!list_empty(&ioat_chan->free_desc)) {
		new = to_ioat_desc(ioat_chan->free_desc.next);
		list_del(&new->node);
	} else {
		/* try to get another desc */
		new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
		/* will this ever happen? */
		/* TODO add upper limit on these */
	}

	return new;
}
static struct dma_async_tx_descriptor *ioat_dma_prep_memcpy(
						struct dma_chan *chan,
						size_t len,
						int int_en)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *new;

	spin_lock_bh(&ioat_chan->desc_lock);
	new = ioat_dma_get_next_descriptor(ioat_chan);
	new->len = len;
	spin_unlock_bh(&ioat_chan->desc_lock);

	return new ? &new->async_tx : NULL;
}
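/*
 * The prep routine above only reserves the first descriptor and records the
 * length; source and destination are filled in later via tx_set_src and
 * tx_set_dest, and the split into xfercap-sized hardware descriptors happens
 * in ioat_tx_submit().
 */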
/**
 * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
 *                                 descriptors to hw
 * @chan: DMA channel handle
 */
static void ioat_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

	if (ioat_chan->pending != 0) {
		ioat_chan->pending = 0;
		writeb(IOAT_CHANCMD_APPEND,
		       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
	}
}
static void ioat_dma_cleanup_tasklet(unsigned long data)
{
	struct ioat_dma_chan *chan = (void *)data;
	ioat_dma_memcpy_cleanup(chan);
	writew(IOAT_CHANCTRL_INT_DISABLE,
	       chan->reg_base + IOAT_CHANCTRL_OFFSET);
}
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
{
	unsigned long phys_complete;
	struct ioat_desc_sw *desc, *_desc;
	dma_cookie_t cookie = 0;

	prefetch(ioat_chan->completion_virt);

	if (!spin_trylock_bh(&ioat_chan->cleanup_lock))
		return;

	/* The completion writeback can happen at any time,
	   so reads by the driver need to be atomic operations.
	   The descriptor physical addresses are limited to 32-bits
	   when the CPU can only do a 32-bit mov. */

#if (BITS_PER_LONG == 64)
	phys_complete =
		ioat_chan->completion_virt->full
		& IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
	phys_complete =
		ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
#endif

	if ((ioat_chan->completion_virt->full
	     & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
	    IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
		dev_err(&ioat_chan->device->pdev->dev,
			"Channel halted, chanerr = %x\n",
			readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));

		/* TODO do something to salvage the situation */
	}

	if (phys_complete == ioat_chan->last_completion) {
		spin_unlock_bh(&ioat_chan->cleanup_lock);
		return;
	}
	spin_lock_bh(&ioat_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {

		/*
		 * Incoming DMA requests may use multiple descriptors, due to
		 * exceeding xfercap, perhaps. If so, only the last one will
		 * have a cookie, and require unmapping.
		 */
		if (desc->async_tx.cookie) {
			cookie = desc->async_tx.cookie;

			/*
			 * yes we are unmapping both _page and _single alloc'd
			 * regions with unmap_page. Is this *really* that bad?
			 */
			pci_unmap_page(ioat_chan->device->pdev,
				       pci_unmap_addr(desc, dst),
				       pci_unmap_len(desc, len),
				       PCI_DMA_FROMDEVICE);
			pci_unmap_page(ioat_chan->device->pdev,
				       pci_unmap_addr(desc, src),
				       pci_unmap_len(desc, len),
				       PCI_DMA_TODEVICE);
		}

		if (desc->async_tx.phys != phys_complete) {
			/*
			 * a completed entry, but not the last, so clean it up
			 * if the client is done with the descriptor
			 */
			if (desc->async_tx.ack) {
				list_del(&desc->node);
				list_add_tail(&desc->node,
					      &ioat_chan->free_desc);
			} else
				desc->async_tx.cookie = 0;
		} else {
			/*
			 * last used desc. Do not remove, so we can append from
			 * it, but don't look at it next time, either
			 */
			desc->async_tx.cookie = 0;

			/* TODO check status bits? */
			break;
		}
	}

	spin_unlock_bh(&ioat_chan->desc_lock);

	ioat_chan->last_completion = phys_complete;
	if (cookie != 0)
		ioat_chan->completed_cookie = cookie;

	spin_unlock_bh(&ioat_chan->cleanup_lock);
}
static void ioat_dma_dependency_added(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	spin_lock_bh(&ioat_chan->desc_lock);
	if (ioat_chan->pending == 0) {
		spin_unlock_bh(&ioat_chan->desc_lock);
		ioat_dma_memcpy_cleanup(ioat_chan);
	} else
		spin_unlock_bh(&ioat_chan->desc_lock);
}
/**
 * ioat_dma_is_complete - poll the status of an IOAT DMA transaction
 * @chan: IOAT DMA channel handle
 * @cookie: DMA transaction identifier
 * @done: if not %NULL, updated with last completed transaction
 * @used: if not %NULL, updated with last used transaction
 */
static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    dma_cookie_t *done,
					    dma_cookie_t *used)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;

	last_used = chan->cookie;
	last_complete = ioat_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS)
		return ret;

	ioat_dma_memcpy_cleanup(ioat_chan);

	last_used = chan->cookie;
	last_complete = ioat_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}
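/*
 * Completion status above is tracked purely with cookies: chan->cookie is the
 * last cookie handed out at submit time and completed_cookie is advanced by
 * ioat_dma_memcpy_cleanup(), so the poll path retries cleanup once before
 * reporting a transaction as still in progress.
 */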
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *desc;

	spin_lock_bh(&ioat_chan->desc_lock);

	desc = ioat_dma_get_next_descriptor(ioat_chan);
	desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL
				| IOAT_DMA_DESCRIPTOR_CTL_INT_GN
				| IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
	desc->hw->next = 0;
	desc->hw->size = 0;
	desc->hw->src_addr = 0;
	desc->hw->dst_addr = 0;
	desc->async_tx.ack = 1;

	list_add_tail(&desc->node, &ioat_chan->used_desc);
	spin_unlock_bh(&ioat_chan->desc_lock);

	writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_LOW);
	writel(((u64) desc->async_tx.phys) >> 32,
	       ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_HIGH);

	writeb(IOAT_CHANCMD_START, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
}
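/*
 * The NULL descriptor written above primes the channel: CHAINADDR is pointed
 * at it and CHANCMD_START kicks the engine.  Because it stays on used_desc,
 * the submit path always has a "previous" descriptor whose next pointer can
 * be patched when new work is appended.
 */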
/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @device: device to be tested
 */
static int ioat_dma_self_test(struct ioatdma_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx = NULL;
	dma_addr_t addr;
	dma_cookie_t cookie;
	int err = 0;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (ioat_dma_alloc_chan_resources(dma_chan) < 1) {
		dev_err(&device->pdev->dev,
			"selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	tx = ioat_dma_prep_memcpy(dma_chan, IOAT_TEST_SIZE, 0);
	if (!tx) {
		dev_err(&device->pdev->dev,
			"Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	addr = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
			      DMA_TO_DEVICE);
	ioat_set_src(addr, tx, 0);
	addr = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
			      DMA_FROM_DEVICE);
	ioat_set_dest(addr, tx, 0);
	cookie = ioat_tx_submit(tx);
	if (cookie < 0) {
		dev_err(&device->pdev->dev,
			"Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	ioat_dma_memcpy_issue_pending(dma_chan);
	msleep(1);

	if (ioat_dma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_err(&device->pdev->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(&device->pdev->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	ioat_dma_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}
static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), "
		 "msix-single-vector, msi, intx");
/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 */
static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
	struct ioat_dma_chan *ioat_chan;
	int err, i, j, msixcnt;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
		goto msix_single_vector;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(&device->pdev->dev, "invalid ioat_interrupt_style %s\n",
		ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = device->common.chancnt;
	for (i = 0; i < msixcnt; i++)
		device->msix_entries[i].entry = i;

	err = pci_enable_msix(device->pdev, device->msix_entries, msixcnt);
	if (err < 0)
		goto msi;
	if (err > 0)
		goto msix_single_vector;

	for (i = 0; i < msixcnt; i++) {
		ioat_chan = ioat_lookup_chan_by_index(device, i);
		err = request_irq(device->msix_entries[i].vector,
				  ioat_dma_do_interrupt_msix,
				  0, "ioat-msix", ioat_chan);
		if (err) {
			for (j = 0; j < i; j++) {
				ioat_chan =
					ioat_lookup_chan_by_index(device, j);
				free_irq(device->msix_entries[j].vector,
					 ioat_chan);
			}
			goto msix_single_vector;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	device->irq_mode = msix_multi_vector;
	goto done;
msix_single_vector:
	device->msix_entries[0].entry = 0;
	err = pci_enable_msix(device->pdev, device->msix_entries, 1);
	if (err)
		goto msi;

	err = request_irq(device->msix_entries[0].vector, ioat_dma_do_interrupt,
			  0, "ioat-msix", device);
	if (err) {
		pci_disable_msix(device->pdev);
		goto msi;
	}
	device->irq_mode = msix_single_vector;
	goto done;

msi:
	err = pci_enable_msi(device->pdev);
	if (err)
		goto intx;

	err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
			  0, "ioat-msi", device);
	if (err) {
		pci_disable_msi(device->pdev);
		goto intx;
	}
	/*
	 * CB 1.2 devices need a bit set in configuration space to enable MSI
	 */
	if (device->version == IOAT_VER_1_2) {
		u32 dmactrl;
		pci_read_config_dword(device->pdev,
				      IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
		pci_write_config_dword(device->pdev,
				       IOAT_PCI_DMACTRL_OFFSET, dmactrl);
	}
	device->irq_mode = msi;
	goto done;

intx:
	err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
			  IRQF_SHARED, "ioat-intx", device);
	if (err)
		goto err_no_irq;
	device->irq_mode = intx;

done:
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;
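	/*
	 * IOAT_INTRCTRL_MASTER_INT_EN is only written once a handler has been
	 * installed; on complete failure the err_no_irq path below clears
	 * INTRCTRL so the device cannot raise interrupts nobody is handling.
	 */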
err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
	dev_err(&device->pdev->dev, "no usable interrupts\n");
	device->irq_mode = none;
	return -1;
}
/**
 * ioat_dma_remove_interrupts - remove whatever interrupts were set
 * @device: ioat device
 */
static void ioat_dma_remove_interrupts(struct ioatdma_device *device)
{
	struct ioat_dma_chan *ioat_chan;
	int i;

	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);

	switch (device->irq_mode) {
	case msix_multi_vector:
		for (i = 0; i < device->common.chancnt; i++) {
			ioat_chan = ioat_lookup_chan_by_index(device, i);
			free_irq(device->msix_entries[i].vector, ioat_chan);
		}
		pci_disable_msix(device->pdev);
		break;
	case msix_single_vector:
		free_irq(device->msix_entries[0].vector, device);
		pci_disable_msix(device->pdev);
		break;
	case msi:
		free_irq(device->pdev->irq, device);
		pci_disable_msi(device->pdev);
		break;
	case intx:
		free_irq(device->pdev->irq, device);
		break;
	case none:
		dev_warn(&device->pdev->dev,
			 "call to %s without interrupts setup\n", __func__);
	}
	device->irq_mode = none;
}
struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
				      void __iomem *iobase)
{
	int err;
	struct ioatdma_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		err = -ENOMEM;
		goto err_kzalloc;
	}
	device->pdev = pdev;
	device->reg_base = iobase;
	device->version = readb(device->reg_base + IOAT_VER_OFFSET);

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor),
					   64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);
	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	INIT_LIST_HEAD(&device->common.channels);
	ioat_dma_enumerate_channels(device);

	dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
	device->common.device_alloc_chan_resources =
						ioat_dma_alloc_chan_resources;
	device->common.device_free_chan_resources =
						ioat_dma_free_chan_resources;
	device->common.device_prep_dma_memcpy = ioat_dma_prep_memcpy;
	device->common.device_is_tx_complete = ioat_dma_is_complete;
	device->common.device_issue_pending = ioat_dma_memcpy_issue_pending;
	device->common.device_dependency_added = ioat_dma_dependency_added;
	device->common.dev = &pdev->dev;
	dev_err(&device->pdev->dev,
		"Intel(R) I/OAT DMA Engine found,"
		" %d channels, device version 0x%02x, driver version %s\n",
		device->common.chancnt, device->version, IOAT_DMA_VERSION);

	err = ioat_dma_setup_interrupts(device);
	if (err)
		goto err_setup_interrupts;

	err = ioat_dma_self_test(device);
	if (err)
		goto err_self_test;

	dma_async_device_register(&device->common);

	return device;

err_self_test:
	ioat_dma_remove_interrupts(device);
err_setup_interrupts:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	kfree(device);
err_kzalloc:
	dev_err(&pdev->dev,
		"Intel(R) I/OAT DMA Engine initialization failed\n");
	return NULL;
}
void ioat_dma_remove(struct ioatdma_device *device)
{
	struct dma_chan *chan, *_chan;
	struct ioat_dma_chan *ioat_chan;

	ioat_dma_remove_interrupts(device);

	dma_async_device_unregister(&device->common);

	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);

	iounmap(device->reg_base);
	pci_release_regions(device->pdev);
	pci_disable_device(device->pdev);

	list_for_each_entry_safe(chan, _chan,
				 &device->common.channels, device_node) {
		ioat_chan = to_ioat_chan(chan);
		list_del(&chan->device_node);
		kfree(ioat_chan);
	}
	kfree(device);
}