/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

/*
 * This driver supports an Intel I/OAT DMA engine (versions >= 2), which
 * does asynchronous data movement and checksumming operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "dma_v2.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

int ioat_ring_alloc_order = 8;
module_param(ioat_ring_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_alloc_order,
		 "ioat2+: allocate 2^n descriptors per channel"
		 " (default: 8 max: 16)");
static int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
module_param(ioat_ring_max_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_max_alloc_order,
		 "ioat2+: upper limit for ring size (default: 16)");

void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;

	ioat->dmacount += ioat2_ring_pending(ioat);
	ioat->issued = ioat->head;
	writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
	dev_dbg(to_dev(chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
}

void ioat2_issue_pending(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	if (ioat2_ring_pending(ioat)) {
		spin_lock_bh(&ioat->prep_lock);
		__ioat2_issue_pending(ioat);
		spin_unlock_bh(&ioat->prep_lock);
	}
}

/**
 * ioat2_update_pending - log pending descriptors
 * @ioat: ioat2+ channel
 *
 * Check if the number of unsubmitted descriptors has exceeded the
 * watermark.  Called with prep_lock held
 */
static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
{
	if (ioat2_ring_pending(ioat) > ioat_pending_level)
		__ioat2_issue_pending(ioat);
}

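/* A null descriptor moves no data; it is used to (re)prime the
 * channel's descriptor chain at bring-up and after an error-induced
 * restart, while still requesting an interrupt and a completion
 * writeback so progress can be observed.
 */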
static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;

	if (ioat2_ring_space(ioat) < 1) {
		dev_err(to_dev(&ioat->base),
			"Unable to start null desc - ring full\n");
		return;
	}

	dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);
	desc = ioat2_get_ring_ent(ioat, ioat->head);

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	ioat2_set_chainaddr(ioat, desc->txd.phys);
	dump_desc_dbg(ioat, desc);
	wmb();
	ioat->head += 1;
	__ioat2_issue_pending(ioat);
}

static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
	spin_lock_bh(&ioat->prep_lock);
	__ioat2_start_null_desc(ioat);
	spin_unlock_bh(&ioat->prep_lock);
}

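/* __cleanup - reclaim completed descriptors
 * Walks the ring from 'tail' toward 'head', unmapping buffers and
 * running client callbacks, until the descriptor whose physical
 * address matches the hardware's completion writeback is reached.
 * Called with cleanup_lock held.
 */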
static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct dma_async_tx_descriptor *tx;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	u16 active;
	int idx = ioat->tail, i;

	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);

	active = ioat2_ring_active(ioat);
	for (i = 0; i < active && !seen_current; i++) {
		smp_read_barrier_depends();
		prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
		desc = ioat2_get_ring_ent(ioat, idx + i);
		tx = &desc->txd;
		dump_desc_dbg(ioat, desc);
		if (tx->cookie) {
			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
			dma_cookie_complete(tx);
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys == phys_complete)
			seen_current = true;
	}
	smp_mb(); /* finish all descriptor reads before incrementing tail */
	ioat->tail = idx + i;
	BUG_ON(active && !seen_current); /* no active descs have written a completion? */

	chan->last_completion = phys_complete;
	if (active - i == 0) {
		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
			__func__);
		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
}

/**
 * ioat2_cleanup - clean finished descriptors (advance tail pointer)
 * @chan: ioat channel to be cleaned up
 */
static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;

	spin_lock_bh(&chan->cleanup_lock);
	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);
	spin_unlock_bh(&chan->cleanup_lock);
}

void ioat2_cleanup_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);

	ioat2_cleanup(ioat);
	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

void __ioat2_restart_chan(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;

	/* set the tail to be re-issued */
	ioat->issued = ioat->tail;
	ioat->dmacount = 0;
	set_bit(IOAT_COMPLETION_PENDING, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	dev_dbg(to_dev(chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);

	if (ioat2_ring_pending(ioat)) {
		struct ioat_ring_ent *desc;

		desc = ioat2_get_ring_ent(ioat, ioat->tail);
		ioat2_set_chainaddr(ioat, desc->txd.phys);
		__ioat2_issue_pending(ioat);
	} else
		__ioat2_start_null_desc(ioat);
}

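/* ioat2_quiesce - suspend the channel and wait for it to go inactive
 * (a zero timeout means wait indefinitely)
 */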
int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;
	u32 status;

	status = ioat_chansts(chan);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(chan);
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		if (tmo && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		status = ioat_chansts(chan);
		cpu_relax();
	}

	return err;
}

int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;

	ioat_reset(chan);
	while (ioat_reset_pending(chan)) {
		if (end && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		msleep(100);
	}

	return err;
}

static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;

	ioat2_quiesce(chan, 0);
	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	__ioat2_restart_chan(ioat);
}

void ioat2_timer_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;

	if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
		dma_addr_t phys_complete;
		u64 status;

		status = ioat_chansts(chan);

		/* when halted due to errors check for channel
		 * programming errors before advancing the completion state
		 */
		if (is_ioat_halted(status)) {
			u32 chanerr;

			chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
			dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
				__func__, chanerr);
			if (test_bit(IOAT_RUN, &chan->state))
				BUG_ON(is_ioat_bug(chanerr));
			else /* we never got off the ground */
				return;
		}

		/* if we haven't made progress and we have already
		 * acknowledged a pending completion once, then be more
		 * forceful with a restart
		 */
		spin_lock_bh(&chan->cleanup_lock);
		if (ioat_cleanup_preamble(chan, &phys_complete)) {
			__cleanup(ioat, phys_complete);
		} else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
			spin_lock_bh(&ioat->prep_lock);
			ioat2_restart_channel(ioat);
			spin_unlock_bh(&ioat->prep_lock);
		} else {
			set_bit(IOAT_COMPLETION_ACK, &chan->state);
			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		}
		spin_unlock_bh(&chan->cleanup_lock);
	} else {
		u16 active;

		/* if the ring is idle, empty, and oversized try to step
		 * down the size
		 */
		spin_lock_bh(&chan->cleanup_lock);
		spin_lock_bh(&ioat->prep_lock);
		active = ioat2_ring_active(ioat);
		if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
			reshape_ring(ioat, ioat->alloc_order-1);
		spin_unlock_bh(&ioat->prep_lock);
		spin_unlock_bh(&chan->cleanup_lock);

		/* keep shrinking until we get back to our minimum
		 * default size
		 */
		if (ioat->alloc_order > ioat_get_alloc_order())
			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
}

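/* ioat2_reset_hw - quiesce the channel, clear any latched error
 * status, and issue a synchronous reset
 */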
static int ioat2_reset_hw(struct ioat_chan_common *chan)
{
	/* throw away whatever the channel was doing and get it initialized */
	u32 chanerr;

	ioat2_quiesce(chan, msecs_to_jiffies(100));

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);

	return ioat2_reset_sync(chan, msecs_to_jiffies(200));
}

/**
 * ioat2_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
int ioat2_enumerate_channels(struct ioatdma_device *device)
{
	struct ioat2_dma_chan *ioat;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;
	u8 xfercap_log;
	int i;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(device->idx));
		dma->chancnt = ARRAY_SIZE(device->idx);
	}
	xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_log &= 0x1f; /* bits [4:0] valid */
	if (xfercap_log == 0)
		return 0;
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);

	/* FIXME which i/oat version is i7300? */
#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		dma->chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
		if (!ioat)
			break;

		ioat_init_channel(device, &ioat->base, i);
		ioat->xfercap_log = xfercap_log;
		spin_lock_init(&ioat->prep_lock);
		if (device->reset_hw(&ioat->base)) {
			i = 0;
			break;
		}
	}
	dma->chancnt = i;
	return i;
}

static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	dma_cookie_t cookie;

	cookie = dma_cookie_assign(tx);
	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	/* make descriptor updates visible before advancing ioat->head,
	 * this is purposefully not smp_wmb() since we are also
	 * publishing the descriptor updates to a dma device
	 */
	wmb();

	ioat->head += ioat->produce;

	ioat2_update_pending(ioat);
	spin_unlock_bh(&ioat->prep_lock);

	return cookie;
}

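/* Each ring entry pairs a hardware descriptor (DMA-coherent memory
 * from the pci pool) with its software state (allocated from the
 * ioat2 slab cache).
 */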
static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
{
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	struct ioatdma_device *dma;
	dma_addr_t phys;

	dma = to_ioatdma_device(chan->device);
	hw = pci_pool_alloc(dma->dma_pool, flags, &phys);
	if (!hw)
		return NULL;
	memset(hw, 0, sizeof(*hw));

	desc = kmem_cache_zalloc(ioat2_cache, flags);
	if (!desc) {
		pci_pool_free(dma->dma_pool, hw, phys);
		return NULL;
	}

	dma_async_tx_descriptor_init(&desc->txd, chan);
	desc->txd.tx_submit = ioat2_tx_submit_unlock;
	desc->hw = hw;
	desc->txd.phys = phys;
	return desc;
}

static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
	struct ioatdma_device *dma;

	dma = to_ioatdma_device(chan->device);
	pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys);
	kmem_cache_free(ioat2_cache, desc);
}

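/* Allocate the software descriptor array and hardware-link the
 * descriptors into a circular chain: each descriptor's 'next' field
 * holds the physical address of its successor, and the last points
 * back at the first.
 */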
static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
	struct ioat_ring_ent **ring;
	int descs = 1 << order;
	int i;

	if (order > ioat_get_max_alloc_order())
		return NULL;

	/* allocate the array to hold the software ring */
	ring = kcalloc(descs, sizeof(*ring), flags);
	if (!ring)
		return NULL;
	for (i = 0; i < descs; i++) {
		ring[i] = ioat2_alloc_ring_ent(c, flags);
		if (!ring[i]) {
			while (i--)
				ioat2_free_ring_ent(ring[i], c);
			kfree(ring);
			return NULL;
		}
		set_desc_id(ring[i], i);
	}

	/* link descs */
	for (i = 0; i < descs-1; i++) {
		struct ioat_ring_ent *next = ring[i+1];
		struct ioat_dma_descriptor *hw = ring[i]->hw;

		hw->next = next->txd.phys;
	}
	ring[i]->hw->next = ring[0]->txd.phys;

	return ring;
}

void ioat2_free_chan_resources(struct dma_chan *c);

/* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring
 * @chan: channel to be initialized
 */
int ioat2_alloc_chan_resources(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_ring_ent **ring;
	u64 status;
	int order;
	int i = 0;

	/* have we already been set up? */
	if (ioat->ring)
		return 1 << ioat->alloc_order;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	chan->completion = pci_pool_alloc(chan->device->completion_pool,
					  GFP_KERNEL, &chan->completion_dma);
	if (!chan->completion)
		return -ENOMEM;

	memset(chan->completion, 0, sizeof(*chan->completion));
	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) chan->completion_dma) >> 32,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	order = ioat_get_alloc_order();
	ring = ioat2_alloc_ring(c, order, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	spin_lock_bh(&chan->cleanup_lock);
	spin_lock_bh(&ioat->prep_lock);
	ioat->ring = ring;
	ioat->head = 0;
	ioat->issued = 0;
	ioat->tail = 0;
	ioat->alloc_order = order;
	spin_unlock_bh(&ioat->prep_lock);
	spin_unlock_bh(&chan->cleanup_lock);

	tasklet_enable(&chan->cleanup_task);
	ioat2_start_null_desc(ioat);

	/* check that we got off the ground */
	do {
		udelay(1);
		status = ioat_chansts(chan);
	} while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));

	if (is_ioat_active(status) || is_ioat_idle(status)) {
		set_bit(IOAT_RUN, &chan->state);
		return 1 << ioat->alloc_order;
	} else {
		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);

		dev_WARN(to_dev(chan),
			 "failed to start channel chanerr: %#x\n", chanerr);
		ioat2_free_chan_resources(c);
		return -EFAULT;
	}
}

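/* reshape_ring - grow or shrink the descriptor ring to 2^order entries
 * Returns true on success.  Called with cleanup_lock and prep_lock held.
 */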
bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
{
	/* reshape differs from normal ring allocation in that we want
	 * to allocate a new software ring while only
	 * extending/truncating the hardware ring
	 */
	struct ioat_chan_common *chan = &ioat->base;
	struct dma_chan *c = &chan->common;
	const u32 curr_size = ioat2_ring_size(ioat);
	const u16 active = ioat2_ring_active(ioat);
	const u32 new_size = 1 << order;
	struct ioat_ring_ent **ring;
	u16 i;

	if (order > ioat_get_max_alloc_order())
		return false;

	/* double check that we have at least 1 free descriptor */
	if (active == curr_size)
		return false;

	/* when shrinking, verify that we can hold the current active
	 * set in the new ring
	 */
	if (active >= new_size)
		return false;

	/* allocate the array to hold the software ring */
	ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT);
	if (!ring)
		return false;

	/* allocate/trim descriptors as needed */
	if (new_size > curr_size) {
		/* copy current descriptors to the new ring */
		for (i = 0; i < curr_size; i++) {
			u16 curr_idx = (ioat->tail+i) & (curr_size-1);
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat->ring[curr_idx];
			set_desc_id(ring[new_idx], new_idx);
		}

		/* add new descriptors to the ring */
		for (i = curr_size; i < new_size; i++) {
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat2_alloc_ring_ent(c, GFP_NOWAIT);
			if (!ring[new_idx]) {
				while (i--) {
					u16 new_idx = (ioat->tail+i) & (new_size-1);

					ioat2_free_ring_ent(ring[new_idx], c);
				}
				kfree(ring);
				return false;
			}
			set_desc_id(ring[new_idx], new_idx);
		}

		/* hw link new descriptors */
		for (i = curr_size-1; i < new_size; i++) {
			u16 new_idx = (ioat->tail+i) & (new_size-1);
			struct ioat_ring_ent *next = ring[(new_idx+1) & (new_size-1)];
			struct ioat_dma_descriptor *hw = ring[new_idx]->hw;

			hw->next = next->txd.phys;
		}
	} else {
		struct ioat_dma_descriptor *hw;
		struct ioat_ring_ent *next;

		/* copy current descriptors to the new ring, dropping the
		 * removed descriptors
		 */
		for (i = 0; i < new_size; i++) {
			u16 curr_idx = (ioat->tail+i) & (curr_size-1);
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat->ring[curr_idx];
			set_desc_id(ring[new_idx], new_idx);
		}

		/* free deleted descriptors */
		for (i = new_size; i < curr_size; i++) {
			struct ioat_ring_ent *ent;

			ent = ioat2_get_ring_ent(ioat, ioat->tail+i);
			ioat2_free_ring_ent(ent, c);
		}

		/* fix up hardware ring */
		hw = ring[(ioat->tail+new_size-1) & (new_size-1)]->hw;
		next = ring[(ioat->tail+new_size) & (new_size-1)];
		hw->next = next->txd.phys;
	}

	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
		__func__, new_size);

	kfree(ioat->ring);
	ioat->ring = ring;
	ioat->alloc_order = order;

	return true;
}

/**
 * ioat2_check_space_lock - verify space and grab ring producer lock
 * @ioat: ioat2,3 channel (ring) to operate on
 * @num_descs: allocation length
 */
int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs)
{
	struct ioat_chan_common *chan = &ioat->base;
	bool retry;

 retry:
	spin_lock_bh(&ioat->prep_lock);
	/* never allow the last descriptor to be consumed, we need at
	 * least one free at all times to allow for on-the-fly ring
	 * resizing.
	 */
	if (likely(ioat2_ring_space(ioat) > num_descs)) {
		dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n",
			__func__, num_descs, ioat->head, ioat->tail, ioat->issued);
		ioat->produce = num_descs;
		return 0;  /* with ioat->prep_lock held */
	}
	retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &chan->state);
	spin_unlock_bh(&ioat->prep_lock);

	/* is another cpu already trying to expand the ring? */
	if (retry)
		goto retry;

	spin_lock_bh(&chan->cleanup_lock);
	spin_lock_bh(&ioat->prep_lock);
	retry = reshape_ring(ioat, ioat->alloc_order + 1);
	clear_bit(IOAT_RESHAPE_PENDING, &chan->state);
	spin_unlock_bh(&ioat->prep_lock);
	spin_unlock_bh(&chan->cleanup_lock);

	/* if we were able to expand the ring retry the allocation */
	if (retry)
		goto retry;

	if (printk_ratelimit())
		dev_dbg(to_dev(chan), "%s: ring full! num_descs: %d (%x:%x:%x)\n",
			__func__, num_descs, ioat->head, ioat->tail, ioat->issued);

	/* progress reclaim in the allocation failure case we may be
	 * called under bh_disabled so we need to trigger the timer
	 * event directly
	 */
	if (jiffies > chan->timer.expires && timer_pending(&chan->timer)) {
		struct ioatdma_device *device = chan->device;

		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		device->timer_fn((unsigned long) &chan->common);
	}

	return -ENOMEM;
}

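/* Note the locking convention: a successful ioat2_check_space_lock()
 * returns with prep_lock held, and the lock is only dropped once the
 * descriptor is submitted via ioat2_tx_submit_unlock(), guaranteeing
 * in-order submission.
 */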
struct dma_async_tx_descriptor *
ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			   dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	dma_addr_t dst = dma_dest;
	dma_addr_t src = dma_src;
	size_t total_len = len;
	int num_descs, idx, i;

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0)
		idx = ioat->head;
	else
		return NULL;
	i = 0;
	do {
		size_t copy = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		hw = desc->hw;

		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dst;

		len -= copy;
		dst += copy;
		src += copy;
		dump_desc_dbg(ioat, desc);
	} while (++i < num_descs);

	desc->txd.flags = flags;
	desc->len = total_len;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	hw->ctl_f.compl_write = 1;
	dump_desc_dbg(ioat, desc);
	/* we leave the channel locked to ensure in order submission */

	return &desc->txd;
}

/**
 * ioat2_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
void ioat2_free_chan_resources(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *device = chan->device;
	struct ioat_ring_ent *desc;
	const u16 total_descs = 1 << ioat->alloc_order;
	int descs;
	int i;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (!ioat->ring)
		return;

	tasklet_disable(&chan->cleanup_task);
	del_timer_sync(&chan->timer);
	device->cleanup_fn((unsigned long) c);
	device->reset_hw(chan);
	clear_bit(IOAT_RUN, &chan->state);

	spin_lock_bh(&chan->cleanup_lock);
	spin_lock_bh(&ioat->prep_lock);
	descs = ioat2_ring_space(ioat);
	dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs);
	for (i = 0; i < descs; i++) {
		desc = ioat2_get_ring_ent(ioat, ioat->head + i);
		ioat2_free_ring_ent(desc, c);
	}

	if (descs < total_descs)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			total_descs - descs);

	for (i = 0; i < total_descs - descs; i++) {
		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
		dump_desc_dbg(ioat, desc);
		ioat2_free_ring_ent(desc, c);
	}

	kfree(ioat->ring);
	ioat->ring = NULL;
	ioat->alloc_order = 0;
	pci_pool_free(device->completion_pool, chan->completion,
		      chan->completion_dma);
	spin_unlock_bh(&ioat->prep_lock);
	spin_unlock_bh(&chan->cleanup_lock);

	chan->last_completion = 0;
	chan->completion_dma = 0;
	ioat->dmacount = 0;
}

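/* sysfs attributes exporting ring statistics via the ioat channel kobject */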
static ssize_t ring_size_show(struct dma_chan *c, char *page)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	return sprintf(page, "%d\n", (1 << ioat->alloc_order) & ~1);
}
static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);

static ssize_t ring_active_show(struct dma_chan *c, char *page)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	/* ...taken outside the lock, no need to be precise */
	return sprintf(page, "%d\n", ioat2_ring_active(ioat));
}
static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);

static struct attribute *ioat2_attrs[] = {
	&ring_size_attr.attr,
	&ring_active_attr.attr,
	&ioat_cap_attr.attr,
	&ioat_version_attr.attr,
	NULL,
};

struct kobj_type ioat2_ktype = {
	.sysfs_ops = &ioat_sysfs_ops,
	.default_attrs = ioat2_attrs,
};

int ioat2_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioat_chan_common *chan;
	int err;

	device->enumerate_channels = ioat2_enumerate_channels;
	device->reset_hw = ioat2_reset_hw;
	device->cleanup_fn = ioat2_cleanup_event;
	device->timer_fn = ioat2_timer_event;
	device->self_test = ioat_dma_self_test;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat2_issue_pending;
	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
	dma->device_free_chan_resources = ioat2_free_chan_resources;
	dma->device_tx_status = ioat_dma_tx_status;

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(2048);

	list_for_each_entry(c, &dma->channels, device_node) {
		chan = to_chan_common(c);
		writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU,
		       chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(device);
	if (err)
		return err;

	ioat_kobject_add(device, &ioat2_ktype);

	if (dca)
		device->dca = ioat2_dca_init(pdev, device->reg_base);

	return err;
}