/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");

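/*
 * Doorbell batching note (summary of the submit path below): descriptors
 * queued via tx_submit only increment ioat->pending; the APPEND doorbell
 * is written once pending reaches ioat_pending_level, or earlier when a
 * client calls ->device_issue_pending.  Raising the level trades
 * submission latency for fewer MMIO writes.
 */
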
/* internal functions */
static void ioat1_cleanup(struct ioat_dma_chan *ioat);
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat);

/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioat_chan_common *chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
		chan = ioat_chan_by_index(instance, bit);
		if (test_bit(IOAT_RUN, &chan->state))
			tasklet_schedule(&chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioat_chan_common *chan = data;

	if (test_bit(IOAT_RUN, &chan->state))
		tasklet_schedule(&chan->cleanup_task);

	return IRQ_HANDLED;
}

/* common channel initialization */
void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c = &chan->common;
	unsigned long data = (unsigned long) c;

	chan->device = device;
	chan->reg_base = device->reg_base + (0x80 * (idx + 1));
	spin_lock_init(&chan->cleanup_lock);
	chan->common.device = dma;
	dma_cookie_init(&chan->common);
	list_add_tail(&chan->common.device_node, &dma->channels);
	device->idx[idx] = chan;
	init_timer(&chan->timer);
	chan->timer.function = device->timer_fn;
	chan->timer.data = data;
	tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
}

/**
 * ioat1_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat1_enumerate_channels(struct ioatdma_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(device->idx));
		dma->chancnt = ARRAY_SIZE(device->idx);
	}
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_scale &= 0x1f; /* bits [4:0] valid */
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);

#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		dma->chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
		if (!ioat)
			break;

		ioat_init_channel(device, &ioat->base, i);
		ioat->xfercap = xfercap;
		spin_lock_init(&ioat->desc_lock);
		INIT_LIST_HEAD(&ioat->free_desc);
		INIT_LIST_HEAD(&ioat->used_desc);
	}
	dma->chancnt = i;
	return i;
}

/**
 * __ioat1_dma_memcpy_issue_pending - push potentially unrecognized appended
 *                                    descriptors to hw
 * @ioat: IOAT DMA channel handle
 */
static inline void
__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
{
	void __iomem *reg_base = ioat->base.reg_base;

	dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n",
		__func__, ioat->pending);
	ioat->pending = 0;
	writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
}

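/* dmaengine ->device_issue_pending hook: flush any batched descriptors */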
static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(chan);

	if (ioat->pending > 0) {
		spin_lock_bh(&ioat->desc_lock);
		__ioat1_dma_memcpy_issue_pending(ioat);
		spin_unlock_bh(&ioat->desc_lock);
	}
}

/**
 * ioat1_reset_channel - restart a channel
 * @ioat: IOAT DMA channel handle
 */
static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	void __iomem *reg_base = chan->reg_base;
	u32 chansts, chanerr;

	dev_warn(to_dev(chan), "reset\n");
	chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
	chansts = *chan->completion & IOAT_CHANSTS_STATUS;
	if (chanerr) {
		dev_err(to_dev(chan),
			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
			chan_num(chan), chansts, chanerr);
		writel(chanerr, reg_base + IOAT_CHANERR_OFFSET);
	}

	/*
	 * whack it upside the head with a reset
	 * and wait for things to settle out.
	 * force the pending count to a really big negative
	 * to make sure no one forces an issue_pending
	 * while we're waiting.
	 */

	ioat->pending = INT_MIN;
	writeb(IOAT_CHANCMD_RESET,
	       reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	set_bit(IOAT_RESET_PENDING, &chan->state);
	mod_timer(&chan->timer, jiffies + RESET_DELAY);
}

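/**
 * ioat1_tx_submit - attach a prepared descriptor chain to the hardware ring
 * @tx: descriptor returned by ioat1_dma_prep_memcpy
 *
 * Assigns a cookie, links the new chain after the current tail of
 * used_desc (after a wmb() so the hardware never sees a half-written
 * descriptor), and rings the APPEND doorbell once ioat->pending crosses
 * ioat_pending_level.
 */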
static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *first;
	struct ioat_desc_sw *chain_tail;
	dma_cookie_t cookie;

	spin_lock_bh(&ioat->desc_lock);
	/* cookie incr and addition to used_list must be atomic */
	cookie = dma_cookie_assign(tx);
	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

	/* write address into NextDescriptor field of last desc in chain */
	first = to_ioat_desc(desc->tx_list.next);
	chain_tail = to_ioat_desc(ioat->used_desc.prev);
	/* make descriptor updates globally visible before chaining */
	wmb();
	chain_tail->hw->next = first->txd.phys;
	list_splice_tail_init(&desc->tx_list, &ioat->used_desc);
	dump_desc_dbg(ioat, chain_tail);
	dump_desc_dbg(ioat, first);

	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	ioat->active += desc->hw->tx_cnt;
	ioat->pending += desc->hw->tx_cnt;
	if (ioat->pending >= ioat_pending_level)
		__ioat1_dma_memcpy_issue_pending(ioat);
	spin_unlock_bh(&ioat->desc_lock);

	return cookie;
}

/**
 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
 * @ioat: the channel supplying the memory pool for the descriptors
 * @flags: allocation flags
 */
static struct ioat_desc_sw *
ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioatdma_device *ioatdma_device;
	dma_addr_t phys;

	ioatdma_device = ioat->base.device;
	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));

	INIT_LIST_HEAD(&desc_sw->tx_list);
	dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common);
	desc_sw->txd.tx_submit = ioat1_tx_submit;
	desc_sw->hw = desc;
	desc_sw->txd.phys = phys;
	set_desc_id(desc_sw, -1);

	return desc_sw;
}

static int ioat_initial_desc_count = 256;
module_param(ioat_initial_desc_count, int, 0644);
MODULE_PARM_DESC(ioat_initial_desc_count,
		 "ioat1: initial descriptors per channel (default: 256)");
/**
 * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors
 * @c: the channel to be filled out
 */
static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/* have we already been set up? */
	if (!list_empty(&ioat->free_desc))
		return ioat->desccount;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors */
	for (i = 0; i < ioat_initial_desc_count; i++) {
		desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL);
		if (!desc) {
			dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
			break;
		}
		set_desc_id(desc, i);
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat->desc_lock);
	ioat->desccount = i;
	list_splice(&tmp_list, &ioat->free_desc);
	spin_unlock_bh(&ioat->desc_lock);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	chan->completion = pci_pool_alloc(chan->device->completion_pool,
					  GFP_KERNEL, &chan->completion_dma);
	memset(chan->completion, 0, sizeof(*chan->completion));
	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) chan->completion_dma) >> 32,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	set_bit(IOAT_RUN, &chan->state);
	ioat1_dma_start_null_desc(ioat); /* give chain to dma device */
	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
		__func__, ioat->desccount);
	return ioat->desccount;
}

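/**
 * ioat_stop - quiesce a channel so cleanup can no longer be re-armed
 * @chan: channel to stop
 *
 * Clears IOAT_RUN, drains the channel's irq, timer, and tasklet, then
 * runs one final cleanup pass by hand.
 */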
void ioat_stop(struct ioat_chan_common *chan)
{
	struct ioatdma_device *device = chan->device;
	struct pci_dev *pdev = device->pdev;
	int chan_id = chan_num(chan);
	struct msix_entry *msix;

	/* 1/ stop irq from firing tasklets
	 * 2/ stop the tasklet from re-arming irqs
	 */
	clear_bit(IOAT_RUN, &chan->state);

	/* flush inflight interrupts */
	switch (device->irq_mode) {
	case IOAT_MSIX:
		msix = &device->msix_entries[chan_id];
		synchronize_irq(msix->vector);
		break;
	case IOAT_MSI:
	case IOAT_INTX:
		synchronize_irq(pdev->irq);
		break;
	default:
		break;
	}

	/* flush inflight timers */
	del_timer_sync(&chan->timer);

	/* flush inflight tasklet runs */
	tasklet_kill(&chan->cleanup_task);

	/* final cleanup now that everything is quiesced and can't re-arm */
	device->cleanup_fn((unsigned long) &chan->common);
}

/**
 * ioat1_dma_free_chan_resources - release all the descriptors
 * @c: the channel to be cleaned
 */
static void ioat1_dma_free_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *ioatdma_device = chan->device;
	struct ioat_desc_sw *desc, *_desc;
	int in_use_descs = 0;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (ioat->desccount == 0)
		return;

	ioat_stop(chan);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET,
	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	mdelay(100);

	spin_lock_bh(&ioat->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
		dev_dbg(to_dev(chan), "%s: freeing %d from used list\n",
			__func__, desc_id(desc));
		dump_desc_dbg(ioat, desc);
		in_use_descs++;
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	list_for_each_entry_safe(desc, _desc,
				 &ioat->free_desc, node) {
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	spin_unlock_bh(&ioat->desc_lock);

	pci_pool_free(ioatdma_device->completion_pool,
		      chan->completion,
		      chan->completion_dma);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			in_use_descs - 1);

	chan->last_completion = 0;
	chan->completion_dma = 0;
	ioat->pending = 0;
	ioat->desccount = 0;
}

/**
 * ioat1_dma_get_next_descriptor - return the next available descriptor
 * @ioat: IOAT DMA channel handle
 *
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held.  Allocates more descriptors if the channel
 * has run out.
 */
static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
{
	struct ioat_desc_sw *new;

	if (!list_empty(&ioat->free_desc)) {
		new = to_ioat_desc(ioat->free_desc.next);
		list_del(&new->node);
	} else {
		/* try to get another desc */
		new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC);
		if (!new) {
			dev_err(to_dev(&ioat->base), "alloc failed\n");
			return NULL;
		}
	}
	dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n",
		__func__, desc_id(new));
	prefetch(new->hw);
	return new;
}

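/*
 * dmaengine ->device_prep_dma_memcpy hook: splits a copy into a chain of
 * descriptors no larger than ioat->xfercap; only the last descriptor in
 * the chain gets int_en/compl_write set, so one completion covers the
 * whole transfer.
 */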
static struct dma_async_tx_descriptor *
ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
		      dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc;
	size_t copy;
	LIST_HEAD(chain);
	dma_addr_t src = dma_src;
	dma_addr_t dest = dma_dest;
	size_t total_len = len;
	struct ioat_dma_descriptor *hw = NULL;
	int tx_cnt = 0;

	spin_lock_bh(&ioat->desc_lock);
	desc = ioat1_dma_get_next_descriptor(ioat);
	do {
		if (!desc)
			break;

		tx_cnt++;
		copy = min_t(size_t, len, ioat->xfercap);

		hw = desc->hw;
		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dest;

		list_add_tail(&desc->node, &chain);

		len -= copy;
		dest += copy;
		src += copy;
		if (len) {
			struct ioat_desc_sw *next;

			async_tx_ack(&desc->txd);
			next = ioat1_dma_get_next_descriptor(ioat);
			hw->next = next ? next->txd.phys : 0;
			dump_desc_dbg(ioat, desc);
			desc = next;
		} else
			hw->next = 0;
	} while (len);

	if (!desc) {
		struct ioat_chan_common *chan = &ioat->base;

		dev_err(to_dev(chan),
			"chan%d - get_next_desc failed\n", chan_num(chan));
		list_splice(&chain, &ioat->free_desc);
		spin_unlock_bh(&ioat->desc_lock);
		return NULL;
	}
	spin_unlock_bh(&ioat->desc_lock);

	desc->txd.flags = flags;
	desc->len = total_len;
	list_splice(&chain, &desc->tx_list);
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->tx_cnt = tx_cnt;
	dump_desc_dbg(ioat, desc);

	return &desc->txd;
}

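/* tasklet body: reap finished descriptors, then re-arm channel interrupts */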
static void ioat1_cleanup_event(unsigned long data)
{
	struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;

	ioat1_cleanup(ioat);
	if (!test_bit(IOAT_RUN, &chan->state))
		return;
	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

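/*
 * Read the channel's completion writeback area and convert it to the bus
 * address of the last descriptor the hardware finished; also reports a
 * halted channel via CHANERR.
 */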
dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
{
	dma_addr_t phys_complete;
	u64 completion;

	completion = *chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	if (is_ioat_halted(completion)) {
		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(chan), "Channel halted, chanerr = %x\n",
			chanerr);

		/* TODO do something to salvage the situation */
	}

	return phys_complete;
}

bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
			   dma_addr_t *phys_complete)
{
	*phys_complete = ioat_get_current_completion(chan);
	if (*phys_complete == chan->last_completion)
		return false;
	clear_bit(IOAT_COMPLETION_ACK, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}

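/*
 * Walk used_desc up to and including the descriptor whose bus address
 * matches phys_complete: complete cookies, run callbacks, and recycle
 * acked descriptors back onto free_desc.  Caller holds desc_lock and
 * cleanup_lock.
 */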
static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct list_head *_desc, *n;
	struct dma_async_tx_descriptor *tx;

	dev_dbg(to_dev(chan), "%s: phys_complete: %llx\n",
		__func__, (unsigned long long) phys_complete);
	list_for_each_safe(_desc, n, &ioat->used_desc) {
		struct ioat_desc_sw *desc;

		prefetch(n);
		desc = list_entry(_desc, typeof(*desc), node);
		tx = &desc->txd;
		/*
		 * Incoming DMA requests may use multiple descriptors,
		 * due to exceeding xfercap, perhaps. If so, only the
		 * last one will have a cookie, and require unmapping.
		 */
		dump_desc_dbg(ioat, desc);
		if (tx->cookie) {
			dma_cookie_complete(tx);
			dma_descriptor_unmap(tx);
			ioat->active -= desc->hw->tx_cnt;
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys != phys_complete) {
			/*
			 * a completed entry, but not the last, so clean
			 * up if the client is done with the descriptor
			 */
			if (async_tx_test_ack(tx))
				list_move_tail(&desc->node, &ioat->free_desc);
		} else {
			/*
			 * last used desc. Do not remove, so we can
			 * append from it.
			 */

			/* if nothing else is pending, cancel the
			 * completion timeout
			 */
			if (n == &ioat->used_desc) {
				dev_dbg(to_dev(chan),
					"%s cancel completion timeout\n",
					__func__);
				clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
			}

			/* TODO check status bits? */
			break;
		}
	}

	chan->last_completion = phys_complete;
}

/**
 * ioat1_cleanup - clean up finished descriptors
 * @ioat: ioat channel to be cleaned up
 *
 * To prevent lock contention we defer cleanup when the locks are
 * contended with a terminal timeout that forces cleanup and catches
 * completion notification errors.
 */
static void ioat1_cleanup(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;

	prefetch(chan->completion);

	if (!spin_trylock_bh(&chan->cleanup_lock))
		return;

	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	if (!spin_trylock_bh(&ioat->desc_lock)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	__cleanup(ioat, phys_complete);

	spin_unlock_bh(&ioat->desc_lock);
	spin_unlock_bh(&chan->cleanup_lock);
}

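/*
 * Channel watchdog: finishes a pending reset, forces cleanup when the
 * completion timeout fires, and escalates to a channel reset if no
 * progress has been made since the last acknowledged completion.
 */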
static void ioat1_timer_event(unsigned long data)
{
	struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;

	dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);

	spin_lock_bh(&chan->cleanup_lock);
	if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) {
		struct ioat_desc_sw *desc;

		spin_lock_bh(&ioat->desc_lock);

		/* restart active descriptors */
		desc = to_ioat_desc(ioat->used_desc.prev);
		ioat_set_chainaddr(ioat, desc->txd.phys);
		ioat_start(chan);

		ioat->pending = 0;
		set_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		spin_unlock_bh(&ioat->desc_lock);
	} else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
		dma_addr_t phys_complete;

		spin_lock_bh(&ioat->desc_lock);
		/* if we haven't made progress and we have already
		 * acknowledged a pending completion once, then be more
		 * forceful with a restart
		 */
		if (ioat_cleanup_preamble(chan, &phys_complete))
			__cleanup(ioat, phys_complete);
		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
			ioat1_reset_channel(ioat);
		else {
			u64 status = ioat_chansts(chan);

			/* manually update the last completion address */
			if (ioat_chansts_to_addr(status) != 0)
				*chan->completion = status;

			set_bit(IOAT_COMPLETION_ACK, &chan->state);
			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		}
		spin_unlock_bh(&ioat->desc_lock);
	}
	spin_unlock_bh(&chan->cleanup_lock);
}

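/*
 * dmaengine ->device_tx_status hook: if the cookie has not completed yet,
 * kick the cleanup path once and re-check.
 */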
enum dma_status
ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct ioat_chan_common *chan = to_chan_common(c);
	struct ioatdma_device *device = chan->device;
	enum dma_status ret;

	ret = dma_cookie_status(c, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	device->cleanup_fn((unsigned long) c);

	return dma_cookie_status(c, cookie, txstate);
}

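/*
 * Prime an idle channel with a harmless "null" descriptor so the hardware
 * has a chain head to append real work to.
 */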
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	struct ioat_dma_descriptor *hw;

	spin_lock_bh(&ioat->desc_lock);

	desc = ioat1_dma_get_next_descriptor(ioat);

	if (!desc) {
		dev_err(to_dev(chan),
			"Unable to start null desc - get next desc failed\n");
		spin_unlock_bh(&ioat->desc_lock);
		return;
	}

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	hw->next = 0;
	list_add_tail(&desc->node, &ioat->used_desc);
	dump_desc_dbg(ioat, desc);

	ioat_set_chainaddr(ioat, desc->txd.phys);
	ioat_start(chan);
	spin_unlock_bh(&ioat->desc_lock);
}

/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void ioat_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}

/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @device: device to be tested
 */
int ioat_dma_self_test(struct ioatdma_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_device *dma = &device->common;
	struct device *dev = &device->pdev->dev;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	unsigned long flags;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		dev_err(dev, "selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_src)) {
		dev_err(dev, "mapping src buffer failed\n");
		goto free_resources;
	}
	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_dest)) {
		dev_err(dev, "mapping dest buffer failed\n");
		goto unmap_src;
	}
	flags = DMA_PREP_INTERRUPT;
	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
						   IOAT_TEST_SIZE, flags);
	if (!tx) {
		dev_err(dev, "Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL)
					!= DMA_COMPLETE) {
		dev_err(dev, "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(dev, "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

unmap_dma:
	dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
unmap_src:
	dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

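/*
 * The self test above doubles as a usage reference.  A minimal sketch of
 * how an ordinary dmaengine client would drive this engine (illustrative
 * only, not part of this driver; error handling and unmapping elided, and
 * buf_len/src_dma/dst_dma are hypothetical caller-provided values):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst_dma, src_dma,
 *						  buf_len, DMA_PREP_INTERRUPT);
 *	cookie = tx->tx_submit(tx);
 *	chan->device->device_issue_pending(chan);
 *	dma_sync_wait(chan, cookie);	(polls device_tx_status)
 *	dma_release_channel(chan);
 */
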
static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), msi, intx");

/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 */
int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
	struct ioat_chan_common *chan;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	int i, j, msixcnt;
	int err = -EINVAL;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = device->common.chancnt;
	for (i = 0; i < msixcnt; i++)
		device->msix_entries[i].entry = i;

	err = pci_enable_msix_exact(pdev, device->msix_entries, msixcnt);
	if (err)
		goto msi;

	for (i = 0; i < msixcnt; i++) {
		msix = &device->msix_entries[i];
		chan = ioat_chan_by_index(device, i);
		err = devm_request_irq(dev, msix->vector,
				       ioat_dma_do_interrupt_msix, 0,
				       "ioat-msix", chan);
		if (err) {
			for (j = 0; j < i; j++) {
				msix = &device->msix_entries[j];
				chan = ioat_chan_by_index(device, j);
				devm_free_irq(dev, msix->vector, chan);
			}
			goto msi;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	device->irq_mode = IOAT_MSIX;
	goto done;

msi:
	err = pci_enable_msi(pdev);
	if (err)
		goto intx;

	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
			       "ioat-msi", device);
	if (err) {
		pci_disable_msi(pdev);
		goto intx;
	}
	device->irq_mode = IOAT_MSI;
	goto done;

intx:
	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
			       IRQF_SHARED, "ioat-intx", device);
	if (err)
		goto err_no_irq;

	device->irq_mode = IOAT_INTX;
done:
	if (device->intr_quirk)
		device->intr_quirk(device);
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
	device->irq_mode = IOAT_NOIRQ;
	dev_err(dev, "no usable interrupts\n");
	return err;
}
EXPORT_SYMBOL(ioat_dma_setup_interrupts);

static void ioat_disable_interrupts(struct ioatdma_device *device)
{
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
}

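/*
 * Common probe path shared by the ioat generations: create descriptor and
 * completion pools, enumerate channels, wire up interrupts, and run the
 * device self test before the dma device is registered.
 */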
int ioat_probe(struct ioatdma_device *device)
{
	int err = -ENODEV;
	struct dma_device *dma = &device->common;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor),
					   64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);

	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	device->enumerate_channels(device);

	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->dev = &pdev->dev;

	if (!dma->chancnt) {
		dev_err(dev, "channel enumeration error\n");
		goto err_setup_interrupts;
	}

	err = ioat_dma_setup_interrupts(device);
	if (err)
		goto err_setup_interrupts;

	err = device->self_test(device);
	if (err)
		goto err_self_test;

	return 0;

err_self_test:
	ioat_disable_interrupts(device);
err_setup_interrupts:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	return err;
}

int ioat_register(struct ioatdma_device *device)
{
	int err = dma_async_device_register(&device->common);

	if (err) {
		ioat_disable_interrupts(device);
		pci_pool_destroy(device->completion_pool);
		pci_pool_destroy(device->dma_pool);
	}

	return err;
}

/* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
static void ioat1_intr_quirk(struct ioatdma_device *device)
{
	struct pci_dev *pdev = device->pdev;
	u32 dmactrl;

	pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
	if (pdev->msi_enabled)
		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
	else
		dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
	pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
}

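/* per-channel sysfs attributes, exported under the "quickdata" directory */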
static ssize_t ring_size_show(struct dma_chan *c, char *page)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);

	return sprintf(page, "%d\n", ioat->desccount);
}
static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);

static ssize_t ring_active_show(struct dma_chan *c, char *page)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);

	return sprintf(page, "%d\n", ioat->active);
}
static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);

static ssize_t cap_show(struct dma_chan *c, char *page)
{
	struct dma_device *dma = c->device;

	return sprintf(page, "copy%s%s%s%s%s\n",
		       dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
		       dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
		       dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
		       dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
		       dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
}
struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);

static ssize_t version_show(struct dma_chan *c, char *page)
{
	struct dma_device *dma = c->device;
	struct ioatdma_device *device = to_ioatdma_device(dma);

	return sprintf(page, "%d.%d\n",
		       device->version >> 4, device->version & 0xf);
}
struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);

static struct attribute *ioat1_attrs[] = {
	&ring_size_attr.attr,
	&ring_active_attr.attr,
	&ioat_cap_attr.attr,
	&ioat_version_attr.attr,
	NULL,
};

static ssize_t
ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct ioat_sysfs_entry *entry;
	struct ioat_chan_common *chan;

	entry = container_of(attr, struct ioat_sysfs_entry, attr);
	chan = container_of(kobj, struct ioat_chan_common, kobj);

	if (!entry->show)
		return -EIO;
	return entry->show(&chan->common, page);
}

const struct sysfs_ops ioat_sysfs_ops = {
	.show = ioat_attr_show,
};

static struct kobj_type ioat1_ktype = {
	.sysfs_ops = &ioat_sysfs_ops,
	.default_attrs = ioat1_attrs,
};

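/*
 * Register a "quickdata" sysfs directory beneath each channel's device
 * node; failures are logged and tolerated (the channel simply loses its
 * sysfs entries) so they never block the probe.
 */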
void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c;

	list_for_each_entry(c, &dma->channels, device_node) {
		struct ioat_chan_common *chan = to_chan_common(c);
		struct kobject *parent = &c->dev->device.kobj;
		int err;

		err = kobject_init_and_add(&chan->kobj, type, parent, "quickdata");
		if (err) {
			dev_warn(to_dev(chan),
				 "sysfs init error (%d), continuing...\n", err);
			kobject_put(&chan->kobj);
			set_bit(IOAT_KOBJ_INIT_FAIL, &chan->state);
		}
	}
}

void ioat_kobject_del(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c;

	list_for_each_entry(c, &dma->channels, device_node) {
		struct ioat_chan_common *chan = to_chan_common(c);

		if (!test_bit(IOAT_KOBJ_INIT_FAIL, &chan->state)) {
			kobject_del(&chan->kobj);
			kobject_put(&chan->kobj);
		}
	}
}

int ioat1_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	int err;

	device->intr_quirk = ioat1_intr_quirk;
	device->enumerate_channels = ioat1_enumerate_channels;
	device->self_test = ioat_dma_self_test;
	device->timer_fn = ioat1_timer_event;
	device->cleanup_fn = ioat1_cleanup_event;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
	dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
	dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
	dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
	dma->device_tx_status = ioat_dma_tx_status;

	err = ioat_probe(device);
	if (err)
		return err;
	err = ioat_register(device);
	if (err)
		return err;
	ioat_kobject_add(device, &ioat1_ktype);

	if (dca)
		device->dca = ioat_dca_init(pdev, device->reg_base);

	return err;
}

void ioat_dma_remove(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;

	ioat_disable_interrupts(device);

	ioat_kobject_del(device);

	dma_async_device_unregister(dma);

	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);

	INIT_LIST_HEAD(&dma->channels);
}