Linux 4.10-rc2
[linux-2.6-block.git] / drivers/dma/ioat/init.c

/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dca.h>
#include <linux/aer.h>
#include <linux/sizes.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

MODULE_VERSION(IOAT_DMA_VERSION);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

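/*
 * PCI IDs of the supported I/OAT engines; MODULE_DEVICE_TABLE() below
 * exports this list so the module can be autoloaded when a matching
 * device is discovered.
 */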
static struct pci_device_id ioat_pci_tbl[] = {
	/* I/OAT v3 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) },

	/* I/OAT v3.2 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX9) },

	/* I/OAT v3.3 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) },

	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);

static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void ioat_remove(struct pci_dev *pdev);
static void
ioat_init_channel(struct ioatdma_device *ioat_dma,
		  struct ioatdma_chan *ioat_chan, int idx);
static void ioat_intr_quirk(struct ioatdma_device *ioat_dma);
static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma);
static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma);

static int ioat_dca_enabled = 1;
module_param(ioat_dca_enabled, int, 0644);
MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");
static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), msi, intx");

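/*
 * The parameters above can be set at module load time, for example:
 *
 *	modprobe ioatdma ioat_interrupt_style=msi ioat_dca_enabled=0
 *
 * (illustrative invocation; the parameter names come from the
 * module_param declarations above)
 */
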
struct kmem_cache *ioat_cache;
struct kmem_cache *ioat_sed_cache;

static bool is_jf_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
		return true;
	default:
		return false;
	}
}

static bool is_snb_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
		return true;
	default:
		return false;
	}
}

static bool is_ivb_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
		return true;
	default:
		return false;
	}
}

static bool is_hsw_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
		return true;
	default:
		return false;
	}
}

static bool is_bdx_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BDX0:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX1:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX2:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX3:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX4:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX5:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX6:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX7:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX8:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX9:
		return true;
	default:
		return false;
	}
}

static bool is_xeon_cb32(struct pci_dev *pdev)
{
	return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
		is_hsw_ioat(pdev) || is_bdx_ioat(pdev);
}

bool is_bwd_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
	/* even though not Atom, BDX-DE has same DMA silicon */
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
		return true;
	default:
		return false;
	}
}

static bool is_bwd_noraid(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
		return true;
	default:
		return false;
	}
}

/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void ioat_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}

/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @ioat_dma: dma device to be tested
 */
static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_device *dma = &ioat_dma->dma_dev;
	struct device *dev = &ioat_dma->pdev->dev;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	unsigned long flags;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		dev_err(dev, "selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_src)) {
		dev_err(dev, "mapping src buffer failed\n");
		err = -ENOMEM;
		goto free_resources;
	}
	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_dest)) {
		dev_err(dev, "mapping dest buffer failed\n");
		err = -ENOMEM;
		goto unmap_src;
	}
	flags = DMA_PREP_INTERRUPT;
	tx = ioat_dma->dma_dev.device_prep_dma_memcpy(dma_chan, dma_dest,
						      dma_src, IOAT_TEST_SIZE,
						      flags);
	if (!tx) {
		dev_err(dev, "Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(dev, "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

unmap_dma:
	dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
unmap_src:
	dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @ioat_dma: ioat dma device
 */
int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma)
{
	struct ioatdma_chan *ioat_chan;
	struct pci_dev *pdev = ioat_dma->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	int i, j, msixcnt;
	int err = -EINVAL;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
	goto err_no_irq;

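	/*
	 * MSI-X is tried first, with one vector per channel; any failure
	 * falls back to a single MSI vector, and then to legacy INTx.
	 */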
msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = ioat_dma->dma_dev.chancnt;
	for (i = 0; i < msixcnt; i++)
		ioat_dma->msix_entries[i].entry = i;

	err = pci_enable_msix_exact(pdev, ioat_dma->msix_entries, msixcnt);
	if (err)
		goto msi;

	for (i = 0; i < msixcnt; i++) {
		msix = &ioat_dma->msix_entries[i];
		ioat_chan = ioat_chan_by_index(ioat_dma, i);
		err = devm_request_irq(dev, msix->vector,
				       ioat_dma_do_interrupt_msix, 0,
				       "ioat-msix", ioat_chan);
		if (err) {
			for (j = 0; j < i; j++) {
				msix = &ioat_dma->msix_entries[j];
				ioat_chan = ioat_chan_by_index(ioat_dma, j);
				devm_free_irq(dev, msix->vector, ioat_chan);
			}
			goto msi;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	ioat_dma->irq_mode = IOAT_MSIX;
	goto done;

msi:
	err = pci_enable_msi(pdev);
	if (err)
		goto intx;

	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
			       "ioat-msi", ioat_dma);
	if (err) {
		pci_disable_msi(pdev);
		goto intx;
	}
	ioat_dma->irq_mode = IOAT_MSI;
	goto done;

intx:
	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
			       IRQF_SHARED, "ioat-intx", ioat_dma);
	if (err)
		goto err_no_irq;

	ioat_dma->irq_mode = IOAT_INTX;
done:
	if (is_bwd_ioat(pdev))
		ioat_intr_quirk(ioat_dma);
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
	ioat_dma->irq_mode = IOAT_NOIRQ;
	dev_err(dev, "no usable interrupts\n");
	return err;
}

static void ioat_disable_interrupts(struct ioatdma_device *ioat_dma)
{
	/* Disable all interrupt generation */
	writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
}

static int ioat_probe(struct ioatdma_device *ioat_dma)
{
	int err = -ENODEV;
	struct dma_device *dma = &ioat_dma->dma_dev;
	struct pci_dev *pdev = ioat_dma->pdev;
	struct device *dev = &pdev->dev;

	ioat_dma->completion_pool = dma_pool_create("completion_pool", dev,
						    sizeof(u64),
						    SMP_CACHE_BYTES,
						    SMP_CACHE_BYTES);

	if (!ioat_dma->completion_pool) {
		err = -ENOMEM;
		goto err_out;
	}

	ioat_enumerate_channels(ioat_dma);

	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->dev = &pdev->dev;

	if (!dma->chancnt) {
		dev_err(dev, "channel enumeration error\n");
		goto err_setup_interrupts;
	}

	err = ioat_dma_setup_interrupts(ioat_dma);
	if (err)
		goto err_setup_interrupts;

	err = ioat3_dma_self_test(ioat_dma);
	if (err)
		goto err_self_test;

	return 0;

err_self_test:
	ioat_disable_interrupts(ioat_dma);
err_setup_interrupts:
	dma_pool_destroy(ioat_dma->completion_pool);
err_out:
	return err;
}

static int ioat_register(struct ioatdma_device *ioat_dma)
{
	int err = dma_async_device_register(&ioat_dma->dma_dev);

	if (err) {
		ioat_disable_interrupts(ioat_dma);
		dma_pool_destroy(ioat_dma->completion_pool);
	}

	return err;
}

static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
{
	struct dma_device *dma = &ioat_dma->dma_dev;

	ioat_disable_interrupts(ioat_dma);

	ioat_kobject_del(ioat_dma);

	dma_async_device_unregister(dma);

	dma_pool_destroy(ioat_dma->completion_pool);

	INIT_LIST_HEAD(&dma->channels);
}

/**
 * ioat_enumerate_channels - find and initialize the device's channels
 * @ioat_dma: the ioat dma device to be enumerated
 */
static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
{
	struct ioatdma_chan *ioat_chan;
	struct device *dev = &ioat_dma->pdev->dev;
	struct dma_device *dma = &ioat_dma->dma_dev;
	u8 xfercap_log;
	int i;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(ioat_dma->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(ioat_dma->idx));
		dma->chancnt = ARRAY_SIZE(ioat_dma->idx);
	}
	xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_log &= 0x1f; /* bits [4:0] valid */
	if (xfercap_log == 0)
		return 0;
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);

	for (i = 0; i < dma->chancnt; i++) {
		ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
		if (!ioat_chan)
			break;

		ioat_init_channel(ioat_dma, ioat_chan, i);
		ioat_chan->xfercap_log = xfercap_log;
		spin_lock_init(&ioat_chan->prep_lock);
		if (ioat_reset_hw(ioat_chan)) {
			i = 0;
			break;
		}
	}
	dma->chancnt = i;
	return i;
}

/**
 * ioat_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
static void ioat_free_chan_resources(struct dma_chan *c)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	const int total_descs = 1 << ioat_chan->alloc_order;
	int descs;
	int i;

	/* Before freeing channel resources, first check whether they
	 * have been previously allocated for this channel.
	 */
	if (!ioat_chan->ring)
		return;

	ioat_stop(ioat_chan);
	ioat_reset_hw(ioat_chan);

	spin_lock_bh(&ioat_chan->cleanup_lock);
	spin_lock_bh(&ioat_chan->prep_lock);
	descs = ioat_ring_space(ioat_chan);
	dev_dbg(to_dev(ioat_chan), "freeing %d idle descriptors\n", descs);
	for (i = 0; i < descs; i++) {
		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head + i);
		ioat_free_ring_ent(desc, c);
	}

	if (descs < total_descs)
		dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n",
			total_descs - descs);

	for (i = 0; i < total_descs - descs; i++) {
		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail + i);
		dump_desc_dbg(ioat_chan, desc);
		ioat_free_ring_ent(desc, c);
	}

	for (i = 0; i < ioat_chan->desc_chunks; i++) {
		dma_free_coherent(to_dev(ioat_chan), SZ_2M,
				  ioat_chan->descs[i].virt,
				  ioat_chan->descs[i].hw);
		ioat_chan->descs[i].virt = NULL;
		ioat_chan->descs[i].hw = 0;
	}
	ioat_chan->desc_chunks = 0;

	kfree(ioat_chan->ring);
	ioat_chan->ring = NULL;
	ioat_chan->alloc_order = 0;
	dma_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
		      ioat_chan->completion_dma);
	spin_unlock_bh(&ioat_chan->prep_lock);
	spin_unlock_bh(&ioat_chan->cleanup_lock);

	ioat_chan->last_completion = 0;
	ioat_chan->completion_dma = 0;
	ioat_chan->dmacount = 0;
}

/* ioat_alloc_chan_resources - allocate/initialize ioat descriptor ring
 * @chan: channel to be initialized
 */
static int ioat_alloc_chan_resources(struct dma_chan *c)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioat_ring_ent **ring;
	u64 status;
	int order;
	int i = 0;
	u32 chanerr;

	/* have we already been set up? */
	if (ioat_chan->ring)
		return 1 << ioat_chan->alloc_order;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	ioat_chan->completion =
		dma_pool_zalloc(ioat_chan->ioat_dma->completion_pool,
				GFP_KERNEL, &ioat_chan->completion_dma);
	if (!ioat_chan->completion)
		return -ENOMEM;

	writel(((u64)ioat_chan->completion_dma) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64)ioat_chan->completion_dma) >> 32,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

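	/* the descriptor ring is allocated at its maximum order up front */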
	order = IOAT_MAX_ORDER;
	ring = ioat_alloc_ring(c, order, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	spin_lock_bh(&ioat_chan->cleanup_lock);
	spin_lock_bh(&ioat_chan->prep_lock);
	ioat_chan->ring = ring;
	ioat_chan->head = 0;
	ioat_chan->issued = 0;
	ioat_chan->tail = 0;
	ioat_chan->alloc_order = order;
	set_bit(IOAT_RUN, &ioat_chan->state);
	spin_unlock_bh(&ioat_chan->prep_lock);
	spin_unlock_bh(&ioat_chan->cleanup_lock);

	ioat_start_null_desc(ioat_chan);

	/* check that we got off the ground */
	do {
		udelay(1);
		status = ioat_chansts(ioat_chan);
	} while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));

	if (is_ioat_active(status) || is_ioat_idle(status))
		return 1 << ioat_chan->alloc_order;

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

	dev_WARN(to_dev(ioat_chan),
		 "failed to start channel chanerr: %#x\n", chanerr);
	ioat_free_chan_resources(c);
	return -EFAULT;
}

/* common channel initialization */
static void
ioat_init_channel(struct ioatdma_device *ioat_dma,
		  struct ioatdma_chan *ioat_chan, int idx)
{
	struct dma_device *dma = &ioat_dma->dma_dev;
	struct dma_chan *c = &ioat_chan->dma_chan;
	unsigned long data = (unsigned long) c;

	ioat_chan->ioat_dma = ioat_dma;
	ioat_chan->reg_base = ioat_dma->reg_base + (0x80 * (idx + 1));
	spin_lock_init(&ioat_chan->cleanup_lock);
	ioat_chan->dma_chan.device = dma;
	dma_cookie_init(&ioat_chan->dma_chan);
	list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels);
	ioat_dma->idx[idx] = ioat_chan;
	init_timer(&ioat_chan->timer);
	ioat_chan->timer.function = ioat_timer_event;
	ioat_chan->timer.data = data;
	tasklet_init(&ioat_chan->cleanup_task, ioat_cleanup_event, data);
}

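/*
 * The xor/xor_val self-test below runs three operations on the first
 * channel: an XOR of IOAT_NUM_SRC_TEST source pages into a destination
 * page, an xor_val pass over the sources plus that destination which
 * must report a zero parity sum, and a second xor_val pass after the
 * destination is zeroed which must flag a non-zero parity sum.
 */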
#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[IOAT_NUM_SRC_TEST];
	struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	u32 xor_val_result;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	struct device *dev = &ioat_dma->pdev->dev;
	struct dma_device *dma = &ioat_dma->dma_dev;
	u8 op = 0;

	dev_dbg(dev, "%s\n", __func__);

	if (!dma_has_cap(DMA_XOR, dma->cap_mask))
		return 0;

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);

		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
			(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	op = IOAT_OP_XOR;

	dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dest_dma)) {
		err = -ENOMEM;
		goto free_resources;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		dma_srcs[i] = DMA_ERROR_CODE;
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
		dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_srcs[i])) {
			err = -ENOMEM;
			goto dma_unmap;
		}
	}
	tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				      IOAT_NUM_SRC_TEST, PAGE_SIZE,
				      DMA_PREP_INTERRUPT);

	if (!tx) {
		dev_err(dev, "Self-test xor prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test xor setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test xor timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);

		if (ptr[i] != cmp_word) {
			dev_err(dev, "Self-test xor failed compare\n");
			err = -ENODEV;
			goto free_resources;
		}
	}
	dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);

	dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);

	/* skip validate if the capability is not present */
	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
		goto free_resources;

	op = IOAT_OP_XOR_VAL;

	/* validate the sources with the destination page */
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		xor_val_srcs[i] = xor_srcs[i];
	xor_val_srcs[i] = dest;

	xor_val_result = 1;

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = DMA_ERROR_CODE;
	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_srcs[i])) {
			err = -ENOMEM;
			goto dma_unmap;
		}
	}
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
					  &xor_val_result, DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(dev, "Self-test zero prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test zero setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test validate timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	if (xor_val_result != 0) {
		dev_err(dev, "Self-test validate failed compare\n");
		err = -ENODEV;
		goto free_resources;
	}

	memset(page_address(dest), 0, PAGE_SIZE);

	/* test for non-zero parity sum */
	op = IOAT_OP_XOR_VAL;

	xor_val_result = 0;
	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = DMA_ERROR_CODE;
	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_srcs[i])) {
			err = -ENOMEM;
			goto dma_unmap;
		}
	}
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
					  &xor_val_result, DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(dev, "Self-test 2nd zero prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test 2nd zero setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test 2nd validate timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	if (xor_val_result != SUM_CHECK_P_RESULT) {
		dev_err(dev, "Self-test validate failed compare\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	goto free_resources;
dma_unmap:
	if (op == IOAT_OP_XOR) {
		if (dest_dma != DMA_ERROR_CODE)
			dma_unmap_page(dev, dest_dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
		for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
			if (dma_srcs[i] != DMA_ERROR_CODE)
				dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
					       DMA_TO_DEVICE);
	} else if (op == IOAT_OP_XOR_VAL) {
		for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
			if (dma_srcs[i] != DMA_ERROR_CODE)
				dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
					       DMA_TO_DEVICE);
	}
free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	src_idx = IOAT_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma)
{
	int rc;

	rc = ioat_dma_self_test(ioat_dma);
	if (rc)
		return rc;

	rc = ioat_xor_val_self_test(ioat_dma);

	return rc;
}

static void ioat_intr_quirk(struct ioatdma_device *ioat_dma)
{
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioatdma_chan *ioat_chan;
	u32 errmask;

	dma = &ioat_dma->dma_dev;

	/*
	 * if we have descriptor write back error status, we mask the
	 * error interrupts
	 */
	if (ioat_dma->cap & IOAT_CAP_DWBES) {
		list_for_each_entry(c, &dma->channels, device_node) {
			ioat_chan = to_ioat_chan(c);
			errmask = readl(ioat_chan->reg_base +
					IOAT_CHANERR_MASK_OFFSET);
			errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR |
				   IOAT_CHANERR_XOR_Q_ERR;
			writel(errmask, ioat_chan->reg_base +
					IOAT_CHANERR_MASK_OFFSET);
		}
	}
}

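/*
 * Probe a CB3.x device: wire up the memcpy and interrupt operations,
 * then enable the XOR/PQ/RAID16SS paths according to the DMA capability
 * register, after masking out capabilities on platforms that lack
 * working RAID support (see is_xeon_cb32()/is_bwd_noraid() above).
 */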
static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
{
	struct pci_dev *pdev = ioat_dma->pdev;
	int dca_en = system_has_dca_enabled(pdev);
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioatdma_chan *ioat_chan;
	int err;
	u16 val16;

	dma = &ioat_dma->dma_dev;
	dma->device_prep_dma_memcpy = ioat_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat_issue_pending;
	dma->device_alloc_chan_resources = ioat_alloc_chan_resources;
	dma->device_free_chan_resources = ioat_free_chan_resources;

	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
	dma->device_prep_dma_interrupt = ioat_prep_interrupt_lock;

	ioat_dma->cap = readl(ioat_dma->reg_base + IOAT_DMA_CAP_OFFSET);

	if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
		ioat_dma->cap &=
			~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);

	/* dca is incompatible with raid operations */
	if (dca_en && (ioat_dma->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
		ioat_dma->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);

	if (ioat_dma->cap & IOAT_CAP_XOR) {
		dma->max_xor = 8;

		dma_cap_set(DMA_XOR, dma->cap_mask);
		dma->device_prep_dma_xor = ioat_prep_xor;

		dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
		dma->device_prep_dma_xor_val = ioat_prep_xor_val;
	}

	if (ioat_dma->cap & IOAT_CAP_PQ) {

		dma->device_prep_dma_pq = ioat_prep_pq;
		dma->device_prep_dma_pq_val = ioat_prep_pq_val;
		dma_cap_set(DMA_PQ, dma->cap_mask);
		dma_cap_set(DMA_PQ_VAL, dma->cap_mask);

		if (ioat_dma->cap & IOAT_CAP_RAID16SS)
			dma_set_maxpq(dma, 16, 0);
		else
			dma_set_maxpq(dma, 8, 0);

		if (!(ioat_dma->cap & IOAT_CAP_XOR)) {
			dma->device_prep_dma_xor = ioat_prep_pqxor;
			dma->device_prep_dma_xor_val = ioat_prep_pqxor_val;
			dma_cap_set(DMA_XOR, dma->cap_mask);
			dma_cap_set(DMA_XOR_VAL, dma->cap_mask);

			if (ioat_dma->cap & IOAT_CAP_RAID16SS)
				dma->max_xor = 16;
			else
				dma->max_xor = 8;
		}
	}

	dma->device_tx_status = ioat_tx_status;

	/* starting with CB3.3 super extended descriptors are supported */
	if (ioat_dma->cap & IOAT_CAP_RAID16SS) {
		char pool_name[14];
		int i;

		for (i = 0; i < MAX_SED_POOLS; i++) {
			snprintf(pool_name, 14, "ioat_hw%d_sed", i);

			/* allocate SED DMA pool */
			ioat_dma->sed_hw_pool[i] = dmam_pool_create(pool_name,
					&pdev->dev,
					SED_SIZE * (i + 1), 64, 0);
			if (!ioat_dma->sed_hw_pool[i])
				return -ENOMEM;

		}
	}

	if (!(ioat_dma->cap & (IOAT_CAP_XOR | IOAT_CAP_PQ)))
		dma_cap_set(DMA_PRIVATE, dma->cap_mask);

	err = ioat_probe(ioat_dma);
	if (err)
		return err;

	list_for_each_entry(c, &dma->channels, device_node) {
		ioat_chan = to_ioat_chan(c);
		writel(IOAT_DMA_DCA_ANY_CPU,
		       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(ioat_dma);
	if (err)
		return err;

	ioat_kobject_add(ioat_dma, &ioat_ktype);

	if (dca)
		ioat_dma->dca = ioat_dca_init(pdev, ioat_dma->reg_base);

	/* disable relaxed ordering */
	err = pcie_capability_read_word(pdev, IOAT_DEVCTRL_OFFSET, &val16);
	if (err)
		return err;

	/* clear relaxed ordering enable */
	val16 &= ~IOAT_DEVCTRL_ROE;
	err = pcie_capability_write_word(pdev, IOAT_DEVCTRL_OFFSET, val16);
	if (err)
		return err;

	return 0;
}

static void ioat_shutdown(struct pci_dev *pdev)
{
	struct ioatdma_device *ioat_dma = pci_get_drvdata(pdev);
	struct ioatdma_chan *ioat_chan;
	int i;

	if (!ioat_dma)
		return;

	for (i = 0; i < IOAT_MAX_CHANS; i++) {
		ioat_chan = ioat_dma->idx[i];
		if (!ioat_chan)
			continue;

		spin_lock_bh(&ioat_chan->prep_lock);
		set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
		del_timer_sync(&ioat_chan->timer);
		spin_unlock_bh(&ioat_chan->prep_lock);
		/* this should quiesce then reset */
		ioat_reset_hw(ioat_chan);
	}

	ioat_disable_interrupts(ioat_dma);
}

static void ioat_resume(struct ioatdma_device *ioat_dma)
{
	struct ioatdma_chan *ioat_chan;
	u32 chanerr;
	int i;

	for (i = 0; i < IOAT_MAX_CHANS; i++) {
		ioat_chan = ioat_dma->idx[i];
		if (!ioat_chan)
			continue;

		spin_lock_bh(&ioat_chan->prep_lock);
		clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
		spin_unlock_bh(&ioat_chan->prep_lock);

		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

		/* no need to reset as shutdown already did that */
	}
}

#define DRV_NAME "ioatdma"

static pci_ers_result_t ioat_pcie_error_detected(struct pci_dev *pdev,
						 enum pci_channel_state error)
{
	dev_dbg(&pdev->dev, "%s: PCIe AER error %d\n", DRV_NAME, error);

	/* quiesce and block I/O */
	ioat_shutdown(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t ioat_pcie_error_slot_reset(struct pci_dev *pdev)
{
	pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED;
	int err;

	dev_dbg(&pdev->dev, "%s post reset handling\n", DRV_NAME);

	if (pci_enable_device_mem(pdev) < 0) {
		dev_err(&pdev->dev,
			"Failed to enable PCIe device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"AER uncorrectable error status clear failed: %#x\n",
			err);
	}

	return result;
}

static void ioat_pcie_error_resume(struct pci_dev *pdev)
{
	struct ioatdma_device *ioat_dma = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s: AER handling resuming\n", DRV_NAME);

	/* initialize and bring everything back */
	ioat_resume(ioat_dma);
}

static const struct pci_error_handlers ioat_err_handler = {
	.error_detected = ioat_pcie_error_detected,
	.slot_reset = ioat_pcie_error_slot_reset,
	.resume = ioat_pcie_error_resume,
};

static struct pci_driver ioat_pci_driver = {
	.name = DRV_NAME,
	.id_table = ioat_pci_tbl,
	.probe = ioat_pci_probe,
	.remove = ioat_remove,
	.shutdown = ioat_shutdown,
	.err_handler = &ioat_err_handler,
};

static struct ioatdma_device *
alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
{
	struct device *dev = &pdev->dev;
	struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);

	if (!d)
		return NULL;
	d->pdev = pdev;
	d->reg_base = iobase;
	return d;
}

static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem * const *iomap;
	struct device *dev = &pdev->dev;
	struct ioatdma_device *device;
	int err;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME);
	if (err)
		return err;
	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

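	/* prefer 64-bit streaming and coherent masks; fall back to 32-bit */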
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err)
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		return err;

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		return err;

	device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]);
	if (!device)
		return -ENOMEM;
	pci_set_master(pdev);
	pci_set_drvdata(pdev, device);

	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
	if (device->version >= IOAT_VER_3_0) {
		err = ioat3_dma_probe(device, ioat_dca_enabled);

		if (device->version >= IOAT_VER_3_3)
			pci_enable_pcie_error_reporting(pdev);
	} else
		return -ENODEV;

	if (err) {
		dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
		pci_disable_pcie_error_reporting(pdev);
		return -ENODEV;
	}

	return 0;
}

static void ioat_remove(struct pci_dev *pdev)
{
	struct ioatdma_device *device = pci_get_drvdata(pdev);

	if (!device)
		return;

	dev_err(&pdev->dev, "Removing dma and dca services\n");
	if (device->dca) {
		unregister_dca_provider(device->dca, &pdev->dev);
		free_dca_provider(device->dca);
		device->dca = NULL;
	}

	pci_disable_pcie_error_reporting(pdev);
	ioat_dma_remove(device);
}

static int __init ioat_init_module(void)
{
	int err = -ENOMEM;

	pr_info("%s: Intel(R) QuickData Technology Driver %s\n",
		DRV_NAME, IOAT_DMA_VERSION);

	ioat_cache = kmem_cache_create("ioat", sizeof(struct ioat_ring_ent),
					0, SLAB_HWCACHE_ALIGN, NULL);
	if (!ioat_cache)
		return -ENOMEM;

	ioat_sed_cache = KMEM_CACHE(ioat_sed_ent, 0);
	if (!ioat_sed_cache)
		goto err_ioat_cache;

	err = pci_register_driver(&ioat_pci_driver);
	if (err)
		goto err_ioat3_cache;

	return 0;

 err_ioat3_cache:
	kmem_cache_destroy(ioat_sed_cache);

 err_ioat_cache:
	kmem_cache_destroy(ioat_cache);

	return err;
}
module_init(ioat_init_module);

static void __exit ioat_exit_module(void)
{
	pci_unregister_driver(&ioat_pci_driver);
	kmem_cache_destroy(ioat_cache);
}
module_exit(ioat_exit_module);