/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dca.h>
#include <linux/aer.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

MODULE_VERSION(IOAT_DMA_VERSION);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

static struct pci_device_id ioat_pci_tbl[] = {
	/* I/OAT v3 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) },

	/* I/OAT v3.2 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX9) },

	/* I/OAT v3.3 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) },

	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);

static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void ioat_remove(struct pci_dev *pdev);
static void
ioat_init_channel(struct ioatdma_device *ioat_dma,
		  struct ioatdma_chan *ioat_chan, int idx);
static void ioat_intr_quirk(struct ioatdma_device *ioat_dma);
static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma);
static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma);

static int ioat_dca_enabled = 1;
module_param(ioat_dca_enabled, int, 0644);
MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");
int ioat_ring_alloc_order = 8;
module_param(ioat_ring_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_alloc_order,
		 "ioat+: allocate 2^n descriptors per channel (default: 8 max: 16)");
int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
module_param(ioat_ring_max_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_max_alloc_order,
		 "ioat+: upper limit for ring size (default: 16)");
static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), msi, intx");

struct kmem_cache *ioat_cache;
struct kmem_cache *ioat_sed_cache;

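/*
 * Device-ID classification helpers: one predicate per hardware
 * generation (JSF/SNB/IVB/HSW/BDX), each covering the up-to-ten PCI
 * functions a platform exposes as DMA channels, so the probe path can
 * apply per-generation quirks.
 */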
static bool is_jf_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
		return true;
	default:
		return false;
	}
}

static bool is_snb_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
		return true;
	default:
		return false;
	}
}

static bool is_ivb_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
		return true;
	default:
		return false;
	}
}

static bool is_hsw_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
		return true;
	default:
		return false;
	}
}

static bool is_bdx_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BDX0:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX1:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX2:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX3:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX4:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX5:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX6:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX7:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX8:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX9:
		return true;
	default:
		return false;
	}
}

static bool is_xeon_cb32(struct pci_dev *pdev)
{
	return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
		is_hsw_ioat(pdev) || is_bdx_ioat(pdev);
}

bool is_bwd_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
	/* even though not Atom, BDX-DE has same DMA silicon */
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
		return true;
	default:
		return false;
	}
}

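/*
 * SKUs on which the RAID (XOR/PQ) engines must not be used;
 * ioat3_dma_probe() masks the corresponding capability bits off even
 * when the hardware advertises them.
 */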
static bool is_bwd_noraid(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
		return true;
	default:
		return false;
	}
}

/*
 * Perform an I/OAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void ioat_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}

/**
 * ioat_dma_self_test - Perform an I/OAT transaction to verify the HW works.
 * @ioat_dma: dma device to be tested
 */
static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_device *dma = &ioat_dma->dma_dev;
	struct device *dev = &ioat_dma->pdev->dev;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	unsigned long flags;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		dev_err(dev, "selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_src)) {
		dev_err(dev, "mapping src buffer failed\n");
		err = -ENOMEM;
		goto free_resources;
	}
	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_dest)) {
		dev_err(dev, "mapping dest buffer failed\n");
		err = -ENOMEM;
		goto unmap_src;
	}
	flags = DMA_PREP_INTERRUPT;
	tx = ioat_dma->dma_dev.device_prep_dma_memcpy(dma_chan, dma_dest,
						      dma_src, IOAT_TEST_SIZE,
						      flags);
	if (!tx) {
		dev_err(dev, "Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(dev, "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}

unmap_dma:
	dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
unmap_src:
	dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @ioat_dma: ioat dma device
 */
int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma)
{
	struct ioatdma_chan *ioat_chan;
	struct pci_dev *pdev = ioat_dma->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	int i, j, msixcnt;
	int err = -EINVAL;
	u8 intrctrl = 0;

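	/*
	 * Honor the requested style, degrading gracefully:
	 * MSI-X (one vector per channel) -> single MSI -> legacy INTx.
	 */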
	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = ioat_dma->dma_dev.chancnt;
	for (i = 0; i < msixcnt; i++)
		ioat_dma->msix_entries[i].entry = i;

	err = pci_enable_msix_exact(pdev, ioat_dma->msix_entries, msixcnt);
	if (err)
		goto msi;

	for (i = 0; i < msixcnt; i++) {
		msix = &ioat_dma->msix_entries[i];
		ioat_chan = ioat_chan_by_index(ioat_dma, i);
		err = devm_request_irq(dev, msix->vector,
				       ioat_dma_do_interrupt_msix, 0,
				       "ioat-msix", ioat_chan);
		if (err) {
			for (j = 0; j < i; j++) {
				msix = &ioat_dma->msix_entries[j];
				ioat_chan = ioat_chan_by_index(ioat_dma, j);
				devm_free_irq(dev, msix->vector, ioat_chan);
			}
			goto msi;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	ioat_dma->irq_mode = IOAT_MSIX;
	goto done;

msi:
	err = pci_enable_msi(pdev);
	if (err)
		goto intx;

	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
			       "ioat-msi", ioat_dma);
	if (err) {
		pci_disable_msi(pdev);
		goto intx;
	}
	ioat_dma->irq_mode = IOAT_MSI;
	goto done;

intx:
	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
			       IRQF_SHARED, "ioat-intx", ioat_dma);
	if (err)
		goto err_no_irq;

	ioat_dma->irq_mode = IOAT_INTX;
done:
	if (is_bwd_ioat(pdev))
		ioat_intr_quirk(ioat_dma);
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
	ioat_dma->irq_mode = IOAT_NOIRQ;
	dev_err(dev, "no usable interrupts\n");
	return err;
}

static void ioat_disable_interrupts(struct ioatdma_device *ioat_dma)
{
	/* Disable all interrupt generation */
	writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
}

static int ioat_probe(struct ioatdma_device *ioat_dma)
{
	int err = -ENODEV;
	struct dma_device *dma = &ioat_dma->dma_dev;
	struct pci_dev *pdev = ioat_dma->pdev;
	struct device *dev = &pdev->dev;

	/* DMA coherent memory pool for DMA descriptor allocations */
	ioat_dma->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					     sizeof(struct ioat_dma_descriptor),
					     64, 0);
	if (!ioat_dma->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	ioat_dma->completion_pool = pci_pool_create("completion_pool", pdev,
						    sizeof(u64),
						    SMP_CACHE_BYTES,
						    SMP_CACHE_BYTES);

	if (!ioat_dma->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	ioat_enumerate_channels(ioat_dma);

	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->dev = &pdev->dev;

	if (!dma->chancnt) {
		dev_err(dev, "channel enumeration error\n");
		goto err_setup_interrupts;
	}

	err = ioat_dma_setup_interrupts(ioat_dma);
	if (err)
		goto err_setup_interrupts;

	err = ioat3_dma_self_test(ioat_dma);
	if (err)
		goto err_self_test;

	return 0;

err_self_test:
	ioat_disable_interrupts(ioat_dma);
err_setup_interrupts:
	pci_pool_destroy(ioat_dma->completion_pool);
err_completion_pool:
	pci_pool_destroy(ioat_dma->dma_pool);
err_dma_pool:
	return err;
}

static int ioat_register(struct ioatdma_device *ioat_dma)
{
	int err = dma_async_device_register(&ioat_dma->dma_dev);

	if (err) {
		ioat_disable_interrupts(ioat_dma);
		pci_pool_destroy(ioat_dma->completion_pool);
		pci_pool_destroy(ioat_dma->dma_pool);
	}

	return err;
}

static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
{
	struct dma_device *dma = &ioat_dma->dma_dev;

	ioat_disable_interrupts(ioat_dma);

	ioat_kobject_del(ioat_dma);

	dma_async_device_unregister(dma);

	pci_pool_destroy(ioat_dma->dma_pool);
	pci_pool_destroy(ioat_dma->completion_pool);

	INIT_LIST_HEAD(&dma->channels);
}

/**
 * ioat_enumerate_channels - find and initialize the device's channels
 * @ioat_dma: the ioat dma device to be enumerated
 */
static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
{
	struct ioatdma_chan *ioat_chan;
	struct device *dev = &ioat_dma->pdev->dev;
	struct dma_device *dma = &ioat_dma->dma_dev;
	u8 xfercap_log;
	int i;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(ioat_dma->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(ioat_dma->idx));
		dma->chancnt = ARRAY_SIZE(ioat_dma->idx);
	}
	xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_log &= 0x1f; /* bits [4:0] valid */
	if (xfercap_log == 0)
		return 0;
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);

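	/*
	 * Allocate and hardware-reset each channel; a failed reset
	 * reports zero usable channels rather than a partial set.
	 */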
	for (i = 0; i < dma->chancnt; i++) {
		ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
		if (!ioat_chan)
			break;

		ioat_init_channel(ioat_dma, ioat_chan, i);
		ioat_chan->xfercap_log = xfercap_log;
		spin_lock_init(&ioat_chan->prep_lock);
		if (ioat_reset_hw(ioat_chan)) {
			i = 0;
			break;
		}
	}
	dma->chancnt = i;
	return i;
}

/**
 * ioat_free_chan_resources - release all the descriptors
 * @c: the channel to be cleaned
 */
static void ioat_free_chan_resources(struct dma_chan *c)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	const int total_descs = 1 << ioat_chan->alloc_order;
	int descs;
	int i;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (!ioat_chan->ring)
		return;

	ioat_stop(ioat_chan);
	ioat_reset_hw(ioat_chan);

	spin_lock_bh(&ioat_chan->cleanup_lock);
	spin_lock_bh(&ioat_chan->prep_lock);
	descs = ioat_ring_space(ioat_chan);
	dev_dbg(to_dev(ioat_chan), "freeing %d idle descriptors\n", descs);
	for (i = 0; i < descs; i++) {
		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head + i);
		ioat_free_ring_ent(desc, c);
	}

	if (descs < total_descs)
		dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n",
			total_descs - descs);

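	/* any remaining descriptors are in flight; free them from the tail */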
	for (i = 0; i < total_descs - descs; i++) {
		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail + i);
		dump_desc_dbg(ioat_chan, desc);
		ioat_free_ring_ent(desc, c);
	}

	kfree(ioat_chan->ring);
	ioat_chan->ring = NULL;
	ioat_chan->alloc_order = 0;
	pci_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
		      ioat_chan->completion_dma);
	spin_unlock_bh(&ioat_chan->prep_lock);
	spin_unlock_bh(&ioat_chan->cleanup_lock);

	ioat_chan->last_completion = 0;
	ioat_chan->completion_dma = 0;
	ioat_chan->dmacount = 0;
}

/**
 * ioat_alloc_chan_resources - allocate/initialize ioat descriptor ring
 * @c: channel to be initialized
 */
static int ioat_alloc_chan_resources(struct dma_chan *c)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioat_ring_ent **ring;
	u64 status;
	int order;
	int i = 0;
	u32 chanerr;

	/* have we already been set up? */
	if (ioat_chan->ring)
		return 1 << ioat_chan->alloc_order;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	ioat_chan->completion =
		pci_pool_alloc(ioat_chan->ioat_dma->completion_pool,
			       GFP_KERNEL, &ioat_chan->completion_dma);
	if (!ioat_chan->completion)
		return -ENOMEM;

	memset(ioat_chan->completion, 0, sizeof(*ioat_chan->completion));
	writel(((u64)ioat_chan->completion_dma) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64)ioat_chan->completion_dma) >> 32,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	order = ioat_get_alloc_order();
	ring = ioat_alloc_ring(c, order, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

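	/*
	 * Publish the ring under both locks so neither the cleanup path
	 * nor a prep call can see a half-initialized channel.
	 */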
	spin_lock_bh(&ioat_chan->cleanup_lock);
	spin_lock_bh(&ioat_chan->prep_lock);
	ioat_chan->ring = ring;
	ioat_chan->head = 0;
	ioat_chan->issued = 0;
	ioat_chan->tail = 0;
	ioat_chan->alloc_order = order;
	set_bit(IOAT_RUN, &ioat_chan->state);
	spin_unlock_bh(&ioat_chan->prep_lock);
	spin_unlock_bh(&ioat_chan->cleanup_lock);

	ioat_start_null_desc(ioat_chan);

	/* check that we got off the ground */
	do {
		udelay(1);
		status = ioat_chansts(ioat_chan);
	} while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));

	if (is_ioat_active(status) || is_ioat_idle(status))
		return 1 << ioat_chan->alloc_order;

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

	dev_WARN(to_dev(ioat_chan),
		 "failed to start channel chanerr: %#x\n", chanerr);
	ioat_free_chan_resources(c);
	return -EFAULT;
}

/* common channel initialization */
static void
ioat_init_channel(struct ioatdma_device *ioat_dma,
		  struct ioatdma_chan *ioat_chan, int idx)
{
	struct dma_device *dma = &ioat_dma->dma_dev;
	struct dma_chan *c = &ioat_chan->dma_chan;
	unsigned long data = (unsigned long) c;

	ioat_chan->ioat_dma = ioat_dma;
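	/* per-channel registers: 0x80-byte windows above the 0x80-byte
	 * device-common block */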
	ioat_chan->reg_base = ioat_dma->reg_base + (0x80 * (idx + 1));
	spin_lock_init(&ioat_chan->cleanup_lock);
	ioat_chan->dma_chan.device = dma;
	dma_cookie_init(&ioat_chan->dma_chan);
	list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels);
	ioat_dma->idx[idx] = ioat_chan;
	init_timer(&ioat_chan->timer);
	ioat_chan->timer.function = ioat_timer_event;
	ioat_chan->timer.data = data;
	tasklet_init(&ioat_chan->cleanup_task, ioat_cleanup_event, data);
}

#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[IOAT_NUM_SRC_TEST];
	struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	u32 xor_val_result;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	struct device *dev = &ioat_dma->pdev->dev;
	struct dma_device *dma = &ioat_dma->dma_dev;
	u8 op = 0;

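	/*
	 * Three sub-tests: a 6-source XOR into a parity page verified by
	 * the CPU, an XOR_VAL over the sources plus that page (which must
	 * report zero), and an XOR_VAL after zeroing the page (which must
	 * flag a parity error).
	 */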
	dev_dbg(dev, "%s\n", __func__);

	if (!dma_has_cap(DMA_XOR, dma->cap_mask))
		return 0;

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);

		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
			(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	op = IOAT_OP_XOR;

	dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dest_dma)) {
		err = -ENOMEM;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		dma_srcs[i] = DMA_ERROR_CODE;
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
		dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_srcs[i])) {
			err = -ENOMEM;
			goto dma_unmap;
		}
	}
	tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				      IOAT_NUM_SRC_TEST, PAGE_SIZE,
				      DMA_PREP_INTERRUPT);

	if (!tx) {
		dev_err(dev, "Self-test xor prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test xor setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test xor timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);

		if (ptr[i] != cmp_word) {
			dev_err(dev, "Self-test xor failed compare\n");
			err = -ENODEV;
			goto free_resources;
		}
	}
	dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);

	dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);

	/* skip validate if the capability is not present */
	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
		goto free_resources;

	op = IOAT_OP_XOR_VAL;

	/* validate the sources with the destination page */
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		xor_val_srcs[i] = xor_srcs[i];
	xor_val_srcs[i] = dest;

	xor_val_result = 1;

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = DMA_ERROR_CODE;
	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_srcs[i])) {
			err = -ENOMEM;
			goto dma_unmap;
		}
	}
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
					  &xor_val_result, DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(dev, "Self-test zero prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test zero setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test validate timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	if (xor_val_result != 0) {
		dev_err(dev, "Self-test validate failed compare\n");
		err = -ENODEV;
		goto free_resources;
	}

	memset(page_address(dest), 0, PAGE_SIZE);

	/* test for non-zero parity sum */
	op = IOAT_OP_XOR_VAL;

	xor_val_result = 0;
	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = DMA_ERROR_CODE;
	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_srcs[i])) {
			err = -ENOMEM;
			goto dma_unmap;
		}
	}
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
					  &xor_val_result, DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(dev, "Self-test 2nd zero prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test 2nd zero setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test 2nd validate timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	if (xor_val_result != SUM_CHECK_P_RESULT) {
		dev_err(dev, "Self-test validate failed compare\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	goto free_resources;
dma_unmap:
	if (op == IOAT_OP_XOR) {
		if (dest_dma != DMA_ERROR_CODE)
			dma_unmap_page(dev, dest_dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
		for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
			if (dma_srcs[i] != DMA_ERROR_CODE)
				dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
					       DMA_TO_DEVICE);
	} else if (op == IOAT_OP_XOR_VAL) {
		for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
			if (dma_srcs[i] != DMA_ERROR_CODE)
				dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
					       DMA_TO_DEVICE);
	}
free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	src_idx = IOAT_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma)
{
	int rc;

	rc = ioat_dma_self_test(ioat_dma);
	if (rc)
		return rc;

	rc = ioat_xor_val_self_test(ioat_dma);

	return rc;
}

static void ioat_intr_quirk(struct ioatdma_device *ioat_dma)
{
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioatdma_chan *ioat_chan;
	u32 errmask;

	dma = &ioat_dma->dma_dev;

	/*
	 * if we have descriptor write back error status, we mask the
	 * error interrupts
	 */
	if (ioat_dma->cap & IOAT_CAP_DWBES) {
		list_for_each_entry(c, &dma->channels, device_node) {
			ioat_chan = to_ioat_chan(c);
			errmask = readl(ioat_chan->reg_base +
					IOAT_CHANERR_MASK_OFFSET);
			errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR |
				   IOAT_CHANERR_XOR_Q_ERR;
			writel(errmask, ioat_chan->reg_base +
			       IOAT_CHANERR_MASK_OFFSET);
		}
	}
}

static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
{
	struct pci_dev *pdev = ioat_dma->pdev;
	int dca_en = system_has_dca_enabled(pdev);
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioatdma_chan *ioat_chan;
	bool is_raid_device = false;
	int err;

	dma = &ioat_dma->dma_dev;
	dma->device_prep_dma_memcpy = ioat_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat_issue_pending;
	dma->device_alloc_chan_resources = ioat_alloc_chan_resources;
	dma->device_free_chan_resources = ioat_free_chan_resources;

	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
	dma->device_prep_dma_interrupt = ioat_prep_interrupt_lock;

	ioat_dma->cap = readl(ioat_dma->reg_base + IOAT_DMA_CAP_OFFSET);

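	/*
	 * Trim advertised capabilities: CB3.2 Xeon parts and the no-RAID
	 * BWD/BDX-DE SKUs must not expose XOR/PQ, and DCA cannot be used
	 * together with the RAID engines.
	 */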
	if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
		ioat_dma->cap &=
			~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);

	/* dca is incompatible with raid operations */
	if (dca_en && (ioat_dma->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
		ioat_dma->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);

	if (ioat_dma->cap & IOAT_CAP_XOR) {
		is_raid_device = true;
		dma->max_xor = 8;

		dma_cap_set(DMA_XOR, dma->cap_mask);
		dma->device_prep_dma_xor = ioat_prep_xor;

		dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
		dma->device_prep_dma_xor_val = ioat_prep_xor_val;
	}

	if (ioat_dma->cap & IOAT_CAP_PQ) {
		is_raid_device = true;

		dma->device_prep_dma_pq = ioat_prep_pq;
		dma->device_prep_dma_pq_val = ioat_prep_pq_val;
		dma_cap_set(DMA_PQ, dma->cap_mask);
		dma_cap_set(DMA_PQ_VAL, dma->cap_mask);

		if (ioat_dma->cap & IOAT_CAP_RAID16SS)
			dma_set_maxpq(dma, 16, 0);
		else
			dma_set_maxpq(dma, 8, 0);

		if (!(ioat_dma->cap & IOAT_CAP_XOR)) {
			dma->device_prep_dma_xor = ioat_prep_pqxor;
			dma->device_prep_dma_xor_val = ioat_prep_pqxor_val;
			dma_cap_set(DMA_XOR, dma->cap_mask);
			dma_cap_set(DMA_XOR_VAL, dma->cap_mask);

			if (ioat_dma->cap & IOAT_CAP_RAID16SS)
				dma->max_xor = 16;
			else
				dma->max_xor = 8;
		}
	}

	dma->device_tx_status = ioat_tx_status;

	/* starting with CB3.3 super extended descriptors are supported */
	if (ioat_dma->cap & IOAT_CAP_RAID16SS) {
		char pool_name[14];
		int i;

		for (i = 0; i < MAX_SED_POOLS; i++) {
			snprintf(pool_name, 14, "ioat_hw%d_sed", i);

			/* allocate SED DMA pool */
			ioat_dma->sed_hw_pool[i] = dmam_pool_create(pool_name,
					&pdev->dev,
					SED_SIZE * (i + 1), 64, 0);
			if (!ioat_dma->sed_hw_pool[i])
				return -ENOMEM;
		}
	}

	if (!(ioat_dma->cap & (IOAT_CAP_XOR | IOAT_CAP_PQ)))
		dma_cap_set(DMA_PRIVATE, dma->cap_mask);

	err = ioat_probe(ioat_dma);
	if (err)
		return err;

	list_for_each_entry(c, &dma->channels, device_node) {
		ioat_chan = to_ioat_chan(c);
		writel(IOAT_DMA_DCA_ANY_CPU,
		       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(ioat_dma);
	if (err)
		return err;

	ioat_kobject_add(ioat_dma, &ioat_ktype);

	if (dca)
		ioat_dma->dca = ioat_dca_init(pdev, ioat_dma->reg_base);

	return 0;
}

static void ioat_shutdown(struct pci_dev *pdev)
{
	struct ioatdma_device *ioat_dma = pci_get_drvdata(pdev);
	struct ioatdma_chan *ioat_chan;
	int i;

	if (!ioat_dma)
		return;

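	/*
	 * Take each channel down: block new submissions and stop the
	 * watchdog timer under prep_lock, then quiesce and reset the
	 * hardware.
	 */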
	for (i = 0; i < IOAT_MAX_CHANS; i++) {
		ioat_chan = ioat_dma->idx[i];
		if (!ioat_chan)
			continue;

		spin_lock_bh(&ioat_chan->prep_lock);
		set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
		del_timer_sync(&ioat_chan->timer);
		spin_unlock_bh(&ioat_chan->prep_lock);
		/* this should quiesce then reset */
		ioat_reset_hw(ioat_chan);
	}

	ioat_disable_interrupts(ioat_dma);
}

void ioat_resume(struct ioatdma_device *ioat_dma)
{
	struct ioatdma_chan *ioat_chan;
	u32 chanerr;
	int i;

	for (i = 0; i < IOAT_MAX_CHANS; i++) {
		ioat_chan = ioat_dma->idx[i];
		if (!ioat_chan)
			continue;

		spin_lock_bh(&ioat_chan->prep_lock);
		clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
		spin_unlock_bh(&ioat_chan->prep_lock);

		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

		/* no need to reset as shutdown already did that */
	}
}

#define DRV_NAME "ioatdma"

static pci_ers_result_t ioat_pcie_error_detected(struct pci_dev *pdev,
						 enum pci_channel_state error)
{
	dev_dbg(&pdev->dev, "%s: PCIe AER error %d\n", DRV_NAME, error);

	/* quiesce and block I/O */
	ioat_shutdown(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t ioat_pcie_error_slot_reset(struct pci_dev *pdev)
{
	pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED;
	int err;

	dev_dbg(&pdev->dev, "%s post reset handling\n", DRV_NAME);

	if (pci_enable_device_mem(pdev) < 0) {
		dev_err(&pdev->dev,
			"Failed to enable PCIe device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"AER uncorrect error status clear failed: %#x\n", err);
	}

	return result;
}

static void ioat_pcie_error_resume(struct pci_dev *pdev)
{
	struct ioatdma_device *ioat_dma = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s: AER handling resuming\n", DRV_NAME);

	/* initialize and bring everything back */
	ioat_resume(ioat_dma);
}

static const struct pci_error_handlers ioat_err_handler = {
	.error_detected = ioat_pcie_error_detected,
	.slot_reset = ioat_pcie_error_slot_reset,
	.resume = ioat_pcie_error_resume,
};

static struct pci_driver ioat_pci_driver = {
	.name = DRV_NAME,
	.id_table = ioat_pci_tbl,
	.probe = ioat_pci_probe,
	.remove = ioat_remove,
	.shutdown = ioat_shutdown,
	.err_handler = &ioat_err_handler,
};

static struct ioatdma_device *
alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
{
	struct device *dev = &pdev->dev;
	struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);

	if (!d)
		return NULL;
	d->pdev = pdev;
	d->reg_base = iobase;
	return d;
}

static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem * const *iomap;
	struct device *dev = &pdev->dev;
	struct ioatdma_device *device;
	int err;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME);
	if (err)
		return err;
	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

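	/* prefer 64-bit DMA addressing, falling back to 32-bit */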
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err)
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		return err;

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		return err;

	device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]);
	if (!device)
		return -ENOMEM;
	pci_set_master(pdev);
	pci_set_drvdata(pdev, device);

	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
	if (device->version >= IOAT_VER_3_0) {
		err = ioat3_dma_probe(device, ioat_dca_enabled);

		if (device->version >= IOAT_VER_3_3)
			pci_enable_pcie_error_reporting(pdev);
	} else
		return -ENODEV;

	if (err) {
		dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
		pci_disable_pcie_error_reporting(pdev);
		return -ENODEV;
	}

	return 0;
}

static void ioat_remove(struct pci_dev *pdev)
{
	struct ioatdma_device *device = pci_get_drvdata(pdev);

	if (!device)
		return;

	dev_err(&pdev->dev, "Removing dma and dca services\n");
	if (device->dca) {
		unregister_dca_provider(device->dca, &pdev->dev);
		free_dca_provider(device->dca);
		device->dca = NULL;
	}

	pci_disable_pcie_error_reporting(pdev);
	ioat_dma_remove(device);
}

static int __init ioat_init_module(void)
{
	int err = -ENOMEM;

	pr_info("%s: Intel(R) QuickData Technology Driver %s\n",
		DRV_NAME, IOAT_DMA_VERSION);

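	/* slab caches for ring entries and super-extended descriptors */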
	ioat_cache = kmem_cache_create("ioat", sizeof(struct ioat_ring_ent),
				       0, SLAB_HWCACHE_ALIGN, NULL);
	if (!ioat_cache)
		return -ENOMEM;

	ioat_sed_cache = KMEM_CACHE(ioat_sed_ent, 0);
	if (!ioat_sed_cache)
		goto err_ioat_cache;

	err = pci_register_driver(&ioat_pci_driver);
	if (err)
		goto err_ioat3_cache;

	return 0;

err_ioat3_cache:
	kmem_cache_destroy(ioat_sed_cache);

err_ioat_cache:
	kmem_cache_destroy(ioat_cache);

	return err;
}
module_init(ioat_init_module);

static void __exit ioat_exit_module(void)
{
	pci_unregister_driver(&ioat_pci_driver);
	kmem_cache_destroy(ioat_cache);
}
module_exit(ioat_exit_module);