Commit | Line | Data |
---|---|---|
bfe1d560 DJ |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ | |
3 | #include <linux/init.h> | |
4 | #include <linux/kernel.h> | |
5 | #include <linux/module.h> | |
6 | #include <linux/slab.h> | |
7 | #include <linux/pci.h> | |
8 | #include <linux/interrupt.h> | |
9 | #include <linux/delay.h> | |
10 | #include <linux/dma-mapping.h> | |
11 | #include <linux/workqueue.h> | |
12 | #include <linux/aer.h> | |
13 | #include <linux/fs.h> | |
14 | #include <linux/io-64-nonatomic-lo-hi.h> | |
15 | #include <linux/device.h> | |
16 | #include <linux/idr.h> | |
8e50d392 DJ |
17 | #include <linux/intel-svm.h> |
18 | #include <linux/iommu.h> | |
bfe1d560 | 19 | #include <uapi/linux/idxd.h> |
8f47d1a5 DJ |
20 | #include <linux/dmaengine.h> |
21 | #include "../dmaengine.h" | |
bfe1d560 DJ |
22 | #include "registers.h" |
23 | #include "idxd.h" | |
0bde4444 | 24 | #include "perfmon.h" |
bfe1d560 DJ |
25 | |
26 | MODULE_VERSION(IDXD_DRIVER_VERSION); | |
27 | MODULE_LICENSE("GPL v2"); | |
28 | MODULE_AUTHOR("Intel Corporation"); | |
d9e5481f | 29 | MODULE_IMPORT_NS(IDXD); |
bfe1d560 | 30 | |
03d939c7 DJ |
31 | static bool sva = true; |
32 | module_param(sva, bool, 0644); | |
33 | MODULE_PARM_DESC(sva, "Toggle SVA support on/off"); | |
34 | ||
ade8a86b DJ |
35 | bool tc_override; |
36 | module_param(tc_override, bool, 0644); | |
37 | MODULE_PARM_DESC(tc_override, "Override traffic class defaults"); | |
38 | ||
bfe1d560 DJ |
39 | #define DRV_NAME "idxd" |
40 | ||
8e50d392 | 41 | bool support_enqcmd; |
4b73e4eb | 42 | DEFINE_IDA(idxd_ida); |
bfe1d560 | 43 | |
/*
 * Per-device-type data, selected via the PCI id table's driver_data.
 * compl_size/align describe the hardware completion record for each type;
 * the align values (32 for DSA, 64 for IAX) presumably match each device's
 * required completion-record alignment -- confirm against the hardware spec.
 */
static struct idxd_driver_data idxd_driver_data[] = {
	[IDXD_TYPE_DSA] = {
		.name_prefix = "dsa",
		.type = IDXD_TYPE_DSA,
		.compl_size = sizeof(struct dsa_completion_record),
		.align = 32,
		.dev_type = &dsa_device_type,
	},
	[IDXD_TYPE_IAX] = {
		.name_prefix = "iax",
		.type = IDXD_TYPE_IAX,
		.compl_size = sizeof(struct iax_completion_record),
		.align = 64,
		.dev_type = &iax_device_type,
	},
};
60 | ||
/*
 * Supported PCI devices; driver_data points at the matching
 * idxd_driver_data[] entry so probe can tell DSA from IAX.
 */
static struct pci_device_id idxd_pci_tbl[] = {
	/* DSA ver 1.0 platforms */
	{ PCI_DEVICE_DATA(INTEL, DSA_SPR0, &idxd_driver_data[IDXD_TYPE_DSA]) },

	/* IAX ver 1.0 platforms */
	{ PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
/*
 * Allocate all MSI-X vectors and request the misc (vector 0) interrupt.
 * Per-wq interrupt entries (msix index 1..max_wqs) are only initialized
 * here; their IRQs are requested later when the wqs are enabled.
 *
 * Returns 0 on success or a negative errno; on failure all vectors are
 * released again.
 */
static int idxd_setup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct idxd_irq_entry *ie;
	int i, msixcnt;
	int rc = 0;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt < 0) {
		dev_err(dev, "Not MSI-X interrupt capable.\n");
		return -ENOSPC;
	}
	idxd->irq_cnt = msixcnt;

	/* Demand the full vector count: min == max == msixcnt. */
	rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
	if (rc != msixcnt) {
		dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc);
		return -ENOSPC;
	}
	dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);


	/* Vector 0 is the device's misc/error interrupt. */
	ie = idxd_get_ie(idxd, 0);
	ie->vector = pci_irq_vector(pdev, 0);
	rc = request_threaded_irq(ie->vector, NULL, idxd_misc_thread, 0, "idxd-misc", ie);
	if (rc < 0) {
		dev_err(dev, "Failed to allocate misc interrupt.\n");
		goto err_misc_irq;
	}
	dev_dbg(dev, "Requested idxd-misc handler on msix vector %d\n", ie->vector);

	for (i = 0; i < idxd->max_wqs; i++) {
		int msix_idx = i + 1;	/* wq entries start after the misc vector */

		ie = idxd_get_ie(idxd, msix_idx);
		ie->id = msix_idx;
		/* Not configured yet; filled in when the wq is brought up. */
		ie->int_handle = INVALID_INT_HANDLE;
		ie->pasid = INVALID_IOASID;

		spin_lock_init(&ie->list_lock);
		init_llist_head(&ie->pending_llist);
		INIT_LIST_HEAD(&ie->work_list);
	}

	idxd_unmask_error_interrupts(idxd);
	return 0;

 err_misc_irq:
	idxd_mask_error_interrupts(idxd);
	pci_free_irq_vectors(pdev);
	dev_err(dev, "No usable interrupts\n");
	return rc;
}
125 | ||
/*
 * Undo idxd_setup_interrupts(): mask error interrupts, free the misc
 * (vector 0) IRQ and release all MSI-X vectors. A non-positive
 * pci_msix_vec_count() means nothing was set up, so bail out quietly.
 */
static void idxd_cleanup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct idxd_irq_entry *ie;
	int msixcnt;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt <= 0)
		return;

	ie = idxd_get_ie(idxd, 0);
	idxd_mask_error_interrupts(idxd);
	free_irq(ie->vector, ie);
	pci_free_irq_vectors(pdev);
}
141 | ||
/*
 * Allocate and initialize the per-device work queue array.
 *
 * Each wq is a refcounted device on the dsa bus ("wq<dev>.<id>"); after
 * device_initialize() succeeds, failures are unwound with put_device() so
 * the device-model release callback frees the wq (and, presumably, its
 * wqcfg -- confirm against the wq device_type release handler).
 *
 * Returns 0 on success, -ENOMEM or the dev_set_name() errno on failure;
 * already-created wqs are released on the error path.
 */
static int idxd_setup_wqs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct idxd_wq *wq;
	struct device *conf_dev;
	int i, rc;

	/* Allocations are NUMA-local to the PCI device. */
	idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
				 GFP_KERNEL, dev_to_node(dev));
	if (!idxd->wqs)
		return -ENOMEM;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
		if (!wq) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ);
		conf_dev = wq_confdev(wq);
		wq->id = i;
		wq->idxd = idxd;
		device_initialize(wq_confdev(wq));
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_wq_device_type;
		rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
		if (rc < 0) {
			/* device_initialize() already ran: release via refcount. */
			put_device(conf_dev);
			goto err;
		}

		mutex_init(&wq->wq_lock);
		init_waitqueue_head(&wq->err_queue);
		init_completion(&wq->wq_dead);
		init_completion(&wq->wq_resurrect);
		wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
		wq->max_batch_size = WQ_DEFAULT_MAX_BATCH;
		wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
		wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
		if (!wq->wqcfg) {
			put_device(conf_dev);
			rc = -ENOMEM;
			goto err;
		}
		idxd->wqs[i] = wq;
	}

	return 0;

 err:
	/* Release every wq created before the failing iteration. */
	while (--i >= 0) {
		wq = idxd->wqs[i];
		conf_dev = wq_confdev(wq);
		put_device(conf_dev);
	}
	return rc;
}
201 | ||
/*
 * Allocate and initialize the per-device engine array. Mirrors
 * idxd_setup_wqs(): each engine is a refcounted device on the dsa bus
 * ("engine<dev>.<id>") and failures unwind with put_device().
 *
 * Returns 0 on success or a negative errno; already-created engines are
 * released on the error path.
 */
static int idxd_setup_engines(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	struct device *dev = &idxd->pdev->dev;
	struct device *conf_dev;
	int i, rc;

	idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *),
				     GFP_KERNEL, dev_to_node(dev));
	if (!idxd->engines)
		return -ENOMEM;

	for (i = 0; i < idxd->max_engines; i++) {
		engine = kzalloc_node(sizeof(*engine), GFP_KERNEL, dev_to_node(dev));
		if (!engine) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&engine->idxd_dev, IDXD_DEV_ENGINE);
		conf_dev = engine_confdev(engine);
		engine->id = i;
		engine->idxd = idxd;
		device_initialize(conf_dev);
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_engine_device_type;
		rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id);
		if (rc < 0) {
			/* device_initialize() already ran: release via refcount. */
			put_device(conf_dev);
			goto err;
		}

		idxd->engines[i] = engine;
	}

	return 0;

 err:
	/* Release every engine created before the failing iteration. */
	while (--i >= 0) {
		engine = idxd->engines[i];
		conf_dev = engine_confdev(engine);
		put_device(conf_dev);
	}
	return rc;
}
248 | ||
defe49f9 | 249 | static int idxd_setup_groups(struct idxd_device *idxd) |
bfe1d560 DJ |
250 | { |
251 | struct device *dev = &idxd->pdev->dev; | |
700af3a0 | 252 | struct device *conf_dev; |
defe49f9 | 253 | struct idxd_group *group; |
7c5dd23e | 254 | int i, rc; |
bfe1d560 | 255 | |
defe49f9 DJ |
256 | idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *), |
257 | GFP_KERNEL, dev_to_node(dev)); | |
258 | if (!idxd->groups) | |
259 | return -ENOMEM; | |
260 | ||
261 | for (i = 0; i < idxd->max_groups; i++) { | |
262 | group = kzalloc_node(sizeof(*group), GFP_KERNEL, dev_to_node(dev)); | |
263 | if (!group) { | |
264 | rc = -ENOMEM; | |
265 | goto err; | |
266 | } | |
267 | ||
700af3a0 DJ |
268 | idxd_dev_set_type(&group->idxd_dev, IDXD_DEV_GROUP); |
269 | conf_dev = group_confdev(group); | |
defe49f9 DJ |
270 | group->id = i; |
271 | group->idxd = idxd; | |
700af3a0 DJ |
272 | device_initialize(conf_dev); |
273 | conf_dev->parent = idxd_confdev(idxd); | |
274 | conf_dev->bus = &dsa_bus_type; | |
275 | conf_dev->type = &idxd_group_device_type; | |
276 | rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id); | |
defe49f9 | 277 | if (rc < 0) { |
700af3a0 | 278 | put_device(conf_dev); |
defe49f9 DJ |
279 | goto err; |
280 | } | |
281 | ||
282 | idxd->groups[i] = group; | |
ade8a86b DJ |
283 | if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) { |
284 | group->tc_a = 1; | |
285 | group->tc_b = 1; | |
286 | } else { | |
287 | group->tc_a = -1; | |
288 | group->tc_b = -1; | |
289 | } | |
defe49f9 DJ |
290 | } |
291 | ||
292 | return 0; | |
293 | ||
294 | err: | |
700af3a0 DJ |
295 | while (--i >= 0) { |
296 | group = idxd->groups[i]; | |
297 | put_device(group_confdev(group)); | |
298 | } | |
defe49f9 DJ |
299 | return rc; |
300 | } | |
301 | ||
/*
 * Undo idxd_setup_internals(): drop the references on every group,
 * engine and wq conf device (their release callbacks free the objects)
 * and destroy the device's workqueue.
 */
static void idxd_cleanup_internals(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_groups; i++)
		put_device(group_confdev(idxd->groups[i]));
	for (i = 0; i < idxd->max_engines; i++)
		put_device(engine_confdev(idxd->engines[i]));
	for (i = 0; i < idxd->max_wqs; i++)
		put_device(wq_confdev(idxd->wqs[i]));
	destroy_workqueue(idxd->wq);
}
314 | ||
/*
 * Build all software state for the device: wqs, engines, groups, and the
 * device-named workqueue. Each setup step is unwound in reverse order on
 * failure (goto ladder); the put_device() calls here rely on the sub-object
 * counts having been fully populated by the corresponding setup helper.
 *
 * Returns 0 on success or a negative errno.
 */
static int idxd_setup_internals(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	init_waitqueue_head(&idxd->cmd_waitq);

	rc = idxd_setup_wqs(idxd);
	if (rc < 0)
		goto err_wqs;

	rc = idxd_setup_engines(idxd);
	if (rc < 0)
		goto err_engine;

	rc = idxd_setup_groups(idxd);
	if (rc < 0)
		goto err_group;

	idxd->wq = create_workqueue(dev_name(dev));
	if (!idxd->wq) {
		rc = -ENOMEM;
		goto err_wkq_create;
	}

	return 0;

 err_wkq_create:
	for (i = 0; i < idxd->max_groups; i++)
		put_device(group_confdev(idxd->groups[i]));
 err_group:
	for (i = 0; i < idxd->max_engines; i++)
		put_device(engine_confdev(idxd->engines[i]));
 err_engine:
	for (i = 0; i < idxd->max_wqs; i++)
		put_device(wq_confdev(idxd->wqs[i]));
 err_wqs:
	return rc;
}
354 | ||
355 | static void idxd_read_table_offsets(struct idxd_device *idxd) | |
356 | { | |
357 | union offsets_reg offsets; | |
358 | struct device *dev = &idxd->pdev->dev; | |
359 | ||
360 | offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET); | |
2f8417a9 DJ |
361 | offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + sizeof(u64)); |
362 | idxd->grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT; | |
bfe1d560 | 363 | dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset); |
2f8417a9 DJ |
364 | idxd->wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT; |
365 | dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", idxd->wqcfg_offset); | |
366 | idxd->msix_perm_offset = offsets.msix_perm * IDXD_TABLE_MULT; | |
367 | dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", idxd->msix_perm_offset); | |
368 | idxd->perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT; | |
bfe1d560 DJ |
369 | dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset); |
370 | } | |
/*
 * Read the hardware capability registers and populate the corresponding
 * fields of the idxd context: general caps (max transfer/batch, config
 * enable), optional command caps, group caps (group and read-buffer
 * counts), engine caps, wq caps (sizes, counts, wqcfg record size) and
 * the four operation-capability words.
 */
static void idxd_read_caps(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	/* reading generic capabilities */
	idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
	dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);

	/* cmd_cap register only exists when gen_cap advertises it */
	if (idxd->hw.gen_cap.cmd_cap) {
		idxd->hw.cmd_cap = ioread32(idxd->reg_base + IDXD_CMDCAP_OFFSET);
		dev_dbg(dev, "cmd_cap: %#x\n", idxd->hw.cmd_cap);
	}

	/* reading command capabilities */
	if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE))
		idxd->request_int_handles = true;

	/* shift-encoded limits -> byte/size values */
	idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
	dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
	idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
	dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
	if (idxd->hw.gen_cap.config_en)
		set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);

	/* reading group capabilities */
	idxd->hw.group_cap.bits =
		ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
	dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
	idxd->max_groups = idxd->hw.group_cap.num_groups;
	dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
	idxd->max_rdbufs = idxd->hw.group_cap.total_rdbufs;
	dev_dbg(dev, "max read buffers: %u\n", idxd->max_rdbufs);
	/* start out using all available read buffers */
	idxd->nr_rdbufs = idxd->max_rdbufs;

	/* read engine capabilities */
	idxd->hw.engine_cap.bits =
		ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
	dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
	idxd->max_engines = idxd->hw.engine_cap.num_engines;
	dev_dbg(dev, "max engines: %u\n", idxd->max_engines);

	/* read workqueue capabilities */
	idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
	dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
	idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
	dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
	idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
	dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
	idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN);
	dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size);

	/* reading operation capabilities */
	for (i = 0; i < 4; i++) {
		idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
				IDXD_OPCAP_OFFSET + i * sizeof(u64));
		dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
	}
}
431 | ||
435b512d | 432 | static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data) |
bfe1d560 DJ |
433 | { |
434 | struct device *dev = &pdev->dev; | |
700af3a0 | 435 | struct device *conf_dev; |
bfe1d560 | 436 | struct idxd_device *idxd; |
47c16ac2 | 437 | int rc; |
bfe1d560 | 438 | |
47c16ac2 | 439 | idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev)); |
bfe1d560 DJ |
440 | if (!idxd) |
441 | return NULL; | |
442 | ||
700af3a0 | 443 | conf_dev = idxd_confdev(idxd); |
bfe1d560 | 444 | idxd->pdev = pdev; |
435b512d | 445 | idxd->data = data; |
700af3a0 | 446 | idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type); |
4b73e4eb | 447 | idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL); |
47c16ac2 DJ |
448 | if (idxd->id < 0) |
449 | return NULL; | |
450 | ||
700af3a0 DJ |
451 | device_initialize(conf_dev); |
452 | conf_dev->parent = dev; | |
453 | conf_dev->bus = &dsa_bus_type; | |
454 | conf_dev->type = idxd->data->dev_type; | |
455 | rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id); | |
47c16ac2 | 456 | if (rc < 0) { |
700af3a0 | 457 | put_device(conf_dev); |
47c16ac2 DJ |
458 | return NULL; |
459 | } | |
460 | ||
bfe1d560 | 461 | spin_lock_init(&idxd->dev_lock); |
53b2ee7f | 462 | spin_lock_init(&idxd->cmd_lock); |
bfe1d560 DJ |
463 | |
464 | return idxd; | |
465 | } | |
466 | ||
/*
 * Bind the device to the kernel's supervisor PASID via SVA so in-kernel
 * DMA can run with a PASID. On success the sva handle and pasid are
 * stored in the idxd context.
 *
 * Returns 0 on success, the iommu_sva_bind_device() errno, or -ENODEV
 * when no valid PASID comes back from the bind.
 */
static int idxd_enable_system_pasid(struct idxd_device *idxd)
{
	int flags;
	unsigned int pasid;
	struct iommu_sva *sva;

	flags = SVM_FLAG_SUPERVISOR_MODE;

	sva = iommu_sva_bind_device(&idxd->pdev->dev, NULL, &flags);
	if (IS_ERR(sva)) {
		dev_warn(&idxd->pdev->dev,
			 "iommu sva bind failed: %ld\n", PTR_ERR(sva));
		return PTR_ERR(sva);
	}

	pasid = iommu_sva_get_pasid(sva);
	if (pasid == IOMMU_PASID_INVALID) {
		iommu_sva_unbind_device(sva);
		return -ENODEV;
	}

	idxd->sva = sva;
	idxd->pasid = pasid;
	dev_dbg(&idxd->pdev->dev, "system pasid: %u\n", pasid);
	return 0;
}
493 | ||
494 | static void idxd_disable_system_pasid(struct idxd_device *idxd) | |
495 | { | |
496 | ||
497 | iommu_sva_unbind_device(idxd->sva); | |
498 | idxd->sva = NULL; | |
499 | } | |
500 | ||
/*
 * Device-level probe: reset the hardware, optionally enable SVA/PASID,
 * read capabilities and table offsets, build software state, load
 * read-only configs if the device is not configurable, set up interrupts,
 * and initialize the cdev major and (best-effort) perfmon PMU.
 *
 * Returns 0 on success or a negative errno; the goto ladder unwinds
 * internals and PASID/SVA state in reverse order of setup.
 */
static int idxd_probe(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	int rc;

	dev_dbg(dev, "%s entered and resetting device\n", __func__);
	rc = idxd_device_init_reset(idxd);
	if (rc < 0)
		return rc;

	dev_dbg(dev, "IDXD reset complete\n");

	/* SVA is opt-out via the 'sva' module parameter. */
	if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
		if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA)) {
			dev_warn(dev, "Unable to turn on user SVA feature.\n");
		} else {
			set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);

			/* Kernel PASID failure is non-fatal: user SVA still works. */
			if (idxd_enable_system_pasid(idxd))
				dev_warn(dev, "No in-kernel DMA with PASID.\n");
			else
				set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
		}
	} else if (!sva) {
		dev_warn(dev, "User forced SVA off via module param.\n");
	}

	idxd_read_caps(idxd);
	idxd_read_table_offsets(idxd);

	rc = idxd_setup_internals(idxd);
	if (rc)
		goto err;

	/* If the configs are readonly, then load them from device */
	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
		dev_dbg(dev, "Loading RO device config\n");
		rc = idxd_device_load_config(idxd);
		if (rc < 0)
			goto err_config;
	}

	rc = idxd_setup_interrupts(idxd);
	if (rc)
		goto err_config;

	idxd->major = idxd_cdev_get_major(idxd);

	/* Perfmon failure is non-fatal: device works without a PMU. */
	rc = perfmon_pmu_init(idxd);
	if (rc < 0)
		dev_warn(dev, "Failed to initialize perfmon. No PMU support: %d\n", rc);

	dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
	return 0;

 err_config:
	idxd_cleanup_internals(idxd);
 err:
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	if (device_user_pasid_enabled(idxd))
		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
	return rc;
}
566 | ||
/*
 * Full teardown of everything idxd_probe() set up, in reverse order:
 * perfmon PMU, interrupts, internal state, then kernel and user
 * PASID/SVA features.
 */
static void idxd_cleanup(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;

	perfmon_pmu_remove(idxd);
	idxd_cleanup_interrupts(idxd);
	idxd_cleanup_internals(idxd);
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	if (device_user_pasid_enabled(idxd))
		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
}
579 | ||
/*
 * PCI probe entry point: enable the PCI device, allocate the idxd
 * context (type selected by id->driver_data), map the MMIO BAR, set
 * 64-bit DMA masks, run the device-level probe and register the sysfs
 * devices.
 *
 * Returns 0 on success or a negative errno; the goto ladder unwinds in
 * reverse order (note the idxd context itself is released via
 * put_device() on its conf device, not kfree()).
 */
static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;
	struct idxd_driver_data *data = (struct idxd_driver_data *)id->driver_data;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	dev_dbg(dev, "Alloc IDXD context\n");
	idxd = idxd_alloc(pdev, data);
	if (!idxd) {
		rc = -ENOMEM;
		goto err_idxd_alloc;
	}

	dev_dbg(dev, "Mapping BARs\n");
	idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
	if (!idxd->reg_base) {
		rc = -ENOMEM;
		goto err_iomap;
	}

	dev_dbg(dev, "Set DMA masks\n");
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		goto err;

	dev_dbg(dev, "Set PCI master\n");
	pci_set_master(pdev);
	pci_set_drvdata(pdev, idxd);

	idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
	rc = idxd_probe(idxd);
	if (rc) {
		dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
		goto err;
	}

	rc = idxd_register_devices(idxd);
	if (rc) {
		dev_err(dev, "IDXD sysfs setup failed\n");
		goto err_dev_register;
	}

	dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
		 idxd->hw.version);

	return 0;

 err_dev_register:
	idxd_cleanup(idxd);
 err:
	pci_iounmap(pdev, idxd->reg_base);
 err_iomap:
	put_device(idxd_confdev(idxd));
 err_idxd_alloc:
	pci_disable_device(pdev);
	return rc;
}
642 | ||
5b0c68c4 DJ |
643 | void idxd_wqs_quiesce(struct idxd_device *idxd) |
644 | { | |
645 | struct idxd_wq *wq; | |
646 | int i; | |
647 | ||
648 | for (i = 0; i < idxd->max_wqs; i++) { | |
649 | wq = idxd->wqs[i]; | |
650 | if (wq->state == IDXD_WQ_ENABLED && wq->type == IDXD_WQT_KERNEL) | |
651 | idxd_wq_quiesce(wq); | |
652 | } | |
653 | } | |
654 | ||
/*
 * PCI shutdown hook: disable the device, wait for any in-flight misc
 * interrupt handler to finish, mask error interrupts and flush pending
 * work on the device's workqueue.
 */
static void idxd_shutdown(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	struct idxd_irq_entry *irq_entry;
	int rc;

	rc = idxd_device_disable(idxd);
	if (rc)
		dev_err(&pdev->dev, "Disabling device failed\n");

	irq_entry = &idxd->ie;
	synchronize_irq(irq_entry->vector);
	idxd_mask_error_interrupts(idxd);
	flush_workqueue(idxd->wq);
}
670 | ||
671 | static void idxd_remove(struct pci_dev *pdev) | |
672 | { | |
673 | struct idxd_device *idxd = pci_get_drvdata(pdev); | |
49c4959f | 674 | struct idxd_irq_entry *irq_entry; |
bfe1d560 | 675 | |
98da0106 DJ |
676 | idxd_unregister_devices(idxd); |
677 | /* | |
678 | * When ->release() is called for the idxd->conf_dev, it frees all the memory related | |
679 | * to the idxd context. The driver still needs those bits in order to do the rest of | |
680 | * the cleanup. However, we do need to unbound the idxd sub-driver. So take a ref | |
681 | * on the device here to hold off the freeing while allowing the idxd sub-driver | |
682 | * to unbind. | |
683 | */ | |
684 | get_device(idxd_confdev(idxd)); | |
685 | device_unregister(idxd_confdev(idxd)); | |
bfe1d560 | 686 | idxd_shutdown(pdev); |
8e50d392 DJ |
687 | if (device_pasid_enabled(idxd)) |
688 | idxd_disable_system_pasid(idxd); | |
49c4959f | 689 | |
403a2e23 DJ |
690 | irq_entry = idxd_get_ie(idxd, 0); |
691 | free_irq(irq_entry->vector, irq_entry); | |
49c4959f DJ |
692 | pci_free_irq_vectors(pdev); |
693 | pci_iounmap(pdev, idxd->reg_base); | |
42a1b738 DJ |
694 | if (device_user_pasid_enabled(idxd)) |
695 | iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA); | |
49c4959f DJ |
696 | pci_disable_device(pdev); |
697 | destroy_workqueue(idxd->wq); | |
698 | perfmon_pmu_remove(idxd); | |
98da0106 | 699 | put_device(idxd_confdev(idxd)); |
bfe1d560 DJ |
700 | } |
701 | ||
702 | static struct pci_driver idxd_pci_driver = { | |
703 | .name = DRV_NAME, | |
704 | .id_table = idxd_pci_tbl, | |
705 | .probe = idxd_pci_probe, | |
706 | .remove = idxd_remove, | |
707 | .shutdown = idxd_shutdown, | |
708 | }; | |
709 | ||
710 | static int __init idxd_init_module(void) | |
711 | { | |
4b73e4eb | 712 | int err; |
bfe1d560 DJ |
713 | |
714 | /* | |
8e50d392 | 715 | * If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in |
bfe1d560 DJ |
716 | * enumerating the device. We can not utilize it. |
717 | */ | |
74b2fc88 | 718 | if (!cpu_feature_enabled(X86_FEATURE_MOVDIR64B)) { |
bfe1d560 DJ |
719 | pr_warn("idxd driver failed to load without MOVDIR64B.\n"); |
720 | return -ENODEV; | |
721 | } | |
722 | ||
74b2fc88 | 723 | if (!cpu_feature_enabled(X86_FEATURE_ENQCMD)) |
8e50d392 DJ |
724 | pr_warn("Platform does not have ENQCMD(S) support.\n"); |
725 | else | |
726 | support_enqcmd = true; | |
bfe1d560 | 727 | |
0bde4444 TZ |
728 | perfmon_init(); |
729 | ||
034b3290 DJ |
730 | err = idxd_driver_register(&idxd_drv); |
731 | if (err < 0) | |
732 | goto err_idxd_driver_register; | |
733 | ||
0cda4f69 DJ |
734 | err = idxd_driver_register(&idxd_dmaengine_drv); |
735 | if (err < 0) | |
736 | goto err_idxd_dmaengine_driver_register; | |
737 | ||
448c3de8 DJ |
738 | err = idxd_driver_register(&idxd_user_drv); |
739 | if (err < 0) | |
740 | goto err_idxd_user_driver_register; | |
741 | ||
42d279f9 DJ |
742 | err = idxd_cdev_register(); |
743 | if (err) | |
744 | goto err_cdev_register; | |
745 | ||
bfe1d560 DJ |
746 | err = pci_register_driver(&idxd_pci_driver); |
747 | if (err) | |
c52ca478 | 748 | goto err_pci_register; |
bfe1d560 DJ |
749 | |
750 | return 0; | |
c52ca478 DJ |
751 | |
752 | err_pci_register: | |
42d279f9 DJ |
753 | idxd_cdev_remove(); |
754 | err_cdev_register: | |
448c3de8 DJ |
755 | idxd_driver_unregister(&idxd_user_drv); |
756 | err_idxd_user_driver_register: | |
0cda4f69 DJ |
757 | idxd_driver_unregister(&idxd_dmaengine_drv); |
758 | err_idxd_dmaengine_driver_register: | |
034b3290 DJ |
759 | idxd_driver_unregister(&idxd_drv); |
760 | err_idxd_driver_register: | |
c52ca478 | 761 | return err; |
bfe1d560 DJ |
762 | } |
763 | module_init(idxd_init_module); | |
764 | ||
765 | static void __exit idxd_exit_module(void) | |
766 | { | |
448c3de8 | 767 | idxd_driver_unregister(&idxd_user_drv); |
0cda4f69 | 768 | idxd_driver_unregister(&idxd_dmaengine_drv); |
034b3290 | 769 | idxd_driver_unregister(&idxd_drv); |
bfe1d560 | 770 | pci_unregister_driver(&idxd_pci_driver); |
42d279f9 | 771 | idxd_cdev_remove(); |
0bde4444 | 772 | perfmon_exit(); |
bfe1d560 DJ |
773 | } |
774 | module_exit(idxd_exit_module); |