// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include <linux/dmaengine.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"
#include "perfmon.h"

MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_IMPORT_NS(IDXD);

static bool sva = true;
module_param(sva, bool, 0644);
MODULE_PARM_DESC(sva, "Toggle SVA support on/off");

bool tc_override;
module_param(tc_override, bool, 0644);
MODULE_PARM_DESC(tc_override, "Override traffic class defaults");

#define DRV_NAME "idxd"

bool support_enqcmd;
DEFINE_IDA(idxd_ida);
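
/*
 * Per-type driver data, indexed by device type. Completion record size
 * and alignment differ between DSA and IAX, so shared code paths look
 * them up here instead of hard-coding them.
 */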
static struct idxd_driver_data idxd_driver_data[] = {
	[IDXD_TYPE_DSA] = {
		.name_prefix = "dsa",
		.type = IDXD_TYPE_DSA,
		.compl_size = sizeof(struct dsa_completion_record),
		.align = 32,
		.dev_type = &dsa_device_type,
	},
	[IDXD_TYPE_IAX] = {
		.name_prefix = "iax",
		.type = IDXD_TYPE_IAX,
		.compl_size = sizeof(struct iax_completion_record),
		.align = 64,
		.dev_type = &iax_device_type,
	},
};

static struct pci_device_id idxd_pci_tbl[] = {
	/* DSA ver 1.0 platforms */
	{ PCI_DEVICE_DATA(INTEL, DSA_SPR0, &idxd_driver_data[IDXD_TYPE_DSA]) },

	/* IAX ver 1.0 platforms */
	{ PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
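
/*
 * MSI-X layout: vector 0 carries the device's misc/error interrupt, and
 * vectors 1..max_wqs belong to the work queues for descriptor completions.
 * Only the misc vector is requested here; the per-wq entries are just
 * initialized and are claimed later when a wq is enabled.
 */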
static int idxd_setup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct idxd_irq_entry *ie;
	int i, msixcnt;
	int rc = 0;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt < 0) {
		dev_err(dev, "Not MSI-X interrupt capable.\n");
		return -ENOSPC;
	}
	idxd->irq_cnt = msixcnt;

	rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
	if (rc != msixcnt) {
		dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc);
		return -ENOSPC;
	}
	dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);

	ie = idxd_get_ie(idxd, 0);
	ie->vector = pci_irq_vector(pdev, 0);
	rc = request_threaded_irq(ie->vector, NULL, idxd_misc_thread, 0, "idxd-misc", ie);
	if (rc < 0) {
		dev_err(dev, "Failed to allocate misc interrupt.\n");
		goto err_misc_irq;
	}
	dev_dbg(dev, "Requested idxd-misc handler on msix vector %d\n", ie->vector);

	for (i = 0; i < idxd->max_wqs; i++) {
		int msix_idx = i + 1;

		ie = idxd_get_ie(idxd, msix_idx);
		ie->id = msix_idx;
		ie->int_handle = INVALID_INT_HANDLE;
		ie->pasid = INVALID_IOASID;

		spin_lock_init(&ie->list_lock);
		init_llist_head(&ie->pending_llist);
		INIT_LIST_HEAD(&ie->work_list);
	}

	idxd_unmask_error_interrupts(idxd);
	return 0;

err_misc_irq:
	idxd_mask_error_interrupts(idxd);
	pci_free_irq_vectors(pdev);
	dev_err(dev, "No usable interrupts\n");
	return rc;
}

static void idxd_cleanup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct idxd_irq_entry *ie;
	int msixcnt;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt <= 0)
		return;

	ie = idxd_get_ie(idxd, 0);
	idxd_mask_error_interrupts(idxd);
	free_irq(ie->vector, ie);
	pci_free_irq_vectors(pdev);
}
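
/*
 * Each wq gets a conf_dev on the dsa bus for sysfs configuration. Once
 * device_initialize() has run, the wq must be released with put_device()
 * rather than kfree() so that the conf_dev ->release() callback frees
 * the wq memory.
 */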
static int idxd_setup_wqs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct idxd_wq *wq;
	struct device *conf_dev;
	int i, rc;

	idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
				 GFP_KERNEL, dev_to_node(dev));
	if (!idxd->wqs)
		return -ENOMEM;

	idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev));
	if (!idxd->wq_enable_map) {
		kfree(idxd->wqs);
		return -ENOMEM;
	}

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
		if (!wq) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ);
		conf_dev = wq_confdev(wq);
		wq->id = i;
		wq->idxd = idxd;
		device_initialize(wq_confdev(wq));
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_wq_device_type;
		rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
		if (rc < 0) {
			put_device(conf_dev);
			goto err;
		}

		mutex_init(&wq->wq_lock);
		init_waitqueue_head(&wq->err_queue);
		init_completion(&wq->wq_dead);
		init_completion(&wq->wq_resurrect);
		wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
		idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH);
		wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
		wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
		if (!wq->wqcfg) {
			put_device(conf_dev);
			rc = -ENOMEM;
			goto err;
		}

		if (idxd->hw.wq_cap.op_config) {
			wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
			if (!wq->opcap_bmap) {
				put_device(conf_dev);
				rc = -ENOMEM;
				goto err;
			}
			bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
		}
		idxd->wqs[i] = wq;
	}

	return 0;

err:
	while (--i >= 0) {
		wq = idxd->wqs[i];
		conf_dev = wq_confdev(wq);
		put_device(conf_dev);
	}
	return rc;
}

static int idxd_setup_engines(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	struct device *dev = &idxd->pdev->dev;
	struct device *conf_dev;
	int i, rc;

	idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *),
				     GFP_KERNEL, dev_to_node(dev));
	if (!idxd->engines)
		return -ENOMEM;

	for (i = 0; i < idxd->max_engines; i++) {
		engine = kzalloc_node(sizeof(*engine), GFP_KERNEL, dev_to_node(dev));
		if (!engine) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&engine->idxd_dev, IDXD_DEV_ENGINE);
		conf_dev = engine_confdev(engine);
		engine->id = i;
		engine->idxd = idxd;
		device_initialize(conf_dev);
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_engine_device_type;
		rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id);
		if (rc < 0) {
			put_device(conf_dev);
			goto err;
		}

		idxd->engines[i] = engine;
	}

	return 0;

err:
	while (--i >= 0) {
		engine = idxd->engines[i];
		conf_dev = engine_confdev(engine);
		put_device(conf_dev);
	}
	return rc;
}
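
/*
 * Groups tie engines and wqs together. On hardware at or below version 2
 * the traffic classes default to 1 per spec unless tc_override lets the
 * administrator configure them through sysfs.
 */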
static int idxd_setup_groups(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct device *conf_dev;
	struct idxd_group *group;
	int i, rc;

	idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *),
				    GFP_KERNEL, dev_to_node(dev));
	if (!idxd->groups)
		return -ENOMEM;

	for (i = 0; i < idxd->max_groups; i++) {
		group = kzalloc_node(sizeof(*group), GFP_KERNEL, dev_to_node(dev));
		if (!group) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&group->idxd_dev, IDXD_DEV_GROUP);
		conf_dev = group_confdev(group);
		group->id = i;
		group->idxd = idxd;
		device_initialize(conf_dev);
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_group_device_type;
		rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id);
		if (rc < 0) {
			put_device(conf_dev);
			goto err;
		}

		idxd->groups[i] = group;
		if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) {
			group->tc_a = 1;
			group->tc_b = 1;
		} else {
			group->tc_a = -1;
			group->tc_b = -1;
		}
		/*
		 * The default value is the same as the value of
		 * total read buffers in GRPCAP.
		 */
		group->rdbufs_allowed = idxd->max_rdbufs;
	}

	return 0;

err:
	while (--i >= 0) {
		group = idxd->groups[i];
		put_device(group_confdev(group));
	}
	return rc;
}
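
/* Undo idxd_setup_internals() in reverse order of setup. */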
static void idxd_cleanup_internals(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_groups; i++)
		put_device(group_confdev(idxd->groups[i]));
	for (i = 0; i < idxd->max_engines; i++)
		put_device(engine_confdev(idxd->engines[i]));
	for (i = 0; i < idxd->max_wqs; i++)
		put_device(wq_confdev(idxd->wqs[i]));
	destroy_workqueue(idxd->wq);
}
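
/*
 * The event log is optional (GENCAP.evl_support). Faults recorded there
 * get copied into idxd_evl_fault work items, hence the kmem cache sized
 * for the fault struct plus one log entry.
 */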
static int idxd_init_evl(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct idxd_evl *evl;

	if (idxd->hw.gen_cap.evl_support == 0)
		return 0;

	evl = kzalloc_node(sizeof(*evl), GFP_KERNEL, dev_to_node(dev));
	if (!evl)
		return -ENOMEM;

	spin_lock_init(&evl->lock);
	evl->size = IDXD_EVL_SIZE_MIN;

	idxd->evl_cache = kmem_cache_create(dev_name(idxd_confdev(idxd)),
					    sizeof(struct idxd_evl_fault) + evl_ent_size(idxd),
					    0, 0, NULL);
	if (!idxd->evl_cache) {
		kfree(evl);
		return -ENOMEM;
	}

	idxd->evl = evl;
	return 0;
}

static int idxd_setup_internals(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	init_waitqueue_head(&idxd->cmd_waitq);

	rc = idxd_setup_wqs(idxd);
	if (rc < 0)
		goto err_wqs;

	rc = idxd_setup_engines(idxd);
	if (rc < 0)
		goto err_engine;

	rc = idxd_setup_groups(idxd);
	if (rc < 0)
		goto err_group;

	idxd->wq = create_workqueue(dev_name(dev));
	if (!idxd->wq) {
		rc = -ENOMEM;
		goto err_wkq_create;
	}

	rc = idxd_init_evl(idxd);
	if (rc < 0)
		goto err_evl;

	return 0;

err_evl:
	destroy_workqueue(idxd->wq);
err_wkq_create:
	for (i = 0; i < idxd->max_groups; i++)
		put_device(group_confdev(idxd->groups[i]));
err_group:
	for (i = 0; i < idxd->max_engines; i++)
		put_device(engine_confdev(idxd->engines[i]));
err_engine:
	for (i = 0; i < idxd->max_wqs; i++)
		put_device(wq_confdev(idxd->wqs[i]));
err_wqs:
	return rc;
}
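
/*
 * IDXD_TABLE_OFFSET is a pair of 64-bit registers packing the offsets of
 * the group config, wq config, MSI-X permission, and perfmon register
 * tables, each expressed in multiples of IDXD_TABLE_MULT bytes.
 */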
static void idxd_read_table_offsets(struct idxd_device *idxd)
{
	union offsets_reg offsets;
	struct device *dev = &idxd->pdev->dev;

	offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
	offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + sizeof(u64));
	idxd->grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
	idxd->wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", idxd->wqcfg_offset);
	idxd->msix_perm_offset = offsets.msix_perm * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", idxd->msix_perm_offset);
	idxd->perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}
void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count)
{
	int i, j, nr;

	for (i = 0, nr = 0; i < count; i++) {
		for (j = 0; j < BITS_PER_LONG_LONG; j++) {
			if (val[i] & BIT(j))
				set_bit(nr, bmap);
			nr++;
		}
	}
}

static void idxd_read_caps(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	/* reading generic capabilities */
	idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
	dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);

	if (idxd->hw.gen_cap.cmd_cap) {
		idxd->hw.cmd_cap = ioread32(idxd->reg_base + IDXD_CMDCAP_OFFSET);
		dev_dbg(dev, "cmd_cap: %#x\n", idxd->hw.cmd_cap);
	}

	/* reading command capabilities */
	if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE))
		idxd->request_int_handles = true;

	idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
	dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
	idxd_set_max_batch_size(idxd->data->type, idxd, 1U << idxd->hw.gen_cap.max_batch_shift);
	dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
	if (idxd->hw.gen_cap.config_en)
		set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);

	/* reading group capabilities */
	idxd->hw.group_cap.bits =
		ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
	dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
	idxd->max_groups = idxd->hw.group_cap.num_groups;
	dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
	idxd->max_rdbufs = idxd->hw.group_cap.total_rdbufs;
	dev_dbg(dev, "max read buffers: %u\n", idxd->max_rdbufs);
	idxd->nr_rdbufs = idxd->max_rdbufs;

	/* read engine capabilities */
	idxd->hw.engine_cap.bits =
		ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
	dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
	idxd->max_engines = idxd->hw.engine_cap.num_engines;
	dev_dbg(dev, "max engines: %u\n", idxd->max_engines);

	/* read workqueue capabilities */
	idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
	dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
	idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
	dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
	idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
	dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
	idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN);
	dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size);

	/* reading operation capabilities */
	for (i = 0; i < 4; i++) {
		idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
				IDXD_OPCAP_OFFSET + i * sizeof(u64));
		dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
	}
	multi_u64_to_bmap(idxd->opcap_bmap, &idxd->hw.opcap.bits[0], 4);

	/* read iaa cap */
	if (idxd->data->type == IDXD_TYPE_IAX && idxd->hw.version >= DEVICE_VERSION_2)
		idxd->hw.iaa_cap.bits = ioread64(idxd->reg_base + IDXD_IAACAP_OFFSET);
}
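
/*
 * Allocate the idxd context and set up its conf_dev. On failure after
 * device_initialize(), the context must be released with put_device() so
 * the conf_dev ->release() callback frees the allocations.
 */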
static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
{
	struct device *dev = &pdev->dev;
	struct device *conf_dev;
	struct idxd_device *idxd;
	int rc;

	idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev));
	if (!idxd)
		return NULL;

	conf_dev = idxd_confdev(idxd);
	idxd->pdev = pdev;
	idxd->data = data;
	idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type);
	idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
	if (idxd->id < 0)
		return NULL;

	idxd->opcap_bmap = bitmap_zalloc_node(IDXD_MAX_OPCAP_BITS, GFP_KERNEL, dev_to_node(dev));
	if (!idxd->opcap_bmap) {
		ida_free(&idxd_ida, idxd->id);
		return NULL;
	}

	device_initialize(conf_dev);
	conf_dev->parent = dev;
	conf_dev->bus = &dsa_bus_type;
	conf_dev->type = idxd->data->dev_type;
	rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
	if (rc < 0) {
		put_device(conf_dev);
		return NULL;
	}

	spin_lock_init(&idxd->dev_lock);
	spin_lock_init(&idxd->cmd_lock);

	return idxd;
}
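
/*
 * Kernel-internal (system) PASID is not supported in this version of the
 * driver: idxd_enable_system_pasid() reports -EOPNOTSUPP, so probe falls
 * back to non-PASID kernel DMA. The disable side unbinds the SVA handle
 * an enable implementation would have stashed in idxd->sva.
 */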
static int idxd_enable_system_pasid(struct idxd_device *idxd)
{
	return -EOPNOTSUPP;
}

static void idxd_disable_system_pasid(struct idxd_device *idxd)
{
	iommu_sva_unbind_device(idxd->sva);
	idxd->sva = NULL;
}

static int idxd_probe(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	int rc;

	dev_dbg(dev, "%s entered and resetting device\n", __func__);
	rc = idxd_device_init_reset(idxd);
	if (rc < 0)
		return rc;
	dev_dbg(dev, "IDXD reset complete\n");

	if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
		if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA)) {
			dev_warn(dev, "Unable to turn on user SVA feature.\n");
		} else {
			set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);

			if (idxd_enable_system_pasid(idxd))
				dev_warn(dev, "No in-kernel DMA with PASID.\n");
			else
				set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
		}
	} else if (!sva) {
		dev_warn(dev, "User forced SVA off via module param.\n");
	}

	idxd_read_caps(idxd);
	idxd_read_table_offsets(idxd);

	rc = idxd_setup_internals(idxd);
	if (rc)
		goto err;

	/* If the configs are readonly, then load them from device */
	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
		dev_dbg(dev, "Loading RO device config\n");
		rc = idxd_device_load_config(idxd);
		if (rc < 0)
			goto err_config;
	}

	rc = idxd_setup_interrupts(idxd);
	if (rc)
		goto err_config;

	idxd->major = idxd_cdev_get_major(idxd);

	rc = perfmon_pmu_init(idxd);
	if (rc < 0)
		dev_warn(dev, "Failed to initialize perfmon. No PMU support: %d\n", rc);

	dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
	return 0;

err_config:
	idxd_cleanup_internals(idxd);
err:
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	if (device_user_pasid_enabled(idxd))
		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
	return rc;
}

static void idxd_cleanup(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;

	perfmon_pmu_remove(idxd);
	idxd_cleanup_interrupts(idxd);
	idxd_cleanup_internals(idxd);
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	if (device_user_pasid_enabled(idxd))
		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
}
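
/*
 * PCI probe: enable the function, map the MMIO control registers
 * (IDXD_MMIO_BAR), take the device through reset and capability
 * enumeration in idxd_probe(), then expose it through sysfs. The error
 * labels unwind in strict reverse order of setup.
 */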
static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;
	struct idxd_driver_data *data = (struct idxd_driver_data *)id->driver_data;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	dev_dbg(dev, "Alloc IDXD context\n");
	idxd = idxd_alloc(pdev, data);
	if (!idxd) {
		rc = -ENOMEM;
		goto err_idxd_alloc;
	}

	dev_dbg(dev, "Mapping BARs\n");
	idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
	if (!idxd->reg_base) {
		rc = -EIO;
		goto err_iomap;
	}

	dev_dbg(dev, "Set DMA masks\n");
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		goto err;

	dev_dbg(dev, "Set PCI master\n");
	pci_set_master(pdev);
	pci_set_drvdata(pdev, idxd);

	idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
	rc = idxd_probe(idxd);
	if (rc) {
		dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
		goto err;
	}

	rc = idxd_register_devices(idxd);
	if (rc) {
		dev_err(dev, "IDXD sysfs setup failed\n");
		goto err_dev_register;
	}

	rc = idxd_device_init_debugfs(idxd);
	if (rc)
		dev_warn(dev, "IDXD debugfs failed to setup\n");

	dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
		 idxd->hw.version);

	return 0;

err_dev_register:
	idxd_cleanup(idxd);
err:
	pci_iounmap(pdev, idxd->reg_base);
err_iomap:
	put_device(idxd_confdev(idxd));
err_idxd_alloc:
	pci_disable_device(pdev);
	return rc;
}
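
/**
 * idxd_wqs_quiesce - quiesce all enabled kernel-type wqs of a device
 * @idxd: idxd device being quiesced
 *
 * User-type wqs are left alone; only kernel submission paths need to
 * drain before shutdown or reset.
 */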
void idxd_wqs_quiesce(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		if (wq->state == IDXD_WQ_ENABLED && wq->type == IDXD_WQT_KERNEL)
			idxd_wq_quiesce(wq);
	}
}

static void idxd_shutdown(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	struct idxd_irq_entry *irq_entry;
	int rc;

	rc = idxd_device_disable(idxd);
	if (rc)
		dev_err(&pdev->dev, "Disabling device failed\n");

	irq_entry = &idxd->ie;
	synchronize_irq(irq_entry->vector);
	idxd_mask_error_interrupts(idxd);
	flush_workqueue(idxd->wq);
}

static void idxd_remove(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	struct idxd_irq_entry *irq_entry;

	idxd_unregister_devices(idxd);
	/*
	 * When ->release() is called for the idxd->conf_dev, it frees all the memory related
	 * to the idxd context. The driver still needs those bits in order to do the rest of
	 * the cleanup. However, we do need to unbind the idxd sub-driver. So take a ref
	 * on the device here to hold off the freeing while allowing the idxd sub-driver
	 * to unbind.
	 */
	get_device(idxd_confdev(idxd));
	device_unregister(idxd_confdev(idxd));
	idxd_shutdown(pdev);
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	idxd_device_remove_debugfs(idxd);

	irq_entry = idxd_get_ie(idxd, 0);
	free_irq(irq_entry->vector, irq_entry);
	pci_free_irq_vectors(pdev);
	pci_iounmap(pdev, idxd->reg_base);
	if (device_user_pasid_enabled(idxd))
		iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
	pci_disable_device(pdev);
	destroy_workqueue(idxd->wq);
	perfmon_pmu_remove(idxd);
	put_device(idxd_confdev(idxd));
}

static struct pci_driver idxd_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= idxd_pci_tbl,
	.probe		= idxd_pci_probe,
	.remove		= idxd_remove,
	.shutdown	= idxd_shutdown,
};
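
/*
 * Register the idxd sub-drivers (device, dmaengine, user) on the dsa bus
 * before the PCI driver, so that devices enumerated during PCI probe can
 * bind to them immediately.
 */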
static int __init idxd_init_module(void)
{
	int err;

	/*
	 * If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
	 * enumerating the device. We cannot utilize it.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_MOVDIR64B)) {
		pr_warn("idxd driver failed to load without MOVDIR64B.\n");
		return -ENODEV;
	}

	if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
		pr_warn("Platform does not have ENQCMD(S) support.\n");
	else
		support_enqcmd = true;

	perfmon_init();

	err = idxd_driver_register(&idxd_drv);
	if (err < 0)
		goto err_idxd_driver_register;

	err = idxd_driver_register(&idxd_dmaengine_drv);
	if (err < 0)
		goto err_idxd_dmaengine_driver_register;

	err = idxd_driver_register(&idxd_user_drv);
	if (err < 0)
		goto err_idxd_user_driver_register;

	err = idxd_cdev_register();
	if (err)
		goto err_cdev_register;

	err = idxd_init_debugfs();
	if (err)
		goto err_debugfs;

	err = pci_register_driver(&idxd_pci_driver);
	if (err)
		goto err_pci_register;

	return 0;

err_pci_register:
	idxd_remove_debugfs();
err_debugfs:
	idxd_cdev_remove();
err_cdev_register:
	idxd_driver_unregister(&idxd_user_drv);
err_idxd_user_driver_register:
	idxd_driver_unregister(&idxd_dmaengine_drv);
err_idxd_dmaengine_driver_register:
	idxd_driver_unregister(&idxd_drv);
err_idxd_driver_register:
	return err;
}
module_init(idxd_init_module);

static void __exit idxd_exit_module(void)
{
	idxd_driver_unregister(&idxd_user_drv);
	idxd_driver_unregister(&idxd_dmaengine_drv);
	idxd_driver_unregister(&idxd_drv);
	pci_unregister_driver(&idxd_pci_driver);
	idxd_cdev_remove();
	perfmon_exit();
	idxd_remove_debugfs();
}
module_exit(idxd_exit_module);