/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#ifndef _IDXD_H_
#define _IDXD_H_

#include <linux/sbitmap.h>
#include <linux/dmaengine.h>
#include <linux/percpu-rwsem.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <linux/ioasid.h>
#include <linux/bitmap.h>
#include <linux/perf_event.h>
#include <uapi/linux/idxd.h>
#include "registers.h"

#define IDXD_DRIVER_VERSION	"1.00"

extern struct kmem_cache *idxd_desc_pool;
extern bool tc_override;
enum idxd_dev_type {
	IDXD_DEV_NONE = -1,
	IDXD_DEV_DSA = 0,
	IDXD_DEV_IAX,
	IDXD_DEV_WQ,
	IDXD_DEV_GROUP,
	IDXD_DEV_ENGINE,
	IDXD_DEV_CDEV,
	IDXD_DEV_MAX_TYPE,
};

struct idxd_dev {
	struct device conf_dev;
	enum idxd_dev_type type;
};
#define IDXD_REG_TIMEOUT	50
#define IDXD_DRAIN_TIMEOUT	5000
enum idxd_type {
	IDXD_TYPE_UNKNOWN = -1,
	IDXD_TYPE_DSA = 0,
	IDXD_TYPE_IAX,
	IDXD_TYPE_MAX,
};
#define IDXD_NAME_SIZE		128
#define IDXD_PMU_EVENT_MAX	64

#define IDXD_ENQCMDS_RETRIES		32
#define IDXD_ENQCMDS_MAX_RETRIES	64
struct idxd_device_driver {
	const char *name;
	enum idxd_dev_type *type;
	int (*probe)(struct idxd_dev *idxd_dev);
	void (*remove)(struct idxd_dev *idxd_dev);
	struct device_driver drv;
};
extern struct idxd_device_driver dsa_drv;
extern struct idxd_device_driver idxd_drv;
extern struct idxd_device_driver idxd_dmaengine_drv;
extern struct idxd_device_driver idxd_user_drv;
#define INVALID_INT_HANDLE	-1
struct idxd_irq_entry {
	int id;
	int vector;
	struct llist_head pending_llist;
	struct list_head work_list;
	/*
	 * Lock to protect access between the irq thread processing
	 * completed descriptors and the irq thread processing error
	 * descriptors.
	 */
	spinlock_t list_lock;
	int int_handle;
	ioasid_t pasid;
};
struct idxd_group {
	struct idxd_dev idxd_dev;
	struct idxd_device *idxd;
	int id;
	int desc_progress_limit;
	int batch_progress_limit;
};
struct idxd_pmu {
	struct idxd_device *idxd;

	struct perf_event *event_list[IDXD_PMU_EVENT_MAX];
	int n_events;

	DECLARE_BITMAP(used_mask, IDXD_PMU_EVENT_MAX);

	struct pmu pmu;
	char name[IDXD_NAME_SIZE];

	int n_event_categories;

	bool per_counter_caps_supported;
	unsigned long supported_event_categories;

	unsigned long supported_filters;

	struct hlist_node cpuhp_node;
};
#define IDXD_MAX_PRIORITY	0xf
enum idxd_wq_state {
	IDXD_WQ_DISABLED = 0,
	IDXD_WQ_ENABLED,
};

enum idxd_wq_flag {
	WQ_FLAG_DEDICATED = 0,
	WQ_FLAG_BLOCK_ON_FAULT,
};

enum idxd_wq_type {
	IDXD_WQT_NONE = 0,
	IDXD_WQT_KERNEL,
	IDXD_WQT_USER,
};

struct idxd_cdev {
	struct idxd_wq *wq;
	struct cdev cdev;
	struct idxd_dev idxd_dev;
	int minor;
};
#define IDXD_ALLOCATED_BATCH_SIZE	128U
#define WQ_NAME_SIZE			1024
#define WQ_TYPE_SIZE			10

#define WQ_DEFAULT_QUEUE_DEPTH		16
#define WQ_DEFAULT_MAX_XFER		SZ_2M
#define WQ_DEFAULT_MAX_BATCH		32
enum idxd_op_type {
	IDXD_OP_BLOCK = 0,
	IDXD_OP_NONBLOCK = 1,
};

enum idxd_complete_type {
	IDXD_COMPLETE_NORMAL = 0,
	IDXD_COMPLETE_ABORT,
	IDXD_COMPLETE_DEV_FAIL,
};

struct idxd_dma_chan {
	struct dma_chan chan;
	struct idxd_wq *wq;
};
struct idxd_wq {
	/* Portal MMIO mapping; submissions rotate through it via portal_offset */
	void __iomem *portal;
	u32 portal_offset;
	unsigned int enqcmds_retries;
	/* Tracks in-flight kernel submissions so the wq can be quiesced */
	struct percpu_ref wq_active;
	struct completion wq_dead;
	struct completion wq_resurrect;
	struct idxd_dev idxd_dev;
	struct idxd_cdev *idxd_cdev;
	struct wait_queue_head err_queue;
	struct idxd_device *idxd;
	int id;
	struct idxd_irq_entry ie;
	enum idxd_wq_type type;
	struct idxd_group *group;
	int client_count;
	struct mutex wq_lock;	/* mutex for workqueue */
	enum idxd_wq_state state;
	unsigned long flags;
	unsigned long *opcap_bmap;

	struct dsa_hw_desc **hw_descs;
	int num_descs;
	/* DSA and IAX completion records overlay the same allocation */
	union {
		struct dsa_completion_record *compls;
		struct iax_completion_record *iax_compls;
	};
	dma_addr_t compls_addr;
	struct idxd_desc **descs;
	struct sbitmap_queue sbq;
	struct idxd_dma_chan *idxd_chan;
	char name[WQ_NAME_SIZE + 1];
	u64 max_xfer_bytes;
	u32 max_batch_size;
};
struct idxd_engine {
	struct idxd_dev idxd_dev;
	int id;
	struct idxd_group *group;
	struct idxd_device *idxd;
};
/* shadow registers */
struct idxd_hw {
	u32 version;
	union gen_cap_reg gen_cap;
	union wq_cap_reg wq_cap;
	union group_cap_reg group_cap;
	union engine_cap_reg engine_cap;
	union iaa_cap_reg iaa_cap;
};
enum idxd_device_state {
	IDXD_DEV_HALTED = -1,
	IDXD_DEV_DISABLED = 0,
	IDXD_DEV_ENABLED,
};

enum idxd_device_flag {
	IDXD_FLAG_CONFIGURABLE = 0,
	IDXD_FLAG_CMD_RUNNING,
	IDXD_FLAG_PASID_ENABLED,
	IDXD_FLAG_USER_PASID_ENABLED,
};
struct idxd_dma_dev {
	struct idxd_device *idxd;
	struct dma_device dma;
};
struct idxd_driver_data {
	const char *name_prefix;
	enum idxd_type type;
	struct device_type *dev_type;
	int compl_size;
	int align;
};
struct idxd_evl {
	/* Lock to protect event log access. */
	spinlock_t lock;
	/* Total size of event log = number of entries * entry size. */
	unsigned int log_size;
	/* The number of entries in the event log. */
	u16 size;
};
struct idxd_device {
	struct idxd_dev idxd_dev;
	struct idxd_driver_data *data;
	struct list_head list;
	struct idxd_hw hw;
	enum idxd_device_state state;
	unsigned long flags;
	int id;
	struct idxd_irq_entry ie;	/* misc irq, msix 0 */

	struct pci_dev *pdev;
	void __iomem *reg_base;

	spinlock_t dev_lock;	/* spinlock for device */
	spinlock_t cmd_lock;	/* spinlock for device commands */
	struct completion *cmd_done;
	struct idxd_group **groups;
	struct idxd_wq **wqs;
	struct idxd_engine **engines;

	struct iommu_sva *sva;

	bool request_int_handles;

	u32 msix_perm_offset;

	u64 max_xfer_bytes;
	u32 max_batch_size;
	int nr_rdbufs;		/* non-reserved read buffers */
	unsigned int wqcfg_size;
	unsigned long *wq_enable_map;

	union sw_err_reg sw_err;
	wait_queue_head_t cmd_waitq;

	struct idxd_dma_dev *idxd_dma;
	struct workqueue_struct *wq;
	struct work_struct work;

	struct idxd_pmu *idxd_pmu;

	unsigned long *opcap_bmap;
	struct idxd_evl *evl;

	struct dentry *dbgfs_dir;
	struct dentry *dbgfs_evl_file;
};
static inline unsigned int evl_ent_size(struct idxd_device *idxd)
{
	return idxd->hw.gen_cap.evl_support ?
	       (32 * (1 << idxd->hw.gen_cap.evl_support)) : 0;
}

static inline unsigned int evl_size(struct idxd_device *idxd)
{
	return idxd->evl->size * evl_ent_size(idxd);
}
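
/*
 * Worked example (illustrative): gen_cap.evl_support == 2 encodes an entry
 * size of 32 * (1 << 2) = 128 bytes, so an event log configured with
 * evl->size == 64 entries occupies evl_size() == 64 * 128 = 8192 bytes.
 */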
/* IDXD software descriptor */
struct idxd_desc {
	union {
		struct dsa_hw_desc *hw;
		struct iax_hw_desc *iax_hw;
	};
	dma_addr_t desc_dma;
	union {
		struct dsa_completion_record *completion;
		struct iax_completion_record *iax_completion;
	};
	dma_addr_t compl_dma;
	struct dma_async_tx_descriptor txd;
	struct llist_node llnode;
	struct list_head list;
	int id;
	int cpu;
	struct idxd_wq *wq;
};
/*
 * This is a software-defined error code for the completion status. It
 * overloads a value that can never appear in a hardware completion status
 * and otherwise shows up only in the SWERR register.
 */
enum idxd_completion_status {
	IDXD_COMP_DESC_ABORT = 0xff,
};
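
/*
 * The driver itself writes this value into a descriptor's completion record
 * status when it aborts the descriptor in software, so completion handlers
 * can tell a software abort apart from any status reported by hardware.
 */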
#define idxd_confdev(idxd) &idxd->idxd_dev.conf_dev
#define wq_confdev(wq) &wq->idxd_dev.conf_dev
#define engine_confdev(engine) &engine->idxd_dev.conf_dev
#define group_confdev(group) &group->idxd_dev.conf_dev
#define cdev_dev(cdev) &cdev->idxd_dev.conf_dev

#define confdev_to_idxd_dev(dev) container_of(dev, struct idxd_dev, conf_dev)
#define idxd_dev_to_idxd(idxd_dev) container_of(idxd_dev, struct idxd_device, idxd_dev)
#define idxd_dev_to_wq(idxd_dev) container_of(idxd_dev, struct idxd_wq, idxd_dev)
static inline struct idxd_device *confdev_to_idxd(struct device *dev)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	return idxd_dev_to_idxd(idxd_dev);
}

static inline struct idxd_wq *confdev_to_wq(struct device *dev)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	return idxd_dev_to_wq(idxd_dev);
}

static inline struct idxd_engine *confdev_to_engine(struct device *dev)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	return container_of(idxd_dev, struct idxd_engine, idxd_dev);
}

static inline struct idxd_group *confdev_to_group(struct device *dev)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	return container_of(idxd_dev, struct idxd_group, idxd_dev);
}

static inline struct idxd_cdev *dev_to_cdev(struct device *dev)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	return container_of(idxd_dev, struct idxd_cdev, idxd_dev);
}
static inline void idxd_dev_set_type(struct idxd_dev *idev, int type)
{
	if (type >= IDXD_DEV_MAX_TYPE) {
		idev->type = IDXD_DEV_NONE;
		return;
	}

	idev->type = type;
}
static inline struct idxd_irq_entry *idxd_get_ie(struct idxd_device *idxd, int idx)
{
	return (idx == 0) ? &idxd->ie : &idxd->wqs[idx - 1]->ie;
}

static inline struct idxd_wq *ie_to_wq(struct idxd_irq_entry *ie)
{
	return container_of(ie, struct idxd_wq, ie);
}

static inline struct idxd_device *ie_to_idxd(struct idxd_irq_entry *ie)
{
	return container_of(ie, struct idxd_device, ie);
}
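
/*
 * Example (follows directly from idxd_get_ie() above): MSI-X vector 0 is the
 * device's misc interrupt, so idxd_get_ie(idxd, 0) returns &idxd->ie, while
 * idxd_get_ie(idxd, 1) returns the first wq's entry, &idxd->wqs[0]->ie.
 */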
extern struct bus_type dsa_bus_type;

extern bool support_enqcmd;
extern struct ida idxd_ida;
extern struct device_type dsa_device_type;
extern struct device_type iax_device_type;
extern struct device_type idxd_wq_device_type;
extern struct device_type idxd_engine_device_type;
extern struct device_type idxd_group_device_type;
static inline bool is_dsa_dev(struct idxd_dev *idxd_dev)
{
	return idxd_dev->type == IDXD_DEV_DSA;
}

static inline bool is_iax_dev(struct idxd_dev *idxd_dev)
{
	return idxd_dev->type == IDXD_DEV_IAX;
}

static inline bool is_idxd_dev(struct idxd_dev *idxd_dev)
{
	return is_dsa_dev(idxd_dev) || is_iax_dev(idxd_dev);
}

static inline bool is_idxd_wq_dev(struct idxd_dev *idxd_dev)
{
	return idxd_dev->type == IDXD_DEV_WQ;
}

static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
{
	if (wq->type == IDXD_WQT_KERNEL && strcmp(wq->name, "dmaengine") == 0)
		return true;
	return false;
}

static inline bool is_idxd_wq_user(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_USER;
}

static inline bool is_idxd_wq_kernel(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_KERNEL;
}
static inline bool wq_dedicated(struct idxd_wq *wq)
{
	return test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}

static inline bool wq_shared(struct idxd_wq *wq)
{
	return !test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}

static inline bool device_pasid_enabled(struct idxd_device *idxd)
{
	return test_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
}

static inline bool device_user_pasid_enabled(struct idxd_device *idxd)
{
	return test_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
}

static inline bool wq_pasid_enabled(struct idxd_wq *wq)
{
	return (is_idxd_wq_kernel(wq) && device_pasid_enabled(wq->idxd)) ||
	       (is_idxd_wq_user(wq) && device_user_pasid_enabled(wq->idxd));
}

static inline bool wq_shared_supported(struct idxd_wq *wq)
{
	return (support_enqcmd && wq_pasid_enabled(wq));
}
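
/*
 * As the check above encodes, a shared wq is only usable when the CPU can
 * issue ENQCMD/ENQCMDS (support_enqcmd) and the wq operates with a PASID,
 * since shared submissions must carry a PASID to identify the client.
 */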
enum idxd_portal_prot {
	IDXD_PORTAL_UNLIMITED = 0,
	IDXD_PORTAL_LIMITED,
};

enum idxd_interrupt_type {
	IDXD_IRQ_MSIX = 0,
	IDXD_IRQ_IMS,
};
static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot)
{
	return prot * 0x1000;
}

static inline int idxd_get_wq_portal_full_offset(int wq_id,
						 enum idxd_portal_prot prot)
{
	return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
}
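
/*
 * Worked example (illustrative, assuming 4KB pages): each wq owns four
 * portal pages, so wq_id 1 with IDXD_PORTAL_LIMITED resolves to
 * ((1 * 4) << 12) + 0x1000 = 0x5000 from the start of the portal BAR.
 */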
#define IDXD_PORTAL_MASK	(PAGE_SIZE - 1)
/*
 * Even though this function can be accessed by multiple threads, it is safe to use.
 * At worst the address gets used more than once before it gets incremented. We don't
 * hit a threshold until iops reaches many millions per second, so the occasional
 * reuse of the same address is tolerable compared to using an atomic variable. This
 * is safe on a system that has atomic load/store for 32bit integers. Given that this
 * is an Intel iEP device, that should not be a problem.
 */
static inline void __iomem *idxd_wq_portal_addr(struct idxd_wq *wq)
{
	int ofs = wq->portal_offset;

	wq->portal_offset = (ofs + sizeof(struct dsa_raw_desc)) & IDXD_PORTAL_MASK;
	return wq->portal + ofs;
}
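
/*
 * Usage sketch (illustrative only, not the driver's actual submit path):
 * grab the next portal slot and post a descriptor with idxd_enqcmds(),
 * declared below, which retries internally while the device reports busy:
 *
 *	void __iomem *portal = idxd_wq_portal_addr(wq);
 *	int rc = idxd_enqcmds(wq, portal, desc->hw);
 *
 * A negative rc means the wq was still full after wq->enqcmds_retries
 * attempts.
 */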
static inline void idxd_wq_get(struct idxd_wq *wq)
{
	wq->client_count++;
}

static inline void idxd_wq_put(struct idxd_wq *wq)
{
	wq->client_count--;
}

static inline int idxd_wq_refcount(struct idxd_wq *wq)
{
	return wq->client_count;
}
/*
 * Intel IAA does not support batch processing.
 * The device's max batch size, the wq's max batch size, and the wqcfg's
 * max batch shift must always be 0 on IAA.
 */
static inline void idxd_set_max_batch_size(int idxd_type, struct idxd_device *idxd,
					   u32 max_batch_size)
{
	if (idxd_type == IDXD_TYPE_IAX)
		idxd->max_batch_size = 0;
	else
		idxd->max_batch_size = max_batch_size;
}

static inline void idxd_wq_set_max_batch_size(int idxd_type, struct idxd_wq *wq,
					      u32 max_batch_size)
{
	if (idxd_type == IDXD_TYPE_IAX)
		wq->max_batch_size = 0;
	else
		wq->max_batch_size = max_batch_size;
}

static inline void idxd_wqcfg_set_max_batch_shift(int idxd_type, union wqcfg *wqcfg,
						  u32 max_batch_shift)
{
	if (idxd_type == IDXD_TYPE_IAX)
		wqcfg->max_batch_shift = 0;
	else
		wqcfg->max_batch_shift = max_batch_shift;
}
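
/*
 * Example (illustrative): on IAA these helpers pin the limits to 0 no matter
 * what the caller passes, while on DSA the value goes through unchanged,
 * e.g. idxd_set_max_batch_size(IDXD_TYPE_DSA, idxd, 32) sets
 * idxd->max_batch_size to 32.
 */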
int __must_check __idxd_driver_register(struct idxd_device_driver *idxd_drv,
					struct module *module, const char *mod_name);
#define idxd_driver_register(driver) \
	__idxd_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)

void idxd_driver_unregister(struct idxd_device_driver *idxd_drv);

#define module_idxd_driver(__idxd_driver) \
	module_driver(__idxd_driver, idxd_driver_register, idxd_driver_unregister)
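
/*
 * Registration sketch (hypothetical names): a sub-driver lists the idxd_dev
 * types it binds to, terminated by IDXD_DEV_NONE, and registers itself with
 * module_idxd_driver():
 *
 *	static enum idxd_dev_type my_dev_types[] = {
 *		IDXD_DEV_WQ,
 *		IDXD_DEV_NONE,
 *	};
 *
 *	static struct idxd_device_driver my_drv = {
 *		.name = "my_drv",
 *		.type = my_dev_types,
 *		.probe = my_probe,
 *		.remove = my_remove,
 *	};
 *	module_idxd_driver(my_drv);
 */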
int idxd_register_bus_type(void);
void idxd_unregister_bus_type(void);
int idxd_register_devices(struct idxd_device *idxd);
void idxd_unregister_devices(struct idxd_device *idxd);
int idxd_register_driver(void);
void idxd_unregister_driver(void);
void idxd_wqs_quiesce(struct idxd_device *idxd);
bool idxd_queue_int_handle_resubmit(struct idxd_desc *desc);
void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count);
/* device interrupt control */
irqreturn_t idxd_misc_thread(int vec, void *data);
irqreturn_t idxd_wq_thread(int irq, void *data);
void idxd_mask_error_interrupts(struct idxd_device *idxd);
void idxd_unmask_error_interrupts(struct idxd_device *idxd);
/* device control */
int idxd_register_idxd_drv(void);
void idxd_unregister_idxd_drv(void);
int idxd_device_drv_probe(struct idxd_dev *idxd_dev);
void idxd_device_drv_remove(struct idxd_dev *idxd_dev);
int drv_enable_wq(struct idxd_wq *wq);
void drv_disable_wq(struct idxd_wq *wq);
int idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd);
void idxd_device_reset(struct idxd_device *idxd);
void idxd_device_clear_state(struct idxd_device *idxd);
int idxd_device_config(struct idxd_device *idxd);
void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid);
int idxd_device_load_config(struct idxd_device *idxd);
int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle,
				   enum idxd_interrupt_type irq_type);
int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
				   enum idxd_interrupt_type irq_type);
/* work queue control */
void idxd_wqs_unmap_portal(struct idxd_device *idxd);
int idxd_wq_alloc_resources(struct idxd_wq *wq);
void idxd_wq_free_resources(struct idxd_wq *wq);
int idxd_wq_enable(struct idxd_wq *wq);
int idxd_wq_disable(struct idxd_wq *wq, bool reset_config);
void idxd_wq_drain(struct idxd_wq *wq);
void idxd_wq_reset(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);
int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid);
int idxd_wq_disable_pasid(struct idxd_wq *wq);
void __idxd_wq_quiesce(struct idxd_wq *wq);
void idxd_wq_quiesce(struct idxd_wq *wq);
int idxd_wq_init_percpu_ref(struct idxd_wq *wq);
void idxd_wq_free_irq(struct idxd_wq *wq);
int idxd_wq_request_irq(struct idxd_wq *wq);
/* submission */
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype);
void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);
int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc);
/* dmaengine */
int idxd_register_dma_device(struct idxd_device *idxd);
void idxd_unregister_dma_device(struct idxd_device *idxd);
void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
void idxd_dma_complete_txd(struct idxd_desc *desc,
			   enum idxd_complete_type comp_type, bool free_desc);
/* cdev */
int idxd_cdev_register(void);
void idxd_cdev_remove(void);
int idxd_cdev_get_major(struct idxd_device *idxd);
int idxd_wq_add_cdev(struct idxd_wq *wq);
void idxd_wq_del_cdev(struct idxd_wq *wq);
/* perfmon */
#if IS_ENABLED(CONFIG_INTEL_IDXD_PERFMON)
int perfmon_pmu_init(struct idxd_device *idxd);
void perfmon_pmu_remove(struct idxd_device *idxd);
void perfmon_counter_overflow(struct idxd_device *idxd);
void perfmon_init(void);
void perfmon_exit(void);
#else
static inline int perfmon_pmu_init(struct idxd_device *idxd) { return 0; }
static inline void perfmon_pmu_remove(struct idxd_device *idxd) {}
static inline void perfmon_counter_overflow(struct idxd_device *idxd) {}
static inline void perfmon_init(void) {}
static inline void perfmon_exit(void) {}
#endif
/* debugfs */
int idxd_device_init_debugfs(struct idxd_device *idxd);
void idxd_device_remove_debugfs(struct idxd_device *idxd);
int idxd_init_debugfs(void);
void idxd_remove_debugfs(void);

#endif