// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights reserved. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/sched/task.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

struct idxd_cdev_context {
	const char *name;
	dev_t devt;
	struct ida minor_ida;
};

/*
 * ictx is an array based off of accelerator types. enum idxd_type
 * is used as index
 */
static struct idxd_cdev_context ictx[IDXD_TYPE_MAX] = {
	{ .name = "dsa" },
	{ .name = "iax" }
};

struct idxd_user_context {
	struct idxd_wq *wq;
	struct task_struct *task;
	unsigned int pasid;
	unsigned int flags;
	struct iommu_sva *sva;
};

static void idxd_cdev_dev_release(struct device *dev)
{
	struct idxd_cdev *idxd_cdev = dev_to_cdev(dev);
	struct idxd_cdev_context *cdev_ctx;
	struct idxd_wq *wq = idxd_cdev->wq;

	cdev_ctx = &ictx[wq->idxd->data->type];
	ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
	kfree(idxd_cdev);
}

static struct device_type idxd_cdev_device_type = {
	.name = "idxd_cdev",
	.release = idxd_cdev_dev_release,
};

static inline struct idxd_cdev *inode_idxd_cdev(struct inode *inode)
{
	struct cdev *cdev = inode->i_cdev;

	return container_of(cdev, struct idxd_cdev, cdev);
}

static inline struct idxd_wq *inode_wq(struct inode *inode)
{
	struct idxd_cdev *idxd_cdev = inode_idxd_cdev(inode);

	return idxd_cdev->wq;
}

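/*
 * Open of a user WQ cdev. Allocates a per-open user context and, when
 * PASID support is enabled, binds the opener's address space to the
 * device via iommu_sva_bind_device() so descriptors can reference user
 * virtual addresses. A dedicated WQ also has the PASID programmed into
 * it, which is why it admits only a single opener.
 */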
static int idxd_cdev_open(struct inode *inode, struct file *filp)
{
	struct idxd_user_context *ctx;
	struct idxd_device *idxd;
	struct idxd_wq *wq;
	struct device *dev;
	int rc = 0;
	struct iommu_sva *sva;
	unsigned int pasid;

	wq = inode_wq(inode);
	idxd = wq->idxd;
	dev = &idxd->pdev->dev;

	dev_dbg(dev, "%s called: %d\n", __func__, idxd_wq_refcount(wq));

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&wq->wq_lock);

	/* A dedicated WQ admits only one opener at a time. */
	if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq)) {
		rc = -EBUSY;
		goto failed;
	}

	ctx->wq = wq;
	filp->private_data = ctx;

	if (device_user_pasid_enabled(idxd)) {
		sva = iommu_sva_bind_device(dev, current->mm);
		if (IS_ERR(sva)) {
			rc = PTR_ERR(sva);
			dev_err(dev, "pasid allocation failed: %d\n", rc);
			goto failed;
		}

		pasid = iommu_sva_get_pasid(sva);
		if (pasid == IOMMU_PASID_INVALID) {
			iommu_sva_unbind_device(sva);
			rc = -EINVAL;
			goto failed;
		}

		ctx->sva = sva;
		ctx->pasid = pasid;

		if (wq_dedicated(wq)) {
			rc = idxd_wq_set_pasid(wq, pasid);
			if (rc < 0) {
				iommu_sva_unbind_device(sva);
				dev_err(dev, "wq set pasid failed: %d\n", rc);
				goto failed;
			}
		}
	}

	idxd_wq_get(wq);
	mutex_unlock(&wq->wq_lock);
	return 0;

failed:
	mutex_unlock(&wq->wq_lock);
	kfree(ctx);
	return rc;
}

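/*
 * Release of a user context: drain any work still in flight for this
 * context's PASID before unbinding SVA, then drop the WQ reference
 * taken at open.
 */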
static int idxd_cdev_release(struct inode *node, struct file *filep)
{
	struct idxd_user_context *ctx = filep->private_data;
	struct idxd_wq *wq = ctx->wq;
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int rc;

	dev_dbg(dev, "%s called\n", __func__);
	filep->private_data = NULL;

	/* Wait for in-flight operations to complete. */
	if (wq_shared(wq)) {
		idxd_device_drain_pasid(idxd, ctx->pasid);
	} else {
		if (device_user_pasid_enabled(idxd)) {
			/* The wq disable in the disable pasid function will drain the wq */
			rc = idxd_wq_disable_pasid(wq);
			if (rc < 0)
				dev_err(dev, "wq disable pasid failed.\n");
		} else {
			idxd_wq_drain(wq);
		}
	}

	if (ctx->sva)
		iommu_sva_unbind_device(ctx->sva);
	kfree(ctx);
	mutex_lock(&wq->wq_lock);
	idxd_wq_put(wq);
	mutex_unlock(&wq->wq_lock);
	return 0;
}

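/* The cdev maps exactly one page of WQ portal; reject anything larger. */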
static int check_vma(struct idxd_wq *wq, struct vm_area_struct *vma,
		     const char *func)
{
	struct device *dev = &wq->idxd->pdev->dev;

	if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
		dev_info_ratelimited(dev,
				     "%s: %s: mapping too large: %lu\n",
				     current->comm, func,
				     vma->vm_end - vma->vm_start);
		return -EINVAL;
	}

	return 0;
}

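/*
 * Map the WQ's limited submission portal as a single uncached page.
 * Userspace submits descriptors by writing them to this page, e.g. with
 * MOVDIR64B for a dedicated WQ or ENQCMD for a shared WQ.
 *
 * A minimal userspace sketch (illustrative only; error handling and the
 * descriptor setup from <uapi/linux/idxd.h> are elided, and the node
 * name assumes DSA device 0, WQ 0):
 *
 *	int fd = open("/dev/dsa/wq0.0", O_RDWR);
 *	void *portal = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED, fd, 0);
 *	struct dsa_hw_desc desc = { 0 };
 *	// ... fill in desc ...
 *	_movdir64b(portal, &desc);	// dedicated WQ submission
 */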
static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct idxd_user_context *ctx = filp->private_data;
	struct idxd_wq *wq = ctx->wq;
	struct idxd_device *idxd = wq->idxd;
	struct pci_dev *pdev = idxd->pdev;
	phys_addr_t base = pci_resource_start(pdev, IDXD_WQ_BAR);
	unsigned long pfn;
	int rc;

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	rc = check_vma(wq, vma, __func__);
	if (rc < 0)
		return rc;

	vma->vm_flags |= VM_DONTCOPY;
	pfn = (base + idxd_get_wq_portal_full_offset(wq->id,
				IDXD_PORTAL_LIMITED)) >> PAGE_SHIFT;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_private_data = ctx;

	return io_remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
			vma->vm_page_prot);
}

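/*
 * poll() reports EPOLLIN once the device has latched a software error
 * (idxd->sw_err), waking anyone waiting on the WQ's error queue.
 */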
static __poll_t idxd_cdev_poll(struct file *filp,
			       struct poll_table_struct *wait)
{
	struct idxd_user_context *ctx = filp->private_data;
	struct idxd_wq *wq = ctx->wq;
	struct idxd_device *idxd = wq->idxd;
	__poll_t out = 0;

	poll_wait(filp, &wq->err_queue, wait);
	spin_lock(&idxd->dev_lock);
	if (idxd->sw_err.valid)
		out = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&idxd->dev_lock);

	return out;
}

static const struct file_operations idxd_cdev_fops = {
	.owner = THIS_MODULE,
	.open = idxd_cdev_open,
	.release = idxd_cdev_release,
	.mmap = idxd_cdev_mmap,
	.poll = idxd_cdev_poll,
};

int idxd_cdev_get_major(struct idxd_device *idxd)
{
	return MAJOR(ictx[idxd->data->type].devt);
}

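/*
 * Create the cdev node for a WQ. Nodes are named
 * <prefix>/wq<device>.<wq>, e.g. /dev/dsa/wq0.0 for WQ 0 of DSA
 * device 0.
 */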
int idxd_wq_add_cdev(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct idxd_cdev *idxd_cdev;
	struct cdev *cdev;
	struct device *dev;
	struct idxd_cdev_context *cdev_ctx;
	int rc, minor;

	idxd_cdev = kzalloc(sizeof(*idxd_cdev), GFP_KERNEL);
	if (!idxd_cdev)
		return -ENOMEM;

	idxd_cdev->idxd_dev.type = IDXD_DEV_CDEV;
	idxd_cdev->wq = wq;
	cdev = &idxd_cdev->cdev;
	dev = cdev_dev(idxd_cdev);
	cdev_ctx = &ictx[wq->idxd->data->type];
	minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
	if (minor < 0) {
		kfree(idxd_cdev);
		return minor;
	}
	idxd_cdev->minor = minor;

	device_initialize(dev);
	dev->parent = wq_confdev(wq);
	dev->bus = &dsa_bus_type;
	dev->type = &idxd_cdev_device_type;
	dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);

	rc = dev_set_name(dev, "%s/wq%u.%u", idxd->data->name_prefix, idxd->id, wq->id);
	if (rc < 0)
		goto err;

	wq->idxd_cdev = idxd_cdev;
	cdev_init(cdev, &idxd_cdev_fops);
	rc = cdev_device_add(cdev, dev);
	if (rc) {
		dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc);
		goto err;
	}

	return 0;

err:
	put_device(dev);
	wq->idxd_cdev = NULL;
	return rc;
}

void idxd_wq_del_cdev(struct idxd_wq *wq)
{
	struct idxd_cdev *idxd_cdev;

	idxd_cdev = wq->idxd_cdev;
	wq->idxd_cdev = NULL;
	cdev_device_del(&idxd_cdev->cdev, cdev_dev(idxd_cdev));
	put_device(cdev_dev(idxd_cdev));
}

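/*
 * Probe for the "user" WQ sub-driver, run when a WQ is bound to it
 * (typically through sysfs or the accel-config tool). Enables the WQ
 * and publishes its cdev.
 */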
static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
{
	struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
	struct idxd_device *idxd = wq->idxd;
	int rc;

	if (idxd->state != IDXD_DEV_ENABLED)
		return -ENXIO;

	/*
	 * User-type WQs are enabled only when SVA is enabled, for two reasons:
	 *   - Without an IOMMU, or with the IOMMU in passthrough mode and no
	 *     SVA, userspace could access physical addresses directly through
	 *     the WQ.
	 *   - The IDXD cdev driver provides no way to pin user pages or to
	 *     translate addresses from user VA to IOVA or PA without IOMMU
	 *     SVA, so an application would have no way to instruct the device
	 *     to perform DMA. That would make the cdev unusable for normal
	 *     applications.
	 */
	if (!device_user_pasid_enabled(idxd)) {
		idxd->cmd_status = IDXD_SCMD_WQ_USER_NO_IOMMU;
		dev_dbg(&idxd->pdev->dev,
			"User type WQ cannot be enabled without SVA.\n");

		return -EOPNOTSUPP;
	}

	mutex_lock(&wq->wq_lock);
	wq->type = IDXD_WQT_USER;
	rc = drv_enable_wq(wq);
	if (rc < 0)
		goto err;

	rc = idxd_wq_add_cdev(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_CDEV_ERR;
		goto err_cdev;
	}

	idxd->cmd_status = 0;
	mutex_unlock(&wq->wq_lock);
	return 0;

err_cdev:
	drv_disable_wq(wq);
err:
	wq->type = IDXD_WQT_NONE;
	mutex_unlock(&wq->wq_lock);
	return rc;
}

static void idxd_user_drv_remove(struct idxd_dev *idxd_dev)
{
	struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);

	mutex_lock(&wq->wq_lock);
	idxd_wq_del_cdev(wq);
	drv_disable_wq(wq);
	wq->type = IDXD_WQT_NONE;
	mutex_unlock(&wq->wq_lock);
}

static enum idxd_dev_type dev_types[] = {
	IDXD_DEV_WQ,
	IDXD_DEV_NONE,
};

struct idxd_device_driver idxd_user_drv = {
	.probe = idxd_user_drv_probe,
	.remove = idxd_user_drv_remove,
	.name = "user",
	.type = dev_types,
};
EXPORT_SYMBOL_GPL(idxd_user_drv);

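/*
 * Module-init registration: reserve a char device region and a minor
 * IDA per accelerator type, so all WQ cdevs of a type share one major.
 */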
int idxd_cdev_register(void)
{
	int rc, i;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		ida_init(&ictx[i].minor_ida);
		rc = alloc_chrdev_region(&ictx[i].devt, 0, MINORMASK,
					 ictx[i].name);
		if (rc)
			goto err_free_chrdev_region;
	}

	return 0;

err_free_chrdev_region:
	/* Unwind only the regions allocated so far. */
	for (i--; i >= 0; i--)
		unregister_chrdev_region(ictx[i].devt, MINORMASK);

	return rc;
}

void idxd_cdev_remove(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		unregister_chrdev_region(ictx[i].devt, MINORMASK);
		ida_destroy(&ictx[i].minor_ida);
	}
}