// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Intel Corporation.
 * Copyright (C) 2020 Red Hat, Inc.
 *
 * Author: Tiwei Bie <tiwei.bie@intel.com>
 *         Jason Wang <jasowang@redhat.com>
 *
 * Thanks to Michael S. Tsirkin for the valuable comments and
 * suggestions, and thanks to Cunming Liang and Zhihong Wang for
 * all their support.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/iommu.h>
#include <linux/uuid.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>

#include "vhost.h"

/* Currently, only network backend w/o multiqueue is supported. */
#define VHOST_VDPA_VQ_MAX	2

#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)

struct vhost_vdpa {
	struct vhost_dev vdev;
	struct iommu_domain *domain;
	struct vhost_virtqueue *vqs;
	struct completion completion;
	struct vdpa_device *vdpa;
	struct device dev;
	struct cdev cdev;
	atomic_t opened;
	int nvqs;
	int virtio_id;
	int minor;
	struct eventfd_ctx *config_ctx;
};

static DEFINE_IDA(vhost_vdpa_ida);

static dev_t vhost_vdpa_major;

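/* vhost work item: relay a virtqueue kick from the guest to the vDPA device. */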
static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
	const struct vdpa_config_ops *ops = v->vdpa->config;

	ops->kick_vq(v->vdpa, vq - v->vqs);
}

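/*
 * Virtqueue interrupt callback, invoked by the vDPA driver:
 * signal the call eventfd, if one is installed.
 */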
static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
{
	struct vhost_virtqueue *vq = private;
	struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;

	if (call_ctx)
		eventfd_signal(call_ctx, 1);

	return IRQ_HANDLED;
}

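/* Config-change interrupt callback: signal the config eventfd, if one is set. */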
static irqreturn_t vhost_vdpa_config_cb(void *private)
{
	struct vhost_vdpa *v = private;
	struct eventfd_ctx *config_ctx = v->config_ctx;

	if (config_ctx)
		eventfd_signal(config_ctx, 1);

	return IRQ_HANDLED;
}

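/*
 * Register the virtqueue's interrupt as an irq bypass producer, keyed
 * by the call eventfd, so that a matching consumer (e.g. a KVM irqfd)
 * can take over interrupt delivery directly.
 */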
static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];
	const struct vdpa_config_ops *ops = v->vdpa->config;
	struct vdpa_device *vdpa = v->vdpa;
	int ret, irq;

	if (!ops->get_vq_irq)
		return;

	irq = ops->get_vq_irq(vdpa, qid);
	spin_lock(&vq->call_ctx.ctx_lock);
	irq_bypass_unregister_producer(&vq->call_ctx.producer);
	if (!vq->call_ctx.ctx || irq < 0) {
		spin_unlock(&vq->call_ctx.ctx_lock);
		return;
	}

	vq->call_ctx.producer.token = vq->call_ctx.ctx;
	vq->call_ctx.producer.irq = irq;
	ret = irq_bypass_register_producer(&vq->call_ctx.producer);
	spin_unlock(&vq->call_ctx.ctx_lock);
}

static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];

	spin_lock(&vq->call_ctx.ctx_lock);
	irq_bypass_unregister_producer(&vq->call_ctx.producer);
	spin_unlock(&vq->call_ctx.ctx_lock);
}

static void vhost_vdpa_reset(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;

	vdpa_reset(vdpa);
}

static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 device_id;

	device_id = ops->get_device_id(vdpa);

	if (copy_to_user(argp, &device_id, sizeof(device_id)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status;

	status = ops->get_status(vdpa);

	if (copy_to_user(statusp, &status, sizeof(status)))
		return -EFAULT;

	return 0;
}

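/*
 * Status writes may toggle VIRTIO_CONFIG_S_DRIVER_OK; set up or tear
 * down the per-virtqueue irq bypass producers on those transitions.
 */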
static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status, status_old;
	int nvqs = v->nvqs;
	u16 i;

	if (copy_from_user(&status, statusp, sizeof(status)))
		return -EFAULT;

	status_old = ops->get_status(vdpa);

	/*
	 * Userspace shouldn't remove status bits unless resetting
	 * the status to 0.
	 */
	if (status != 0 && (status_old & ~status) != 0)
		return -EINVAL;

	ops->set_status(vdpa, status);

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_setup_vq_irq(v, i);

	if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_unsetup_vq_irq(v, i);

	return 0;
}

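/* Check that a config space access fits within the device's config size. */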
static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
				      struct vhost_vdpa_config *c)
{
	long size = 0;

	switch (v->virtio_id) {
	case VIRTIO_ID_NET:
		size = sizeof(struct virtio_net_config);
		break;
	}

	if (c->len == 0)
		return -EINVAL;

	if (c->len > size - c->off)
		return -E2BIG;

	return 0;
}

static long vhost_vdpa_get_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	vdpa_get_config(vdpa, config.off, buf, config.len);

	if (copy_to_user(c->buf, buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_set_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, c->buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	ops->set_config(vdpa, config.off, buf, config.len);

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	features = ops->get_features(vdpa);

	if (copy_to_user(featurep, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	/*
	 * It's not allowed to change the features after they have
	 * been negotiated.
	 */
	if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
		return -EBUSY;

	if (copy_from_user(&features, featurep, sizeof(features)))
		return -EFAULT;

	if (vdpa_set_features(vdpa, features))
		return -EINVAL;

	return 0;
}

static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u16 num;

	num = ops->get_vq_num_max(vdpa);

	if (copy_to_user(argp, &num, sizeof(num)))
		return -EFAULT;

	return 0;
}

static void vhost_vdpa_config_put(struct vhost_vdpa *v)
{
	if (v->config_ctx)
		eventfd_ctx_put(v->config_ctx);
}

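/* Install (or remove, via VHOST_FILE_UNBIND) the config-change eventfd. */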
static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vdpa_callback cb;
	int fd;
	struct eventfd_ctx *ctx;

	cb.callback = vhost_vdpa_config_cb;
	cb.private = v;	/* vhost_vdpa_config_cb() expects the vhost_vdpa itself */
	if (copy_from_user(&fd, argp, sizeof(fd)))
		return -EFAULT;

	ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
	swap(ctx, v->config_ctx);

	if (!IS_ERR_OR_NULL(ctx))
		eventfd_ctx_put(ctx);

	if (IS_ERR(v->config_ctx))
		return PTR_ERR(v->config_ctx);

	v->vdpa->config->set_config_cb(v->vdpa, &cb);

	return 0;
}

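/*
 * Per-virtqueue ioctls: handle the vDPA-specific ones here, relay the
 * generic ones to vhost_vring_ioctl(), then mirror the resulting vring
 * state into the vDPA device.
 */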
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
				   void __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_callback cb;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	u32 idx;
	long r;

	r = get_user(idx, (u32 __user *)argp);
	if (r < 0)
		return r;

	if (idx >= v->nvqs)
		return -ENOBUFS;

	idx = array_index_nospec(idx, v->nvqs);
	vq = &v->vqs[idx];

	if (cmd == VHOST_VDPA_SET_VRING_ENABLE) {
		if (copy_from_user(&s, argp, sizeof(s)))
			return -EFAULT;
		ops->set_vq_ready(vdpa, idx, s.num);
		return 0;
	}

	if (cmd == VHOST_GET_VRING_BASE)
		vq->last_avail_idx = ops->get_vq_state(v->vdpa, idx);

	r = vhost_vring_ioctl(&v->vdev, cmd, argp);
	if (r)
		return r;

	switch (cmd) {
	case VHOST_SET_VRING_ADDR:
		if (ops->set_vq_address(vdpa, idx,
					(u64)(uintptr_t)vq->desc,
					(u64)(uintptr_t)vq->avail,
					(u64)(uintptr_t)vq->used))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_BASE:
		if (ops->set_vq_state(vdpa, idx, vq->last_avail_idx))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_CALL:
		if (vq->call_ctx.ctx) {
			cb.callback = vhost_vdpa_virtqueue_cb;
			cb.private = vq;
		} else {
			cb.callback = NULL;
			cb.private = NULL;
		}
		ops->set_vq_cb(vdpa, idx, &cb);
		vhost_vdpa_setup_vq_irq(v, idx);
		break;

	case VHOST_SET_VRING_NUM:
		ops->set_vq_num(vdpa, idx, vq->num);
		break;
	}

	return r;
}

static long vhost_vdpa_unlocked_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;
	void __user *argp = (void __user *)arg;
	long r;

	mutex_lock(&d->mutex);

	switch (cmd) {
	case VHOST_VDPA_GET_DEVICE_ID:
		r = vhost_vdpa_get_device_id(v, argp);
		break;
	case VHOST_VDPA_GET_STATUS:
		r = vhost_vdpa_get_status(v, argp);
		break;
	case VHOST_VDPA_SET_STATUS:
		r = vhost_vdpa_set_status(v, argp);
		break;
	case VHOST_VDPA_GET_CONFIG:
		r = vhost_vdpa_get_config(v, argp);
		break;
	case VHOST_VDPA_SET_CONFIG:
		r = vhost_vdpa_set_config(v, argp);
		break;
	case VHOST_GET_FEATURES:
		r = vhost_vdpa_get_features(v, argp);
		break;
	case VHOST_SET_FEATURES:
		r = vhost_vdpa_set_features(v, argp);
		break;
	case VHOST_VDPA_GET_VRING_NUM:
		r = vhost_vdpa_get_vring_num(v, argp);
		break;
	case VHOST_SET_LOG_BASE:
	case VHOST_SET_LOG_FD:
		r = -ENOIOCTLCMD;
		break;
	case VHOST_VDPA_SET_CONFIG_CALL:
		r = vhost_vdpa_set_config_call(v, argp);
		break;
	default:
		r = vhost_dev_ioctl(&v->vdev, cmd, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vdpa_vring_ioctl(v, cmd, argp);
		break;
	}

	mutex_unlock(&d->mutex);
	return r;
}

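/*
 * Drop all IOTLB entries intersecting [start, last], unpinning the
 * backing pages and updating the pinned-page accounting.
 */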
static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct vhost_iotlb_map *map;
	struct page *page;
	unsigned long pfn, pinned;

	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
		pinned = map->size >> PAGE_SHIFT;
		for (pfn = map->addr >> PAGE_SHIFT;
		     pinned > 0; pfn++, pinned--) {
			page = pfn_to_page(pfn);
			if (map->perm & VHOST_ACCESS_WO)
				set_page_dirty_lock(page);
			unpin_user_page(page);
		}
		atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
		vhost_iotlb_map_free(iotlb, map);
	}
}

static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
{
	struct vhost_dev *dev = &v->vdev;

	vhost_vdpa_iotlb_unmap(v, 0ULL, 0ULL - 1);
	kfree(dev->iotlb);
	dev->iotlb = NULL;
}

static int perm_to_iommu_flags(u32 perm)
{
	int flags = 0;

	switch (perm) {
	case VHOST_ACCESS_WO:
		flags |= IOMMU_WRITE;
		break;
	case VHOST_ACCESS_RO:
		flags |= IOMMU_READ;
		break;
	case VHOST_ACCESS_RW:
		flags |= (IOMMU_WRITE | IOMMU_READ);
		break;
	default:
		WARN(1, "invalid vhost IOTLB permission\n");
		break;
	}

	return flags | IOMMU_CACHE;
}

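/*
 * Record the mapping in the vhost IOTLB, then install it through one
 * of three paths: the device's own dma_map op, a whole-table update
 * via set_map, or the generic IOMMU domain.
 */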
static int vhost_vdpa_map(struct vhost_vdpa *v,
			  u64 iova, u64 size, u64 pa, u32 perm)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int r = 0;

	r = vhost_iotlb_add_range(dev->iotlb, iova, iova + size - 1,
				  pa, perm);
	if (r)
		return r;

	if (ops->dma_map)
		r = ops->dma_map(vdpa, iova, size, pa, perm);
	else if (ops->set_map)
		r = ops->set_map(vdpa, dev->iotlb);
	else
		r = iommu_map(v->domain, iova, pa, size,
			      perm_to_iommu_flags(perm));

	return r;
}

static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1);

	if (ops->dma_map)
		ops->dma_unmap(vdpa, iova, size);
	else if (ops->set_map)
		ops->set_map(vdpa, dev->iotlb);
	else
		iommu_unmap(v->domain, iova, size);
}

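/*
 * Handle a VHOST_IOTLB_UPDATE message: pin the userspace pages and
 * map each physically contiguous run of pages in one go.
 */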
static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
					   struct vhost_iotlb_msg *msg)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct page **page_list;
	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
	unsigned int gup_flags = FOLL_LONGTERM;
	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
	unsigned long locked, lock_limit, pinned, i;
	u64 iova = msg->iova;
	int ret = 0;

	if (vhost_iotlb_itree_first(iotlb, msg->iova,
				    msg->iova + msg->size - 1))
		return -EEXIST;

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	if (msg->perm & VHOST_ACCESS_WO)
		gup_flags |= FOLL_WRITE;

	npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
	if (!npages) {
		free_page((unsigned long)page_list);
		return -EINVAL;
	}

	mmap_read_lock(dev->mm);

	locked = atomic64_add_return(npages, &dev->mm->pinned_vm);
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if (locked > lock_limit) {
		ret = -ENOMEM;
		goto out;
	}

	cur_base = msg->uaddr & PAGE_MASK;
	iova &= PAGE_MASK;

	while (npages) {
		pinned = min_t(unsigned long, npages, list_size);
		ret = pin_user_pages(cur_base, pinned,
				     gup_flags, page_list, NULL);
		if (ret != pinned)
			goto out;

		if (!last_pfn)
			map_pfn = page_to_pfn(page_list[0]);

		for (i = 0; i < ret; i++) {
			unsigned long this_pfn = page_to_pfn(page_list[i]);
			u64 csize;

			if (last_pfn && (this_pfn != last_pfn + 1)) {
				/* Map the contiguous chunk pinned so far */
				csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
				if (vhost_vdpa_map(v, iova, csize,
						   map_pfn << PAGE_SHIFT,
						   msg->perm))
					goto out;
				map_pfn = this_pfn;
				iova += csize;
			}

			last_pfn = this_pfn;
		}

		cur_base += ret << PAGE_SHIFT;
		npages -= ret;
	}

	/* Map the remaining chunk */
	ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
			     map_pfn << PAGE_SHIFT, msg->perm);
out:
	if (ret) {
		vhost_vdpa_unmap(v, msg->iova, msg->size);
		atomic64_sub(npages, &dev->mm->pinned_vm);
	}
	mmap_read_unlock(dev->mm);
	free_page((unsigned long)page_list);
	return ret;
}

static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
					struct vhost_iotlb_msg *msg)
{
	struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
	int r = 0;

	r = vhost_dev_check_owner(dev);
	if (r)
		return r;

	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		r = vhost_vdpa_process_iotlb_update(v, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		vhost_vdpa_unmap(v, msg->iova, msg->size);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
					 struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vdpa *v = file->private_data;
	struct vhost_dev *dev = &v->vdev;

	return vhost_chr_write_iter(dev, from);
}

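/*
 * Allocate and attach an IOMMU domain for devices that do not handle
 * DMA translation themselves (i.e. provide neither dma_map nor set_map).
 */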
static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);
	struct bus_type *bus;
	int ret;

	/* The device wants to do DMA translation by itself */
	if (ops->set_map || ops->dma_map)
		return 0;

	bus = dma_dev->bus;
	if (!bus)
		return -EFAULT;

	if (!iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
		return -ENOTSUPP;

	v->domain = iommu_domain_alloc(bus);
	if (!v->domain)
		return -EIO;

	ret = iommu_attach_device(v->domain, dma_dev);
	if (ret)
		goto err_attach;

	return 0;

err_attach:
	iommu_domain_free(v->domain);
	return ret;
}

static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);

	if (v->domain) {
		iommu_detach_device(v->domain, dma_dev);
		iommu_domain_free(v->domain);
	}

	v->domain = NULL;
}

static int vhost_vdpa_open(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	int nvqs, i, r, opened;

	v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);

	opened = atomic_cmpxchg(&v->opened, 0, 1);
	if (opened)
		return -EBUSY;

	nvqs = v->nvqs;
	vhost_vdpa_reset(v);

	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		r = -ENOMEM;
		goto err;
	}

	dev = &v->vdev;
	for (i = 0; i < nvqs; i++) {
		vqs[i] = &v->vqs[i];
		vqs[i]->handle_kick = handle_vq_kick;
	}
	vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
		       vhost_vdpa_process_iotlb_msg);

	dev->iotlb = vhost_iotlb_alloc(0, 0);
	if (!dev->iotlb) {
		r = -ENOMEM;
		goto err_init_iotlb;
	}

	r = vhost_vdpa_alloc_domain(v);
	if (r)
		goto err_init_iotlb;

	filep->private_data = v;

	return 0;

err_init_iotlb:
	vhost_dev_cleanup(&v->vdev);
err:
	atomic_dec(&v->opened);
	return r;
}

static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
{
	struct vhost_virtqueue *vq;
	int i;

	for (i = 0; i < v->nvqs; i++) {
		vq = &v->vqs[i];
		if (vq->call_ctx.producer.irq)
			irq_bypass_unregister_producer(&vq->call_ctx.producer);
	}
}

static int vhost_vdpa_release(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;

	mutex_lock(&d->mutex);
	filep->private_data = NULL;
	vhost_vdpa_reset(v);
	vhost_dev_stop(&v->vdev);
	vhost_vdpa_iotlb_free(v);
	vhost_vdpa_free_domain(v);
	vhost_vdpa_config_put(v);
	vhost_vdpa_clean_irq(v);
	vhost_dev_cleanup(&v->vdev);
	kfree(v->vdev.vqs);
	mutex_unlock(&d->mutex);

	atomic_dec(&v->opened);
	complete(&v->completion);

	return 0;
}

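/*
 * Doorbell mapping: let userspace mmap() a virtqueue's notification
 * area so that kicks can be written straight to the device.
 */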
#ifdef CONFIG_MMU
static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
{
	struct vhost_vdpa *v = vmf->vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	struct vm_area_struct *vma = vmf->vma;
	u16 index = vma->vm_pgoff;

	notify = ops->get_vq_notification(vdpa, index);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
			    notify.addr >> PAGE_SHIFT, PAGE_SIZE,
			    vma->vm_page_prot))
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct vhost_vdpa_vm_ops = {
	.fault = vhost_vdpa_fault,
};

static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct vhost_vdpa *v = vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	unsigned long index = vma->vm_pgoff;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (vma->vm_flags & VM_READ)
		return -EINVAL;
	if (index > 65535)
		return -EINVAL;
	if (!ops->get_vq_notification)
		return -ENOTSUPP;

	/*
	 * To be safe and easily modelled by userspace, we only
	 * support a doorbell that sits on a page boundary and
	 * does not share the page with other registers.
	 */
	notify = ops->get_vq_notification(vdpa, index);
	if (notify.addr & (PAGE_SIZE - 1))
		return -EINVAL;
	if (vma->vm_end - vma->vm_start != notify.size)
		return -ENOTSUPP;

	vma->vm_ops = &vhost_vdpa_vm_ops;
	return 0;
}
#endif /* CONFIG_MMU */

static const struct file_operations vhost_vdpa_fops = {
	.owner = THIS_MODULE,
	.open = vhost_vdpa_open,
	.release = vhost_vdpa_release,
	.write_iter = vhost_vdpa_chr_write_iter,
	.unlocked_ioctl = vhost_vdpa_unlocked_ioctl,
#ifdef CONFIG_MMU
	.mmap = vhost_vdpa_mmap,
#endif /* CONFIG_MMU */
	.compat_ioctl = compat_ptr_ioctl,
};

static void vhost_vdpa_release_dev(struct device *device)
{
	struct vhost_vdpa *v =
	       container_of(device, struct vhost_vdpa, dev);

	ida_simple_remove(&vhost_vdpa_ida, v->minor);
	kfree(v->vqs);
	kfree(v);
}

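/* Called by the vDPA bus when a device is bound to this driver. */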
static int vhost_vdpa_probe(struct vdpa_device *vdpa)
{
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa *v;
	int minor, nvqs = VHOST_VDPA_VQ_MAX;
	int r;

	/* Currently, we only accept network devices. */
	if (ops->get_device_id(vdpa) != VIRTIO_ID_NET)
		return -ENOTSUPP;

	v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!v)
		return -ENOMEM;

	minor = ida_simple_get(&vhost_vdpa_ida, 0,
			       VHOST_VDPA_DEV_MAX, GFP_KERNEL);
	if (minor < 0) {
		kfree(v);
		return minor;
	}

	atomic_set(&v->opened, 0);
	v->minor = minor;
	v->vdpa = vdpa;
	v->nvqs = nvqs;
	v->virtio_id = ops->get_device_id(vdpa);

	device_initialize(&v->dev);
	v->dev.release = vhost_vdpa_release_dev;
	v->dev.parent = &vdpa->dev;
	v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
	v->vqs = kmalloc_array(nvqs, sizeof(struct vhost_virtqueue),
			       GFP_KERNEL);
	if (!v->vqs) {
		r = -ENOMEM;
		goto err;
	}

	r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
	if (r)
		goto err;

	cdev_init(&v->cdev, &vhost_vdpa_fops);
	v->cdev.owner = THIS_MODULE;

	r = cdev_device_add(&v->cdev, &v->dev);
	if (r)
		goto err;

	init_completion(&v->completion);
	vdpa_set_drvdata(vdpa, v);

	return 0;

err:
	put_device(&v->dev);
	return r;
}

static void vhost_vdpa_remove(struct vdpa_device *vdpa)
{
	struct vhost_vdpa *v = vdpa_get_drvdata(vdpa);
	int opened;

	cdev_device_del(&v->cdev, &v->dev);

	do {
		opened = atomic_cmpxchg(&v->opened, 0, 1);
		if (!opened)
			break;
		wait_for_completion(&v->completion);
	} while (1);

	put_device(&v->dev);
}

static struct vdpa_driver vhost_vdpa_driver = {
	.driver = {
		.name = "vhost_vdpa",
	},
	.probe = vhost_vdpa_probe,
	.remove = vhost_vdpa_remove,
};

static int __init vhost_vdpa_init(void)
{
	int r;

	r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
				"vhost-vdpa");
	if (r)
		goto err_alloc_chrdev;

	r = vdpa_register_driver(&vhost_vdpa_driver);
	if (r)
		goto err_vdpa_register_driver;

	return 0;

err_vdpa_register_driver:
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
err_alloc_chrdev:
	return r;
}
module_init(vhost_vdpa_init);

static void __exit vhost_vdpa_exit(void)
{
	vdpa_unregister_driver(&vhost_vdpa_driver);
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
}
module_exit(vhost_vdpa_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");