vDPA: fix 'cast to restricted le16' warnings in vdpa.c
drivers/vhost/vdpa.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Intel Corporation.
 * Copyright (C) 2020 Red Hat, Inc.
 *
 * Author: Tiwei Bie <tiwei.bie@intel.com>
 *         Jason Wang <jasowang@redhat.com>
 *
 * Thanks Michael S. Tsirkin for the valuable comments and
 * suggestions. And thanks to Cunming Liang and Zhihong Wang for all
 * their support.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/uuid.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
#include <linux/vhost.h>

#include "vhost.h"

enum {
	VHOST_VDPA_BACKEND_FEATURES =
		(1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
		(1ULL << VHOST_BACKEND_F_IOTLB_BATCH) |
		(1ULL << VHOST_BACKEND_F_IOTLB_ASID),
};

#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)

#define VHOST_VDPA_IOTLB_BUCKETS 16

struct vhost_vdpa_as {
	struct hlist_node hash_link;
	struct vhost_iotlb iotlb;
	u32 id;
};

struct vhost_vdpa {
	struct vhost_dev vdev;
	struct iommu_domain *domain;
	struct vhost_virtqueue *vqs;
	struct completion completion;
	struct vdpa_device *vdpa;
	struct hlist_head as[VHOST_VDPA_IOTLB_BUCKETS];
	struct device dev;
	struct cdev cdev;
	atomic_t opened;
	u32 nvqs;
	int virtio_id;
	int minor;
	struct eventfd_ctx *config_ctx;
	int in_batch;
	struct vdpa_iova_range range;
	u32 batch_asid;
};

static DEFINE_IDA(vhost_vdpa_ida);

static dev_t vhost_vdpa_major;

static inline u32 iotlb_to_asid(struct vhost_iotlb *iotlb)
{
	struct vhost_vdpa_as *as = container_of(iotlb, struct vhost_vdpa_as,
						iotlb);
	return as->id;
}

static struct vhost_vdpa_as *asid_to_as(struct vhost_vdpa *v, u32 asid)
{
	struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
	struct vhost_vdpa_as *as;

	hlist_for_each_entry(as, head, hash_link)
		if (as->id == asid)
			return as;

	return NULL;
}

static struct vhost_iotlb *asid_to_iotlb(struct vhost_vdpa *v, u32 asid)
{
	struct vhost_vdpa_as *as = asid_to_as(v, asid);

	if (!as)
		return NULL;

	return &as->iotlb;
}

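/*
 * Allocate a new address space for @asid and hash it into the
 * per-device bucket table. Returns NULL if the ASID is already in
 * use or exceeds the number of address spaces the parent device
 * advertises.
 */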
static struct vhost_vdpa_as *vhost_vdpa_alloc_as(struct vhost_vdpa *v, u32 asid)
{
	struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
	struct vhost_vdpa_as *as;

	if (asid_to_as(v, asid))
		return NULL;

	if (asid >= v->vdpa->nas)
		return NULL;

	as = kmalloc(sizeof(*as), GFP_KERNEL);
	if (!as)
		return NULL;

	vhost_iotlb_init(&as->iotlb, 0, 0);
	as->id = asid;
	hlist_add_head(&as->hash_link, head);

	return as;
}

static struct vhost_vdpa_as *vhost_vdpa_find_alloc_as(struct vhost_vdpa *v,
						      u32 asid)
{
	struct vhost_vdpa_as *as = asid_to_as(v, asid);

	if (as)
		return as;

	return vhost_vdpa_alloc_as(v, asid);
}

static int vhost_vdpa_remove_as(struct vhost_vdpa *v, u32 asid)
{
	struct vhost_vdpa_as *as = asid_to_as(v, asid);

	if (!as)
		return -EINVAL;

	hlist_del(&as->hash_link);
	vhost_iotlb_reset(&as->iotlb);
	kfree(as);

	return 0;
}

static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
	const struct vdpa_config_ops *ops = v->vdpa->config;

	ops->kick_vq(v->vdpa, vq - v->vqs);
}

static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
{
	struct vhost_virtqueue *vq = private;
	struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;

	if (call_ctx)
		eventfd_signal(call_ctx, 1);

	return IRQ_HANDLED;
}

static irqreturn_t vhost_vdpa_config_cb(void *private)
{
	struct vhost_vdpa *v = private;
	struct eventfd_ctx *config_ctx = v->config_ctx;

	if (config_ctx)
		eventfd_signal(config_ctx, 1);

	return IRQ_HANDLED;
}

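/*
 * Try to set up an irq-bypass producer for the vq's call eventfd: if
 * the parent device can report a per-vq interrupt, it can be wired
 * directly to the eventfd consumer (e.g. a KVM irqfd) without a
 * userspace wakeup. A registration failure is logged but non-fatal;
 * signalling falls back to the regular eventfd path.
 */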
static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];
	const struct vdpa_config_ops *ops = v->vdpa->config;
	struct vdpa_device *vdpa = v->vdpa;
	int ret, irq;

	if (!ops->get_vq_irq)
		return;

	irq = ops->get_vq_irq(vdpa, qid);
	if (irq < 0)
		return;

	irq_bypass_unregister_producer(&vq->call_ctx.producer);
	if (!vq->call_ctx.ctx)
		return;

	vq->call_ctx.producer.token = vq->call_ctx.ctx;
	vq->call_ctx.producer.irq = irq;
	ret = irq_bypass_register_producer(&vq->call_ctx.producer);
	if (unlikely(ret))
		dev_info(&v->dev, "vq %u, irq bypass producer (token %p) registration failed, ret = %d\n",
			 qid, vq->call_ctx.producer.token, ret);
}

static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];

	irq_bypass_unregister_producer(&vq->call_ctx.producer);
}

static int vhost_vdpa_reset(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;

	v->in_batch = 0;

	return vdpa_reset(vdpa);
}

static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 device_id;

	device_id = ops->get_device_id(vdpa);

	if (copy_to_user(argp, &device_id, sizeof(device_id)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status;

	status = ops->get_status(vdpa);

	if (copy_to_user(statusp, &status, sizeof(status)))
		return -EFAULT;

	return 0;
}

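/*
 * Propagate a userspace-written device status to the parent device.
 * Interrupt bypass is torn down before DRIVER_OK is cleared and set
 * up again once DRIVER_OK becomes set, so vq irqs are only wired up
 * while the device is running.
 */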
static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status, status_old;
	u32 nvqs = v->nvqs;
	int ret;
	u16 i;

	if (copy_from_user(&status, statusp, sizeof(status)))
		return -EFAULT;

	status_old = ops->get_status(vdpa);

	/*
	 * Userspace shouldn't remove status bits unless resetting
	 * the status to 0.
	 */
	if (status != 0 && (status_old & ~status) != 0)
		return -EINVAL;

	if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_unsetup_vq_irq(v, i);

	if (status == 0) {
		ret = vdpa_reset(vdpa);
		if (ret)
			return ret;
	} else
		vdpa_set_status(vdpa, status);

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_setup_vq_irq(v, i);

	return 0;
}

static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
				      struct vhost_vdpa_config *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	size_t size = vdpa->config->get_config_size(vdpa);

	if (c->len == 0 || c->off > size)
		return -EINVAL;

	if (c->len > size - c->off)
		return -E2BIG;

	return 0;
}

static long vhost_vdpa_get_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	vdpa_get_config(vdpa, config.off, buf, config.len);

	if (copy_to_user(c->buf, buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_set_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;

	buf = vmemdup_user(c->buf, config.len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	vdpa_set_config(vdpa, config.off, buf, config.len);

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	features = ops->get_device_features(vdpa);

	if (copy_to_user(featurep, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	/*
	 * It's not allowed to change the features after they have
	 * been negotiated.
	 */
	if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
		return -EBUSY;

	if (copy_from_user(&features, featurep, sizeof(features)))
		return -EFAULT;

	if (vdpa_set_features(vdpa, features))
		return -EINVAL;

	return 0;
}

static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u16 num;

	num = ops->get_vq_num_max(vdpa);

	if (copy_to_user(argp, &num, sizeof(num)))
		return -EFAULT;

	return 0;
}

static void vhost_vdpa_config_put(struct vhost_vdpa *v)
{
	if (v->config_ctx) {
		eventfd_ctx_put(v->config_ctx);
		v->config_ctx = NULL;
	}
}

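/*
 * Install (or tear down, with VHOST_FILE_UNBIND) the eventfd used to
 * signal config-space change interrupts. The old context is swapped
 * out and released before the new callback is registered with the
 * parent device.
 */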
static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vdpa_callback cb;
	int fd;
	struct eventfd_ctx *ctx;

	cb.callback = vhost_vdpa_config_cb;
	cb.private = v;
	if (copy_from_user(&fd, argp, sizeof(fd)))
		return -EFAULT;

	ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
	swap(ctx, v->config_ctx);

	if (!IS_ERR_OR_NULL(ctx))
		eventfd_ctx_put(ctx);

	if (IS_ERR(v->config_ctx)) {
		long ret = PTR_ERR(v->config_ctx);

		v->config_ctx = NULL;
		return ret;
	}

	v->vdpa->config->set_config_cb(v->vdpa, &cb);

	return 0;
}

static long vhost_vdpa_get_iova_range(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vhost_vdpa_iova_range range = {
		.first = v->range.first,
		.last = v->range.last,
	};

	if (copy_to_user(argp, &range, sizeof(range)))
		return -EFAULT;
	return 0;
}

static long vhost_vdpa_get_config_size(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 size;

	size = ops->get_config_size(vdpa);

	if (copy_to_user(argp, &size, sizeof(size)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_get_vqs_count(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;

	if (copy_to_user(argp, &vdpa->nvqs, sizeof(vdpa->nvqs)))
		return -EFAULT;

	return 0;
}

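/*
 * Per-virtqueue ioctls. The generic vhost ring state is maintained
 * by vhost_vring_ioctl(); the switch statements before and after
 * that call mirror the relevant state into the parent vdpa device
 * through its config ops.
 */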
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
				   void __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_vq_state vq_state;
	struct vdpa_callback cb;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	u32 idx;
	long r;

	r = get_user(idx, (u32 __user *)argp);
	if (r < 0)
		return r;

	if (idx >= v->nvqs)
		return -ENOBUFS;

	idx = array_index_nospec(idx, v->nvqs);
	vq = &v->vqs[idx];

	switch (cmd) {
	case VHOST_VDPA_SET_VRING_ENABLE:
		if (copy_from_user(&s, argp, sizeof(s)))
			return -EFAULT;
		ops->set_vq_ready(vdpa, idx, s.num);
		return 0;
	case VHOST_VDPA_GET_VRING_GROUP:
		if (!ops->get_vq_group)
			return -EOPNOTSUPP;
		s.index = idx;
		s.num = ops->get_vq_group(vdpa, idx);
		if (s.num >= vdpa->ngroups)
			return -EIO;
		else if (copy_to_user(argp, &s, sizeof(s)))
			return -EFAULT;
		return 0;
	case VHOST_VDPA_SET_GROUP_ASID:
		if (copy_from_user(&s, argp, sizeof(s)))
			return -EFAULT;
		if (s.num >= vdpa->nas)
			return -EINVAL;
		if (!ops->set_group_asid)
			return -EOPNOTSUPP;
		return ops->set_group_asid(vdpa, idx, s.num);
	case VHOST_GET_VRING_BASE:
		r = ops->get_vq_state(v->vdpa, idx, &vq_state);
		if (r)
			return r;

		vq->last_avail_idx = vq_state.split.avail_index;
		break;
	}

	r = vhost_vring_ioctl(&v->vdev, cmd, argp);
	if (r)
		return r;

	switch (cmd) {
	case VHOST_SET_VRING_ADDR:
		if (ops->set_vq_address(vdpa, idx,
					(u64)(uintptr_t)vq->desc,
					(u64)(uintptr_t)vq->avail,
					(u64)(uintptr_t)vq->used))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_BASE:
		vq_state.split.avail_index = vq->last_avail_idx;
		if (ops->set_vq_state(vdpa, idx, &vq_state))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_CALL:
		if (vq->call_ctx.ctx) {
			cb.callback = vhost_vdpa_virtqueue_cb;
			cb.private = vq;
		} else {
			cb.callback = NULL;
			cb.private = NULL;
		}
		ops->set_vq_cb(vdpa, idx, &cb);
		vhost_vdpa_setup_vq_irq(v, idx);
		break;

	case VHOST_SET_VRING_NUM:
		ops->set_vq_num(vdpa, idx, vq->num);
		break;
	}

	return r;
}

static long vhost_vdpa_unlocked_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	u64 features;
	long r = 0;

	if (cmd == VHOST_SET_BACKEND_FEATURES) {
		if (copy_from_user(&features, featurep, sizeof(features)))
			return -EFAULT;
		if (features & ~VHOST_VDPA_BACKEND_FEATURES)
			return -EOPNOTSUPP;
		vhost_set_backend_features(&v->vdev, features);
		return 0;
	}

	mutex_lock(&d->mutex);

	switch (cmd) {
	case VHOST_VDPA_GET_DEVICE_ID:
		r = vhost_vdpa_get_device_id(v, argp);
		break;
	case VHOST_VDPA_GET_STATUS:
		r = vhost_vdpa_get_status(v, argp);
		break;
	case VHOST_VDPA_SET_STATUS:
		r = vhost_vdpa_set_status(v, argp);
		break;
	case VHOST_VDPA_GET_CONFIG:
		r = vhost_vdpa_get_config(v, argp);
		break;
	case VHOST_VDPA_SET_CONFIG:
		r = vhost_vdpa_set_config(v, argp);
		break;
	case VHOST_GET_FEATURES:
		r = vhost_vdpa_get_features(v, argp);
		break;
	case VHOST_SET_FEATURES:
		r = vhost_vdpa_set_features(v, argp);
		break;
	case VHOST_VDPA_GET_VRING_NUM:
		r = vhost_vdpa_get_vring_num(v, argp);
		break;
	case VHOST_VDPA_GET_GROUP_NUM:
		if (copy_to_user(argp, &v->vdpa->ngroups,
				 sizeof(v->vdpa->ngroups)))
			r = -EFAULT;
		break;
	case VHOST_VDPA_GET_AS_NUM:
		if (copy_to_user(argp, &v->vdpa->nas, sizeof(v->vdpa->nas)))
			r = -EFAULT;
		break;
	case VHOST_SET_LOG_BASE:
	case VHOST_SET_LOG_FD:
		r = -ENOIOCTLCMD;
		break;
	case VHOST_VDPA_SET_CONFIG_CALL:
		r = vhost_vdpa_set_config_call(v, argp);
		break;
	case VHOST_GET_BACKEND_FEATURES:
		features = VHOST_VDPA_BACKEND_FEATURES;
		if (copy_to_user(featurep, &features, sizeof(features)))
			r = -EFAULT;
		break;
	case VHOST_VDPA_GET_IOVA_RANGE:
		r = vhost_vdpa_get_iova_range(v, argp);
		break;
	case VHOST_VDPA_GET_CONFIG_SIZE:
		r = vhost_vdpa_get_config_size(v, argp);
		break;
	case VHOST_VDPA_GET_VQS_COUNT:
		r = vhost_vdpa_get_vqs_count(v, argp);
		break;
	default:
		r = vhost_dev_ioctl(&v->vdev, cmd, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vdpa_vring_ioctl(v, cmd, argp);
		break;
	}

	mutex_unlock(&d->mutex);
	return r;
}

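/*
 * Tear down all IOTLB entries intersecting [start, last]. For
 * PA-backed mappings the pinned pages are dirtied (if writable) and
 * unpinned; for VA-backed (use_va) mappings the file references
 * taken at map time are dropped instead. vhost_vdpa_iotlb_unmap()
 * below picks the right variant for the device.
 */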
static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v,
				struct vhost_iotlb *iotlb,
				u64 start, u64 last)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb_map *map;
	struct page *page;
	unsigned long pfn, pinned;

	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
		pinned = PFN_DOWN(map->size);
		for (pfn = PFN_DOWN(map->addr);
		     pinned > 0; pfn++, pinned--) {
			page = pfn_to_page(pfn);
			if (map->perm & VHOST_ACCESS_WO)
				set_page_dirty_lock(page);
			unpin_user_page(page);
		}
		atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
		vhost_iotlb_map_free(iotlb, map);
	}
}

static void vhost_vdpa_va_unmap(struct vhost_vdpa *v,
				struct vhost_iotlb *iotlb,
				u64 start, u64 last)
{
	struct vhost_iotlb_map *map;
	struct vdpa_map_file *map_file;

	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
		map_file = (struct vdpa_map_file *)map->opaque;
		fput(map_file->file);
		kfree(map_file);
		vhost_iotlb_map_free(iotlb, map);
	}
}

static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
				   struct vhost_iotlb *iotlb,
				   u64 start, u64 last)
{
	struct vdpa_device *vdpa = v->vdpa;

	if (vdpa->use_va)
		return vhost_vdpa_va_unmap(v, iotlb, start, last);

	return vhost_vdpa_pa_unmap(v, iotlb, start, last);
}

static int perm_to_iommu_flags(u32 perm)
{
	int flags = 0;

	switch (perm) {
	case VHOST_ACCESS_WO:
		flags |= IOMMU_WRITE;
		break;
	case VHOST_ACCESS_RO:
		flags |= IOMMU_READ;
		break;
	case VHOST_ACCESS_RW:
		flags |= (IOMMU_WRITE | IOMMU_READ);
		break;
	default:
		WARN(1, "invalid vhost IOTLB permission\n");
		break;
	}

	return flags | IOMMU_CACHE;
}

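/*
 * Record a mapping in the per-AS IOTLB and install it in the device.
 * Three backends are possible: a device-specific dma_map op, a
 * set_map op that consumes the whole IOTLB (deferred while a batch
 * is in flight), or the platform IOMMU via iommu_map().
 */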
static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
			  u64 iova, u64 size, u64 pa, u32 perm, void *opaque)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 asid = iotlb_to_asid(iotlb);
	int r = 0;

	r = vhost_iotlb_add_range_ctx(iotlb, iova, iova + size - 1,
				      pa, perm, opaque);
	if (r)
		return r;

	if (ops->dma_map) {
		r = ops->dma_map(vdpa, asid, iova, size, pa, perm, opaque);
	} else if (ops->set_map) {
		if (!v->in_batch)
			r = ops->set_map(vdpa, asid, iotlb);
	} else {
		r = iommu_map(v->domain, iova, pa, size,
			      perm_to_iommu_flags(perm));
	}
	if (r) {
		vhost_iotlb_del_range(iotlb, iova, iova + size - 1);
		return r;
	}

	if (!vdpa->use_va)
		atomic64_add(PFN_DOWN(size), &dev->mm->pinned_vm);

	return 0;
}

static void vhost_vdpa_unmap(struct vhost_vdpa *v,
			     struct vhost_iotlb *iotlb,
			     u64 iova, u64 size)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 asid = iotlb_to_asid(iotlb);

	vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1);

	if (ops->dma_map) {
		ops->dma_unmap(vdpa, asid, iova, size);
	} else if (ops->set_map) {
		if (!v->in_batch)
			ops->set_map(vdpa, asid, iotlb);
	} else {
		iommu_unmap(v->domain, iova, size);
	}

	/* If we are in the middle of batch processing, delay freeing
	 * the AS until BATCH_END.
	 */
	if (!v->in_batch && !iotlb->nmaps)
		vhost_vdpa_remove_as(v, asid);
}

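/*
 * VA mode (vdpa->use_va): instead of pinning pages, walk the VMAs
 * covering the userspace range and record a file reference plus
 * offset for each shared, file-backed chunk, so the parent driver
 * can translate the addresses itself.
 */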
static int vhost_vdpa_va_map(struct vhost_vdpa *v,
			     struct vhost_iotlb *iotlb,
			     u64 iova, u64 size, u64 uaddr, u32 perm)
{
	struct vhost_dev *dev = &v->vdev;
	u64 offset, map_size, map_iova = iova;
	struct vdpa_map_file *map_file;
	struct vm_area_struct *vma;
	int ret = 0;

	mmap_read_lock(dev->mm);

	while (size) {
		vma = find_vma(dev->mm, uaddr);
		if (!vma) {
			ret = -EINVAL;
			break;
		}
		map_size = min(size, vma->vm_end - uaddr);
		if (!(vma->vm_file && (vma->vm_flags & VM_SHARED) &&
			!(vma->vm_flags & (VM_IO | VM_PFNMAP))))
			goto next;

		map_file = kzalloc(sizeof(*map_file), GFP_KERNEL);
		if (!map_file) {
			ret = -ENOMEM;
			break;
		}
		offset = (vma->vm_pgoff << PAGE_SHIFT) + uaddr - vma->vm_start;
		map_file->offset = offset;
		map_file->file = get_file(vma->vm_file);
		ret = vhost_vdpa_map(v, iotlb, map_iova, map_size, uaddr,
				     perm, map_file);
		if (ret) {
			fput(map_file->file);
			kfree(map_file);
			break;
		}
next:
		size -= map_size;
		uaddr += map_size;
		map_iova += map_size;
	}
	if (ret)
		vhost_vdpa_unmap(v, iotlb, iova, map_iova - iova);

	mmap_read_unlock(dev->mm);

	return ret;
}

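/*
 * PA mode: pin the userspace range with pin_user_pages() one
 * page_list at a time, coalesce physically contiguous runs, and map
 * each run with a single vhost_vdpa_map() call. The RLIMIT_MEMLOCK
 * budget is checked up front against mm->pinned_vm.
 */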
static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
			     struct vhost_iotlb *iotlb,
			     u64 iova, u64 size, u64 uaddr, u32 perm)
{
	struct vhost_dev *dev = &v->vdev;
	struct page **page_list;
	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
	unsigned int gup_flags = FOLL_LONGTERM;
	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
	unsigned long lock_limit, sz2pin, nchunks, i;
	u64 start = iova;
	long pinned;
	int ret = 0;

	/* Limit the use of memory for bookkeeping */
	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	if (perm & VHOST_ACCESS_WO)
		gup_flags |= FOLL_WRITE;

	npages = PFN_UP(size + (iova & ~PAGE_MASK));
	if (!npages) {
		ret = -EINVAL;
		goto free;
	}

	mmap_read_lock(dev->mm);

	lock_limit = PFN_DOWN(rlimit(RLIMIT_MEMLOCK));
	if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
		ret = -ENOMEM;
		goto unlock;
	}

	cur_base = uaddr & PAGE_MASK;
	iova &= PAGE_MASK;
	nchunks = 0;

	while (npages) {
		sz2pin = min_t(unsigned long, npages, list_size);
		pinned = pin_user_pages(cur_base, sz2pin,
					gup_flags, page_list, NULL);
		if (sz2pin != pinned) {
			if (pinned < 0) {
				ret = pinned;
			} else {
				unpin_user_pages(page_list, pinned);
				ret = -ENOMEM;
			}
			goto out;
		}
		nchunks++;

		if (!last_pfn)
			map_pfn = page_to_pfn(page_list[0]);

		for (i = 0; i < pinned; i++) {
			unsigned long this_pfn = page_to_pfn(page_list[i]);
			u64 csize;

			if (last_pfn && (this_pfn != last_pfn + 1)) {
				/* Pin a contiguous chunk of memory */
				csize = PFN_PHYS(last_pfn - map_pfn + 1);
				ret = vhost_vdpa_map(v, iotlb, iova, csize,
						     PFN_PHYS(map_pfn),
						     perm, NULL);
				if (ret) {
					/*
					 * Unpin the pages that are left unmapped
					 * from this point on in the current
					 * page_list. The remaining outstanding
					 * ones which may stride across several
					 * chunks will be covered in the common
					 * error path subsequently.
					 */
					unpin_user_pages(&page_list[i],
							 pinned - i);
					goto out;
				}

				map_pfn = this_pfn;
				iova += csize;
				nchunks = 0;
			}

			last_pfn = this_pfn;
		}

		cur_base += PFN_PHYS(pinned);
		npages -= pinned;
	}

	/* Pin the remaining chunk */
	ret = vhost_vdpa_map(v, iotlb, iova, PFN_PHYS(last_pfn - map_pfn + 1),
			     PFN_PHYS(map_pfn), perm, NULL);
out:
	if (ret) {
		if (nchunks) {
			unsigned long pfn;

			/*
			 * Unpin the outstanding pages which are yet to be
			 * mapped but haven't due to vdpa_map() or
			 * pin_user_pages() failure.
			 *
			 * Mapped pages are accounted in vdpa_map(), hence
			 * the corresponding unpinning will be handled by
			 * vdpa_unmap().
			 */
			WARN_ON(!last_pfn);
			for (pfn = map_pfn; pfn <= last_pfn; pfn++)
				unpin_user_page(pfn_to_page(pfn));
		}
		vhost_vdpa_unmap(v, iotlb, start, size);
	}
unlock:
	mmap_read_unlock(dev->mm);
free:
	free_page((unsigned long)page_list);
	return ret;
}

static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
					   struct vhost_iotlb *iotlb,
					   struct vhost_iotlb_msg *msg)
{
	struct vdpa_device *vdpa = v->vdpa;

	if (msg->iova < v->range.first || !msg->size ||
	    msg->iova > U64_MAX - msg->size + 1 ||
	    msg->iova + msg->size - 1 > v->range.last)
		return -EINVAL;

	if (vhost_iotlb_itree_first(iotlb, msg->iova,
				    msg->iova + msg->size - 1))
		return -EEXIST;

	if (vdpa->use_va)
		return vhost_vdpa_va_map(v, iotlb, msg->iova, msg->size,
					 msg->uaddr, msg->perm);

	return vhost_vdpa_pa_map(v, iotlb, msg->iova, msg->size, msg->uaddr,
				 msg->perm);
}

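/*
 * Entry point for all VHOST_IOTLB_MSG_V2 messages. The ASID is
 * resolved (and its address space created on demand for UPDATE and
 * BATCH_BEGIN) before the message is dispatched; a batch must stay
 * within the ASID it was started with.
 */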
static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
					struct vhost_iotlb_msg *msg)
{
	struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_iotlb *iotlb = NULL;
	struct vhost_vdpa_as *as = NULL;
	int r = 0;

	mutex_lock(&dev->mutex);

	r = vhost_dev_check_owner(dev);
	if (r)
		goto unlock;

	if (msg->type == VHOST_IOTLB_UPDATE ||
	    msg->type == VHOST_IOTLB_BATCH_BEGIN) {
		as = vhost_vdpa_find_alloc_as(v, asid);
		if (!as) {
			dev_err(&v->dev, "can't find or allocate asid %d\n",
				asid);
			r = -EINVAL;
			goto unlock;
		}
		iotlb = &as->iotlb;
	} else
		iotlb = asid_to_iotlb(v, asid);

	if ((v->in_batch && v->batch_asid != asid) || !iotlb) {
		if (v->in_batch && v->batch_asid != asid) {
			dev_info(&v->dev, "batch id %d asid %d\n",
				 v->batch_asid, asid);
		}
		if (!iotlb)
			dev_err(&v->dev, "no iotlb for asid %d\n", asid);
		r = -EINVAL;
		goto unlock;
	}

	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		r = vhost_vdpa_process_iotlb_update(v, iotlb, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		vhost_vdpa_unmap(v, iotlb, msg->iova, msg->size);
		break;
	case VHOST_IOTLB_BATCH_BEGIN:
		v->batch_asid = asid;
		v->in_batch = true;
		break;
	case VHOST_IOTLB_BATCH_END:
		if (v->in_batch && ops->set_map)
			ops->set_map(vdpa, asid, iotlb);
		v->in_batch = false;
		if (!iotlb->nmaps)
			vhost_vdpa_remove_as(v, asid);
		break;
	default:
		r = -EINVAL;
		break;
	}
unlock:
	mutex_unlock(&dev->mutex);

	return r;
}

static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
					 struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vdpa *v = file->private_data;
	struct vhost_dev *dev = &v->vdev;

	return vhost_chr_write_iter(dev, from);
}

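/*
 * If the parent device does not handle DMA mapping itself (neither
 * dma_map nor set_map is provided), allocate and attach a platform
 * IOMMU domain so vhost can program the mappings via iommu_map().
 */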
static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);
	struct bus_type *bus;
	int ret;

	/* Device wants to do DMA by itself */
	if (ops->set_map || ops->dma_map)
		return 0;

	bus = dma_dev->bus;
	if (!bus)
		return -EFAULT;

	if (!device_iommu_capable(dma_dev, IOMMU_CAP_CACHE_COHERENCY))
		return -ENOTSUPP;

	v->domain = iommu_domain_alloc(bus);
	if (!v->domain)
		return -EIO;

	ret = iommu_attach_device(v->domain, dma_dev);
	if (ret)
		goto err_attach;

	return 0;

err_attach:
	iommu_domain_free(v->domain);
	return ret;
}

static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);

	if (v->domain) {
		iommu_detach_device(v->domain, dma_dev);
		iommu_domain_free(v->domain);
	}

	v->domain = NULL;
}

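/*
 * Choose the usable IOVA window, in order of preference: the range
 * the parent device reports, the platform IOMMU aperture, or the
 * whole 64-bit space.
 */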
static void vhost_vdpa_set_iova_range(struct vhost_vdpa *v)
{
	struct vdpa_iova_range *range = &v->range;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (ops->get_iova_range) {
		*range = ops->get_iova_range(vdpa);
	} else if (v->domain && v->domain->geometry.force_aperture) {
		range->first = v->domain->geometry.aperture_start;
		range->last = v->domain->geometry.aperture_end;
	} else {
		range->first = 0;
		range->last = ULLONG_MAX;
	}
}

static void vhost_vdpa_cleanup(struct vhost_vdpa *v)
{
	struct vhost_vdpa_as *as;
	u32 asid;

	vhost_dev_cleanup(&v->vdev);
	kfree(v->vdev.vqs);

	for (asid = 0; asid < v->vdpa->nas; asid++) {
		as = asid_to_as(v, asid);
		if (as)
			vhost_vdpa_remove_as(v, asid);
	}
}

static int vhost_vdpa_open(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	int r, opened;
	u32 i, nvqs;

	v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);

	opened = atomic_cmpxchg(&v->opened, 0, 1);
	if (opened)
		return -EBUSY;

	nvqs = v->nvqs;
	r = vhost_vdpa_reset(v);
	if (r)
		goto err;

	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		r = -ENOMEM;
		goto err;
	}

	dev = &v->vdev;
	for (i = 0; i < nvqs; i++) {
		vqs[i] = &v->vqs[i];
		vqs[i]->handle_kick = handle_vq_kick;
	}
	vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
		       vhost_vdpa_process_iotlb_msg);

	r = vhost_vdpa_alloc_domain(v);
	if (r)
		goto err_alloc_domain;

	vhost_vdpa_set_iova_range(v);

	filep->private_data = v;

	return 0;

err_alloc_domain:
	vhost_vdpa_cleanup(v);
err:
	atomic_dec(&v->opened);
	return r;
}

static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
{
	u32 i;

	for (i = 0; i < v->nvqs; i++)
		vhost_vdpa_unsetup_vq_irq(v, i);
}

static int vhost_vdpa_release(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;

	mutex_lock(&d->mutex);
	filep->private_data = NULL;
	vhost_vdpa_clean_irq(v);
	vhost_vdpa_reset(v);
	vhost_dev_stop(&v->vdev);
	vhost_vdpa_free_domain(v);
	vhost_vdpa_config_put(v);
	vhost_vdpa_cleanup(v);
	mutex_unlock(&d->mutex);

	atomic_dec(&v->opened);
	complete(&v->completion);

	return 0;
}

#ifdef CONFIG_MMU
static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
{
	struct vhost_vdpa *v = vmf->vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	struct vm_area_struct *vma = vmf->vma;
	u16 index = vma->vm_pgoff;

	notify = ops->get_vq_notification(vdpa, index);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
			    PFN_DOWN(notify.addr), PAGE_SIZE,
			    vma->vm_page_prot))
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct vhost_vdpa_vm_ops = {
	.fault = vhost_vdpa_fault,
};

static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct vhost_vdpa *v = vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	unsigned long index = vma->vm_pgoff;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (vma->vm_flags & VM_READ)
		return -EINVAL;
	if (index > 65535)
		return -EINVAL;
	if (!ops->get_vq_notification)
		return -ENOTSUPP;

	/* To be safe and easily modelled by userspace, we only
	 * support a doorbell that sits on a page boundary and
	 * does not share the page with other registers.
	 */
	notify = ops->get_vq_notification(vdpa, index);
	if (notify.addr & (PAGE_SIZE - 1))
		return -EINVAL;
	if (vma->vm_end - vma->vm_start != notify.size)
		return -ENOTSUPP;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &vhost_vdpa_vm_ops;
	return 0;
}
#endif /* CONFIG_MMU */

static const struct file_operations vhost_vdpa_fops = {
	.owner = THIS_MODULE,
	.open = vhost_vdpa_open,
	.release = vhost_vdpa_release,
	.write_iter = vhost_vdpa_chr_write_iter,
	.unlocked_ioctl = vhost_vdpa_unlocked_ioctl,
#ifdef CONFIG_MMU
	.mmap = vhost_vdpa_mmap,
#endif /* CONFIG_MMU */
	.compat_ioctl = compat_ptr_ioctl,
};

static void vhost_vdpa_release_dev(struct device *device)
{
	struct vhost_vdpa *v =
	       container_of(device, struct vhost_vdpa, dev);

	ida_simple_remove(&vhost_vdpa_ida, v->minor);
	kfree(v->vqs);
	kfree(v);
}

static int vhost_vdpa_probe(struct vdpa_device *vdpa)
{
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa *v;
	int minor;
	int i, r;

	/* We can't support a platform IOMMU device with more than
	 * one group or ASID.
	 */
	if (!ops->set_map && !ops->dma_map &&
	    (vdpa->ngroups > 1 || vdpa->nas > 1))
		return -EOPNOTSUPP;

	v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!v)
		return -ENOMEM;

	minor = ida_simple_get(&vhost_vdpa_ida, 0,
			       VHOST_VDPA_DEV_MAX, GFP_KERNEL);
	if (minor < 0) {
		kfree(v);
		return minor;
	}

	atomic_set(&v->opened, 0);
	v->minor = minor;
	v->vdpa = vdpa;
	v->nvqs = vdpa->nvqs;
	v->virtio_id = ops->get_device_id(vdpa);

	device_initialize(&v->dev);
	v->dev.release = vhost_vdpa_release_dev;
	v->dev.parent = &vdpa->dev;
	v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
	v->vqs = kmalloc_array(v->nvqs, sizeof(struct vhost_virtqueue),
			       GFP_KERNEL);
	if (!v->vqs) {
		r = -ENOMEM;
		goto err;
	}

	r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
	if (r)
		goto err;

	cdev_init(&v->cdev, &vhost_vdpa_fops);
	v->cdev.owner = THIS_MODULE;

	r = cdev_device_add(&v->cdev, &v->dev);
	if (r)
		goto err;

	init_completion(&v->completion);
	vdpa_set_drvdata(vdpa, v);

	for (i = 0; i < VHOST_VDPA_IOTLB_BUCKETS; i++)
		INIT_HLIST_HEAD(&v->as[i]);

	return 0;

err:
	put_device(&v->dev);
	return r;
}

static void vhost_vdpa_remove(struct vdpa_device *vdpa)
{
	struct vhost_vdpa *v = vdpa_get_drvdata(vdpa);
	int opened;

	cdev_device_del(&v->cdev, &v->dev);

	do {
		opened = atomic_cmpxchg(&v->opened, 0, 1);
		if (!opened)
			break;
		wait_for_completion(&v->completion);
	} while (1);

	put_device(&v->dev);
}

static struct vdpa_driver vhost_vdpa_driver = {
	.driver = {
		.name = "vhost_vdpa",
	},
	.probe = vhost_vdpa_probe,
	.remove = vhost_vdpa_remove,
};

static int __init vhost_vdpa_init(void)
{
	int r;

	r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
				"vhost-vdpa");
	if (r)
		goto err_alloc_chrdev;

	r = vdpa_register_driver(&vhost_vdpa_driver);
	if (r)
		goto err_vdpa_register_driver;

	return 0;

err_vdpa_register_driver:
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
err_alloc_chrdev:
	return r;
}
module_init(vhost_vdpa_init);

static void __exit vhost_vdpa_exit(void)
{
	vdpa_unregister_driver(&vhost_vdpa_driver);
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
}
module_exit(vhost_vdpa_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");