vhost: Remove unnecessary variable
[linux-2.6-block.git] drivers/vhost/vhost.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2009 Red Hat, Inc.
3  * Copyright (C) 2006 Rusty Russell IBM Corporation
4  *
5  * Author: Michael S. Tsirkin <mst@redhat.com>
6  *
7  * Inspiration, some code, and most witty comments come from
8  * Documentation/virtual/lguest/lguest.c, by Rusty Russell
9  *
10  * Generic code for virtio server in host kernel.
11  */
12
13 #include <linux/eventfd.h>
14 #include <linux/vhost.h>
15 #include <linux/uio.h>
16 #include <linux/mm.h>
17 #include <linux/mmu_context.h>
18 #include <linux/miscdevice.h>
19 #include <linux/mutex.h>
20 #include <linux/poll.h>
21 #include <linux/file.h>
22 #include <linux/highmem.h>
23 #include <linux/slab.h>
24 #include <linux/vmalloc.h>
25 #include <linux/kthread.h>
26 #include <linux/cgroup.h>
27 #include <linux/module.h>
28 #include <linux/sort.h>
29 #include <linux/sched/mm.h>
30 #include <linux/sched/signal.h>
31 #include <linux/interval_tree_generic.h>
32 #include <linux/nospec.h>
33
34 #include "vhost.h"
35
36 static ushort max_mem_regions = 64;
37 module_param(max_mem_regions, ushort, 0444);
38 MODULE_PARM_DESC(max_mem_regions,
39         "Maximum number of memory regions in memory map. (default: 64)");
40 static int max_iotlb_entries = 2048;
41 module_param(max_iotlb_entries, int, 0444);
42 MODULE_PARM_DESC(max_iotlb_entries,
43         "Maximum number of iotlb entries. (default: 2048)");
44
45 enum {
46         VHOST_MEMORY_F_LOG = 0x1,
47 };
48
49 #define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
50 #define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
51
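/* With VIRTIO_RING_F_EVENT_IDX negotiated, the virtio spec places the
 * used_event word right after the available ring and avail_event right
 * after the used ring, which is why both macros index ring[vq->num].
 * Informally (a sketch of the spec layout, not a struct used here):
 *
 *      struct vring_avail {
 *              __virtio16 flags;
 *              __virtio16 idx;
 *              __virtio16 ring[num];
 *              __virtio16 used_event;  <- vhost_used_event(vq)
 *      };
 */
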
52 INTERVAL_TREE_DEFINE(struct vhost_umem_node,
53                      rb, __u64, __subtree_last,
54                      START, LAST, static inline, vhost_umem_interval_tree);
55
56 #ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
57 static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
58 {
59         vq->user_be = !virtio_legacy_is_little_endian();
60 }
61
62 static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
63 {
64         vq->user_be = true;
65 }
66
67 static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
68 {
69         vq->user_be = false;
70 }
71
72 static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
73 {
74         struct vhost_vring_state s;
75
76         if (vq->private_data)
77                 return -EBUSY;
78
79         if (copy_from_user(&s, argp, sizeof(s)))
80                 return -EFAULT;
81
82         if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
83             s.num != VHOST_VRING_BIG_ENDIAN)
84                 return -EINVAL;
85
86         if (s.num == VHOST_VRING_BIG_ENDIAN)
87                 vhost_enable_cross_endian_big(vq);
88         else
89                 vhost_enable_cross_endian_little(vq);
90
91         return 0;
92 }
93
94 static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
95                                    int __user *argp)
96 {
97         struct vhost_vring_state s = {
98                 .index = idx,
99                 .num = vq->user_be
100         };
101
102         if (copy_to_user(argp, &s, sizeof(s)))
103                 return -EFAULT;
104
105         return 0;
106 }
107
108 static void vhost_init_is_le(struct vhost_virtqueue *vq)
109 {
110         /* Note for legacy virtio: user_be is initialized at reset time
111          * according to the host endianness. If userspace does not set an
112          * explicit endianness, the default behavior is native endian, as
113          * expected by legacy virtio.
114          */
115         vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
116 }
117 #else
118 static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
119 {
120 }
121
122 static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
123 {
124         return -ENOIOCTLCMD;
125 }
126
127 static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
128                                    int __user *argp)
129 {
130         return -ENOIOCTLCMD;
131 }
132
133 static void vhost_init_is_le(struct vhost_virtqueue *vq)
134 {
135         vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
136                 || virtio_legacy_is_little_endian();
137 }
138 #endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
139
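/* Illustration (a hedged sketch of the userspace side, not code from this
 * file): on a little-endian host serving a big-endian legacy guest,
 * userspace selects the ring endianness before attaching a backend:
 *
 *      struct vhost_vring_state s = {
 *              .index = 0,
 *              .num = VHOST_VRING_BIG_ENDIAN,
 *      };
 *      ioctl(vhost_fd, VHOST_SET_VRING_ENDIAN, &s);
 *
 * after which vq->is_le stays false and the vhost16/32/64 access helpers
 * byteswap accordingly.
 */
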
140 static void vhost_reset_is_le(struct vhost_virtqueue *vq)
141 {
142         vhost_init_is_le(vq);
143 }
144
145 struct vhost_flush_struct {
146         struct vhost_work work;
147         struct completion wait_event;
148 };
149
150 static void vhost_flush_work(struct vhost_work *work)
151 {
152         struct vhost_flush_struct *s;
153
154         s = container_of(work, struct vhost_flush_struct, work);
155         complete(&s->wait_event);
156 }
157
158 static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
159                             poll_table *pt)
160 {
161         struct vhost_poll *poll;
162
163         poll = container_of(pt, struct vhost_poll, table);
164         poll->wqh = wqh;
165         add_wait_queue(wqh, &poll->wait);
166 }
167
168 static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
169                              void *key)
170 {
171         struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
172
173         if (!(key_to_poll(key) & poll->mask))
174                 return 0;
175
176         vhost_poll_queue(poll);
177         return 0;
178 }
179
180 void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
181 {
182         clear_bit(VHOST_WORK_QUEUED, &work->flags);
183         work->fn = fn;
184 }
185 EXPORT_SYMBOL_GPL(vhost_work_init);
186
187 /* Init poll structure */
188 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
189                      __poll_t mask, struct vhost_dev *dev)
190 {
191         init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
192         init_poll_funcptr(&poll->table, vhost_poll_func);
193         poll->mask = mask;
194         poll->dev = dev;
195         poll->wqh = NULL;
196
197         vhost_work_init(&poll->work, fn);
198 }
199 EXPORT_SYMBOL_GPL(vhost_poll_init);
200
201 /* Start polling a file. We add ourselves to the file's wait queue. The caller
202  * must keep a reference to the file until after vhost_poll_stop is called. */
203 int vhost_poll_start(struct vhost_poll *poll, struct file *file)
204 {
205         __poll_t mask;
206
207         if (poll->wqh)
208                 return 0;
209
210         mask = vfs_poll(file, &poll->table);
211         if (mask)
212                 vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
213         if (mask & EPOLLERR) {
214                 vhost_poll_stop(poll);
215                 return -EINVAL;
216         }
217
218         return 0;
219 }
220 EXPORT_SYMBOL_GPL(vhost_poll_start);
221
222 /* Stop polling a file. After this function returns, it becomes safe to drop the
223  * file reference. You must also flush afterwards. */
224 void vhost_poll_stop(struct vhost_poll *poll)
225 {
226         if (poll->wqh) {
227                 remove_wait_queue(poll->wqh, &poll->wait);
228                 poll->wqh = NULL;
229         }
230 }
231 EXPORT_SYMBOL_GPL(vhost_poll_stop);
232
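/* Typical poll lifecycle, as used by backends such as vhost-net (a sketch
 * assuming a backend-owned struct vhost_poll "poll" and an eventfd file):
 *
 *      vhost_poll_init(&poll, handle_kick, EPOLLIN, dev);  // at setup
 *      vhost_poll_start(&poll, file);  // caller holds the file reference
 *      ...
 *      vhost_poll_stop(&poll);         // the reference may be dropped now
 *      vhost_poll_flush(&poll);        // wait out any queued work
 */
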
233 void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
234 {
235         struct vhost_flush_struct flush;
236
237         if (dev->worker) {
238                 init_completion(&flush.wait_event);
239                 vhost_work_init(&flush.work, vhost_flush_work);
240
241                 vhost_work_queue(dev, &flush.work);
242                 wait_for_completion(&flush.wait_event);
243         }
244 }
245 EXPORT_SYMBOL_GPL(vhost_work_flush);
246
247 /* Flush any work that has been scheduled. When calling this, don't hold any
248  * locks that are also used by the callback. */
249 void vhost_poll_flush(struct vhost_poll *poll)
250 {
251         vhost_work_flush(poll->dev, &poll->work);
252 }
253 EXPORT_SYMBOL_GPL(vhost_poll_flush);
254
255 void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
256 {
257         if (!dev->worker)
258                 return;
259
260         if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
261                 /* We can only add the work to the list after we're
262                  * sure it was not in the list.
263                  * test_and_set_bit() implies a memory barrier.
264                  */
265                 llist_add(&work->node, &dev->work_list);
266                 wake_up_process(dev->worker);
267         }
268 }
269 EXPORT_SYMBOL_GPL(vhost_work_queue);
270
271 /* A lockless hint for busy polling code to exit the loop */
272 bool vhost_has_work(struct vhost_dev *dev)
273 {
274         return !llist_empty(&dev->work_list);
275 }
276 EXPORT_SYMBOL_GPL(vhost_has_work);
277
278 void vhost_poll_queue(struct vhost_poll *poll)
279 {
280         vhost_work_queue(poll->dev, &poll->work);
281 }
282 EXPORT_SYMBOL_GPL(vhost_poll_queue);
283
284 static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq)
285 {
286         int j;
287
288         for (j = 0; j < VHOST_NUM_ADDRS; j++)
289                 vq->meta_iotlb[j] = NULL;
290 }
291
292 static void vhost_vq_meta_reset(struct vhost_dev *d)
293 {
294         int i;
295
296         for (i = 0; i < d->nvqs; ++i)
297                 __vhost_vq_meta_reset(d->vqs[i]);
298 }
299
300 #if VHOST_ARCH_CAN_ACCEL_UACCESS
301 static void vhost_map_unprefetch(struct vhost_map *map)
302 {
303         kfree(map->pages);
304         map->pages = NULL;
305         map->npages = 0;
306         map->addr = NULL;
307 }
308
309 static void vhost_uninit_vq_maps(struct vhost_virtqueue *vq)
310 {
311         struct vhost_map *map[VHOST_NUM_ADDRS];
312         int i;
313
314         spin_lock(&vq->mmu_lock);
315         for (i = 0; i < VHOST_NUM_ADDRS; i++) {
316                 map[i] = rcu_dereference_protected(vq->maps[i],
317                                   lockdep_is_held(&vq->mmu_lock));
318                 if (map[i])
319                         rcu_assign_pointer(vq->maps[i], NULL);
320         }
321         spin_unlock(&vq->mmu_lock);
322
323         synchronize_rcu();
324
325         for (i = 0; i < VHOST_NUM_ADDRS; i++)
326                 if (map[i])
327                         vhost_map_unprefetch(map[i]);
329 }
330
331 static void vhost_reset_vq_maps(struct vhost_virtqueue *vq)
332 {
333         int i;
334
335         vhost_uninit_vq_maps(vq);
336         for (i = 0; i < VHOST_NUM_ADDRS; i++)
337                 vq->uaddrs[i].size = 0;
338 }
339
340 static bool vhost_map_range_overlap(struct vhost_uaddr *uaddr,
341                                      unsigned long start,
342                                      unsigned long end)
343 {
344         if (unlikely(!uaddr->size))
345                 return false;
346
347         return !(end < uaddr->uaddr || start > uaddr->uaddr - 1 + uaddr->size);
348 }
349
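/* The test above treats both [uaddr, uaddr + size - 1] and [start, end]
 * as inclusive ranges: they are disjoint only when one ends strictly
 * before the other begins. E.g. a 0x1000-byte uaddr at 0x1000 overlaps
 * any invalidation with start <= 0x1fff and end >= 0x1000.
 */
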
350 static void vhost_invalidate_vq_start(struct vhost_virtqueue *vq,
351                                       int index,
352                                       unsigned long start,
353                                       unsigned long end)
354 {
355         struct vhost_uaddr *uaddr = &vq->uaddrs[index];
356         struct vhost_map *map;
357         int i;
358
359         if (!vhost_map_range_overlap(uaddr, start, end))
360                 return;
361
362         spin_lock(&vq->mmu_lock);
363         ++vq->invalidate_count;
364
365         map = rcu_dereference_protected(vq->maps[index],
366                                         lockdep_is_held(&vq->mmu_lock));
367         if (map) {
368                 if (uaddr->write) {
369                         for (i = 0; i < map->npages; i++)
370                                 set_page_dirty(map->pages[i]);
371                 }
372                 rcu_assign_pointer(vq->maps[index], NULL);
373         }
374         spin_unlock(&vq->mmu_lock);
375
376         if (map) {
377                 synchronize_rcu();
378                 vhost_map_unprefetch(map);
379         }
380 }
381
382 static void vhost_invalidate_vq_end(struct vhost_virtqueue *vq,
383                                     int index,
384                                     unsigned long start,
385                                     unsigned long end)
386 {
387         if (!vhost_map_range_overlap(&vq->uaddrs[index], start, end))
388                 return;
389
390         spin_lock(&vq->mmu_lock);
391         --vq->invalidate_count;
392         spin_unlock(&vq->mmu_lock);
393 }
394
395 static int vhost_invalidate_range_start(struct mmu_notifier *mn,
396                                         const struct mmu_notifier_range *range)
397 {
398         struct vhost_dev *dev = container_of(mn, struct vhost_dev,
399                                              mmu_notifier);
400         int i, j;
401
402         if (!mmu_notifier_range_blockable(range))
403                 return -EAGAIN;
404
405         for (i = 0; i < dev->nvqs; i++) {
406                 struct vhost_virtqueue *vq = dev->vqs[i];
407
408                 for (j = 0; j < VHOST_NUM_ADDRS; j++)
409                         vhost_invalidate_vq_start(vq, j,
410                                                   range->start,
411                                                   range->end);
412         }
413
414         return 0;
415 }
416
417 static void vhost_invalidate_range_end(struct mmu_notifier *mn,
418                                        const struct mmu_notifier_range *range)
419 {
420         struct vhost_dev *dev = container_of(mn, struct vhost_dev,
421                                              mmu_notifier);
422         int i, j;
423
424         for (i = 0; i < dev->nvqs; i++) {
425                 struct vhost_virtqueue *vq = dev->vqs[i];
426
427                 for (j = 0; j < VHOST_NUM_ADDRS; j++)
428                         vhost_invalidate_vq_end(vq, j,
429                                                 range->start,
430                                                 range->end);
431         }
432 }
433
434 static const struct mmu_notifier_ops vhost_mmu_notifier_ops = {
435         .invalidate_range_start = vhost_invalidate_range_start,
436         .invalidate_range_end = vhost_invalidate_range_end,
437 };
438
439 static void vhost_init_maps(struct vhost_dev *dev)
440 {
441         struct vhost_virtqueue *vq;
442         int i, j;
443
444         dev->mmu_notifier.ops = &vhost_mmu_notifier_ops;
445
446         for (i = 0; i < dev->nvqs; ++i) {
447                 vq = dev->vqs[i];
448                 for (j = 0; j < VHOST_NUM_ADDRS; j++)
449                         RCU_INIT_POINTER(vq->maps[j], NULL);
450         }
451 }
452 #endif
453
454 static void vhost_vq_reset(struct vhost_dev *dev,
455                            struct vhost_virtqueue *vq)
456 {
457         vq->num = 1;
458         vq->desc = NULL;
459         vq->avail = NULL;
460         vq->used = NULL;
461         vq->last_avail_idx = 0;
462         vq->avail_idx = 0;
463         vq->last_used_idx = 0;
464         vq->signalled_used = 0;
465         vq->signalled_used_valid = false;
466         vq->used_flags = 0;
467         vq->log_used = false;
468         vq->log_addr = -1ull;
469         vq->private_data = NULL;
470         vq->acked_features = 0;
471         vq->acked_backend_features = 0;
472         vq->log_base = NULL;
473         vq->error_ctx = NULL;
474         vq->kick = NULL;
475         vq->call_ctx = NULL;
476         vq->log_ctx = NULL;
477         vhost_reset_is_le(vq);
478         vhost_disable_cross_endian(vq);
479         vq->busyloop_timeout = 0;
480         vq->umem = NULL;
481         vq->iotlb = NULL;
482         vq->invalidate_count = 0;
483         __vhost_vq_meta_reset(vq);
484 #if VHOST_ARCH_CAN_ACCEL_UACCESS
485         vhost_reset_vq_maps(vq);
486 #endif
487 }
488
489 static int vhost_worker(void *data)
490 {
491         struct vhost_dev *dev = data;
492         struct vhost_work *work, *work_next;
493         struct llist_node *node;
494         mm_segment_t oldfs = get_fs();
495
496         set_fs(USER_DS);
497         use_mm(dev->mm);
498
499         for (;;) {
500                 /* mb paired w/ kthread_stop */
501                 set_current_state(TASK_INTERRUPTIBLE);
502
503                 if (kthread_should_stop()) {
504                         __set_current_state(TASK_RUNNING);
505                         break;
506                 }
507
508                 node = llist_del_all(&dev->work_list);
509                 if (!node)
510                         schedule();
511
512                 node = llist_reverse_order(node);
513                 /* make sure flag is seen after deletion */
514                 smp_wmb();
515                 llist_for_each_entry_safe(work, work_next, node, node) {
516                         clear_bit(VHOST_WORK_QUEUED, &work->flags);
517                         __set_current_state(TASK_RUNNING);
518                         work->fn(work);
519                         if (need_resched())
520                                 schedule();
521                 }
522         }
523         unuse_mm(dev->mm);
524         set_fs(oldfs);
525         return 0;
526 }
527
528 static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
529 {
530         kfree(vq->indirect);
531         vq->indirect = NULL;
532         kfree(vq->log);
533         vq->log = NULL;
534         kfree(vq->heads);
535         vq->heads = NULL;
536 }
537
538 /* Helper to allocate iovec buffers for all vqs. */
539 static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
540 {
541         struct vhost_virtqueue *vq;
542         int i;
543
544         for (i = 0; i < dev->nvqs; ++i) {
545                 vq = dev->vqs[i];
546                 vq->indirect = kmalloc_array(UIO_MAXIOV,
547                                              sizeof(*vq->indirect),
548                                              GFP_KERNEL);
549                 vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log),
550                                         GFP_KERNEL);
551                 vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
552                                           GFP_KERNEL);
553                 if (!vq->indirect || !vq->log || !vq->heads)
554                         goto err_nomem;
555         }
556         return 0;
557
558 err_nomem:
559         for (; i >= 0; --i)
560                 vhost_vq_free_iovecs(dev->vqs[i]);
561         return -ENOMEM;
562 }
563
564 static void vhost_dev_free_iovecs(struct vhost_dev *dev)
565 {
566         int i;
567
568         for (i = 0; i < dev->nvqs; ++i)
569                 vhost_vq_free_iovecs(dev->vqs[i]);
570 }
571
572 bool vhost_exceeds_weight(struct vhost_virtqueue *vq,
573                           int pkts, int total_len)
574 {
575         struct vhost_dev *dev = vq->dev;
576
577         if ((dev->byte_weight && total_len >= dev->byte_weight) ||
578             pkts >= dev->weight) {
579                 vhost_poll_queue(&vq->poll);
580                 return true;
581         }
582
583         return false;
584 }
585 EXPORT_SYMBOL_GPL(vhost_exceeds_weight);
586
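/* Usage sketch, mirroring the pattern in the net handlers: a kick handler
 * bounds how much it does per invocation and relies on the requeue above
 * to resume later (pkts and total_len are the caller's counters):
 *
 *      do {
 *              ...process one buffer, adding its length to total_len...
 *      } while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
 */
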
587 static size_t vhost_get_avail_size(struct vhost_virtqueue *vq,
588                                    unsigned int num)
589 {
590         size_t event __maybe_unused =
591                vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
592
593         return sizeof(*vq->avail) +
594                sizeof(*vq->avail->ring) * num + event;
595 }
596
597 static size_t vhost_get_used_size(struct vhost_virtqueue *vq,
598                                   unsigned int num)
599 {
600         size_t event __maybe_unused =
601                vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
602
603         return sizeof(*vq->used) +
604                sizeof(*vq->used->ring) * num + event;
605 }
606
607 static size_t vhost_get_desc_size(struct vhost_virtqueue *vq,
608                                   unsigned int num)
609 {
610         return sizeof(*vq->desc) * num;
611 }
612
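/* Worked example for the three sizes above: a 256-entry split ring with
 * VIRTIO_RING_F_EVENT_IDX negotiated (16-byte descriptors, 2-byte avail
 * ring entries, 8-byte used elems) needs, informally:
 *
 *      desc:  256 * 16        = 4096 bytes
 *      avail: 4 + 256 * 2 + 2 =  518 bytes
 *      used:  4 + 256 * 8 + 2 = 2054 bytes
 */
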
613 void vhost_dev_init(struct vhost_dev *dev,
614                     struct vhost_virtqueue **vqs, int nvqs,
615                     int iov_limit, int weight, int byte_weight)
616 {
617         struct vhost_virtqueue *vq;
618         int i;
619
620         dev->vqs = vqs;
621         dev->nvqs = nvqs;
622         mutex_init(&dev->mutex);
623         dev->log_ctx = NULL;
624         dev->umem = NULL;
625         dev->iotlb = NULL;
626         dev->mm = NULL;
627         dev->worker = NULL;
628         dev->iov_limit = iov_limit;
629         dev->weight = weight;
630         dev->byte_weight = byte_weight;
631         init_llist_head(&dev->work_list);
632         init_waitqueue_head(&dev->wait);
633         INIT_LIST_HEAD(&dev->read_list);
634         INIT_LIST_HEAD(&dev->pending_list);
635         spin_lock_init(&dev->iotlb_lock);
636 #if VHOST_ARCH_CAN_ACCEL_UACCESS
637         vhost_init_maps(dev);
638 #endif
639
640         for (i = 0; i < dev->nvqs; ++i) {
641                 vq = dev->vqs[i];
642                 vq->log = NULL;
643                 vq->indirect = NULL;
644                 vq->heads = NULL;
645                 vq->dev = dev;
646                 mutex_init(&vq->mutex);
647                 spin_lock_init(&vq->mmu_lock);
648                 vhost_vq_reset(dev, vq);
649                 if (vq->handle_kick)
650                         vhost_poll_init(&vq->poll, vq->handle_kick,
651                                         EPOLLIN, dev);
652         }
653 }
654 EXPORT_SYMBOL_GPL(vhost_dev_init);
655
656 /* Caller should have device mutex */
657 long vhost_dev_check_owner(struct vhost_dev *dev)
658 {
659         /* Are you the owner? If not, I don't think you mean to do that */
660         return dev->mm == current->mm ? 0 : -EPERM;
661 }
662 EXPORT_SYMBOL_GPL(vhost_dev_check_owner);
663
664 struct vhost_attach_cgroups_struct {
665         struct vhost_work work;
666         struct task_struct *owner;
667         int ret;
668 };
669
670 static void vhost_attach_cgroups_work(struct vhost_work *work)
671 {
672         struct vhost_attach_cgroups_struct *s;
673
674         s = container_of(work, struct vhost_attach_cgroups_struct, work);
675         s->ret = cgroup_attach_task_all(s->owner, current);
676 }
677
678 static int vhost_attach_cgroups(struct vhost_dev *dev)
679 {
680         struct vhost_attach_cgroups_struct attach;
681
682         attach.owner = current;
683         vhost_work_init(&attach.work, vhost_attach_cgroups_work);
684         vhost_work_queue(dev, &attach.work);
685         vhost_work_flush(dev, &attach.work);
686         return attach.ret;
687 }
688
689 /* Caller should have device mutex */
690 bool vhost_dev_has_owner(struct vhost_dev *dev)
691 {
692         return dev->mm;
693 }
694 EXPORT_SYMBOL_GPL(vhost_dev_has_owner);
695
696 /* Caller should have device mutex */
697 long vhost_dev_set_owner(struct vhost_dev *dev)
698 {
699         struct task_struct *worker;
700         int err;
701
702         /* Is there an owner already? */
703         if (vhost_dev_has_owner(dev)) {
704                 err = -EBUSY;
705                 goto err_mm;
706         }
707
708         /* No owner, become one */
709         dev->mm = get_task_mm(current);
710         worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
711         if (IS_ERR(worker)) {
712                 err = PTR_ERR(worker);
713                 goto err_worker;
714         }
715
716         dev->worker = worker;
717         wake_up_process(worker);        /* avoid contributing to loadavg */
718
719         err = vhost_attach_cgroups(dev);
720         if (err)
721                 goto err_cgroup;
722
723         err = vhost_dev_alloc_iovecs(dev);
724         if (err)
725                 goto err_cgroup;
726
727 #if VHOST_ARCH_CAN_ACCEL_UACCESS
728         err = mmu_notifier_register(&dev->mmu_notifier, dev->mm);
729         if (err)
730                 goto err_mmu_notifier;
731 #endif
732
733         return 0;
734
735 #if VHOST_ARCH_CAN_ACCEL_UACCESS
736 err_mmu_notifier:
737         vhost_dev_free_iovecs(dev);
738 #endif
739 err_cgroup:
740         kthread_stop(worker);
741         dev->worker = NULL;
742 err_worker:
743         if (dev->mm)
744                 mmput(dev->mm);
745         dev->mm = NULL;
746 err_mm:
747         return err;
748 }
749 EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
750
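/* Ownership sketch (informal, mirroring callers like the vhost-net
 * ioctl paths):
 *
 *      mutex_lock(&dev->mutex);
 *      err = vhost_dev_check_owner(dev);  // or vhost_dev_set_owner(dev)
 *      if (!err)
 *              ...program the device...
 *      mutex_unlock(&dev->mutex);
 */
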
751 struct vhost_umem *vhost_dev_reset_owner_prepare(void)
752 {
753         return kvzalloc(sizeof(struct vhost_umem), GFP_KERNEL);
754 }
755 EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);
756
757 /* Caller should have device mutex */
758 void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_umem *umem)
759 {
760         int i;
761
762         vhost_dev_cleanup(dev);
763
764         /* Restore memory to default empty mapping. */
765         INIT_LIST_HEAD(&umem->umem_list);
766         dev->umem = umem;
767         /* We don't need VQ locks below since vhost_dev_cleanup makes sure
768          * VQs aren't running.
769          */
770         for (i = 0; i < dev->nvqs; ++i)
771                 dev->vqs[i]->umem = umem;
772 }
773 EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);
774
775 void vhost_dev_stop(struct vhost_dev *dev)
776 {
777         int i;
778
779         for (i = 0; i < dev->nvqs; ++i) {
780                 if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
781                         vhost_poll_stop(&dev->vqs[i]->poll);
782                         vhost_poll_flush(&dev->vqs[i]->poll);
783                 }
784         }
785 }
786 EXPORT_SYMBOL_GPL(vhost_dev_stop);
787
788 static void vhost_umem_free(struct vhost_umem *umem,
789                             struct vhost_umem_node *node)
790 {
791         vhost_umem_interval_tree_remove(node, &umem->umem_tree);
792         list_del(&node->link);
793         kfree(node);
794         umem->numem--;
795 }
796
797 static void vhost_umem_clean(struct vhost_umem *umem)
798 {
799         struct vhost_umem_node *node, *tmp;
800
801         if (!umem)
802                 return;
803
804         list_for_each_entry_safe(node, tmp, &umem->umem_list, link)
805                 vhost_umem_free(umem, node);
806
807         kvfree(umem);
808 }
809
810 static void vhost_clear_msg(struct vhost_dev *dev)
811 {
812         struct vhost_msg_node *node, *n;
813
814         spin_lock(&dev->iotlb_lock);
815
816         list_for_each_entry_safe(node, n, &dev->read_list, node) {
817                 list_del(&node->node);
818                 kfree(node);
819         }
820
821         list_for_each_entry_safe(node, n, &dev->pending_list, node) {
822                 list_del(&node->node);
823                 kfree(node);
824         }
825
826         spin_unlock(&dev->iotlb_lock);
827 }
828
829 #if VHOST_ARCH_CAN_ACCEL_UACCESS
830 static void vhost_setup_uaddr(struct vhost_virtqueue *vq,
831                               int index, unsigned long uaddr,
832                               size_t size, bool write)
833 {
834         struct vhost_uaddr *addr = &vq->uaddrs[index];
835
836         addr->uaddr = uaddr;
837         addr->size = size;
838         addr->write = write;
839 }
840
841 static void vhost_setup_vq_uaddr(struct vhost_virtqueue *vq)
842 {
843         vhost_setup_uaddr(vq, VHOST_ADDR_DESC,
844                           (unsigned long)vq->desc,
845                           vhost_get_desc_size(vq, vq->num),
846                           false);
847         vhost_setup_uaddr(vq, VHOST_ADDR_AVAIL,
848                           (unsigned long)vq->avail,
849                           vhost_get_avail_size(vq, vq->num),
850                           false);
851         vhost_setup_uaddr(vq, VHOST_ADDR_USED,
852                           (unsigned long)vq->used,
853                           vhost_get_used_size(vq, vq->num),
854                           true);
855 }
856
857 static int vhost_map_prefetch(struct vhost_virtqueue *vq,
858                                int index)
859 {
860         struct vhost_map *map;
861         struct vhost_uaddr *uaddr = &vq->uaddrs[index];
862         struct page **pages;
863         int npages = DIV_ROUND_UP(uaddr->size, PAGE_SIZE);
864         int npinned;
865         void *vaddr, *v;
866         int err;
867         int i;
868
869         spin_lock(&vq->mmu_lock);
870
871         err = -EFAULT;
872         if (vq->invalidate_count)
873                 goto err;
874
875         err = -ENOMEM;
876         map = kmalloc(sizeof(*map), GFP_ATOMIC);
877         if (!map)
878                 goto err;
879
880         pages = kmalloc_array(npages, sizeof(struct page *), GFP_ATOMIC);
881         if (!pages)
882                 goto err_pages;
883
884         err = -EFAULT;
885         npinned = __get_user_pages_fast(uaddr->uaddr, npages,
886                                         uaddr->write, pages);
887         if (npinned > 0)
888                 release_pages(pages, npinned);
889         if (npinned != npages)
890                 goto err_gup;
891
892         for (i = 0; i < npinned; i++)
893                 if (PageHighMem(pages[i]))
894                         goto err_gup;
895
896         vaddr = v = page_address(pages[0]);
897
898         /* For simplicity, fall back to the userspace address if the kernel
899          * VAs of the pinned pages are not contiguous.
900          */
901         for (i = 1; i < npinned; i++) {
902                 v += PAGE_SIZE;
903                 if (v != page_address(pages[i]))
904                         goto err_gup;
905         }
906
907         map->addr = vaddr + (uaddr->uaddr & (PAGE_SIZE - 1));
908         map->npages = npages;
909         map->pages = pages;
910
911         rcu_assign_pointer(vq->maps[index], map);
912         /* No need for a synchronize_rcu(). This function should be
913          * called by dev->worker so we are serialized with all
914          * readers.
915          */
916         spin_unlock(&vq->mmu_lock);
917
918         return 0;
919
920 err_gup:
921         kfree(pages);
922 err_pages:
923         kfree(map);
924 err:
925         spin_unlock(&vq->mmu_lock);
926         return err;
927 }
928 #endif
929
930 void vhost_dev_cleanup(struct vhost_dev *dev)
931 {
932         int i;
933
934         for (i = 0; i < dev->nvqs; ++i) {
935                 if (dev->vqs[i]->error_ctx)
936                         eventfd_ctx_put(dev->vqs[i]->error_ctx);
937                 if (dev->vqs[i]->kick)
938                         fput(dev->vqs[i]->kick);
939                 if (dev->vqs[i]->call_ctx)
940                         eventfd_ctx_put(dev->vqs[i]->call_ctx);
941                 vhost_vq_reset(dev, dev->vqs[i]);
942         }
943         vhost_dev_free_iovecs(dev);
944         if (dev->log_ctx)
945                 eventfd_ctx_put(dev->log_ctx);
946         dev->log_ctx = NULL;
947         /* No one will access memory at this point */
948         vhost_umem_clean(dev->umem);
949         dev->umem = NULL;
950         vhost_umem_clean(dev->iotlb);
951         dev->iotlb = NULL;
952         vhost_clear_msg(dev);
953         wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
954         WARN_ON(!llist_empty(&dev->work_list));
955         if (dev->worker) {
956                 kthread_stop(dev->worker);
957                 dev->worker = NULL;
958         }
959         if (dev->mm) {
960 #if VHOST_ARCH_CAN_ACCEL_UACCESS
961                 mmu_notifier_unregister(&dev->mmu_notifier, dev->mm);
962 #endif
963                 mmput(dev->mm);
964         }
965 #if VHOST_ARCH_CAN_ACCEL_UACCESS
966         for (i = 0; i < dev->nvqs; i++)
967                 vhost_uninit_vq_maps(dev->vqs[i]);
968 #endif
969         dev->mm = NULL;
970 }
971 EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
972
973 static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
974 {
975         u64 a = addr / VHOST_PAGE_SIZE / 8;
976
977         /* Make sure 64 bit math will not overflow. */
978         if (a > ULONG_MAX - (unsigned long)log_base ||
979             a + (unsigned long)log_base > ULONG_MAX)
980                 return false;
981
982         return access_ok(log_base + a,
983                          (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
984 }
985
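/* The log is a bitmap with one bit per VHOST_PAGE_SIZE page, so address
 * "addr" lands at byte addr / VHOST_PAGE_SIZE / 8 of it. E.g. with 4K
 * pages, addr 0x40000000 (1 GiB) maps to byte 0x8000 (32 KiB) into the
 * bitmap; the access_ok() above rounds sz up to whole bitmap bytes.
 */
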
986 static bool vhost_overflow(u64 uaddr, u64 size)
987 {
988         /* Make sure 64 bit math will not overflow. */
989         return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size;
990 }
991
992 /* Caller should have vq mutex and device mutex. */
993 static bool vq_memory_access_ok(void __user *log_base, struct vhost_umem *umem,
994                                 int log_all)
995 {
996         struct vhost_umem_node *node;
997
998         if (!umem)
999                 return false;
1000
1001         list_for_each_entry(node, &umem->umem_list, link) {
1002                 unsigned long a = node->userspace_addr;
1003
1004                 if (vhost_overflow(node->userspace_addr, node->size))
1005                         return false;
1006
1008                 if (!access_ok((void __user *)a, node->size))
1010                         return false;
1011                 else if (log_all && !log_access_ok(log_base,
1012                                                    node->start,
1013                                                    node->size))
1014                         return false;
1015         }
1016         return true;
1017 }
1018
1019 static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
1020                                                u64 addr, unsigned int size,
1021                                                int type)
1022 {
1023         const struct vhost_umem_node *node = vq->meta_iotlb[type];
1024
1025         if (!node)
1026                 return NULL;
1027
1028         return (void __user *)(uintptr_t)(node->userspace_addr + addr - node->start);
1029 }
1030
1031 /* Can we switch to this memory table? */
1032 /* Caller should have device mutex but not vq mutex */
1033 static bool memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
1034                              int log_all)
1035 {
1036         int i;
1037
1038         for (i = 0; i < d->nvqs; ++i) {
1039                 bool ok;
1040                 bool log;
1041
1042                 mutex_lock(&d->vqs[i]->mutex);
1043                 log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
1044                 /* If ring is inactive, will check when it's enabled. */
1045                 if (d->vqs[i]->private_data)
1046                         ok = vq_memory_access_ok(d->vqs[i]->log_base,
1047                                                  umem, log);
1048                 else
1049                         ok = true;
1050                 mutex_unlock(&d->vqs[i]->mutex);
1051                 if (!ok)
1052                         return false;
1053         }
1054         return true;
1055 }
1056
1057 static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
1058                           struct iovec iov[], int iov_size, int access);
1059
1060 static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
1061                               const void *from, unsigned size)
1062 {
1063         int ret;
1064
1065         if (!vq->iotlb)
1066                 return __copy_to_user(to, from, size);
1067         else {
1068                 /* This function should be called after iotlb
1069                  * prefetch, which means we're sure that all vq
1070                  * metadata can be accessed through the iotlb. So
1071                  * -EAGAIN should not happen in this case.
1072                  */
1073                 struct iov_iter t;
1074                 void __user *uaddr = vhost_vq_meta_fetch(vq,
1075                                      (u64)(uintptr_t)to, size,
1076                                      VHOST_ADDR_USED);
1077
1078                 if (uaddr)
1079                         return __copy_to_user(uaddr, from, size);
1080
1081                 ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
1082                                      ARRAY_SIZE(vq->iotlb_iov),
1083                                      VHOST_ACCESS_WO);
1084                 if (ret < 0)
1085                         goto out;
1086                 iov_iter_init(&t, WRITE, vq->iotlb_iov, ret, size);
1087                 ret = copy_to_iter(from, size, &t);
1088                 if (ret == size)
1089                         ret = 0;
1090         }
1091 out:
1092         return ret;
1093 }
1094
1095 static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
1096                                 void __user *from, unsigned size)
1097 {
1098         int ret;
1099
1100         if (!vq->iotlb)
1101                 return __copy_from_user(to, from, size);
1102         else {
1103                 /* This function should be called after iotlb
1104                  * prefetch, which means we're sure that the vq
1105                  * can be accessed through the iotlb. So -EAGAIN
1106                  * should not happen in this case.
1107                  */
1108                 void __user *uaddr = vhost_vq_meta_fetch(vq,
1109                                      (u64)(uintptr_t)from, size,
1110                                      VHOST_ADDR_DESC);
1111                 struct iov_iter f;
1112
1113                 if (uaddr)
1114                         return __copy_from_user(to, uaddr, size);
1115
1116                 ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov,
1117                                      ARRAY_SIZE(vq->iotlb_iov),
1118                                      VHOST_ACCESS_RO);
1119                 if (ret < 0) {
1120                         vq_err(vq, "IOTLB translation failure: uaddr "
1121                                "%p size 0x%llx\n", from,
1122                                (unsigned long long) size);
1123                         goto out;
1124                 }
1125                 iov_iter_init(&f, READ, vq->iotlb_iov, ret, size);
1126                 ret = copy_from_iter(to, size, &f);
1127                 if (ret == size)
1128                         ret = 0;
1129         }
1130
1131 out:
1132         return ret;
1133 }
1134
1135 static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq,
1136                                           void __user *addr, unsigned int size,
1137                                           int type)
1138 {
1139         int ret;
1140
1141         ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov,
1142                              ARRAY_SIZE(vq->iotlb_iov),
1143                              VHOST_ACCESS_RO);
1144         if (ret < 0) {
1145                 vq_err(vq, "IOTLB translation failure: uaddr "
1146                         "%p size 0x%llx\n", addr,
1147                         (unsigned long long) size);
1148                 return NULL;
1149         }
1150
1151         if (ret != 1 || vq->iotlb_iov[0].iov_len != size) {
1152                 vq_err(vq, "Non atomic userspace memory access: uaddr "
1153                         "%p size 0x%llx\n", addr,
1154                         (unsigned long long) size);
1155                 return NULL;
1156         }
1157
1158         return vq->iotlb_iov[0].iov_base;
1159 }
1160
1161 /* This function should be called after iotlb
1162  * prefetch, which means we're sure that the vq
1163  * can be accessed through the iotlb. So -EAGAIN
1164  * should not happen in this case.
1165  */
1166 static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
1167                                             void *addr, unsigned int size,
1168                                             int type)
1169 {
1170         void __user *uaddr = vhost_vq_meta_fetch(vq,
1171                              (u64)(uintptr_t)addr, size, type);
1172         if (uaddr)
1173                 return uaddr;
1174
1175         return __vhost_get_user_slow(vq, addr, size, type);
1176 }
1177
1178 #define vhost_put_user(vq, x, ptr)              \
1179 ({ \
1180         int ret = -EFAULT; \
1181         if (!vq->iotlb) { \
1182                 ret = __put_user(x, ptr); \
1183         } else { \
1184                 __typeof__(ptr) to = \
1185                         (__typeof__(ptr)) __vhost_get_user(vq, ptr,     \
1186                                           sizeof(*ptr), VHOST_ADDR_USED); \
1187                 if (to != NULL) \
1188                         ret = __put_user(x, to); \
1189                 else \
1190                         ret = -EFAULT;  \
1191         } \
1192         ret; \
1193 })
1194
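/* Fast path: without an IOTLB, vhost_put_user() is a plain __put_user()
 * on the ring pointer userspace registered with VHOST_SET_VRING_ADDR.
 * Slow path: the address is first translated through the device IOTLB,
 * using the per-vq meta cache when it hits. vhost_get_user() below is
 * the mirror image for reads.
 */
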
1195 static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
1196 {
1197 #if VHOST_ARCH_CAN_ACCEL_UACCESS
1198         struct vhost_map *map;
1199         struct vring_used *used;
1200
1201         if (!vq->iotlb) {
1202                 rcu_read_lock();
1203
1204                 map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
1205                 if (likely(map)) {
1206                         used = map->addr;
1207                         *((__virtio16 *)&used->ring[vq->num]) =
1208                                 cpu_to_vhost16(vq, vq->avail_idx);
1209                         rcu_read_unlock();
1210                         return 0;
1211                 }
1212
1213                 rcu_read_unlock();
1214         }
1215 #endif
1216
1217         return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
1218                               vhost_avail_event(vq));
1219 }
1220
1221 static inline int vhost_put_used(struct vhost_virtqueue *vq,
1222                                  struct vring_used_elem *head, int idx,
1223                                  int count)
1224 {
1225 #if VHOST_ARCH_CAN_ACCEL_UACCESS
1226         struct vhost_map *map;
1227         struct vring_used *used;
1228         size_t size;
1229
1230         if (!vq->iotlb) {
1231                 rcu_read_lock();
1232
1233                 map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
1234                 if (likely(map)) {
1235                         used = map->addr;
1236                         size = count * sizeof(*head);
1237                         memcpy(used->ring + idx, head, size);
1238                         rcu_read_unlock();
1239                         return 0;
1240                 }
1241
1242                 rcu_read_unlock();
1243         }
1244 #endif
1245
1246         return vhost_copy_to_user(vq, vq->used->ring + idx, head,
1247                                   count * sizeof(*head));
1248 }
1249
1250 static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
1252 {
1253 #if VHOST_ARCH_CAN_ACCEL_UACCESS
1254         struct vhost_map *map;
1255         struct vring_used *used;
1256
1257         if (!vq->iotlb) {
1258                 rcu_read_lock();
1259
1260                 map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
1261                 if (likely(map)) {
1262                         used = map->addr;
1263                         used->flags = cpu_to_vhost16(vq, vq->used_flags);
1264                         rcu_read_unlock();
1265                         return 0;
1266                 }
1267
1268                 rcu_read_unlock();
1269         }
1270 #endif
1271
1272         return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
1273                               &vq->used->flags);
1274 }
1275
1276 static inline int vhost_put_used_idx(struct vhost_virtqueue *vq)
1278 {
1279 #if VHOST_ARCH_CAN_ACCEL_UACCESS
1280         struct vhost_map *map;
1281         struct vring_used *used;
1282
1283         if (!vq->iotlb) {
1284                 rcu_read_lock();
1285
1286                 map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
1287                 if (likely(map)) {
1288                         used = map->addr;
1289                         used->idx = cpu_to_vhost16(vq, vq->last_used_idx);
1290                         rcu_read_unlock();
1291                         return 0;
1292                 }
1293
1294                 rcu_read_unlock();
1295         }
1296 #endif
1297
1298         return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
1299                               &vq->used->idx);
1300 }
1301
1302 #define vhost_get_user(vq, x, ptr, type)                \
1303 ({ \
1304         int ret; \
1305         if (!vq->iotlb) { \
1306                 ret = __get_user(x, ptr); \
1307         } else { \
1308                 __typeof__(ptr) from = \
1309                         (__typeof__(ptr)) __vhost_get_user(vq, ptr, \
1310                                                            sizeof(*ptr), \
1311                                                            type); \
1312                 if (from != NULL) \
1313                         ret = __get_user(x, from); \
1314                 else \
1315                         ret = -EFAULT; \
1316         } \
1317         ret; \
1318 })
1319
1320 #define vhost_get_avail(vq, x, ptr) \
1321         vhost_get_user(vq, x, ptr, VHOST_ADDR_AVAIL)
1322
1323 #define vhost_get_used(vq, x, ptr) \
1324         vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)
1325
1326 static void vhost_dev_lock_vqs(struct vhost_dev *d)
1327 {
1328         int i;
1329         for (i = 0; i < d->nvqs; ++i)
1330                 mutex_lock_nested(&d->vqs[i]->mutex, i);
1331 }
1332
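/* Note: the per-vq subclass passed to mutex_lock_nested() above tells
 * lockdep that the vq mutexes are always taken in a fixed order
 * (0..nvqs-1), so holding all of them at once for an IOTLB update is
 * not reported as a possible deadlock.
 */
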
1333 static void vhost_dev_unlock_vqs(struct vhost_dev *d)
1334 {
1335         int i;
1336         for (i = 0; i < d->nvqs; ++i)
1337                 mutex_unlock(&d->vqs[i]->mutex);
1338 }
1339
1340 static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq,
1341                                       __virtio16 *idx)
1342 {
1343 #if VHOST_ARCH_CAN_ACCEL_UACCESS
1344         struct vhost_map *map;
1345         struct vring_avail *avail;
1346
1347         if (!vq->iotlb) {
1348                 rcu_read_lock();
1349
1350                 map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
1351                 if (likely(map)) {
1352                         avail = map->addr;
1353                         *idx = avail->idx;
1354                         rcu_read_unlock();
1355                         return 0;
1356                 }
1357
1358                 rcu_read_unlock();
1359         }
1360 #endif
1361
1362         return vhost_get_avail(vq, *idx, &vq->avail->idx);
1363 }
1364
1365 static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
1366                                        __virtio16 *head, int idx)
1367 {
1368 #if VHOST_ARCH_CAN_ACCEL_UACCESS
1369         struct vhost_map *map;
1370         struct vring_avail *avail;
1371
1372         if (!vq->iotlb) {
1373                 rcu_read_lock();
1374
1375                 map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
1376                 if (likely(map)) {
1377                         avail = map->addr;
1378                         *head = avail->ring[idx & (vq->num - 1)];
1379                         rcu_read_unlock();
1380                         return 0;
1381                 }
1382
1383                 rcu_read_unlock();
1384         }
1385 #endif
1386
1387         return vhost_get_avail(vq, *head,
1388                                &vq->avail->ring[idx & (vq->num - 1)]);
1389 }
1390
1391 static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq,
1392                                         __virtio16 *flags)
1393 {
1394 #if VHOST_ARCH_CAN_ACCEL_UACCESS
1395         struct vhost_map *map;
1396         struct vring_avail *avail;
1397
1398         if (!vq->iotlb) {
1399                 rcu_read_lock();
1400
1401                 map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
1402                 if (likely(map)) {
1403                         avail = map->addr;
1404                         *flags = avail->flags;
1405                         rcu_read_unlock();
1406                         return 0;
1407                 }
1408
1409                 rcu_read_unlock();
1410         }
1411 #endif
1412
1413         return vhost_get_avail(vq, *flags, &vq->avail->flags);
1414 }
1415
1416 static inline int vhost_get_used_event(struct vhost_virtqueue *vq,
1417                                        __virtio16 *event)
1418 {
1419 #if VHOST_ARCH_CAN_ACCEL_UACCESS
1420         struct vhost_map *map;
1421         struct vring_avail *avail;
1422
1423         if (!vq->iotlb) {
1424                 rcu_read_lock();
1425                 map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
1426                 if (likely(map)) {
1427                         avail = map->addr;
1428                         *event = (__virtio16)avail->ring[vq->num];
1429                         rcu_read_unlock();
1430                         return 0;
1431                 }
1432                 rcu_read_unlock();
1433         }
1434 #endif
1435
1436         return vhost_get_avail(vq, *event, vhost_used_event(vq));
1437 }
1438
1439 static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
1440                                      __virtio16 *idx)
1441 {
1442 #if VHOST_ARCH_CAN_ACCEL_UACCESS
1443         struct vhost_map *map;
1444         struct vring_used *used;
1445
1446         if (!vq->iotlb) {
1447                 rcu_read_lock();
1448
1449                 map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
1450                 if (likely(map)) {
1451                         used = map->addr;
1452                         *idx = used->idx;
1453                         rcu_read_unlock();
1454                         return 0;
1455                 }
1456
1457                 rcu_read_unlock();
1458         }
1459 #endif
1460
1461         return vhost_get_used(vq, *idx, &vq->used->idx);
1462 }
1463
1464 static inline int vhost_get_desc(struct vhost_virtqueue *vq,
1465                                  struct vring_desc *desc, int idx)
1466 {
1467 #if VHOST_ARCH_CAN_ACCEL_UACCESS
1468         struct vhost_map *map;
1469         struct vring_desc *d;
1470
1471         if (!vq->iotlb) {
1472                 rcu_read_lock();
1473
1474                 map = rcu_dereference(vq->maps[VHOST_ADDR_DESC]);
1475                 if (likely(map)) {
1476                         d = map->addr;
1477                         *desc = *(d + idx);
1478                         rcu_read_unlock();
1479                         return 0;
1480                 }
1481
1482                 rcu_read_unlock();
1483         }
1484 #endif
1485
1486         return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
1487 }
1488
1489 static int vhost_new_umem_range(struct vhost_umem *umem,
1490                                 u64 start, u64 size, u64 end,
1491                                 u64 userspace_addr, int perm)
1492 {
1493         struct vhost_umem_node *tmp, *node;
1494
1495         if (!size)
1496                 return -EFAULT;
1497
1498         node = kmalloc(sizeof(*node), GFP_ATOMIC);
1499         if (!node)
1500                 return -ENOMEM;
1501
1502         if (umem->numem == max_iotlb_entries) {
1503                 tmp = list_first_entry(&umem->umem_list, typeof(*tmp), link);
1504                 vhost_umem_free(umem, tmp);
1505         }
1506
1507         node->start = start;
1508         node->size = size;
1509         node->last = end;
1510         node->userspace_addr = userspace_addr;
1511         node->perm = perm;
1512         INIT_LIST_HEAD(&node->link);
1513         list_add_tail(&node->link, &umem->umem_list);
1514         vhost_umem_interval_tree_insert(node, &umem->umem_tree);
1515         umem->numem++;
1516
1517         return 0;
1518 }
1519
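/* Eviction note: the IOTLB is capped at max_iotlb_entries. When it is
 * full, the oldest entry (the head of umem_list, kept in FIFO order by
 * the list_add_tail() above) is freed before the new range is linked.
 */
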
1520 static void vhost_del_umem_range(struct vhost_umem *umem,
1521                                  u64 start, u64 end)
1522 {
1523         struct vhost_umem_node *node;
1524
1525         while ((node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
1526                                                            start, end)))
1527                 vhost_umem_free(umem, node);
1528 }
1529
1530 static void vhost_iotlb_notify_vq(struct vhost_dev *d,
1531                                   struct vhost_iotlb_msg *msg)
1532 {
1533         struct vhost_msg_node *node, *n;
1534
1535         spin_lock(&d->iotlb_lock);
1536
1537         list_for_each_entry_safe(node, n, &d->pending_list, node) {
1538                 struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
1539                 if (msg->iova <= vq_msg->iova &&
1540                     msg->iova + msg->size - 1 >= vq_msg->iova &&
1541                     vq_msg->type == VHOST_IOTLB_MISS) {
1542                         vhost_poll_queue(&node->vq->poll);
1543                         list_del(&node->node);
1544                         kfree(node);
1545                 }
1546         }
1547
1548         spin_unlock(&d->iotlb_lock);
1549 }
1550
1551 static bool umem_access_ok(u64 uaddr, u64 size, int access)
1552 {
1553         unsigned long a = uaddr;
1554
1555         /* Make sure 64 bit math will not overflow. */
1556         if (vhost_overflow(uaddr, size))
1557                 return false;
1558
1559         if ((access & VHOST_ACCESS_RO) &&
1560             !access_ok((void __user *)a, size))
1561                 return false;
1562         if ((access & VHOST_ACCESS_WO) &&
1563             !access_ok((void __user *)a, size))
1564                 return false;
1565         return true;
1566 }
1567
1568 static int vhost_process_iotlb_msg(struct vhost_dev *dev,
1569                                    struct vhost_iotlb_msg *msg)
1570 {
1571         int ret = 0;
1572
1573         mutex_lock(&dev->mutex);
1574         vhost_dev_lock_vqs(dev);
1575         switch (msg->type) {
1576         case VHOST_IOTLB_UPDATE:
1577                 if (!dev->iotlb) {
1578                         ret = -EFAULT;
1579                         break;
1580                 }
1581                 if (!umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
1582                         ret = -EFAULT;
1583                         break;
1584                 }
1585                 vhost_vq_meta_reset(dev);
1586                 if (vhost_new_umem_range(dev->iotlb, msg->iova, msg->size,
1587                                          msg->iova + msg->size - 1,
1588                                          msg->uaddr, msg->perm)) {
1589                         ret = -ENOMEM;
1590                         break;
1591                 }
1592                 vhost_iotlb_notify_vq(dev, msg);
1593                 break;
1594         case VHOST_IOTLB_INVALIDATE:
1595                 if (!dev->iotlb) {
1596                         ret = -EFAULT;
1597                         break;
1598                 }
1599                 vhost_vq_meta_reset(dev);
1600                 vhost_del_umem_range(dev->iotlb, msg->iova,
1601                                      msg->iova + msg->size - 1);
1602                 break;
1603         default:
1604                 ret = -EINVAL;
1605                 break;
1606         }
1607
1608         vhost_dev_unlock_vqs(dev);
1609         mutex_unlock(&dev->mutex);
1610
1611         return ret;
1612 }

1613 ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
1614                              struct iov_iter *from)
1615 {
1616         struct vhost_iotlb_msg msg;
1617         size_t offset;
1618         int type, ret;
1619
1620         ret = copy_from_iter(&type, sizeof(type), from);
1621         if (ret != sizeof(type)) {
1622                 ret = -EINVAL;
1623                 goto done;
1624         }
1625
1626         switch (type) {
1627         case VHOST_IOTLB_MSG:
1628                 /* There may be a hole after the type field for the V1
1629                  * message layout, so skip it here.
1630                  */
1631                 offset = offsetof(struct vhost_msg, iotlb) - sizeof(int);
1632                 break;
1633         case VHOST_IOTLB_MSG_V2:
1634                 offset = sizeof(__u32);
1635                 break;
1636         default:
1637                 ret = -EINVAL;
1638                 goto done;
1639         }
1640
1641         iov_iter_advance(from, offset);
1642         ret = copy_from_iter(&msg, sizeof(msg), from);
1643         if (ret != sizeof(msg)) {
1644                 ret = -EINVAL;
1645                 goto done;
1646         }
1647         if (vhost_process_iotlb_msg(dev, &msg)) {
1648                 ret = -EFAULT;
1649                 goto done;
1650         }
1651
1652         ret = (type == VHOST_IOTLB_MSG) ? sizeof(struct vhost_msg) :
1653               sizeof(struct vhost_msg_v2);
1654 done:
1655         return ret;
1656 }
1657 EXPORT_SYMBOL(vhost_chr_write_iter);
1658
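/* Wire-format sketch (derived from the uapi structs; illustrative only):
 * a V2 IOTLB update written by userspace to the vhost chardev would look
 * roughly like
 *
 *      struct vhost_msg_v2 m = {
 *              .type = VHOST_IOTLB_MSG_V2,
 *              .iotlb = {
 *                      .iova = iova, .size = size, .uaddr = uaddr,
 *                      .perm = VHOST_ACCESS_RW,
 *                      .type = VHOST_IOTLB_UPDATE,
 *              },
 *      };
 *      write(vhost_fd, &m, sizeof(m));
 */
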
1659 __poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
1660                             poll_table *wait)
1661 {
1662         __poll_t mask = 0;
1663
1664         poll_wait(file, &dev->wait, wait);
1665
1666         if (!list_empty(&dev->read_list))
1667                 mask |= EPOLLIN | EPOLLRDNORM;
1668
1669         return mask;
1670 }
1671 EXPORT_SYMBOL(vhost_chr_poll);
1672
1673 ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
1674                             int noblock)
1675 {
1676         DEFINE_WAIT(wait);
1677         struct vhost_msg_node *node;
1678         ssize_t ret = 0;
1679         unsigned size = sizeof(struct vhost_msg);
1680
1681         if (iov_iter_count(to) < size)
1682                 return 0;
1683
1684         while (1) {
1685                 if (!noblock)
1686                         prepare_to_wait(&dev->wait, &wait,
1687                                         TASK_INTERRUPTIBLE);
1688
1689                 node = vhost_dequeue_msg(dev, &dev->read_list);
1690                 if (node)
1691                         break;
1692                 if (noblock) {
1693                         ret = -EAGAIN;
1694                         break;
1695                 }
1696                 if (signal_pending(current)) {
1697                         ret = -ERESTARTSYS;
1698                         break;
1699                 }
1700                 if (!dev->iotlb) {
1701                         ret = -EBADFD;
1702                         break;
1703                 }
1704
1705                 schedule();
1706         }
1707
1708         if (!noblock)
1709                 finish_wait(&dev->wait, &wait);
1710
1711         if (node) {
1712                 struct vhost_iotlb_msg *msg;
1713                 void *start = &node->msg;
1714
1715                 switch (node->msg.type) {
1716                 case VHOST_IOTLB_MSG:
1717                         size = sizeof(node->msg);
1718                         msg = &node->msg.iotlb;
1719                         break;
1720                 case VHOST_IOTLB_MSG_V2:
1721                         size = sizeof(node->msg_v2);
1722                         msg = &node->msg_v2.iotlb;
1723                         break;
1724                 default:
1725                         BUG();
1726                         break;
1727                 }
1728
1729                 ret = copy_to_iter(start, size, to);
1730                 if (ret != size || msg->type != VHOST_IOTLB_MISS) {
1731                         kfree(node);
1732                         return ret;
1733                 }
1734                 vhost_enqueue_msg(dev, &dev->pending_list, node);
1735         }
1736
1737         return ret;
1738 }
1739 EXPORT_SYMBOL_GPL(vhost_chr_read_iter);
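/*
 * The userspace half of the miss/update protocol implemented by the two
 * functions above -- a minimal sketch assuming VHOST_BACKEND_F_IOTLB_MSG_V2
 * was negotiated; 'dev_fd', 'translated_hva' and 'region_size' are
 * illustrative names, not part of this file:
 *
 *	struct vhost_msg_v2 msg;
 *
 *	// Block until the kernel queues an IOTLB miss on read_list.
 *	if (read(dev_fd, &msg, sizeof(msg)) != sizeof(msg))
 *		return -1;
 *	// Translate msg.iotlb.iova, then answer with an update so that
 *	// vhost_process_iotlb_msg() can populate the device IOTLB.
 *	msg.type = VHOST_IOTLB_MSG_V2;
 *	msg.iotlb.type = VHOST_IOTLB_UPDATE;	// iova stays as reported
 *	msg.iotlb.size = region_size;
 *	msg.iotlb.uaddr = translated_hva;
 *	msg.iotlb.perm = VHOST_ACCESS_RW;
 *	if (write(dev_fd, &msg, sizeof(msg)) != sizeof(msg))
 *		return -1;
 */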
1740
1741 static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
1742 {
1743         struct vhost_dev *dev = vq->dev;
1744         struct vhost_msg_node *node;
1745         struct vhost_iotlb_msg *msg;
1746         bool v2 = vhost_backend_has_feature(vq, VHOST_BACKEND_F_IOTLB_MSG_V2);
1747
1748         node = vhost_new_msg(vq, v2 ? VHOST_IOTLB_MSG_V2 : VHOST_IOTLB_MSG);
1749         if (!node)
1750                 return -ENOMEM;
1751
1752         if (v2) {
1753                 node->msg_v2.type = VHOST_IOTLB_MSG_V2;
1754                 msg = &node->msg_v2.iotlb;
1755         } else {
1756                 msg = &node->msg.iotlb;
1757         }
1758
1759         msg->type = VHOST_IOTLB_MISS;
1760         msg->iova = iova;
1761         msg->perm = access;
1762
1763         vhost_enqueue_msg(dev, &dev->read_list, node);
1764
1765         return 0;
1766 }
1767
1768 static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
1769                          struct vring_desc __user *desc,
1770                          struct vring_avail __user *avail,
1771                          struct vring_used __user *used)
1773 {
1774         return access_ok(desc, vhost_get_desc_size(vq, num)) &&
1775                access_ok(avail, vhost_get_avail_size(vq, num)) &&
1776                access_ok(used, vhost_get_used_size(vq, num));
1777 }
1778
1779 static void vhost_vq_meta_update(struct vhost_virtqueue *vq,
1780                                  const struct vhost_umem_node *node,
1781                                  int type)
1782 {
1783         int access = (type == VHOST_ADDR_USED) ?
1784                      VHOST_ACCESS_WO : VHOST_ACCESS_RO;
1785
1786         if (likely(node->perm & access))
1787                 vq->meta_iotlb[type] = node;
1788 }
1789
1790 static bool iotlb_access_ok(struct vhost_virtqueue *vq,
1791                             int access, u64 addr, u64 len, int type)
1792 {
1793         const struct vhost_umem_node *node;
1794         struct vhost_umem *umem = vq->iotlb;
1795         u64 s = 0, size, orig_addr = addr, last = addr + len - 1;
1796
1797         if (vhost_vq_meta_fetch(vq, addr, len, type))
1798                 return true;
1799
1800         while (len > s) {
1801                 node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
1802                                                            addr,
1803                                                            last);
1804                 if (node == NULL || node->start > addr) {
1805                         vhost_iotlb_miss(vq, addr, access);
1806                         return false;
1807                 } else if (!(node->perm & access)) {
1808                         /* Report the possible access violation by
1809                          * requesting another translation from userspace.
1810                          */
1811                         return false;
1812                 }
1813
1814                 size = node->size - addr + node->start;
1815
1816                 if (orig_addr == addr && size >= len)
1817                         vhost_vq_meta_update(vq, node, type);
1818
1819                 s += size;
1820                 addr += size;
1821         }
1822
1823         return true;
1824 }
1825
1826 #if VHOST_ARCH_CAN_ACCEL_UACCESS
1827 static void vhost_vq_map_prefetch(struct vhost_virtqueue *vq)
1828 {
1829         struct vhost_map __rcu *map;
1830         int i;
1831
1832         for (i = 0; i < VHOST_NUM_ADDRS; i++) {
1833                 rcu_read_lock();
1834                 map = rcu_dereference(vq->maps[i]);
1835                 rcu_read_unlock();
1836                 if (unlikely(!map))
1837                         vhost_map_prefetch(vq, i);
1838         }
1839 }
1840 #endif
1841
1842 int vq_meta_prefetch(struct vhost_virtqueue *vq)
1843 {
1844         unsigned int num = vq->num;
1845
1846         if (!vq->iotlb) {
1847 #if VHOST_ARCH_CAN_ACCEL_UACCESS
1848                 vhost_vq_map_prefetch(vq);
1849 #endif
1850                 return 1;
1851         }
1852
1853         return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc,
1854                                vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) &&
1855                iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->avail,
1856                                vhost_get_avail_size(vq, num),
1857                                VHOST_ADDR_AVAIL) &&
1858                iotlb_access_ok(vq, VHOST_ACCESS_WO, (u64)(uintptr_t)vq->used,
1859                                vhost_get_used_size(vq, num), VHOST_ADDR_USED);
1860 }
1861 EXPORT_SYMBOL_GPL(vq_meta_prefetch);
1862
1863 /* Can we log writes? */
1864 /* Caller should have device mutex but not vq mutex */
1865 bool vhost_log_access_ok(struct vhost_dev *dev)
1866 {
1867         return memory_access_ok(dev, dev->umem, 1);
1868 }
1869 EXPORT_SYMBOL_GPL(vhost_log_access_ok);
1870
1871 /* Verify access for write logging. */
1872 /* Caller should have vq mutex and device mutex */
1873 static bool vq_log_access_ok(struct vhost_virtqueue *vq,
1874                              void __user *log_base)
1875 {
1876         return vq_memory_access_ok(log_base, vq->umem,
1877                                    vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
1878                 (!vq->log_used || log_access_ok(log_base, vq->log_addr,
1879                                   vhost_get_used_size(vq, vq->num)));
1880 }
1881
1882 /* Can we start vq? */
1883 /* Caller should have vq mutex and device mutex */
1884 bool vhost_vq_access_ok(struct vhost_virtqueue *vq)
1885 {
1886         if (!vq_log_access_ok(vq, vq->log_base))
1887                 return false;
1888
1889         /* Access validation occurs at prefetch time with IOTLB */
1890         if (vq->iotlb)
1891                 return true;
1892
1893         return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
1894 }
1895 EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
1896
1897 static struct vhost_umem *vhost_umem_alloc(void)
1898 {
1899         struct vhost_umem *umem = kvzalloc(sizeof(*umem), GFP_KERNEL);
1900
1901         if (!umem)
1902                 return NULL;
1903
1904         umem->umem_tree = RB_ROOT_CACHED;
1905         umem->numem = 0;
1906         INIT_LIST_HEAD(&umem->umem_list);
1907
1908         return umem;
1909 }
1910
1911 static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
1912 {
1913         struct vhost_memory mem, *newmem;
1914         struct vhost_memory_region *region;
1915         struct vhost_umem *newumem, *oldumem;
1916         unsigned long size = offsetof(struct vhost_memory, regions);
1917         int i;
1918
1919         if (copy_from_user(&mem, m, size))
1920                 return -EFAULT;
1921         if (mem.padding)
1922                 return -EOPNOTSUPP;
1923         if (mem.nregions > max_mem_regions)
1924                 return -E2BIG;
1925         newmem = kvzalloc(struct_size(newmem, regions, mem.nregions),
1926                         GFP_KERNEL);
1927         if (!newmem)
1928                 return -ENOMEM;
1929
1930         memcpy(newmem, &mem, size);
1931         if (copy_from_user(newmem->regions, m->regions,
1932                            mem.nregions * sizeof *m->regions)) {
1933                 kvfree(newmem);
1934                 return -EFAULT;
1935         }
1936
1937         newumem = vhost_umem_alloc();
1938         if (!newumem) {
1939                 kvfree(newmem);
1940                 return -ENOMEM;
1941         }
1942
1943         for (region = newmem->regions;
1944              region < newmem->regions + mem.nregions;
1945              region++) {
1946                 if (vhost_new_umem_range(newumem,
1947                                          region->guest_phys_addr,
1948                                          region->memory_size,
1949                                          region->guest_phys_addr +
1950                                          region->memory_size - 1,
1951                                          region->userspace_addr,
1952                                          VHOST_ACCESS_RW))
1953                         goto err;
1954         }
1955
1956         if (!memory_access_ok(d, newumem, 0))
1957                 goto err;
1958
1959         oldumem = d->umem;
1960         d->umem = newumem;
1961
1962         /* All memory accesses are done under some VQ mutex. */
1963         for (i = 0; i < d->nvqs; ++i) {
1964                 mutex_lock(&d->vqs[i]->mutex);
1965                 d->vqs[i]->umem = newumem;
1966                 mutex_unlock(&d->vqs[i]->mutex);
1967         }
1968
1969         kvfree(newmem);
1970         vhost_umem_clean(oldumem);
1971         return 0;
1972
1973 err:
1974         vhost_umem_clean(newumem);
1975         kvfree(newmem);
1976         return -EFAULT;
1977 }
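/*
 * Shape of the argument a caller hands to VHOST_SET_MEM_TABLE -- an
 * illustrative sketch; 'dev_fd', 'guest_base' and 'guest_size' are
 * stand-ins, not names from this file:
 *
 *	struct {
 *		struct vhost_memory m;
 *		struct vhost_memory_region r[1];
 *	} table = {
 *		.m = { .nregions = 1, .padding = 0 },	// padding must be 0
 *		.r[0] = {
 *			.guest_phys_addr = 0,
 *			.memory_size = guest_size,
 *			.userspace_addr = (__u64)(uintptr_t)guest_base,
 *		},
 *	};
 *
 *	if (ioctl(dev_fd, VHOST_SET_MEM_TABLE, &table.m) < 0)
 *		err(1, "VHOST_SET_MEM_TABLE");
 */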
1978
1979 static long vhost_vring_set_num(struct vhost_dev *d,
1980                                 struct vhost_virtqueue *vq,
1981                                 void __user *argp)
1982 {
1983         struct vhost_vring_state s;
1984
1985         /* Resizing ring with an active backend?
1986          * You don't want to do that. */
1987         if (vq->private_data)
1988                 return -EBUSY;
1989
1990         if (copy_from_user(&s, argp, sizeof s))
1991                 return -EFAULT;
1992
1993         if (!s.num || s.num > 0xffff || (s.num & (s.num - 1)))
1994                 return -EINVAL;
1995         vq->num = s.num;
1996
1997         return 0;
1998 }
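/*
 * The (s.num & (s.num - 1)) test above is the usual power-of-two check:
 * for example 256 & 255 == 0 and is accepted, while 320 & 319 == 256 and
 * is rejected.  Ring sizes must be powers of two because the used and
 * avail rings are indexed with 'idx & (vq->num - 1)' elsewhere in this
 * file.
 */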
1999
2000 static long vhost_vring_set_addr(struct vhost_dev *d,
2001                                  struct vhost_virtqueue *vq,
2002                                  void __user *argp)
2003 {
2004         struct vhost_vring_addr a;
2005
2006         if (copy_from_user(&a, argp, sizeof a))
2007                 return -EFAULT;
2008         if (a.flags & ~(0x1 << VHOST_VRING_F_LOG))
2009                 return -EOPNOTSUPP;
2010
2011         /* For 32-bit, verify that the top 32 bits of the user
2012            data are set to zero. */
2013         if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
2014             (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
2015             (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr)
2016                 return -EFAULT;
2017
2018         /* Make sure it's safe to cast pointers to vring types. */
2019         BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
2020         BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
2021         if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
2022             (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
2023             (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1)))
2024                 return -EINVAL;
2025
2026         /* We only verify access here if the backend is configured.
2027          * If it is not, we don't, as the size might not have been set up.
2028          * We will verify when the backend is configured. */
2029         if (vq->private_data) {
2030                 if (!vq_access_ok(vq, vq->num,
2031                         (void __user *)(unsigned long)a.desc_user_addr,
2032                         (void __user *)(unsigned long)a.avail_user_addr,
2033                         (void __user *)(unsigned long)a.used_user_addr))
2034                         return -EINVAL;
2035
2036                 /* Also validate log access for used ring if enabled. */
2037                 if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
2038                         !log_access_ok(vq->log_base, a.log_guest_addr,
2039                                 sizeof *vq->used +
2040                                 vq->num * sizeof *vq->used->ring))
2041                         return -EINVAL;
2042         }
2043
2044         vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
2045         vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
2046         vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
2047         vq->log_addr = a.log_guest_addr;
2048         vq->used = (void __user *)(unsigned long)a.used_user_addr;
2049
2050         return 0;
2051 }
2052
2053 static long vhost_vring_set_num_addr(struct vhost_dev *d,
2054                                      struct vhost_virtqueue *vq,
2055                                      unsigned int ioctl,
2056                                      void __user *argp)
2057 {
2058         long r;
2059
2060         mutex_lock(&vq->mutex);
2061
2062 #if VHOST_ARCH_CAN_ACCEL_UACCESS
2063         /* Unregister the MMU notifier so that the invalidation callback
2064          * can access vq->uaddrs[] without holding a lock.
2065          */
2066         if (d->mm)
2067                 mmu_notifier_unregister(&d->mmu_notifier, d->mm);
2068
2069         vhost_uninit_vq_maps(vq);
2070 #endif
2071
2072         switch (ioctl) {
2073         case VHOST_SET_VRING_NUM:
2074                 r = vhost_vring_set_num(d, vq, argp);
2075                 break;
2076         case VHOST_SET_VRING_ADDR:
2077                 r = vhost_vring_set_addr(d, vq, argp);
2078                 break;
2079         default:
2080                 BUG();
2081         }
2082
2083 #if VHOST_ARCH_CAN_ACCEL_UACCESS
2084         vhost_setup_vq_uaddr(vq);
2085
2086         if (d->mm)
2087                 mmu_notifier_register(&d->mmu_notifier, d->mm);
2088 #endif
2089
2090         mutex_unlock(&vq->mutex);
2091
2092         return r;
2093 }

2094 long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
2095 {
2096         struct file *eventfp, *filep = NULL;
2097         bool pollstart = false, pollstop = false;
2098         struct eventfd_ctx *ctx = NULL;
2099         u32 __user *idxp = argp;
2100         struct vhost_virtqueue *vq;
2101         struct vhost_vring_state s;
2102         struct vhost_vring_file f;
2103         u32 idx;
2104         long r;
2105
2106         r = get_user(idx, idxp);
2107         if (r < 0)
2108                 return r;
2109         if (idx >= d->nvqs)
2110                 return -ENOBUFS;
2111
2112         idx = array_index_nospec(idx, d->nvqs);
2113         vq = d->vqs[idx];
2114
2115         if (ioctl == VHOST_SET_VRING_NUM ||
2116             ioctl == VHOST_SET_VRING_ADDR) {
2117                 return vhost_vring_set_num_addr(d, vq, ioctl, argp);
2118         }
2119
2120         mutex_lock(&vq->mutex);
2121
2122         switch (ioctl) {
2123         case VHOST_SET_VRING_BASE:
2124                 /* Moving base with an active backend?
2125                  * You don't want to do that. */
2126                 if (vq->private_data) {
2127                         r = -EBUSY;
2128                         break;
2129                 }
2130                 if (copy_from_user(&s, argp, sizeof s)) {
2131                         r = -EFAULT;
2132                         break;
2133                 }
2134                 if (s.num > 0xffff) {
2135                         r = -EINVAL;
2136                         break;
2137                 }
2138                 vq->last_avail_idx = s.num;
2139                 /* Forget the cached index value. */
2140                 vq->avail_idx = vq->last_avail_idx;
2141                 break;
2142         case VHOST_GET_VRING_BASE:
2143                 s.index = idx;
2144                 s.num = vq->last_avail_idx;
2145                 if (copy_to_user(argp, &s, sizeof s))
2146                         r = -EFAULT;
2147                 break;
2148         case VHOST_SET_VRING_KICK:
2149                 if (copy_from_user(&f, argp, sizeof f)) {
2150                         r = -EFAULT;
2151                         break;
2152                 }
2153                 eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
2154                 if (IS_ERR(eventfp)) {
2155                         r = PTR_ERR(eventfp);
2156                         break;
2157                 }
2158                 if (eventfp != vq->kick) {
2159                         pollstop = (filep = vq->kick) != NULL;
2160                         pollstart = (vq->kick = eventfp) != NULL;
2161                 } else
2162                         filep = eventfp;
2163                 break;
2164         case VHOST_SET_VRING_CALL:
2165                 if (copy_from_user(&f, argp, sizeof f)) {
2166                         r = -EFAULT;
2167                         break;
2168                 }
2169                 ctx = f.fd == -1 ? NULL : eventfd_ctx_fdget(f.fd);
2170                 if (IS_ERR(ctx)) {
2171                         r = PTR_ERR(ctx);
2172                         break;
2173                 }
2174                 swap(ctx, vq->call_ctx);
2175                 break;
2176         case VHOST_SET_VRING_ERR:
2177                 if (copy_from_user(&f, argp, sizeof f)) {
2178                         r = -EFAULT;
2179                         break;
2180                 }
2181                 ctx = f.fd == -1 ? NULL : eventfd_ctx_fdget(f.fd);
2182                 if (IS_ERR(ctx)) {
2183                         r = PTR_ERR(ctx);
2184                         break;
2185                 }
2186                 swap(ctx, vq->error_ctx);
2187                 break;
2188         case VHOST_SET_VRING_ENDIAN:
2189                 r = vhost_set_vring_endian(vq, argp);
2190                 break;
2191         case VHOST_GET_VRING_ENDIAN:
2192                 r = vhost_get_vring_endian(vq, idx, argp);
2193                 break;
2194         case VHOST_SET_VRING_BUSYLOOP_TIMEOUT:
2195                 if (copy_from_user(&s, argp, sizeof(s))) {
2196                         r = -EFAULT;
2197                         break;
2198                 }
2199                 vq->busyloop_timeout = s.num;
2200                 break;
2201         case VHOST_GET_VRING_BUSYLOOP_TIMEOUT:
2202                 s.index = idx;
2203                 s.num = vq->busyloop_timeout;
2204                 if (copy_to_user(argp, &s, sizeof(s)))
2205                         r = -EFAULT;
2206                 break;
2207         default:
2208                 r = -ENOIOCTLCMD;
2209         }
2210
2211         if (pollstop && vq->handle_kick)
2212                 vhost_poll_stop(&vq->poll);
2213
2214         if (!IS_ERR_OR_NULL(ctx))
2215                 eventfd_ctx_put(ctx);
2216         if (filep)
2217                 fput(filep);
2218
2219         if (pollstart && vq->handle_kick)
2220                 r = vhost_poll_start(&vq->poll, vq->kick);
2221
2222         mutex_unlock(&vq->mutex);
2223
2224         if (pollstop && vq->handle_kick)
2225                 vhost_poll_flush(&vq->poll);
2226         return r;
2227 }
2228 EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
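/*
 * Typical userspace call sequence for the vring ioctls above -- a minimal
 * sketch; 'dev_fd', 'desc', 'avail', 'used' and 'kick_efd' are
 * illustrative names, not part of this file:
 *
 *	struct vhost_vring_state state = { .index = 0, .num = 256 };
 *	struct vhost_vring_addr addr = {
 *		.index = 0,
 *		.desc_user_addr  = (__u64)(uintptr_t)desc,
 *		.avail_user_addr = (__u64)(uintptr_t)avail,
 *		.used_user_addr  = (__u64)(uintptr_t)used,
 *	};
 *	struct vhost_vring_file kick = { .index = 0, .fd = kick_efd };
 *
 *	ioctl(dev_fd, VHOST_SET_VRING_NUM, &state);	// before backend set
 *	state.num = 0;
 *	ioctl(dev_fd, VHOST_SET_VRING_BASE, &state);	// start at index 0
 *	ioctl(dev_fd, VHOST_SET_VRING_ADDR, &addr);
 *	ioctl(dev_fd, VHOST_SET_VRING_KICK, &kick);	// eventfd to poll
 */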
2229
2230 int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
2231 {
2232         struct vhost_umem *niotlb, *oiotlb;
2233         int i;
2234
2235         niotlb = vhost_umem_alloc();
2236         if (!niotlb)
2237                 return -ENOMEM;
2238
2239         oiotlb = d->iotlb;
2240         d->iotlb = niotlb;
2241
2242         for (i = 0; i < d->nvqs; ++i) {
2243                 struct vhost_virtqueue *vq = d->vqs[i];
2244
2245                 mutex_lock(&vq->mutex);
2246                 vq->iotlb = niotlb;
2247                 __vhost_vq_meta_reset(vq);
2248                 mutex_unlock(&vq->mutex);
2249         }
2250
2251         vhost_umem_clean(oiotlb);
2252
2253         return 0;
2254 }
2255 EXPORT_SYMBOL_GPL(vhost_init_device_iotlb);
2256
2257 /* Caller must have device mutex */
2258 long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
2259 {
2260         struct eventfd_ctx *ctx;
2261         u64 p;
2262         long r;
2263         int i, fd;
2264
2265         /* If you are not the owner, you can become one */
2266         if (ioctl == VHOST_SET_OWNER) {
2267                 r = vhost_dev_set_owner(d);
2268                 goto done;
2269         }
2270
2271         /* You must be the owner to do anything else */
2272         r = vhost_dev_check_owner(d);
2273         if (r)
2274                 goto done;
2275
2276         switch (ioctl) {
2277         case VHOST_SET_MEM_TABLE:
2278                 r = vhost_set_memory(d, argp);
2279                 break;
2280         case VHOST_SET_LOG_BASE:
2281                 if (copy_from_user(&p, argp, sizeof p)) {
2282                         r = -EFAULT;
2283                         break;
2284                 }
2285                 if ((u64)(unsigned long)p != p) {
2286                         r = -EFAULT;
2287                         break;
2288                 }
2289                 for (i = 0; i < d->nvqs; ++i) {
2290                         struct vhost_virtqueue *vq;
2291                         void __user *base = (void __user *)(unsigned long)p;
2292                         vq = d->vqs[i];
2293                         mutex_lock(&vq->mutex);
2294                         /* If ring is inactive, will check when it's enabled. */
2295                         if (vq->private_data && !vq_log_access_ok(vq, base))
2296                                 r = -EFAULT;
2297                         else
2298                                 vq->log_base = base;
2299                         mutex_unlock(&vq->mutex);
2300                 }
2301                 break;
2302         case VHOST_SET_LOG_FD:
2303                 r = get_user(fd, (int __user *)argp);
2304                 if (r < 0)
2305                         break;
2306                 ctx = fd == -1 ? NULL : eventfd_ctx_fdget(fd);
2307                 if (IS_ERR(ctx)) {
2308                         r = PTR_ERR(ctx);
2309                         break;
2310                 }
2311                 swap(ctx, d->log_ctx);
2312                 for (i = 0; i < d->nvqs; ++i) {
2313                         mutex_lock(&d->vqs[i]->mutex);
2314                         d->vqs[i]->log_ctx = d->log_ctx;
2315                         mutex_unlock(&d->vqs[i]->mutex);
2316                 }
2317                 if (ctx)
2318                         eventfd_ctx_put(ctx);
2319                 break;
2320         default:
2321                 r = -ENOIOCTLCMD;
2322                 break;
2323         }
2324 done:
2325         return r;
2326 }
2327 EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
2328
2329 /* TODO: This is really inefficient.  We need something like get_user()
2330  * (instruction directly accesses the data, with an exception table entry
2331  * returning -EFAULT). See Documentation/x86/exception-tables.rst.
2332  */
2333 static int set_bit_to_user(int nr, void __user *addr)
2334 {
2335         unsigned long log = (unsigned long)addr;
2336         struct page *page;
2337         void *base;
2338         int bit = nr + (log % PAGE_SIZE) * 8;
2339         int r;
2340
2341         r = get_user_pages_fast(log, 1, FOLL_WRITE, &page);
2342         if (r < 0)
2343                 return r;
2344         BUG_ON(r != 1);
2345         base = kmap_atomic(page);
2346         set_bit(bit, base);
2347         kunmap_atomic(base);
2348         set_page_dirty_lock(page);
2349         put_page(page);
2350         return 0;
2351 }
2352
2353 static int log_write(void __user *log_base,
2354                      u64 write_address, u64 write_length)
2355 {
2356         u64 write_page = write_address / VHOST_PAGE_SIZE;
2357         int r;
2358
2359         if (!write_length)
2360                 return 0;
2361         write_length += write_address % VHOST_PAGE_SIZE;
2362         for (;;) {
2363                 u64 base = (u64)(unsigned long)log_base;
2364                 u64 log = base + write_page / 8;
2365                 int bit = write_page % 8;
2366                 if ((u64)(unsigned long)log != log)
2367                         return -EFAULT;
2368                 r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
2369                 if (r < 0)
2370                         return r;
2371                 if (write_length <= VHOST_PAGE_SIZE)
2372                         break;
2373                 write_length -= VHOST_PAGE_SIZE;
2374                 write_page += 1;
2375         }
2376         return r;
2377 }
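/*
 * Worked example of the bitmap arithmetic above, assuming
 * VHOST_PAGE_SIZE == 4096: a write at guest address 0x5000 gives
 * write_page == 5, so the dirty bit lives in byte write_page / 8 == 0 of
 * the log, at bit write_page % 8 == 5.  set_bit_to_user() then pins the
 * userspace page holding that byte and sets the bit atomically, and the
 * loop repeats once per additional page the write touches.
 */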
2378
2379 static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
2380 {
2381         struct vhost_umem *umem = vq->umem;
2382         struct vhost_umem_node *u;
2383         u64 start, end, l, min;
2384         int r;
2385         bool hit = false;
2386
2387         while (len) {
2388                 min = len;
2389                 /* More than one GPA can be mapped into a single HVA. So
2390                  * iterate over all possible umems here to be safe.
2391                  */
2392                 list_for_each_entry(u, &umem->umem_list, link) {
2393                         if (u->userspace_addr > hva - 1 + len ||
2394                             u->userspace_addr - 1 + u->size < hva)
2395                                 continue;
2396                         start = max(u->userspace_addr, hva);
2397                         end = min(u->userspace_addr - 1 + u->size,
2398                                   hva - 1 + len);
2399                         l = end - start + 1;
2400                         r = log_write(vq->log_base,
2401                                       u->start + start - u->userspace_addr,
2402                                       l);
2403                         if (r < 0)
2404                                 return r;
2405                         hit = true;
2406                         min = min(l, min);
2407                 }
2408
2409                 if (!hit)
2410                         return -EFAULT;
2411
2412                 len -= min;
2413                 hva += min;
2414         }
2415
2416         return 0;
2417 }
2418
2419 static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
2420 {
2421         struct iovec iov[64];
2422         int i, ret;
2423
2424         if (!vq->iotlb)
2425                 return log_write(vq->log_base, vq->log_addr + used_offset, len);
2426
2427         ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
2428                              len, iov, 64, VHOST_ACCESS_WO);
2429         if (ret < 0)
2430                 return ret;
2431
2432         for (i = 0; i < ret; i++) {
2433                 ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
2434                                     iov[i].iov_len);
2435                 if (ret)
2436                         return ret;
2437         }
2438
2439         return 0;
2440 }
2441
2442 int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
2443                     unsigned int log_num, u64 len, struct iovec *iov, int count)
2444 {
2445         int i, r;
2446
2447         /* Make sure data written is seen before log. */
2448         smp_wmb();
2449
2450         if (vq->iotlb) {
2451                 for (i = 0; i < count; i++) {
2452                         r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
2453                                           iov[i].iov_len);
2454                         if (r < 0)
2455                                 return r;
2456                 }
2457                 return 0;
2458         }
2459
2460         for (i = 0; i < log_num; ++i) {
2461                 u64 l = min(log[i].len, len);
2462                 r = log_write(vq->log_base, log[i].addr, l);
2463                 if (r < 0)
2464                         return r;
2465                 len -= l;
2466                 if (!len) {
2467                         if (vq->log_ctx)
2468                                 eventfd_signal(vq->log_ctx, 1);
2469                         return 0;
2470                 }
2471         }
2472         /* Length written exceeds what we have stored. This is a bug. */
2473         BUG();
2474         return 0;
2475 }
2476 EXPORT_SYMBOL_GPL(vhost_log_write);
2477
2478 static int vhost_update_used_flags(struct vhost_virtqueue *vq)
2479 {
2480         void __user *used;

2481         if (vhost_put_used_flags(vq))
2482                 return -EFAULT;
2483         if (unlikely(vq->log_used)) {
2484                 /* Make sure the flag is seen before log. */
2485                 smp_wmb();
2486                 /* Log used flag write. */
2487                 used = &vq->used->flags;
2488                 log_used(vq, (used - (void __user *)vq->used),
2489                          sizeof vq->used->flags);
2490                 if (vq->log_ctx)
2491                         eventfd_signal(vq->log_ctx, 1);
2492         }
2493         return 0;
2494 }
2495
2496 static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
2497 {
2498         if (vhost_put_avail_event(vq))
2499                 return -EFAULT;
2500         if (unlikely(vq->log_used)) {
2501                 void __user *used;
2502                 /* Make sure the event is seen before log. */
2503                 smp_wmb();
2504                 /* Log avail event write */
2505                 used = vhost_avail_event(vq);
2506                 log_used(vq, (used - (void __user *)vq->used),
2507                          sizeof *vhost_avail_event(vq));
2508                 if (vq->log_ctx)
2509                         eventfd_signal(vq->log_ctx, 1);
2510         }
2511         return 0;
2512 }
2513
2514 int vhost_vq_init_access(struct vhost_virtqueue *vq)
2515 {
2516         __virtio16 last_used_idx;
2517         int r;
2518         bool is_le = vq->is_le;
2519
2520         if (!vq->private_data)
2521                 return 0;
2522
2523         vhost_init_is_le(vq);
2524
2525         r = vhost_update_used_flags(vq);
2526         if (r)
2527                 goto err;
2528         vq->signalled_used_valid = false;
2529         if (!vq->iotlb &&
2530             !access_ok(&vq->used->idx, sizeof vq->used->idx)) {
2531                 r = -EFAULT;
2532                 goto err;
2533         }
2534         r = vhost_get_used_idx(vq, &last_used_idx);
2535         if (r) {
2536                 vq_err(vq, "Can't access used idx at %p\n",
2537                        &vq->used->idx);
2538                 goto err;
2539         }
2540         vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
2541         return 0;
2542
2543 err:
2544         vq->is_le = is_le;
2545         return r;
2546 }
2547 EXPORT_SYMBOL_GPL(vhost_vq_init_access);
2548
2549 static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
2550                           struct iovec iov[], int iov_size, int access)
2551 {
2552         const struct vhost_umem_node *node;
2553         struct vhost_dev *dev = vq->dev;
2554         struct vhost_umem *umem = dev->iotlb ? dev->iotlb : dev->umem;
2555         struct iovec *_iov;
2556         u64 s = 0;
2557         int ret = 0;
2558
2559         while ((u64)len > s) {
2560                 u64 size;
2561                 if (unlikely(ret >= iov_size)) {
2562                         ret = -ENOBUFS;
2563                         break;
2564                 }
2565
2566                 node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
2567                                                         addr, addr + len - 1);
2568                 if (node == NULL || node->start > addr) {
2569                         if (umem != dev->iotlb) {
2570                                 ret = -EFAULT;
2571                                 break;
2572                         }
2573                         ret = -EAGAIN;
2574                         break;
2575                 } else if (!(node->perm & access)) {
2576                         ret = -EPERM;
2577                         break;
2578                 }
2579
2580                 _iov = iov + ret;
2581                 size = node->size - addr + node->start;
2582                 _iov->iov_len = min((u64)len - s, size);
2583                 _iov->iov_base = (void __user *)(unsigned long)
2584                         (node->userspace_addr + addr - node->start);
2585                 s += size;
2586                 addr += size;
2587                 ++ret;
2588         }
2589
2590         if (ret == -EAGAIN)
2591                 vhost_iotlb_miss(vq, addr, access);
2592         return ret;
2593 }
2594
2595 /* Each buffer in the virtqueues is actually a chain of descriptors.  This
2596  * function returns the next descriptor in the chain,
2597  * or -1U if we're at the end. */
2598 static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
2599 {
2600         unsigned int next;
2601
2602         /* If this descriptor says it doesn't chain, we're done. */
2603         if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
2604                 return -1U;
2605
2606         /* Check they're not leading us off the end of the descriptors. */
2607         next = vhost16_to_cpu(vq, READ_ONCE(desc->next));
2608         return next;
2609 }
2610
2611 static int get_indirect(struct vhost_virtqueue *vq,
2612                         struct iovec iov[], unsigned int iov_size,
2613                         unsigned int *out_num, unsigned int *in_num,
2614                         struct vhost_log *log, unsigned int *log_num,
2615                         struct vring_desc *indirect)
2616 {
2617         struct vring_desc desc;
2618         unsigned int i = 0, count, found = 0;
2619         u32 len = vhost32_to_cpu(vq, indirect->len);
2620         struct iov_iter from;
2621         int ret, access;
2622
2623         /* Sanity check */
2624         if (unlikely(len % sizeof desc)) {
2625                 vq_err(vq, "Invalid length in indirect descriptor: "
2626                        "len 0x%llx not multiple of 0x%zx\n",
2627                        (unsigned long long)len,
2628                        sizeof desc);
2629                 return -EINVAL;
2630         }
2631
2632         ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
2633                              UIO_MAXIOV, VHOST_ACCESS_RO);
2634         if (unlikely(ret < 0)) {
2635                 if (ret != -EAGAIN)
2636                         vq_err(vq, "Translation failure %d in indirect.\n", ret);
2637                 return ret;
2638         }
2639         iov_iter_init(&from, READ, vq->indirect, ret, len);
2640
2641         /* We will use the result as an address to read from, so most
2642          * architectures only need a compiler barrier here. */
2643         read_barrier_depends();
2644
2645         count = len / sizeof desc;
2646         /* Buffers are chained via a 16 bit next field, so
2647          * we can have at most 2^16 of these. */
2648         if (unlikely(count > USHRT_MAX + 1)) {
2649                 vq_err(vq, "Indirect buffer length too big: %d\n",
2650                        indirect->len);
2651                 return -E2BIG;
2652         }
2653
2654         do {
2655                 unsigned iov_count = *in_num + *out_num;
2656                 if (unlikely(++found > count)) {
2657                         vq_err(vq, "Loop detected: last one at %u "
2658                                "indirect size %u\n",
2659                                i, count);
2660                         return -EINVAL;
2661                 }
2662                 if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
2663                         vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
2664                                i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
2665                         return -EINVAL;
2666                 }
2667                 if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
2668                         vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
2669                                i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
2670                         return -EINVAL;
2671                 }
2672
2673                 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
2674                         access = VHOST_ACCESS_WO;
2675                 else
2676                         access = VHOST_ACCESS_RO;
2677
2678                 ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
2679                                      vhost32_to_cpu(vq, desc.len), iov + iov_count,
2680                                      iov_size - iov_count, access);
2681                 if (unlikely(ret < 0)) {
2682                         if (ret != -EAGAIN)
2683                                 vq_err(vq, "Translation failure %d indirect idx %d\n",
2684                                         ret, i);
2685                         return ret;
2686                 }
2687                 /* If this is an input descriptor, increment that count. */
2688                 if (access == VHOST_ACCESS_WO) {
2689                         *in_num += ret;
2690                         if (unlikely(log)) {
2691                                 log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
2692                                 log[*log_num].len = vhost32_to_cpu(vq, desc.len);
2693                                 ++*log_num;
2694                         }
2695                 } else {
2696                         /* If it's an output descriptor, they're all supposed
2697                          * to come before any input descriptors. */
2698                         if (unlikely(*in_num)) {
2699                                 vq_err(vq, "Indirect descriptor "
2700                                        "has out after in: idx %d\n", i);
2701                                 return -EINVAL;
2702                         }
2703                         *out_num += ret;
2704                 }
2705         } while ((i = next_desc(vq, &desc)) != -1);
2706         return 0;
2707 }
2708
2709 /* This looks in the virtqueue for the first available buffer, and converts
2710  * it to an iovec for convenient access.  Since descriptors consist of some
2711  * number of output then some number of input descriptors, it's actually two
2712  * iovecs, but we pack them into one and note how many of each there were.
2713  *
2714  * This function returns the descriptor number found, or vq->num (which is
2715  * never a valid descriptor number) if none was found.  A negative code is
2716  * returned on error. */
2717 int vhost_get_vq_desc(struct vhost_virtqueue *vq,
2718                       struct iovec iov[], unsigned int iov_size,
2719                       unsigned int *out_num, unsigned int *in_num,
2720                       struct vhost_log *log, unsigned int *log_num)
2721 {
2722         struct vring_desc desc;
2723         unsigned int i, head, found = 0;
2724         u16 last_avail_idx;
2725         __virtio16 avail_idx;
2726         __virtio16 ring_head;
2727         int ret, access;
2728
2729         /* Check it isn't doing very strange things with descriptor numbers. */
2730         last_avail_idx = vq->last_avail_idx;
2731
2732         if (vq->avail_idx == vq->last_avail_idx) {
2733                 if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) {
2734                         vq_err(vq, "Failed to access avail idx at %p\n",
2735                                 &vq->avail->idx);
2736                         return -EFAULT;
2737                 }
2738                 vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
2739
2740                 if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
2741                         vq_err(vq, "Guest moved avail index from %u to %u",
2742                                 last_avail_idx, vq->avail_idx);
2743                         return -EFAULT;
2744                 }
2745
2746                 /* If there's nothing new since last we looked, return
2747                  * invalid.
2748                  */
2749                 if (vq->avail_idx == last_avail_idx)
2750                         return vq->num;
2751
2752                 /* Only get avail ring entries after they have been
2753                  * exposed by guest.
2754                  */
2755                 smp_rmb();
2756         }
2757
2758         /* Grab the next descriptor number they're advertising, and increment
2759          * the index we've seen. */
2760         if (unlikely(vhost_get_avail_head(vq, &ring_head, last_avail_idx))) {
2761                 vq_err(vq, "Failed to read head: idx %d address %p\n",
2762                        last_avail_idx,
2763                        &vq->avail->ring[last_avail_idx % vq->num]);
2764                 return -EFAULT;
2765         }
2766
2767         head = vhost16_to_cpu(vq, ring_head);
2768
2769         /* If their number is silly, that's an error. */
2770         if (unlikely(head >= vq->num)) {
2771                 vq_err(vq, "Guest says index %u > %u is available",
2772                        head, vq->num);
2773                 return -EINVAL;
2774         }
2775
2776         /* When we start there are neither input nor output descriptors. */
2777         *out_num = *in_num = 0;
2778         if (unlikely(log))
2779                 *log_num = 0;
2780
2781         i = head;
2782         do {
2783                 unsigned iov_count = *in_num + *out_num;
2784                 if (unlikely(i >= vq->num)) {
2785                         vq_err(vq, "Desc index is %u > %u, head = %u",
2786                                i, vq->num, head);
2787                         return -EINVAL;
2788                 }
2789                 if (unlikely(++found > vq->num)) {
2790                         vq_err(vq, "Loop detected: last one at %u "
2791                                "vq size %u head %u\n",
2792                                i, vq->num, head);
2793                         return -EINVAL;
2794                 }
2795                 ret = vhost_get_desc(vq, &desc, i);
2796                 if (unlikely(ret)) {
2797                         vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
2798                                i, vq->desc + i);
2799                         return -EFAULT;
2800                 }
2801                 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
2802                         ret = get_indirect(vq, iov, iov_size,
2803                                            out_num, in_num,
2804                                            log, log_num, &desc);
2805                         if (unlikely(ret < 0)) {
2806                                 if (ret != -EAGAIN)
2807                                         vq_err(vq, "Failure detected "
2808                                                 "in indirect descriptor at idx %d\n", i);
2809                                 return ret;
2810                         }
2811                         continue;
2812                 }
2813
2814                 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
2815                         access = VHOST_ACCESS_WO;
2816                 else
2817                         access = VHOST_ACCESS_RO;
2818                 ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
2819                                      vhost32_to_cpu(vq, desc.len), iov + iov_count,
2820                                      iov_size - iov_count, access);
2821                 if (unlikely(ret < 0)) {
2822                         if (ret != -EAGAIN)
2823                                 vq_err(vq, "Translation failure %d descriptor idx %d\n",
2824                                         ret, i);
2825                         return ret;
2826                 }
2827                 if (access == VHOST_ACCESS_WO) {
2828                         /* If this is an input descriptor,
2829                          * increment that count. */
2830                         *in_num += ret;
2831                         if (unlikely(log)) {
2832                                 log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
2833                                 log[*log_num].len = vhost32_to_cpu(vq, desc.len);
2834                                 ++*log_num;
2835                         }
2836                 } else {
2837                         /* If it's an output descriptor, they're all supposed
2838                          * to come before any input descriptors. */
2839                         if (unlikely(*in_num)) {
2840                                 vq_err(vq, "Descriptor has out after in: "
2841                                        "idx %d\n", i);
2842                                 return -EINVAL;
2843                         }
2844                         *out_num += ret;
2845                 }
2846         } while ((i = next_desc(vq, &desc)) != -1);
2847
2848         /* On success, increment avail index. */
2849         vq->last_avail_idx++;
2850
2851         /* Assume notifications from the guest are disabled at this point;
2852          * if they aren't, we would need to update the avail_event index. */
2853         BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
2854         return head;
2855 }
2856 EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
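/*
 * How a backend typically drives the helper above from its kick handler --
 * a condensed sketch in the spirit of drivers/vhost/test.c, with error
 * handling trimmed:
 *
 *	for (;;) {
 *		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
 *					 &out, &in, NULL, NULL);
 *		if (head < 0)
 *			break;				// fault or IOTLB miss
 *		if (head == vq->num) {			// ring is drained
 *			if (unlikely(vhost_enable_notify(dev, vq))) {
 *				vhost_disable_notify(dev, vq);
 *				continue;		// raced with new buffers
 *			}
 *			break;
 *		}
 *		// ... consume the out + in iovecs for 'head' here ...
 *		vhost_add_used_and_signal(dev, vq, head, 0);
 *	}
 */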
2857
2858 /* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
2859 void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
2860 {
2861         vq->last_avail_idx -= n;
2862 }
2863 EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
2864
2865 /* After we've used one of their buffers, we tell them about it.  We'll then
2866  * want to notify the guest, using eventfd. */
2867 int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
2868 {
2869         struct vring_used_elem heads = {
2870                 cpu_to_vhost32(vq, head),
2871                 cpu_to_vhost32(vq, len)
2872         };
2873
2874         return vhost_add_used_n(vq, &heads, 1);
2875 }
2876 EXPORT_SYMBOL_GPL(vhost_add_used);
2877
2878 static int __vhost_add_used_n(struct vhost_virtqueue *vq,
2879                             struct vring_used_elem *heads,
2880                             unsigned count)
2881 {
2882         struct vring_used_elem __user *used;
2883         u16 old, new;
2884         int start;
2885
2886         start = vq->last_used_idx & (vq->num - 1);
2887         used = vq->used->ring + start;
2888         if (vhost_put_used(vq, heads, start, count)) {
2889                 vq_err(vq, "Failed to write used");
2890                 return -EFAULT;
2891         }
2892         if (unlikely(vq->log_used)) {
2893                 /* Make sure data is seen before log. */
2894                 smp_wmb();
2895                 /* Log used ring entry write. */
2896                 log_used(vq, ((void __user *)used - (void __user *)vq->used),
2897                          count * sizeof *used);
2898         }
2899         old = vq->last_used_idx;
2900         new = (vq->last_used_idx += count);
2901         /* If the driver never bothers to signal in a very long while,
2902          * the used index might wrap around. If that happens, invalidate
2903          * the signalled_used index we stored. TODO: make sure the driver
2904          * signals at least once in 2^16 and remove this. */
2905         if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
2906                 vq->signalled_used_valid = false;
2907         return 0;
2908 }
2909
2910 /* After we've used one of their buffers, we tell them about it.  We'll then
2911  * want to notify the guest, using eventfd. */
2912 int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
2913                      unsigned count)
2914 {
2915         int start, n, r;
2916
2917         start = vq->last_used_idx & (vq->num - 1);
2918         n = vq->num - start;
2919         if (n < count) {
2920                 r = __vhost_add_used_n(vq, heads, n);
2921                 if (r < 0)
2922                         return r;
2923                 heads += n;
2924                 count -= n;
2925         }
2926         r = __vhost_add_used_n(vq, heads, count);
2927
2928         /* Make sure buffer is written before we update index. */
2929         smp_wmb();
2930         if (vhost_put_used_idx(vq)) {
2931                 vq_err(vq, "Failed to increment used idx");
2932                 return -EFAULT;
2933         }
2934         if (unlikely(vq->log_used)) {
2935                 /* Make sure used idx is seen before log. */
2936                 smp_wmb();
2937                 /* Log used index update. */
2938                 log_used(vq, offsetof(struct vring_used, idx),
2939                          sizeof vq->used->idx);
2940                 if (vq->log_ctx)
2941                         eventfd_signal(vq->log_ctx, 1);
2942         }
2943         return r;
2944 }
2945 EXPORT_SYMBOL_GPL(vhost_add_used_n);
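/*
 * Worked example of the wrap-around split above: with vq->num == 8,
 * last_used_idx == 6 and count == 5, the first __vhost_add_used_n() call
 * fills used ring slots 6 and 7 (n == 2), the second wraps around and
 * fills slots 0, 1 and 2 with the remaining heads, and only then is the
 * used index made visible to the guest.
 */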
2946
2947 static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2948 {
2949         __u16 old, new;
2950         __virtio16 event;
2951         bool v;
2952         /* Flush out used index updates. This is paired
2953          * with the barrier that the Guest executes when enabling
2954          * interrupts. */
2955         smp_mb();
2956
2957         if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
2958             unlikely(vq->avail_idx == vq->last_avail_idx))
2959                 return true;
2960
2961         if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
2962                 __virtio16 flags;
2963                 if (vhost_get_avail_flags(vq, &flags)) {
2964                         vq_err(vq, "Failed to get flags");
2965                         return true;
2966                 }
2967                 return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));
2968         }
2969         old = vq->signalled_used;
2970         v = vq->signalled_used_valid;
2971         new = vq->signalled_used = vq->last_used_idx;
2972         vq->signalled_used_valid = true;
2973
2974         if (unlikely(!v))
2975                 return true;
2976
2977         if (vhost_get_used_event(vq, &event)) {
2978                 vq_err(vq, "Failed to get used event idx");
2979                 return true;
2980         }
2981         return vring_need_event(vhost16_to_cpu(vq, event), new, old);
2982 }
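/*
 * vring_need_event() above implements the VIRTIO_RING_F_EVENT_IDX rule:
 * signal only if the guest's used_event falls inside the window of
 * entries added since the last signal.  For example, with old == 10,
 * new == 14 and used_event == 12, (u16)(new - event - 1) == 1 is below
 * (u16)(new - old) == 4, so we signal; with used_event == 20 the guest
 * has asked to be woken later, and we stay quiet.
 */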
2983
2984 /* This actually signals the guest, using eventfd. */
2985 void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2986 {
2987         /* Signal the Guest to tell them we used something up. */
2988         if (vq->call_ctx && vhost_notify(dev, vq))
2989                 eventfd_signal(vq->call_ctx, 1);
2990 }
2991 EXPORT_SYMBOL_GPL(vhost_signal);
2992
2993 /* And here's the combo meal deal.  Supersize me! */
2994 void vhost_add_used_and_signal(struct vhost_dev *dev,
2995                                struct vhost_virtqueue *vq,
2996                                unsigned int head, int len)
2997 {
2998         vhost_add_used(vq, head, len);
2999         vhost_signal(dev, vq);
3000 }
3001 EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);
3002
3003 /* multi-buffer version of vhost_add_used_and_signal */
3004 void vhost_add_used_and_signal_n(struct vhost_dev *dev,
3005                                  struct vhost_virtqueue *vq,
3006                                  struct vring_used_elem *heads, unsigned count)
3007 {
3008         vhost_add_used_n(vq, heads, count);
3009         vhost_signal(dev, vq);
3010 }
3011 EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
3012
3013 /* Return true if we're sure that the available ring is empty. */
3014 bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
3015 {
3016         __virtio16 avail_idx;
3017         int r;
3018
3019         if (vq->avail_idx != vq->last_avail_idx)
3020                 return false;
3021
3022         r = vhost_get_avail_idx(vq, &avail_idx);
3023         if (unlikely(r))
3024                 return false;
3025         vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
3026
3027         return vq->avail_idx == vq->last_avail_idx;
3028 }
3029 EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
3030
3031 /* OK, now we need to know about added descriptors. */
3032 bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
3033 {
3034         __virtio16 avail_idx;
3035         int r;
3036
3037         if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
3038                 return false;
3039         vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
3040         if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
3041                 r = vhost_update_used_flags(vq);
3042                 if (r) {
3043                         vq_err(vq, "Failed to enable notification at %p: %d\n",
3044                                &vq->used->flags, r);
3045                         return false;
3046                 }
3047         } else {
3048                 r = vhost_update_avail_event(vq, vq->avail_idx);
3049                 if (r) {
3050                         vq_err(vq, "Failed to update avail event index at %p: %d\n",
3051                                vhost_avail_event(vq), r);
3052                         return false;
3053                 }
3054         }
3055         /* They could have slipped one in as we were doing that: make
3056          * sure it's written, then check again. */
3057         smp_mb();
3058         r = vhost_get_avail_idx(vq, &avail_idx);
3059         if (r) {
3060                 vq_err(vq, "Failed to check avail idx at %p: %d\n",
3061                        &vq->avail->idx, r);
3062                 return false;
3063         }
3064
3065         return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx;
3066 }
3067 EXPORT_SYMBOL_GPL(vhost_enable_notify);
3068
3069 /* We don't need to be notified again. */
3070 void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
3071 {
3072         int r;
3073
3074         if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
3075                 return;
3076         vq->used_flags |= VRING_USED_F_NO_NOTIFY;
3077         if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
3078                 r = vhost_update_used_flags(vq);
3079                 if (r)
3080                         vq_err(vq, "Failed to disable notification at %p: %d\n",
3081                                &vq->used->flags, r);
3082         }
3083 }
3084 EXPORT_SYMBOL_GPL(vhost_disable_notify);
3085
3086 /* Create a new message. */
3087 struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
3088 {
3089         struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL);

3090         if (!node)
3091                 return NULL;
3092
3093         /* Make sure all padding within the structure is initialized. */
3094         memset(&node->msg, 0, sizeof node->msg);
3095         node->vq = vq;
3096         node->msg.type = type;
3097         return node;
3098 }
3099 EXPORT_SYMBOL_GPL(vhost_new_msg);
3100
3101 void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head,
3102                        struct vhost_msg_node *node)
3103 {
3104         spin_lock(&dev->iotlb_lock);
3105         list_add_tail(&node->node, head);
3106         spin_unlock(&dev->iotlb_lock);
3107
3108         wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
3109 }
3110 EXPORT_SYMBOL_GPL(vhost_enqueue_msg);
3111
3112 struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
3113                                          struct list_head *head)
3114 {
3115         struct vhost_msg_node *node = NULL;
3116
3117         spin_lock(&dev->iotlb_lock);
3118         if (!list_empty(head)) {
3119                 node = list_first_entry(head, struct vhost_msg_node,
3120                                         node);
3121                 list_del(&node->node);
3122         }
3123         spin_unlock(&dev->iotlb_lock);
3124
3125         return node;
3126 }
3127 EXPORT_SYMBOL_GPL(vhost_dequeue_msg);
3128
3129
3130 static int __init vhost_init(void)
3131 {
3132         return 0;
3133 }
3134
3135 static void __exit vhost_exit(void)
3136 {
3137 }
3138
3139 module_init(vhost_init);
3140 module_exit(vhost_exit);
3141
3142 MODULE_VERSION("0.0.1");
3143 MODULE_LICENSE("GPL v2");
3144 MODULE_AUTHOR("Michael S. Tsirkin");
3145 MODULE_DESCRIPTION("Host kernel accelerator for virtio");