drivers/misc/vmw_vmci/vmci_host.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/io.h>

#include "vmci_handle_array.h"
#include "vmci_queue_pair.h"
#include "vmci_datagram.h"
#include "vmci_doorbell.h"
#include "vmci_resource.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"

#define VMCI_UTIL_NUM_RESOURCES 1

enum {
        VMCI_NOTIFY_RESOURCE_QUEUE_PAIR = 0,
        VMCI_NOTIFY_RESOURCE_DOOR_BELL = 1,
};

enum {
        VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY = 0,
        VMCI_NOTIFY_RESOURCE_ACTION_CREATE = 1,
        VMCI_NOTIFY_RESOURCE_ACTION_DESTROY = 2,
};

/*
 * VMCI driver initialization. This block can also be used to
 * pass initial group membership etc.
 */
struct vmci_init_blk {
        u32 cid;
        u32 flags;
};

/* VMCIQueuePairAllocInfo_VMToVM */
struct vmci_qp_alloc_info_vmvm {
        struct vmci_handle handle;
        u32 peer;
        u32 flags;
        u64 produce_size;
        u64 consume_size;
        u64 produce_page_file;    /* User VA. */
        u64 consume_page_file;    /* User VA. */
        u64 produce_page_file_size;  /* Size of the file name array. */
        u64 consume_page_file_size;  /* Size of the file name array. */
        s32 result;
        u32 _pad;
};

/* VMCISetNotifyInfo: Used to pass notify flag's address to the host driver. */
struct vmci_set_notify_info {
        u64 notify_uva;
        s32 result;
        u32 _pad;
};

/*
 * Per-instance host state
 */
struct vmci_host_dev {
        struct vmci_ctx *context;
        int user_version;
        enum vmci_obj_type ct_type;
        struct mutex lock;  /* Mutex lock for vmci context access */
};

static struct vmci_ctx *host_context;
static bool vmci_host_device_initialized;
static atomic_t vmci_host_active_users = ATOMIC_INIT(0);

/*
 * Determines whether the VMCI host personality is
 * available. Since the core functionality of the host driver is
 * always present, all guests could possibly use the host
 * personality. However, to minimize the deviation from the
 * pre-unified driver state of affairs, we only consider the host
 * device active if there is no active guest device or if there
 * are VMX'en with active VMCI contexts using the host device.
 */
bool vmci_host_code_active(void)
{
        return vmci_host_device_initialized &&
            (!vmci_guest_code_active() ||
             atomic_read(&vmci_host_active_users) > 0);
}

int vmci_host_users(void)
{
        return atomic_read(&vmci_host_active_users);
}

/*
 * Called on open of /dev/vmci.
 */
static int vmci_host_open(struct inode *inode, struct file *filp)
{
        struct vmci_host_dev *vmci_host_dev;

        vmci_host_dev = kzalloc(sizeof(struct vmci_host_dev), GFP_KERNEL);
        if (vmci_host_dev == NULL)
                return -ENOMEM;

        vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;
        mutex_init(&vmci_host_dev->lock);
        filp->private_data = vmci_host_dev;

        return 0;
}

/*
 * Called on close of /dev/vmci, most often when the process
 * exits.
 */
static int vmci_host_close(struct inode *inode, struct file *filp)
{
        struct vmci_host_dev *vmci_host_dev = filp->private_data;

        if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
                vmci_ctx_destroy(vmci_host_dev->context);
                vmci_host_dev->context = NULL;

                /*
                 * The number of active contexts is used to track whether any
                 * VMX'en are using the host personality. It is incremented when
                 * a context is created through the IOCTL_VMCI_INIT_CONTEXT
                 * ioctl.
                 */
                atomic_dec(&vmci_host_active_users);
        }
        vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;

        kfree(vmci_host_dev);
        filp->private_data = NULL;
        return 0;
}

/*
 * This is used to wake up the VMX when a VMCI call arrives, or
 * to wake up select() or poll() at the next clock tick.
 */
static __poll_t vmci_host_poll(struct file *filp, poll_table *wait)
{
        struct vmci_host_dev *vmci_host_dev = filp->private_data;
        struct vmci_ctx *context = vmci_host_dev->context;
        __poll_t mask = 0;

        if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
                /* Check for VMCI calls to this VM context. */
                if (wait)
                        poll_wait(filp, &context->host_context.wait_queue,
                                  wait);

                spin_lock(&context->lock);
                if (context->pending_datagrams > 0 ||
                    vmci_handle_arr_get_size(
                                context->pending_doorbell_array) > 0) {
                        mask = EPOLLIN;
                }
                spin_unlock(&context->lock);
        }
        return mask;
}

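/*
 * Hedged usage sketch (illustrative, not part of the original source): a
 * hypothetical userspace client waits for incoming datagrams or doorbell
 * notifications with an ordinary poll loop, e.g.:
 *
 *         struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *         if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *                 ioctl(fd, IOCTL_VMCI_DATAGRAM_RECEIVE, &recv_info);
 *
 * EPOLLIN is reported whenever the context has pending datagrams or a
 * non-empty pending doorbell array.
 */
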
/*
 * Copies the handles of a handle array into a user buffer, and
 * returns the new length in *user_buf_size. If the copy to the
 * user buffer fails, the function still returns VMCI_SUCCESS,
 * but retval != 0.
 */
static int drv_cp_harray_to_user(void __user *user_buf_uva,
                                 u64 *user_buf_size,
                                 struct vmci_handle_arr *handle_array,
                                 int *retval)
{
        u32 array_size = 0;
        struct vmci_handle *handles;

        if (handle_array)
                array_size = vmci_handle_arr_get_size(handle_array);

        if (array_size * sizeof(*handles) > *user_buf_size)
                return VMCI_ERROR_MORE_DATA;

        *user_buf_size = array_size * sizeof(*handles);
        if (*user_buf_size)
                *retval = copy_to_user(user_buf_uva,
                                       vmci_handle_arr_get_handles
                                       (handle_array), *user_buf_size);

        return VMCI_SUCCESS;
}

/*
 * Sets up a given context for notify to work. Maps the notify
 * boolean in user VA into kernel space.
 */
static int vmci_host_setup_notify(struct vmci_ctx *context,
                                  unsigned long uva)
{
        int retval;

        if (context->notify_page) {
                pr_devel("%s: Notify mechanism is already set up\n", __func__);
                return VMCI_ERROR_DUPLICATE_ENTRY;
        }

        /*
         * We are using 'bool' internally, but let's make sure we are
         * explicit about the size.
         */
        BUILD_BUG_ON(sizeof(bool) != sizeof(u8));
        if (!access_ok((void __user *)uva, sizeof(u8)))
                return VMCI_ERROR_GENERIC;

        /*
         * Lock physical page backing a given user VA.
         */
        retval = get_user_pages_fast(uva, 1, FOLL_WRITE, &context->notify_page);
        if (retval != 1) {
                context->notify_page = NULL;
                return VMCI_ERROR_GENERIC;
        }

        /*
         * Map the locked page and set up notify pointer.
         */
        context->notify = kmap(context->notify_page) + (uva & (PAGE_SIZE - 1));
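        /*
         * Illustrative arithmetic (not from the original source): with
         * PAGE_SIZE == 4096, a user VA of, say, 0x7f2a11003234 has page
         * offset 0x7f2a11003234 & 0xfff == 0x234, so the notify pointer
         * lands 0x234 bytes into the kernel mapping of the pinned page.
         */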
        vmci_ctx_check_signal_notify(context);

        return VMCI_SUCCESS;
}

static int vmci_host_get_version(struct vmci_host_dev *vmci_host_dev,
                                 unsigned int cmd, void __user *uptr)
{
        if (cmd == IOCTL_VMCI_VERSION2) {
                int __user *vptr = uptr;
                if (get_user(vmci_host_dev->user_version, vptr))
                        return -EFAULT;
        }

        /*
         * The basic logic here is:
         *
         * If the user sends in a version of 0, tell it our version.
         * If the user didn't send in a version, tell it our version.
         * If the user sent in an old version, tell it -its- version.
         * If the user sent in a newer version, tell it our version.
         *
         * The rationale behind telling the caller its version is that
         * Workstation 6.5 required the VMX and the VMCI kernel module to
         * be version sync'd.  All new VMX users will be programmed to
         * handle the VMCI kernel module version.
         */

        if (vmci_host_dev->user_version > 0 &&
            vmci_host_dev->user_version < VMCI_VERSION_HOSTQP) {
                return vmci_host_dev->user_version;
        }

        return VMCI_VERSION;
}

#define vmci_ioctl_err(fmt, ...)        \
        pr_devel("%s: " fmt, ioctl_name, ##__VA_ARGS__)

static int vmci_host_do_init_context(struct vmci_host_dev *vmci_host_dev,
                                     const char *ioctl_name,
                                     void __user *uptr)
{
        struct vmci_init_blk init_block;
        const struct cred *cred;
        int retval;

        if (copy_from_user(&init_block, uptr, sizeof(init_block))) {
                vmci_ioctl_err("error reading init block\n");
                return -EFAULT;
        }

        mutex_lock(&vmci_host_dev->lock);

        if (vmci_host_dev->ct_type != VMCIOBJ_NOT_SET) {
                vmci_ioctl_err("received VMCI init on initialized handle\n");
                retval = -EINVAL;
                goto out;
        }

        if (init_block.flags & ~VMCI_PRIVILEGE_FLAG_RESTRICTED) {
                vmci_ioctl_err("unsupported VMCI restriction flag\n");
                retval = -EINVAL;
                goto out;
        }

        cred = get_current_cred();
        vmci_host_dev->context = vmci_ctx_create(init_block.cid,
                                                 init_block.flags, 0,
                                                 vmci_host_dev->user_version,
                                                 cred);
        put_cred(cred);
        if (IS_ERR(vmci_host_dev->context)) {
                retval = PTR_ERR(vmci_host_dev->context);
                vmci_ioctl_err("error initializing context\n");
                goto out;
        }
        /*
         * Copy the cid back to user level; we do this to allow the VMX
         * to enforce its policy on cid generation.
         */
        init_block.cid = vmci_ctx_get_id(vmci_host_dev->context);
        if (copy_to_user(uptr, &init_block, sizeof(init_block))) {
                vmci_ctx_destroy(vmci_host_dev->context);
                vmci_host_dev->context = NULL;
                vmci_ioctl_err("error writing init block\n");
                retval = -EFAULT;
                goto out;
        }

        vmci_host_dev->ct_type = VMCIOBJ_CONTEXT;
        atomic_inc(&vmci_host_active_users);

        vmci_call_vsock_callback(true);

        retval = 0;

out:
        mutex_unlock(&vmci_host_dev->lock);
        return retval;
}

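/*
 * Hedged usage sketch (illustrative, not part of the original source): a
 * hypothetical userspace VMX would typically negotiate a version and then
 * create its context before issuing any other ioctls, roughly:
 *
 *         int fd = open("/dev/vmci", O_RDWR);
 *         int ver = VMCI_VERSION;
 *         ioctl(fd, IOCTL_VMCI_VERSION2, &ver);      // returns negotiated version
 *         struct vmci_init_blk blk = { .cid = VMCI_INVALID_ID, .flags = 0 };
 *         ioctl(fd, IOCTL_VMCI_INIT_CONTEXT, &blk);  // blk.cid now holds the cid
 *
 * The cid and flag values above are placeholders; the actual cid policy is
 * enforced by the VMX, which is why the generated cid is copied back.
 */
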
static int vmci_host_do_send_datagram(struct vmci_host_dev *vmci_host_dev,
                                      const char *ioctl_name,
                                      void __user *uptr)
{
        struct vmci_datagram_snd_rcv_info send_info;
        struct vmci_datagram *dg = NULL;
        u32 cid;

        if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
                vmci_ioctl_err("only valid for contexts\n");
                return -EINVAL;
        }

        if (copy_from_user(&send_info, uptr, sizeof(send_info)))
                return -EFAULT;

        if (send_info.len > VMCI_MAX_DG_SIZE) {
                vmci_ioctl_err("datagram is too big (size=%d)\n",
                               send_info.len);
                return -EINVAL;
        }

        if (send_info.len < sizeof(*dg)) {
                vmci_ioctl_err("datagram is too small (size=%d)\n",
                               send_info.len);
                return -EINVAL;
        }

        dg = memdup_user((void __user *)(uintptr_t)send_info.addr,
                         send_info.len);
        if (IS_ERR(dg)) {
                vmci_ioctl_err(
                        "cannot allocate memory to dispatch datagram\n");
                return PTR_ERR(dg);
        }

        if (VMCI_DG_SIZE(dg) != send_info.len) {
                vmci_ioctl_err("datagram size mismatch\n");
                kfree(dg);
                return -EINVAL;
        }

        pr_devel("Datagram dst (handle=0x%x:0x%x) src (handle=0x%x:0x%x), payload (size=%llu bytes)\n",
                 dg->dst.context, dg->dst.resource,
                 dg->src.context, dg->src.resource,
                 (unsigned long long)dg->payload_size);

        /* Get source context id. */
        cid = vmci_ctx_get_id(vmci_host_dev->context);
        send_info.result = vmci_datagram_dispatch(cid, dg, true);
        kfree(dg);

        return copy_to_user(uptr, &send_info, sizeof(send_info)) ? -EFAULT : 0;
}

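/*
 * Hedged sketch (illustrative, not from the original source): the buffer at
 * send_info.addr is expected to begin with a struct vmci_datagram header
 * followed by payload_size bytes of payload, so for an N-byte payload the
 * caller sets send_info.len to VMCI_DG_SIZE(dg), i.e.
 * sizeof(struct vmci_datagram) + N, along the lines of:
 *
 *         struct {
 *                 struct vmci_datagram hdr;
 *                 u8 payload[64];
 *         } msg = {
 *                 .hdr.dst = vmci_make_handle(peer_cid, peer_rid),
 *                 .hdr.src = vmci_make_handle(my_cid, my_rid),
 *                 .hdr.payload_size = sizeof(msg.payload),
 *         };
 *         send_info.addr = (uintptr_t)&msg;
 *         send_info.len = sizeof(msg);
 *
 * The handle values are placeholders; the size-mismatch check above rejects
 * any buffer where len != VMCI_DG_SIZE(dg).
 */
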
static int vmci_host_do_receive_datagram(struct vmci_host_dev *vmci_host_dev,
                                         const char *ioctl_name,
                                         void __user *uptr)
{
        struct vmci_datagram_snd_rcv_info recv_info;
        struct vmci_datagram *dg = NULL;
        int retval;
        size_t size;

        if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
                vmci_ioctl_err("only valid for contexts\n");
                return -EINVAL;
        }

        if (copy_from_user(&recv_info, uptr, sizeof(recv_info)))
                return -EFAULT;

        size = recv_info.len;
        recv_info.result = vmci_ctx_dequeue_datagram(vmci_host_dev->context,
                                                     &size, &dg);

        if (recv_info.result >= VMCI_SUCCESS) {
                void __user *ubuf = (void __user *)(uintptr_t)recv_info.addr;
                retval = copy_to_user(ubuf, dg, VMCI_DG_SIZE(dg));
                kfree(dg);
                if (retval != 0)
                        return -EFAULT;
        }

        return copy_to_user(uptr, &recv_info, sizeof(recv_info)) ? -EFAULT : 0;
}

static int vmci_host_do_alloc_queuepair(struct vmci_host_dev *vmci_host_dev,
                                        const char *ioctl_name,
                                        void __user *uptr)
{
        struct vmci_handle handle;
        int vmci_status;
        int __user *retptr;

        if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
                vmci_ioctl_err("only valid for contexts\n");
                return -EINVAL;
        }

        if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
                struct vmci_qp_alloc_info_vmvm alloc_info;
                struct vmci_qp_alloc_info_vmvm __user *info = uptr;

                if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
                        return -EFAULT;

                handle = alloc_info.handle;
                retptr = &info->result;

                vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
                                                alloc_info.peer,
                                                alloc_info.flags,
                                                VMCI_NO_PRIVILEGE_FLAGS,
                                                alloc_info.produce_size,
                                                alloc_info.consume_size,
                                                NULL,
                                                vmci_host_dev->context);

                if (vmci_status == VMCI_SUCCESS)
                        vmci_status = VMCI_SUCCESS_QUEUEPAIR_CREATE;
        } else {
                struct vmci_qp_alloc_info alloc_info;
                struct vmci_qp_alloc_info __user *info = uptr;
                struct vmci_qp_page_store page_store;

                if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
                        return -EFAULT;

                handle = alloc_info.handle;
                retptr = &info->result;

                page_store.pages = alloc_info.ppn_va;
                page_store.len = alloc_info.num_ppns;

                vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
                                                alloc_info.peer,
                                                alloc_info.flags,
                                                VMCI_NO_PRIVILEGE_FLAGS,
                                                alloc_info.produce_size,
                                                alloc_info.consume_size,
                                                &page_store,
                                                vmci_host_dev->context);
        }

        if (put_user(vmci_status, retptr)) {
                if (vmci_status >= VMCI_SUCCESS) {
                        vmci_status = vmci_qp_broker_detach(handle,
                                                        vmci_host_dev->context);
                }
                return -EFAULT;
        }

        return 0;
}

static int vmci_host_do_queuepair_setva(struct vmci_host_dev *vmci_host_dev,
                                        const char *ioctl_name,
                                        void __user *uptr)
{
        struct vmci_qp_set_va_info set_va_info;
        struct vmci_qp_set_va_info __user *info = uptr;
        s32 result;

        if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
                vmci_ioctl_err("only valid for contexts\n");
                return -EINVAL;
        }

        if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
                vmci_ioctl_err("is not allowed\n");
                return -EINVAL;
        }

        if (copy_from_user(&set_va_info, uptr, sizeof(set_va_info)))
                return -EFAULT;

        if (set_va_info.va) {
                /*
                 * VMX is passing down a new VA for the queue
                 * pair mapping.
                 */
                result = vmci_qp_broker_map(set_va_info.handle,
                                            vmci_host_dev->context,
                                            set_va_info.va);
        } else {
                /*
                 * The queue pair is about to be unmapped by
                 * the VMX.
                 */
                result = vmci_qp_broker_unmap(set_va_info.handle,
                                         vmci_host_dev->context, 0);
        }

        return put_user(result, &info->result) ? -EFAULT : 0;
}

static int vmci_host_do_queuepair_setpf(struct vmci_host_dev *vmci_host_dev,
                                        const char *ioctl_name,
                                        void __user *uptr)
{
        struct vmci_qp_page_file_info page_file_info;
        struct vmci_qp_page_file_info __user *info = uptr;
        s32 result;

        if (vmci_host_dev->user_version < VMCI_VERSION_HOSTQP ||
            vmci_host_dev->user_version >= VMCI_VERSION_NOVMVM) {
                vmci_ioctl_err("not supported on this VMX (version=%d)\n",
                               vmci_host_dev->user_version);
                return -EINVAL;
        }

        if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
                vmci_ioctl_err("only valid for contexts\n");
                return -EINVAL;
        }

        if (copy_from_user(&page_file_info, uptr, sizeof(*info)))
                return -EFAULT;

        /*
         * Communicate success pre-emptively to the caller.  Note that the
         * basic premise is that it is incumbent upon the caller not to look at
         * the info.result field until after the ioctl() returns.  And then,
         * only if the ioctl() result indicates no error.  We send up the
         * SUCCESS status before calling SetPageStore() because failing
         * to copy up the result code means unwinding the SetPageStore().
         *
         * It turns out the logic to unwind a SetPageStore() opens a can of
         * worms.  For example, if a host had created the queue_pair and a
         * guest attaches and SetPageStore() is successful but writing success
         * fails, then ... the host has to be stopped from writing (any more)
         * data into the queue_pair.  That means an additional test in the
         * VMCI_Enqueue() code path.  Ugh.
         */

        if (put_user(VMCI_SUCCESS, &info->result)) {
                /*
                 * In this case, we can't write a result field of the
                 * caller's info block.  So, we don't even try to
                 * SetPageStore().
                 */
                return -EFAULT;
        }

        result = vmci_qp_broker_set_page_store(page_file_info.handle,
                                                page_file_info.produce_va,
                                                page_file_info.consume_va,
                                                vmci_host_dev->context);
        if (result < VMCI_SUCCESS) {
                if (put_user(result, &info->result)) {
                        /*
                         * Note that in this case the SetPageStore()
                         * call failed but we were unable to
                         * communicate that to the caller (because the
                         * copy_to_user() call failed).  So, if we
                         * simply return an error (in this case
                         * -EFAULT) then the caller will know that the
                         * SetPageStore failed even though we couldn't
                         * put the result code in the result field and
                         * indicate exactly why it failed.
                         *
                         * That says nothing about the issue where we
                         * were once able to write to the caller's info
                         * memory and now can't.  Something more
                         * serious is probably going on than the fact
                         * that SetPageStore() didn't work.
                         */
                        return -EFAULT;
                }
        }

        return 0;
}

static int vmci_host_do_qp_detach(struct vmci_host_dev *vmci_host_dev,
                                  const char *ioctl_name,
                                  void __user *uptr)
{
        struct vmci_qp_dtch_info detach_info;
        struct vmci_qp_dtch_info __user *info = uptr;
        s32 result;

        if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
                vmci_ioctl_err("only valid for contexts\n");
                return -EINVAL;
        }

        if (copy_from_user(&detach_info, uptr, sizeof(detach_info)))
                return -EFAULT;

        result = vmci_qp_broker_detach(detach_info.handle,
                                       vmci_host_dev->context);
        if (result == VMCI_SUCCESS &&
            vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
                result = VMCI_SUCCESS_LAST_DETACH;
        }

        return put_user(result, &info->result) ? -EFAULT : 0;
}

static int vmci_host_do_ctx_add_notify(struct vmci_host_dev *vmci_host_dev,
                                       const char *ioctl_name,
                                       void __user *uptr)
{
        struct vmci_ctx_info ar_info;
        struct vmci_ctx_info __user *info = uptr;
        s32 result;
        u32 cid;

        if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
                vmci_ioctl_err("only valid for contexts\n");
                return -EINVAL;
        }

        if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
                return -EFAULT;

        cid = vmci_ctx_get_id(vmci_host_dev->context);
        result = vmci_ctx_add_notification(cid, ar_info.remote_cid);

        return put_user(result, &info->result) ? -EFAULT : 0;
}

static int vmci_host_do_ctx_remove_notify(struct vmci_host_dev *vmci_host_dev,
                                          const char *ioctl_name,
                                          void __user *uptr)
{
        struct vmci_ctx_info ar_info;
        struct vmci_ctx_info __user *info = uptr;
        u32 cid;
        int result;

        if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
                vmci_ioctl_err("only valid for contexts\n");
                return -EINVAL;
        }

        if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
                return -EFAULT;

        cid = vmci_ctx_get_id(vmci_host_dev->context);
        result = vmci_ctx_remove_notification(cid,
                                              ar_info.remote_cid);

        return put_user(result, &info->result) ? -EFAULT : 0;
}

static int vmci_host_do_ctx_get_cpt_state(struct vmci_host_dev *vmci_host_dev,
                                          const char *ioctl_name,
                                          void __user *uptr)
{
        struct vmci_ctx_chkpt_buf_info get_info;
        u32 cid;
        void *cpt_buf;
        int retval;

        if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
                vmci_ioctl_err("only valid for contexts\n");
                return -EINVAL;
        }

        if (copy_from_user(&get_info, uptr, sizeof(get_info)))
                return -EFAULT;

        cid = vmci_ctx_get_id(vmci_host_dev->context);
        get_info.result = vmci_ctx_get_chkpt_state(cid, get_info.cpt_type,
                                                &get_info.buf_size, &cpt_buf);
        if (get_info.result == VMCI_SUCCESS && get_info.buf_size) {
                void __user *ubuf = (void __user *)(uintptr_t)get_info.cpt_buf;
                retval = copy_to_user(ubuf, cpt_buf, get_info.buf_size);
                kfree(cpt_buf);

                if (retval)
                        return -EFAULT;
        }

        return copy_to_user(uptr, &get_info, sizeof(get_info)) ? -EFAULT : 0;
}

static int vmci_host_do_ctx_set_cpt_state(struct vmci_host_dev *vmci_host_dev,
                                          const char *ioctl_name,
                                          void __user *uptr)
{
        struct vmci_ctx_chkpt_buf_info set_info;
        u32 cid;
        void *cpt_buf;
        int retval;

        if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
                vmci_ioctl_err("only valid for contexts\n");
                return -EINVAL;
        }

        if (copy_from_user(&set_info, uptr, sizeof(set_info)))
                return -EFAULT;

        cpt_buf = memdup_user((void __user *)(uintptr_t)set_info.cpt_buf,
                                set_info.buf_size);
        if (IS_ERR(cpt_buf))
                return PTR_ERR(cpt_buf);

        cid = vmci_ctx_get_id(vmci_host_dev->context);
        set_info.result = vmci_ctx_set_chkpt_state(cid, set_info.cpt_type,
                                                   set_info.buf_size, cpt_buf);

        retval = copy_to_user(uptr, &set_info, sizeof(set_info)) ? -EFAULT : 0;

        kfree(cpt_buf);
        return retval;
}

static int vmci_host_do_get_context_id(struct vmci_host_dev *vmci_host_dev,
                                       const char *ioctl_name,
                                       void __user *uptr)
{
        u32 __user *u32ptr = uptr;

        return put_user(VMCI_HOST_CONTEXT_ID, u32ptr) ? -EFAULT : 0;
}

static int vmci_host_do_set_notify(struct vmci_host_dev *vmci_host_dev,
                                   const char *ioctl_name,
                                   void __user *uptr)
{
        struct vmci_set_notify_info notify_info;

        if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
                vmci_ioctl_err("only valid for contexts\n");
                return -EINVAL;
        }

        if (copy_from_user(&notify_info, uptr, sizeof(notify_info)))
                return -EFAULT;

        if (notify_info.notify_uva) {
                notify_info.result =
                        vmci_host_setup_notify(vmci_host_dev->context,
                                               notify_info.notify_uva);
        } else {
                vmci_ctx_unset_notify(vmci_host_dev->context);
                notify_info.result = VMCI_SUCCESS;
        }

        return copy_to_user(uptr, &notify_info, sizeof(notify_info)) ?
                -EFAULT : 0;
}

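/*
 * Hedged usage sketch (illustrative, not part of the original source): a
 * hypothetical VMX hands the driver the address of a byte it polls, e.g.:
 *
 *         static volatile unsigned char notify_flag;
 *         struct vmci_set_notify_info si = {
 *                 .notify_uva = (uintptr_t)&notify_flag,
 *         };
 *         ioctl(fd, IOCTL_VMCI_SET_NOTIFY, &si);
 *
 * After this, vmci_host_setup_notify() has pinned and mapped the page, so
 * the driver can flip the flag directly without a copy_to_user() on every
 * event.
 */
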
static int vmci_host_do_notify_resource(struct vmci_host_dev *vmci_host_dev,
                                        const char *ioctl_name,
                                        void __user *uptr)
{
        struct vmci_dbell_notify_resource_info info;
        u32 cid;

        if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
                vmci_ioctl_err("invalid for current VMX versions\n");
                return -EINVAL;
        }

        if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
                vmci_ioctl_err("only valid for contexts\n");
                return -EINVAL;
        }

        if (copy_from_user(&info, uptr, sizeof(info)))
                return -EFAULT;

        cid = vmci_ctx_get_id(vmci_host_dev->context);

        switch (info.action) {
        case VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY:
                if (info.resource == VMCI_NOTIFY_RESOURCE_DOOR_BELL) {
                        u32 flags = VMCI_NO_PRIVILEGE_FLAGS;
                        info.result = vmci_ctx_notify_dbell(cid, info.handle,
                                                            flags);
                } else {
                        info.result = VMCI_ERROR_UNAVAILABLE;
                }
                break;

        case VMCI_NOTIFY_RESOURCE_ACTION_CREATE:
                info.result = vmci_ctx_dbell_create(cid, info.handle);
                break;

        case VMCI_NOTIFY_RESOURCE_ACTION_DESTROY:
                info.result = vmci_ctx_dbell_destroy(cid, info.handle);
                break;

        default:
                vmci_ioctl_err("got unknown action (action=%d)\n",
                               info.action);
                info.result = VMCI_ERROR_INVALID_ARGS;
        }

        return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;
}

static int vmci_host_do_recv_notifications(struct vmci_host_dev *vmci_host_dev,
                                           const char *ioctl_name,
                                           void __user *uptr)
{
        struct vmci_ctx_notify_recv_info info;
        struct vmci_handle_arr *db_handle_array;
        struct vmci_handle_arr *qp_handle_array;
        void __user *ubuf;
        u32 cid;
        int retval = 0;

        if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
                vmci_ioctl_err("only valid for contexts\n");
                return -EINVAL;
        }

        if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
                vmci_ioctl_err("not supported for the current vmx version\n");
                return -EINVAL;
        }

        if (copy_from_user(&info, uptr, sizeof(info)))
                return -EFAULT;

        if ((info.db_handle_buf_size && !info.db_handle_buf_uva) ||
            (info.qp_handle_buf_size && !info.qp_handle_buf_uva)) {
                return -EINVAL;
        }

        cid = vmci_ctx_get_id(vmci_host_dev->context);

        info.result = vmci_ctx_rcv_notifications_get(cid,
                                &db_handle_array, &qp_handle_array);
        if (info.result != VMCI_SUCCESS)
                return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;

        ubuf = (void __user *)(uintptr_t)info.db_handle_buf_uva;
        info.result = drv_cp_harray_to_user(ubuf, &info.db_handle_buf_size,
                                            db_handle_array, &retval);
        if (info.result == VMCI_SUCCESS && !retval) {
                ubuf = (void __user *)(uintptr_t)info.qp_handle_buf_uva;
                info.result = drv_cp_harray_to_user(ubuf,
                                                    &info.qp_handle_buf_size,
                                                    qp_handle_array, &retval);
        }

        if (!retval && copy_to_user(uptr, &info, sizeof(info)))
                retval = -EFAULT;

        vmci_ctx_rcv_notifications_release(cid,
                                db_handle_array, qp_handle_array,
                                info.result == VMCI_SUCCESS && !retval);

        return retval;
}

static long vmci_host_unlocked_ioctl(struct file *filp,
                                     unsigned int iocmd, unsigned long ioarg)
{
#define VMCI_DO_IOCTL(ioctl_name, ioctl_fn) do {                        \
                char *name = __stringify(IOCTL_VMCI_ ## ioctl_name);    \
                return vmci_host_do_ ## ioctl_fn(                       \
                        vmci_host_dev, name, uptr);                     \
        } while (0)

        struct vmci_host_dev *vmci_host_dev = filp->private_data;
        void __user *uptr = (void __user *)ioarg;

        switch (iocmd) {
        case IOCTL_VMCI_INIT_CONTEXT:
                VMCI_DO_IOCTL(INIT_CONTEXT, init_context);
        case IOCTL_VMCI_DATAGRAM_SEND:
                VMCI_DO_IOCTL(DATAGRAM_SEND, send_datagram);
        case IOCTL_VMCI_DATAGRAM_RECEIVE:
                VMCI_DO_IOCTL(DATAGRAM_RECEIVE, receive_datagram);
        case IOCTL_VMCI_QUEUEPAIR_ALLOC:
                VMCI_DO_IOCTL(QUEUEPAIR_ALLOC, alloc_queuepair);
        case IOCTL_VMCI_QUEUEPAIR_SETVA:
                VMCI_DO_IOCTL(QUEUEPAIR_SETVA, queuepair_setva);
        case IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE:
                VMCI_DO_IOCTL(QUEUEPAIR_SETPAGEFILE, queuepair_setpf);
        case IOCTL_VMCI_QUEUEPAIR_DETACH:
                VMCI_DO_IOCTL(QUEUEPAIR_DETACH, qp_detach);
        case IOCTL_VMCI_CTX_ADD_NOTIFICATION:
                VMCI_DO_IOCTL(CTX_ADD_NOTIFICATION, ctx_add_notify);
        case IOCTL_VMCI_CTX_REMOVE_NOTIFICATION:
                VMCI_DO_IOCTL(CTX_REMOVE_NOTIFICATION, ctx_remove_notify);
        case IOCTL_VMCI_CTX_GET_CPT_STATE:
                VMCI_DO_IOCTL(CTX_GET_CPT_STATE, ctx_get_cpt_state);
        case IOCTL_VMCI_CTX_SET_CPT_STATE:
                VMCI_DO_IOCTL(CTX_SET_CPT_STATE, ctx_set_cpt_state);
        case IOCTL_VMCI_GET_CONTEXT_ID:
                VMCI_DO_IOCTL(GET_CONTEXT_ID, get_context_id);
        case IOCTL_VMCI_SET_NOTIFY:
                VMCI_DO_IOCTL(SET_NOTIFY, set_notify);
        case IOCTL_VMCI_NOTIFY_RESOURCE:
                VMCI_DO_IOCTL(NOTIFY_RESOURCE, notify_resource);
        case IOCTL_VMCI_NOTIFICATIONS_RECEIVE:
                VMCI_DO_IOCTL(NOTIFICATIONS_RECEIVE, recv_notifications);

        case IOCTL_VMCI_VERSION:
        case IOCTL_VMCI_VERSION2:
                return vmci_host_get_version(vmci_host_dev, iocmd, uptr);

        default:
                pr_devel("%s: Unknown ioctl (iocmd=%d)\n", __func__, iocmd);
                return -EINVAL;
        }

#undef VMCI_DO_IOCTL
}

static const struct file_operations vmuser_fops = {
        .owner          = THIS_MODULE,
        .open           = vmci_host_open,
        .release        = vmci_host_close,
        .poll           = vmci_host_poll,
        .unlocked_ioctl = vmci_host_unlocked_ioctl,
        .compat_ioctl   = compat_ptr_ioctl,
};

static struct miscdevice vmci_host_miscdev = {
         .name = "vmci",
         .minor = MISC_DYNAMIC_MINOR,
         .fops = &vmuser_fops,
};

int __init vmci_host_init(void)
{
        int error;

        host_context = vmci_ctx_create(VMCI_HOST_CONTEXT_ID,
                                        VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS,
                                        -1, VMCI_VERSION, NULL);
        if (IS_ERR(host_context)) {
                error = PTR_ERR(host_context);
                pr_warn("Failed to initialize VMCIContext (error=%d)\n",
                        error);
                return error;
        }

        error = misc_register(&vmci_host_miscdev);
        if (error) {
                pr_warn("Module registration error (name=%s, major=%d, minor=%d, err=%d)\n",
                        vmci_host_miscdev.name,
                        MISC_MAJOR, vmci_host_miscdev.minor,
                        error);
                pr_warn("Unable to initialize host personality\n");
                vmci_ctx_destroy(host_context);
                return error;
        }

        pr_info("VMCI host device registered (name=%s, major=%d, minor=%d)\n",
                vmci_host_miscdev.name, MISC_MAJOR, vmci_host_miscdev.minor);

        vmci_host_device_initialized = true;
        return 0;
}

void __exit vmci_host_exit(void)
{
        vmci_host_device_initialized = false;

        misc_deregister(&vmci_host_miscdev);
        vmci_ctx_destroy(host_context);
        vmci_qp_broker_exit();

        pr_debug("VMCI host driver module unloaded\n");
}