VMCI: rename PPNset to ppn_set to avoid camel case
drivers/misc/vmw_vmci/vmci_host.c [linux-2.6-block.git]
/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/io.h>

#include "vmci_handle_array.h"
#include "vmci_queue_pair.h"
#include "vmci_datagram.h"
#include "vmci_doorbell.h"
#include "vmci_resource.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"

#define VMCI_UTIL_NUM_RESOURCES 1

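/*
 * Resource kinds and actions accepted by the IOCTL_VMCI_NOTIFY_RESOURCE
 * handler (vmci_host_do_notify_resource() below).  The numeric values
 * are part of the user/kernel ABI, so they should not be renumbered.
 */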
enum {
        VMCI_NOTIFY_RESOURCE_QUEUE_PAIR = 0,
        VMCI_NOTIFY_RESOURCE_DOOR_BELL = 1,
};

enum {
        VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY = 0,
        VMCI_NOTIFY_RESOURCE_ACTION_CREATE = 1,
        VMCI_NOTIFY_RESOURCE_ACTION_DESTROY = 2,
};

/*
 * VMCI driver initialization. This block can also be used to
 * pass initial group membership etc.
 */
struct vmci_init_blk {
        u32 cid;
        u32 flags;
};

/* VMCIqueue_pairAllocInfo_VMToVM */
struct vmci_qp_alloc_info_vmvm {
        struct vmci_handle handle;
        u32 peer;
        u32 flags;
        u64 produce_size;
        u64 consume_size;
        u64 produce_page_file;       /* User VA. */
        u64 consume_page_file;       /* User VA. */
        u64 produce_page_file_size;  /* Size of the file name array. */
        u64 consume_page_file_size;  /* Size of the file name array. */
        s32 result;
        u32 _pad;
};

/* VMCISetNotifyInfo: used to pass the notify flag's address to the host driver. */
struct vmci_set_notify_info {
        u64 notify_uva;
        s32 result;
        u32 _pad;
};

/*
 * Per-instance host state
 */
struct vmci_host_dev {
        struct vmci_ctx *context;
        int user_version;
        enum vmci_obj_type ct_type;
        struct mutex lock;  /* Mutex lock for vmci context access */
};

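/*
 * Module-wide state: host_context represents the host endpoint itself,
 * vmci_host_device_initialized gates vmci_host_code_active(), and
 * vmci_host_active_users counts the VMX'en that have created a context
 * via IOCTL_VMCI_INIT_CONTEXT and not yet closed /dev/vmci.
 */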
static struct vmci_ctx *host_context;
static bool vmci_host_device_initialized;
static atomic_t vmci_host_active_users = ATOMIC_INIT(0);

/*
 * Determines whether the VMCI host personality is
 * available. Since the core functionality of the host driver is
 * always present, all guests could possibly use the host
 * personality. However, to minimize the deviation from the
 * pre-unified driver state of affairs, we only consider the host
 * device active if there is no active guest device or if there
 * are VMX'en with active VMCI contexts using the host device.
 */
bool vmci_host_code_active(void)
{
        return vmci_host_device_initialized &&
            (!vmci_guest_code_active() ||
             atomic_read(&vmci_host_active_users) > 0);
}

/*
 * Called on open of /dev/vmci.
 */
static int vmci_host_open(struct inode *inode, struct file *filp)
{
        struct vmci_host_dev *vmci_host_dev;

        vmci_host_dev = kzalloc(sizeof(struct vmci_host_dev), GFP_KERNEL);
        if (vmci_host_dev == NULL)
                return -ENOMEM;

        vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;
        mutex_init(&vmci_host_dev->lock);
        filp->private_data = vmci_host_dev;

        return 0;
}

/*
 * Called on close of /dev/vmci, most often when the process
 * exits.
 */
static int vmci_host_close(struct inode *inode, struct file *filp)
{
        struct vmci_host_dev *vmci_host_dev = filp->private_data;

        if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
                vmci_ctx_destroy(vmci_host_dev->context);
                vmci_host_dev->context = NULL;

                /*
                 * The number of active contexts is used to track whether any
                 * VMX'en are using the host personality. It is incremented when
                 * a context is created through the IOCTL_VMCI_INIT_CONTEXT
                 * ioctl.
                 */
                atomic_dec(&vmci_host_active_users);
        }
        vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;

        kfree(vmci_host_dev);
        filp->private_data = NULL;
        return 0;
}

/*
 * This is used to wake up the VMX when a VMCI call arrives, or
 * to wake up select() or poll() at the next clock tick.
 */
static unsigned int vmci_host_poll(struct file *filp, poll_table *wait)
{
        struct vmci_host_dev *vmci_host_dev = filp->private_data;
        struct vmci_ctx *context = vmci_host_dev->context;
        unsigned int mask = 0;

        if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
                /* Check for VMCI calls to this VM context. */
                if (wait)
                        poll_wait(filp, &context->host_context.wait_queue,
                                  wait);

                spin_lock(&context->lock);
                if (context->pending_datagrams > 0 ||
                    vmci_handle_arr_get_size(
                                context->pending_doorbell_array) > 0) {
                        mask = POLLIN;
                }
                spin_unlock(&context->lock);
        }
        return mask;
}

/*
 * Copies the handles of a handle array into a user buffer, and
 * returns the new length in user_buf_size. If the copy to the
 * user buffer fails, the function still returns VMCI_SUCCESS,
 * but retval != 0.
 */
static int drv_cp_harray_to_user(void __user *user_buf_uva,
                                 u64 *user_buf_size,
                                 struct vmci_handle_arr *handle_array,
                                 int *retval)
{
        u32 array_size = 0;
        struct vmci_handle *handles;

        if (handle_array)
                array_size = vmci_handle_arr_get_size(handle_array);

        if (array_size * sizeof(*handles) > *user_buf_size)
                return VMCI_ERROR_MORE_DATA;

        *user_buf_size = array_size * sizeof(*handles);
        if (*user_buf_size)
                *retval = copy_to_user(user_buf_uva,
                                       vmci_handle_arr_get_handles(handle_array),
                                       *user_buf_size);

        return VMCI_SUCCESS;
}

/*
 * Sets up a given context for notify to work. Maps the notify
 * boolean at the given user VA into kernel space.
 */
static int vmci_host_setup_notify(struct vmci_ctx *context,
                                  unsigned long uva)
{
        struct page *page;
        int retval;

        if (context->notify_page) {
                pr_devel("%s: Notify mechanism is already set up\n", __func__);
                return VMCI_ERROR_DUPLICATE_ENTRY;
        }

        /*
         * We are using 'bool' internally, but let's make sure we are
         * explicit about the size.
         */
        BUILD_BUG_ON(sizeof(bool) != sizeof(u8));
        if (!access_ok(VERIFY_WRITE, (void __user *)uva, sizeof(u8)))
                return VMCI_ERROR_GENERIC;

        /*
         * Lock the physical page backing the given user VA.  Note that
         * we must pin the page that *contains* uva, so the address must
         * not be rounded up to the next page boundary.
         */
        down_read(&current->mm->mmap_sem);
        retval = get_user_pages(current, current->mm,
                                uva & PAGE_MASK,
                                1, 1, 0, &page, NULL);
        up_read(&current->mm->mmap_sem);
        if (retval != 1)
                return VMCI_ERROR_GENERIC;

        /*
         * Map the locked page and set up the notify pointer.  Remember
         * the page so that the duplicate-setup check above works, and so
         * that it can be unmapped and released at context teardown.
         */
        context->notify_page = page;
        context->notify = kmap(page) + (uva & (PAGE_SIZE - 1));
        vmci_ctx_check_signal_notify(context);

        return VMCI_SUCCESS;
}

static int vmci_host_get_version(struct vmci_host_dev *vmci_host_dev,
                                 unsigned int cmd, void __user *uptr)
{
        if (cmd == IOCTL_VMCI_VERSION2) {
                int __user *vptr = uptr;
                if (get_user(vmci_host_dev->user_version, vptr))
                        return -EFAULT;
        }

        /*
         * The basic logic here is:
         *
         * If the user sends in a version of 0, tell it our version.
         * If the user didn't send in a version, tell it our version.
         * If the user sent in an old version, tell it -its- version.
         * If the user sent in a newer version, tell it our version.
         *
         * The rationale behind telling the caller its own version is that
         * Workstation 6.5 required the VMX and the VMCI kernel module to
         * be version-synced. All newer VMX users will be programmed to
         * handle the VMCI kernel module version.
         */

        if (vmci_host_dev->user_version > 0 &&
            vmci_host_dev->user_version < VMCI_VERSION_HOSTQP) {
                return vmci_host_dev->user_version;
        }

        return VMCI_VERSION;
}

#define vmci_ioctl_err(fmt, ...)        \
        pr_devel("%s: " fmt, ioctl_name, ##__VA_ARGS__)
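/*
 * Each vmci_host_do_*() handler below takes an ioctl_name argument,
 * supplied by the VMCI_DO_IOCTL() wrapper in vmci_host_unlocked_ioctl(),
 * which stringifies the ioctl constant so that vmci_ioctl_err() can
 * prefix messages with the name of the failing ioctl.
 */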

static int vmci_host_do_init_context(struct vmci_host_dev *vmci_host_dev,
                                     const char *ioctl_name,
                                     void __user *uptr)
{
        struct vmci_init_blk init_block;
        const struct cred *cred;
        int retval;

        if (copy_from_user(&init_block, uptr, sizeof(init_block))) {
                vmci_ioctl_err("error reading init block\n");
                return -EFAULT;
        }

        mutex_lock(&vmci_host_dev->lock);

        if (vmci_host_dev->ct_type != VMCIOBJ_NOT_SET) {
                vmci_ioctl_err("received VMCI init on initialized handle\n");
                retval = -EINVAL;
                goto out;
        }

        if (init_block.flags & ~VMCI_PRIVILEGE_FLAG_RESTRICTED) {
                vmci_ioctl_err("unsupported VMCI restriction flag\n");
                retval = -EINVAL;
                goto out;
        }

        cred = get_current_cred();
        vmci_host_dev->context = vmci_ctx_create(init_block.cid,
                                                 init_block.flags, 0,
                                                 vmci_host_dev->user_version,
                                                 cred);
        put_cred(cred);
        if (IS_ERR(vmci_host_dev->context)) {
                retval = PTR_ERR(vmci_host_dev->context);
                vmci_ioctl_err("error initializing context\n");
                goto out;
        }

        /*
         * Copy the cid back to user level; we do this to allow the
         * VMX to enforce its policy on cid generation.
         */
        init_block.cid = vmci_ctx_get_id(vmci_host_dev->context);
        if (copy_to_user(uptr, &init_block, sizeof(init_block))) {
                vmci_ctx_destroy(vmci_host_dev->context);
                vmci_host_dev->context = NULL;
                vmci_ioctl_err("error writing init block\n");
                retval = -EFAULT;
                goto out;
        }

        vmci_host_dev->ct_type = VMCIOBJ_CONTEXT;
        atomic_inc(&vmci_host_active_users);

        retval = 0;

out:
        mutex_unlock(&vmci_host_dev->lock);
        return retval;
}

static int vmci_host_do_send_datagram(struct vmci_host_dev *vmci_host_dev,
                                      const char *ioctl_name,
                                      void __user *uptr)
{
        struct vmci_datagram_snd_rcv_info send_info;
        struct vmci_datagram *dg = NULL;
        u32 cid;

        if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
                vmci_ioctl_err("only valid for contexts\n");
                return -EINVAL;
        }

        if (copy_from_user(&send_info, uptr, sizeof(send_info)))
                return -EFAULT;

        if (send_info.len > VMCI_MAX_DG_SIZE) {
                vmci_ioctl_err("datagram is too big (size=%d)\n",
                               send_info.len);
                return -EINVAL;
        }

        if (send_info.len < sizeof(*dg)) {
                vmci_ioctl_err("datagram is too small (size=%d)\n",
                               send_info.len);
                return -EINVAL;
        }

        dg = kmalloc(send_info.len, GFP_KERNEL);
        if (!dg) {
                vmci_ioctl_err(
                        "cannot allocate memory to dispatch datagram\n");
                return -ENOMEM;
        }

        if (copy_from_user(dg, (void __user *)(uintptr_t)send_info.addr,
                           send_info.len)) {
                vmci_ioctl_err("error getting datagram\n");
                kfree(dg);
                return -EFAULT;
        }

        pr_devel("Datagram dst (handle=0x%x:0x%x) src (handle=0x%x:0x%x), payload (size=%llu bytes)\n",
                 dg->dst.context, dg->dst.resource,
                 dg->src.context, dg->src.resource,
                 (unsigned long long)dg->payload_size);

        /* Get source context id. */
        cid = vmci_ctx_get_id(vmci_host_dev->context);
        send_info.result = vmci_datagram_dispatch(cid, dg, true);
        kfree(dg);

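        /*
         * The VMCI status of the dispatch travels back to user space in
         * send_info.result; the ioctl return value only reports copy
         * failures.  Most handlers in this file follow the same pattern.
         */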
        return copy_to_user(uptr, &send_info, sizeof(send_info)) ? -EFAULT : 0;
}

static int vmci_host_do_receive_datagram(struct vmci_host_dev *vmci_host_dev,
                                         const char *ioctl_name,
                                         void __user *uptr)
{
        struct vmci_datagram_snd_rcv_info recv_info;
        struct vmci_datagram *dg = NULL;
        int retval;
        size_t size;

        if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
                vmci_ioctl_err("only valid for contexts\n");
                return -EINVAL;
        }

        if (copy_from_user(&recv_info, uptr, sizeof(recv_info)))
                return -EFAULT;

        size = recv_info.len;
        recv_info.result = vmci_ctx_dequeue_datagram(vmci_host_dev->context,
                                                     &size, &dg);

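        /*
         * On success, vmci_ctx_dequeue_datagram() hands us a
         * heap-allocated datagram; we own it from here on and must free
         * it once it has been copied out.
         */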
        if (recv_info.result >= VMCI_SUCCESS) {
                void __user *ubuf = (void __user *)(uintptr_t)recv_info.addr;
                retval = copy_to_user(ubuf, dg, VMCI_DG_SIZE(dg));
                kfree(dg);
                if (retval != 0)
                        return -EFAULT;
        }

        return copy_to_user(uptr, &recv_info, sizeof(recv_info)) ? -EFAULT : 0;
}

static int vmci_host_do_alloc_queuepair(struct vmci_host_dev *vmci_host_dev,
                                        const char *ioctl_name,
                                        void __user *uptr)
{
        struct vmci_handle handle;
        int vmci_status;
        int __user *retptr;
        u32 cid;

        if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
                vmci_ioctl_err("only valid for contexts\n");
                return -EINVAL;
        }

        cid = vmci_ctx_get_id(vmci_host_dev->context);

        if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
                struct vmci_qp_alloc_info_vmvm alloc_info;
                struct vmci_qp_alloc_info_vmvm __user *info = uptr;

                if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
                        return -EFAULT;

                handle = alloc_info.handle;
                retptr = &info->result;

                vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
                                                   alloc_info.peer,
                                                   alloc_info.flags,
                                                   VMCI_NO_PRIVILEGE_FLAGS,
                                                   alloc_info.produce_size,
                                                   alloc_info.consume_size,
                                                   NULL,
                                                   vmci_host_dev->context);

                if (vmci_status == VMCI_SUCCESS)
                        vmci_status = VMCI_SUCCESS_QUEUEPAIR_CREATE;
        } else {
                struct vmci_qp_alloc_info alloc_info;
                struct vmci_qp_alloc_info __user *info = uptr;
                struct vmci_qp_page_store page_store;

                if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
                        return -EFAULT;

                handle = alloc_info.handle;
                retptr = &info->result;

                page_store.pages = alloc_info.ppn_va;
                page_store.len = alloc_info.num_ppns;

                vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
                                                   alloc_info.peer,
                                                   alloc_info.flags,
                                                   VMCI_NO_PRIVILEGE_FLAGS,
                                                   alloc_info.produce_size,
                                                   alloc_info.consume_size,
                                                   &page_store,
                                                   vmci_host_dev->context);
        }

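        /*
         * If the status cannot be copied back to user space, unwind a
         * successful allocation by detaching again, so that the broker
         * is not left with an attachment the caller never learned about.
         */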
        if (put_user(vmci_status, retptr)) {
                if (vmci_status >= VMCI_SUCCESS) {
                        vmci_status = vmci_qp_broker_detach(handle,
                                                            vmci_host_dev->context);
                }
                return -EFAULT;
        }

        return 0;
}

static int vmci_host_do_queuepair_setva(struct vmci_host_dev *vmci_host_dev,
                                        const char *ioctl_name,
                                        void __user *uptr)
{
        struct vmci_qp_set_va_info set_va_info;
        struct vmci_qp_set_va_info __user *info = uptr;
        s32 result;

        if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
                vmci_ioctl_err("only valid for contexts\n");
                return -EINVAL;
        }

        if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
                vmci_ioctl_err("is not allowed\n");
                return -EINVAL;
        }

        if (copy_from_user(&set_va_info, uptr, sizeof(set_va_info)))
                return -EFAULT;

        if (set_va_info.va) {
                /*
                 * VMX is passing down a new VA for the queue
                 * pair mapping.
                 */
                result = vmci_qp_broker_map(set_va_info.handle,
                                            vmci_host_dev->context,
                                            set_va_info.va);
        } else {
                /*
                 * The queue pair is about to be unmapped by
                 * the VMX.
                 */
                result = vmci_qp_broker_unmap(set_va_info.handle,
                                              vmci_host_dev->context, 0);
        }

        return put_user(result, &info->result) ? -EFAULT : 0;
}

static int vmci_host_do_queuepair_setpf(struct vmci_host_dev *vmci_host_dev,
                                        const char *ioctl_name,
                                        void __user *uptr)
{
        struct vmci_qp_page_file_info page_file_info;
        struct vmci_qp_page_file_info __user *info = uptr;
        s32 result;

        if (vmci_host_dev->user_version < VMCI_VERSION_HOSTQP ||
            vmci_host_dev->user_version >= VMCI_VERSION_NOVMVM) {
                vmci_ioctl_err("not supported on this VMX (version=%d)\n",
                               vmci_host_dev->user_version);
                return -EINVAL;
        }

        if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
                vmci_ioctl_err("only valid for contexts\n");
                return -EINVAL;
        }

        if (copy_from_user(&page_file_info, uptr, sizeof(*info)))
                return -EFAULT;

        /*
         * Communicate success pre-emptively to the caller. Note that the
         * basic premise is that it is incumbent upon the caller not to look at
         * the info.result field until after the ioctl() returns. And then,
         * only if the ioctl() result indicates no error. We send up the
         * SUCCESS status before calling SetPageStore() because failing
         * to copy up the result code means unwinding the SetPageStore().
         *
         * It turns out the logic to unwind a SetPageStore() opens a can of
         * worms. For example, if a host had created the queue_pair and a
         * guest attaches and SetPageStore() is successful but writing success
         * fails, then ... the host has to be stopped from writing (anymore)
         * data into the queue_pair. That means an additional test in the
         * VMCI_Enqueue() code path. Ugh.
         */

        if (put_user(VMCI_SUCCESS, &info->result)) {
                /*
                 * In this case, we can't write a result field of the
                 * caller's info block. So, we don't even try to
                 * SetPageStore().
                 */
                return -EFAULT;
        }

        result = vmci_qp_broker_set_page_store(page_file_info.handle,
                                               page_file_info.produce_va,
                                               page_file_info.consume_va,
                                               vmci_host_dev->context);
        if (result < VMCI_SUCCESS) {
                if (put_user(result, &info->result)) {
                        /*
                         * Note that in this case the SetPageStore()
                         * call failed but we were unable to
                         * communicate that to the caller (because the
                         * put_user() call failed). So, if we
                         * simply return an error (in this case
                         * -EFAULT) then the caller will know that the
                         * SetPageStore failed even though we couldn't
                         * put the result code in the result field and
                         * indicate exactly why it failed.
                         *
                         * That says nothing about the issue where we
                         * were once able to write to the caller's info
                         * memory and now can't. Something more
                         * serious is probably going on than the fact
                         * that SetPageStore() didn't work.
                         */
                        return -EFAULT;
                }
        }

        return 0;
}

static int vmci_host_do_qp_detach(struct vmci_host_dev *vmci_host_dev,
                                  const char *ioctl_name,
                                  void __user *uptr)
{
        struct vmci_qp_dtch_info detach_info;
        struct vmci_qp_dtch_info __user *info = uptr;
        s32 result;

        if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
                vmci_ioctl_err("only valid for contexts\n");
                return -EINVAL;
        }

        if (copy_from_user(&detach_info, uptr, sizeof(detach_info)))
                return -EFAULT;

        result = vmci_qp_broker_detach(detach_info.handle,
                                       vmci_host_dev->context);
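
        /*
         * Pre-NOVMVM VMX'en expect the more specific "last detach"
         * status instead of plain VMCI_SUCCESS, so translate the result
         * for older callers.
         */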
        if (result == VMCI_SUCCESS &&
            vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
                result = VMCI_SUCCESS_LAST_DETACH;
        }

        return put_user(result, &info->result) ? -EFAULT : 0;
}

static int vmci_host_do_ctx_add_notify(struct vmci_host_dev *vmci_host_dev,
                                       const char *ioctl_name,
                                       void __user *uptr)
{
        struct vmci_ctx_info ar_info;
        struct vmci_ctx_info __user *info = uptr;
        s32 result;
        u32 cid;

        if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
                vmci_ioctl_err("only valid for contexts\n");
                return -EINVAL;
        }

        if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
                return -EFAULT;

        cid = vmci_ctx_get_id(vmci_host_dev->context);
        result = vmci_ctx_add_notification(cid, ar_info.remote_cid);

        return put_user(result, &info->result) ? -EFAULT : 0;
}

static int vmci_host_do_ctx_remove_notify(struct vmci_host_dev *vmci_host_dev,
                                          const char *ioctl_name,
                                          void __user *uptr)
{
        struct vmci_ctx_info ar_info;
        struct vmci_ctx_info __user *info = uptr;
        u32 cid;
        int result;

        if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
                vmci_ioctl_err("only valid for contexts\n");
                return -EINVAL;
        }

        if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
                return -EFAULT;

        cid = vmci_ctx_get_id(vmci_host_dev->context);
        result = vmci_ctx_remove_notification(cid,
                                              ar_info.remote_cid);

        return put_user(result, &info->result) ? -EFAULT : 0;
}

static int vmci_host_do_ctx_get_cpt_state(struct vmci_host_dev *vmci_host_dev,
                                          const char *ioctl_name,
                                          void __user *uptr)
{
        struct vmci_ctx_chkpt_buf_info get_info;
        u32 cid;
        void *cpt_buf;
        int retval;

        if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
                vmci_ioctl_err("only valid for contexts\n");
                return -EINVAL;
        }

        if (copy_from_user(&get_info, uptr, sizeof(get_info)))
                return -EFAULT;

        cid = vmci_ctx_get_id(vmci_host_dev->context);
        get_info.result = vmci_ctx_get_chkpt_state(cid, get_info.cpt_type,
                                                   &get_info.buf_size, &cpt_buf);
        if (get_info.result == VMCI_SUCCESS && get_info.buf_size) {
                void __user *ubuf = (void __user *)(uintptr_t)get_info.cpt_buf;
                retval = copy_to_user(ubuf, cpt_buf, get_info.buf_size);
                kfree(cpt_buf);

                if (retval)
                        return -EFAULT;
        }

        return copy_to_user(uptr, &get_info, sizeof(get_info)) ? -EFAULT : 0;
}

static int vmci_host_do_ctx_set_cpt_state(struct vmci_host_dev *vmci_host_dev,
                                          const char *ioctl_name,
                                          void __user *uptr)
{
        struct vmci_ctx_chkpt_buf_info set_info;
        u32 cid;
        void *cpt_buf;
        int retval;

        if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
                vmci_ioctl_err("only valid for contexts\n");
                return -EINVAL;
        }

        if (copy_from_user(&set_info, uptr, sizeof(set_info)))
                return -EFAULT;

        cpt_buf = kmalloc(set_info.buf_size, GFP_KERNEL);
        if (!cpt_buf) {
                vmci_ioctl_err(
                        "cannot allocate memory to set cpt state (type=%d)\n",
                        set_info.cpt_type);
                return -ENOMEM;
        }

        if (copy_from_user(cpt_buf, (void __user *)(uintptr_t)set_info.cpt_buf,
                           set_info.buf_size)) {
                retval = -EFAULT;
                goto out;
        }

        cid = vmci_ctx_get_id(vmci_host_dev->context);
        set_info.result = vmci_ctx_set_chkpt_state(cid, set_info.cpt_type,
                                                   set_info.buf_size, cpt_buf);

        retval = copy_to_user(uptr, &set_info, sizeof(set_info)) ? -EFAULT : 0;

out:
        kfree(cpt_buf);
        return retval;
}

static int vmci_host_do_get_context_id(struct vmci_host_dev *vmci_host_dev,
                                       const char *ioctl_name,
                                       void __user *uptr)
{
        u32 __user *u32ptr = uptr;

        return put_user(VMCI_HOST_CONTEXT_ID, u32ptr) ? -EFAULT : 0;
}

static int vmci_host_do_set_notify(struct vmci_host_dev *vmci_host_dev,
                                   const char *ioctl_name,
                                   void __user *uptr)
{
        struct vmci_set_notify_info notify_info;

        if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
                vmci_ioctl_err("only valid for contexts\n");
                return -EINVAL;
        }

        if (copy_from_user(&notify_info, uptr, sizeof(notify_info)))
                return -EFAULT;

        if (notify_info.notify_uva) {
                notify_info.result =
                        vmci_host_setup_notify(vmci_host_dev->context,
                                               notify_info.notify_uva);
        } else {
                vmci_ctx_unset_notify(vmci_host_dev->context);
                notify_info.result = VMCI_SUCCESS;
        }

        return copy_to_user(uptr, &notify_info, sizeof(notify_info)) ?
                -EFAULT : 0;
}

static int vmci_host_do_notify_resource(struct vmci_host_dev *vmci_host_dev,
                                        const char *ioctl_name,
                                        void __user *uptr)
{
        struct vmci_dbell_notify_resource_info info;
        u32 cid;

        if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
                vmci_ioctl_err("invalid for current VMX versions\n");
                return -EINVAL;
        }

        if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
                vmci_ioctl_err("only valid for contexts\n");
                return -EINVAL;
        }

        if (copy_from_user(&info, uptr, sizeof(info)))
                return -EFAULT;

        cid = vmci_ctx_get_id(vmci_host_dev->context);

        switch (info.action) {
        case VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY:
                if (info.resource == VMCI_NOTIFY_RESOURCE_DOOR_BELL) {
                        u32 flags = VMCI_NO_PRIVILEGE_FLAGS;
                        info.result = vmci_ctx_notify_dbell(cid, info.handle,
                                                            flags);
                } else {
                        info.result = VMCI_ERROR_UNAVAILABLE;
                }
                break;

        case VMCI_NOTIFY_RESOURCE_ACTION_CREATE:
                info.result = vmci_ctx_dbell_create(cid, info.handle);
                break;

        case VMCI_NOTIFY_RESOURCE_ACTION_DESTROY:
                info.result = vmci_ctx_dbell_destroy(cid, info.handle);
                break;

        default:
                vmci_ioctl_err("got unknown action (action=%d)\n",
                               info.action);
                info.result = VMCI_ERROR_INVALID_ARGS;
        }

        return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;
}

static int vmci_host_do_recv_notifications(struct vmci_host_dev *vmci_host_dev,
                                           const char *ioctl_name,
                                           void __user *uptr)
{
        struct vmci_ctx_notify_recv_info info;
        struct vmci_handle_arr *db_handle_array;
        struct vmci_handle_arr *qp_handle_array;
        void __user *ubuf;
        u32 cid;
        int retval = 0;

        if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
                vmci_ioctl_err("only valid for contexts\n");
                return -EINVAL;
        }

        if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
                vmci_ioctl_err("not supported for the current vmx version\n");
                return -EINVAL;
        }

        if (copy_from_user(&info, uptr, sizeof(info)))
                return -EFAULT;

        if ((info.db_handle_buf_size && !info.db_handle_buf_uva) ||
            (info.qp_handle_buf_size && !info.qp_handle_buf_uva)) {
                return -EINVAL;
        }

        cid = vmci_ctx_get_id(vmci_host_dev->context);

        info.result = vmci_ctx_rcv_notifications_get(cid,
                                &db_handle_array, &qp_handle_array);
        if (info.result != VMCI_SUCCESS)
                return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;

        ubuf = (void __user *)(uintptr_t)info.db_handle_buf_uva;
        info.result = drv_cp_harray_to_user(ubuf, &info.db_handle_buf_size,
                                            db_handle_array, &retval);
        if (info.result == VMCI_SUCCESS && !retval) {
                ubuf = (void __user *)(uintptr_t)info.qp_handle_buf_uva;
                info.result = drv_cp_harray_to_user(ubuf,
                                                    &info.qp_handle_buf_size,
                                                    qp_handle_array, &retval);
        }

        if (!retval && copy_to_user(uptr, &info, sizeof(info)))
                retval = -EFAULT;

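        /*
         * Hand the arrays back to the context layer; the final argument
         * indicates whether delivery fully succeeded, so that undelivered
         * notifications can be re-queued instead of being lost.
         */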
        vmci_ctx_rcv_notifications_release(cid,
                                           db_handle_array, qp_handle_array,
                                           info.result == VMCI_SUCCESS && !retval);

        return retval;
}

static long vmci_host_unlocked_ioctl(struct file *filp,
                                     unsigned int iocmd, unsigned long ioarg)
{
#define VMCI_DO_IOCTL(ioctl_name, ioctl_fn) do {                \
        char *name = __stringify(IOCTL_VMCI_ ## ioctl_name);    \
        return vmci_host_do_ ## ioctl_fn(                       \
                        vmci_host_dev, name, uptr);             \
        } while (0)
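        /*
         * Every case below returns straight out of the ioctl through
         * VMCI_DO_IOCTL(), which also passes the stringified ioctl name
         * to the handler for use in vmci_ioctl_err() messages.
         */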

        struct vmci_host_dev *vmci_host_dev = filp->private_data;
        void __user *uptr = (void __user *)ioarg;

        switch (iocmd) {
        case IOCTL_VMCI_INIT_CONTEXT:
                VMCI_DO_IOCTL(INIT_CONTEXT, init_context);
        case IOCTL_VMCI_DATAGRAM_SEND:
                VMCI_DO_IOCTL(DATAGRAM_SEND, send_datagram);
        case IOCTL_VMCI_DATAGRAM_RECEIVE:
                VMCI_DO_IOCTL(DATAGRAM_RECEIVE, receive_datagram);
        case IOCTL_VMCI_QUEUEPAIR_ALLOC:
                VMCI_DO_IOCTL(QUEUEPAIR_ALLOC, alloc_queuepair);
        case IOCTL_VMCI_QUEUEPAIR_SETVA:
                VMCI_DO_IOCTL(QUEUEPAIR_SETVA, queuepair_setva);
        case IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE:
                VMCI_DO_IOCTL(QUEUEPAIR_SETPAGEFILE, queuepair_setpf);
        case IOCTL_VMCI_QUEUEPAIR_DETACH:
                VMCI_DO_IOCTL(QUEUEPAIR_DETACH, qp_detach);
        case IOCTL_VMCI_CTX_ADD_NOTIFICATION:
                VMCI_DO_IOCTL(CTX_ADD_NOTIFICATION, ctx_add_notify);
        case IOCTL_VMCI_CTX_REMOVE_NOTIFICATION:
                VMCI_DO_IOCTL(CTX_REMOVE_NOTIFICATION, ctx_remove_notify);
        case IOCTL_VMCI_CTX_GET_CPT_STATE:
                VMCI_DO_IOCTL(CTX_GET_CPT_STATE, ctx_get_cpt_state);
        case IOCTL_VMCI_CTX_SET_CPT_STATE:
                VMCI_DO_IOCTL(CTX_SET_CPT_STATE, ctx_set_cpt_state);
        case IOCTL_VMCI_GET_CONTEXT_ID:
                VMCI_DO_IOCTL(GET_CONTEXT_ID, get_context_id);
        case IOCTL_VMCI_SET_NOTIFY:
                VMCI_DO_IOCTL(SET_NOTIFY, set_notify);
        case IOCTL_VMCI_NOTIFY_RESOURCE:
                VMCI_DO_IOCTL(NOTIFY_RESOURCE, notify_resource);
        case IOCTL_VMCI_NOTIFICATIONS_RECEIVE:
                VMCI_DO_IOCTL(NOTIFICATIONS_RECEIVE, recv_notifications);

        case IOCTL_VMCI_VERSION:
        case IOCTL_VMCI_VERSION2:
                return vmci_host_get_version(vmci_host_dev, iocmd, uptr);

        default:
                pr_devel("%s: Unknown ioctl (iocmd=%d)\n", __func__, iocmd);
                return -EINVAL;
        }

#undef VMCI_DO_IOCTL
}

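/*
 * The ioctl argument structures use only fixed-size u32/u64 fields with
 * explicit padding, which is presumably why the native ioctl handler can
 * double as the .compat_ioctl handler for 32-bit user space.
 */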
static const struct file_operations vmuser_fops = {
        .owner          = THIS_MODULE,
        .open           = vmci_host_open,
        .release        = vmci_host_close,
        .poll           = vmci_host_poll,
        .unlocked_ioctl = vmci_host_unlocked_ioctl,
        .compat_ioctl   = vmci_host_unlocked_ioctl,
};

static struct miscdevice vmci_host_miscdev = {
        .name = "vmci",
        .minor = MISC_DYNAMIC_MINOR,
        .fops = &vmuser_fops,
};

int __init vmci_host_init(void)
{
        int error;

        host_context = vmci_ctx_create(VMCI_HOST_CONTEXT_ID,
                                       VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS,
                                       -1, VMCI_VERSION, NULL);
        if (IS_ERR(host_context)) {
                error = PTR_ERR(host_context);
                pr_warn("Failed to initialize VMCIContext (error=%d)\n",
                        error);
                return error;
        }

        error = misc_register(&vmci_host_miscdev);
        if (error) {
                pr_warn("Module registration error (name=%s, major=%d, minor=%d, err=%d)\n",
                        vmci_host_miscdev.name,
                        MISC_MAJOR, vmci_host_miscdev.minor,
                        error);
                pr_warn("Unable to initialize host personality\n");
                vmci_ctx_destroy(host_context);
                return error;
        }

        pr_info("VMCI host device registered (name=%s, major=%d, minor=%d)\n",
                vmci_host_miscdev.name, MISC_MAJOR, vmci_host_miscdev.minor);

        vmci_host_device_initialized = true;
        return 0;
}

void __exit vmci_host_exit(void)
{
        int error;

        vmci_host_device_initialized = false;

        error = misc_deregister(&vmci_host_miscdev);
        if (error)
                pr_warn("Error unregistering character device: %d\n", error);

        vmci_ctx_destroy(host_context);
        vmci_qp_broker_exit();

        pr_debug("VMCI host driver module unloaded\n");
}