/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/moduleparam.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/interface/hvm/dm_op.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/balloon.h>

#include "privcmd.h"
MODULE_LICENSE("GPL");
#define PRIV_VMA_LOCKED ((void *)1)
static unsigned int privcmd_dm_op_max_num = 16;
module_param_named(dm_op_max_nr_bufs, privcmd_dm_op_max_num, uint, 0644);
MODULE_PARM_DESC(dm_op_max_nr_bufs,
		 "Maximum number of buffers per dm_op hypercall");
static unsigned int privcmd_dm_op_buf_max_size = 4096;
module_param_named(dm_op_buf_max_size, privcmd_dm_op_buf_max_size, uint,
		   0644);
MODULE_PARM_DESC(dm_op_buf_max_size,
		 "Maximum size of a dm_op hypercall buffer");
struct privcmd_data {
	domid_t domid;
};

static int privcmd_vma_range_is_mapped(
	       struct vm_area_struct *vma,
	       unsigned long addr,
	       unsigned long nr_pages);
static long privcmd_ioctl_hypercall(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_hypercall hypercall;
	long ret;

	/* Disallow arbitrary hypercalls if restricted */
	if (data->domid != DOMID_INVALID)
		return -EPERM;

	if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
		return -EFAULT;

	xen_preemptible_hcall_begin();
	ret = privcmd_call(hypercall.op,
			   hypercall.arg[0], hypercall.arg[1],
			   hypercall.arg[2], hypercall.arg[3],
			   hypercall.arg[4]);
	xen_preemptible_hcall_end();

	return ret;
}
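/*
 * Illustrative only: a minimal userspace sketch of the hypercall ioctl,
 * assuming the usual /dev/xen/privcmd node. The privcmd_hypercall layout
 * comes from xen/privcmd.h; error handling is elided.
 *
 *	struct privcmd_hypercall call = {
 *		.op  = __HYPERVISOR_xen_version,
 *		.arg = { 0 },	// XENVER_version (0) takes no buffer
 *	};
 *	int fd = open("/dev/xen/privcmd", O_RDWR);
 *	long ver = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
 */

/* Free every page gathered onto @pages and reinitialise the list head. */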
static void free_page_list(struct list_head *pages)
{
	struct page *p, *n;

	list_for_each_entry_safe(p, n, pages, lru)
		__free_page(p);

	INIT_LIST_HEAD(pages);
}
/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
			unsigned nelem, size_t size,
			const void __user *data)
{
	unsigned pageidx;
	void *pagedata;
	int ret;

	if (size > PAGE_SIZE)
		return 0;

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* quiet, gcc */
	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page = alloc_page(GFP_KERNEL);

			ret = -ENOMEM;
			if (page == NULL)
				goto fail;

			pagedata = page_address(page);

			list_add_tail(&page->lru, pagelist);
			pageidx = 0;
		}

		ret = -EFAULT;
		if (copy_from_user(pagedata + pageidx, data, size))
			goto fail;

		data += size;
		pageidx += size;
	}

	ret = 0;

fail:
	return ret;
}
/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
			  struct list_head *pos,
			  int (*fn)(void *data, void *state),
			  void *state)
{
	void *pagedata;
	unsigned pageidx;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* hush, gcc */

	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page;

			pos = pos->next;
			page = list_entry(pos, struct page, lru);
			pagedata = page_address(page);
			pageidx = 0;
		}

		ret = (*fn)(pagedata + pageidx, state);
		if (ret)
			break;
		pageidx += size;
	}

	return ret;
}
/*
 * Similar to traverse_pages, but use each page as a "block" of
 * data to be processed as one unit.
 */
static int traverse_pages_block(unsigned nelem, size_t size,
				struct list_head *pos,
				int (*fn)(void *data, int nr, void *state),
				void *state)
{
	void *pagedata;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	while (nelem) {
		int nr = (PAGE_SIZE/size);
		struct page *page;

		if (nr > nelem)
			nr = nelem;

		pos = pos->next;
		page = list_entry(pos, struct page, lru);
		pagedata = page_address(page);
		ret = (*fn)(pagedata, nr, state);
		if (ret)
			break;
		nelem -= nr;
	}

	return ret;
}
struct mmap_gfn_state {
	unsigned long va;
	struct vm_area_struct *vma;
	domid_t domain;
};
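/*
 * Map the GFN chunk described by one privcmd_mmap_entry into the VMA held
 * in @state, advancing st->va past the chunk on success.
 */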
static int mmap_gfn_range(void *data, void *state)
{
	struct privcmd_mmap_entry *msg = data;
	struct mmap_gfn_state *st = state;
	struct vm_area_struct *vma = st->vma;
	int rc;

	/* Do not allow range to wrap the address space. */
	if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
	    ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
		return -EINVAL;

	/* Range chunks must be contiguous in va space. */
	if ((msg->va != st->va) ||
	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
		return -EINVAL;

	rc = xen_remap_domain_gfn_range(vma,
					msg->va & PAGE_MASK,
					msg->mfn, msg->npages,
					vma->vm_page_prot,
					st->domain, NULL);
	if (rc < 0)
		return rc;

	st->va += msg->npages << PAGE_SHIFT;

	return 0;
}
static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_mmap mmapcmd;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;
	LIST_HEAD(pagelist);
	struct mmap_gfn_state state;

	/* We only support privcmd_ioctl_mmap_batch for auto translated. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -ENOSYS;

	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != mmapcmd.dom)
		return -EPERM;

	rc = gather_array(&pagelist,
			  mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			  mmapcmd.entry);

	if (rc || list_empty(&pagelist))
		goto out;

	down_write(&mm->mmap_sem);

	{
		struct page *page = list_first_entry(&pagelist,
						     struct page, lru);
		struct privcmd_mmap_entry *msg = page_address(page);

		vma = find_vma(mm, msg->va);
		rc = -EINVAL;

		if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
			goto out_up;
		vma->vm_private_data = PRIV_VMA_LOCKED;
	}

	state.va = vma->vm_start;
	state.vma = vma;
	state.domain = mmapcmd.dom;

	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			    &pagelist,
			    mmap_gfn_range, &state);

out_up:
	up_write(&mm->mmap_sem);

out:
	free_page_list(&pagelist);
	return rc;
}
struct mmap_batch_state {
	domid_t domain;
	unsigned long va;
	struct vm_area_struct *vma;
	int index;
	/* A tristate:
	 *      0 for no errors
	 *      1 if at least one error has happened (and no
	 *          -ENOENT errors have happened)
	 *      -ENOENT if at least 1 -ENOENT has happened.
	 */
	int global_error;
	int version;

	/* User-space gfn array to store errors in the second pass for V1. */
	xen_pfn_t __user *user_gfn;
	/* User-space int array to store errors in the second pass for V2. */
	int __user *user_err;
};
/* auto translated dom0 note: if domU being created is PV, then gfn is
 * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP).
 */
static int mmap_batch_fn(void *data, int nr, void *state)
{
	xen_pfn_t *gfnp = data;
	struct mmap_batch_state *st = state;
	struct vm_area_struct *vma = st->vma;
	struct page **pages = vma->vm_private_data;
	struct page **cur_pages = NULL;
	int ret;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		cur_pages = &pages[st->index];

	BUG_ON(nr < 0);
	ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
					 (int *)gfnp, st->vma->vm_page_prot,
					 st->domain, cur_pages);

	/* Adjust the global_error? */
	if (ret != nr) {
		if (ret == -ENOENT)
			st->global_error = -ENOENT;
		else {
			/* Record that at least one error has happened. */
			if (st->global_error == 0)
				st->global_error = 1;
		}
	}
	st->va += XEN_PAGE_SIZE * nr;
	st->index += nr / XEN_PFN_PER_PAGE;

	return 0;
}
static int mmap_return_error(int err, struct mmap_batch_state *st)
{
	int ret;

	if (st->version == 1) {
		if (err) {
			xen_pfn_t gfn;

			ret = get_user(gfn, st->user_gfn);
			if (ret < 0)
				return ret;
			/*
			 * V1 encodes the error codes in the 32bit top
			 * nibble of the gfn (with its known
			 * limitations vis-a-vis 64 bit callers).
			 */
			gfn |= (err == -ENOENT) ?
				PRIVCMD_MMAPBATCH_PAGED_ERROR :
				PRIVCMD_MMAPBATCH_MFN_ERROR;
			return __put_user(gfn, st->user_gfn++);
		} else
			st->user_gfn++;
	} else { /* st->version == 2 */
		if (err)
			return __put_user(err, st->user_err++);
		else
			st->user_err++;
	}

	return 0;
}
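/*
 * Illustrative only: how a V1 caller might recover per-frame errors from
 * its gfn array after the ioctl. Constants come from xen/privcmd.h; the
 * top-nibble masking sketched here assumes the 32-bit encoding noted above.
 *
 *	xen_pfn_t gfn = arr[i];
 *	if ((gfn & 0xf0000000UL) == PRIVCMD_MMAPBATCH_PAGED_ERROR)
 *		// frame was paged out: retry the batch later
 *	else if ((gfn & 0xf0000000UL) == PRIVCMD_MMAPBATCH_MFN_ERROR)
 *		// mapping this frame failed outright
 */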
static int mmap_return_errors(void *data, int nr, void *state)
{
	struct mmap_batch_state *st = state;
	int *errs = data;
	int i;
	int ret;

	for (i = 0; i < nr; i++) {
		ret = mmap_return_error(errs[i], st);
		if (ret < 0)
			return ret;
	}

	return 0;
}
/* Allocate pfns that are then mapped with gfns from foreign domid. Update
 * the vma with the page info to use later.
 * Returns: 0 if success, otherwise -errno
 */
static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
{
	int rc;
	struct page **pages;

	pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
	if (pages == NULL)
		return -ENOMEM;

	rc = alloc_xenballooned_pages(numpgs, pages);
	if (rc != 0) {
		pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
			numpgs, rc);
		kfree(pages);
		return -ENOMEM;
	}
	BUG_ON(vma->vm_private_data != NULL);
	vma->vm_private_data = pages;

	return 0;
}
static const struct vm_operations_struct privcmd_vm_ops;
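/*
 * Map an array of foreign GFNs into the caller's VMA in one pass, then, if
 * anything failed, walk the array a second time to report per-frame errors
 * (encoded into the gfn array itself for V1, written to m.err for V2).
 */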
static long privcmd_ioctl_mmap_batch(
	struct file *file, void __user *udata, int version)
{
	struct privcmd_data *data = file->private_data;
	int ret;
	struct privcmd_mmapbatch_v2 m;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long nr_pages;
	LIST_HEAD(pagelist);
	struct mmap_batch_state state;

	switch (version) {
	case 1:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
			return -EFAULT;
		/* Returns per-frame error in m.arr. */
		m.err = NULL;
		if (!access_ok(VERIFY_WRITE, m.arr, m.num * sizeof(*m.arr)))
			return -EFAULT;
		break;
	case 2:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
			return -EFAULT;
		/* Returns per-frame error code in m.err. */
		if (!access_ok(VERIFY_WRITE, m.err, m.num * (sizeof(*m.err))))
			return -EFAULT;
		break;
	default:
		return -EINVAL;
	}

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != m.dom)
		return -EPERM;

	nr_pages = DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE);
	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
		return -EINVAL;

	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);

	if (ret)
		goto out;
	if (list_empty(&pagelist)) {
		ret = -EINVAL;
		goto out;
	}

	if (version == 2) {
		/* Zero error array now to only copy back actual errors. */
		if (clear_user(m.err, sizeof(int) * m.num)) {
			ret = -EFAULT;
			goto out;
		}
	}

	down_write(&mm->mmap_sem);

	vma = find_vma(mm, m.addr);
	if (!vma ||
	    vma->vm_ops != &privcmd_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Caller must either:
	 *
	 * Map the whole VMA range, which will also allocate all the
	 * pages required for the auto_translated_physmap case.
	 *
	 * Or
	 *
	 * Map unmapped holes left from a previous map attempt (e.g.,
	 * because those foreign frames were previously paged out).
	 */
	if (vma->vm_private_data == NULL) {
		if (m.addr != vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = alloc_empty_pages(vma, nr_pages);
			if (ret < 0)
				goto out_unlock;
		} else
			vma->vm_private_data = PRIV_VMA_LOCKED;
	} else {
		if (m.addr < vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	state.domain        = m.dom;
	state.vma           = vma;
	state.va            = m.addr;
	state.index         = 0;
	state.global_error  = 0;
	state.version       = version;

	BUILD_BUG_ON(((PAGE_SIZE / sizeof(xen_pfn_t)) % XEN_PFN_PER_PAGE) != 0);
	/* mmap_batch_fn guarantees ret == 0 */
	BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
				    &pagelist, mmap_batch_fn, &state));

	up_write(&mm->mmap_sem);

	if (state.global_error) {
		/* Write back errors in second pass. */
		state.user_gfn = (xen_pfn_t *)m.arr;
		state.user_err = m.err;
		ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
					   &pagelist, mmap_return_errors,
					   &state);
	} else
		ret = 0;

	/* If we have not had any EFAULT-like global errors then set the global
	 * error to -ENOENT if necessary. */
	if ((ret == 0) && (state.global_error == -ENOENT))
		ret = -ENOENT;

out:
	free_page_list(&pagelist);
	return ret;

out_unlock:
	up_write(&mm->mmap_sem);
	goto out;
}
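/*
 * Illustrative only: a hypothetical V2 batch mapping of nr foreign frames,
 * assuming gfns[] and errs[] are caller-provided arrays of length nr and
 * addr is a prior nr-page mmap() of the same privcmd fd.
 *
 *	struct privcmd_mmapbatch_v2 m = {
 *		.num  = nr,
 *		.dom  = domid,
 *		.addr = (__u64)(unsigned long)addr,
 *		.arr  = gfns,
 *		.err  = errs,
 *	};
 *	if (ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH_V2, &m) < 0 && errno == ENOENT)
 *		// some frames were paged out: inspect errs[] and retry
 */

/*
 * Pin the user pages backing each dm_op buffer so the hypervisor may access
 * them while the preemptible hypercall runs. Fails with -ENOSPC if a buffer
 * would overrun the precomputed page budget.
 */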
static int lock_pages(
	struct privcmd_dm_op_buf kbufs[], unsigned int num,
	struct page *pages[], unsigned int nr_pages)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		unsigned int requested;
		int pinned;

		requested = DIV_ROUND_UP(
			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
			PAGE_SIZE);
		if (requested > nr_pages)
			return -ENOSPC;

		pinned = get_user_pages_fast(
			(unsigned long) kbufs[i].uptr,
			requested, FOLL_WRITE, pages);
		if (pinned < 0)
			return pinned;

		nr_pages -= pinned;
		pages += pinned;
	}

	return 0;
}
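/* Drop the page references taken by lock_pages(); tolerates a NULL array. */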
static void unlock_pages(struct page *pages[], unsigned int nr_pages)
{
	unsigned int i;

	if (!pages)
		return;

	for (i = 0; i < nr_pages; i++) {
		if (pages[i])
			put_page(pages[i]);
	}
}
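/*
 * Bounce the userspace buffer descriptors into the kernel, validate them
 * against the module-parameter limits, pin the backing pages and issue a
 * single preemptible dm_op hypercall covering the whole set.
 */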
static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_dm_op kdata;
	struct privcmd_dm_op_buf *kbufs;
	unsigned int nr_pages = 0;
	struct page **pages = NULL;
	struct xen_dm_op_buf *xbufs = NULL;
	unsigned int i;
	long rc;

	if (copy_from_user(&kdata, udata, sizeof(kdata)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
		return -EPERM;

	if (kdata.num == 0)
		return 0;

	if (kdata.num > privcmd_dm_op_max_num)
		return -E2BIG;

	kbufs = kcalloc(kdata.num, sizeof(*kbufs), GFP_KERNEL);
	if (!kbufs)
		return -ENOMEM;

	if (copy_from_user(kbufs, kdata.ubufs,
			   sizeof(*kbufs) * kdata.num)) {
		rc = -EFAULT;
		goto out;
	}

	for (i = 0; i < kdata.num; i++) {
		if (kbufs[i].size > privcmd_dm_op_buf_max_size) {
			rc = -E2BIG;
			goto out;
		}

		if (!access_ok(VERIFY_WRITE, kbufs[i].uptr,
			       kbufs[i].size)) {
			rc = -EFAULT;
			goto out;
		}

		nr_pages += DIV_ROUND_UP(
			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
			PAGE_SIZE);
	}

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages) {
		rc = -ENOMEM;
		goto out;
	}

	xbufs = kcalloc(kdata.num, sizeof(*xbufs), GFP_KERNEL);
	if (!xbufs) {
		rc = -ENOMEM;
		goto out;
	}

	rc = lock_pages(kbufs, kdata.num, pages, nr_pages);
	if (rc)
		goto out;

	for (i = 0; i < kdata.num; i++) {
		set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
		xbufs[i].size = kbufs[i].size;
	}

	xen_preemptible_hcall_begin();
	rc = HYPERVISOR_dm_op(kdata.dom, kdata.num, xbufs);
	xen_preemptible_hcall_end();

out:
	unlock_pages(pages, nr_pages);
	kfree(xbufs);
	kfree(pages);
	kfree(kbufs);

	return rc;
}
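/*
 * Restrict this file handle to a single domain. The restriction is one-way:
 * once set, it can only be re-asserted with the same domid, never widened.
 */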
static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	domid_t dom;

	if (copy_from_user(&dom, udata, sizeof(dom)))
		return -EFAULT;

	/* Set restriction to the specified domain, or check it matches */
	if (data->domid == DOMID_INVALID)
		data->domid = dom;
	else if (data->domid != dom)
		return -EINVAL;

	return 0;
}
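/*
 * Illustrative only: a minimal sketch of the restriction flow for a
 * toolstack that wants to limit a handle to one (hypothetical) domain.
 *
 *	domid_t domid = 7;
 *	int fd = open("/dev/xen/privcmd", O_RDWR);
 *	ioctl(fd, IOCTL_PRIVCMD_RESTRICT, &domid);
 *	// From here on, MMAP/MMAPBATCH/DM_OP against any other domain
 *	// fail with -EPERM, as do raw hypercalls.
 */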
static long privcmd_ioctl(struct file *file,
			  unsigned int cmd, unsigned long data)
{
	int ret = -ENOTTY;
	void __user *udata = (void __user *) data;

	switch (cmd) {
	case IOCTL_PRIVCMD_HYPERCALL:
		ret = privcmd_ioctl_hypercall(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAP:
		ret = privcmd_ioctl_mmap(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH:
		ret = privcmd_ioctl_mmap_batch(file, udata, 1);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH_V2:
		ret = privcmd_ioctl_mmap_batch(file, udata, 2);
		break;

	case IOCTL_PRIVCMD_DM_OP:
		ret = privcmd_ioctl_dm_op(file, udata);
		break;

	case IOCTL_PRIVCMD_RESTRICT:
		ret = privcmd_ioctl_restrict(file, udata);
		break;

	default:
		break;
	}

	return ret;
}
static int privcmd_open(struct inode *ino, struct file *file)
{
	struct privcmd_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	/* DOMID_INVALID implies no restriction */
	data->domid = DOMID_INVALID;

	file->private_data = data;
	return 0;
}
static int privcmd_release(struct inode *ino, struct file *file)
{
	struct privcmd_data *data = file->private_data;

	kfree(data);
	return 0;
}
static void privcmd_close(struct vm_area_struct *vma)
{
	struct page **pages = vma->vm_private_data;
	int numpgs = vma_pages(vma);
	int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
	int rc;

	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
		return;

	rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
	if (rc == 0)
		free_xenballooned_pages(numpgs, pages);
	else
		pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
			numpgs, rc);
	kfree(pages);
}
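/*
 * Mappings in a privcmd VMA are established up front by the ioctls above,
 * never on demand, so any fault reaching here is a caller bug: log it and
 * fail hard.
 */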
static int privcmd_fault(struct vm_fault *vmf)
{
	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
	       vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
	       vmf->pgoff, (void *)vmf->address);

	return VM_FAULT_SIGBUS;
}
static const struct vm_operations_struct privcmd_vm_ops = {
	.close = privcmd_close,
	.fault = privcmd_fault
};
static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
	 * how to recreate these mappings */
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
			 VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &privcmd_vm_ops;
	vma->vm_private_data = NULL;

	return 0;
}
/*
 * For MMAPBATCH*. This allows asserting the singleshot mapping
 * on a per pfn/pte basis. Mapping calls that fail with ENOENT
 * can then be retried until success.
 */
static int is_mapped_fn(pte_t *pte, struct page *pmd_page,
			unsigned long addr, void *data)
{
	return pte_none(*pte) ? 0 : -EBUSY;
}
static int privcmd_vma_range_is_mapped(
	       struct vm_area_struct *vma,
	       unsigned long addr,
	       unsigned long nr_pages)
{
	return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
				   is_mapped_fn, NULL) != 0;
}
const struct file_operations xen_privcmd_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = privcmd_ioctl,
	.open = privcmd_open,
	.release = privcmd_release,
	.mmap = privcmd_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmd_fops);
static struct miscdevice privcmd_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/privcmd",
	.fops = &xen_privcmd_fops,
};
static int __init privcmd_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	err = misc_register(&privcmd_dev);
	if (err != 0) {
		pr_err("Could not register Xen privcmd device\n");
		return err;
	}

	return 0;
}
static void __exit privcmd_exit(void)
{
	misc_deregister(&privcmd_dev);
}
module_init(privcmd_init);
module_exit(privcmd_exit);