/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>
struct i915_mm_struct {
	struct mm_struct *mm;
	struct drm_i915_private *i915;
	struct i915_mmu_notifier *mn;
	struct hlist_node node;
	struct kref kref;
	struct work_struct work;
};
#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>
struct i915_mmu_notifier {
	spinlock_t lock;
	struct hlist_node node;
	struct mmu_notifier mn;
	struct rb_root objects;
	struct workqueue_struct *wq;
};
struct i915_mmu_object {
	struct i915_mmu_notifier *mn;
	struct drm_i915_gem_object *obj;
	struct interval_tree_node it;
	struct list_head link;
	struct work_struct work;
	bool attached;
};
static void cancel_userptr(struct work_struct *work)
{
	struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
	struct drm_i915_gem_object *obj = mo->obj;
	struct work_struct *active;

	/* Cancel any active worker and force us to re-evaluate gup */
	mutex_lock(&obj->mm.lock);
	active = fetch_and_zero(&obj->userptr.work);
	mutex_unlock(&obj->mm.lock);
	if (active)
		goto out;

	i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT, NULL);

	mutex_lock(&obj->base.dev->struct_mutex);

	/* We are inside a kthread context and can't be interrupted */
	if (i915_gem_object_unbind(obj) == 0)
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
	WARN_ONCE(obj->mm.pages,
		  "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_display=%d\n",
		  obj->bind_count,
		  atomic_read(&obj->mm.pages_pin_count),
		  obj->pin_display);

	mutex_unlock(&obj->base.dev->struct_mutex);

out:
	i915_gem_object_put(obj);
}
static void add_object(struct i915_mmu_object *mo)
{
	if (mo->attached)
		return;

	interval_tree_insert(&mo->it, &mo->mn->objects);
	mo->attached = true;
}

static void del_object(struct i915_mmu_object *mo)
{
	if (!mo->attached)
		return;

	interval_tree_remove(&mo->it, &mo->mn->objects);
	mo->attached = false;
}
static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
						       struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end)
{
	struct i915_mmu_notifier *mn =
		container_of(_mn, struct i915_mmu_notifier, mn);
	struct i915_mmu_object *mo;
	struct interval_tree_node *it;
	LIST_HEAD(cancelled);

	if (RB_EMPTY_ROOT(&mn->objects))
		return;

	/* interval ranges are inclusive, but invalidate range is exclusive */
	end--;

	spin_lock(&mn->lock);
	it = interval_tree_iter_first(&mn->objects, start, end);
	while (it) {
		/* The mmu_object is released late when destroying the
		 * GEM object so it is entirely possible to gain a
		 * reference on an object in the process of being freed
		 * since our serialisation is via the spinlock and not
		 * the struct_mutex - and consequently use it after it
		 * is freed and then double free it. To prevent that
		 * use-after-free we only acquire a reference on the
		 * object if it is not in the process of being destroyed.
		 */
		mo = container_of(it, struct i915_mmu_object, it);
		if (kref_get_unless_zero(&mo->obj->base.refcount))
			queue_work(mn->wq, &mo->work);

		list_add(&mo->link, &cancelled);
		it = interval_tree_iter_next(it, start, end);
	}
	list_for_each_entry(mo, &cancelled, link)
		del_object(mo);
	spin_unlock(&mn->lock);

	if (!list_empty(&cancelled))
		flush_workqueue(mn->wq);
}
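/*
 * A minimal, illustrative sketch (not part of this driver; foo, foo_table
 * and foo_find() are hypothetical) of the lookup pattern used above: while
 * holding the spinlock we only take a reference with kref_get_unless_zero(),
 * so an object whose refcount has already dropped to zero - i.e. one that is
 * concurrently being destroyed - is simply skipped rather than resurrected
 * and later double-freed.
 */
#if 0
struct foo {
	struct kref ref;
	/* ... payload ... */
};

static struct foo *foo_lookup_get(struct foo_table *t, u64 key)
{
	struct foo *f;

	spin_lock(&t->lock);
	f = foo_find(t, key);			/* hypothetical lookup helper */
	if (f && !kref_get_unless_zero(&f->ref))
		f = NULL;			/* already on its way out */
	spin_unlock(&t->lock);

	return f;				/* caller now owns a reference */
}
#endif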
static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
	.invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};
static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct mm_struct *mm)
{
	struct i915_mmu_notifier *mn;
	int ret;

	mn = kmalloc(sizeof(*mn), GFP_KERNEL);
	if (mn == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&mn->lock);
	mn->mn.ops = &i915_gem_userptr_notifier;
	mn->objects = RB_ROOT;
	mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0);
	if (mn->wq == NULL) {
		kfree(mn);
		return ERR_PTR(-ENOMEM);
	}

	/* Protected by mmap_sem (write-lock) */
	ret = __mmu_notifier_register(&mn->mn, mm);
	if (ret) {
		destroy_workqueue(mn->wq);
		kfree(mn);
		return ERR_PTR(ret);
	}

	return mn;
}
static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
	struct i915_mmu_object *mo;

	mo = obj->userptr.mmu_object;
	if (mo == NULL)
		return;

	spin_lock(&mo->mn->lock);
	del_object(mo);
	spin_unlock(&mo->mn->lock);
	kfree(mo);

	obj->userptr.mmu_object = NULL;
}
static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
	struct i915_mmu_notifier *mn = mm->mn;

	if (mn)
		return mn;

	down_write(&mm->mm->mmap_sem);
	mutex_lock(&mm->i915->mm_lock);
	if ((mn = mm->mn) == NULL) {
		mn = i915_mmu_notifier_create(mm->mm);
		if (!IS_ERR(mn))
			mm->mn = mn;
	}
	mutex_unlock(&mm->i915->mm_lock);
	up_write(&mm->mm->mmap_sem);

	return mn;
}
static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	struct i915_mmu_notifier *mn;
	struct i915_mmu_object *mo;

	if (flags & I915_USERPTR_UNSYNCHRONIZED)
		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

	if (WARN_ON(obj->userptr.mm == NULL))
		return -EINVAL;

	mn = i915_mmu_notifier_find(obj->userptr.mm);
	if (IS_ERR(mn))
		return PTR_ERR(mn);

	mo = kzalloc(sizeof(*mo), GFP_KERNEL);
	if (mo == NULL)
		return -ENOMEM;

	mo->mn = mn;
	mo->obj = obj;
	mo->it.start = obj->userptr.ptr;
	mo->it.last = obj->userptr.ptr + obj->base.size - 1;
	INIT_WORK(&mo->work, cancel_userptr);

	obj->userptr.mmu_object = mo;
	return 0;
}
static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
	if (mn == NULL)
		return;

	mmu_notifier_unregister(&mn->mn, mm);
	destroy_workqueue(mn->wq);
	kfree(mn);
}
#else

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
		return -ENODEV;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
}

#endif
static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
	struct i915_mm_struct *mm;

	/* Protected by dev_priv->mm_lock */
	hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
		if (mm->mm == real)
			return mm;

	return NULL;
}
static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_mm_struct *mm;
	int ret = 0;

	/* During release of the GEM object we hold the struct_mutex. This
	 * precludes us from calling mmput() at that time as that may be
	 * the last reference and so call exit_mmap(). exit_mmap() will
	 * attempt to reap the vma, and if we were holding a GTT mmap
	 * would then call drm_gem_vm_close() and attempt to reacquire
	 * the struct mutex. So in order to avoid that recursion, we have
	 * to defer releasing the mm reference until after we drop the
	 * struct_mutex, i.e. we need to schedule a worker to do the clean
	 * up.
	 */
	mutex_lock(&dev_priv->mm_lock);
	mm = __i915_mm_struct_find(dev_priv, current->mm);
	if (mm == NULL) {
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (mm == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		kref_init(&mm->kref);
		mm->i915 = to_i915(obj->base.dev);

		mm->mm = current->mm;
		mmgrab(current->mm);

		mm->mn = NULL;

		/* Protected by dev_priv->mm_lock */
		hash_add(dev_priv->mm_structs,
			 &mm->node, (unsigned long)mm->mm);
	} else
		kref_get(&mm->kref);

	obj->userptr.mm = mm;
out:
	mutex_unlock(&dev_priv->mm_lock);
	return ret;
}
static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
	struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
	i915_mmu_notifier_free(mm->mn, mm->mm);
	mmdrop(mm->mm);
	kfree(mm);
}
static void
__i915_mm_struct_free(struct kref *kref)
{
	struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

	/* Protected by dev_priv->mm_lock, which kref_put_mutex() acquired
	 * for us and which we must release once the node is unhashed.
	 */
	hash_del(&mm->node);
	mutex_unlock(&mm->i915->mm_lock);

	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
	queue_work(mm->i915->mm.userptr_wq, &mm->work);
}
static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mm == NULL)
		return;

	kref_put_mutex(&obj->userptr.mm->kref,
		       __i915_mm_struct_free,
		       &to_i915(obj->base.dev)->mm_lock);
	obj->userptr.mm = NULL;
}
struct get_pages_work {
	struct work_struct work;
	struct drm_i915_gem_object *obj;
	struct task_struct *task;
};

#if IS_ENABLED(CONFIG_SWIOTLB)
#define swiotlb_active()	swiotlb_nr_tbl()
#else
#define swiotlb_active()	0
#endif
static int
st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
{
	struct scatterlist *sg;
	int ret, n;

	*st = kmalloc(sizeof(**st), GFP_KERNEL);
	if (*st == NULL)
		return -ENOMEM;

	if (swiotlb_active()) {
		ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
		if (ret)
			goto err;

		for_each_sg((*st)->sgl, sg, num_pages, n)
			sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
	} else {
		ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
						0, num_pages << PAGE_SHIFT,
						GFP_KERNEL);
		if (ret)
			goto err;
	}

	return 0;

err:
	kfree(*st);
	*st = NULL;
	return ret;
}
static struct sg_table *
__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
			     struct page **pvec, int num_pages)
{
	struct sg_table *pages;
	int ret;

	ret = st_set_pages(&pages, pvec, num_pages);
	if (ret)
		return ERR_PTR(ret);

	ret = i915_gem_gtt_prepare_pages(obj, pages);
	if (ret) {
		sg_free_table(pages);
		kfree(pages);
		return ERR_PTR(ret);
	}

	return pages;
}
static int
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
			      bool value)
{
	int ret = 0;

	/* During mm_invalidate_range we need to cancel any userptr that
	 * overlaps the range being invalidated. Doing so requires the
	 * struct_mutex, and that risks recursion. In order to cause
	 * recursion, the user must alias the userptr address space with
	 * a GTT mmapping (possible with a MAP_FIXED) - then when we have
	 * to invalidate that mmapping, mm_invalidate_range is called with
	 * the userptr address *and* the struct_mutex held. To prevent that
	 * we set a flag under the i915_mmu_notifier spinlock to indicate
	 * whether this object is valid.
	 */
#if defined(CONFIG_MMU_NOTIFIER)
	if (obj->userptr.mmu_object == NULL)
		return 0;

	spin_lock(&obj->userptr.mmu_object->mn->lock);
	/* In order to serialise get_pages with an outstanding
	 * cancel_userptr, we must drop the struct_mutex and try again.
	 */
	if (!value)
		del_object(obj->userptr.mmu_object);
	else if (!work_pending(&obj->userptr.mmu_object->work))
		add_object(obj->userptr.mmu_object);
	else
		ret = -EAGAIN;
	spin_unlock(&obj->userptr.mmu_object->mn->lock);
#endif

	return ret;
}
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct drm_i915_gem_object *obj = work->obj;
	const int npages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	ret = -ENOMEM;
	pinned = 0;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_TEMPORARY);
	if (pvec != NULL) {
		struct mm_struct *mm = obj->userptr.mm->mm;
		unsigned int flags = 0;

		if (!obj->userptr.read_only)
			flags |= FOLL_WRITE;

		ret = -EFAULT;
		if (mmget_not_zero(mm)) {
			down_read(&mm->mmap_sem);
			while (pinned < npages) {
				ret = get_user_pages_remote
					(work->task, mm,
					 obj->userptr.ptr + pinned * PAGE_SIZE,
					 npages - pinned,
					 flags,
					 pvec + pinned, NULL, NULL);
				if (ret < 0)
					break;

				pinned += ret;
			}
			up_read(&mm->mmap_sem);
			mmput(mm);
		}
	}

	mutex_lock(&obj->mm.lock);
	if (obj->userptr.work == &work->work) {
		struct sg_table *pages = ERR_PTR(ret);

		if (pinned == npages) {
			pages = __i915_gem_userptr_set_pages(obj, pvec, npages);
			if (!IS_ERR(pages)) {
				__i915_gem_object_set_pages(obj, pages);
				pinned = 0;
				pages = NULL;
			}
		}

		obj->userptr.work = ERR_CAST(pages);
		if (IS_ERR(pages))
			__i915_gem_userptr_set_active(obj, false);
	}
	mutex_unlock(&obj->mm.lock);

	release_pages(pvec, pinned, 0);
	kvfree(pvec);

	i915_gem_object_put(obj);
	put_task_struct(work->task);
	kfree(work);
}
static struct sg_table *
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
{
	struct get_pages_work *work;

	/* Spawn a worker so that we can acquire the
	 * user pages without holding our mutex. Access
	 * to the user pages requires mmap_sem, and we have
	 * a strict lock ordering of mmap_sem, struct_mutex -
	 * we already hold struct_mutex here and so cannot
	 * call gup without encountering a lock inversion.
	 *
	 * Userspace will keep on repeating the operation
	 * (thanks to EAGAIN) until either we hit the fast
	 * path or the worker completes. If the worker is
	 * cancelled or superseded, the task is still run
	 * but the results ignored. (This leads to
	 * complications that we may have a stray object
	 * refcount that we need to be wary of when
	 * checking for existing objects during creation.)
	 * If the worker encounters an error, it reports
	 * that error back to this function through
	 * obj->userptr.work = ERR_PTR.
	 */
	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return ERR_PTR(-ENOMEM);

	obj->userptr.work = &work->work;

	work->obj = i915_gem_object_get(obj);

	work->task = current;
	get_task_struct(work->task);

	INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
	queue_work(to_i915(obj->base.dev)->mm.userptr_wq, &work->work);

	return ERR_PTR(-EAGAIN);
}
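/*
 * Illustrative only (not driver code): the -EAGAIN contract above relies on
 * the caller simply retrying. On the userspace side, libdrm's drmIoctl()
 * already loops while errno is EINTR or EAGAIN, so an ordinary submission
 * keeps re-entering the ioctl until the acquire worker has either populated
 * the pages or stashed an error in obj->userptr.work.
 */
#if 0
	struct drm_i915_gem_execbuffer2 execbuf;

	memset(&execbuf, 0, sizeof(execbuf));
	/* ... point execbuf at a batch that references the userptr bo ... */

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf))
		perror("execbuffer2");
#endif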
static struct sg_table *
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct mm_struct *mm = obj->userptr.mm->mm;
	struct page **pvec;
	struct sg_table *pages;
	bool active;
	int pinned;

	/* If userspace should engineer that these pages are replaced in
	 * the vma between us binding this page into the GTT and completion
	 * of rendering... Their loss. If they change the mapping of their
	 * pages they need to create a new bo to point to the new vma.
	 *
	 * However, that still leaves open the possibility of the vma
	 * being copied upon fork. Which falls under the same userspace
	 * synchronisation issue as a regular bo, except that this time
	 * the process may not be expecting that a particular piece of
	 * memory is tied to the GPU.
	 *
	 * Fortunately, we can hook into the mmu_notifier in order to
	 * discard the page references prior to anything nasty happening
	 * to the vma (discard or cloning) which should prevent the more
	 * egregious cases from causing harm.
	 */

	if (obj->userptr.work) {
		/* active flag should still be held for the pending work */
		if (IS_ERR(obj->userptr.work))
			return ERR_CAST(obj->userptr.work);
		else
			return ERR_PTR(-EAGAIN);
	}

	pvec = NULL;
	pinned = 0;

	if (mm == current->mm) {
		pvec = kvmalloc_array(num_pages, sizeof(struct page *),
				      GFP_TEMPORARY |
				      __GFP_NORETRY |
				      __GFP_NOWARN);
		if (pvec) /* defer to worker if malloc fails */
			pinned = __get_user_pages_fast(obj->userptr.ptr,
						       num_pages,
						       !obj->userptr.read_only,
						       pvec);
	}

	active = false;
	if (pinned < 0) {
		pages = ERR_PTR(pinned);
		pinned = 0;
	} else if (pinned < num_pages) {
		pages = __i915_gem_userptr_get_pages_schedule(obj);
		active = pages == ERR_PTR(-EAGAIN);
	} else {
		pages = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
		active = !IS_ERR(pages);
	}
	if (active)
		__i915_gem_userptr_set_active(obj, true);

	if (IS_ERR(pages))
		release_pages(pvec, pinned, 0);
	kvfree(pvec);

	return pages;
}
static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;

	BUG_ON(obj->userptr.work != NULL);
	__i915_gem_userptr_set_active(obj, false);

	if (obj->mm.madv != I915_MADV_WILLNEED)
		obj->mm.dirty = false;

	i915_gem_gtt_finish_pages(obj, pages);

	for_each_sgt_page(page, sgt_iter, pages) {
		if (obj->mm.dirty)
			set_page_dirty(page);

		mark_page_accessed(page);
		put_page(page);
	}
	obj->mm.dirty = false;

	sg_free_table(pages);
	kfree(pages);
}
static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	i915_gem_userptr_release__mmu_notifier(obj);
	i915_gem_userptr_release__mm_struct(obj);
}
static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mmu_object)
		return 0;

	return i915_gem_userptr_init__mmu_notifier(obj, 0);
}
static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.release = i915_gem_userptr_release,
};
/**
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note, that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
		/* We cannot support coherent userptr objects on hw without
		 * LLC and with broken snooping.
		 */
		return -ENODEV;
	}

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED))
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
		       (char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/* On almost all of the current hw, we cannot tell the GPU that a
		 * page is readonly, so this is just a placeholder in the uAPI.
		 */
		return -ENODEV;
	}

	obj = i915_gem_object_alloc(dev_priv);
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops);
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_LLC;
	obj->cache_coherent = i915_gem_object_is_coherent(obj);
	obj->cache_dirty = !obj->cache_coherent;

	obj->userptr.ptr = args->user_ptr;
	obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = i915_gem_userptr_init__mm_struct(obj);
	if (ret == 0)
		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}
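/*
 * Illustrative only (not driver code): a minimal userspace sketch of the
 * ioctl documented above, wrapping an existing page-aligned allocation in a
 * GEM handle. It assumes an open render-node fd, libdrm's drmIoctl() and
 * the uapi definitions from i915_drm.h; use_handle() is a hypothetical
 * stand-in for whatever the application does with the new bo.
 */
#if 0
	struct drm_i915_gem_userptr userptr;
	size_t size = 16 * 4096;			/* must be page aligned */
	void *ptr = aligned_alloc(4096, size);

	memset(&userptr, 0, sizeof(userptr));
	userptr.user_ptr = (uintptr_t)ptr;
	userptr.user_size = size;
	userptr.flags = 0;				/* synchronised, read-write */

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr) == 0)
		use_handle(userptr.handle);		/* handle now aliases ptr[] */
#endif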
int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
	mutex_init(&dev_priv->mm_lock);
	hash_init(dev_priv->mm_structs);

	dev_priv->mm.userptr_wq =
		alloc_workqueue("i915-userptr-acquire", WQ_HIGHPRI, 0);
	if (!dev_priv->mm.userptr_wq)
		return -ENOMEM;

	return 0;
}

void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->mm.userptr_wq);
}