/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */
#include <linux/anon_inodes.h>
#include <linux/mman.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>

#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_gem_mman.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
#include "i915_vma.h"
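
/*
 * __vma_matches() below checks that the vma returned by find_vma() is exactly
 * the mapping just created by vm_mmap(), before i915_gem_mmap_ioctl() flips
 * its page protection for I915_MMAP_WC; if the range was racily unmapped or
 * replaced in the meantime, we must not touch the unrelated vma.
 */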
static bool
__vma_matches(struct vm_area_struct *vma, struct file *filp,
	      unsigned long addr, unsigned long size)
{
	if (vma->vm_file != filp)
		return false;

	return vma->vm_start == addr &&
	       (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
}
/**
 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
 *			 it is mapped to.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 *
 * IMPORTANT:
 *
 * DRM driver writers who look at this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on; hiding
 * the mmap call in a driver private ioctl will break that. The i915 driver only
 * does cpu mmaps this way because we didn't know better.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_i915_gem_object *obj;
	unsigned long addr;

	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

	if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
		return -ENODEV;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->base.filp) {
		addr = -ENXIO;
		goto err;
	}

	if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
		addr = -EINVAL;
		goto err;
	}

	addr = vm_mmap(obj->base.filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	if (IS_ERR_VALUE(addr))
		goto err;

	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

		if (mmap_write_lock_killable(mm)) {
			addr = -EINTR;
			goto err;
		}
		vma = find_vma(mm, addr);
		if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		mmap_write_unlock(mm);
		if (IS_ERR_VALUE(addr))
			goto err;
	}
	i915_gem_object_put(obj);

	args->addr_ptr = (u64)addr;

	return 0;

err:
	i915_gem_object_put(obj);
	return addr;
}
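
/*
 * Illustrative userspace usage of the legacy mmap ioctl above (a sketch only,
 * not kernel code; assumes libdrm's drmIoctl() and elides error handling):
 *
 *	struct drm_i915_gem_mmap arg = {
 *		.handle = handle,
 *		.size = size,
 *		.flags = I915_MMAP_WC,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg) == 0)
 *		ptr = (void *)(uintptr_t)arg.addr_ptr;
 */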
static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
}
/**
 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
 *
 * A history of the GTT mmap interface:
 *
 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to be
 *     aligned and suitable for fencing, and still fit into the available
 *     mappable space left by the pinned display objects. A classic problem
 *     we called the page-fault-of-doom where we would ping-pong between
 *     two objects that could not fit inside the GTT and so the memcpy
 *     would page one object in at the expense of the other between every
 *     access.
 *
 * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
 *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
 *     object is too large for the available space (or simply too large
 *     for the mappable aperture!), a view is created instead and faulted
 *     into userspace. (This view is aligned and sized appropriately for
 *     fenced access.)
 *
 * 2 - Recognise WC as a separate cache domain so that we can flush the
 *     delayed writes via GTT before performing direct access via WC.
 *
 * 3 - Remove implicit set-domain(GTT) and synchronisation on initial
 *     pagefault; swapin remains transparent.
 *
 * 4 - Support multiple fault handlers per object depending on object's
 *     backing storage (a.k.a. MMAP_OFFSET).
 *
 * Restrictions:
 *
 *  * snoopable objects cannot be accessed via the GTT. It can cause machine
 *    hangs on some architectures, corruption on others. An attempt to service
 *    a GTT page fault from a snoopable object will generate a SIGBUS.
 *
 *  * the object must be able to fit into RAM (physical memory, though not
 *    limited to the mappable aperture).
 *
 * Caveats:
 *
 *  * a new GTT page fault will synchronize rendering from the GPU and flush
 *    all data to system memory. Subsequent access will not be synchronized.
 *
 *  * all mappings are revoked on runtime device suspend.
 *
 *  * there are only 8, 16 or 32 fence registers to share between all users
 *    (older machines require fence register for display and blitter access
 *    as well). Contention of the fence registers will cause the previous users
 *    to be unmapped and any new access will generate new page faults.
 *
 *  * running out of memory while servicing a fault may generate a SIGBUS,
 *    rather than the expected SIGSEGV.
 */
int i915_gem_mmap_gtt_version(void)
{
	return 4;
}
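
/*
 * Userspace can query the value above via the I915_PARAM_MMAP_GTT_VERSION
 * getparam, e.g. (illustrative sketch, error handling elided):
 *
 *	int value = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_MMAP_GTT_VERSION,
 *		.value = &value,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 */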
static inline struct i915_ggtt_view
compute_partial_view(const struct drm_i915_gem_object *obj,
		     pgoff_t page_offset,
		     unsigned int chunk)
{
	struct i915_ggtt_view view;

	if (i915_gem_object_is_tiled(obj))
		chunk = roundup(chunk, tile_row_pages(obj) ?: 1);

	view.type = I915_GGTT_VIEW_PARTIAL;
	view.partial.offset = rounddown(page_offset, chunk);
	view.partial.size =
		min_t(unsigned int, chunk,
		      (obj->base.size >> PAGE_SHIFT) - view.partial.offset);

	/* If the partial covers the entire object, just create a normal VMA. */
	if (chunk >= obj->base.size >> PAGE_SHIFT)
		view.type = I915_GGTT_VIEW_NORMAL;

	return view;
}
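
/*
 * Example: a fault in the middle of a large untiled object yields a partial
 * view of MIN_CHUNK_PAGES (1MiB worth of pages) whose offset is rounded down
 * to a whole chunk; for tiled objects the chunk is first rounded up to a
 * whole number of tile rows so the view remains suitable for fencing.
 */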
static vm_fault_t i915_error_to_vmf_fault(int err)
{
	switch (err) {
	default:
		WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
		fallthrough;
	case -EIO: /* shmemfs failure from swap device */
	case -EFAULT: /* purged object */
	case -ENODEV: /* bad object, how did you get here! */
	case -ENXIO: /* unable to access backing store (on device) */
		return VM_FAULT_SIGBUS;

	case -ENOMEM: /* our allocation failure */
		return VM_FAULT_OOM;

	case -ENOSPC: /* transient failure to evict? */
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	}
}
static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
{
	struct vm_area_struct *area = vmf->vma;
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	resource_size_t iomap;
	int err;

	/* Sanity check that we allow writing into this object */
	if (unlikely(i915_gem_object_is_readonly(obj) &&
		     area->vm_flags & VM_WRITE))
		return VM_FAULT_SIGBUS;

	if (i915_gem_object_lock_interruptible(obj, NULL))
		return VM_FAULT_NOPAGE;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out;

	iomap = -1;
	if (!i915_gem_object_has_struct_page(obj)) {
		iomap = obj->mm.region->iomap.base;
		iomap -= obj->mm.region->region.start;
	}

	/* PTEs are revoked in obj->ops->put_pages() */
	err = remap_io_sg(area,
			  area->vm_start, area->vm_end - area->vm_start,
			  obj->mm.pages->sgl, iomap);

	if (area->vm_flags & VM_WRITE) {
		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
		obj->mm.dirty = true;
	}

	i915_gem_object_unpin_pages(obj);

out:
	i915_gem_object_unlock(obj);
	return i915_error_to_vmf_fault(err);
}
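
/*
 * Note that vm_fault_cpu() fills the entire vma in one go: remap_io_sg()
 * covers the whole range rather than inserting a single PTE per fault, and
 * those PTEs are later revoked wholesale when the backing pages are released.
 */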
static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
{
#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
	struct vm_area_struct *area = vmf->vma;
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	struct i915_ggtt *ggtt = &i915->ggtt;
	bool write = area->vm_flags & VM_WRITE;
	struct i915_gem_ww_ctx ww;
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	pgoff_t page_offset;
	int srcu;
	int ret;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	wakeref = intel_runtime_pm_get(rpm);

	i915_gem_ww_ctx_init(&ww, true);
retry:
	ret = i915_gem_object_lock(obj, &ww);
	if (ret)
		goto err_rpm;

	/* Sanity check that we allow writing into this object */
	if (i915_gem_object_is_readonly(obj) && write) {
		ret = -EFAULT;
		goto err_rpm;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err_rpm;

	ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu);
	if (ret)
		goto err_pages;

	/* Now pin it into the GTT as needed */
	vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
					  PIN_MAPPABLE |
					  PIN_NONBLOCK /* NOWARN */ |
					  PIN_NOEVICT);
	if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
		/* Use a partial view if it is bigger than available space */
		struct i915_ggtt_view view =
			compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
		unsigned int flags;

		flags = PIN_MAPPABLE | PIN_NOSEARCH;
		if (view.type == I915_GGTT_VIEW_NORMAL)
			flags |= PIN_NONBLOCK; /* avoid warnings for pinned */

		/*
		 * Userspace is now writing through an untracked VMA, abandon
		 * all hope that the hardware is able to track future writes.
		 */

		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
			flags = PIN_MAPPABLE;
			view.type = I915_GGTT_VIEW_PARTIAL;
			vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		}

		/* The entire mappable GGTT is pinned? Unexpected! */
		GEM_BUG_ON(vma == ERR_PTR(-ENOSPC));
	}
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_reset;
	}

	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
		ret = -EFAULT;
		goto err_unpin;
	}

	ret = i915_vma_pin_fence(vma);
	if (ret)
		goto err_unpin;

	/* Finally, remap it using the new GTT offset */
	ret = remap_io_mapping(area,
			       area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
			       (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
			       min_t(u64, vma->size, area->vm_end - area->vm_start),
			       &ggtt->iomap);
	if (ret)
		goto err_fence;

	assert_rpm_wakelock_held(rpm);

	/* Mark as being mmapped into userspace for later revocation */
	mutex_lock(&i915->ggtt.vm.mutex);
	if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
		list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
	mutex_unlock(&i915->ggtt.vm.mutex);

	/* Track the mmo associated with the fenced vma */
	vma->mmo = mmo;

	if (IS_ACTIVE(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND))
		intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));

	if (write) {
		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
		i915_vma_set_ggtt_write(vma);
		obj->mm.dirty = true;
	}

err_fence:
	i915_vma_unpin_fence(vma);
err_unpin:
	__i915_vma_unpin(vma);
err_reset:
	intel_gt_reset_unlock(ggtt->vm.gt, srcu);
err_pages:
	i915_gem_object_unpin_pages(obj);
err_rpm:
	if (ret == -EDEADLK) {
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	intel_runtime_pm_put(rpm, wakeref);
	return i915_error_to_vmf_fault(ret);
}
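
/*
 * In short, the GTT fault path above: takes the ww lock, pins the backing
 * pages, pins the vma into the mappable aperture (falling back to a partial
 * view), grabs a fence if required, inserts the PTEs with remap_io_mapping(),
 * and registers the vma for later revocation on suspend or reset.
 */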
static int
vm_access(struct vm_area_struct *area, unsigned long addr,
	  void *buf, int len, int write)
{
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	struct i915_gem_ww_ctx ww;
	void *vaddr;
	int err = 0;

	if (i915_gem_object_is_readonly(obj) && write)
		return -EACCES;

	addr -= area->vm_start;
	if (addr >= obj->base.size)
		return -EINVAL;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (err)
		goto out;

	/* As this is primarily for debugging, let's focus on simplicity */
	vaddr = i915_gem_object_pin_map(obj, I915_MAP_FORCE_WC);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out;
	}

	if (write) {
		memcpy(vaddr + addr, buf, len);
		__i915_gem_object_flush_map(obj, addr, len);
	} else {
		memcpy(buf, vaddr + addr, len);
	}

	i915_gem_object_unpin_map(obj);
out:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	if (err)
		return err;

	return len;
}
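
/*
 * vm_access() backs access_process_vm() (e.g. ptrace or gdb poking at a
 * client's buffer), which cannot use the normal fault path for VM_IO /
 * VM_PFNMAP mappings; hence the simple pin-map-and-memcpy approach above.
 */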
void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	GEM_BUG_ON(!obj->userfault_count);

	for_each_ggtt_vma(vma, obj)
		i915_vma_revoke_mmap(vma);

	GEM_BUG_ON(obj->userfault_count);
}
/*
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then the pages mapped into userspace must be revoked. Removing
 * the mapping will then trigger a page fault on the next user access,
 * allowing fixup by vm_fault_gtt().
 */
void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	intel_wakeref_t wakeref;

	/*
	 * Serialisation between user GTT access and our code depends upon
	 * revoking the CPU's PTEs whilst the mutex is held. The next user
	 * pagefault then has to wait until we release the mutex.
	 *
	 * Note that RPM complicates this somewhat by adding an additional
	 * requirement that operations to the GGTT be made while holding the
	 * RPM wakeref.
	 */
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	mutex_lock(&i915->ggtt.vm.mutex);

	if (!obj->userfault_count)
		goto out;

	__i915_gem_object_release_mmap_gtt(obj);

	/*
	 * Ensure that the CPU's PTEs are revoked and there are no outstanding
	 * memory transactions from userspace before we return. The TLB
	 * flushing implied by changing the PTEs above *should* be
	 * sufficient; an extra barrier here just provides us with a bit
	 * of paranoid documentation about our requirement to serialise
	 * memory writes before touching registers / GSM.
	 */
	wmb();

out:
	mutex_unlock(&i915->ggtt.vm.mutex);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct i915_mmap_offset *mmo, *mn;

	spin_lock(&obj->mmo.lock);
	rbtree_postorder_for_each_entry_safe(mmo, mn,
					     &obj->mmo.offsets, offset) {
		/*
		 * vma_node_unmap for GTT mmaps handled already in
		 * __i915_gem_object_release_mmap_gtt
		 */
		if (mmo->mmap_type == I915_MMAP_TYPE_GTT)
			continue;

		spin_unlock(&obj->mmo.lock);
		drm_vma_node_unmap(&mmo->vma_node,
				   obj->base.dev->anon_inode->i_mapping);
		spin_lock(&obj->mmo.lock);
	}
	spin_unlock(&obj->mmo.lock);
}
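
/*
 * Each object keeps a small rbtree of mmap offsets in obj->mmo.offsets, with
 * at most one node per mmap_type, ordered by the type value; lookup_mmo() and
 * insert_mmo() below walk and update it under obj->mmo.lock.
 */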
static struct i915_mmap_offset *
lookup_mmo(struct drm_i915_gem_object *obj,
	   enum i915_mmap_type mmap_type)
{
	struct rb_node *rb;

	spin_lock(&obj->mmo.lock);
	rb = obj->mmo.offsets.rb_node;
	while (rb) {
		struct i915_mmap_offset *mmo =
			rb_entry(rb, typeof(*mmo), offset);

		if (mmo->mmap_type == mmap_type) {
			spin_unlock(&obj->mmo.lock);
			return mmo;
		}

		if (mmo->mmap_type < mmap_type)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}
	spin_unlock(&obj->mmo.lock);

	return NULL;
}
static struct i915_mmap_offset *
insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo)
{
	struct rb_node *rb, **p;

	spin_lock(&obj->mmo.lock);
	rb = NULL;
	p = &obj->mmo.offsets.rb_node;
	while (*p) {
		struct i915_mmap_offset *pos;

		rb = *p;
		pos = rb_entry(rb, typeof(*pos), offset);

		if (pos->mmap_type == mmo->mmap_type) {
			spin_unlock(&obj->mmo.lock);
			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
					      &mmo->vma_node);
			kfree(mmo);
			return pos;
		}

		if (pos->mmap_type < mmo->mmap_type)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&mmo->offset, rb, p);
	rb_insert_color(&mmo->offset, &obj->mmo.offsets);
	spin_unlock(&obj->mmo.lock);

	return mmo;
}
static struct i915_mmap_offset *
mmap_offset_attach(struct drm_i915_gem_object *obj,
		   enum i915_mmap_type mmap_type,
		   struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_mmap_offset *mmo;
	int err;

	mmo = lookup_mmo(obj, mmap_type);
	if (mmo)
		goto out;

	mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
	if (!mmo)
		return ERR_PTR(-ENOMEM);

	mmo->obj = obj;
	mmo->mmap_type = mmap_type;
	drm_vma_node_reset(&mmo->vma_node);

	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
	if (likely(!err))
		goto insert;

	/* Attempt to reap some mmap space from dead objects */
	err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT);
	if (err)
		goto err;

	i915_gem_drain_freed_objects(i915);
	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
	if (err)
		goto err;

insert:
	mmo = insert_mmo(obj, mmo);
	GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
out:
	if (file)
		drm_vma_node_allow(&mmo->vma_node, file);
	return mmo;

err:
	kfree(mmo);
	return ERR_PTR(err);
}
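
/*
 * Note the fallback above: if drm_vma_offset_add() fails for lack of space,
 * mmap_offset_attach() retires outstanding requests and drains freed objects
 * to release stale offset nodes, then retries the allocation once before
 * giving up.
 */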
static int
__assign_mmap_offset(struct drm_file *file,
		     u32 handle,
		     enum i915_mmap_type mmap_type,
		     u64 *offset)
{
	struct drm_i915_gem_object *obj;
	struct i915_mmap_offset *mmo;
	int err;

	obj = i915_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	if (i915_gem_object_never_mmap(obj)) {
		err = -ENODEV;
		goto out;
	}

	if (mmap_type != I915_MMAP_TYPE_GTT &&
	    !i915_gem_object_has_struct_page(obj) &&
	    !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM)) {
		err = -ENODEV;
		goto out;
	}

	mmo = mmap_offset_attach(obj, mmap_type, file);
	if (IS_ERR(mmo)) {
		err = PTR_ERR(mmo);
		goto out;
	}

	*offset = drm_vma_node_offset_addr(&mmo->vma_node);
	err = 0;
out:
	i915_gem_object_put(obj);
	return err;
}
int
i915_gem_dumb_mmap_offset(struct drm_file *file,
			  struct drm_device *dev,
			  u32 handle,
			  u64 *offset)
{
	enum i915_mmap_type mmap_type;

	if (boot_cpu_has(X86_FEATURE_PAT))
		mmap_type = I915_MMAP_TYPE_WC;
	else if (!i915_ggtt_has_aperture(&to_i915(dev)->ggtt))
		return -ENODEV;
	else
		mmap_type = I915_MMAP_TYPE_GTT;

	return __assign_mmap_offset(file, handle, mmap_type, offset);
}
/**
 * i915_gem_mmap_offset_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_mmap_offset *args = data;
	enum i915_mmap_type type;
	int err;

	/*
	 * Historically we failed to check args.pad and args.offset
	 * and so we cannot use those fields for user input and we cannot
	 * add -EINVAL for them as the ABI is fixed, i.e. old userspace
	 * may be feeding in garbage in those fields.
	 *
	 * if (args->pad) return -EINVAL; is verboten!
	 */

	err = i915_user_extensions(u64_to_user_ptr(args->extensions),
				   NULL, 0, NULL);
	if (err)
		return err;

	switch (args->flags) {
	case I915_MMAP_OFFSET_GTT:
		if (!i915_ggtt_has_aperture(&i915->ggtt))
			return -ENODEV;
		type = I915_MMAP_TYPE_GTT;
		break;

	case I915_MMAP_OFFSET_WC:
		if (!boot_cpu_has(X86_FEATURE_PAT))
			return -ENODEV;
		type = I915_MMAP_TYPE_WC;
		break;

	case I915_MMAP_OFFSET_WB:
		type = I915_MMAP_TYPE_WB;
		break;

	case I915_MMAP_OFFSET_UC:
		if (!boot_cpu_has(X86_FEATURE_PAT))
			return -ENODEV;
		type = I915_MMAP_TYPE_UC;
		break;

	default:
		return -EINVAL;
	}

	return __assign_mmap_offset(file, args->handle, type, &args->offset);
}
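
/*
 * Illustrative userspace usage of the MMAP_OFFSET flow (a sketch only, not
 * kernel code; assumes libdrm's drmIoctl() and elides error handling):
 *
 *	struct drm_i915_gem_mmap_offset arg = {
 *		.handle = handle,
 *		.flags = I915_MMAP_OFFSET_WC,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.offset);
 */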
static void vm_open(struct vm_area_struct *vma)
{
	struct i915_mmap_offset *mmo = vma->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;

	GEM_BUG_ON(!obj);
	i915_gem_object_get(obj);
}

static void vm_close(struct vm_area_struct *vma)
{
	struct i915_mmap_offset *mmo = vma->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;

	GEM_BUG_ON(!obj);
	i915_gem_object_put(obj);
}
static const struct vm_operations_struct vm_ops_gtt = {
	.fault = vm_fault_gtt,
	.access = vm_access,
	.open = vm_open,
	.close = vm_close,
};

static const struct vm_operations_struct vm_ops_cpu = {
	.fault = vm_fault_cpu,
	.access = vm_access,
	.open = vm_open,
	.close = vm_close,
};
static int singleton_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = file->private_data;

	cmpxchg(&i915->gem.mmap_singleton, file, NULL);
	drm_dev_put(&i915->drm);

	return 0;
}

static const struct file_operations singleton_fops = {
	.owner = THIS_MODULE,
	.release = singleton_release,
};
static struct file *mmap_singleton(struct drm_i915_private *i915)
{
	struct file *file;

	rcu_read_lock();
	file = READ_ONCE(i915->gem.mmap_singleton);
	if (file && !get_file_rcu(file))
		file = NULL;
	rcu_read_unlock();
	if (file)
		return file;

	file = anon_inode_getfile("i915.gem", &singleton_fops, i915, O_RDWR);
	if (IS_ERR(file))
		return file;

	/* Everyone shares a single global address space */
	file->f_mapping = i915->drm.anon_inode->i_mapping;

	smp_store_mb(i915->gem.mmap_singleton, file);
	drm_dev_get(&i915->drm);

	return file;
}
/*
 * This overcomes the limitation in drm_gem_mmap's assignment of a
 * drm_gem_object as the vma->vm_private_data, since we need to be able to
 * resolve multiple mmap offsets which could be tied to a single gem object.
 */
int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_vma_offset_node *node;
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_i915_gem_object *obj = NULL;
	struct i915_mmap_offset *mmo = NULL;
	struct file *anon;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	rcu_read_lock();
	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (node && drm_vma_node_is_allowed(node, priv)) {
		/*
		 * Skip 0-refcnted objects as they are in the process of being
		 * destroyed and will be invalid when the vma manager lock
		 * is released.
		 */
		mmo = container_of(node, struct i915_mmap_offset, vma_node);
		obj = i915_gem_object_get_rcu(mmo->obj);
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
	rcu_read_unlock();
	if (!obj)
		return node ? -EACCES : -EINVAL;

	if (i915_gem_object_is_readonly(obj)) {
		if (vma->vm_flags & VM_WRITE) {
			i915_gem_object_put(obj);
			return -EINVAL;
		}
		vma->vm_flags &= ~VM_MAYWRITE;
	}

	anon = mmap_singleton(to_i915(dev));
	if (IS_ERR(anon)) {
		i915_gem_object_put(obj);
		return PTR_ERR(anon);
	}

	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = mmo;

	/*
	 * We keep the ref on mmo->obj, not vm_file, but we require
	 * vma->vm_file->f_mapping, see vma_link(), for later revocation.
	 * Our userspace is accustomed to having per-file resource cleanup
	 * (i.e. contexts, objects and requests) on their close(fd), which
	 * requires avoiding extraneous references to their filp, hence why
	 * we prefer to use an anonymous file for their mmaps.
	 */
	vma_set_file(vma, anon);
	/* Drop the initial creation reference, the vma is now holding one. */
	fput(anon);

	switch (mmo->mmap_type) {
	case I915_MMAP_TYPE_WC:
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_WB:
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_UC:
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_GTT:
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_gtt;
		break;
	}
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_mman.c"
#endif