/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <linux/sched/mm.h>
#include <drm/drm_gem.h>

#include "display/intel_frontbuffer.h"

#include "gem/i915_gem_lmem.h"
#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_vma.h"
#include "i915_vma_resource.h"
static struct kmem_cache *slab_vmas;

static struct i915_vma *i915_vma_alloc(void)
{
	return kmem_cache_zalloc(slab_vmas, GFP_KERNEL);
}

static void i915_vma_free(struct i915_vma *vma)
{
	return kmem_cache_free(slab_vmas, vma);
}
#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
	char buf[512];

	if (!vma->node.stack) {
		DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
				 vma->node.start, vma->node.size, reason);
		return;
	}

	stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0);
	DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
			 vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif
static inline struct i915_vma *active_to_vma(struct i915_active *ref)
{
	return container_of(ref, typeof(struct i915_vma), active);
}

static int __i915_vma_active(struct i915_active *ref)
{
	return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
}

static void __i915_vma_retire(struct i915_active *ref)
{
	i915_vma_put(active_to_vma(ref));
}
static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *pos = ERR_PTR(-E2BIG);
	struct i915_vma *vma;
	struct rb_node *rb, **p;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);

	vma = i915_vma_alloc();
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&vma->ref);
	vma->vm = i915_vm_get(vm);
	vma->ops = &vm->vma_ops;
	vma->obj = obj;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire, 0);

	/* Declare ourselves safe for use inside shrinkers */
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&vma->active.mutex);
		fs_reclaim_release(GFP_KERNEL);
	}

	INIT_LIST_HEAD(&vma->closed_link);

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		} else if (view->type == I915_GGTT_VIEW_REMAPPED) {
			vma->size = intel_remapped_info_size(&view->remapped);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	spin_lock(&obj->vma.lock);

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_unlock;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_unlock;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
	}

	rb = NULL;
	p = &obj->vma.tree.rb_node;
	while (*p) {
		long cmp;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);

		/*
		 * If the view already exists in the tree, another thread
		 * already created a matching vma, so return the older instance
		 * and dispose of ours.
		 */
		cmp = i915_vma_compare(pos, vm, view);
		if (cmp < 0)
			p = &rb->rb_right;
		else if (cmp > 0)
			p = &rb->rb_left;
		else
			goto err_unlock;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma.tree);

	if (i915_vma_is_ggtt(vma))
		/*
		 * We put the GGTT vma at the start of the vma-list, followed
		 * by the ppGGTT vma. This allows us to break early when
		 * iterating over only the GGTT vma for an object, see
		 * for_each_ggtt_vma()
		 */
		list_add(&vma->obj_link, &obj->vma.list);
	else
		list_add_tail(&vma->obj_link, &obj->vma.list);

	spin_unlock(&obj->vma.lock);

	return vma;

err_unlock:
	spin_unlock(&obj->vma.lock);
err_vma:
	i915_vm_put(vm);
	i915_vma_free(vma);
	return pos;
}
static struct i915_vma *
i915_vma_lookup(struct drm_i915_gem_object *obj,
		struct i915_address_space *vm,
		const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma.tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}
/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));
	GEM_BUG_ON(!atomic_read(&vm->open));

	spin_lock(&obj->vma.lock);
	vma = i915_vma_lookup(obj, vm, view);
	spin_unlock(&obj->vma.lock);

	/* vma_create() will resolve the race if another creates the vma */
	if (unlikely(!vma))
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	return vma;
}
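
/*
 * Illustrative usage sketch (not driver code) of the lookup-or-create flow
 * above, assuming the caller holds a reference on @obj and the vm is open:
 *
 *	struct i915_vma *vma;
 *
 *	vma = i915_vma_instance(obj, vm, NULL); // NULL view == NORMAL view
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 * Repeated calls with the same (obj, vm, view) triple return the same vma,
 * so callers never need to clean up a "duplicate" instance themselves.
 */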
struct i915_vma_work {
	struct dma_fence_work base;
	struct i915_address_space *vm;
	struct i915_vm_pt_stash stash;
	struct i915_vma_resource *vma_res;
	struct drm_i915_gem_object *pinned;
	struct i915_sw_dma_fence_cb cb;
	enum i915_cache_level cache_level;
	unsigned int flags;
};
static void __vma_bind(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
	struct i915_vma_resource *vma_res = vw->vma_res;

	vma_res->ops->bind_vma(vma_res->vm, &vw->stash,
			       vma_res, vw->cache_level, vw->flags);
}
static void __vma_release(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);

	if (vw->pinned)
		i915_gem_object_put(vw->pinned);

	i915_vm_free_pt_stash(vw->vm, &vw->stash);
	i915_vm_put(vw->vm);
	if (vw->vma_res)
		i915_vma_resource_put(vw->vma_res);
}

static const struct dma_fence_work_ops bind_ops = {
	.name = "bind",
	.work = __vma_bind,
	.release = __vma_release,
};
struct i915_vma_work *i915_vma_work(void)
{
	struct i915_vma_work *vw;

	vw = kzalloc(sizeof(*vw), GFP_KERNEL);
	if (!vw)
		return NULL;

	dma_fence_work_init(&vw->base, &bind_ops);
	vw->base.dma.error = -EAGAIN; /* disable the worker by default */

	return vw;
}
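
/*
 * Note the -EAGAIN preset above: a freshly allocated work item is inert,
 * so committing it without further setup only runs the release callback.
 * Only once i915_vma_bind() has fully staged an async bind does it clear
 * the error (see "enable the queue_work()" there); error paths simply
 * commit the still-errored work to unwind its references.
 */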
int i915_vma_wait_for_bind(struct i915_vma *vma)
{
	int err = 0;

	if (rcu_access_pointer(vma->active.excl.fence)) {
		struct dma_fence *fence;

		rcu_read_lock();
		fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
		rcu_read_unlock();
		if (fence) {
			err = dma_fence_wait(fence, true);
			dma_fence_put(fence);
		}
	}

	return err;
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
static int i915_vma_verify_bind_complete(struct i915_vma *vma)
{
	struct dma_fence *fence = i915_active_fence_get(&vma->active.excl);
	int err;

	if (!fence)
		return 0;

	if (dma_fence_is_signaled(fence))
		err = fence->error;
	else
		err = -EBUSY;

	dma_fence_put(fence);

	return err;
}
#else
#define i915_vma_verify_bind_complete(_vma) 0
#endif
I915_SELFTEST_EXPORT void
i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res,
				struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes,
			       obj->mm.rsgt, i915_gem_object_is_readonly(obj),
			       i915_gem_object_is_lmem(obj), obj->mm.region,
			       vma->ops, vma->private, vma->node.start,
			       vma->node.size, vma->size);
}
/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 * @work: preallocated worker for allocating and binding the PTE
 * @vma_res: pointer to a preallocated vma resource. The resource is either
 * consumed or freed.
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma,
		  enum i915_cache_level cache_level,
		  u32 flags,
		  struct i915_vma_work *work,
		  struct i915_vma_resource *vma_res)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	lockdep_assert_held(&vma->vm->mutex);
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
					      vma->node.size,
					      vma->vm->total))) {
		i915_vma_resource_free(vma_res);
		return -ENODEV;
	}

	if (GEM_DEBUG_WARN_ON(!flags)) {
		i915_vma_resource_free(vma_res);
		return -EINVAL;
	}

	bind_flags = flags;
	bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	vma_flags = atomic_read(&vma->flags);
	vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	bind_flags &= ~vma_flags;
	if (bind_flags == 0) {
		i915_vma_resource_free(vma_res);
		return 0;
	}

	GEM_BUG_ON(!atomic_read(&vma->pages_count));

	/* Wait for or await async unbinds touching our range */
	if (work && bind_flags & vma->vm->bind_async_flags)
		ret = i915_vma_resource_bind_dep_await(vma->vm,
						       &work->base.chain,
						       vma->node.start,
						       vma->node.size,
						       true,
						       GFP_NOWAIT |
						       __GFP_RETRY_MAYFAIL |
						       __GFP_NOWARN);
	else
		ret = i915_vma_resource_bind_dep_sync(vma->vm, vma->node.start,
						      vma->node.size, true);
	if (ret) {
		i915_vma_resource_free(vma_res);
		return ret;
	}

	if (vma->resource || !vma_res) {
		/* Rebinding with an additional I915_VMA_*_BIND */
		GEM_WARN_ON(!vma_flags);
		i915_vma_resource_free(vma_res);
	} else {
		i915_vma_resource_init_from_vma(vma_res, vma);
		vma->resource = vma_res;
	}
	trace_i915_vma_bind(vma, bind_flags);
	if (work && bind_flags & vma->vm->bind_async_flags) {
		struct dma_fence *prev;

		work->vma_res = i915_vma_resource_get(vma->resource);
		work->cache_level = cache_level;
		work->flags = bind_flags;

		/*
		 * Note we only want to chain up to the migration fence on
		 * the pages (not the object itself). As we don't track that,
		 * yet, we have to use the exclusive fence instead.
		 *
		 * Also note that we do not want to track the async vma as
		 * part of the obj->resv->excl_fence as it only affects
		 * execution and not content or object's backing store lifetime.
		 */
		prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
		if (prev) {
			__i915_sw_fence_await_dma_fence(&work->base.chain,
							prev,
							&work->cb);
			dma_fence_put(prev);
		}

		work->base.dma.error = 0; /* enable the queue_work() */

		/*
		 * If we don't have the refcounted pages list, keep a reference
		 * on the object to avoid waiting for the async bind to
		 * complete in the object destruction path.
		 */
		if (!work->vma_res->bi.pages_rsgt)
			work->pinned = i915_gem_object_get(vma->obj);
	} else {
		if (vma->obj) {
			ret = i915_gem_object_wait_moving_fence(vma->obj, true);
			if (ret) {
				i915_vma_resource_free(vma->resource);
				vma->resource = NULL;

				return ret;
			}
		}
		vma->ops->bind_vma(vma->vm, NULL, vma->resource, cache_level,
				   bind_flags);
	}

	atomic_or(bind_flags, &vma->flags);
	return 0;
}
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	if (!i915_gem_object_is_lmem(vma->obj)) {
		if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
			err = -ENODEV;
			goto err;
		}
	}

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
	GEM_BUG_ON(i915_vma_verify_bind_complete(vma));

	ptr = READ_ONCE(vma->iomap);
	if (ptr == NULL) {
		/*
		 * TODO: consider just using i915_gem_object_pin_map() for lmem
		 * instead, which already supports mapping non-contiguous chunks
		 * of pages, that way we can also drop the
		 * I915_BO_ALLOC_CONTIGUOUS when allocating the object.
		 */
		if (i915_gem_object_is_lmem(vma->obj))
			ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
							  vma->obj->base.size);
		else
			ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
						vma->node.start,
						vma->node.size);
		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
			io_mapping_unmap(ptr);
			ptr = vma->iomap;
		}
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	i915_vma_set_ggtt_write(vma);

	/* NB Access through the GTT requires the device to be awake. */
	return ptr;

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IO_ERR_PTR(err);
}
void i915_vma_flush_writes(struct i915_vma *vma)
{
	if (i915_vma_unset_ggtt_write(vma))
		intel_gt_flush_ggtt_writes(vma->vm->gt);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->iomap == NULL);

	i915_vma_flush_writes(vma);

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
}
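
/*
 * Illustrative pairing for the two helpers above (not driver code):
 *
 *	ptr = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	writel(value, ptr + offset);
 *	i915_vma_unpin_iomap(vma); // flushes writes, drops fence + pin
 *
 * The mapping itself is cached in vma->iomap and stays around until the
 * vma is unbound, so repeated pin/unpin cycles are cheap.
 */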
void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;
	GEM_BUG_ON(!obj);

	i915_vma_unpin(vma);

	if (flags & I915_VMA_RELEASE_MAP)
		i915_gem_object_unpin_map(obj);

	i915_gem_object_put(obj);
}
bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
		return true;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	else
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}
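
/*
 * Example of the two predicates above: with a 256MiB mappable aperture, a
 * vma whose fence_size is 1MiB placed at offset 255MiB is mappable
 * (255MiB + 1MiB <= mappable_end), and it is fenceable only if its node
 * is at least fence_size bytes and starts on a fence_alignment boundary.
 * Both must hold before a fence register may be used on the vma.
 */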
bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (!i915_vm_has_cache_coloring(vma->vm))
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(node))
		return false;

	return true;
}
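
/*
 * Worked example of the coloring rule above: if node A (snooped) and node
 * B (uncached) would sit adjacent in the GTT, a guard hole must separate
 * them:
 *
 *	| A (color 1) | hole | B (color 2) |	-> valid
 *	| A (color 1) | B (color 2) |		-> invalid
 *
 * Neighbours of equal color may touch; i915_node_color_differs() encodes
 * exactly that comparison.
 */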
/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	unsigned long color;
	u64 start, end;
	int ret;

	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -ENOSPC;
	}

	color = 0;
	if (i915_vm_has_cache_coloring(vma->vm))
		color = vma->obj->cache_level;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;
		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end))
			return -EINVAL;

		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
					   size, offset, color,
					   flags);
		if (ret)
			return ret;
	} else {
		/*
		 * We only support huge gtt pages through the 48b PPGTT,
		 * however we also don't want to force any alignment for
		 * objects which need to be tightly packed into the low 32bits.
		 *
		 * Note that we assume that GGTT are limited to 4GiB for the
		 * foreseeable future. See also i915_ggtt_offset().
		 */
		if (upper_32_bits(end - 1) &&
		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
			/*
			 * We can't mix 64K and 4K PTEs in the same page-table
			 * (2M block), and so to avoid the ugliness and
			 * complexity of coloring we opt for just aligning 64K
			 * objects to 2M.
			 */
			u64 page_alignment =
				rounddown_pow_of_two(vma->page_sizes.sg |
						     I915_GTT_PAGE_SIZE_2M);

			/*
			 * Check we don't expand for the limited Global GTT
			 * (mappable aperture is even more precious!). This
			 * also checks that we exclude the aliasing-ppgtt.
			 */
			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
		}

		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
					  size, alignment, color,
					  start, end, flags);
		if (ret)
			return ret;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));

	list_add_tail(&vma->vm_link, &vma->vm->bound_list);

	return 0;
}
static void
i915_vma_detach(struct i915_vma *vma)
{
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	/*
	 * And finally now the object is completely decoupled from this
	 * vma, we can drop its hold on the backing storage and allow
	 * it to be reaped by the shrinker.
	 */
	list_del(&vma->vm_link);
}
static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
{
	unsigned int bound;
	bool pinned = true;

	bound = atomic_read(&vma->flags);
	do {
		if (unlikely(flags & ~bound))
			return false;

		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
			return false;

		if (!(bound & I915_VMA_PIN_MASK))
			goto unpinned;

		GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));

	return true;

unpinned:
	/*
	 * If pin_count==0, but we are bound, check under the lock to avoid
	 * racing with a concurrent i915_vma_unbind().
	 */
	mutex_lock(&vma->vm->mutex);
	do {
		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
			pinned = false;
			break;
		}

		if (unlikely(flags & ~bound)) {
			pinned = false;
			break;
		}
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
	mutex_unlock(&vma->vm->mutex);

	return pinned;
}
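
/*
 * try_qad_pin() ("quick and dirty" pin) is the lock-free fast path used by
 * i915_vma_pin_ww(): when the vma is already bound with the requested
 * flags and has a nonzero pin count, taking an extra pin is a single
 * cmpxchg on vma->flags and vm->mutex is never touched. Only the
 * pin_count == 0 case above needs the mutex, to serialise against a
 * concurrent i915_vma_unbind().
 */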
static struct scatterlist *
rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
	     unsigned int width, unsigned int height,
	     unsigned int src_stride, unsigned int dst_stride,
	     struct sg_table *st, struct scatterlist *sg)
{
	unsigned int column, row;
	unsigned int src_idx;

	for (column = 0; column < width; column++) {
		unsigned int left;

		src_idx = src_stride * (height - 1) + column + offset;
		for (row = 0; row < height; row++) {
			st->nents++;
			/*
			 * We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
			 */
			sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
			sg_dma_address(sg) =
				i915_gem_object_get_dma_address(obj, src_idx);
			sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
			sg = sg_next(sg);
			src_idx -= src_stride;
		}

		left = (dst_stride - height) * I915_GTT_PAGE_SIZE;

		if (!left)
			continue;

		st->nents++;

		/*
		 * The DE ignores the PTEs for the padding tiles, the sg entry
		 * here is just a convenience to indicate how many padding PTEs
		 * to insert at this spot.
		 */
		sg_set_page(sg, NULL, left, 0);
		sg_dma_address(sg) = 0;
		sg_dma_len(sg) = left;
		sg = sg_next(sg);
	}

	return sg;
}
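
/*
 * Worked example for the walk above: a 2x2 tile plane with offset 0,
 * src_stride 2 and dst_stride 2 emits source pages bottom-up, one column
 * at a time, i.e. in the order 2, 0, 3, 1. Read back linearly through the
 * GTT, that is the 90 degree rotated image. Padding entries (when
 * dst_stride > height) carry no DMA address and merely reserve PTE slots.
 */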
static noinline struct sg_table *
intel_rotate_pages(struct intel_rotation_info *rot_info,
		   struct drm_i915_gem_object *obj)
{
	unsigned int size = intel_rotation_info_size(rot_info);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	int ret = -ENOMEM;
	int i;

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	st->nents = 0;
	sg = st->sgl;

	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
		sg = rotate_pages(obj, rot_info->plane[i].offset,
				  rot_info->plane[i].width, rot_info->plane[i].height,
				  rot_info->plane[i].src_stride,
				  rot_info->plane[i].dst_stride,
				  st, sg);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:

	drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
		obj->base.size, rot_info->plane[0].width,
		rot_info->plane[0].height, size);

	return ERR_PTR(ret);
}
static struct scatterlist *
remap_pages(struct drm_i915_gem_object *obj,
	    unsigned int offset, unsigned int alignment_pad,
	    unsigned int width, unsigned int height,
	    unsigned int src_stride, unsigned int dst_stride,
	    struct sg_table *st, struct scatterlist *sg)
{
	unsigned int row;

	if (!width || !height)
		return sg;

	if (alignment_pad) {
		st->nents++;

		/*
		 * The DE ignores the PTEs for the padding tiles, the sg entry
		 * here is just a convenience to indicate how many padding PTEs
		 * to insert at this spot.
		 */
		sg_set_page(sg, NULL, alignment_pad * 4096, 0);
		sg_dma_address(sg) = 0;
		sg_dma_len(sg) = alignment_pad * 4096;
		sg = sg_next(sg);
	}

	for (row = 0; row < height; row++) {
		unsigned int left = width * I915_GTT_PAGE_SIZE;

		while (left) {
			dma_addr_t addr;
			unsigned int length;

			/*
			 * We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
			 */
			addr = i915_gem_object_get_dma_address_len(obj, offset, &length);

			length = min(left, length);

			st->nents++;

			sg_set_page(sg, NULL, length, 0);
			sg_dma_address(sg) = addr;
			sg_dma_len(sg) = length;
			sg = sg_next(sg);

			offset += length / I915_GTT_PAGE_SIZE;
			left -= length;
		}

		offset += src_stride - width;

		left = (dst_stride - width) * I915_GTT_PAGE_SIZE;

		if (!left)
			continue;

		st->nents++;

		/*
		 * The DE ignores the PTEs for the padding tiles, the sg entry
		 * here is just a convenience to indicate how many padding PTEs
		 * to insert at this spot.
		 */
		sg_set_page(sg, NULL, left, 0);
		sg_dma_address(sg) = 0;
		sg_dma_len(sg) = left;
		sg = sg_next(sg);
	}

	return sg;
}
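
/*
 * Unlike rotate_pages() above, remap_pages() walks the source row by row
 * and can cover a physically contiguous run with a single sg entry:
 * i915_gem_object_get_dma_address_len() reports how long the run is, so
 * the resulting table is typically far shorter than one entry per page.
 */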
static noinline struct sg_table *
intel_remap_pages(struct intel_remapped_info *rem_info,
		  struct drm_i915_gem_object *obj)
{
	unsigned int size = intel_remapped_info_size(rem_info);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int gtt_offset = 0;
	int ret = -ENOMEM;
	int i;

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	st->nents = 0;
	sg = st->sgl;

	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
		unsigned int alignment_pad = 0;

		if (rem_info->plane_alignment)
			alignment_pad = ALIGN(gtt_offset, rem_info->plane_alignment) - gtt_offset;

		sg = remap_pages(obj,
				 rem_info->plane[i].offset, alignment_pad,
				 rem_info->plane[i].width, rem_info->plane[i].height,
				 rem_info->plane[i].src_stride, rem_info->plane[i].dst_stride,
				 st, sg);

		gtt_offset += alignment_pad +
			      rem_info->plane[i].dst_stride * rem_info->plane[i].height;
	}

	i915_sg_trim(st);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:

	drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
		obj->base.size, rem_info->plane[0].width,
		rem_info->plane[0].height, size);

	return ERR_PTR(ret);
}
static noinline struct sg_table *
intel_partial_pages(const struct i915_ggtt_view *view,
		    struct drm_i915_gem_object *obj)
{
	struct sg_table *st;
	struct scatterlist *sg, *iter;
	unsigned int count = view->partial.size;
	unsigned int offset;
	int ret = -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, count, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset);
	GEM_BUG_ON(!iter);

	sg = st->sgl;
	st->nents = 0;
	do {
		unsigned int len;

		len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
			  count << PAGE_SHIFT);
		sg_set_page(sg, NULL, len, 0);
		sg_dma_address(sg) =
			sg_dma_address(iter) + (offset << PAGE_SHIFT);
		sg_dma_len(sg) = len;

		st->nents++;
		count -= len >> PAGE_SHIFT;
		if (count == 0) {
			sg_mark_end(sg);
			i915_sg_trim(st); /* Drop any unused tail entries. */

			return st;
		}

		sg = __sg_next(sg);
		iter = __sg_next(iter);
		offset = 0;
	} while (1);

err_sg_alloc:
	kfree(st);
err_st_alloc:
	return ERR_PTR(ret);
}
static int
__i915_vma_get_pages(struct i915_vma *vma)
{
	struct sg_table *pages;
	int ret;

	/*
	 * The vma->pages are only valid within the lifespan of the borrowed
	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
	 * must be the vma->pages. A simple rule is that vma->pages must only
	 * be accessed when the obj->mm.pages are pinned.
	 */
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));

	switch (vma->ggtt_view.type) {
	default:
		GEM_BUG_ON(vma->ggtt_view.type);
		fallthrough;
	case I915_GGTT_VIEW_NORMAL:
		pages = vma->obj->mm.pages;
		break;

	case I915_GGTT_VIEW_ROTATED:
		pages =
			intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
		break;

	case I915_GGTT_VIEW_REMAPPED:
		pages =
			intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
		break;

	case I915_GGTT_VIEW_PARTIAL:
		pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
		break;
	}

	ret = 0;
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		pages = NULL;
		drm_err(&vma->vm->i915->drm,
			"Failed to get pages for VMA view type %u (%d)!\n",
			vma->ggtt_view.type, ret);
	}

	vma->pages = pages;

	return ret;
}
I915_SELFTEST_EXPORT int i915_vma_get_pages(struct i915_vma *vma)
{
	int err;

	if (atomic_add_unless(&vma->pages_count, 1, 0))
		return 0;

	err = i915_gem_object_pin_pages(vma->obj);
	if (err)
		return err;

	err = __i915_vma_get_pages(vma);
	if (err)
		goto err_unpin;

	vma->page_sizes = vma->obj->mm.page_sizes;
	atomic_inc(&vma->pages_count);

	return 0;

err_unpin:
	__i915_gem_object_unpin_pages(vma->obj);

	return err;
}
static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
	/* We allocate under vma_get_pages, so beware the shrinker */
	struct sg_table *pages = READ_ONCE(vma->pages);

	GEM_BUG_ON(atomic_read(&vma->pages_count) < count);

	if (atomic_sub_return(count, &vma->pages_count) == 0) {
		/*
		 * The atomic_sub_return is a read barrier for the READ_ONCE of
		 * vma->pages above.
		 *
		 * READ_ONCE is safe because this is either called from the same
		 * function (i915_vma_pin_ww), or guarded by vma->vm->mutex.
		 *
		 * TODO: We're leaving vma->pages dangling, until vma->obj->resv
		 * lock is required to unpin the pages.
		 */
		if (pages != vma->obj->mm.pages) {
			sg_free_table(pages);
			kfree(pages);
		}

		i915_gem_object_unpin_pages(vma->obj);
	}
}

I915_SELFTEST_EXPORT void i915_vma_put_pages(struct i915_vma *vma)
{
	if (atomic_add_unless(&vma->pages_count, -1, 1))
		return;

	__vma_put_pages(vma, 1);
}
static void vma_unbind_pages(struct i915_vma *vma)
{
	unsigned int count;

	lockdep_assert_held(&vma->vm->mutex);

	/* The upper portion of pages_count is the number of bindings */
	count = atomic_read(&vma->pages_count);
	count >>= I915_VMA_PAGES_BIAS;
	GEM_BUG_ON(!count);

	__vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
}
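
/*
 * vma->pages_count packs two counters: the bits from I915_VMA_PAGES_BIAS
 * upwards count bindings, the bits below count page references (each
 * binding also takes one such reference via I915_VMA_PAGES_ACTIVE in
 * i915_vma_pin_ww()). vma_unbind_pages() therefore strips "count"
 * bindings together with their page references in a single
 * __vma_put_pages() call.
 */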
int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		    u64 size, u64 alignment, u64 flags)
{
	struct i915_vma_work *work = NULL;
	struct dma_fence *moving = NULL;
	struct i915_vma_resource *vma_res = NULL;
	intel_wakeref_t wakeref = 0;
	unsigned int bound;
	int err;

	assert_vma_held(vma);
	GEM_BUG_ON(!ww);

	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);

	GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));

	/* First try and grab the pin without rebinding the vma */
	if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
		return 0;

	err = i915_vma_get_pages(vma);
	if (err)
		return err;

	if (flags & PIN_GLOBAL)
		wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);

	moving = vma->obj ? i915_gem_object_get_moving_fence(vma->obj) : NULL;
	if (flags & vma->vm->bind_async_flags || moving) {
		/* lock VM */
		err = i915_vm_lock_objects(vma->vm, ww);
		if (err)
			goto err_rpm;

		work = i915_vma_work();
		if (!work) {
			err = -ENOMEM;
			goto err_rpm;
		}

		work->vm = i915_vm_get(vma->vm);

		dma_fence_work_chain(&work->base, moving);

		/* Allocate enough page directories to cover the used PTEs */
		if (vma->vm->allocate_va_range) {
			err = i915_vm_alloc_pt_stash(vma->vm,
						     &work->stash,
						     vma->size);
			if (err)
				goto err_fence;

			err = i915_vm_map_pt_stash(vma->vm, &work->stash);
			if (err)
				goto err_fence;
		}
	}

	vma_res = i915_vma_resource_alloc();
	if (IS_ERR(vma_res)) {
		err = PTR_ERR(vma_res);
		goto err_fence;
	}

	/*
	 * Differentiate between user/kernel vma inside the aliasing-ppgtt.
	 *
	 * We conflate the Global GTT with the user's vma when using the
	 * aliasing-ppgtt, but it is still vitally important to try and
	 * keep the use cases distinct. For example, userptr objects are
	 * not allowed inside the Global GTT as that will cause lock
	 * inversions when we have to evict them the mmu_notifier callbacks -
	 * but they are allowed to be part of the user ppGTT which can never
	 * be mapped. As such we try to give the distinct users of the same
	 * mutex, distinct lockclasses [equivalent to how we keep i915_ggtt
	 * and i915_ppgtt separate].
	 *
	 * NB this may cause us to mask real lock inversions -- while the
	 * code is safe today, lockdep may not be able to spot future
	 * transgressions.
	 */
	err = mutex_lock_interruptible_nested(&vma->vm->mutex,
					      !(flags & PIN_GLOBAL));
	if (err)
		goto err_vma_res;

	/* No more allocations allowed now we hold vm->mutex */

	if (unlikely(i915_vma_is_closed(vma))) {
		err = -ENOENT;
		goto err_unlock;
	}

	bound = atomic_read(&vma->flags);
	if (unlikely(bound & I915_VMA_ERROR)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
		err = -EAGAIN; /* pins are meant to be fairly temporary */
		goto err_unlock;
	}

	if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
		__i915_vma_pin(vma);
		goto err_unlock;
	}

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unlock;

	if (!(bound & I915_VMA_BIND_MASK)) {
		err = i915_vma_insert(vma, size, alignment, flags);
		if (err)
			goto err_active;

		if (i915_is_ggtt(vma->vm))
			__i915_vma_set_map_and_fenceable(vma);
	}

	GEM_BUG_ON(!vma->pages);
	err = i915_vma_bind(vma,
			    vma->obj->cache_level,
			    flags, work, vma_res);
	vma_res = NULL;
	if (err)
		goto err_remove;

	/* There should only be at most 2 active bindings (user, global) */
	GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
	atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);

	__i915_vma_pin(vma);
	GEM_BUG_ON(!i915_vma_is_pinned(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));

err_remove:
	if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
		i915_vma_detach(vma);
		drm_mm_remove_node(&vma->node);
	}
err_active:
	i915_active_release(&vma->active);
err_unlock:
	mutex_unlock(&vma->vm->mutex);
err_vma_res:
	i915_vma_resource_free(vma_res);
err_fence:
	if (work)
		dma_fence_work_commit_imm(&work->base);
err_rpm:
	if (wakeref)
		intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);

	if (moving)
		dma_fence_put(moving);

	i915_vma_put_pages(vma);
	return err;
}
static void flush_idle_contexts(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_engine_flush_barriers(engine);

	intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}
static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
			   u32 align, unsigned int flags)
{
	struct i915_address_space *vm = vma->vm;
	int err;

	do {
		err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);

		if (err != -ENOSPC) {
			if (!err) {
				err = i915_vma_wait_for_bind(vma);
				if (err)
					i915_vma_unpin(vma);
			}
			return err;
		}

		/* Unlike i915_vma_pin, we don't take no for an answer! */
		flush_idle_contexts(vm->gt);
		if (mutex_lock_interruptible(&vm->mutex) == 0) {
			i915_gem_evict_vm(vm);
			mutex_unlock(&vm->mutex);
		}
	} while (1);
}
int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		  u32 align, unsigned int flags)
{
	struct i915_gem_ww_ctx _ww;
	int err;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));

	if (ww)
		return __i915_ggtt_pin(vma, ww, align, flags);

#ifdef CONFIG_LOCKDEP
	WARN_ON(dma_resv_held(vma->obj->base.resv));
#endif

	for_i915_gem_ww(&_ww, err, true) {
		err = i915_gem_object_lock(vma->obj, &_ww);
		if (!err)
			err = __i915_ggtt_pin(vma, &_ww, align, flags);
	}

	return err;
}
static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
{
	/*
	 * We defer actually closing, unbinding and destroying the VMA until
	 * the next idle point, or if the object is freed in the meantime. By
	 * postponing the unbind, we allow for it to be resurrected by the
	 * client, avoiding the work required to rebind the VMA. This is
	 * advantageous for DRI, where the client/server pass objects
	 * between themselves, temporarily opening a local VMA to the
	 * object, and then closing it again. The same object is then reused
	 * on the next frame (or two, depending on the depth of the swap queue)
	 * causing us to rebind the VMA once more. This ends up being a lot
	 * of wasted work for the steady state.
	 */
	GEM_BUG_ON(i915_vma_is_closed(vma));
	list_add(&vma->closed_link, &gt->closed_vma);
}

void i915_vma_close(struct i915_vma *vma)
{
	struct intel_gt *gt = vma->vm->gt;
	unsigned long flags;

	if (i915_vma_is_ggtt(vma))
		return;

	GEM_BUG_ON(!atomic_read(&vma->open_count));
	if (atomic_dec_and_lock_irqsave(&vma->open_count,
					&gt->closed_lock,
					flags)) {
		__vma_close(vma, gt);
		spin_unlock_irqrestore(&gt->closed_lock, flags);
	}
}
static void __i915_vma_remove_closed(struct i915_vma *vma)
{
	struct intel_gt *gt = vma->vm->gt;

	spin_lock_irq(&gt->closed_lock);
	list_del_init(&vma->closed_link);
	spin_unlock_irq(&gt->closed_lock);
}

void i915_vma_reopen(struct i915_vma *vma)
{
	if (i915_vma_is_closed(vma))
		__i915_vma_remove_closed(vma);
}
void i915_vma_release(struct kref *ref)
{
	struct i915_vma *vma = container_of(ref, typeof(*vma), ref);
	struct drm_i915_gem_object *obj = vma->obj;

	if (drm_mm_node_allocated(&vma->node)) {
		mutex_lock(&vma->vm->mutex);
		atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
		WARN_ON(__i915_vma_unbind(vma));
		mutex_unlock(&vma->vm->mutex);
		GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
	}
	GEM_BUG_ON(i915_vma_is_active(vma));

	spin_lock(&obj->vma.lock);
	list_del(&vma->obj_link);
	if (!RB_EMPTY_NODE(&vma->obj_node))
		rb_erase(&vma->obj_node, &obj->vma.tree);
	spin_unlock(&obj->vma.lock);

	__i915_vma_remove_closed(vma);
	i915_vm_put(vma->vm);

	i915_active_fini(&vma->active);
	GEM_WARN_ON(vma->resource);
	i915_vma_free(vma);
}
void i915_vma_parked(struct intel_gt *gt)
{
	struct i915_vma *vma, *next;
	LIST_HEAD(closed);

	spin_lock_irq(&gt->closed_lock);
	list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_address_space *vm = vma->vm;

		/* XXX All to avoid keeping a reference on i915_vma itself */

		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		if (!i915_vm_tryopen(vm)) {
			i915_gem_object_put(obj);
			continue;
		}

		list_move(&vma->closed_link, &closed);
	}
	spin_unlock_irq(&gt->closed_lock);

	/* As the GT is held idle, no vma can be reopened as we destroy them */
	list_for_each_entry_safe(vma, next, &closed, closed_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_address_space *vm = vma->vm;

		INIT_LIST_HEAD(&vma->closed_link);
		__i915_vma_put(vma);

		i915_gem_object_put(obj);
		i915_vm_close(vm);
	}
}
static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}
void i915_vma_revoke_mmap(struct i915_vma *vma)
{
	struct drm_vma_offset_node *node;
	u64 vma_offset;

	if (!i915_vma_has_userfault(vma))
		return;

	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	GEM_BUG_ON(!vma->obj->userfault_count);

	node = &vma->mmo->vma_node;
	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
			    drm_vma_node_offset_addr(node) + vma_offset,
			    vma->size,
			    1);

	i915_vma_unset_userfault(vma);
	if (!--vma->obj->userfault_count)
		list_del(&vma->obj->userfault_link);
}
static int
__i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
{
	return __i915_request_await_exclusive(rq, &vma->active);
}

static int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
{
	int err;

	GEM_BUG_ON(!i915_vma_is_pinned(vma));

	/* Wait for the vma to be bound before we start! */
	err = __i915_request_await_bind(rq, vma);
	if (err)
		return err;

	return i915_active_add_request(&vma->active, rq);
}
int _i915_vma_move_to_active(struct i915_vma *vma,
			     struct i915_request *rq,
			     struct dma_fence *fence,
			     unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	assert_object_held(obj);

	err = __i915_vma_move_to_active(vma, rq);
	if (unlikely(err))
		return err;

	if (flags & EXEC_OBJECT_WRITE) {
		struct intel_frontbuffer *front;

		front = __intel_frontbuffer_get(obj);
		if (unlikely(front)) {
			if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
				i915_active_add_request(&front->write, rq);
			intel_frontbuffer_put(front);
		}

		if (fence) {
			dma_resv_add_excl_fence(vma->obj->base.resv, fence);
			obj->write_domain = I915_GEM_DOMAIN_RENDER;
			obj->read_domains = 0;
		}
	} else {
		if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
			err = dma_resv_reserve_shared(vma->obj->base.resv, 1);
			if (unlikely(err))
				return err;
		}

		if (fence) {
			dma_resv_add_shared_fence(vma->obj->base.resv, fence);
			obj->write_domain = 0;
		}
	}

	if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
		i915_active_add_request(&vma->fence->active, rq);

	obj->read_domains |= I915_GEM_GPU_DOMAINS;
	obj->mm.dirty = true;

	GEM_BUG_ON(!i915_vma_is_active(vma));
	return 0;
}
struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
{
	struct i915_vma_resource *vma_res = vma->resource;
	struct dma_fence *unbind_fence;

	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (i915_vma_is_map_and_fenceable(vma)) {
		/* Force a pagefault for domain tracking on next user access */
		i915_vma_revoke_mmap(vma);

		/*
		 * Check that we have flushed all writes through the GGTT
		 * before the unbind; otherwise, due to the non-strict nature
		 * of those indirect writes, they may end up referencing the
		 * GGTT PTE after the unbind.
		 *
		 * Note that we may be concurrently poking at the GGTT_WRITE
		 * bit from set-domain, as we mark all GGTT vma associated
		 * with an object. We know this is for another vma, as we
		 * are currently unbinding this one -- so if this vma will be
		 * reused, it will be refaulted and have its dirty bit set
		 * before the next write.
		 */
		i915_vma_flush_writes(vma);

		/* release the fence reg _after_ flushing */
		i915_vma_revoke_fence(vma);

		__i915_vma_iounmap(vma);
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	}
	GEM_BUG_ON(vma->fence);
	GEM_BUG_ON(i915_vma_has_userfault(vma));

	/* Object backend must be async capable. */
	GEM_WARN_ON(async && !vma->resource->bi.pages_rsgt);

	/* If vm is not open, unbind is a nop. */
	vma_res->needs_wakeref = i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND) &&
		atomic_read(&vma->vm->open);
	trace_i915_vma_unbind(vma);

	unbind_fence = i915_vma_resource_unbind(vma_res);
	vma->resource = NULL;

	atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
		   &vma->flags);

	i915_vma_detach(vma);

	if (!async && unbind_fence) {
		dma_fence_wait(unbind_fence, false);
		dma_fence_put(unbind_fence);
		unbind_fence = NULL;
	}

	/*
	 * Binding itself may not have completed until the unbind fence signals,
	 * so don't drop the pages until that happens, unless the resource is
	 * async_capable.
	 */

	vma_unbind_pages(vma);
	return unbind_fence;
}
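
/*
 * The two unbind flavours below differ only in how the fence returned by
 * __i915_vma_evict() is consumed: __i915_vma_unbind() passes async=false,
 * so the wait has already happened inline and NULL comes back, while
 * __i915_vma_unbind_async() hands the fence to its caller to install into
 * the object's dma_resv.
 */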
int __i915_vma_unbind(struct i915_vma *vma)
{
	int ret;

	lockdep_assert_held(&vma->vm->mutex);

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	/*
	 * After confirming that no one else is pinning this vma, wait for
	 * any laggards who may have crept in during the wait (through
	 * a residual pin skipping the vm->mutex) to complete.
	 */
	ret = i915_vma_sync(vma);
	if (ret)
		return ret;

	GEM_BUG_ON(i915_vma_is_active(vma));
	__i915_vma_evict(vma, false);

	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
	return 0;
}
static struct dma_fence *__i915_vma_unbind_async(struct i915_vma *vma)
{
	struct dma_fence *fence;

	lockdep_assert_held(&vma->vm->mutex);

	if (!drm_mm_node_allocated(&vma->node))
		return NULL;

	if (i915_vma_is_pinned(vma) ||
	    &vma->obj->mm.rsgt->table != vma->resource->bi.pages)
		return ERR_PTR(-EAGAIN);

	/*
	 * We probably need to replace this with awaiting the fences of the
	 * object's dma_resv when the vma active goes away. When doing that
	 * we need to be careful to not add the vma_resource unbind fence
	 * immediately to the object's dma_resv, because then unbinding
	 * the next vma from the object, in case there are many, will
	 * actually await the unbinding of the previous vmas, which is
	 * undesirable.
	 */
	if (i915_sw_fence_await_active(&vma->resource->chain, &vma->active,
				       I915_ACTIVE_AWAIT_EXCL |
				       I915_ACTIVE_AWAIT_ACTIVE) < 0) {
		return ERR_PTR(-EBUSY);
	}

	fence = __i915_vma_evict(vma, true);

	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */

	return fence;
}
int i915_vma_unbind(struct i915_vma *vma)
{
	struct i915_address_space *vm = vma->vm;
	intel_wakeref_t wakeref = 0;
	int err;

	/* Optimistic wait before taking the mutex */
	err = i915_vma_sync(vma);
	if (err)
		return err;

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
		/* XXX not always required: nop_clear_range */
		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);

	err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
	if (err)
		goto out_rpm;

	err = __i915_vma_unbind(vma);
	mutex_unlock(&vm->mutex);

out_rpm:
	if (wakeref)
		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);

	return err;
}
int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct i915_address_space *vm = vma->vm;
	intel_wakeref_t wakeref = 0;
	struct dma_fence *fence;
	int err;

	/*
	 * We need the dma-resv lock since we add the
	 * unbind fence to the dma-resv object.
	 */
	assert_object_held(obj);

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	if (!obj->mm.rsgt)
		return -EBUSY;

	err = dma_resv_reserve_shared(obj->base.resv, 1);
	if (err)
		return -EBUSY;

	/*
	 * It would be great if we could grab this wakeref from the
	 * async unbind work if needed, but we can't because it uses
	 * kmalloc and it's in the dma-fence signalling critical path.
	 */
	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);

	if (trylock_vm && !mutex_trylock(&vm->mutex)) {
		err = -EBUSY;
		goto out_rpm;
	} else if (!trylock_vm) {
		err = mutex_lock_interruptible_nested(&vm->mutex, !wakeref);
		if (err)
			goto out_rpm;
	}

	fence = __i915_vma_unbind_async(vma);
	mutex_unlock(&vm->mutex);
	if (IS_ERR_OR_NULL(fence)) {
		err = PTR_ERR_OR_ZERO(fence);
		goto out_rpm;
	}

	dma_resv_add_shared_fence(obj->base.resv, fence);
	dma_fence_put(fence);

out_rpm:
	if (wakeref)
		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
	return err;
}
struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_unshrinkable(vma->obj);
	return vma;
}

void i915_vma_make_shrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_shrinkable(vma->obj);
}

void i915_vma_make_purgeable(struct i915_vma *vma)
{
	i915_gem_object_make_purgeable(vma->obj);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif

void i915_vma_module_exit(void)
{
	kmem_cache_destroy(slab_vmas);
}

int __init i915_vma_module_init(void)
{
	slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
	if (!slab_vmas)
		return -ENOMEM;

	return 0;
}