2 * Copyright © 2016 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 #include <linux/sched/mm.h>
26 #include <linux/dma-fence-array.h>
27 #include <drm/drm_gem.h>
29 #include "display/intel_display.h"
30 #include "display/intel_frontbuffer.h"
31 #include "gem/i915_gem_lmem.h"
32 #include "gem/i915_gem_tiling.h"
33 #include "gt/intel_engine.h"
34 #include "gt/intel_engine_heartbeat.h"
35 #include "gt/intel_gt.h"
36 #include "gt/intel_gt_requests.h"
37 #include "gt/intel_tlb.h"
40 #include "i915_gem_evict.h"
41 #include "i915_sw_fence_work.h"
42 #include "i915_trace.h"
44 #include "i915_vma_resource.h"
46 static inline void assert_vma_held_evict(const struct i915_vma *vma)
49 * We may be forced to unbind when the vm is dead, to clean it up.
50 * This is the only exception to the requirement of the object lock being held.
53 if (kref_read(&vma->vm->ref))
54 assert_object_held_shared(vma->obj);
57 static struct kmem_cache *slab_vmas;
59 static struct i915_vma *i915_vma_alloc(void)
61 return kmem_cache_zalloc(slab_vmas, GFP_KERNEL);
64 static void i915_vma_free(struct i915_vma *vma)
66 return kmem_cache_free(slab_vmas, vma);
69 #if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)
71 #include <linux/stackdepot.h>
73 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
77 if (!vma->node.stack) {
78 drm_dbg(vma->obj->base.dev,
79 "vma.node [%08llx + %08llx] %s: unknown owner\n",
80 vma->node.start, vma->node.size, reason);
84 stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0);
85 drm_dbg(vma->obj->base.dev,
86 "vma.node [%08llx + %08llx] %s: inserted at %s\n",
87 vma->node.start, vma->node.size, reason, buf);
92 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
98 static inline struct i915_vma *active_to_vma(struct i915_active *ref)
100 return container_of(ref, typeof(struct i915_vma), active);
103 static int __i915_vma_active(struct i915_active *ref)
105 return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
108 static void __i915_vma_retire(struct i915_active *ref)
110 i915_vma_put(active_to_vma(ref));
113 static struct i915_vma *
114 vma_create(struct drm_i915_gem_object *obj,
115 struct i915_address_space *vm,
116 const struct i915_gtt_view *view)
118 struct i915_vma *pos = ERR_PTR(-E2BIG);
119 struct i915_vma *vma;
120 struct rb_node *rb, **p;
123 /* The aliasing_ppgtt should never be used directly! */
124 GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);
126 vma = i915_vma_alloc();
128 return ERR_PTR(-ENOMEM);
130 vma->ops = &vm->vma_ops;
132 vma->size = obj->base.size;
133 vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
135 i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire, 0);
137 /* Declare ourselves safe for use inside shrinkers */
138 if (IS_ENABLED(CONFIG_LOCKDEP)) {
139 fs_reclaim_acquire(GFP_KERNEL);
140 might_lock(&vma->active.mutex);
141 fs_reclaim_release(GFP_KERNEL);
144 INIT_LIST_HEAD(&vma->closed_link);
145 INIT_LIST_HEAD(&vma->obj_link);
146 RB_CLEAR_NODE(&vma->obj_node);
148 if (view && view->type != I915_GTT_VIEW_NORMAL) {
149 vma->gtt_view = *view;
150 if (view->type == I915_GTT_VIEW_PARTIAL) {
151 GEM_BUG_ON(range_overflows_t(u64,
152 view->partial.offset,
154 obj->base.size >> PAGE_SHIFT));
155 vma->size = view->partial.size;
156 vma->size <<= PAGE_SHIFT;
157 GEM_BUG_ON(vma->size > obj->base.size);
158 } else if (view->type == I915_GTT_VIEW_ROTATED) {
159 vma->size = intel_rotation_info_size(&view->rotated);
160 vma->size <<= PAGE_SHIFT;
161 } else if (view->type == I915_GTT_VIEW_REMAPPED) {
162 vma->size = intel_remapped_info_size(&view->remapped);
163 vma->size <<= PAGE_SHIFT;
167 if (unlikely(vma->size > vm->total))
170 GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
172 err = mutex_lock_interruptible(&vm->mutex);
179 list_add_tail(&vma->vm_link, &vm->unbound_list);
181 spin_lock(&obj->vma.lock);
182 if (i915_is_ggtt(vm)) {
183 if (unlikely(overflows_type(vma->size, u32)))
186 vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
187 i915_gem_object_get_tiling(obj),
188 i915_gem_object_get_stride(obj));
189 if (unlikely(vma->fence_size < vma->size || /* overflow */
190 vma->fence_size > vm->total))
193 GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
195 vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
196 i915_gem_object_get_tiling(obj),
197 i915_gem_object_get_stride(obj));
198 GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
200 __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
204 p = &obj->vma.tree.rb_node;
209 pos = rb_entry(rb, struct i915_vma, obj_node);
212 * If the view already exists in the tree, another thread
213 * already created a matching vma, so return the older instance
214 * and dispose of ours.
216 cmp = i915_vma_compare(pos, vm, view);
224 rb_link_node(&vma->obj_node, rb, p);
225 rb_insert_color(&vma->obj_node, &obj->vma.tree);
227 if (i915_vma_is_ggtt(vma))
229 * We put the GGTT vma at the start of the vma-list, followed
230 * by the ppGTT vma. This allows us to break early when
231 * iterating over only the GGTT vma for an object, see
232 * for_each_ggtt_vma()
234 list_add(&vma->obj_link, &obj->vma.list);
236 list_add_tail(&vma->obj_link, &obj->vma.list);
238 spin_unlock(&obj->vma.lock);
239 mutex_unlock(&vm->mutex);
244 spin_unlock(&obj->vma.lock);
245 list_del_init(&vma->vm_link);
246 mutex_unlock(&vm->mutex);
252 static struct i915_vma *
253 i915_vma_lookup(struct drm_i915_gem_object *obj,
254 struct i915_address_space *vm,
255 const struct i915_gtt_view *view)
259 rb = obj->vma.tree.rb_node;
261 struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
264 cmp = i915_vma_compare(vma, vm, view);
278 * i915_vma_instance - return the singleton instance of the VMA
279 * @obj: parent &struct drm_i915_gem_object to be mapped
280 * @vm: address space in which the mapping is located
281 * @view: additional mapping requirements
283 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
284 * the same @view characteristics. If a match is not found, one is created.
285 * Once created, the VMA is kept until either the object is freed, or the
286 * address space is closed.
288 * Returns the vma, or an error pointer.
291 i915_vma_instance(struct drm_i915_gem_object *obj,
292 struct i915_address_space *vm,
293 const struct i915_gtt_view *view)
295 struct i915_vma *vma;
297 GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));
298 GEM_BUG_ON(!kref_read(&vm->ref));
300 spin_lock(&obj->vma.lock);
301 vma = i915_vma_lookup(obj, vm, view);
302 spin_unlock(&obj->vma.lock);
304 /* vma_create() will resolve the race if another creates the vma */
306 vma = vma_create(obj, vm, view);
308 GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
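/*
 * Usage sketch (illustrative only, not part of this file): a typical
 * caller resolves the singleton VMA and then pins it. The GGTT address
 * space and PIN_GLOBAL below are example choices; any (vm, view) pair
 * follows the same lookup-or-create pattern described above.
 *
 *	struct i915_vma *vma;
 *	int err;
 *
 *	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
 *	if (err)
 *		return err;
 *
 *	... use the GGTT binding ...
 *
 *	i915_vma_unpin(vma);
 */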
312 struct i915_vma_work {
313 struct dma_fence_work base;
314 struct i915_address_space *vm;
315 struct i915_vm_pt_stash stash;
316 struct i915_vma_resource *vma_res;
317 struct drm_i915_gem_object *obj;
318 struct i915_sw_dma_fence_cb cb;
319 unsigned int pat_index;
323 static void __vma_bind(struct dma_fence_work *work)
325 struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
326 struct i915_vma_resource *vma_res = vw->vma_res;
329 * We are about to bind the object, which must mean we have already
330 * signaled the work to potentially clear/move the pages underneath. If
331 * something went wrong at that stage then the object should have
332 * unknown_state set, in which case we need to skip the bind.
334 if (i915_gem_object_has_unknown_state(vw->obj))
337 vma_res->ops->bind_vma(vma_res->vm, &vw->stash,
338 vma_res, vw->pat_index, vw->flags);
341 static void __vma_release(struct dma_fence_work *work)
343 struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
346 i915_gem_object_put(vw->obj);
348 i915_vm_free_pt_stash(vw->vm, &vw->stash);
350 i915_vma_resource_put(vw->vma_res);
353 static const struct dma_fence_work_ops bind_ops = {
356 .release = __vma_release,
359 struct i915_vma_work *i915_vma_work(void)
361 struct i915_vma_work *vw;
363 vw = kzalloc(sizeof(*vw), GFP_KERNEL);
367 dma_fence_work_init(&vw->base, &bind_ops);
368 vw->base.dma.error = -EAGAIN; /* disable the worker by default */
373 int i915_vma_wait_for_bind(struct i915_vma *vma)
377 if (rcu_access_pointer(vma->active.excl.fence)) {
378 struct dma_fence *fence;
381 fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
384 err = dma_fence_wait(fence, true);
385 dma_fence_put(fence);
392 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
393 static int i915_vma_verify_bind_complete(struct i915_vma *vma)
395 struct dma_fence *fence = i915_active_fence_get(&vma->active.excl);
401 if (dma_fence_is_signaled(fence))
406 dma_fence_put(fence);
411 #define i915_vma_verify_bind_complete(_vma) 0
414 I915_SELFTEST_EXPORT void
415 i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res,
416 struct i915_vma *vma)
418 struct drm_i915_gem_object *obj = vma->obj;
420 i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes,
421 obj->mm.rsgt, i915_gem_object_is_readonly(obj),
422 i915_gem_object_is_lmem(obj), obj->mm.region,
423 vma->ops, vma->private, __i915_vma_offset(vma),
424 __i915_vma_size(vma), vma->size, vma->guard);
428 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
430 * @pat_index: PAT index to set in PTE
431 * @flags: flags like global or local mapping
432 * @work: preallocated worker for allocating and binding the PTE
433 * @vma_res: pointer to a preallocated vma resource. The resource is either
436 * DMA addresses are taken from the scatter-gather table of this object (or of
437 * this VMA in case of non-default GGTT views) and PTE entries set up.
438 * Note that DMA addresses are also the only part of the SG table we care about.
440 int i915_vma_bind(struct i915_vma *vma,
441 unsigned int pat_index,
443 struct i915_vma_work *work,
444 struct i915_vma_resource *vma_res)
450 lockdep_assert_held(&vma->vm->mutex);
451 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
452 GEM_BUG_ON(vma->size > i915_vma_size(vma));
454 if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
457 i915_vma_resource_free(vma_res);
461 if (GEM_DEBUG_WARN_ON(!flags)) {
462 i915_vma_resource_free(vma_res);
467 bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
469 vma_flags = atomic_read(&vma->flags);
470 vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
472 bind_flags &= ~vma_flags;
473 if (bind_flags == 0) {
474 i915_vma_resource_free(vma_res);
478 GEM_BUG_ON(!atomic_read(&vma->pages_count));
480 /* Wait for or await async unbinds touching our range */
481 if (work && bind_flags & vma->vm->bind_async_flags)
482 ret = i915_vma_resource_bind_dep_await(vma->vm,
488 __GFP_RETRY_MAYFAIL |
491 ret = i915_vma_resource_bind_dep_sync(vma->vm, vma->node.start,
492 vma->node.size, true);
494 i915_vma_resource_free(vma_res);
498 if (vma->resource || !vma_res) {
499 /* Rebinding with an additional I915_VMA_*_BIND */
500 GEM_WARN_ON(!vma_flags);
501 i915_vma_resource_free(vma_res);
503 i915_vma_resource_init_from_vma(vma_res, vma);
504 vma->resource = vma_res;
506 trace_i915_vma_bind(vma, bind_flags);
507 if (work && bind_flags & vma->vm->bind_async_flags) {
508 struct dma_fence *prev;
510 work->vma_res = i915_vma_resource_get(vma->resource);
511 work->pat_index = pat_index;
512 work->flags = bind_flags;
515 * Note we only want to chain up to the migration fence on
516 * the pages (not the object itself). As we don't track that,
517 * yet, we have to use the exclusive fence instead.
519 * Also note that we do not want to track the async vma as
520 * part of the obj->resv->excl_fence as it only affects
521 * execution and not content or object's backing store lifetime.
523 prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
525 __i915_sw_fence_await_dma_fence(&work->base.chain,
531 work->base.dma.error = 0; /* enable the queue_work() */
532 work->obj = i915_gem_object_get(vma->obj);
534 ret = i915_gem_object_wait_moving_fence(vma->obj, true);
536 i915_vma_resource_free(vma->resource);
537 vma->resource = NULL;
541 vma->ops->bind_vma(vma->vm, NULL, vma->resource, pat_index,
545 atomic_or(bind_flags, &vma->flags);
549 void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
554 if (WARN_ON_ONCE(vma->obj->flags & I915_BO_ALLOC_GPU_ONLY))
555 return IOMEM_ERR_PTR(-EINVAL);
557 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
558 GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
559 GEM_BUG_ON(i915_vma_verify_bind_complete(vma));
561 ptr = READ_ONCE(vma->iomap);
564 * TODO: consider just using i915_gem_object_pin_map() for lmem
565 * instead, which already supports mapping non-contiguous chunks
566 * of pages, that way we can also drop the
567 * I915_BO_ALLOC_CONTIGUOUS when allocating the object.
569 if (i915_gem_object_is_lmem(vma->obj)) {
570 ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
571 vma->obj->base.size);
572 } else if (i915_vma_is_map_and_fenceable(vma)) {
573 ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
574 i915_vma_offset(vma),
577 ptr = (void __iomem *)
578 i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
583 ptr = page_pack_bits(ptr, 1);
591 if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
592 if (page_unmask_bits(ptr))
593 __i915_gem_object_release_map(vma->obj);
595 io_mapping_unmap(ptr);
602 err = i915_vma_pin_fence(vma);
606 i915_vma_set_ggtt_write(vma);
608 /* NB Access through the GTT requires the device to be awake. */
609 return page_mask_bits(ptr);
612 __i915_vma_unpin(vma);
614 return IOMEM_ERR_PTR(err);
617 void i915_vma_flush_writes(struct i915_vma *vma)
619 if (i915_vma_unset_ggtt_write(vma))
620 intel_gt_flush_ggtt_writes(vma->vm->gt);
623 void i915_vma_unpin_iomap(struct i915_vma *vma)
625 GEM_BUG_ON(vma->iomap == NULL);
627 /* XXX We keep the mapping until __i915_vma_unbind()/evict() */
629 i915_vma_flush_writes(vma);
631 i915_vma_unpin_fence(vma);
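/*
 * Usage sketch (illustrative only, not part of this file): the iomap
 * helpers above are used in pin/unpin pairs on a vma that is already
 * pinned and bound into the GGTT, e.g. for CPU writes through the
 * mappable aperture:
 *
 *	void __iomem *map;
 *
 *	map = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *
 *	writel(value, map + offset);
 *
 *	i915_vma_unpin_iomap(vma);
 *
 * Note that i915_vma_unpin_iomap() flushes any pending GGTT writes and
 * drops the pin taken by i915_vma_pin_iomap(), but the iomap itself is
 * kept cached until the vma is unbound.
 */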
635 void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
637 struct i915_vma *vma;
638 struct drm_i915_gem_object *obj;
640 vma = fetch_and_zero(p_vma);
649 if (flags & I915_VMA_RELEASE_MAP)
650 i915_gem_object_unpin_map(obj);
652 i915_gem_object_put(obj);
655 bool i915_vma_misplaced(const struct i915_vma *vma,
656 u64 size, u64 alignment, u64 flags)
658 if (!drm_mm_node_allocated(&vma->node))
661 if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
664 if (i915_vma_size(vma) < size)
667 GEM_BUG_ON(alignment && !is_power_of_2(alignment));
668 if (alignment && !IS_ALIGNED(i915_vma_offset(vma), alignment))
671 if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
674 if (flags & PIN_OFFSET_BIAS &&
675 i915_vma_offset(vma) < (flags & PIN_OFFSET_MASK))
678 if (flags & PIN_OFFSET_FIXED &&
679 i915_vma_offset(vma) != (flags & PIN_OFFSET_MASK))
682 if (flags & PIN_OFFSET_GUARD &&
683 vma->guard < (flags & PIN_OFFSET_MASK))
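/*
 * Illustrative sketch (example values, not part of this file): the PIN_*
 * placement constraints tested above are supplied by callers in the flags
 * word of the pin interfaces, with the page-aligned offset or bias encoded
 * in PIN_OFFSET_MASK:
 *
 *	err = i915_vma_pin(vma, 0, 0,
 *			   PIN_GLOBAL | PIN_OFFSET_FIXED | offset);
 *
 *	err = i915_vma_pin(vma, 0, 0,
 *			   PIN_GLOBAL | PIN_OFFSET_BIAS | bias);
 *
 * If the vma is already bound in a way that violates such a request,
 * i915_vma_misplaced() reports it so that the caller can unbind and
 * rebind at a suitable location.
 */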
689 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
691 bool mappable, fenceable;
693 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
694 GEM_BUG_ON(!vma->fence_size);
696 fenceable = (i915_vma_size(vma) >= vma->fence_size &&
697 IS_ALIGNED(i915_vma_offset(vma), vma->fence_alignment));
699 mappable = i915_ggtt_offset(vma) + vma->fence_size <=
700 i915_vm_to_ggtt(vma->vm)->mappable_end;
702 if (mappable && fenceable)
703 set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
705 clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
708 bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
710 struct drm_mm_node *node = &vma->node;
711 struct drm_mm_node *other;
714 * On some machines we have to be careful when putting differing types
715 * of snoopable memory together to avoid the prefetcher crossing memory
716 * domains and dying. During vm initialisation, we decide whether or not
717 * these constraints apply and set the drm_mm.color_adjust appropriately.
720 if (!i915_vm_has_cache_coloring(vma->vm))
723 /* Only valid to be called on an already inserted vma */
724 GEM_BUG_ON(!drm_mm_node_allocated(node));
725 GEM_BUG_ON(list_empty(&node->node_list));
727 other = list_prev_entry(node, node_list);
728 if (i915_node_color_differs(other, color) &&
729 !drm_mm_hole_follows(other))
732 other = list_next_entry(node, node_list);
733 if (i915_node_color_differs(other, color) &&
734 !drm_mm_hole_follows(node))
741 * i915_vma_insert - finds a slot for the vma in its address space
743 * @ww: An optional struct i915_gem_ww_ctx
744 * @size: requested size in bytes (can be larger than the VMA)
745 * @alignment: required alignment
746 * @flags: mask of PIN_* flags to use
748 * First we try to allocate some free space that meets the requirements for
749 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
750 * preferably the oldest idle entry to make room for the new VMA.
753 * 0 on success, negative error code otherwise.
756 i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
757 u64 size, u64 alignment, u64 flags)
759 unsigned long color, guard;
763 GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
764 GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
765 GEM_BUG_ON(hweight64(flags & (PIN_OFFSET_GUARD | PIN_OFFSET_FIXED | PIN_OFFSET_BIAS)) > 1);
767 size = max(size, vma->size);
768 alignment = max_t(typeof(alignment), alignment, vma->display_alignment);
769 if (flags & PIN_MAPPABLE) {
770 size = max_t(typeof(size), size, vma->fence_size);
771 alignment = max_t(typeof(alignment),
772 alignment, vma->fence_alignment);
775 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
776 GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
777 GEM_BUG_ON(!is_power_of_2(alignment));
779 guard = vma->guard; /* retain guard across rebinds */
780 if (flags & PIN_OFFSET_GUARD) {
781 GEM_BUG_ON(overflows_type(flags & PIN_OFFSET_MASK, u32));
782 guard = max_t(u32, guard, flags & PIN_OFFSET_MASK);
785 * As we align the node upon insertion, but the hardware gets
786 * node.start + guard, the easiest way to make that work is
787 * to make the guard a multiple of the alignment size.
789 guard = ALIGN(guard, alignment);
791 start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
792 GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
794 end = vma->vm->total;
795 if (flags & PIN_MAPPABLE)
796 end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
797 if (flags & PIN_ZONE_4G)
798 end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
799 GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
801 alignment = max(alignment, i915_vm_obj_min_alignment(vma->vm, vma->obj));
804 * If binding the object/GGTT view requires more space than the entire
805 * aperture has, reject it early before evicting everything in a vain
806 * attempt to find space.
808 if (size > end - 2 * guard) {
809 drm_dbg(vma->obj->base.dev,
810 "Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
811 size, flags & PIN_MAPPABLE ? "mappable" : "total", end);
817 if (i915_vm_has_cache_coloring(vma->vm))
818 color = vma->obj->pat_index;
820 if (flags & PIN_OFFSET_FIXED) {
821 u64 offset = flags & PIN_OFFSET_MASK;
822 if (!IS_ALIGNED(offset, alignment) ||
823 range_overflows(offset, size, end))
826 * The caller knows not of the guard added by others and
827 * requests for the offset of the start of its buffer
828 * to be fixed, which may not be the same as the position
829 * of the vma->node due to the guard pages.
831 if (offset < guard || offset + size > end - guard)
834 ret = i915_gem_gtt_reserve(vma->vm, ww, &vma->node,
843 * We only support huge gtt pages through the 48b PPGTT,
844 * however we also don't want to force any alignment for
845 * objects which need to be tightly packed into the low 32bits.
847 * Note that we assume that the GGTT is limited to 4GiB for the
848 * foreseeable future. See also i915_ggtt_offset().
850 if (upper_32_bits(end - 1) &&
851 vma->page_sizes.sg > I915_GTT_PAGE_SIZE &&
852 !HAS_64K_PAGES(vma->vm->i915)) {
854 * We can't mix 64K and 4K PTEs in the same page-table
855 * (2M block), and so to avoid the ugliness and
856 * complexity of coloring we opt for just aligning 64K objects to 2M.
860 rounddown_pow_of_two(vma->page_sizes.sg |
861 I915_GTT_PAGE_SIZE_2M);
864 * Check we don't expand for the limited Global GTT
865 * (mappable aperture is even more precious!). This
866 * also checks that we exclude the aliasing-ppgtt.
868 GEM_BUG_ON(i915_vma_is_ggtt(vma));
870 alignment = max(alignment, page_alignment);
872 if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
873 size = round_up(size, I915_GTT_PAGE_SIZE_2M);
876 ret = i915_gem_gtt_insert(vma->vm, ww, &vma->node,
877 size, alignment, color,
882 GEM_BUG_ON(vma->node.start < start);
883 GEM_BUG_ON(vma->node.start + vma->node.size > end);
885 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
886 GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
888 list_move_tail(&vma->vm_link, &vma->vm->bound_list);
895 i915_vma_detach(struct i915_vma *vma)
897 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
898 GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
901 * And finally now the object is completely decoupled from this
902 * vma, we can drop its hold on the backing storage and allow
903 * it to be reaped by the shrinker.
905 list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
908 static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
912 bound = atomic_read(&vma->flags);
914 if (flags & PIN_VALIDATE) {
915 flags &= I915_VMA_BIND_MASK;
917 return (flags & bound) == flags;
920 /* with the lock mandatory for unbind, we don't race here */
921 flags &= I915_VMA_BIND_MASK;
923 if (unlikely(flags & ~bound))
926 if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
929 GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
930 } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
935 static struct scatterlist *
936 rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
937 unsigned int width, unsigned int height,
938 unsigned int src_stride, unsigned int dst_stride,
939 struct sg_table *st, struct scatterlist *sg)
941 unsigned int column, row;
944 for (column = 0; column < width; column++) {
947 src_idx = src_stride * (height - 1) + column + offset;
948 for (row = 0; row < height; row++) {
951 * We don't need the pages, but need to initialize
952 * the entries so the sg list can be happily traversed.
953 * All we need are the DMA addresses.
955 sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
957 i915_gem_object_get_dma_address(obj, src_idx);
958 sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
960 src_idx -= src_stride;
963 left = (dst_stride - height) * I915_GTT_PAGE_SIZE;
971 * The DE ignores the PTEs for the padding tiles, the sg entry
972 * here is just a convenience to indicate how many padding PTEs
973 * to insert at this spot.
975 sg_set_page(sg, NULL, left, 0);
976 sg_dma_address(sg) = 0;
977 sg_dma_len(sg) = left;
984 static noinline struct sg_table *
985 intel_rotate_pages(struct intel_rotation_info *rot_info,
986 struct drm_i915_gem_object *obj)
988 unsigned int size = intel_rotation_info_size(rot_info);
989 struct drm_i915_private *i915 = to_i915(obj->base.dev);
991 struct scatterlist *sg;
995 /* Allocate target SG list. */
996 st = kmalloc(sizeof(*st), GFP_KERNEL);
1000 ret = sg_alloc_table(st, size, GFP_KERNEL);
1007 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
1008 sg = rotate_pages(obj, rot_info->plane[i].offset,
1009 rot_info->plane[i].width, rot_info->plane[i].height,
1010 rot_info->plane[i].src_stride,
1011 rot_info->plane[i].dst_stride,
1020 drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
1021 obj->base.size, rot_info->plane[0].width,
1022 rot_info->plane[0].height, size);
1024 return ERR_PTR(ret);
1027 static struct scatterlist *
1028 add_padding_pages(unsigned int count,
1029 struct sg_table *st, struct scatterlist *sg)
1034 * The DE ignores the PTEs for the padding tiles, the sg entry
1035 * here is just a convenience to indicate how many padding PTEs
1036 * to insert at this spot.
1038 sg_set_page(sg, NULL, count * I915_GTT_PAGE_SIZE, 0);
1039 sg_dma_address(sg) = 0;
1040 sg_dma_len(sg) = count * I915_GTT_PAGE_SIZE;
1046 static struct scatterlist *
1047 remap_tiled_color_plane_pages(struct drm_i915_gem_object *obj,
1048 unsigned long offset, unsigned int alignment_pad,
1049 unsigned int width, unsigned int height,
1050 unsigned int src_stride, unsigned int dst_stride,
1051 struct sg_table *st, struct scatterlist *sg,
1052 unsigned int *gtt_offset)
1056 if (!width || !height)
1060 sg = add_padding_pages(alignment_pad, st, sg);
1062 for (row = 0; row < height; row++) {
1063 unsigned int left = width * I915_GTT_PAGE_SIZE;
1067 unsigned int length;
1070 * We don't need the pages, but need to initialize
1071 * the entries so the sg list can be happily traversed.
1072 * All we need are the DMA addresses.
1075 addr = i915_gem_object_get_dma_address_len(obj, offset, &length);
1077 length = min(left, length);
1081 sg_set_page(sg, NULL, length, 0);
1082 sg_dma_address(sg) = addr;
1083 sg_dma_len(sg) = length;
1086 offset += length / I915_GTT_PAGE_SIZE;
1090 offset += src_stride - width;
1092 left = (dst_stride - width) * I915_GTT_PAGE_SIZE;
1097 sg = add_padding_pages(left >> PAGE_SHIFT, st, sg);
1100 *gtt_offset += alignment_pad + dst_stride * height;
1105 static struct scatterlist *
1106 remap_contiguous_pages(struct drm_i915_gem_object *obj,
1109 struct sg_table *st, struct scatterlist *sg)
1111 struct scatterlist *iter;
1112 unsigned int offset;
1114 iter = i915_gem_object_get_sg_dma(obj, obj_offset, &offset);
1120 len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
1121 count << PAGE_SHIFT);
1122 sg_set_page(sg, NULL, len, 0);
1123 sg_dma_address(sg) =
1124 sg_dma_address(iter) + (offset << PAGE_SHIFT);
1125 sg_dma_len(sg) = len;
1128 count -= len >> PAGE_SHIFT;
1133 iter = __sg_next(iter);
1138 static struct scatterlist *
1139 remap_linear_color_plane_pages(struct drm_i915_gem_object *obj,
1140 pgoff_t obj_offset, unsigned int alignment_pad,
1142 struct sg_table *st, struct scatterlist *sg,
1143 unsigned int *gtt_offset)
1149 sg = add_padding_pages(alignment_pad, st, sg);
1151 sg = remap_contiguous_pages(obj, obj_offset, size, st, sg);
1154 *gtt_offset += alignment_pad + size;
1159 static struct scatterlist *
1160 remap_color_plane_pages(const struct intel_remapped_info *rem_info,
1161 struct drm_i915_gem_object *obj,
1163 struct sg_table *st, struct scatterlist *sg,
1164 unsigned int *gtt_offset)
1166 unsigned int alignment_pad = 0;
1168 if (rem_info->plane_alignment)
1169 alignment_pad = ALIGN(*gtt_offset, rem_info->plane_alignment) - *gtt_offset;
1171 if (rem_info->plane[color_plane].linear)
1172 sg = remap_linear_color_plane_pages(obj,
1173 rem_info->plane[color_plane].offset,
1175 rem_info->plane[color_plane].size,
1180 sg = remap_tiled_color_plane_pages(obj,
1181 rem_info->plane[color_plane].offset,
1183 rem_info->plane[color_plane].width,
1184 rem_info->plane[color_plane].height,
1185 rem_info->plane[color_plane].src_stride,
1186 rem_info->plane[color_plane].dst_stride,
1193 static noinline struct sg_table *
1194 intel_remap_pages(struct intel_remapped_info *rem_info,
1195 struct drm_i915_gem_object *obj)
1197 unsigned int size = intel_remapped_info_size(rem_info);
1198 struct drm_i915_private *i915 = to_i915(obj->base.dev);
1199 struct sg_table *st;
1200 struct scatterlist *sg;
1201 unsigned int gtt_offset = 0;
1205 /* Allocate target SG list. */
1206 st = kmalloc(sizeof(*st), GFP_KERNEL);
1210 ret = sg_alloc_table(st, size, GFP_KERNEL);
1217 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
1218 sg = remap_color_plane_pages(rem_info, obj, i, st, sg, &gt_offset);
1228 drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
1229 obj->base.size, rem_info->plane[0].width,
1230 rem_info->plane[0].height, size);
1232 return ERR_PTR(ret);
1235 static noinline struct sg_table *
1236 intel_partial_pages(const struct i915_gtt_view *view,
1237 struct drm_i915_gem_object *obj)
1239 struct sg_table *st;
1240 struct scatterlist *sg;
1241 unsigned int count = view->partial.size;
1244 st = kmalloc(sizeof(*st), GFP_KERNEL);
1248 ret = sg_alloc_table(st, count, GFP_KERNEL);
1254 sg = remap_contiguous_pages(obj, view->partial.offset, count, st, st->sgl);
1257 i915_sg_trim(st); /* Drop any unused tail entries. */
1264 return ERR_PTR(ret);
1268 __i915_vma_get_pages(struct i915_vma *vma)
1270 struct sg_table *pages;
1273 * The vma->pages are only valid within the lifespan of the borrowed
1274 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
1275 * must be the vma->pages. A simple rule is that vma->pages must only
1276 * be accessed when the obj->mm.pages are pinned.
1278 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
1280 switch (vma->gtt_view.type) {
1282 GEM_BUG_ON(vma->gtt_view.type);
1284 case I915_GTT_VIEW_NORMAL:
1285 pages = vma->obj->mm.pages;
1288 case I915_GTT_VIEW_ROTATED:
1290 intel_rotate_pages(&vma->gtt_view.rotated, vma->obj);
1293 case I915_GTT_VIEW_REMAPPED:
1295 intel_remap_pages(&vma->gtt_view.remapped, vma->obj);
1298 case I915_GTT_VIEW_PARTIAL:
1299 pages = intel_partial_pages(&vma->gtt_view, vma->obj);
1303 if (IS_ERR(pages)) {
1304 drm_err(&vma->vm->i915->drm,
1305 "Failed to get pages for VMA view type %u (%ld)!\n",
1306 vma->gtt_view.type, PTR_ERR(pages));
1307 return PTR_ERR(pages);
1315 I915_SELFTEST_EXPORT int i915_vma_get_pages(struct i915_vma *vma)
1319 if (atomic_add_unless(&vma->pages_count, 1, 0))
1322 err = i915_gem_object_pin_pages(vma->obj);
1326 err = __i915_vma_get_pages(vma);
1330 vma->page_sizes = vma->obj->mm.page_sizes;
1331 atomic_inc(&vma->pages_count);
1336 __i915_gem_object_unpin_pages(vma->obj);
1341 void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb)
1343 struct intel_gt *gt;
1350 * Before we release the pages that were bound by this vma, we
1351 * must invalidate all the TLBs that may still have a reference
1352 * back to our physical address. It only needs to be done once,
1353 * so after updating the PTE to point away from the pages, record
1354 * the most recent TLB invalidation seqno, and if we have not yet
1355 * flushed the TLBs upon release, perform a full invalidation.
1357 for_each_gt(gt, vm->i915, id)
1359 intel_gt_next_invalidate_tlb_full(gt));
1362 static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
1364 /* We allocate under vma_get_pages, so beware the shrinker */
1365 GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
1367 if (atomic_sub_return(count, &vma->pages_count) == 0) {
1368 if (vma->pages != vma->obj->mm.pages) {
1369 sg_free_table(vma->pages);
1374 i915_gem_object_unpin_pages(vma->obj);
1378 I915_SELFTEST_EXPORT void i915_vma_put_pages(struct i915_vma *vma)
1380 if (atomic_add_unless(&vma->pages_count, -1, 1))
1383 __vma_put_pages(vma, 1);
1386 static void vma_unbind_pages(struct i915_vma *vma)
1390 lockdep_assert_held(&vma->vm->mutex);
1392 /* The upper portion of pages_count is the number of bindings */
1393 count = atomic_read(&vma->pages_count);
1394 count >>= I915_VMA_PAGES_BIAS;
1397 __vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
1400 int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
1401 u64 size, u64 alignment, u64 flags)
1403 struct i915_vma_work *work = NULL;
1404 struct dma_fence *moving = NULL;
1405 struct i915_vma_resource *vma_res = NULL;
1406 intel_wakeref_t wakeref = 0;
1410 assert_vma_held(vma);
1413 BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
1414 BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
1416 GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));
1418 /* First try and grab the pin without rebinding the vma */
1419 if (try_qad_pin(vma, flags))
1422 err = i915_vma_get_pages(vma);
1426 if (flags & PIN_GLOBAL)
1427 wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
1429 if (flags & vma->vm->bind_async_flags) {
1431 err = i915_vm_lock_objects(vma->vm, ww);
1435 work = i915_vma_work();
1443 err = i915_gem_object_get_moving_fence(vma->obj, &moving);
1447 dma_fence_work_chain(&work->base, moving);
1449 /* Allocate enough page directories to cover the PTEs to be used */
1450 if (vma->vm->allocate_va_range) {
1451 err = i915_vm_alloc_pt_stash(vma->vm,
1457 err = i915_vm_map_pt_stash(vma->vm, &work->stash);
1463 vma_res = i915_vma_resource_alloc();
1464 if (IS_ERR(vma_res)) {
1465 err = PTR_ERR(vma_res);
1470 * Differentiate between user/kernel vma inside the aliasing-ppgtt.
1472 * We conflate the Global GTT with the user's vma when using the
1473 * aliasing-ppgtt, but it is still vitally important to try and
1474 * keep the use cases distinct. For example, userptr objects are
1475 * not allowed inside the Global GTT as that will cause lock
1476 * inversions when we have to evict them from the mmu_notifier callbacks -
1477 * but they are allowed to be part of the user ppGTT which can never
1478 * be mapped. As such we try to give the distinct users of the same
1479 * mutex, distinct lockclasses [equivalent to how we keep i915_ggtt
1480 * and i915_ppgtt separate].
1482 * NB this may cause us to mask real lock inversions -- while the
1483 * code is safe today, lockdep may not be able to spot future
1486 err = mutex_lock_interruptible_nested(&vma->vm->mutex,
1487 !(flags & PIN_GLOBAL));
1491 /* No more allocations allowed now we hold vm->mutex */
1493 if (unlikely(i915_vma_is_closed(vma))) {
1498 bound = atomic_read(&vma->flags);
1499 if (unlikely(bound & I915_VMA_ERROR)) {
1504 if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
1505 err = -EAGAIN; /* pins are meant to be fairly temporary */
1509 if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
1510 if (!(flags & PIN_VALIDATE))
1511 __i915_vma_pin(vma);
1515 err = i915_active_acquire(&vma->active);
1519 if (!(bound & I915_VMA_BIND_MASK)) {
1520 err = i915_vma_insert(vma, ww, size, alignment, flags);
1524 if (i915_is_ggtt(vma->vm))
1525 __i915_vma_set_map_and_fenceable(vma);
1528 GEM_BUG_ON(!vma->pages);
1529 err = i915_vma_bind(vma,
1530 vma->obj->pat_index,
1531 flags, work, vma_res);
1536 /* There should only be at most 2 active bindings (user, global) */
1537 GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
1538 atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
1539 list_move_tail(&vma->vm_link, &vma->vm->bound_list);
1541 if (!(flags & PIN_VALIDATE)) {
1542 __i915_vma_pin(vma);
1543 GEM_BUG_ON(!i915_vma_is_pinned(vma));
1545 GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
1546 GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
1549 if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
1550 i915_vma_detach(vma);
1551 drm_mm_remove_node(&vma->node);
1554 i915_active_release(&vma->active);
1556 mutex_unlock(&vma->vm->mutex);
1558 i915_vma_resource_free(vma_res);
1561 dma_fence_work_commit_imm(&work->base);
1564 intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
1567 dma_fence_put(moving);
1569 i915_vma_put_pages(vma);
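/*
 * Usage sketch (illustrative only, not part of this file):
 * i915_vma_pin_ww() expects the object to be locked under a ww acquire
 * context so that evictions and object moves can back off cleanly. A
 * caller without an existing context typically wraps the pin as below
 * (PIN_USER is just an example flag choice):
 *
 *	struct i915_gem_ww_ctx ww;
 *	int err;
 *
 *	for_i915_gem_ww(&ww, err, true) {
 *		err = i915_gem_object_lock(vma->obj, &ww);
 *		if (!err)
 *			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
 *	}
 *
 * i915_ggtt_pin() below follows the same pattern for GGTT bindings when
 * called without an external ww context.
 */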
1573 static void flush_idle_contexts(struct intel_gt *gt)
1575 struct intel_engine_cs *engine;
1576 enum intel_engine_id id;
1578 for_each_engine(engine, gt, id)
1579 intel_engine_flush_barriers(engine);
1581 intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
1584 static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
1585 u32 align, unsigned int flags)
1587 struct i915_address_space *vm = vma->vm;
1588 struct intel_gt *gt;
1589 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
1593 err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);
1595 if (err != -ENOSPC) {
1597 err = i915_vma_wait_for_bind(vma);
1599 i915_vma_unpin(vma);
1604 /* Unlike i915_vma_pin, we don't take no for an answer! */
1605 list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
1606 flush_idle_contexts(gt);
1607 if (mutex_lock_interruptible(&vm->mutex) == 0) {
1609 * We pass NULL ww here, as we don't want to unbind
1610 * locked objects when called from execbuf when pinning
1611 * is removed. This would probably regress badly.
1613 i915_gem_evict_vm(vm, NULL, NULL);
1614 mutex_unlock(&vm->mutex);
1619 int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
1620 u32 align, unsigned int flags)
1622 struct i915_gem_ww_ctx _ww;
1625 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
1628 return __i915_ggtt_pin(vma, ww, align, flags);
1630 lockdep_assert_not_held(&vma->obj->base.resv->lock.base);
1632 for_i915_gem_ww(&_ww, err, true) {
1633 err = i915_gem_object_lock(vma->obj, &_ww);
1635 err = __i915_ggtt_pin(vma, &_ww, align, flags);
1642 * i915_ggtt_clear_scanout - Clear scanout flag for all objects ggtt vmas
1643 * @obj: i915 GEM object
1644 * This function clears the scanout flag on all of the object's GGTT VMAs.
1645 * The flag is set when the object is pinned for display use; this function,
1646 * which clears it on every GGTT vma, is intended to be called by the
1647 * frontbuffer tracking code when the frontbuffer is about to be released.
1649 void i915_ggtt_clear_scanout(struct drm_i915_gem_object *obj)
1651 struct i915_vma *vma;
1653 spin_lock(&obj->vma.lock);
1654 for_each_ggtt_vma(vma, obj) {
1655 i915_vma_clear_scanout(vma);
1656 vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
1658 spin_unlock(&obj->vma.lock);
1661 static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
1664 * We defer actually closing, unbinding and destroying the VMA until
1665 * the next idle point, or if the object is freed in the meantime. By
1666 * postponing the unbind, we allow for it to be resurrected by the
1667 * client, avoiding the work required to rebind the VMA. This is
1668 * advantageous for DRI, where the client/server pass objects
1669 * between themselves, temporarily opening a local VMA to the
1670 * object, and then closing it again. The same object is then reused
1671 * on the next frame (or two, depending on the depth of the swap queue)
1672 * causing us to rebind the VMA once more. This ends up being a lot
1673 * of wasted work for the steady state.
1675 GEM_BUG_ON(i915_vma_is_closed(vma));
1676 list_add(&vma->closed_link, &gt->closed_vma);
1679 void i915_vma_close(struct i915_vma *vma)
1681 struct intel_gt *gt = vma->vm->gt;
1682 unsigned long flags;
1684 if (i915_vma_is_ggtt(vma))
1687 GEM_BUG_ON(!atomic_read(&vma->open_count));
1688 if (atomic_dec_and_lock_irqsave(&vma->open_count,
1691 __vma_close(vma, gt);
1692 spin_unlock_irqrestore(&gt->closed_lock, flags);
1696 static void __i915_vma_remove_closed(struct i915_vma *vma)
1698 list_del_init(&vma->closed_link);
1701 void i915_vma_reopen(struct i915_vma *vma)
1703 struct intel_gt *gt = vma->vm->gt;
1705 spin_lock_irq(&gt->closed_lock);
1706 if (i915_vma_is_closed(vma))
1707 __i915_vma_remove_closed(vma);
1708 spin_unlock_irq(&gt->closed_lock);
1711 static void force_unbind(struct i915_vma *vma)
1713 if (!drm_mm_node_allocated(&vma->node))
1716 atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
1717 WARN_ON(__i915_vma_unbind(vma));
1718 GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
1721 static void release_references(struct i915_vma *vma, struct intel_gt *gt,
1724 struct drm_i915_gem_object *obj = vma->obj;
1726 GEM_BUG_ON(i915_vma_is_active(vma));
1728 spin_lock(&obj->vma.lock);
1729 list_del(&vma->obj_link);
1730 if (!RB_EMPTY_NODE(&vma->obj_node))
1731 rb_erase(&vma->obj_node, &obj->vma.tree);
1733 spin_unlock(&obj->vma.lock);
1735 spin_lock_irq(&gt->closed_lock);
1736 __i915_vma_remove_closed(vma);
1737 spin_unlock_irq(&gt->closed_lock);
1740 i915_vm_resv_put(vma->vm);
1742 /* Wait for async active retire */
1743 i915_active_wait(&vma->active);
1744 i915_active_fini(&vma->active);
1745 GEM_WARN_ON(vma->resource);
1750 * i915_vma_destroy_locked - Remove all weak reference to the vma and put
1751 * the initial reference.
1753 * This function should be called when it's decided the vma isn't needed
1754 * anymore. The caller must ensure that it doesn't race with another lookup
1755 * plus destroy, typically by taking an appropriate reference.
1757 * Current callsites are
1758 * - __i915_gem_object_pages_fini()
1759 * - __i915_vm_close() - Blocks the above function by taking a reference on
1761 * - __i915_vma_parked() - Blocks the above functions by taking a reference
1762 * on the vm and a reference on the object. Also takes the object lock so
1763 * destruction from __i915_vma_parked() can be blocked by holding the
1764 * object lock. Since the object lock is only allowed from within i915 with
1765 * an object refcount, holding the object lock also implicitly blocks the
1766 * vma freeing from __i915_gem_object_pages_fini().
1768 * Because of locks taken during destruction, a vma is also guaranteed to
1769 * stay alive while the following locks are held if it was looked up while
1770 * holding one of the locks:
1775 void i915_vma_destroy_locked(struct i915_vma *vma)
1777 lockdep_assert_held(&vma->vm->mutex);
1780 list_del_init(&vma->vm_link);
1781 release_references(vma, vma->vm->gt, false);
1784 void i915_vma_destroy(struct i915_vma *vma)
1786 struct intel_gt *gt;
1789 mutex_lock(&vma->vm->mutex);
1791 list_del_init(&vma->vm_link);
1792 vm_ddestroy = vma->vm_ddestroy;
1793 vma->vm_ddestroy = false;
1795 /* vma->vm may be freed when releasing vma->vm->mutex. */
1797 mutex_unlock(&vma->vm->mutex);
1798 release_references(vma, gt, vm_ddestroy);
1801 void i915_vma_parked(struct intel_gt *gt)
1803 struct i915_vma *vma, *next;
1806 spin_lock_irq(&gt->closed_lock);
1807 list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
1808 struct drm_i915_gem_object *obj = vma->obj;
1809 struct i915_address_space *vm = vma->vm;
1811 /* XXX All to avoid keeping a reference on i915_vma itself */
1813 if (!kref_get_unless_zero(&obj->base.refcount))
1816 if (!i915_vm_tryget(vm)) {
1817 i915_gem_object_put(obj);
1821 list_move(&vma->closed_link, &closed);
1823 spin_unlock_irq(&gt->closed_lock);
1825 /* As the GT is held idle, no vma can be reopened as we destroy them */
1826 list_for_each_entry_safe(vma, next, &closed, closed_link) {
1827 struct drm_i915_gem_object *obj = vma->obj;
1828 struct i915_address_space *vm = vma->vm;
1830 if (i915_gem_object_trylock(obj, NULL)) {
1831 INIT_LIST_HEAD(&vma->closed_link);
1832 i915_vma_destroy(vma);
1833 i915_gem_object_unlock(obj);
1836 spin_lock_irq(&gt->closed_lock);
1837 list_add(&vma->closed_link, &gt->closed_vma);
1838 spin_unlock_irq(&gt->closed_lock);
1841 i915_gem_object_put(obj);
1846 static void __i915_vma_iounmap(struct i915_vma *vma)
1848 GEM_BUG_ON(i915_vma_is_pinned(vma));
1850 if (vma->iomap == NULL)
1853 if (page_unmask_bits(vma->iomap))
1854 __i915_gem_object_release_map(vma->obj);
1856 io_mapping_unmap(vma->iomap);
1860 void i915_vma_revoke_mmap(struct i915_vma *vma)
1862 struct drm_vma_offset_node *node;
1865 if (!i915_vma_has_userfault(vma))
1868 GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
1869 GEM_BUG_ON(!vma->obj->userfault_count);
1871 node = &vma->mmo->vma_node;
1872 vma_offset = vma->gtt_view.partial.offset << PAGE_SHIFT;
1873 unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
1874 drm_vma_node_offset_addr(node) + vma_offset,
1878 i915_vma_unset_userfault(vma);
1879 if (!--vma->obj->userfault_count)
1880 list_del(&vma->obj->userfault_link);
1884 __i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
1886 return __i915_request_await_exclusive(rq, &vma->active);
1889 static int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
1893 /* Wait for the vma to be bound before we start! */
1894 err = __i915_request_await_bind(rq, vma);
1898 return i915_active_add_request(&vma->active, rq);
1901 int _i915_vma_move_to_active(struct i915_vma *vma,
1902 struct i915_request *rq,
1903 struct dma_fence *fence,
1906 struct drm_i915_gem_object *obj = vma->obj;
1909 assert_object_held(obj);
1911 GEM_BUG_ON(!vma->pages);
1913 if (!(flags & __EXEC_OBJECT_NO_REQUEST_AWAIT)) {
1914 err = i915_request_await_object(rq, vma->obj, flags & EXEC_OBJECT_WRITE);
1918 err = __i915_vma_move_to_active(vma, rq);
1923 * Reserve fence slots early to prevent an allocation after preparing
1924 * the workload and associating fences with dma_resv.
1926 if (fence && !(flags & __EXEC_OBJECT_NO_RESERVE)) {
1927 struct dma_fence *curr;
1930 dma_fence_array_for_each(curr, idx, fence)
1932 err = dma_resv_reserve_fences(vma->obj->base.resv, idx);
1937 if (flags & EXEC_OBJECT_WRITE) {
1938 struct intel_frontbuffer *front;
1940 front = i915_gem_object_get_frontbuffer(obj);
1941 if (unlikely(front)) {
1942 if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
1943 i915_active_add_request(&front->write, rq);
1944 intel_frontbuffer_put(front);
1949 struct dma_fence *curr;
1950 enum dma_resv_usage usage;
1953 if (flags & EXEC_OBJECT_WRITE) {
1954 usage = DMA_RESV_USAGE_WRITE;
1955 obj->write_domain = I915_GEM_DOMAIN_RENDER;
1956 obj->read_domains = 0;
1958 usage = DMA_RESV_USAGE_READ;
1959 obj->write_domain = 0;
1962 dma_fence_array_for_each(curr, idx, fence)
1963 dma_resv_add_fence(vma->obj->base.resv, curr, usage);
1966 if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
1967 i915_active_add_request(&vma->fence->active, rq);
1969 obj->read_domains |= I915_GEM_GPU_DOMAINS;
1970 obj->mm.dirty = true;
1972 GEM_BUG_ON(!i915_vma_is_active(vma));
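/*
 * Usage sketch (illustrative only, not part of this file): request
 * construction typically pins the vma, publishes the access with
 * i915_vma_move_to_active() so that the binding and the object's dma_resv
 * track the request, and then drops its temporary pin. EXEC_OBJECT_WRITE
 * below is an example flag for a GPU write:
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_USER);
 *	if (err)
 *		return err;
 *
 *	i915_vma_lock(vma);
 *	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 *	i915_vma_unlock(vma);
 *
 *	i915_vma_unpin(vma);
 *
 * The active tracking then keeps the binding alive until the request is
 * retired.
 */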
1976 struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
1978 struct i915_vma_resource *vma_res = vma->resource;
1979 struct dma_fence *unbind_fence;
1981 GEM_BUG_ON(i915_vma_is_pinned(vma));
1982 assert_vma_held_evict(vma);
1984 if (i915_vma_is_map_and_fenceable(vma)) {
1985 /* Force a pagefault for domain tracking on next user access */
1986 i915_vma_revoke_mmap(vma);
1989 * Check that we have flushed all writes through the GGTT
1990 * before the unbind; otherwise, due to the non-strict nature of those
1991 * indirect writes, they may end up referencing the GGTT PTE after the unbind.
1994 * Note that we may be concurrently poking at the GGTT_WRITE
1995 * bit from set-domain, as we mark all GGTT vma associated
1996 * with an object. We know this is for another vma, as we
1997 * are currently unbinding this one -- so if this vma will be
1998 * reused, it will be refaulted and have its dirty bit set
1999 * before the next write.
2001 i915_vma_flush_writes(vma);
2003 /* release the fence reg _after_ flushing */
2004 i915_vma_revoke_fence(vma);
2006 clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
2009 __i915_vma_iounmap(vma);
2011 GEM_BUG_ON(vma->fence);
2012 GEM_BUG_ON(i915_vma_has_userfault(vma));
2014 /* Object backend must be async capable. */
2015 GEM_WARN_ON(async && !vma->resource->bi.pages_rsgt);
2017 /* If vm is not open, unbind is a nop. */
2018 vma_res->needs_wakeref = i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND) &&
2019 kref_read(&vma->vm->ref);
2020 vma_res->skip_pte_rewrite = !kref_read(&vma->vm->ref) ||
2021 vma->vm->skip_pte_rewrite;
2022 trace_i915_vma_unbind(vma);
2025 unbind_fence = i915_vma_resource_unbind(vma_res,
2028 unbind_fence = i915_vma_resource_unbind(vma_res, NULL);
2030 vma->resource = NULL;
2032 atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
2035 i915_vma_detach(vma);
2039 dma_fence_wait(unbind_fence, false);
2040 dma_fence_put(unbind_fence);
2041 unbind_fence = NULL;
2043 vma_invalidate_tlb(vma->vm, vma->obj->mm.tlb);
2047 * Binding itself may not have completed until the unbind fence signals,
2048 * so don't drop the pages until that happens, unless the resource is
2052 vma_unbind_pages(vma);
2053 return unbind_fence;
2056 int __i915_vma_unbind(struct i915_vma *vma)
2060 lockdep_assert_held(&vma->vm->mutex);
2061 assert_vma_held_evict(vma);
2063 if (!drm_mm_node_allocated(&vma->node))
2066 if (i915_vma_is_pinned(vma)) {
2067 vma_print_allocator(vma, "is pinned");
2072 * After confirming that no one else is pinning this vma, wait for
2073 * any laggards who may have crept in during the wait (through
2074 * a residual pin skipping the vm->mutex) to complete.
2076 ret = i915_vma_sync(vma);
2080 GEM_BUG_ON(i915_vma_is_active(vma));
2081 __i915_vma_evict(vma, false);
2083 drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
2087 static struct dma_fence *__i915_vma_unbind_async(struct i915_vma *vma)
2089 struct dma_fence *fence;
2091 lockdep_assert_held(&vma->vm->mutex);
2093 if (!drm_mm_node_allocated(&vma->node))
2096 if (i915_vma_is_pinned(vma) ||
2097 &vma->obj->mm.rsgt->table != vma->resource->bi.pages)
2098 return ERR_PTR(-EAGAIN);
2101 * We probably need to replace this with awaiting the fences of the
2102 * object's dma_resv when the vma active goes away. When doing that
2103 * we need to be careful to not add the vma_resource unbind fence
2104 * immediately to the object's dma_resv, because then unbinding
2105 * the next vma from the object, in case there are many, will
2106 * actually await the unbinding of the previous vmas, which is not needed.
2109 if (i915_sw_fence_await_active(&vma->resource->chain, &vma->active,
2110 I915_ACTIVE_AWAIT_EXCL |
2111 I915_ACTIVE_AWAIT_ACTIVE) < 0) {
2112 return ERR_PTR(-EBUSY);
2115 fence = __i915_vma_evict(vma, true);
2117 drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
2122 int i915_vma_unbind(struct i915_vma *vma)
2124 struct i915_address_space *vm = vma->vm;
2125 intel_wakeref_t wakeref = 0;
2128 assert_object_held_shared(vma->obj);
2130 /* Optimistic wait before taking the mutex */
2131 err = i915_vma_sync(vma);
2135 if (!drm_mm_node_allocated(&vma->node))
2138 if (i915_vma_is_pinned(vma)) {
2139 vma_print_allocator(vma, "is pinned");
2143 if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
2144 /* XXX not always required: nop_clear_range */
2145 wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
2147 err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
2151 err = __i915_vma_unbind(vma);
2152 mutex_unlock(&vm->mutex);
2156 intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
2160 int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
2162 struct drm_i915_gem_object *obj = vma->obj;
2163 struct i915_address_space *vm = vma->vm;
2164 intel_wakeref_t wakeref = 0;
2165 struct dma_fence *fence;
2169 * We need the dma-resv lock since we add the
2170 * unbind fence to the dma-resv object.
2172 assert_object_held(obj);
2174 if (!drm_mm_node_allocated(&vma->node))
2177 if (i915_vma_is_pinned(vma)) {
2178 vma_print_allocator(vma, "is pinned");
2185 err = dma_resv_reserve_fences(obj->base.resv, 2);
2190 * It would be great if we could grab this wakeref from the
2191 * async unbind work if needed, but we can't because it uses
2192 * kmalloc and it's in the dma-fence signalling critical path.
2194 if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
2195 wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
2197 if (trylock_vm && !mutex_trylock(&vm->mutex)) {
2200 } else if (!trylock_vm) {
2201 err = mutex_lock_interruptible_nested(&vm->mutex, !wakeref);
2206 fence = __i915_vma_unbind_async(vma);
2207 mutex_unlock(&vm->mutex);
2208 if (IS_ERR_OR_NULL(fence)) {
2209 err = PTR_ERR_OR_ZERO(fence);
2213 dma_resv_add_fence(obj->base.resv, fence, DMA_RESV_USAGE_READ);
2214 dma_fence_put(fence);
2218 intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
2222 int i915_vma_unbind_unlocked(struct i915_vma *vma)
2226 i915_gem_object_lock(vma->obj, NULL);
2227 err = i915_vma_unbind(vma);
2228 i915_gem_object_unlock(vma->obj);
2233 struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
2235 i915_gem_object_make_unshrinkable(vma->obj);
2239 void i915_vma_make_shrinkable(struct i915_vma *vma)
2241 i915_gem_object_make_shrinkable(vma->obj);
2244 void i915_vma_make_purgeable(struct i915_vma *vma)
2246 i915_gem_object_make_purgeable(vma->obj);
2249 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2250 #include "selftests/i915_vma.c"
2253 void i915_vma_module_exit(void)
2255 kmem_cache_destroy(slab_vmas);
2258 int __init i915_vma_module_init(void)
2260 slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);