drivers/gpu/drm/i915/i915_vma.c
1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24
25 #include <linux/sched/mm.h>
26 #include <linux/dma-fence-array.h>
27 #include <drm/drm_gem.h>
28
29 #include "display/intel_display.h"
30 #include "display/intel_frontbuffer.h"
31 #include "gem/i915_gem_lmem.h"
32 #include "gem/i915_gem_tiling.h"
33 #include "gt/intel_engine.h"
34 #include "gt/intel_engine_heartbeat.h"
35 #include "gt/intel_gt.h"
36 #include "gt/intel_gt_requests.h"
37
38 #include "i915_drv.h"
39 #include "i915_gem_evict.h"
40 #include "i915_sw_fence_work.h"
41 #include "i915_trace.h"
42 #include "i915_vma.h"
43 #include "i915_vma_resource.h"
44
45 static inline void assert_vma_held_evict(const struct i915_vma *vma)
46 {
47         /*
48          * We may be forced to unbind when the vm is dead, to clean it up.
49          * This is the only exception to the requirement of the object lock
50          * being held.
51          */
52         if (kref_read(&vma->vm->ref))
53                 assert_object_held_shared(vma->obj);
54 }
55
56 static struct kmem_cache *slab_vmas;
57
58 static struct i915_vma *i915_vma_alloc(void)
59 {
60         return kmem_cache_zalloc(slab_vmas, GFP_KERNEL);
61 }
62
63 static void i915_vma_free(struct i915_vma *vma)
64 {
65         return kmem_cache_free(slab_vmas, vma);
66 }
67
68 #if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)
69
70 #include <linux/stackdepot.h>
71
72 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
73 {
74         char buf[512];
75
76         if (!vma->node.stack) {
77                 drm_dbg(&to_i915(vma->obj->base.dev)->drm,
78                         "vma.node [%08llx + %08llx] %s: unknown owner\n",
79                         vma->node.start, vma->node.size, reason);
80                 return;
81         }
82
83         stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0);
84         drm_dbg(&to_i915(vma->obj->base.dev)->drm,
85                 "vma.node [%08llx + %08llx] %s: inserted at %s\n",
86                 vma->node.start, vma->node.size, reason, buf);
87 }
88
89 #else
90
91 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
92 {
93 }
94
95 #endif
96
97 static inline struct i915_vma *active_to_vma(struct i915_active *ref)
98 {
99         return container_of(ref, typeof(struct i915_vma), active);
100 }
101
102 static int __i915_vma_active(struct i915_active *ref)
103 {
104         return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
105 }
106
107 static void __i915_vma_retire(struct i915_active *ref)
108 {
109         i915_vma_put(active_to_vma(ref));
110 }
111
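/*
 * Allocate and initialise a new vma for @obj in @vm and insert it into the
 * object's vma rbtree and list under vm->mutex and obj->vma.lock. If a
 * racing thread has already inserted an equivalent vma, that older instance
 * is returned and the freshly allocated one is freed.
 */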
112 static struct i915_vma *
113 vma_create(struct drm_i915_gem_object *obj,
114            struct i915_address_space *vm,
115            const struct i915_gtt_view *view)
116 {
117         struct i915_vma *pos = ERR_PTR(-E2BIG);
118         struct i915_vma *vma;
119         struct rb_node *rb, **p;
120         int err;
121
122         /* The aliasing_ppgtt should never be used directly! */
123         GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);
124
125         vma = i915_vma_alloc();
126         if (vma == NULL)
127                 return ERR_PTR(-ENOMEM);
128
129         vma->ops = &vm->vma_ops;
130         vma->obj = obj;
131         vma->size = obj->base.size;
132         vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
133
134         i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire, 0);
135
136         /* Declare ourselves safe for use inside shrinkers */
137         if (IS_ENABLED(CONFIG_LOCKDEP)) {
138                 fs_reclaim_acquire(GFP_KERNEL);
139                 might_lock(&vma->active.mutex);
140                 fs_reclaim_release(GFP_KERNEL);
141         }
142
143         INIT_LIST_HEAD(&vma->closed_link);
144         INIT_LIST_HEAD(&vma->obj_link);
145         RB_CLEAR_NODE(&vma->obj_node);
146
147         if (view && view->type != I915_GTT_VIEW_NORMAL) {
148                 vma->gtt_view = *view;
149                 if (view->type == I915_GTT_VIEW_PARTIAL) {
150                         GEM_BUG_ON(range_overflows_t(u64,
151                                                      view->partial.offset,
152                                                      view->partial.size,
153                                                      obj->base.size >> PAGE_SHIFT));
154                         vma->size = view->partial.size;
155                         vma->size <<= PAGE_SHIFT;
156                         GEM_BUG_ON(vma->size > obj->base.size);
157                 } else if (view->type == I915_GTT_VIEW_ROTATED) {
158                         vma->size = intel_rotation_info_size(&view->rotated);
159                         vma->size <<= PAGE_SHIFT;
160                 } else if (view->type == I915_GTT_VIEW_REMAPPED) {
161                         vma->size = intel_remapped_info_size(&view->remapped);
162                         vma->size <<= PAGE_SHIFT;
163                 }
164         }
165
166         if (unlikely(vma->size > vm->total))
167                 goto err_vma;
168
169         GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
170
171         err = mutex_lock_interruptible(&vm->mutex);
172         if (err) {
173                 pos = ERR_PTR(err);
174                 goto err_vma;
175         }
176
177         vma->vm = vm;
178         list_add_tail(&vma->vm_link, &vm->unbound_list);
179
180         spin_lock(&obj->vma.lock);
181         if (i915_is_ggtt(vm)) {
182                 if (unlikely(overflows_type(vma->size, u32)))
183                         goto err_unlock;
184
185                 vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
186                                                       i915_gem_object_get_tiling(obj),
187                                                       i915_gem_object_get_stride(obj));
188                 if (unlikely(vma->fence_size < vma->size || /* overflow */
189                              vma->fence_size > vm->total))
190                         goto err_unlock;
191
192                 GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
193
194                 vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
195                                                                 i915_gem_object_get_tiling(obj),
196                                                                 i915_gem_object_get_stride(obj));
197                 GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
198
199                 __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
200         }
201
202         rb = NULL;
203         p = &obj->vma.tree.rb_node;
204         while (*p) {
205                 long cmp;
206
207                 rb = *p;
208                 pos = rb_entry(rb, struct i915_vma, obj_node);
209
210                 /*
211                  * If the view already exists in the tree, another thread
212                  * already created a matching vma, so return the older instance
213                  * and dispose of ours.
214                  */
215                 cmp = i915_vma_compare(pos, vm, view);
216                 if (cmp < 0)
217                         p = &rb->rb_right;
218                 else if (cmp > 0)
219                         p = &rb->rb_left;
220                 else
221                         goto err_unlock;
222         }
223         rb_link_node(&vma->obj_node, rb, p);
224         rb_insert_color(&vma->obj_node, &obj->vma.tree);
225
226         if (i915_vma_is_ggtt(vma))
227                 /*
228                  * We put the GGTT vma at the start of the vma-list, followed
229          * by the ppGTT vma. This allows us to break early when
230                  * iterating over only the GGTT vma for an object, see
231                  * for_each_ggtt_vma()
232                  */
233                 list_add(&vma->obj_link, &obj->vma.list);
234         else
235                 list_add_tail(&vma->obj_link, &obj->vma.list);
236
237         spin_unlock(&obj->vma.lock);
238         mutex_unlock(&vm->mutex);
239
240         return vma;
241
242 err_unlock:
243         spin_unlock(&obj->vma.lock);
244         list_del_init(&vma->vm_link);
245         mutex_unlock(&vm->mutex);
246 err_vma:
247         i915_vma_free(vma);
248         return pos;
249 }
250
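/*
 * Search the object's vma rbtree for a vma matching both @vm and @view.
 * Returns NULL if none exists. The caller must hold obj->vma.lock to keep
 * the tree stable while searching.
 */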
251 static struct i915_vma *
252 i915_vma_lookup(struct drm_i915_gem_object *obj,
253            struct i915_address_space *vm,
254            const struct i915_gtt_view *view)
255 {
256         struct rb_node *rb;
257
258         rb = obj->vma.tree.rb_node;
259         while (rb) {
260                 struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
261                 long cmp;
262
263                 cmp = i915_vma_compare(vma, vm, view);
264                 if (cmp == 0)
265                         return vma;
266
267                 if (cmp < 0)
268                         rb = rb->rb_right;
269                 else
270                         rb = rb->rb_left;
271         }
272
273         return NULL;
274 }
275
276 /**
277  * i915_vma_instance - return the singleton instance of the VMA
278  * @obj: parent &struct drm_i915_gem_object to be mapped
279  * @vm: address space in which the mapping is located
280  * @view: additional mapping requirements
281  *
282  * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
283  * the same @view characteristics. If a match is not found, one is created.
284  * Once created, the VMA is kept until either the object is freed, or the
285  * address space is closed.
286  *
287  * Returns the vma, or an error pointer.
288  */
289 struct i915_vma *
290 i915_vma_instance(struct drm_i915_gem_object *obj,
291                   struct i915_address_space *vm,
292                   const struct i915_gtt_view *view)
293 {
294         struct i915_vma *vma;
295
296         GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));
297         GEM_BUG_ON(!kref_read(&vm->ref));
298
299         spin_lock(&obj->vma.lock);
300         vma = i915_vma_lookup(obj, vm, view);
301         spin_unlock(&obj->vma.lock);
302
303         /* vma_create() will resolve the race if another creates the vma */
304         if (unlikely(!vma))
305                 vma = vma_create(obj, vm, view);
306
307         GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
308         return vma;
309 }
310
311 struct i915_vma_work {
312         struct dma_fence_work base;
313         struct i915_address_space *vm;
314         struct i915_vm_pt_stash stash;
315         struct i915_vma_resource *vma_res;
316         struct drm_i915_gem_object *obj;
317         struct i915_sw_dma_fence_cb cb;
318         enum i915_cache_level cache_level;
319         unsigned int flags;
320 };
321
322 static void __vma_bind(struct dma_fence_work *work)
323 {
324         struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
325         struct i915_vma_resource *vma_res = vw->vma_res;
326
327         /*
328          * We are about to bind the object, which must mean we have already
329          * signaled the work to potentially clear/move the pages underneath. If
330          * something went wrong at that stage then the object should have
331          * unknown_state set, in which case we need to skip the bind.
332          */
333         if (i915_gem_object_has_unknown_state(vw->obj))
334                 return;
335
336         vma_res->ops->bind_vma(vma_res->vm, &vw->stash,
337                                vma_res, vw->cache_level, vw->flags);
338 }
339
340 static void __vma_release(struct dma_fence_work *work)
341 {
342         struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
343
344         if (vw->obj)
345                 i915_gem_object_put(vw->obj);
346
347         i915_vm_free_pt_stash(vw->vm, &vw->stash);
348         if (vw->vma_res)
349                 i915_vma_resource_put(vw->vma_res);
350 }
351
352 static const struct dma_fence_work_ops bind_ops = {
353         .name = "bind",
354         .work = __vma_bind,
355         .release = __vma_release,
356 };
357
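/*
 * Allocate a dma-fence worker for an asynchronous bind. The worker is
 * created with dma.error set to -EAGAIN so that it stays inert until
 * i915_vma_bind() arms it by clearing the error.
 */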
358 struct i915_vma_work *i915_vma_work(void)
359 {
360         struct i915_vma_work *vw;
361
362         vw = kzalloc(sizeof(*vw), GFP_KERNEL);
363         if (!vw)
364                 return NULL;
365
366         dma_fence_work_init(&vw->base, &bind_ops);
367         vw->base.dma.error = -EAGAIN; /* disable the worker by default */
368
369         return vw;
370 }
371
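/*
 * Wait, interruptibly, for any asynchronous bind tracked as the vma's
 * exclusive fence to complete. Returns 0 immediately if no bind is pending.
 */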
372 int i915_vma_wait_for_bind(struct i915_vma *vma)
373 {
374         int err = 0;
375
376         if (rcu_access_pointer(vma->active.excl.fence)) {
377                 struct dma_fence *fence;
378
379                 rcu_read_lock();
380                 fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
381                 rcu_read_unlock();
382                 if (fence) {
383                         err = dma_fence_wait(fence, true);
384                         dma_fence_put(fence);
385                 }
386         }
387
388         return err;
389 }
390
391 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
392 static int i915_vma_verify_bind_complete(struct i915_vma *vma)
393 {
394         struct dma_fence *fence = i915_active_fence_get(&vma->active.excl);
395         int err;
396
397         if (!fence)
398                 return 0;
399
400         if (dma_fence_is_signaled(fence))
401                 err = fence->error;
402         else
403                 err = -EBUSY;
404
405         dma_fence_put(fence);
406
407         return err;
408 }
409 #else
410 #define i915_vma_verify_bind_complete(_vma) 0
411 #endif
412
413 I915_SELFTEST_EXPORT void
414 i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res,
415                                 struct i915_vma *vma)
416 {
417         struct drm_i915_gem_object *obj = vma->obj;
418
419         i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes,
420                                obj->mm.rsgt, i915_gem_object_is_readonly(obj),
421                                i915_gem_object_is_lmem(obj), obj->mm.region,
422                                vma->ops, vma->private, __i915_vma_offset(vma),
423                                __i915_vma_size(vma), vma->size, vma->guard);
424 }
425
426 /**
427  * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
428  * @vma: VMA to map
429  * @cache_level: mapping cache level
430  * @flags: flags like global or local mapping
431  * @work: preallocated worker for allocating and binding the PTE
432  * @vma_res: pointer to a preallocated vma resource. The resource is either
433  * consumed or freed.
434  *
435  * DMA addresses are taken from the scatter-gather table of this object (or of
436  * this VMA in case of non-default GGTT views) and PTE entries set up.
437  * Note that DMA addresses are also the only part of the SG table we care about.
438  */
439 int i915_vma_bind(struct i915_vma *vma,
440                   enum i915_cache_level cache_level,
441                   u32 flags,
442                   struct i915_vma_work *work,
443                   struct i915_vma_resource *vma_res)
444 {
445         u32 bind_flags;
446         u32 vma_flags;
447         int ret;
448
449         lockdep_assert_held(&vma->vm->mutex);
450         GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
451         GEM_BUG_ON(vma->size > i915_vma_size(vma));
452
453         if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
454                                               vma->node.size,
455                                               vma->vm->total))) {
456                 i915_vma_resource_free(vma_res);
457                 return -ENODEV;
458         }
459
460         if (GEM_DEBUG_WARN_ON(!flags)) {
461                 i915_vma_resource_free(vma_res);
462                 return -EINVAL;
463         }
464
465         bind_flags = flags;
466         bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
467
468         vma_flags = atomic_read(&vma->flags);
469         vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
470
471         bind_flags &= ~vma_flags;
472         if (bind_flags == 0) {
473                 i915_vma_resource_free(vma_res);
474                 return 0;
475         }
476
477         GEM_BUG_ON(!atomic_read(&vma->pages_count));
478
479         /* Wait for or await async unbinds touching our range */
480         if (work && bind_flags & vma->vm->bind_async_flags)
481                 ret = i915_vma_resource_bind_dep_await(vma->vm,
482                                                        &work->base.chain,
483                                                        vma->node.start,
484                                                        vma->node.size,
485                                                        true,
486                                                        GFP_NOWAIT |
487                                                        __GFP_RETRY_MAYFAIL |
488                                                        __GFP_NOWARN);
489         else
490                 ret = i915_vma_resource_bind_dep_sync(vma->vm, vma->node.start,
491                                                       vma->node.size, true);
492         if (ret) {
493                 i915_vma_resource_free(vma_res);
494                 return ret;
495         }
496
497         if (vma->resource || !vma_res) {
498                 /* Rebinding with an additional I915_VMA_*_BIND */
499                 GEM_WARN_ON(!vma_flags);
500                 i915_vma_resource_free(vma_res);
501         } else {
502                 i915_vma_resource_init_from_vma(vma_res, vma);
503                 vma->resource = vma_res;
504         }
505         trace_i915_vma_bind(vma, bind_flags);
506         if (work && bind_flags & vma->vm->bind_async_flags) {
507                 struct dma_fence *prev;
508
509                 work->vma_res = i915_vma_resource_get(vma->resource);
510                 work->cache_level = cache_level;
511                 work->flags = bind_flags;
512
513                 /*
514                  * Note we only want to chain up to the migration fence on
515                  * the pages (not the object itself). As we don't track that
516                  * yet, we have to use the exclusive fence instead.
517                  *
518                  * Also note that we do not want to track the async vma as
519                  * part of the obj->resv->excl_fence as it only affects
520                  * execution and not content or object's backing store lifetime.
521                  */
522                 prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
523                 if (prev) {
524                         __i915_sw_fence_await_dma_fence(&work->base.chain,
525                                                         prev,
526                                                         &work->cb);
527                         dma_fence_put(prev);
528                 }
529
530                 work->base.dma.error = 0; /* enable the queue_work() */
531                 work->obj = i915_gem_object_get(vma->obj);
532         } else {
533                 ret = i915_gem_object_wait_moving_fence(vma->obj, true);
534                 if (ret) {
535                         i915_vma_resource_free(vma->resource);
536                         vma->resource = NULL;
537
538                         return ret;
539                 }
540                 vma->ops->bind_vma(vma->vm, NULL, vma->resource, cache_level,
541                                    bind_flags);
542         }
543
544         atomic_or(bind_flags, &vma->flags);
545         return 0;
546 }
547
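/*
 * Map a GGTT-bound vma for CPU access: directly for local memory, through
 * the mappable aperture where possible, or via a WC pin of the object's
 * pages otherwise. The mapping is cached in vma->iomap and the vma is
 * pinned together with a fence; the mapping is only torn down on
 * unbind/evict.
 */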
548 void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
549 {
550         void __iomem *ptr;
551         int err;
552
553         if (WARN_ON_ONCE(vma->obj->flags & I915_BO_ALLOC_GPU_ONLY))
554                 return IOMEM_ERR_PTR(-EINVAL);
555
556         GEM_BUG_ON(!i915_vma_is_ggtt(vma));
557         GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
558         GEM_BUG_ON(i915_vma_verify_bind_complete(vma));
559
560         ptr = READ_ONCE(vma->iomap);
561         if (ptr == NULL) {
562                 /*
563                  * TODO: consider just using i915_gem_object_pin_map() for lmem
564                  * instead, which already supports mapping non-contiguous chunks
565                  * of pages, that way we can also drop the
566                  * I915_BO_ALLOC_CONTIGUOUS when allocating the object.
567                  */
568                 if (i915_gem_object_is_lmem(vma->obj)) {
569                         ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
570                                                           vma->obj->base.size);
571                 } else if (i915_vma_is_map_and_fenceable(vma)) {
572                         ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
573                                                 i915_vma_offset(vma),
574                                                 i915_vma_size(vma));
575                 } else {
576                         ptr = (void __iomem *)
577                                 i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
578                         if (IS_ERR(ptr)) {
579                                 err = PTR_ERR(ptr);
580                                 goto err;
581                         }
582                         ptr = page_pack_bits(ptr, 1);
583                 }
584
585                 if (ptr == NULL) {
586                         err = -ENOMEM;
587                         goto err;
588                 }
589
590                 if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
591                         if (page_unmask_bits(ptr))
592                                 __i915_gem_object_release_map(vma->obj);
593                         else
594                                 io_mapping_unmap(ptr);
595                         ptr = vma->iomap;
596                 }
597         }
598
599         __i915_vma_pin(vma);
600
601         err = i915_vma_pin_fence(vma);
602         if (err)
603                 goto err_unpin;
604
605         i915_vma_set_ggtt_write(vma);
606
607         /* NB Access through the GTT requires the device to be awake. */
608         return page_mask_bits(ptr);
609
610 err_unpin:
611         __i915_vma_unpin(vma);
612 err:
613         return IOMEM_ERR_PTR(err);
614 }
615
616 void i915_vma_flush_writes(struct i915_vma *vma)
617 {
618         if (i915_vma_unset_ggtt_write(vma))
619                 intel_gt_flush_ggtt_writes(vma->vm->gt);
620 }
621
622 void i915_vma_unpin_iomap(struct i915_vma *vma)
623 {
624         GEM_BUG_ON(vma->iomap == NULL);
625
626         /* XXX We keep the mapping until __i915_vma_unbind()/evict() */
627
628         i915_vma_flush_writes(vma);
629
630         i915_vma_unpin_fence(vma);
631         i915_vma_unpin(vma);
632 }
633
634 void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
635 {
636         struct i915_vma *vma;
637         struct drm_i915_gem_object *obj;
638
639         vma = fetch_and_zero(p_vma);
640         if (!vma)
641                 return;
642
643         obj = vma->obj;
644         GEM_BUG_ON(!obj);
645
646         i915_vma_unpin(vma);
647
648         if (flags & I915_VMA_RELEASE_MAP)
649                 i915_gem_object_unpin_map(obj);
650
651         i915_gem_object_put(obj);
652 }
653
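/*
 * Check whether the vma's current placement satisfies the requested size,
 * alignment and PIN_* placement constraints; if not, it is "misplaced" and
 * must be unbound and rebound before it can be used with these parameters.
 */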
654 bool i915_vma_misplaced(const struct i915_vma *vma,
655                         u64 size, u64 alignment, u64 flags)
656 {
657         if (!drm_mm_node_allocated(&vma->node))
658                 return false;
659
660         if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
661                 return true;
662
663         if (i915_vma_size(vma) < size)
664                 return true;
665
666         GEM_BUG_ON(alignment && !is_power_of_2(alignment));
667         if (alignment && !IS_ALIGNED(i915_vma_offset(vma), alignment))
668                 return true;
669
670         if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
671                 return true;
672
673         if (flags & PIN_OFFSET_BIAS &&
674             i915_vma_offset(vma) < (flags & PIN_OFFSET_MASK))
675                 return true;
676
677         if (flags & PIN_OFFSET_FIXED &&
678             i915_vma_offset(vma) != (flags & PIN_OFFSET_MASK))
679                 return true;
680
681         if (flags & PIN_OFFSET_GUARD &&
682             vma->guard < (flags & PIN_OFFSET_MASK))
683                 return true;
684
685         return false;
686 }
687
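/*
 * Recompute the I915_VMA_CAN_FENCE flag for a GGTT vma: set it only when
 * the node is large enough and suitably aligned for a fence register and
 * lies entirely within the mappable aperture.
 */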
688 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
689 {
690         bool mappable, fenceable;
691
692         GEM_BUG_ON(!i915_vma_is_ggtt(vma));
693         GEM_BUG_ON(!vma->fence_size);
694
695         fenceable = (i915_vma_size(vma) >= vma->fence_size &&
696                      IS_ALIGNED(i915_vma_offset(vma), vma->fence_alignment));
697
698         mappable = i915_ggtt_offset(vma) + vma->fence_size <=
699                    i915_vm_to_ggtt(vma->vm)->mappable_end;
700
701         if (mappable && fenceable)
702                 set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
703         else
704                 clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
705 }
706
707 bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
708 {
709         struct drm_mm_node *node = &vma->node;
710         struct drm_mm_node *other;
711
712         /*
713          * On some machines we have to be careful when putting differing types
714          * of snoopable memory together to avoid the prefetcher crossing memory
715          * domains and dying. During vm initialisation, we decide whether or not
716          * these constraints apply and set the drm_mm.color_adjust
717          * appropriately.
718          */
719         if (!i915_vm_has_cache_coloring(vma->vm))
720                 return true;
721
722         /* Only valid to be called on an already inserted vma */
723         GEM_BUG_ON(!drm_mm_node_allocated(node));
724         GEM_BUG_ON(list_empty(&node->node_list));
725
726         other = list_prev_entry(node, node_list);
727         if (i915_node_color_differs(other, color) &&
728             !drm_mm_hole_follows(other))
729                 return false;
730
731         other = list_next_entry(node, node_list);
732         if (i915_node_color_differs(other, color) &&
733             !drm_mm_hole_follows(node))
734                 return false;
735
736         return true;
737 }
738
739 /**
740  * i915_vma_insert - finds a slot for the vma in its address space
741  * @vma: the vma
742  * @ww: An optional struct i915_gem_ww_ctx
743  * @size: requested size in bytes (can be larger than the VMA)
744  * @alignment: required alignment
745  * @flags: mask of PIN_* flags to use
746  *
747  * First we try to allocate some free space that meets the requirements for
748  * the VMA. Failing that, if the flags permit, it will evict an old VMA,
749  * preferably the oldest idle entry, to make room for the new VMA.
750  *
751  * Returns:
752  * 0 on success, negative error code otherwise.
753  */
754 static int
755 i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
756                 u64 size, u64 alignment, u64 flags)
757 {
758         unsigned long color, guard;
759         u64 start, end;
760         int ret;
761
762         GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
763         GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
764         GEM_BUG_ON(hweight64(flags & (PIN_OFFSET_GUARD | PIN_OFFSET_FIXED | PIN_OFFSET_BIAS)) > 1);
765
766         size = max(size, vma->size);
767         alignment = max_t(typeof(alignment), alignment, vma->display_alignment);
768         if (flags & PIN_MAPPABLE) {
769                 size = max_t(typeof(size), size, vma->fence_size);
770                 alignment = max_t(typeof(alignment),
771                                   alignment, vma->fence_alignment);
772         }
773
774         GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
775         GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
776         GEM_BUG_ON(!is_power_of_2(alignment));
777
778         guard = vma->guard; /* retain guard across rebinds */
779         if (flags & PIN_OFFSET_GUARD) {
780                 GEM_BUG_ON(overflows_type(flags & PIN_OFFSET_MASK, u32));
781                 guard = max_t(u32, guard, flags & PIN_OFFSET_MASK);
782         }
783         /*
784          * We align the node upon insertion, but the address handed to the
785          * hardware is node.start + guard, so the easiest way to keep that
786          * aligned as well is to make the guard a multiple of the alignment size.
787          */
788         guard = ALIGN(guard, alignment);
789
790         start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
791         GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
792
793         end = vma->vm->total;
794         if (flags & PIN_MAPPABLE)
795                 end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
796         if (flags & PIN_ZONE_4G)
797                 end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
798         GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
799
800         alignment = max(alignment, i915_vm_obj_min_alignment(vma->vm, vma->obj));
801
802         /*
803          * If binding the object/GGTT view requires more space than the entire
804          * aperture has, reject it early before evicting everything in a vain
805          * attempt to find space.
806          */
807         if (size > end - 2 * guard) {
808                 drm_dbg(&to_i915(vma->obj->base.dev)->drm,
809                         "Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
810                         size, flags & PIN_MAPPABLE ? "mappable" : "total", end);
811                 return -ENOSPC;
812         }
813
814         color = 0;
815
816         if (i915_vm_has_cache_coloring(vma->vm))
817                 color = vma->obj->cache_level;
818
819         if (flags & PIN_OFFSET_FIXED) {
820                 u64 offset = flags & PIN_OFFSET_MASK;
821                 if (!IS_ALIGNED(offset, alignment) ||
822                     range_overflows(offset, size, end))
823                         return -EINVAL;
824                 /*
825                  * The caller does not know about the guard added by others and
826                  * requests that the offset of the start of its buffer
827                  * be fixed, which may not be the same as the position
828                  * of the vma->node due to the guard pages.
829                  */
830                 if (offset < guard || offset + size > end - guard)
831                         return -ENOSPC;
832
833                 ret = i915_gem_gtt_reserve(vma->vm, ww, &vma->node,
834                                            size + 2 * guard,
835                                            offset - guard,
836                                            color, flags);
837                 if (ret)
838                         return ret;
839         } else {
840                 size += 2 * guard;
841                 /*
842                  * We only support huge gtt pages through the 48b PPGTT,
843                  * however we also don't want to force any alignment for
844                  * objects which need to be tightly packed into the low 32bits.
845                  *
846                  * Note that we assume that the GGTT is limited to 4GiB for the
847                  * foreseeable future. See also i915_ggtt_offset().
848                  */
849                 if (upper_32_bits(end - 1) &&
850                     vma->page_sizes.sg > I915_GTT_PAGE_SIZE &&
851                     !HAS_64K_PAGES(vma->vm->i915)) {
852                         /*
853                          * We can't mix 64K and 4K PTEs in the same page-table
854                          * (2M block), and so to avoid the ugliness and
855                          * complexity of coloring we opt for just aligning 64K
856                          * objects to 2M.
857                          */
858                         u64 page_alignment =
859                                 rounddown_pow_of_two(vma->page_sizes.sg |
860                                                      I915_GTT_PAGE_SIZE_2M);
861
862                         /*
863                          * Check we don't expand for the limited Global GTT
864                          * (mappable aperture is even more precious!). This
865                          * also checks that we exclude the aliasing-ppgtt.
866                          */
867                         GEM_BUG_ON(i915_vma_is_ggtt(vma));
868
869                         alignment = max(alignment, page_alignment);
870
871                         if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
872                                 size = round_up(size, I915_GTT_PAGE_SIZE_2M);
873                 }
874
875                 ret = i915_gem_gtt_insert(vma->vm, ww, &vma->node,
876                                           size, alignment, color,
877                                           start, end, flags);
878                 if (ret)
879                         return ret;
880
881                 GEM_BUG_ON(vma->node.start < start);
882                 GEM_BUG_ON(vma->node.start + vma->node.size > end);
883         }
884         GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
885         GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
886
887         list_move_tail(&vma->vm_link, &vma->vm->bound_list);
888         vma->guard = guard;
889
890         return 0;
891 }
892
893 static void
894 i915_vma_detach(struct i915_vma *vma)
895 {
896         GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
897         GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
898
899         /*
900          * And finally now the object is completely decoupled from this
901          * vma, we can drop its hold on the backing storage and allow
902          * it to be reaped by the shrinker.
903          */
904         list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
905 }
906
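/*
 * Attempt to take an extra pin on a vma that is already bound with the
 * requested flags, without touching vm->mutex. With PIN_VALIDATE we only
 * check that the binding exists and do not take a pin reference.
 */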
907 static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
908 {
909         unsigned int bound;
910
911         bound = atomic_read(&vma->flags);
912
913         if (flags & PIN_VALIDATE) {
914                 flags &= I915_VMA_BIND_MASK;
915
916                 return (flags & bound) == flags;
917         }
918
919         /* with the lock mandatory for unbind, we don't race here */
920         flags &= I915_VMA_BIND_MASK;
921         do {
922                 if (unlikely(flags & ~bound))
923                         return false;
924
925                 if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
926                         return false;
927
928                 GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
929         } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
930
931         return true;
932 }
933
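/*
 * Build the scatterlist for a rotated GGTT view: walk the source pages
 * column by column, starting from the bottom row, emitting one page-sized
 * entry per tile plus a padding entry for any dst_stride overhang.
 */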
934 static struct scatterlist *
935 rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
936              unsigned int width, unsigned int height,
937              unsigned int src_stride, unsigned int dst_stride,
938              struct sg_table *st, struct scatterlist *sg)
939 {
940         unsigned int column, row;
941         pgoff_t src_idx;
942
943         for (column = 0; column < width; column++) {
944                 unsigned int left;
945
946                 src_idx = src_stride * (height - 1) + column + offset;
947                 for (row = 0; row < height; row++) {
948                         st->nents++;
949                         /*
950                          * We don't need the pages, but need to initialize
951                          * the entries so the sg list can be happily traversed.
952                          * All we need are the DMA addresses.
953                          */
954                         sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
955                         sg_dma_address(sg) =
956                                 i915_gem_object_get_dma_address(obj, src_idx);
957                         sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
958                         sg = sg_next(sg);
959                         src_idx -= src_stride;
960                 }
961
962                 left = (dst_stride - height) * I915_GTT_PAGE_SIZE;
963
964                 if (!left)
965                         continue;
966
967                 st->nents++;
968
969                 /*
970                  * The DE ignores the PTEs for the padding tiles; the sg entry
971                  * here is just a convenience to indicate how many padding PTEs
972                  * to insert at this spot.
973                  */
974                 sg_set_page(sg, NULL, left, 0);
975                 sg_dma_address(sg) = 0;
976                 sg_dma_len(sg) = left;
977                 sg = sg_next(sg);
978         }
979
980         return sg;
981 }
982
983 static noinline struct sg_table *
984 intel_rotate_pages(struct intel_rotation_info *rot_info,
985                    struct drm_i915_gem_object *obj)
986 {
987         unsigned int size = intel_rotation_info_size(rot_info);
988         struct drm_i915_private *i915 = to_i915(obj->base.dev);
989         struct sg_table *st;
990         struct scatterlist *sg;
991         int ret = -ENOMEM;
992         int i;
993
994         /* Allocate target SG list. */
995         st = kmalloc(sizeof(*st), GFP_KERNEL);
996         if (!st)
997                 goto err_st_alloc;
998
999         ret = sg_alloc_table(st, size, GFP_KERNEL);
1000         if (ret)
1001                 goto err_sg_alloc;
1002
1003         st->nents = 0;
1004         sg = st->sgl;
1005
1006         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
1007                 sg = rotate_pages(obj, rot_info->plane[i].offset,
1008                                   rot_info->plane[i].width, rot_info->plane[i].height,
1009                                   rot_info->plane[i].src_stride,
1010                                   rot_info->plane[i].dst_stride,
1011                                   st, sg);
1012
1013         return st;
1014
1015 err_sg_alloc:
1016         kfree(st);
1017 err_st_alloc:
1018
1019         drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
1020                 obj->base.size, rot_info->plane[0].width,
1021                 rot_info->plane[0].height, size);
1022
1023         return ERR_PTR(ret);
1024 }
1025
1026 static struct scatterlist *
1027 add_padding_pages(unsigned int count,
1028                   struct sg_table *st, struct scatterlist *sg)
1029 {
1030         st->nents++;
1031
1032         /*
1033          * The DE ignores the PTEs for the padding tiles; the sg entry
1034          * here is just a convenience to indicate how many padding PTEs
1035          * to insert at this spot.
1036          */
1037         sg_set_page(sg, NULL, count * I915_GTT_PAGE_SIZE, 0);
1038         sg_dma_address(sg) = 0;
1039         sg_dma_len(sg) = count * I915_GTT_PAGE_SIZE;
1040         sg = sg_next(sg);
1041
1042         return sg;
1043 }
1044
1045 static struct scatterlist *
1046 remap_tiled_color_plane_pages(struct drm_i915_gem_object *obj,
1047                               unsigned long offset, unsigned int alignment_pad,
1048                               unsigned int width, unsigned int height,
1049                               unsigned int src_stride, unsigned int dst_stride,
1050                               struct sg_table *st, struct scatterlist *sg,
1051                               unsigned int *gtt_offset)
1052 {
1053         unsigned int row;
1054
1055         if (!width || !height)
1056                 return sg;
1057
1058         if (alignment_pad)
1059                 sg = add_padding_pages(alignment_pad, st, sg);
1060
1061         for (row = 0; row < height; row++) {
1062                 unsigned int left = width * I915_GTT_PAGE_SIZE;
1063
1064                 while (left) {
1065                         dma_addr_t addr;
1066                         unsigned int length;
1067
1068                         /*
1069                          * We don't need the pages, but need to initialize
1070                          * the entries so the sg list can be happily traversed.
1071                          * All we need are the DMA addresses.
1072                          */
1073
1074                         addr = i915_gem_object_get_dma_address_len(obj, offset, &length);
1075
1076                         length = min(left, length);
1077
1078                         st->nents++;
1079
1080                         sg_set_page(sg, NULL, length, 0);
1081                         sg_dma_address(sg) = addr;
1082                         sg_dma_len(sg) = length;
1083                         sg = sg_next(sg);
1084
1085                         offset += length / I915_GTT_PAGE_SIZE;
1086                         left -= length;
1087                 }
1088
1089                 offset += src_stride - width;
1090
1091                 left = (dst_stride - width) * I915_GTT_PAGE_SIZE;
1092
1093                 if (!left)
1094                         continue;
1095
1096                 sg = add_padding_pages(left >> PAGE_SHIFT, st, sg);
1097         }
1098
1099         *gtt_offset += alignment_pad + dst_stride * height;
1100
1101         return sg;
1102 }
1103
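/*
 * Copy the DMA addresses for @count pages starting at @obj_offset from the
 * object's sg table into @st, preserving the chunking of the source
 * entries so runs that are already contiguous stay contiguous.
 */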
1104 static struct scatterlist *
1105 remap_contiguous_pages(struct drm_i915_gem_object *obj,
1106                        pgoff_t obj_offset,
1107                        unsigned int count,
1108                        struct sg_table *st, struct scatterlist *sg)
1109 {
1110         struct scatterlist *iter;
1111         unsigned int offset;
1112
1113         iter = i915_gem_object_get_sg_dma(obj, obj_offset, &offset);
1114         GEM_BUG_ON(!iter);
1115
1116         do {
1117                 unsigned int len;
1118
1119                 len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
1120                           count << PAGE_SHIFT);
1121                 sg_set_page(sg, NULL, len, 0);
1122                 sg_dma_address(sg) =
1123                         sg_dma_address(iter) + (offset << PAGE_SHIFT);
1124                 sg_dma_len(sg) = len;
1125
1126                 st->nents++;
1127                 count -= len >> PAGE_SHIFT;
1128                 if (count == 0)
1129                         return sg;
1130
1131                 sg = __sg_next(sg);
1132                 iter = __sg_next(iter);
1133                 offset = 0;
1134         } while (1);
1135 }
1136
1137 static struct scatterlist *
1138 remap_linear_color_plane_pages(struct drm_i915_gem_object *obj,
1139                                pgoff_t obj_offset, unsigned int alignment_pad,
1140                                unsigned int size,
1141                                struct sg_table *st, struct scatterlist *sg,
1142                                unsigned int *gtt_offset)
1143 {
1144         if (!size)
1145                 return sg;
1146
1147         if (alignment_pad)
1148                 sg = add_padding_pages(alignment_pad, st, sg);
1149
1150         sg = remap_contiguous_pages(obj, obj_offset, size, st, sg);
1151         sg = sg_next(sg);
1152
1153         *gtt_offset += alignment_pad + size;
1154
1155         return sg;
1156 }
1157
1158 static struct scatterlist *
1159 remap_color_plane_pages(const struct intel_remapped_info *rem_info,
1160                         struct drm_i915_gem_object *obj,
1161                         int color_plane,
1162                         struct sg_table *st, struct scatterlist *sg,
1163                         unsigned int *gtt_offset)
1164 {
1165         unsigned int alignment_pad = 0;
1166
1167         if (rem_info->plane_alignment)
1168                 alignment_pad = ALIGN(*gtt_offset, rem_info->plane_alignment) - *gtt_offset;
1169
1170         if (rem_info->plane[color_plane].linear)
1171                 sg = remap_linear_color_plane_pages(obj,
1172                                                     rem_info->plane[color_plane].offset,
1173                                                     alignment_pad,
1174                                                     rem_info->plane[color_plane].size,
1175                                                     st, sg,
1176                                                     gtt_offset);
1177
1178         else
1179                 sg = remap_tiled_color_plane_pages(obj,
1180                                                    rem_info->plane[color_plane].offset,
1181                                                    alignment_pad,
1182                                                    rem_info->plane[color_plane].width,
1183                                                    rem_info->plane[color_plane].height,
1184                                                    rem_info->plane[color_plane].src_stride,
1185                                                    rem_info->plane[color_plane].dst_stride,
1186                                                    st, sg,
1187                                                    gtt_offset);
1188
1189         return sg;
1190 }
1191
1192 static noinline struct sg_table *
1193 intel_remap_pages(struct intel_remapped_info *rem_info,
1194                   struct drm_i915_gem_object *obj)
1195 {
1196         unsigned int size = intel_remapped_info_size(rem_info);
1197         struct drm_i915_private *i915 = to_i915(obj->base.dev);
1198         struct sg_table *st;
1199         struct scatterlist *sg;
1200         unsigned int gtt_offset = 0;
1201         int ret = -ENOMEM;
1202         int i;
1203
1204         /* Allocate target SG list. */
1205         st = kmalloc(sizeof(*st), GFP_KERNEL);
1206         if (!st)
1207                 goto err_st_alloc;
1208
1209         ret = sg_alloc_table(st, size, GFP_KERNEL);
1210         if (ret)
1211                 goto err_sg_alloc;
1212
1213         st->nents = 0;
1214         sg = st->sgl;
1215
1216         for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
1217                 sg = remap_color_plane_pages(rem_info, obj, i, st, sg, &gtt_offset);
1218
1219         i915_sg_trim(st);
1220
1221         return st;
1222
1223 err_sg_alloc:
1224         kfree(st);
1225 err_st_alloc:
1226
1227         drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
1228                 obj->base.size, rem_info->plane[0].width,
1229                 rem_info->plane[0].height, size);
1230
1231         return ERR_PTR(ret);
1232 }
1233
1234 static noinline struct sg_table *
1235 intel_partial_pages(const struct i915_gtt_view *view,
1236                     struct drm_i915_gem_object *obj)
1237 {
1238         struct sg_table *st;
1239         struct scatterlist *sg;
1240         unsigned int count = view->partial.size;
1241         int ret = -ENOMEM;
1242
1243         st = kmalloc(sizeof(*st), GFP_KERNEL);
1244         if (!st)
1245                 goto err_st_alloc;
1246
1247         ret = sg_alloc_table(st, count, GFP_KERNEL);
1248         if (ret)
1249                 goto err_sg_alloc;
1250
1251         st->nents = 0;
1252
1253         sg = remap_contiguous_pages(obj, view->partial.offset, count, st, st->sgl);
1254
1255         sg_mark_end(sg);
1256         i915_sg_trim(st); /* Drop any unused tail entries. */
1257
1258         return st;
1259
1260 err_sg_alloc:
1261         kfree(st);
1262 err_st_alloc:
1263         return ERR_PTR(ret);
1264 }
1265
1266 static int
1267 __i915_vma_get_pages(struct i915_vma *vma)
1268 {
1269         struct sg_table *pages;
1270
1271         /*
1272          * The vma->pages are only valid within the lifespan of the borrowed
1273          * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
1274          * must be the vma->pages. A simple rule is that vma->pages must only
1275          * be accessed when the obj->mm.pages are pinned.
1276          */
1277         GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
1278
1279         switch (vma->gtt_view.type) {
1280         default:
1281                 GEM_BUG_ON(vma->gtt_view.type);
1282                 fallthrough;
1283         case I915_GTT_VIEW_NORMAL:
1284                 pages = vma->obj->mm.pages;
1285                 break;
1286
1287         case I915_GTT_VIEW_ROTATED:
1288                 pages =
1289                         intel_rotate_pages(&vma->gtt_view.rotated, vma->obj);
1290                 break;
1291
1292         case I915_GTT_VIEW_REMAPPED:
1293                 pages =
1294                         intel_remap_pages(&vma->gtt_view.remapped, vma->obj);
1295                 break;
1296
1297         case I915_GTT_VIEW_PARTIAL:
1298                 pages = intel_partial_pages(&vma->gtt_view, vma->obj);
1299                 break;
1300         }
1301
1302         if (IS_ERR(pages)) {
1303                 drm_err(&vma->vm->i915->drm,
1304                         "Failed to get pages for VMA view type %u (%ld)!\n",
1305                         vma->gtt_view.type, PTR_ERR(pages));
1306                 return PTR_ERR(pages);
1307         }
1308
1309         vma->pages = pages;
1310
1311         return 0;
1312 }
1313
1314 I915_SELFTEST_EXPORT int i915_vma_get_pages(struct i915_vma *vma)
1315 {
1316         int err;
1317
1318         if (atomic_add_unless(&vma->pages_count, 1, 0))
1319                 return 0;
1320
1321         err = i915_gem_object_pin_pages(vma->obj);
1322         if (err)
1323                 return err;
1324
1325         err = __i915_vma_get_pages(vma);
1326         if (err)
1327                 goto err_unpin;
1328
1329         vma->page_sizes = vma->obj->mm.page_sizes;
1330         atomic_inc(&vma->pages_count);
1331
1332         return 0;
1333
1334 err_unpin:
1335         __i915_gem_object_unpin_pages(vma->obj);
1336
1337         return err;
1338 }
1339
1340 void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb)
1341 {
1342         /*
1343          * Before we release the pages that were bound by this vma, we
1344          * must invalidate all the TLBs that may still have a reference
1345          * back to our physical address. It only needs to be done once,
1346          * so after updating the PTE to point away from the pages, record
1347          * the most recent TLB invalidation seqno, and if we have not yet
1348          * flushed the TLBs upon release, perform a full invalidation.
1349          */
1350         WRITE_ONCE(*tlb, intel_gt_next_invalidate_tlb_full(vm->gt));
1351 }
1352
1353 static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
1354 {
1355         /* We allocate under vma_get_pages, so beware the shrinker */
1356         GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
1357
1358         if (atomic_sub_return(count, &vma->pages_count) == 0) {
1359                 if (vma->pages != vma->obj->mm.pages) {
1360                         sg_free_table(vma->pages);
1361                         kfree(vma->pages);
1362                 }
1363                 vma->pages = NULL;
1364
1365                 i915_gem_object_unpin_pages(vma->obj);
1366         }
1367 }
1368
1369 I915_SELFTEST_EXPORT void i915_vma_put_pages(struct i915_vma *vma)
1370 {
1371         if (atomic_add_unless(&vma->pages_count, -1, 1))
1372                 return;
1373
1374         __vma_put_pages(vma, 1);
1375 }
1376
1377 static void vma_unbind_pages(struct i915_vma *vma)
1378 {
1379         unsigned int count;
1380
1381         lockdep_assert_held(&vma->vm->mutex);
1382
1383         /* The upper portion of pages_count is the number of bindings */
1384         count = atomic_read(&vma->pages_count);
1385         count >>= I915_VMA_PAGES_BIAS;
1386         GEM_BUG_ON(!count);
1387
1388         __vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
1389 }
1390
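/*
 * Pin @vma into its address space, inserting and binding it first if it is
 * not already bound with the requested flags. Page-table stash allocation
 * and the async bind worker are set up before taking vm->mutex, since no
 * further allocations are allowed once the mutex is held.
 */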
1391 int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
1392                     u64 size, u64 alignment, u64 flags)
1393 {
1394         struct i915_vma_work *work = NULL;
1395         struct dma_fence *moving = NULL;
1396         struct i915_vma_resource *vma_res = NULL;
1397         intel_wakeref_t wakeref = 0;
1398         unsigned int bound;
1399         int err;
1400
1401         assert_vma_held(vma);
1402         GEM_BUG_ON(!ww);
1403
1404         BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
1405         BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
1406
1407         GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));
1408
1409         /* First try and grab the pin without rebinding the vma */
1410         if (try_qad_pin(vma, flags))
1411                 return 0;
1412
1413         err = i915_vma_get_pages(vma);
1414         if (err)
1415                 return err;
1416
1417         if (flags & PIN_GLOBAL)
1418                 wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
1419
1420         if (flags & vma->vm->bind_async_flags) {
1421                 /* lock VM */
1422                 err = i915_vm_lock_objects(vma->vm, ww);
1423                 if (err)
1424                         goto err_rpm;
1425
1426                 work = i915_vma_work();
1427                 if (!work) {
1428                         err = -ENOMEM;
1429                         goto err_rpm;
1430                 }
1431
1432                 work->vm = vma->vm;
1433
1434                 err = i915_gem_object_get_moving_fence(vma->obj, &moving);
1435                 if (err)
1436                         goto err_rpm;
1437
1438                 dma_fence_work_chain(&work->base, moving);
1439
1440                 /* Allocate enough page directories to cover the PTEs we will use */
1441                 if (vma->vm->allocate_va_range) {
1442                         err = i915_vm_alloc_pt_stash(vma->vm,
1443                                                      &work->stash,
1444                                                      vma->size);
1445                         if (err)
1446                                 goto err_fence;
1447
1448                         err = i915_vm_map_pt_stash(vma->vm, &work->stash);
1449                         if (err)
1450                                 goto err_fence;
1451                 }
1452         }
1453
1454         vma_res = i915_vma_resource_alloc();
1455         if (IS_ERR(vma_res)) {
1456                 err = PTR_ERR(vma_res);
1457                 goto err_fence;
1458         }
1459
1460         /*
1461          * Differentiate between user/kernel vma inside the aliasing-ppgtt.
1462          *
1463          * We conflate the Global GTT with the user's vma when using the
1464          * aliasing-ppgtt, but it is still vitally important to try and
1465          * keep the use cases distinct. For example, userptr objects are
1466          * not allowed inside the Global GTT as that will cause lock
1467          * inversions when we have to evict them in the mmu_notifier callbacks -
1468          * but they are allowed to be part of the user ppGTT which can never
1469          * be mapped. As such we try to give the distinct users of the same
1470          * mutex, distinct lockclasses [equivalent to how we keep i915_ggtt
1471          * and i915_ppgtt separate].
1472          *
1473          * NB this may cause us to mask real lock inversions -- while the
1474          * code is safe today, lockdep may not be able to spot future
1475          * transgressions.
1476          */
1477         err = mutex_lock_interruptible_nested(&vma->vm->mutex,
1478                                               !(flags & PIN_GLOBAL));
1479         if (err)
1480                 goto err_vma_res;
1481
1482         /* No more allocations allowed now that we hold vm->mutex */
1483
1484         if (unlikely(i915_vma_is_closed(vma))) {
1485                 err = -ENOENT;
1486                 goto err_unlock;
1487         }
1488
1489         bound = atomic_read(&vma->flags);
1490         if (unlikely(bound & I915_VMA_ERROR)) {
1491                 err = -ENOMEM;
1492                 goto err_unlock;
1493         }
1494
1495         if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
1496                 err = -EAGAIN; /* pins are meant to be fairly temporary */
1497                 goto err_unlock;
1498         }
1499
1500         if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
1501                 if (!(flags & PIN_VALIDATE))
1502                         __i915_vma_pin(vma);
1503                 goto err_unlock;
1504         }
1505
1506         err = i915_active_acquire(&vma->active);
1507         if (err)
1508                 goto err_unlock;
1509
1510         if (!(bound & I915_VMA_BIND_MASK)) {
1511                 err = i915_vma_insert(vma, ww, size, alignment, flags);
1512                 if (err)
1513                         goto err_active;
1514
1515                 if (i915_is_ggtt(vma->vm))
1516                         __i915_vma_set_map_and_fenceable(vma);
1517         }
1518
1519         GEM_BUG_ON(!vma->pages);
1520         err = i915_vma_bind(vma,
1521                             vma->obj->cache_level,
1522                             flags, work, vma_res);
1523         vma_res = NULL;
1524         if (err)
1525                 goto err_remove;
1526
1527         /* There should be at most 2 active bindings (user, global) */
1528         GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
1529         atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
1530         list_move_tail(&vma->vm_link, &vma->vm->bound_list);
1531
1532         if (!(flags & PIN_VALIDATE)) {
1533                 __i915_vma_pin(vma);
1534                 GEM_BUG_ON(!i915_vma_is_pinned(vma));
1535         }
1536         GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
1537         GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
1538
1539 err_remove:
1540         if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
1541                 i915_vma_detach(vma);
1542                 drm_mm_remove_node(&vma->node);
1543         }
1544 err_active:
1545         i915_active_release(&vma->active);
1546 err_unlock:
1547         mutex_unlock(&vma->vm->mutex);
1548 err_vma_res:
1549         i915_vma_resource_free(vma_res);
1550 err_fence:
1551         if (work)
1552                 dma_fence_work_commit_imm(&work->base);
1553 err_rpm:
1554         if (wakeref)
1555                 intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
1556
1557         if (moving)
1558                 dma_fence_put(moving);
1559
1560         i915_vma_put_pages(vma);
1561         return err;
1562 }
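
/*
 * Illustrative sketch only, not part of the driver: a minimal caller of
 * i915_vma_pin_ww() under a ww transaction. "my_vma" is a hypothetical,
 * already-looked-up vma whose object the caller keeps referenced; real
 * callers live in the execbuf and GGTT pinning paths.
 */
static int __maybe_unused example_pin_user(struct i915_vma *my_vma)
{
	struct i915_gem_ww_ctx ww;
	int err;

	for_i915_gem_ww(&ww, err, true) {
		/* The object lock must be held across the pin. */
		err = i915_gem_object_lock(my_vma->obj, &ww);
		if (!err)
			/* Acquire a user (PIN_USER) binding and a pin reference. */
			err = i915_vma_pin_ww(my_vma, &ww, 0, 0, PIN_USER);
	}
	if (err)
		return err;

	/* ... submit work using the binding ... */

	i915_vma_unpin(my_vma);
	return 0;
}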
1563
1564 static void flush_idle_contexts(struct intel_gt *gt)
1565 {
1566         struct intel_engine_cs *engine;
1567         enum intel_engine_id id;
1568
1569         for_each_engine(engine, gt, id)
1570                 intel_engine_flush_barriers(engine);
1571
1572         intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
1573 }
1574
1575 static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
1576                            u32 align, unsigned int flags)
1577 {
1578         struct i915_address_space *vm = vma->vm;
1579         struct intel_gt *gt;
1580         struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
1581         int err;
1582
1583         do {
1584                 err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);
1585
1586                 if (err != -ENOSPC) {
1587                         if (!err) {
1588                                 err = i915_vma_wait_for_bind(vma);
1589                                 if (err)
1590                                         i915_vma_unpin(vma);
1591                         }
1592                         return err;
1593                 }
1594
1595                 /* Unlike i915_vma_pin, we don't take no for an answer! */
1596                 list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
1597                         flush_idle_contexts(gt);
1598                 if (mutex_lock_interruptible(&vm->mutex) == 0) {
1599                         /*
1600                          * We pass NULL ww here, as we don't want to unbind locked
1601                          * objects when called from execbuf once its short-term
1602                          * pinning is removed; that would probably regress badly.
1603                          */
1604                         i915_gem_evict_vm(vm, NULL, NULL);
1605                         mutex_unlock(&vm->mutex);
1606                 }
1607         } while (1);
1608 }
1609
1610 int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
1611                   u32 align, unsigned int flags)
1612 {
1613         struct i915_gem_ww_ctx _ww;
1614         int err;
1615
1616         GEM_BUG_ON(!i915_vma_is_ggtt(vma));
1617
1618         if (ww)
1619                 return __i915_ggtt_pin(vma, ww, align, flags);
1620
1621         lockdep_assert_not_held(&vma->obj->base.resv->lock.base);
1622
1623         for_i915_gem_ww(&_ww, err, true) {
1624                 err = i915_gem_object_lock(vma->obj, &_ww);
1625                 if (!err)
1626                         err = __i915_ggtt_pin(vma, &_ww, align, flags);
1627         }
1628
1629         return err;
1630 }
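
/*
 * Illustrative sketch only, not part of the driver: pinning a GGTT vma
 * without an outer ww transaction. With ww == NULL the helper opens its
 * own transaction and takes the object lock itself; PIN_GLOBAL is added
 * internally. "my_ggtt_vma" is a hypothetical, already-created GGTT vma.
 */
static int __maybe_unused example_ggtt_pin(struct i915_vma *my_ggtt_vma)
{
	int err;

	err = i915_ggtt_pin(my_ggtt_vma, NULL, 0, 0);
	if (err)
		return err;

	/* ... access the object through its GGTT offset ... */

	i915_vma_unpin(my_ggtt_vma);
	return 0;
}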
1631
1632 static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
1633 {
1634         /*
1635          * We defer actually closing, unbinding and destroying the VMA until
1636          * the next idle point, or if the object is freed in the meantime. By
1637          * postponing the unbind, we allow for it to be resurrected by the
1638          * client, avoiding the work required to rebind the VMA. This is
1639          * advantageous for DRI, where the client/server pass objects
1640          * between themselves, temporarily opening a local VMA to the
1641          * object, and then closing it again. The same object is then reused
1642          * on the next frame (or two, depending on the depth of the swap queue)
1643          * causing us to rebind the VMA once more. This ends up being a lot
1644          * of wasted work for the steady state.
1645          */
1646         GEM_BUG_ON(i915_vma_is_closed(vma));
1647         list_add(&vma->closed_link, &gt->closed_vma);
1648 }
1649
1650 void i915_vma_close(struct i915_vma *vma)
1651 {
1652         struct intel_gt *gt = vma->vm->gt;
1653         unsigned long flags;
1654
1655         if (i915_vma_is_ggtt(vma))
1656                 return;
1657
1658         GEM_BUG_ON(!atomic_read(&vma->open_count));
1659         if (atomic_dec_and_lock_irqsave(&vma->open_count,
1660                                         &gt->closed_lock,
1661                                         flags)) {
1662                 __vma_close(vma, gt);
1663                 spin_unlock_irqrestore(&gt->closed_lock, flags);
1664         }
1665 }
1666
1667 static void __i915_vma_remove_closed(struct i915_vma *vma)
1668 {
1669         list_del_init(&vma->closed_link);
1670 }
1671
1672 void i915_vma_reopen(struct i915_vma *vma)
1673 {
1674         struct intel_gt *gt = vma->vm->gt;
1675
1676         spin_lock_irq(&gt->closed_lock);
1677         if (i915_vma_is_closed(vma))
1678                 __i915_vma_remove_closed(vma);
1679         spin_unlock_irq(&gt->closed_lock);
1680 }
1681
1682 static void force_unbind(struct i915_vma *vma)
1683 {
1684         if (!drm_mm_node_allocated(&vma->node))
1685                 return;
1686
1687         atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
1688         WARN_ON(__i915_vma_unbind(vma));
1689         GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
1690 }
1691
1692 static void release_references(struct i915_vma *vma, struct intel_gt *gt,
1693                                bool vm_ddestroy)
1694 {
1695         struct drm_i915_gem_object *obj = vma->obj;
1696
1697         GEM_BUG_ON(i915_vma_is_active(vma));
1698
1699         spin_lock(&obj->vma.lock);
1700         list_del(&vma->obj_link);
1701         if (!RB_EMPTY_NODE(&vma->obj_node))
1702                 rb_erase(&vma->obj_node, &obj->vma.tree);
1703
1704         spin_unlock(&obj->vma.lock);
1705
1706         spin_lock_irq(&gt->closed_lock);
1707         __i915_vma_remove_closed(vma);
1708         spin_unlock_irq(&gt->closed_lock);
1709
1710         if (vm_ddestroy)
1711                 i915_vm_resv_put(vma->vm);
1712
1713         i915_active_fini(&vma->active);
1714         GEM_WARN_ON(vma->resource);
1715         i915_vma_free(vma);
1716 }
1717
1718 /*
1719  * i915_vma_destroy_locked - Remove all weak references to the vma and put
1720  * the initial reference.
1721  *
1722  * This function should be called when it's decided the vma isn't needed
1723  * anymore. The caller must ensure that it doesn't race with another lookup
1724  * plus destroy, typically by taking an appropriate reference.
1725  *
1726  * Current callsites are
1727  * - __i915_gem_object_pages_fini()
1728  * - __i915_vm_close() - Blocks the above function by taking a reference on
1729  * the object.
1730  * - __i915_vma_parked() - Blocks the above functions by taking a reference
1731  * on the vm and a reference on the object. Also takes the object lock so
1732  * destruction from __i915_vma_parked() can be blocked by holding the
1733  * object lock. Since the object lock is only allowed from within i915 with
1734  * an object refcount, holding the object lock also implicitly blocks the
1735  * vma freeing from __i915_gem_object_pages_fini().
1736  *
1737  * Because of locks taken during destruction, a vma is also guaranteed to
1738  * stay alive while the following locks are held if it was looked up while
1739  * holding one of the locks:
1740  * - vm->mutex
1741  * - obj->vma.lock
1742  * - gt->closed_lock
1743  */
1744 void i915_vma_destroy_locked(struct i915_vma *vma)
1745 {
1746         lockdep_assert_held(&vma->vm->mutex);
1747
1748         force_unbind(vma);
1749         list_del_init(&vma->vm_link);
1750         release_references(vma, vma->vm->gt, false);
1751 }
1752
1753 void i915_vma_destroy(struct i915_vma *vma)
1754 {
1755         struct intel_gt *gt;
1756         bool vm_ddestroy;
1757
1758         mutex_lock(&vma->vm->mutex);
1759         force_unbind(vma);
1760         list_del_init(&vma->vm_link);
1761         vm_ddestroy = vma->vm_ddestroy;
1762         vma->vm_ddestroy = false;
1763
1764         /* vma->vm may be freed when releasing vma->vm->mutex. */
1765         gt = vma->vm->gt;
1766         mutex_unlock(&vma->vm->mutex);
1767         release_references(vma, gt, vm_ddestroy);
1768 }
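
/*
 * Illustrative sketch only, not part of the driver: the minimal pattern
 * for destroying a vma from outside vm->mutex, similar to what
 * i915_vma_parked() below does. Assumes the caller holds references on
 * both the object and the vm, and that no concurrent lookup can
 * resurrect the vma.
 */
static void __maybe_unused example_destroy(struct i915_vma *my_vma)
{
	struct drm_i915_gem_object *obj = my_vma->obj;

	/* The object lock is needed for the unbind while the vm is alive. */
	i915_gem_object_lock(obj, NULL);
	i915_vma_destroy(my_vma);	/* takes vm->mutex, frees my_vma */
	i915_gem_object_unlock(obj);
}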
1769
1770 void i915_vma_parked(struct intel_gt *gt)
1771 {
1772         struct i915_vma *vma, *next;
1773         LIST_HEAD(closed);
1774
1775         spin_lock_irq(&gt->closed_lock);
1776         list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
1777                 struct drm_i915_gem_object *obj = vma->obj;
1778                 struct i915_address_space *vm = vma->vm;
1779
1780                 /* XXX All to avoid keeping a reference on i915_vma itself */
1781
1782                 if (!kref_get_unless_zero(&obj->base.refcount))
1783                         continue;
1784
1785                 if (!i915_vm_tryget(vm)) {
1786                         i915_gem_object_put(obj);
1787                         continue;
1788                 }
1789
1790                 list_move(&vma->closed_link, &closed);
1791         }
1792         spin_unlock_irq(&gt->closed_lock);
1793
1794         /* As the GT is held idle, no vma can be reopened as we destroy them */
1795         list_for_each_entry_safe(vma, next, &closed, closed_link) {
1796                 struct drm_i915_gem_object *obj = vma->obj;
1797                 struct i915_address_space *vm = vma->vm;
1798
1799                 if (i915_gem_object_trylock(obj, NULL)) {
1800                         INIT_LIST_HEAD(&vma->closed_link);
1801                         i915_vma_destroy(vma);
1802                         i915_gem_object_unlock(obj);
1803                 } else {
1804                         /* back you go.. */
1805                         spin_lock_irq(&gt->closed_lock);
1806                         list_add(&vma->closed_link, &gt->closed_vma);
1807                         spin_unlock_irq(&gt->closed_lock);
1808                 }
1809
1810                 i915_gem_object_put(obj);
1811                 i915_vm_put(vm);
1812         }
1813 }
1814
1815 static void __i915_vma_iounmap(struct i915_vma *vma)
1816 {
1817         GEM_BUG_ON(i915_vma_is_pinned(vma));
1818
1819         if (vma->iomap == NULL)
1820                 return;
1821
1822         if (page_unmask_bits(vma->iomap))
1823                 __i915_gem_object_release_map(vma->obj);
1824         else
1825                 io_mapping_unmap(vma->iomap);
1826         vma->iomap = NULL;
1827 }
1828
1829 void i915_vma_revoke_mmap(struct i915_vma *vma)
1830 {
1831         struct drm_vma_offset_node *node;
1832         u64 vma_offset;
1833
1834         if (!i915_vma_has_userfault(vma))
1835                 return;
1836
1837         GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
1838         GEM_BUG_ON(!vma->obj->userfault_count);
1839
1840         node = &vma->mmo->vma_node;
1841         vma_offset = vma->gtt_view.partial.offset << PAGE_SHIFT;
1842         unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
1843                             drm_vma_node_offset_addr(node) + vma_offset,
1844                             vma->size,
1845                             1);
1846
1847         i915_vma_unset_userfault(vma);
1848         if (!--vma->obj->userfault_count)
1849                 list_del(&vma->obj->userfault_link);
1850 }
1851
1852 static int
1853 __i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
1854 {
1855         return __i915_request_await_exclusive(rq, &vma->active);
1856 }
1857
1858 static int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
1859 {
1860         int err;
1861
1862         /* Wait for the vma to be bound before we start! */
1863         err = __i915_request_await_bind(rq, vma);
1864         if (err)
1865                 return err;
1866
1867         return i915_active_add_request(&vma->active, rq);
1868 }
1869
1870 int _i915_vma_move_to_active(struct i915_vma *vma,
1871                              struct i915_request *rq,
1872                              struct dma_fence *fence,
1873                              unsigned int flags)
1874 {
1875         struct drm_i915_gem_object *obj = vma->obj;
1876         int err;
1877
1878         assert_object_held(obj);
1879
1880         GEM_BUG_ON(!vma->pages);
1881
1882         if (!(flags & __EXEC_OBJECT_NO_REQUEST_AWAIT)) {
1883                 err = i915_request_await_object(rq, vma->obj, flags & EXEC_OBJECT_WRITE);
1884                 if (unlikely(err))
1885                         return err;
1886         }
1887         err = __i915_vma_move_to_active(vma, rq);
1888         if (unlikely(err))
1889                 return err;
1890
1891         /*
1892          * Reserve fences slot early to prevent an allocation after preparing
1893          * the workload and associating fences with dma_resv.
1894          */
1895         if (fence && !(flags & __EXEC_OBJECT_NO_RESERVE)) {
1896                 struct dma_fence *curr;
1897                 int idx;
1898
1899                 dma_fence_array_for_each(curr, idx, fence)
1900                         ;
1901                 err = dma_resv_reserve_fences(vma->obj->base.resv, idx);
1902                 if (unlikely(err))
1903                         return err;
1904         }
1905
1906         if (flags & EXEC_OBJECT_WRITE) {
1907                 struct intel_frontbuffer *front;
1908
1909                 front = __intel_frontbuffer_get(obj);
1910                 if (unlikely(front)) {
1911                         if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
1912                                 i915_active_add_request(&front->write, rq);
1913                         intel_frontbuffer_put(front);
1914                 }
1915         }
1916
1917         if (fence) {
1918                 struct dma_fence *curr;
1919                 enum dma_resv_usage usage;
1920                 int idx;
1921
1922                 if (flags & EXEC_OBJECT_WRITE) {
1923                         usage = DMA_RESV_USAGE_WRITE;
1924                         obj->write_domain = I915_GEM_DOMAIN_RENDER;
1925                         obj->read_domains = 0;
1926                 } else {
1927                         usage = DMA_RESV_USAGE_READ;
1928                         obj->write_domain = 0;
1929                 }
1930
1931                 dma_fence_array_for_each(curr, idx, fence)
1932                         dma_resv_add_fence(vma->obj->base.resv, curr, usage);
1933         }
1934
1935         if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
1936                 i915_active_add_request(&vma->fence->active, rq);
1937
1938         obj->read_domains |= I915_GEM_GPU_DOMAINS;
1939         obj->mm.dirty = true;
1940
1941         GEM_BUG_ON(!i915_vma_is_active(vma));
1942         return 0;
1943 }
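
/*
 * Illustrative sketch only, not part of the driver: tracking a pinned,
 * object-locked vma in a request so the binding is kept alive until the
 * GPU is done with it. "my_vma" and "rq" are hypothetical; passing
 * &rq->fence matches what the i915_vma_move_to_active() wrapper in
 * i915_vma.h is expected to do.
 */
static int __maybe_unused example_track_write(struct i915_vma *my_vma,
					      struct i915_request *rq)
{
	/* Caller holds my_vma->obj's dma-resv lock and a pin on my_vma. */
	return _i915_vma_move_to_active(my_vma, rq, &rq->fence,
					EXEC_OBJECT_WRITE);
}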
1944
1945 struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
1946 {
1947         struct i915_vma_resource *vma_res = vma->resource;
1948         struct dma_fence *unbind_fence;
1949
1950         GEM_BUG_ON(i915_vma_is_pinned(vma));
1951         assert_vma_held_evict(vma);
1952
1953         if (i915_vma_is_map_and_fenceable(vma)) {
1954                 /* Force a pagefault for domain tracking on next user access */
1955                 i915_vma_revoke_mmap(vma);
1956
1957                 /*
1958                  * Check that we have flushed all writes through the GGTT
1959                  * before the unbind; otherwise, due to the non-strict nature
1960                  * of those indirect writes, they may end up referencing the
1961                  * GGTT PTE after the unbind.
1962                  *
1963                  * Note that we may be concurrently poking at the GGTT_WRITE
1964                  * bit from set-domain, as we mark all GGTT vma associated
1965                  * with an object. We know this is for another vma, as we
1966                  * are currently unbinding this one -- so if this vma will be
1967                  * reused, it will be refaulted and have its dirty bit set
1968                  * before the next write.
1969                  */
1970                 i915_vma_flush_writes(vma);
1971
1972                 /* release the fence reg _after_ flushing */
1973                 i915_vma_revoke_fence(vma);
1974
1975                 clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
1976         }
1977
1978         __i915_vma_iounmap(vma);
1979
1980         GEM_BUG_ON(vma->fence);
1981         GEM_BUG_ON(i915_vma_has_userfault(vma));
1982
1983         /* Object backend must be async capable. */
1984         GEM_WARN_ON(async && !vma->resource->bi.pages_rsgt);
1985
1986         /* If vm is not open, unbind is a nop. */
1987         vma_res->needs_wakeref = i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND) &&
1988                 kref_read(&vma->vm->ref);
1989         vma_res->skip_pte_rewrite = !kref_read(&vma->vm->ref) ||
1990                 vma->vm->skip_pte_rewrite;
1991         trace_i915_vma_unbind(vma);
1992
1993         if (async)
1994                 unbind_fence = i915_vma_resource_unbind(vma_res,
1995                                                         &vma->obj->mm.tlb);
1996         else
1997                 unbind_fence = i915_vma_resource_unbind(vma_res, NULL);
1998
1999         vma->resource = NULL;
2000
2001         atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
2002                    &vma->flags);
2003
2004         i915_vma_detach(vma);
2005
2006         if (!async) {
2007                 if (unbind_fence) {
2008                         dma_fence_wait(unbind_fence, false);
2009                         dma_fence_put(unbind_fence);
2010                         unbind_fence = NULL;
2011                 }
2012                 vma_invalidate_tlb(vma->vm, &vma->obj->mm.tlb);
2013         }
2014
2015         /*
2016          * Binding itself may not have completed until the unbind fence signals,
2017          * so don't drop the pages until that happens, unless the resource is
2018          * async_capable.
2019          */
2020
2021         vma_unbind_pages(vma);
2022         return unbind_fence;
2023 }
2024
2025 int __i915_vma_unbind(struct i915_vma *vma)
2026 {
2027         int ret;
2028
2029         lockdep_assert_held(&vma->vm->mutex);
2030         assert_vma_held_evict(vma);
2031
2032         if (!drm_mm_node_allocated(&vma->node))
2033                 return 0;
2034
2035         if (i915_vma_is_pinned(vma)) {
2036                 vma_print_allocator(vma, "is pinned");
2037                 return -EAGAIN;
2038         }
2039
2040         /*
2041          * After confirming that no one else is pinning this vma, wait for
2042          * any laggards who may have crept in during the wait (through
2043          * a residual pin skipping the vm->mutex) to complete.
2044          */
2045         ret = i915_vma_sync(vma);
2046         if (ret)
2047                 return ret;
2048
2049         GEM_BUG_ON(i915_vma_is_active(vma));
2050         __i915_vma_evict(vma, false);
2051
2052         drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
2053         return 0;
2054 }
2055
2056 static struct dma_fence *__i915_vma_unbind_async(struct i915_vma *vma)
2057 {
2058         struct dma_fence *fence;
2059
2060         lockdep_assert_held(&vma->vm->mutex);
2061
2062         if (!drm_mm_node_allocated(&vma->node))
2063                 return NULL;
2064
2065         if (i915_vma_is_pinned(vma) ||
2066             &vma->obj->mm.rsgt->table != vma->resource->bi.pages)
2067                 return ERR_PTR(-EAGAIN);
2068
2069         /*
2070          * We probably need to replace this with awaiting the fences of the
2071          * object's dma_resv when the vma active goes away. When doing that
2072          * we need to be careful to not add the vma_resource unbind fence
2073          * immediately to the object's dma_resv, because then unbinding
2074          * the next vma from the object, in case there are many, will
2075          * actually await the unbinding of the previous vmas, which is
2076          * undesirable.
2077          */
2078         if (i915_sw_fence_await_active(&vma->resource->chain, &vma->active,
2079                                        I915_ACTIVE_AWAIT_EXCL |
2080                                        I915_ACTIVE_AWAIT_ACTIVE) < 0) {
2081                 return ERR_PTR(-EBUSY);
2082         }
2083
2084         fence = __i915_vma_evict(vma, true);
2085
2086         drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
2087
2088         return fence;
2089 }
2090
2091 int i915_vma_unbind(struct i915_vma *vma)
2092 {
2093         struct i915_address_space *vm = vma->vm;
2094         intel_wakeref_t wakeref = 0;
2095         int err;
2096
2097         assert_object_held_shared(vma->obj);
2098
2099         /* Optimistic wait before taking the mutex */
2100         err = i915_vma_sync(vma);
2101         if (err)
2102                 return err;
2103
2104         if (!drm_mm_node_allocated(&vma->node))
2105                 return 0;
2106
2107         if (i915_vma_is_pinned(vma)) {
2108                 vma_print_allocator(vma, "is pinned");
2109                 return -EAGAIN;
2110         }
2111
2112         if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
2113                 /* XXX not always required: nop_clear_range */
2114                 wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
2115
2116         err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
2117         if (err)
2118                 goto out_rpm;
2119
2120         err = __i915_vma_unbind(vma);
2121         mutex_unlock(&vm->mutex);
2122
2123 out_rpm:
2124         if (wakeref)
2125                 intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
2126         return err;
2127 }
2128
2129 int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
2130 {
2131         struct drm_i915_gem_object *obj = vma->obj;
2132         struct i915_address_space *vm = vma->vm;
2133         intel_wakeref_t wakeref = 0;
2134         struct dma_fence *fence;
2135         int err;
2136
2137         /*
2138          * We need the dma-resv lock since we add the
2139          * unbind fence to the dma-resv object.
2140          */
2141         assert_object_held(obj);
2142
2143         if (!drm_mm_node_allocated(&vma->node))
2144                 return 0;
2145
2146         if (i915_vma_is_pinned(vma)) {
2147                 vma_print_allocator(vma, "is pinned");
2148                 return -EAGAIN;
2149         }
2150
2151         if (!obj->mm.rsgt)
2152                 return -EBUSY;
2153
2154         err = dma_resv_reserve_fences(obj->base.resv, 2);
2155         if (err)
2156                 return -EBUSY;
2157
2158         /*
2159          * It would be great if we could grab this wakeref from the
2160          * async unbind work if needed, but we can't because it uses
2161          * kmalloc and it's in the dma-fence signalling critical path.
2162          */
2163         if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
2164                 wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
2165
2166         if (trylock_vm && !mutex_trylock(&vm->mutex)) {
2167                 err = -EBUSY;
2168                 goto out_rpm;
2169         } else if (!trylock_vm) {
2170                 err = mutex_lock_interruptible_nested(&vm->mutex, !wakeref);
2171                 if (err)
2172                         goto out_rpm;
2173         }
2174
2175         fence = __i915_vma_unbind_async(vma);
2176         mutex_unlock(&vm->mutex);
2177         if (IS_ERR_OR_NULL(fence)) {
2178                 err = PTR_ERR_OR_ZERO(fence);
2179                 goto out_rpm;
2180         }
2181
2182         dma_resv_add_fence(obj->base.resv, fence, DMA_RESV_USAGE_READ);
2183         dma_fence_put(fence);
2184
2185 out_rpm:
2186         if (wakeref)
2187                 intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
2188         return err;
2189 }
2190
2191 int i915_vma_unbind_unlocked(struct i915_vma *vma)
2192 {
2193         int err;
2194
2195         i915_gem_object_lock(vma->obj, NULL);
2196         err = i915_vma_unbind(vma);
2197         i915_gem_object_unlock(vma->obj);
2198
2199         return err;
2200 }
2201
2202 struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
2203 {
2204         i915_gem_object_make_unshrinkable(vma->obj);
2205         return vma;
2206 }
2207
2208 void i915_vma_make_shrinkable(struct i915_vma *vma)
2209 {
2210         i915_gem_object_make_shrinkable(vma->obj);
2211 }
2212
2213 void i915_vma_make_purgeable(struct i915_vma *vma)
2214 {
2215         i915_gem_object_make_purgeable(vma->obj);
2216 }
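
/*
 * Illustrative sketch only, not part of the driver: pairing the shrinker
 * visibility hints around a period in which a (hypothetical) vma's backing
 * pages must not be reclaimed.
 */
static void __maybe_unused example_shrinker_hints(struct i915_vma *my_vma)
{
	i915_vma_make_unshrinkable(my_vma);

	/* ... the object's pages are kept off the shrinker lists ... */

	i915_vma_make_shrinkable(my_vma);
}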
2217
2218 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2219 #include "selftests/i915_vma.c"
2220 #endif
2221
2222 void i915_vma_module_exit(void)
2223 {
2224         kmem_cache_destroy(slab_vmas);
2225 }
2226
2227 int __init i915_vma_module_init(void)
2228 {
2229         slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
2230         if (!slab_vmas)
2231                 return -ENOMEM;
2232
2233         return 0;
2234 }