/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/dma-fence-array.h>
#include <linux/interval_tree_generic.h>
#include <linux/idr.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/drm_exec.h>
#include "amdgpu.h"
#include "amdgpu_vm.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gmc.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_res_cursor.h"
#include "kfd_svm.h"

/**
 * DOC: GPUVM
 *
 * GPUVM is the MMU functionality provided on the GPU.
 * GPUVM is similar to the legacy GART on older asics, however
 * rather than there being a single global GART table
 * for the entire GPU, there can be multiple GPUVM page tables active
 * at any given time.  The GPUVM page tables can contain a mix of
 * VRAM pages and system pages (both memory and MMIO) and system pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 *
 * Each active GPUVM has an ID associated with it and there is a page table
 * linked with each VMID.  When executing a command buffer,
 * the kernel tells the engine what VMID to use for that command
 * buffer.  VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * The hardware supports up to 16 active GPUVMs at any given time.
 *
 * Each GPUVM is represented by a 1-2 or 1-5 level page table, depending
 * on the ASIC family.  GPUVM supports RWX attributes on each page as well
 * as other features such as encryption and caching attributes.
 *
 * VMID 0 is special.  It is the GPUVM used for the kernel driver.  In
 * addition to an aperture managed by a page table, VMID 0 also has
 * several other apertures.  There is an aperture for direct access to VRAM
 * and there is a legacy AGP aperture which just forwards accesses directly
 * to the matching system physical addresses (or IOVAs when an IOMMU is
 * present).  These apertures provide direct access to these memories without
 * incurring the overhead of a page table.  VMID 0 is used by the kernel
 * driver for tasks like memory management.
 *
 * GPU clients (i.e., engines on the GPU) use GPUVM VMIDs to access memory.
 * For user applications, each application can have their own unique GPUVM
 * address space.  The application manages the address space and the kernel
 * driver manages the GPUVM page tables for each process.  If a GPU client
 * accesses an invalid page, it will generate a GPU page fault, similar to
 * accessing an invalid page on a CPU.
 */

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
                     START, LAST, static, amdgpu_vm_it)

#undef START
#undef LAST
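
/*
 * INTERVAL_TREE_DEFINE() above instantiates the interval tree helpers used
 * throughout this file (amdgpu_vm_it_insert(), amdgpu_vm_it_remove(),
 * amdgpu_vm_it_iter_first() and amdgpu_vm_it_iter_next()) for looking up
 * struct amdgpu_bo_va_mapping entries by their [start, last] VA range.
 */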

/**
 * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
 */
struct amdgpu_prt_cb {
        /**
         * @adev: amdgpu device
         */
        struct amdgpu_device *adev;

        /**
         * @cb: callback
         */
        struct dma_fence_cb cb;
};

/**
 * struct amdgpu_vm_tlb_seq_struct - Helper to increment the TLB flush sequence
 */
struct amdgpu_vm_tlb_seq_struct {
        /**
         * @vm: pointer to the amdgpu_vm structure to set the fence sequence on
         */
        struct amdgpu_vm *vm;

        /**
         * @cb: callback
         */
        struct dma_fence_cb cb;
};

/**
 * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
 *
 * @adev: amdgpu_device pointer
 * @vm: amdgpu_vm pointer
 * @pasid: the pasid the VM is using on this GPU
 *
 * Set the pasid this VM is using on this GPU; it can also be used to remove
 * the pasid by passing in zero.
 */
int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                        u32 pasid)
{
        int r;

        if (vm->pasid == pasid)
                return 0;

        if (vm->pasid) {
                r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
                if (r < 0)
                        return r;

                vm->pasid = 0;
        }

        if (pasid) {
                r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
                                        GFP_KERNEL));
                if (r < 0)
                        return r;

                vm->pasid = pasid;
        }

        return 0;
}
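
/*
 * Illustrative usage sketch (not part of this file): a typical caller binds
 * a pasid right after creating the VM and removes the binding by passing 0
 * before the VM is destroyed:
 *
 *      r = amdgpu_vm_set_pasid(adev, vm, pasid);
 *      ...
 *      amdgpu_vm_set_pasid(adev, vm, 0);
 */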

/**
 * amdgpu_vm_bo_evicted - vm_bo is evicted
 *
 * @vm_bo: vm_bo which is evicted
 *
 * State for PDs/PTs and per VM BOs which are not at the location they should
 * be.
 */
static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
{
        struct amdgpu_vm *vm = vm_bo->vm;
        struct amdgpu_bo *bo = vm_bo->bo;

        vm_bo->moved = true;
        spin_lock(&vm_bo->vm->status_lock);
        if (bo->tbo.type == ttm_bo_type_kernel)
                list_move(&vm_bo->vm_status, &vm->evicted);
        else
                list_move_tail(&vm_bo->vm_status, &vm->evicted);
        spin_unlock(&vm_bo->vm->status_lock);
}

/**
 * amdgpu_vm_bo_moved - vm_bo is moved
 *
 * @vm_bo: vm_bo which is moved
 *
 * State for per VM BOs which are moved, but that change is not yet reflected
 * in the page tables.
 */
static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
{
        spin_lock(&vm_bo->vm->status_lock);
        list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
        spin_unlock(&vm_bo->vm->status_lock);
}

/**
 * amdgpu_vm_bo_idle - vm_bo is idle
 *
 * @vm_bo: vm_bo which is now idle
 *
 * State for PDs/PTs and per VM BOs which have gone through the state machine
 * and are now idle.
 */
static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
{
        spin_lock(&vm_bo->vm->status_lock);
        list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
        spin_unlock(&vm_bo->vm->status_lock);
        vm_bo->moved = false;
}

/**
 * amdgpu_vm_bo_invalidated - vm_bo is invalidated
 *
 * @vm_bo: vm_bo which is now invalidated
 *
 * State for normal BOs which are invalidated and whose change is not yet
 * reflected in the PTs.
 */
static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
{
        spin_lock(&vm_bo->vm->status_lock);
        list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
        spin_unlock(&vm_bo->vm->status_lock);
}

/**
 * amdgpu_vm_bo_evicted_user - vm_bo is evicted
 *
 * @vm_bo: vm_bo which is evicted
 *
 * State for BOs used by user mode queues which are not at the location they
 * should be.
 */
static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
{
        vm_bo->moved = true;
        spin_lock(&vm_bo->vm->status_lock);
        list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user);
        spin_unlock(&vm_bo->vm->status_lock);
}

/**
 * amdgpu_vm_bo_relocated - vm_bo is relocated
 *
 * @vm_bo: vm_bo which is relocated
 *
 * State for PDs/PTs which need to update their parent PD.
 * For the root PD, just move to idle state.
 */
static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
{
        if (vm_bo->bo->parent) {
                spin_lock(&vm_bo->vm->status_lock);
                list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
                spin_unlock(&vm_bo->vm->status_lock);
        } else {
                amdgpu_vm_bo_idle(vm_bo);
        }
}

/**
 * amdgpu_vm_bo_done - vm_bo is done
 *
 * @vm_bo: vm_bo which is now done
 *
 * State for normal BOs which are invalidated and whose change has already
 * been updated in the PTs.
 */
static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
{
        spin_lock(&vm_bo->vm->status_lock);
        list_move(&vm_bo->vm_status, &vm_bo->vm->done);
        spin_unlock(&vm_bo->vm->status_lock);
}

/**
 * amdgpu_vm_bo_reset_state_machine - reset the vm_bo state machine
 * @vm: the VM which state machine to reset
 *
 * Move all vm_bo objects in the VM into a state where they will be updated
 * again during validation.
 */
static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
{
        struct amdgpu_vm_bo_base *vm_bo, *tmp;

        spin_lock(&vm->status_lock);
        list_splice_init(&vm->done, &vm->invalidated);
        list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
                vm_bo->moved = true;
        list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
                struct amdgpu_bo *bo = vm_bo->bo;

                vm_bo->moved = true;
                if (!bo || bo->tbo.type != ttm_bo_type_kernel)
                        list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
                else if (bo->parent)
                        list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
        }
        spin_unlock(&vm->status_lock);
}

/**
 * amdgpu_vm_update_shared - helper to update shared memory stat
 * @base: base structure for tracking BO usage in a VM
 *
 * Takes the vm status_lock and updates the shared memory stat. If the basic
 * stat changed (e.g. buffer was moved) amdgpu_vm_update_stats needs to be
 * called as well.
 */
static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base)
{
        struct amdgpu_vm *vm = base->vm;
        struct amdgpu_bo *bo = base->bo;
        uint64_t size = amdgpu_bo_size(bo);
        uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);
        bool shared;

        spin_lock(&vm->status_lock);
        shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
        if (base->shared != shared) {
                base->shared = shared;
                if (shared) {
                        vm->stats[bo_memtype].drm.shared += size;
                        vm->stats[bo_memtype].drm.private -= size;
                } else {
                        vm->stats[bo_memtype].drm.shared -= size;
                        vm->stats[bo_memtype].drm.private += size;
                }
        }
        spin_unlock(&vm->status_lock);
}

/**
 * amdgpu_vm_bo_update_shared - callback when bo gets shared/unshared
 * @bo: amdgpu buffer object
 *
 * Update the per-VM stats of all VMs this BO belongs to when it changes
 * from private to shared or vice versa, if needed.
 */
void amdgpu_vm_bo_update_shared(struct amdgpu_bo *bo)
{
        struct amdgpu_vm_bo_base *base;

        for (base = bo->vm_bo; base; base = base->next)
                amdgpu_vm_update_shared(base);
}

/**
 * amdgpu_vm_update_stats_locked - helper to update normal memory stat
 * @base: base structure for tracking BO usage in a VM
 * @res:  the ttm_resource to use for the purpose of accounting, may or may not
 *        be bo->tbo.resource
 * @sign: if we should add (+1) or subtract (-1) from the stat
 *
 * The caller needs to hold the vm status_lock. Useful when multiple updates
 * need to happen at the same time.
 */
static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base,
                            struct ttm_resource *res, int sign)
{
        struct amdgpu_vm *vm = base->vm;
        struct amdgpu_bo *bo = base->bo;
        int64_t size = sign * amdgpu_bo_size(bo);
        uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);

        /* For drm-total- and drm-shared-, BOs are accounted by their preferred
         * placement, see also amdgpu_bo_mem_stats_placement.
         */
        if (base->shared)
                vm->stats[bo_memtype].drm.shared += size;
        else
                vm->stats[bo_memtype].drm.private += size;

        if (res && res->mem_type < __AMDGPU_PL_NUM) {
                uint32_t res_memtype = res->mem_type;

                vm->stats[res_memtype].drm.resident += size;
                /* A BO only counts as purgeable if it is resident,
                 * since otherwise there's nothing to purge.
                 */
                if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE)
                        vm->stats[res_memtype].drm.purgeable += size;
                if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(res_memtype)))
                        vm->stats[bo_memtype].evicted += size;
        }
}

/**
 * amdgpu_vm_update_stats - helper to update normal memory stat
 * @base: base structure for tracking BO usage in a VM
 * @res:  the ttm_resource to use for the purpose of accounting, may or may not
 *        be bo->tbo.resource
 * @sign: if we should add (+1) or subtract (-1) from the stat
 *
 * Updates the basic memory stat when bo is added/deleted/moved.
 */
void amdgpu_vm_update_stats(struct amdgpu_vm_bo_base *base,
                            struct ttm_resource *res, int sign)
{
        struct amdgpu_vm *vm = base->vm;

        spin_lock(&vm->status_lock);
        amdgpu_vm_update_stats_locked(base, res, sign);
        spin_unlock(&vm->status_lock);
}

/**
 * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
 *
 * @base: base structure for tracking BO usage in a VM
 * @vm: vm to which bo is to be added
 * @bo: amdgpu buffer object
 *
 * Initialize a bo_va_base structure and add it to the appropriate lists.
 */
void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
                            struct amdgpu_vm *vm, struct amdgpu_bo *bo)
{
        base->vm = vm;
        base->bo = bo;
        base->next = NULL;
        INIT_LIST_HEAD(&base->vm_status);

        if (!bo)
                return;
        base->next = bo->vm_bo;
        bo->vm_bo = base;

        spin_lock(&vm->status_lock);
        base->shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
        amdgpu_vm_update_stats_locked(base, bo->tbo.resource, +1);
        spin_unlock(&vm->status_lock);

        if (!amdgpu_vm_is_bo_always_valid(vm, bo))
                return;

        dma_resv_assert_held(vm->root.bo->tbo.base.resv);

        ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move);
        if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
                amdgpu_vm_bo_relocated(base);
        else
                amdgpu_vm_bo_idle(base);

        if (bo->preferred_domains &
            amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))
                return;

        /*
         * We checked all the prerequisites, but it looks like this per VM BO
         * is currently evicted. Add the BO to the evicted list to make sure
         * it is validated on next VM use to avoid faults.
         */
        amdgpu_vm_bo_evicted(base);
}

/**
 * amdgpu_vm_lock_pd - lock PD in drm_exec
 *
 * @vm: vm providing the BOs
 * @exec: drm execution context
 * @num_fences: number of extra fences to reserve
 *
 * Lock the VM root PD in the DRM execution context.
 */
int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
                      unsigned int num_fences)
{
        /* We need at least two fences for the VM PD/PT updates */
        return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base,
                                    2 + num_fences);
}
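
/*
 * Illustrative sketch (not part of this file): callers typically take this
 * lock from inside a drm_exec retry loop, e.g.:
 *
 *      drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 *      drm_exec_until_all_locked(&exec) {
 *              r = amdgpu_vm_lock_pd(vm, &exec, 0);
 *              drm_exec_retry_on_contention(&exec);
 *              if (unlikely(r))
 *                      break;
 *      }
 */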

/**
 * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 *
 * Move all BOs to the end of LRU and remember their positions to put them
 * together.
 */
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
                                struct amdgpu_vm *vm)
{
        spin_lock(&adev->mman.bdev.lru_lock);
        ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
        spin_unlock(&adev->mman.bdev.lru_lock);
}

/* Create scheduler entities for page table updates */
static int amdgpu_vm_init_entities(struct amdgpu_device *adev,
                                   struct amdgpu_vm *vm)
{
        int r;

        r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
                                  adev->vm_manager.vm_pte_scheds,
                                  adev->vm_manager.vm_pte_num_scheds, NULL);
        if (r)
                goto error;

        return drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
                                     adev->vm_manager.vm_pte_scheds,
                                     adev->vm_manager.vm_pte_num_scheds, NULL);

error:
        drm_sched_entity_destroy(&vm->immediate);
        return r;
}

/* Destroy the entities for page table updates again */
static void amdgpu_vm_fini_entities(struct amdgpu_vm *vm)
{
        drm_sched_entity_destroy(&vm->immediate);
        drm_sched_entity_destroy(&vm->delayed);
}

/**
 * amdgpu_vm_generation - return the page table re-generation counter
 * @adev: the amdgpu_device
 * @vm: optional VM to check, might be NULL
 *
 * Returns a page table re-generation token to allow checking if submissions
 * are still valid to use this VM. The VM parameter might be NULL in which case
 * just the VRAM lost counter will be used.
 */
uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
        uint64_t result = (u64)atomic_read(&adev->vram_lost_counter) << 32;

        if (!vm)
                return result;

        result += lower_32_bits(vm->generation);
        /* Add one if the page tables will be re-generated on next CS */
        if (drm_sched_entity_error(&vm->delayed))
                ++result;

        return result;
}
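
/*
 * Descriptive note on the token layout above: bits 63..32 hold the VRAM lost
 * counter and bits 31..0 the lower half of vm->generation, plus one when the
 * delayed entity carries an error and the tables will be re-generated. A
 * submission that stored an older token can thus detect that its page tables
 * are gone and must be rebuilt before reuse.
 */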

/**
 * amdgpu_vm_validate - validate evicted BOs tracked in the VM
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 * @ticket: optional reservation ticket used to reserve the VM
 * @validate: callback to do the validation
 * @param: parameter for the validation callback
 *
 * Validate the page table BOs and per-VM BOs on command submission if
 * necessary. If a ticket is given, also try to validate evicted user queue
 * BOs. They must already be reserved with the given ticket.
 *
 * Returns:
 * Validation result.
 */
int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                       struct ww_acquire_ctx *ticket,
                       int (*validate)(void *p, struct amdgpu_bo *bo),
                       void *param)
{
        uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm);
        struct amdgpu_vm_bo_base *bo_base;
        struct amdgpu_bo *bo;
        int r;

        if (vm->generation != new_vm_generation) {
                vm->generation = new_vm_generation;
                amdgpu_vm_bo_reset_state_machine(vm);
                amdgpu_vm_fini_entities(vm);
                r = amdgpu_vm_init_entities(adev, vm);
                if (r)
                        return r;
        }

        spin_lock(&vm->status_lock);
        while (!list_empty(&vm->evicted)) {
                bo_base = list_first_entry(&vm->evicted,
                                           struct amdgpu_vm_bo_base,
                                           vm_status);
                spin_unlock(&vm->status_lock);

                bo = bo_base->bo;

                r = validate(param, bo);
                if (r)
                        return r;

                if (bo->tbo.type != ttm_bo_type_kernel) {
                        amdgpu_vm_bo_moved(bo_base);
                } else {
                        vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
                        amdgpu_vm_bo_relocated(bo_base);
                }
                spin_lock(&vm->status_lock);
        }
        while (ticket && !list_empty(&vm->evicted_user)) {
                bo_base = list_first_entry(&vm->evicted_user,
                                           struct amdgpu_vm_bo_base,
                                           vm_status);
                spin_unlock(&vm->status_lock);

                bo = bo_base->bo;

                if (dma_resv_locking_ctx(bo->tbo.base.resv) != ticket) {
                        struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_vm(vm);

                        pr_warn_ratelimited("Evicted user BO is not reserved\n");
                        if (ti) {
                                pr_warn_ratelimited("pid %d\n", ti->pid);
                                amdgpu_vm_put_task_info(ti);
                        }

                        return -EINVAL;
                }

                r = validate(param, bo);
                if (r)
                        return r;

                amdgpu_vm_bo_invalidated(bo_base);

                spin_lock(&vm->status_lock);
        }
        spin_unlock(&vm->status_lock);

        amdgpu_vm_eviction_lock(vm);
        vm->evicting = false;
        amdgpu_vm_eviction_unlock(vm);

        return 0;
}

/**
 * amdgpu_vm_ready - check VM is ready for updates
 *
 * @vm: VM to check
 *
 * Check if all VM PDs/PTs are ready for updates
 *
 * Returns:
 * True if VM is not evicting and no evicted BOs are pending.
 */
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
        bool empty;
        bool ret;

        amdgpu_vm_eviction_lock(vm);
        ret = !vm->evicting;
        amdgpu_vm_eviction_unlock(vm);

        spin_lock(&vm->status_lock);
        empty = list_empty(&vm->evicted);
        spin_unlock(&vm->status_lock);

        return ret && empty;
}

/**
 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
 *
 * @adev: amdgpu_device pointer
 */
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
{
        const struct amdgpu_ip_block *ip_block;
        bool has_compute_vm_bug;
        struct amdgpu_ring *ring;
        int i;

        has_compute_vm_bug = false;

        ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
        if (ip_block) {
                /* Compute has a VM bug for GFX version < 7.
                 * Compute has a VM bug for GFX 8 MEC firmware version < 673.
                 */
                if (ip_block->version->major <= 7)
                        has_compute_vm_bug = true;
                else if (ip_block->version->major == 8)
                        if (adev->gfx.mec_fw_version < 673)
                                has_compute_vm_bug = true;
        }

        for (i = 0; i < adev->num_rings; i++) {
                ring = adev->rings[i];
                if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
                        /* only compute rings */
                        ring->has_compute_vm_bug = has_compute_vm_bug;
                else
                        ring->has_compute_vm_bug = false;
        }
}

/**
 * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
 *
 * @ring: ring on which the job will be submitted
 * @job: job to submit
 *
 * Returns:
 * True if sync is needed.
 */
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
                                  struct amdgpu_job *job)
{
        struct amdgpu_device *adev = ring->adev;
        unsigned int vmhub = ring->vm_hub;
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

        if (job->vmid == 0)
                return false;

        if (job->vm_needs_flush || ring->has_compute_vm_bug)
                return true;

        if (ring->funcs->emit_gds_switch && job->gds_switch_needed)
                return true;

        if (amdgpu_vmid_had_gpu_reset(adev, &id_mgr->ids[job->vmid]))
                return true;

        return false;
}

/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
 * @job:  related job
 * @need_pipe_sync: is pipe sync needed
 *
 * Emit a VM flush when it is necessary.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
                    bool need_pipe_sync)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_isolation *isolation = &adev->isolation[ring->xcp_id];
        unsigned int vmhub = ring->vm_hub;
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
        bool spm_update_needed = job->spm_update_needed;
        bool gds_switch_needed = ring->funcs->emit_gds_switch &&
                job->gds_switch_needed;
        bool vm_flush_needed = job->vm_needs_flush;
        bool cleaner_shader_needed = false;
        bool pasid_mapping_needed = false;
        struct dma_fence *fence = NULL;
        unsigned int patch;
        int r;

        if (amdgpu_vmid_had_gpu_reset(adev, id)) {
                gds_switch_needed = true;
                vm_flush_needed = true;
                pasid_mapping_needed = true;
                spm_update_needed = true;
        }

        mutex_lock(&id_mgr->lock);
        if (id->pasid != job->pasid || !id->pasid_mapping ||
            !dma_fence_is_signaled(id->pasid_mapping))
                pasid_mapping_needed = true;
        mutex_unlock(&id_mgr->lock);

        gds_switch_needed &= !!ring->funcs->emit_gds_switch;
        vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
                        job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
        pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
                ring->funcs->emit_wreg;

        cleaner_shader_needed = job->run_cleaner_shader &&
                adev->gfx.enable_cleaner_shader &&
                ring->funcs->emit_cleaner_shader && job->base.s_fence &&
                &job->base.s_fence->scheduled == isolation->spearhead;

        if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync &&
            !cleaner_shader_needed)
                return 0;

        amdgpu_ring_ib_begin(ring);
        if (ring->funcs->init_cond_exec)
                patch = amdgpu_ring_init_cond_exec(ring,
                                                   ring->cond_exe_gpu_addr);

        if (need_pipe_sync)
                amdgpu_ring_emit_pipeline_sync(ring);

        if (cleaner_shader_needed)
                ring->funcs->emit_cleaner_shader(ring);

        if (vm_flush_needed) {
                trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
                amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
        }

        if (pasid_mapping_needed)
                amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);

        if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
                adev->gfx.rlc.funcs->update_spm_vmid(adev, ring, job->vmid);

        if (ring->funcs->emit_gds_switch &&
            gds_switch_needed) {
                amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
                                            job->gds_size, job->gws_base,
                                            job->gws_size, job->oa_base,
                                            job->oa_size);
        }

        if (vm_flush_needed || pasid_mapping_needed || cleaner_shader_needed) {
                r = amdgpu_fence_emit(ring, &fence, NULL, 0);
                if (r)
                        return r;
        }

        if (vm_flush_needed) {
                mutex_lock(&id_mgr->lock);
                dma_fence_put(id->last_flush);
                id->last_flush = dma_fence_get(fence);
                id->current_gpu_reset_count =
                        atomic_read(&adev->gpu_reset_counter);
                mutex_unlock(&id_mgr->lock);
        }

        if (pasid_mapping_needed) {
                mutex_lock(&id_mgr->lock);
                id->pasid = job->pasid;
                dma_fence_put(id->pasid_mapping);
                id->pasid_mapping = dma_fence_get(fence);
                mutex_unlock(&id_mgr->lock);
        }

        /*
         * Make sure that all other submissions wait for the cleaner shader to
         * finish before we push them to the HW.
         */
        if (cleaner_shader_needed) {
                trace_amdgpu_cleaner_shader(ring, fence);
                mutex_lock(&adev->enforce_isolation_mutex);
                dma_fence_put(isolation->spearhead);
                isolation->spearhead = dma_fence_get(fence);
                mutex_unlock(&adev->enforce_isolation_mutex);
        }
        dma_fence_put(fence);

        amdgpu_ring_patch_cond_exec(ring, patch);

        /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
        if (ring->funcs->emit_switch_buffer) {
                amdgpu_ring_emit_switch_buffer(ring);
                amdgpu_ring_emit_switch_buffer(ring);
        }

        amdgpu_ring_ib_end(ring);
        return 0;
}

/**
 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm by searching @bo's vm list for the
 * requested vm.
 *
 * Object has to be reserved!
 *
 * Returns:
 * Found bo_va or NULL.
 */
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
                                       struct amdgpu_bo *bo)
{
        struct amdgpu_vm_bo_base *base;

        for (base = bo->vm_bo; base; base = base->next) {
                if (base->vm != vm)
                        continue;

                return container_of(base, struct amdgpu_bo_va, base);
        }
        return NULL;
}

/**
 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 *
 * @pages_addr: optional DMA address to use for lookup
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to.
 *
 * Returns:
 * The address to use for the page table entry.
 */
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
        uint64_t result;

        /* page table offset */
        result = pages_addr[addr >> PAGE_SHIFT];

        /* in case cpu page size != gpu page size */
        result |= addr & (~PAGE_MASK);

        result &= 0xFFFFFFFFFFFFF000ULL;

        return result;
}
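
/*
 * Worked example (descriptive note): with 64K CPU pages and 4K GPU pages,
 * addr >> PAGE_SHIFT selects the DMA address of the surrounding CPU page,
 * the OR with addr & ~PAGE_MASK re-adds the offset inside that CPU page,
 * and the final mask drops the sub-4K bits so the result is the 4K-aligned
 * address that the GPU page table entry needs.
 */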

/**
 * amdgpu_vm_update_pdes - make sure that all directories are valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @immediate: submit immediately to the paging queue
 *
 * Makes sure all directories are up to date.
 *
 * Returns:
 * 0 for success, error for failure.
 */
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
                          struct amdgpu_vm *vm, bool immediate)
{
        struct amdgpu_vm_update_params params;
        struct amdgpu_vm_bo_base *entry;
        bool flush_tlb_needed = false;
        LIST_HEAD(relocated);
        int r, idx;

        spin_lock(&vm->status_lock);
        list_splice_init(&vm->relocated, &relocated);
        spin_unlock(&vm->status_lock);

        if (list_empty(&relocated))
                return 0;

        if (!drm_dev_enter(adev_to_drm(adev), &idx))
                return -ENODEV;

        memset(&params, 0, sizeof(params));
        params.adev = adev;
        params.vm = vm;
        params.immediate = immediate;

        r = vm->update_funcs->prepare(&params, NULL);
        if (r)
                goto error;

        list_for_each_entry(entry, &relocated, vm_status) {
                /* vm_flush_needed after updating moved PDEs */
                flush_tlb_needed |= entry->moved;

                r = amdgpu_vm_pde_update(&params, entry);
                if (r)
                        goto error;
        }

        r = vm->update_funcs->commit(&params, &vm->last_update);
        if (r)
                goto error;

        if (flush_tlb_needed)
                atomic64_inc(&vm->tlb_seq);

        while (!list_empty(&relocated)) {
                entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base,
                                         vm_status);
                amdgpu_vm_bo_idle(entry);
        }

error:
        drm_dev_exit(idx);
        return r;
}

/**
 * amdgpu_vm_tlb_seq_cb - make sure to increment tlb sequence
 * @fence: unused
 * @cb: the callback structure
 *
 * Increments the tlb sequence to make sure that future CS execute a VM flush.
 */
static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
                                 struct dma_fence_cb *cb)
{
        struct amdgpu_vm_tlb_seq_struct *tlb_cb;

        tlb_cb = container_of(cb, typeof(*tlb_cb), cb);
        atomic64_inc(&tlb_cb->vm->tlb_seq);
        kfree(tlb_cb);
}

/**
 * amdgpu_vm_tlb_flush - prepare TLB flush
 *
 * @params: parameters for update
 * @fence: input fence to sync TLB flush with
 * @tlb_cb: the callback structure
 *
 * Increments the tlb sequence to make sure that future CS execute a VM flush.
 */
static void
amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
                    struct dma_fence **fence,
                    struct amdgpu_vm_tlb_seq_struct *tlb_cb)
{
        struct amdgpu_vm *vm = params->vm;

        tlb_cb->vm = vm;
        if (!fence || !*fence) {
                amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
                return;
        }

        if (!dma_fence_add_callback(*fence, &tlb_cb->cb,
                                    amdgpu_vm_tlb_seq_cb)) {
                dma_fence_put(vm->last_tlb_flush);
                vm->last_tlb_flush = dma_fence_get(*fence);
        } else {
                amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
        }

        /* Prepare a TLB flush fence to be attached to PTs */
        if (!params->unlocked && vm->is_compute_context) {
                amdgpu_vm_tlb_fence_create(params->adev, vm, fence);

                /* Makes sure no PD/PT is freed before the flush */
                dma_resv_add_fence(vm->root.bo->tbo.base.resv, *fence,
                                   DMA_RESV_USAGE_BOOKKEEP);
        }
}

/**
 * amdgpu_vm_update_range - update a range in the vm page table
 *
 * @adev: amdgpu_device pointer to use for commands
 * @vm: the VM to update the range
 * @immediate: immediate submission in a page fault
 * @unlocked: unlocked invalidation during MM callback
 * @flush_tlb: trigger tlb invalidation after update completed
 * @allow_override: change MTYPE for local NUMA nodes
 * @sync: fences we need to sync to
 * @start: start of mapped range
 * @last: last mapped entry
 * @flags: flags for the entries
 * @offset: offset into nodes and pages_addr
 * @vram_base: base for vram mappings
 * @res: ttm_resource to map
 * @pages_addr: DMA addresses to use for mapping
 * @fence: optional resulting fence
 *
 * Fill in the page table entries between @start and @last.
 *
 * Returns:
 * 0 for success, negative error code for failure.
 */
int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                           bool immediate, bool unlocked, bool flush_tlb,
                           bool allow_override, struct amdgpu_sync *sync,
                           uint64_t start, uint64_t last, uint64_t flags,
                           uint64_t offset, uint64_t vram_base,
                           struct ttm_resource *res, dma_addr_t *pages_addr,
                           struct dma_fence **fence)
{
        struct amdgpu_vm_tlb_seq_struct *tlb_cb;
        struct amdgpu_vm_update_params params;
        struct amdgpu_res_cursor cursor;
        int r, idx;

        if (!drm_dev_enter(adev_to_drm(adev), &idx))
                return -ENODEV;

        tlb_cb = kmalloc(sizeof(*tlb_cb), GFP_KERNEL);
        if (!tlb_cb) {
                drm_dev_exit(idx);
                return -ENOMEM;
        }

        /* On Vega20+XGMI, PTEs get inadvertently cached in the L2 texture
         * cache, so do a heavy-weight TLB flush unconditionally.
         */
        flush_tlb |= adev->gmc.xgmi.num_physical_nodes &&
                     amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0);

        /*
         * On GFX8 and older any 8 PTE block with a valid bit set enters the TLB
         */
        flush_tlb |= amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 0);

        memset(&params, 0, sizeof(params));
        params.adev = adev;
        params.vm = vm;
        params.immediate = immediate;
        params.pages_addr = pages_addr;
        params.unlocked = unlocked;
        params.needs_flush = flush_tlb;
        params.allow_override = allow_override;
        INIT_LIST_HEAD(&params.tlb_flush_waitlist);

        amdgpu_vm_eviction_lock(vm);
        if (vm->evicting) {
                r = -EBUSY;
                goto error_free;
        }

        if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
                struct dma_fence *tmp = dma_fence_get_stub();

                amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true);
                swap(vm->last_unlocked, tmp);
                dma_fence_put(tmp);
        }

        r = vm->update_funcs->prepare(&params, sync);
        if (r)
                goto error_free;

        amdgpu_res_first(pages_addr ? NULL : res, offset,
                         (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor);
        while (cursor.remaining) {
                uint64_t tmp, num_entries, addr;

                num_entries = cursor.size >> AMDGPU_GPU_PAGE_SHIFT;
                if (pages_addr) {
                        bool contiguous = true;

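                        /*
                         * Scan the following CPU pages to see how many are
                         * physically contiguous, so one mapping run can cover
                         * them instead of per-page lookups.
                         */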
                        if (num_entries > AMDGPU_GPU_PAGES_IN_CPU_PAGE) {
                                uint64_t pfn = cursor.start >> PAGE_SHIFT;
                                uint64_t count;

                                contiguous = pages_addr[pfn + 1] ==
                                        pages_addr[pfn] + PAGE_SIZE;

                                tmp = num_entries /
                                        AMDGPU_GPU_PAGES_IN_CPU_PAGE;
                                for (count = 2; count < tmp; ++count) {
                                        uint64_t idx = pfn + count;

                                        if (contiguous != (pages_addr[idx] ==
                                            pages_addr[idx - 1] + PAGE_SIZE))
                                                break;
                                }
                                if (!contiguous)
                                        count--;
                                num_entries = count *
                                        AMDGPU_GPU_PAGES_IN_CPU_PAGE;
                        }

                        if (!contiguous) {
                                addr = cursor.start;
                                params.pages_addr = pages_addr;
                        } else {
                                addr = pages_addr[cursor.start >> PAGE_SHIFT];
                                params.pages_addr = NULL;
                        }

                } else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT_FLAG(adev))) {
                        addr = vram_base + cursor.start;
                } else {
                        addr = 0;
                }

                tmp = start + num_entries;
                r = amdgpu_vm_ptes_update(&params, start, tmp, addr, flags);
                if (r)
                        goto error_free;

                amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE);
                start = tmp;
        }

        r = vm->update_funcs->commit(&params, fence);
        if (r)
                goto error_free;

        if (params.needs_flush) {
                amdgpu_vm_tlb_flush(&params, fence, tlb_cb);
                tlb_cb = NULL;
        }

        amdgpu_vm_pt_free_list(adev, &params);

error_free:
        kfree(tlb_cb);
        amdgpu_vm_eviction_unlock(vm);
        drm_dev_exit(idx);
        return r;
}

void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
                          struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM])
{
        spin_lock(&vm->status_lock);
        memcpy(stats, vm->stats, sizeof(*stats) * __AMDGPU_PL_NUM);
        spin_unlock(&vm->status_lock);
}

/**
 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested BO and VM object
 * @clear: if true clear the entries
 *
 * Fill in the page table entries for @bo_va.
 *
 * Returns:
 * 0 for success, -EINVAL for failure.
 */
int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
                        bool clear)
{
        struct amdgpu_bo *bo = bo_va->base.bo;
        struct amdgpu_vm *vm = bo_va->base.vm;
        struct amdgpu_bo_va_mapping *mapping;
        struct dma_fence **last_update;
        dma_addr_t *pages_addr = NULL;
        struct ttm_resource *mem;
        struct amdgpu_sync sync;
        bool flush_tlb = clear;
        uint64_t vram_base;
        uint64_t flags;
        bool uncached;
        int r;

        amdgpu_sync_create(&sync);
        if (clear) {
                mem = NULL;

                /* Implicitly sync to command submissions in the same VM before
                 * unmapping.
                 */
                r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
                                     AMDGPU_SYNC_EQ_OWNER, vm);
                if (r)
                        goto error_free;
                if (bo) {
                        r = amdgpu_sync_kfd(&sync, bo->tbo.base.resv);
                        if (r)
                                goto error_free;
                }
        } else if (!bo) {
                mem = NULL;

                /* PRT map operations don't need to sync to anything. */

        } else {
                struct drm_gem_object *obj = &bo->tbo.base;

                if (obj->import_attach && bo_va->is_xgmi) {
                        struct dma_buf *dma_buf = obj->import_attach->dmabuf;
                        struct drm_gem_object *gobj = dma_buf->priv;
                        struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

                        if (abo->tbo.resource &&
                            abo->tbo.resource->mem_type == TTM_PL_VRAM)
                                bo = gem_to_amdgpu_bo(gobj);
                }
                mem = bo->tbo.resource;
                if (mem && (mem->mem_type == TTM_PL_TT ||
                            mem->mem_type == AMDGPU_PL_PREEMPT))
                        pages_addr = bo->tbo.ttm->dma_address;

                /* Implicitly sync to moving fences before mapping anything */
                r = amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv,
                                     AMDGPU_SYNC_EXPLICIT, vm);
                if (r)
                        goto error_free;
        }

        if (bo) {
                struct amdgpu_device *bo_adev;

                flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);

                if (amdgpu_bo_encrypted(bo))
                        flags |= AMDGPU_PTE_TMZ;

                bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
                vram_base = bo_adev->vm_manager.vram_base_offset;
                uncached = (bo->flags & AMDGPU_GEM_CREATE_UNCACHED) != 0;
        } else {
                flags = 0x0;
                vram_base = 0;
                uncached = false;
        }

        if (clear || amdgpu_vm_is_bo_always_valid(vm, bo))
                last_update = &vm->last_update;
        else
                last_update = &bo_va->last_pt_update;

        if (!clear && bo_va->base.moved) {
                flush_tlb = true;
                list_splice_init(&bo_va->valids, &bo_va->invalids);

        } else if (bo_va->cleared != clear) {
                list_splice_init(&bo_va->valids, &bo_va->invalids);
        }

        list_for_each_entry(mapping, &bo_va->invalids, list) {
                uint64_t update_flags = flags;

                /* Normally, bo_va->flags only contains the READABLE and
                 * WRITEABLE bits here, but filter the flags in the first
                 * place just in case.
                 */
                if (!(mapping->flags & AMDGPU_PTE_READABLE))
                        update_flags &= ~AMDGPU_PTE_READABLE;
                if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
                        update_flags &= ~AMDGPU_PTE_WRITEABLE;

                /* Apply ASIC specific mapping flags */
                amdgpu_gmc_get_vm_pte(adev, mapping, &update_flags);

                trace_amdgpu_vm_bo_update(mapping);

                r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb,
                                           !uncached, &sync, mapping->start,
                                           mapping->last, update_flags,
                                           mapping->offset, vram_base, mem,
                                           pages_addr, last_update);
                if (r)
                        goto error_free;
        }

        /* If the BO is not in its preferred location add it back to
         * the evicted list so that it gets validated again on the
         * next command submission.
         */
        if (amdgpu_vm_is_bo_always_valid(vm, bo)) {
                if (bo->tbo.resource &&
                    !(bo->preferred_domains &
                      amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type)))
                        amdgpu_vm_bo_evicted(&bo_va->base);
                else
                        amdgpu_vm_bo_idle(&bo_va->base);
        } else {
                amdgpu_vm_bo_done(&bo_va->base);
        }

        list_splice_init(&bo_va->invalids, &bo_va->valids);
        bo_va->cleared = clear;
        bo_va->base.moved = false;

        if (trace_amdgpu_vm_bo_mapping_enabled()) {
                list_for_each_entry(mapping, &bo_va->valids, list)
                        trace_amdgpu_vm_bo_mapping(mapping);
        }

error_free:
        amdgpu_sync_free(&sync);
        return r;
}

/**
 * amdgpu_vm_update_prt_state - update the global PRT state
 *
 * @adev: amdgpu_device pointer
 */
static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
{
        unsigned long flags;
        bool enable;

        spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
        enable = !!atomic_read(&adev->vm_manager.num_prt_users);
        adev->gmc.gmc_funcs->set_prt(adev, enable);
        spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
}

/**
 * amdgpu_vm_prt_get - add a PRT user
 *
 * @adev: amdgpu_device pointer
 */
static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
{
        if (!adev->gmc.gmc_funcs->set_prt)
                return;

        if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
                amdgpu_vm_update_prt_state(adev);
}

/**
 * amdgpu_vm_prt_put - drop a PRT user
 *
 * @adev: amdgpu_device pointer
 */
static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
{
        if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
                amdgpu_vm_update_prt_state(adev);
}
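
/*
 * Descriptive note: PRT support is reference counted. Each PRT mapping takes
 * a reference via amdgpu_vm_prt_get(); when the mapping is freed, the
 * reference is only dropped once the fence of the unmap operation signals
 * (see amdgpu_vm_add_prt_cb() below), so the hardware PRT state stays
 * enabled until the last PRT PTE has actually been cleared.
 */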

/**
 * amdgpu_vm_prt_cb - callback for updating the PRT status
 *
 * @fence: fence for the callback
 * @_cb: the callback function
 */
static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
{
        struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);

        amdgpu_vm_prt_put(cb->adev);
        kfree(cb);
}

/**
 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
 *
 * @adev: amdgpu_device pointer
 * @fence: fence for the callback
 */
static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
                                 struct dma_fence *fence)
{
        struct amdgpu_prt_cb *cb;

        if (!adev->gmc.gmc_funcs->set_prt)
                return;

        cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
        if (!cb) {
                /* Last resort when we are OOM */
                if (fence)
                        dma_fence_wait(fence, false);

                amdgpu_vm_prt_put(adev);
        } else {
                cb->adev = adev;
                if (!fence || dma_fence_add_callback(fence, &cb->cb,
                                                     amdgpu_vm_prt_cb))
                        amdgpu_vm_prt_cb(fence, &cb->cb);
        }
}

/**
 * amdgpu_vm_free_mapping - free a mapping
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @mapping: mapping to be freed
 * @fence: fence of the unmap operation
 *
 * Free a mapping and make sure we decrease the PRT usage count if applicable.
 */
static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
                                   struct amdgpu_vm *vm,
                                   struct amdgpu_bo_va_mapping *mapping,
                                   struct dma_fence *fence)
{
        if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
                amdgpu_vm_add_prt_cb(adev, fence);
        kfree(mapping);
}

/**
 * amdgpu_vm_prt_fini - finish all prt mappings
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Register a cleanup callback to disable PRT support after VM dies.
 */
static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
        struct dma_resv *resv = vm->root.bo->tbo.base.resv;
        struct dma_resv_iter cursor;
        struct dma_fence *fence;

        dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
                /* Add a callback for each fence in the reservation object */
                amdgpu_vm_prt_get(adev);
                amdgpu_vm_add_prt_cb(adev, fence);
        }
}
1503
1504 /**
1505  * amdgpu_vm_clear_freed - clear freed BOs in the PT
1506  *
1507  * @adev: amdgpu_device pointer
1508  * @vm: requested vm
1509  * @fence: optional resulting fence (unchanged if no work needed to be done
1510  * or if an error occurred)
1511  *
1512  * Make sure all freed BOs are cleared in the PT.
1513  * PTs have to be reserved and mutex must be locked!
1514  *
1515  * Returns:
1516  * 0 for success.
1517  *
1518  */
1519 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1520                           struct amdgpu_vm *vm,
1521                           struct dma_fence **fence)
1522 {
1523         struct amdgpu_bo_va_mapping *mapping;
1524         struct dma_fence *f = NULL;
1525         struct amdgpu_sync sync;
1526         int r;
1527
1529         /*
1530          * Implicitly sync to command submissions in the same VM before
1531          * unmapping.
1532          */
1533         amdgpu_sync_create(&sync);
1534         r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
1535                              AMDGPU_SYNC_EQ_OWNER, vm);
1536         if (r)
1537                 goto error_free;
1538
1539         while (!list_empty(&vm->freed)) {
1540                 mapping = list_first_entry(&vm->freed,
1541                         struct amdgpu_bo_va_mapping, list);
1542                 list_del(&mapping->list);
1543
1544                 r = amdgpu_vm_update_range(adev, vm, false, false, true, false,
1545                                            &sync, mapping->start, mapping->last,
1546                                            0, 0, 0, NULL, NULL, &f);
1547                 amdgpu_vm_free_mapping(adev, vm, mapping, f);
1548                 if (r) {
1549                         dma_fence_put(f);
1550                         goto error_free;
1551                 }
1552         }
1553
1554         if (fence && f) {
1555                 dma_fence_put(*fence);
1556                 *fence = f;
1557         } else {
1558                 dma_fence_put(f);
1559         }
1560
1561 error_free:
1562         amdgpu_sync_free(&sync);
1563         return r;
1564 }
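
/*
 * Usage sketch (hypothetical caller, PTs reserved as required above):
 *
 *	struct dma_fence *fence = NULL;
 *	int r;
 *
 *	r = amdgpu_vm_clear_freed(adev, vm, &fence);
 *	if (!r && fence) {
 *		dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *	}
 *
 * Waiting on the returned fence guarantees the PTE invalidation has
 * actually landed in the page tables.
 */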
1566
1567 /**
1568  * amdgpu_vm_handle_moved - handle moved BOs in the PT
1569  *
1570  * @adev: amdgpu_device pointer
1571  * @vm: requested vm
1572  * @ticket: optional reservation ticket used to reserve the VM
1573  *
1574  * Make sure all BOs which are moved are updated in the PTs.
1575  *
1576  * Returns:
1577  * 0 for success.
1578  *
1579  * PTs have to be reserved!
1580  */
1581 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1582                            struct amdgpu_vm *vm,
1583                            struct ww_acquire_ctx *ticket)
1584 {
1585         struct amdgpu_bo_va *bo_va;
1586         struct dma_resv *resv;
1587         bool clear, unlock;
1588         int r;
1589
1590         spin_lock(&vm->status_lock);
1591         while (!list_empty(&vm->moved)) {
1592                 bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
1593                                          base.vm_status);
1594                 spin_unlock(&vm->status_lock);
1595
1596                 /* Per-VM BOs never need to be cleared in the page tables */
1597                 r = amdgpu_vm_bo_update(adev, bo_va, false);
1598                 if (r)
1599                         return r;
1600                 spin_lock(&vm->status_lock);
1601         }
1602
1603         while (!list_empty(&vm->invalidated)) {
1604                 bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
1605                                          base.vm_status);
1606                 resv = bo_va->base.bo->tbo.base.resv;
1607                 spin_unlock(&vm->status_lock);
1608
1609                 /* Try to reserve the BO to avoid clearing its ptes */
1610                 if (!adev->debug_vm && dma_resv_trylock(resv)) {
1611                         clear = false;
1612                         unlock = true;
1613                 /* The caller is already holding the reservation lock */
1614                 } else if (ticket && dma_resv_locking_ctx(resv) == ticket) {
1615                         clear = false;
1616                         unlock = false;
1617                 /* Somebody else is using the BO right now */
1618                 } else {
1619                         clear = true;
1620                         unlock = false;
1621                 }
1622
1623                 r = amdgpu_vm_bo_update(adev, bo_va, clear);
1624
1625                 if (unlock)
1626                         dma_resv_unlock(resv);
1627                 if (r)
1628                         return r;
1629
1630                 /* Remember evicted DMABuf imports in compute VMs for later
1631                  * validation
1632                  */
1633                 if (vm->is_compute_context &&
1634                     bo_va->base.bo->tbo.base.import_attach &&
1635                     (!bo_va->base.bo->tbo.resource ||
1636                      bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM))
1637                         amdgpu_vm_bo_evicted_user(&bo_va->base);
1638
1639                 spin_lock(&vm->status_lock);
1640         }
1641         spin_unlock(&vm->status_lock);
1642
1643         return 0;
1644 }
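
/*
 * Summary of the reservation handling above (informative only):
 *
 *	reservation state of the BO          clear   unlock
 *	trylock succeeded (and !debug_vm)    false   true
 *	already locked via @ticket           false   false
 *	held by somebody else                true    false
 *
 * "clear = true" means the PTEs are cleared instead of updated, since
 * the BO's current placement cannot be inspected safely without
 * holding its reservation.
 */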
1645
1646 /**
1647  * amdgpu_vm_flush_compute_tlb - Flush TLB on compute VM
1648  *
1649  * @adev: amdgpu_device pointer
1650  * @vm: requested vm
1651  * @flush_type: flush type
1652  * @xcc_mask: mask of XCCs that belong to the compute partition in need of a TLB flush.
1653  *
1654  * Flush TLB if needed for a compute VM.
1655  *
1656  * Returns:
1657  * 0 for success.
1658  */
1659 int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev,
1660                                 struct amdgpu_vm *vm,
1661                                 uint32_t flush_type,
1662                                 uint32_t xcc_mask)
1663 {
1664         uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
1665         bool all_hub = false;
1666         int xcc = 0, r = 0;
1667
1668         WARN_ON_ONCE(!vm->is_compute_context);
1669
1670         /*
1671          * It can be that we race and lose here, but that is extremely unlikely
1672          * and the worst thing which could happen is that we flush the changes
1673          * into the TLB once more which is harmless.
1674          */
1675         if (atomic64_xchg(&vm->kfd_last_flushed_seq, tlb_seq) == tlb_seq)
1676                 return 0;
1677
1678         if (adev->family == AMDGPU_FAMILY_AI ||
1679             adev->family == AMDGPU_FAMILY_RV)
1680                 all_hub = true;
1681
1682         for_each_inst(xcc, xcc_mask) {
1683                 r = amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, flush_type,
1684                                                    all_hub, xcc);
1685                 if (r)
1686                         break;
1687         }
1688         return r;
1689 }
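
/*
 * Usage sketch (hypothetical): after page table updates on a compute
 * VM, flush every XCC of the partition.  Here flush_type 0 is assumed
 * to be the legacy flush and xcc_mask to describe the partition:
 *
 *	r = amdgpu_vm_flush_compute_tlb(adev, vm, 0, xcc_mask);
 *
 * Thanks to the atomic64_xchg() above, only the first caller observing
 * a given TLB sequence number actually issues the flush.
 */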
1690
1691 /**
1692  * amdgpu_vm_bo_add - add a bo to a specific vm
1693  *
1694  * @adev: amdgpu_device pointer
1695  * @vm: requested vm
1696  * @bo: amdgpu buffer object
1697  *
1698  * Add @bo to the requested vm and link it into the list of BOs
1699  * associated with that vm.
1700  *
1701  * Returns:
1702  * Newly added bo_va or NULL for failure
1703  *
1704  * Object has to be reserved!
1705  */
1706 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1707                                       struct amdgpu_vm *vm,
1708                                       struct amdgpu_bo *bo)
1709 {
1710         struct amdgpu_bo_va *bo_va;
1711
1712         bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1713         if (bo_va == NULL)
1714                 return NULL;
1715
1716         amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
1717
1718         bo_va->ref_count = 1;
1719         bo_va->last_pt_update = dma_fence_get_stub();
1720         INIT_LIST_HEAD(&bo_va->valids);
1721         INIT_LIST_HEAD(&bo_va->invalids);
1722
1723         if (!bo)
1724                 return bo_va;
1725
1726         dma_resv_assert_held(bo->tbo.base.resv);
1727         if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo)) {
1728                 bo_va->is_xgmi = true;
1729                 /* Power up XGMI if it can be potentially used */
1730                 amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
1731         }
1732
1733         return bo_va;
1734 }
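
/*
 * Usage sketch (hypothetical, @bo reserved by the caller as required):
 *
 *	struct amdgpu_bo_va *bo_va;
 *	int r;
 *
 *	bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 *	if (!bo_va)
 *		return -ENOMEM;
 *	r = amdgpu_vm_bo_map(adev, bo_va, va, 0, amdgpu_bo_size(bo),
 *			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 */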
1735
1737 /**
1738  * amdgpu_vm_bo_insert_map - insert a new mapping
1739  *
1740  * @adev: amdgpu_device pointer
1741  * @bo_va: bo_va to store the address
1742  * @mapping: the mapping to insert
1743  *
1744  * Insert a new mapping into all structures.
1745  */
1746 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
1747                                     struct amdgpu_bo_va *bo_va,
1748                                     struct amdgpu_bo_va_mapping *mapping)
1749 {
1750         struct amdgpu_vm *vm = bo_va->base.vm;
1751         struct amdgpu_bo *bo = bo_va->base.bo;
1752
1753         mapping->bo_va = bo_va;
1754         list_add(&mapping->list, &bo_va->invalids);
1755         amdgpu_vm_it_insert(mapping, &vm->va);
1756
1757         if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
1758                 amdgpu_vm_prt_get(adev);
1759
1760         if (amdgpu_vm_is_bo_always_valid(vm, bo) && !bo_va->base.moved)
1761                 amdgpu_vm_bo_moved(&bo_va->base);
1762
1763         trace_amdgpu_vm_bo_map(bo_va, mapping);
1764 }
1765
1766 /* Validate operation parameters to prevent potential abuse */
1767 static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
1768                                           struct amdgpu_bo *bo,
1769                                           uint64_t saddr,
1770                                           uint64_t offset,
1771                                           uint64_t size)
1772 {
1773         uint64_t tmp, lpfn;
1774
1775         if (saddr & AMDGPU_GPU_PAGE_MASK
1776             || offset & AMDGPU_GPU_PAGE_MASK
1777             || size & AMDGPU_GPU_PAGE_MASK)
1778                 return -EINVAL;
1779
1780         if (check_add_overflow(saddr, size, &tmp)
1781             || check_add_overflow(offset, size, &tmp)
1782             || size == 0 /* which also leads to end < begin */)
1783                 return -EINVAL;
1784
1785         /* make sure the object fits at this offset */
1786         if (bo && offset + size > amdgpu_bo_size(bo))
1787                 return -EINVAL;
1788
1789         /* Ensure the last pfn does not exceed max_pfn */
1790         lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
1791         if (lpfn >= adev->vm_manager.max_pfn)
1792                 return -EINVAL;
1793
1794         return 0;
1795 }
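
/*
 * Worked example (informative, 4 KiB GPU pages): saddr = 0x1000,
 * offset = 0 and size = 0x3000 pass the alignment checks and yield a
 * last pfn of (0x1000 + 0x3000 - 1) >> 12 = 3, which must still be
 * below adev->vm_manager.max_pfn.  size == 0, or a sum that wraps past
 * 2^64, is rejected by the overflow checks.
 */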
1796
1797 /**
1798  * amdgpu_vm_bo_map - map bo inside a vm
1799  *
1800  * @adev: amdgpu_device pointer
1801  * @bo_va: bo_va to store the address
1802  * @saddr: where to map the BO
1803  * @offset: requested offset in the BO
1804  * @size: BO size in bytes
1805  * @flags: attributes of pages (read/write/valid/etc.)
1806  *
1807  * Add a mapping of the BO at the specified addr into the VM.
1808  *
1809  * Returns:
1810  * 0 for success, error for failure.
1811  *
1812  * Object has to be reserved and unreserved outside!
1813  */
1814 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1815                      struct amdgpu_bo_va *bo_va,
1816                      uint64_t saddr, uint64_t offset,
1817                      uint64_t size, uint64_t flags)
1818 {
1819         struct amdgpu_bo_va_mapping *mapping, *tmp;
1820         struct amdgpu_bo *bo = bo_va->base.bo;
1821         struct amdgpu_vm *vm = bo_va->base.vm;
1822         uint64_t eaddr;
1823         int r;
1824
1825         r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
1826         if (r)
1827                 return r;
1828
1829         saddr /= AMDGPU_GPU_PAGE_SIZE;
1830         eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1831
1832         tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1833         if (tmp) {
1834                 /* bo and tmp overlap, invalid addr */
1835                 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1836                         "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
1837                         tmp->start, tmp->last + 1);
1838                 return -EINVAL;
1839         }
1840
1841         mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1842         if (!mapping)
1843                 return -ENOMEM;
1844
1845         mapping->start = saddr;
1846         mapping->last = eaddr;
1847         mapping->offset = offset;
1848         mapping->flags = flags;
1849
1850         amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1851
1852         return 0;
1853 }
1854
1855 /**
1856  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
1857  *
1858  * @adev: amdgpu_device pointer
1859  * @bo_va: bo_va to store the address
1860  * @saddr: where to map the BO
1861  * @offset: requested offset in the BO
1862  * @size: BO size in bytes
1863  * @flags: attributes of pages (read/write/valid/etc.)
1864  *
1865  * Add a mapping of the BO at the specified addr into the VM. Replace existing
1866  * mappings as we do so.
1867  *
1868  * Returns:
1869  * 0 for success, error for failure.
1870  *
1871  * Object has to be reserved and unreserved outside!
1872  */
1873 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
1874                              struct amdgpu_bo_va *bo_va,
1875                              uint64_t saddr, uint64_t offset,
1876                              uint64_t size, uint64_t flags)
1877 {
1878         struct amdgpu_bo_va_mapping *mapping;
1879         struct amdgpu_bo *bo = bo_va->base.bo;
1880         uint64_t eaddr;
1881         int r;
1882
1883         r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
1884         if (r)
1885                 return r;
1886
1887         /* Allocate all the needed memory */
1888         mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1889         if (!mapping)
1890                 return -ENOMEM;
1891
1892         r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
1893         if (r) {
1894                 kfree(mapping);
1895                 return r;
1896         }
1897
1898         saddr /= AMDGPU_GPU_PAGE_SIZE;
1899         eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1900
1901         mapping->start = saddr;
1902         mapping->last = eaddr;
1903         mapping->offset = offset;
1904         mapping->flags = flags;
1905
1906         amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1907
1908         return 0;
1909 }
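
/*
 * Note on the ordering above: the replacement mapping is allocated
 * before amdgpu_vm_bo_clear_mappings() runs, so an allocation failure
 * cannot strike after the existing mappings have already been removed.
 */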
1910
1911 /**
1912  * amdgpu_vm_bo_unmap - remove bo mapping from vm
1913  *
1914  * @adev: amdgpu_device pointer
1915  * @bo_va: bo_va to remove the address from
1916  * @saddr: where the BO is mapped
1917  *
1918  * Remove a mapping of the BO at the specified addr from the VM.
1919  *
1920  * Returns:
1921  * 0 for success, error for failure.
1922  *
1923  * Object has to be reserved and unreserved outside!
1924  */
1925 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1926                        struct amdgpu_bo_va *bo_va,
1927                        uint64_t saddr)
1928 {
1929         struct amdgpu_bo_va_mapping *mapping;
1930         struct amdgpu_vm *vm = bo_va->base.vm;
1931         bool valid = true;
1932
1933         saddr /= AMDGPU_GPU_PAGE_SIZE;
1934
1935         list_for_each_entry(mapping, &bo_va->valids, list) {
1936                 if (mapping->start == saddr)
1937                         break;
1938         }
1939
1940         if (&mapping->list == &bo_va->valids) {
1941                 valid = false;
1942
1943                 list_for_each_entry(mapping, &bo_va->invalids, list) {
1944                         if (mapping->start == saddr)
1945                                 break;
1946                 }
1947
1948                 if (&mapping->list == &bo_va->invalids)
1949                         return -ENOENT;
1950         }
1951
1952         list_del(&mapping->list);
1953         amdgpu_vm_it_remove(mapping, &vm->va);
1954         mapping->bo_va = NULL;
1955         trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1956
1957         if (valid)
1958                 list_add(&mapping->list, &vm->freed);
1959         else
1960                 amdgpu_vm_free_mapping(adev, vm, mapping,
1961                                        bo_va->last_pt_update);
1962
1963         return 0;
1964 }
1965
1966 /**
1967  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
1968  *
1969  * @adev: amdgpu_device pointer
1970  * @vm: VM structure to use
1971  * @saddr: start of the range
1972  * @size: size of the range
1973  *
1974  * Remove all mappings in a range, split them as appropriate.
1975  *
1976  * Returns:
1977  * 0 for success, error for failure.
1978  */
1979 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
1980                                 struct amdgpu_vm *vm,
1981                                 uint64_t saddr, uint64_t size)
1982 {
1983         struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
1984         LIST_HEAD(removed);
1985         uint64_t eaddr;
1986         int r;
1987
1988         r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
1989         if (r)
1990                 return r;
1991
1992         saddr /= AMDGPU_GPU_PAGE_SIZE;
1993         eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1994
1995         /* Allocate all the needed memory */
1996         before = kzalloc(sizeof(*before), GFP_KERNEL);
1997         if (!before)
1998                 return -ENOMEM;
1999         INIT_LIST_HEAD(&before->list);
2000
2001         after = kzalloc(sizeof(*after), GFP_KERNEL);
2002         if (!after) {
2003                 kfree(before);
2004                 return -ENOMEM;
2005         }
2006         INIT_LIST_HEAD(&after->list);
2007
2008         /* Now gather all removed mappings */
2009         tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2010         while (tmp) {
2011                 /* Remember mapping split at the start */
2012                 if (tmp->start < saddr) {
2013                         before->start = tmp->start;
2014                         before->last = saddr - 1;
2015                         before->offset = tmp->offset;
2016                         before->flags = tmp->flags;
2017                         before->bo_va = tmp->bo_va;
2018                         list_add(&before->list, &tmp->bo_va->invalids);
2019                 }
2020
2021                 /* Remember mapping split at the end */
2022                 if (tmp->last > eaddr) {
2023                         after->start = eaddr + 1;
2024                         after->last = tmp->last;
2025                         after->offset = tmp->offset;
2026                         after->offset += (after->start - tmp->start) << PAGE_SHIFT;
2027                         after->flags = tmp->flags;
2028                         after->bo_va = tmp->bo_va;
2029                         list_add(&after->list, &tmp->bo_va->invalids);
2030                 }
2031
2032                 list_del(&tmp->list);
2033                 list_add(&tmp->list, &removed);
2034
2035                 tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2036         }
2037
2038         /* And free them up */
2039         list_for_each_entry_safe(tmp, next, &removed, list) {
2040                 amdgpu_vm_it_remove(tmp, &vm->va);
2041                 list_del(&tmp->list);
2042
2043                 if (tmp->start < saddr)
2044                         tmp->start = saddr;
2045                 if (tmp->last > eaddr)
2046                         tmp->last = eaddr;
2047
2048                 tmp->bo_va = NULL;
2049                 list_add(&tmp->list, &vm->freed);
2050                 trace_amdgpu_vm_bo_unmap(NULL, tmp);
2051         }
2052
2053         /* Insert partial mapping before the range */
2054         if (!list_empty(&before->list)) {
2055                 struct amdgpu_bo *bo = before->bo_va->base.bo;
2056
2057                 amdgpu_vm_it_insert(before, &vm->va);
2058                 if (before->flags & AMDGPU_PTE_PRT_FLAG(adev))
2059                         amdgpu_vm_prt_get(adev);
2060
2061                 if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
2062                     !before->bo_va->base.moved)
2063                         amdgpu_vm_bo_moved(&before->bo_va->base);
2064         } else {
2065                 kfree(before);
2066         }
2067
2068         /* Insert partial mapping after the range */
2069         if (!list_empty(&after->list)) {
2070                 struct amdgpu_bo *bo = after->bo_va->base.bo;
2071
2072                 amdgpu_vm_it_insert(after, &vm->va);
2073                 if (after->flags & AMDGPU_PTE_PRT_FLAG(adev))
2074                         amdgpu_vm_prt_get(adev);
2075
2076                 if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
2077                     !after->bo_va->base.moved)
2078                         amdgpu_vm_bo_moved(&after->bo_va->base);
2079         } else {
2080                 kfree(after);
2081         }
2082
2083         return 0;
2084 }
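
/*
 * Illustrative example: clearing GPU addresses [0x2000, 0x5fff] out of
 * an existing mapping covering [0x1000, 0x7fff] leaves a "before"
 * remainder [0x1000, 0x1fff] and an "after" remainder [0x6000, 0x7fff],
 * while the trimmed middle part is queued on vm->freed for PTE
 * invalidation by amdgpu_vm_clear_freed().
 */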
2085
2086 /**
2087  * amdgpu_vm_bo_lookup_mapping - find mapping by address
2088  *
2089  * @vm: the requested VM
2090  * @addr: the address
2091  *
2092  * Find a mapping by its address.
2093  *
2094  * Returns:
2095  * The amdgpu_bo_va_mapping matching @addr, or NULL if none
2096  *
2097  */
2098 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2099                                                          uint64_t addr)
2100 {
2101         return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2102 }
2103
2104 /**
2105  * amdgpu_vm_bo_trace_cs - trace all reserved mappings
2106  *
2107  * @vm: the requested vm
2108  * @ticket: CS ticket
2109  *
2110  * Trace all mappings of BOs reserved during a command submission.
2111  */
2112 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
2113 {
2114         struct amdgpu_bo_va_mapping *mapping;
2115
2116         if (!trace_amdgpu_vm_bo_cs_enabled())
2117                 return;
2118
2119         for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
2120              mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
2121                 if (mapping->bo_va && mapping->bo_va->base.bo) {
2122                         struct amdgpu_bo *bo;
2123
2124                         bo = mapping->bo_va->base.bo;
2125                         if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
2126                             ticket)
2127                                 continue;
2128                 }
2129
2130                 trace_amdgpu_vm_bo_cs(mapping);
2131         }
2132 }
2133
2134 /**
2135  * amdgpu_vm_bo_del - remove a bo from a specific vm
2136  *
2137  * @adev: amdgpu_device pointer
2138  * @bo_va: requested bo_va
2139  *
2140  * Remove @bo_va->bo from the requested vm.
2141  *
2142  * Object has to be reserved!
2143  */
2144 void amdgpu_vm_bo_del(struct amdgpu_device *adev,
2145                       struct amdgpu_bo_va *bo_va)
2146 {
2147         struct amdgpu_bo_va_mapping *mapping, *next;
2148         struct amdgpu_bo *bo = bo_va->base.bo;
2149         struct amdgpu_vm *vm = bo_va->base.vm;
2150         struct amdgpu_vm_bo_base **base;
2151
2152         dma_resv_assert_held(vm->root.bo->tbo.base.resv);
2153
2154         if (bo) {
2155                 dma_resv_assert_held(bo->tbo.base.resv);
2156                 if (amdgpu_vm_is_bo_always_valid(vm, bo))
2157                         ttm_bo_set_bulk_move(&bo->tbo, NULL);
2158
2159                 for (base = &bo_va->base.bo->vm_bo; *base;
2160                      base = &(*base)->next) {
2161                         if (*base != &bo_va->base)
2162                                 continue;
2163
2164                         amdgpu_vm_update_stats(*base, bo->tbo.resource, -1);
2165                         *base = bo_va->base.next;
2166                         break;
2167                 }
2168         }
2169
2170         spin_lock(&vm->status_lock);
2171         list_del(&bo_va->base.vm_status);
2172         spin_unlock(&vm->status_lock);
2173
2174         list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2175                 list_del(&mapping->list);
2176                 amdgpu_vm_it_remove(mapping, &vm->va);
2177                 mapping->bo_va = NULL;
2178                 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2179                 list_add(&mapping->list, &vm->freed);
2180         }
2181         list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2182                 list_del(&mapping->list);
2183                 amdgpu_vm_it_remove(mapping, &vm->va);
2184                 amdgpu_vm_free_mapping(adev, vm, mapping,
2185                                        bo_va->last_pt_update);
2186         }
2187
2188         dma_fence_put(bo_va->last_pt_update);
2189
2190         if (bo && bo_va->is_xgmi)
2191                 amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN);
2192
2193         kfree(bo_va);
2194 }
2195
2196 /**
2197  * amdgpu_vm_evictable - check if we can evict a VM
2198  *
2199  * @bo: A page table of the VM.
2200  *
2201  * Check if it is possible to evict a VM.
2202  */
2203 bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
2204 {
2205         struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;
2206
2207         /* Page tables of a destroyed VM can go away immediately */
2208         if (!bo_base || !bo_base->vm)
2209                 return true;
2210
2211         /* Don't evict VM page tables while they are busy */
2212         if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP))
2213                 return false;
2214
2215         /* Try to block ongoing updates */
2216         if (!amdgpu_vm_eviction_trylock(bo_base->vm))
2217                 return false;
2218
2219         /* Don't evict VM page tables while they are updated */
2220         if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
2221                 amdgpu_vm_eviction_unlock(bo_base->vm);
2222                 return false;
2223         }
2224
2225         bo_base->vm->evicting = true;
2226         amdgpu_vm_eviction_unlock(bo_base->vm);
2227         return true;
2228 }
2229
2230 /**
2231  * amdgpu_vm_bo_invalidate - mark the bo as invalid
2232  *
2233  * @bo: amdgpu buffer object
2234  * @evicted: whether the BO is evicted
2235  *
2236  * Mark @bo as invalid.
2237  */
2238 void amdgpu_vm_bo_invalidate(struct amdgpu_bo *bo, bool evicted)
2239 {
2240         struct amdgpu_vm_bo_base *bo_base;
2241
2242         for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2243                 struct amdgpu_vm *vm = bo_base->vm;
2244
2245                 if (evicted && amdgpu_vm_is_bo_always_valid(vm, bo)) {
2246                         amdgpu_vm_bo_evicted(bo_base);
2247                         continue;
2248                 }
2249
2250                 if (bo_base->moved)
2251                         continue;
2252                 bo_base->moved = true;
2253
2254                 if (bo->tbo.type == ttm_bo_type_kernel)
2255                         amdgpu_vm_bo_relocated(bo_base);
2256                 else if (amdgpu_vm_is_bo_always_valid(vm, bo))
2257                         amdgpu_vm_bo_moved(bo_base);
2258                 else
2259                         amdgpu_vm_bo_invalidated(bo_base);
2260         }
2261 }
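
/*
 * Routing summary for the above (informative): an invalidated BO lands
 * on exactly one per-VM state list:
 *
 *	evicted && always valid      -> vm->evicted
 *	page table (kernel BO type)  -> vm->relocated
 *	always valid                 -> vm->moved
 *	everything else              -> vm->invalidated
 */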
2262
2263 /**
2264  * amdgpu_vm_bo_move - handle BO move
2265  *
2266  * @bo: amdgpu buffer object
2267  * @new_mem: the new placement of the BO move
2268  * @evicted: whether the BO is evicted
2269  *
2270  * Update the memory stats for the new placement and mark @bo as invalid.
2271  */
2272 void amdgpu_vm_bo_move(struct amdgpu_bo *bo, struct ttm_resource *new_mem,
2273                        bool evicted)
2274 {
2275         struct amdgpu_vm_bo_base *bo_base;
2276
2277         for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2278                 struct amdgpu_vm *vm = bo_base->vm;
2279
2280                 spin_lock(&vm->status_lock);
2281                 amdgpu_vm_update_stats_locked(bo_base, bo->tbo.resource, -1);
2282                 amdgpu_vm_update_stats_locked(bo_base, new_mem, +1);
2283                 spin_unlock(&vm->status_lock);
2284         }
2285
2286         amdgpu_vm_bo_invalidate(bo, evicted);
2287 }
2288
2289 /**
2290  * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2291  *
2292  * @vm_size: VM size
2293  *
2294  * Returns:
2295  * VM page table as power of two
2296  */
2297 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2298 {
2299         /* Total bits covered by PD + PTs */
2300         unsigned bits = ilog2(vm_size) + 18;
2301
2302         /* Make sure the PD is 4K in size up to 8GB of address space;
2303          * above that, split the bits equally between PD and PTs. */
2304         if (vm_size <= 8)
2305                 return (bits - 9);
2306         else
2307                 return ((bits + 3) / 2);
2308 }
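
/*
 * Worked example (informative): for vm_size = 256 GB, bits =
 * ilog2(256) + 18 = 26; since 256 > 8 the result is (26 + 3) / 2 = 14,
 * i.e. the page-table block then covers 2^14 pages and the remaining
 * 26 - 14 = 12 bits index the PD.
 */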
2309
2310 /**
2311  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2312  *
2313  * @adev: amdgpu_device pointer
2314  * @min_vm_size: the minimum vm size in GB if it's set auto
2315  * @fragment_size_default: Default PTE fragment size
2316  * @max_level: max VMPT level
2317  * @max_bits: max address space size in bits
2318  *
2319  */
2320 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
2321                            uint32_t fragment_size_default, unsigned max_level,
2322                            unsigned max_bits)
2323 {
2324         unsigned int max_size = 1 << (max_bits - 30);
2325         unsigned int vm_size;
2326         uint64_t tmp;
2327
2328         /* adjust vm size first */
2329         if (amdgpu_vm_size != -1) {
2330                 vm_size = amdgpu_vm_size;
2331                 if (vm_size > max_size) {
2332                         dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2333                                  amdgpu_vm_size, max_size);
2334                         vm_size = max_size;
2335                 }
2336         } else {
2337                 struct sysinfo si;
2338                 unsigned int phys_ram_gb;
2339
2340                 /* Optimal VM size depends on the amount of physical
2341                  * RAM available. Underlying requirements and
2342                  * assumptions:
2343                  *
2344                  *  - Need to map system memory and VRAM from all GPUs
2345                  *     - VRAM from other GPUs not known here
2346                  *     - Assume VRAM <= system memory
2347                  *  - On GFX8 and older, VM space can be segmented for
2348                  *    different MTYPEs
2349                  *  - Need to allow room for fragmentation, guard pages etc.
2350                  *
2351                  * This adds up to a rough guess of system memory x3.
2352                  * Round up to power of two to maximize the available
2353                  * VM size with the given page table size.
2354                  */
2355                 si_meminfo(&si);
2356                 phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2357                                (1 << 30) - 1) >> 30;
2358                 vm_size = roundup_pow_of_two(
2359                         clamp(phys_ram_gb * 3, min_vm_size, max_size));
2360         }
2361
2362         adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2363
2364         tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2365         if (amdgpu_vm_block_size != -1)
2366                 tmp >>= amdgpu_vm_block_size - 9;
2367         tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2368         adev->vm_manager.num_level = min_t(unsigned int, max_level, tmp);
2369         switch (adev->vm_manager.num_level) {
2370         case 3:
2371                 adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2372                 break;
2373         case 2:
2374                 adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2375                 break;
2376         case 1:
2377                 adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2378                 break;
2379         default:
2380                 dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2381         }
2382         /* block size depends on vm size and hw setup */
2383         if (amdgpu_vm_block_size != -1)
2384                 adev->vm_manager.block_size =
2385                         min((unsigned)amdgpu_vm_block_size, max_bits
2386                             - AMDGPU_GPU_PAGE_SHIFT
2387                             - 9 * adev->vm_manager.num_level);
2388         else if (adev->vm_manager.num_level > 1)
2389                 adev->vm_manager.block_size = 9;
2390         else
2391                 adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2392
2393         if (amdgpu_vm_fragment_size == -1)
2394                 adev->vm_manager.fragment_size = fragment_size_default;
2395         else
2396                 adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2397
2398         DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2399                  vm_size, adev->vm_manager.num_level + 1,
2400                  adev->vm_manager.block_size,
2401                  adev->vm_manager.fragment_size);
2402 }
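
/*
 * Worked example (informative): with amdgpu_vm_size = -1 on a 64 GB
 * RAM system, phys_ram_gb = 64, so vm_size =
 * roundup_pow_of_two(64 * 3) = 256 GB and max_pfn = 256ULL << 18
 * pages of 4 KiB each.
 */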
2403
2404 /**
2405  * amdgpu_vm_wait_idle - wait for the VM to become idle
2406  *
2407  * @vm: VM object to wait for
2408  * @timeout: timeout to wait for VM to become idle
2409  */
2410 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
2411 {
2412         timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv,
2413                                         DMA_RESV_USAGE_BOOKKEEP,
2414                                         true, timeout);
2415         if (timeout <= 0)
2416                 return timeout;
2417
2418         return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
2419 }
2420
2421 static void amdgpu_vm_destroy_task_info(struct kref *kref)
2422 {
2423         struct amdgpu_task_info *ti = container_of(kref, struct amdgpu_task_info, refcount);
2424
2425         kfree(ti);
2426 }
2427
2428 static inline struct amdgpu_vm *
2429 amdgpu_vm_get_vm_from_pasid(struct amdgpu_device *adev, u32 pasid)
2430 {
2431         struct amdgpu_vm *vm;
2432         unsigned long flags;
2433
2434         xa_lock_irqsave(&adev->vm_manager.pasids, flags);
2435         vm = xa_load(&adev->vm_manager.pasids, pasid);
2436         xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
2437
2438         return vm;
2439 }
2440
2441 /**
2442  * amdgpu_vm_put_task_info - drop a reference to the vm task_info ptr
2443  *
2444  * @task_info: task_info struct under discussion.
2445  *
2446  * Frees the vm task_info ptr at the last put.
2447  */
2448 void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info)
2449 {
2450         kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info);
2451 }
2452
2453 /**
2454  * amdgpu_vm_get_task_info_vm - Extracts task info for a vm.
2455  *
2456  * @vm: VM to get info from
2457  *
2458  * Returns the reference counted task_info structure, which must be
2459  * released again with amdgpu_vm_put_task_info().
2460  */
2461 struct amdgpu_task_info *
2462 amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm)
2463 {
2464         struct amdgpu_task_info *ti = NULL;
2465
2466         if (vm) {
2467                 ti = vm->task_info;
2468                 kref_get(&vm->task_info->refcount);
2469         }
2470
2471         return ti;
2472 }
2473
2474 /**
2475  * amdgpu_vm_get_task_info_pasid - Extracts task info for a PASID.
2476  *
2477  * @adev: drm device pointer
2478  * @pasid: PASID identifier for VM
2479  *
2480  * Returns the reference counted task_info structure, which must be
2481  * released again with amdgpu_vm_put_task_info().
2482  */
2483 struct amdgpu_task_info *
2484 amdgpu_vm_get_task_info_pasid(struct amdgpu_device *adev, u32 pasid)
2485 {
2486         return amdgpu_vm_get_task_info_vm(
2487                         amdgpu_vm_get_vm_from_pasid(adev, pasid));
2488 }
2489
2490 static int amdgpu_vm_create_task_info(struct amdgpu_vm *vm)
2491 {
2492         vm->task_info = kzalloc(sizeof(struct amdgpu_task_info), GFP_KERNEL);
2493         if (!vm->task_info)
2494                 return -ENOMEM;
2495
2496         kref_init(&vm->task_info->refcount);
2497         return 0;
2498 }
2499
2500 /**
2501  * amdgpu_vm_set_task_info - Sets VMs task info.
2502  *
2503  * @vm: vm for which to set the info
2504  */
2505 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
2506 {
2507         if (!vm->task_info)
2508                 return;
2509
2510         if (vm->task_info->pid == current->pid)
2511                 return;
2512
2513         vm->task_info->pid = current->pid;
2514         get_task_comm(vm->task_info->task_name, current);
2515
2516         if (current->group_leader->mm != current->mm)
2517                 return;
2518
2519         vm->task_info->tgid = current->group_leader->pid;
2520         get_task_comm(vm->task_info->process_name, current->group_leader);
2521 }
2522
2523 /**
2524  * amdgpu_vm_init - initialize a vm instance
2525  *
2526  * @adev: amdgpu_device pointer
2527  * @vm: requested vm
2528  * @xcp_id: GPU partition selection id
2529  *
2530  * Init @vm fields.
2531  *
2532  * Returns:
2533  * 0 for success, error for failure.
2534  */
2535 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2536                    int32_t xcp_id)
2537 {
2538         struct amdgpu_bo *root_bo;
2539         struct amdgpu_bo_vm *root;
2540         int r, i;
2541
2542         vm->va = RB_ROOT_CACHED;
2543         for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2544                 vm->reserved_vmid[i] = NULL;
2545         INIT_LIST_HEAD(&vm->evicted);
2546         INIT_LIST_HEAD(&vm->evicted_user);
2547         INIT_LIST_HEAD(&vm->relocated);
2548         INIT_LIST_HEAD(&vm->moved);
2549         INIT_LIST_HEAD(&vm->idle);
2550         INIT_LIST_HEAD(&vm->invalidated);
2551         spin_lock_init(&vm->status_lock);
2552         INIT_LIST_HEAD(&vm->freed);
2553         INIT_LIST_HEAD(&vm->done);
2554         INIT_KFIFO(vm->faults);
2555
2556         r = amdgpu_vm_init_entities(adev, vm);
2557         if (r)
2558                 return r;
2559
2560         ttm_lru_bulk_move_init(&vm->lru_bulk_move);
2561
2562         vm->is_compute_context = false;
2563
2564         vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2565                                     AMDGPU_VM_USE_CPU_FOR_GFX);
2566
2567         DRM_DEBUG_DRIVER("VM update mode is %s\n",
2568                          vm->use_cpu_for_update ? "CPU" : "SDMA");
2569         WARN_ONCE((vm->use_cpu_for_update &&
2570                    !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2571                   "CPU update of VM recommended only for large BAR system\n");
2572
2573         if (vm->use_cpu_for_update)
2574                 vm->update_funcs = &amdgpu_vm_cpu_funcs;
2575         else
2576                 vm->update_funcs = &amdgpu_vm_sdma_funcs;
2577
2578         vm->last_update = dma_fence_get_stub();
2579         vm->last_unlocked = dma_fence_get_stub();
2580         vm->last_tlb_flush = dma_fence_get_stub();
2581         vm->generation = amdgpu_vm_generation(adev, NULL);
2582
2583         mutex_init(&vm->eviction_lock);
2584         vm->evicting = false;
2585         vm->tlb_fence_context = dma_fence_context_alloc(1);
2586
2587         r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
2588                                 false, &root, xcp_id);
2589         if (r)
2590                 goto error_free_delayed;
2591
2592         root_bo = amdgpu_bo_ref(&root->bo);
2593         r = amdgpu_bo_reserve(root_bo, true);
2594         if (r) {
2595                 amdgpu_bo_unref(&root_bo);
2596                 goto error_free_delayed;
2597         }
2598
2599         amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
2600         r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
2601         if (r)
2602                 goto error_free_root;
2603
2604         r = amdgpu_vm_pt_clear(adev, vm, root, false);
2605         if (r)
2606                 goto error_free_root;
2607
2608         r = amdgpu_vm_create_task_info(vm);
2609         if (r)
2610                 DRM_DEBUG("Failed to create task info for VM\n");
2611
2612         amdgpu_bo_unreserve(vm->root.bo);
2613         amdgpu_bo_unref(&root_bo);
2614
2615         return 0;
2616
2617 error_free_root:
2618         amdgpu_vm_pt_free_root(adev, vm);
2619         amdgpu_bo_unreserve(vm->root.bo);
2620         amdgpu_bo_unref(&root_bo);
2621
2622 error_free_delayed:
2623         dma_fence_put(vm->last_tlb_flush);
2624         dma_fence_put(vm->last_unlocked);
2625         ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
2626         amdgpu_vm_fini_entities(vm);
2627
2628         return r;
2629 }
2630
2631 /**
2632  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2633  *
2634  * @adev: amdgpu_device pointer
2635  * @vm: requested vm
2636  *
2637  * This only works on GFX VMs that don't have any BOs added and no
2638  * page tables allocated yet.
2639  *
2640  * Changes the following VM parameters:
2641  * - use_cpu_for_update
2642  * - is_compute_context
2643  *
2644  * Switches the page table update backend between CPU and SDMA and,
2645  * for CPU updates, maps the page tables for CPU access.
2646  *
2647  * Returns:
2648  * 0 for success, -errno for errors.
2649  */
2650 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2651 {
2652         int r;
2653
2654         r = amdgpu_bo_reserve(vm->root.bo, true);
2655         if (r)
2656                 return r;
2657
2658         /* Update VM state */
2659         vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2660                                     AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2661         DRM_DEBUG_DRIVER("VM update mode is %s\n",
2662                          vm->use_cpu_for_update ? "CPU" : "SDMA");
2663         WARN_ONCE((vm->use_cpu_for_update &&
2664                    !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2665                   "CPU update of VM recommended only for large BAR system\n");
2666
2667         if (vm->use_cpu_for_update) {
2668                 /* Sync with last SDMA update/clear before switching to CPU */
2669                 r = amdgpu_bo_sync_wait(vm->root.bo,
2670                                         AMDGPU_FENCE_OWNER_UNDEFINED, true);
2671                 if (r)
2672                         goto unreserve_bo;
2673
2674                 vm->update_funcs = &amdgpu_vm_cpu_funcs;
2675                 r = amdgpu_vm_pt_map_tables(adev, vm);
2676                 if (r)
2677                         goto unreserve_bo;
2678
2679         } else {
2680                 vm->update_funcs = &amdgpu_vm_sdma_funcs;
2681         }
2682
2683         dma_fence_put(vm->last_update);
2684         vm->last_update = dma_fence_get_stub();
2685         vm->is_compute_context = true;
2686
2687 unreserve_bo:
2688         amdgpu_bo_unreserve(vm->root.bo);
2689         return r;
2690 }
2691
2692 static int amdgpu_vm_stats_is_zero(struct amdgpu_vm *vm)
2693 {
2694         for (int i = 0; i < __AMDGPU_PL_NUM; ++i) {
2695                 if (!(drm_memory_stats_is_zero(&vm->stats[i].drm) &&
2696                       vm->stats[i].evicted == 0))
2697                         return false;
2698         }
2699         return true;
2700 }
2701
2702 /**
2703  * amdgpu_vm_fini - tear down a vm instance
2704  *
2705  * @adev: amdgpu_device pointer
2706  * @vm: requested vm
2707  *
2708  * Tear down @vm.
2709  * Unbind the VM and remove all BOs from the vm bo list.
2710  */
2711 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2712 {
2713         struct amdgpu_bo_va_mapping *mapping, *tmp;
2714         bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2715         struct amdgpu_bo *root;
2716         unsigned long flags;
2717         int i;
2718
2719         amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
2720
2721         root = amdgpu_bo_ref(vm->root.bo);
2722         amdgpu_bo_reserve(root, true);
2723         amdgpu_vm_set_pasid(adev, vm, 0);
2724         dma_fence_wait(vm->last_unlocked, false);
2725         dma_fence_put(vm->last_unlocked);
2726         dma_fence_wait(vm->last_tlb_flush, false);
2727         /* Make sure that all fence callbacks have completed */
2728         spin_lock_irqsave(vm->last_tlb_flush->lock, flags);
2729         spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags);
2730         dma_fence_put(vm->last_tlb_flush);
2731
2732         list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2733                 if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev) && prt_fini_needed) {
2734                         amdgpu_vm_prt_fini(adev, vm);
2735                         prt_fini_needed = false;
2736                 }
2737
2738                 list_del(&mapping->list);
2739                 amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2740         }
2741
2742         amdgpu_vm_pt_free_root(adev, vm);
2743         amdgpu_bo_unreserve(root);
2744         amdgpu_bo_unref(&root);
2745         WARN_ON(vm->root.bo);
2746
2747         amdgpu_vm_fini_entities(vm);
2748
2749         if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
2750                 dev_err(adev->dev, "still active bo inside vm\n");
2751         }
2752         rbtree_postorder_for_each_entry_safe(mapping, tmp,
2753                                              &vm->va.rb_root, rb) {
2754                 /* Don't remove the mapping here, we don't want to trigger a
2755                  * rebalance and the tree is about to be destroyed anyway.
2756                  */
2757                 list_del(&mapping->list);
2758                 kfree(mapping);
2759         }
2760
2761         dma_fence_put(vm->last_update);
2762
2763         for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) {
2764                 if (vm->reserved_vmid[i]) {
2765                         amdgpu_vmid_free_reserved(adev, i);
2766                         vm->reserved_vmid[i] = false;
2767                 }
2768         }
2769
2770         ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
2771
2772         if (!amdgpu_vm_stats_is_zero(vm)) {
2773                 struct amdgpu_task_info *ti = vm->task_info;
2774
2775                 dev_warn(adev->dev,
2776                          "VM memory stats for proc %s(%d) task %s(%d) are non-zero at fini\n",
2777                          ti->process_name, ti->tgid, ti->task_name, ti->pid);
2778         }
2779
2780         amdgpu_vm_put_task_info(vm->task_info);
2781 }
2782
2783 /**
2784  * amdgpu_vm_manager_init - init the VM manager
2785  *
2786  * @adev: amdgpu_device pointer
2787  *
2788  * Initialize the VM manager structures
2789  */
2790 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2791 {
2792         unsigned i;
2793
2794         /* Concurrent flushes are only possible starting with Vega10 and
2795          * are broken on Navi10 and Navi14.
2796          */
2797         adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
2798                                               adev->asic_type == CHIP_NAVI10 ||
2799                                               adev->asic_type == CHIP_NAVI14);
2800         amdgpu_vmid_mgr_init(adev);
2801
2802         adev->vm_manager.fence_context =
2803                 dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2804         for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2805                 adev->vm_manager.seqno[i] = 0;
2806
2807         spin_lock_init(&adev->vm_manager.prt_lock);
2808         atomic_set(&adev->vm_manager.num_prt_users, 0);
2809
2810         /* Unless overridden by the user, compute VM page tables are
2811          * updated by the CPU only on large BAR systems
2812          */
2813 #ifdef CONFIG_X86_64
2814         if (amdgpu_vm_update_mode == -1) {
2815                 /* For ASICs with VF MMIO access protection,
2816                  * avoid using the CPU for VM table updates.
2817                  */
2818                 if (amdgpu_gmc_vram_full_visible(&adev->gmc) &&
2819                     !amdgpu_sriov_vf_mmio_access_protection(adev))
2820                         adev->vm_manager.vm_update_mode =
2821                                 AMDGPU_VM_USE_CPU_FOR_COMPUTE;
2822                 else
2823                         adev->vm_manager.vm_update_mode = 0;
2824         } else
2825                 adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
2826 #else
2827         adev->vm_manager.vm_update_mode = 0;
2828 #endif
2829
2830         xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ);
2831 }
2832
2833 /**
2834  * amdgpu_vm_manager_fini - cleanup VM manager
2835  *
2836  * @adev: amdgpu_device pointer
2837  *
2838  * Cleanup the VM manager and free resources.
2839  */
2840 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2841 {
2842         WARN_ON(!xa_empty(&adev->vm_manager.pasids));
2843         xa_destroy(&adev->vm_manager.pasids);
2844
2845         amdgpu_vmid_mgr_fini(adev);
2846 }
2847
2848 /**
2849  * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
2850  *
2851  * @dev: drm device pointer
2852  * @data: drm_amdgpu_vm
2853  * @filp: drm file pointer
2854  *
2855  * Returns:
2856  * 0 for success, -errno for errors.
2857  */
2858 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2859 {
2860         union drm_amdgpu_vm *args = data;
2861         struct amdgpu_device *adev = drm_to_adev(dev);
2862         struct amdgpu_fpriv *fpriv = filp->driver_priv;
2863
2864         /* No valid flags defined yet */
2865         if (args->in.flags)
2866                 return -EINVAL;
2867
2868         switch (args->in.op) {
2869         case AMDGPU_VM_OP_RESERVE_VMID:
2870                 /* We only need to reserve a VMID from the gfxhub */
2871                 if (!fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
2872                         amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
2873                         fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = true;
2874                 }
2875
2876                 break;
2877         case AMDGPU_VM_OP_UNRESERVE_VMID:
2878                 if (fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
2879                         amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(0));
2880                         fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = false;
2881                 }
2882                 break;
2883         default:
2884                 return -EINVAL;
2885         }
2886
2887         return 0;
2888 }
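
/*
 * Userspace sketch (hypothetical, via libdrm): reserving a VMID on the
 * gfxhub, e.g. for a process that needs a stable VMID while debugging:
 *
 *	union drm_amdgpu_vm args = {
 *		.in.op = AMDGPU_VM_OP_RESERVE_VMID,
 *		.in.flags = 0,
 *	};
 *	int r = drmCommandWriteRead(fd, DRM_AMDGPU_VM,
 *				    &args, sizeof(args));
 */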
2889
2890 /**
2891  * amdgpu_vm_handle_fault - graceful handling of VM faults.
2892  * @adev: amdgpu device pointer
2893  * @pasid: PASID of the VM
2894  * @ts: Timestamp of the fault
2895  * @vmid: VMID, only used for GFX 9.4.3.
2896  * @node_id: Node_id received in IH cookie. Only applicable for
2897  *           GFX 9.4.3.
2898  * @addr: Address of the fault
2899  * @write_fault: true for a write fault, false for a read fault
2900  *
2901  * Try to gracefully handle a VM fault. Return true if the fault was handled and
2902  * shouldn't be reported any more.
2903  */
2904 bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
2905                             u32 vmid, u32 node_id, uint64_t addr, uint64_t ts,
2906                             bool write_fault)
2907 {
2908         bool is_compute_context = false;
2909         struct amdgpu_bo *root;
2910         unsigned long irqflags;
2911         uint64_t value, flags;
2912         struct amdgpu_vm *vm;
2913         int r;
2914
2915         xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2916         vm = xa_load(&adev->vm_manager.pasids, pasid);
2917         if (vm) {
2918                 root = amdgpu_bo_ref(vm->root.bo);
2919                 is_compute_context = vm->is_compute_context;
2920         } else {
2921                 root = NULL;
2922         }
2923         xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2924
2925         if (!root)
2926                 return false;
2927
2928         addr /= AMDGPU_GPU_PAGE_SIZE;
2929
2930         if (is_compute_context && !svm_range_restore_pages(adev, pasid, vmid,
2931             node_id, addr, ts, write_fault)) {
2932                 amdgpu_bo_unref(&root);
2933                 return true;
2934         }
2935
2936         r = amdgpu_bo_reserve(root, true);
2937         if (r)
2938                 goto error_unref;
2939
2940         /* Double check that the VM still exists */
2941         xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2942         vm = xa_load(&adev->vm_manager.pasids, pasid);
2943         if (vm && vm->root.bo != root)
2944                 vm = NULL;
2945         xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2946         if (!vm)
2947                 goto error_unlock;
2948
2949         flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
2950                 AMDGPU_PTE_SYSTEM;
2951
2952         if (is_compute_context) {
2953                 /* Intentionally setting invalid PTE flag
2954                  * combination to force a no-retry-fault
2955                  */
2956                 flags = AMDGPU_VM_NORETRY_FLAGS;
2957                 value = 0;
2958         } else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
2959                 /* Redirect the access to the dummy page */
2960                 value = adev->dummy_page_addr;
2961                 flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
2962                         AMDGPU_PTE_WRITEABLE;
2963
2964         } else {
2965                 /* Let the hw retry silently on the PTE */
2966                 value = 0;
2967         }
2968
2969         r = dma_resv_reserve_fences(root->tbo.base.resv, 1);
2970         if (r) {
2971                 pr_debug("failed %d to reserve fence slot\n", r);
2972                 goto error_unlock;
2973         }
2974
2975         r = amdgpu_vm_update_range(adev, vm, true, false, false, false,
2976                                    NULL, addr, addr, flags, value, 0, NULL, NULL, NULL);
2977         if (r)
2978                 goto error_unlock;
2979
2980         r = amdgpu_vm_update_pdes(adev, vm, true);
2981
2982 error_unlock:
2983         amdgpu_bo_unreserve(root);
2984         if (r < 0)
2985                 DRM_ERROR("Can't handle page fault (%d)\n", r);
2986
2987 error_unref:
2988         amdgpu_bo_unref(&root);
2989
2990         return false;
2991 }
2992
2993 #if defined(CONFIG_DEBUG_FS)
2994 /**
2995  * amdgpu_debugfs_vm_bo_info  - print BO info for the VM
2996  *
2997  * @vm: Requested VM for printing BO info
2998  * @m: debugfs file
2999  *
3000  * Print BO information in debugfs file for the VM
3001  */
void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
{
	struct amdgpu_bo_va *bo_va, *tmp;
	u64 total_idle = 0;
	u64 total_evicted = 0;
	u64 total_relocated = 0;
	u64 total_moved = 0;
	u64 total_invalidated = 0;
	u64 total_done = 0;
	unsigned int total_idle_objs = 0;
	unsigned int total_evicted_objs = 0;
	unsigned int total_relocated_objs = 0;
	unsigned int total_moved_objs = 0;
	unsigned int total_invalidated_objs = 0;
	unsigned int total_done_objs = 0;
	unsigned int id = 0;

	spin_lock(&vm->status_lock);
	seq_puts(m, "\tIdle BOs:\n");
	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
		if (!bo_va->base.bo)
			continue;
		total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
	}
	total_idle_objs = id;
	id = 0;

	seq_puts(m, "\tEvicted BOs:\n");
	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
		if (!bo_va->base.bo)
			continue;
		total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
	}
	total_evicted_objs = id;
	id = 0;

	seq_puts(m, "\tRelocated BOs:\n");
	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
		if (!bo_va->base.bo)
			continue;
		total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
	}
	total_relocated_objs = id;
	id = 0;

	seq_puts(m, "\tMoved BOs:\n");
	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
		if (!bo_va->base.bo)
			continue;
		total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
	}
	total_moved_objs = id;
	id = 0;

	seq_puts(m, "\tInvalidated BOs:\n");
	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
		if (!bo_va->base.bo)
			continue;
		total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
	}
	total_invalidated_objs = id;
	id = 0;

	seq_puts(m, "\tDone BOs:\n");
	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
		if (!bo_va->base.bo)
			continue;
		total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
	}
	spin_unlock(&vm->status_lock);
	total_done_objs = id;

	seq_printf(m, "\tTotal idle size:        %12lld\tobjs:\t%d\n", total_idle,
		   total_idle_objs);
	seq_printf(m, "\tTotal evicted size:     %12lld\tobjs:\t%d\n", total_evicted,
		   total_evicted_objs);
	seq_printf(m, "\tTotal relocated size:   %12lld\tobjs:\t%d\n", total_relocated,
		   total_relocated_objs);
	seq_printf(m, "\tTotal moved size:       %12lld\tobjs:\t%d\n", total_moved,
		   total_moved_objs);
	seq_printf(m, "\tTotal invalidated size: %12lld\tobjs:\t%d\n", total_invalidated,
		   total_invalidated_objs);
	seq_printf(m, "\tTotal done size:        %12lld\tobjs:\t%d\n", total_done,
		   total_done_objs);
}
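
/*
 * A minimal usage sketch (not built): a debugfs show callback feeding the
 * printer above.  How the amdgpu_fpriv is obtained here is an illustrative
 * assumption; the in-tree consumer instead walks the device's DRM file list
 * and calls this for each client's VM.
 */
#if 0
static int example_vm_bo_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_fpriv *fpriv = m->private;	/* assumed stash */

	amdgpu_debugfs_vm_bo_info(&fpriv->vm, m);
	return 0;
}
#endif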
#endif

/**
 * amdgpu_vm_update_fault_cache - update cached fault info.
 * @adev: amdgpu device pointer
 * @pasid: PASID of the VM
 * @addr: Address of the fault
 * @status: GPUVM fault status register
 * @vmhub: which vmhub got the fault
 *
 * Cache the fault info for later use by userspace in debugging.
 */
void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev,
				  unsigned int pasid,
				  uint64_t addr,
				  uint32_t status,
				  unsigned int vmhub)
{
	struct amdgpu_vm *vm;
	unsigned long flags;

	xa_lock_irqsave(&adev->vm_manager.pasids, flags);

	vm = xa_load(&adev->vm_manager.pasids, pasid);
	/* Don't update the fault cache if status is 0.  In the multiple
	 * fault case, subsequent faults will return a 0 status, which is
	 * useless for userspace and would overwrite the useful fault status,
	 * so only update the cache if status is non-zero.
	 */
	if (vm && status) {
		vm->fault_info.addr = addr;
		vm->fault_info.status = status;
		/*
		 * Update the fault information globally as well, so it stays
		 * available even after the VM has become stale or been freed.
		 */
		adev->vm_manager.fault_info.addr = addr;
		adev->vm_manager.fault_info.vmhub = vmhub;
		adev->vm_manager.fault_info.status = status;

		if (AMDGPU_IS_GFXHUB(vmhub)) {
			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_GFX;
			vm->fault_info.vmhub |=
				(vmhub - AMDGPU_GFXHUB_START) << AMDGPU_VMHUB_IDX_SHIFT;
		} else if (AMDGPU_IS_MMHUB0(vmhub)) {
			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM0;
			vm->fault_info.vmhub |=
				(vmhub - AMDGPU_MMHUB0_START) << AMDGPU_VMHUB_IDX_SHIFT;
		} else if (AMDGPU_IS_MMHUB1(vmhub)) {
			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM1;
			vm->fault_info.vmhub |=
				(vmhub - AMDGPU_MMHUB1_START) << AMDGPU_VMHUB_IDX_SHIFT;
		} else {
			WARN_ONCE(1, "Invalid vmhub %u\n", vmhub);
		}
	}
	xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
}
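
/*
 * A rough caller sketch (not built): the per-ASIC GMC interrupt handlers are
 * the producers for this cache.  The handler name, the address decode and
 * the way the fault status is obtained below are illustrative assumptions
 * only; the real handlers decode the full address from several IV ring dwords
 * and read the hub's protection fault status register.
 */
#if 0
static void example_process_vm_fault(struct amdgpu_device *adev,
				     struct amdgpu_iv_entry *entry,
				     uint32_t status)
{
	/* Faulting page address reassembled from the IV ring payload */
	uint64_t addr = (uint64_t)entry->src_data[0] << AMDGPU_GPU_PAGE_SHIFT;

	amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status,
				     entry->vmid_src ? AMDGPU_MMHUB0(0) :
				     AMDGPU_GFXHUB(0));
}
#endif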

/**
 * amdgpu_vm_is_bo_always_valid - check if the BO is always valid in the VM
 *
 * @vm: VM to test against.
 * @bo: BO to be tested.
 *
 * Returns true if the BO shares the dma_resv object with the root PD and is
 * therefore always guaranteed to be valid inside the VM.
 */
bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo)
{
	return bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv;
}
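
/*
 * A usage sketch (not built): when a BO backing some mapping moves, the check
 * above decides how much bookkeeping is needed.  Per-VM BOs share the root PD
 * reservation, so they can be revalidated directly under it; whether these
 * exact state helpers apply is an assumption for illustration.
 */
#if 0
static void example_bo_moved(struct amdgpu_vm *vm, struct amdgpu_bo_va *bo_va)
{
	if (amdgpu_vm_is_bo_always_valid(vm, bo_va->base.bo))
		amdgpu_vm_bo_moved(&bo_va->base);	/* shared resv, cheap path */
	else
		amdgpu_vm_bo_invalidated(&bo_va->base);	/* needs per-BO validation */
}
#endif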