Commit | Line | Data |
---|---|---|
a46a2cd1 FK |
1 | /* |
2 | * Copyright 2014-2018 Advanced Micro Devices, Inc. | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice shall be included in | |
12 | * all copies or substantial portions of the Software. | |
13 | * | |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
20 | * OTHER DEALINGS IN THE SOFTWARE. | |
21 | */ | |
22 | ||
23 | #define pr_fmt(fmt) "kfd2kgd: " fmt | |
24 | ||
25 | #include <linux/list.h> | |
548da31d | 26 | #include <linux/pagemap.h> |
5ae0283e | 27 | #include <linux/sched/mm.h> |
1dde0ea9 | 28 | #include <linux/dma-buf.h> |
a46a2cd1 FK |
29 | #include <drm/drmP.h> |
30 | #include "amdgpu_object.h" | |
31 | #include "amdgpu_vm.h" | |
32 | #include "amdgpu_amdkfd.h" | |
33 | ||
34 | /* Special VM and GART address alignment needed for VI pre-Fiji due to | |
35 | * a HW bug. | |
36 | */ | |
37 | #define VI_BO_SIZE_ALIGN (0x8000) | |
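/* 0x8000 bytes = 32 KiB. The alignment is applied conditionally in
 * amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu() below, and only for VI
 * parts other than Fiji and Polaris.
 */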
38 | ||
5ae0283e FK |
39 | /* BO flag to indicate a KFD userptr BO */ |
40 | #define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63) | |
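/* Bit 63 sits well above the AMDGPU_GEM_CREATE_* flag range, so
 * (presumably by design) KFD can tag userptr BOs in bo->flags without
 * colliding with core GEM flags.
 */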
41 | ||
42 | /* Userptr restore delay, just long enough to allow consecutive VM | |
43 | * changes to accumulate | |
44 | */ | |
45 | #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1 | |
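/* A minimal sketch of how this delay is meant to be used; the
 * scheduling call itself is outside this excerpt, so the exact form
 * shown here is an assumption:
 *
 *	schedule_delayed_work(&process_info->restore_userptr_work,
 *			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
 *
 * Re-arming the same delayed work on every MMU notifier invalidation
 * debounces a burst of VM changes into a single restore pass.
 */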
46 | ||
a46a2cd1 FK |
47 | /* Impose limit on how much memory KFD can use */ |
48 | static struct { | |
49 | uint64_t max_system_mem_limit; | |
5d240da9 | 50 | uint64_t max_ttm_mem_limit; |
a46a2cd1 | 51 | int64_t system_mem_used; |
5d240da9 | 52 | int64_t ttm_mem_used; |
a46a2cd1 FK |
53 | spinlock_t mem_limit_lock; |
54 | } kfd_mem_limit; | |
55 | ||
56 | /* Struct used for amdgpu_amdkfd_bo_validate */ | |
57 | struct amdgpu_vm_parser { | |
58 | uint32_t domain; | |
59 | bool wait; | |
60 | }; | |
61 | ||
62 | static const char * const domain_bit_to_string[] = { | |
63 | "CPU", | |
64 | "GTT", | |
65 | "VRAM", | |
66 | "GDS", | |
67 | "GWS", | |
68 | "OA" | |
69 | }; | |
70 | ||
71 | #define domain_string(domain) domain_bit_to_string[ffs(domain)-1] | |
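/* ffs() returns the 1-based index of the lowest set bit, so a
 * single-bit domain maps straight to its table entry. Assuming the
 * usual AMDGPU_GEM_DOMAIN_* values (CPU=0x1, GTT=0x2, VRAM=0x4, ...):
 * domain_string(AMDGPU_GEM_DOMAIN_VRAM) -> ffs(0x4) - 1 = 2 -> "VRAM".
 */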
72 | ||
5ae0283e | 73 | static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work); |
a46a2cd1 FK |
74 | |
75 | ||
76 | static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) | |
77 | { | |
78 | return (struct amdgpu_device *)kgd; | |
79 | } | |
80 | ||
81 | static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm, | |
82 | struct kgd_mem *mem) | |
83 | { | |
84 | struct kfd_bo_va_list *entry; | |
85 | ||
86 | list_for_each_entry(entry, &mem->bo_va_list, bo_list) | |
87 | if (entry->bo_va->base.vm == avm) | |
88 | return false; | |
89 | ||
90 | return true; | |
91 | } | |
92 | ||
93 | /* Set memory usage limits. Currently, the limits are |
5d240da9 EH |
94 | * System (TTM + userptr) memory - 3/4 of system RAM |
95 | * TTM memory - 3/8 of system RAM | |
a46a2cd1 FK |
96 | */ |
97 | void amdgpu_amdkfd_gpuvm_init_mem_limits(void) | |
98 | { | |
99 | struct sysinfo si; | |
100 | uint64_t mem; | |
101 | ||
102 | si_meminfo(&si); | |
103 | mem = si.totalram - si.totalhigh; | |
104 | mem *= si.mem_unit; | |
105 | ||
106 | spin_lock_init(&kfd_mem_limit.mem_limit_lock); | |
5d240da9 EH |
107 | kfd_mem_limit.max_system_mem_limit = (mem >> 1) + (mem >> 2); |
108 | kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3); | |
109 | pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n", | |
5ae0283e | 110 | (kfd_mem_limit.max_system_mem_limit >> 20), |
5d240da9 | 111 | (kfd_mem_limit.max_ttm_mem_limit >> 20)); |
a46a2cd1 FK |
112 | } |
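/* Worked example: with 16 GiB of usable low memory,
 *	max_system_mem_limit = (mem >> 1) + (mem >> 2) = 8 + 4 = 12 GiB (3/4)
 *	max_ttm_mem_limit    = (mem >> 1) - (mem >> 3) = 8 - 2 =  6 GiB (3/8)
 */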
113 | ||
611736d8 | 114 | static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, |
5d240da9 | 115 | uint64_t size, u32 domain, bool sg) |
a46a2cd1 | 116 | { |
611736d8 FK |
117 | size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed; |
118 | uint64_t reserved_for_pt = amdgpu_amdkfd_total_mem_size >> 9; | |
a46a2cd1 FK |
119 | int ret = 0; |
120 | ||
121 | acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size, | |
122 | sizeof(struct amdgpu_bo)); | |
123 | ||
611736d8 | 124 | vram_needed = 0; |
a46a2cd1 | 125 | if (domain == AMDGPU_GEM_DOMAIN_GTT) { |
5d240da9 EH |
126 | /* TTM GTT memory */ |
127 | system_mem_needed = acc_size + size; | |
128 | ttm_mem_needed = acc_size + size; | |
129 | } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) { | |
130 | /* Userptr */ | |
131 | system_mem_needed = acc_size + size; | |
132 | ttm_mem_needed = acc_size; | |
133 | } else { | |
134 | /* VRAM and SG */ | |
135 | system_mem_needed = acc_size; | |
136 | ttm_mem_needed = acc_size; | |
611736d8 FK |
137 | if (domain == AMDGPU_GEM_DOMAIN_VRAM) |
138 | vram_needed = size; | |
5d240da9 EH |
139 | } |
140 | ||
611736d8 FK |
141 | spin_lock(&kfd_mem_limit.mem_limit_lock); |
142 | ||
5d240da9 | 143 | if ((kfd_mem_limit.system_mem_used + system_mem_needed > |
611736d8 FK |
144 | kfd_mem_limit.max_system_mem_limit) || |
145 | (kfd_mem_limit.ttm_mem_used + ttm_mem_needed > | |
146 | kfd_mem_limit.max_ttm_mem_limit) || | |
147 | (adev->kfd.vram_used + vram_needed > | |
148 | adev->gmc.real_vram_size - reserved_for_pt)) { | |
5d240da9 | 149 | ret = -ENOMEM; |
611736d8 | 150 | } else { |
5d240da9 EH |
151 | kfd_mem_limit.system_mem_used += system_mem_needed; |
152 | kfd_mem_limit.ttm_mem_used += ttm_mem_needed; | |
611736d8 | 153 | adev->kfd.vram_used += vram_needed; |
a46a2cd1 | 154 | } |
5d240da9 | 155 | |
a46a2cd1 FK |
156 | spin_unlock(&kfd_mem_limit.mem_limit_lock); |
157 | return ret; | |
158 | } | |
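/* Summary of the accounting above, per allocation type:
 *
 *	type			system_mem	ttm_mem		vram
 *	GTT			acc + size	acc + size	0
 *	userptr (CPU, !sg)	acc + size	acc		0
 *	VRAM			acc		acc		size
 *	SG (e.g. doorbell)	acc		acc		0
 *
 * reserved_for_pt holds back 1/512th (>> 9) of the total memory size
 * for GPU page tables, so KFD VRAM allocations leave room for the VM
 * code's own allocations.
 */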
159 | ||
611736d8 | 160 | static void unreserve_mem_limit(struct amdgpu_device *adev, |
5d240da9 | 161 | uint64_t size, u32 domain, bool sg) |
a46a2cd1 FK |
162 | { |
163 | size_t acc_size; | |
164 | ||
165 | acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size, | |
166 | sizeof(struct amdgpu_bo)); | |
167 | ||
168 | spin_lock(&kfd_mem_limit.mem_limit_lock); | |
5ae0283e | 169 | if (domain == AMDGPU_GEM_DOMAIN_GTT) { |
a46a2cd1 | 170 | kfd_mem_limit.system_mem_used -= (acc_size + size); |
5d240da9 EH |
171 | kfd_mem_limit.ttm_mem_used -= (acc_size + size); |
172 | } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) { | |
173 | kfd_mem_limit.system_mem_used -= (acc_size + size); | |
174 | kfd_mem_limit.ttm_mem_used -= acc_size; | |
175 | } else { | |
5ae0283e | 176 | kfd_mem_limit.system_mem_used -= acc_size; |
5d240da9 | 177 | kfd_mem_limit.ttm_mem_used -= acc_size; |
611736d8 FK |
178 | if (domain == AMDGPU_GEM_DOMAIN_VRAM) { |
179 | adev->kfd.vram_used -= size; | |
180 | WARN_ONCE(adev->kfd.vram_used < 0, | |
181 | "kfd VRAM memory accounting unbalanced"); | |
182 | } | |
5ae0283e | 183 | } |
a46a2cd1 FK |
184 | WARN_ONCE(kfd_mem_limit.system_mem_used < 0, |
185 | "kfd system memory accounting unbalanced"); | |
5d240da9 EH |
186 | WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0, |
187 | "kfd TTM memory accounting unbalanced"); | |
a46a2cd1 FK |
188 | |
189 | spin_unlock(&kfd_mem_limit.mem_limit_lock); | |
190 | } | |
191 | ||
611736d8 | 192 | void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo) |
a46a2cd1 | 193 | { |
611736d8 FK |
194 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); |
195 | u32 domain = bo->preferred_domains; | |
196 | bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU); | |
a46a2cd1 | 197 | |
5ae0283e | 198 | if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) { |
611736d8 FK |
199 | domain = AMDGPU_GEM_DOMAIN_CPU; |
200 | sg = false; | |
a46a2cd1 | 201 | } |
a46a2cd1 | 202 | |
611736d8 | 203 | unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg); |
a46a2cd1 FK |
204 | } |
205 | ||
206 | ||
2d086fde | 207 | /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's |
a46a2cd1 FK |
208 | * reservation object. |
209 | * | |
210 | * @bo: [IN] Remove eviction fence(s) from this BO | |
2d086fde | 211 | * @ef: [IN] This eviction fence is removed if it |
a46a2cd1 | 212 | * is present in the shared list. |
a46a2cd1 | 213 | * |
a46a2cd1 FK |
214 | * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held. |
215 | */ | |
216 | static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo, | |
2d086fde | 217 | struct amdgpu_amdkfd_fence *ef) |
a46a2cd1 | 218 | { |
e6f8d26e CK |
219 | struct reservation_object *resv = bo->tbo.resv; |
220 | struct reservation_object_list *old, *new; | |
221 | unsigned int i, j, k; | |
a46a2cd1 | 222 | |
2d086fde | 223 | if (!ef) |
a46a2cd1 FK |
224 | return -EINVAL; |
225 | ||
e6f8d26e CK |
226 | old = reservation_object_get_list(resv); |
227 | if (!old) | |
a46a2cd1 FK |
228 | return 0; |
229 | ||
e6f8d26e CK |
230 | new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]), |
231 | GFP_KERNEL); | |
232 | if (!new) | |
233 | return -ENOMEM; | |
a46a2cd1 | 234 | |
e6f8d26e CK |
235 | /* Go through all the shared fences in the reservation object and sort |
236 | * the interesting ones to the end of the list. | |
a46a2cd1 | 237 | */ |
e6f8d26e | 238 | for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) { |
a46a2cd1 FK |
239 | struct dma_fence *f; |
240 | ||
e6f8d26e | 241 | f = rcu_dereference_protected(old->shared[i], |
a46a2cd1 FK |
242 | reservation_object_held(resv)); |
243 | ||
2d086fde | 244 | if (f->context == ef->base.context) |
e6f8d26e CK |
245 | RCU_INIT_POINTER(new->shared[--j], f); |
246 | else | |
247 | RCU_INIT_POINTER(new->shared[k++], f); | |
a46a2cd1 | 248 | } |
e6f8d26e CK |
249 | new->shared_max = old->shared_max; |
250 | new->shared_count = k; | |
a46a2cd1 | 251 | |
e6f8d26e CK |
252 | /* Install the new fence list, seqcount provides the barriers */ |
253 | preempt_disable(); | |
254 | write_seqcount_begin(&resv->seq); | |
255 | RCU_INIT_POINTER(resv->fence, new); | |
a46a2cd1 FK |
256 | write_seqcount_end(&resv->seq); |
257 | preempt_enable(); | |
258 | ||
e6f8d26e CK |
259 | /* Drop the references to the removed fences */ |
260 | for (i = j, k = 0; i < old->shared_count; ++i) { | |
261 | struct dma_fence *f; | |
262 | ||
263 | f = rcu_dereference_protected(new->shared[i], | |
264 | reservation_object_held(resv)); | |
2d086fde | 265 | dma_fence_put(f); |
e6f8d26e CK |
266 | } |
267 | kfree_rcu(old, rcu); | |
a46a2cd1 FK |
268 | |
269 | return 0; | |
270 | } | |
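/* The first loop above partitions the shared-fence array in a single
 * pass: fences belonging to the eviction fence's context are filled in
 * from the tail (j counts down), all others are compacted at the head
 * (k counts up). Setting shared_count = k then hides the tail entries,
 * and the second loop drops the references the list held on them.
 */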
271 | ||
a46a2cd1 FK |
272 | static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain, |
273 | bool wait) | |
274 | { | |
275 | struct ttm_operation_ctx ctx = { false, false }; | |
276 | int ret; | |
277 | ||
278 | if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm), | |
279 | "Called with userptr BO")) | |
280 | return -EINVAL; | |
281 | ||
c704ab18 | 282 | amdgpu_bo_placement_from_domain(bo, domain); |
a46a2cd1 FK |
283 | |
284 | ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); | |
285 | if (ret) | |
286 | goto validate_fail; | |
2d086fde | 287 | if (wait) |
c60cd590 | 288 | amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false); |
a46a2cd1 FK |
289 | |
290 | validate_fail: | |
291 | return ret; | |
292 | } | |
293 | ||
294 | static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo) | |
295 | { | |
296 | struct amdgpu_vm_parser *p = param; | |
297 | ||
298 | return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait); | |
299 | } | |
300 | ||
301 | /* vm_validate_pt_pd_bos - Validate page table and directory BOs | |
302 | * | |
303 | * Page directories are not updated here because huge page handling | |
304 | * during page table updates can invalidate page directory entries | |
305 | * again. Page directories are only updated after updating page | |
306 | * tables. | |
307 | */ | |
5b21d3e5 | 308 | static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm) |
a46a2cd1 | 309 | { |
5b21d3e5 | 310 | struct amdgpu_bo *pd = vm->root.base.bo; |
a46a2cd1 FK |
311 | struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); |
312 | struct amdgpu_vm_parser param; | |
a46a2cd1 FK |
313 | int ret; |
314 | ||
315 | param.domain = AMDGPU_GEM_DOMAIN_VRAM; | |
316 | param.wait = false; | |
317 | ||
5b21d3e5 | 318 | ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate, |
a46a2cd1 FK |
319 | ¶m); |
320 | if (ret) { | |
321 | pr_err("amdgpu: failed to validate PT BOs\n"); | |
322 | return ret; | |
323 | } | |
324 | ||
325 | ret = amdgpu_amdkfd_validate(¶m, pd); | |
326 | if (ret) { | |
327 | pr_err("amdgpu: failed to validate PD\n"); | |
328 | return ret; | |
329 | } | |
330 | ||
11c3a249 | 331 | vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo); |
a46a2cd1 | 332 | |
5b21d3e5 | 333 | if (vm->use_cpu_for_update) { |
a46a2cd1 FK |
334 | ret = amdgpu_bo_kmap(pd, NULL); |
335 | if (ret) { | |
336 | pr_err("amdgpu: failed to kmap PD, ret=%d\n", ret); | |
337 | return ret; | |
338 | } | |
339 | } | |
340 | ||
341 | return 0; | |
342 | } | |
343 | ||
a46a2cd1 FK |
344 | static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync) |
345 | { | |
346 | struct amdgpu_bo *pd = vm->root.base.bo; | |
347 | struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); | |
348 | int ret; | |
349 | ||
350 | ret = amdgpu_vm_update_directories(adev, vm); | |
351 | if (ret) | |
352 | return ret; | |
353 | ||
5aae7335 | 354 | return amdgpu_sync_fence(NULL, sync, vm->last_update, false); |
a46a2cd1 FK |
355 | } |
356 | ||
357 | /* add_bo_to_vm - Add a BO to a VM | |
358 | * | |
359 | * Everything that needs to be done only once when a BO is first added |
360 | * to a VM. It can later be mapped and unmapped many times without | |
361 | * repeating these steps. | |
362 | * | |
363 | * 1. Allocate and initialize BO VA entry data structure | |
364 | * 2. Add BO to the VM | |
365 | * 3. Determine ASIC-specific PTE flags | |
366 | * 4. Alloc page tables and directories if needed | |
367 | * 4a. Validate new page tables and directories | |
368 | */ | |
369 | static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem, | |
5b21d3e5 | 370 | struct amdgpu_vm *vm, bool is_aql, |
a46a2cd1 FK |
371 | struct kfd_bo_va_list **p_bo_va_entry) |
372 | { | |
373 | int ret; | |
374 | struct kfd_bo_va_list *bo_va_entry; | |
a46a2cd1 FK |
375 | struct amdgpu_bo *bo = mem->bo; |
376 | uint64_t va = mem->va; | |
377 | struct list_head *list_bo_va = &mem->bo_va_list; | |
378 | unsigned long bo_size = bo->tbo.mem.size; | |
379 | ||
380 | if (!va) { | |
381 | pr_err("Invalid VA when adding BO to VM\n"); | |
382 | return -EINVAL; | |
383 | } | |
384 | ||
385 | if (is_aql) | |
386 | va += bo_size; | |
387 | ||
388 | bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL); | |
389 | if (!bo_va_entry) | |
390 | return -ENOMEM; | |
391 | ||
392 | pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va, | |
5b21d3e5 | 393 | va + bo_size, vm); |
a46a2cd1 FK |
394 | |
395 | /* Add BO to VM internal data structures */ |
5b21d3e5 | 396 | bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo); |
a46a2cd1 FK |
397 | if (!bo_va_entry->bo_va) { |
398 | ret = -EINVAL; | |
399 | pr_err("Failed to add BO object to VM. ret == %d\n", | |
400 | ret); | |
401 | goto err_vmadd; | |
402 | } | |
403 | ||
404 | bo_va_entry->va = va; | |
405 | bo_va_entry->pte_flags = amdgpu_gmc_get_pte_flags(adev, | |
406 | mem->mapping_flags); | |
407 | bo_va_entry->kgd_dev = (void *)adev; | |
408 | list_add(&bo_va_entry->bo_list, list_bo_va); | |
409 | ||
410 | if (p_bo_va_entry) | |
411 | *p_bo_va_entry = bo_va_entry; | |
412 | ||
0ce15d6f | 413 | /* Allocate and validate page tables if needed */ |
5b21d3e5 | 414 | ret = vm_validate_pt_pd_bos(vm); |
a46a2cd1 FK |
415 | if (ret) { |
416 | pr_err("validate_pt_pd_bos() failed\n"); | |
417 | goto err_alloc_pts; | |
418 | } | |
419 | ||
a46a2cd1 FK |
420 | return 0; |
421 | ||
422 | err_alloc_pts: | |
a46a2cd1 FK |
423 | amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va); |
424 | list_del(&bo_va_entry->bo_list); | |
425 | err_vmadd: | |
426 | kfree(bo_va_entry); | |
427 | return ret; | |
428 | } | |
429 | ||
430 | static void remove_bo_from_vm(struct amdgpu_device *adev, | |
431 | struct kfd_bo_va_list *entry, unsigned long size) | |
432 | { | |
433 | pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n", | |
434 | entry->va, | |
435 | entry->va + size, entry); | |
436 | amdgpu_vm_bo_rmv(adev, entry->bo_va); | |
437 | list_del(&entry->bo_list); | |
438 | kfree(entry); | |
439 | } | |
440 | ||
441 | static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem, | |
5ae0283e FK |
442 | struct amdkfd_process_info *process_info, |
443 | bool userptr) | |
a46a2cd1 FK |
444 | { |
445 | struct ttm_validate_buffer *entry = &mem->validate_list; | |
446 | struct amdgpu_bo *bo = mem->bo; | |
447 | ||
448 | INIT_LIST_HEAD(&entry->head); | |
a9f34c70 | 449 | entry->num_shared = 1; |
a46a2cd1 FK |
450 | entry->bo = &bo->tbo; |
451 | mutex_lock(&process_info->lock); | |
5ae0283e FK |
452 | if (userptr) |
453 | list_add_tail(&entry->head, &process_info->userptr_valid_list); | |
454 | else | |
455 | list_add_tail(&entry->head, &process_info->kfd_bo_list); | |
a46a2cd1 FK |
456 | mutex_unlock(&process_info->lock); |
457 | } | |
458 | ||
5ae0283e FK |
459 | /* Initializes user pages. It registers the MMU notifier and validates |
460 | * the userptr BO in the GTT domain. | |
461 | * | |
462 | * The BO must already be on the userptr_valid_list. Otherwise an | |
463 | * eviction and restore may happen that leaves the new BO unmapped | |
464 | * with the user mode queues running. | |
465 | * | |
466 | * Takes the process_info->lock to protect against concurrent restore | |
467 | * workers. | |
468 | * | |
469 | * Returns 0 for success, negative errno for errors. | |
470 | */ | |
471 | static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm, | |
472 | uint64_t user_addr) | |
473 | { | |
474 | struct amdkfd_process_info *process_info = mem->process_info; | |
475 | struct amdgpu_bo *bo = mem->bo; | |
476 | struct ttm_operation_ctx ctx = { true, false }; | |
477 | int ret = 0; | |
478 | ||
479 | mutex_lock(&process_info->lock); | |
480 | ||
481 | ret = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, user_addr, 0); | |
482 | if (ret) { | |
483 | pr_err("%s: Failed to set userptr: %d\n", __func__, ret); | |
484 | goto out; | |
485 | } | |
486 | ||
487 | ret = amdgpu_mn_register(bo, user_addr); | |
488 | if (ret) { | |
489 | pr_err("%s: Failed to register MMU notifier: %d\n", | |
490 | __func__, ret); | |
491 | goto out; | |
492 | } | |
493 | ||
318c3f4b AD |
494 | /* If no restore worker is running concurrently, user_pages |
495 | * should not be allocated | |
496 | */ | |
497 | WARN(mem->user_pages, "Leaking user_pages array"); | |
498 | ||
499 | mem->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages, | |
500 | sizeof(struct page *), | |
501 | GFP_KERNEL | __GFP_ZERO); | |
502 | if (!mem->user_pages) { | |
503 | pr_err("%s: Failed to allocate pages array\n", __func__); | |
504 | ret = -ENOMEM; | |
505 | goto unregister_out; | |
506 | } | |
507 | ||
508 | ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, mem->user_pages); | |
5ae0283e FK |
509 | if (ret) { |
510 | pr_err("%s: Failed to get user pages: %d\n", __func__, ret); | |
318c3f4b | 511 | goto free_out; |
5ae0283e FK |
512 | } |
513 | ||
318c3f4b AD |
514 | amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, mem->user_pages); |
515 | ||
5ae0283e FK |
516 | ret = amdgpu_bo_reserve(bo, true); |
517 | if (ret) { | |
518 | pr_err("%s: Failed to reserve BO\n", __func__); | |
519 | goto release_out; | |
520 | } | |
c704ab18 | 521 | amdgpu_bo_placement_from_domain(bo, mem->domain); |
5ae0283e FK |
522 | ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); |
523 | if (ret) | |
524 | pr_err("%s: failed to validate BO\n", __func__); | |
525 | amdgpu_bo_unreserve(bo); | |
526 | ||
527 | release_out: | |
318c3f4b AD |
528 | if (ret) |
529 | release_pages(mem->user_pages, bo->tbo.ttm->num_pages); | |
530 | free_out: | |
531 | kvfree(mem->user_pages); | |
532 | mem->user_pages = NULL; | |
5ae0283e FK |
533 | unregister_out: |
534 | if (ret) | |
535 | amdgpu_mn_unregister(bo); | |
536 | out: | |
537 | mutex_unlock(&process_info->lock); | |
538 | return ret; | |
539 | } | |
540 | ||
a46a2cd1 FK |
541 | /* Reserving a BO and its page table BOs must happen atomically to |
542 | * avoid deadlocks. Some operations update multiple VMs at once. Track | |
543 | * all the reservation info in a context structure. Optionally a sync | |
544 | * object can track VM updates. | |
545 | */ | |
546 | struct bo_vm_reservation_context { | |
547 | struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */ | |
548 | unsigned int n_vms; /* Number of VMs reserved */ | |
549 | struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries */ | |
550 | struct ww_acquire_ctx ticket; /* Reservation ticket */ | |
551 | struct list_head list, duplicates; /* BO lists */ | |
552 | struct amdgpu_sync *sync; /* Pointer to sync object */ | |
553 | bool reserved; /* Whether BOs are reserved */ | |
554 | }; | |
555 | ||
556 | enum bo_vm_match { | |
557 | BO_VM_NOT_MAPPED = 0, /* Match VMs where a BO is not mapped */ | |
558 | BO_VM_MAPPED, /* Match VMs where a BO is mapped */ | |
559 | BO_VM_ALL, /* Match all VMs a BO was added to */ | |
560 | }; | |
561 | ||
562 | /** | |
563 | * reserve_bo_and_vm - reserve a BO and a VM unconditionally. | |
564 | * @mem: KFD BO structure. | |
565 | * @vm: the VM to reserve. | |
566 | * @ctx: the struct that will be used in unreserve_bo_and_vms(). | |
567 | */ | |
568 | static int reserve_bo_and_vm(struct kgd_mem *mem, | |
569 | struct amdgpu_vm *vm, | |
570 | struct bo_vm_reservation_context *ctx) | |
571 | { | |
572 | struct amdgpu_bo *bo = mem->bo; | |
573 | int ret; | |
574 | ||
575 | WARN_ON(!vm); | |
576 | ||
577 | ctx->reserved = false; | |
578 | ctx->n_vms = 1; | |
579 | ctx->sync = &mem->sync; | |
580 | ||
581 | INIT_LIST_HEAD(&ctx->list); | |
582 | INIT_LIST_HEAD(&ctx->duplicates); | |
583 | ||
584 | ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL); | |
585 | if (!ctx->vm_pd) | |
586 | return -ENOMEM; | |
587 | ||
a46a2cd1 FK |
588 | ctx->kfd_bo.priority = 0; |
589 | ctx->kfd_bo.tv.bo = &bo->tbo; | |
a9f34c70 | 590 | ctx->kfd_bo.tv.num_shared = 1; |
318c3f4b | 591 | ctx->kfd_bo.user_pages = NULL; |
a46a2cd1 FK |
592 | list_add(&ctx->kfd_bo.tv.head, &ctx->list); |
593 | ||
594 | amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]); | |
595 | ||
596 | ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list, | |
597 | false, &ctx->duplicates); | |
598 | if (!ret) | |
599 | ctx->reserved = true; | |
600 | else { | |
601 | pr_err("Failed to reserve buffers in ttm\n"); | |
602 | kfree(ctx->vm_pd); | |
603 | ctx->vm_pd = NULL; | |
604 | } | |
605 | ||
606 | return ret; | |
607 | } | |
608 | ||
609 | /** | |
610 | * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally | |
611 | * @mem: KFD BO structure. | |
612 | * @vm: the VM to reserve. If NULL, then all VMs associated with the BO | |
613 | * are reserved. Otherwise, only the given VM is reserved. |
614 | * @map_type: the mapping status that will be used to filter the VMs. | |
615 | * @ctx: the struct that will be used in unreserve_bo_and_vms(). | |
616 | * | |
617 | * Returns 0 for success, negative for failure. | |
618 | */ | |
619 | static int reserve_bo_and_cond_vms(struct kgd_mem *mem, | |
620 | struct amdgpu_vm *vm, enum bo_vm_match map_type, | |
621 | struct bo_vm_reservation_context *ctx) | |
622 | { | |
623 | struct amdgpu_bo *bo = mem->bo; | |
624 | struct kfd_bo_va_list *entry; | |
625 | unsigned int i; | |
626 | int ret; | |
627 | ||
628 | ctx->reserved = false; | |
629 | ctx->n_vms = 0; | |
630 | ctx->vm_pd = NULL; | |
631 | ctx->sync = &mem->sync; | |
632 | ||
633 | INIT_LIST_HEAD(&ctx->list); | |
634 | INIT_LIST_HEAD(&ctx->duplicates); | |
635 | ||
636 | list_for_each_entry(entry, &mem->bo_va_list, bo_list) { | |
637 | if ((vm && vm != entry->bo_va->base.vm) || | |
638 | (entry->is_mapped != map_type | |
639 | && map_type != BO_VM_ALL)) | |
640 | continue; | |
641 | ||
642 | ctx->n_vms++; | |
643 | } | |
644 | ||
645 | if (ctx->n_vms != 0) { | |
646 | ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), | |
647 | GFP_KERNEL); | |
648 | if (!ctx->vm_pd) | |
649 | return -ENOMEM; | |
650 | } | |
651 | ||
a46a2cd1 FK |
652 | ctx->kfd_bo.priority = 0; |
653 | ctx->kfd_bo.tv.bo = &bo->tbo; | |
a9f34c70 | 654 | ctx->kfd_bo.tv.num_shared = 1; |
318c3f4b | 655 | ctx->kfd_bo.user_pages = NULL; |
a46a2cd1 FK |
656 | list_add(&ctx->kfd_bo.tv.head, &ctx->list); |
657 | ||
658 | i = 0; | |
659 | list_for_each_entry(entry, &mem->bo_va_list, bo_list) { | |
660 | if ((vm && vm != entry->bo_va->base.vm) || | |
661 | (entry->is_mapped != map_type | |
662 | && map_type != BO_VM_ALL)) | |
663 | continue; | |
664 | ||
665 | amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list, | |
666 | &ctx->vm_pd[i]); | |
667 | i++; | |
668 | } | |
669 | ||
670 | ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list, | |
671 | false, &ctx->duplicates); | |
672 | if (!ret) | |
673 | ctx->reserved = true; | |
674 | else | |
675 | pr_err("Failed to reserve buffers in ttm.\n"); | |
676 | ||
677 | if (ret) { | |
678 | kfree(ctx->vm_pd); | |
679 | ctx->vm_pd = NULL; | |
680 | } | |
681 | ||
682 | return ret; | |
683 | } | |
684 | ||
685 | /** | |
686 | * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context | |
687 | * @ctx: Reservation context to unreserve | |
688 | * @wait: Optionally wait for a sync object representing pending VM updates | |
689 | * @intr: Whether the wait is interruptible | |
690 | * | |
691 | * Also frees any resources allocated in | |
692 | * reserve_bo_and_(cond_)vm(s). Returns the status from | |
693 | * amdgpu_sync_wait. | |
694 | */ | |
695 | static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx, | |
696 | bool wait, bool intr) | |
697 | { | |
698 | int ret = 0; | |
699 | ||
700 | if (wait) | |
701 | ret = amdgpu_sync_wait(ctx->sync, intr); | |
702 | ||
703 | if (ctx->reserved) | |
704 | ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list); | |
705 | kfree(ctx->vm_pd); | |
706 | ||
707 | ctx->sync = NULL; | |
708 | ||
709 | ctx->reserved = false; | |
710 | ctx->vm_pd = NULL; | |
711 | ||
712 | return ret; | |
713 | } | |
714 | ||
715 | static int unmap_bo_from_gpuvm(struct amdgpu_device *adev, | |
716 | struct kfd_bo_va_list *entry, | |
717 | struct amdgpu_sync *sync) | |
718 | { | |
719 | struct amdgpu_bo_va *bo_va = entry->bo_va; | |
720 | struct amdgpu_vm *vm = bo_va->base.vm; | |
a46a2cd1 | 721 | |
a46a2cd1 FK |
722 | amdgpu_vm_bo_unmap(adev, bo_va, entry->va); |
723 | ||
724 | amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update); | |
725 | ||
5aae7335 | 726 | amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false); |
a46a2cd1 FK |
727 | |
728 | return 0; | |
729 | } | |
730 | ||
731 | static int update_gpuvm_pte(struct amdgpu_device *adev, | |
732 | struct kfd_bo_va_list *entry, | |
733 | struct amdgpu_sync *sync) | |
734 | { | |
735 | int ret; | |
1e608013 | 736 | struct amdgpu_bo_va *bo_va = entry->bo_va; |
a46a2cd1 FK |
737 | |
738 | /* Update the page tables */ | |
739 | ret = amdgpu_vm_bo_update(adev, bo_va, false); | |
740 | if (ret) { | |
741 | pr_err("amdgpu_vm_bo_update failed\n"); | |
742 | return ret; | |
743 | } | |
744 | ||
5aae7335 | 745 | return amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false); |
a46a2cd1 FK |
746 | } |
747 | ||
748 | static int map_bo_to_gpuvm(struct amdgpu_device *adev, | |
5ae0283e FK |
749 | struct kfd_bo_va_list *entry, struct amdgpu_sync *sync, |
750 | bool no_update_pte) | |
a46a2cd1 FK |
751 | { |
752 | int ret; | |
753 | ||
754 | /* Set virtual address for the allocation */ | |
755 | ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0, | |
756 | amdgpu_bo_size(entry->bo_va->base.bo), | |
757 | entry->pte_flags); | |
758 | if (ret) { | |
759 | pr_err("Failed to map VA 0x%llx in vm. ret %d\n", | |
760 | entry->va, ret); | |
761 | return ret; | |
762 | } | |
763 | ||
5ae0283e FK |
764 | if (no_update_pte) |
765 | return 0; | |
766 | ||
a46a2cd1 FK |
767 | ret = update_gpuvm_pte(adev, entry, sync); |
768 | if (ret) { | |
769 | pr_err("update_gpuvm_pte() failed\n"); | |
770 | goto update_gpuvm_pte_failed; | |
771 | } | |
772 | ||
773 | return 0; | |
774 | ||
775 | update_gpuvm_pte_failed: | |
776 | unmap_bo_from_gpuvm(adev, entry, sync); | |
777 | return ret; | |
778 | } | |
779 | ||
b408a548 FK |
780 | static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size) |
781 | { | |
782 | struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL); | |
783 | ||
784 | if (!sg) | |
785 | return NULL; | |
786 | if (sg_alloc_table(sg, 1, GFP_KERNEL)) { | |
787 | kfree(sg); | |
788 | return NULL; | |
789 | } | |
790 | sg->sgl->dma_address = addr; | |
791 | sg->sgl->length = size; | |
792 | #ifdef CONFIG_NEED_SG_DMA_LENGTH | |
793 | sg->sgl->dma_length = size; | |
794 | #endif | |
795 | return sg; | |
796 | } | |
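/* The resulting table has exactly one entry whose dma_address is the
 * doorbell's bus address; there is no backing struct page, which is
 * why the caller attaches it to a ttm_bo_type_sg BO instead of letting
 * TTM allocate pages.
 */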
797 | ||
a46a2cd1 FK |
798 | static int process_validate_vms(struct amdkfd_process_info *process_info) |
799 | { | |
5b21d3e5 | 800 | struct amdgpu_vm *peer_vm; |
a46a2cd1 FK |
801 | int ret; |
802 | ||
803 | list_for_each_entry(peer_vm, &process_info->vm_list_head, | |
804 | vm_list_node) { | |
805 | ret = vm_validate_pt_pd_bos(peer_vm); | |
806 | if (ret) | |
807 | return ret; | |
808 | } | |
809 | ||
810 | return 0; | |
811 | } | |
812 | ||
9130cc01 HK |
813 | static int process_sync_pds_resv(struct amdkfd_process_info *process_info, |
814 | struct amdgpu_sync *sync) | |
815 | { | |
816 | struct amdgpu_vm *peer_vm; | |
817 | int ret; | |
818 | ||
819 | list_for_each_entry(peer_vm, &process_info->vm_list_head, | |
820 | vm_list_node) { | |
821 | struct amdgpu_bo *pd = peer_vm->root.base.bo; | |
822 | ||
5aae7335 | 823 | ret = amdgpu_sync_resv(NULL, |
9130cc01 HK |
824 | sync, pd->tbo.resv, |
825 | AMDGPU_FENCE_OWNER_UNDEFINED, false); | |
826 | if (ret) | |
827 | return ret; | |
828 | } | |
829 | ||
830 | return 0; | |
831 | } | |
832 | ||
a46a2cd1 FK |
833 | static int process_update_pds(struct amdkfd_process_info *process_info, |
834 | struct amdgpu_sync *sync) | |
835 | { | |
5b21d3e5 | 836 | struct amdgpu_vm *peer_vm; |
a46a2cd1 FK |
837 | int ret; |
838 | ||
839 | list_for_each_entry(peer_vm, &process_info->vm_list_head, | |
840 | vm_list_node) { | |
5b21d3e5 | 841 | ret = vm_update_pds(peer_vm, sync); |
a46a2cd1 FK |
842 | if (ret) |
843 | return ret; | |
844 | } | |
845 | ||
846 | return 0; | |
847 | } | |
848 | ||
ede0dd86 FK |
849 | static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info, |
850 | struct dma_fence **ef) | |
a46a2cd1 | 851 | { |
3486625b | 852 | struct amdkfd_process_info *info = NULL; |
ede0dd86 | 853 | int ret; |
a46a2cd1 FK |
854 | |
855 | if (!*process_info) { | |
856 | info = kzalloc(sizeof(*info), GFP_KERNEL); | |
ede0dd86 FK |
857 | if (!info) |
858 | return -ENOMEM; | |
a46a2cd1 FK |
859 | |
860 | mutex_init(&info->lock); | |
861 | INIT_LIST_HEAD(&info->vm_list_head); | |
862 | INIT_LIST_HEAD(&info->kfd_bo_list); | |
5ae0283e FK |
863 | INIT_LIST_HEAD(&info->userptr_valid_list); |
864 | INIT_LIST_HEAD(&info->userptr_inval_list); | |
a46a2cd1 FK |
865 | |
866 | info->eviction_fence = | |
867 | amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1), | |
868 | current->mm); | |
869 | if (!info->eviction_fence) { | |
870 | pr_err("Failed to create eviction fence\n"); | |
ede0dd86 | 871 | ret = -ENOMEM; |
a46a2cd1 FK |
872 | goto create_evict_fence_fail; |
873 | } | |
874 | ||
5ae0283e FK |
875 | info->pid = get_task_pid(current->group_leader, PIDTYPE_PID); |
876 | atomic_set(&info->evicted_bos, 0); | |
877 | INIT_DELAYED_WORK(&info->restore_userptr_work, | |
878 | amdgpu_amdkfd_restore_userptr_worker); | |
879 | ||
a46a2cd1 FK |
880 | *process_info = info; |
881 | *ef = dma_fence_get(&info->eviction_fence->base); | |
882 | } | |
883 | ||
ede0dd86 | 884 | vm->process_info = *process_info; |
a46a2cd1 | 885 | |
3486625b | 886 | /* Validate page directory and attach eviction fence */ |
ede0dd86 | 887 | ret = amdgpu_bo_reserve(vm->root.base.bo, true); |
3486625b FK |
888 | if (ret) |
889 | goto reserve_pd_fail; | |
ede0dd86 | 890 | ret = vm_validate_pt_pd_bos(vm); |
3486625b FK |
891 | if (ret) { |
892 | pr_err("validate_pt_pd_bos() failed\n"); | |
893 | goto validate_pd_fail; | |
894 | } | |
d38ca8f0 CIK |
895 | ret = amdgpu_bo_sync_wait(vm->root.base.bo, |
896 | AMDGPU_FENCE_OWNER_KFD, false); | |
3486625b FK |
897 | if (ret) |
898 | goto wait_pd_fail; | |
ede0dd86 FK |
899 | amdgpu_bo_fence(vm->root.base.bo, |
900 | &vm->process_info->eviction_fence->base, true); | |
901 | amdgpu_bo_unreserve(vm->root.base.bo); | |
3486625b FK |
902 | |
903 | /* Update process info */ | |
ede0dd86 FK |
904 | mutex_lock(&vm->process_info->lock); |
905 | list_add_tail(&vm->vm_list_node, | |
906 | &(vm->process_info->vm_list_head)); | |
907 | vm->process_info->n_vms++; | |
908 | mutex_unlock(&vm->process_info->lock); | |
a46a2cd1 | 909 | |
ede0dd86 | 910 | return 0; |
a46a2cd1 | 911 | |
3486625b FK |
912 | wait_pd_fail: |
913 | validate_pd_fail: | |
ede0dd86 | 914 | amdgpu_bo_unreserve(vm->root.base.bo); |
3486625b | 915 | reserve_pd_fail: |
ede0dd86 FK |
916 | vm->process_info = NULL; |
917 | if (info) { | |
918 | /* Two fence references: one in info and one in *ef */ | |
919 | dma_fence_put(&info->eviction_fence->base); | |
920 | dma_fence_put(*ef); | |
921 | *ef = NULL; | |
922 | *process_info = NULL; | |
5ae0283e | 923 | put_pid(info->pid); |
a46a2cd1 | 924 | create_evict_fence_fail: |
ede0dd86 FK |
925 | mutex_destroy(&info->lock); |
926 | kfree(info); | |
927 | } | |
928 | return ret; | |
929 | } | |
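/* Note on the unwind path above: the create_evict_fence_fail label
 * sits inside the "if (info)" block, which is legal C (labels have
 * function scope). Jumping to it skips the dma_fence_put()/put_pid()
 * calls that only make sense once the fence and pid references exist,
 * while still destroying the mutex and freeing info.
 */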
930 | ||
1685b01a OZ |
931 | int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, unsigned int pasid, |
932 | void **vm, void **process_info, | |
ede0dd86 FK |
933 | struct dma_fence **ef) |
934 | { | |
935 | struct amdgpu_device *adev = get_amdgpu_device(kgd); | |
936 | struct amdgpu_vm *new_vm; | |
937 | int ret; | |
938 | ||
939 | new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL); | |
940 | if (!new_vm) | |
941 | return -ENOMEM; | |
942 | ||
943 | /* Initialize AMDGPU part of the VM */ | |
1685b01a | 944 | ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid); |
ede0dd86 FK |
945 | if (ret) { |
946 | pr_err("Failed init vm ret %d\n", ret); | |
947 | goto amdgpu_vm_init_fail; | |
948 | } | |
949 | ||
950 | /* Initialize KFD part of the VM and process info */ | |
951 | ret = init_kfd_vm(new_vm, process_info, ef); | |
952 | if (ret) | |
953 | goto init_kfd_vm_fail; | |
954 | ||
955 | *vm = (void *) new_vm; | |
956 | ||
957 | return 0; | |
958 | ||
959 | init_kfd_vm_fail: | |
5b21d3e5 | 960 | amdgpu_vm_fini(adev, new_vm); |
ede0dd86 | 961 | amdgpu_vm_init_fail: |
a46a2cd1 FK |
962 | kfree(new_vm); |
963 | return ret; | |
a46a2cd1 FK |
964 | } |
965 | ||
ede0dd86 | 966 | int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd, |
1685b01a | 967 | struct file *filp, unsigned int pasid, |
ede0dd86 FK |
968 | void **vm, void **process_info, |
969 | struct dma_fence **ef) | |
a46a2cd1 FK |
970 | { |
971 | struct amdgpu_device *adev = get_amdgpu_device(kgd); | |
ede0dd86 FK |
972 | struct drm_file *drm_priv = filp->private_data; |
973 | struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv; | |
974 | struct amdgpu_vm *avm = &drv_priv->vm; | |
975 | int ret; | |
a46a2cd1 | 976 | |
ede0dd86 FK |
977 | /* Already a compute VM? */ |
978 | if (avm->process_info) | |
979 | return -EINVAL; | |
980 | ||
981 | /* Convert VM into a compute VM */ | |
1685b01a | 982 | ret = amdgpu_vm_make_compute(adev, avm, pasid); |
ede0dd86 FK |
983 | if (ret) |
984 | return ret; | |
985 | ||
986 | /* Initialize KFD part of the VM and process info */ | |
987 | ret = init_kfd_vm(avm, process_info, ef); | |
988 | if (ret) | |
989 | return ret; | |
990 | ||
991 | *vm = (void *)avm; | |
992 | ||
993 | return 0; | |
994 | } | |
995 | ||
996 | void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, | |
997 | struct amdgpu_vm *vm) | |
998 | { | |
999 | struct amdkfd_process_info *process_info = vm->process_info; | |
1000 | struct amdgpu_bo *pd = vm->root.base.bo; | |
1001 | ||
1002 | if (!process_info) | |
a46a2cd1 FK |
1003 | return; |
1004 | ||
a46a2cd1 | 1005 | /* Release eviction fence from PD */ |
a46a2cd1 FK |
1006 | amdgpu_bo_reserve(pd, false); |
1007 | amdgpu_bo_fence(pd, NULL, false); | |
1008 | amdgpu_bo_unreserve(pd); | |
1009 | ||
ede0dd86 | 1010 | /* Update process info */ |
a46a2cd1 FK |
1011 | mutex_lock(&process_info->lock); |
1012 | process_info->n_vms--; | |
ede0dd86 | 1013 | list_del(&vm->vm_list_node); |
a46a2cd1 FK |
1014 | mutex_unlock(&process_info->lock); |
1015 | ||
ede0dd86 | 1016 | /* Release per-process resources when last compute VM is destroyed */ |
a46a2cd1 FK |
1017 | if (!process_info->n_vms) { |
1018 | WARN_ON(!list_empty(&process_info->kfd_bo_list)); | |
5ae0283e FK |
1019 | WARN_ON(!list_empty(&process_info->userptr_valid_list)); |
1020 | WARN_ON(!list_empty(&process_info->userptr_inval_list)); | |
a46a2cd1 FK |
1021 | |
1022 | dma_fence_put(&process_info->eviction_fence->base); | |
5ae0283e FK |
1023 | cancel_delayed_work_sync(&process_info->restore_userptr_work); |
1024 | put_pid(process_info->pid); | |
a46a2cd1 FK |
1025 | mutex_destroy(&process_info->lock); |
1026 | kfree(process_info); | |
1027 | } | |
ede0dd86 FK |
1028 | } |
1029 | ||
1030 | void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm) | |
1031 | { | |
1032 | struct amdgpu_device *adev = get_amdgpu_device(kgd); | |
1033 | struct amdgpu_vm *avm = (struct amdgpu_vm *)vm; | |
1034 | ||
1035 | if (WARN_ON(!kgd || !vm)) | |
1036 | return; | |
1037 | ||
1038 | pr_debug("Destroying process vm %p\n", vm); | |
a46a2cd1 FK |
1039 | |
1040 | /* Release the VM context */ | |
1041 | amdgpu_vm_fini(adev, avm); | |
1042 | kfree(vm); | |
1043 | } | |
1044 | ||
bf47afba OZ |
1045 | void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm) |
1046 | { | |
1047 | struct amdgpu_device *adev = get_amdgpu_device(kgd); | |
1048 | struct amdgpu_vm *avm = (struct amdgpu_vm *)vm; | |
1049 | ||
1050 | if (WARN_ON(!kgd || !vm)) | |
1051 | return; | |
1052 | ||
1053 | pr_debug("Releasing process vm %p\n", vm); | |
1054 | ||
1055 | /* The original pasid of the amdgpu vm has already been |
1056 | * released while converting the amdgpu vm to a compute vm. |
1057 | * The current pasid is managed by kfd and will be |
1058 | * released on kfd process destroy. Set the amdgpu pasid |
1059 | * to 0 to avoid a duplicate release. |
1060 | */ | |
1061 | amdgpu_vm_release_compute(adev, avm); | |
1062 | } | |
1063 | ||
e715c6d0 | 1064 | uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm) |
a46a2cd1 | 1065 | { |
5b21d3e5 | 1066 | struct amdgpu_vm *avm = (struct amdgpu_vm *)vm; |
e715c6d0 SL |
1067 | struct amdgpu_bo *pd = avm->root.base.bo; |
1068 | struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); | |
a46a2cd1 | 1069 | |
e715c6d0 SL |
1070 | if (adev->asic_type < CHIP_VEGA10) |
1071 | return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT; | |
1072 | return avm->pd_phys_addr; | |
a46a2cd1 FK |
1073 | } |
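/* Pre-Vega10 ASICs program the page directory as a page frame number,
 * hence the shift by AMDGPU_GPU_PAGE_SHIFT (4 KiB GPU pages); GFX9 and
 * later take the full address returned by amdgpu_gmc_pd_addr().
 */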
1074 | ||
1075 | int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( | |
1076 | struct kgd_dev *kgd, uint64_t va, uint64_t size, | |
1077 | void *vm, struct kgd_mem **mem, | |
1078 | uint64_t *offset, uint32_t flags) | |
1079 | { | |
1080 | struct amdgpu_device *adev = get_amdgpu_device(kgd); | |
5b21d3e5 | 1081 | struct amdgpu_vm *avm = (struct amdgpu_vm *)vm; |
b408a548 FK |
1082 | enum ttm_bo_type bo_type = ttm_bo_type_device; |
1083 | struct sg_table *sg = NULL; | |
5ae0283e | 1084 | uint64_t user_addr = 0; |
a46a2cd1 | 1085 | struct amdgpu_bo *bo; |
3216c6b7 | 1086 | struct amdgpu_bo_param bp; |
a46a2cd1 | 1087 | int byte_align; |
5ae0283e | 1088 | u32 domain, alloc_domain; |
a46a2cd1 FK |
1089 | u64 alloc_flags; |
1090 | uint32_t mapping_flags; | |
1091 | int ret; | |
1092 | ||
1093 | /* | |
1094 | * Check on which domain to allocate BO | |
1095 | */ | |
1096 | if (flags & ALLOC_MEM_FLAGS_VRAM) { | |
5ae0283e | 1097 | domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM; |
a46a2cd1 FK |
1098 | alloc_flags = AMDGPU_GEM_CREATE_VRAM_CLEARED; |
1099 | alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ? | |
1100 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : | |
1101 | AMDGPU_GEM_CREATE_NO_CPU_ACCESS; | |
1102 | } else if (flags & ALLOC_MEM_FLAGS_GTT) { | |
5ae0283e FK |
1103 | domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT; |
1104 | alloc_flags = 0; | |
1105 | } else if (flags & ALLOC_MEM_FLAGS_USERPTR) { | |
1106 | domain = AMDGPU_GEM_DOMAIN_GTT; | |
1107 | alloc_domain = AMDGPU_GEM_DOMAIN_CPU; | |
a46a2cd1 | 1108 | alloc_flags = 0; |
5ae0283e FK |
1109 | if (!offset || !*offset) |
1110 | return -EINVAL; | |
1111 | user_addr = *offset; | |
b408a548 FK |
1112 | } else if (flags & ALLOC_MEM_FLAGS_DOORBELL) { |
1113 | domain = AMDGPU_GEM_DOMAIN_GTT; | |
1114 | alloc_domain = AMDGPU_GEM_DOMAIN_CPU; | |
1115 | bo_type = ttm_bo_type_sg; | |
1116 | alloc_flags = 0; | |
1117 | if (size > UINT_MAX) | |
1118 | return -EINVAL; | |
1119 | sg = create_doorbell_sg(*offset, size); | |
1120 | if (!sg) | |
1121 | return -ENOMEM; | |
a46a2cd1 FK |
1122 | } else { |
1123 | return -EINVAL; | |
1124 | } | |
1125 | ||
1126 | *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL); | |
b408a548 FK |
1127 | if (!*mem) { |
1128 | ret = -ENOMEM; | |
1129 | goto err; | |
1130 | } | |
a46a2cd1 FK |
1131 | INIT_LIST_HEAD(&(*mem)->bo_va_list); |
1132 | mutex_init(&(*mem)->lock); | |
1133 | (*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM); | |
1134 | ||
1135 | /* Workaround for AQL queue wraparound bug. Map the same | |
1136 | * memory twice. That means we only actually allocate half | |
1137 | * the memory. | |
1138 | */ | |
1139 | if ((*mem)->aql_queue) | |
1140 | size = size >> 1; | |
1141 | ||
1142 | /* Workaround for TLB bug on older VI chips */ | |
1143 | byte_align = (adev->family == AMDGPU_FAMILY_VI && | |
1144 | adev->asic_type != CHIP_FIJI && | |
1145 | adev->asic_type != CHIP_POLARIS10 && | |
846a44d7 GB |
1146 | adev->asic_type != CHIP_POLARIS11 && |
1147 | adev->asic_type != CHIP_POLARIS12) ? | |
a46a2cd1 FK |
1148 | VI_BO_SIZE_ALIGN : 1; |
1149 | ||
1150 | mapping_flags = AMDGPU_VM_PAGE_READABLE; | |
1151 | if (flags & ALLOC_MEM_FLAGS_WRITABLE) | |
1152 | mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE; | |
1153 | if (flags & ALLOC_MEM_FLAGS_EXECUTABLE) | |
1154 | mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE; | |
1155 | if (flags & ALLOC_MEM_FLAGS_COHERENT) | |
1156 | mapping_flags |= AMDGPU_VM_MTYPE_UC; | |
1157 | else | |
1158 | mapping_flags |= AMDGPU_VM_MTYPE_NC; | |
1159 | (*mem)->mapping_flags = mapping_flags; | |
1160 | ||
1161 | amdgpu_sync_create(&(*mem)->sync); | |
1162 | ||
b408a548 | 1163 | ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg); |
a46a2cd1 FK |
1164 | if (ret) { |
1165 | pr_debug("Insufficient system memory\n"); | |
5d240da9 | 1166 | goto err_reserve_limit; |
a46a2cd1 FK |
1167 | } |
1168 | ||
1169 | pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n", | |
1170 | va, size, domain_string(alloc_domain)); | |
1171 | ||
3216c6b7 CZ |
1172 | memset(&bp, 0, sizeof(bp)); |
1173 | bp.size = size; | |
1174 | bp.byte_align = byte_align; | |
1175 | bp.domain = alloc_domain; | |
1176 | bp.flags = alloc_flags; | |
b408a548 | 1177 | bp.type = bo_type; |
3216c6b7 CZ |
1178 | bp.resv = NULL; |
1179 | ret = amdgpu_bo_create(adev, &bp, &bo); | |
a46a2cd1 FK |
1180 | if (ret) { |
1181 | pr_debug("Failed to create BO on domain %s. ret %d\n", | |
1182 | domain_string(alloc_domain), ret); | |
1183 | goto err_bo_create; | |
1184 | } | |
b408a548 FK |
1185 | if (bo_type == ttm_bo_type_sg) { |
1186 | bo->tbo.sg = sg; | |
1187 | bo->tbo.ttm->sg = sg; | |
1188 | } | |
a46a2cd1 FK |
1189 | bo->kfd_bo = *mem; |
1190 | (*mem)->bo = bo; | |
5ae0283e FK |
1191 | if (user_addr) |
1192 | bo->flags |= AMDGPU_AMDKFD_USERPTR_BO; | |
a46a2cd1 FK |
1193 | |
1194 | (*mem)->va = va; | |
5ae0283e | 1195 | (*mem)->domain = domain; |
a46a2cd1 | 1196 | (*mem)->mapped_to_gpu_memory = 0; |
5b21d3e5 | 1197 | (*mem)->process_info = avm->process_info; |
5ae0283e FK |
1198 | add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr); |
1199 | ||
1200 | if (user_addr) { | |
1201 | ret = init_user_pages(*mem, current->mm, user_addr); | |
1202 | if (ret) { | |
1203 | mutex_lock(&avm->process_info->lock); | |
1204 | list_del(&(*mem)->validate_list.head); | |
1205 | mutex_unlock(&avm->process_info->lock); | |
1206 | goto allocate_init_user_pages_failed; | |
1207 | } | |
1208 | } | |
a46a2cd1 FK |
1209 | |
1210 | if (offset) | |
1211 | *offset = amdgpu_bo_mmap_offset(bo); | |
1212 | ||
1213 | return 0; | |
1214 | ||
5ae0283e FK |
1215 | allocate_init_user_pages_failed: |
1216 | amdgpu_bo_unref(&bo); | |
1217 | /* Don't unreserve system mem limit twice */ | |
5d240da9 | 1218 | goto err_reserve_limit; |
a46a2cd1 | 1219 | err_bo_create: |
b408a548 | 1220 | unreserve_mem_limit(adev, size, alloc_domain, !!sg); |
5d240da9 | 1221 | err_reserve_limit: |
a46a2cd1 FK |
1222 | mutex_destroy(&(*mem)->lock); |
1223 | kfree(*mem); | |
b408a548 FK |
1224 | err: |
1225 | if (sg) { | |
1226 | sg_free_table(sg); | |
1227 | kfree(sg); | |
1228 | } | |
a46a2cd1 FK |
1229 | return ret; |
1230 | } | |
1231 | ||
1232 | int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( | |
1233 | struct kgd_dev *kgd, struct kgd_mem *mem) | |
1234 | { | |
1235 | struct amdkfd_process_info *process_info = mem->process_info; | |
1236 | unsigned long bo_size = mem->bo->tbo.mem.size; | |
1237 | struct kfd_bo_va_list *entry, *tmp; | |
1238 | struct bo_vm_reservation_context ctx; | |
1239 | struct ttm_validate_buffer *bo_list_entry; | |
1240 | int ret; | |
1241 | ||
1242 | mutex_lock(&mem->lock); | |
1243 | ||
1244 | if (mem->mapped_to_gpu_memory > 0) { | |
1245 | pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n", | |
1246 | mem->va, bo_size); | |
1247 | mutex_unlock(&mem->lock); | |
1248 | return -EBUSY; | |
1249 | } | |
1250 | ||
1251 | mutex_unlock(&mem->lock); | |
1252 | /* lock is not needed after this, since mem is unused and will | |
1253 | * be freed anyway | |
1254 | */ | |
1255 | ||
5ae0283e FK |
1256 | /* No more MMU notifiers */ |
1257 | amdgpu_mn_unregister(mem->bo); | |
1258 | ||
a46a2cd1 FK |
1259 | /* Make sure restore workers don't access the BO any more */ |
1260 | bo_list_entry = &mem->validate_list; | |
1261 | mutex_lock(&process_info->lock); | |
1262 | list_del(&bo_list_entry->head); | |
1263 | mutex_unlock(&process_info->lock); | |
1264 | ||
318c3f4b AD |
1265 | /* Free user pages if necessary */ |
1266 | if (mem->user_pages) { | |
1267 | pr_debug("%s: Freeing user_pages array\n", __func__); | |
1268 | if (mem->user_pages[0]) | |
1269 | release_pages(mem->user_pages, | |
1270 | mem->bo->tbo.ttm->num_pages); | |
1271 | kvfree(mem->user_pages); | |
1272 | } | |
1273 | ||
a46a2cd1 FK |
1274 | ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx); |
1275 | if (unlikely(ret)) | |
1276 | return ret; | |
1277 | ||
1278 | /* The eviction fence should be removed by the last unmap. | |
1279 | * TODO: Log an error condition if the bo still has the eviction fence | |
1280 | * attached | |
1281 | */ | |
1282 | amdgpu_amdkfd_remove_eviction_fence(mem->bo, | |
2d086fde | 1283 | process_info->eviction_fence); |
a46a2cd1 FK |
1284 | pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va, |
1285 | mem->va + bo_size * (1 + mem->aql_queue)); | |
1286 | ||
1287 | /* Remove from VM internal data structures */ | |
1288 | list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list) | |
1289 | remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev, | |
1290 | entry, bo_size); | |
1291 | ||
1292 | ret = unreserve_bo_and_vms(&ctx, false, false); | |
1293 | ||
1294 | /* Free the sync object */ | |
1295 | amdgpu_sync_free(&mem->sync); | |
1296 | ||
b408a548 FK |
1297 | /* If the SG is not NULL, it's one we created for a doorbell |
1298 | * BO. We need to free it. | |
1299 | */ | |
1300 | if (mem->bo->tbo.sg) { | |
1301 | sg_free_table(mem->bo->tbo.sg); | |
1302 | kfree(mem->bo->tbo.sg); | |
1303 | } | |
1304 | ||
a46a2cd1 FK |
1305 | /* Free the BO */ |
1306 | amdgpu_bo_unref(&mem->bo); | |
1307 | mutex_destroy(&mem->lock); | |
1308 | kfree(mem); | |
1309 | ||
1310 | return ret; | |
1311 | } | |
1312 | ||
1313 | int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( | |
1314 | struct kgd_dev *kgd, struct kgd_mem *mem, void *vm) | |
1315 | { | |
1316 | struct amdgpu_device *adev = get_amdgpu_device(kgd); | |
5b21d3e5 | 1317 | struct amdgpu_vm *avm = (struct amdgpu_vm *)vm; |
a46a2cd1 FK |
1318 | int ret; |
1319 | struct amdgpu_bo *bo; | |
1320 | uint32_t domain; | |
1321 | struct kfd_bo_va_list *entry; | |
1322 | struct bo_vm_reservation_context ctx; | |
1323 | struct kfd_bo_va_list *bo_va_entry = NULL; | |
1324 | struct kfd_bo_va_list *bo_va_entry_aql = NULL; | |
1325 | unsigned long bo_size; | |
5ae0283e | 1326 | bool is_invalid_userptr = false; |
a46a2cd1 FK |
1327 | |
1328 | bo = mem->bo; | |
a46a2cd1 FK |
1329 | if (!bo) { |
1330 | pr_err("Invalid BO when mapping memory to GPU\n"); | |
5ae0283e | 1331 | return -EINVAL; |
a46a2cd1 FK |
1332 | } |
1333 | ||
5ae0283e FK |
1334 | /* Make sure restore is not running concurrently. Since we |
1335 | * don't map invalid userptr BOs, we rely on the next restore | |
1336 | * worker to do the mapping | |
1337 | */ | |
1338 | mutex_lock(&mem->process_info->lock); | |
1339 | ||
1340 | /* Lock mmap-sem. If we find an invalid userptr BO, we can be | |
1341 | * sure that the MMU notifier is no longer running | |
1342 | * concurrently and the queues are actually stopped | |
1343 | */ | |
1344 | if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { | |
1345 | down_write(¤t->mm->mmap_sem); | |
1346 | is_invalid_userptr = atomic_read(&mem->invalid); | |
1347 | up_write(¤t->mm->mmap_sem); | |
1348 | } | |
1349 | ||
1350 | mutex_lock(&mem->lock); | |
1351 | ||
a46a2cd1 FK |
1352 | domain = mem->domain; |
1353 | bo_size = bo->tbo.mem.size; | |
1354 | ||
1355 | pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n", | |
1356 | mem->va, | |
1357 | mem->va + bo_size * (1 + mem->aql_queue), | |
1358 | vm, domain_string(domain)); | |
1359 | ||
1360 | ret = reserve_bo_and_vm(mem, vm, &ctx); | |
1361 | if (unlikely(ret)) | |
1362 | goto out; | |
1363 | ||
5ae0283e FK |
1364 | /* Userptr can be marked as "not invalid", but not actually be |
1365 | * validated yet (still in the system domain). In that case | |
1366 | * the queues are still stopped and we can leave mapping for | |
1367 | * the next restore worker | |
1368 | */ | |
0f04e538 FK |
1369 | if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && |
1370 | bo->tbo.mem.mem_type == TTM_PL_SYSTEM) | |
5ae0283e FK |
1371 | is_invalid_userptr = true; |
1372 | ||
5b21d3e5 FK |
1373 | if (check_if_add_bo_to_vm(avm, mem)) { |
1374 | ret = add_bo_to_vm(adev, mem, avm, false, | |
a46a2cd1 FK |
1375 | &bo_va_entry); |
1376 | if (ret) | |
1377 | goto add_bo_to_vm_failed; | |
1378 | if (mem->aql_queue) { | |
5b21d3e5 | 1379 | ret = add_bo_to_vm(adev, mem, avm, |
a46a2cd1 FK |
1380 | true, &bo_va_entry_aql); |
1381 | if (ret) | |
1382 | goto add_bo_to_vm_failed_aql; | |
1383 | } | |
1384 | } else { | |
5b21d3e5 | 1385 | ret = vm_validate_pt_pd_bos(avm); |
a46a2cd1 FK |
1386 | if (unlikely(ret)) |
1387 | goto add_bo_to_vm_failed; | |
1388 | } | |
1389 | ||
5ae0283e FK |
1390 | if (mem->mapped_to_gpu_memory == 0 && |
1391 | !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { | |
a46a2cd1 FK |
1392 | /* Validate BO only once. The eviction fence gets added to BO |
1393 | * the first time it is mapped. Validate will wait for all | |
1394 | * background evictions to complete. | |
1395 | */ | |
1396 | ret = amdgpu_amdkfd_bo_validate(bo, domain, true); | |
1397 | if (ret) { | |
1398 | pr_debug("Validate failed\n"); | |
1399 | goto map_bo_to_gpuvm_failed; | |
1400 | } | |
1401 | } | |
1402 | ||
1403 | list_for_each_entry(entry, &mem->bo_va_list, bo_list) { | |
1404 | if (entry->bo_va->base.vm == vm && !entry->is_mapped) { | |
1405 | pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n", | |
1406 | entry->va, entry->va + bo_size, | |
1407 | entry); | |
1408 | ||
5ae0283e FK |
1409 | ret = map_bo_to_gpuvm(adev, entry, ctx.sync, |
1410 | is_invalid_userptr); | |
a46a2cd1 FK |
1411 | if (ret) { |
1412 | pr_err("Failed to map radeon bo to gpuvm\n"); | |
1413 | goto map_bo_to_gpuvm_failed; | |
1414 | } | |
1415 | ||
1416 | ret = vm_update_pds(vm, ctx.sync); | |
1417 | if (ret) { | |
1418 | pr_err("Failed to update page directories\n"); | |
1419 | goto map_bo_to_gpuvm_failed; | |
1420 | } | |
1421 | ||
1422 | entry->is_mapped = true; | |
1423 | mem->mapped_to_gpu_memory++; | |
1424 | pr_debug("\t INC mapping count %d\n", | |
1425 | mem->mapped_to_gpu_memory); | |
1426 | } | |
1427 | } | |
1428 | ||
1429 | if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count) | |
1430 | amdgpu_bo_fence(bo, | |
5b21d3e5 | 1431 | &avm->process_info->eviction_fence->base, |
a46a2cd1 FK |
1432 | true); |
1433 | ret = unreserve_bo_and_vms(&ctx, false, false); | |
1434 | ||
1435 | goto out; | |
1436 | ||
1437 | map_bo_to_gpuvm_failed: | |
1438 | if (bo_va_entry_aql) | |
1439 | remove_bo_from_vm(adev, bo_va_entry_aql, bo_size); | |
1440 | add_bo_to_vm_failed_aql: | |
1441 | if (bo_va_entry) | |
1442 | remove_bo_from_vm(adev, bo_va_entry, bo_size); | |
1443 | add_bo_to_vm_failed: | |
1444 | unreserve_bo_and_vms(&ctx, false, false); | |
1445 | out: | |
1446 | mutex_unlock(&mem->process_info->lock); | |
1447 | mutex_unlock(&mem->lock); | |
1448 | return ret; | |
1449 | } | |
1450 | ||
1451 | int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( | |
1452 | struct kgd_dev *kgd, struct kgd_mem *mem, void *vm) | |
1453 | { | |
1454 | struct amdgpu_device *adev = get_amdgpu_device(kgd); | |
1455 | struct amdkfd_process_info *process_info = | |
5b21d3e5 | 1456 | ((struct amdgpu_vm *)vm)->process_info; |
a46a2cd1 FK |
1457 | unsigned long bo_size = mem->bo->tbo.mem.size; |
1458 | struct kfd_bo_va_list *entry; | |
1459 | struct bo_vm_reservation_context ctx; | |
1460 | int ret; | |
1461 | ||
1462 | mutex_lock(&mem->lock); | |
1463 | ||
1464 | ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx); | |
1465 | if (unlikely(ret)) | |
1466 | goto out; | |
1467 | /* If no VMs were reserved, it means the BO wasn't actually mapped */ | |
1468 | if (ctx.n_vms == 0) { | |
1469 | ret = -EINVAL; | |
1470 | goto unreserve_out; | |
1471 | } | |
1472 | ||
5b21d3e5 | 1473 | ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm); |
a46a2cd1 FK |
1474 | if (unlikely(ret)) |
1475 | goto unreserve_out; | |
1476 | ||
1477 | pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n", | |
1478 | mem->va, | |
1479 | mem->va + bo_size * (1 + mem->aql_queue), | |
1480 | vm); | |
1481 | ||
1482 | list_for_each_entry(entry, &mem->bo_va_list, bo_list) { | |
1483 | if (entry->bo_va->base.vm == vm && entry->is_mapped) { | |
1484 | pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n", | |
1485 | entry->va, | |
1486 | entry->va + bo_size, | |
1487 | entry); | |
1488 | ||
1489 | ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync); | |
1490 | if (ret == 0) { | |
1491 | entry->is_mapped = false; | |
1492 | } else { | |
1493 | pr_err("failed to unmap VA 0x%llx\n", | |
1494 | mem->va); | |
1495 | goto unreserve_out; | |
1496 | } | |
1497 | ||
1498 | mem->mapped_to_gpu_memory--; | |
1499 | pr_debug("\t DEC mapping count %d\n", | |
1500 | mem->mapped_to_gpu_memory); | |
1501 | } | |
1502 | } | |
1503 | ||
1504 | /* If BO is unmapped from all VMs, unfence it. It can be evicted if | |
1505 | * required. | |
1506 | */ | |
1507 | if (mem->mapped_to_gpu_memory == 0 && | |
1508 | !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count) | |
1509 | amdgpu_amdkfd_remove_eviction_fence(mem->bo, | |
2d086fde | 1510 | process_info->eviction_fence); |
a46a2cd1 FK |
1511 | |
1512 | unreserve_out: | |
1513 | unreserve_bo_and_vms(&ctx, false, false); | |
1514 | out: | |
1515 | mutex_unlock(&mem->lock); | |
1516 | return ret; | |
1517 | } | |
1518 | ||
1519 | int amdgpu_amdkfd_gpuvm_sync_memory( | |
1520 | struct kgd_dev *kgd, struct kgd_mem *mem, bool intr) | |
1521 | { | |
1522 | struct amdgpu_sync sync; | |
1523 | int ret; | |
1524 | ||
1525 | amdgpu_sync_create(&sync); | |
1526 | ||
1527 | mutex_lock(&mem->lock); | |
1528 | amdgpu_sync_clone(&mem->sync, &sync); | |
1529 | mutex_unlock(&mem->lock); | |
1530 | ||
1531 | ret = amdgpu_sync_wait(&sync, intr); | |
1532 | amdgpu_sync_free(&sync); | |
1533 | return ret; | |
1534 | } | |
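| /* Note added for clarity (not part of the original source): the sync object | |
| * is cloned under mem->lock so that the potentially long (and optionally | |
| * interruptible) fence wait in amdgpu_amdkfd_gpuvm_sync_memory() happens | |
| * without holding the lock. | |
| */ | |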
1535 | ||
1536 | int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd, | |
1537 | struct kgd_mem *mem, void **kptr, uint64_t *size) | |
1538 | { | |
1539 | int ret; | |
1540 | struct amdgpu_bo *bo = mem->bo; | |
1541 | ||
1542 | if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { | |
1543 | pr_err("userptr can't be mapped to kernel\n"); | |
1544 | return -EINVAL; | |
1545 | } | |
1546 | ||
1547 | /* Remove kgd_mem from the kfd_bo_list so that this BO is not | |
1548 | * re-validated during BO restore after an eviction. | |
1549 | */ | |
1550 | mutex_lock(&mem->process_info->lock); | |
1551 | ||
1552 | ret = amdgpu_bo_reserve(bo, true); | |
1553 | if (ret) { | |
1554 | pr_err("Failed to reserve bo. ret %d\n", ret); | |
1555 | goto bo_reserve_failed; | |
1556 | } | |
1557 | ||
7b7c6c81 | 1558 | ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT); |
a46a2cd1 FK |
1559 | if (ret) { |
1560 | pr_err("Failed to pin bo. ret %d\n", ret); | |
1561 | goto pin_failed; | |
1562 | } | |
1563 | ||
1564 | ret = amdgpu_bo_kmap(bo, kptr); | |
1565 | if (ret) { | |
1566 | pr_err("Failed to map bo to kernel. ret %d\n", ret); | |
1567 | goto kmap_failed; | |
1568 | } | |
1569 | ||
1570 | amdgpu_amdkfd_remove_eviction_fence( | |
2d086fde | 1571 | bo, mem->process_info->eviction_fence); |
a46a2cd1 FK |
1572 | list_del_init(&mem->validate_list.head); |
1573 | ||
1574 | if (size) | |
1575 | *size = amdgpu_bo_size(bo); | |
1576 | ||
1577 | amdgpu_bo_unreserve(bo); | |
1578 | ||
1579 | mutex_unlock(&mem->process_info->lock); | |
1580 | return 0; | |
1581 | ||
1582 | kmap_failed: | |
1583 | amdgpu_bo_unpin(bo); | |
1584 | pin_failed: | |
1585 | amdgpu_bo_unreserve(bo); | |
1586 | bo_reserve_failed: | |
1587 | mutex_unlock(&mem->process_info->lock); | |
1588 | ||
1589 | return ret; | |
1590 | } | |
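| /* Note added for clarity (not part of the original source): the BO is pinned | |
| * in GTT, its eviction fence is removed and it is taken off the KFD validate | |
| * list, so the kernel mapping returned in *kptr stays valid even while the | |
| * rest of the process's BOs go through eviction and restore. | |
| */ | |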
1591 | ||
b97dfa27 | 1592 | int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd, |
1593 | struct kfd_vm_fault_info *mem) | |
1594 | { | |
1595 | struct amdgpu_device *adev; | |
1596 | ||
1597 | adev = (struct amdgpu_device *)kgd; | |
1598 | if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) { | |
1599 | *mem = *adev->gmc.vm_fault_info; | |
1600 | mb(); | |
1601 | atomic_set(&adev->gmc.vm_fault_info_updated, 0); | |
1602 | } | |
1603 | return 0; | |
1604 | } | |
1605 | ||
1dde0ea9 FK |
1606 | int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd, |
1607 | struct dma_buf *dma_buf, | |
1608 | uint64_t va, void *vm, | |
1609 | struct kgd_mem **mem, uint64_t *size, | |
1610 | uint64_t *mmap_offset) | |
1611 | { | |
1612 | struct amdgpu_device *adev = (struct amdgpu_device *)kgd; | |
1613 | struct drm_gem_object *obj; | |
1614 | struct amdgpu_bo *bo; | |
1615 | struct amdgpu_vm *avm = (struct amdgpu_vm *)vm; | |
1616 | ||
1617 | if (dma_buf->ops != &amdgpu_dmabuf_ops) | |
1618 | /* Can't handle non-graphics buffers */ | |
1619 | return -EINVAL; | |
1620 | ||
1621 | obj = dma_buf->priv; | |
1622 | if (obj->dev->dev_private != adev) | |
1623 | /* Can't handle buffers from other devices */ | |
1624 | return -EINVAL; | |
1625 | ||
1626 | bo = gem_to_amdgpu_bo(obj); | |
1627 | if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM | | |
1628 | AMDGPU_GEM_DOMAIN_GTT))) | |
1629 | /* Only VRAM and GTT BOs are supported */ | |
1630 | return -EINVAL; | |
1631 | ||
1632 | *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL); | |
1633 | if (!*mem) | |
1634 | return -ENOMEM; | |
1635 | ||
1636 | if (size) | |
1637 | *size = amdgpu_bo_size(bo); | |
1638 | ||
1639 | if (mmap_offset) | |
1640 | *mmap_offset = amdgpu_bo_mmap_offset(bo); | |
1641 | ||
1642 | INIT_LIST_HEAD(&(*mem)->bo_va_list); | |
1643 | mutex_init(&(*mem)->lock); | |
1644 | (*mem)->mapping_flags = | |
1645 | AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE | | |
1646 | AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_NC; | |
1647 | ||
1648 | (*mem)->bo = amdgpu_bo_ref(bo); | |
1649 | (*mem)->va = va; | |
1650 | (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ? | |
1651 | AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT; | |
1652 | (*mem)->mapped_to_gpu_memory = 0; | |
1653 | (*mem)->process_info = avm->process_info; | |
1654 | add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false); | |
1655 | amdgpu_sync_create(&(*mem)->sync); | |
1656 | ||
1657 | return 0; | |
1658 | } | |
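| /* Note added for clarity (not part of the original source): at this point | |
| * the imported BO is only wrapped in a kgd_mem and placed on the process's | |
| * kfd_bo_list; mapped_to_gpu_memory is still 0. The actual GPUVM mapping | |
| * happens later through the map-to-GPU path above, and the BO is | |
| * re-validated and re-fenced together with all other KFD BOs in | |
| * amdgpu_amdkfd_gpuvm_restore_process_bos(). | |
| */ | |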
1659 | ||
5ae0283e FK |
1660 | /* Evict a userptr BO by stopping the queues if necessary |
1661 | * | |
1662 | * Runs in MMU notifier, may be in RECLAIM_FS context. This means it | |
1663 | * cannot do any memory allocations, and cannot take any locks that | |
1664 | * are held elsewhere while allocating memory. Therefore this is as | |
1665 | * simple as possible, using atomic counters. | |
1666 | * | |
1667 | * It doesn't do anything to the BO itself. The real work happens in | |
1668 | * restore, where we get updated page addresses. This function only | |
1669 | * ensures that GPU access to the BO is stopped. | |
1670 | */ | |
e52482de FK |
1671 | int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, |
1672 | struct mm_struct *mm) | |
1673 | { | |
5ae0283e FK |
1674 | struct amdkfd_process_info *process_info = mem->process_info; |
1675 | int invalid, evicted_bos; | |
1676 | int r = 0; | |
1677 | ||
1678 | invalid = atomic_inc_return(&mem->invalid); | |
1679 | evicted_bos = atomic_inc_return(&process_info->evicted_bos); | |
1680 | if (evicted_bos == 1) { | |
1681 | /* First eviction, stop the queues */ | |
8e07e267 | 1682 | r = kgd2kfd_quiesce_mm(mm); |
5ae0283e FK |
1683 | if (r) |
1684 | pr_err("Failed to quiesce KFD\n"); | |
1685 | schedule_delayed_work(&process_info->restore_userptr_work, | |
1686 | msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS)); | |
1687 | } | |
1688 | ||
1689 | return r; | |
1690 | } | |
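| /* Illustrative sketch added for clarity (not part of the original source): | |
| * the eviction side above only touches two atomic counters, which is what | |
| * makes it safe in MMU-notifier (RECLAIM_FS) context: | |
| * | |
| *   atomic_inc_return(&mem->invalid);          // BO needs new user pages | |
| *   if (atomic_inc_return(&process_info->evicted_bos) == 1) | |
| *           kgd2kfd_quiesce_mm(mm);            // first eviction stops queues | |
| * | |
| * The delayed restore worker later re-reads both counters and only resumes | |
| * the queues if no further eviction raced with it. | |
| */ | |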
1691 | ||
1692 | /* Update invalid userptr BOs | |
1693 | * | |
1694 | * Moves invalidated (evicted) userptr BOs from userptr_valid_list to | |
1695 | * userptr_inval_list and updates user pages for all BOs that have | |
1696 | * been invalidated since their last update. | |
1697 | */ | |
1698 | static int update_invalid_user_pages(struct amdkfd_process_info *process_info, | |
1699 | struct mm_struct *mm) | |
1700 | { | |
1701 | struct kgd_mem *mem, *tmp_mem; | |
1702 | struct amdgpu_bo *bo; | |
1703 | struct ttm_operation_ctx ctx = { false, false }; | |
1704 | int invalid, ret; | |
1705 | ||
1706 | /* Move all invalidated BOs to the userptr_inval_list and | |
1707 | * release their user pages by migration to the CPU domain | |
1708 | */ | |
1709 | list_for_each_entry_safe(mem, tmp_mem, | |
1710 | &process_info->userptr_valid_list, | |
1711 | validate_list.head) { | |
1712 | if (!atomic_read(&mem->invalid)) | |
1713 | continue; /* BO is still valid */ | |
1714 | ||
1715 | bo = mem->bo; | |
1716 | ||
1717 | if (amdgpu_bo_reserve(bo, true)) | |
1718 | return -EAGAIN; | |
c704ab18 | 1719 | amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); |
5ae0283e FK |
1720 | ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); |
1721 | amdgpu_bo_unreserve(bo); | |
1722 | if (ret) { | |
1723 | pr_err("%s: Failed to invalidate userptr BO\n", | |
1724 | __func__); | |
1725 | return -EAGAIN; | |
1726 | } | |
1727 | ||
1728 | list_move_tail(&mem->validate_list.head, | |
1729 | &process_info->userptr_inval_list); | |
1730 | } | |
1731 | ||
1732 | if (list_empty(&process_info->userptr_inval_list)) | |
1733 | return 0; /* All evicted userptr BOs were freed */ | |
1734 | ||
1735 | /* Go through userptr_inval_list and update any invalid user_pages */ | |
1736 | list_for_each_entry(mem, &process_info->userptr_inval_list, | |
1737 | validate_list.head) { | |
1738 | invalid = atomic_read(&mem->invalid); | |
1739 | if (!invalid) | |
1740 | /* BO hasn't been invalidated since the last | |
1741 | * revalidation attempt. Keep its BO list. | |
1742 | */ | |
1743 | continue; | |
1744 | ||
1745 | bo = mem->bo; | |
1746 | ||
318c3f4b AD |
1747 | if (!mem->user_pages) { |
1748 | mem->user_pages = | |
1749 | kvmalloc_array(bo->tbo.ttm->num_pages, | |
1750 | sizeof(struct page *), | |
1751 | GFP_KERNEL | __GFP_ZERO); | |
1752 | if (!mem->user_pages) { | |
1753 | pr_err("%s: Failed to allocate pages array\n", | |
1754 | __func__); | |
1755 | return -ENOMEM; | |
1756 | } | |
1757 | } else if (mem->user_pages[0]) { | |
1758 | release_pages(mem->user_pages, bo->tbo.ttm->num_pages); | |
1759 | } | |
1760 | ||
5ae0283e FK |
1761 | /* Get updated user pages */ |
1762 | ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, | |
318c3f4b | 1763 | mem->user_pages); |
5ae0283e | 1764 | if (ret) { |
318c3f4b | 1765 | mem->user_pages[0] = NULL; |
5ae0283e FK |
1766 | pr_info("%s: Failed to get user pages: %d\n", |
1767 | __func__, ret); | |
1768 | /* Pretend it succeeded. It will fail later | |
1769 | * with a VM fault if the GPU tries to access | |
1770 | * it. Better than hanging indefinitely with | |
1771 | * stalled user mode queues. | |
1772 | */ | |
1773 | } | |
318c3f4b AD |
1774 | |
1775 | /* Mark the BO as valid unless it was invalidated | |
1776 | * again concurrently | |
1777 | */ | |
1778 | if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid) | |
1779 | return -EAGAIN; | |
5ae0283e FK |
1780 | } |
1781 | ||
e52482de FK |
1782 | return 0; |
1783 | } | |
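| /* Note added for clarity (not part of the original source): an error return | |
| * from this function (failed reservation, failed CPU-domain validation, a | |
| * failed pages-array allocation, or a concurrent invalidation) makes the | |
| * restore worker below bail out and reschedule another restore attempt after | |
| * the usual delay. | |
| */ | |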
1784 | ||
5ae0283e FK |
1785 | /* Validate invalid userptr BOs |
1786 | * | |
1787 | * Validates BOs on the userptr_inval_list, and moves them back to the | |
1788 | * userptr_valid_list. Also updates GPUVM page tables with new page | |
1789 | * addresses and waits for the page table updates to complete. | |
1790 | */ | |
1791 | static int validate_invalid_user_pages(struct amdkfd_process_info *process_info) | |
1792 | { | |
1793 | struct amdgpu_bo_list_entry *pd_bo_list_entries; | |
1794 | struct list_head resv_list, duplicates; | |
1795 | struct ww_acquire_ctx ticket; | |
1796 | struct amdgpu_sync sync; | |
1797 | ||
1798 | struct amdgpu_vm *peer_vm; | |
1799 | struct kgd_mem *mem, *tmp_mem; | |
1800 | struct amdgpu_bo *bo; | |
1801 | struct ttm_operation_ctx ctx = { false, false }; | |
1802 | int i, ret; | |
1803 | ||
1804 | pd_bo_list_entries = kcalloc(process_info->n_vms, | |
1805 | sizeof(struct amdgpu_bo_list_entry), | |
1806 | GFP_KERNEL); | |
1807 | if (!pd_bo_list_entries) { | |
1808 | pr_err("%s: Failed to allocate PD BO list entries\n", __func__); | |
318c3f4b | 1809 | return -ENOMEM; |
5ae0283e FK |
1810 | } |
1811 | ||
1812 | INIT_LIST_HEAD(&resv_list); | |
1813 | INIT_LIST_HEAD(&duplicates); | |
1814 | ||
1815 | /* Get all the page directory BOs that need to be reserved */ | |
1816 | i = 0; | |
1817 | list_for_each_entry(peer_vm, &process_info->vm_list_head, | |
1818 | vm_list_node) | |
1819 | amdgpu_vm_get_pd_bo(peer_vm, &resv_list, | |
1820 | &pd_bo_list_entries[i++]); | |
1821 | /* Add the userptr_inval_list entries to resv_list */ | |
1822 | list_for_each_entry(mem, &process_info->userptr_inval_list, | |
1823 | validate_list.head) { | |
1824 | list_add_tail(&mem->resv_list.head, &resv_list); | |
1825 | mem->resv_list.bo = mem->validate_list.bo; | |
a9f34c70 | 1826 | mem->resv_list.num_shared = mem->validate_list.num_shared; |
5ae0283e FK |
1827 | } |
1828 | ||
1829 | /* Reserve all BOs and page tables for validation */ | |
1830 | ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates); | |
1831 | WARN(!list_empty(&duplicates), "Duplicates should be empty"); | |
1832 | if (ret) | |
318c3f4b | 1833 | goto out; |
5ae0283e FK |
1834 | |
1835 | amdgpu_sync_create(&sync); | |
1836 | ||
5ae0283e FK |
1837 | ret = process_validate_vms(process_info); |
1838 | if (ret) | |
1839 | goto unreserve_out; | |
1840 | ||
1841 | /* Validate BOs and update GPUVM page tables */ | |
1842 | list_for_each_entry_safe(mem, tmp_mem, | |
1843 | &process_info->userptr_inval_list, | |
1844 | validate_list.head) { | |
1845 | struct kfd_bo_va_list *bo_va_entry; | |
1846 | ||
1847 | bo = mem->bo; | |
1848 | ||
318c3f4b AD |
1849 | /* Copy pages array and validate the BO if we got user pages */ |
1850 | if (mem->user_pages[0]) { | |
1851 | amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, | |
1852 | mem->user_pages); | |
c704ab18 | 1853 | amdgpu_bo_placement_from_domain(bo, mem->domain); |
5ae0283e FK |
1854 | ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); |
1855 | if (ret) { | |
1856 | pr_err("%s: failed to validate BO\n", __func__); | |
1857 | goto unreserve_out; | |
1858 | } | |
1859 | } | |
1860 | ||
318c3f4b AD |
1861 | /* Validate succeeded, now the BO owns the pages, free |
1862 | * our copy of the pointer array. Put this BO back on | |
1863 | * the userptr_valid_list. If we need to revalidate | |
1864 | * it, we need to start from scratch. | |
1865 | */ | |
1866 | kvfree(mem->user_pages); | |
1867 | mem->user_pages = NULL; | |
5ae0283e FK |
1868 | list_move_tail(&mem->validate_list.head, |
1869 | &process_info->userptr_valid_list); | |
1870 | ||
1871 | /* Update mapping. If the BO was not validated | |
1872 | * (because we couldn't get user pages), this will | |
1873 | * clear the page table entries, which will result in | |
1874 | * VM faults if the GPU tries to access the invalid | |
1875 | * memory. | |
1876 | */ | |
1877 | list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) { | |
1878 | if (!bo_va_entry->is_mapped) | |
1879 | continue; | |
1880 | ||
1881 | ret = update_gpuvm_pte((struct amdgpu_device *) | |
1882 | bo_va_entry->kgd_dev, | |
1883 | bo_va_entry, &sync); | |
1884 | if (ret) { | |
1885 | pr_err("%s: update PTE failed\n", __func__); | |
1886 | /* make sure this gets validated again */ | |
1887 | atomic_inc(&mem->invalid); | |
1888 | goto unreserve_out; | |
1889 | } | |
1890 | } | |
1891 | } | |
1892 | ||
1893 | /* Update page directories */ | |
1894 | ret = process_update_pds(process_info, &sync); | |
1895 | ||
1896 | unreserve_out: | |
5ae0283e FK |
1897 | ttm_eu_backoff_reservation(&ticket, &resv_list); |
1898 | amdgpu_sync_wait(&sync, false); | |
1899 | amdgpu_sync_free(&sync); | |
318c3f4b | 1900 | out: |
5ae0283e FK |
1901 | kfree(pd_bo_list_entries); |
1902 | ||
1903 | return ret; | |
1904 | } | |
1905 | ||
1906 | /* Worker callback to restore evicted userptr BOs | |
1907 | * | |
1908 | * Tries to update and validate all userptr BOs. If successful and no | |
1909 | * concurrent evictions happened, the queues are restarted. Otherwise, | |
1910 | * reschedule for another attempt later. | |
1911 | */ | |
1912 | static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work) | |
1913 | { | |
1914 | struct delayed_work *dwork = to_delayed_work(work); | |
1915 | struct amdkfd_process_info *process_info = | |
1916 | container_of(dwork, struct amdkfd_process_info, | |
1917 | restore_userptr_work); | |
1918 | struct task_struct *usertask; | |
1919 | struct mm_struct *mm; | |
1920 | int evicted_bos; | |
1921 | ||
1922 | evicted_bos = atomic_read(&process_info->evicted_bos); | |
1923 | if (!evicted_bos) | |
1924 | return; | |
1925 | ||
1926 | /* Reference task and mm in case of concurrent process termination */ | |
1927 | usertask = get_pid_task(process_info->pid, PIDTYPE_PID); | |
1928 | if (!usertask) | |
1929 | return; | |
1930 | mm = get_task_mm(usertask); | |
1931 | if (!mm) { | |
1932 | put_task_struct(usertask); | |
1933 | return; | |
1934 | } | |
1935 | ||
1936 | mutex_lock(&process_info->lock); | |
1937 | ||
1938 | if (update_invalid_user_pages(process_info, mm)) | |
1939 | goto unlock_out; | |
1940 | /* userptr_inval_list can be empty if all evicted userptr BOs | |
1941 | * have been freed. In that case there is nothing to validate | |
1942 | * and we can just restart the queues. | |
1943 | */ | |
1944 | if (!list_empty(&process_info->userptr_inval_list)) { | |
1945 | if (atomic_read(&process_info->evicted_bos) != evicted_bos) | |
1946 | goto unlock_out; /* Concurrent eviction, try again */ | |
1947 | ||
1948 | if (validate_invalid_user_pages(process_info)) | |
1949 | goto unlock_out; | |
1950 | } | |
1951 | /* Final check for concurrent eviction and atomic update. If | |
1952 | * another eviction happens after successful update, it will | |
1953 | * be a first eviction that calls quiesce_mm. The eviction | |
1954 | * reference counting inside KFD will handle this case. | |
1955 | */ | |
1956 | if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) != | |
1957 | evicted_bos) | |
1958 | goto unlock_out; | |
1959 | evicted_bos = 0; | |
8e07e267 | 1960 | if (kgd2kfd_resume_mm(mm)) { |
5ae0283e FK |
1961 | pr_err("%s: Failed to resume KFD\n", __func__); |
1962 | /* No recovery from this failure. Probably the CP is | |
1963 | * hanging. No point trying again. | |
1964 | */ | |
1965 | } | |
1966 | unlock_out: | |
1967 | mutex_unlock(&process_info->lock); | |
1968 | mmput(mm); | |
1969 | put_task_struct(usertask); | |
1970 | ||
1971 | /* If validation failed, reschedule another attempt */ | |
1972 | if (evicted_bos) | |
1973 | schedule_delayed_work(&process_info->restore_userptr_work, | |
1974 | msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS)); | |
1975 | } | |
1976 | ||
a46a2cd1 FK |
1977 | /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given |
1978 | * KFD process identified by process_info | |
1979 | * | |
1980 | * @process_info: amdkfd_process_info of the KFD process | |
1981 | * | |
1982 | * After memory eviction, the restore thread calls this function. The function | |
1983 | * must be called while the process is still valid. BO restore involves: | |
1984 | * | |
1985 | * 1. Release old eviction fence and create new one | |
1986 | * 2. Get two copies of PD BO list from all the VMs. Keep one copy as pd_list. | |
1987 | * 3. Use the second PD list and kfd_bo_list to create a list (ctx.list) of | |
1988 | * BOs that need to be reserved. | |
1989 | * 4. Reserve all the BOs | |
1990 | * 5. Validate PD and PT BOs. | |
1991 | * 6. Validate all KFD BOs using kfd_bo_list, map them and add a new fence | |
1992 | * 7. Add fence to all PD and PT BOs. | |
1993 | * 8. Unreserve all BOs | |
1994 | */ | |
1995 | int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) | |
1996 | { | |
1997 | struct amdgpu_bo_list_entry *pd_bo_list; | |
1998 | struct amdkfd_process_info *process_info = info; | |
5b21d3e5 | 1999 | struct amdgpu_vm *peer_vm; |
a46a2cd1 FK |
2000 | struct kgd_mem *mem; |
2001 | struct bo_vm_reservation_context ctx; | |
2002 | struct amdgpu_amdkfd_fence *new_fence; | |
2003 | int ret = 0, i; | |
2004 | struct list_head duplicate_save; | |
2005 | struct amdgpu_sync sync_obj; | |
2006 | ||
2007 | INIT_LIST_HEAD(&duplicate_save); | |
2008 | INIT_LIST_HEAD(&ctx.list); | |
2009 | INIT_LIST_HEAD(&ctx.duplicates); | |
2010 | ||
2011 | pd_bo_list = kcalloc(process_info->n_vms, | |
2012 | sizeof(struct amdgpu_bo_list_entry), | |
2013 | GFP_KERNEL); | |
2014 | if (!pd_bo_list) | |
2015 | return -ENOMEM; | |
2016 | ||
2017 | i = 0; | |
2018 | mutex_lock(&process_info->lock); | |
2019 | list_for_each_entry(peer_vm, &process_info->vm_list_head, | |
2020 | vm_list_node) | |
5b21d3e5 | 2021 | amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]); |
a46a2cd1 FK |
2022 | |
2023 | /* Reserve all BOs and page tables/directory. Add all BOs from | |
2024 | * kfd_bo_list to ctx.list | |
2025 | */ | |
2026 | list_for_each_entry(mem, &process_info->kfd_bo_list, | |
2027 | validate_list.head) { | |
2028 | ||
2029 | list_add_tail(&mem->resv_list.head, &ctx.list); | |
2030 | mem->resv_list.bo = mem->validate_list.bo; | |
a9f34c70 | 2031 | mem->resv_list.num_shared = mem->validate_list.num_shared; |
a46a2cd1 FK |
2032 | } |
2033 | ||
2034 | ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list, | |
2035 | false, &duplicate_save); | |
2036 | if (ret) { | |
2037 | pr_debug("Memory eviction: TTM Reserve Failed. Try again\n"); | |
2038 | goto ttm_reserve_fail; | |
2039 | } | |
2040 | ||
2041 | amdgpu_sync_create(&sync_obj); | |
2042 | ||
2043 | /* Validate PDs and PTs */ | |
2044 | ret = process_validate_vms(process_info); | |
2045 | if (ret) | |
2046 | goto validate_map_fail; | |
2047 | ||
9130cc01 HK |
2048 | ret = process_sync_pds_resv(process_info, &sync_obj); |
2049 | if (ret) { | |
2050 | pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n"); | |
2051 | goto validate_map_fail; | |
a46a2cd1 FK |
2052 | } |
2053 | ||
2054 | /* Validate BOs and map them to GPUVM (update VM page tables). */ | |
2055 | list_for_each_entry(mem, &process_info->kfd_bo_list, | |
2056 | validate_list.head) { | |
2057 | ||
2058 | struct amdgpu_bo *bo = mem->bo; | |
2059 | uint32_t domain = mem->domain; | |
2060 | struct kfd_bo_va_list *bo_va_entry; | |
2061 | ||
2062 | ret = amdgpu_amdkfd_bo_validate(bo, domain, false); | |
2063 | if (ret) { | |
2064 | pr_debug("Memory eviction: Validate BOs failed. Try again\n"); | |
2065 | goto validate_map_fail; | |
2066 | } | |
5aae7335 | 2067 | ret = amdgpu_sync_fence(NULL, &sync_obj, bo->tbo.moving, false); |
3d97da44 HK |
2068 | if (ret) { |
2069 | pr_debug("Memory eviction: Sync BO fence failed. Try again\n"); | |
2070 | goto validate_map_fail; | |
2071 | } | |
a46a2cd1 FK |
2072 | list_for_each_entry(bo_va_entry, &mem->bo_va_list, |
2073 | bo_list) { | |
2074 | ret = update_gpuvm_pte((struct amdgpu_device *) | |
2075 | bo_va_entry->kgd_dev, | |
2076 | bo_va_entry, | |
2077 | &sync_obj); | |
2078 | if (ret) { | |
2079 | pr_debug("Memory eviction: update PTE failed. Try again\n"); | |
2080 | goto validate_map_fail; | |
2081 | } | |
2082 | } | |
2083 | } | |
2084 | ||
2085 | /* Update page directories */ | |
2086 | ret = process_update_pds(process_info, &sync_obj); | |
2087 | if (ret) { | |
2088 | pr_debug("Memory eviction: update PDs failed. Try again\n"); | |
2089 | goto validate_map_fail; | |
2090 | } | |
2091 | ||
3d97da44 | 2092 | /* Wait for validate and PT updates to finish */ |
a46a2cd1 FK |
2093 | amdgpu_sync_wait(&sync_obj, false); |
2094 | ||
2095 | /* Release the old eviction fence and create a new one, because a fence | |
2096 | * only goes from unsignaled to signaled and therefore cannot be reused. | |
2097 | * Use context and mm from the old fence. | |
2098 | */ | |
2099 | new_fence = amdgpu_amdkfd_fence_create( | |
2100 | process_info->eviction_fence->base.context, | |
2101 | process_info->eviction_fence->mm); | |
2102 | if (!new_fence) { | |
2103 | pr_err("Failed to create eviction fence\n"); | |
2104 | ret = -ENOMEM; | |
2105 | goto validate_map_fail; | |
2106 | } | |
2107 | dma_fence_put(&process_info->eviction_fence->base); | |
2108 | process_info->eviction_fence = new_fence; | |
2109 | *ef = dma_fence_get(&new_fence->base); | |
2110 | ||
3d97da44 | 2111 | /* Attach new eviction fence to all BOs */ |
a46a2cd1 FK |
2112 | list_for_each_entry(mem, &process_info->kfd_bo_list, |
2113 | validate_list.head) | |
2114 | amdgpu_bo_fence(mem->bo, | |
2115 | &process_info->eviction_fence->base, true); | |
2116 | ||
2117 | /* Attach eviction fence to PD / PT BOs */ | |
2118 | list_for_each_entry(peer_vm, &process_info->vm_list_head, | |
2119 | vm_list_node) { | |
5b21d3e5 | 2120 | struct amdgpu_bo *bo = peer_vm->root.base.bo; |
a46a2cd1 FK |
2121 | |
2122 | amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true); | |
2123 | } | |
2124 | ||
2125 | validate_map_fail: | |
2126 | ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list); | |
2127 | amdgpu_sync_free(&sync_obj); | |
2128 | ttm_reserve_fail: | |
2129 | mutex_unlock(&process_info->lock); | |
2130 | kfree(pd_bo_list); | |
2131 | return ret; | |
2132 | } |
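| /* Usage sketch added for clarity (not part of the original source; caller | |
| * field names are illustrative): the KFD restore path is expected to call | |
| * this with the opaque process-info pointer it obtained when the VM was | |
| * acquired, and to replace its cached eviction fence with the new one | |
| * returned through *ef, e.g.: | |
| * | |
| *   struct dma_fence *ef; | |
| * | |
| *   if (!amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info, &ef)) { | |
| *           dma_fence_put(p->ef); | |
| *           p->ef = ef; | |
| *   } | |
| */ | |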