// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_vm.h"

#include <linux/dma-fence-array.h>
#include <linux/nospec.h>

#include <drm/drm_exec.h>
#include <drm/drm_print.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/xe_drm.h>
#include <linux/ascii85.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/swap.h>

#include <generated/xe_wa_oob.h>

#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_drm_client.h"
#include "xe_exec_queue.h"
#include "xe_gt.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_migrate.h"
#include "xe_pat.h"
#include "xe_pm.h"
#include "xe_preempt_fence.h"
#include "xe_pt.h"
#include "xe_res_cursor.h"
#include "xe_sync.h"
#include "xe_trace.h"
#include "xe_wa.h"

static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
{
	return vm->gpuvm.r_obj;
}

/**
 * xe_vma_userptr_check_repin() - Advisory check for repin needed
 * @uvma: The userptr vma
 *
 * Check if the userptr vma has been invalidated since last successful
 * repin. The check is advisory only and the function can be called
 * without the vm->userptr.notifier_lock held. There is no guarantee that the
 * vma userptr will remain valid after a lockless check, so typically
 * the call needs to be followed by a proper check under the notifier_lock.
 *
 * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
 */
int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
{
	return mmu_interval_check_retry(&uvma->userptr.notifier,
					uvma->userptr.notifier_seq) ?
		-EAGAIN : 0;
}

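/*
 * Illustrative sketch (not part of the driver): the advisory check above is
 * typically paired with repinning and an authoritative re-check under the
 * notifier lock, along these lines:
 *
 *	if (xe_vma_userptr_check_repin(uvma) == -EAGAIN)
 *		err = xe_vma_userptr_pin_pages(uvma);
 *	...
 *	down_read(&vm->userptr.notifier_lock);
 *	err = __xe_vm_userptr_needs_repin(vm);	// authoritative check
 *	up_read(&vm->userptr.notifier_lock);
 */
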
int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
{
	struct xe_userptr *userptr = &uvma->userptr;
	struct xe_vma *vma = &uvma->vma;
	struct xe_vm *vm = xe_vma_vm(vma);
	struct xe_device *xe = vm->xe;
	const unsigned long num_pages = xe_vma_size(vma) >> PAGE_SHIFT;
	struct page **pages;
	bool in_kthread = !current->mm;
	unsigned long notifier_seq;
	int pinned, ret, i;
	bool read_only = xe_vma_read_only(vma);

	lockdep_assert_held(&vm->lock);
	xe_assert(xe, xe_vma_is_userptr(vma));
retry:
	if (vma->gpuva.flags & XE_VMA_DESTROYED)
		return 0;

	notifier_seq = mmu_interval_read_begin(&userptr->notifier);
	if (notifier_seq == userptr->notifier_seq)
		return 0;

	pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	if (userptr->sg) {
		dma_unmap_sgtable(xe->drm.dev,
				  userptr->sg,
				  read_only ? DMA_TO_DEVICE :
				  DMA_BIDIRECTIONAL, 0);
		sg_free_table(userptr->sg);
		userptr->sg = NULL;
	}

	pinned = ret = 0;
	if (in_kthread) {
		if (!mmget_not_zero(userptr->notifier.mm)) {
			ret = -EFAULT;
			goto mm_closed;
		}
		kthread_use_mm(userptr->notifier.mm);
	}

	while (pinned < num_pages) {
		ret = get_user_pages_fast(xe_vma_userptr(vma) +
					  pinned * PAGE_SIZE,
					  num_pages - pinned,
					  read_only ? 0 : FOLL_WRITE,
					  &pages[pinned]);
		if (ret < 0)
			break;

		pinned += ret;
		ret = 0;
	}

	if (in_kthread) {
		kthread_unuse_mm(userptr->notifier.mm);
		mmput(userptr->notifier.mm);
	}
mm_closed:
	if (ret)
		goto out;

	ret = sg_alloc_table_from_pages_segment(&userptr->sgt, pages,
						pinned, 0,
						(u64)pinned << PAGE_SHIFT,
						xe_sg_segment_size(xe->drm.dev),
						GFP_KERNEL);
	if (ret) {
		userptr->sg = NULL;
		goto out;
	}
	userptr->sg = &userptr->sgt;

	ret = dma_map_sgtable(xe->drm.dev, userptr->sg,
			      read_only ? DMA_TO_DEVICE :
			      DMA_BIDIRECTIONAL,
			      DMA_ATTR_SKIP_CPU_SYNC |
			      DMA_ATTR_NO_KERNEL_MAPPING);
	if (ret) {
		sg_free_table(userptr->sg);
		userptr->sg = NULL;
		goto out;
	}

	for (i = 0; i < pinned; ++i) {
		if (!read_only) {
			lock_page(pages[i]);
			set_page_dirty(pages[i]);
			unlock_page(pages[i]);
		}

		mark_page_accessed(pages[i]);
	}

out:
	release_pages(pages, pinned);
	kvfree(pages);

	if (!(ret < 0)) {
		userptr->notifier_seq = notifier_seq;
		if (xe_vma_userptr_check_repin(uvma) == -EAGAIN)
			goto retry;
	}

	return ret < 0 ? ret : 0;
}

static bool preempt_fences_waiting(struct xe_vm *vm)
{
	struct xe_exec_queue *q;

	lockdep_assert_held(&vm->lock);
	xe_vm_assert_held(vm);

	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
		if (!q->compute.pfence ||
		    (q->compute.pfence && test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
						   &q->compute.pfence->flags))) {
			return true;
		}
	}

	return false;
}

static void free_preempt_fences(struct list_head *list)
{
	struct list_head *link, *next;

	list_for_each_safe(link, next, list)
		xe_preempt_fence_free(to_preempt_fence_from_link(link));
}

static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
				unsigned int *count)
{
	lockdep_assert_held(&vm->lock);
	xe_vm_assert_held(vm);

	if (*count >= vm->preempt.num_exec_queues)
		return 0;

	for (; *count < vm->preempt.num_exec_queues; ++(*count)) {
		struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();

		if (IS_ERR(pfence))
			return PTR_ERR(pfence);

		list_move_tail(xe_preempt_fence_link(pfence), list);
	}

	return 0;
}

static int wait_for_existing_preempt_fences(struct xe_vm *vm)
{
	struct xe_exec_queue *q;

	xe_vm_assert_held(vm);

	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
		if (q->compute.pfence) {
			long timeout = dma_fence_wait(q->compute.pfence, false);

			if (timeout < 0)
				return -ETIME;
			dma_fence_put(q->compute.pfence);
			q->compute.pfence = NULL;
		}
	}

	return 0;
}

static bool xe_vm_is_idle(struct xe_vm *vm)
{
	struct xe_exec_queue *q;

	xe_vm_assert_held(vm);
	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
		if (!xe_exec_queue_is_idle(q))
			return false;
	}

	return true;
}

static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
{
	struct list_head *link;
	struct xe_exec_queue *q;

	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
		struct dma_fence *fence;

		link = list->next;
		xe_assert(vm->xe, link != list);

		fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
					     q, q->compute.context,
					     ++q->compute.seqno);
		dma_fence_put(q->compute.pfence);
		q->compute.pfence = fence;
	}
}

static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
{
	struct xe_exec_queue *q;
	int err;

	if (!vm->preempt.num_exec_queues)
		return 0;

	err = xe_bo_lock(bo, true);
	if (err)
		return err;

	err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues);
	if (err)
		goto out_unlock;

	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
		if (q->compute.pfence) {
			dma_resv_add_fence(bo->ttm.base.resv,
					   q->compute.pfence,
					   DMA_RESV_USAGE_BOOKKEEP);
		}

out_unlock:
	xe_bo_unlock(bo);
	return err;
}

static void resume_and_reinstall_preempt_fences(struct xe_vm *vm,
						struct drm_exec *exec)
{
	struct xe_exec_queue *q;

	lockdep_assert_held(&vm->lock);
	xe_vm_assert_held(vm);

	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
		q->ops->resume(q);

		drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->compute.pfence,
					 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
	}
}

int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
{
	struct drm_gpuvm_exec vm_exec = {
		.vm = &vm->gpuvm,
		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
		.num_fences = 1,
	};
	struct drm_exec *exec = &vm_exec.exec;
	struct dma_fence *pfence;
	int err;
	bool wait;

	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));

	down_write(&vm->lock);
	err = drm_gpuvm_exec_lock(&vm_exec);
	if (err)
		goto out_up_write;

	pfence = xe_preempt_fence_create(q, q->compute.context,
					 ++q->compute.seqno);
	if (!pfence) {
		err = -ENOMEM;
		goto out_fini;
	}

	list_add(&q->compute.link, &vm->preempt.exec_queues);
	++vm->preempt.num_exec_queues;
	q->compute.pfence = pfence;

	down_read(&vm->userptr.notifier_lock);

	drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, pfence,
				 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);

	/*
	 * Check to see if a preemption on the VM or a userptr invalidation
	 * is in flight; if so, trigger this preempt fence to sync state with
	 * other preempt fences on the VM.
	 */
	wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
	if (wait)
		dma_fence_enable_sw_signaling(pfence);

	up_read(&vm->userptr.notifier_lock);

out_fini:
	drm_exec_fini(exec);
out_up_write:
	up_write(&vm->lock);

	return err;
}

/**
 * xe_vm_remove_compute_exec_queue() - Remove compute exec queue from VM
 * @vm: The VM.
 * @q: The exec_queue
 */
void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
{
	if (!xe_vm_in_preempt_fence_mode(vm))
		return;

	down_write(&vm->lock);
	list_del(&q->compute.link);
	--vm->preempt.num_exec_queues;
	if (q->compute.pfence) {
		dma_fence_enable_sw_signaling(q->compute.pfence);
		dma_fence_put(q->compute.pfence);
		q->compute.pfence = NULL;
	}
	up_write(&vm->lock);
}

/**
 * __xe_vm_userptr_needs_repin() - Check whether the VM has userptrs
 * that need repinning.
 * @vm: The VM.
 *
 * This function checks for whether the VM has userptrs that need repinning,
 * and provides a release-type barrier on the userptr.notifier_lock after
 * checking.
 *
 * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
 */
int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
{
	lockdep_assert_held_read(&vm->userptr.notifier_lock);

	return (list_empty(&vm->userptr.repin_list) &&
		list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
}

#define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000

static void xe_vm_kill(struct xe_vm *vm)
{
	struct xe_exec_queue *q;

	lockdep_assert_held(&vm->lock);

	xe_vm_lock(vm, false);
	vm->flags |= XE_VM_FLAG_BANNED;
	trace_xe_vm_kill(vm);

	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
		q->ops->kill(q);
	xe_vm_unlock(vm);

	/* TODO: Inform user the VM is banned */
}

/**
 * xe_vm_validate_should_retry() - Whether to retry after a validate error.
 * @exec: The drm_exec object used for locking before validation.
 * @err: The error returned from ttm_bo_validate().
 * @end: A ktime_t cookie that should be set to 0 before first use and
 * that should be reused on subsequent calls.
 *
 * With multiple active VMs, under memory pressure, it is possible that
 * ttm_bo_validate() runs into -EDEADLK and in such a case returns -ENOMEM.
 * Until ttm properly handles locking in such scenarios, the best thing the
 * driver can do is retry with a timeout. Check if that is necessary, and
 * if so unlock the drm_exec's objects while keeping the ticket to prepare
 * for a rerun.
 *
 * Return: true if a retry after drm_exec_init() is recommended;
 * false otherwise.
 */
bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end)
{
	ktime_t cur;

	if (err != -ENOMEM)
		return false;

	cur = ktime_get();
	*end = *end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
	if (!ktime_before(cur, *end))
		return false;

	msleep(20);
	return true;
}

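/*
 * Illustrative sketch (not part of the driver): a caller typically drives
 * the retry with a ktime cookie, much like the rebind worker below:
 *
 *	ktime_t end = 0;
 *	int err;
 *
 * retry:
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 *	... lock objects, err = validate ...
 *	drm_exec_fini(&exec);
 *	if (err && xe_vm_validate_should_retry(&exec, err, &end))
 *		goto retry;
 */
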
static int xe_gpuvm_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
{
	struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
	struct drm_gpuva *gpuva;
	int ret;

	lockdep_assert_held(&vm->lock);
	drm_gpuvm_bo_for_each_va(gpuva, vm_bo)
		list_move_tail(&gpuva_to_vma(gpuva)->combined_links.rebind,
			       &vm->rebind_list);

	ret = xe_bo_validate(gem_to_xe_bo(vm_bo->obj), vm, false);
	if (ret)
		return ret;

	vm_bo->evicted = false;
	return 0;
}

static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
				 bool *done)
{
	int err;

	/*
	 * 1 fence for each preempt fence plus a fence for each tile from a
	 * possible rebind
	 */
	err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, vm->preempt.num_exec_queues +
				   vm->xe->info.tile_count);
	if (err)
		return err;

	if (xe_vm_is_idle(vm)) {
		vm->preempt.rebind_deactivated = true;
		*done = true;
		return 0;
	}

	if (!preempt_fences_waiting(vm)) {
		*done = true;
		return 0;
	}

	err = drm_gpuvm_prepare_objects(&vm->gpuvm, exec, vm->preempt.num_exec_queues);
	if (err)
		return err;

	err = wait_for_existing_preempt_fences(vm);
	if (err)
		return err;

	return drm_gpuvm_validate(&vm->gpuvm, exec);
}

static void preempt_rebind_work_func(struct work_struct *w)
{
	struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
	struct drm_exec exec;
	struct dma_fence *rebind_fence;
	unsigned int fence_count = 0;
	LIST_HEAD(preempt_fences);
	ktime_t end = 0;
	int err = 0;
	long wait;
	int __maybe_unused tries = 0;

	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
	trace_xe_vm_rebind_worker_enter(vm);

	down_write(&vm->lock);

	if (xe_vm_is_closed_or_banned(vm)) {
		up_write(&vm->lock);
		trace_xe_vm_rebind_worker_exit(vm);
		return;
	}

retry:
	if (xe_vm_userptr_check_repin(vm)) {
		err = xe_vm_userptr_pin(vm);
		if (err)
			goto out_unlock_outer;
	}

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);

	drm_exec_until_all_locked(&exec) {
		bool done = false;

		err = xe_preempt_work_begin(&exec, vm, &done);
		drm_exec_retry_on_contention(&exec);
		if (err || done) {
			drm_exec_fini(&exec);
			if (err && xe_vm_validate_should_retry(&exec, err, &end))
				err = -EAGAIN;

			goto out_unlock_outer;
		}
	}

	err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
	if (err)
		goto out_unlock;

	rebind_fence = xe_vm_rebind(vm, true);
	if (IS_ERR(rebind_fence)) {
		err = PTR_ERR(rebind_fence);
		goto out_unlock;
	}

	if (rebind_fence) {
		dma_fence_wait(rebind_fence, false);
		dma_fence_put(rebind_fence);
	}

	/* Wait on munmap style VM unbinds */
	wait = dma_resv_wait_timeout(xe_vm_resv(vm),
				     DMA_RESV_USAGE_KERNEL,
				     false, MAX_SCHEDULE_TIMEOUT);
	if (wait <= 0) {
		err = -ETIME;
		goto out_unlock;
	}

#define retry_required(__tries, __vm) \
	(IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
	(!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
	__xe_vm_userptr_needs_repin(__vm))

	down_read(&vm->userptr.notifier_lock);
	if (retry_required(tries, vm)) {
		up_read(&vm->userptr.notifier_lock);
		err = -EAGAIN;
		goto out_unlock;
	}

#undef retry_required

	spin_lock(&vm->xe->ttm.lru_lock);
	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
	spin_unlock(&vm->xe->ttm.lru_lock);

	/* Point of no return. */
	arm_preempt_fences(vm, &preempt_fences);
	resume_and_reinstall_preempt_fences(vm, &exec);
	up_read(&vm->userptr.notifier_lock);

out_unlock:
	drm_exec_fini(&exec);
out_unlock_outer:
	if (err == -EAGAIN) {
		trace_xe_vm_rebind_worker_retry(vm);
		goto retry;
	}

	if (err) {
		drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
		xe_vm_kill(vm);
	}
	up_write(&vm->lock);

	free_preempt_fences(&preempt_fences);

	trace_xe_vm_rebind_worker_exit(vm);
}

static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
				   const struct mmu_notifier_range *range,
				   unsigned long cur_seq)
{
	struct xe_userptr *userptr = container_of(mni, typeof(*userptr), notifier);
	struct xe_userptr_vma *uvma = container_of(userptr, typeof(*uvma), userptr);
	struct xe_vma *vma = &uvma->vma;
	struct xe_vm *vm = xe_vma_vm(vma);
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	long err;

	xe_assert(vm->xe, xe_vma_is_userptr(vma));
	trace_xe_vma_userptr_invalidate(vma);

	if (!mmu_notifier_range_blockable(range))
		return false;

	down_write(&vm->userptr.notifier_lock);
	mmu_interval_set_seq(mni, cur_seq);

	/* No need to stop gpu access if the userptr is not yet bound. */
	if (!userptr->initial_bind) {
		up_write(&vm->userptr.notifier_lock);
		return true;
	}

	/*
	 * Tell exec and rebind worker they need to repin and rebind this
	 * userptr.
	 */
	if (!xe_vm_in_fault_mode(vm) &&
	    !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
		spin_lock(&vm->userptr.invalidated_lock);
		list_move_tail(&userptr->invalidate_link,
			       &vm->userptr.invalidated);
		spin_unlock(&vm->userptr.invalidated_lock);
	}

	up_write(&vm->userptr.notifier_lock);

	/*
	 * Preempt fences turn into schedule disables, pipeline these.
	 * Note that even in fault mode, we need to wait for binds and
	 * unbinds to complete, and those are attached as BOOKKEEP fences
	 * to the vm.
	 */
	dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
			    DMA_RESV_USAGE_BOOKKEEP);
	dma_resv_for_each_fence_unlocked(&cursor, fence)
		dma_fence_enable_sw_signaling(fence);
	dma_resv_iter_end(&cursor);

	err = dma_resv_wait_timeout(xe_vm_resv(vm),
				    DMA_RESV_USAGE_BOOKKEEP,
				    false, MAX_SCHEDULE_TIMEOUT);
	XE_WARN_ON(err <= 0);

	if (xe_vm_in_fault_mode(vm)) {
		err = xe_vm_invalidate_vma(vma);
		XE_WARN_ON(err);
	}

	trace_xe_vma_userptr_invalidate_complete(vma);

	return true;
}

static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
	.invalidate = vma_userptr_invalidate,
};

int xe_vm_userptr_pin(struct xe_vm *vm)
{
	struct xe_userptr_vma *uvma, *next;
	int err = 0;
	LIST_HEAD(tmp_evict);

	lockdep_assert_held_write(&vm->lock);

	/* Collect invalidated userptrs */
	spin_lock(&vm->userptr.invalidated_lock);
	list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
				 userptr.invalidate_link) {
		list_del_init(&uvma->userptr.invalidate_link);
		list_move_tail(&uvma->userptr.repin_link,
			       &vm->userptr.repin_list);
	}
	spin_unlock(&vm->userptr.invalidated_lock);

	/* Pin and move to temporary list */
	list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
				 userptr.repin_link) {
		err = xe_vma_userptr_pin_pages(uvma);
		if (err < 0)
			return err;

		list_del_init(&uvma->userptr.repin_link);
		list_move_tail(&uvma->vma.combined_links.rebind, &vm->rebind_list);
	}

	return 0;
}

/**
 * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
 * that need repinning.
 * @vm: The VM.
 *
 * This function does an advisory check for whether the VM has userptrs that
 * need repinning.
 *
 * Return: 0 if there are no indications of userptrs needing repinning,
 * -EAGAIN if there are.
 */
int xe_vm_userptr_check_repin(struct xe_vm *vm)
{
	return (list_empty_careful(&vm->userptr.repin_list) &&
		list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
}

static struct dma_fence *
xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
	       struct xe_sync_entry *syncs, u32 num_syncs,
	       bool first_op, bool last_op);

struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
{
	struct dma_fence *fence = NULL;
	struct xe_vma *vma, *next;

	lockdep_assert_held(&vm->lock);
	if (xe_vm_in_lr_mode(vm) && !rebind_worker)
		return NULL;

	xe_vm_assert_held(vm);
	list_for_each_entry_safe(vma, next, &vm->rebind_list,
				 combined_links.rebind) {
		xe_assert(vm->xe, vma->tile_present);

		list_del_init(&vma->combined_links.rebind);
		dma_fence_put(fence);
		if (rebind_worker)
			trace_xe_vma_rebind_worker(vma);
		else
			trace_xe_vma_rebind_exec(vma);
		fence = xe_vm_bind_vma(vma, NULL, NULL, 0, false, false);
		if (IS_ERR(fence))
			return fence;
	}

	return fence;
}

static void xe_vma_free(struct xe_vma *vma)
{
	if (xe_vma_is_userptr(vma))
		kfree(to_userptr_vma(vma));
	else
		kfree(vma);
}

#define VMA_CREATE_FLAG_READ_ONLY	BIT(0)
#define VMA_CREATE_FLAG_IS_NULL		BIT(1)
#define VMA_CREATE_FLAG_DUMPABLE	BIT(2)

static struct xe_vma *xe_vma_create(struct xe_vm *vm,
				    struct xe_bo *bo,
				    u64 bo_offset_or_userptr,
				    u64 start, u64 end,
				    u16 pat_index, unsigned int flags)
{
	struct xe_vma *vma;
	struct xe_tile *tile;
	u8 id;
	bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
	bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
	bool dumpable = (flags & VMA_CREATE_FLAG_DUMPABLE);

	xe_assert(vm->xe, start < end);
	xe_assert(vm->xe, end < vm->size);

	/*
	 * Allocate and ensure that the xe_vma_is_userptr() return
	 * matches what was allocated.
	 */
	if (!bo && !is_null) {
		struct xe_userptr_vma *uvma = kzalloc(sizeof(*uvma), GFP_KERNEL);

		if (!uvma)
			return ERR_PTR(-ENOMEM);

		vma = &uvma->vma;
	} else {
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
		if (!vma)
			return ERR_PTR(-ENOMEM);

		if (is_null)
			vma->gpuva.flags |= DRM_GPUVA_SPARSE;
		if (bo)
			vma->gpuva.gem.obj = &bo->ttm.base;
	}

	INIT_LIST_HEAD(&vma->combined_links.rebind);

	INIT_LIST_HEAD(&vma->gpuva.gem.entry);
	vma->gpuva.vm = &vm->gpuvm;
	vma->gpuva.va.addr = start;
	vma->gpuva.va.range = end - start + 1;
	if (read_only)
		vma->gpuva.flags |= XE_VMA_READ_ONLY;
	if (dumpable)
		vma->gpuva.flags |= XE_VMA_DUMPABLE;

	for_each_tile(tile, vm->xe, id)
		vma->tile_mask |= 0x1 << id;

	if (GRAPHICS_VER(vm->xe) >= 20 || vm->xe->info.platform == XE_PVC)
		vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;

	vma->pat_index = pat_index;

	if (bo) {
		struct drm_gpuvm_bo *vm_bo;

		xe_bo_assert_held(bo);

		vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
		if (IS_ERR(vm_bo)) {
			xe_vma_free(vma);
			return ERR_CAST(vm_bo);
		}

		drm_gpuvm_bo_extobj_add(vm_bo);
		drm_gem_object_get(&bo->ttm.base);
		vma->gpuva.gem.offset = bo_offset_or_userptr;
		drm_gpuva_link(&vma->gpuva, vm_bo);
		drm_gpuvm_bo_put(vm_bo);
	} else /* userptr or null */ {
		if (!is_null) {
			struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
			u64 size = end - start + 1;
			int err;

			INIT_LIST_HEAD(&userptr->invalidate_link);
			INIT_LIST_HEAD(&userptr->repin_link);
			vma->gpuva.gem.offset = bo_offset_or_userptr;

			err = mmu_interval_notifier_insert(&userptr->notifier,
							   current->mm,
							   xe_vma_userptr(vma), size,
							   &vma_userptr_notifier_ops);
			if (err) {
				xe_vma_free(vma);
				return ERR_PTR(err);
			}

			userptr->notifier_seq = LONG_MAX;
		}

		xe_vm_get(vm);
	}

	return vma;
}

static void xe_vma_destroy_late(struct xe_vma *vma)
{
	struct xe_vm *vm = xe_vma_vm(vma);
	struct xe_device *xe = vm->xe;
	bool read_only = xe_vma_read_only(vma);

	if (vma->ufence) {
		xe_sync_ufence_put(vma->ufence);
		vma->ufence = NULL;
	}

	if (xe_vma_is_userptr(vma)) {
		struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;

		if (userptr->sg) {
			dma_unmap_sgtable(xe->drm.dev,
					  userptr->sg,
					  read_only ? DMA_TO_DEVICE :
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(userptr->sg);
			userptr->sg = NULL;
		}

		/*
		 * Since userptr pages are not pinned, we can't remove
		 * the notifier until we're sure the GPU is not accessing
		 * them anymore
		 */
		mmu_interval_notifier_remove(&userptr->notifier);
		xe_vm_put(vm);
	} else if (xe_vma_is_null(vma)) {
		xe_vm_put(vm);
	} else {
		xe_bo_put(xe_vma_bo(vma));
	}

	xe_vma_free(vma);
}

938 | ||
939 | static void vma_destroy_work_func(struct work_struct *w) | |
940 | { | |
941 | struct xe_vma *vma = | |
942 | container_of(w, struct xe_vma, destroy_work); | |
943 | ||
944 | xe_vma_destroy_late(vma); | |
945 | } | |
946 | ||
dd08ebf6 MB |
947 | static void vma_destroy_cb(struct dma_fence *fence, |
948 | struct dma_fence_cb *cb) | |
949 | { | |
950 | struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb); | |
951 | ||
952 | INIT_WORK(&vma->destroy_work, vma_destroy_work_func); | |
953 | queue_work(system_unbound_wq, &vma->destroy_work); | |
954 | } | |
955 | ||
956 | static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence) | |
957 | { | |
21ed3327 | 958 | struct xe_vm *vm = xe_vma_vm(vma); |
dd08ebf6 MB |
959 | |
960 | lockdep_assert_held_write(&vm->lock); | |
c73acc1e | 961 | xe_assert(vm->xe, list_empty(&vma->combined_links.destroy)); |
dd08ebf6 MB |
962 | |
963 | if (xe_vma_is_userptr(vma)) { | |
c73acc1e | 964 | xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED); |
b06d47be | 965 | |
dd08ebf6 | 966 | spin_lock(&vm->userptr.invalidated_lock); |
5bd24e78 | 967 | list_del(&to_userptr_vma(vma)->userptr.invalidate_link); |
dd08ebf6 | 968 | spin_unlock(&vm->userptr.invalidated_lock); |
37430402 | 969 | } else if (!xe_vma_is_null(vma)) { |
21ed3327 | 970 | xe_bo_assert_held(xe_vma_bo(vma)); |
dd08ebf6 | 971 | |
b06d47be | 972 | drm_gpuva_unlink(&vma->gpuva); |
dd08ebf6 MB |
973 | } |
974 | ||
975 | xe_vm_assert_held(vm); | |
dd08ebf6 MB |
976 | if (fence) { |
977 | int ret = dma_fence_add_callback(fence, &vma->destroy_cb, | |
978 | vma_destroy_cb); | |
979 | ||
980 | if (ret) { | |
981 | XE_WARN_ON(ret != -ENOENT); | |
982 | xe_vma_destroy_late(vma); | |
983 | } | |
984 | } else { | |
985 | xe_vma_destroy_late(vma); | |
986 | } | |
987 | } | |
988 | ||
/**
 * xe_vm_prepare_vma() - drm_exec utility to lock a vma
 * @exec: The drm_exec object we're currently locking for.
 * @vma: The vma for which we want to lock the vm resv and any attached
 * object's resv.
 * @num_shared: The number of dma-fence slots to pre-allocate in the
 * objects' reservation objects.
 *
 * Return: 0 on success, negative error code on error. In particular
 * may return -EDEADLK on WW transaction contention and -EINTR if
 * an interruptible wait is terminated by a signal.
 */
int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
		      unsigned int num_shared)
{
	struct xe_vm *vm = xe_vma_vm(vma);
	struct xe_bo *bo = xe_vma_bo(vma);
	int err;

	XE_WARN_ON(!vm);
	if (num_shared)
		err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared);
	else
		err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
	if (!err && bo && !bo->vm) {
		if (num_shared)
			err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_shared);
		else
			err = drm_exec_lock_obj(exec, &bo->ttm.base);
	}

	return err;
}

static void xe_vma_destroy_unlocked(struct xe_vma *vma)
{
	struct drm_exec exec;
	int err;

	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		err = xe_vm_prepare_vma(&exec, vma, 0);
		drm_exec_retry_on_contention(&exec);
		if (XE_WARN_ON(err))
			break;
	}

	xe_vma_destroy(vma, NULL);

	drm_exec_fini(&exec);
}

struct xe_vma *
xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range)
{
	struct drm_gpuva *gpuva;

	lockdep_assert_held(&vm->lock);

	if (xe_vm_is_closed_or_banned(vm))
		return NULL;

	xe_assert(vm->xe, start + range <= vm->size);

	gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range);

	return gpuva ? gpuva_to_vma(gpuva) : NULL;
}

static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
{
	int err;

	xe_assert(vm->xe, xe_vma_vm(vma) == vm);
	lockdep_assert_held(&vm->lock);

	mutex_lock(&vm->snap_mutex);
	err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
	mutex_unlock(&vm->snap_mutex);
	XE_WARN_ON(err);	/* Shouldn't be possible */

	return err;
}

static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
{
	xe_assert(vm->xe, xe_vma_vm(vma) == vm);
	lockdep_assert_held(&vm->lock);

	mutex_lock(&vm->snap_mutex);
	drm_gpuva_remove(&vma->gpuva);
	mutex_unlock(&vm->snap_mutex);
	if (vm->usm.last_fault_vma == vma)
		vm->usm.last_fault_vma = NULL;
}

static struct drm_gpuva_op *xe_vm_op_alloc(void)
{
	struct xe_vma_op *op;

	op = kzalloc(sizeof(*op), GFP_KERNEL);

	if (unlikely(!op))
		return NULL;

	return &op->base;
}

static void xe_vm_free(struct drm_gpuvm *gpuvm);

static const struct drm_gpuvm_ops gpuvm_ops = {
	.op_alloc = xe_vm_op_alloc,
	.vm_bo_validate = xe_gpuvm_validate,
	.vm_free = xe_vm_free,
};

static u64 pde_encode_pat_index(struct xe_device *xe, u16 pat_index)
{
	u64 pte = 0;

	if (pat_index & BIT(0))
		pte |= XE_PPGTT_PTE_PAT0;

	if (pat_index & BIT(1))
		pte |= XE_PPGTT_PTE_PAT1;

	return pte;
}

static u64 pte_encode_pat_index(struct xe_device *xe, u16 pat_index,
				u32 pt_level)
{
	u64 pte = 0;

	if (pat_index & BIT(0))
		pte |= XE_PPGTT_PTE_PAT0;

	if (pat_index & BIT(1))
		pte |= XE_PPGTT_PTE_PAT1;

	if (pat_index & BIT(2)) {
		if (pt_level)
			pte |= XE_PPGTT_PDE_PDPE_PAT2;
		else
			pte |= XE_PPGTT_PTE_PAT2;
	}

	if (pat_index & BIT(3))
		pte |= XELPG_PPGTT_PTE_PAT3;

	if (pat_index & (BIT(4)))
		pte |= XE2_PPGTT_PTE_PAT4;

	return pte;
}

static u64 pte_encode_ps(u32 pt_level)
{
	XE_WARN_ON(pt_level > MAX_HUGEPTE_LEVEL);

	if (pt_level == 1)
		return XE_PDE_PS_2M;
	else if (pt_level == 2)
		return XE_PDPE_PS_1G;

	return 0;
}

static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
			      const u16 pat_index)
{
	struct xe_device *xe = xe_bo_device(bo);
	u64 pde;

	pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
	pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
	pde |= pde_encode_pat_index(xe, pat_index);

	return pde;
}

static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
			      u16 pat_index, u32 pt_level)
{
	struct xe_device *xe = xe_bo_device(bo);
	u64 pte;

	pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
	pte |= pte_encode_pat_index(xe, pat_index, pt_level);
	pte |= pte_encode_ps(pt_level);

	if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
		pte |= XE_PPGTT_PTE_DM;

	return pte;
}

static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
			       u16 pat_index, u32 pt_level)
{
	struct xe_device *xe = xe_vma_vm(vma)->xe;

	pte |= XE_PAGE_PRESENT;

	if (likely(!xe_vma_read_only(vma)))
		pte |= XE_PAGE_RW;

	pte |= pte_encode_pat_index(xe, pat_index, pt_level);
	pte |= pte_encode_ps(pt_level);

	if (unlikely(xe_vma_is_null(vma)))
		pte |= XE_PTE_NULL;

	return pte;
}

static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
				u16 pat_index,
				u32 pt_level, bool devmem, u64 flags)
{
	u64 pte;

	/* Avoid passing random bits directly as flags */
	xe_assert(xe, !(flags & ~XE_PTE_PS64));

	pte = addr;
	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
	pte |= pte_encode_pat_index(xe, pat_index, pt_level);
	pte |= pte_encode_ps(pt_level);

	if (devmem)
		pte |= XE_PPGTT_PTE_DM;

	pte |= flags;

	return pte;
}

static const struct xe_pt_ops xelp_pt_ops = {
	.pte_encode_bo = xelp_pte_encode_bo,
	.pte_encode_vma = xelp_pte_encode_vma,
	.pte_encode_addr = xelp_pte_encode_addr,
	.pde_encode_bo = xelp_pde_encode_bo,
};

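/*
 * Note (illustrative, not part of the driver): these XeLP-era encode hooks
 * are installed at VM creation time (vm->pt_ops = &xelp_pt_ops below) and
 * the page-table code dispatches through them, e.g.:
 *
 *	pte = vm->pt_ops->pte_encode_bo(bo, offset, pat_index, pt_level);
 */
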
static void vm_destroy_work_func(struct work_struct *w);

/**
 * xe_vm_create_scratch() - Setup a scratch memory pagetable tree for the
 * given tile and vm.
 * @xe: xe device.
 * @tile: tile to set up for.
 * @vm: vm to set up for.
 *
 * Sets up a pagetable tree with one page-table per level and a single
 * leaf PTE. All pagetable entries point to the single page-table or,
 * for MAX_HUGEPTE_LEVEL, a NULL huge PTE that returns 0 on reads and
 * turns writes into NOPs.
 *
 * Return: 0 on success, negative error code on error.
 */
static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile,
				struct xe_vm *vm)
{
	u8 id = tile->id;
	int i;

	for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; i++) {
		vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i);
		if (IS_ERR(vm->scratch_pt[id][i]))
			return PTR_ERR(vm->scratch_pt[id][i]);

		xe_pt_populate_empty(tile, vm, vm->scratch_pt[id][i]);
	}

	return 0;
}

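/*
 * Illustrative sketch (not part of the driver), assuming a 4-level tree and
 * MAX_HUGEPTE_LEVEL of 2 (matching the 1G encoding in pte_encode_ps()):
 * the loop above creates only scratch_pt[id][2]; pt_root entries point at
 * it, and its own entries are NULL huge PTEs (reads return 0, writes NOP).
 */
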
static void xe_vm_free_scratch(struct xe_vm *vm)
{
	struct xe_tile *tile;
	u8 id;

	if (!xe_vm_has_scratch(vm))
		return;

	for_each_tile(tile, vm->xe, id) {
		u32 i;

		if (!vm->pt_root[id])
			continue;

		for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; ++i)
			if (vm->scratch_pt[id][i])
				xe_pt_destroy(vm->scratch_pt[id][i], vm->flags, NULL);
	}
}

struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
{
	struct drm_gem_object *vm_resv_obj;
	struct xe_vm *vm;
	int err, number_tiles = 0;
	struct xe_tile *tile;
	u8 id;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return ERR_PTR(-ENOMEM);

	vm->xe = xe;

	vm->size = 1ull << xe->info.va_bits;

	vm->flags = flags;

	init_rwsem(&vm->lock);
	mutex_init(&vm->snap_mutex);

	INIT_LIST_HEAD(&vm->rebind_list);

	INIT_LIST_HEAD(&vm->userptr.repin_list);
	INIT_LIST_HEAD(&vm->userptr.invalidated);
	init_rwsem(&vm->userptr.notifier_lock);
	spin_lock_init(&vm->userptr.invalidated_lock);

	INIT_WORK(&vm->destroy_work, vm_destroy_work_func);

	INIT_LIST_HEAD(&vm->preempt.exec_queues);
	vm->preempt.min_run_period_ms = 10;	/* FIXME: Wire up to uAPI */

	for_each_tile(tile, xe, id)
		xe_range_fence_tree_init(&vm->rftree[id]);

	vm->pt_ops = &xelp_pt_ops;

	if (!(flags & XE_VM_FLAG_MIGRATION))
		xe_device_mem_access_get(xe);

	vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
	if (!vm_resv_obj) {
		err = -ENOMEM;
		goto err_no_resv;
	}

	drm_gpuvm_init(&vm->gpuvm, "Xe VM", DRM_GPUVM_RESV_PROTECTED, &xe->drm,
		       vm_resv_obj, 0, vm->size, 0, 0, &gpuvm_ops);

	drm_gem_object_put(vm_resv_obj);

	err = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
	if (err)
		goto err_close;

	if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
		vm->flags |= XE_VM_FLAG_64K;

	for_each_tile(tile, xe, id) {
		if (flags & XE_VM_FLAG_MIGRATION &&
		    tile->id != XE_VM_FLAG_TILE_ID(flags))
			continue;

		vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
		if (IS_ERR(vm->pt_root[id])) {
			err = PTR_ERR(vm->pt_root[id]);
			vm->pt_root[id] = NULL;
			goto err_unlock_close;
		}
	}

	if (xe_vm_has_scratch(vm)) {
		for_each_tile(tile, xe, id) {
			if (!vm->pt_root[id])
				continue;

			err = xe_vm_create_scratch(xe, tile, vm);
			if (err)
				goto err_unlock_close;
		}
		vm->batch_invalidate_tlb = true;
	}

	if (flags & XE_VM_FLAG_LR_MODE) {
		INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
		vm->flags |= XE_VM_FLAG_LR_MODE;
		vm->batch_invalidate_tlb = false;
	}

	/* Fill pt_root after allocating scratch tables */
	for_each_tile(tile, xe, id) {
		if (!vm->pt_root[id])
			continue;

		xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
	}
	dma_resv_unlock(xe_vm_resv(vm));

	/* Kernel migration VM shouldn't have a circular loop.. */
	if (!(flags & XE_VM_FLAG_MIGRATION)) {
		for_each_tile(tile, xe, id) {
			struct xe_gt *gt = tile->primary_gt;
			struct xe_vm *migrate_vm;
			struct xe_exec_queue *q;
			u32 create_flags = EXEC_QUEUE_FLAG_VM;

			if (!vm->pt_root[id])
				continue;

			migrate_vm = xe_migrate_get_vm(tile->migrate);
			q = xe_exec_queue_create_class(xe, gt, migrate_vm,
						       XE_ENGINE_CLASS_COPY,
						       create_flags);
			xe_vm_put(migrate_vm);
			if (IS_ERR(q)) {
				err = PTR_ERR(q);
				goto err_close;
			}
			vm->q[id] = q;
			number_tiles++;
		}
	}

	if (number_tiles > 1)
		vm->composite_fence_ctx = dma_fence_context_alloc(1);

	mutex_lock(&xe->usm.lock);
	if (flags & XE_VM_FLAG_FAULT_MODE)
		xe->usm.num_vm_in_fault_mode++;
	else if (!(flags & XE_VM_FLAG_MIGRATION))
		xe->usm.num_vm_in_non_fault_mode++;
	mutex_unlock(&xe->usm.lock);

	trace_xe_vm_create(vm);

	return vm;

err_unlock_close:
	dma_resv_unlock(xe_vm_resv(vm));
err_close:
	xe_vm_close_and_put(vm);
	return ERR_PTR(err);

err_no_resv:
	mutex_destroy(&vm->snap_mutex);
	for_each_tile(tile, xe, id)
		xe_range_fence_tree_fini(&vm->rftree[id]);
	kfree(vm);
	if (!(flags & XE_VM_FLAG_MIGRATION))
		xe_device_mem_access_put(xe);
	return ERR_PTR(err);
}

static void xe_vm_close(struct xe_vm *vm)
{
	down_write(&vm->lock);
	vm->size = 0;
	up_write(&vm->lock);
}

void xe_vm_close_and_put(struct xe_vm *vm)
{
	LIST_HEAD(contested);
	struct xe_device *xe = vm->xe;
	struct xe_tile *tile;
	struct xe_vma *vma, *next_vma;
	struct drm_gpuva *gpuva, *next;
	u8 id;

	xe_assert(xe, !vm->preempt.num_exec_queues);

	xe_vm_close(vm);
	if (xe_vm_in_preempt_fence_mode(vm))
		flush_work(&vm->preempt.rebind_work);

	down_write(&vm->lock);
	for_each_tile(tile, xe, id) {
		if (vm->q[id])
			xe_exec_queue_last_fence_put(vm->q[id], vm);
	}
	up_write(&vm->lock);

	for_each_tile(tile, xe, id) {
		if (vm->q[id]) {
			xe_exec_queue_kill(vm->q[id]);
			xe_exec_queue_put(vm->q[id]);
			vm->q[id] = NULL;
		}
	}

	down_write(&vm->lock);
	xe_vm_lock(vm, false);
	drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) {
		vma = gpuva_to_vma(gpuva);

		if (xe_vma_has_no_bo(vma)) {
			down_read(&vm->userptr.notifier_lock);
			vma->gpuva.flags |= XE_VMA_DESTROYED;
			up_read(&vm->userptr.notifier_lock);
		}

		xe_vm_remove_vma(vm, vma);

		/* easy case, remove from VMA? */
		if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) {
			list_del_init(&vma->combined_links.rebind);
			xe_vma_destroy(vma, NULL);
			continue;
		}

		list_move_tail(&vma->combined_links.destroy, &contested);
		vma->gpuva.flags |= XE_VMA_DESTROYED;
	}

	/*
	 * All vm operations will add shared fences to resv.
	 * The only exception is eviction for a shared object,
	 * but even so, the unbind when evicted would still
	 * install a fence to resv. Hence it's safe to
	 * destroy the pagetables immediately.
	 */
	xe_vm_free_scratch(vm);

	for_each_tile(tile, xe, id) {
		if (vm->pt_root[id]) {
			xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
			vm->pt_root[id] = NULL;
		}
	}
	xe_vm_unlock(vm);

	/*
	 * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL
	 * Since we hold a refcount to the bo, we can remove and free
	 * the members safely without locking.
	 */
	list_for_each_entry_safe(vma, next_vma, &contested,
				 combined_links.destroy) {
		list_del_init(&vma->combined_links.destroy);
		xe_vma_destroy_unlocked(vma);
	}

	up_write(&vm->lock);

	mutex_lock(&xe->usm.lock);
	if (vm->flags & XE_VM_FLAG_FAULT_MODE)
		xe->usm.num_vm_in_fault_mode--;
	else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
		xe->usm.num_vm_in_non_fault_mode--;
	mutex_unlock(&xe->usm.lock);

	for_each_tile(tile, xe, id)
		xe_range_fence_tree_fini(&vm->rftree[id]);

	xe_vm_put(vm);
}

1546 | static void vm_destroy_work_func(struct work_struct *w) | |
1547 | { | |
1548 | struct xe_vm *vm = | |
1549 | container_of(w, struct xe_vm, destroy_work); | |
dd08ebf6 | 1550 | struct xe_device *xe = vm->xe; |
876611c2 | 1551 | struct xe_tile *tile; |
dd08ebf6 MB |
1552 | u8 id; |
1553 | void *lookup; | |
1554 | ||
1555 | /* xe_vm_close_and_put was not called? */ | |
c73acc1e | 1556 | xe_assert(xe, !vm->size); |
dd08ebf6 | 1557 | |
0eb2a18a ML |
1558 | mutex_destroy(&vm->snap_mutex); |
1559 | ||
dd08ebf6 MB |
1560 | if (!(vm->flags & XE_VM_FLAG_MIGRATION)) { |
1561 | xe_device_mem_access_put(xe); | |
dd08ebf6 | 1562 | |
06d5ae90 | 1563 | if (xe->info.has_asid && vm->usm.asid) { |
a12d9216 MB |
1564 | mutex_lock(&xe->usm.lock); |
1565 | lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid); | |
c73acc1e | 1566 | xe_assert(xe, lookup == vm); |
a12d9216 MB |
1567 | mutex_unlock(&xe->usm.lock); |
1568 | } | |
dd08ebf6 MB |
1569 | } |
1570 | ||
b06d47be MB |
1571 | for_each_tile(tile, xe, id) |
1572 | XE_WARN_ON(vm->pt_root[id]); | |
dd08ebf6 | 1573 | |
dd08ebf6 MB |
1574 | trace_xe_vm_free(vm); |
1575 | dma_fence_put(vm->rebind_fence); | |
dd08ebf6 | 1576 | kfree(vm); |
dd08ebf6 MB |
1577 | } |
1578 | ||
b06d47be | 1579 | static void xe_vm_free(struct drm_gpuvm *gpuvm) |
dd08ebf6 | 1580 | { |
b06d47be | 1581 | struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm); |
dd08ebf6 MB |
1582 | |
1583 | /* To destroy the VM we need to be able to sleep */ | |
1584 | queue_work(system_unbound_wq, &vm->destroy_work); | |
1585 | } | |
1586 | ||
1587 | struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id) | |
1588 | { | |
1589 | struct xe_vm *vm; | |
1590 | ||
1591 | mutex_lock(&xef->vm.lock); | |
1592 | vm = xa_load(&xef->vm.xa, id); | |
dd08ebf6 MB |
1593 | if (vm) |
1594 | xe_vm_get(vm); | |
5835dc7f | 1595 | mutex_unlock(&xef->vm.lock); |
dd08ebf6 MB |
1596 | |
1597 | return vm; | |
1598 | } | |
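/*
 * Editor's sketch (illustrative only, not part of the driver): typical
 * use of xe_vm_lookup() from an ioctl handler. The lookup takes a
 * reference on success, so every caller must balance it with
 * xe_vm_put(). The function name below is hypothetical.
 */
static int example_vm_user(struct xe_file *xef, u32 vm_id)
{
	struct xe_vm *vm = xe_vm_lookup(xef, vm_id);

	if (!vm)
		return -ENOENT;

	/* ... operate on the VM under the appropriate locks ... */

	xe_vm_put(vm);
	return 0;
}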
1599 | ||
876611c2 | 1600 | u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile) |
dd08ebf6 | 1601 | { |
0e5e77bd | 1602 | return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0, |
e814389f | 1603 | tile_to_xe(tile)->pat.idx[XE_CACHE_WB]); |
dd08ebf6 MB |
1604 | } |
1605 | ||
e669f10c MB |
1606 | static struct xe_exec_queue * |
1607 | to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) | |
1608 | { | |
1609 | return q ? q : vm->q[0]; | |
1610 | } | |
1611 | ||
dd08ebf6 | 1612 | static struct dma_fence * |
9b9529ce | 1613 | xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q, |
b06d47be MB |
1614 | struct xe_sync_entry *syncs, u32 num_syncs, |
1615 | bool first_op, bool last_op) | |
dd08ebf6 | 1616 | { |
e669f10c MB |
1617 | struct xe_vm *vm = xe_vma_vm(vma); |
1618 | struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q); | |
876611c2 | 1619 | struct xe_tile *tile; |
dd08ebf6 MB |
1620 | struct dma_fence *fence = NULL; |
1621 | struct dma_fence **fences = NULL; | |
1622 | struct dma_fence_array *cf = NULL; | |
dd08ebf6 | 1623 | int cur_fence = 0, i; |
63412a5a | 1624 | int number_tiles = hweight8(vma->tile_present); |
dd08ebf6 MB |
1625 | int err; |
1626 | u8 id; | |
1627 | ||
1628 | trace_xe_vma_unbind(vma); | |
1629 | ||
158900ad MK |
1630 | if (vma->ufence) { |
1631 | struct xe_user_fence * const f = vma->ufence; | |
1632 | ||
1633 | if (!xe_sync_ufence_get_status(f)) | |
1634 | return ERR_PTR(-EBUSY); | |
1635 | ||
1636 | vma->ufence = NULL; | |
1637 | xe_sync_ufence_put(f); | |
1638 | } | |
1639 | ||
876611c2 MR |
1640 | if (number_tiles > 1) { |
1641 | fences = kmalloc_array(number_tiles, sizeof(*fences), | |
dd08ebf6 MB |
1642 | GFP_KERNEL); |
1643 | if (!fences) | |
1644 | return ERR_PTR(-ENOMEM); | |
1645 | } | |
1646 | ||
876611c2 MR |
1647 | for_each_tile(tile, vm->xe, id) { |
1648 | if (!(vma->tile_present & BIT(id))) | |
dd08ebf6 MB |
1649 | goto next; |
1650 | ||
9a674bef MB |
1651 | fence = __xe_pt_unbind_vma(tile, vma, q ? q : vm->q[id], |
1652 | first_op ? syncs : NULL, | |
b06d47be | 1653 | first_op ? num_syncs : 0); |
dd08ebf6 MB |
1654 | if (IS_ERR(fence)) { |
1655 | err = PTR_ERR(fence); | |
1656 | goto err_fences; | |
1657 | } | |
1658 | ||
1659 | if (fences) | |
1660 | fences[cur_fence++] = fence; | |
1661 | ||
1662 | next: | |
9b9529ce FD |
1663 | if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list)) |
1664 | q = list_next_entry(q, multi_gt_list); | |
dd08ebf6 MB |
1665 | } |
1666 | ||
1667 | if (fences) { | |
876611c2 | 1668 | cf = dma_fence_array_create(number_tiles, fences, |
dd08ebf6 MB |
1669 | vm->composite_fence_ctx, |
1670 | vm->composite_fence_seqno++, | |
1671 | false); | |
1672 | if (!cf) { | |
1673 | --vm->composite_fence_seqno; | |
1674 | err = -ENOMEM; | |
1675 | goto err_fences; | |
1676 | } | |
1677 | } | |
1678 | ||
04dfef5b BW |
1679 | fence = cf ? &cf->base : !fence ? |
1680 | xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence; | |
b06d47be MB |
1681 | if (last_op) { |
1682 | for (i = 0; i < num_syncs; i++) | |
04dfef5b | 1683 | xe_sync_entry_signal(&syncs[i], NULL, fence); |
b06d47be | 1684 | } |
dd08ebf6 | 1685 | |
04dfef5b | 1686 | return fence; |
dd08ebf6 MB |
1687 | |
1688 | err_fences: | |
1689 | if (fences) { | |
f3e9b1f4 | 1690 | while (cur_fence) |
dd08ebf6 | 1691 | dma_fence_put(fences[--cur_fence]); |
dd08ebf6 MB |
1692 | kfree(fences); |
1693 | } | |
1694 | ||
1695 | return ERR_PTR(err); | |
1696 | } | |
1697 | ||
1698 | static struct dma_fence * | |
9b9529ce | 1699 | xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q, |
b06d47be MB |
1700 | struct xe_sync_entry *syncs, u32 num_syncs, |
1701 | bool first_op, bool last_op) | |
dd08ebf6 | 1702 | { |
876611c2 | 1703 | struct xe_tile *tile; |
dd08ebf6 MB |
1704 | struct dma_fence *fence; |
1705 | struct dma_fence **fences = NULL; | |
1706 | struct dma_fence_array *cf = NULL; | |
21ed3327 | 1707 | struct xe_vm *vm = xe_vma_vm(vma); |
dd08ebf6 | 1708 | int cur_fence = 0, i; |
63412a5a | 1709 | int number_tiles = hweight8(vma->tile_mask); |
dd08ebf6 MB |
1710 | int err; |
1711 | u8 id; | |
1712 | ||
1713 | trace_xe_vma_bind(vma); | |
1714 | ||
876611c2 MR |
1715 | if (number_tiles > 1) { |
1716 | fences = kmalloc_array(number_tiles, sizeof(*fences), | |
dd08ebf6 MB |
1717 | GFP_KERNEL); |
1718 | if (!fences) | |
1719 | return ERR_PTR(-ENOMEM); | |
1720 | } | |
1721 | ||
876611c2 MR |
1722 | for_each_tile(tile, vm->xe, id) { |
1723 | if (!(vma->tile_mask & BIT(id))) | |
dd08ebf6 MB |
1724 | goto next; |
1725 | ||
9b9529ce | 1726 | fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id], |
7ead3315 | 1727 | first_op ? syncs : NULL, |
b06d47be | 1728 | first_op ? num_syncs : 0, |
876611c2 | 1729 | vma->tile_present & BIT(id)); |
dd08ebf6 MB |
1730 | if (IS_ERR(fence)) { |
1731 | err = PTR_ERR(fence); | |
1732 | goto err_fences; | |
1733 | } | |
1734 | ||
1735 | if (fences) | |
1736 | fences[cur_fence++] = fence; | |
1737 | ||
1738 | next: | |
9b9529ce FD |
1739 | if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list)) |
1740 | q = list_next_entry(q, multi_gt_list); | |
dd08ebf6 MB |
1741 | } |
1742 | ||
1743 | if (fences) { | |
876611c2 | 1744 | cf = dma_fence_array_create(number_tiles, fences, |
dd08ebf6 MB |
1745 | vm->composite_fence_ctx, |
1746 | vm->composite_fence_seqno++, | |
1747 | false); | |
1748 | if (!cf) { | |
1749 | --vm->composite_fence_seqno; | |
1750 | err = -ENOMEM; | |
1751 | goto err_fences; | |
1752 | } | |
1753 | } | |
1754 | ||
b06d47be MB |
1755 | if (last_op) { |
1756 | for (i = 0; i < num_syncs; i++) | |
1757 | xe_sync_entry_signal(&syncs[i], NULL, | |
1758 | cf ? &cf->base : fence); | |
1759 | } | |
dd08ebf6 MB |
1760 | |
1761 | return cf ? &cf->base : fence; | |
1762 | ||
1763 | err_fences: | |
1764 | if (fences) { | |
f3e9b1f4 | 1765 | while (cur_fence) |
dd08ebf6 | 1766 | dma_fence_put(fences[--cur_fence]); |
dd08ebf6 MB |
1767 | kfree(fences); |
1768 | } | |
1769 | ||
1770 | return ERR_PTR(err); | |
1771 | } | |
1772 | ||
158900ad MK |
1773 | static struct xe_user_fence * |
1774 | find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs) | |
1775 | { | |
1776 | unsigned int i; | |
1777 | ||
1778 | for (i = 0; i < num_syncs; i++) { | |
1779 | struct xe_sync_entry *e = &syncs[i]; | |
1780 | ||
1781 | if (xe_sync_is_ufence(e)) | |
1782 | return xe_sync_ufence_get(e); | |
1783 | } | |
1784 | ||
1785 | return NULL; | |
1786 | } | |
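/*
 * Editor's note (inferred from the usage in this file):
 * find_ufence_get() hands back a reference taken with
 * xe_sync_ufence_get(), so the eventual owner (vma->ufence in
 * __xe_vm_bind() below) must drop it with xe_sync_ufence_put(), as
 * xe_vm_unbind_vma() does once the fence has signalled.
 */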
1787 | ||
dd08ebf6 | 1788 | static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, |
9b9529ce | 1789 | struct xe_exec_queue *q, struct xe_sync_entry *syncs, |
f3e9b1f4 MB |
1790 | u32 num_syncs, bool immediate, bool first_op, |
1791 | bool last_op) | |
dd08ebf6 MB |
1792 | { |
1793 | struct dma_fence *fence; | |
e669f10c | 1794 | struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q); |
158900ad | 1795 | struct xe_user_fence *ufence; |
dd08ebf6 MB |
1796 | |
1797 | xe_vm_assert_held(vm); | |
1798 | ||
158900ad MK |
1799 | ufence = find_ufence_get(syncs, num_syncs); |
1800 | if (vma->ufence && ufence) | |
1801 | xe_sync_ufence_put(vma->ufence); | |
1802 | ||
1803 | vma->ufence = ufence ?: vma->ufence; | |
1804 | ||
b06d47be | 1805 | if (immediate) { |
9b9529ce | 1806 | fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op, |
b06d47be MB |
1807 | last_op); |
1808 | if (IS_ERR(fence)) | |
1809 | return PTR_ERR(fence); | |
1810 | } else { | |
1811 | int i; | |
1812 | ||
c73acc1e | 1813 | xe_assert(vm->xe, xe_vm_in_fault_mode(vm)); |
b06d47be | 1814 | |
e669f10c | 1815 | fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm); |
b06d47be MB |
1816 | if (last_op) { |
1817 | for (i = 0; i < num_syncs; i++) | |
1818 | xe_sync_entry_signal(&syncs[i], NULL, fence); | |
1819 | } | |
1820 | } | |
dd08ebf6 | 1821 | |
e669f10c MB |
1822 | if (last_op) |
1823 | xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence); | |
dd08ebf6 | 1824 | dma_fence_put(fence); |
f3e9b1f4 | 1825 | |
dd08ebf6 MB |
1826 | return 0; |
1827 | } | |
1828 | ||
9b9529ce | 1829 | static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q, |
dd08ebf6 | 1830 | struct xe_bo *bo, struct xe_sync_entry *syncs, |
f3e9b1f4 MB |
1831 | u32 num_syncs, bool immediate, bool first_op, |
1832 | bool last_op) | |
dd08ebf6 MB |
1833 | { |
1834 | int err; | |
1835 | ||
1836 | xe_vm_assert_held(vm); | |
1837 | xe_bo_assert_held(bo); | |
1838 | ||
b06d47be | 1839 | if (bo && immediate) { |
dd08ebf6 MB |
1840 | err = xe_bo_validate(bo, vm, true); |
1841 | if (err) | |
1842 | return err; | |
1843 | } | |
1844 | ||
f3e9b1f4 MB |
1845 | return __xe_vm_bind(vm, vma, q, syncs, num_syncs, immediate, first_op, |
1846 | last_op); | |
dd08ebf6 MB |
1847 | } |
1848 | ||
1849 | static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma, | |
9b9529ce | 1850 | struct xe_exec_queue *q, struct xe_sync_entry *syncs, |
f3e9b1f4 | 1851 | u32 num_syncs, bool first_op, bool last_op) |
dd08ebf6 MB |
1852 | { |
1853 | struct dma_fence *fence; | |
e669f10c | 1854 | struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q); |
dd08ebf6 MB |
1855 | |
1856 | xe_vm_assert_held(vm); | |
21ed3327 | 1857 | xe_bo_assert_held(xe_vma_bo(vma)); |
dd08ebf6 | 1858 | |
9b9529ce | 1859 | fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op); |
dd08ebf6 MB |
1860 | if (IS_ERR(fence)) |
1861 | return PTR_ERR(fence); | |
dd08ebf6 MB |
1862 | |
1863 | xe_vma_destroy(vma, fence); | |
e669f10c MB |
1864 | if (last_op) |
1865 | xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence); | |
dd08ebf6 MB |
1866 | dma_fence_put(fence); |
1867 | ||
1868 | return 0; | |
1869 | } | |
1870 | ||
3ac4a789 | 1871 | #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \ |
9329f066 | 1872 | DRM_XE_VM_CREATE_FLAG_LR_MODE | \ |
3ac4a789 | 1873 | DRM_XE_VM_CREATE_FLAG_FAULT_MODE) |
dd08ebf6 MB |
1874 | |
1875 | int xe_vm_create_ioctl(struct drm_device *dev, void *data, | |
1876 | struct drm_file *file) | |
1877 | { | |
1878 | struct xe_device *xe = to_xe_device(dev); | |
1879 | struct xe_file *xef = to_xe_file(file); | |
1880 | struct drm_xe_vm_create *args = data; | |
2ff00c4f | 1881 | struct xe_tile *tile; |
dd08ebf6 MB |
1882 | struct xe_vm *vm; |
1883 | u32 id, asid; | |
1884 | int err; | |
1885 | u32 flags = 0; | |
1886 | ||
7224788f RV |
1887 | if (XE_IOCTL_DBG(xe, args->extensions)) |
1888 | return -EINVAL; | |
1889 | ||
7f6c6e50 | 1890 | if (XE_WA(xe_root_mmio_gt(xe), 14016763929)) |
3ac4a789 | 1891 | args->flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE; |
7f6c6e50 | 1892 | |
3ac4a789 | 1893 | if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE && |
5a92da34 | 1894 | !xe->info.has_usm)) |
7f6c6e50 OZ |
1895 | return -EINVAL; |
1896 | ||
b8c1ba83 | 1897 | if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) |
1799c761 CS |
1898 | return -EINVAL; |
1899 | ||
b8c1ba83 | 1900 | if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS)) |
dd08ebf6 MB |
1901 | return -EINVAL; |
1902 | ||
3ac4a789 FD |
1903 | if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE && |
1904 | args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)) | |
dd08ebf6 MB |
1905 | return -EINVAL; |
1906 | ||
9329f066 | 1907 | if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE) && |
3ac4a789 | 1908 | args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)) |
dd08ebf6 MB |
1909 | return -EINVAL; |
1910 | ||
3ac4a789 | 1911 | if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE && |
dd08ebf6 MB |
1912 | xe_device_in_non_fault_mode(xe))) |
1913 | return -EINVAL; | |
1914 | ||
3ac4a789 | 1915 | if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE) && |
dd08ebf6 MB |
1916 | xe_device_in_fault_mode(xe))) |
1917 | return -EINVAL; | |
1918 | ||
f3e9b1f4 MB |
1919 | if (XE_IOCTL_DBG(xe, args->extensions)) |
1920 | return -EINVAL; | |
1921 | ||
3ac4a789 | 1922 | if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE) |
dd08ebf6 | 1923 | flags |= XE_VM_FLAG_SCRATCH_PAGE; |
9329f066 | 1924 | if (args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE) |
fdb6a053 | 1925 | flags |= XE_VM_FLAG_LR_MODE; |
3ac4a789 | 1926 | if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE) |
9329f066 | 1927 | flags |= XE_VM_FLAG_FAULT_MODE; |
dd08ebf6 MB |
1928 | |
1929 | vm = xe_vm_create(xe, flags); | |
1930 | if (IS_ERR(vm)) | |
1931 | return PTR_ERR(vm); | |
1932 | ||
dd08ebf6 MB |
1933 | mutex_lock(&xef->vm.lock); |
1934 | err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL); | |
1935 | mutex_unlock(&xef->vm.lock); | |
f6bf0424 MH |
1936 | if (err) |
1937 | goto err_close_and_put; | |
dd08ebf6 | 1938 | |
5669899e | 1939 | if (xe->info.has_asid) { |
a12d9216 MB |
1940 | mutex_lock(&xe->usm.lock); |
1941 | err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm, | |
d2f51c50 | 1942 | XA_LIMIT(1, XE_MAX_ASID - 1), |
a12d9216 MB |
1943 | &xe->usm.next_asid, GFP_KERNEL); |
1944 | mutex_unlock(&xe->usm.lock); | |
f6bf0424 MH |
1945 | if (err < 0) |
1946 | goto err_free_id; | |
1947 | ||
a12d9216 | 1948 | vm->usm.asid = asid; |
dd08ebf6 | 1949 | } |
dd08ebf6 MB |
1950 | |
1951 | args->vm_id = id; | |
9e4e9761 | 1952 | vm->xef = xef; |
dd08ebf6 | 1953 | |
2ff00c4f TU |
1954 | /* Record the BO memory used for VM page tables against the client */
1955 | for_each_tile(tile, xe, id) | |
1956 | if (vm->pt_root[id]) | |
1957 | xe_drm_client_add_bo(vm->xef->client, vm->pt_root[id]->bo); | |
1958 | ||
dd08ebf6 MB |
1959 | #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM) |
1960 | /* Warning: Security issue - never enable by default */ | |
58e19acf | 1961 | args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE); |
dd08ebf6 MB |
1962 | #endif |
1963 | ||
1964 | return 0; | |
f6bf0424 MH |
1965 | |
1966 | err_free_id: | |
1967 | mutex_lock(&xef->vm.lock); | |
1968 | xa_erase(&xef->vm.xa, id); | |
1969 | mutex_unlock(&xef->vm.lock); | |
1970 | err_close_and_put: | |
1971 | xe_vm_close_and_put(vm); | |
1972 | ||
1973 | return err; | |
dd08ebf6 MB |
1974 | } |
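/*
 * Editor's note on the ASID allocation above (generic xarray
 * semantics): xa_alloc_cyclic() stores the VM in the xarray, writes
 * the chosen id into @asid within XA_LIMIT(1, XE_MAX_ASID - 1), and
 * resumes searching from xe->usm.next_asid so recently freed ASIDs are
 * not immediately reused. It returns 0 on success, 1 if the search
 * wrapped around, and a negative errno on failure - hence the
 * 'err < 0' check rather than a plain 'err' check.
 */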
1975 | ||
1976 | int xe_vm_destroy_ioctl(struct drm_device *dev, void *data, | |
1977 | struct drm_file *file) | |
1978 | { | |
1979 | struct xe_device *xe = to_xe_device(dev); | |
1980 | struct xe_file *xef = to_xe_file(file); | |
1981 | struct drm_xe_vm_destroy *args = data; | |
1982 | struct xe_vm *vm; | |
5835dc7f | 1983 | int err = 0; |
dd08ebf6 | 1984 | |
b8c1ba83 FD |
1985 | if (XE_IOCTL_DBG(xe, args->pad) || |
1986 | XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) | |
dd08ebf6 MB |
1987 | return -EINVAL; |
1988 | ||
dd08ebf6 | 1989 | mutex_lock(&xef->vm.lock); |
5835dc7f | 1990 | vm = xa_load(&xef->vm.xa, args->vm_id); |
b8c1ba83 | 1991 | if (XE_IOCTL_DBG(xe, !vm)) |
5835dc7f | 1992 | err = -ENOENT; |
9b9529ce | 1993 | else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues)) |
5835dc7f TH |
1994 | err = -EBUSY; |
1995 | else | |
1996 | xa_erase(&xef->vm.xa, args->vm_id); | |
dd08ebf6 MB |
1997 | mutex_unlock(&xef->vm.lock); |
1998 | ||
5835dc7f TH |
1999 | if (!err) |
2000 | xe_vm_close_and_put(vm); | |
dd08ebf6 | 2001 | |
5835dc7f | 2002 | return err; |
dd08ebf6 MB |
2003 | } |
2004 | ||
2005 | static const u32 region_to_mem_type[] = { | |
2006 | XE_PL_TT, | |
2007 | XE_PL_VRAM0, | |
2008 | XE_PL_VRAM1, | |
2009 | }; | |
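/*
 * Editor's note: the prefetch 'region' argument used below indexes
 * this table directly - 0 selects system memory (XE_PL_TT) while 1
 * and 2 select the two possible VRAM instances. The index originates
 * from prefetch_mem_region_instance, validated against
 * xe->info.mem_region_mask in vm_bind_ioctl_check_args().
 */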
2010 | ||
2011 | static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma, | |
9b9529ce | 2012 | struct xe_exec_queue *q, u32 region, |
dd08ebf6 | 2013 | struct xe_sync_entry *syncs, u32 num_syncs, |
f3e9b1f4 | 2014 | bool first_op, bool last_op) |
dd08ebf6 | 2015 | { |
e669f10c | 2016 | struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q); |
dd08ebf6 MB |
2017 | int err; |
2018 | ||
c73acc1e | 2019 | xe_assert(vm->xe, region < ARRAY_SIZE(region_to_mem_type));
dd08ebf6 | 2020 | |
37430402 | 2021 | if (!xe_vma_has_no_bo(vma)) { |
21ed3327 | 2022 | err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]); |
dd08ebf6 MB |
2023 | if (err) |
2024 | return err; | |
2025 | } | |
2026 | ||
876611c2 | 2027 | if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) { |
9b9529ce | 2028 | return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs, |
f3e9b1f4 | 2029 | true, first_op, last_op); |
dd08ebf6 MB |
2030 | } else { |
2031 | int i; | |
2032 | ||
2033 | /* Nothing to do, signal fences now */ | |
b06d47be | 2034 | if (last_op) { |
e669f10c MB |
2035 | for (i = 0; i < num_syncs; i++) { |
2036 | struct dma_fence *fence = | |
2037 | xe_exec_queue_last_fence_get(wait_exec_queue, vm); | |
2038 | ||
2039 | xe_sync_entry_signal(&syncs[i], NULL, fence); | |
a856b67a | 2040 | dma_fence_put(fence); |
e669f10c | 2041 | } |
b06d47be | 2042 | } |
dd08ebf6 | 2043 | |
f3e9b1f4 | 2044 | return 0; |
dd08ebf6 | 2045 | } |
dd08ebf6 MB |
2046 | } |
2047 | ||
b06d47be MB |
2048 | static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma, |
2049 | bool post_commit) | |
dd08ebf6 | 2050 | { |
b06d47be MB |
2051 | down_read(&vm->userptr.notifier_lock); |
2052 | vma->gpuva.flags |= XE_VMA_DESTROYED; | |
2053 | up_read(&vm->userptr.notifier_lock); | |
2054 | if (post_commit) | |
2055 | xe_vm_remove_vma(vm, vma); | |
dd08ebf6 MB |
2056 | } |
2057 | ||
b06d47be MB |
2058 | #undef ULL |
2059 | #define ULL unsigned long long | |
2060 | ||
2061 | #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM) | |
2062 | static void print_op(struct xe_device *xe, struct drm_gpuva_op *op) | |
dd08ebf6 | 2063 | { |
b06d47be | 2064 | struct xe_vma *vma; |
dd08ebf6 | 2065 | |
b06d47be MB |
2066 | switch (op->op) { |
2067 | case DRM_GPUVA_OP_MAP: | |
2068 | vm_dbg(&xe->drm, "MAP: addr=0x%016llx, range=0x%016llx", | |
2069 | (ULL)op->map.va.addr, (ULL)op->map.va.range); | |
2070 | break; | |
2071 | case DRM_GPUVA_OP_REMAP: | |
2072 | vma = gpuva_to_vma(op->remap.unmap->va); | |
2073 | vm_dbg(&xe->drm, "REMAP:UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d", | |
2074 | (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma), | |
5f01a35b | 2075 | op->remap.unmap->keep ? 1 : 0); |
b06d47be MB |
2076 | if (op->remap.prev) |
2077 | vm_dbg(&xe->drm, | |
2078 | "REMAP:PREV: addr=0x%016llx, range=0x%016llx", | |
2079 | (ULL)op->remap.prev->va.addr, | |
2080 | (ULL)op->remap.prev->va.range); | |
2081 | if (op->remap.next) | |
2082 | vm_dbg(&xe->drm, | |
2083 | "REMAP:NEXT: addr=0x%016llx, range=0x%016llx", | |
2084 | (ULL)op->remap.next->va.addr, | |
2085 | (ULL)op->remap.next->va.range); | |
2086 | break; | |
2087 | case DRM_GPUVA_OP_UNMAP: | |
2088 | vma = gpuva_to_vma(op->unmap.va); | |
2089 | vm_dbg(&xe->drm, "UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d", | |
2090 | (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma), | |
2091 | op->unmap.keep ? 1 : 0); | |
2092 | break; | |
b1f8f4b5 BW |
2093 | case DRM_GPUVA_OP_PREFETCH: |
2094 | vma = gpuva_to_vma(op->prefetch.va); | |
2095 | vm_dbg(&xe->drm, "PREFETCH: addr=0x%016llx, range=0x%016llx", | |
2096 | (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma)); | |
2097 | break; | |
b06d47be | 2098 | default: |
5c0553cd | 2099 | drm_warn(&xe->drm, "NOT POSSIBLE"); |
b06d47be MB |
2100 | } |
2101 | } | |
2102 | #else | |
2103 | static void print_op(struct xe_device *xe, struct drm_gpuva_op *op) | |
dd08ebf6 | 2104 | { |
dd08ebf6 | 2105 | } |
b06d47be | 2106 | #endif |
dd08ebf6 | 2107 | |
b06d47be MB |
2108 | /* |
2109 | * Create the operations list from the IOCTL arguments and set up the operation
2110 | * fields so the parse and commit steps are decoupled from the arguments. This step can fail.
2111 | */ | |
2112 | static struct drm_gpuva_ops * | |
2113 | vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo, | |
2114 | u64 bo_offset_or_userptr, u64 addr, u64 range, | |
cad4a0d6 | 2115 | u32 operation, u32 flags, |
e1fbc4f1 | 2116 | u32 prefetch_region, u16 pat_index) |
dd08ebf6 | 2117 | { |
b06d47be | 2118 | struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL; |
b06d47be MB |
2119 | struct drm_gpuva_ops *ops; |
2120 | struct drm_gpuva_op *__op; | |
b06d47be MB |
2121 | struct drm_gpuvm_bo *vm_bo; |
2122 | int err; | |
dd08ebf6 | 2123 | |
b06d47be | 2124 | lockdep_assert_held_write(&vm->lock); |
dd08ebf6 | 2125 | |
b06d47be MB |
2126 | vm_dbg(&vm->xe->drm, |
2127 | "op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx", | |
78ddc872 | 2128 | operation, (ULL)addr, (ULL)range, |
b06d47be | 2129 | (ULL)bo_offset_or_userptr); |
dd08ebf6 | 2130 | |
78ddc872 | 2131 | switch (operation) { |
d5dc73db FD |
2132 | case DRM_XE_VM_BIND_OP_MAP: |
2133 | case DRM_XE_VM_BIND_OP_MAP_USERPTR: | |
b06d47be MB |
2134 | ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range, |
2135 | obj, bo_offset_or_userptr); | |
b06d47be | 2136 | break; |
d5dc73db | 2137 | case DRM_XE_VM_BIND_OP_UNMAP: |
b06d47be | 2138 | ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range); |
b06d47be | 2139 | break; |
d5dc73db | 2140 | case DRM_XE_VM_BIND_OP_PREFETCH: |
b06d47be | 2141 | ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range); |
b06d47be | 2142 | break; |
d5dc73db | 2143 | case DRM_XE_VM_BIND_OP_UNMAP_ALL: |
c73acc1e | 2144 | xe_assert(vm->xe, bo); |
dd08ebf6 | 2145 | |
08a4f00e | 2146 | err = xe_bo_lock(bo, true); |
b06d47be MB |
2147 | if (err) |
2148 | return ERR_PTR(err); | |
dd08ebf6 | 2149 | |
9d0c1c56 TH |
2150 | vm_bo = drm_gpuvm_bo_obtain(&vm->gpuvm, obj); |
2151 | if (IS_ERR(vm_bo)) { | |
2152 | xe_bo_unlock(bo); | |
2153 | return ERR_CAST(vm_bo); | |
2154 | } | |
b06d47be MB |
2155 | |
2156 | ops = drm_gpuvm_bo_unmap_ops_create(vm_bo); | |
2157 | drm_gpuvm_bo_put(vm_bo); | |
08a4f00e | 2158 | xe_bo_unlock(bo); |
b06d47be MB |
2159 | break; |
2160 | default: | |
5c0553cd | 2161 | drm_warn(&vm->xe->drm, "NOT POSSIBLE"); |
b06d47be MB |
2162 | ops = ERR_PTR(-EINVAL); |
2163 | } | |
40709aa7 MB |
2164 | if (IS_ERR(ops)) |
2165 | return ops; | |
dd08ebf6 | 2166 | |
40709aa7 MB |
2167 | drm_gpuva_for_each_op(__op, ops) { |
2168 | struct xe_vma_op *op = gpuva_op_to_vma_op(__op); | |
2169 | ||
40709aa7 | 2170 | if (__op->op == DRM_GPUVA_OP_MAP) { |
40709aa7 | 2171 | op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL; |
ffb7249d | 2172 | op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE; |
e1fbc4f1 | 2173 | op->map.pat_index = pat_index; |
40709aa7 MB |
2174 | } else if (__op->op == DRM_GPUVA_OP_PREFETCH) { |
2175 | op->prefetch.region = prefetch_region; | |
2176 | } | |
2177 | ||
2178 | print_op(vm->xe, __op); | |
2179 | } | |
b06d47be MB |
2180 | |
2181 | return ops; | |
dd08ebf6 MB |
2182 | } |
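/*
 * Editor's note: the drm_gpuva_ops list returned above is consumed by
 * vm_bind_ioctl_ops_parse() and is ultimately released with
 * drm_gpuva_ops_free(), either from xe_vma_op_cleanup() on success or
 * from vm_bind_ioctl_ops_unwind() on failure.
 */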
2183 | ||
b06d47be | 2184 | static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op, |
3b97e3b2 | 2185 | u16 pat_index, unsigned int flags) |
dd08ebf6 | 2186 | { |
b06d47be | 2187 | struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL; |
24f947d5 | 2188 | struct drm_exec exec; |
b06d47be | 2189 | struct xe_vma *vma; |
b06d47be | 2190 | int err; |
dd08ebf6 | 2191 | |
b06d47be | 2192 | lockdep_assert_held_write(&vm->lock); |
dd08ebf6 | 2193 | |
b06d47be | 2194 | if (bo) { |
d2197029 | 2195 | drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); |
24f947d5 TH |
2196 | drm_exec_until_all_locked(&exec) { |
2197 | err = 0; | |
2198 | if (!bo->vm) { | |
2199 | err = drm_exec_lock_obj(&exec, xe_vm_obj(vm)); | |
2200 | drm_exec_retry_on_contention(&exec); | |
2201 | } | |
2202 | if (!err) { | |
2203 | err = drm_exec_lock_obj(&exec, &bo->ttm.base); | |
2204 | drm_exec_retry_on_contention(&exec); | |
2205 | } | |
2206 | if (err) { | |
2207 | drm_exec_fini(&exec); | |
2208 | return ERR_PTR(err); | |
2209 | } | |
2210 | } | |
dd08ebf6 | 2211 | } |
b06d47be MB |
2212 | vma = xe_vma_create(vm, bo, op->gem.offset, |
2213 | op->va.addr, op->va.addr + | |
3b97e3b2 | 2214 | op->va.range - 1, pat_index, flags); |
b06d47be | 2215 | if (bo) |
24f947d5 | 2216 | drm_exec_fini(&exec); |
dd08ebf6 | 2217 | |
b06d47be | 2218 | if (xe_vma_is_userptr(vma)) { |
5bd24e78 | 2219 | err = xe_vma_userptr_pin_pages(to_userptr_vma(vma)); |
b06d47be MB |
2220 | if (err) { |
2221 | prep_vma_destroy(vm, vma, false); | |
2222 | xe_vma_destroy_unlocked(vma); | |
2223 | return ERR_PTR(err); | |
dd08ebf6 | 2224 | } |
b06d47be | 2225 | } else if (!xe_vma_has_no_bo(vma) && !bo->vm) { |
b06d47be MB |
2226 | err = add_preempt_fences(vm, bo); |
2227 | if (err) { | |
2228 | prep_vma_destroy(vm, vma, false); | |
2229 | xe_vma_destroy_unlocked(vma); | |
2230 | return ERR_PTR(err); | |
2231 | } | |
2232 | } | |
2233 | ||
2234 | return vma; | |
2235 | } | |
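/*
 * Editor's note on the locking loop above (drm_exec semantics):
 * drm_exec_until_all_locked() re-runs its body whenever
 * drm_exec_retry_on_contention() hits a contended lock, dropping all
 * locks taken so far first - which is why 'err' is reset at the top of
 * each pass. The VM's own object only needs locking explicitly for
 * external BOs (!bo->vm), since VM-private BOs share the VM's resv.
 */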
2236 | ||
8f33b4f0 MB |
2237 | static u64 xe_vma_max_pte_size(struct xe_vma *vma) |
2238 | { | |
2239 | if (vma->gpuva.flags & XE_VMA_PTE_1G) | |
2240 | return SZ_1G; | |
0f688c0e | 2241 | else if (vma->gpuva.flags & (XE_VMA_PTE_2M | XE_VMA_PTE_COMPACT)) |
8f33b4f0 | 2242 | return SZ_2M; |
15f0e0c2 MB |
2243 | else if (vma->gpuva.flags & XE_VMA_PTE_64K) |
2244 | return SZ_64K; | |
5ad6af5c MB |
2245 | else if (vma->gpuva.flags & XE_VMA_PTE_4K) |
2246 | return SZ_4K; | |
8f33b4f0 | 2247 | |
5ad6af5c | 2248 | return SZ_1G; /* Uninitialized, use max size */
8f33b4f0 MB |
2249 | } |
2250 | ||
19adacce | 2251 | static void xe_vma_set_pte_size(struct xe_vma *vma, u64 size) |
c47794bd MB |
2252 | { |
2253 | switch (size) { | |
2254 | case SZ_1G: | |
2255 | vma->gpuva.flags |= XE_VMA_PTE_1G; | |
2256 | break; | |
2257 | case SZ_2M: | |
2258 | vma->gpuva.flags |= XE_VMA_PTE_2M; | |
2259 | break; | |
15f0e0c2 MB |
2260 | case SZ_64K: |
2261 | vma->gpuva.flags |= XE_VMA_PTE_64K; | |
2262 | break; | |
19adacce MB |
2263 | case SZ_4K: |
2264 | vma->gpuva.flags |= XE_VMA_PTE_4K; | |
2265 | break; | |
c47794bd | 2266 | } |
c47794bd MB |
2267 | } |
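/*
 * Editor's sketch: the two helpers above round-trip, e.g.
 *
 *	xe_vma_set_pte_size(vma, SZ_2M);
 *	WARN_ON(xe_vma_max_pte_size(vma) != SZ_2M);
 *
 * while a VMA with no XE_VMA_PTE_* flag conservatively reports SZ_1G.
 * The REMAP skip_prev/skip_next logic below relies on this to test
 * whether a split boundary is aligned to the largest page size the old
 * mapping may have used.
 */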
2268 | ||
617eebb9 MB |
2269 | static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op) |
2270 | { | |
2271 | int err = 0; | |
2272 | ||
2273 | lockdep_assert_held_write(&vm->lock); | |
2274 | ||
2275 | switch (op->base.op) { | |
2276 | case DRM_GPUVA_OP_MAP: | |
2277 | err |= xe_vm_insert_vma(vm, op->map.vma); | |
2278 | if (!err) | |
2279 | op->flags |= XE_VMA_OP_COMMITTED; | |
2280 | break; | |
2281 | case DRM_GPUVA_OP_REMAP: | |
81d11b9d MB |
2282 | { |
2283 | u8 tile_present = | |
2284 | gpuva_to_vma(op->base.remap.unmap->va)->tile_present; | |
2285 | ||
617eebb9 MB |
2286 | prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va), |
2287 | true); | |
2288 | op->flags |= XE_VMA_OP_COMMITTED; | |
2289 | ||
2290 | if (op->remap.prev) { | |
2291 | err |= xe_vm_insert_vma(vm, op->remap.prev); | |
2292 | if (!err) | |
2293 | op->flags |= XE_VMA_OP_PREV_COMMITTED; | |
81d11b9d MB |
2294 | if (!err && op->remap.skip_prev) { |
2295 | op->remap.prev->tile_present = | |
2296 | tile_present; | |
617eebb9 | 2297 | op->remap.prev = NULL; |
81d11b9d | 2298 | } |
617eebb9 MB |
2299 | } |
2300 | if (op->remap.next) { | |
2301 | err |= xe_vm_insert_vma(vm, op->remap.next); | |
2302 | if (!err) | |
2303 | op->flags |= XE_VMA_OP_NEXT_COMMITTED; | |
81d11b9d MB |
2304 | if (!err && op->remap.skip_next) { |
2305 | op->remap.next->tile_present = | |
2306 | tile_present; | |
617eebb9 | 2307 | op->remap.next = NULL; |
81d11b9d | 2308 | } |
617eebb9 MB |
2309 | } |
2310 | ||
2311 | /* Adjust for partial unbind after removing VMA from VM */
2312 | if (!err) { | |
2313 | op->base.remap.unmap->va->va.addr = op->remap.start; | |
2314 | op->base.remap.unmap->va->va.range = op->remap.range; | |
2315 | } | |
2316 | break; | |
81d11b9d | 2317 | } |
617eebb9 MB |
2318 | case DRM_GPUVA_OP_UNMAP: |
2319 | prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true); | |
2320 | op->flags |= XE_VMA_OP_COMMITTED; | |
2321 | break; | |
2322 | case DRM_GPUVA_OP_PREFETCH: | |
2323 | op->flags |= XE_VMA_OP_COMMITTED; | |
2324 | break; | |
2325 | default: | |
5c0553cd | 2326 | drm_warn(&vm->xe->drm, "NOT POSSIBLE"); |
617eebb9 MB |
2327 | } |
2328 | ||
2329 | return err; | |
2330 | } | |
2331 | ||
2332 | ||
9b9529ce | 2333 | static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q, |
617eebb9 | 2334 | struct drm_gpuva_ops *ops, |
b06d47be | 2335 | struct xe_sync_entry *syncs, u32 num_syncs, |
d3d76739 | 2336 | struct list_head *ops_list, bool last) |
b06d47be | 2337 | { |
de74079f | 2338 | struct xe_device *xe = vm->xe; |
b06d47be | 2339 | struct xe_vma_op *last_op = NULL; |
617eebb9 MB |
2340 | struct drm_gpuva_op *__op; |
2341 | int err = 0; | |
b06d47be MB |
2342 | |
2343 | lockdep_assert_held_write(&vm->lock); | |
b06d47be | 2344 | |
617eebb9 MB |
2345 | drm_gpuva_for_each_op(__op, ops) { |
2346 | struct xe_vma_op *op = gpuva_op_to_vma_op(__op); | |
3b97e3b2 | 2347 | struct xe_vma *vma; |
617eebb9 | 2348 | bool first = list_empty(ops_list); |
3b97e3b2 | 2349 | unsigned int flags = 0; |
dd08ebf6 | 2350 | |
617eebb9 MB |
2351 | INIT_LIST_HEAD(&op->link); |
2352 | list_add_tail(&op->link, ops_list); | |
dd08ebf6 | 2353 | |
617eebb9 MB |
2354 | if (first) { |
2355 | op->flags |= XE_VMA_OP_FIRST; | |
2356 | op->num_syncs = num_syncs; | |
2357 | op->syncs = syncs; | |
2358 | } | |
dd08ebf6 | 2359 | |
617eebb9 MB |
2360 | op->q = q; |
2361 | ||
2362 | switch (op->base.op) { | |
2363 | case DRM_GPUVA_OP_MAP: | |
2364 | { | |
3b97e3b2 MB |
2365 | flags |= op->map.is_null ? |
2366 | VMA_CREATE_FLAG_IS_NULL : 0; | |
ffb7249d ML |
2367 | flags |= op->map.dumpable ? |
2368 | VMA_CREATE_FLAG_DUMPABLE : 0; | |
dd08ebf6 | 2369 | |
3b97e3b2 MB |
2370 | vma = new_vma(vm, &op->base.map, op->map.pat_index, |
2371 | flags); | |
f3e9b1f4 MB |
2372 | if (IS_ERR(vma)) |
2373 | return PTR_ERR(vma); | |
dd08ebf6 | 2374 | |
617eebb9 MB |
2375 | op->map.vma = vma; |
2376 | break; | |
2377 | } | |
2378 | case DRM_GPUVA_OP_REMAP: | |
2379 | { | |
2380 | struct xe_vma *old = | |
2381 | gpuva_to_vma(op->base.remap.unmap->va); | |
dd08ebf6 | 2382 | |
617eebb9 MB |
2383 | op->remap.start = xe_vma_start(old); |
2384 | op->remap.range = xe_vma_size(old); | |
dd08ebf6 | 2385 | |
617eebb9 | 2386 | if (op->base.remap.prev) { |
3b97e3b2 MB |
2387 | flags |= op->base.remap.unmap->va->flags & |
2388 | XE_VMA_READ_ONLY ? | |
2389 | VMA_CREATE_FLAG_READ_ONLY : 0; | |
2390 | flags |= op->base.remap.unmap->va->flags & | |
2391 | DRM_GPUVA_SPARSE ? | |
2392 | VMA_CREATE_FLAG_IS_NULL : 0; | |
ffb7249d ML |
2393 | flags |= op->base.remap.unmap->va->flags & |
2394 | XE_VMA_DUMPABLE ? | |
2395 | VMA_CREATE_FLAG_DUMPABLE : 0; | |
3b97e3b2 MB |
2396 | |
2397 | vma = new_vma(vm, op->base.remap.prev, | |
2398 | old->pat_index, flags); | |
f3e9b1f4 MB |
2399 | if (IS_ERR(vma)) |
2400 | return PTR_ERR(vma); | |
dd08ebf6 | 2401 | |
617eebb9 MB |
2402 | op->remap.prev = vma; |
2403 | ||
2404 | /* | |
2405 | * Userptr creates a new SG mapping so | |
2406 | * we must also rebind. | |
2407 | */ | |
2408 | op->remap.skip_prev = !xe_vma_is_userptr(old) && | |
2409 | IS_ALIGNED(xe_vma_end(vma), | |
2410 | xe_vma_max_pte_size(old)); | |
2411 | if (op->remap.skip_prev) { | |
2412 | xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old)); | |
2413 | op->remap.range -= | |
2414 | xe_vma_end(vma) - | |
2415 | xe_vma_start(old); | |
2416 | op->remap.start = xe_vma_end(vma); | |
de74079f MB |
2417 | vm_dbg(&xe->drm, "REMAP:SKIP_PREV: addr=0x%016llx, range=0x%016llx", |
2418 | (ULL)op->remap.start, | |
2419 | (ULL)op->remap.range); | |
617eebb9 | 2420 | } |
b06d47be | 2421 | } |
617eebb9 MB |
2422 | |
2423 | if (op->base.remap.next) { | |
3b97e3b2 MB |
2424 | flags |= op->base.remap.unmap->va->flags & |
2425 | XE_VMA_READ_ONLY ? | |
2426 | VMA_CREATE_FLAG_READ_ONLY : 0; | |
2427 | flags |= op->base.remap.unmap->va->flags & | |
2428 | DRM_GPUVA_SPARSE ? | |
2429 | VMA_CREATE_FLAG_IS_NULL : 0; | |
ffb7249d ML |
2430 | flags |= op->base.remap.unmap->va->flags & |
2431 | XE_VMA_DUMPABLE ? | |
2432 | VMA_CREATE_FLAG_DUMPABLE : 0; | |
3b97e3b2 MB |
2433 | |
2434 | vma = new_vma(vm, op->base.remap.next, | |
2435 | old->pat_index, flags); | |
f3e9b1f4 MB |
2436 | if (IS_ERR(vma)) |
2437 | return PTR_ERR(vma); | |
dd08ebf6 | 2438 | |
617eebb9 MB |
2439 | op->remap.next = vma; |
2440 | ||
2441 | /* | |
2442 | * Userptr creates a new SG mapping so | |
2443 | * we must also rebind. | |
2444 | */ | |
2445 | op->remap.skip_next = !xe_vma_is_userptr(old) && | |
2446 | IS_ALIGNED(xe_vma_start(vma), | |
2447 | xe_vma_max_pte_size(old)); | |
2448 | if (op->remap.skip_next) { | |
2449 | xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old)); | |
2450 | op->remap.range -= | |
2451 | xe_vma_end(old) - | |
2452 | xe_vma_start(vma); | |
de74079f MB |
2453 | vm_dbg(&xe->drm, "REMAP:SKIP_NEXT: addr=0x%016llx, range=0x%016llx", |
2454 | (ULL)op->remap.start, | |
2455 | (ULL)op->remap.range); | |
8f33b4f0 | 2456 | } |
b06d47be | 2457 | } |
617eebb9 MB |
2458 | break; |
2459 | } | |
2460 | case DRM_GPUVA_OP_UNMAP: | |
2461 | case DRM_GPUVA_OP_PREFETCH: | |
2462 | /* Nothing to do */ | |
2463 | break; | |
2464 | default: | |
5c0553cd | 2465 | drm_warn(&vm->xe->drm, "NOT POSSIBLE"); |
dd08ebf6 | 2466 | } |
dd08ebf6 | 2467 | |
617eebb9 MB |
2468 | last_op = op; |
2469 | ||
2470 | err = xe_vma_op_commit(vm, op); | |
2471 | if (err) | |
f3e9b1f4 | 2472 | return err; |
dd08ebf6 | 2473 | } |
dd08ebf6 | 2474 | |
617eebb9 MB |
2475 | /* FIXME: Unhandled corner case */ |
2476 | XE_WARN_ON(!last_op && last && !list_empty(ops_list)); | |
dd08ebf6 | 2477 | |
617eebb9 | 2478 | if (!last_op) |
f3e9b1f4 MB |
2479 | return 0; |
2480 | ||
617eebb9 MB |
2481 | last_op->ops = ops; |
2482 | if (last) { | |
2483 | last_op->flags |= XE_VMA_OP_LAST; | |
2484 | last_op->num_syncs = num_syncs; | |
2485 | last_op->syncs = syncs; | |
617eebb9 | 2486 | } |
dd08ebf6 | 2487 | |
dd08ebf6 | 2488 | return 0; |
dd08ebf6 MB |
2489 | } |
2490 | ||
1f727182 TH |
2491 | static int op_execute(struct drm_exec *exec, struct xe_vm *vm, |
2492 | struct xe_vma *vma, struct xe_vma_op *op) | |
dd08ebf6 MB |
2493 | { |
2494 | int err; | |
2495 | ||
b06d47be | 2496 | lockdep_assert_held_write(&vm->lock); |
dd08ebf6 | 2497 | |
1f727182 TH |
2498 | err = xe_vm_prepare_vma(exec, vma, 1); |
2499 | if (err) | |
b06d47be | 2500 | return err; |
dd08ebf6 | 2501 | |
b06d47be MB |
2502 | xe_vm_assert_held(vm); |
2503 | xe_bo_assert_held(xe_vma_bo(vma)); | |
dd08ebf6 | 2504 | |
b06d47be MB |
2505 | switch (op->base.op) { |
2506 | case DRM_GPUVA_OP_MAP: | |
9b9529ce | 2507 | err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma), |
f3e9b1f4 | 2508 | op->syncs, op->num_syncs, |
84a1ed5e | 2509 | !xe_vm_in_fault_mode(vm), |
b06d47be MB |
2510 | op->flags & XE_VMA_OP_FIRST, |
2511 | op->flags & XE_VMA_OP_LAST); | |
2512 | break; | |
2513 | case DRM_GPUVA_OP_REMAP: | |
2514 | { | |
2515 | bool prev = !!op->remap.prev; | |
2516 | bool next = !!op->remap.next; | |
2517 | ||
2518 | if (!op->remap.unmap_done) { | |
f3e9b1f4 | 2519 | if (prev || next) |
b06d47be | 2520 | vma->gpuva.flags |= XE_VMA_FIRST_REBIND; |
9b9529ce | 2521 | err = xe_vm_unbind(vm, vma, op->q, op->syncs, |
b06d47be | 2522 | op->num_syncs, |
b06d47be | 2523 | op->flags & XE_VMA_OP_FIRST, |
f3e9b1f4 MB |
2524 | op->flags & XE_VMA_OP_LAST && |
2525 | !prev && !next); | |
dd08ebf6 | 2526 | if (err) |
b06d47be MB |
2527 | break; |
2528 | op->remap.unmap_done = true; | |
dd08ebf6 | 2529 | } |
dd08ebf6 | 2530 | |
b06d47be MB |
2531 | if (prev) { |
2532 | op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND; | |
9b9529ce | 2533 | err = xe_vm_bind(vm, op->remap.prev, op->q, |
b06d47be | 2534 | xe_vma_bo(op->remap.prev), op->syncs, |
f3e9b1f4 | 2535 | op->num_syncs, true, false, |
b06d47be MB |
2536 | op->flags & XE_VMA_OP_LAST && !next); |
2537 | op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND; | |
dd08ebf6 | 2538 | if (err) |
b06d47be MB |
2539 | break; |
2540 | op->remap.prev = NULL; | |
dd08ebf6 | 2541 | } |
dd08ebf6 | 2542 | |
b06d47be MB |
2543 | if (next) { |
2544 | op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND; | |
9b9529ce | 2545 | err = xe_vm_bind(vm, op->remap.next, op->q, |
b06d47be MB |
2546 | xe_vma_bo(op->remap.next), |
2547 | op->syncs, op->num_syncs, | |
f3e9b1f4 | 2548 | true, false, |
b06d47be MB |
2549 | op->flags & XE_VMA_OP_LAST); |
2550 | op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND; | |
2551 | if (err) | |
2552 | break; | |
2553 | op->remap.next = NULL; | |
dd08ebf6 | 2554 | } |
b06d47be MB |
2555 | |
2556 | break; | |
dd08ebf6 | 2557 | } |
b06d47be | 2558 | case DRM_GPUVA_OP_UNMAP: |
9b9529ce | 2559 | err = xe_vm_unbind(vm, vma, op->q, op->syncs, |
f3e9b1f4 | 2560 | op->num_syncs, op->flags & XE_VMA_OP_FIRST, |
b06d47be MB |
2561 | op->flags & XE_VMA_OP_LAST); |
2562 | break; | |
2563 | case DRM_GPUVA_OP_PREFETCH: | |
9b9529ce | 2564 | err = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region, |
f3e9b1f4 | 2565 | op->syncs, op->num_syncs, |
b06d47be MB |
2566 | op->flags & XE_VMA_OP_FIRST, |
2567 | op->flags & XE_VMA_OP_LAST); | |
2568 | break; | |
2569 | default: | |
5c0553cd | 2570 | drm_warn(&vm->xe->drm, "NOT POSSIBLE"); |
dd08ebf6 MB |
2571 | } |
2572 | ||
1f727182 TH |
2573 | if (err) |
2574 | trace_xe_vma_fail(vma); | |
2575 | ||
2576 | return err; | |
2577 | } | |
2578 | ||
2579 | static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma, | |
2580 | struct xe_vma_op *op) | |
2581 | { | |
2582 | struct drm_exec exec; | |
2583 | int err; | |
2584 | ||
2585 | retry_userptr: | |
d2197029 | 2586 | drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); |
1f727182 TH |
2587 | drm_exec_until_all_locked(&exec) { |
2588 | err = op_execute(&exec, vm, vma, op); | |
2589 | drm_exec_retry_on_contention(&exec); | |
2590 | if (err) | |
2591 | break; | |
2592 | } | |
2593 | drm_exec_fini(&exec); | |
2594 | ||
447f74d2 | 2595 | if (err == -EAGAIN) { |
b06d47be | 2596 | lockdep_assert_held_write(&vm->lock); |
dd08ebf6 | 2597 | |
447f74d2 MB |
2598 | if (op->base.op == DRM_GPUVA_OP_REMAP) { |
2599 | if (!op->remap.unmap_done) | |
2600 | vma = gpuva_to_vma(op->base.remap.unmap->va); | |
2601 | else if (op->remap.prev) | |
2602 | vma = op->remap.prev; | |
2603 | else | |
2604 | vma = op->remap.next; | |
2605 | } | |
2606 | ||
2607 | if (xe_vma_is_userptr(vma)) { | |
2608 | err = xe_vma_userptr_pin_pages(to_userptr_vma(vma)); | |
2609 | if (!err) | |
2610 | goto retry_userptr; | |
2611 | ||
2612 | trace_xe_vma_fail(vma); | |
2613 | } | |
1f727182 | 2614 | } |
b06d47be MB |
2615 | |
2616 | return err; | |
dd08ebf6 MB |
2617 | } |
2618 | ||
b06d47be | 2619 | static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op) |
dd08ebf6 | 2620 | { |
b06d47be | 2621 | int ret = 0; |
dd08ebf6 | 2622 | |
b06d47be | 2623 | lockdep_assert_held_write(&vm->lock); |
dd08ebf6 | 2624 | |
b06d47be MB |
2625 | switch (op->base.op) { |
2626 | case DRM_GPUVA_OP_MAP: | |
2627 | ret = __xe_vma_op_execute(vm, op->map.vma, op); | |
2628 | break; | |
2629 | case DRM_GPUVA_OP_REMAP: | |
2630 | { | |
2631 | struct xe_vma *vma; | |
2632 | ||
2633 | if (!op->remap.unmap_done) | |
2634 | vma = gpuva_to_vma(op->base.remap.unmap->va); | |
2635 | else if (op->remap.prev) | |
2636 | vma = op->remap.prev; | |
2637 | else | |
2638 | vma = op->remap.next; | |
2639 | ||
2640 | ret = __xe_vma_op_execute(vm, vma, op); | |
2641 | break; | |
2642 | } | |
2643 | case DRM_GPUVA_OP_UNMAP: | |
2644 | ret = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va), | |
2645 | op); | |
2646 | break; | |
2647 | case DRM_GPUVA_OP_PREFETCH: | |
2648 | ret = __xe_vma_op_execute(vm, | |
2649 | gpuva_to_vma(op->base.prefetch.va), | |
2650 | op); | |
2651 | break; | |
2652 | default: | |
5c0553cd | 2653 | drm_warn(&vm->xe->drm, "NOT POSSIBLE"); |
dd08ebf6 MB |
2654 | } |
2655 | ||
b06d47be MB |
2656 | return ret; |
2657 | } | |
dd08ebf6 | 2658 | |
b06d47be MB |
2659 | static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op) |
2660 | { | |
2661 | bool last = op->flags & XE_VMA_OP_LAST; | |
dd08ebf6 | 2662 | |
b06d47be MB |
2663 | if (last) { |
2664 | while (op->num_syncs--) | |
2665 | xe_sync_entry_cleanup(&op->syncs[op->num_syncs]); | |
2666 | kfree(op->syncs); | |
9b9529ce FD |
2667 | if (op->q) |
2668 | xe_exec_queue_put(op->q); | |
b06d47be | 2669 | } |
f3e9b1f4 | 2670 | if (!list_empty(&op->link)) |
b06d47be | 2671 | list_del(&op->link); |
b06d47be MB |
2672 | if (op->ops) |
2673 | drm_gpuva_ops_free(&vm->gpuvm, op->ops); | |
2674 | if (last) | |
2675 | xe_vm_put(vm); | |
dd08ebf6 MB |
2676 | } |
2677 | ||
b06d47be | 2678 | static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op, |
5ef091fc MB |
2679 | bool post_commit, bool prev_post_commit, |
2680 | bool next_post_commit) | |
dd08ebf6 | 2681 | { |
b06d47be | 2682 | lockdep_assert_held_write(&vm->lock); |
dd08ebf6 | 2683 | |
b06d47be MB |
2684 | switch (op->base.op) { |
2685 | case DRM_GPUVA_OP_MAP: | |
2686 | if (op->map.vma) { | |
2687 | prep_vma_destroy(vm, op->map.vma, post_commit); | |
2688 | xe_vma_destroy_unlocked(op->map.vma); | |
2689 | } | |
2690 | break; | |
2691 | case DRM_GPUVA_OP_UNMAP: | |
2692 | { | |
2693 | struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va); | |
dd08ebf6 | 2694 | |
617eebb9 MB |
2695 | if (vma) { |
2696 | down_read(&vm->userptr.notifier_lock); | |
2697 | vma->gpuva.flags &= ~XE_VMA_DESTROYED; | |
2698 | up_read(&vm->userptr.notifier_lock); | |
2699 | if (post_commit) | |
2700 | xe_vm_insert_vma(vm, vma); | |
2701 | } | |
b06d47be MB |
2702 | break; |
2703 | } | |
2704 | case DRM_GPUVA_OP_REMAP: | |
2705 | { | |
2706 | struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va); | |
dd08ebf6 | 2707 | |
b06d47be | 2708 | if (op->remap.prev) { |
5ef091fc | 2709 | prep_vma_destroy(vm, op->remap.prev, prev_post_commit); |
b06d47be MB |
2710 | xe_vma_destroy_unlocked(op->remap.prev); |
2711 | } | |
2712 | if (op->remap.next) { | |
5ef091fc | 2713 | prep_vma_destroy(vm, op->remap.next, next_post_commit); |
b06d47be MB |
2714 | xe_vma_destroy_unlocked(op->remap.next); |
2715 | } | |
617eebb9 MB |
2716 | if (vma) { |
2717 | down_read(&vm->userptr.notifier_lock); | |
2718 | vma->gpuva.flags &= ~XE_VMA_DESTROYED; | |
2719 | up_read(&vm->userptr.notifier_lock); | |
2720 | if (post_commit) | |
2721 | xe_vm_insert_vma(vm, vma); | |
2722 | } | |
b06d47be MB |
2723 | break; |
2724 | } | |
2725 | case DRM_GPUVA_OP_PREFETCH: | |
2726 | /* Nothing to do */ | |
2727 | break; | |
2728 | default: | |
5c0553cd | 2729 | drm_warn(&vm->xe->drm, "NOT POSSIBLE"); |
dd08ebf6 | 2730 | } |
b06d47be | 2731 | } |
dd08ebf6 | 2732 | |
b06d47be MB |
2733 | static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm, |
2734 | struct drm_gpuva_ops **ops, | |
2735 | int num_ops_list) | |
2736 | { | |
2737 | int i; | |
2738 | ||
3acc1ff1 | 2739 | for (i = num_ops_list - 1; i >= 0; --i) { |
b06d47be MB |
2740 | struct drm_gpuva_ops *__ops = ops[i]; |
2741 | struct drm_gpuva_op *__op; | |
2742 | ||
2743 | if (!__ops) | |
2744 | continue; | |
2745 | ||
617eebb9 | 2746 | drm_gpuva_for_each_op_reverse(__op, __ops) { |
b06d47be MB |
2747 | struct xe_vma_op *op = gpuva_op_to_vma_op(__op); |
2748 | ||
617eebb9 MB |
2749 | xe_vma_op_unwind(vm, op, |
2750 | op->flags & XE_VMA_OP_COMMITTED, | |
2751 | op->flags & XE_VMA_OP_PREV_COMMITTED, | |
2752 | op->flags & XE_VMA_OP_NEXT_COMMITTED); | |
b06d47be | 2753 | } |
617eebb9 MB |
2754 | |
2755 | drm_gpuva_ops_free(&vm->gpuvm, __ops); | |
b06d47be | 2756 | } |
dd08ebf6 MB |
2757 | } |
2758 | ||
f3e9b1f4 MB |
2759 | static int vm_bind_ioctl_ops_execute(struct xe_vm *vm, |
2760 | struct list_head *ops_list) | |
2761 | { | |
2762 | struct xe_vma_op *op, *next; | |
2763 | int err; | |
2764 | ||
2765 | lockdep_assert_held_write(&vm->lock); | |
2766 | ||
2767 | list_for_each_entry_safe(op, next, ops_list, link) { | |
2768 | err = xe_vma_op_execute(vm, op); | |
2769 | if (err) { | |
2770 | drm_warn(&vm->xe->drm, "VM op(%d) failed with %d", | |
2771 | op->base.op, err); | |
2772 | /* | |
2773 | * FIXME: Killing VM rather than proper error handling | |
2774 | */ | |
2775 | xe_vm_kill(vm); | |
2776 | return -ENOSPC; | |
2777 | } | |
2778 | xe_vma_op_cleanup(vm, op); | |
2779 | } | |
2780 | ||
2781 | return 0; | |
2782 | } | |
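/*
 * Editor's note: a failure of any single operation above kills the
 * whole VM and reports -ENOSPC regardless of the underlying errno;
 * the FIXME marks this as a stand-in for proper unwinding of a
 * partially executed operation list.
 */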
2783 | ||
84a1ed5e | 2784 | #define SUPPORTED_FLAGS (DRM_XE_VM_BIND_FLAG_NULL | \ |
76a86b58 | 2785 | DRM_XE_VM_BIND_FLAG_DUMPABLE) |
dd08ebf6 | 2786 | #define XE_64K_PAGE_MASK 0xffffull |
d3d76739 | 2787 | #define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP) |
dd08ebf6 | 2788 | |
dd08ebf6 MB |
2789 | static int vm_bind_ioctl_check_args(struct xe_device *xe, |
2790 | struct drm_xe_vm_bind *args, | |
d3d76739 | 2791 | struct drm_xe_vm_bind_op **bind_ops) |
dd08ebf6 MB |
2792 | { |
2793 | int err; | |
2794 | int i; | |
2795 | ||
7a56bd0c RV |
2796 | if (XE_IOCTL_DBG(xe, args->pad || args->pad2) || |
2797 | XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) | |
2798 | return -EINVAL; | |
2799 | ||
ba6bbdc6 | 2800 | if (XE_IOCTL_DBG(xe, args->extensions)) |
dd08ebf6 MB |
2801 | return -EINVAL; |
2802 | ||
2803 | if (args->num_binds > 1) { | |
2804 | u64 __user *bind_user = | |
2805 | u64_to_user_ptr(args->vector_of_binds); | |
2806 | ||
35ed1d2b MB |
2807 | *bind_ops = kvmalloc_array(args->num_binds, |
2808 | sizeof(struct drm_xe_vm_bind_op), | |
2809 | GFP_KERNEL | __GFP_ACCOUNT); | |
dd08ebf6 MB |
2810 | if (!*bind_ops) |
2811 | return -ENOMEM; | |
2812 | ||
2813 | err = __copy_from_user(*bind_ops, bind_user, | |
2814 | sizeof(struct drm_xe_vm_bind_op) * | |
2815 | args->num_binds); | |
b8c1ba83 | 2816 | if (XE_IOCTL_DBG(xe, err)) { |
dd08ebf6 MB |
2817 | err = -EFAULT; |
2818 | goto free_bind_ops; | |
2819 | } | |
2820 | } else { | |
2821 | *bind_ops = &args->bind; | |
2822 | } | |
2823 | ||
2824 | for (i = 0; i < args->num_binds; ++i) { | |
2825 | u64 range = (*bind_ops)[i].range; | |
2826 | u64 addr = (*bind_ops)[i].addr; | |
2827 | u32 op = (*bind_ops)[i].op; | |
ea0640fc | 2828 | u32 flags = (*bind_ops)[i].flags; |
dd08ebf6 MB |
2829 | u32 obj = (*bind_ops)[i].obj; |
2830 | u64 obj_offset = (*bind_ops)[i].obj_offset; | |
aaa115ff | 2831 | u32 prefetch_region = (*bind_ops)[i].prefetch_mem_region_instance; |
d5dc73db | 2832 | bool is_null = flags & DRM_XE_VM_BIND_FLAG_NULL; |
e1fbc4f1 MA |
2833 | u16 pat_index = (*bind_ops)[i].pat_index; |
2834 | u16 coh_mode; | |
2835 | ||
2836 | if (XE_IOCTL_DBG(xe, pat_index >= xe->pat.n_entries)) { | |
2837 | err = -EINVAL; | |
2838 | goto free_bind_ops; | |
2839 | } | |
2840 | ||
2841 | pat_index = array_index_nospec(pat_index, xe->pat.n_entries); | |
2842 | (*bind_ops)[i].pat_index = pat_index; | |
2843 | coh_mode = xe_pat_index_get_coh_mode(xe, pat_index); | |
2844 | if (XE_IOCTL_DBG(xe, !coh_mode)) { /* hw reserved */ | |
2845 | err = -EINVAL; | |
2846 | goto free_bind_ops; | |
2847 | } | |
2848 | ||
2849 | if (XE_WARN_ON(coh_mode > XE_COH_AT_LEAST_1WAY)) { | |
2850 | err = -EINVAL; | |
2851 | goto free_bind_ops; | |
2852 | } | |
1799c761 | 2853 | |
d5dc73db | 2854 | if (XE_IOCTL_DBG(xe, op > DRM_XE_VM_BIND_OP_PREFETCH) || |
ea0640fc | 2855 | XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) || |
b8c1ba83 FD |
2856 | XE_IOCTL_DBG(xe, obj && is_null) || |
2857 | XE_IOCTL_DBG(xe, obj_offset && is_null) || | |
d5dc73db | 2858 | XE_IOCTL_DBG(xe, op != DRM_XE_VM_BIND_OP_MAP && |
37430402 | 2859 | is_null) || |
b8c1ba83 | 2860 | XE_IOCTL_DBG(xe, !obj && |
d5dc73db | 2861 | op == DRM_XE_VM_BIND_OP_MAP && |
37430402 | 2862 | !is_null) || |
b8c1ba83 | 2863 | XE_IOCTL_DBG(xe, !obj && |
d5dc73db | 2864 | op == DRM_XE_VM_BIND_OP_UNMAP_ALL) || |
b8c1ba83 | 2865 | XE_IOCTL_DBG(xe, addr && |
d5dc73db | 2866 | op == DRM_XE_VM_BIND_OP_UNMAP_ALL) || |
b8c1ba83 | 2867 | XE_IOCTL_DBG(xe, range && |
d5dc73db | 2868 | op == DRM_XE_VM_BIND_OP_UNMAP_ALL) || |
b8c1ba83 | 2869 | XE_IOCTL_DBG(xe, obj && |
d5dc73db | 2870 | op == DRM_XE_VM_BIND_OP_MAP_USERPTR) || |
e1fbc4f1 MA |
2871 | XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE && |
2872 | op == DRM_XE_VM_BIND_OP_MAP_USERPTR) || | |
b8c1ba83 | 2873 | XE_IOCTL_DBG(xe, obj && |
d5dc73db | 2874 | op == DRM_XE_VM_BIND_OP_PREFETCH) || |
aaa115ff | 2875 | XE_IOCTL_DBG(xe, prefetch_region && |
d5dc73db | 2876 | op != DRM_XE_VM_BIND_OP_PREFETCH) || |
aaa115ff | 2877 | XE_IOCTL_DBG(xe, !(BIT(prefetch_region) & |
dd08ebf6 | 2878 | xe->info.mem_region_mask)) || |
b8c1ba83 | 2879 | XE_IOCTL_DBG(xe, obj && |
d5dc73db | 2880 | op == DRM_XE_VM_BIND_OP_UNMAP)) { |
dd08ebf6 MB |
2881 | err = -EINVAL; |
2882 | goto free_bind_ops; | |
2883 | } | |
2884 | ||
b8c1ba83 FD |
2885 | if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) || |
2886 | XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) || | |
2887 | XE_IOCTL_DBG(xe, range & ~PAGE_MASK) || | |
f3e9b1f4 | 2888 | XE_IOCTL_DBG(xe, !range && |
d5dc73db | 2889 | op != DRM_XE_VM_BIND_OP_UNMAP_ALL)) { |
dd08ebf6 MB |
2890 | err = -EINVAL; |
2891 | goto free_bind_ops; | |
2892 | } | |
2893 | } | |
2894 | ||
2895 | return 0; | |
2896 | ||
2897 | free_bind_ops: | |
2898 | if (args->num_binds > 1) | |
35ed1d2b | 2899 | kvfree(*bind_ops); |
dd08ebf6 MB |
2900 | return err; |
2901 | } | |
2902 | ||
eb9702ad MB |
2903 | static int vm_bind_ioctl_signal_fences(struct xe_vm *vm, |
2904 | struct xe_exec_queue *q, | |
2905 | struct xe_sync_entry *syncs, | |
2906 | int num_syncs) | |
2907 | { | |
2908 | struct dma_fence *fence; | |
2909 | int i, err = 0; | |
2910 | ||
2911 | fence = xe_sync_in_fence_get(syncs, num_syncs, | |
2912 | to_wait_exec_queue(vm, q), vm); | |
2913 | if (IS_ERR(fence)) | |
2914 | return PTR_ERR(fence); | |
2915 | ||
2916 | for (i = 0; i < num_syncs; i++) | |
2917 | xe_sync_entry_signal(&syncs[i], NULL, fence); | |
2918 | ||
2919 | xe_exec_queue_last_fence_set(to_wait_exec_queue(vm, q), vm, | |
2920 | fence); | |
eb9702ad MB |
2921 | dma_fence_put(fence); |
2922 | ||
2923 | return err; | |
2924 | } | |
2925 | ||
dd08ebf6 MB |
2926 | int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file) |
2927 | { | |
2928 | struct xe_device *xe = to_xe_device(dev); | |
2929 | struct xe_file *xef = to_xe_file(file); | |
2930 | struct drm_xe_vm_bind *args = data; | |
2931 | struct drm_xe_sync __user *syncs_user; | |
2932 | struct xe_bo **bos = NULL; | |
b06d47be | 2933 | struct drm_gpuva_ops **ops = NULL; |
dd08ebf6 | 2934 | struct xe_vm *vm; |
9b9529ce | 2935 | struct xe_exec_queue *q = NULL; |
d1df9bfb | 2936 | u32 num_syncs, num_ufence = 0; |
dd08ebf6 MB |
2937 | struct xe_sync_entry *syncs = NULL; |
2938 | struct drm_xe_vm_bind_op *bind_ops; | |
b06d47be | 2939 | LIST_HEAD(ops_list); |
dd08ebf6 | 2940 | int err; |
b06d47be | 2941 | int i; |
dd08ebf6 | 2942 | |
d3d76739 | 2943 | err = vm_bind_ioctl_check_args(xe, args, &bind_ops); |
dd08ebf6 MB |
2944 | if (err) |
2945 | return err; | |
2946 | ||
9b9529ce FD |
2947 | if (args->exec_queue_id) { |
2948 | q = xe_exec_queue_lookup(xef, args->exec_queue_id); | |
2949 | if (XE_IOCTL_DBG(xe, !q)) { | |
dd08ebf6 | 2950 | err = -ENOENT; |
9d858b69 | 2951 | goto free_objs; |
dd08ebf6 | 2952 | } |
9d858b69 | 2953 | |
9b9529ce | 2954 | if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) { |
dd08ebf6 | 2955 | err = -EINVAL; |
9b9529ce | 2956 | goto put_exec_queue; |
dd08ebf6 MB |
2957 | } |
2958 | } | |
2959 | ||
9d858b69 | 2960 | vm = xe_vm_lookup(xef, args->vm_id); |
b8c1ba83 | 2961 | if (XE_IOCTL_DBG(xe, !vm)) { |
9d858b69 | 2962 | err = -EINVAL; |
9b9529ce | 2963 | goto put_exec_queue; |
9d858b69 MB |
2964 | } |
2965 | ||
2966 | err = down_write_killable(&vm->lock); | |
2967 | if (err) | |
2968 | goto put_vm; | |
2969 | ||
b8c1ba83 | 2970 | if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) { |
9d858b69 MB |
2971 | err = -ENOENT; |
2972 | goto release_vm_lock; | |
2973 | } | |
2974 | ||
dd08ebf6 MB |
2975 | for (i = 0; i < args->num_binds; ++i) { |
2976 | u64 range = bind_ops[i].range; | |
2977 | u64 addr = bind_ops[i].addr; | |
2978 | ||
b8c1ba83 FD |
2979 | if (XE_IOCTL_DBG(xe, range > vm->size) || |
2980 | XE_IOCTL_DBG(xe, addr > vm->size - range)) { | |
dd08ebf6 | 2981 | err = -EINVAL; |
9d858b69 | 2982 | goto release_vm_lock; |
dd08ebf6 | 2983 | } |
dd08ebf6 MB |
2984 | } |
2985 | ||
eb9702ad | 2986 | if (args->num_binds) { |
35ed1d2b MB |
2987 | bos = kvcalloc(args->num_binds, sizeof(*bos), |
2988 | GFP_KERNEL | __GFP_ACCOUNT); | |
eb9702ad MB |
2989 | if (!bos) { |
2990 | err = -ENOMEM; | |
2991 | goto release_vm_lock; | |
2992 | } | |
dd08ebf6 | 2993 | |
35ed1d2b MB |
2994 | ops = kvcalloc(args->num_binds, sizeof(*ops), |
2995 | GFP_KERNEL | __GFP_ACCOUNT); | |
eb9702ad MB |
2996 | if (!ops) { |
2997 | err = -ENOMEM; | |
2998 | goto release_vm_lock; | |
2999 | } | |
dd08ebf6 MB |
3000 | } |
3001 | ||
3002 | for (i = 0; i < args->num_binds; ++i) { | |
3003 | struct drm_gem_object *gem_obj; | |
3004 | u64 range = bind_ops[i].range; | |
3005 | u64 addr = bind_ops[i].addr; | |
3006 | u32 obj = bind_ops[i].obj; | |
3007 | u64 obj_offset = bind_ops[i].obj_offset; | |
e1fbc4f1 MA |
3008 | u16 pat_index = bind_ops[i].pat_index; |
3009 | u16 coh_mode; | |
dd08ebf6 MB |
3010 | |
3011 | if (!obj) | |
3012 | continue; | |
3013 | ||
3014 | gem_obj = drm_gem_object_lookup(file, obj); | |
b8c1ba83 | 3015 | if (XE_IOCTL_DBG(xe, !gem_obj)) { |
dd08ebf6 MB |
3016 | err = -ENOENT; |
3017 | goto put_obj; | |
3018 | } | |
3019 | bos[i] = gem_to_xe_bo(gem_obj); | |
3020 | ||
b8c1ba83 FD |
3021 | if (XE_IOCTL_DBG(xe, range > bos[i]->size) || |
3022 | XE_IOCTL_DBG(xe, obj_offset > | |
dd08ebf6 MB |
3023 | bos[i]->size - range)) { |
3024 | err = -EINVAL; | |
3025 | goto put_obj; | |
3026 | } | |
3027 | ||
3028 | if (bos[i]->flags & XE_BO_INTERNAL_64K) { | |
b8c1ba83 | 3029 | if (XE_IOCTL_DBG(xe, obj_offset & |
dd08ebf6 | 3030 | XE_64K_PAGE_MASK) || |
b8c1ba83 FD |
3031 | XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) || |
3032 | XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) { | |
dd08ebf6 MB |
3033 | err = -EINVAL; |
3034 | goto put_obj; | |
3035 | } | |
3036 | } | |
e1fbc4f1 MA |
3037 | |
3038 | coh_mode = xe_pat_index_get_coh_mode(xe, pat_index); | |
3039 | if (bos[i]->cpu_caching) { | |
3040 | if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE && | |
3041 | bos[i]->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) { | |
3042 | err = -EINVAL; | |
3043 | goto put_obj; | |
3044 | } | |
3045 | } else if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE)) { | |
3046 | /* | |
3047 | * Imported dma-buf from a different device should | |
3048 | * require 1way or 2way coherency since we don't know | |
3049 | * how it was mapped on the CPU. Just assume it is |
3050 | * potentially cached on CPU side. | |
3051 | */ | |
3052 | err = -EINVAL; | |
3053 | goto put_obj; | |
3054 | } | |
dd08ebf6 MB |
3055 | } |
3056 | ||
3057 | if (args->num_syncs) { | |
3058 | syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL); | |
3059 | if (!syncs) { | |
3060 | err = -ENOMEM; | |
3061 | goto put_obj; | |
3062 | } | |
3063 | } | |
3064 | ||
3065 | syncs_user = u64_to_user_ptr(args->syncs); | |
3066 | for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) { | |
3067 | err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs], | |
53bf60f6 | 3068 | &syncs_user[num_syncs], |
eb9702ad MB |
3069 | (xe_vm_in_lr_mode(vm) ? |
3070 | SYNC_PARSE_FLAG_LR_MODE : 0) | | |
3071 | (!args->num_binds ? | |
3072 | SYNC_PARSE_FLAG_DISALLOW_USER_FENCE : 0)); | |
dd08ebf6 MB |
3073 | if (err) |
3074 | goto free_syncs; | |
d1df9bfb MB |
3075 | |
3076 | if (xe_sync_is_ufence(&syncs[num_syncs])) | |
3077 | num_ufence++; | |
3078 | } | |
3079 | ||
3080 | if (XE_IOCTL_DBG(xe, num_ufence > 1)) { | |
3081 | err = -EINVAL; | |
3082 | goto free_syncs; | |
dd08ebf6 MB |
3083 | } |
3084 | ||
eb9702ad MB |
3085 | if (!args->num_binds) { |
3086 | err = -ENODATA; | |
3087 | goto free_syncs; | |
3088 | } | |
3089 | ||
dd08ebf6 MB |
3090 | for (i = 0; i < args->num_binds; ++i) { |
3091 | u64 range = bind_ops[i].range; | |
3092 | u64 addr = bind_ops[i].addr; | |
3093 | u32 op = bind_ops[i].op; | |
ea0640fc | 3094 | u32 flags = bind_ops[i].flags; |
dd08ebf6 | 3095 | u64 obj_offset = bind_ops[i].obj_offset; |
aaa115ff | 3096 | u32 prefetch_region = bind_ops[i].prefetch_mem_region_instance; |
e1fbc4f1 | 3097 | u16 pat_index = bind_ops[i].pat_index; |
dd08ebf6 | 3098 | |
b06d47be | 3099 | ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset, |
ea0640fc | 3100 | addr, range, op, flags, |
cad4a0d6 | 3101 | prefetch_region, pat_index); |
b06d47be MB |
3102 | if (IS_ERR(ops[i])) { |
3103 | err = PTR_ERR(ops[i]); | |
3104 | ops[i] = NULL; | |
3105 | goto unwind_ops; | |
dd08ebf6 | 3106 | } |
617eebb9 MB |
3107 | |
3108 | err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs, | |
3109 | &ops_list, | |
d3d76739 | 3110 | i == args->num_binds - 1); |
617eebb9 MB |
3111 | if (err) |
3112 | goto unwind_ops; | |
dd08ebf6 MB |
3113 | } |
3114 | ||
617eebb9 MB |
3115 | /* Nothing to do */ |
3116 | if (list_empty(&ops_list)) { | |
3117 | err = -ENODATA; | |
b06d47be | 3118 | goto unwind_ops; |
617eebb9 | 3119 | } |
dd08ebf6 | 3120 | |
f3e9b1f4 MB |
3121 | xe_vm_get(vm); |
3122 | if (q) | |
3123 | xe_exec_queue_get(q); | |
3124 | ||
3125 | err = vm_bind_ioctl_ops_execute(vm, &ops_list); | |
3126 | ||
b06d47be | 3127 | up_write(&vm->lock); |
dd08ebf6 | 3128 | |
f3e9b1f4 MB |
3129 | if (q) |
3130 | xe_exec_queue_put(q); | |
3131 | xe_vm_put(vm); | |
3132 | ||
3133 | for (i = 0; bos && i < args->num_binds; ++i) | |
b06d47be | 3134 | xe_bo_put(bos[i]); |
dd08ebf6 | 3135 | |
35ed1d2b MB |
3136 | kvfree(bos); |
3137 | kvfree(ops); | |
b06d47be | 3138 | if (args->num_binds > 1) |
35ed1d2b | 3139 | kvfree(bind_ops); |
dd08ebf6 | 3140 | |
b06d47be | 3141 | return err; |
dd08ebf6 | 3142 | |
b06d47be MB |
3143 | unwind_ops: |
3144 | vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds); | |
dd08ebf6 | 3145 | free_syncs: |
eb9702ad MB |
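	/*
	 * -ENODATA means there was nothing to bind; signal any user syncs
	 * anyway so waiters are not left hanging, and let err reflect the
	 * result of that signalling instead.
	 */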
3146 | if (err == -ENODATA) |
3147 | err = vm_bind_ioctl_signal_fences(vm, q, syncs, num_syncs); | |
b06d47be | 3148 | while (num_syncs--) |
dd08ebf6 | 3149 | xe_sync_entry_cleanup(&syncs[num_syncs]); |
dd08ebf6 MB |
3150 | |
3151 | kfree(syncs); | |
3152 | put_obj: | |
b06d47be | 3153 | for (i = 0; i < args->num_binds; ++i) |
dd08ebf6 | 3154 | xe_bo_put(bos[i]); |
9d858b69 MB |
3155 | release_vm_lock: |
3156 | up_write(&vm->lock); | |
3157 | put_vm: | |
3158 | xe_vm_put(vm); | |
9b9529ce FD |
3159 | put_exec_queue: |
3160 | if (q) | |
3161 | xe_exec_queue_put(q); | |
dd08ebf6 | 3162 | free_objs: |
35ed1d2b MB |
3163 | kvfree(bos); |
3164 | kvfree(ops); | |
dd08ebf6 | 3165 | if (args->num_binds > 1) |
35ed1d2b | 3166 | kvfree(bind_ops); |
eb9702ad | 3167 | return err; |
dd08ebf6 MB |
3168 | } |
3169 | ||
d00e9cc2 TH |
3170 | /** |
3171 | * xe_vm_lock() - Lock the vm's dma_resv object | |
3172 | * @vm: The struct xe_vm whose lock is to be locked | |
3173 | * @intr: Whether to perform any wait interruptible | |
3174 | * | |
3175 | * Return: 0 on success, -EINTR if @intr is true and the wait for a | |
3176 | * contended lock was interrupted. If @intr is false, the function | |
3177 | * always returns 0. | |
dd08ebf6 | 3178 | */ |
d00e9cc2 | 3179 | int xe_vm_lock(struct xe_vm *vm, bool intr) |
dd08ebf6 | 3180 | { |
d00e9cc2 TH |
3181 | if (intr) |
3182 | return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL); | |
dd08ebf6 | 3183 | |
d00e9cc2 | 3184 | return dma_resv_lock(xe_vm_resv(vm), NULL); |
dd08ebf6 MB |
3185 | } |
3186 | ||
d00e9cc2 TH |
3187 | /** |
3188 | * xe_vm_unlock() - Unlock the vm's dma_resv object | |
3189 | * @vm: The struct xe_vm whose lock is to be released. | |
3190 | * | |
3191 | * Unlock the vm's dma_resv object that was locked by xe_vm_lock(). |
3192 | */ | |
3193 | void xe_vm_unlock(struct xe_vm *vm) | |
dd08ebf6 | 3194 | { |
b06d47be | 3195 | dma_resv_unlock(xe_vm_resv(vm)); |
dd08ebf6 MB |
3196 | } |
3197 | ||
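/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 *
 *	err = xe_vm_lock(vm, true);
 *	if (err)
 *		return err;	(err is -EINTR if the wait was interrupted)
 *	... access state protected by the vm's dma_resv ...
 *	xe_vm_unlock(vm);
 */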
3198 | /** | |
3199 | * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock | |
3200 | * @vma: VMA to invalidate | |
3201 | * | |
3202 | * Walks the list of page-table leaves, zeroing the entries owned by this |
3203 | * VMA, invalidates the TLBs, and blocks until the TLB invalidation is |
3204 | * complete. |
3205 | * |
3206 | * Return: 0 on success, negative error code otherwise. |
3207 | */ | |
3208 | int xe_vm_invalidate_vma(struct xe_vma *vma) | |
3209 | { | |
21ed3327 | 3210 | struct xe_device *xe = xe_vma_vm(vma)->xe; |
876611c2 MR |
3211 | struct xe_tile *tile; |
3212 | u32 tile_needs_invalidate = 0; | |
a5edc7cd | 3213 | int seqno[XE_MAX_TILES_PER_DEVICE]; |
dd08ebf6 MB |
3214 | u8 id; |
3215 | int ret; | |
3216 | ||
c73acc1e FD |
3217 | xe_assert(xe, xe_vm_in_fault_mode(xe_vma_vm(vma))); |
3218 | xe_assert(xe, !xe_vma_is_null(vma)); | |
dd08ebf6 MB |
3219 | trace_xe_vma_usm_invalidate(vma); |
3220 | ||
3221 | /* Check that we don't race with page-table updates */ | |
3222 | if (IS_ENABLED(CONFIG_PROVE_LOCKING)) { | |
3223 | if (xe_vma_is_userptr(vma)) { | |
3224 | WARN_ON_ONCE(!mmu_interval_check_retry | |
5bd24e78 TH |
3225 | (&to_userptr_vma(vma)->userptr.notifier, |
3226 | to_userptr_vma(vma)->userptr.notifier_seq)); | |
b06d47be | 3227 | WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)), |
dd08ebf6 MB |
3228 | DMA_RESV_USAGE_BOOKKEEP)); |
3229 | ||
3230 | } else { | |
21ed3327 | 3231 | xe_bo_assert_held(xe_vma_bo(vma)); |
dd08ebf6 MB |
3232 | } |
3233 | } | |
3234 | ||
876611c2 MR |
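	/*
	 * Two passes: first zap the PTEs and issue a TLB invalidation on
	 * every tile that has mappings for this VMA, then wait for all
	 * the invalidations to complete, letting the tiles invalidate in
	 * parallel.
	 */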
3235 | for_each_tile(tile, xe, id) { |
3236 | if (xe_pt_zap_ptes(tile, vma)) { | |
3237 | tile_needs_invalidate |= BIT(id); | |
dd08ebf6 | 3238 | xe_device_wmb(xe); |
876611c2 MR |
3239 | /* |
3240 | * FIXME: We potentially need to invalidate multiple | |
3241 | * GTs within the tile | |
3242 | */ | |
f6929e80 | 3243 | seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma); |
dd08ebf6 MB |
3244 | if (seqno[id] < 0) |
3245 | return seqno[id]; | |
3246 | } | |
3247 | } | |
3248 | ||
876611c2 MR |
3249 | for_each_tile(tile, xe, id) { |
3250 | if (tile_needs_invalidate & BIT(id)) { | |
f6929e80 | 3251 | ret = xe_gt_tlb_invalidation_wait(tile->primary_gt, seqno[id]); |
dd08ebf6 MB |
3252 | if (ret < 0) |
3253 | return ret; | |
3254 | } | |
3255 | } | |
3256 | ||
876611c2 | 3257 | vma->usm.tile_invalidated = vma->tile_mask; |
dd08ebf6 MB |
3258 | |
3259 | return 0; | |
3260 | } | |
3261 | ||
dd08ebf6 MB |
3262 | int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id) |
3263 | { | |
b06d47be | 3264 | struct drm_gpuva *gpuva; |
2a8477f7 | 3265 | bool is_vram; |
dd08ebf6 MB |
3266 | u64 addr; |
3267 | ||
3268 | if (!down_read_trylock(&vm->lock)) { | |
3269 | drm_printf(p, " Failed to acquire VM lock to dump capture\n"); |
3270 | return 0; | |
3271 | } | |
3272 | if (vm->pt_root[gt_id]) { | |
937b4be7 LDM |
3273 | addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, XE_PAGE_SIZE); |
3274 | is_vram = xe_bo_is_vram(vm->pt_root[gt_id]->bo); | |
3275 | drm_printf(p, " VM root: A:0x%llx %s\n", addr, | |
3276 | is_vram ? "VRAM" : "SYS"); | |
dd08ebf6 MB |
3277 | } |
3278 | ||
b06d47be MB |
3279 | drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) { |
3280 | struct xe_vma *vma = gpuva_to_vma(gpuva); | |
dd08ebf6 | 3281 | bool is_userptr = xe_vma_is_userptr(vma); |
37430402 | 3282 | bool is_null = xe_vma_is_null(vma); |
dd08ebf6 | 3283 | |
37430402 MB |
3284 | if (is_null) { |
3285 | addr = 0; | |
3286 | } else if (is_userptr) { | |
5bd24e78 | 3287 | struct sg_table *sg = to_userptr_vma(vma)->userptr.sg; |
dd08ebf6 MB |
3288 | struct xe_res_cursor cur; |
3289 | ||
5bd24e78 TH |
3290 | if (sg) { |
3291 | xe_res_first_sg(sg, 0, XE_PAGE_SIZE, &cur); | |
790bdc7c MB |
3292 | addr = xe_res_dma(&cur); |
3293 | } else { | |
3294 | addr = 0; | |
3295 | } | |
dd08ebf6 | 3296 | } else { |
937b4be7 LDM |
3297 | addr = __xe_bo_addr(xe_vma_bo(vma), 0, XE_PAGE_SIZE); |
3298 | is_vram = xe_bo_is_vram(xe_vma_bo(vma)); | |
dd08ebf6 MB |
3299 | } |
3300 | drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n", | |
21ed3327 MB |
3301 | xe_vma_start(vma), xe_vma_end(vma) - 1, |
3302 | xe_vma_size(vma), | |
37430402 MB |
3303 | addr, is_null ? "NULL" : is_userptr ? "USR" : |
3304 | is_vram ? "VRAM" : "SYS"); | |
dd08ebf6 MB |
3305 | } |
3306 | up_read(&vm->lock); | |
3307 | ||
3308 | return 0; | |
3309 | } | |
0eb2a18a ML |
3310 | |
3311 | struct xe_vm_snapshot { | |
3312 | unsigned long num_snaps; | |
3313 | struct { | |
3314 | u64 ofs, bo_ofs; | |
3315 | unsigned long len; | |
3316 | struct xe_bo *bo; | |
3317 | void *data; | |
3318 | struct mm_struct *mm; | |
3319 | } snap[]; | |
3320 | }; | |
3321 | ||
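/*
 * Snapshotting is split into two phases: xe_vm_snapshot_capture() runs in
 * atomic/reset context (GFP_NOWAIT, no copying) and only records what to
 * capture, taking BO and mm references, while
 * xe_vm_snapshot_capture_delayed() later copies the actual contents from
 * process context, where it may lock, vmap and fault.
 */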
3322 | struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm) | |
3323 | { | |
3324 | unsigned long num_snaps = 0, i; | |
3325 | struct xe_vm_snapshot *snap = NULL; | |
3326 | struct drm_gpuva *gpuva; | |
3327 | ||
3328 | if (!vm) | |
3329 | return NULL; | |
3330 | ||
3331 | mutex_lock(&vm->snap_mutex); | |
3332 | drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) { | |
3333 | if (gpuva->flags & XE_VMA_DUMPABLE) | |
3334 | num_snaps++; | |
3335 | } | |
3336 | ||
3337 | if (num_snaps) | |
3338 | snap = kvzalloc(offsetof(struct xe_vm_snapshot, snap[num_snaps]), GFP_NOWAIT); | |
3339 | if (!snap) | |
3340 | goto out_unlock; | |
3341 | ||
3342 | snap->num_snaps = num_snaps; | |
3343 | i = 0; | |
3344 | drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) { | |
3345 | struct xe_vma *vma = gpuva_to_vma(gpuva); | |
3346 | struct xe_bo *bo = vma->gpuva.gem.obj ? | |
3347 | gem_to_xe_bo(vma->gpuva.gem.obj) : NULL; | |
3348 | ||
3349 | if (!(gpuva->flags & XE_VMA_DUMPABLE)) | |
3350 | continue; | |
3351 | ||
3352 | snap->snap[i].ofs = xe_vma_start(vma); | |
3353 | snap->snap[i].len = xe_vma_size(vma); | |
3354 | if (bo) { | |
3355 | snap->snap[i].bo = xe_bo_get(bo); | |
3356 | snap->snap[i].bo_ofs = xe_vma_bo_offset(vma); | |
3357 | } else if (xe_vma_is_userptr(vma)) { | |
3358 | struct mm_struct *mm = | |
3359 | to_userptr_vma(vma)->userptr.notifier.mm; | |
3360 | ||
3361 | if (mmget_not_zero(mm)) | |
3362 | snap->snap[i].mm = mm; | |
3363 | else | |
3364 | snap->snap[i].data = ERR_PTR(-EFAULT); | |
3365 | ||
3366 | snap->snap[i].bo_ofs = xe_vma_userptr(vma); | |
3367 | } else { | |
3368 | snap->snap[i].data = ERR_PTR(-ENOENT); | |
3369 | } | |
3370 | i++; | |
3371 | } | |
3372 | ||
3373 | out_unlock: | |
3374 | mutex_unlock(&vm->snap_mutex); | |
3375 | return snap; | |
3376 | } | |
3377 | ||
3378 | void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap) | |
3379 | { | |
784b3410 ML |
3380 | if (!snap) |
3381 | return; | |
3382 | ||
0eb2a18a ML |
3383 | for (int i = 0; i < snap->num_snaps; i++) { |
3384 | struct xe_bo *bo = snap->snap[i].bo; | |
3385 | struct iosys_map src; | |
3386 | int err; | |
3387 | ||
3388 | if (IS_ERR(snap->snap[i].data)) | |
3389 | continue; | |
3390 | ||
3391 | snap->snap[i].data = kvmalloc(snap->snap[i].len, GFP_USER); | |
3392 | if (!snap->snap[i].data) { | |
3393 | snap->snap[i].data = ERR_PTR(-ENOMEM); | |
3394 | goto cleanup_bo; | |
3395 | } | |
3396 | ||
3397 | if (bo) { | |
3398 | dma_resv_lock(bo->ttm.base.resv, NULL); | |
3399 | err = ttm_bo_vmap(&bo->ttm, &src); | |
3400 | if (!err) { | |
3401 | xe_map_memcpy_from(xe_bo_device(bo), | |
3402 | snap->snap[i].data, | |
3403 | &src, snap->snap[i].bo_ofs, | |
3404 | snap->snap[i].len); | |
3405 | ttm_bo_vunmap(&bo->ttm, &src); | |
3406 | } | |
3407 | dma_resv_unlock(bo->ttm.base.resv); | |
3408 | } else { | |
3409 | void __user *userptr = u64_to_user_ptr(snap->snap[i].bo_ofs); |
3410 | ||
3411 | kthread_use_mm(snap->snap[i].mm); | |
3412 | if (!copy_from_user(snap->snap[i].data, userptr, snap->snap[i].len)) | |
3413 | err = 0; | |
3414 | else | |
3415 | err = -EFAULT; | |
3416 | kthread_unuse_mm(snap->snap[i].mm); | |
3417 | ||
3418 | mmput(snap->snap[i].mm); | |
3419 | snap->snap[i].mm = NULL; | |
3420 | } | |
3421 | ||
3422 | if (err) { | |
3423 | kvfree(snap->snap[i].data); | |
3424 | snap->snap[i].data = ERR_PTR(err); | |
3425 | } | |
3426 | ||
3427 | cleanup_bo: | |
3428 | xe_bo_put(bo); | |
3429 | snap->snap[i].bo = NULL; | |
3430 | } | |
3431 | } | |
3432 | ||
3433 | void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p) | |
3434 | { | |
3435 | unsigned long i, j; | |
3436 | ||
3437 | for (i = 0; i < snap->num_snaps; i++) { | |
3438 | if (IS_ERR(snap->snap[i].data)) | |
3439 | goto uncaptured; | |
3440 | ||
3441 | drm_printf(p, "[%llx].length: 0x%lx\n", snap->snap[i].ofs, snap->snap[i].len); | |
3442 | drm_printf(p, "[%llx].data: ", | |
3443 | snap->snap[i].ofs); | |
3444 | ||
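		/*
		 * ascii85_encode() emits "z" for an all-zero word and five
		 * printable characters otherwise, keeping the dump compact.
		 */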
3445 | for (j = 0; j < snap->snap[i].len; j += sizeof(u32)) { | |
3446 | u32 *val = snap->snap[i].data + j; | |
3447 | char dumped[ASCII85_BUFSZ]; | |
3448 | ||
3449 | drm_puts(p, ascii85_encode(*val, dumped)); | |
3450 | } | |
3451 | ||
3452 | drm_puts(p, "\n"); | |
3453 | continue; | |
3454 | ||
3455 | uncaptured: | |
3456 | drm_printf(p, "Unable to capture range [%llx-%llx]: %li\n", | |
3457 | snap->snap[i].ofs, snap->snap[i].ofs + snap->snap[i].len - 1, | |
3458 | PTR_ERR(snap->snap[i].data)); | |
3459 | } | |
3460 | } | |
3461 | ||
3462 | void xe_vm_snapshot_free(struct xe_vm_snapshot *snap) | |
3463 | { | |
3464 | unsigned long i; | |
3465 | ||
3466 | if (!snap) | |
3467 | return; | |
3468 | ||
3469 | for (i = 0; i < snap->num_snaps; i++) { | |
3470 | if (!IS_ERR(snap->snap[i].data)) | |
3471 | kvfree(snap->snap[i].data); | |
3472 | xe_bo_put(snap->snap[i].bo); | |
3473 | if (snap->snap[i].mm) | |
3474 | mmput(snap->snap[i].mm); | |
3475 | } | |
3476 | kvfree(snap); | |
3477 | } |
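/*
 * Illustrative snapshot lifecycle, assuming a devcoredump-style caller
 * (hypothetical, not part of this file):
 *
 *	snap = xe_vm_snapshot_capture(vm);	(atomic-safe, GFP_NOWAIT)
 *	xe_vm_snapshot_capture_delayed(snap);	(process context, may sleep)
 *	xe_vm_snapshot_print(snap, p);		(emit ascii85 dump)
 *	xe_vm_snapshot_free(snap);		(drop BO/mm refs and data)
 */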