Commit | Line | Data |
---|---|---|
dd08ebf6 MB |
1 | // SPDX-License-Identifier: MIT |
2 | /* | |
3 | * Copyright © 2022 Intel Corporation | |
4 | */ | |
5 | ||
ea9f879d LDM |
6 | #include "xe_exec.h" |
7 | ||
dd08ebf6 | 8 | #include <drm/drm_device.h> |
d490ecf5 | 9 | #include <drm/drm_exec.h> |
dd08ebf6 MB |
10 | #include <drm/drm_file.h> |
11 | #include <drm/xe_drm.h> | |
34f89ac8 | 12 | #include <linux/delay.h> |
dd08ebf6 MB |
13 | |
14 | #include "xe_bo.h" | |
15 | #include "xe_device.h" | |
c22a4ed0 | 16 | #include "xe_exec_queue.h" |
dd08ebf6 | 17 | #include "xe_macros.h" |
8ae8a2e8 | 18 | #include "xe_ring_ops_types.h" |
dd08ebf6 MB |
19 | #include "xe_sched_job.h" |
20 | #include "xe_sync.h" | |
21 | #include "xe_vm.h" | |
22 | ||
23 | /** | |
24 | * DOC: Execbuf (User GPU command submission) | |
25 | * | |
26 | * Execs have historically been rather complicated in DRM drivers (at least in | |
27 | * the i915) because a few things: | |
28 | * | |
29 | * - Passing in a list BO which are read / written to creating implicit syncs | |
30 | * - Binding at exec time | |
31 | * - Flow controlling the ring at exec time | |
32 | * | |
33 | * In XE we avoid all of this complication by not allowing a BO list to be | |
34 | * passed into an exec, using the dma-buf implicit sync uAPI, have binds as | |
35 | * seperate operations, and using the DRM scheduler to flow control the ring. | |
36 | * Let's deep dive on each of these. | |
37 | * | |
38 | * We can get away from a BO list by forcing the user to use in / out fences on | |
39 | * every exec rather than the kernel tracking dependencies of BO (e.g. if the | |
40 | * user knows an exec writes to a BO and reads from the BO in the next exec, it | |
41 | * is the user's responsibility to pass in / out fence between the two execs). | |
42 | * | |
43 | * Implicit dependencies for external BOs are handled by using the dma-buf | |
44 | * implicit dependency uAPI (TODO: add link). To make this works each exec must | |
45 | * install the job's fence into the DMA_RESV_USAGE_WRITE slot of every external | |
46 | * BO mapped in the VM. | |
47 | * | |
48 | * We do not allow a user to trigger a bind at exec time rather we have a VM | |
49 | * bind IOCTL which uses the same in / out fence interface as exec. In that | |
50 | * sense, a VM bind is basically the same operation as an exec from the user | |
51 | * perspective. e.g. If an exec depends on a VM bind use the in / out fence | |
52 | * interface (struct drm_xe_sync) to synchronize like syncing between two | |
53 | * dependent execs. | |
54 | * | |
55 | * Although a user cannot trigger a bind, we still have to rebind userptrs in | |
56 | * the VM that have been invalidated since the last exec, likewise we also have | |
57 | * to rebind BOs that have been evicted by the kernel. We schedule these rebinds | |
58 | * behind any pending kernel operations on any external BOs in VM or any BOs | |
59 | * private to the VM. This is accomplished by the rebinds waiting on BOs | |
60 | * DMA_RESV_USAGE_KERNEL slot (kernel ops) and kernel ops waiting on all BOs | |
61 | * slots (inflight execs are in the DMA_RESV_USAGE_BOOKING for private BOs and | |
62 | * in DMA_RESV_USAGE_WRITE for external BOs). | |
63 | * | |
64 | * Rebinds / dma-resv usage applies to non-compute mode VMs only as for compute | |
65 | * mode VMs we use preempt fences and a rebind worker (TODO: add link). | |
66 | * | |
67 | * There is no need to flow control the ring in the exec as we write the ring at | |
68 | * submission time and set the DRM scheduler max job limit SIZE_OF_RING / | |
69 | * MAX_JOB_SIZE. The DRM scheduler will then hold all jobs until space in the | |
70 | * ring is available. | |
71 | * | |
72 | * All of this results in a rather simple exec implementation. | |
73 | * | |
74 | * Flow | |
75 | * ~~~~ | |
76 | * | |
77 | * .. code-block:: | |
78 | * | |
79 | * Parse input arguments | |
80 | * Wait for any async VM bind passed as in-fences to start | |
81 | * <----------------------------------------------------------------------| | |
82 | * Lock global VM lock in read mode | | |
83 | * Pin userptrs (also finds userptr invalidated since last exec) | | |
84 | * Lock exec (VM dma-resv lock, external BOs dma-resv locks) | | |
85 | * Validate BOs that have been evicted | | |
86 | * Create job | | |
87 | * Rebind invalidated userptrs + evicted BOs (non-compute-mode) | | |
88 | * Add rebind fence dependency to job | | |
89 | * Add job VM dma-resv bookkeeping slot (non-compute mode) | | |
90 | * Add job to external BOs dma-resv write slots (non-compute mode) | | |
91 | * Check if any userptrs invalidated since pin ------ Drop locks ---------| | |
92 | * Install in / out fences for job | |
93 | * Submit job | |
94 | * Unlock all | |
95 | */ | |
96 | ||
24f947d5 | 97 | static int xe_exec_fn(struct drm_gpuvm_exec *vm_exec) |
dd08ebf6 | 98 | { |
29f424eb MA |
99 | struct xe_vm *vm = container_of(vm_exec->vm, struct xe_vm, gpuvm); |
100 | struct drm_gem_object *obj; | |
101 | unsigned long index; | |
102 | int num_fences; | |
103 | int ret; | |
104 | ||
105 | ret = drm_gpuvm_validate(vm_exec->vm, &vm_exec->exec); | |
106 | if (ret) | |
107 | return ret; | |
108 | ||
109 | /* | |
f4e8ab46 MA |
110 | * 1 fence slot for the final submit, and 1 more for every per-tile for |
111 | * GPU bind and 1 extra for CPU bind. Note that there are potentially | |
112 | * many vma per object/dma-resv, however the fence slot will just be | |
113 | * re-used, since they are largely the same timeline and the seqno | |
114 | * should be in order. In the case of CPU bind there is dummy fence used | |
115 | * for all CPU binds, so no need to have a per-tile slot for that. | |
29f424eb | 116 | */ |
f4e8ab46 | 117 | num_fences = 1 + 1 + vm->xe->info.tile_count; |
29f424eb MA |
118 | |
119 | /* | |
120 | * We don't know upfront exactly how many fence slots we will need at | |
121 | * the start of the exec, since the TTM bo_validate above can consume | |
122 | * numerous fence slots. Also due to how the dma_resv_reserve_fences() | |
123 | * works it only ensures that at least that many fence slots are | |
124 | * available i.e if there are already 10 slots available and we reserve | |
125 | * two more, it can just noop without reserving anything. With this it | |
126 | * is quite possible that TTM steals some of the fence slots and then | |
127 | * when it comes time to do the vma binding and final exec stage we are | |
128 | * lacking enough fence slots, leading to some nasty BUG_ON() when | |
129 | * adding the fences. Hence just add our own fences here, after the | |
130 | * validate stage. | |
131 | */ | |
132 | drm_exec_for_each_locked_object(&vm_exec->exec, index, obj) { | |
133 | ret = dma_resv_reserve_fences(obj->resv, num_fences); | |
134 | if (ret) | |
135 | return ret; | |
136 | } | |
137 | ||
138 | return 0; | |
dd08ebf6 MB |
139 | } |
140 | ||
dd08ebf6 MB |
141 | int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file) |
142 | { | |
143 | struct xe_device *xe = to_xe_device(dev); | |
144 | struct xe_file *xef = to_xe_file(file); | |
145 | struct drm_xe_exec *args = data; | |
146 | struct drm_xe_sync __user *syncs_user = u64_to_user_ptr(args->syncs); | |
147 | u64 __user *addresses_user = u64_to_user_ptr(args->address); | |
9b9529ce | 148 | struct xe_exec_queue *q; |
dd08ebf6 MB |
149 | struct xe_sync_entry *syncs = NULL; |
150 | u64 addresses[XE_HW_ENGINE_MAX_INSTANCE]; | |
24f947d5 TH |
151 | struct drm_gpuvm_exec vm_exec = {.extra.fn = xe_exec_fn}; |
152 | struct drm_exec *exec = &vm_exec.exec; | |
d1df9bfb | 153 | u32 i, num_syncs = 0, num_ufence = 0; |
dd08ebf6 MB |
154 | struct xe_sched_job *job; |
155 | struct dma_fence *rebind_fence; | |
156 | struct xe_vm *vm; | |
97d0047c | 157 | bool write_locked, skip_retry = false; |
d490ecf5 | 158 | ktime_t end = 0; |
dd08ebf6 MB |
159 | int err = 0; |
160 | ||
b8c1ba83 FD |
161 | if (XE_IOCTL_DBG(xe, args->extensions) || |
162 | XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) || | |
163 | XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) | |
dd08ebf6 MB |
164 | return -EINVAL; |
165 | ||
9b9529ce FD |
166 | q = xe_exec_queue_lookup(xef, args->exec_queue_id); |
167 | if (XE_IOCTL_DBG(xe, !q)) | |
dd08ebf6 MB |
168 | return -ENOENT; |
169 | ||
9b9529ce | 170 | if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM)) |
dd08ebf6 MB |
171 | return -EINVAL; |
172 | ||
eb9702ad MB |
173 | if (XE_IOCTL_DBG(xe, args->num_batch_buffer && |
174 | q->width != args->num_batch_buffer)) | |
dd08ebf6 MB |
175 | return -EINVAL; |
176 | ||
9b9529ce | 177 | if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_BANNED)) { |
dd08ebf6 | 178 | err = -ECANCELED; |
9b9529ce | 179 | goto err_exec_queue; |
dd08ebf6 MB |
180 | } |
181 | ||
182 | if (args->num_syncs) { | |
183 | syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL); | |
184 | if (!syncs) { | |
185 | err = -ENOMEM; | |
9b9529ce | 186 | goto err_exec_queue; |
dd08ebf6 MB |
187 | } |
188 | } | |
189 | ||
9b9529ce | 190 | vm = q->vm; |
dd08ebf6 MB |
191 | |
192 | for (i = 0; i < args->num_syncs; i++) { | |
193 | err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs++], | |
53bf60f6 MB |
194 | &syncs_user[i], SYNC_PARSE_FLAG_EXEC | |
195 | (xe_vm_in_lr_mode(vm) ? | |
196 | SYNC_PARSE_FLAG_LR_MODE : 0)); | |
dd08ebf6 MB |
197 | if (err) |
198 | goto err_syncs; | |
d1df9bfb MB |
199 | |
200 | if (xe_sync_is_ufence(&syncs[i])) | |
201 | num_ufence++; | |
202 | } | |
203 | ||
204 | if (XE_IOCTL_DBG(xe, num_ufence > 1)) { | |
205 | err = -EINVAL; | |
206 | goto err_syncs; | |
dd08ebf6 MB |
207 | } |
208 | ||
9b9529ce | 209 | if (xe_exec_queue_is_parallel(q)) { |
dd08ebf6 | 210 | err = __copy_from_user(addresses, addresses_user, sizeof(u64) * |
9b9529ce | 211 | q->width); |
dd08ebf6 MB |
212 | if (err) { |
213 | err = -EFAULT; | |
214 | goto err_syncs; | |
215 | } | |
216 | } | |
217 | ||
dd08ebf6 | 218 | retry: |
fdb6a053 | 219 | if (!xe_vm_in_lr_mode(vm) && xe_vm_userptr_check_repin(vm)) { |
dd08ebf6 MB |
220 | err = down_write_killable(&vm->lock); |
221 | write_locked = true; | |
222 | } else { | |
223 | /* We don't allow execs while the VM is in error state */ | |
224 | err = down_read_interruptible(&vm->lock); | |
225 | write_locked = false; | |
226 | } | |
227 | if (err) | |
228 | goto err_syncs; | |
229 | ||
dd08ebf6 MB |
230 | if (write_locked) { |
231 | err = xe_vm_userptr_pin(vm); | |
232 | downgrade_write(&vm->lock); | |
233 | write_locked = false; | |
234 | if (err) | |
235 | goto err_unlock_list; | |
236 | } | |
237 | ||
24f947d5 | 238 | vm_exec.vm = &vm->gpuvm; |
24f947d5 TH |
239 | vm_exec.flags = DRM_EXEC_INTERRUPTIBLE_WAIT; |
240 | if (xe_vm_in_lr_mode(vm)) { | |
d2197029 | 241 | drm_exec_init(exec, vm_exec.flags, 0); |
24f947d5 TH |
242 | } else { |
243 | err = drm_gpuvm_exec_lock(&vm_exec); | |
244 | if (err) { | |
245 | if (xe_vm_validate_should_retry(exec, err, &end)) | |
246 | err = -EAGAIN; | |
d490ecf5 TH |
247 | goto err_unlock_list; |
248 | } | |
d490ecf5 | 249 | } |
dd08ebf6 | 250 | |
9b9529ce | 251 | if (xe_vm_is_closed_or_banned(q->vm)) { |
9d858b69 MB |
252 | drm_warn(&xe->drm, "Trying to schedule after vm is closed or banned\n"); |
253 | err = -ECANCELED; | |
d490ecf5 | 254 | goto err_exec; |
dd08ebf6 MB |
255 | } |
256 | ||
eb9702ad MB |
257 | if (!args->num_batch_buffer) { |
258 | if (!xe_vm_in_lr_mode(vm)) { | |
259 | struct dma_fence *fence; | |
260 | ||
261 | fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm); | |
262 | if (IS_ERR(fence)) { | |
263 | err = PTR_ERR(fence); | |
264 | goto err_exec; | |
265 | } | |
266 | for (i = 0; i < num_syncs; i++) | |
267 | xe_sync_entry_signal(&syncs[i], NULL, fence); | |
268 | xe_exec_queue_last_fence_set(q, vm, fence); | |
269 | dma_fence_put(fence); | |
270 | } | |
271 | ||
272 | goto err_exec; | |
273 | } | |
274 | ||
9b9529ce | 275 | if (xe_exec_queue_is_lr(q) && xe_exec_queue_ring_full(q)) { |
97d0047c MB |
276 | err = -EWOULDBLOCK; /* Aliased to -EAGAIN */ |
277 | skip_retry = true; | |
d490ecf5 | 278 | goto err_exec; |
8ae8a2e8 MB |
279 | } |
280 | ||
9b9529ce | 281 | job = xe_sched_job_create(q, xe_exec_queue_is_parallel(q) ? |
dd08ebf6 MB |
282 | addresses : &args->address); |
283 | if (IS_ERR(job)) { | |
284 | err = PTR_ERR(job); | |
d490ecf5 | 285 | goto err_exec; |
dd08ebf6 MB |
286 | } |
287 | ||
288 | /* | |
289 | * Rebind any invalidated userptr or evicted BOs in the VM, non-compute | |
290 | * VM mode only. | |
291 | */ | |
292 | rebind_fence = xe_vm_rebind(vm, false); | |
293 | if (IS_ERR(rebind_fence)) { | |
294 | err = PTR_ERR(rebind_fence); | |
295 | goto err_put_job; | |
296 | } | |
297 | ||
298 | /* | |
299 | * We store the rebind_fence in the VM so subsequent execs don't get | |
300 | * scheduled before the rebinds of userptrs / evicted BOs is complete. | |
301 | */ | |
302 | if (rebind_fence) { | |
303 | dma_fence_put(vm->rebind_fence); | |
304 | vm->rebind_fence = rebind_fence; | |
305 | } | |
306 | if (vm->rebind_fence) { | |
307 | if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, | |
308 | &vm->rebind_fence->flags)) { | |
309 | dma_fence_put(vm->rebind_fence); | |
310 | vm->rebind_fence = NULL; | |
311 | } else { | |
312 | dma_fence_get(vm->rebind_fence); | |
313 | err = drm_sched_job_add_dependency(&job->drm, | |
314 | vm->rebind_fence); | |
315 | if (err) | |
316 | goto err_put_job; | |
317 | } | |
318 | } | |
319 | ||
320 | /* Wait behind munmap style rebinds */ | |
fdb6a053 | 321 | if (!xe_vm_in_lr_mode(vm)) { |
dd08ebf6 | 322 | err = drm_sched_job_add_resv_dependencies(&job->drm, |
b06d47be | 323 | xe_vm_resv(vm), |
dd08ebf6 MB |
324 | DMA_RESV_USAGE_KERNEL); |
325 | if (err) | |
326 | goto err_put_job; | |
327 | } | |
328 | ||
329 | for (i = 0; i < num_syncs && !err; i++) | |
330 | err = xe_sync_entry_add_deps(&syncs[i], job); | |
331 | if (err) | |
332 | goto err_put_job; | |
333 | ||
fdb6a053 | 334 | if (!xe_vm_in_lr_mode(vm)) { |
eb9702ad MB |
335 | err = xe_sched_job_last_fence_add_dep(job, vm); |
336 | if (err) | |
337 | goto err_put_job; | |
338 | ||
dd08ebf6 MB |
339 | err = down_read_interruptible(&vm->userptr.notifier_lock); |
340 | if (err) | |
341 | goto err_put_job; | |
342 | ||
343 | err = __xe_vm_userptr_needs_repin(vm); | |
344 | if (err) | |
345 | goto err_repin; | |
346 | } | |
347 | ||
348 | /* | |
349 | * Point of no return, if we error after this point just set an error on | |
350 | * the job and let the DRM scheduler / backend clean up the job. | |
351 | */ | |
352 | xe_sched_job_arm(job); | |
24f947d5 TH |
353 | if (!xe_vm_in_lr_mode(vm)) |
354 | drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, &job->drm.s_fence->finished, | |
355 | DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_WRITE); | |
dd08ebf6 MB |
356 | |
357 | for (i = 0; i < num_syncs; i++) | |
358 | xe_sync_entry_signal(&syncs[i], job, | |
359 | &job->drm.s_fence->finished); | |
360 | ||
9b9529ce FD |
361 | if (xe_exec_queue_is_lr(q)) |
362 | q->ring_ops->emit_job(job); | |
eb9702ad MB |
363 | if (!xe_vm_in_lr_mode(vm)) |
364 | xe_exec_queue_last_fence_set(q, vm, &job->drm.s_fence->finished); | |
dd08ebf6 | 365 | xe_sched_job_push(job); |
8e41443e | 366 | xe_vm_reactivate_rebind(vm); |
dd08ebf6 | 367 | |
fdb6a053 | 368 | if (!err && !xe_vm_in_lr_mode(vm)) { |
7ba4c5f0 MB |
369 | spin_lock(&xe->ttm.lru_lock); |
370 | ttm_lru_bulk_move_tail(&vm->lru_bulk_move); | |
371 | spin_unlock(&xe->ttm.lru_lock); | |
372 | } | |
373 | ||
dd08ebf6 | 374 | err_repin: |
fdb6a053 | 375 | if (!xe_vm_in_lr_mode(vm)) |
dd08ebf6 MB |
376 | up_read(&vm->userptr.notifier_lock); |
377 | err_put_job: | |
378 | if (err) | |
379 | xe_sched_job_put(job); | |
d490ecf5 | 380 | err_exec: |
24f947d5 | 381 | drm_exec_fini(exec); |
dd08ebf6 MB |
382 | err_unlock_list: |
383 | if (write_locked) | |
384 | up_write(&vm->lock); | |
385 | else | |
386 | up_read(&vm->lock); | |
97d0047c | 387 | if (err == -EAGAIN && !skip_retry) |
dd08ebf6 MB |
388 | goto retry; |
389 | err_syncs: | |
390 | for (i = 0; i < num_syncs; i++) | |
391 | xe_sync_entry_cleanup(&syncs[i]); | |
392 | kfree(syncs); | |
9b9529ce FD |
393 | err_exec_queue: |
394 | xe_exec_queue_put(q); | |
dd08ebf6 MB |
395 | |
396 | return err; | |
397 | } |