// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_sync.h"

#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>

#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>
#include <drm/xe_drm.h>

#include "xe_device_types.h"
#include "xe_exec_queue.h"
#include "xe_macros.h"
#include "xe_sched_job_types.h"

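/*
 * A user fence is signalled by writing a 64b value to a user-supplied address
 * once the backing dma-fence signals. The struct below carries everything the
 * deferred write needs: the target address, the value to write, and a
 * reference to the submitter's mm so the write can be performed from a worker.
 */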
struct user_fence {
	struct xe_device *xe;
	struct kref refcount;
	struct dma_fence_cb cb;
	struct work_struct worker;
	struct mm_struct *mm;
	u64 __user *addr;
	u64 value;
};

static void user_fence_destroy(struct kref *kref)
{
	struct user_fence *ufence = container_of(kref, struct user_fence,
						 refcount);

	mmdrop(ufence->mm);
	kfree(ufence);
}

static void user_fence_get(struct user_fence *ufence)
{
	kref_get(&ufence->refcount);
}

static void user_fence_put(struct user_fence *ufence)
{
	kref_put(&ufence->refcount, user_fence_destroy);
}

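/*
 * mmgrab() only pins the mm_struct itself, not the address space; the worker
 * must still raise the user count with mmget_not_zero() before it may
 * copy_to_user(). The matching mmdrop() is in user_fence_destroy().
 */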
static struct user_fence *user_fence_create(struct xe_device *xe, u64 addr,
					    u64 value)
{
	struct user_fence *ufence;

	ufence = kmalloc(sizeof(*ufence), GFP_KERNEL);
	if (!ufence)
		return NULL;

	ufence->xe = xe;
	kref_init(&ufence->refcount);
	ufence->addr = u64_to_user_ptr(addr);
	ufence->value = value;
	ufence->mm = current->mm;
	mmgrab(ufence->mm);

	return ufence;
}

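/*
 * Runs on a kworker: kthread_use_mm() makes the submitter's address space
 * current so copy_to_user() can write the fence value. If the process has
 * already exited (mmget_not_zero() fails), the write is skipped but waiters
 * on ufence_wq are still woken.
 */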
static void user_fence_worker(struct work_struct *w)
{
	struct user_fence *ufence = container_of(w, struct user_fence, worker);

	if (mmget_not_zero(ufence->mm)) {
		kthread_use_mm(ufence->mm);
		if (copy_to_user(ufence->addr, &ufence->value, sizeof(ufence->value)))
			XE_WARN_ON("Copy to user failed");
		kthread_unuse_mm(ufence->mm);
		mmput(ufence->mm);
	}

	wake_up_all(&ufence->xe->ufence_wq);
	user_fence_put(ufence);
}

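/*
 * Queue the user-fence write on the device's ordered workqueue. Consumes the
 * caller's reference on @fence; the ufence reference taken by the signaller
 * is dropped by the worker.
 */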
static void kick_ufence(struct user_fence *ufence, struct dma_fence *fence)
{
	INIT_WORK(&ufence->worker, user_fence_worker);
	queue_work(ufence->xe->ordered_wq, &ufence->worker);
	dma_fence_put(fence);
}

static void user_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct user_fence *ufence = container_of(cb, struct user_fence, cb);

	kick_ufence(ufence, fence);
}

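/*
 * Copy one struct drm_xe_sync from userspace, validate it against the parse
 * flags, and resolve it into a struct xe_sync_entry: a (timeline) syncobj
 * in/out-fence or a user fence.
 */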
int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
			struct xe_sync_entry *sync,
			struct drm_xe_sync __user *sync_user,
			unsigned int flags)
{
	struct drm_xe_sync sync_in;
	int err;
	bool exec = flags & SYNC_PARSE_FLAG_EXEC;
	bool in_lr_mode = flags & SYNC_PARSE_FLAG_LR_MODE;
	bool disallow_user_fence = flags & SYNC_PARSE_FLAG_DISALLOW_USER_FENCE;
	bool signal;

	if (copy_from_user(&sync_in, sync_user, sizeof(*sync_user)))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, sync_in.flags & ~DRM_XE_SYNC_FLAG_SIGNAL) ||
	    XE_IOCTL_DBG(xe, sync_in.reserved[0] || sync_in.reserved[1]))
		return -EINVAL;

	signal = sync_in.flags & DRM_XE_SYNC_FLAG_SIGNAL;
	switch (sync_in.type) {
	case DRM_XE_SYNC_TYPE_SYNCOBJ:
		if (XE_IOCTL_DBG(xe, in_lr_mode && signal))
			return -EOPNOTSUPP;

		if (XE_IOCTL_DBG(xe, upper_32_bits(sync_in.addr)))
			return -EINVAL;

		sync->syncobj = drm_syncobj_find(xef->drm, sync_in.handle);
		if (XE_IOCTL_DBG(xe, !sync->syncobj))
			return -ENOENT;

		if (!signal) {
			sync->fence = drm_syncobj_fence_get(sync->syncobj);
			if (XE_IOCTL_DBG(xe, !sync->fence))
				return -EINVAL;
		}
		break;

	case DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ:
		if (XE_IOCTL_DBG(xe, in_lr_mode && signal))
			return -EOPNOTSUPP;

		if (XE_IOCTL_DBG(xe, upper_32_bits(sync_in.addr)))
			return -EINVAL;

		if (XE_IOCTL_DBG(xe, sync_in.timeline_value == 0))
			return -EINVAL;

		sync->syncobj = drm_syncobj_find(xef->drm, sync_in.handle);
		if (XE_IOCTL_DBG(xe, !sync->syncobj))
			return -ENOENT;

		if (signal) {
			sync->chain_fence = dma_fence_chain_alloc();
			if (!sync->chain_fence)
				return -ENOMEM;
		} else {
			sync->fence = drm_syncobj_fence_get(sync->syncobj);
			if (XE_IOCTL_DBG(xe, !sync->fence))
				return -EINVAL;

			err = dma_fence_chain_find_seqno(&sync->fence,
							 sync_in.timeline_value);
			if (err)
				return err;
		}
		break;

	case DRM_XE_SYNC_TYPE_USER_FENCE:
		if (XE_IOCTL_DBG(xe, disallow_user_fence))
			return -EOPNOTSUPP;

		if (XE_IOCTL_DBG(xe, !signal))
			return -EOPNOTSUPP;

		if (XE_IOCTL_DBG(xe, sync_in.addr & 0x7))
			return -EINVAL;

		if (exec) {
			sync->addr = sync_in.addr;
		} else {
			sync->ufence = user_fence_create(xe, sync_in.addr,
							 sync_in.timeline_value);
			if (XE_IOCTL_DBG(xe, !sync->ufence))
				return -ENOMEM;
		}
		break;

	default:
		return -EINVAL;
	}

	sync->type = sync_in.type;
	sync->flags = sync_in.flags;
	sync->timeline_value = sync_in.timeline_value;

	return 0;
}

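/*
 * Rough sketch of how callers (e.g. the exec IOCTL) are expected to drive
 * this API; error handling and job setup elided:
 *
 *	for (i = 0; i < num_syncs; i++)
 *		xe_sync_entry_parse(xe, xef, &syncs[i], &syncs_user[i], flags);
 *	for (i = 0; i < num_syncs; i++)
 *		xe_sync_entry_add_deps(&syncs[i], job);
 *	(arm and submit the job, obtaining its finished fence)
 *	for (i = 0; i < num_syncs; i++)
 *		xe_sync_entry_signal(&syncs[i], job, fence);
 *	for (i = 0; i < num_syncs; i++)
 *		xe_sync_entry_cleanup(&syncs[i]);
 */
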
int xe_sync_entry_wait(struct xe_sync_entry *sync)
{
	if (sync->fence)
		dma_fence_wait(sync->fence, true);

	return 0;
}

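/*
 * Hand any in-fence to the DRM scheduler as a dependency of @job; the
 * scheduler takes over the extra reference obtained here.
 */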
int xe_sync_entry_add_deps(struct xe_sync_entry *sync, struct xe_sched_job *job)
{
	int err;

	if (sync->fence) {
		err = drm_sched_job_add_dependency(&job->drm,
						   dma_fence_get(sync->fence));
		if (err) {
			dma_fence_put(sync->fence);
			return err;
		}
	}

	return 0;
}

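/*
 * Signal an out-sync with @fence: add a timeline point, replace a binary
 * syncobj's fence, or arm the deferred user-fence write. On the exec path,
 * user fences are instead recorded in @job and written on job completion.
 */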
void xe_sync_entry_signal(struct xe_sync_entry *sync, struct xe_sched_job *job,
			  struct dma_fence *fence)
{
	if (!(sync->flags & DRM_XE_SYNC_FLAG_SIGNAL))
		return;

	if (sync->chain_fence) {
		drm_syncobj_add_point(sync->syncobj, sync->chain_fence,
				      fence, sync->timeline_value);
		/*
		 * The chain's ownership is transferred to the
		 * timeline.
		 */
		sync->chain_fence = NULL;
	} else if (sync->syncobj) {
		drm_syncobj_replace_fence(sync->syncobj, fence);
	} else if (sync->ufence) {
		int err;

		dma_fence_get(fence);
		user_fence_get(sync->ufence);
		err = dma_fence_add_callback(fence, &sync->ufence->cb,
					     user_fence_cb);
		if (err == -ENOENT) {
			kick_ufence(sync->ufence, fence);
		} else if (err) {
			XE_WARN_ON("failed to add user fence");
			user_fence_put(sync->ufence);
			dma_fence_put(fence);
		}
	} else if (sync->type == DRM_XE_SYNC_TYPE_USER_FENCE) {
		job->user_fence.used = true;
		job->user_fence.addr = sync->addr;
		job->user_fence.value = sync->timeline_value;
	}
}

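/*
 * Drop every reference a parsed entry may hold; safe on partially parsed
 * entries since each field is NULL-checked.
 */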
void xe_sync_entry_cleanup(struct xe_sync_entry *sync)
{
	if (sync->syncobj)
		drm_syncobj_put(sync->syncobj);
	if (sync->fence)
		dma_fence_put(sync->fence);
	if (sync->chain_fence)
		dma_fence_put(&sync->chain_fence->base);
	if (sync->ufence)
		user_fence_put(sync->ufence);
}

/**
 * xe_sync_in_fence_get() - Get a fence from syncs, exec queue, and VM
 * @sync: input syncs
 * @num_sync: number of syncs
 * @q: exec queue
 * @vm: VM
 *
 * Get a fence from syncs, exec queue, and VM. If syncs contain in-fences create
 * and return a composite fence of all in-fences + last fence. If no in-fences
 * return last fence on input exec queue. Caller must drop reference to
 * returned fence.
 *
 * Return: fence on success, ERR_PTR(-ENOMEM) on failure
 */
struct dma_fence *
xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync,
		     struct xe_exec_queue *q, struct xe_vm *vm)
{
	struct dma_fence **fences = NULL;
	struct dma_fence_array *cf = NULL;
	struct dma_fence *fence;
	int i, num_in_fence = 0, current_fence = 0;

	lockdep_assert_held(&vm->lock);

	/* Count in-fences */
	for (i = 0; i < num_sync; ++i) {
		if (sync[i].fence) {
			++num_in_fence;
			fence = sync[i].fence;
		}
	}

	/* Easy case: no in-fences, return the exec queue's last fence */
	if (!num_in_fence) {
		fence = xe_exec_queue_last_fence_get(q, vm);
		dma_fence_get(fence);
		return fence;
	}

	/* Create composite fence */
	fences = kmalloc_array(num_in_fence + 1, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < num_sync; ++i) {
		if (sync[i].fence) {
			dma_fence_get(sync[i].fence);
			fences[current_fence++] = sync[i].fence;
		}
	}
	fences[current_fence++] = xe_exec_queue_last_fence_get(q, vm);
	dma_fence_get(fences[current_fence - 1]);
	/* The composite covers all in-fences plus the exec queue's last fence */
	cf = dma_fence_array_create(num_in_fence + 1, fences,
				    vm->composite_fence_ctx,
				    vm->composite_fence_seqno++,
				    false);
	if (!cf) {
		--vm->composite_fence_seqno;
		goto err_out;
	}

	return &cf->base;

err_out:
	while (current_fence)
		dma_fence_put(fences[--current_fence]);
	kfree(fences);

	return ERR_PTR(-ENOMEM);
}