Commit | Line | Data |
---|---|---|
1802d0be | 1 | // SPDX-License-Identifier: GPL-2.0-only |
e941759c ML |
2 | /* |
3 | * Fence mechanism for dma-buf and to allow for asynchronous dma access | |
4 | * | |
5 | * Copyright (C) 2012 Canonical Ltd | |
6 | * Copyright (C) 2012 Texas Instruments | |
7 | * | |
8 | * Authors: | |
9 | * Rob Clark <robdclark@gmail.com> | |
10 | * Maarten Lankhorst <maarten.lankhorst@canonical.com> | |
e941759c ML |
11 | */ |
12 | ||
13 | #include <linux/slab.h> | |
14 | #include <linux/export.h> | |
15 | #include <linux/atomic.h> | |
f54d1867 | 16 | #include <linux/dma-fence.h> |
174cd4b1 | 17 | #include <linux/sched/signal.h> |
a25efb38 | 18 | #include <linux/seq_file.h> |
e941759c ML |
19 | |
20 | #define CREATE_TRACE_POINTS | |
f54d1867 | 21 | #include <trace/events/dma_fence.h> |
e941759c | 22 | |
f54d1867 | 23 | EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit); |
8c96c678 | 24 | EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal); |
c36beba6 | 25 | EXPORT_TRACEPOINT_SYMBOL(dma_fence_signaled); |
e941759c | 26 | |
/* Protects lazy one-time initialization of dma_fence_stub below. */
static DEFINE_SPINLOCK(dma_fence_stub_lock);
/* Singleton already-signaled fence handed out by dma_fence_get_stub(). */
static struct dma_fence dma_fence_stub;
29 | ||
/*
 * fence context counter: each execution context should have its own
 * fence context, this allows checking if fences belong to the same
 * context or not. One device can have multiple separate contexts,
 * and they're used if some engine can run independently of another.
 * Starts at 1, so a context value of 0 is never handed out.
 */
static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1);
e941759c | 37 | |
4dd3cdb2 DV |
38 | /** |
39 | * DOC: DMA fences overview | |
40 | * | |
41 | * DMA fences, represented by &struct dma_fence, are the kernel internal | |
42 | * synchronization primitive for DMA operations like GPU rendering, video | |
43 | * encoding/decoding, or displaying buffers on a screen. | |
44 | * | |
45 | * A fence is initialized using dma_fence_init() and completed using | |
46 | * dma_fence_signal(). Fences are associated with a context, allocated through | |
47 | * dma_fence_context_alloc(), and all fences on the same context are | |
48 | * fully ordered. | |
49 | * | |
 * Since the purpose of fences is to facilitate cross-device and
 * cross-application synchronization, there are multiple ways to use one:
52 | * | |
53 | * - Individual fences can be exposed as a &sync_file, accessed as a file | |
54 | * descriptor from userspace, created by calling sync_file_create(). This is | |
55 | * called explicit fencing, since userspace passes around explicit | |
56 | * synchronization points. | |
57 | * | |
58 | * - Some subsystems also have their own explicit fencing primitives, like | |
59 | * &drm_syncobj. Compared to &sync_file, a &drm_syncobj allows the underlying | |
60 | * fence to be updated. | |
61 | * | |
62 | * - Then there's also implicit fencing, where the synchronization points are | |
63 | * implicitly passed around as part of shared &dma_buf instances. Such | |
52791eee | 64 | * implicit fences are stored in &struct dma_resv through the |
4dd3cdb2 DV |
65 | * &dma_buf.resv pointer. |
66 | */ | |
67 | ||
d0b9a9ae DV |
68 | /** |
69 | * DOC: fence cross-driver contract | |
70 | * | |
71 | * Since &dma_fence provide a cross driver contract, all drivers must follow the | |
72 | * same rules: | |
73 | * | |
74 | * * Fences must complete in a reasonable time. Fences which represent kernels | |
75 | * and shaders submitted by userspace, which could run forever, must be backed | |
76 | * up by timeout and gpu hang recovery code. Minimally that code must prevent | |
77 | * further command submission and force complete all in-flight fences, e.g. | |
78 | * when the driver or hardware do not support gpu reset, or if the gpu reset | |
79 | * failed for some reason. Ideally the driver supports gpu recovery which only | |
80 | * affects the offending userspace context, and no other userspace | |
81 | * submissions. | |
82 | * | |
83 | * * Drivers may have different ideas of what completion within a reasonable | |
84 | * time means. Some hang recovery code uses a fixed timeout, others a mix | |
85 | * between observing forward progress and increasingly strict timeouts. | |
86 | * Drivers should not try to second guess timeout handling of fences from | |
87 | * other drivers. | |
88 | * | |
89 | * * To ensure there's no deadlocks of dma_fence_wait() against other locks | |
90 | * drivers should annotate all code required to reach dma_fence_signal(), | |
91 | * which completes the fences, with dma_fence_begin_signalling() and | |
92 | * dma_fence_end_signalling(). | |
93 | * | |
94 | * * Drivers are allowed to call dma_fence_wait() while holding dma_resv_lock(). | |
95 | * This means any code required for fence completion cannot acquire a | |
96 | * &dma_resv lock. Note that this also pulls in the entire established | |
97 | * locking hierarchy around dma_resv_lock() and dma_resv_unlock(). | |
98 | * | |
99 | * * Drivers are allowed to call dma_fence_wait() from their &shrinker | |
100 | * callbacks. This means any code required for fence completion cannot | |
101 | * allocate memory with GFP_KERNEL. | |
102 | * | |
103 | * * Drivers are allowed to call dma_fence_wait() from their &mmu_notifier | |
104 | * respectively &mmu_interval_notifier callbacks. This means any code required | |
45017df3 | 105 | * for fence completion cannot allocate memory with GFP_NOFS or GFP_NOIO. |
d0b9a9ae DV |
106 | * Only GFP_ATOMIC is permissible, which might fail. |
107 | * | |
108 | * Note that only GPU drivers have a reasonable excuse for both requiring | |
109 | * &mmu_interval_notifier and &shrinker callbacks at the same time as having to | |
110 | * track asynchronous compute work using &dma_fence. No driver outside of | |
111 | * drivers/gpu should ever call dma_fence_wait() in such contexts. | |
112 | */ | |
113 | ||
/* Name callback shared by both name hooks of the stub fence ops. */
static const char *dma_fence_stub_get_name(struct dma_fence *fence)
{
	return "stub";
}
118 | ||
/*
 * Minimal ops for stub fences: only the two mandatory name callbacks
 * are provided, both reporting "stub".
 */
static const struct dma_fence_ops dma_fence_stub_ops = {
	.get_driver_name = dma_fence_stub_get_name,
	.get_timeline_name = dma_fence_stub_get_name,
};
123 | ||
/**
 * dma_fence_get_stub - return a signaled fence
 *
 * Return a stub fence which is already signaled. The fence's
 * timestamp corresponds to the first time after boot this
 * function is called.
 */
struct dma_fence *dma_fence_get_stub(void)
{
	spin_lock(&dma_fence_stub_lock);
	if (!dma_fence_stub.ops) {
		/*
		 * Lazy one-time init under the lock; a non-NULL ops
		 * pointer marks the stub as already initialized.
		 */
		dma_fence_init(&dma_fence_stub,
			       &dma_fence_stub_ops,
			       &dma_fence_stub_lock,
			       0, 0);

		set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
			&dma_fence_stub.flags);

		dma_fence_signal_locked(&dma_fence_stub);
	}
	spin_unlock(&dma_fence_stub_lock);

	/* Caller must drop the returned reference with dma_fence_put(). */
	return dma_fence_get(&dma_fence_stub);
}
EXPORT_SYMBOL(dma_fence_get_stub);
150 | ||
fd921693 DS |
151 | /** |
152 | * dma_fence_allocate_private_stub - return a private, signaled fence | |
f781f661 | 153 | * @timestamp: timestamp when the fence was signaled |
fd921693 DS |
154 | * |
155 | * Return a newly allocated and signaled stub fence. | |
156 | */ | |
f781f661 | 157 | struct dma_fence *dma_fence_allocate_private_stub(ktime_t timestamp) |
fd921693 DS |
158 | { |
159 | struct dma_fence *fence; | |
160 | ||
161 | fence = kzalloc(sizeof(*fence), GFP_KERNEL); | |
162 | if (fence == NULL) | |
00ae1491 | 163 | return NULL; |
fd921693 DS |
164 | |
165 | dma_fence_init(fence, | |
166 | &dma_fence_stub_ops, | |
167 | &dma_fence_stub_lock, | |
168 | 0, 0); | |
c85d00d4 AY |
169 | |
170 | set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, | |
d2ceea0e | 171 | &fence->flags); |
c85d00d4 | 172 | |
f781f661 | 173 | dma_fence_signal_timestamp(fence, timestamp); |
fd921693 DS |
174 | |
175 | return fence; | |
176 | } | |
177 | EXPORT_SYMBOL(dma_fence_allocate_private_stub); | |
178 | ||
e941759c | 179 | /** |
f54d1867 | 180 | * dma_fence_context_alloc - allocate an array of fence contexts |
4dd3cdb2 | 181 | * @num: amount of contexts to allocate |
e941759c | 182 | * |
4dd3cdb2 DV |
183 | * This function will return the first index of the number of fence contexts |
184 | * allocated. The fence context is used for setting &dma_fence.context to a | |
185 | * unique number by passing the context to dma_fence_init(). | |
e941759c | 186 | */ |
f54d1867 | 187 | u64 dma_fence_context_alloc(unsigned num) |
e941759c | 188 | { |
6ce31263 | 189 | WARN_ON(!num); |
1c530d43 | 190 | return atomic64_fetch_add(num, &dma_fence_context_counter); |
e941759c | 191 | } |
f54d1867 | 192 | EXPORT_SYMBOL(dma_fence_context_alloc); |
e941759c | 193 | |
5fbff813 DV |
194 | /** |
195 | * DOC: fence signalling annotation | |
196 | * | |
197 | * Proving correctness of all the kernel code around &dma_fence through code | |
198 | * review and testing is tricky for a few reasons: | |
199 | * | |
200 | * * It is a cross-driver contract, and therefore all drivers must follow the | |
201 | * same rules for lock nesting order, calling contexts for various functions | |
202 | * and anything else significant for in-kernel interfaces. But it is also | |
203 | * impossible to test all drivers in a single machine, hence brute-force N vs. | |
204 | * N testing of all combinations is impossible. Even just limiting to the | |
205 | * possible combinations is infeasible. | |
206 | * | |
207 | * * There is an enormous amount of driver code involved. For render drivers | |
208 | * there's the tail of command submission, after fences are published, | |
209 | * scheduler code, interrupt and workers to process job completion, | |
210 | * and timeout, gpu reset and gpu hang recovery code. Plus for integration | |
 * with core mm we have &mmu_notifier, respectively &mmu_interval_notifier,
212 | * and &shrinker. For modesetting drivers there's the commit tail functions | |
213 | * between when fences for an atomic modeset are published, and when the | |
214 | * corresponding vblank completes, including any interrupt processing and | |
215 | * related workers. Auditing all that code, across all drivers, is not | |
216 | * feasible. | |
217 | * | |
218 | * * Due to how many other subsystems are involved and the locking hierarchies | |
219 | * this pulls in there is extremely thin wiggle-room for driver-specific | |
220 | * differences. &dma_fence interacts with almost all of the core memory | |
221 | * handling through page fault handlers via &dma_resv, dma_resv_lock() and | |
222 | * dma_resv_unlock(). On the other side it also interacts through all | |
223 | * allocation sites through &mmu_notifier and &shrinker. | |
224 | * | |
225 | * Furthermore lockdep does not handle cross-release dependencies, which means | |
226 | * any deadlocks between dma_fence_wait() and dma_fence_signal() can't be caught | |
227 | * at runtime with some quick testing. The simplest example is one thread | |
228 | * waiting on a &dma_fence while holding a lock:: | |
229 | * | |
230 | * lock(A); | |
231 | * dma_fence_wait(B); | |
232 | * unlock(A); | |
233 | * | |
234 | * while the other thread is stuck trying to acquire the same lock, which | |
235 | * prevents it from signalling the fence the previous thread is stuck waiting | |
236 | * on:: | |
237 | * | |
238 | * lock(A); | |
239 | * unlock(A); | |
240 | * dma_fence_signal(B); | |
241 | * | |
242 | * By manually annotating all code relevant to signalling a &dma_fence we can | |
243 | * teach lockdep about these dependencies, which also helps with the validation | |
244 | * headache since now lockdep can check all the rules for us:: | |
245 | * | |
246 | * cookie = dma_fence_begin_signalling(); | |
247 | * lock(A); | |
248 | * unlock(A); | |
249 | * dma_fence_signal(B); | |
250 | * dma_fence_end_signalling(cookie); | |
251 | * | |
252 | * For using dma_fence_begin_signalling() and dma_fence_end_signalling() to | |
253 | * annotate critical sections the following rules need to be observed: | |
254 | * | |
255 | * * All code necessary to complete a &dma_fence must be annotated, from the | |
256 | * point where a fence is accessible to other threads, to the point where | |
257 | * dma_fence_signal() is called. Un-annotated code can contain deadlock issues, | |
258 | * and due to the very strict rules and many corner cases it is infeasible to | |
259 | * catch these just with review or normal stress testing. | |
260 | * | |
261 | * * &struct dma_resv deserves a special note, since the readers are only | |
262 | * protected by rcu. This means the signalling critical section starts as soon | |
263 | * as the new fences are installed, even before dma_resv_unlock() is called. | |
264 | * | |
265 | * * The only exception are fast paths and opportunistic signalling code, which | |
266 | * calls dma_fence_signal() purely as an optimization, but is not required to | |
267 | * guarantee completion of a &dma_fence. The usual example is a wait IOCTL | |
268 | * which calls dma_fence_signal(), while the mandatory completion path goes | |
269 | * through a hardware interrupt and possible job completion worker. | |
270 | * | |
271 | * * To aid composability of code, the annotations can be freely nested, as long | |
272 | * as the overall locking hierarchy is consistent. The annotations also work | |
273 | * both in interrupt and process context. Due to implementation details this | |
274 | * requires that callers pass an opaque cookie from | |
275 | * dma_fence_begin_signalling() to dma_fence_end_signalling(). | |
276 | * | |
277 | * * Validation against the cross driver contract is implemented by priming | |
278 | * lockdep with the relevant hierarchy at boot-up. This means even just | |
279 | * testing with a single device is enough to validate a driver, at least as | |
280 | * far as deadlocks with dma_fence_wait() against dma_fence_signal() are | |
281 | * concerned. | |
282 | */ | |
#ifdef CONFIG_LOCKDEP
/*
 * Pseudo lock that teaches lockdep about the cross-driver fence
 * signalling critical sections; see the "fence signalling annotation"
 * DOC section above.
 */
static struct lockdep_map dma_fence_lockdep_map = {
	.name = "dma_fence_map"
};
287 | ||
/**
 * dma_fence_begin_signalling - begin a critical DMA fence signalling section
 *
 * Drivers should use this to annotate the beginning of any code section
 * required to eventually complete &dma_fence by calling dma_fence_signal().
 *
 * The end of these critical sections are annotated with
 * dma_fence_end_signalling().
 *
 * Returns:
 *
 * Opaque cookie needed by the implementation, which needs to be passed to
 * dma_fence_end_signalling().
 */
bool dma_fence_begin_signalling(void)
{
	/* explicitly nesting ... */
	if (lock_is_held_type(&dma_fence_lockdep_map, 1))
		return true;

	/* rely on might_sleep check for soft/hardirq locks */
	if (in_atomic())
		return true;

	/* ... and non-recursive readlock */
	lock_acquire(&dma_fence_lockdep_map, 0, 0, 1, 1, NULL, _RET_IP_);

	/* false tells dma_fence_end_signalling() to release the map. */
	return false;
}
EXPORT_SYMBOL(dma_fence_begin_signalling);
318 | ||
319 | /** | |
320 | * dma_fence_end_signalling - end a critical DMA fence signalling section | |
e44cd6bc | 321 | * @cookie: opaque cookie from dma_fence_begin_signalling() |
5fbff813 DV |
322 | * |
323 | * Closes a critical section annotation opened by dma_fence_begin_signalling(). | |
324 | */ | |
325 | void dma_fence_end_signalling(bool cookie) | |
326 | { | |
327 | if (cookie) | |
328 | return; | |
329 | ||
330 | lock_release(&dma_fence_lockdep_map, _RET_IP_); | |
331 | } | |
332 | EXPORT_SYMBOL(dma_fence_end_signalling); | |
333 | ||
/*
 * Lockdep annotation for dma_fence_wait(): if an explicitly nested
 * signalling read-lock is held, drop it, record a full acquire/release
 * of the map so lockdep sees the wait-side dependency, then restore the
 * read-lock. The ordering of these calls is what lockdep learns from,
 * so it must not be rearranged.
 */
void __dma_fence_might_wait(void)
{
	bool tmp;

	tmp = lock_is_held_type(&dma_fence_lockdep_map, 1);
	if (tmp)
		lock_release(&dma_fence_lockdep_map, _THIS_IP_);
	lock_map_acquire(&dma_fence_lockdep_map);
	lock_map_release(&dma_fence_lockdep_map);
	if (tmp)
		lock_acquire(&dma_fence_lockdep_map, 0, 0, 1, 1, NULL, _THIS_IP_);
}
#endif
347 | ||
348 | ||
/**
 * dma_fence_signal_timestamp_locked - signal completion of a fence
 * @fence: the fence to signal
 * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
 *
 * Signal completion for software callbacks on a fence, this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time. Set the timestamp provided as the fence
 * signal timestamp.
 *
 * Unlike dma_fence_signal_timestamp(), this function must be called with
 * &dma_fence.lock held.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signalled already.
 */
int dma_fence_signal_timestamp_locked(struct dma_fence *fence,
				      ktime_t timestamp)
{
	struct dma_fence_cb *cur, *tmp;
	struct list_head cb_list;

	lockdep_assert_held(fence->lock);

	/* test_and_set ensures only the first signaller proceeds. */
	if (unlikely(test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				      &fence->flags)))
		return -EINVAL;

	/* Stash the cb_list before replacing it with the timestamp */
	list_replace(&fence->cb_list, &cb_list);

	fence->timestamp = timestamp;
	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
	trace_dma_fence_signaled(fence);

	/* Run the stashed callbacks; each node is re-inited before its func. */
	list_for_each_entry_safe(cur, tmp, &cb_list, node) {
		INIT_LIST_HEAD(&cur->node);
		cur->func(fence, cur);
	}

	return 0;
}
EXPORT_SYMBOL(dma_fence_signal_timestamp_locked);
394 | ||
395 | /** | |
396 | * dma_fence_signal_timestamp - signal completion of a fence | |
397 | * @fence: the fence to signal | |
398 | * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain | |
399 | * | |
400 | * Signal completion for software callbacks on a fence, this will unblock | |
401 | * dma_fence_wait() calls and run all the callbacks added with | |
402 | * dma_fence_add_callback(). Can be called multiple times, but since a fence | |
403 | * can only go from the unsignaled to the signaled state and not back, it will | |
404 | * only be effective the first time. Set the timestamp provided as the fence | |
405 | * signal timestamp. | |
406 | * | |
407 | * Returns 0 on success and a negative error value when @fence has been | |
408 | * signalled already. | |
409 | */ | |
410 | int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp) | |
411 | { | |
412 | unsigned long flags; | |
413 | int ret; | |
414 | ||
415 | if (!fence) | |
416 | return -EINVAL; | |
417 | ||
418 | spin_lock_irqsave(fence->lock, flags); | |
419 | ret = dma_fence_signal_timestamp_locked(fence, timestamp); | |
420 | spin_unlock_irqrestore(fence->lock, flags); | |
421 | ||
422 | return ret; | |
423 | } | |
424 | EXPORT_SYMBOL(dma_fence_signal_timestamp); | |
425 | ||
/**
 * dma_fence_signal_locked - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence, this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time.
 *
 * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock
 * held.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signalled already.
 */
int dma_fence_signal_locked(struct dma_fence *fence)
{
	/* Same as the timestamped variant, stamped with the current time. */
	return dma_fence_signal_timestamp_locked(fence, ktime_get());
}
EXPORT_SYMBOL(dma_fence_signal_locked);
e941759c ML |
447 | |
/**
 * dma_fence_signal - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence, this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signalled already.
 */
int dma_fence_signal(struct dma_fence *fence)
{
	unsigned long flags;
	int ret;
	bool tmp;

	if (!fence)
		return -EINVAL;

	/* Annotate the signalling critical section for lockdep. */
	tmp = dma_fence_begin_signalling();

	spin_lock_irqsave(fence->lock, flags);
	ret = dma_fence_signal_timestamp_locked(fence, ktime_get());
	spin_unlock_irqrestore(fence->lock, flags);

	dma_fence_end_signalling(tmp);

	return ret;
}
EXPORT_SYMBOL(dma_fence_signal);
e941759c ML |
481 | |
482 | /** | |
f54d1867 | 483 | * dma_fence_wait_timeout - sleep until the fence gets signaled |
e941759c | 484 | * or until timeout elapses |
4dd3cdb2 DV |
485 | * @fence: the fence to wait on |
486 | * @intr: if true, do an interruptible wait | |
487 | * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT | |
e941759c ML |
488 | * |
489 | * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the | |
490 | * remaining timeout in jiffies on success. Other error values may be | |
491 | * returned on custom implementations. | |
492 | * | |
493 | * Performs a synchronous wait on this fence. It is assumed the caller | |
494 | * directly or indirectly (buf-mgr between reservation and committing) | |
495 | * holds a reference to the fence, otherwise the fence might be | |
496 | * freed before return, resulting in undefined behavior. | |
4dd3cdb2 DV |
497 | * |
498 | * See also dma_fence_wait() and dma_fence_wait_any_timeout(). | |
e941759c ML |
499 | */ |
500 | signed long | |
f54d1867 | 501 | dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout) |
e941759c ML |
502 | { |
503 | signed long ret; | |
504 | ||
505 | if (WARN_ON(timeout < 0)) | |
506 | return -EINVAL; | |
507 | ||
ef825550 DV |
508 | might_sleep(); |
509 | ||
5fbff813 DV |
510 | __dma_fence_might_wait(); |
511 | ||
b96fb1e7 AY |
512 | dma_fence_enable_sw_signaling(fence); |
513 | ||
f54d1867 | 514 | trace_dma_fence_wait_start(fence); |
418cc6ca DV |
515 | if (fence->ops->wait) |
516 | ret = fence->ops->wait(fence, intr, timeout); | |
517 | else | |
518 | ret = dma_fence_default_wait(fence, intr, timeout); | |
f54d1867 | 519 | trace_dma_fence_wait_end(fence); |
e941759c ML |
520 | return ret; |
521 | } | |
f54d1867 | 522 | EXPORT_SYMBOL(dma_fence_wait_timeout); |
/**
 * dma_fence_release - default release function for fences
 * @kref: &dma_fence.refcount
 *
 * This is the default release function for &dma_fence. Drivers shouldn't call
 * this directly, but instead call dma_fence_put().
 */
void dma_fence_release(struct kref *kref)
{
	struct dma_fence *fence =
		container_of(kref, struct dma_fence, refcount);

	trace_dma_fence_destroy(fence);

	if (WARN(!list_empty(&fence->cb_list) &&
		 !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags),
		 "Fence %s:%s:%llx:%llx released with pending signals!\n",
		 fence->ops->get_driver_name(fence),
		 fence->ops->get_timeline_name(fence),
		 fence->context, fence->seqno)) {
		unsigned long flags;

		/*
		 * Failed to signal before release, likely a refcounting issue.
		 *
		 * This should never happen, but if it does make sure that we
		 * don't leave chains dangling. We set the error flag first
		 * so that the callbacks know this signal is due to an error.
		 */
		spin_lock_irqsave(fence->lock, flags);
		fence->error = -EDEADLK;
		dma_fence_signal_locked(fence);
		spin_unlock_irqrestore(fence->lock, flags);
	}

	/* Drivers may provide a custom release; otherwise free via RCU. */
	if (fence->ops->release)
		fence->ops->release(fence);
	else
		dma_fence_free(fence);
}
EXPORT_SYMBOL(dma_fence_release);
/**
 * dma_fence_free - default release function for &dma_fence.
 * @fence: fence to release
 *
 * This is the default implementation for &dma_fence_ops.release. It calls
 * kfree_rcu() on @fence.
 */
void dma_fence_free(struct dma_fence *fence)
{
	/* Defer the actual free until an RCU grace period has elapsed. */
	kfree_rcu(fence, rcu);
}
EXPORT_SYMBOL(dma_fence_free);
/*
 * Enable signaling on @fence with @fence->lock held.
 *
 * Returns true if the fence is not yet signaled and callbacks may still
 * fire, false if it is already signaled — including the case where the
 * driver's enable_signaling hook failed and the fence was signaled here
 * directly.
 */
static bool __dma_fence_enable_signaling(struct dma_fence *fence)
{
	bool was_set;

	lockdep_assert_held(fence->lock);

	/* Only the first caller actually invokes the driver hook below. */
	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
				   &fence->flags);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return false;

	if (!was_set && fence->ops->enable_signaling) {
		trace_dma_fence_enable_signal(fence);

		if (!fence->ops->enable_signaling(fence)) {
			/* Driver can't signal asynchronously: signal now. */
			dma_fence_signal_locked(fence);
			return false;
		}
	}

	return true;
}
/**
 * dma_fence_enable_sw_signaling - enable signaling on fence
 * @fence: the fence to enable
 *
 * This will request for sw signaling to be enabled, to make the fence
 * complete as soon as possible. This calls &dma_fence_ops.enable_signaling
 * internally.
 */
void dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
	unsigned long flags;

	/* Take the fence lock; the real work happens in the helper. */
	spin_lock_irqsave(fence->lock, flags);
	__dma_fence_enable_signaling(fence);
	spin_unlock_irqrestore(fence->lock, flags);
}
EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
/**
 * dma_fence_add_callback - add a callback to be called when the fence
 * is signaled
 * @fence: the fence to wait on
 * @cb: the callback to register
 * @func: the function to call
 *
 * Add a software callback to the fence. The caller should keep a reference to
 * the fence.
 *
 * @cb will be initialized by dma_fence_add_callback(), no initialization
 * by the caller is required. Any number of callbacks can be registered
 * to a fence, but a callback can only be registered to one fence at a time.
 *
 * If fence is already signaled, this function will return -ENOENT (and
 * *not* call the callback).
 *
 * Note that the callback can be called from an atomic context or irq context.
 *
 * Returns 0 in case of success, -ENOENT if the fence is already signaled
 * and -EINVAL in case of error.
 */
int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
			   dma_fence_func_t func)
{
	unsigned long flags;
	int ret = 0;

	if (WARN_ON(!fence || !func))
		return -EINVAL;

	/* Unlocked fast path: already signaled, don't bother locking. */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		INIT_LIST_HEAD(&cb->node);
		return -ENOENT;
	}

	spin_lock_irqsave(fence->lock, flags);

	if (__dma_fence_enable_signaling(fence)) {
		cb->func = func;
		list_add_tail(&cb->node, &fence->cb_list);
	} else {
		/*
		 * Fence got signaled (or enable_signaling failed) in the
		 * meantime; leave cb unlinked so removal is a safe no-op.
		 */
		INIT_LIST_HEAD(&cb->node);
		ret = -ENOENT;
	}

	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_fence_add_callback);
e941759c | 672 | |
d6c99f4b CW |
673 | /** |
674 | * dma_fence_get_status - returns the status upon completion | |
4dd3cdb2 | 675 | * @fence: the dma_fence to query |
d6c99f4b CW |
676 | * |
677 | * This wraps dma_fence_get_status_locked() to return the error status | |
678 | * condition on a signaled fence. See dma_fence_get_status_locked() for more | |
679 | * details. | |
680 | * | |
681 | * Returns 0 if the fence has not yet been signaled, 1 if the fence has | |
682 | * been signaled without an error condition, or a negative error code | |
683 | * if the fence has been completed in err. | |
684 | */ | |
685 | int dma_fence_get_status(struct dma_fence *fence) | |
686 | { | |
687 | unsigned long flags; | |
688 | int status; | |
689 | ||
690 | spin_lock_irqsave(fence->lock, flags); | |
691 | status = dma_fence_get_status_locked(fence); | |
692 | spin_unlock_irqrestore(fence->lock, flags); | |
693 | ||
694 | return status; | |
695 | } | |
696 | EXPORT_SYMBOL(dma_fence_get_status); | |
697 | ||
e941759c | 698 | /** |
f54d1867 | 699 | * dma_fence_remove_callback - remove a callback from the signaling list |
4dd3cdb2 DV |
700 | * @fence: the fence to wait on |
701 | * @cb: the callback to remove | |
e941759c ML |
702 | * |
703 | * Remove a previously queued callback from the fence. This function returns | |
f353d71f | 704 | * true if the callback is successfully removed, or false if the fence has |
e941759c ML |
705 | * already been signaled. |
706 | * | |
707 | * *WARNING*: | |
708 | * Cancelling a callback should only be done if you really know what you're | |
709 | * doing, since deadlocks and race conditions could occur all too easily. For | |
710 | * this reason, it should only ever be done on hardware lockup recovery, | |
711 | * with a reference held to the fence. | |
4dd3cdb2 DV |
712 | * |
713 | * Behaviour is undefined if @cb has not been added to @fence using | |
714 | * dma_fence_add_callback() beforehand. | |
e941759c ML |
715 | */ |
716 | bool | |
f54d1867 | 717 | dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb) |
e941759c ML |
718 | { |
719 | unsigned long flags; | |
720 | bool ret; | |
721 | ||
722 | spin_lock_irqsave(fence->lock, flags); | |
723 | ||
724 | ret = !list_empty(&cb->node); | |
725 | if (ret) | |
726 | list_del_init(&cb->node); | |
727 | ||
728 | spin_unlock_irqrestore(fence->lock, flags); | |
729 | ||
730 | return ret; | |
731 | } | |
f54d1867 | 732 | EXPORT_SYMBOL(dma_fence_remove_callback); |
e941759c ML |
733 | |
/* Per-waiter callback record used by the default wait paths to wake the
 * sleeping task once the fence signals.
 */
struct default_wait_cb {
	struct dma_fence_cb base;	/* embedded callback node queued on the fence */
	struct task_struct *task;	/* task to wake when the callback fires */
};
738 | ||
739 | static void | |
f54d1867 | 740 | dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb) |
e941759c ML |
741 | { |
742 | struct default_wait_cb *wait = | |
743 | container_of(cb, struct default_wait_cb, base); | |
744 | ||
745 | wake_up_state(wait->task, TASK_NORMAL); | |
746 | } | |
747 | ||
/**
 * dma_fence_default_wait - default sleep until the fence gets signaled
 * or until timeout elapses
 * @fence: the fence to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success. If timeout is zero the value one is
 * returned if the fence is already signaled for consistency with other
 * functions taking a jiffies timeout.
 */
signed long
dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
{
	struct default_wait_cb cb;
	unsigned long flags;
	/* A zero timeout reports 1 when already signaled (see kernel-doc). */
	signed long ret = timeout ? timeout : 1;

	spin_lock_irqsave(fence->lock, flags);

	/* Fast path: already signaled, nothing to wait for. */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		goto out;

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	/* Pure poll: not signaled and no time budget, report timeout. */
	if (!timeout) {
		ret = 0;
		goto out;
	}

	/*
	 * Queue a wake-up callback for the current task while still holding
	 * the fence lock, so signaling cannot race with the sleep below.
	 */
	cb.base.func = dma_fence_default_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &fence->cb_list);

	while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
		/*
		 * Set the task state before dropping the lock; a wake-up
		 * from the callback after the unlock is then not lost.
		 */
		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(fence->lock, flags);

		ret = schedule_timeout(ret);

		spin_lock_irqsave(fence->lock, flags);
		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	/* Remove the callback if signaling did not already consume it. */
	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);
	__set_current_state(TASK_RUNNING);

out:
	spin_unlock_irqrestore(fence->lock, flags);
	return ret;
}
EXPORT_SYMBOL(dma_fence_default_wait);
e941759c | 809 | |
a519435a | 810 | static bool |
7392b4bb | 811 | dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count, |
812 | uint32_t *idx) | |
a519435a CK |
813 | { |
814 | int i; | |
815 | ||
816 | for (i = 0; i < count; ++i) { | |
f54d1867 | 817 | struct dma_fence *fence = fences[i]; |
7392b4bb | 818 | if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { |
819 | if (idx) | |
820 | *idx = i; | |
a519435a | 821 | return true; |
7392b4bb | 822 | } |
a519435a CK |
823 | } |
824 | return false; | |
825 | } | |
826 | ||
/**
 * dma_fence_wait_any_timeout - sleep until any fence gets signaled
 * or until timeout elapses
 * @fences: array of fences to wait on
 * @count: number of fences to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 * @idx: used to store the first signaled fence index, meaningful only on
 *	positive return
 *
 * Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if
 * interrupted, 0 if the wait timed out, or the remaining timeout in jiffies
 * on success.
 *
 * Synchronous waits for the first fence in the array to be signaled. The
 * caller needs to hold a reference to all fences in the array, otherwise a
 * fence might be freed before return, resulting in undefined behavior.
 *
 * See also dma_fence_wait() and dma_fence_wait_timeout().
 */
signed long
dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
			   bool intr, signed long timeout, uint32_t *idx)
{
	struct default_wait_cb *cb;
	signed long ret = timeout;
	unsigned i;

	if (WARN_ON(!fences || !count || timeout < 0))
		return -EINVAL;

	/* Zero timeout: poll only, never sleep or install callbacks. */
	if (timeout == 0) {
		for (i = 0; i < count; ++i)
			if (dma_fence_is_signaled(fences[i])) {
				if (idx)
					*idx = i;
				return 1;
			}

		return 0;
	}

	cb = kcalloc(count, sizeof(struct default_wait_cb), GFP_KERNEL);
	if (cb == NULL) {
		ret = -ENOMEM;
		goto err_free_cb;
	}

	/*
	 * Install a wake-up callback on every fence. On early exit, i holds
	 * the number of callbacks successfully installed, which the teardown
	 * loop at fence_rm_cb relies on.
	 */
	for (i = 0; i < count; ++i) {
		struct dma_fence *fence = fences[i];

		cb[i].task = current;
		if (dma_fence_add_callback(fence, &cb[i].base,
					   dma_fence_default_wait_cb)) {
			/* This fence is already signaled */
			if (idx)
				*idx = i;
			goto fence_rm_cb;
		}
	}

	while (ret > 0) {
		/*
		 * Set the task state before re-checking, so a concurrent
		 * wake-up between the check and schedule_timeout() is not
		 * lost.
		 */
		if (intr)
			set_current_state(TASK_INTERRUPTIBLE);
		else
			set_current_state(TASK_UNINTERRUPTIBLE);

		if (dma_fence_test_signaled_any(fences, count, idx))
			break;

		ret = schedule_timeout(ret);

		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);

fence_rm_cb:
	/* Tear down exactly the callbacks that were installed above. */
	while (i-- > 0)
		dma_fence_remove_callback(fences[i], &cb[i].base);

err_free_cb:
	kfree(cb);

	return ret;
}
EXPORT_SYMBOL(dma_fence_wait_any_timeout);
a519435a | 915 | |
aec11c8d RC |
916 | /** |
917 | * DOC: deadline hints | |
918 | * | |
919 | * In an ideal world, it would be possible to pipeline a workload sufficiently | |
920 | * that a utilization based device frequency governor could arrive at a minimum | |
921 | * frequency that meets the requirements of the use-case, in order to minimize | |
922 | * power consumption. But in the real world there are many workloads which | |
923 | * defy this ideal. For example, but not limited to: | |
924 | * | |
925 | * * Workloads that ping-pong between device and CPU, with alternating periods | |
926 | * of CPU waiting for device, and device waiting on CPU. This can result in | |
 *   devfreq and cpufreq seeing idle time in their respective domains and, as
 *   a result, reducing frequency.
929 | * | |
930 | * * Workloads that interact with a periodic time based deadline, such as double | |
931 | * buffered GPU rendering vs vblank sync'd page flipping. In this scenario, | |
932 | * missing a vblank deadline results in an *increase* in idle time on the GPU | |
933 | * (since it has to wait an additional vblank period), sending a signal to | |
934 | * the GPU's devfreq to reduce frequency, when in fact the opposite is what is | |
935 | * needed. | |
936 | * | |
63ee4454 RC |
937 | * To this end, deadline hint(s) can be set on a &dma_fence via &dma_fence_set_deadline |
938 | * (or indirectly via userspace facing ioctls like &sync_set_deadline). | |
aec11c8d RC |
939 | * The deadline hint provides a way for the waiting driver, or userspace, to |
940 | * convey an appropriate sense of urgency to the signaling driver. | |
941 | * | |
942 | * A deadline hint is given in absolute ktime (CLOCK_MONOTONIC for userspace | |
943 | * facing APIs). The time could either be some point in the future (such as | |
944 | * the vblank based deadline for page-flipping, or the start of a compositor's | |
945 | * composition cycle), or the current time to indicate an immediate deadline | |
946 | * hint (Ie. forward progress cannot be made until this fence is signaled). | |
947 | * | |
948 | * Multiple deadlines may be set on a given fence, even in parallel. See the | |
949 | * documentation for &dma_fence_ops.set_deadline. | |
950 | * | |
951 | * The deadline hint is just that, a hint. The driver that created the fence | |
952 | * may react by increasing frequency, making different scheduling choices, etc. | |
953 | * Or doing nothing at all. | |
954 | */ | |
955 | ||
956 | /** | |
957 | * dma_fence_set_deadline - set desired fence-wait deadline hint | |
958 | * @fence: the fence that is to be waited on | |
959 | * @deadline: the time by which the waiter hopes for the fence to be | |
960 | * signaled | |
961 | * | |
962 | * Give the fence signaler a hint about an upcoming deadline, such as | |
963 | * vblank, by which point the waiter would prefer the fence to be | |
964 | * signaled by. This is intended to give feedback to the fence signaler | |
965 | * to aid in power management decisions, such as boosting GPU frequency | |
966 | * if a periodic vblank deadline is approaching but the fence is not | |
967 | * yet signaled.. | |
968 | */ | |
969 | void dma_fence_set_deadline(struct dma_fence *fence, ktime_t deadline) | |
970 | { | |
971 | if (fence->ops->set_deadline && !dma_fence_is_signaled(fence)) | |
972 | fence->ops->set_deadline(fence, deadline); | |
973 | } | |
974 | EXPORT_SYMBOL(dma_fence_set_deadline); | |
975 | ||
a25efb38 | 976 | /** |
45017df3 RD |
977 | * dma_fence_describe - Dump fence description into seq_file |
978 | * @fence: the fence to describe | |
a25efb38 CK |
979 | * @seq: the seq_file to put the textual description into |
980 | * | |
981 | * Dump a textual description of the fence and it's state into the seq_file. | |
982 | */ | |
983 | void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq) | |
984 | { | |
985 | seq_printf(seq, "%s %s seq %llu %ssignalled\n", | |
986 | fence->ops->get_driver_name(fence), | |
987 | fence->ops->get_timeline_name(fence), fence->seqno, | |
988 | dma_fence_is_signaled(fence) ? "" : "un"); | |
989 | } | |
990 | EXPORT_SYMBOL(dma_fence_describe); | |
991 | ||
e941759c | 992 | /** |
f54d1867 | 993 | * dma_fence_init - Initialize a custom fence. |
4dd3cdb2 DV |
994 | * @fence: the fence to initialize |
995 | * @ops: the dma_fence_ops for operations on this fence | |
996 | * @lock: the irqsafe spinlock to use for locking this fence | |
997 | * @context: the execution context this fence is run on | |
998 | * @seqno: a linear increasing sequence number for this context | |
e941759c ML |
999 | * |
1000 | * Initializes an allocated fence, the caller doesn't have to keep its | |
1001 | * refcount after committing with this fence, but it will need to hold a | |
4dd3cdb2 | 1002 | * refcount again if &dma_fence_ops.enable_signaling gets called. |
e941759c ML |
1003 | * |
1004 | * context and seqno are used for easy comparison between fences, allowing | |
4dd3cdb2 | 1005 | * to check which fence is later by simply using dma_fence_later(). |
e941759c ML |
1006 | */ |
1007 | void | |
f54d1867 | 1008 | dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, |
b312d8ca | 1009 | spinlock_t *lock, u64 context, u64 seqno) |
e941759c ML |
1010 | { |
1011 | BUG_ON(!lock); | |
418cc6ca | 1012 | BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name); |
e941759c ML |
1013 | |
1014 | kref_init(&fence->refcount); | |
1015 | fence->ops = ops; | |
1016 | INIT_LIST_HEAD(&fence->cb_list); | |
1017 | fence->lock = lock; | |
1018 | fence->context = context; | |
1019 | fence->seqno = seqno; | |
1020 | fence->flags = 0UL; | |
a009e975 | 1021 | fence->error = 0; |
e941759c | 1022 | |
f54d1867 | 1023 | trace_dma_fence_init(fence); |
e941759c | 1024 | } |
f54d1867 | 1025 | EXPORT_SYMBOL(dma_fence_init); |