/* drivers/dma-buf/dma-resv.c */
/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
34
52791eee 35#include <linux/dma-resv.h>
786d7257 36#include <linux/export.h>
0adf65f5 37#include <linux/mm.h>
b2a8116e 38#include <linux/sched/mm.h>
d0b9a9ae 39#include <linux/mmu_notifier.h>
786d7257 40
/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage shared and
 * exclusive fences associated with a buffer. A reservation object
 * can have attached one exclusive fence (normally associated with
 * write operations) or N shared fences (read operations). The RCU
 * mechanism is used to protect read access to fences from locked
 * write-side updates.
 */
51
/*
 * Single wound/wait mutex class shared by every reservation object, so
 * drivers can acquire several dma_resv locks at once with deadlock
 * backoff, and lockdep can validate the cross-object ordering.
 */
DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

/* lockdep class and name used to initialize each object's seqcount */
struct lock_class_key reservation_seqcount_class;
EXPORT_SYMBOL(reservation_seqcount_class);

const char reservation_seqcount_string[] = "reservation_seqcount";
EXPORT_SYMBOL(reservation_seqcount_string);
60
96e95496 61/**
52791eee 62 * dma_resv_list_alloc - allocate fence list
96e95496
CK
63 * @shared_max: number of fences we need space for
64 *
52791eee 65 * Allocate a new dma_resv_list and make sure to correctly initialize
96e95496
CK
66 * shared_max.
67 */
52791eee 68static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
96e95496 69{
52791eee 70 struct dma_resv_list *list;
96e95496
CK
71
72 list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL);
73 if (!list)
74 return NULL;
75
76 list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
77 sizeof(*list->shared);
78
79 return list;
80}
81
82/**
52791eee 83 * dma_resv_list_free - free fence list
96e95496
CK
84 * @list: list to free
85 *
52791eee 86 * Free a dma_resv_list and make sure to drop all references.
96e95496 87 */
52791eee 88static void dma_resv_list_free(struct dma_resv_list *list)
96e95496
CK
89{
90 unsigned int i;
91
92 if (!list)
93 return;
94
95 for (i = 0; i < list->shared_count; ++i)
96 dma_fence_put(rcu_dereference_protected(list->shared[i], true));
97
98 kfree_rcu(list, rcu);
99}
100
#if IS_ENABLED(CONFIG_LOCKDEP)
/*
 * Boot-time priming of lockdep: take the locks involved in the dma_resv
 * contract once, in the officially sanctioned order (mmap lock ->
 * dma_resv lock -> fs_reclaim / mmu-notifier -> fence wait), so that
 * lockdep learns the dependency chain early and flags any driver that
 * later acquires them in a conflicting order.
 */
static int __init dma_resv_lockdep(void)
{
	struct mm_struct *mm = mm_alloc();
	struct ww_acquire_ctx ctx;
	struct dma_resv obj;
	int ret;

	if (!mm)
		return -ENOMEM;

	dma_resv_init(&obj);

	mmap_read_lock(mm);
	ww_acquire_init(&ctx, &reservation_ww_class);
	ret = dma_resv_lock(&obj, &ctx);
	if (ret == -EDEADLK)
		dma_resv_lock_slow(&obj, &ctx);
	/* memory reclaim must be allowed while the reservation is held */
	fs_reclaim_acquire(GFP_KERNEL);
#ifdef CONFIG_MMU_NOTIFIER
	/* teach lockdep that fence waits may happen inside invalidation */
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	__dma_fence_might_wait();
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
#else
	__dma_fence_might_wait();
#endif
	fs_reclaim_release(GFP_KERNEL);
	ww_mutex_unlock(&obj.lock);
	ww_acquire_fini(&ctx);
	mmap_read_unlock(mm);

	mmput(mm);

	return 0;
}
subsys_initcall(dma_resv_lockdep);
#endif
138
/**
 * dma_resv_init - initialize a reservation object
 * @obj: the reservation object
 *
 * Sets up the ww_mutex and seqcount (both tied to the global
 * reservation lock/seqcount classes) and starts with no exclusive
 * and no shared fences attached.
 */
void dma_resv_init(struct dma_resv *obj)
{
	ww_mutex_init(&obj->lock, &reservation_ww_class);

	__seqcount_init(&obj->seq, reservation_seqcount_string,
			&reservation_seqcount_class);
	RCU_INIT_POINTER(obj->fence, NULL);
	RCU_INIT_POINTER(obj->fence_excl, NULL);
}
EXPORT_SYMBOL(dma_resv_init);
8735f168
CK
153
154/**
52791eee 155 * dma_resv_fini - destroys a reservation object
8735f168
CK
156 * @obj: the reservation object
157 */
52791eee 158void dma_resv_fini(struct dma_resv *obj)
8735f168 159{
52791eee 160 struct dma_resv_list *fobj;
8735f168
CK
161 struct dma_fence *excl;
162
163 /*
164 * This object should be dead and all references must have
165 * been released to it, so no need to be protected with rcu.
166 */
167 excl = rcu_dereference_protected(obj->fence_excl, 1);
168 if (excl)
169 dma_fence_put(excl);
170
171 fobj = rcu_dereference_protected(obj->fence, 1);
52791eee 172 dma_resv_list_free(fobj);
8735f168
CK
173 ww_mutex_destroy(&obj->lock);
174}
52791eee 175EXPORT_SYMBOL(dma_resv_fini);
8735f168 176
/**
 * dma_resv_reserve_shared - Reserve space to add shared fences to
 * a dma_resv.
 * @obj: reservation object
 * @num_fences: number of fences we want to add
 *
 * Should be called before dma_resv_add_shared_fence(). Must
 * be called with obj->lock held.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
{
	struct dma_resv_list *old, *new;
	unsigned int i, j, k, max;

	dma_resv_assert_held(obj);

	old = dma_resv_get_list(obj);

	if (old && old->shared_max) {
		/* enough room already? */
		if ((old->shared_count + num_fences) <= old->shared_max)
			return 0;
		else
			/* grow geometrically to amortize reallocations */
			max = max(old->shared_count + num_fences,
				  old->shared_max * 2);
	} else {
		max = 4;
	}

	new = dma_resv_list_alloc(max);
	if (!new)
		return -ENOMEM;

	/*
	 * no need to bump fence refcounts, rcu_read access
	 * requires the use of kref_get_unless_zero, and the
	 * references from the old struct are carried over to
	 * the new.
	 */
	/*
	 * Partition the old entries: still-active fences are packed at the
	 * front (index j counts up), already-signaled ones at the back
	 * (index k counts down) so they can be released below.
	 */
	for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(old->shared[i],
						  dma_resv_held(obj));
		if (dma_fence_is_signaled(fence))
			RCU_INIT_POINTER(new->shared[--k], fence);
		else
			RCU_INIT_POINTER(new->shared[j++], fence);
	}
	new->shared_count = j;

	/*
	 * We are not changing the effective set of fences here so can
	 * merely update the pointer to the new array; both existing
	 * readers and new readers will see exactly the same set of
	 * active (unsignaled) shared fences. Individual fences and the
	 * old array are protected by RCU and so will not vanish under
	 * the gaze of the rcu_read_lock() readers.
	 */
	rcu_assign_pointer(obj->fence, new);

	if (!old)
		return 0;

	/* Drop the references to the signaled fences */
	for (i = k; i < max; ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(new->shared[i],
						  dma_resv_held(obj));
		dma_fence_put(fence);
	}
	/* old array may still be in use by RCU readers; free after grace period */
	kfree_rcu(old, rcu);

	return 0;
}
EXPORT_SYMBOL(dma_resv_reserve_shared);
04a5faa8 256
/**
 * dma_resv_add_shared_fence - Add a fence to a shared slot
 * @obj: the reservation object
 * @fence: the shared fence to add
 *
 * Add a fence to a shared slot, obj->lock must be held, and
 * dma_resv_reserve_shared() has been called. A fence from the same
 * context (or an already-signaled one) is replaced in place rather
 * than appended.
 */
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_resv_list *fobj;
	struct dma_fence *old;
	unsigned int i, count;

	dma_fence_get(fence);

	dma_resv_assert_held(obj);

	fobj = dma_resv_get_list(obj);
	count = fobj->shared_count;

	preempt_disable();
	write_seqcount_begin(&obj->seq);

	/* try to recycle a slot: same fence context or already signaled */
	for (i = 0; i < count; ++i) {

		old = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj));
		if (old->context == fence->context ||
		    dma_fence_is_signaled(old))
			goto replace;
	}

	/* no reusable slot; append (reserve step guarantees capacity) */
	BUG_ON(fobj->shared_count >= fobj->shared_max);
	old = NULL;
	count++;

replace:
	RCU_INIT_POINTER(fobj->shared[i], fence);
	/* pointer update must be visible before we extend the shared_count */
	smp_store_mb(fobj->shared_count, count);

	write_seqcount_end(&obj->seq);
	preempt_enable();
	/* NULL-safe; drops the replaced fence's reference, if any */
	dma_fence_put(old);
}
EXPORT_SYMBOL(dma_resv_add_shared_fence);
04a5faa8 304
/**
 * dma_resv_add_excl_fence - Add an exclusive fence.
 * @obj: the reservation object
 * @fence: the exclusive fence to add (may be NULL to clear)
 *
 * Add a fence to the exclusive slot. The obj->lock must be held.
 * Installing an exclusive fence also drops all shared fences.
 */
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_fence *old_fence = dma_resv_get_excl(obj);
	struct dma_resv_list *old;
	u32 i = 0;

	dma_resv_assert_held(obj);

	old = dma_resv_get_list(obj);
	if (old)
		i = old->shared_count;

	if (fence)
		dma_fence_get(fence);

	preempt_disable();
	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	if (old)
		old->shared_count = 0;
	write_seqcount_end(&obj->seq);
	preempt_enable();

	/* inplace update, no shared fences */
	while (i--)
		dma_fence_put(rcu_dereference_protected(old->shared[i],
							dma_resv_held(obj)));

	/* NULL-safe; releases the previous exclusive fence */
	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(dma_resv_add_excl_fence);
3c3b177a 344
/**
 * dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from src to dst. dst->lock must be held; src is read
 * locklessly under RCU, retrying if it changes underneath us.
 * Returns 0 on success or -ENOMEM.
 */
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
	struct dma_resv_list *src_list, *dst_list;
	struct dma_fence *old, *new;
	unsigned i;

	dma_resv_assert_held(dst);

	rcu_read_lock();
	src_list = rcu_dereference(src->fence);

retry:
	if (src_list) {
		unsigned shared_count = src_list->shared_count;

		/* must drop RCU to allocate with GFP_KERNEL */
		rcu_read_unlock();

		dst_list = dma_resv_list_alloc(shared_count);
		if (!dst_list)
			return -ENOMEM;

		rcu_read_lock();
		/* re-check: src may have grown while we allocated */
		src_list = rcu_dereference(src->fence);
		if (!src_list || src_list->shared_count > shared_count) {
			kfree(dst_list);
			goto retry;
		}

		dst_list->shared_count = 0;
		for (i = 0; i < src_list->shared_count; ++i) {
			struct dma_fence *fence;

			fence = rcu_dereference(src_list->shared[i]);
			/* skip fences that are already signaled */
			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &fence->flags))
				continue;

			/* fence being torn down concurrently: start over */
			if (!dma_fence_get_rcu(fence)) {
				dma_resv_list_free(dst_list);
				src_list = rcu_dereference(src->fence);
				goto retry;
			}

			if (dma_fence_is_signaled(fence)) {
				dma_fence_put(fence);
				continue;
			}

			rcu_assign_pointer(dst_list->shared[dst_list->shared_count++], fence);
		}
	} else {
		dst_list = NULL;
	}

	/* grabs a reference even if the exclusive fence is being replaced */
	new = dma_fence_get_rcu_safe(&src->fence_excl);
	rcu_read_unlock();

	src_list = dma_resv_get_list(dst);
	old = dma_resv_get_excl(dst);

	preempt_disable();
	write_seqcount_begin(&dst->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(dst->fence_excl, new);
	RCU_INIT_POINTER(dst->fence, dst_list);
	write_seqcount_end(&dst->seq);
	preempt_enable();

	/* release dst's previous fences, now replaced */
	dma_resv_list_free(src_list);
	dma_fence_put(old);

	return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);
7faf952a 426
/**
 * dma_resv_get_fences_rcu - Get an object's shared and exclusive
 * fences without update side lock held
 * @obj: the reservation object
 * @pfence_excl: the returned exclusive fence (or NULL)
 * @pshared_count: the number of shared fences returned
 * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
 * the required size, and must be freed by caller)
 *
 * Retrieve all fences from the reservation object. If the pointer for the
 * exclusive fence is not specified the fence is put into the array of the
 * shared fences as well. Returns either zero or -ENOMEM.
 *
 * The caller owns the returned references and the @pshared array.
 */
int dma_resv_get_fences_rcu(struct dma_resv *obj,
			    struct dma_fence **pfence_excl,
			    unsigned *pshared_count,
			    struct dma_fence ***pshared)
{
	struct dma_fence **shared = NULL;
	struct dma_fence *fence_excl;
	unsigned int shared_count;
	int ret = 1;

	/* seqcount-validated snapshot loop; retries while ret != 0 */
	do {
		struct dma_resv_list *fobj;
		unsigned int i, seq;
		size_t sz = 0;

		shared_count = i = 0;

		rcu_read_lock();
		seq = read_seqcount_begin(&obj->seq);

		fence_excl = rcu_dereference(obj->fence_excl);
		if (fence_excl && !dma_fence_get_rcu(fence_excl))
			goto unlock;

		fobj = rcu_dereference(obj->fence);
		if (fobj)
			sz += sizeof(*shared) * fobj->shared_max;

		/* reserve one extra slot to fold the exclusive fence in */
		if (!pfence_excl && fence_excl)
			sz += sizeof(*shared);

		if (sz) {
			struct dma_fence **nshared;

			/* atomic attempt first; we are inside rcu_read_lock */
			nshared = krealloc(shared, sz,
					   GFP_NOWAIT | __GFP_NOWARN);
			if (!nshared) {
				rcu_read_unlock();

				dma_fence_put(fence_excl);
				fence_excl = NULL;

				/* sleep-allowed retry outside the RCU section */
				nshared = krealloc(shared, sz, GFP_KERNEL);
				if (nshared) {
					shared = nshared;
					continue;
				}

				ret = -ENOMEM;
				break;
			}
			shared = nshared;
			shared_count = fobj ? fobj->shared_count : 0;
			for (i = 0; i < shared_count; ++i) {
				shared[i] = rcu_dereference(fobj->shared[i]);
				if (!dma_fence_get_rcu(shared[i]))
					break;
			}
		}

		/* a fence died or the object changed: drop refs and retry */
		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
			while (i--)
				dma_fence_put(shared[i]);
			dma_fence_put(fence_excl);
			goto unlock;
		}

		ret = 0;
unlock:
		rcu_read_unlock();
	} while (ret);

	if (pfence_excl)
		*pfence_excl = fence_excl;
	else if (fence_excl)
		shared[shared_count++] = fence_excl;

	if (!shared_count) {
		kfree(shared);
		shared = NULL;
	}

	*pshared_count = shared_count;
	*pshared = shared;
	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);
3c3b177a 527
/**
 * dma_resv_wait_timeout_rcu - Wait on reservation's objects
 * shared and/or exclusive fences.
 * @obj: the reservation object
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
			       bool wait_all, bool intr,
			       unsigned long timeout)
{
	struct dma_fence *fence;
	unsigned seq, shared_count;
	/* zero timeout means "poll": report success as 1 jiffy remaining */
	long ret = timeout ? timeout : 1;
	int i;

retry:
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();
	i = -1;

	/* pick the exclusive fence first, if it is still unsignaled */
	fence = rcu_dereference(obj->fence_excl);
	if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		if (!dma_fence_get_rcu(fence))
			goto unlock_retry;

		if (dma_fence_is_signaled(fence)) {
			dma_fence_put(fence);
			fence = NULL;
		}

	} else {
		fence = NULL;
	}

	if (wait_all) {
		struct dma_resv_list *fobj = rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		/* find the first unsignaled shared fence to wait on */
		for (i = 0; !fence && i < shared_count; ++i) {
			struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);

			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &lfence->flags))
				continue;

			if (!dma_fence_get_rcu(lfence))
				goto unlock_retry;

			if (dma_fence_is_signaled(lfence)) {
				dma_fence_put(lfence);
				continue;
			}

			fence = lfence;
			break;
		}
	}

	rcu_read_unlock();
	if (fence) {
		/* snapshot invalidated by a concurrent update: retry */
		if (read_seqcount_retry(&obj->seq, seq)) {
			dma_fence_put(fence);
			goto retry;
		}

		ret = dma_fence_wait_timeout(fence, intr, ret);
		dma_fence_put(fence);
		/* more shared fences may remain; loop with remaining timeout */
		if (ret > 0 && wait_all && (i + 1 < shared_count))
			goto retry;
	}
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu);
3c3b177a
ML
614
615
52791eee 616static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
3c3b177a 617{
f54d1867 618 struct dma_fence *fence, *lfence = passed_fence;
3c3b177a
ML
619 int ret = 1;
620
f54d1867
CW
621 if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
622 fence = dma_fence_get_rcu(lfence);
3c3b177a
ML
623 if (!fence)
624 return -1;
625
f54d1867
CW
626 ret = !!dma_fence_is_signaled(fence);
627 dma_fence_put(fence);
3c3b177a
ML
628 }
629 return ret;
630}
631
/**
 * dma_resv_test_signaled_rcu - Test if a reservation object's
 * fences have been signaled.
 * @obj: the reservation object
 * @test_all: if true, test all fences, otherwise only test the exclusive
 * fence
 *
 * RETURNS
 * true if all fences signaled, else false
 */
bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
{
	unsigned seq, shared_count;
	int ret;

	rcu_read_lock();
retry:
	ret = true;
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);

	if (test_all) {
		unsigned i;

		struct dma_resv_list *fobj = rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);

			ret = dma_resv_test_signaled_single(fence);
			/* -1: fence vanished under us, retry the snapshot */
			if (ret < 0)
				goto retry;
			else if (!ret)
				break;
		}

		if (read_seqcount_retry(&obj->seq, seq))
			goto retry;
	}

	/* no shared fences considered: fall back to the exclusive fence */
	if (!shared_count) {
		struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);

		if (fence_excl) {
			ret = dma_resv_test_signaled_single(fence_excl);
			if (ret < 0)
				goto retry;

			if (read_seqcount_retry(&obj->seq, seq))
				goto retry;
		}
	}

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);