Merge tag 'exfat-for-5.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/linkin...
[linux-2.6-block.git] / drivers / dma-buf / dma-resv.c
CommitLineData
786d7257 1/*
04a5faa8 2 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
786d7257
ML
3 *
4 * Based on bo.c which bears the following copyright notice,
5 * but is dual licensed:
6 *
7 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
8 * All Rights Reserved.
9 *
10 * Permission is hereby granted, free of charge, to any person obtaining a
11 * copy of this software and associated documentation files (the
12 * "Software"), to deal in the Software without restriction, including
13 * without limitation the rights to use, copy, modify, merge, publish,
14 * distribute, sub license, and/or sell copies of the Software, and to
15 * permit persons to whom the Software is furnished to do so, subject to
16 * the following conditions:
17 *
18 * The above copyright notice and this permission notice (including the
19 * next paragraph) shall be included in all copies or substantial portions
20 * of the Software.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
25 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
26 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
27 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
28 * USE OR OTHER DEALINGS IN THE SOFTWARE.
29 *
30 **************************************************************************/
31/*
32 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
33 */
34
52791eee 35#include <linux/dma-resv.h>
786d7257 36#include <linux/export.h>
0adf65f5 37#include <linux/mm.h>
b2a8116e 38#include <linux/sched/mm.h>
d0b9a9ae 39#include <linux/mmu_notifier.h>
786d7257 40
dad6c394
RC
41/**
42 * DOC: Reservation Object Overview
43 *
44 * The reservation object provides a mechanism to manage shared and
45 * exclusive fences associated with a buffer. A reservation object
46 * can have attached one exclusive fence (normally associated with
47 * write operations) or N shared fences (read operations). The RCU
48 * mechanism is used to protect read access to fences from locked
49 * write-side updates.
50 */
51
/* Global wound/wait mutex class shared by every dma_resv lock; using one
 * class lets the ww_mutex machinery resolve multi-object lock ordering.
 */
DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);
04a5faa8 54
/**
 * dma_resv_list_alloc - allocate fence list
 * @shared_max: number of fences we need space for
 *
 * Allocate a new dma_resv_list and make sure to correctly initialize
 * shared_max.
 *
 * Returns the new list, or NULL on allocation failure. The caller owns
 * the list and must release it with dma_resv_list_free().
 */
static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
{
	struct dma_resv_list *list;

	list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL);
	if (!list)
		return NULL;

	/*
	 * kmalloc() may round the allocation up to the next bucket size;
	 * derive shared_max from the size actually allocated (ksize) so
	 * any slack is usable for additional fence slots for free.
	 */
	list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
		sizeof(*list->shared);

	return list;
}
75
/**
 * dma_resv_list_free - free fence list
 * @list: list to free (may be NULL, in which case this is a no-op)
 *
 * Free a dma_resv_list and make sure to drop all references.
 */
static void dma_resv_list_free(struct dma_resv_list *list)
{
	unsigned int i;

	if (!list)
		return;

	/*
	 * Passing 'true' as the protection condition: the list is no longer
	 * reachable by readers, so a plain dereference is safe here.
	 */
	for (i = 0; i < list->shared_count; ++i)
		dma_fence_put(rcu_dereference_protected(list->shared[i], true));

	/* Defer the actual free until after any in-flight RCU readers. */
	kfree_rcu(list, rcu);
}
94
#if IS_ENABLED(CONFIG_LOCKDEP)
/**
 * dma_resv_lockdep - boot-time priming of lockdep dependencies
 *
 * Acquire the relevant locks once in the canonical nesting order
 * (mmap_lock -> dma_resv -> fs_reclaim -> fence wait) so that lockdep
 * learns these dependencies up front and can flag violations in drivers
 * without needing the actual contention to ever occur.
 *
 * Returns 0, or -ENOMEM if the dummy mm cannot be allocated.
 */
static int __init dma_resv_lockdep(void)
{
	struct mm_struct *mm = mm_alloc();
	struct ww_acquire_ctx ctx;
	struct dma_resv obj;
	int ret;

	if (!mm)
		return -ENOMEM;

	dma_resv_init(&obj);

	mmap_read_lock(mm);
	ww_acquire_init(&ctx, &reservation_ww_class);
	ret = dma_resv_lock(&obj, &ctx);
	if (ret == -EDEADLK)
		dma_resv_lock_slow(&obj, &ctx);
	/* Teach lockdep that memory reclaim may run while dma_resv is held. */
	fs_reclaim_acquire(GFP_KERNEL);
#ifdef CONFIG_MMU_NOTIFIER
	/* Record that fence waits can happen inside mmu notifier callbacks. */
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	__dma_fence_might_wait();
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
#else
	__dma_fence_might_wait();
#endif
	fs_reclaim_release(GFP_KERNEL);
	ww_mutex_unlock(&obj.lock);
	ww_acquire_fini(&ctx);
	mmap_read_unlock(mm);

	mmput(mm);

	return 0;
}
subsys_initcall(dma_resv_lockdep);
#endif
132
/**
 * dma_resv_init - initialize a reservation object
 * @obj: the reservation object
 */
void dma_resv_init(struct dma_resv *obj)
{
	ww_mutex_init(&obj->lock, &reservation_ww_class);
	/* seqcount is associated with obj->lock for lockdep validation. */
	seqcount_ww_mutex_init(&obj->seq, &obj->lock);

	/* Start with no fences attached; RCU readers see NULL pointers. */
	RCU_INIT_POINTER(obj->fence, NULL);
	RCU_INIT_POINTER(obj->fence_excl, NULL);
}
EXPORT_SYMBOL(dma_resv_init);
8735f168
CK
146
/**
 * dma_resv_fini - destroys a reservation object
 * @obj: the reservation object
 *
 * Drops the exclusive and all shared fence references and destroys the
 * lock. Must only be called once no other references to @obj remain.
 */
void dma_resv_fini(struct dma_resv *obj)
{
	struct dma_resv_list *fobj;
	struct dma_fence *excl;

	/*
	 * This object should be dead and all references must have
	 * been released to it, so no need to be protected with rcu.
	 */
	excl = rcu_dereference_protected(obj->fence_excl, 1);
	if (excl)
		dma_fence_put(excl);

	fobj = rcu_dereference_protected(obj->fence, 1);
	dma_resv_list_free(fobj);
	ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(dma_resv_fini);
8735f168 169
/**
 * dma_resv_reserve_shared - Reserve space to add shared fences to
 * a dma_resv.
 * @obj: reservation object
 * @num_fences: number of fences we want to add
 *
 * Should be called before dma_resv_add_shared_fence(). Must
 * be called with obj->lock held.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
{
	struct dma_resv_list *old, *new;
	unsigned int i, j, k, max;

	dma_resv_assert_held(obj);

	old = dma_resv_get_list(obj);

	if (old && old->shared_max) {
		/* Enough free slots already? Then nothing to do. */
		if ((old->shared_count + num_fences) <= old->shared_max)
			return 0;
		else
			/* Grow geometrically to amortize reallocations. */
			max = max(old->shared_count + num_fences,
				  old->shared_max * 2);
	} else {
		max = 4;
	}

	new = dma_resv_list_alloc(max);
	if (!new)
		return -ENOMEM;

	/*
	 * no need to bump fence refcounts, rcu_read access
	 * requires the use of kref_get_unless_zero, and the
	 * references from the old struct are carried over to
	 * the new.
	 *
	 * Fill the new array from both ends: unsignaled fences from the
	 * front (j), already-signaled ones from the back (k) so they can
	 * be dropped below without another scan.
	 */
	for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(old->shared[i],
						  dma_resv_held(obj));
		if (dma_fence_is_signaled(fence))
			RCU_INIT_POINTER(new->shared[--k], fence);
		else
			RCU_INIT_POINTER(new->shared[j++], fence);
	}
	new->shared_count = j;

	/*
	 * We are not changing the effective set of fences here so can
	 * merely update the pointer to the new array; both existing
	 * readers and new readers will see exactly the same set of
	 * active (unsignaled) shared fences. Individual fences and the
	 * old array are protected by RCU and so will not vanish under
	 * the gaze of the rcu_read_lock() readers.
	 */
	rcu_assign_pointer(obj->fence, new);

	if (!old)
		return 0;

	/* Drop the references to the signaled fences */
	for (i = k; i < max; ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(new->shared[i],
						  dma_resv_held(obj));
		dma_fence_put(fence);
	}
	kfree_rcu(old, rcu);

	return 0;
}
EXPORT_SYMBOL(dma_resv_reserve_shared);
04a5faa8 249
/**
 * dma_resv_add_shared_fence - Add a fence to a shared slot
 * @obj: the reservation object
 * @fence: the shared fence to add
 *
 * Add a fence to a shared slot, obj->lock must be held, and
 * dma_resv_reserve_shared() has been called.
 *
 * Takes a reference on @fence. An existing fence from the same context
 * (or an already signaled one) is replaced and its reference dropped;
 * otherwise the fence is appended to a reserved slot.
 */
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_resv_list *fobj;
	struct dma_fence *old;
	unsigned int i, count;

	dma_fence_get(fence);

	dma_resv_assert_held(obj);

	fobj = dma_resv_get_list(obj);
	count = fobj->shared_count;

	write_seqcount_begin(&obj->seq);

	/* Prefer replacing a fence from the same context or one that has
	 * already signaled, rather than consuming a fresh slot.
	 */
	for (i = 0; i < count; ++i) {

		old = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj));
		if (old->context == fence->context ||
		    dma_fence_is_signaled(old))
			goto replace;
	}

	/* Appending: a slot must have been reserved beforehand. */
	BUG_ON(fobj->shared_count >= fobj->shared_max);
	old = NULL;
	count++;

replace:
	RCU_INIT_POINTER(fobj->shared[i], fence);
	/* pointer update must be visible before we extend the shared_count */
	smp_store_mb(fobj->shared_count, count);

	write_seqcount_end(&obj->seq);
	/* dma_fence_put(NULL) is a no-op on the append path. */
	dma_fence_put(old);
}
EXPORT_SYMBOL(dma_resv_add_shared_fence);
04a5faa8 295
/**
 * dma_resv_add_excl_fence - Add an exclusive fence.
 * @obj: the reservation object
 * @fence: the exclusive fence to add (may be NULL to clear the slot)
 *
 * Add a fence to the exclusive slot. The obj->lock must be held.
 *
 * Replaces any previous exclusive fence and drops all shared fences;
 * references to the replaced fences are released.
 */
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_fence *old_fence = dma_resv_get_excl(obj);
	struct dma_resv_list *old;
	u32 i = 0;

	dma_resv_assert_held(obj);

	old = dma_resv_get_list(obj);
	if (old)
		i = old->shared_count;

	if (fence)
		dma_fence_get(fence);

	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	if (old)
		old->shared_count = 0;
	write_seqcount_end(&obj->seq);

	/* inplace update, no shared fences */
	while (i--)
		dma_fence_put(rcu_dereference_protected(old->shared[i],
							dma_resv_held(obj)));

	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(dma_resv_add_excl_fence);
3c3b177a 333
/**
 * dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from src to dst. dst->lock must be held.
 *
 * Already-signaled shared fences of @src are skipped. Returns 0 on
 * success or -ENOMEM if the destination fence list cannot be allocated.
 */
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
	struct dma_resv_list *src_list, *dst_list;
	struct dma_fence *old, *new;
	unsigned i;

	dma_resv_assert_held(dst);

	rcu_read_lock();
	src_list = rcu_dereference(src->fence);

retry:
	if (src_list) {
		unsigned shared_count = src_list->shared_count;

		/* Must drop RCU to allocate; revalidate src afterwards. */
		rcu_read_unlock();

		dst_list = dma_resv_list_alloc(shared_count);
		if (!dst_list)
			return -ENOMEM;

		rcu_read_lock();
		src_list = rcu_dereference(src->fence);
		/* src changed while unlocked and no longer fits: retry. */
		if (!src_list || src_list->shared_count > shared_count) {
			kfree(dst_list);
			goto retry;
		}

		dst_list->shared_count = 0;
		for (i = 0; i < src_list->shared_count; ++i) {
			struct dma_fence *fence;

			fence = rcu_dereference(src_list->shared[i]);
			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &fence->flags))
				continue;

			/* Fence is being freed concurrently: start over. */
			if (!dma_fence_get_rcu(fence)) {
				dma_resv_list_free(dst_list);
				src_list = rcu_dereference(src->fence);
				goto retry;
			}

			if (dma_fence_is_signaled(fence)) {
				dma_fence_put(fence);
				continue;
			}

			rcu_assign_pointer(dst_list->shared[dst_list->shared_count++], fence);
		}
	} else {
		dst_list = NULL;
	}

	new = dma_fence_get_rcu_safe(&src->fence_excl);
	rcu_read_unlock();

	/* Grab dst's current fences so we can release them afterwards. */
	src_list = dma_resv_get_list(dst);
	old = dma_resv_get_excl(dst);

	write_seqcount_begin(&dst->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(dst->fence_excl, new);
	RCU_INIT_POINTER(dst->fence, dst_list);
	write_seqcount_end(&dst->seq);

	dma_resv_list_free(src_list);
	dma_fence_put(old);

	return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);
7faf952a 413
/**
 * dma_resv_get_fences_rcu - Get an object's shared and exclusive
 * fences without update side lock held
 * @obj: the reservation object
 * @pfence_excl: the returned exclusive fence (or NULL)
 * @pshared_count: the number of shared fences returned
 * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
 * the required size, and must be freed by caller)
 *
 * Retrieve all fences from the reservation object. If the pointer for the
 * exclusive fence is not specified the fence is put into the array of the
 * shared fences as well. Returns either zero or -ENOMEM.
 */
int dma_resv_get_fences_rcu(struct dma_resv *obj,
			    struct dma_fence **pfence_excl,
			    unsigned *pshared_count,
			    struct dma_fence ***pshared)
{
	struct dma_fence **shared = NULL;
	struct dma_fence *fence_excl;
	unsigned int shared_count;
	int ret = 1;

	/* Lockless snapshot loop: repeat until a consistent set of fences
	 * was captured under one seqcount read section.
	 */
	do {
		struct dma_resv_list *fobj;
		unsigned int i, seq;
		size_t sz = 0;

		shared_count = i = 0;

		rcu_read_lock();
		seq = read_seqcount_begin(&obj->seq);

		fence_excl = rcu_dereference(obj->fence_excl);
		if (fence_excl && !dma_fence_get_rcu(fence_excl))
			goto unlock;

		fobj = rcu_dereference(obj->fence);
		if (fobj)
			sz += sizeof(*shared) * fobj->shared_max;

		/* Reserve one extra slot to return the exclusive fence in
		 * the shared array when the caller didn't ask for it
		 * separately.
		 */
		if (!pfence_excl && fence_excl)
			sz += sizeof(*shared);

		if (sz) {
			struct dma_fence **nshared;

			/* First try without sleeping, as we are inside an
			 * RCU read-side section.
			 */
			nshared = krealloc(shared, sz,
					   GFP_NOWAIT | __GFP_NOWARN);
			if (!nshared) {
				rcu_read_unlock();

				dma_fence_put(fence_excl);
				fence_excl = NULL;

				/* Retry the allocation outside RCU where
				 * GFP_KERNEL may sleep; then restart the
				 * snapshot from scratch.
				 */
				nshared = krealloc(shared, sz, GFP_KERNEL);
				if (nshared) {
					shared = nshared;
					continue;
				}

				ret = -ENOMEM;
				break;
			}
			shared = nshared;
			shared_count = fobj ? fobj->shared_count : 0;
			for (i = 0; i < shared_count; ++i) {
				shared[i] = rcu_dereference(fobj->shared[i]);
				if (!dma_fence_get_rcu(shared[i]))
					break;
			}
		}

		/* A failed ref grab or a concurrent writer invalidates the
		 * snapshot: drop everything collected and retry.
		 */
		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
			while (i--)
				dma_fence_put(shared[i]);
			dma_fence_put(fence_excl);
			goto unlock;
		}

		ret = 0;
unlock:
		rcu_read_unlock();
	} while (ret);

	if (pfence_excl)
		*pfence_excl = fence_excl;
	else if (fence_excl)
		shared[shared_count++] = fence_excl;

	if (!shared_count) {
		kfree(shared);
		shared = NULL;
	}

	*pshared_count = shared_count;
	*pshared = shared;
	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);
3c3b177a 514
/**
 * dma_resv_wait_timeout_rcu - Wait on reservation's objects
 * shared and/or exclusive fences.
 * @obj: the reservation object
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
			       bool wait_all, bool intr,
			       unsigned long timeout)
{
	struct dma_fence *fence;
	unsigned seq, shared_count;
	long ret = timeout ? timeout : 1;
	int i;

retry:
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();
	i = -1;

	/* Pick the exclusive fence first, if it is still unsignaled. */
	fence = rcu_dereference(obj->fence_excl);
	if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		if (!dma_fence_get_rcu(fence))
			goto unlock_retry;

		if (dma_fence_is_signaled(fence)) {
			dma_fence_put(fence);
			fence = NULL;
		}

	} else {
		fence = NULL;
	}

	if (wait_all) {
		struct dma_resv_list *fobj = rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		/* Find the first unsignaled shared fence to wait on, unless
		 * the exclusive fence was already selected above.
		 */
		for (i = 0; !fence && i < shared_count; ++i) {
			struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);

			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &lfence->flags))
				continue;

			if (!dma_fence_get_rcu(lfence))
				goto unlock_retry;

			if (dma_fence_is_signaled(lfence)) {
				dma_fence_put(lfence);
				continue;
			}

			fence = lfence;
			break;
		}
	}

	rcu_read_unlock();
	if (fence) {
		/* Fence set changed while we were selecting: start over. */
		if (read_seqcount_retry(&obj->seq, seq)) {
			dma_fence_put(fence);
			goto retry;
		}

		/* Wait one fence at a time; remaining timeout carries over. */
		ret = dma_fence_wait_timeout(fence, intr, ret);
		dma_fence_put(fence);
		if (ret > 0 && wait_all && (i + 1 < shared_count))
			goto retry;
	}
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu);
3c3b177a
ML
601
602
/*
 * dma_resv_test_signaled_single - test one fence under RCU
 * @passed_fence: fence to test
 *
 * Returns 1 if signaled, 0 if not, or -1 when the fence's refcount could
 * not be taken (it is being freed concurrently) and the caller must retry.
 */
static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
{
	struct dma_fence *fence, *lfence = passed_fence;
	int ret = 1;

	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
		fence = dma_fence_get_rcu(lfence);
		if (!fence)
			return -1;

		ret = !!dma_fence_is_signaled(fence);
		dma_fence_put(fence);
	}
	return ret;
}
618
/**
 * dma_resv_test_signaled_rcu - Test if a reservation object's
 * fences have been signaled.
 * @obj: the reservation object
 * @test_all: if true, test all fences, otherwise only test the exclusive
 * fence
 *
 * RETURNS
 * true if all fences signaled, else false
 */
bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
{
	unsigned seq, shared_count;
	int ret;

	rcu_read_lock();
retry:
	ret = true;
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);

	if (test_all) {
		unsigned i;

		struct dma_resv_list *fobj = rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);

			/* < 0 means the fence vanished under us: retry. */
			ret = dma_resv_test_signaled_single(fence);
			if (ret < 0)
				goto retry;
			else if (!ret)
				break;
		}

		if (read_seqcount_retry(&obj->seq, seq))
			goto retry;
	}

	/* Only consult the exclusive fence when there were no shared ones;
	 * otherwise the shared fences already cover the exclusive work.
	 */
	if (!shared_count) {
		struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);

		if (fence_excl) {
			ret = dma_resv_test_signaled_single(fence_excl);
			if (ret < 0)
				goto retry;

			if (read_seqcount_retry(&obj->seq, seq))
				goto retry;
		}
	}

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);