Commit | Line | Data |
---|---|---|
1802d0be | 1 | // SPDX-License-Identifier: GPL-2.0-only |
b3dfbdf2 | 2 | /* |
f54d1867 | 3 | * dma-fence-array: aggregate fences to be waited together |
b3dfbdf2 GP |
4 | * |
5 | * Copyright (C) 2016 Collabora Ltd | |
6 | * Copyright (C) 2016 Advanced Micro Devices, Inc. | |
7 | * Authors: | |
8 | * Gustavo Padovan <gustavo@padovan.org> | |
9 | * Christian König <christian.koenig@amd.com> | |
b3dfbdf2 GP |
10 | */ |
11 | ||
12 | #include <linux/export.h> | |
13 | #include <linux/slab.h> | |
f54d1867 | 14 | #include <linux/dma-fence-array.h> |
b3dfbdf2 | 15 | |
1f70b8b8 CW |
16 | #define PENDING_ERROR 1 |
17 | ||
/* &dma_fence_ops.get_driver_name callback: one fixed name for all arrays. */
static const char *dma_fence_array_get_driver_name(struct dma_fence *fence)
{
	return "dma_fence_array";
}
22 | ||
/*
 * &dma_fence_ops.get_timeline_name callback: an array aggregates fences from
 * arbitrary contexts, so it is not bound to any single timeline.
 */
static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence)
{
	return "unbound";
}
27 | ||
static void dma_fence_array_set_pending_error(struct dma_fence_array *array,
					      int error)
{
	/*
	 * Propagate the first error reported by any of our fences, but only
	 * before we ourselves are signaled.  base.error is initialized to the
	 * PENDING_ERROR placeholder in dma_fence_array_create(), so this
	 * cmpxchg() only succeeds for the first non-zero error; later errors
	 * (and anything after the placeholder has been cleared) are ignored.
	 */
	if (error)
		cmpxchg(&array->base.error, PENDING_ERROR, error);
}
38 | ||
static void dma_fence_array_clear_pending_error(struct dma_fence_array *array)
{
	/*
	 * Clear the error flag if not actually set: if no fence reported an
	 * error, base.error still holds the PENDING_ERROR placeholder, which
	 * must not leak out of the array as a real error code.
	 */
	cmpxchg(&array->base.error, PENDING_ERROR, 0);
}
44 | ||
/*
 * Deferred-signal worker, queued by dma_fence_array_cb_func() once the last
 * pending fence has completed.  Runs from irq_work context.
 */
static void irq_dma_fence_array_work(struct irq_work *wrk)
{
	struct dma_fence_array *array = container_of(wrk, typeof(*array), work);

	/* Drop the PENDING_ERROR placeholder before the error becomes visible. */
	dma_fence_array_clear_pending_error(array);

	dma_fence_signal(&array->base);
	/* Drop the reference taken for this callback in enable_signaling(). */
	dma_fence_put(&array->base);
}
54 | ||
/*
 * Per-fence completion callback: record the fence's error and, once the last
 * pending fence completes, hand signaling of the array off to irq_work
 * (presumably because fence callbacks may run in atomic context — see the
 * irq_work usage; TODO confirm against dma_fence_add_callback() docs).
 */
static void dma_fence_array_cb_func(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct dma_fence_array_cb *array_cb =
		container_of(cb, struct dma_fence_array_cb, cb);
	struct dma_fence_array *array = array_cb->array;

	/* First error wins; see dma_fence_array_set_pending_error(). */
	dma_fence_array_set_pending_error(array, f->error);

	/*
	 * Last completion: the irq_work signals the array and drops the
	 * reference taken for this callback.  Otherwise drop that reference
	 * ourselves right here.
	 */
	if (atomic_dec_and_test(&array->num_pending))
		irq_work_queue(&array->work);
	else
		dma_fence_put(&array->base);
}
69 | ||
/*
 * &dma_fence_ops.enable_signaling callback: install a completion callback on
 * every aggregated fence.  Returns false if everything has already signaled,
 * true if the array is still waiting on at least one fence.
 */
static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);
	/* The callback slots were allocated directly behind the array struct
	 * in dma_fence_array_create(). */
	struct dma_fence_array_cb *cb = (void *)(&array[1]);
	unsigned i;

	for (i = 0; i < array->num_fences; ++i) {
		cb[i].array = array;
		/*
		 * As we may report that the fence is signaled before all
		 * callbacks are complete, we need to take an additional
		 * reference count on the array so that we do not free it too
		 * early. The core fence handling will only hold the reference
		 * until we signal the array as complete (but that is now
		 * insufficient).
		 */
		dma_fence_get(&array->base);
		if (dma_fence_add_callback(array->fences[i], &cb[i].cb,
					   dma_fence_array_cb_func)) {
			/* Callback not installed — fence already signaled;
			 * account for its error and completion inline. */
			int error = array->fences[i]->error;

			dma_fence_array_set_pending_error(array, error);
			/* No callback pending, so drop its reference now. */
			dma_fence_put(&array->base);
			if (atomic_dec_and_test(&array->num_pending)) {
				/*
				 * Everything already completed: clear the
				 * PENDING_ERROR placeholder and tell the core
				 * no signaling needs to be enabled.
				 */
				dma_fence_array_clear_pending_error(array);
				return false;
			}
		}
	}

	return true;
}
102 | ||
/*
 * &dma_fence_ops.signaled callback: the array counts as signaled once no
 * aggregated fences remain pending.
 */
static bool dma_fence_array_signaled(struct dma_fence *fence)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);

	if (atomic_read(&array->num_pending) > 0)
		return false;

	/* Don't let the PENDING_ERROR placeholder escape as a real error. */
	dma_fence_array_clear_pending_error(array);
	return true;
}
113 | ||
f54d1867 | 114 | static void dma_fence_array_release(struct dma_fence *fence) |
b3dfbdf2 | 115 | { |
f54d1867 | 116 | struct dma_fence_array *array = to_dma_fence_array(fence); |
b3dfbdf2 GP |
117 | unsigned i; |
118 | ||
119 | for (i = 0; i < array->num_fences; ++i) | |
f54d1867 | 120 | dma_fence_put(array->fences[i]); |
b3dfbdf2 GP |
121 | |
122 | kfree(array->fences); | |
f54d1867 | 123 | dma_fence_free(fence); |
b3dfbdf2 GP |
124 | } |
125 | ||
691fdba3 RC |
126 | static void dma_fence_array_set_deadline(struct dma_fence *fence, |
127 | ktime_t deadline) | |
128 | { | |
129 | struct dma_fence_array *array = to_dma_fence_array(fence); | |
130 | unsigned i; | |
131 | ||
132 | for (i = 0; i < array->num_fences; ++i) | |
133 | dma_fence_set_deadline(array->fences[i], deadline); | |
134 | } | |
135 | ||
/* Fence operations shared by every dma_fence_array instance. */
const struct dma_fence_ops dma_fence_array_ops = {
	.get_driver_name = dma_fence_array_get_driver_name,
	.get_timeline_name = dma_fence_array_get_timeline_name,
	.enable_signaling = dma_fence_array_enable_signaling,
	.signaled = dma_fence_array_signaled,
	.release = dma_fence_array_release,
	.set_deadline = dma_fence_array_set_deadline,
};
EXPORT_SYMBOL(dma_fence_array_ops);
b3dfbdf2 GP |
145 | |
146 | /** | |
f54d1867 | 147 | * dma_fence_array_create - Create a custom fence array |
f7104568 CK |
148 | * @num_fences: [in] number of fences to add in the array |
149 | * @fences: [in] array containing the fences | |
150 | * @context: [in] fence context to use | |
151 | * @seqno: [in] sequence number to use | |
68acb6af | 152 | * @signal_on_any: [in] signal on any fence in the array |
b3dfbdf2 | 153 | * |
f54d1867 CW |
154 | * Allocate a dma_fence_array object and initialize the base fence with |
155 | * dma_fence_init(). | |
b3dfbdf2 GP |
156 | * In case of error it returns NULL. |
157 | * | |
68acb6af | 158 | * The caller should allocate the fences array with num_fences size |
b3dfbdf2 | 159 | * and fill it with the fences it wants to add to the object. Ownership of this |
f54d1867 | 160 | * array is taken and dma_fence_put() is used on each fence on release. |
f7104568 CK |
161 | * |
162 | * If @signal_on_any is true the fence array signals if any fence in the array | |
163 | * signals, otherwise it signals when all fences in the array signal. | |
b3dfbdf2 | 164 | */ |
f54d1867 CW |
165 | struct dma_fence_array *dma_fence_array_create(int num_fences, |
166 | struct dma_fence **fences, | |
167 | u64 context, unsigned seqno, | |
168 | bool signal_on_any) | |
b3dfbdf2 | 169 | { |
f54d1867 | 170 | struct dma_fence_array *array; |
b3dfbdf2 GP |
171 | size_t size = sizeof(*array); |
172 | ||
c42ee39c CK |
173 | WARN_ON(!num_fences || !fences); |
174 | ||
b3dfbdf2 | 175 | /* Allocate the callback structures behind the array. */ |
f54d1867 | 176 | size += num_fences * sizeof(struct dma_fence_array_cb); |
b3dfbdf2 GP |
177 | array = kzalloc(size, GFP_KERNEL); |
178 | if (!array) | |
179 | return NULL; | |
180 | ||
181 | spin_lock_init(&array->lock); | |
f54d1867 CW |
182 | dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock, |
183 | context, seqno); | |
03e4e0a9 | 184 | init_irq_work(&array->work, irq_dma_fence_array_work); |
b3dfbdf2 GP |
185 | |
186 | array->num_fences = num_fences; | |
f7104568 | 187 | atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences); |
b3dfbdf2 GP |
188 | array->fences = fences; |
189 | ||
1f70b8b8 CW |
190 | array->base.error = PENDING_ERROR; |
191 | ||
0fd9803b CK |
192 | /* |
193 | * dma_fence_array objects should never contain any other fence | |
194 | * containers or otherwise we run into recursion and potential kernel | |
195 | * stack overflow on operations on the dma_fence_array. | |
196 | * | |
197 | * The correct way of handling this is to flatten out the array by the | |
198 | * caller instead. | |
199 | * | |
200 | * Enforce this here by checking that we don't create a dma_fence_array | |
201 | * with any container inside. | |
202 | */ | |
203 | while (num_fences--) | |
204 | WARN_ON(dma_fence_is_container(fences[num_fences])); | |
205 | ||
b3dfbdf2 GP |
206 | return array; |
207 | } | |
f54d1867 | 208 | EXPORT_SYMBOL(dma_fence_array_create); |
d5b72a21 PZ |
209 | |
210 | /** | |
211 | * dma_fence_match_context - Check if all fences are from the given context | |
212 | * @fence: [in] fence or fence array | |
213 | * @context: [in] fence context to check all fences against | |
214 | * | |
215 | * Checks the provided fence or, for a fence array, all fences in the array | |
216 | * against the given context. Returns false if any fence is from a different | |
217 | * context. | |
218 | */ | |
219 | bool dma_fence_match_context(struct dma_fence *fence, u64 context) | |
220 | { | |
221 | struct dma_fence_array *array = to_dma_fence_array(fence); | |
222 | unsigned i; | |
223 | ||
224 | if (!dma_fence_is_array(fence)) | |
225 | return fence->context == context; | |
226 | ||
227 | for (i = 0; i < array->num_fences; i++) { | |
228 | if (array->fences[i]->context != context) | |
229 | return false; | |
230 | } | |
231 | ||
232 | return true; | |
233 | } | |
234 | EXPORT_SYMBOL(dma_fence_match_context); | |
caaf2ae7 CK |
235 | |
236 | struct dma_fence *dma_fence_array_first(struct dma_fence *head) | |
237 | { | |
238 | struct dma_fence_array *array; | |
239 | ||
240 | if (!head) | |
241 | return NULL; | |
242 | ||
243 | array = to_dma_fence_array(head); | |
244 | if (!array) | |
245 | return head; | |
246 | ||
c42ee39c CK |
247 | if (!array->num_fences) |
248 | return NULL; | |
249 | ||
caaf2ae7 CK |
250 | return array->fences[0]; |
251 | } | |
252 | EXPORT_SYMBOL(dma_fence_array_first); | |
253 | ||
254 | struct dma_fence *dma_fence_array_next(struct dma_fence *head, | |
255 | unsigned int index) | |
256 | { | |
257 | struct dma_fence_array *array = to_dma_fence_array(head); | |
258 | ||
259 | if (!array || index >= array->num_fences) | |
260 | return NULL; | |
261 | ||
262 | return array->fences[index]; | |
263 | } | |
264 | EXPORT_SYMBOL(dma_fence_array_next); |