drivers/dma-buf/dma-fence-chain.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * fence-chain: chain fences together in a timeline
 *
 * Copyright (C) 2018 Advanced Micro Devices, Inc.
 * Authors:
 *	Christian König <christian.koenig@amd.com>
 */

#include <linux/dma-fence-chain.h>

static bool dma_fence_chain_enable_signaling(struct dma_fence *fence);

/**
 * dma_fence_chain_get_prev - use RCU to get a reference to the previous fence
 * @chain: chain node to get the previous node from
 *
 * Use dma_fence_get_rcu_safe to get a reference to the previous fence of the
 * chain node.
 */
static struct dma_fence *dma_fence_chain_get_prev(struct dma_fence_chain *chain)
{
	struct dma_fence *prev;

	rcu_read_lock();
	prev = dma_fence_get_rcu_safe(&chain->prev);
	rcu_read_unlock();
	return prev;
}

/**
 * dma_fence_chain_walk - chain walking function
 * @fence: current chain node
 *
 * Walk the chain to the next node. Returns the next fence or NULL if we are at
 * the end of the chain. Garbage collects chain nodes which are already
 * signaled.
 */
struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence)
{
	struct dma_fence_chain *chain, *prev_chain;
	struct dma_fence *prev, *replacement, *tmp;

	chain = to_dma_fence_chain(fence);
	if (!chain) {
		dma_fence_put(fence);
		return NULL;
	}

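	/*
	 * If the previous node is already signaled, drop it from the
	 * chain and retry with the node before it; signaled nodes are
	 * garbage collected this way as the chain is walked.
	 */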
	while ((prev = dma_fence_chain_get_prev(chain))) {

		prev_chain = to_dma_fence_chain(prev);
		if (prev_chain) {
			if (!dma_fence_is_signaled(prev_chain->fence))
				break;

			replacement = dma_fence_chain_get_prev(prev_chain);
		} else {
			if (!dma_fence_is_signaled(prev))
				break;

			replacement = NULL;
		}

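		/*
		 * Atomically replace the reference to the signaled prev
		 * node. If someone else swapped chain->prev meanwhile,
		 * the cmpxchg fails and we drop our replacement instead.
		 */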
		tmp = cmpxchg((void **)&chain->prev, (void *)prev, (void *)replacement);
		if (tmp == prev)
			dma_fence_put(tmp);
		else
			dma_fence_put(replacement);
		dma_fence_put(prev);
	}

	dma_fence_put(fence);
	return prev;
}
EXPORT_SYMBOL(dma_fence_chain_walk);
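
/*
 * Example: dma_fence_chain_walk() is normally used through the
 * dma_fence_chain_for_each() helper from <linux/dma-fence-chain.h>.
 * A minimal sketch of visiting every fence in a chain, following the
 * same pattern as dma_fence_chain_signaled() below ("iter" and "head"
 * are illustrative names):
 *
 *	struct dma_fence *iter;
 *
 *	dma_fence_chain_for_each(iter, head) {
 *		struct dma_fence_chain *node = to_dma_fence_chain(iter);
 *		struct dma_fence *f = node ? node->fence : iter;
 *
 *		pr_info("seqno %llu on context %llu\n", f->seqno, f->context);
 *	}
 */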

/**
 * dma_fence_chain_find_seqno - find fence chain node by seqno
 * @pfence: pointer to the chain node where to start
 * @seqno: the sequence number to search for
 *
 * Advance the fence pointer to the chain node which will signal this sequence
 * number. If no sequence number is provided then this is a no-op.
 *
 * Returns -EINVAL if the fence is not a chain node or the sequence number has
 * not yet advanced far enough.
 */
int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno)
{
	struct dma_fence_chain *chain;

	if (!seqno)
		return 0;

	chain = to_dma_fence_chain(*pfence);
	if (!chain || chain->base.seqno < seqno)
		return -EINVAL;

	dma_fence_chain_for_each(*pfence, &chain->base) {
		if ((*pfence)->context != chain->base.context ||
		    to_dma_fence_chain(*pfence)->prev_seqno < seqno)
			break;
	}
	dma_fence_put(&chain->base);

	return 0;
}
EXPORT_SYMBOL(dma_fence_chain_find_seqno);
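
/*
 * Example: a timeline user can wait for a specific point by advancing a
 * chain pointer to the node responsible for it. A minimal sketch,
 * assuming "timeline" holds a reference to the chain head ("timeline"
 * and the literal point 42 are illustrative; error unwinding omitted):
 *
 *	struct dma_fence *fence = dma_fence_get(timeline);
 *
 *	if (!dma_fence_chain_find_seqno(&fence, 42) && fence) {
 *		dma_fence_wait(fence, true);
 *		dma_fence_put(fence);
 *	}
 */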

static const char *dma_fence_chain_get_driver_name(struct dma_fence *fence)
{
	return "dma_fence_chain";
}

static const char *dma_fence_chain_get_timeline_name(struct dma_fence *fence)
{
	return "unbound";
}

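/*
 * Rearming the chain is deferred to irq_work context: the fence
 * callback below runs from the signaling path of the previous fence,
 * where taking further fence locks could recurse or deadlock.
 */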
static void dma_fence_chain_irq_work(struct irq_work *work)
{
	struct dma_fence_chain *chain;

	chain = container_of(work, typeof(*chain), work);

	/* Try to rearm the callback */
	if (!dma_fence_chain_enable_signaling(&chain->base))
		/* Ok, we are done. No more unsignaled fences left */
		dma_fence_signal(&chain->base);
	dma_fence_put(&chain->base);
}

static void dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct dma_fence_chain *chain;

	chain = container_of(cb, typeof(*chain), cb);
	irq_work_queue(&chain->work);
	dma_fence_put(f);
}

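/*
 * Walk the chain and install a callback on the first fence which is
 * not signaled yet. Returns true if such a fence was found and the
 * callback armed, false if every fence in the chain already signaled.
 */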
static bool dma_fence_chain_enable_signaling(struct dma_fence *fence)
{
	struct dma_fence_chain *head = to_dma_fence_chain(fence);

	dma_fence_get(&head->base);
	dma_fence_chain_for_each(fence, &head->base) {
		struct dma_fence_chain *chain = to_dma_fence_chain(fence);
		struct dma_fence *f = chain ? chain->fence : fence;

		dma_fence_get(f);
		if (!dma_fence_add_callback(f, &head->cb, dma_fence_chain_cb)) {
			dma_fence_put(fence);
			return true;
		}
		dma_fence_put(f);
	}
	dma_fence_put(&head->base);
	return false;
}

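/*
 * A chain node only counts as signaled when its own fence and every
 * fence further down the chain have signaled.
 */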
static bool dma_fence_chain_signaled(struct dma_fence *fence)
{
	dma_fence_chain_for_each(fence, fence) {
		struct dma_fence_chain *chain = to_dma_fence_chain(fence);
		struct dma_fence *f = chain ? chain->fence : fence;

		if (!dma_fence_is_signaled(f)) {
			dma_fence_put(fence);
			return false;
		}
	}

	return true;
}

static void dma_fence_chain_release(struct dma_fence *fence)
{
	struct dma_fence_chain *chain = to_dma_fence_chain(fence);
	struct dma_fence *prev;

	/* Manually unlink the chain as much as possible to avoid recursion
	 * and potential stack overflow.
	 */
	while ((prev = rcu_dereference_protected(chain->prev, true))) {
		struct dma_fence_chain *prev_chain;

		if (kref_read(&prev->refcount) > 1)
			break;

		prev_chain = to_dma_fence_chain(prev);
		if (!prev_chain)
			break;

		/* No need for atomic operations since we hold the last
		 * reference to prev_chain.
		 */
		chain->prev = prev_chain->prev;
		RCU_INIT_POINTER(prev_chain->prev, NULL);
		dma_fence_put(prev);
	}
	dma_fence_put(prev);

	dma_fence_put(chain->fence);
	dma_fence_free(fence);
}

const struct dma_fence_ops dma_fence_chain_ops = {
	.use_64bit_seqno = true,
	.get_driver_name = dma_fence_chain_get_driver_name,
	.get_timeline_name = dma_fence_chain_get_timeline_name,
	.enable_signaling = dma_fence_chain_enable_signaling,
	.signaled = dma_fence_chain_signaled,
	.release = dma_fence_chain_release,
};
EXPORT_SYMBOL(dma_fence_chain_ops);

/**
 * dma_fence_chain_init - initialize a fence chain
 * @chain: the chain node to initialize
 * @prev: the previous fence
 * @fence: the current fence
 * @seqno: the sequence number to use for the fence chain
 *
 * Initialize a new chain node and either start a new chain or add the node to
 * the existing chain of the previous fence.
 */
void dma_fence_chain_init(struct dma_fence_chain *chain,
			  struct dma_fence *prev,
			  struct dma_fence *fence,
			  uint64_t seqno)
{
	struct dma_fence_chain *prev_chain = to_dma_fence_chain(prev);
	uint64_t context;

	spin_lock_init(&chain->lock);
	rcu_assign_pointer(chain->prev, prev);
	chain->fence = fence;
	chain->prev_seqno = 0;
	init_irq_work(&chain->work, dma_fence_chain_irq_work);

	/* Try to reuse the context of the previous chain node. */
	if (prev_chain && __dma_fence_is_later(seqno, prev->seqno, prev->ops)) {
		context = prev->context;
		chain->prev_seqno = prev->seqno;
	} else {
		context = dma_fence_context_alloc(1);
		/* Make sure that we always have a valid sequence number. */
		if (prev_chain)
			seqno = max(prev->seqno, seqno);
	}

	dma_fence_init(&chain->base, &dma_fence_chain_ops,
		       &chain->lock, context, seqno);
}
EXPORT_SYMBOL(dma_fence_chain_init);
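
/*
 * Example: building a two-node timeline. A minimal sketch, assuming the
 * caller owns references to fences "f1" and "f2" (illustrative names;
 * allocation error handling omitted). As the code above shows, the chain
 * node takes over the references passed in for @prev and @fence, so the
 * caller must not put them afterwards:
 *
 *	struct dma_fence_chain *n1 = kzalloc(sizeof(*n1), GFP_KERNEL);
 *	struct dma_fence_chain *n2 = kzalloc(sizeof(*n2), GFP_KERNEL);
 *
 *	dma_fence_chain_init(n1, NULL, f1, 1);
 *	dma_fence_chain_init(n2, &n1->base, f2, 2);
 */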