/* SPDX-License-Identifier: GPL-2.0 */
/*
 * The contents of this file are private to DMA engine drivers, and is not
 * part of the API to be used by DMA engine users.
 */
#ifndef DMAENGINE_H
#define DMAENGINE_H

#include <linux/bug.h>
#include <linux/dmaengine.h>

d3ee98cd RKAL |
12 | /** |
13 | * dma_cookie_init - initialize the cookies for a DMA channel | |
14 | * @chan: dma channel to initialize | |
15 | */ | |
16 | static inline void dma_cookie_init(struct dma_chan *chan) | |
17 | { | |
18 | chan->cookie = DMA_MIN_COOKIE; | |
19 | chan->completed_cookie = DMA_MIN_COOKIE; | |
20 | } | |
21 | ||
884485e1 RKAL |
22 | /** |
23 | * dma_cookie_assign - assign a DMA engine cookie to the descriptor | |
24 | * @tx: descriptor needing cookie | |
25 | * | |
26 | * Assign a unique non-zero per-channel cookie to the descriptor. | |
27 | * Note: caller is expected to hold a lock to prevent concurrency. | |
28 | */ | |
29 | static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx) | |
30 | { | |
31 | struct dma_chan *chan = tx->chan; | |
32 | dma_cookie_t cookie; | |
33 | ||
34 | cookie = chan->cookie + 1; | |
35 | if (cookie < DMA_MIN_COOKIE) | |
36 | cookie = DMA_MIN_COOKIE; | |
37 | tx->cookie = chan->cookie = cookie; | |
38 | ||
39 | return cookie; | |
40 | } | |
41 | ||
f7fbce07 RKAL |
42 | /** |
43 | * dma_cookie_complete - complete a descriptor | |
44 | * @tx: descriptor to complete | |
45 | * | |
46 | * Mark this descriptor complete by updating the channels completed | |
47 | * cookie marker. Zero the descriptors cookie to prevent accidental | |
48 | * repeated completions. | |
49 | * | |
50 | * Note: caller is expected to hold a lock to prevent concurrency. | |
51 | */ | |
52 | static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx) | |
53 | { | |
54 | BUG_ON(tx->cookie < DMA_MIN_COOKIE); | |
55 | tx->chan->completed_cookie = tx->cookie; | |
56 | tx->cookie = 0; | |
57 | } | |
58 | ||
96a2af41 RKAL |
59 | /** |
60 | * dma_cookie_status - report cookie status | |
61 | * @chan: dma channel | |
62 | * @cookie: cookie we are interested in | |
63 | * @state: dma_tx_state structure to return last/used cookies | |
64 | * | |
65 | * Report the status of the cookie, filling in the state structure if | |
66 | * non-NULL. No locking is required. | |
67 | */ | |
68 | static inline enum dma_status dma_cookie_status(struct dma_chan *chan, | |
69 | dma_cookie_t cookie, struct dma_tx_state *state) | |
70 | { | |
71 | dma_cookie_t used, complete; | |
72 | ||
73 | used = chan->cookie; | |
74 | complete = chan->completed_cookie; | |
75 | barrier(); | |
76 | if (state) { | |
77 | state->last = complete; | |
78 | state->used = used; | |
79 | state->residue = 0; | |
6755ec06 | 80 | state->in_flight_bytes = 0; |
96a2af41 RKAL |
81 | } |
82 | return dma_async_is_complete(cookie, complete, used); | |
83 | } | |
84 | ||
85 | static inline void dma_set_residue(struct dma_tx_state *state, u32 residue) | |
86 | { | |
87 | if (state) | |
88 | state->residue = residue; | |
89 | } | |
90 | ||
6755ec06 PU |
91 | static inline void dma_set_in_flight_bytes(struct dma_tx_state *state, |
92 | u32 in_flight_bytes) | |
93 | { | |
94 | if (state) | |
95 | state->in_flight_bytes = in_flight_bytes; | |
96 | } | |
97 | ||
/**
 * struct dmaengine_desc_callback - snapshot of a descriptor's callback info
 * @callback: legacy completion callback (no result reporting)
 * @callback_result: completion callback that also receives a dmaengine_result
 * @callback_param: opaque pointer handed back to whichever callback runs
 *
 * Filled from a tx descriptor by dmaengine_desc_get_callback() so the
 * callback can later be invoked via dmaengine_desc_callback_invoke().
 */
struct dmaengine_desc_callback {
	dma_async_tx_callback callback;
	dma_async_tx_callback_result callback_result;
	void *callback_param;
};
103 | ||
104 | /** | |
105 | * dmaengine_desc_get_callback - get the passed in callback function | |
106 | * @tx: tx descriptor | |
107 | * @cb: temp struct to hold the callback info | |
108 | * | |
109 | * Fill the passed in cb struct with what's available in the passed in | |
110 | * tx descriptor struct | |
111 | * No locking is required. | |
112 | */ | |
113 | static inline void | |
114 | dmaengine_desc_get_callback(struct dma_async_tx_descriptor *tx, | |
115 | struct dmaengine_desc_callback *cb) | |
116 | { | |
117 | cb->callback = tx->callback; | |
f067025b | 118 | cb->callback_result = tx->callback_result; |
f083f557 DJ |
119 | cb->callback_param = tx->callback_param; |
120 | } | |
121 | ||
122 | /** | |
123 | * dmaengine_desc_callback_invoke - call the callback function in cb struct | |
124 | * @cb: temp struct that is holding the callback info | |
f067025b | 125 | * @result: transaction result |
f083f557 DJ |
126 | * |
127 | * Call the callback function provided in the cb struct with the parameter | |
128 | * in the cb struct. | |
129 | * Locking is dependent on the driver. | |
130 | */ | |
131 | static inline void | |
132 | dmaengine_desc_callback_invoke(struct dmaengine_desc_callback *cb, | |
f067025b | 133 | const struct dmaengine_result *result) |
f083f557 | 134 | { |
f067025b DJ |
135 | struct dmaengine_result dummy_result = { |
136 | .result = DMA_TRANS_NOERROR, | |
137 | .residue = 0 | |
138 | }; | |
139 | ||
140 | if (cb->callback_result) { | |
141 | if (!result) | |
142 | result = &dummy_result; | |
143 | cb->callback_result(cb->callback_param, result); | |
144 | } else if (cb->callback) { | |
f083f557 | 145 | cb->callback(cb->callback_param); |
f067025b | 146 | } |
f083f557 DJ |
147 | } |
148 | ||
149 | /** | |
150 | * dmaengine_desc_get_callback_invoke - get the callback in tx descriptor and | |
151 | * then immediately call the callback. | |
152 | * @tx: dma async tx descriptor | |
f067025b | 153 | * @result: transaction result |
f083f557 DJ |
154 | * |
155 | * Call dmaengine_desc_get_callback() and dmaengine_desc_callback_invoke() | |
156 | * in a single function since no work is necessary in between for the driver. | |
157 | * Locking is dependent on the driver. | |
158 | */ | |
159 | static inline void | |
160 | dmaengine_desc_get_callback_invoke(struct dma_async_tx_descriptor *tx, | |
f067025b | 161 | const struct dmaengine_result *result) |
f083f557 DJ |
162 | { |
163 | struct dmaengine_desc_callback cb; | |
164 | ||
165 | dmaengine_desc_get_callback(tx, &cb); | |
166 | dmaengine_desc_callback_invoke(&cb, result); | |
167 | } | |
168 | ||
169 | /** | |
170 | * dmaengine_desc_callback_valid - verify the callback is valid in cb | |
171 | * @cb: callback info struct | |
172 | * | |
173 | * Return a bool that verifies whether callback in cb is valid or not. | |
174 | * No locking is required. | |
175 | */ | |
176 | static inline bool | |
177 | dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb) | |
178 | { | |
179 | return (cb->callback) ? true : false; | |
180 | } | |
181 | ||
c3c431de GU |
182 | struct dma_chan *dma_get_slave_channel(struct dma_chan *chan); |
183 | struct dma_chan *dma_get_any_slave_channel(struct dma_device *device); | |
184 | ||
26cf132d PU |
185 | #ifdef CONFIG_DEBUG_FS |
186 | #include <linux/debugfs.h> | |
187 | ||
188 | static inline struct dentry * | |
189 | dmaengine_get_debugfs_root(struct dma_device *dma_dev) { | |
190 | return dma_dev->dbg_dev_root; | |
191 | } | |
192 | #else | |
193 | struct dentry; | |
194 | static inline struct dentry * | |
195 | dmaengine_get_debugfs_root(struct dma_device *dma_dev) | |
196 | { | |
197 | return NULL; | |
198 | } | |
199 | #endif /* CONFIG_DEBUG_FS */ | |
200 | ||
d2ebfb33 | 201 | #endif |