Commit | Line | Data |
---|---|---|
2874c5fd | 1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ |
7f98639d PO |
2 | /* |
3 | * Memory-to-memory device framework for Video for Linux 2. | |
4 | * | |
5 | * Helper functions for devices that use memory buffers for both source | |
6 | * and destination. | |
7 | * | |
8 | * Copyright (c) 2009 Samsung Electronics Co., Ltd. | |
95072084 | 9 | * Pawel Osciak, <pawel@osciak.com> |
7f98639d | 10 | * Marek Szyprowski, <m.szyprowski@samsung.com> |
7f98639d PO |
11 | */ |
12 | ||
13 | #ifndef _MEDIA_V4L2_MEM2MEM_H | |
14 | #define _MEDIA_V4L2_MEM2MEM_H | |
15 | ||
c139990e | 16 | #include <media/videobuf2-v4l2.h> |
7f98639d PO |
17 | |
18 | /** | |
19 | * struct v4l2_m2m_ops - mem-to-mem device driver callbacks | |
20 | * @device_run: required. Begin the actual job (transaction) inside this | |
21 | * callback. | |
22 | * The job does NOT have to end before this callback returns | |
23 | * (and it will be the usual case). When the job finishes, | |
24 | * v4l2_m2m_job_finish() has to be called. | |
25 | * @job_ready: optional. Should return 0 if the driver does not have a job | |
26 | * fully prepared to run yet (i.e. it will not be able to finish a | |
27 | * transaction without sleeping). If not provided, it will be | |
28 | * assumed that one source and one destination buffer are all | |
29 | * that is required for the driver to perform one full transaction. | |
30 | * This method may not sleep. | |
5525b831 | 31 | * @job_abort: optional. Informs the driver that it has to abort the currently |
7f98639d PO |
32 | * running transaction as soon as possible (i.e. as soon as it can |
33 | * stop the device safely; e.g. in the next interrupt handler), | |
34 | * even if the transaction would not have been finished by then. | |
35 | * After the driver performs the necessary steps, it has to call | |
36 | * v4l2_m2m_job_finish() (as if the transaction ended normally). | |
37 | * This function does not have to (and will usually not) wait | |
38 | * until the device enters a state when it can be stopped. | |
39 | */ | |
struct v4l2_m2m_ops {
	/* required: begin the job; completion is signalled asynchronously
	 * via v4l2_m2m_job_finish() */
	void (*device_run)(void *priv);
	/* optional: return nonzero when a full transaction can run without
	 * sleeping; defaults to "one src + one dst buffer queued" */
	int (*job_ready)(void *priv);
	/* optional: ask the driver to stop the running transaction ASAP;
	 * the driver must still call v4l2_m2m_job_finish() */
	void (*job_abort)(void *priv);
};
45 | ||
be2fff65 | 46 | struct video_device; |
7f98639d PO |
47 | struct v4l2_m2m_dev; |
48 | ||
5fa5edbe MCC |
49 | /** |
50 | * struct v4l2_m2m_queue_ctx - represents a queue for buffers ready to be | |
51 | * processed | |
52 | * | |
53 | * @q: pointer to struct &vb2_queue | |
54 | * @rdy_queue: List of V4L2 mem-to-mem queues | |
55 | * @rdy_spinlock: spin lock to protect the struct usage | |
56 | * @num_rdy: number of buffers ready to be processed | |
57 | * @buffered: is the queue buffered? | |
58 | * | |
59 | * Queue for buffers ready to be processed as soon as this | |
60 | * instance receives access to the device. | |
61 | */ | |
62 | ||
struct v4l2_m2m_queue_ctx {
	struct vb2_queue q;

	/* list of buffers ready to be processed */
	struct list_head rdy_queue;
	/* protects rdy_queue/num_rdy */
	spinlock_t rdy_spinlock;
	/* number of buffers on rdy_queue */
	u8 num_rdy;
	/* if true, the instance may be scheduled without buffers queued
	 * on this queue (e.g. a HW ringbuffer that must be drained) */
	bool buffered;
};
71 | ||
5fa5edbe MCC |
72 | /** |
73 | * struct v4l2_m2m_ctx - Memory to memory context structure | |
74 | * | |
75 | * @q_lock: struct &mutex lock | |
9f8d3a2e | 76 | * @m2m_dev: opaque pointer to the internal data to handle M2M context |
5fa5edbe MCC |
77 | * @cap_q_ctx: Capture (output to memory) queue context |
78 | * @out_q_ctx: Output (input from memory) queue context | |
79 | * @queue: List of memory to memory contexts | |
80 | * @job_flags: Job queue flags, used internally by v4l2-mem2mem.c: | |
81 | * %TRANS_QUEUED, %TRANS_RUNNING and %TRANS_ABORT. | |
 82 | * @finished: Wait queue used to signal when a queued job has finished. |
83 | * @priv: Instance private data | |
708f48e7 SA |
84 | * |
85 | * The memory to memory context is specific to a file handle, NOT to e.g. | |
86 | * a device. | |
5fa5edbe | 87 | */ |
struct v4l2_m2m_ctx {
	/* optional cap/out vb2 queues lock */
	struct mutex *q_lock;

	/* internal use only: opaque M2M device handle */
	struct v4l2_m2m_dev *m2m_dev;

	/* Capture (output to memory) queue context */
	struct v4l2_m2m_queue_ctx cap_q_ctx;

	/* Output (input from memory) queue context */
	struct v4l2_m2m_queue_ctx out_q_ctx;

	/* For device job queue */
	struct list_head queue;
	/* TRANS_QUEUED / TRANS_RUNNING / TRANS_ABORT, internal to
	 * v4l2-mem2mem.c */
	unsigned long job_flags;
	/* waiters are woken when a queued job finishes */
	wait_queue_head_t finished;

	/* instance private data */
	void *priv;
};
106 | ||
5fa5edbe MCC |
107 | /** |
108 | * struct v4l2_m2m_buffer - Memory to memory buffer | |
109 | * | |
110 | * @vb: pointer to struct &vb2_v4l2_buffer | |
111 | * @list: list of m2m buffers | |
112 | */ | |
struct v4l2_m2m_buffer {
	struct vb2_v4l2_buffer vb;	/* embedded vb2 buffer */
	struct list_head list;		/* entry in a queue's rdy_queue */
};
117 | ||
4781646c MCC |
118 | /** |
119 | * v4l2_m2m_get_curr_priv() - return driver private data for the currently | |
120 | * running instance or NULL if no instance is running | |
dcbd8735 | 121 | * |
9f8d3a2e | 122 | * @m2m_dev: opaque pointer to the internal data to handle M2M context |
4781646c | 123 | */ |
7f98639d PO |
124 | void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev); |
125 | ||
4781646c MCC |
126 | /** |
127 | * v4l2_m2m_get_vq() - return vb2_queue for the given type | |
dcbd8735 MCC |
128 | * |
129 | * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx | |
130 | * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type | |
4781646c | 131 | */ |
908a0d7c | 132 | struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx, |
7f98639d PO |
133 | enum v4l2_buf_type type); |
134 | ||
4781646c MCC |
135 | /** |
136 | * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to | |
137 | * the pending job queue and add it if so. | |
dcbd8735 MCC |
138 | * |
139 | * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx | |
4781646c MCC |
140 | * |
141 | * There are three basic requirements an instance has to meet to be able to run: | |
142 | * 1) at least one source buffer has to be queued, | |
143 | * 2) at least one destination buffer has to be queued, | |
144 | * 3) streaming has to be on. | |
145 | * | |
146 | * If a queue is buffered (for example a decoder hardware ringbuffer that has | |
147 | * to be drained before doing streamoff), allow scheduling without v4l2 buffers | |
148 | * on that queue. | |
149 | * | |
150 | * There may also be additional, custom requirements. In such case the driver | |
151 | * should supply a custom callback (job_ready in v4l2_m2m_ops) that should | |
152 | * return 1 if the instance is ready. | |
153 | * An example of the above could be an instance that requires more than one | |
154 | * src/dst buffer per transaction. | |
155 | */ | |
1190a419 MO |
156 | void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx); |
157 | ||
4781646c MCC |
158 | /** |
159 | * v4l2_m2m_job_finish() - inform the framework that a job has been finished | |
160 | * and have it clean up | |
161 | * | |
9f8d3a2e | 162 | * @m2m_dev: opaque pointer to the internal data to handle M2M context |
dcbd8735 MCC |
163 | * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx |
164 | * | |
4781646c MCC |
165 | * Called by a driver to yield back the device after it has finished with it. |
166 | * Should be called as soon as possible after reaching a state which allows | |
167 | * other instances to take control of the device. | |
168 | * | |
5fa5edbe MCC |
169 | * This function has to be called only after &v4l2_m2m_ops->device_run |
170 | * callback has been called on the driver. To prevent recursion, it should | |
171 | * not be called directly from the &v4l2_m2m_ops->device_run callback though. | |
4781646c | 172 | */ |
7f98639d PO |
173 | void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev, |
174 | struct v4l2_m2m_ctx *m2m_ctx); | |
175 | ||
/**
 * v4l2_m2m_buf_done() - complete a m2m buffer with the given state
 *
 * @buf: pointer to the &struct vb2_v4l2_buffer to complete
 * @state: final buffer state, as defined by enum &vb2_buffer_state
 *
 * Thin wrapper that calls vb2_buffer_done() on the embedded vb2 buffer.
 */
static inline void
v4l2_m2m_buf_done(struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state)
{
	vb2_buffer_done(&buf->vb2_buf, state);
}
181 | ||
4781646c MCC |
182 | /** |
183 | * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer | |
dcbd8735 MCC |
184 | * |
185 | * @file: pointer to struct &file | |
186 | * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx | |
187 | * @reqbufs: pointer to struct &v4l2_requestbuffers | |
4781646c | 188 | */ |
7f98639d PO |
189 | int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
190 | struct v4l2_requestbuffers *reqbufs); | |
191 | ||
4781646c MCC |
192 | /** |
193 | * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer | |
194 | * | |
dcbd8735 MCC |
195 | * @file: pointer to struct &file |
196 | * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx | |
197 | * @buf: pointer to struct &v4l2_buffer | |
198 | * | |
4781646c MCC |
199 | * See v4l2_m2m_mmap() documentation for details. |
200 | */ | |
7f98639d PO |
201 | int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
202 | struct v4l2_buffer *buf); | |
203 | ||
4781646c MCC |
204 | /** |
205 | * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on | |
206 | * the type | |
dcbd8735 MCC |
207 | * |
208 | * @file: pointer to struct &file | |
209 | * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx | |
210 | * @buf: pointer to struct &v4l2_buffer | |
4781646c | 211 | */ |
7f98639d PO |
212 | int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
213 | struct v4l2_buffer *buf); | |
4781646c MCC |
214 | |
215 | /** | |
216 | * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on | |
217 | * the type | |
dcbd8735 MCC |
218 | * |
219 | * @file: pointer to struct &file | |
220 | * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx | |
221 | * @buf: pointer to struct &v4l2_buffer | |
4781646c | 222 | */ |
7f98639d PO |
223 | int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
224 | struct v4l2_buffer *buf); | |
4781646c MCC |
225 | |
226 | /** | |
227 | * v4l2_m2m_prepare_buf() - prepare a source or destination buffer, depending on | |
228 | * the type | |
dcbd8735 MCC |
229 | * |
230 | * @file: pointer to struct &file | |
231 | * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx | |
232 | * @buf: pointer to struct &v4l2_buffer | |
4781646c | 233 | */ |
e68cf471 HV |
234 | int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
235 | struct v4l2_buffer *buf); | |
4781646c MCC |
236 | |
237 | /** | |
238 | * v4l2_m2m_create_bufs() - create a source or destination buffer, depending | |
239 | * on the type | |
dcbd8735 MCC |
240 | * |
241 | * @file: pointer to struct &file | |
242 | * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx | |
243 | * @create: pointer to struct &v4l2_create_buffers | |
4781646c | 244 | */ |
8b94ca61 PZ |
245 | int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
246 | struct v4l2_create_buffers *create); | |
7f98639d | 247 | |
4781646c MCC |
248 | /** |
249 | * v4l2_m2m_expbuf() - export a source or destination buffer, depending on | |
250 | * the type | |
dcbd8735 MCC |
251 | * |
252 | * @file: pointer to struct &file | |
253 | * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx | |
254 | * @eb: pointer to struct &v4l2_exportbuffer | |
4781646c | 255 | */ |
83ae7c5a TS |
256 | int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
257 | struct v4l2_exportbuffer *eb); | |
258 | ||
4781646c MCC |
259 | /** |
260 | * v4l2_m2m_streamon() - turn on streaming for a video queue | |
dcbd8735 MCC |
261 | * |
262 | * @file: pointer to struct &file | |
263 | * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx | |
264 | * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type | |
4781646c | 265 | */ |
7f98639d PO |
266 | int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
267 | enum v4l2_buf_type type); | |
4781646c MCC |
268 | |
269 | /** | |
270 | * v4l2_m2m_streamoff() - turn off streaming for a video queue | |
dcbd8735 MCC |
271 | * |
272 | * @file: pointer to struct &file | |
273 | * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx | |
274 | * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type | |
4781646c | 275 | */ |
7f98639d PO |
276 | int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
277 | enum v4l2_buf_type type); | |
278 | ||
4781646c MCC |
279 | /** |
280 | * v4l2_m2m_poll() - poll replacement, for destination buffers only | |
281 | * | |
dcbd8735 MCC |
282 | * @file: pointer to struct &file |
283 | * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx | |
284 | * @wait: pointer to struct &poll_table_struct | |
285 | * | |
4781646c MCC |
286 | * Call from the driver's poll() function. Will poll both queues. If a buffer |
287 | * is available to dequeue (with dqbuf) from the source queue, this will | |
288 | * indicate that a non-blocking write can be performed, while read will be | |
289 | * returned in case of the destination queue. | |
290 | */ | |
c23e0cb8 | 291 | __poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
7f98639d PO |
292 | struct poll_table_struct *wait); |
293 | ||
4781646c MCC |
294 | /** |
295 | * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer | |
296 | * | |
dcbd8735 MCC |
297 | * @file: pointer to struct &file |
298 | * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx | |
299 | * @vma: pointer to struct &vm_area_struct | |
300 | * | |
4781646c MCC |
301 | * Call from driver's mmap() function. Will handle mmap() for both queues |
302 | * seamlessly for videobuffer, which will receive normal per-queue offsets and | |
303 | * proper videobuf queue pointers. The differentiation is made outside videobuf | |
304 | * by adding a predefined offset to buffers from one of the queues and | |
305 | * subtracting it before passing it back to videobuf. Only drivers (and | |
306 | * thus applications) receive modified offsets. | |
307 | */ | |
7f98639d PO |
308 | int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
309 | struct vm_area_struct *vma); | |
310 | ||
4781646c MCC |
311 | /** |
312 | * v4l2_m2m_init() - initialize per-driver m2m data | |
313 | * | |
dcbd8735 MCC |
314 | * @m2m_ops: pointer to struct v4l2_m2m_ops |
315 | * | |
5fa5edbe MCC |
316 | * Usually called from driver's ``probe()`` function. |
317 | * | |
318 | * Return: returns an opaque pointer to the internal data to handle M2M context | |
4781646c | 319 | */ |
b1252eb8 | 320 | struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops); |
4781646c | 321 | |
be2fff65 EG |
#if defined(CONFIG_MEDIA_CONTROLLER)
void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev);
int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
			struct video_device *vdev, int function);
#else
/*
 * Media-controller support compiled out: registration becomes a
 * successful no-op so callers need no #ifdefs of their own.
 */
static inline void
v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
{
}

static inline int
v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
		struct video_device *vdev, int function)
{
	return 0;
}
#endif
339 | ||
4781646c MCC |
340 | /** |
341 | * v4l2_m2m_release() - cleans up and frees a m2m_dev structure | |
342 | * | |
9f8d3a2e | 343 | * @m2m_dev: opaque pointer to the internal data to handle M2M context |
dcbd8735 | 344 | * |
5fa5edbe | 345 | * Usually called from driver's ``remove()`` function. |
4781646c | 346 | */ |
7f98639d PO |
347 | void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev); |
348 | ||
4781646c MCC |
349 | /** |
350 | * v4l2_m2m_ctx_init() - allocate and initialize a m2m context | |
dcbd8735 | 351 | * |
9f8d3a2e | 352 | * @m2m_dev: opaque pointer to the internal data to handle M2M context |
dcbd8735 MCC |
353 | * @drv_priv: driver's instance private data |
354 | * @queue_init: a callback for queue type-specific initialization function | |
355 | * to be used for initializing videobuf_queues | |
4781646c | 356 | * |
5fa5edbe | 357 | * Usually called from driver's ``open()`` function. |
4781646c | 358 | */ |
908a0d7c MS |
359 | struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev, |
360 | void *drv_priv, | |
361 | int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)); | |
362 | ||
33bdd5a8 PZ |
/*
 * Mark the source (output-to-device) queue as buffered: the instance may
 * then be scheduled even without queued source buffers (see
 * v4l2_m2m_try_schedule()).
 */
static inline void v4l2_m2m_set_src_buffered(struct v4l2_m2m_ctx *m2m_ctx,
					     bool buffered)
{
	m2m_ctx->out_q_ctx.buffered = buffered;
}
368 | ||
/*
 * Mark the destination (capture) queue as buffered: the instance may then
 * be scheduled even without queued destination buffers (see
 * v4l2_m2m_try_schedule()).
 */
static inline void v4l2_m2m_set_dst_buffered(struct v4l2_m2m_ctx *m2m_ctx,
					     bool buffered)
{
	m2m_ctx->cap_q_ctx.buffered = buffered;
}
374 | ||
4781646c MCC |
375 | /** |
376 | * v4l2_m2m_ctx_release() - release m2m context | |
377 | * | |
dcbd8735 MCC |
378 | * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx |
379 | * | |
4781646c MCC |
380 | * Usually called from driver's release() function. |
381 | */ | |
7f98639d PO |
382 | void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx); |
383 | ||
4781646c MCC |
384 | /** |
385 | * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list. | |
386 | * | |
dcbd8735 MCC |
387 | * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx |
388 | * @vbuf: pointer to struct &vb2_v4l2_buffer | |
389 | * | |
5fa5edbe | 390 | * Call from the &vb2_ops->buf_queue callback. |
4781646c | 391 | */ |
2d700715 JS |
392 | void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, |
393 | struct vb2_v4l2_buffer *vbuf); | |
7f98639d PO |
394 | |
395 | /** | |
396 | * v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready for | |
397 | * use | |
62c0d016 | 398 | * |
dcbd8735 | 399 | * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx |
7f98639d PO |
400 | */ |
401 | static inline | |
402 | unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx) | |
403 | { | |
961ae449 | 404 | return m2m_ctx->out_q_ctx.num_rdy; |
7f98639d PO |
405 | } |
406 | ||
407 | /** | |
e383ce07 | 408 | * v4l2_m2m_num_dst_bufs_ready() - return the number of destination buffers |
7f98639d | 409 | * ready for use |
62c0d016 | 410 | * |
dcbd8735 | 411 | * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx |
7f98639d PO |
412 | */ |
413 | static inline | |
414 | unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx) | |
415 | { | |
961ae449 | 416 | return m2m_ctx->cap_q_ctx.num_rdy; |
7f98639d PO |
417 | } |
418 | ||
4781646c MCC |
419 | /** |
420 | * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers | |
dcbd8735 MCC |
421 | * |
 422 | * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx |
4781646c | 423 | */ |
8dd22b28 | 424 | struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx); |
7f98639d PO |
425 | |
426 | /** | |
427 | * v4l2_m2m_next_src_buf() - return next source buffer from the list of ready | |
428 | * buffers | |
62c0d016 | 429 | * |
dcbd8735 | 430 | * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx |
7f98639d | 431 | */ |
8dd22b28 EG |
432 | static inline struct vb2_v4l2_buffer * |
433 | v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx) | |
7f98639d | 434 | { |
908a0d7c | 435 | return v4l2_m2m_next_buf(&m2m_ctx->out_q_ctx); |
7f98639d PO |
436 | } |
437 | ||
438 | /** | |
439 | * v4l2_m2m_next_dst_buf() - return next destination buffer from the list of | |
440 | * ready buffers | |
62c0d016 | 441 | * |
dcbd8735 | 442 | * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx |
7f98639d | 443 | */ |
8dd22b28 EG |
444 | static inline struct vb2_v4l2_buffer * |
445 | v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx) | |
7f98639d | 446 | { |
908a0d7c | 447 | return v4l2_m2m_next_buf(&m2m_ctx->cap_q_ctx); |
7f98639d PO |
448 | } |
449 | ||
ee1228cc HV |
450 | /** |
451 | * v4l2_m2m_last_buf() - return last buffer from the list of ready buffers | |
452 | * | |
 453 | * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx |
454 | */ | |
8dd22b28 | 455 | struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx); |
ee1228cc HV |
456 | |
457 | /** | |
 458 | * v4l2_m2m_last_src_buf() - return last source buffer from the list of |
459 | * ready buffers | |
460 | * | |
461 | * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx | |
462 | */ | |
8dd22b28 EG |
463 | static inline struct vb2_v4l2_buffer * |
464 | v4l2_m2m_last_src_buf(struct v4l2_m2m_ctx *m2m_ctx) | |
ee1228cc HV |
465 | { |
466 | return v4l2_m2m_last_buf(&m2m_ctx->out_q_ctx); | |
467 | } | |
468 | ||
469 | /** | |
470 | * v4l2_m2m_last_dst_buf() - return last destination buffer from the list of | |
471 | * ready buffers | |
472 | * | |
473 | * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx | |
474 | */ | |
8dd22b28 EG |
475 | static inline struct vb2_v4l2_buffer * |
476 | v4l2_m2m_last_dst_buf(struct v4l2_m2m_ctx *m2m_ctx) | |
ee1228cc HV |
477 | { |
478 | return v4l2_m2m_last_buf(&m2m_ctx->cap_q_ctx); | |
479 | } | |
480 | ||
d4987564 SV |
481 | /** |
482 | * v4l2_m2m_for_each_dst_buf() - iterate over a list of destination ready | |
483 | * buffers | |
484 | * | |
485 | * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx | |
486 | * @b: current buffer of type struct v4l2_m2m_buffer | |
487 | */ | |
488 | #define v4l2_m2m_for_each_dst_buf(m2m_ctx, b) \ | |
489 | list_for_each_entry(b, &m2m_ctx->cap_q_ctx.rdy_queue, list) | |
490 | ||
491 | /** | |
492 | * v4l2_m2m_for_each_src_buf() - iterate over a list of source ready buffers | |
493 | * | |
494 | * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx | |
495 | * @b: current buffer of type struct v4l2_m2m_buffer | |
496 | */ | |
497 | #define v4l2_m2m_for_each_src_buf(m2m_ctx, b) \ | |
498 | list_for_each_entry(b, &m2m_ctx->out_q_ctx.rdy_queue, list) | |
499 | ||
500 | /** | |
501 | * v4l2_m2m_for_each_dst_buf_safe() - iterate over a list of destination ready | |
502 | * buffers safely | |
503 | * | |
504 | * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx | |
505 | * @b: current buffer of type struct v4l2_m2m_buffer | |
506 | * @n: used as temporary storage | |
507 | */ | |
508 | #define v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, b, n) \ | |
509 | list_for_each_entry_safe(b, n, &m2m_ctx->cap_q_ctx.rdy_queue, list) | |
510 | ||
511 | /** | |
512 | * v4l2_m2m_for_each_src_buf_safe() - iterate over a list of source ready | |
513 | * buffers safely | |
514 | * | |
515 | * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx | |
516 | * @b: current buffer of type struct v4l2_m2m_buffer | |
517 | * @n: used as temporary storage | |
518 | */ | |
519 | #define v4l2_m2m_for_each_src_buf_safe(m2m_ctx, b, n) \ | |
520 | list_for_each_entry_safe(b, n, &m2m_ctx->out_q_ctx.rdy_queue, list) | |
521 | ||
7f98639d | 522 | /** |
908a0d7c | 523 | * v4l2_m2m_get_src_vq() - return vb2_queue for source buffers |
62c0d016 | 524 | * |
dcbd8735 | 525 | * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx |
7f98639d PO |
526 | */ |
527 | static inline | |
908a0d7c | 528 | struct vb2_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx) |
7f98639d | 529 | { |
908a0d7c | 530 | return &m2m_ctx->out_q_ctx.q; |
7f98639d PO |
531 | } |
532 | ||
533 | /** | |
908a0d7c | 534 | * v4l2_m2m_get_dst_vq() - return vb2_queue for destination buffers |
62c0d016 | 535 | * |
dcbd8735 | 536 | * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx |
7f98639d PO |
537 | */ |
538 | static inline | |
908a0d7c | 539 | struct vb2_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx) |
7f98639d | 540 | { |
908a0d7c | 541 | return &m2m_ctx->cap_q_ctx.q; |
7f98639d PO |
542 | } |
543 | ||
4781646c MCC |
544 | /** |
545 | * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and | |
546 | * return it | |
dcbd8735 MCC |
547 | * |
 548 | * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx |
4781646c | 549 | */ |
8dd22b28 | 550 | struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx); |
7f98639d PO |
551 | |
552 | /** | |
553 | * v4l2_m2m_src_buf_remove() - take off a source buffer from the list of ready | |
554 | * buffers and return it | |
62c0d016 | 555 | * |
dcbd8735 | 556 | * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx |
7f98639d | 557 | */ |
8dd22b28 EG |
558 | static inline struct vb2_v4l2_buffer * |
559 | v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx) | |
7f98639d | 560 | { |
908a0d7c | 561 | return v4l2_m2m_buf_remove(&m2m_ctx->out_q_ctx); |
7f98639d PO |
562 | } |
563 | ||
564 | /** | |
565 | * v4l2_m2m_dst_buf_remove() - take off a destination buffer from the list of | |
566 | * ready buffers and return it | |
62c0d016 | 567 | * |
dcbd8735 | 568 | * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx |
7f98639d | 569 | */ |
8dd22b28 EG |
570 | static inline struct vb2_v4l2_buffer * |
571 | v4l2_m2m_dst_buf_remove(struct v4l2_m2m_ctx *m2m_ctx) | |
7f98639d | 572 | { |
908a0d7c | 573 | return v4l2_m2m_buf_remove(&m2m_ctx->cap_q_ctx); |
7f98639d PO |
574 | } |
575 | ||
d4987564 SV |
576 | /** |
577 | * v4l2_m2m_buf_remove_by_buf() - take off exact buffer from the list of ready | |
578 | * buffers | |
579 | * | |
 580 | * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx |
581 | * @vbuf: the buffer to be removed | |
582 | */ | |
583 | void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx, | |
584 | struct vb2_v4l2_buffer *vbuf); | |
585 | ||
586 | /** | |
587 | * v4l2_m2m_src_buf_remove_by_buf() - take off exact source buffer from the list | |
588 | * of ready buffers | |
589 | * | |
590 | * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx | |
591 | * @vbuf: the buffer to be removed | |
592 | */ | |
593 | static inline void v4l2_m2m_src_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx, | |
594 | struct vb2_v4l2_buffer *vbuf) | |
595 | { | |
596 | v4l2_m2m_buf_remove_by_buf(&m2m_ctx->out_q_ctx, vbuf); | |
597 | } | |
598 | ||
599 | /** | |
600 | * v4l2_m2m_dst_buf_remove_by_buf() - take off exact destination buffer from the | |
601 | * list of ready buffers | |
602 | * | |
603 | * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx | |
604 | * @vbuf: the buffer to be removed | |
605 | */ | |
606 | static inline void v4l2_m2m_dst_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx, | |
607 | struct vb2_v4l2_buffer *vbuf) | |
608 | { | |
609 | v4l2_m2m_buf_remove_by_buf(&m2m_ctx->cap_q_ctx, vbuf); | |
610 | } | |
611 | ||
612 | struct vb2_v4l2_buffer * | |
613 | v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx); | |
614 | ||
615 | static inline struct vb2_v4l2_buffer * | |
616 | v4l2_m2m_src_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx) | |
617 | { | |
618 | return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->out_q_ctx, idx); | |
619 | } | |
620 | ||
621 | static inline struct vb2_v4l2_buffer * | |
622 | v4l2_m2m_dst_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx) | |
623 | { | |
624 | return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->cap_q_ctx, idx); | |
625 | } | |
626 | ||
e2d8ffe2 | 627 | /** |
a4d3d612 EG |
628 | * v4l2_m2m_buf_copy_metadata() - copy buffer metadata from |
629 | * the output buffer to the capture buffer | |
e2d8ffe2 | 630 | * |
a4d3d612 EG |
631 | * @out_vb: the output buffer that is the source of the metadata. |
632 | * @cap_vb: the capture buffer that will receive the metadata. | |
e2d8ffe2 HV |
633 | * @copy_frame_flags: copy the KEY/B/PFRAME flags as well. |
634 | * | |
635 | * This helper function copies the timestamp, timecode (if the TIMECODE | |
636 | * buffer flag was set), field and the TIMECODE, KEYFRAME, BFRAME, PFRAME | |
637 | * and TSTAMP_SRC_MASK flags from @out_vb to @cap_vb. | |
638 | * | |
639 | * If @copy_frame_flags is false, then the KEYFRAME, BFRAME and PFRAME | |
640 | * flags are not copied. This is typically needed for encoders that | |
 641 | * set these bits explicitly. |
642 | */ | |
a4d3d612 EG |
643 | void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb, |
644 | struct vb2_v4l2_buffer *cap_vb, | |
645 | bool copy_frame_flags); | |
e2d8ffe2 | 646 | |
803a7ab7 HV |
647 | /* v4l2 request helper */ |
648 | ||
ef86eaf9 | 649 | void v4l2_m2m_request_queue(struct media_request *req); |
803a7ab7 | 650 | |
8e6e8f93 SN |
651 | /* v4l2 ioctl helpers */ |
652 | ||
653 | int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv, | |
654 | struct v4l2_requestbuffers *rb); | |
655 | int v4l2_m2m_ioctl_create_bufs(struct file *file, void *fh, | |
656 | struct v4l2_create_buffers *create); | |
657 | int v4l2_m2m_ioctl_querybuf(struct file *file, void *fh, | |
658 | struct v4l2_buffer *buf); | |
659 | int v4l2_m2m_ioctl_expbuf(struct file *file, void *fh, | |
660 | struct v4l2_exportbuffer *eb); | |
661 | int v4l2_m2m_ioctl_qbuf(struct file *file, void *fh, | |
662 | struct v4l2_buffer *buf); | |
663 | int v4l2_m2m_ioctl_dqbuf(struct file *file, void *fh, | |
664 | struct v4l2_buffer *buf); | |
e68cf471 HV |
665 | int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *fh, |
666 | struct v4l2_buffer *buf); | |
8e6e8f93 SN |
667 | int v4l2_m2m_ioctl_streamon(struct file *file, void *fh, |
668 | enum v4l2_buf_type type); | |
669 | int v4l2_m2m_ioctl_streamoff(struct file *file, void *fh, | |
670 | enum v4l2_buf_type type); | |
671 | int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma); | |
c23e0cb8 | 672 | __poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait); |
8e6e8f93 | 673 | |
7f98639d PO |
674 | #endif /* _MEDIA_V4L2_MEM2MEM_H */ |
675 |