Commit | Line | Data |
---|---|---|
10905d70 HV |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* | |
3 | * Media device request objects | |
4 | * | |
5 | * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved. | |
6 | * Copyright (C) 2018 Intel Corporation | |
7 | * Copyright (C) 2018 Google, Inc. | |
8 | * | |
9 | * Author: Hans Verkuil <hans.verkuil@cisco.com> | |
10 | * Author: Sakari Ailus <sakari.ailus@linux.intel.com> | |
11 | */ | |
12 | ||
13 | #include <linux/anon_inodes.h> | |
14 | #include <linux/file.h> | |
15 | #include <linux/refcount.h> | |
16 | ||
17 | #include <media/media-device.h> | |
18 | #include <media/media-request.h> | |
19 | ||
/*
 * Human-readable name for each media request state, indexed by
 * enum media_request_state. Used only for debug messages (dev_dbg).
 */
static const char * const request_state[] = {
	[MEDIA_REQUEST_STATE_IDLE] = "idle",
	[MEDIA_REQUEST_STATE_VALIDATING] = "validating",
	[MEDIA_REQUEST_STATE_QUEUED] = "queued",
	[MEDIA_REQUEST_STATE_COMPLETE] = "complete",
	[MEDIA_REQUEST_STATE_CLEANING] = "cleaning",
	[MEDIA_REQUEST_STATE_UPDATING] = "updating",
};
28 | ||
29 | static const char * | |
30 | media_request_state_str(enum media_request_state state) | |
31 | { | |
32 | BUILD_BUG_ON(ARRAY_SIZE(request_state) != NR_OF_MEDIA_REQUEST_STATE); | |
33 | ||
34 | if (WARN_ON(state >= ARRAY_SIZE(request_state))) | |
35 | return "invalid"; | |
36 | return request_state[state]; | |
37 | } | |
38 | ||
/*
 * Unbind and release every object bound to @req and reset the request's
 * bookkeeping counters. The caller must have put the request into the
 * CLEANING state first (done by media_request_release() and
 * media_request_ioctl_reinit()).
 */
static void media_request_clean(struct media_request *req)
{
	struct media_request_object *obj, *obj_safe;

	/* Just a sanity check. No other code path is allowed to change this. */
	WARN_ON(req->state != MEDIA_REQUEST_STATE_CLEANING);
	WARN_ON(req->updating_count);
	WARN_ON(req->access_count);

	/* Unbinding removes the object from req->objects, so iterate safely. */
	list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
		media_request_object_unbind(obj);
		media_request_object_put(obj);
	}

	req->updating_count = 0;
	req->access_count = 0;
	/* Every object was unbound above, so none may still be incomplete. */
	WARN_ON(req->num_incomplete_objects);
	req->num_incomplete_objects = 0;
	/* Wake any poll() waiters: the request is being torn down or reset. */
	wake_up_interruptible_all(&req->poll_wait);
}
59 | ||
/*
 * kref release callback: runs when the last reference to the request is
 * dropped. Cleans up all bound objects, then frees the request through
 * the driver's req_free op if one was provided, else with kfree().
 */
static void media_request_release(struct kref *kref)
{
	struct media_request *req =
		container_of(kref, struct media_request, kref);
	struct media_device *mdev = req->mdev;

	dev_dbg(mdev->dev, "request: release %s\n", req->debug_str);

	/* No other users, no need for a spinlock */
	req->state = MEDIA_REQUEST_STATE_CLEANING;

	media_request_clean(req);

	if (mdev->ops->req_free)
		mdev->ops->req_free(req);
	else
		kfree(req);
}
78 | ||
/* Drop one reference to @req; the request is released at refcount zero. */
void media_request_put(struct media_request *req)
{
	kref_put(&req->kref, media_request_release);
}
EXPORT_SYMBOL_GPL(media_request_put);
84 | ||
85 | static int media_request_close(struct inode *inode, struct file *filp) | |
86 | { | |
87 | struct media_request *req = filp->private_data; | |
88 | ||
89 | media_request_put(req); | |
90 | return 0; | |
91 | } | |
92 | ||
/*
 * poll() fop for the request fd. Only EPOLLPRI is supported: it is
 * raised once the request has completed. EPOLLERR is returned for a
 * request that was never queued and therefore can never complete.
 */
static __poll_t media_request_poll(struct file *filp,
				   struct poll_table_struct *wait)
{
	struct media_request *req = filp->private_data;
	unsigned long flags;
	__poll_t ret = 0;

	/* Completion is only signalled through EPOLLPRI. */
	if (!(poll_requested_events(wait) & EPOLLPRI))
		return 0;

	spin_lock_irqsave(&req->lock, flags);
	if (req->state == MEDIA_REQUEST_STATE_COMPLETE) {
		ret = EPOLLPRI;
		goto unlock;
	}
	/* Not queued: waiting would block forever, so report an error. */
	if (req->state != MEDIA_REQUEST_STATE_QUEUED) {
		ret = EPOLLERR;
		goto unlock;
	}

	poll_wait(filp, &req->poll_wait, wait);

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	return ret;
}
119 | ||
/*
 * MEDIA_REQUEST_IOC_QUEUE handler: validate the request and hand it to
 * the driver. Returns -EBUSY if the request is not IDLE, otherwise the
 * result of the driver's req_validate op.
 */
static long media_request_ioctl_queue(struct media_request *req)
{
	struct media_device *mdev = req->mdev;
	enum media_request_state state;
	unsigned long flags;
	int ret;

	dev_dbg(mdev->dev, "request: queue %s\n", req->debug_str);

	/*
	 * Ensure the request that is validated will be the one that gets queued
	 * next by serialising the queueing process. This mutex is also used
	 * to serialize with canceling a vb2 queue and with setting values such
	 * as controls in a request.
	 */
	mutex_lock(&mdev->req_queue_mutex);

	/* Hold a reference across validate + queue. */
	media_request_get(req);

	/* Only an IDLE request may transition to VALIDATING. */
	spin_lock_irqsave(&req->lock, flags);
	if (req->state == MEDIA_REQUEST_STATE_IDLE)
		req->state = MEDIA_REQUEST_STATE_VALIDATING;
	state = req->state;
	spin_unlock_irqrestore(&req->lock, flags);
	if (state != MEDIA_REQUEST_STATE_VALIDATING) {
		dev_dbg(mdev->dev,
			"request: unable to queue %s, request in state %s\n",
			req->debug_str, media_request_state_str(state));
		media_request_put(req);
		mutex_unlock(&mdev->req_queue_mutex);
		return -EBUSY;
	}

	ret = mdev->ops->req_validate(req);

	/*
	 * If the req_validate was successful, then we mark the state as QUEUED
	 * and call req_queue. The reason we set the state first is that this
	 * allows req_queue to unbind or complete the queued objects in case
	 * they are immediately 'consumed'. State changes from QUEUED to another
	 * state can only happen if either the driver changes the state or if
	 * the user cancels the vb2 queue. The driver can only change the state
	 * after each object is queued through the req_queue op (and note that
	 * that op cannot fail), so setting the state to QUEUED up front is
	 * safe.
	 *
	 * The other reason for changing the state is if the vb2 queue is
	 * canceled, and that uses the req_queue_mutex which is still locked
	 * while req_queue is called, so that's safe as well.
	 */
	spin_lock_irqsave(&req->lock, flags);
	req->state = ret ? MEDIA_REQUEST_STATE_IDLE
			 : MEDIA_REQUEST_STATE_QUEUED;
	spin_unlock_irqrestore(&req->lock, flags);

	if (!ret)
		mdev->ops->req_queue(req);

	mutex_unlock(&mdev->req_queue_mutex);

	if (ret) {
		dev_dbg(mdev->dev, "request: can't queue %s (%d)\n",
			req->debug_str, ret);
		/* Validation failed: drop the reference taken above. */
		media_request_put(req);
	}
	/*
	 * On success the reference taken above is kept; it is dropped when
	 * the request completes (see media_request_object_unbind() and
	 * media_request_object_complete()).
	 */

	return ret;
}
188 | ||
/*
 * MEDIA_REQUEST_IOC_REINIT handler: return the request to the IDLE
 * state so it can be reused. Only allowed when the request is idle or
 * complete and not currently being accessed; otherwise -EBUSY.
 */
static long media_request_ioctl_reinit(struct media_request *req)
{
	struct media_device *mdev = req->mdev;
	unsigned long flags;

	spin_lock_irqsave(&req->lock, flags);
	if (req->state != MEDIA_REQUEST_STATE_IDLE &&
	    req->state != MEDIA_REQUEST_STATE_COMPLETE) {
		dev_dbg(mdev->dev,
			"request: %s not in idle or complete state, cannot reinit\n",
			req->debug_str);
		spin_unlock_irqrestore(&req->lock, flags);
		return -EBUSY;
	}
	if (req->access_count) {
		dev_dbg(mdev->dev,
			"request: %s is being accessed, cannot reinit\n",
			req->debug_str);
		spin_unlock_irqrestore(&req->lock, flags);
		return -EBUSY;
	}
	/*
	 * Enter CLEANING while still holding the lock so no other state
	 * transition can race in; the actual cleanup runs unlocked.
	 */
	req->state = MEDIA_REQUEST_STATE_CLEANING;
	spin_unlock_irqrestore(&req->lock, flags);

	media_request_clean(req);

	/* Cleanup done: the request may be used again. */
	spin_lock_irqsave(&req->lock, flags);
	req->state = MEDIA_REQUEST_STATE_IDLE;
	spin_unlock_irqrestore(&req->lock, flags);

	return 0;
}
221 | ||
222 | static long media_request_ioctl(struct file *filp, unsigned int cmd, | |
223 | unsigned long arg) | |
224 | { | |
225 | struct media_request *req = filp->private_data; | |
226 | ||
227 | switch (cmd) { | |
228 | case MEDIA_REQUEST_IOC_QUEUE: | |
229 | return media_request_ioctl_queue(req); | |
230 | case MEDIA_REQUEST_IOC_REINIT: | |
231 | return media_request_ioctl_reinit(req); | |
232 | default: | |
233 | return -ENOIOCTLCMD; | |
234 | } | |
235 | } | |
236 | ||
/* File operations backing the anon-inode request file descriptor. */
static const struct file_operations request_fops = {
	.owner = THIS_MODULE,
	.poll = media_request_poll,
	.unlocked_ioctl = media_request_ioctl,
	.release = media_request_close,
};
243 | ||
/*
 * Look up the request behind @request_fd and return it with an extra
 * reference held. Returns ERR_PTR(-EACCES) if @mdev does not support
 * requests (missing req_validate/req_queue ops) and ERR_PTR(-EINVAL)
 * if the fd is not a request fd belonging to @mdev.
 */
struct media_request *
media_request_get_by_fd(struct media_device *mdev, int request_fd)
{
	struct file *filp;
	struct media_request *req;

	/* Requests are only usable when all mandatory request ops exist. */
	if (!mdev || !mdev->ops ||
	    !mdev->ops->req_validate || !mdev->ops->req_queue)
		return ERR_PTR(-EACCES);

	filp = fget(request_fd);
	if (!filp)
		goto err_no_req_fd;

	/* Verify this is a request fd, and one owned by this media device. */
	if (filp->f_op != &request_fops)
		goto err_fput;
	req = filp->private_data;
	if (req->mdev != mdev)
		goto err_fput;

	/*
	 * Note: as long as someone has an open filehandle of the request,
	 * the request can never be released. The fget() above ensures that
	 * even if userspace closes the request filehandle, the release()
	 * fop won't be called, so the media_request_get() always succeeds
	 * and there is no race condition where the request was released
	 * before media_request_get() is called.
	 */
	media_request_get(req);
	fput(filp);

	return req;

err_fput:
	fput(filp);

err_no_req_fd:
	dev_dbg(mdev->dev, "cannot find request_fd %d\n", request_fd);
	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(media_request_get_by_fd);
285 | ||
/*
 * Allocate a new request for @mdev and return its file descriptor in
 * *@alloc_fd. The request is allocated via the driver's req_alloc op
 * when provided, otherwise with kzalloc(). Returns 0 on success or a
 * negative error code; on failure no fd is installed.
 */
int media_request_alloc(struct media_device *mdev, int *alloc_fd)
{
	struct media_request *req;
	struct file *filp;
	int fd;
	int ret;

	/* Either both are NULL or both are non-NULL */
	if (WARN_ON(!mdev->ops->req_alloc ^ !mdev->ops->req_free))
		return -ENOMEM;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	filp = anon_inode_getfile("request", &request_fops, NULL, O_CLOEXEC);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_put_fd;
	}

	if (mdev->ops->req_alloc)
		req = mdev->ops->req_alloc(mdev);
	else
		req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto err_fput;
	}

	filp->private_data = req;
	req->mdev = mdev;
	req->state = MEDIA_REQUEST_STATE_IDLE;
	req->num_incomplete_objects = 0;
	kref_init(&req->kref);
	INIT_LIST_HEAD(&req->objects);
	spin_lock_init(&req->lock);
	init_waitqueue_head(&req->poll_wait);
	req->updating_count = 0;
	req->access_count = 0;

	*alloc_fd = fd;

	/* Debug label "<request id>:<fd>", used by the dev_dbg messages. */
	snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d",
		 atomic_inc_return(&mdev->request_id), fd);
	dev_dbg(mdev->dev, "request: allocated %s\n", req->debug_str);

	/* Publish the fd only once the request is fully initialized. */
	fd_install(fd, filp);

	return 0;

err_fput:
	fput(filp);

err_put_fd:
	put_unused_fd(fd);

	return ret;
}
345 | ||
/*
 * kref release callback for a request object. The object should already
 * have been unbound from its request when the last reference goes away;
 * if not, warn and unbind it here before calling the driver's release op.
 */
static void media_request_object_release(struct kref *kref)
{
	struct media_request_object *obj =
		container_of(kref, struct media_request_object, kref);
	struct media_request *req = obj->req;

	if (WARN_ON(req))
		media_request_object_unbind(obj);
	obj->ops->release(obj);
}
356 | ||
0ca0e844 HV |
357 | struct media_request_object * |
358 | media_request_object_find(struct media_request *req, | |
359 | const struct media_request_object_ops *ops, | |
360 | void *priv) | |
361 | { | |
362 | struct media_request_object *obj; | |
363 | struct media_request_object *found = NULL; | |
364 | unsigned long flags; | |
365 | ||
366 | if (WARN_ON(!ops || !priv)) | |
367 | return NULL; | |
368 | ||
369 | spin_lock_irqsave(&req->lock, flags); | |
370 | list_for_each_entry(obj, &req->objects, list) { | |
371 | if (obj->ops == ops && obj->priv == priv) { | |
372 | media_request_object_get(obj); | |
373 | found = obj; | |
374 | break; | |
375 | } | |
376 | } | |
377 | spin_unlock_irqrestore(&req->lock, flags); | |
378 | return found; | |
379 | } | |
380 | EXPORT_SYMBOL_GPL(media_request_object_find); | |
381 | ||
/* Drop one reference to @obj; the object is released at refcount zero. */
void media_request_object_put(struct media_request_object *obj)
{
	kref_put(&obj->kref, media_request_object_release);
}
EXPORT_SYMBOL_GPL(media_request_object_put);
387 | ||
388 | void media_request_object_init(struct media_request_object *obj) | |
389 | { | |
390 | obj->ops = NULL; | |
391 | obj->req = NULL; | |
392 | obj->priv = NULL; | |
393 | obj->completed = false; | |
394 | INIT_LIST_HEAD(&obj->list); | |
395 | kref_init(&obj->kref); | |
396 | } | |
397 | EXPORT_SYMBOL_GPL(media_request_object_init); | |
398 | ||
/*
 * Bind @obj to @req. The request must be in the UPDATING state.
 * Buffer objects are added at the tail of the object list, other
 * objects at the head (NOTE(review): presumably so non-buffer objects
 * are unbound before buffers during cleanup, since media_request_clean()
 * walks the list front to back — confirm against driver expectations).
 * Returns 0 on success, -EACCES if @ops lacks a release op, -EBUSY if
 * the request is not in the UPDATING state.
 */
int media_request_object_bind(struct media_request *req,
			      const struct media_request_object_ops *ops,
			      void *priv, bool is_buffer,
			      struct media_request_object *obj)
{
	unsigned long flags;
	int ret = -EBUSY;

	/* Without a release op the object could never be freed. */
	if (WARN_ON(!ops->release))
		return -EACCES;

	spin_lock_irqsave(&req->lock, flags);

	if (WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING))
		goto unlock;

	obj->req = req;
	obj->ops = ops;
	obj->priv = priv;

	if (is_buffer)
		list_add_tail(&obj->list, &req->objects);
	else
		list_add(&obj->list, &req->objects);
	req->num_incomplete_objects++;
	ret = 0;

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(media_request_object_bind);
431 | ||
/*
 * Unbind @obj from its request: remove it from the object list and
 * update the request's incomplete-object accounting. If this unbinds
 * the last incomplete object of a QUEUED request, the request becomes
 * COMPLETE, poll() waiters are woken and the reference taken at queue
 * time is dropped.
 */
void media_request_object_unbind(struct media_request_object *obj)
{
	struct media_request *req = obj->req;
	unsigned long flags;
	bool completed = false;

	if (WARN_ON(!req))
		return;

	spin_lock_irqsave(&req->lock, flags);
	list_del(&obj->list);
	obj->req = NULL;

	/* A COMPLETE request no longer tracks incomplete objects. */
	if (req->state == MEDIA_REQUEST_STATE_COMPLETE)
		goto unlock;

	/* Unbinding while the request is being validated is a driver bug. */
	if (WARN_ON(req->state == MEDIA_REQUEST_STATE_VALIDATING))
		goto unlock;

	if (req->state == MEDIA_REQUEST_STATE_CLEANING) {
		if (!obj->completed)
			req->num_incomplete_objects--;
		goto unlock;
	}

	if (WARN_ON(!req->num_incomplete_objects))
		goto unlock;

	req->num_incomplete_objects--;
	if (req->state == MEDIA_REQUEST_STATE_QUEUED &&
	    !req->num_incomplete_objects) {
		req->state = MEDIA_REQUEST_STATE_COMPLETE;
		completed = true;
		wake_up_interruptible_all(&req->poll_wait);
	}

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	/* Invoke the driver's unbind op without the request lock held. */
	if (obj->ops->unbind)
		obj->ops->unbind(obj);
	if (completed)
		media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_object_unbind);
476 | ||
/*
 * Mark @obj as completed. When the last incomplete object of a QUEUED
 * request completes, the request transitions to COMPLETE, poll()
 * waiters are woken and the queue-time reference is dropped.
 */
void media_request_object_complete(struct media_request_object *obj)
{
	struct media_request *req = obj->req;
	unsigned long flags;
	bool completed = false;

	spin_lock_irqsave(&req->lock, flags);
	/* Completing an already-completed object is a no-op. */
	if (obj->completed)
		goto unlock;
	obj->completed = true;
	/* Objects may only complete while the request is QUEUED. */
	if (WARN_ON(!req->num_incomplete_objects) ||
	    WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED))
		goto unlock;

	if (!--req->num_incomplete_objects) {
		req->state = MEDIA_REQUEST_STATE_COMPLETE;
		wake_up_interruptible_all(&req->poll_wait);
		completed = true;
	}
unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	/* Drop the reference taken when the request was queued. */
	if (completed)
		media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_object_complete);