// SPDX-License-Identifier: GPL-2.0
/*
 * Media device request objects
 *
 * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 * Copyright (C) 2018 Intel Corporation
 * Copyright (C) 2018 Google, Inc.
 *
 * Author: Hans Verkuil <hans.verkuil@cisco.com>
 * Author: Sakari Ailus <sakari.ailus@linux.intel.com>
 */

#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/refcount.h>

#include <media/media-device.h>
#include <media/media-request.h>

static const char * const request_state[] = {
        [MEDIA_REQUEST_STATE_IDLE]       = "idle",
        [MEDIA_REQUEST_STATE_VALIDATING] = "validating",
        [MEDIA_REQUEST_STATE_QUEUED]     = "queued",
        [MEDIA_REQUEST_STATE_COMPLETE]   = "complete",
        [MEDIA_REQUEST_STATE_CLEANING]   = "cleaning",
        [MEDIA_REQUEST_STATE_UPDATING]   = "updating",
};

static const char *
media_request_state_str(enum media_request_state state)
{
        BUILD_BUG_ON(ARRAY_SIZE(request_state) != NR_OF_MEDIA_REQUEST_STATE);

        if (WARN_ON(state >= ARRAY_SIZE(request_state)))
                return "invalid";
        return request_state[state];
}

static void media_request_clean(struct media_request *req)
{
        struct media_request_object *obj, *obj_safe;

        /* Just a sanity check. No other code path is allowed to change this. */
        WARN_ON(req->state != MEDIA_REQUEST_STATE_CLEANING);
        WARN_ON(req->updating_count);
        WARN_ON(req->access_count);

        list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
                media_request_object_unbind(obj);
                media_request_object_put(obj);
        }

        req->updating_count = 0;
        req->access_count = 0;
        WARN_ON(req->num_incomplete_objects);
        req->num_incomplete_objects = 0;
        wake_up_interruptible_all(&req->poll_wait);
}

static void media_request_release(struct kref *kref)
{
        struct media_request *req =
                container_of(kref, struct media_request, kref);
        struct media_device *mdev = req->mdev;

        dev_dbg(mdev->dev, "request: release %s\n", req->debug_str);

        /* No other users, no need for a spinlock */
        req->state = MEDIA_REQUEST_STATE_CLEANING;

        media_request_clean(req);

        if (mdev->ops->req_free)
                mdev->ops->req_free(req);
        else
                kfree(req);
}

void media_request_put(struct media_request *req)
{
        kref_put(&req->kref, media_request_release);
}
EXPORT_SYMBOL_GPL(media_request_put);

static int media_request_close(struct inode *inode, struct file *filp)
{
        struct media_request *req = filp->private_data;

        media_request_put(req);
        return 0;
}

static __poll_t media_request_poll(struct file *filp,
                                   struct poll_table_struct *wait)
{
        struct media_request *req = filp->private_data;
        unsigned long flags;
        __poll_t ret = 0;

        if (!(poll_requested_events(wait) & EPOLLPRI))
                return 0;

        poll_wait(filp, &req->poll_wait, wait);
        spin_lock_irqsave(&req->lock, flags);
        if (req->state == MEDIA_REQUEST_STATE_COMPLETE) {
                ret = EPOLLPRI;
                goto unlock;
        }
        if (req->state != MEDIA_REQUEST_STATE_QUEUED) {
                ret = EPOLLERR;
                goto unlock;
        }

unlock:
        spin_unlock_irqrestore(&req->lock, flags);
        return ret;
}

static long media_request_ioctl_queue(struct media_request *req)
{
        struct media_device *mdev = req->mdev;
        enum media_request_state state;
        unsigned long flags;
        int ret;

        dev_dbg(mdev->dev, "request: queue %s\n", req->debug_str);

        /*
         * Ensure the request that is validated will be the one that gets
         * queued next by serialising the queueing process. This mutex is
         * also used to serialise with cancelling a vb2 queue and with
         * setting values such as controls in a request.
         */
        mutex_lock(&mdev->req_queue_mutex);

        media_request_get(req);

        spin_lock_irqsave(&req->lock, flags);
        if (req->state == MEDIA_REQUEST_STATE_IDLE)
                req->state = MEDIA_REQUEST_STATE_VALIDATING;
        state = req->state;
        spin_unlock_irqrestore(&req->lock, flags);
        if (state != MEDIA_REQUEST_STATE_VALIDATING) {
                dev_dbg(mdev->dev,
                        "request: unable to queue %s, request in state %s\n",
                        req->debug_str, media_request_state_str(state));
                media_request_put(req);
                mutex_unlock(&mdev->req_queue_mutex);
                return -EBUSY;
        }

        ret = mdev->ops->req_validate(req);

        /*
         * If req_validate was successful, then we mark the state as QUEUED
         * and call req_queue. The reason we set the state first is that this
         * allows req_queue to unbind or complete the queued objects in case
         * they are immediately 'consumed'. State changes from QUEUED to
         * another state can only happen if either the driver changes the
         * state or if the user cancels the vb2 queue. The driver can only
         * change the state after each object is queued through the req_queue
         * op (and that op cannot fail), so setting the state to QUEUED up
         * front is safe.
         *
         * The other reason for changing the state is if the vb2 queue is
         * cancelled, and that uses the req_queue_mutex which is still locked
         * while req_queue is called, so that's safe as well.
         */
        spin_lock_irqsave(&req->lock, flags);
        req->state = ret ? MEDIA_REQUEST_STATE_IDLE
                         : MEDIA_REQUEST_STATE_QUEUED;
        spin_unlock_irqrestore(&req->lock, flags);

        if (!ret)
                mdev->ops->req_queue(req);

        mutex_unlock(&mdev->req_queue_mutex);

        if (ret) {
                dev_dbg(mdev->dev, "request: can't queue %s (%d)\n",
                        req->debug_str, ret);
                media_request_put(req);
        }

        return ret;
}

static long media_request_ioctl_reinit(struct media_request *req)
{
        struct media_device *mdev = req->mdev;
        unsigned long flags;

        spin_lock_irqsave(&req->lock, flags);
        if (req->state != MEDIA_REQUEST_STATE_IDLE &&
            req->state != MEDIA_REQUEST_STATE_COMPLETE) {
                dev_dbg(mdev->dev,
                        "request: %s not in idle or complete state, cannot reinit\n",
                        req->debug_str);
                spin_unlock_irqrestore(&req->lock, flags);
                return -EBUSY;
        }
        if (req->access_count) {
                dev_dbg(mdev->dev,
                        "request: %s is being accessed, cannot reinit\n",
                        req->debug_str);
                spin_unlock_irqrestore(&req->lock, flags);
                return -EBUSY;
        }
        req->state = MEDIA_REQUEST_STATE_CLEANING;
        spin_unlock_irqrestore(&req->lock, flags);

        media_request_clean(req);

        spin_lock_irqsave(&req->lock, flags);
        req->state = MEDIA_REQUEST_STATE_IDLE;
        spin_unlock_irqrestore(&req->lock, flags);

        return 0;
}

static long media_request_ioctl(struct file *filp, unsigned int cmd,
                                unsigned long arg)
{
        struct media_request *req = filp->private_data;

        switch (cmd) {
        case MEDIA_REQUEST_IOC_QUEUE:
                return media_request_ioctl_queue(req);
        case MEDIA_REQUEST_IOC_REINIT:
                return media_request_ioctl_reinit(req);
        default:
                return -ENOIOCTLCMD;
        }
}

static const struct file_operations request_fops = {
        .owner = THIS_MODULE,
        .poll = media_request_poll,
        .unlocked_ioctl = media_request_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl = media_request_ioctl,
#endif /* CONFIG_COMPAT */
        .release = media_request_close,
};

struct media_request *
media_request_get_by_fd(struct media_device *mdev, int request_fd)
{
        struct fd f;
        struct media_request *req;

        if (!mdev || !mdev->ops ||
            !mdev->ops->req_validate || !mdev->ops->req_queue)
                return ERR_PTR(-EBADR);

        f = fdget(request_fd);
        if (!f.file)
                goto err_no_req_fd;

        if (f.file->f_op != &request_fops)
                goto err_fput;
        req = f.file->private_data;
        if (req->mdev != mdev)
                goto err_fput;

        /*
         * Note: as long as someone has an open file handle to the request,
         * the request can never be released. The fdget() above ensures that
         * even if userspace closes the request file handle, the release()
         * fop won't be called, so media_request_get() always succeeds
         * and there is no race condition where the request is released
         * before media_request_get() is called.
         */
        media_request_get(req);
        fdput(f);

        return req;

err_fput:
        fdput(f);

err_no_req_fd:
        dev_dbg(mdev->dev, "cannot find request_fd %d\n", request_fd);
        return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(media_request_get_by_fd);

int media_request_alloc(struct media_device *mdev, int *alloc_fd)
{
        struct media_request *req;
        struct file *filp;
        int fd;
        int ret;

        /* Either both are NULL or both are non-NULL */
        if (WARN_ON(!mdev->ops->req_alloc ^ !mdev->ops->req_free))
                return -ENOMEM;

        fd = get_unused_fd_flags(O_CLOEXEC);
        if (fd < 0)
                return fd;

        filp = anon_inode_getfile("request", &request_fops, NULL, O_CLOEXEC);
        if (IS_ERR(filp)) {
                ret = PTR_ERR(filp);
                goto err_put_fd;
        }

        if (mdev->ops->req_alloc)
                req = mdev->ops->req_alloc(mdev);
        else
                req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto err_fput;
        }

        filp->private_data = req;
        req->mdev = mdev;
        req->state = MEDIA_REQUEST_STATE_IDLE;
        req->num_incomplete_objects = 0;
        kref_init(&req->kref);
        INIT_LIST_HEAD(&req->objects);
        spin_lock_init(&req->lock);
        init_waitqueue_head(&req->poll_wait);
        req->updating_count = 0;
        req->access_count = 0;

        *alloc_fd = fd;

        snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d",
                 atomic_inc_return(&mdev->request_id), fd);
        dev_dbg(mdev->dev, "request: allocated %s\n", req->debug_str);

        fd_install(fd, filp);

        return 0;

err_fput:
        fput(filp);

err_put_fd:
        put_unused_fd(fd);

        return ret;
}

static void media_request_object_release(struct kref *kref)
{
        struct media_request_object *obj =
                container_of(kref, struct media_request_object, kref);
        struct media_request *req = obj->req;

        if (WARN_ON(req))
                media_request_object_unbind(obj);
        obj->ops->release(obj);
}

struct media_request_object *
media_request_object_find(struct media_request *req,
                          const struct media_request_object_ops *ops,
                          void *priv)
{
        struct media_request_object *obj;
        struct media_request_object *found = NULL;
        unsigned long flags;

        if (WARN_ON(!ops || !priv))
                return NULL;

        spin_lock_irqsave(&req->lock, flags);
        list_for_each_entry(obj, &req->objects, list) {
                if (obj->ops == ops && obj->priv == priv) {
                        media_request_object_get(obj);
                        found = obj;
                        break;
                }
        }
        spin_unlock_irqrestore(&req->lock, flags);
        return found;
}
EXPORT_SYMBOL_GPL(media_request_object_find);

void media_request_object_put(struct media_request_object *obj)
{
        kref_put(&obj->kref, media_request_object_release);
}
EXPORT_SYMBOL_GPL(media_request_object_put);

void media_request_object_init(struct media_request_object *obj)
{
        obj->ops = NULL;
        obj->req = NULL;
        obj->priv = NULL;
        obj->completed = false;
        INIT_LIST_HEAD(&obj->list);
        kref_init(&obj->kref);
}
EXPORT_SYMBOL_GPL(media_request_object_init);

int media_request_object_bind(struct media_request *req,
                              const struct media_request_object_ops *ops,
                              void *priv, bool is_buffer,
                              struct media_request_object *obj)
{
        unsigned long flags;
        int ret = -EBUSY;

        if (WARN_ON(!ops->release))
                return -EBADR;

        spin_lock_irqsave(&req->lock, flags);

        if (WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING))
                goto unlock;

        obj->req = req;
        obj->ops = ops;
        obj->priv = priv;

        if (is_buffer)
                list_add_tail(&obj->list, &req->objects);
        else
                list_add(&obj->list, &req->objects);
        req->num_incomplete_objects++;
        ret = 0;

unlock:
        spin_unlock_irqrestore(&req->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(media_request_object_bind);

void media_request_object_unbind(struct media_request_object *obj)
{
        struct media_request *req = obj->req;
        unsigned long flags;
        bool completed = false;

        if (WARN_ON(!req))
                return;

        spin_lock_irqsave(&req->lock, flags);
        list_del(&obj->list);
        obj->req = NULL;

        if (req->state == MEDIA_REQUEST_STATE_COMPLETE)
                goto unlock;

        if (WARN_ON(req->state == MEDIA_REQUEST_STATE_VALIDATING))
                goto unlock;

        if (req->state == MEDIA_REQUEST_STATE_CLEANING) {
                if (!obj->completed)
                        req->num_incomplete_objects--;
                goto unlock;
        }

        if (WARN_ON(!req->num_incomplete_objects))
                goto unlock;

        req->num_incomplete_objects--;
        if (req->state == MEDIA_REQUEST_STATE_QUEUED &&
            !req->num_incomplete_objects) {
                req->state = MEDIA_REQUEST_STATE_COMPLETE;
                completed = true;
                wake_up_interruptible_all(&req->poll_wait);
        }

unlock:
        spin_unlock_irqrestore(&req->lock, flags);
        if (obj->ops->unbind)
                obj->ops->unbind(obj);
        if (completed)
                media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_object_unbind);

void media_request_object_complete(struct media_request_object *obj)
{
        struct media_request *req = obj->req;
        unsigned long flags;
        bool completed = false;

        spin_lock_irqsave(&req->lock, flags);
        if (obj->completed)
                goto unlock;
        obj->completed = true;
        if (WARN_ON(!req->num_incomplete_objects) ||
            WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED))
                goto unlock;

        if (!--req->num_incomplete_objects) {
                req->state = MEDIA_REQUEST_STATE_COMPLETE;
                wake_up_interruptible_all(&req->poll_wait);
                completed = true;
        }
unlock:
        spin_unlock_irqrestore(&req->lock, flags);
        if (completed)
                media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_object_complete);