// SPDX-License-Identifier: GPL-2.0-only
/*
 * V4L2 sub-device
 *
 * Copyright (C) 2010 Nokia Corporation
 *
 * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *	    Sakari Ailus <sakari.ailus@iki.fi>
 */

#include <linux/export.h>
#include <linux/ioctl.h>
#include <linux/leds.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/version.h>
#include <linux/videodev2.h>

#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
/*
 * The Streams API is an experimental feature. To use the Streams API, set
 * 'v4l2_subdev_enable_streams_api' to 1 below.
 */

static bool v4l2_subdev_enable_streams_api;
#endif

/*
 * Maximum stream ID is 63 for now, as we use u64 bitmask to represent a set
 * of streams.
 *
 * Note that V4L2_FRAME_DESC_ENTRY_MAX is related: V4L2_FRAME_DESC_ENTRY_MAX
 * restricts the total number of streams in a pad, although the stream ID is
 * not restricted.
 */
#define V4L2_SUBDEV_MAX_STREAM_ID 63

#include "v4l2-subdev-priv.h"

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
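/*
 * Each open file handle on a subdev device node gets its own v4l2_subdev_fh
 * carrying a private subdev state, which backs the V4L2_SUBDEV_FORMAT_TRY
 * variants of the pad ioctls.
 */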
static int subdev_fh_init(struct v4l2_subdev_fh *fh, struct v4l2_subdev *sd)
{
	struct v4l2_subdev_state *state;
	static struct lock_class_key key;

	state = __v4l2_subdev_state_alloc(sd, "fh->state->lock", &key);
	if (IS_ERR(state))
		return PTR_ERR(state);

	fh->state = state;

	return 0;
}

static void subdev_fh_free(struct v4l2_subdev_fh *fh)
{
	__v4l2_subdev_state_free(fh->state);
	fh->state = NULL;
}

static int subdev_open(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_subdev_fh *subdev_fh;
	int ret;

	subdev_fh = kzalloc(sizeof(*subdev_fh), GFP_KERNEL);
	if (subdev_fh == NULL)
		return -ENOMEM;

	ret = subdev_fh_init(subdev_fh, sd);
	if (ret) {
		kfree(subdev_fh);
		return ret;
	}

	v4l2_fh_init(&subdev_fh->vfh, vdev);
	v4l2_fh_add(&subdev_fh->vfh);
	file->private_data = &subdev_fh->vfh;

	if (sd->v4l2_dev->mdev && sd->entity.graph_obj.mdev->dev) {
		struct module *owner;

		owner = sd->entity.graph_obj.mdev->dev->driver->owner;
		if (!try_module_get(owner)) {
			ret = -EBUSY;
			goto err;
		}
		subdev_fh->owner = owner;
	}

	if (sd->internal_ops && sd->internal_ops->open) {
		ret = sd->internal_ops->open(sd, subdev_fh);
		if (ret < 0)
			goto err;
	}

	return 0;

err:
	module_put(subdev_fh->owner);
	v4l2_fh_del(&subdev_fh->vfh);
	v4l2_fh_exit(&subdev_fh->vfh);
	subdev_fh_free(subdev_fh);
	kfree(subdev_fh);

	return ret;
}

static int subdev_close(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_fh *vfh = file->private_data;
	struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);

	if (sd->internal_ops && sd->internal_ops->close)
		sd->internal_ops->close(sd, subdev_fh);
	module_put(subdev_fh->owner);
	v4l2_fh_del(vfh);
	v4l2_fh_exit(vfh);
	subdev_fh_free(subdev_fh);
	kfree(subdev_fh);
	file->private_data = NULL;

	return 0;
}
#else /* CONFIG_VIDEO_V4L2_SUBDEV_API */
static int subdev_open(struct file *file)
{
	return -ENODEV;
}

static int subdev_close(struct file *file)
{
	return -ENODEV;
}
#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */

static void v4l2_subdev_enable_privacy_led(struct v4l2_subdev *sd)
{
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
	if (!IS_ERR_OR_NULL(sd->privacy_led))
		led_set_brightness(sd->privacy_led,
				   sd->privacy_led->max_brightness);
#endif
}

static void v4l2_subdev_disable_privacy_led(struct v4l2_subdev *sd)
{
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
	if (!IS_ERR_OR_NULL(sd->privacy_led))
		led_set_brightness(sd->privacy_led, 0);
#endif
}

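/*
 * The check_*() helpers below validate the ioctl arguments (the 'which'
 * selector, the pad index and, for stream-aware subdevs, the pad/stream pair)
 * before the corresponding driver pad operation is invoked by the call_*()
 * wrappers. The wrappers chain the checks with the GNU "a ? : b" extension:
 * the first non-zero error code is returned, otherwise the driver op is
 * called.
 */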
static inline int check_which(u32 which)
{
	if (which != V4L2_SUBDEV_FORMAT_TRY &&
	    which != V4L2_SUBDEV_FORMAT_ACTIVE)
		return -EINVAL;

	return 0;
}

static inline int check_pad(struct v4l2_subdev *sd, u32 pad)
{
#if defined(CONFIG_MEDIA_CONTROLLER)
	if (sd->entity.num_pads) {
		if (pad >= sd->entity.num_pads)
			return -EINVAL;
		return 0;
	}
#endif
	/* allow pad 0 on subdevices not registered as media entities */
	if (pad > 0)
		return -EINVAL;
	return 0;
}

static int check_state(struct v4l2_subdev *sd, struct v4l2_subdev_state *state,
		       u32 which, u32 pad, u32 stream)
{
	if (sd->flags & V4L2_SUBDEV_FL_STREAMS) {
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
		if (!v4l2_subdev_state_get_format(state, pad, stream))
			return -EINVAL;
		return 0;
#else
		return -EINVAL;
#endif
	}

	if (stream != 0)
		return -EINVAL;

	if (which == V4L2_SUBDEV_FORMAT_TRY && (!state || !state->pads))
		return -EINVAL;

	return 0;
}

static inline int check_format(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *state,
			       struct v4l2_subdev_format *format)
{
	if (!format)
		return -EINVAL;

	return check_which(format->which) ? : check_pad(sd, format->pad) ? :
	       check_state(sd, state, format->which, format->pad, format->stream);
}

static int call_get_fmt(struct v4l2_subdev *sd,
			struct v4l2_subdev_state *state,
			struct v4l2_subdev_format *format)
{
	return check_format(sd, state, format) ? :
	       sd->ops->pad->get_fmt(sd, state, format);
}

static int call_set_fmt(struct v4l2_subdev *sd,
			struct v4l2_subdev_state *state,
			struct v4l2_subdev_format *format)
{
	return check_format(sd, state, format) ? :
	       sd->ops->pad->set_fmt(sd, state, format);
}

static int call_enum_mbus_code(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *state,
			       struct v4l2_subdev_mbus_code_enum *code)
{
	if (!code)
		return -EINVAL;

	return check_which(code->which) ? : check_pad(sd, code->pad) ? :
	       check_state(sd, state, code->which, code->pad, code->stream) ? :
	       sd->ops->pad->enum_mbus_code(sd, state, code);
}

static int call_enum_frame_size(struct v4l2_subdev *sd,
				struct v4l2_subdev_state *state,
				struct v4l2_subdev_frame_size_enum *fse)
{
	if (!fse)
		return -EINVAL;

	return check_which(fse->which) ? : check_pad(sd, fse->pad) ? :
	       check_state(sd, state, fse->which, fse->pad, fse->stream) ? :
	       sd->ops->pad->enum_frame_size(sd, state, fse);
}

static int call_enum_frame_interval(struct v4l2_subdev *sd,
				    struct v4l2_subdev_state *state,
				    struct v4l2_subdev_frame_interval_enum *fie)
{
	if (!fie)
		return -EINVAL;

	return check_which(fie->which) ? : check_pad(sd, fie->pad) ? :
	       check_state(sd, state, fie->which, fie->pad, fie->stream) ? :
	       sd->ops->pad->enum_frame_interval(sd, state, fie);
}

static inline int check_selection(struct v4l2_subdev *sd,
				  struct v4l2_subdev_state *state,
				  struct v4l2_subdev_selection *sel)
{
	if (!sel)
		return -EINVAL;

	return check_which(sel->which) ? : check_pad(sd, sel->pad) ? :
	       check_state(sd, state, sel->which, sel->pad, sel->stream);
}

static int call_get_selection(struct v4l2_subdev *sd,
			      struct v4l2_subdev_state *state,
			      struct v4l2_subdev_selection *sel)
{
	return check_selection(sd, state, sel) ? :
	       sd->ops->pad->get_selection(sd, state, sel);
}

static int call_set_selection(struct v4l2_subdev *sd,
			      struct v4l2_subdev_state *state,
			      struct v4l2_subdev_selection *sel)
{
	return check_selection(sd, state, sel) ? :
	       sd->ops->pad->set_selection(sd, state, sel);
}

static inline int check_frame_interval(struct v4l2_subdev *sd,
				       struct v4l2_subdev_state *state,
				       struct v4l2_subdev_frame_interval *fi)
{
	if (!fi)
		return -EINVAL;

	return check_which(fi->which) ? : check_pad(sd, fi->pad) ? :
	       check_state(sd, state, fi->which, fi->pad, fi->stream);
}

static int call_get_frame_interval(struct v4l2_subdev *sd,
				   struct v4l2_subdev_state *state,
				   struct v4l2_subdev_frame_interval *fi)
{
	return check_frame_interval(sd, state, fi) ? :
	       sd->ops->pad->get_frame_interval(sd, state, fi);
}

static int call_set_frame_interval(struct v4l2_subdev *sd,
				   struct v4l2_subdev_state *state,
				   struct v4l2_subdev_frame_interval *fi)
{
	return check_frame_interval(sd, state, fi) ? :
	       sd->ops->pad->set_frame_interval(sd, state, fi);
}

static int call_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
			       struct v4l2_mbus_frame_desc *fd)
{
	unsigned int i;
	int ret;

#if defined(CONFIG_MEDIA_CONTROLLER)
	if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
		return -EOPNOTSUPP;
#endif

	memset(fd, 0, sizeof(*fd));

	ret = sd->ops->pad->get_frame_desc(sd, pad, fd);
	if (ret)
		return ret;

	dev_dbg(sd->dev, "Frame descriptor on pad %u, type %s\n", pad,
		fd->type == V4L2_MBUS_FRAME_DESC_TYPE_PARALLEL ? "parallel" :
		fd->type == V4L2_MBUS_FRAME_DESC_TYPE_CSI2 ? "CSI-2" :
		"unknown");

	for (i = 0; i < fd->num_entries; i++) {
		struct v4l2_mbus_frame_desc_entry *entry = &fd->entry[i];
		char buf[20] = "";

		if (fd->type == V4L2_MBUS_FRAME_DESC_TYPE_CSI2)
			WARN_ON(snprintf(buf, sizeof(buf),
					 ", vc %u, dt 0x%02x",
					 entry->bus.csi2.vc,
					 entry->bus.csi2.dt) >= sizeof(buf));

		dev_dbg(sd->dev,
			"\tstream %u, code 0x%04x, length %u, flags 0x%04x%s\n",
			entry->stream, entry->pixelcode, entry->length,
			entry->flags, buf);
	}

	return 0;
}

static inline int check_edid(struct v4l2_subdev *sd,
			     struct v4l2_subdev_edid *edid)
{
	if (!edid)
		return -EINVAL;

	if (edid->blocks && edid->edid == NULL)
		return -EINVAL;

	return check_pad(sd, edid->pad);
}

static int call_get_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
{
	return check_edid(sd, edid) ? : sd->ops->pad->get_edid(sd, edid);
}

static int call_set_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
{
	return check_edid(sd, edid) ? : sd->ops->pad->set_edid(sd, edid);
}

static int call_s_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
			     struct v4l2_dv_timings *timings)
{
	if (!timings)
		return -EINVAL;

	return check_pad(sd, pad) ? :
	       sd->ops->pad->s_dv_timings(sd, pad, timings);
}

static int call_g_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
			     struct v4l2_dv_timings *timings)
{
	if (!timings)
		return -EINVAL;

	return check_pad(sd, pad) ? :
	       sd->ops->pad->g_dv_timings(sd, pad, timings);
}

static int call_query_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
				 struct v4l2_dv_timings *timings)
{
	if (!timings)
		return -EINVAL;

	return check_pad(sd, pad) ? :
	       sd->ops->pad->query_dv_timings(sd, pad, timings);
}

static int call_dv_timings_cap(struct v4l2_subdev *sd,
			       struct v4l2_dv_timings_cap *cap)
{
	if (!cap)
		return -EINVAL;

	return check_pad(sd, cap->pad) ? :
	       sd->ops->pad->dv_timings_cap(sd, cap);
}

static int call_enum_dv_timings(struct v4l2_subdev *sd,
				struct v4l2_enum_dv_timings *dvt)
{
	if (!dvt)
		return -EINVAL;

	return check_pad(sd, dvt->pad) ? :
	       sd->ops->pad->enum_dv_timings(sd, dvt);
}

static int call_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad,
				struct v4l2_mbus_config *config)
{
	memset(config, 0, sizeof(*config));

	return check_pad(sd, pad) ? :
	       sd->ops->pad->get_mbus_config(sd, pad, config);
}

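/*
 * Wrapper around the video .s_stream() operation: it warns on unbalanced
 * enable/disable calls and drives the optional privacy LED according to the
 * resulting streaming state.
 */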
453static int call_s_stream(struct v4l2_subdev *sd, int enable)
454{
455 int ret;
456
009905ec
LP
457 /*
458 * The .s_stream() operation must never be called to start or stop an
459 * already started or stopped subdev. Catch offenders but don't return
460 * an error yet to avoid regressions.
009905ec 461 */
1d780428 462 if (WARN_ON(sd->s_stream_enabled == !!enable))
009905ec
LP
463 return 0;
464
379c2586
SA
465 ret = sd->ops->video->s_stream(sd, enable);
466
467 if (!enable && ret < 0) {
468 dev_warn(sd->dev, "disabling streaming failed (%d)\n", ret);
009905ec 469 ret = 0;
379c2586
SA
470 }
471
f2bf6cd8 472 if (!ret) {
1d780428 473 sd->s_stream_enabled = enable;
009905ec 474
4e628f95
TV
475 if (enable)
476 v4l2_subdev_enable_privacy_led(sd);
477 else
478 v4l2_subdev_disable_privacy_led(sd);
f2bf6cd8
SA
479 }
480
379c2586
SA
481 return ret;
482}
483
b2ac2387
TV
484#ifdef CONFIG_MEDIA_CONTROLLER
485/*
486 * Create state-management wrapper for pad ops dealing with subdev state. The
487 * wrapper handles the case where the caller does not provide the called
488 * subdev's state. This should be removed when all the callers are fixed.
489 */
490#define DEFINE_STATE_WRAPPER(f, arg_type) \
491 static int call_##f##_state(struct v4l2_subdev *sd, \
492 struct v4l2_subdev_state *_state, \
493 arg_type *arg) \
494 { \
495 struct v4l2_subdev_state *state = _state; \
496 int ret; \
497 if (!_state) \
498 state = v4l2_subdev_lock_and_get_active_state(sd); \
499 ret = call_##f(sd, state, arg); \
500 if (!_state && state) \
501 v4l2_subdev_unlock_state(state); \
502 return ret; \
503 }
504
505#else /* CONFIG_MEDIA_CONTROLLER */
506
507#define DEFINE_STATE_WRAPPER(f, arg_type) \
508 static int call_##f##_state(struct v4l2_subdev *sd, \
509 struct v4l2_subdev_state *state, \
510 arg_type *arg) \
511 { \
512 return call_##f(sd, state, arg); \
513 }
514
515#endif /* CONFIG_MEDIA_CONTROLLER */
516
517DEFINE_STATE_WRAPPER(get_fmt, struct v4l2_subdev_format);
518DEFINE_STATE_WRAPPER(set_fmt, struct v4l2_subdev_format);
519DEFINE_STATE_WRAPPER(enum_mbus_code, struct v4l2_subdev_mbus_code_enum);
520DEFINE_STATE_WRAPPER(enum_frame_size, struct v4l2_subdev_frame_size_enum);
521DEFINE_STATE_WRAPPER(enum_frame_interval, struct v4l2_subdev_frame_interval_enum);
522DEFINE_STATE_WRAPPER(get_selection, struct v4l2_subdev_selection);
523DEFINE_STATE_WRAPPER(set_selection, struct v4l2_subdev_selection);
524
a8fa5507 525static const struct v4l2_subdev_pad_ops v4l2_subdev_call_pad_wrappers = {
b2ac2387
TV
526 .get_fmt = call_get_fmt_state,
527 .set_fmt = call_set_fmt_state,
528 .enum_mbus_code = call_enum_mbus_code_state,
529 .enum_frame_size = call_enum_frame_size_state,
530 .enum_frame_interval = call_enum_frame_interval_state,
531 .get_selection = call_get_selection_state,
532 .set_selection = call_set_selection_state,
287fe160
LP
533 .get_frame_interval = call_get_frame_interval,
534 .set_frame_interval = call_set_frame_interval,
a8fa5507
JK
535 .get_edid = call_get_edid,
536 .set_edid = call_set_edid,
009e1256
PA
537 .s_dv_timings = call_s_dv_timings,
538 .g_dv_timings = call_g_dv_timings,
539 .query_dv_timings = call_query_dv_timings,
a8fa5507
JK
540 .dv_timings_cap = call_dv_timings_cap,
541 .enum_dv_timings = call_enum_dv_timings,
76c0b99d 542 .get_frame_desc = call_get_frame_desc,
38df0b85 543 .get_mbus_config = call_get_mbus_config,
a8fa5507
JK
544};
545
546static const struct v4l2_subdev_video_ops v4l2_subdev_call_video_wrappers = {
379c2586 547 .s_stream = call_s_stream,
a8fa5507
JK
548};
549
550const struct v4l2_subdev_ops v4l2_subdev_call_wrappers = {
551 .pad = &v4l2_subdev_call_pad_wrappers,
552 .video = &v4l2_subdev_call_video_wrappers,
553};
554EXPORT_SYMBOL(v4l2_subdev_call_wrappers);
b225e398 555
fb15db8c 556#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
3cc7a4bb
TV
557
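/*
 * Select the subdev state an ioctl operates on: the per-file-handle TRY state
 * for V4L2_SUBDEV_FORMAT_TRY requests, the (unlocked) active device state
 * otherwise, or NULL for ioctls that do not take a state at all.
 */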
static struct v4l2_subdev_state *
subdev_ioctl_get_state(struct v4l2_subdev *sd, struct v4l2_subdev_fh *subdev_fh,
		       unsigned int cmd, void *arg)
{
	u32 which;

	switch (cmd) {
	default:
		return NULL;
	case VIDIOC_SUBDEV_G_FMT:
	case VIDIOC_SUBDEV_S_FMT:
		which = ((struct v4l2_subdev_format *)arg)->which;
		break;
	case VIDIOC_SUBDEV_G_CROP:
	case VIDIOC_SUBDEV_S_CROP:
		which = ((struct v4l2_subdev_crop *)arg)->which;
		break;
	case VIDIOC_SUBDEV_ENUM_MBUS_CODE:
		which = ((struct v4l2_subdev_mbus_code_enum *)arg)->which;
		break;
	case VIDIOC_SUBDEV_ENUM_FRAME_SIZE:
		which = ((struct v4l2_subdev_frame_size_enum *)arg)->which;
		break;
	case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL:
		which = ((struct v4l2_subdev_frame_interval_enum *)arg)->which;
		break;
	case VIDIOC_SUBDEV_G_SELECTION:
	case VIDIOC_SUBDEV_S_SELECTION:
		which = ((struct v4l2_subdev_selection *)arg)->which;
		break;
	case VIDIOC_SUBDEV_G_FRAME_INTERVAL:
	case VIDIOC_SUBDEV_S_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval *fi = arg;

		if (!(subdev_fh->client_caps &
		      V4L2_SUBDEV_CLIENT_CAP_INTERVAL_USES_WHICH))
			fi->which = V4L2_SUBDEV_FORMAT_ACTIVE;

		which = fi->which;
		break;
	}
	case VIDIOC_SUBDEV_G_ROUTING:
	case VIDIOC_SUBDEV_S_ROUTING:
		which = ((struct v4l2_subdev_routing *)arg)->which;
		break;
	}

	return which == V4L2_SUBDEV_FORMAT_TRY ?
	       subdev_fh->state :
	       v4l2_subdev_get_unlocked_active_state(sd);
}

static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg,
			    struct v4l2_subdev_state *state)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_fh *vfh = file->private_data;
	struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
	bool ro_subdev = test_bit(V4L2_FL_SUBDEV_RO_DEVNODE, &vdev->flags);
	bool streams_subdev = sd->flags & V4L2_SUBDEV_FL_STREAMS;
	bool client_supports_streams = subdev_fh->client_caps &
				       V4L2_SUBDEV_CLIENT_CAP_STREAMS;
	int rval;

	/*
	 * If the streams API is not enabled, remove V4L2_SUBDEV_CAP_STREAMS.
	 * Remove this when the API is no longer experimental.
	 */
	if (!v4l2_subdev_enable_streams_api)
		streams_subdev = false;

	switch (cmd) {
	case VIDIOC_SUBDEV_QUERYCAP: {
		struct v4l2_subdev_capability *cap = arg;

		memset(cap->reserved, 0, sizeof(cap->reserved));
		cap->version = LINUX_VERSION_CODE;
		cap->capabilities =
			(ro_subdev ? V4L2_SUBDEV_CAP_RO_SUBDEV : 0) |
			(streams_subdev ? V4L2_SUBDEV_CAP_STREAMS : 0);

		return 0;
	}

	case VIDIOC_QUERYCTRL:
		/*
		 * TODO: this really should be folded into v4l2_queryctrl (this
		 * currently returns -EINVAL for NULL control handlers).
		 * However, v4l2_queryctrl() is still called directly by
		 * drivers as well and until that has been addressed I believe
		 * it is safer to do the check here. The same is true for the
		 * other control ioctls below.
		 */
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_queryctrl(vfh->ctrl_handler, arg);

	case VIDIOC_QUERY_EXT_CTRL:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_query_ext_ctrl(vfh->ctrl_handler, arg);

	case VIDIOC_QUERYMENU:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_querymenu(vfh->ctrl_handler, arg);

	case VIDIOC_G_CTRL:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_g_ctrl(vfh->ctrl_handler, arg);

	case VIDIOC_S_CTRL:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_s_ctrl(vfh, vfh->ctrl_handler, arg);

	case VIDIOC_G_EXT_CTRLS:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_g_ext_ctrls(vfh->ctrl_handler,
					vdev, sd->v4l2_dev->mdev, arg);

	case VIDIOC_S_EXT_CTRLS:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler,
					vdev, sd->v4l2_dev->mdev, arg);

	case VIDIOC_TRY_EXT_CTRLS:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_try_ext_ctrls(vfh->ctrl_handler,
					  vdev, sd->v4l2_dev->mdev, arg);

	case VIDIOC_DQEVENT:
		if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
			return -ENOIOCTLCMD;

		return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);

	case VIDIOC_SUBSCRIBE_EVENT:
		if (v4l2_subdev_has_op(sd, core, subscribe_event))
			return v4l2_subdev_call(sd, core, subscribe_event,
						vfh, arg);

		if ((sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS) &&
		    vfh->ctrl_handler)
			return v4l2_ctrl_subdev_subscribe_event(sd, vfh, arg);

		return -ENOIOCTLCMD;

	case VIDIOC_UNSUBSCRIBE_EVENT:
		if (v4l2_subdev_has_op(sd, core, unsubscribe_event))
			return v4l2_subdev_call(sd, core, unsubscribe_event,
						vfh, arg);

		if (sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS)
			return v4l2_event_subdev_unsubscribe(sd, vfh, arg);

		return -ENOIOCTLCMD;

#ifdef CONFIG_VIDEO_ADV_DEBUG
	case VIDIOC_DBG_G_REGISTER:
	{
		struct v4l2_dbg_register *p = arg;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		return v4l2_subdev_call(sd, core, g_register, p);
	}
	case VIDIOC_DBG_S_REGISTER:
	{
		struct v4l2_dbg_register *p = arg;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		return v4l2_subdev_call(sd, core, s_register, p);
	}
	case VIDIOC_DBG_G_CHIP_INFO:
	{
		struct v4l2_dbg_chip_info *p = arg;

		if (p->match.type != V4L2_CHIP_MATCH_SUBDEV || p->match.addr)
			return -EINVAL;
		if (sd->ops->core && sd->ops->core->s_register)
			p->flags |= V4L2_CHIP_FL_WRITABLE;
		if (sd->ops->core && sd->ops->core->g_register)
			p->flags |= V4L2_CHIP_FL_READABLE;
		strscpy(p->name, sd->name, sizeof(p->name));
		return 0;
	}
#endif

	case VIDIOC_LOG_STATUS: {
		int ret;

		pr_info("%s: ================= START STATUS =================\n",
			sd->name);
		ret = v4l2_subdev_call(sd, core, log_status);
		pr_info("%s: ================== END STATUS ==================\n",
			sd->name);
		return ret;
	}

	case VIDIOC_SUBDEV_G_FMT: {
		struct v4l2_subdev_format *format = arg;

		if (!client_supports_streams)
			format->stream = 0;

		memset(format->reserved, 0, sizeof(format->reserved));
		memset(format->format.reserved, 0, sizeof(format->format.reserved));
		return v4l2_subdev_call(sd, pad, get_fmt, state, format);
	}

	case VIDIOC_SUBDEV_S_FMT: {
		struct v4l2_subdev_format *format = arg;

		if (format->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		if (!client_supports_streams)
			format->stream = 0;

		memset(format->reserved, 0, sizeof(format->reserved));
		memset(format->format.reserved, 0, sizeof(format->format.reserved));
		return v4l2_subdev_call(sd, pad, set_fmt, state, format);
	}

	case VIDIOC_SUBDEV_G_CROP: {
		struct v4l2_subdev_crop *crop = arg;
		struct v4l2_subdev_selection sel;

		if (!client_supports_streams)
			crop->stream = 0;

		memset(crop->reserved, 0, sizeof(crop->reserved));
		memset(&sel, 0, sizeof(sel));
		sel.which = crop->which;
		sel.pad = crop->pad;
		sel.stream = crop->stream;
		sel.target = V4L2_SEL_TGT_CROP;

		rval = v4l2_subdev_call(
			sd, pad, get_selection, state, &sel);

		crop->rect = sel.r;

		return rval;
	}

	case VIDIOC_SUBDEV_S_CROP: {
		struct v4l2_subdev_crop *crop = arg;
		struct v4l2_subdev_selection sel;

		if (crop->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		if (!client_supports_streams)
			crop->stream = 0;

		memset(crop->reserved, 0, sizeof(crop->reserved));
		memset(&sel, 0, sizeof(sel));
		sel.which = crop->which;
		sel.pad = crop->pad;
		sel.stream = crop->stream;
		sel.target = V4L2_SEL_TGT_CROP;
		sel.r = crop->rect;

		rval = v4l2_subdev_call(
			sd, pad, set_selection, state, &sel);

		crop->rect = sel.r;

		return rval;
	}

	case VIDIOC_SUBDEV_ENUM_MBUS_CODE: {
		struct v4l2_subdev_mbus_code_enum *code = arg;

		if (!client_supports_streams)
			code->stream = 0;

		memset(code->reserved, 0, sizeof(code->reserved));
		return v4l2_subdev_call(sd, pad, enum_mbus_code, state,
					code);
	}

	case VIDIOC_SUBDEV_ENUM_FRAME_SIZE: {
		struct v4l2_subdev_frame_size_enum *fse = arg;

		if (!client_supports_streams)
			fse->stream = 0;

		memset(fse->reserved, 0, sizeof(fse->reserved));
		return v4l2_subdev_call(sd, pad, enum_frame_size, state,
					fse);
	}

	case VIDIOC_SUBDEV_G_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval *fi = arg;

		if (!client_supports_streams)
			fi->stream = 0;

		memset(fi->reserved, 0, sizeof(fi->reserved));
		return v4l2_subdev_call(sd, pad, get_frame_interval, state, fi);
	}

	case VIDIOC_SUBDEV_S_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval *fi = arg;

		if (!client_supports_streams)
			fi->stream = 0;

		if (fi->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		memset(fi->reserved, 0, sizeof(fi->reserved));
		return v4l2_subdev_call(sd, pad, set_frame_interval, state, fi);
	}

	case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval_enum *fie = arg;

		if (!client_supports_streams)
			fie->stream = 0;

		memset(fie->reserved, 0, sizeof(fie->reserved));
		return v4l2_subdev_call(sd, pad, enum_frame_interval, state,
					fie);
	}

	case VIDIOC_SUBDEV_G_SELECTION: {
		struct v4l2_subdev_selection *sel = arg;

		if (!client_supports_streams)
			sel->stream = 0;

		memset(sel->reserved, 0, sizeof(sel->reserved));
		return v4l2_subdev_call(
			sd, pad, get_selection, state, sel);
	}

	case VIDIOC_SUBDEV_S_SELECTION: {
		struct v4l2_subdev_selection *sel = arg;

		if (sel->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		if (!client_supports_streams)
			sel->stream = 0;

		memset(sel->reserved, 0, sizeof(sel->reserved));
		return v4l2_subdev_call(
			sd, pad, set_selection, state, sel);
	}

	case VIDIOC_G_EDID: {
		struct v4l2_subdev_edid *edid = arg;

		return v4l2_subdev_call(sd, pad, get_edid, edid);
	}

	case VIDIOC_S_EDID: {
		struct v4l2_subdev_edid *edid = arg;

		return v4l2_subdev_call(sd, pad, set_edid, edid);
	}

	case VIDIOC_SUBDEV_DV_TIMINGS_CAP: {
		struct v4l2_dv_timings_cap *cap = arg;

		return v4l2_subdev_call(sd, pad, dv_timings_cap, cap);
	}

	case VIDIOC_SUBDEV_ENUM_DV_TIMINGS: {
		struct v4l2_enum_dv_timings *dvt = arg;

		return v4l2_subdev_call(sd, pad, enum_dv_timings, dvt);
	}

	case VIDIOC_SUBDEV_QUERY_DV_TIMINGS:
		return v4l2_subdev_call(sd, pad, query_dv_timings, 0, arg);

	case VIDIOC_SUBDEV_G_DV_TIMINGS:
		return v4l2_subdev_call(sd, pad, g_dv_timings, 0, arg);

	case VIDIOC_SUBDEV_S_DV_TIMINGS:
		if (ro_subdev)
			return -EPERM;

		return v4l2_subdev_call(sd, pad, s_dv_timings, 0, arg);

	case VIDIOC_SUBDEV_G_STD:
		return v4l2_subdev_call(sd, video, g_std, arg);

	case VIDIOC_SUBDEV_S_STD: {
		v4l2_std_id *std = arg;

		if (ro_subdev)
			return -EPERM;

		return v4l2_subdev_call(sd, video, s_std, *std);
	}

	case VIDIOC_SUBDEV_ENUMSTD: {
		struct v4l2_standard *p = arg;
		v4l2_std_id id;

		if (v4l2_subdev_call(sd, video, g_tvnorms, &id))
			return -EINVAL;

		return v4l_video_std_enumstd(p, id);
	}

	case VIDIOC_SUBDEV_QUERYSTD:
		return v4l2_subdev_call(sd, video, querystd, arg);

	case VIDIOC_SUBDEV_G_ROUTING: {
		struct v4l2_subdev_routing *routing = arg;
		struct v4l2_subdev_krouting *krouting;

		if (!v4l2_subdev_enable_streams_api)
			return -ENOIOCTLCMD;

		if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
			return -ENOIOCTLCMD;

		memset(routing->reserved, 0, sizeof(routing->reserved));

		krouting = &state->routing;

		memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes,
		       krouting->routes,
		       min(krouting->num_routes, routing->len_routes) *
		       sizeof(*krouting->routes));
		routing->num_routes = krouting->num_routes;

		return 0;
	}

	case VIDIOC_SUBDEV_S_ROUTING: {
		struct v4l2_subdev_routing *routing = arg;
		struct v4l2_subdev_route *routes =
			(struct v4l2_subdev_route *)(uintptr_t)routing->routes;
		struct v4l2_subdev_krouting krouting = {};
		unsigned int i;

		if (!v4l2_subdev_enable_streams_api)
			return -ENOIOCTLCMD;

		if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
			return -ENOIOCTLCMD;

		if (routing->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		if (routing->num_routes > routing->len_routes)
			return -EINVAL;

		memset(routing->reserved, 0, sizeof(routing->reserved));

		for (i = 0; i < routing->num_routes; ++i) {
			const struct v4l2_subdev_route *route = &routes[i];
			const struct media_pad *pads = sd->entity.pads;

			if (route->sink_stream > V4L2_SUBDEV_MAX_STREAM_ID ||
			    route->source_stream > V4L2_SUBDEV_MAX_STREAM_ID)
				return -EINVAL;

			if (route->sink_pad >= sd->entity.num_pads)
				return -EINVAL;

			if (!(pads[route->sink_pad].flags &
			      MEDIA_PAD_FL_SINK))
				return -EINVAL;

			if (route->source_pad >= sd->entity.num_pads)
				return -EINVAL;

			if (!(pads[route->source_pad].flags &
			      MEDIA_PAD_FL_SOURCE))
				return -EINVAL;
		}

		/*
		 * If the driver doesn't support setting routing, just return
		 * the routing table.
		 */
		if (!v4l2_subdev_has_op(sd, pad, set_routing)) {
			memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes,
			       state->routing.routes,
			       min(state->routing.num_routes, routing->len_routes) *
			       sizeof(*state->routing.routes));
			routing->num_routes = state->routing.num_routes;

			return 0;
		}

		krouting.num_routes = routing->num_routes;
		krouting.len_routes = routing->len_routes;
		krouting.routes = routes;

		rval = v4l2_subdev_call(sd, pad, set_routing, state,
					routing->which, &krouting);
		if (rval < 0)
			return rval;

		memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes,
		       state->routing.routes,
		       min(state->routing.num_routes, routing->len_routes) *
		       sizeof(*state->routing.routes));
		routing->num_routes = state->routing.num_routes;

		return 0;
	}

	case VIDIOC_SUBDEV_G_CLIENT_CAP: {
		struct v4l2_subdev_client_capability *client_cap = arg;

		client_cap->capabilities = subdev_fh->client_caps;

		return 0;
	}

	case VIDIOC_SUBDEV_S_CLIENT_CAP: {
		struct v4l2_subdev_client_capability *client_cap = arg;

		/*
		 * Clear V4L2_SUBDEV_CLIENT_CAP_STREAMS if streams API is not
		 * enabled. Remove this when streams API is no longer
		 * experimental.
		 */
		if (!v4l2_subdev_enable_streams_api)
			client_cap->capabilities &= ~V4L2_SUBDEV_CLIENT_CAP_STREAMS;

		/* Filter out unsupported capabilities */
		client_cap->capabilities &= (V4L2_SUBDEV_CLIENT_CAP_STREAMS |
					     V4L2_SUBDEV_CLIENT_CAP_INTERVAL_USES_WHICH);

		subdev_fh->client_caps = client_cap->capabilities;

		return 0;
	}

	default:
		return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
	}

	return 0;
}

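/*
 * Serialize the ioctl against the video device lock (if any), make sure the
 * device is still registered, and lock the relevant subdev state around
 * subdev_do_ioctl().
 */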
static long subdev_do_ioctl_lock(struct file *file, unsigned int cmd, void *arg)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->lock;
	long ret = -ENODEV;

	if (lock && mutex_lock_interruptible(lock))
		return -ERESTARTSYS;

	if (video_is_registered(vdev)) {
		struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
		struct v4l2_fh *vfh = file->private_data;
		struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
		struct v4l2_subdev_state *state;

		state = subdev_ioctl_get_state(sd, subdev_fh, cmd, arg);

		if (state)
			v4l2_subdev_lock_state(state);

		ret = subdev_do_ioctl(file, cmd, arg, state);

		if (state)
			v4l2_subdev_unlock_state(state);
	}

	if (lock)
		mutex_unlock(lock);
	return ret;
}

static long subdev_ioctl(struct file *file, unsigned int cmd,
			 unsigned long arg)
{
	return video_usercopy(file, cmd, arg, subdev_do_ioctl_lock);
}

#ifdef CONFIG_COMPAT
static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);

	return v4l2_subdev_call(sd, core, compat_ioctl32, cmd, arg);
}
#endif

#else /* CONFIG_VIDEO_V4L2_SUBDEV_API */
static long subdev_ioctl(struct file *file, unsigned int cmd,
			 unsigned long arg)
{
	return -ENODEV;
}

#ifdef CONFIG_COMPAT
static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	return -ENODEV;
}
#endif
#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */

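/* Poll support is limited to V4L2 events on subdevs that advertise them. */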
static __poll_t subdev_poll(struct file *file, poll_table *wait)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_fh *fh = file->private_data;

	if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
		return EPOLLERR;

	poll_wait(file, &fh->wait, wait);

	if (v4l2_event_pending(fh))
		return EPOLLPRI;

	return 0;
}

const struct v4l2_file_operations v4l2_subdev_fops = {
	.owner = THIS_MODULE,
	.open = subdev_open,
	.unlocked_ioctl = subdev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl32 = subdev_compat_ioctl32,
#endif
	.release = subdev_close,
	.poll = subdev_poll,
};

#ifdef CONFIG_MEDIA_CONTROLLER

int v4l2_subdev_get_fwnode_pad_1_to_1(struct media_entity *entity,
				      struct fwnode_endpoint *endpoint)
{
	struct fwnode_handle *fwnode;
	struct v4l2_subdev *sd;

	if (!is_media_entity_v4l2_subdev(entity))
		return -EINVAL;

	sd = media_entity_to_v4l2_subdev(entity);

	fwnode = fwnode_graph_get_port_parent(endpoint->local_fwnode);
	fwnode_handle_put(fwnode);

	if (device_match_fwnode(sd->dev, fwnode))
		return endpoint->port;

	return -ENXIO;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_get_fwnode_pad_1_to_1);

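/*
 * Default link validation: the width, height and media bus code must match on
 * both ends of the link, and the field order must either match or be NONE on
 * the sink side.
 */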
int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd,
				      struct media_link *link,
				      struct v4l2_subdev_format *source_fmt,
				      struct v4l2_subdev_format *sink_fmt)
{
	bool pass = true;

	/* The width, height and code must match. */
	if (source_fmt->format.width != sink_fmt->format.width) {
		dev_dbg(sd->entity.graph_obj.mdev->dev,
			"%s: width does not match (source %u, sink %u)\n",
			__func__,
			source_fmt->format.width, sink_fmt->format.width);
		pass = false;
	}

	if (source_fmt->format.height != sink_fmt->format.height) {
		dev_dbg(sd->entity.graph_obj.mdev->dev,
			"%s: height does not match (source %u, sink %u)\n",
			__func__,
			source_fmt->format.height, sink_fmt->format.height);
		pass = false;
	}

	if (source_fmt->format.code != sink_fmt->format.code) {
		dev_dbg(sd->entity.graph_obj.mdev->dev,
			"%s: media bus code does not match (source 0x%8.8x, sink 0x%8.8x)\n",
			__func__,
			source_fmt->format.code, sink_fmt->format.code);
		pass = false;
	}

	/* The field order must match, or the sink field order must be NONE
	 * to support interlaced hardware connected to bridges that support
	 * progressive formats only.
	 */
	if (source_fmt->format.field != sink_fmt->format.field &&
	    sink_fmt->format.field != V4L2_FIELD_NONE) {
		dev_dbg(sd->entity.graph_obj.mdev->dev,
			"%s: field does not match (source %u, sink %u)\n",
			__func__,
			source_fmt->format.field, sink_fmt->format.field);
		pass = false;
	}

	if (pass)
		return 0;

	dev_dbg(sd->entity.graph_obj.mdev->dev,
		"%s: link was \"%s\":%u -> \"%s\":%u\n", __func__,
		link->source->entity->name, link->source->index,
		link->sink->entity->name, link->sink->index);

	return -EPIPE;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate_default);

static int
v4l2_subdev_link_validate_get_format(struct media_pad *pad, u32 stream,
				     struct v4l2_subdev_format *fmt,
				     bool states_locked)
{
	struct v4l2_subdev_state *state;
	struct v4l2_subdev *sd;
	int ret;

	sd = media_entity_to_v4l2_subdev(pad->entity);

	fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
	fmt->pad = pad->index;
	fmt->stream = stream;

	if (states_locked)
		state = v4l2_subdev_get_locked_active_state(sd);
	else
		state = v4l2_subdev_lock_and_get_active_state(sd);

	ret = v4l2_subdev_call(sd, pad, get_fmt, state, fmt);

	if (!states_locked && state)
		v4l2_subdev_unlock_state(state);

	return ret;
}

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)

static void __v4l2_link_validate_get_streams(struct media_pad *pad,
					     u64 *streams_mask,
					     bool states_locked)
{
	struct v4l2_subdev_route *route;
	struct v4l2_subdev_state *state;
	struct v4l2_subdev *subdev;

	subdev = media_entity_to_v4l2_subdev(pad->entity);

	*streams_mask = 0;

	if (states_locked)
		state = v4l2_subdev_get_locked_active_state(subdev);
	else
		state = v4l2_subdev_lock_and_get_active_state(subdev);

	if (WARN_ON(!state))
		return;

	for_each_active_route(&state->routing, route) {
		u32 route_pad;
		u32 route_stream;

		if (pad->flags & MEDIA_PAD_FL_SOURCE) {
			route_pad = route->source_pad;
			route_stream = route->source_stream;
		} else {
			route_pad = route->sink_pad;
			route_stream = route->sink_stream;
		}

		if (route_pad != pad->index)
			continue;

		*streams_mask |= BIT_ULL(route_stream);
	}

	if (!states_locked)
		v4l2_subdev_unlock_state(state);
}

#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */

static void v4l2_link_validate_get_streams(struct media_pad *pad,
					   u64 *streams_mask,
					   bool states_locked)
{
	struct v4l2_subdev *subdev = media_entity_to_v4l2_subdev(pad->entity);

	if (!(subdev->flags & V4L2_SUBDEV_FL_STREAMS)) {
		/* Non-streams subdevs have an implicit stream 0 */
		*streams_mask = BIT_ULL(0);
		return;
	}

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
	__v4l2_link_validate_get_streams(pad, streams_mask, states_locked);
#else
	/* This shouldn't happen */
	*streams_mask = 0;
#endif
}

static int v4l2_subdev_link_validate_locked(struct media_link *link, bool states_locked)
{
	struct v4l2_subdev *sink_subdev =
		media_entity_to_v4l2_subdev(link->sink->entity);
	struct device *dev = sink_subdev->entity.graph_obj.mdev->dev;
	u64 source_streams_mask;
	u64 sink_streams_mask;
	u64 dangling_sink_streams;
	u32 stream;
	int ret;

	dev_dbg(dev, "validating link \"%s\":%u -> \"%s\":%u\n",
		link->source->entity->name, link->source->index,
		link->sink->entity->name, link->sink->index);

	v4l2_link_validate_get_streams(link->source, &source_streams_mask, states_locked);
	v4l2_link_validate_get_streams(link->sink, &sink_streams_mask, states_locked);

	/*
	 * It is ok to have more source streams than sink streams as extra
	 * source streams can just be ignored by the receiver, but having extra
	 * sink streams is an error as streams must have a source.
	 */
	dangling_sink_streams = (source_streams_mask ^ sink_streams_mask) &
				sink_streams_mask;
	if (dangling_sink_streams) {
		dev_err(dev, "Dangling sink streams: mask %#llx\n",
			dangling_sink_streams);
		return -EINVAL;
	}

	/* Validate source and sink stream formats */

	for (stream = 0; stream < sizeof(sink_streams_mask) * 8; ++stream) {
		struct v4l2_subdev_format sink_fmt, source_fmt;

		if (!(sink_streams_mask & BIT_ULL(stream)))
			continue;

		dev_dbg(dev, "validating stream \"%s\":%u:%u -> \"%s\":%u:%u\n",
			link->source->entity->name, link->source->index, stream,
			link->sink->entity->name, link->sink->index, stream);

		ret = v4l2_subdev_link_validate_get_format(link->source, stream,
							   &source_fmt, states_locked);
		if (ret < 0) {
			dev_dbg(dev,
				"Failed to get format for \"%s\":%u:%u (but that's ok)\n",
				link->source->entity->name, link->source->index,
				stream);
			continue;
		}

		ret = v4l2_subdev_link_validate_get_format(link->sink, stream,
							   &sink_fmt, states_locked);
		if (ret < 0) {
			dev_dbg(dev,
				"Failed to get format for \"%s\":%u:%u (but that's ok)\n",
				link->sink->entity->name, link->sink->index,
				stream);
			continue;
		}

		/* TODO: add stream number to link_validate() */
		ret = v4l2_subdev_call(sink_subdev, pad, link_validate, link,
				       &source_fmt, &sink_fmt);
		if (!ret)
			continue;

		if (ret != -ENOIOCTLCMD)
			return ret;

		ret = v4l2_subdev_link_validate_default(sink_subdev, link,
							&source_fmt, &sink_fmt);

		if (ret)
			return ret;
	}

	return 0;
}

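/*
 * Validate a link in the context of its sink subdev. Both subdev states are
 * locked (when available) and each sink stream is checked against the
 * corresponding source stream.
 */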
int v4l2_subdev_link_validate(struct media_link *link)
{
	struct v4l2_subdev *source_sd, *sink_sd;
	struct v4l2_subdev_state *source_state, *sink_state;
	bool states_locked;
	int ret;

	/*
	 * Links are validated in the context of the sink entity. Usage of this
	 * helper on a sink that is not a subdev is a clear driver bug.
	 */
	if (WARN_ON_ONCE(!is_media_entity_v4l2_subdev(link->sink->entity)))
		return -EINVAL;

	/*
	 * If the source is a video device, delegate link validation to it.
	 * This allows usage of this helper for a subdev connected to a video
	 * output device, provided that the driver implements the video output
	 * device's .link_validate() operation.
	 */
	if (is_media_entity_v4l2_video_device(link->source->entity)) {
		struct media_entity *source = link->source->entity;

		if (!source->ops || !source->ops->link_validate) {
			/*
			 * Many existing drivers do not implement the required
			 * .link_validate() operation for their video devices.
			 * Print a warning to get the drivers fixed, and return
			 * 0 to avoid breaking userspace. This should
			 * eventually be turned into a WARN_ON() when all
			 * drivers have been fixed.
			 */
			pr_warn_once("video device '%s' does not implement .link_validate(), driver bug!\n",
				     source->name);
			return 0;
		}

		/*
		 * Avoid infinite loops in case a video device incorrectly uses
		 * this helper function as its .link_validate() handler.
		 */
		if (WARN_ON(source->ops->link_validate == v4l2_subdev_link_validate))
			return -EINVAL;

		return source->ops->link_validate(link);
	}

	/*
	 * If the source is still not a subdev, usage of this helper is a clear
	 * driver bug.
	 */
	if (WARN_ON(!is_media_entity_v4l2_subdev(link->source->entity)))
		return -EINVAL;

	sink_sd = media_entity_to_v4l2_subdev(link->sink->entity);
	source_sd = media_entity_to_v4l2_subdev(link->source->entity);

	sink_state = v4l2_subdev_get_unlocked_active_state(sink_sd);
	source_state = v4l2_subdev_get_unlocked_active_state(source_sd);

	states_locked = sink_state && source_state;

	if (states_locked)
		v4l2_subdev_lock_states(sink_state, source_state);

	ret = v4l2_subdev_link_validate_locked(link, states_locked);

	if (states_locked)
		v4l2_subdev_unlock_states(sink_state, source_state);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate);

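/*
 * Two pads are considered interdependent if an active route in the subdev's
 * routing table connects them in either direction.
 */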
bool v4l2_subdev_has_pad_interdep(struct media_entity *entity,
				  unsigned int pad0, unsigned int pad1)
{
	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
	struct v4l2_subdev_krouting *routing;
	struct v4l2_subdev_state *state;
	unsigned int i;

	state = v4l2_subdev_lock_and_get_active_state(sd);

	routing = &state->routing;

	for (i = 0; i < routing->num_routes; ++i) {
		struct v4l2_subdev_route *route = &routing->routes[i];

		if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
			continue;

		if ((route->sink_pad == pad0 && route->source_pad == pad1) ||
		    (route->source_pad == pad0 && route->sink_pad == pad1)) {
			v4l2_subdev_unlock_state(state);
			return true;
		}
	}

	v4l2_subdev_unlock_state(state);

	return false;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_has_pad_interdep);

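/*
 * Allocate and initialise a subdev state: set up its lock, allocate the
 * legacy per-pad array for non-stream subdevs, and give the driver a chance
 * to initialise the state through the .init_state() internal operation.
 */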
struct v4l2_subdev_state *
__v4l2_subdev_state_alloc(struct v4l2_subdev *sd, const char *lock_name,
			  struct lock_class_key *lock_key)
{
	struct v4l2_subdev_state *state;
	int ret;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return ERR_PTR(-ENOMEM);

	__mutex_init(&state->_lock, lock_name, lock_key);
	if (sd->state_lock)
		state->lock = sd->state_lock;
	else
		state->lock = &state->_lock;

	state->sd = sd;

	/* Drivers that support streams do not need the legacy pad config */
	if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS) && sd->entity.num_pads) {
		state->pads = kvcalloc(sd->entity.num_pads,
				       sizeof(*state->pads), GFP_KERNEL);
		if (!state->pads) {
			ret = -ENOMEM;
			goto err;
		}
	}

	if (sd->internal_ops && sd->internal_ops->init_state) {
		/*
		 * There can be no race at this point, but we lock the state
		 * anyway to satisfy lockdep checks.
		 */
		v4l2_subdev_lock_state(state);
		ret = sd->internal_ops->init_state(sd, state);
		v4l2_subdev_unlock_state(state);

		if (ret)
			goto err;
	}

	return state;

err:
	if (state && state->pads)
		kvfree(state->pads);

	kfree(state);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_state_alloc);

void __v4l2_subdev_state_free(struct v4l2_subdev_state *state)
{
	if (!state)
		return;

	mutex_destroy(&state->_lock);

	kfree(state->routing.routes);
	kvfree(state->stream_configs.configs);
	kvfree(state->pads);
	kfree(state);
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_state_free);

int __v4l2_subdev_init_finalize(struct v4l2_subdev *sd, const char *name,
				struct lock_class_key *key)
{
	struct v4l2_subdev_state *state;
	struct device *dev = sd->dev;
	bool has_disable_streams;
	bool has_enable_streams;
	bool has_s_stream;

	/* Check that the subdevice implements the required features */

	has_s_stream = v4l2_subdev_has_op(sd, video, s_stream);
	has_enable_streams = v4l2_subdev_has_op(sd, pad, enable_streams);
	has_disable_streams = v4l2_subdev_has_op(sd, pad, disable_streams);

	if (has_enable_streams != has_disable_streams) {
		dev_err(dev,
			"subdev '%s' must implement both or neither of .enable_streams() and .disable_streams()\n",
			sd->name);
		return -EINVAL;
	}

	if (sd->flags & V4L2_SUBDEV_FL_STREAMS) {
		if (has_s_stream && !has_enable_streams) {
			dev_err(dev,
				"subdev '%s' must implement .enable/disable_streams()\n",
				sd->name);

			return -EINVAL;
		}
	}

	if (sd->ctrl_handler)
		sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS;

	state = __v4l2_subdev_state_alloc(sd, name, key);
	if (IS_ERR(state))
		return PTR_ERR(state);

	sd->active_state = state;

	return 0;
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_init_finalize);

void v4l2_subdev_cleanup(struct v4l2_subdev *sd)
{
	struct v4l2_async_subdev_endpoint *ase, *ase_tmp;

	__v4l2_subdev_state_free(sd->active_state);
	sd->active_state = NULL;

	/* Uninitialised sub-device, bail out here. */
	if (!sd->async_subdev_endpoint_list.next)
		return;

	list_for_each_entry_safe(ase, ase_tmp, &sd->async_subdev_endpoint_list,
				 async_subdev_endpoint_entry) {
		list_del(&ase->async_subdev_endpoint_entry);

		kfree(ase);
	}
}
EXPORT_SYMBOL_GPL(v4l2_subdev_cleanup);

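/*
 * State accessors: for non-stream subdevs the format, crop, compose and
 * interval live in the legacy per-pad array; for stream-aware subdevs they
 * are looked up in the stream configuration that matches the (pad, stream)
 * pair. NULL is returned when no matching entry exists.
 */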
struct v4l2_mbus_framefmt *
__v4l2_subdev_state_get_format(struct v4l2_subdev_state *state,
			       unsigned int pad, u32 stream)
{
	struct v4l2_subdev_stream_configs *stream_configs;
	unsigned int i;

	if (WARN_ON_ONCE(!state))
		return NULL;

	if (state->pads) {
		if (stream)
			return NULL;

		if (pad >= state->sd->entity.num_pads)
			return NULL;

		return &state->pads[pad].format;
	}

	lockdep_assert_held(state->lock);

	stream_configs = &state->stream_configs;

	for (i = 0; i < stream_configs->num_configs; ++i) {
		if (stream_configs->configs[i].pad == pad &&
		    stream_configs->configs[i].stream == stream)
			return &stream_configs->configs[i].fmt;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_format);

struct v4l2_rect *
__v4l2_subdev_state_get_crop(struct v4l2_subdev_state *state, unsigned int pad,
			     u32 stream)
{
	struct v4l2_subdev_stream_configs *stream_configs;
	unsigned int i;

	if (WARN_ON_ONCE(!state))
		return NULL;

	if (state->pads) {
		if (stream)
			return NULL;

		if (pad >= state->sd->entity.num_pads)
			return NULL;

		return &state->pads[pad].crop;
	}

	lockdep_assert_held(state->lock);

	stream_configs = &state->stream_configs;

	for (i = 0; i < stream_configs->num_configs; ++i) {
		if (stream_configs->configs[i].pad == pad &&
		    stream_configs->configs[i].stream == stream)
			return &stream_configs->configs[i].crop;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_crop);

struct v4l2_rect *
__v4l2_subdev_state_get_compose(struct v4l2_subdev_state *state,
				unsigned int pad, u32 stream)
{
	struct v4l2_subdev_stream_configs *stream_configs;
	unsigned int i;

	if (WARN_ON_ONCE(!state))
		return NULL;

	if (state->pads) {
		if (stream)
			return NULL;

		if (pad >= state->sd->entity.num_pads)
			return NULL;

		return &state->pads[pad].compose;
	}

	lockdep_assert_held(state->lock);

	stream_configs = &state->stream_configs;

	for (i = 0; i < stream_configs->num_configs; ++i) {
		if (stream_configs->configs[i].pad == pad &&
		    stream_configs->configs[i].stream == stream)
			return &stream_configs->configs[i].compose;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_compose);

struct v4l2_fract *
__v4l2_subdev_state_get_interval(struct v4l2_subdev_state *state,
				 unsigned int pad, u32 stream)
{
	struct v4l2_subdev_stream_configs *stream_configs;
	unsigned int i;

	if (WARN_ON(!state))
		return NULL;

	lockdep_assert_held(state->lock);

	if (state->pads) {
		if (stream)
			return NULL;

		if (pad >= state->sd->entity.num_pads)
			return NULL;

		return &state->pads[pad].interval;
	}

	lockdep_assert_held(state->lock);

	stream_configs = &state->stream_configs;

	for (i = 0; i < stream_configs->num_configs; ++i) {
		if (stream_configs->configs[i].pad == pad &&
		    stream_configs->configs[i].stream == stream)
			return &stream_configs->configs[i].interval;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_interval);

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)

1839static int
1840v4l2_subdev_init_stream_configs(struct v4l2_subdev_stream_configs *stream_configs,
1841 const struct v4l2_subdev_krouting *routing)
1842{
1843 struct v4l2_subdev_stream_configs new_configs = { 0 };
1844 struct v4l2_subdev_route *route;
1845 u32 idx;
1846
1847 /* Count number of formats needed */
1848 for_each_active_route(routing, route) {
1849 /*
1850 * Each route needs a format on both ends of the route.
1851 */
1852 new_configs.num_configs += 2;
1853 }
1854
1855 if (new_configs.num_configs) {
1856 new_configs.configs = kvcalloc(new_configs.num_configs,
1857 sizeof(*new_configs.configs),
1858 GFP_KERNEL);
1859
1860 if (!new_configs.configs)
1861 return -ENOMEM;
1862 }
1863
1864 /*
1865 * Fill in the 'pad' and 'stream' values for each item in the array
1866 * from the routing table.
1867 */
1868 idx = 0;
1869
1870 for_each_active_route(routing, route) {
1871 new_configs.configs[idx].pad = route->sink_pad;
1872 new_configs.configs[idx].stream = route->sink_stream;
1873
1874 idx++;
1875
1876 new_configs.configs[idx].pad = route->source_pad;
1877 new_configs.configs[idx].stream = route->source_stream;
1878
1879 idx++;
1880 }
1881
1882 kvfree(stream_configs->configs);
1883 *stream_configs = new_configs;
1884
1885 return 0;
1886}
1887
14a6fca7
TV
1888int v4l2_subdev_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state,
1889 struct v4l2_subdev_format *format)
1890{
1891 struct v4l2_mbus_framefmt *fmt;
1892
8824170e 1893 fmt = v4l2_subdev_state_get_format(state, format->pad, format->stream);
14a6fca7
TV
1894 if (!fmt)
1895 return -EINVAL;
1896
1897 format->format = *fmt;
1898
1899 return 0;
1900}
1901EXPORT_SYMBOL_GPL(v4l2_subdev_get_fmt);
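
/*
 * Usage sketch (illustrative only): for drivers that use the active subdev
 * state, this helper can typically be plugged in directly as the .get_fmt
 * pad operation (foo_set_fmt below is a hypothetical driver callback):
 *
 *	static const struct v4l2_subdev_pad_ops foo_pad_ops = {
 *		.get_fmt = v4l2_subdev_get_fmt,
 *		.set_fmt = foo_set_fmt,
 *	};
 */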
1902
6b456240
LP
1903int v4l2_subdev_get_frame_interval(struct v4l2_subdev *sd,
1904 struct v4l2_subdev_state *state,
1905 struct v4l2_subdev_frame_interval *fi)
1906{
1907 struct v4l2_fract *interval;
1908
1909 interval = v4l2_subdev_state_get_interval(state, fi->pad, fi->stream);
1910 if (!interval)
1911 return -EINVAL;
1912
1913 fi->interval = *interval;
1914
1915 return 0;
1916}
1917EXPORT_SYMBOL_GPL(v4l2_subdev_get_frame_interval);
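
/*
 * Usage sketch (illustrative only): as with v4l2_subdev_get_fmt(), drivers
 * that keep the frame interval in the sub-device state can typically use
 * this helper directly as the .get_frame_interval pad operation
 * (foo_set_frame_interval is a hypothetical driver callback):
 *
 *	static const struct v4l2_subdev_pad_ops foo_pad_ops = {
 *		.get_frame_interval = v4l2_subdev_get_frame_interval,
 *		.set_frame_interval = foo_set_frame_interval,
 *	};
 */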
1918
17bb9bf8
TV
1919int v4l2_subdev_set_routing(struct v4l2_subdev *sd,
1920 struct v4l2_subdev_state *state,
1921 const struct v4l2_subdev_krouting *routing)
1922{
1923 struct v4l2_subdev_krouting *dst = &state->routing;
1924 const struct v4l2_subdev_krouting *src = routing;
1925 struct v4l2_subdev_krouting new_routing = { 0 };
1926 size_t bytes;
2f91e10e 1927 int r;
17bb9bf8
TV
1928
1929 if (unlikely(check_mul_overflow((size_t)src->num_routes,
1930 sizeof(*src->routes), &bytes)))
1931 return -EOVERFLOW;
1932
1933 lockdep_assert_held(state->lock);
1934
1935 if (src->num_routes > 0) {
1936 new_routing.routes = kmemdup(src->routes, bytes, GFP_KERNEL);
1937 if (!new_routing.routes)
1938 return -ENOMEM;
1939 }
1940
1941 new_routing.num_routes = src->num_routes;
1942
2f91e10e
TV
1943 r = v4l2_subdev_init_stream_configs(&state->stream_configs,
1944 &new_routing);
1945 if (r) {
1946 kfree(new_routing.routes);
1947 return r;
1948 }
1949
17bb9bf8
TV
1950 kfree(dst->routes);
1951 *dst = new_routing;
1952
1953 return 0;
1954}
1955EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing);
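
/*
 * Usage sketch (illustrative only): a hypothetical .set_routing handler
 * would typically validate the requested routing against the hardware
 * restrictions before storing it:
 *
 *	static int foo_set_routing(struct v4l2_subdev *sd,
 *				   struct v4l2_subdev_state *state,
 *				   enum v4l2_subdev_format_whence which,
 *				   struct v4l2_subdev_krouting *routing)
 *	{
 *		int ret;
 *
 *		ret = v4l2_subdev_routing_validate(sd, routing,
 *						   V4L2_SUBDEV_ROUTING_ONLY_1_TO_1);
 *		if (ret)
 *			return ret;
 *
 *		return v4l2_subdev_set_routing(sd, state, routing);
 *	}
 */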
1956
837f92f0
JM
1957struct v4l2_subdev_route *
1958__v4l2_subdev_next_active_route(const struct v4l2_subdev_krouting *routing,
1959 struct v4l2_subdev_route *route)
1960{
1961 if (route)
1962 ++route;
1963 else
1964 route = &routing->routes[0];
1965
1966 for (; route < routing->routes + routing->num_routes; ++route) {
1967 if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
1968 continue;
1969
1970 return route;
1971 }
1972
1973 return NULL;
1974}
1975EXPORT_SYMBOL_GPL(__v4l2_subdev_next_active_route);
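
/*
 * Usage sketch (illustrative only): this iterator backs the
 * for_each_active_route() macro, e.g. to walk the active routes of a state
 * and collect the source streams of a given pad ('state' and 'pad' are
 * assumed locals here):
 *
 *	struct v4l2_subdev_route *route;
 *	u64 streams = 0;
 *
 *	for_each_active_route(&state->routing, route)
 *		if (route->source_pad == pad)
 *			streams |= BIT_ULL(route->source_stream);
 */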
1976
5b0d85b3
TV
1977int v4l2_subdev_set_routing_with_fmt(struct v4l2_subdev *sd,
1978 struct v4l2_subdev_state *state,
bb05820e 1979 const struct v4l2_subdev_krouting *routing,
5b0d85b3
TV
1980 const struct v4l2_mbus_framefmt *fmt)
1981{
1982 struct v4l2_subdev_stream_configs *stream_configs;
1983 unsigned int i;
1984 int ret;
1985
1986 ret = v4l2_subdev_set_routing(sd, state, routing);
1987 if (ret)
1988 return ret;
1989
1990 stream_configs = &state->stream_configs;
1991
1992 for (i = 0; i < stream_configs->num_configs; ++i)
1993 stream_configs->configs[i].fmt = *fmt;
1994
1995 return 0;
1996}
1997EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing_with_fmt);
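
/*
 * Usage sketch (illustrative only): compared to v4l2_subdev_set_routing(),
 * this variant also resets every per-stream format to a driver-provided
 * default, which is a common pattern in .set_routing handlers
 * (foo_default_fmt is a hypothetical example value):
 *
 *	static const struct v4l2_mbus_framefmt foo_default_fmt = {
 *		.width = 1920,
 *		.height = 1080,
 *		.code = MEDIA_BUS_FMT_UYVY8_1X16,
 *		.field = V4L2_FIELD_NONE,
 *		.colorspace = V4L2_COLORSPACE_SRGB,
 *	};
 *
 *	ret = v4l2_subdev_set_routing_with_fmt(sd, state, routing,
 *					       &foo_default_fmt);
 */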
1998
d00f1a07
TV
1999int v4l2_subdev_routing_find_opposite_end(const struct v4l2_subdev_krouting *routing,
2000 u32 pad, u32 stream, u32 *other_pad,
2001 u32 *other_stream)
2002{
2003 unsigned int i;
2004
2005 for (i = 0; i < routing->num_routes; ++i) {
2006 struct v4l2_subdev_route *route = &routing->routes[i];
2007
2008 if (route->source_pad == pad &&
2009 route->source_stream == stream) {
2010 if (other_pad)
2011 *other_pad = route->sink_pad;
2012 if (other_stream)
2013 *other_stream = route->sink_stream;
2014 return 0;
2015 }
2016
2017 if (route->sink_pad == pad && route->sink_stream == stream) {
2018 if (other_pad)
2019 *other_pad = route->source_pad;
2020 if (other_stream)
2021 *other_stream = route->source_stream;
2022 return 0;
2023 }
2024 }
2025
2026 return -EINVAL;
2027}
2028EXPORT_SYMBOL_GPL(v4l2_subdev_routing_find_opposite_end);
2029
2030struct v4l2_mbus_framefmt *
2031v4l2_subdev_state_get_opposite_stream_format(struct v4l2_subdev_state *state,
2032 u32 pad, u32 stream)
2033{
2034 u32 other_pad, other_stream;
2035 int ret;
2036
2037 ret = v4l2_subdev_routing_find_opposite_end(&state->routing,
2038 pad, stream,
2039 &other_pad, &other_stream);
2040 if (ret)
2041 return NULL;
2042
d0fde6aa 2043 return v4l2_subdev_state_get_format(state, other_pad, other_stream);
d00f1a07
TV
2044}
2045EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_opposite_stream_format);
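
/*
 * Usage sketch (illustrative only): a typical use is propagating a format
 * that was just set on a sink pad to the connected source stream, e.g. at
 * the end of a hypothetical .set_fmt handler:
 *
 *	struct v4l2_mbus_framefmt *opp_fmt;
 *
 *	opp_fmt = v4l2_subdev_state_get_opposite_stream_format(state,
 *								format->pad,
 *								format->stream);
 *	if (opp_fmt)
 *		*opp_fmt = format->format;
 */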
2046
c4a73f31
LP
2047u64 v4l2_subdev_state_xlate_streams(const struct v4l2_subdev_state *state,
2048 u32 pad0, u32 pad1, u64 *streams)
2049{
2050 const struct v4l2_subdev_krouting *routing = &state->routing;
2051 struct v4l2_subdev_route *route;
2052 u64 streams0 = 0;
2053 u64 streams1 = 0;
2054
2055 for_each_active_route(routing, route) {
2056 if (route->sink_pad == pad0 && route->source_pad == pad1 &&
2057 (*streams & BIT_ULL(route->sink_stream))) {
2058 streams0 |= BIT_ULL(route->sink_stream);
2059 streams1 |= BIT_ULL(route->source_stream);
2060 }
2061 if (route->source_pad == pad0 && route->sink_pad == pad1 &&
2062 (*streams & BIT_ULL(route->source_stream))) {
2063 streams0 |= BIT_ULL(route->source_stream);
2064 streams1 |= BIT_ULL(route->sink_stream);
2065 }
2066 }
2067
2068 *streams = streams0;
2069 return streams1;
2070}
2071EXPORT_SYMBOL_GPL(v4l2_subdev_state_xlate_streams);
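
/*
 * Usage sketch (illustrative only): a hypothetical .enable_streams handler
 * of a sub-device with a single sink pad (FOO_PAD_SINK, upstream_sd and
 * upstream_pad are placeholders) could translate the source streams it was
 * asked to enable into the sink streams that must be enabled upstream:
 *
 *	u64 sink_streams;
 *
 *	sink_streams = v4l2_subdev_state_xlate_streams(state, pad,
 *						       FOO_PAD_SINK,
 *						       &streams_mask);
 *
 *	ret = v4l2_subdev_enable_streams(upstream_sd, upstream_pad,
 *					 sink_streams);
 */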
2072
69c0fe7a
LP
2073int v4l2_subdev_routing_validate(struct v4l2_subdev *sd,
2074 const struct v4l2_subdev_krouting *routing,
2075 enum v4l2_subdev_routing_restriction disallow)
2076{
2077 u32 *remote_pads = NULL;
2078 unsigned int i, j;
2079 int ret = -EINVAL;
2080
a1299df6
TV
2081 if (disallow & (V4L2_SUBDEV_ROUTING_NO_STREAM_MIX |
2082 V4L2_SUBDEV_ROUTING_NO_MULTIPLEXING)) {
69c0fe7a
LP
2083 remote_pads = kcalloc(sd->entity.num_pads, sizeof(*remote_pads),
2084 GFP_KERNEL);
2085 if (!remote_pads)
2086 return -ENOMEM;
2087
2088 for (i = 0; i < sd->entity.num_pads; ++i)
2089 remote_pads[i] = U32_MAX;
2090 }
2091
2092 for (i = 0; i < routing->num_routes; ++i) {
2093 const struct v4l2_subdev_route *route = &routing->routes[i];
2094
2095 /* Validate the sink and source pad numbers. */
2096 if (route->sink_pad >= sd->entity.num_pads ||
2097 !(sd->entity.pads[route->sink_pad].flags & MEDIA_PAD_FL_SINK)) {
2098 dev_dbg(sd->dev, "route %u sink (%u) is not a sink pad\n",
2099 i, route->sink_pad);
2100 goto out;
2101 }
2102
2103 if (route->source_pad >= sd->entity.num_pads ||
2104 !(sd->entity.pads[route->source_pad].flags & MEDIA_PAD_FL_SOURCE)) {
2105 dev_dbg(sd->dev, "route %u source (%u) is not a source pad\n",
2106 i, route->source_pad);
2107 goto out;
2108 }
2109
2110 /*
698a619a
TV
2111 * V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX: all streams from a
2112 * sink pad must be routed to a single source pad.
69c0fe7a 2113 */
698a619a 2114 if (disallow & V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX) {
69c0fe7a
LP
2115 if (remote_pads[route->sink_pad] != U32_MAX &&
2116 remote_pads[route->sink_pad] != route->source_pad) {
2117 dev_dbg(sd->dev,
2118 "route %u attempts to mix %s streams\n",
2119 i, "sink");
2120 goto out;
2121 }
698a619a
TV
2122 }
2123
2124 /*
2125 * V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX: all streams on a
2126 * source pad must originate from a single sink pad.
2127 */
2128 if (disallow & V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX) {
69c0fe7a
LP
2129 if (remote_pads[route->source_pad] != U32_MAX &&
2130 remote_pads[route->source_pad] != route->sink_pad) {
2131 dev_dbg(sd->dev,
2132 "route %u attempts to mix %s streams\n",
2133 i, "source");
2134 goto out;
2135 }
a1299df6
TV
2136 }
2137
2138 /*
2139 * V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING: Pads on the sink
2140 * side cannot do stream multiplexing, i.e. there can be only
2141 * a single stream in a sink pad.
2142 */
2143 if (disallow & V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING) {
2144 if (remote_pads[route->sink_pad] != U32_MAX) {
2145 dev_dbg(sd->dev,
2146 "route %u attempts to multiplex on %s pad %u\n",
2147 i, "sink", route->sink_pad);
2148 goto out;
2149 }
2150 }
69c0fe7a 2151
a1299df6
TV
2152 /*
2153 * V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING: Pads on the
2154 * source side cannot do stream multiplexing, i.e. there can
2155 * be only a single stream in a source pad.
2156 */
2157 if (disallow & V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING) {
2158 if (remote_pads[route->source_pad] != U32_MAX) {
2159 dev_dbg(sd->dev,
2160 "route %u attempts to multiplex on %s pad %u\n",
2161 i, "source", route->source_pad);
2162 goto out;
2163 }
2164 }
2165
2166 if (remote_pads) {
2167 remote_pads[route->sink_pad] = route->source_pad;
69c0fe7a
LP
2168 remote_pads[route->source_pad] = route->sink_pad;
2169 }
2170
2171 for (j = i + 1; j < routing->num_routes; ++j) {
2172 const struct v4l2_subdev_route *r = &routing->routes[j];
2173
2174 /*
2175 * V4L2_SUBDEV_ROUTING_NO_1_TO_N: No two routes can
2176 * originate from the same (sink) stream.
2177 */
2178 if ((disallow & V4L2_SUBDEV_ROUTING_NO_1_TO_N) &&
2179 route->sink_pad == r->sink_pad &&
2180 route->sink_stream == r->sink_stream) {
2181 dev_dbg(sd->dev,
2182 "routes %u and %u originate from same sink (%u/%u)\n",
2183 i, j, route->sink_pad,
2184 route->sink_stream);
2185 goto out;
2186 }
2187
2188 /*
2189 * V4L2_SUBDEV_ROUTING_NO_N_TO_1: No two routes can end
2190 * at the same (source) stream.
2191 */
2192 if ((disallow & V4L2_SUBDEV_ROUTING_NO_N_TO_1) &&
2193 route->source_pad == r->source_pad &&
2194 route->source_stream == r->source_stream) {
2195 dev_dbg(sd->dev,
2196 "routes %u and %u end at same source (%u/%u)\n",
2197 i, j, route->source_pad,
2198 route->source_stream);
2199 goto out;
2200 }
2201 }
2202 }
2203
2204 ret = 0;
2205
2206out:
2207 kfree(remote_pads);
2208 return ret;
2209}
2210EXPORT_SYMBOL_GPL(v4l2_subdev_routing_validate);
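
/*
 * Usage sketch (illustrative only): the disallow flags can be combined to
 * describe the hardware routing restrictions, e.g. a hypothetical bridge
 * that cannot mix streams from different sink pads on one source pad and
 * cannot route one sink stream to several source streams:
 *
 *	ret = v4l2_subdev_routing_validate(sd, routing,
 *					   V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX |
 *					   V4L2_SUBDEV_ROUTING_NO_1_TO_N);
 */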
2211
86862307
TV
2212static void v4l2_subdev_collect_streams(struct v4l2_subdev *sd,
2213 struct v4l2_subdev_state *state,
2214 u32 pad, u64 streams_mask,
2215 u64 *found_streams,
2216 u64 *enabled_streams)
2217{
b62949dd
TV
2218 if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) {
2219 *found_streams = BIT_ULL(0);
2220 *enabled_streams =
2221 (sd->enabled_pads & BIT_ULL(pad)) ? BIT_ULL(0) : 0;
2222 return;
2223 }
2224
86862307
TV
2225 *found_streams = 0;
2226 *enabled_streams = 0;
2227
2228 for (unsigned int i = 0; i < state->stream_configs.num_configs; ++i) {
2229 const struct v4l2_subdev_stream_config *cfg =
2230 &state->stream_configs.configs[i];
2231
2232 if (cfg->pad != pad || !(streams_mask & BIT_ULL(cfg->stream)))
2233 continue;
2234
2235 *found_streams |= BIT_ULL(cfg->stream);
2236 if (cfg->enabled)
2237 *enabled_streams |= BIT_ULL(cfg->stream);
2238 }
2239}
2240
2241static void v4l2_subdev_set_streams_enabled(struct v4l2_subdev *sd,
2242 struct v4l2_subdev_state *state,
2243 u32 pad, u64 streams_mask,
2244 bool enabled)
2245{
b62949dd
TV
2246 if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) {
2247 if (enabled)
2248 sd->enabled_pads |= BIT_ULL(pad);
2249 else
2250 sd->enabled_pads &= ~BIT_ULL(pad);
2251 return;
2252 }
2253
86862307
TV
2254 for (unsigned int i = 0; i < state->stream_configs.num_configs; ++i) {
2255 struct v4l2_subdev_stream_config *cfg =
2256 &state->stream_configs.configs[i];
2257
2258 if (cfg->pad == pad && (streams_mask & BIT_ULL(cfg->stream)))
2259 cfg->enabled = enabled;
2260 }
2261}
2262
d0749adb
LP
2263int v4l2_subdev_enable_streams(struct v4l2_subdev *sd, u32 pad,
2264 u64 streams_mask)
2265{
2266 struct device *dev = sd->entity.graph_obj.mdev->dev;
2267 struct v4l2_subdev_state *state;
585d8fd5 2268 bool already_streaming;
86862307
TV
2269 u64 enabled_streams;
2270 u64 found_streams;
b62949dd 2271 bool use_s_stream;
d0749adb
LP
2272 int ret;
2273
2274 /* A few basic sanity checks first. */
2275 if (pad >= sd->entity.num_pads)
2276 return -EINVAL;
2277
b62949dd
TV
2278 if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
2279 return -EOPNOTSUPP;
2280
2281 /*
2282 * We use a 64-bit bitmask for tracking enabled pads, so only subdevices
2283 * with 64 pads or fewer can be supported.
2284 */
2285 if (pad >= sizeof(sd->enabled_pads) * BITS_PER_BYTE)
2286 return -EOPNOTSUPP;
2287
d0749adb
LP
2288 if (!streams_mask)
2289 return 0;
2290
2291 /* Fallback on .s_stream() if .enable_streams() isn't available. */
b62949dd 2292 use_s_stream = !v4l2_subdev_has_op(sd, pad, enable_streams);
d0749adb 2293
b62949dd
TV
2294 if (!use_s_stream)
2295 state = v4l2_subdev_lock_and_get_active_state(sd);
2296 else
2297 state = NULL;
d0749adb
LP
2298
2299 /*
2300 * Verify that the requested streams exist and that they are not
2301 * already enabled.
2302 */
d0749adb 2303
86862307
TV
2304 v4l2_subdev_collect_streams(sd, state, pad, streams_mask,
2305 &found_streams, &enabled_streams);
d0749adb
LP
2306
2307 if (found_streams != streams_mask) {
2308 dev_dbg(dev, "streams 0x%llx not found on %s:%u\n",
2309 streams_mask & ~found_streams, sd->entity.name, pad);
2310 ret = -EINVAL;
2311 goto done;
2312 }
2313
86862307
TV
2314 if (enabled_streams) {
2315 dev_dbg(dev, "streams 0x%llx already enabled on %s:%u\n",
2316 enabled_streams, sd->entity.name, pad);
2317 ret = -EALREADY;
2318 goto done;
2319 }
2320
35a29918
TV
2321 dev_dbg(dev, "enable streams %u:%#llx\n", pad, streams_mask);
2322
585d8fd5
TV
2323 already_streaming = v4l2_subdev_is_streaming(sd);
2324
b62949dd
TV
2325 if (!use_s_stream) {
2326 /* Call the .enable_streams() operation. */
2327 ret = v4l2_subdev_call(sd, pad, enable_streams, state, pad,
2328 streams_mask);
2329 } else {
2330 /* Start streaming when the first pad is enabled. */
2331 if (!already_streaming)
2332 ret = v4l2_subdev_call(sd, video, s_stream, 1);
2333 else
2334 ret = 0;
2335 }
2336
35a29918
TV
2337 if (ret) {
2338 dev_dbg(dev, "enable streams %u:%#llx failed: %d\n", pad,
2339 streams_mask, ret);
d0749adb 2340 goto done;
35a29918 2341 }
d0749adb
LP
2342
2343 /* Mark the streams as enabled. */
86862307 2344 v4l2_subdev_set_streams_enabled(sd, state, pad, streams_mask, true);
d0749adb 2345
b62949dd
TV
2346 /*
2347 * TODO: When all the drivers have been changed to use
2348 * v4l2_subdev_enable_streams() and v4l2_subdev_disable_streams(),
2349 * instead of calling .s_stream() operation directly, we can remove
2350 * the privacy LED handling from call_s_stream() and do it here
2351 * for all cases.
2352 */
2353 if (!use_s_stream && !already_streaming)
585d8fd5
TV
2354 v4l2_subdev_enable_privacy_led(sd);
2355
d0749adb 2356done:
b62949dd
TV
2357 if (!use_s_stream)
2358 v4l2_subdev_unlock_state(state);
d0749adb
LP
2359
2360 return ret;
2361}
2362EXPORT_SYMBOL_GPL(v4l2_subdev_enable_streams);
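
/*
 * Usage sketch (illustrative only): a hypothetical bridge or video node
 * driver starting capture would enable the streams it consumes on the
 * connected source pad of the remote sub-device (remote_sd, remote_pad and
 * stream are placeholders), and disable them again with
 * v4l2_subdev_disable_streams() when stopping:
 *
 *	ret = v4l2_subdev_enable_streams(remote_sd, remote_pad,
 *					 BIT_ULL(stream));
 *	if (ret)
 *		return ret;
 */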
2363
b62949dd
TV
2364int v4l2_subdev_disable_streams(struct v4l2_subdev *sd, u32 pad,
2365 u64 streams_mask)
d0749adb
LP
2366{
2367 struct device *dev = sd->entity.graph_obj.mdev->dev;
b62949dd
TV
2368 struct v4l2_subdev_state *state;
2369 u64 enabled_streams;
2370 u64 found_streams;
2371 bool use_s_stream;
d0749adb
LP
2372 int ret;
2373
b62949dd
TV
2374 /* A few basic sanity checks first. */
2375 if (pad >= sd->entity.num_pads)
2376 return -EINVAL;
d0749adb 2377
b62949dd 2378 if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
61d6c8c8
TV
2379 return -EOPNOTSUPP;
2380
2381 /*
2382 * We use a 64-bit bitmask for tracking enabled pads, so only subdevices
2383 * with 64 pads or fewer can be supported.
2384 */
2385 if (pad >= sizeof(sd->enabled_pads) * BITS_PER_BYTE)
2386 return -EOPNOTSUPP;
d0749adb 2387
d0749adb
LP
2388 if (!streams_mask)
2389 return 0;
2390
2391 /* Fallback on .s_stream() if .disable_streams() isn't available. */
b62949dd 2392 use_s_stream = !v4l2_subdev_has_op(sd, pad, disable_streams);
d0749adb 2393
b62949dd
TV
2394 if (!use_s_stream)
2395 state = v4l2_subdev_lock_and_get_active_state(sd);
2396 else
2397 state = NULL;
d0749adb
LP
2398
2399 /*
2400 * Verify that the requested streams exist and that they are not
2401 * already disabled.
2402 */
d0749adb 2403
86862307
TV
2404 v4l2_subdev_collect_streams(sd, state, pad, streams_mask,
2405 &found_streams, &enabled_streams);
d0749adb
LP
2406
2407 if (found_streams != streams_mask) {
2408 dev_dbg(dev, "streams 0x%llx not found on %s:%u\n",
2409 streams_mask & ~found_streams, sd->entity.name, pad);
2410 ret = -EINVAL;
2411 goto done;
2412 }
2413
86862307
TV
2414 if (enabled_streams != streams_mask) {
2415 dev_dbg(dev, "streams 0x%llx already disabled on %s:%u\n",
2416 streams_mask & ~enabled_streams, sd->entity.name, pad);
2417 ret = -EALREADY;
2418 goto done;
2419 }
2420
35a29918
TV
2421 dev_dbg(dev, "disable streams %u:%#llx\n", pad, streams_mask);
2422
b62949dd
TV
2423 if (!use_s_stream) {
2424 /* Call the .disable_streams() operation. */
2425 ret = v4l2_subdev_call(sd, pad, disable_streams, state, pad,
2426 streams_mask);
2427 } else {
2428 /* Stop streaming when the last enabled pad is disabled. */
2429
2430 if (!(sd->enabled_pads & ~BIT_ULL(pad)))
2431 ret = v4l2_subdev_call(sd, video, s_stream, 0);
2432 else
2433 ret = 0;
2434 }
2435
35a29918
TV
2436 if (ret) {
2437 dev_dbg(dev, "disable streams %u:%#llx failed: %d\n", pad,
2438 streams_mask, ret);
d0749adb 2439 goto done;
35a29918 2440 }
d0749adb 2441
86862307 2442 v4l2_subdev_set_streams_enabled(sd, state, pad, streams_mask, false);
d0749adb
LP
2443
2444done:
b62949dd
TV
2445 if (!use_s_stream) {
2446 if (!v4l2_subdev_is_streaming(sd))
2447 v4l2_subdev_disable_privacy_led(sd);
585d8fd5 2448
b62949dd
TV
2449 v4l2_subdev_unlock_state(state);
2450 }
d0749adb
LP
2451
2452 return ret;
2453}
2454EXPORT_SYMBOL_GPL(v4l2_subdev_disable_streams);
2455
34a315ce
LP
2456int v4l2_subdev_s_stream_helper(struct v4l2_subdev *sd, int enable)
2457{
2458 struct v4l2_subdev_state *state;
2459 struct v4l2_subdev_route *route;
2460 struct media_pad *pad;
2461 u64 source_mask = 0;
2462 int pad_index = -1;
2463
2464 /*
2465 * Find the source pad. This helper is meant for subdevs that have a
2466 * single source pad, so failures shouldn't happen, but catch them
2467 * loudly nonetheless as they indicate a driver bug.
2468 */
2469 media_entity_for_each_pad(&sd->entity, pad) {
2470 if (pad->flags & MEDIA_PAD_FL_SOURCE) {
2471 pad_index = pad->index;
2472 break;
2473 }
2474 }
2475
2476 if (WARN_ON(pad_index == -1))
2477 return -EINVAL;
2478
93c726f4
TV
2479 if (sd->flags & V4L2_SUBDEV_FL_STREAMS) {
2480 /*
2481 * As there's a single source pad, just collect all the source
2482 * streams.
2483 */
2484 state = v4l2_subdev_lock_and_get_active_state(sd);
34a315ce 2485
93c726f4
TV
2486 for_each_active_route(&state->routing, route)
2487 source_mask |= BIT_ULL(route->source_stream);
34a315ce 2488
93c726f4
TV
2489 v4l2_subdev_unlock_state(state);
2490 } else {
2491 /*
2492 * For non-streams subdevices, there's a single implicit stream
2493 * per pad.
2494 */
2495 source_mask = BIT_ULL(0);
2496 }
34a315ce
LP
2497
2498 if (enable)
2499 return v4l2_subdev_enable_streams(sd, pad_index, source_mask);
2500 else
2501 return v4l2_subdev_disable_streams(sd, pad_index, source_mask);
2502}
2503EXPORT_SYMBOL_GPL(v4l2_subdev_s_stream_helper);
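
/*
 * Usage sketch (illustrative only): this helper lets a driver that already
 * implements .enable_streams()/.disable_streams() keep supporting callers
 * of the legacy .s_stream() video operation:
 *
 *	static const struct v4l2_subdev_video_ops foo_video_ops = {
 *		.s_stream = v4l2_subdev_s_stream_helper,
 *	};
 */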
2504
14a6fca7
TV
2505#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */
2506
8227c92b
SA
2507#endif /* CONFIG_MEDIA_CONTROLLER */
2508
3dd5ee08
LP
2509void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops)
2510{
2511 INIT_LIST_HEAD(&sd->list);
2512 BUG_ON(!ops);
2513 sd->ops = ops;
2514 sd->v4l2_dev = NULL;
2515 sd->flags = 0;
2516 sd->name[0] = '\0';
2517 sd->grp_id = 0;
2518 sd->dev_priv = NULL;
2519 sd->host_priv = NULL;
b6e10ff6 2520 sd->privacy_led = NULL;
e74f7a96 2521 INIT_LIST_HEAD(&sd->async_subdev_endpoint_list);
61f5db54
LP
2522#if defined(CONFIG_MEDIA_CONTROLLER)
2523 sd->entity.name = sd->name;
b76a2a8c 2524 sd->entity.obj_type = MEDIA_ENTITY_TYPE_V4L2_SUBDEV;
4ca72efa 2525 sd->entity.function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
61f5db54 2526#endif
3dd5ee08
LP
2527}
2528EXPORT_SYMBOL(v4l2_subdev_init);
8ae5640f 2529
8ae5640f
LPC
2530void v4l2_subdev_notify_event(struct v4l2_subdev *sd,
2531 const struct v4l2_event *ev)
2532{
2533 v4l2_event_queue(sd->devnode, ev);
2534 v4l2_subdev_notify(sd, V4L2_DEVICE_NOTIFY_EVENT, (void *)ev);
2535}
2536EXPORT_SYMBOL_GPL(v4l2_subdev_notify_event);
b6e10ff6 2537
5f3ce14f
TV
2538bool v4l2_subdev_is_streaming(struct v4l2_subdev *sd)
2539{
2540 struct v4l2_subdev_state *state;
2541
2542 if (!v4l2_subdev_has_op(sd, pad, enable_streams))
2543 return sd->s_stream_enabled;
2544
2545 if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
2546 return !!sd->enabled_pads;
2547
2548 state = v4l2_subdev_get_locked_active_state(sd);
2549
2550 for (unsigned int i = 0; i < state->stream_configs.num_configs; ++i) {
2551 const struct v4l2_subdev_stream_config *cfg;
2552
2553 cfg = &state->stream_configs.configs[i];
2554
2555 if (cfg->enabled)
2556 return true;
2557 }
2558
2559 return false;
2560}
2561EXPORT_SYMBOL_GPL(v4l2_subdev_is_streaming);
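
/*
 * Usage sketch (illustrative only): drivers commonly use this to refuse
 * configuration changes while streaming, e.g. in a hypothetical .set_fmt
 * handler operating on the locked active state:
 *
 *	if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE &&
 *	    v4l2_subdev_is_streaming(sd))
 *		return -EBUSY;
 */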
2562
b6e10ff6
HG
2563int v4l2_subdev_get_privacy_led(struct v4l2_subdev *sd)
2564{
2565#if IS_REACHABLE(CONFIG_LEDS_CLASS)
2566 sd->privacy_led = led_get(sd->dev, "privacy-led");
2567 if (IS_ERR(sd->privacy_led) && PTR_ERR(sd->privacy_led) != -ENOENT)
2568 return dev_err_probe(sd->dev, PTR_ERR(sd->privacy_led),
2569 "getting privacy LED\n");
2570
2571 if (!IS_ERR_OR_NULL(sd->privacy_led)) {
2572 mutex_lock(&sd->privacy_led->led_access);
2573 led_sysfs_disable(sd->privacy_led);
2574 led_trigger_remove(sd->privacy_led);
2575 led_set_brightness(sd->privacy_led, 0);
2576 mutex_unlock(&sd->privacy_led->led_access);
2577 }
2578#endif
2579 return 0;
2580}
2581EXPORT_SYMBOL_GPL(v4l2_subdev_get_privacy_led);
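
/*
 * Usage sketch (illustrative only): a hypothetical sensor driver would look
 * up the optional privacy LED once at probe time and release it again in
 * its remove path with v4l2_subdev_put_privacy_led():
 *
 *	ret = v4l2_subdev_get_privacy_led(sd);
 *	if (ret)
 *		return ret;
 */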
2582
2583void v4l2_subdev_put_privacy_led(struct v4l2_subdev *sd)
2584{
2585#if IS_REACHABLE(CONFIG_LEDS_CLASS)
2586 if (!IS_ERR_OR_NULL(sd->privacy_led)) {
2587 mutex_lock(&sd->privacy_led->led_access);
2588 led_sysfs_enable(sd->privacy_led);
2589 mutex_unlock(&sd->privacy_led->led_access);
2590 led_put(sd->privacy_led);
2591 }
2592#endif
2593}
2594EXPORT_SYMBOL_GPL(v4l2_subdev_put_privacy_led);