// SPDX-License-Identifier: GPL-2.0-only
/*
 * V4L2 sub-device
 *
 * Copyright (C) 2010 Nokia Corporation
 *
 * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *	    Sakari Ailus <sakari.ailus@iki.fi>
 */

#include <linux/export.h>
#include <linux/ioctl.h>
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/version.h>
#include <linux/videodev2.h>

#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
/*
 * The Streams API is an experimental feature. To use the Streams API, set
 * 'v4l2_subdev_enable_streams_api' to 1 below.
 */
static bool v4l2_subdev_enable_streams_api;
#endif

/*
 * Maximum stream ID is 63 for now, as we use u64 bitmask to represent a set
 * of streams.
 *
 * Note that V4L2_FRAME_DESC_ENTRY_MAX is related: V4L2_FRAME_DESC_ENTRY_MAX
 * restricts the total number of streams in a pad, although the stream ID is
 * not restricted.
 */
#define V4L2_SUBDEV_MAX_STREAM_ID 63
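
/*
 * Illustrative sketch (not part of the original file): a set of streams is
 * passed around as a u64 bitmask indexed by stream ID, so streams 0 and 2
 * on a pad would be described as:
 *
 *	u64 streams_mask = BIT_ULL(0) | BIT_ULL(2);
 *
 * which is why stream IDs above 63 cannot be represented.
 */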

#include "v4l2-subdev-priv.h"

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
static int subdev_fh_init(struct v4l2_subdev_fh *fh, struct v4l2_subdev *sd)
{
	struct v4l2_subdev_state *state;
	static struct lock_class_key key;

	state = __v4l2_subdev_state_alloc(sd, "fh->state->lock", &key);
	if (IS_ERR(state))
		return PTR_ERR(state);

	fh->state = state;

	return 0;
}

static void subdev_fh_free(struct v4l2_subdev_fh *fh)
{
	__v4l2_subdev_state_free(fh->state);
	fh->state = NULL;
}

static int subdev_open(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_subdev_fh *subdev_fh;
	int ret;

	subdev_fh = kzalloc(sizeof(*subdev_fh), GFP_KERNEL);
	if (subdev_fh == NULL)
		return -ENOMEM;

	ret = subdev_fh_init(subdev_fh, sd);
	if (ret) {
		kfree(subdev_fh);
		return ret;
	}

	v4l2_fh_init(&subdev_fh->vfh, vdev);
	v4l2_fh_add(&subdev_fh->vfh);
	file->private_data = &subdev_fh->vfh;

	if (sd->v4l2_dev->mdev && sd->entity.graph_obj.mdev->dev) {
		struct module *owner;

		owner = sd->entity.graph_obj.mdev->dev->driver->owner;
		if (!try_module_get(owner)) {
			ret = -EBUSY;
			goto err;
		}
		subdev_fh->owner = owner;
	}

	if (sd->internal_ops && sd->internal_ops->open) {
		ret = sd->internal_ops->open(sd, subdev_fh);
		if (ret < 0)
			goto err;
	}

	return 0;

err:
	module_put(subdev_fh->owner);
	v4l2_fh_del(&subdev_fh->vfh);
	v4l2_fh_exit(&subdev_fh->vfh);
	subdev_fh_free(subdev_fh);
	kfree(subdev_fh);

	return ret;
}

static int subdev_close(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_fh *vfh = file->private_data;
	struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);

	if (sd->internal_ops && sd->internal_ops->close)
		sd->internal_ops->close(sd, subdev_fh);
	module_put(subdev_fh->owner);
	v4l2_fh_del(vfh);
	v4l2_fh_exit(vfh);
	subdev_fh_free(subdev_fh);
	kfree(subdev_fh);
	file->private_data = NULL;

	return 0;
}
#else /* CONFIG_VIDEO_V4L2_SUBDEV_API */
static int subdev_open(struct file *file)
{
	return -ENODEV;
}

static int subdev_close(struct file *file)
{
	return -ENODEV;
}
#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */

static inline int check_which(u32 which)
{
	if (which != V4L2_SUBDEV_FORMAT_TRY &&
	    which != V4L2_SUBDEV_FORMAT_ACTIVE)
		return -EINVAL;

	return 0;
}

static inline int check_pad(struct v4l2_subdev *sd, u32 pad)
{
#if defined(CONFIG_MEDIA_CONTROLLER)
	if (sd->entity.num_pads) {
		if (pad >= sd->entity.num_pads)
			return -EINVAL;
		return 0;
	}
#endif
	/* allow pad 0 on subdevices not registered as media entities */
	if (pad > 0)
		return -EINVAL;
	return 0;
}

static int check_state(struct v4l2_subdev *sd, struct v4l2_subdev_state *state,
		       u32 which, u32 pad, u32 stream)
{
	if (sd->flags & V4L2_SUBDEV_FL_STREAMS) {
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
		if (!v4l2_subdev_state_get_stream_format(state, pad, stream))
			return -EINVAL;
		return 0;
#else
		return -EINVAL;
#endif
	}

	if (stream != 0)
		return -EINVAL;

	if (which == V4L2_SUBDEV_FORMAT_TRY && (!state || !state->pads))
		return -EINVAL;

	return 0;
}
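
/*
 * The check helpers below chain their checks with the GNU "elvis" operator
 * (a ?: b), which evaluates to a when a is non-zero and to b otherwise. A
 * hypothetical expansion, for illustration only:
 *
 *	int err = check_which(format->which);
 *	if (!err)
 *		err = check_pad(sd, format->pad);
 *	if (!err)
 *		err = check_state(sd, state, format->which, format->pad,
 *				  format->stream);
 *	return err;
 *
 * The first failing check short-circuits the rest.
 */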
static inline int check_format(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *state,
			       struct v4l2_subdev_format *format)
{
	if (!format)
		return -EINVAL;

	if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
		format->stream = 0;

	return check_which(format->which) ? : check_pad(sd, format->pad) ? :
	       check_state(sd, state, format->which, format->pad, format->stream);
}

static int call_get_fmt(struct v4l2_subdev *sd,
			struct v4l2_subdev_state *state,
			struct v4l2_subdev_format *format)
{
	return check_format(sd, state, format) ? :
	       sd->ops->pad->get_fmt(sd, state, format);
}

static int call_set_fmt(struct v4l2_subdev *sd,
			struct v4l2_subdev_state *state,
			struct v4l2_subdev_format *format)
{
	return check_format(sd, state, format) ? :
	       sd->ops->pad->set_fmt(sd, state, format);
}

static int call_enum_mbus_code(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *state,
			       struct v4l2_subdev_mbus_code_enum *code)
{
	if (!code)
		return -EINVAL;

	if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
		code->stream = 0;

	return check_which(code->which) ? : check_pad(sd, code->pad) ? :
	       check_state(sd, state, code->which, code->pad, code->stream) ? :
	       sd->ops->pad->enum_mbus_code(sd, state, code);
}

static int call_enum_frame_size(struct v4l2_subdev *sd,
				struct v4l2_subdev_state *state,
				struct v4l2_subdev_frame_size_enum *fse)
{
	if (!fse)
		return -EINVAL;

	if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
		fse->stream = 0;

	return check_which(fse->which) ? : check_pad(sd, fse->pad) ? :
	       check_state(sd, state, fse->which, fse->pad, fse->stream) ? :
	       sd->ops->pad->enum_frame_size(sd, state, fse);
}

static inline int check_frame_interval(struct v4l2_subdev *sd,
				       struct v4l2_subdev_frame_interval *fi)
{
	if (!fi)
		return -EINVAL;

	return check_pad(sd, fi->pad);
}

static int call_g_frame_interval(struct v4l2_subdev *sd,
				 struct v4l2_subdev_frame_interval *fi)
{
	return check_frame_interval(sd, fi) ? :
	       sd->ops->video->g_frame_interval(sd, fi);
}

static int call_s_frame_interval(struct v4l2_subdev *sd,
				 struct v4l2_subdev_frame_interval *fi)
{
	return check_frame_interval(sd, fi) ? :
	       sd->ops->video->s_frame_interval(sd, fi);
}

static int call_enum_frame_interval(struct v4l2_subdev *sd,
				    struct v4l2_subdev_state *state,
				    struct v4l2_subdev_frame_interval_enum *fie)
{
	if (!fie)
		return -EINVAL;

	if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
		fie->stream = 0;

	return check_which(fie->which) ? : check_pad(sd, fie->pad) ? :
	       check_state(sd, state, fie->which, fie->pad, fie->stream) ? :
	       sd->ops->pad->enum_frame_interval(sd, state, fie);
}

static inline int check_selection(struct v4l2_subdev *sd,
				  struct v4l2_subdev_state *state,
				  struct v4l2_subdev_selection *sel)
{
	if (!sel)
		return -EINVAL;

	if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
		sel->stream = 0;

	return check_which(sel->which) ? : check_pad(sd, sel->pad) ? :
	       check_state(sd, state, sel->which, sel->pad, sel->stream);
}

static int call_get_selection(struct v4l2_subdev *sd,
			      struct v4l2_subdev_state *state,
			      struct v4l2_subdev_selection *sel)
{
	return check_selection(sd, state, sel) ? :
	       sd->ops->pad->get_selection(sd, state, sel);
}

static int call_set_selection(struct v4l2_subdev *sd,
			      struct v4l2_subdev_state *state,
			      struct v4l2_subdev_selection *sel)
{
	return check_selection(sd, state, sel) ? :
	       sd->ops->pad->set_selection(sd, state, sel);
}

static inline int check_edid(struct v4l2_subdev *sd,
			     struct v4l2_subdev_edid *edid)
{
	if (!edid)
		return -EINVAL;

	if (edid->blocks && edid->edid == NULL)
		return -EINVAL;

	return check_pad(sd, edid->pad);
}

static int call_get_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
{
	return check_edid(sd, edid) ? : sd->ops->pad->get_edid(sd, edid);
}

static int call_set_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
{
	return check_edid(sd, edid) ? : sd->ops->pad->set_edid(sd, edid);
}

static int call_dv_timings_cap(struct v4l2_subdev *sd,
			       struct v4l2_dv_timings_cap *cap)
{
	if (!cap)
		return -EINVAL;

	return check_pad(sd, cap->pad) ? :
	       sd->ops->pad->dv_timings_cap(sd, cap);
}

static int call_enum_dv_timings(struct v4l2_subdev *sd,
				struct v4l2_enum_dv_timings *dvt)
{
	if (!dvt)
		return -EINVAL;

	return check_pad(sd, dvt->pad) ? :
	       sd->ops->pad->enum_dv_timings(sd, dvt);
}

static int call_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad,
				struct v4l2_mbus_config *config)
{
	return check_pad(sd, pad) ? :
	       sd->ops->pad->get_mbus_config(sd, pad, config);
}

static int call_s_stream(struct v4l2_subdev *sd, int enable)
{
	int ret;

#if IS_REACHABLE(CONFIG_LEDS_CLASS)
	if (!IS_ERR_OR_NULL(sd->privacy_led)) {
		if (enable)
			led_set_brightness(sd->privacy_led,
					   sd->privacy_led->max_brightness);
		else
			led_set_brightness(sd->privacy_led, 0);
	}
#endif
	ret = sd->ops->video->s_stream(sd, enable);

	if (!enable && ret < 0) {
		dev_warn(sd->dev, "disabling streaming failed (%d)\n", ret);
		ret = 0;
	}

	return ret;
}

#ifdef CONFIG_MEDIA_CONTROLLER
/*
 * Create state-management wrapper for pad ops dealing with subdev state. The
 * wrapper handles the case where the caller does not provide the called
 * subdev's state. This should be removed when all the callers are fixed.
 */
#define DEFINE_STATE_WRAPPER(f, arg_type)                                  \
	static int call_##f##_state(struct v4l2_subdev *sd,                \
				    struct v4l2_subdev_state *_state,      \
				    arg_type *arg)                         \
	{                                                                  \
		struct v4l2_subdev_state *state = _state;                  \
		int ret;                                                   \
		if (!_state)                                               \
			state = v4l2_subdev_lock_and_get_active_state(sd); \
		ret = call_##f(sd, state, arg);                            \
		if (!_state && state)                                      \
			v4l2_subdev_unlock_state(state);                   \
		return ret;                                                \
	}

#else /* CONFIG_MEDIA_CONTROLLER */

#define DEFINE_STATE_WRAPPER(f, arg_type)                            \
	static int call_##f##_state(struct v4l2_subdev *sd,          \
				    struct v4l2_subdev_state *state, \
				    arg_type *arg)                   \
	{                                                            \
		return call_##f(sd, state, arg);                     \
	}

#endif /* CONFIG_MEDIA_CONTROLLER */

DEFINE_STATE_WRAPPER(get_fmt, struct v4l2_subdev_format);
DEFINE_STATE_WRAPPER(set_fmt, struct v4l2_subdev_format);
DEFINE_STATE_WRAPPER(enum_mbus_code, struct v4l2_subdev_mbus_code_enum);
DEFINE_STATE_WRAPPER(enum_frame_size, struct v4l2_subdev_frame_size_enum);
DEFINE_STATE_WRAPPER(enum_frame_interval, struct v4l2_subdev_frame_interval_enum);
DEFINE_STATE_WRAPPER(get_selection, struct v4l2_subdev_selection);
DEFINE_STATE_WRAPPER(set_selection, struct v4l2_subdev_selection);
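
/*
 * For illustration (this expansion is a sketch, not generated code):
 * DEFINE_STATE_WRAPPER(get_fmt, struct v4l2_subdev_format) produces a
 * call_get_fmt_state() that locks and uses the active state whenever the
 * caller passes a NULL state, roughly:
 *
 *	static int call_get_fmt_state(struct v4l2_subdev *sd,
 *				      struct v4l2_subdev_state *_state,
 *				      struct v4l2_subdev_format *arg)
 *	{
 *		struct v4l2_subdev_state *state = _state ? _state :
 *			v4l2_subdev_lock_and_get_active_state(sd);
 *		int ret = call_get_fmt(sd, state, arg);
 *
 *		if (!_state && state)
 *			v4l2_subdev_unlock_state(state);
 *		return ret;
 *	}
 */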

static const struct v4l2_subdev_pad_ops v4l2_subdev_call_pad_wrappers = {
	.get_fmt = call_get_fmt_state,
	.set_fmt = call_set_fmt_state,
	.enum_mbus_code = call_enum_mbus_code_state,
	.enum_frame_size = call_enum_frame_size_state,
	.enum_frame_interval = call_enum_frame_interval_state,
	.get_selection = call_get_selection_state,
	.set_selection = call_set_selection_state,
	.get_edid = call_get_edid,
	.set_edid = call_set_edid,
	.dv_timings_cap = call_dv_timings_cap,
	.enum_dv_timings = call_enum_dv_timings,
	.get_mbus_config = call_get_mbus_config,
};

static const struct v4l2_subdev_video_ops v4l2_subdev_call_video_wrappers = {
	.g_frame_interval = call_g_frame_interval,
	.s_frame_interval = call_s_frame_interval,
	.s_stream = call_s_stream,
};

const struct v4l2_subdev_ops v4l2_subdev_call_wrappers = {
	.pad = &v4l2_subdev_call_pad_wrappers,
	.video = &v4l2_subdev_call_video_wrappers,
};
EXPORT_SYMBOL(v4l2_subdev_call_wrappers);

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)

static struct v4l2_subdev_state *
subdev_ioctl_get_state(struct v4l2_subdev *sd, struct v4l2_subdev_fh *subdev_fh,
		       unsigned int cmd, void *arg)
{
	u32 which;

	switch (cmd) {
	default:
		return NULL;
	case VIDIOC_SUBDEV_G_FMT:
	case VIDIOC_SUBDEV_S_FMT:
		which = ((struct v4l2_subdev_format *)arg)->which;
		break;
	case VIDIOC_SUBDEV_G_CROP:
	case VIDIOC_SUBDEV_S_CROP:
		which = ((struct v4l2_subdev_crop *)arg)->which;
		break;
	case VIDIOC_SUBDEV_ENUM_MBUS_CODE:
		which = ((struct v4l2_subdev_mbus_code_enum *)arg)->which;
		break;
	case VIDIOC_SUBDEV_ENUM_FRAME_SIZE:
		which = ((struct v4l2_subdev_frame_size_enum *)arg)->which;
		break;
	case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL:
		which = ((struct v4l2_subdev_frame_interval_enum *)arg)->which;
		break;
	case VIDIOC_SUBDEV_G_SELECTION:
	case VIDIOC_SUBDEV_S_SELECTION:
		which = ((struct v4l2_subdev_selection *)arg)->which;
		break;
	case VIDIOC_SUBDEV_G_ROUTING:
	case VIDIOC_SUBDEV_S_ROUTING:
		which = ((struct v4l2_subdev_routing *)arg)->which;
		break;
	}

	return which == V4L2_SUBDEV_FORMAT_TRY ?
	       subdev_fh->state :
	       v4l2_subdev_get_unlocked_active_state(sd);
}

static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg,
			    struct v4l2_subdev_state *state)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_fh *vfh = file->private_data;
	bool ro_subdev = test_bit(V4L2_FL_SUBDEV_RO_DEVNODE, &vdev->flags);
	bool streams_subdev = sd->flags & V4L2_SUBDEV_FL_STREAMS;
	int rval;

	switch (cmd) {
	case VIDIOC_SUBDEV_QUERYCAP: {
		struct v4l2_subdev_capability *cap = arg;

		memset(cap->reserved, 0, sizeof(cap->reserved));
		cap->version = LINUX_VERSION_CODE;
		cap->capabilities =
			(ro_subdev ? V4L2_SUBDEV_CAP_RO_SUBDEV : 0) |
			(streams_subdev ? V4L2_SUBDEV_CAP_STREAMS : 0);

		return 0;
	}

	case VIDIOC_QUERYCTRL:
		/*
		 * TODO: this really should be folded into v4l2_queryctrl (this
		 * currently returns -EINVAL for NULL control handlers).
		 * However, v4l2_queryctrl() is still called directly by
		 * drivers as well and until that has been addressed I believe
		 * it is safer to do the check here. The same is true for the
		 * other control ioctls below.
		 */
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_queryctrl(vfh->ctrl_handler, arg);

	case VIDIOC_QUERY_EXT_CTRL:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_query_ext_ctrl(vfh->ctrl_handler, arg);

	case VIDIOC_QUERYMENU:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_querymenu(vfh->ctrl_handler, arg);

	case VIDIOC_G_CTRL:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_g_ctrl(vfh->ctrl_handler, arg);

	case VIDIOC_S_CTRL:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_s_ctrl(vfh, vfh->ctrl_handler, arg);

	case VIDIOC_G_EXT_CTRLS:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_g_ext_ctrls(vfh->ctrl_handler,
					vdev, sd->v4l2_dev->mdev, arg);

	case VIDIOC_S_EXT_CTRLS:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler,
					vdev, sd->v4l2_dev->mdev, arg);

	case VIDIOC_TRY_EXT_CTRLS:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_try_ext_ctrls(vfh->ctrl_handler,
					  vdev, sd->v4l2_dev->mdev, arg);

	case VIDIOC_DQEVENT:
		if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
			return -ENOIOCTLCMD;

		return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);

	case VIDIOC_SUBSCRIBE_EVENT:
		return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);

	case VIDIOC_UNSUBSCRIBE_EVENT:
		return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg);

#ifdef CONFIG_VIDEO_ADV_DEBUG
	case VIDIOC_DBG_G_REGISTER:
	{
		struct v4l2_dbg_register *p = arg;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		return v4l2_subdev_call(sd, core, g_register, p);
	}
	case VIDIOC_DBG_S_REGISTER:
	{
		struct v4l2_dbg_register *p = arg;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		return v4l2_subdev_call(sd, core, s_register, p);
	}
	case VIDIOC_DBG_G_CHIP_INFO:
	{
		struct v4l2_dbg_chip_info *p = arg;

		if (p->match.type != V4L2_CHIP_MATCH_SUBDEV || p->match.addr)
			return -EINVAL;
		if (sd->ops->core && sd->ops->core->s_register)
			p->flags |= V4L2_CHIP_FL_WRITABLE;
		if (sd->ops->core && sd->ops->core->g_register)
			p->flags |= V4L2_CHIP_FL_READABLE;
		strscpy(p->name, sd->name, sizeof(p->name));
		return 0;
	}
#endif

	case VIDIOC_LOG_STATUS: {
		int ret;

		pr_info("%s: ================= START STATUS =================\n",
			sd->name);
		ret = v4l2_subdev_call(sd, core, log_status);
		pr_info("%s: ================== END STATUS ==================\n",
			sd->name);
		return ret;
	}

	case VIDIOC_SUBDEV_G_FMT: {
		struct v4l2_subdev_format *format = arg;

		memset(format->reserved, 0, sizeof(format->reserved));
		memset(format->format.reserved, 0, sizeof(format->format.reserved));
		return v4l2_subdev_call(sd, pad, get_fmt, state, format);
	}

	case VIDIOC_SUBDEV_S_FMT: {
		struct v4l2_subdev_format *format = arg;

		if (format->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		memset(format->reserved, 0, sizeof(format->reserved));
		memset(format->format.reserved, 0, sizeof(format->format.reserved));
		return v4l2_subdev_call(sd, pad, set_fmt, state, format);
	}

	case VIDIOC_SUBDEV_G_CROP: {
		struct v4l2_subdev_crop *crop = arg;
		struct v4l2_subdev_selection sel;

		memset(crop->reserved, 0, sizeof(crop->reserved));
		memset(&sel, 0, sizeof(sel));
		sel.which = crop->which;
		sel.pad = crop->pad;
		sel.target = V4L2_SEL_TGT_CROP;

		rval = v4l2_subdev_call(
			sd, pad, get_selection, state, &sel);

		crop->rect = sel.r;

		return rval;
	}

	case VIDIOC_SUBDEV_S_CROP: {
		struct v4l2_subdev_crop *crop = arg;
		struct v4l2_subdev_selection sel;

		if (crop->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		memset(crop->reserved, 0, sizeof(crop->reserved));
		memset(&sel, 0, sizeof(sel));
		sel.which = crop->which;
		sel.pad = crop->pad;
		sel.target = V4L2_SEL_TGT_CROP;
		sel.r = crop->rect;

		rval = v4l2_subdev_call(
			sd, pad, set_selection, state, &sel);

		crop->rect = sel.r;

		return rval;
	}

	case VIDIOC_SUBDEV_ENUM_MBUS_CODE: {
		struct v4l2_subdev_mbus_code_enum *code = arg;

		memset(code->reserved, 0, sizeof(code->reserved));
		return v4l2_subdev_call(sd, pad, enum_mbus_code, state,
					code);
	}

	case VIDIOC_SUBDEV_ENUM_FRAME_SIZE: {
		struct v4l2_subdev_frame_size_enum *fse = arg;

		memset(fse->reserved, 0, sizeof(fse->reserved));
		return v4l2_subdev_call(sd, pad, enum_frame_size, state,
					fse);
	}

	case VIDIOC_SUBDEV_G_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval *fi = arg;

		memset(fi->reserved, 0, sizeof(fi->reserved));
		return v4l2_subdev_call(sd, video, g_frame_interval, arg);
	}

	case VIDIOC_SUBDEV_S_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval *fi = arg;

		if (ro_subdev)
			return -EPERM;

		memset(fi->reserved, 0, sizeof(fi->reserved));
		return v4l2_subdev_call(sd, video, s_frame_interval, arg);
	}

	case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval_enum *fie = arg;

		memset(fie->reserved, 0, sizeof(fie->reserved));
		return v4l2_subdev_call(sd, pad, enum_frame_interval, state,
					fie);
	}

	case VIDIOC_SUBDEV_G_SELECTION: {
		struct v4l2_subdev_selection *sel = arg;

		memset(sel->reserved, 0, sizeof(sel->reserved));
		return v4l2_subdev_call(
			sd, pad, get_selection, state, sel);
	}

	case VIDIOC_SUBDEV_S_SELECTION: {
		struct v4l2_subdev_selection *sel = arg;

		if (sel->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		memset(sel->reserved, 0, sizeof(sel->reserved));
		return v4l2_subdev_call(
			sd, pad, set_selection, state, sel);
	}

	case VIDIOC_G_EDID: {
		struct v4l2_subdev_edid *edid = arg;

		return v4l2_subdev_call(sd, pad, get_edid, edid);
	}

	case VIDIOC_S_EDID: {
		struct v4l2_subdev_edid *edid = arg;

		return v4l2_subdev_call(sd, pad, set_edid, edid);
	}

	case VIDIOC_SUBDEV_DV_TIMINGS_CAP: {
		struct v4l2_dv_timings_cap *cap = arg;

		return v4l2_subdev_call(sd, pad, dv_timings_cap, cap);
	}

	case VIDIOC_SUBDEV_ENUM_DV_TIMINGS: {
		struct v4l2_enum_dv_timings *dvt = arg;

		return v4l2_subdev_call(sd, pad, enum_dv_timings, dvt);
	}

	case VIDIOC_SUBDEV_QUERY_DV_TIMINGS:
		return v4l2_subdev_call(sd, video, query_dv_timings, arg);

	case VIDIOC_SUBDEV_G_DV_TIMINGS:
		return v4l2_subdev_call(sd, video, g_dv_timings, arg);

	case VIDIOC_SUBDEV_S_DV_TIMINGS:
		if (ro_subdev)
			return -EPERM;

		return v4l2_subdev_call(sd, video, s_dv_timings, arg);

	case VIDIOC_SUBDEV_G_STD:
		return v4l2_subdev_call(sd, video, g_std, arg);

	case VIDIOC_SUBDEV_S_STD: {
		v4l2_std_id *std = arg;

		if (ro_subdev)
			return -EPERM;

		return v4l2_subdev_call(sd, video, s_std, *std);
	}

	case VIDIOC_SUBDEV_ENUMSTD: {
		struct v4l2_standard *p = arg;
		v4l2_std_id id;

		if (v4l2_subdev_call(sd, video, g_tvnorms, &id))
			return -EINVAL;

		return v4l_video_std_enumstd(p, id);
	}

	case VIDIOC_SUBDEV_QUERYSTD:
		return v4l2_subdev_call(sd, video, querystd, arg);

	case VIDIOC_SUBDEV_G_ROUTING: {
		struct v4l2_subdev_routing *routing = arg;
		struct v4l2_subdev_krouting *krouting;

		if (!v4l2_subdev_enable_streams_api)
			return -ENOIOCTLCMD;

		if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
			return -ENOIOCTLCMD;

		memset(routing->reserved, 0, sizeof(routing->reserved));

		krouting = &state->routing;

		if (routing->num_routes < krouting->num_routes) {
			routing->num_routes = krouting->num_routes;
			return -ENOSPC;
		}

		memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes,
		       krouting->routes,
		       krouting->num_routes * sizeof(*krouting->routes));
		routing->num_routes = krouting->num_routes;

		return 0;
	}

	case VIDIOC_SUBDEV_S_ROUTING: {
		struct v4l2_subdev_routing *routing = arg;
		struct v4l2_subdev_route *routes =
			(struct v4l2_subdev_route *)(uintptr_t)routing->routes;
		struct v4l2_subdev_krouting krouting = {};
		unsigned int i;

		if (!v4l2_subdev_enable_streams_api)
			return -ENOIOCTLCMD;

		if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
			return -ENOIOCTLCMD;

		if (routing->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		memset(routing->reserved, 0, sizeof(routing->reserved));

		for (i = 0; i < routing->num_routes; ++i) {
			const struct v4l2_subdev_route *route = &routes[i];
			const struct media_pad *pads = sd->entity.pads;

			if (route->sink_stream > V4L2_SUBDEV_MAX_STREAM_ID ||
			    route->source_stream > V4L2_SUBDEV_MAX_STREAM_ID)
				return -EINVAL;

			if (route->sink_pad >= sd->entity.num_pads)
				return -EINVAL;

			if (!(pads[route->sink_pad].flags &
			      MEDIA_PAD_FL_SINK))
				return -EINVAL;

			if (route->source_pad >= sd->entity.num_pads)
				return -EINVAL;

			if (!(pads[route->source_pad].flags &
			      MEDIA_PAD_FL_SOURCE))
				return -EINVAL;
		}

		krouting.num_routes = routing->num_routes;
		krouting.routes = routes;

		return v4l2_subdev_call(sd, pad, set_routing, state,
					routing->which, &krouting);
	}

	default:
		return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
	}

	return 0;
}

static long subdev_do_ioctl_lock(struct file *file, unsigned int cmd, void *arg)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->lock;
	long ret = -ENODEV;

	if (lock && mutex_lock_interruptible(lock))
		return -ERESTARTSYS;

	if (video_is_registered(vdev)) {
		struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
		struct v4l2_fh *vfh = file->private_data;
		struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
		struct v4l2_subdev_state *state;

		state = subdev_ioctl_get_state(sd, subdev_fh, cmd, arg);

		if (state)
			v4l2_subdev_lock_state(state);

		ret = subdev_do_ioctl(file, cmd, arg, state);

		if (state)
			v4l2_subdev_unlock_state(state);
	}

	if (lock)
		mutex_unlock(lock);
	return ret;
}

static long subdev_ioctl(struct file *file, unsigned int cmd,
			 unsigned long arg)
{
	return video_usercopy(file, cmd, arg, subdev_do_ioctl_lock);
}

#if defined(CONFIG_COMPAT)
static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);

	return v4l2_subdev_call(sd, core, compat_ioctl32, cmd, arg);
}
#endif

#else /* CONFIG_VIDEO_V4L2_SUBDEV_API */
static long subdev_ioctl(struct file *file, unsigned int cmd,
			 unsigned long arg)
{
	return -ENODEV;
}

#if defined(CONFIG_COMPAT)
static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	return -ENODEV;
}
#endif
#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */

static __poll_t subdev_poll(struct file *file, poll_table *wait)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_fh *fh = file->private_data;

	if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
		return EPOLLERR;

	poll_wait(file, &fh->wait, wait);

	if (v4l2_event_pending(fh))
		return EPOLLPRI;

	return 0;
}

const struct v4l2_file_operations v4l2_subdev_fops = {
	.owner = THIS_MODULE,
	.open = subdev_open,
	.unlocked_ioctl = subdev_ioctl,
#if defined(CONFIG_COMPAT)
	.compat_ioctl32 = subdev_compat_ioctl32,
#endif
	.poll = subdev_poll,
	.release = subdev_close,
};

#ifdef CONFIG_MEDIA_CONTROLLER

int v4l2_subdev_get_fwnode_pad_1_to_1(struct media_entity *entity,
				      struct fwnode_endpoint *endpoint)
{
	struct fwnode_handle *fwnode;
	struct v4l2_subdev *sd;

	if (!is_media_entity_v4l2_subdev(entity))
		return -EINVAL;

	sd = media_entity_to_v4l2_subdev(entity);

	fwnode = fwnode_graph_get_port_parent(endpoint->local_fwnode);
	fwnode_handle_put(fwnode);

	if (device_match_fwnode(sd->dev, fwnode))
		return endpoint->port;

	return -ENXIO;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_get_fwnode_pad_1_to_1);

int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd,
				      struct media_link *link,
				      struct v4l2_subdev_format *source_fmt,
				      struct v4l2_subdev_format *sink_fmt)
{
	bool pass = true;

	/* The width, height and code must match. */
	if (source_fmt->format.width != sink_fmt->format.width) {
		dev_dbg(sd->entity.graph_obj.mdev->dev,
			"%s: width does not match (source %u, sink %u)\n",
			__func__,
			source_fmt->format.width, sink_fmt->format.width);
		pass = false;
	}

	if (source_fmt->format.height != sink_fmt->format.height) {
		dev_dbg(sd->entity.graph_obj.mdev->dev,
			"%s: height does not match (source %u, sink %u)\n",
			__func__,
			source_fmt->format.height, sink_fmt->format.height);
		pass = false;
	}

	if (source_fmt->format.code != sink_fmt->format.code) {
		dev_dbg(sd->entity.graph_obj.mdev->dev,
			"%s: media bus code does not match (source 0x%8.8x, sink 0x%8.8x)\n",
			__func__,
			source_fmt->format.code, sink_fmt->format.code);
		pass = false;
	}

	/* The field order must match, or the sink field order must be NONE
	 * to support interlaced hardware connected to bridges that support
	 * progressive formats only.
	 */
	if (source_fmt->format.field != sink_fmt->format.field &&
	    sink_fmt->format.field != V4L2_FIELD_NONE) {
		dev_dbg(sd->entity.graph_obj.mdev->dev,
			"%s: field does not match (source %u, sink %u)\n",
			__func__,
			source_fmt->format.field, sink_fmt->format.field);
		pass = false;
	}

	if (pass)
		return 0;

	dev_dbg(sd->entity.graph_obj.mdev->dev,
		"%s: link was \"%s\":%u -> \"%s\":%u\n", __func__,
		link->source->entity->name, link->source->index,
		link->sink->entity->name, link->sink->index);

	return -EPIPE;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate_default);

static int
v4l2_subdev_link_validate_get_format(struct media_pad *pad, u32 stream,
				     struct v4l2_subdev_format *fmt)
{
	if (is_media_entity_v4l2_subdev(pad->entity)) {
		struct v4l2_subdev *sd =
			media_entity_to_v4l2_subdev(pad->entity);

		fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
		fmt->pad = pad->index;
		fmt->stream = stream;

		return v4l2_subdev_call(sd, pad, get_fmt,
					v4l2_subdev_get_locked_active_state(sd),
					fmt);
	}

	WARN(pad->entity->function != MEDIA_ENT_F_IO_V4L,
	     "Driver bug! Wrong media entity type 0x%08x, entity %s\n",
	     pad->entity->function, pad->entity->name);

	return -EINVAL;
}

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)

static void __v4l2_link_validate_get_streams(struct media_pad *pad,
					     u64 *streams_mask)
{
	struct v4l2_subdev_route *route;
	struct v4l2_subdev_state *state;
	struct v4l2_subdev *subdev;

	subdev = media_entity_to_v4l2_subdev(pad->entity);

	*streams_mask = 0;

	state = v4l2_subdev_get_locked_active_state(subdev);
	if (WARN_ON(!state))
		return;

	for_each_active_route(&state->routing, route) {
		u32 route_pad;
		u32 route_stream;

		if (pad->flags & MEDIA_PAD_FL_SOURCE) {
			route_pad = route->source_pad;
			route_stream = route->source_stream;
		} else {
			route_pad = route->sink_pad;
			route_stream = route->sink_stream;
		}

		if (route_pad != pad->index)
			continue;

		*streams_mask |= BIT_ULL(route_stream);
	}
}

#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */

static void v4l2_link_validate_get_streams(struct media_pad *pad,
					   u64 *streams_mask)
{
	struct v4l2_subdev *subdev = media_entity_to_v4l2_subdev(pad->entity);

	if (!(subdev->flags & V4L2_SUBDEV_FL_STREAMS)) {
		/* Non-streams subdevs have an implicit stream 0 */
		*streams_mask = BIT_ULL(0);
		return;
	}

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
	__v4l2_link_validate_get_streams(pad, streams_mask);
#else
	/* This shouldn't happen */
	*streams_mask = 0;
#endif
}

static int v4l2_subdev_link_validate_locked(struct media_link *link)
{
	struct v4l2_subdev *sink_subdev =
		media_entity_to_v4l2_subdev(link->sink->entity);
	struct device *dev = sink_subdev->entity.graph_obj.mdev->dev;
	u64 source_streams_mask;
	u64 sink_streams_mask;
	u64 dangling_sink_streams;
	u32 stream;
	int ret;

	dev_dbg(dev, "validating link \"%s\":%u -> \"%s\":%u\n",
		link->source->entity->name, link->source->index,
		link->sink->entity->name, link->sink->index);

	v4l2_link_validate_get_streams(link->source, &source_streams_mask);
	v4l2_link_validate_get_streams(link->sink, &sink_streams_mask);

	/*
	 * It is ok to have more source streams than sink streams as extra
	 * source streams can just be ignored by the receiver, but having extra
	 * sink streams is an error as streams must have a source.
	 */
	dangling_sink_streams = (source_streams_mask ^ sink_streams_mask) &
				sink_streams_mask;
	if (dangling_sink_streams) {
		dev_err(dev, "Dangling sink streams: mask %#llx\n",
			dangling_sink_streams);
		return -EINVAL;
	}

	/* Validate source and sink stream formats */

	for (stream = 0; stream < sizeof(sink_streams_mask) * 8; ++stream) {
		struct v4l2_subdev_format sink_fmt, source_fmt;

		if (!(sink_streams_mask & BIT_ULL(stream)))
			continue;

		dev_dbg(dev, "validating stream \"%s\":%u:%u -> \"%s\":%u:%u\n",
			link->source->entity->name, link->source->index, stream,
			link->sink->entity->name, link->sink->index, stream);

		ret = v4l2_subdev_link_validate_get_format(link->source, stream,
							   &source_fmt);
		if (ret < 0) {
			dev_dbg(dev,
				"Failed to get format for \"%s\":%u:%u (but that's ok)\n",
				link->source->entity->name, link->source->index,
				stream);
			continue;
		}

		ret = v4l2_subdev_link_validate_get_format(link->sink, stream,
							   &sink_fmt);
		if (ret < 0) {
			dev_dbg(dev,
				"Failed to get format for \"%s\":%u:%u (but that's ok)\n",
				link->sink->entity->name, link->sink->index,
				stream);
			continue;
		}

		/* TODO: add stream number to link_validate() */
		ret = v4l2_subdev_call(sink_subdev, pad, link_validate, link,
				       &source_fmt, &sink_fmt);
		if (!ret)
			continue;

		if (ret != -ENOIOCTLCMD)
			return ret;

		ret = v4l2_subdev_link_validate_default(sink_subdev, link,
							&source_fmt, &sink_fmt);
		if (ret)
			return ret;
	}

	return 0;
}

int v4l2_subdev_link_validate(struct media_link *link)
{
	struct v4l2_subdev *source_sd, *sink_sd;
	struct v4l2_subdev_state *source_state, *sink_state;
	int ret;

	sink_sd = media_entity_to_v4l2_subdev(link->sink->entity);
	source_sd = media_entity_to_v4l2_subdev(link->source->entity);

	sink_state = v4l2_subdev_get_unlocked_active_state(sink_sd);
	source_state = v4l2_subdev_get_unlocked_active_state(source_sd);

	if (sink_state)
		v4l2_subdev_lock_state(sink_state);

	if (source_state)
		v4l2_subdev_lock_state(source_state);

	ret = v4l2_subdev_link_validate_locked(link);

	if (sink_state)
		v4l2_subdev_unlock_state(sink_state);

	if (source_state)
		v4l2_subdev_unlock_state(source_state);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate);

bool v4l2_subdev_has_pad_interdep(struct media_entity *entity,
				  unsigned int pad0, unsigned int pad1)
{
	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
	struct v4l2_subdev_krouting *routing;
	struct v4l2_subdev_state *state;
	unsigned int i;

	state = v4l2_subdev_lock_and_get_active_state(sd);

	routing = &state->routing;

	for (i = 0; i < routing->num_routes; ++i) {
		struct v4l2_subdev_route *route = &routing->routes[i];

		if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
			continue;

		if ((route->sink_pad == pad0 && route->source_pad == pad1) ||
		    (route->source_pad == pad0 && route->sink_pad == pad1)) {
			v4l2_subdev_unlock_state(state);
			return true;
		}
	}

	v4l2_subdev_unlock_state(state);

	return false;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_has_pad_interdep);
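
/*
 * A minimal sketch of how a streams-aware driver might hook this helper up
 * (the ops structure and its name are hypothetical, not from this file):
 *
 *	static const struct media_entity_operations my_entity_ops = {
 *		.link_validate = v4l2_subdev_link_validate,
 *		.has_pad_interdep = v4l2_subdev_has_pad_interdep,
 *	};
 *
 *	sd->entity.ops = &my_entity_ops;
 */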

struct v4l2_subdev_state *
__v4l2_subdev_state_alloc(struct v4l2_subdev *sd, const char *lock_name,
			  struct lock_class_key *lock_key)
{
	struct v4l2_subdev_state *state;
	int ret;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return ERR_PTR(-ENOMEM);

	__mutex_init(&state->_lock, lock_name, lock_key);
	if (sd->state_lock)
		state->lock = sd->state_lock;
	else
		state->lock = &state->_lock;

	/* Drivers that support streams do not need the legacy pad config */
	if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS) && sd->entity.num_pads) {
		state->pads = kvcalloc(sd->entity.num_pads,
				       sizeof(*state->pads), GFP_KERNEL);
		if (!state->pads) {
			ret = -ENOMEM;
			goto err;
		}
	}

	/*
	 * There can be no race at this point, but we lock the state anyway to
	 * satisfy lockdep checks.
	 */
	v4l2_subdev_lock_state(state);
	ret = v4l2_subdev_call(sd, pad, init_cfg, state);
	v4l2_subdev_unlock_state(state);

	if (ret < 0 && ret != -ENOIOCTLCMD)
		goto err;

	return state;

err:
	if (state && state->pads)
		kvfree(state->pads);

	kfree(state);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_state_alloc);

void __v4l2_subdev_state_free(struct v4l2_subdev_state *state)
{
	if (!state)
		return;

	mutex_destroy(&state->_lock);

	kfree(state->routing.routes);
	kvfree(state->stream_configs.configs);
	kvfree(state->pads);
	kfree(state);
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_state_free);

int __v4l2_subdev_init_finalize(struct v4l2_subdev *sd, const char *name,
				struct lock_class_key *key)
{
	struct v4l2_subdev_state *state;

	state = __v4l2_subdev_state_alloc(sd, name, key);
	if (IS_ERR(state))
		return PTR_ERR(state);

	sd->active_state = state;

	return 0;
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_init_finalize);
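
/*
 * Drivers normally reach this through the v4l2_subdev_init_finalize() macro,
 * which supplies the lock name and class key. A hypothetical probe-time
 * sketch:
 *
 *	ret = v4l2_subdev_init_finalize(sd);
 *	if (ret)
 *		return ret;
 *
 * with a matching v4l2_subdev_cleanup(sd) on the remove path.
 */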

void v4l2_subdev_cleanup(struct v4l2_subdev *sd)
{
	__v4l2_subdev_state_free(sd->active_state);
	sd->active_state = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_cleanup);

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)

static int
v4l2_subdev_init_stream_configs(struct v4l2_subdev_stream_configs *stream_configs,
				const struct v4l2_subdev_krouting *routing)
{
	struct v4l2_subdev_stream_configs new_configs = { 0 };
	struct v4l2_subdev_route *route;
	u32 idx;

	/* Count number of formats needed */
	for_each_active_route(routing, route) {
		/*
		 * Each route needs a format on both ends of the route.
		 */
		new_configs.num_configs += 2;
	}

	if (new_configs.num_configs) {
		new_configs.configs = kvcalloc(new_configs.num_configs,
					       sizeof(*new_configs.configs),
					       GFP_KERNEL);

		if (!new_configs.configs)
			return -ENOMEM;
	}

	/*
	 * Fill in the 'pad' and 'stream' value for each item in the array from
	 * the routing table.
	 */
	idx = 0;

	for_each_active_route(routing, route) {
		new_configs.configs[idx].pad = route->sink_pad;
		new_configs.configs[idx].stream = route->sink_stream;

		idx++;

		new_configs.configs[idx].pad = route->source_pad;
		new_configs.configs[idx].stream = route->source_stream;

		idx++;
	}

	kvfree(stream_configs->configs);
	*stream_configs = new_configs;

	return 0;
}

int v4l2_subdev_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state,
			struct v4l2_subdev_format *format)
{
	struct v4l2_mbus_framefmt *fmt;

	if (sd->flags & V4L2_SUBDEV_FL_STREAMS)
		fmt = v4l2_subdev_state_get_stream_format(state, format->pad,
							  format->stream);
	else if (format->pad < sd->entity.num_pads && format->stream == 0)
		fmt = v4l2_subdev_get_pad_format(sd, state, format->pad);
	else
		fmt = NULL;

	if (!fmt)
		return -EINVAL;

	format->format = *fmt;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_get_fmt);

int v4l2_subdev_set_routing(struct v4l2_subdev *sd,
			    struct v4l2_subdev_state *state,
			    const struct v4l2_subdev_krouting *routing)
{
	struct v4l2_subdev_krouting *dst = &state->routing;
	const struct v4l2_subdev_krouting *src = routing;
	struct v4l2_subdev_krouting new_routing = { 0 };
	size_t bytes;
	int r;

	if (unlikely(check_mul_overflow((size_t)src->num_routes,
					sizeof(*src->routes), &bytes)))
		return -EOVERFLOW;

	lockdep_assert_held(state->lock);

	if (src->num_routes > 0) {
		new_routing.routes = kmemdup(src->routes, bytes, GFP_KERNEL);
		if (!new_routing.routes)
			return -ENOMEM;
	}

	new_routing.num_routes = src->num_routes;

	r = v4l2_subdev_init_stream_configs(&state->stream_configs,
					    &new_routing);
	if (r) {
		kfree(new_routing.routes);
		return r;
	}

	kfree(dst->routes);
	*dst = new_routing;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing);

struct v4l2_subdev_route *
__v4l2_subdev_next_active_route(const struct v4l2_subdev_krouting *routing,
				struct v4l2_subdev_route *route)
{
	if (route)
		++route;
	else
		route = &routing->routes[0];

	for (; route < routing->routes + routing->num_routes; ++route) {
		if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
			continue;

		return route;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_next_active_route);
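
/*
 * This iterator backs the for_each_active_route() macro. An illustrative
 * use, skipping inactive routes automatically:
 *
 *	struct v4l2_subdev_route *route;
 *
 *	for_each_active_route(&state->routing, route)
 *		pr_debug("route %u/%u -> %u/%u\n",
 *			 route->sink_pad, route->sink_stream,
 *			 route->source_pad, route->source_stream);
 */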

int v4l2_subdev_set_routing_with_fmt(struct v4l2_subdev *sd,
				     struct v4l2_subdev_state *state,
				     struct v4l2_subdev_krouting *routing,
				     const struct v4l2_mbus_framefmt *fmt)
{
	struct v4l2_subdev_stream_configs *stream_configs;
	unsigned int i;
	int ret;

	ret = v4l2_subdev_set_routing(sd, state, routing);
	if (ret)
		return ret;

	stream_configs = &state->stream_configs;

	for (i = 0; i < stream_configs->num_configs; ++i)
		stream_configs->configs[i].fmt = *fmt;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing_with_fmt);

struct v4l2_mbus_framefmt *
v4l2_subdev_state_get_stream_format(struct v4l2_subdev_state *state,
				    unsigned int pad, u32 stream)
{
	struct v4l2_subdev_stream_configs *stream_configs;
	unsigned int i;

	lockdep_assert_held(state->lock);

	stream_configs = &state->stream_configs;

	for (i = 0; i < stream_configs->num_configs; ++i) {
		if (stream_configs->configs[i].pad == pad &&
		    stream_configs->configs[i].stream == stream)
			return &stream_configs->configs[i].fmt;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_stream_format);

struct v4l2_rect *
v4l2_subdev_state_get_stream_crop(struct v4l2_subdev_state *state,
				  unsigned int pad, u32 stream)
{
	struct v4l2_subdev_stream_configs *stream_configs;
	unsigned int i;

	lockdep_assert_held(state->lock);

	stream_configs = &state->stream_configs;

	for (i = 0; i < stream_configs->num_configs; ++i) {
		if (stream_configs->configs[i].pad == pad &&
		    stream_configs->configs[i].stream == stream)
			return &stream_configs->configs[i].crop;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_stream_crop);

struct v4l2_rect *
v4l2_subdev_state_get_stream_compose(struct v4l2_subdev_state *state,
				     unsigned int pad, u32 stream)
{
	struct v4l2_subdev_stream_configs *stream_configs;
	unsigned int i;

	lockdep_assert_held(state->lock);

	stream_configs = &state->stream_configs;

	for (i = 0; i < stream_configs->num_configs; ++i) {
		if (stream_configs->configs[i].pad == pad &&
		    stream_configs->configs[i].stream == stream)
			return &stream_configs->configs[i].compose;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_stream_compose);

int v4l2_subdev_routing_find_opposite_end(const struct v4l2_subdev_krouting *routing,
					  u32 pad, u32 stream, u32 *other_pad,
					  u32 *other_stream)
{
	unsigned int i;

	for (i = 0; i < routing->num_routes; ++i) {
		struct v4l2_subdev_route *route = &routing->routes[i];

		if (route->source_pad == pad &&
		    route->source_stream == stream) {
			if (other_pad)
				*other_pad = route->sink_pad;
			if (other_stream)
				*other_stream = route->sink_stream;
			return 0;
		}

		if (route->sink_pad == pad && route->sink_stream == stream) {
			if (other_pad)
				*other_pad = route->source_pad;
			if (other_stream)
				*other_stream = route->source_stream;
			return 0;
		}
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_routing_find_opposite_end);

struct v4l2_mbus_framefmt *
v4l2_subdev_state_get_opposite_stream_format(struct v4l2_subdev_state *state,
					     u32 pad, u32 stream)
{
	u32 other_pad, other_stream;
	int ret;

	ret = v4l2_subdev_routing_find_opposite_end(&state->routing,
						    pad, stream,
						    &other_pad, &other_stream);
	if (ret)
		return NULL;

	return v4l2_subdev_state_get_stream_format(state, other_pad,
						   other_stream);
}
EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_opposite_stream_format);

u64 v4l2_subdev_state_xlate_streams(const struct v4l2_subdev_state *state,
				    u32 pad0, u32 pad1, u64 *streams)
{
	const struct v4l2_subdev_krouting *routing = &state->routing;
	struct v4l2_subdev_route *route;
	u64 streams0 = 0;
	u64 streams1 = 0;

	for_each_active_route(routing, route) {
		if (route->sink_pad == pad0 && route->source_pad == pad1 &&
		    (*streams & BIT_ULL(route->sink_stream))) {
			streams0 |= BIT_ULL(route->sink_stream);
			streams1 |= BIT_ULL(route->source_stream);
		}
		if (route->source_pad == pad0 && route->sink_pad == pad1 &&
		    (*streams & BIT_ULL(route->source_stream))) {
			streams0 |= BIT_ULL(route->source_stream);
			streams1 |= BIT_ULL(route->sink_stream);
		}
	}

	*streams = streams0;
	return streams1;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_state_xlate_streams);

int v4l2_subdev_routing_validate(struct v4l2_subdev *sd,
				 const struct v4l2_subdev_krouting *routing,
				 enum v4l2_subdev_routing_restriction disallow)
{
	u32 *remote_pads = NULL;
	unsigned int i, j;
	int ret = -EINVAL;

	if (disallow & V4L2_SUBDEV_ROUTING_NO_STREAM_MIX) {
		remote_pads = kcalloc(sd->entity.num_pads, sizeof(*remote_pads),
				      GFP_KERNEL);
		if (!remote_pads)
			return -ENOMEM;

		for (i = 0; i < sd->entity.num_pads; ++i)
			remote_pads[i] = U32_MAX;
	}

	for (i = 0; i < routing->num_routes; ++i) {
		const struct v4l2_subdev_route *route = &routing->routes[i];

		/* Validate the sink and source pad numbers. */
		if (route->sink_pad >= sd->entity.num_pads ||
		    !(sd->entity.pads[route->sink_pad].flags & MEDIA_PAD_FL_SINK)) {
			dev_dbg(sd->dev, "route %u sink (%u) is not a sink pad\n",
				i, route->sink_pad);
			goto out;
		}

		if (route->source_pad >= sd->entity.num_pads ||
		    !(sd->entity.pads[route->source_pad].flags & MEDIA_PAD_FL_SOURCE)) {
			dev_dbg(sd->dev, "route %u source (%u) is not a source pad\n",
				i, route->source_pad);
			goto out;
		}

		/*
		 * V4L2_SUBDEV_ROUTING_NO_STREAM_MIX: Streams on the same pad
		 * may not be routed to streams on different pads.
		 */
		if (disallow & V4L2_SUBDEV_ROUTING_NO_STREAM_MIX) {
			if (remote_pads[route->sink_pad] != U32_MAX &&
			    remote_pads[route->sink_pad] != route->source_pad) {
				dev_dbg(sd->dev,
					"route %u attempts to mix %s streams\n",
					i, "sink");
				goto out;
			}

			if (remote_pads[route->source_pad] != U32_MAX &&
			    remote_pads[route->source_pad] != route->sink_pad) {
				dev_dbg(sd->dev,
					"route %u attempts to mix %s streams\n",
					i, "source");
				goto out;
			}

			remote_pads[route->sink_pad] = route->source_pad;
			remote_pads[route->source_pad] = route->sink_pad;
		}

		for (j = i + 1; j < routing->num_routes; ++j) {
			const struct v4l2_subdev_route *r = &routing->routes[j];

			/*
			 * V4L2_SUBDEV_ROUTING_NO_1_TO_N: No two routes can
			 * originate from the same (sink) stream.
			 */
			if ((disallow & V4L2_SUBDEV_ROUTING_NO_1_TO_N) &&
			    route->sink_pad == r->sink_pad &&
			    route->sink_stream == r->sink_stream) {
				dev_dbg(sd->dev,
					"routes %u and %u originate from same sink (%u/%u)\n",
					i, j, route->sink_pad,
					route->sink_stream);
				goto out;
			}

			/*
			 * V4L2_SUBDEV_ROUTING_NO_N_TO_1: No two routes can end
			 * at the same (source) stream.
			 */
			if ((disallow & V4L2_SUBDEV_ROUTING_NO_N_TO_1) &&
			    route->source_pad == r->source_pad &&
			    route->source_stream == r->source_stream) {
				dev_dbg(sd->dev,
					"routes %u and %u end at same source (%u/%u)\n",
					i, j, route->source_pad,
					route->source_stream);
				goto out;
			}
		}
	}

	ret = 0;

out:
	kfree(remote_pads);
	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_routing_validate);
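
/*
 * A sketch of a driver's .set_routing() handler using this validator (the
 * function name and the default format are hypothetical, not from this
 * file):
 *
 *	static int my_set_routing(struct v4l2_subdev *sd,
 *				  struct v4l2_subdev_state *state,
 *				  enum v4l2_subdev_format_whence which,
 *				  struct v4l2_subdev_krouting *routing)
 *	{
 *		int ret;
 *
 *		ret = v4l2_subdev_routing_validate(sd, routing,
 *						   V4L2_SUBDEV_ROUTING_ONLY_1_TO_1);
 *		if (ret)
 *			return ret;
 *
 *		return v4l2_subdev_set_routing_with_fmt(sd, state, routing,
 *							&my_default_fmt);
 *	}
 */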

static int v4l2_subdev_enable_streams_fallback(struct v4l2_subdev *sd, u32 pad,
					       u64 streams_mask)
{
	struct device *dev = sd->entity.graph_obj.mdev->dev;
	unsigned int i;
	int ret;

	/*
	 * The subdev doesn't implement pad-based stream enable, fall back
	 * on the .s_stream() operation. This can only be done for subdevs that
	 * have a single source pad, as sd->enabled_streams is global to the
	 * subdev.
	 */
	if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
		return -EOPNOTSUPP;

	for (i = 0; i < sd->entity.num_pads; ++i) {
		if (i != pad && sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE)
			return -EOPNOTSUPP;
	}

	if (sd->enabled_streams & streams_mask) {
		dev_dbg(dev, "set of streams %#llx already enabled on %s:%u\n",
			streams_mask, sd->entity.name, pad);
		return -EALREADY;
	}

	/* Start streaming when the first streams are enabled. */
	if (!sd->enabled_streams) {
		ret = v4l2_subdev_call(sd, video, s_stream, 1);
		if (ret)
			return ret;
	}

	sd->enabled_streams |= streams_mask;

	return 0;
}

int v4l2_subdev_enable_streams(struct v4l2_subdev *sd, u32 pad,
			       u64 streams_mask)
{
	struct device *dev = sd->entity.graph_obj.mdev->dev;
	struct v4l2_subdev_state *state;
	u64 found_streams = 0;
	unsigned int i;
	int ret;

	/* A few basic sanity checks first. */
	if (pad >= sd->entity.num_pads)
		return -EINVAL;

	if (!streams_mask)
		return 0;

	/* Fallback on .s_stream() if .enable_streams() isn't available. */
	if (!sd->ops->pad || !sd->ops->pad->enable_streams)
		return v4l2_subdev_enable_streams_fallback(sd, pad,
							   streams_mask);

	state = v4l2_subdev_lock_and_get_active_state(sd);

	/*
	 * Verify that the requested streams exist and that they are not
	 * already enabled.
	 */
	for (i = 0; i < state->stream_configs.num_configs; ++i) {
		struct v4l2_subdev_stream_config *cfg =
			&state->stream_configs.configs[i];

		if (cfg->pad != pad || !(streams_mask & BIT_ULL(cfg->stream)))
			continue;

		found_streams |= BIT_ULL(cfg->stream);

		if (cfg->enabled) {
			dev_dbg(dev, "stream %u already enabled on %s:%u\n",
				cfg->stream, sd->entity.name, pad);
			ret = -EALREADY;
			goto done;
		}
	}

	if (found_streams != streams_mask) {
		dev_dbg(dev, "streams 0x%llx not found on %s:%u\n",
			streams_mask & ~found_streams, sd->entity.name, pad);
		ret = -EINVAL;
		goto done;
	}

	/* Call the .enable_streams() operation. */
	ret = v4l2_subdev_call(sd, pad, enable_streams, state, pad,
			       streams_mask);
	if (ret)
		goto done;

	/* Mark the streams as enabled. */
	for (i = 0; i < state->stream_configs.num_configs; ++i) {
		struct v4l2_subdev_stream_config *cfg =
			&state->stream_configs.configs[i];

		if (cfg->pad == pad && (streams_mask & BIT_ULL(cfg->stream)))
			cfg->enabled = true;
	}

done:
	v4l2_subdev_unlock_state(state);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_enable_streams);
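
/*
 * A hypothetical bridge driver enabling a single stream on a remote source
 * pad, and disabling it again on its stop path (sketch only; source_sd and
 * src_pad are assumptions):
 *
 *	ret = v4l2_subdev_enable_streams(source_sd, src_pad, BIT_ULL(0));
 *	if (ret)
 *		return ret;
 *	...
 *	v4l2_subdev_disable_streams(source_sd, src_pad, BIT_ULL(0));
 */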

static int v4l2_subdev_disable_streams_fallback(struct v4l2_subdev *sd, u32 pad,
						u64 streams_mask)
{
	struct device *dev = sd->entity.graph_obj.mdev->dev;
	unsigned int i;
	int ret;

	/*
	 * If the subdev doesn't implement pad-based stream enable, fall back
	 * on the .s_stream() operation. This can only be done for subdevs that
	 * have a single source pad, as sd->enabled_streams is global to the
	 * subdev.
	 */
	if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
		return -EOPNOTSUPP;

	for (i = 0; i < sd->entity.num_pads; ++i) {
		if (i != pad && sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE)
			return -EOPNOTSUPP;
	}

	if ((sd->enabled_streams & streams_mask) != streams_mask) {
		dev_dbg(dev, "set of streams %#llx already disabled on %s:%u\n",
			streams_mask, sd->entity.name, pad);
		return -EALREADY;
	}

	/* Stop streaming when the last streams are disabled. */
	if (!(sd->enabled_streams & ~streams_mask)) {
		ret = v4l2_subdev_call(sd, video, s_stream, 0);
		if (ret)
			return ret;
	}

	sd->enabled_streams &= ~streams_mask;

	return 0;
}

int v4l2_subdev_disable_streams(struct v4l2_subdev *sd, u32 pad,
				u64 streams_mask)
{
	struct device *dev = sd->entity.graph_obj.mdev->dev;
	struct v4l2_subdev_state *state;
	u64 found_streams = 0;
	unsigned int i;
	int ret;

	/* A few basic sanity checks first. */
	if (pad >= sd->entity.num_pads)
		return -EINVAL;

	if (!streams_mask)
		return 0;

	/* Fallback on .s_stream() if .disable_streams() isn't available. */
	if (!sd->ops->pad || !sd->ops->pad->disable_streams)
		return v4l2_subdev_disable_streams_fallback(sd, pad,
							    streams_mask);

	state = v4l2_subdev_lock_and_get_active_state(sd);

	/*
	 * Verify that the requested streams exist and that they are not
	 * already disabled.
	 */
	for (i = 0; i < state->stream_configs.num_configs; ++i) {
		struct v4l2_subdev_stream_config *cfg =
			&state->stream_configs.configs[i];

		if (cfg->pad != pad || !(streams_mask & BIT_ULL(cfg->stream)))
			continue;

		found_streams |= BIT_ULL(cfg->stream);

		if (!cfg->enabled) {
			dev_dbg(dev, "stream %u already disabled on %s:%u\n",
				cfg->stream, sd->entity.name, pad);
			ret = -EALREADY;
			goto done;
		}
	}

	if (found_streams != streams_mask) {
		dev_dbg(dev, "streams 0x%llx not found on %s:%u\n",
			streams_mask & ~found_streams, sd->entity.name, pad);
		ret = -EINVAL;
		goto done;
	}

	/* Call the .disable_streams() operation. */
	ret = v4l2_subdev_call(sd, pad, disable_streams, state, pad,
			       streams_mask);
	if (ret)
		goto done;

	/* Mark the streams as disabled. */
	for (i = 0; i < state->stream_configs.num_configs; ++i) {
		struct v4l2_subdev_stream_config *cfg =
			&state->stream_configs.configs[i];

		if (cfg->pad == pad && (streams_mask & BIT_ULL(cfg->stream)))
			cfg->enabled = false;
	}

done:
	v4l2_subdev_unlock_state(state);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_disable_streams);

int v4l2_subdev_s_stream_helper(struct v4l2_subdev *sd, int enable)
{
	struct v4l2_subdev_state *state;
	struct v4l2_subdev_route *route;
	struct media_pad *pad;
	u64 source_mask = 0;
	int pad_index = -1;

	/*
	 * Find the source pad. This helper is meant for subdevs that have a
	 * single source pad, so failures shouldn't happen, but catch them
	 * loudly nonetheless as they indicate a driver bug.
	 */
	media_entity_for_each_pad(&sd->entity, pad) {
		if (pad->flags & MEDIA_PAD_FL_SOURCE) {
			pad_index = pad->index;
			break;
		}
	}

	if (WARN_ON(pad_index == -1))
		return -EINVAL;

	/*
	 * As there's a single source pad, just collect all the source streams.
	 */
	state = v4l2_subdev_lock_and_get_active_state(sd);

	for_each_active_route(&state->routing, route)
		source_mask |= BIT_ULL(route->source_stream);

	v4l2_subdev_unlock_state(state);

	if (enable)
		return v4l2_subdev_enable_streams(sd, pad_index, source_mask);
	else
		return v4l2_subdev_disable_streams(sd, pad_index, source_mask);
}
EXPORT_SYMBOL_GPL(v4l2_subdev_s_stream_helper);
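
/*
 * A streams-aware subdev with a single source pad can plug this helper into
 * its video ops to keep supporting the legacy .s_stream() interface (the
 * ops structure name is hypothetical):
 *
 *	static const struct v4l2_subdev_video_ops my_video_ops = {
 *		.s_stream = v4l2_subdev_s_stream_helper,
 *	};
 */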

#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */

#endif /* CONFIG_MEDIA_CONTROLLER */

void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops)
{
	INIT_LIST_HEAD(&sd->list);
	BUG_ON(!ops);
	sd->ops = ops;
	sd->v4l2_dev = NULL;
	sd->flags = 0;
	sd->name[0] = '\0';
	sd->grp_id = 0;
	sd->dev_priv = NULL;
	sd->host_priv = NULL;
	sd->privacy_led = NULL;
#if defined(CONFIG_MEDIA_CONTROLLER)
	sd->entity.name = sd->name;
	sd->entity.obj_type = MEDIA_ENTITY_TYPE_V4L2_SUBDEV;
	sd->entity.function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
#endif
}
EXPORT_SYMBOL(v4l2_subdev_init);

void v4l2_subdev_notify_event(struct v4l2_subdev *sd,
			      const struct v4l2_event *ev)
{
	v4l2_event_queue(sd->devnode, ev);
	v4l2_subdev_notify(sd, V4L2_DEVICE_NOTIFY_EVENT, (void *)ev);
}
EXPORT_SYMBOL_GPL(v4l2_subdev_notify_event);

int v4l2_subdev_get_privacy_led(struct v4l2_subdev *sd)
{
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
	sd->privacy_led = led_get(sd->dev, "privacy-led");
	if (IS_ERR(sd->privacy_led) && PTR_ERR(sd->privacy_led) != -ENOENT)
		return dev_err_probe(sd->dev, PTR_ERR(sd->privacy_led),
				     "getting privacy LED\n");

	if (!IS_ERR_OR_NULL(sd->privacy_led)) {
		mutex_lock(&sd->privacy_led->led_access);
		led_sysfs_disable(sd->privacy_led);
		led_trigger_remove(sd->privacy_led);
		led_set_brightness(sd->privacy_led, 0);
		mutex_unlock(&sd->privacy_led->led_access);
	}
#endif
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_get_privacy_led);

void v4l2_subdev_put_privacy_led(struct v4l2_subdev *sd)
{
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
	if (!IS_ERR_OR_NULL(sd->privacy_led)) {
		mutex_lock(&sd->privacy_led->led_access);
		led_sysfs_enable(sd->privacy_led);
		mutex_unlock(&sd->privacy_led->led_access);
		led_put(sd->privacy_led);
	}
#endif
}
EXPORT_SYMBOL_GPL(v4l2_subdev_put_privacy_led);