[media] v4l: fwnode: Support generic fwnode for parsing standardised properties
[linux-2.6-block.git] / drivers / media / v4l2-core / v4l2-async.c
CommitLineData
e9e31049
GL
1/*
2 * V4L2 asynchronous subdevice registration API
3 *
4 * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/device.h>
12#include <linux/err.h>
13#include <linux/i2c.h>
14#include <linux/list.h>
15#include <linux/module.h>
16#include <linux/mutex.h>
17#include <linux/platform_device.h>
18#include <linux/slab.h>
19#include <linux/types.h>
20
21#include <media/v4l2-async.h>
22#include <media/v4l2-device.h>
23#include <media/v4l2-subdev.h>
24
86217651 25static bool match_i2c(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
e9e31049 26{
fe05e141 27#if IS_ENABLED(CONFIG_I2C)
86217651 28 struct i2c_client *client = i2c_verify_client(sd->dev);
e9e31049 29 return client &&
e9e31049
GL
30 asd->match.i2c.adapter_id == client->adapter->nr &&
31 asd->match.i2c.address == client->addr;
fe05e141
GL
32#else
33 return false;
34#endif
e9e31049
GL
35}
36
86217651
SA
37static bool match_devname(struct v4l2_subdev *sd,
38 struct v4l2_async_subdev *asd)
e9e31049 39{
86217651 40 return !strcmp(asd->match.device_name.name, dev_name(sd->dev));
e9e31049
GL
41}
42
86217651 43static bool match_of(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
e7359f8e 44{
d2180e0c
JM
45 return !of_node_cmp(of_node_full_name(sd->of_node),
46 of_node_full_name(asd->match.of.node));
86217651
SA
47}
48
49static bool match_custom(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
50{
51 if (!asd->match.custom.match)
52 /* Match always */
53 return true;
54
55 return asd->match.custom.match(sd->dev, asd);
e7359f8e
SN
56}
57
e9e31049
GL
/* Sub-devices registered before any notifier claimed them (hot-plug pool). */
static LIST_HEAD(subdev_list);
/* All registered notifiers, including already-completed ones. */
static LIST_HEAD(notifier_list);
/* Serializes access to both lists above and to notifier waiting/done lists. */
static DEFINE_MUTEX(list_lock);
61
62static struct v4l2_async_subdev *v4l2_async_belongs(struct v4l2_async_notifier *notifier,
b426b3a6 63 struct v4l2_subdev *sd)
e9e31049 64{
86217651 65 bool (*match)(struct v4l2_subdev *, struct v4l2_async_subdev *);
e9e31049 66 struct v4l2_async_subdev *asd;
e9e31049
GL
67
68 list_for_each_entry(asd, &notifier->waiting, list) {
69 /* bus_type has been verified valid before */
cfca7644
SN
70 switch (asd->match_type) {
71 case V4L2_ASYNC_MATCH_CUSTOM:
86217651 72 match = match_custom;
e9e31049 73 break;
cfca7644
SN
74 case V4L2_ASYNC_MATCH_DEVNAME:
75 match = match_devname;
e9e31049 76 break;
cfca7644 77 case V4L2_ASYNC_MATCH_I2C:
e9e31049
GL
78 match = match_i2c;
79 break;
e7359f8e
SN
80 case V4L2_ASYNC_MATCH_OF:
81 match = match_of;
82 break;
e9e31049
GL
83 default:
84 /* Cannot happen, unless someone breaks us */
85 WARN_ON(true);
86 return NULL;
87 }
88
89 /* match cannot be NULL here */
86217651 90 if (match(sd, asd))
e9e31049
GL
91 return asd;
92 }
93
94 return NULL;
95}
96
97static int v4l2_async_test_notify(struct v4l2_async_notifier *notifier,
b426b3a6 98 struct v4l2_subdev *sd,
e9e31049
GL
99 struct v4l2_async_subdev *asd)
100{
e9e31049
GL
101 int ret;
102
e9e31049
GL
103 if (notifier->bound) {
104 ret = notifier->bound(notifier, sd, asd);
105 if (ret < 0)
106 return ret;
107 }
e9e31049
GL
108
109 ret = v4l2_device_register_subdev(notifier->v4l2_dev, sd);
110 if (ret < 0) {
111 if (notifier->unbind)
112 notifier->unbind(notifier, sd, asd);
113 return ret;
114 }
115
47b037a0
TT
116 /* Remove from the waiting list */
117 list_del(&asd->list);
118 sd->asd = asd;
119 sd->notifier = notifier;
120
121 /* Move from the global subdevice list to notifier's done */
122 list_move(&sd->async_list, &notifier->done);
123
e9e31049
GL
124 if (list_empty(&notifier->waiting) && notifier->complete)
125 return notifier->complete(notifier);
126
127 return 0;
128}
129
/*
 * Detach @sd from its v4l2_device and reset its async bookkeeping so the
 * sub-device can be matched again later.
 */
static void v4l2_async_cleanup(struct v4l2_subdev *sd)
{
	v4l2_device_unregister_subdev(sd);
	/* Subdevice driver will reprobe and put the subdev back onto the list */
	list_del_init(&sd->async_list);
	sd->asd = NULL;
	sd->dev = NULL;
}
138
139int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
140 struct v4l2_async_notifier *notifier)
141{
b426b3a6 142 struct v4l2_subdev *sd, *tmp;
e9e31049
GL
143 struct v4l2_async_subdev *asd;
144 int i;
145
146 if (!notifier->num_subdevs || notifier->num_subdevs > V4L2_MAX_SUBDEVS)
147 return -EINVAL;
148
149 notifier->v4l2_dev = v4l2_dev;
150 INIT_LIST_HEAD(&notifier->waiting);
151 INIT_LIST_HEAD(&notifier->done);
152
153 for (i = 0; i < notifier->num_subdevs; i++) {
e8419d08 154 asd = notifier->subdevs[i];
e9e31049 155
cfca7644
SN
156 switch (asd->match_type) {
157 case V4L2_ASYNC_MATCH_CUSTOM:
158 case V4L2_ASYNC_MATCH_DEVNAME:
159 case V4L2_ASYNC_MATCH_I2C:
e7359f8e 160 case V4L2_ASYNC_MATCH_OF:
e9e31049
GL
161 break;
162 default:
163 dev_err(notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL,
cfca7644
SN
164 "Invalid match type %u on %p\n",
165 asd->match_type, asd);
e9e31049
GL
166 return -EINVAL;
167 }
168 list_add_tail(&asd->list, &notifier->waiting);
169 }
170
171 mutex_lock(&list_lock);
172
b426b3a6 173 list_for_each_entry_safe(sd, tmp, &subdev_list, async_list) {
e9e31049
GL
174 int ret;
175
b426b3a6 176 asd = v4l2_async_belongs(notifier, sd);
e9e31049
GL
177 if (!asd)
178 continue;
179
b426b3a6 180 ret = v4l2_async_test_notify(notifier, sd, asd);
e9e31049
GL
181 if (ret < 0) {
182 mutex_unlock(&list_lock);
183 return ret;
184 }
185 }
186
47b037a0
TT
187 /* Keep also completed notifiers on the list */
188 list_add(&notifier->list, &notifier_list);
189
e9e31049
GL
190 mutex_unlock(&list_lock);
191
192 return 0;
193}
194EXPORT_SYMBOL(v4l2_async_notifier_register);
195
196void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
197{
b426b3a6 198 struct v4l2_subdev *sd, *tmp;
e9e31049
GL
199 unsigned int notif_n_subdev = notifier->num_subdevs;
200 unsigned int n_subdev = min(notif_n_subdev, V4L2_MAX_SUBDEVS);
24e9a47e 201 struct device **dev;
e9e31049
GL
202 int i = 0;
203
8e3fbfee
LP
204 if (!notifier->v4l2_dev)
205 return;
206
f9e9c066 207 dev = kmalloc_array(n_subdev, sizeof(*dev), GFP_KERNEL);
24e9a47e
MCC
208 if (!dev) {
209 dev_err(notifier->v4l2_dev->dev,
210 "Failed to allocate device cache!\n");
211 }
212
e9e31049
GL
213 mutex_lock(&list_lock);
214
215 list_del(&notifier->list);
216
ceedcc4e 217 list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
24e9a47e
MCC
218 struct device *d;
219
220 d = get_device(sd->dev);
e9e31049 221
b426b3a6 222 v4l2_async_cleanup(sd);
e9e31049
GL
223
224 /* If we handled USB devices, we'd have to lock the parent too */
24e9a47e 225 device_release_driver(d);
e9e31049
GL
226
227 if (notifier->unbind)
b426b3a6 228 notifier->unbind(notifier, sd, sd->asd);
24e9a47e
MCC
229
230 /*
231 * Store device at the device cache, in order to call
232 * put_device() on the final step
233 */
234 if (dev)
235 dev[i++] = d;
236 else
237 put_device(d);
e9e31049
GL
238 }
239
240 mutex_unlock(&list_lock);
241
24e9a47e
MCC
242 /*
243 * Call device_attach() to reprobe devices
244 *
245 * NOTE: If dev allocation fails, i is 0, and the whole loop won't be
246 * executed.
247 */
e9e31049
GL
248 while (i--) {
249 struct device *d = dev[i];
250
251 if (d && device_attach(d) < 0) {
252 const char *name = "(none)";
253 int lock = device_trylock(d);
254
255 if (lock && d->driver)
256 name = d->driver->name;
257 dev_err(d, "Failed to re-probe to %s\n", name);
258 if (lock)
259 device_unlock(d);
260 }
261 put_device(d);
262 }
24e9a47e 263 kfree(dev);
8e3fbfee
LP
264
265 notifier->v4l2_dev = NULL;
266
e9e31049
GL
267 /*
268 * Don't care about the waiting list, it is initialised and populated
269 * upon notifier registration.
270 */
271}
272EXPORT_SYMBOL(v4l2_async_notifier_unregister);
273
274int v4l2_async_register_subdev(struct v4l2_subdev *sd)
275{
e9e31049
GL
276 struct v4l2_async_notifier *notifier;
277
86217651
SA
278 /*
279 * No reference taken. The reference is held by the device
280 * (struct v4l2_subdev.dev), and async sub-device does not
281 * exist independently of the device at any point of time.
282 */
283 if (!sd->of_node && sd->dev)
284 sd->of_node = sd->dev->of_node;
285
e9e31049
GL
286 mutex_lock(&list_lock);
287
b426b3a6 288 INIT_LIST_HEAD(&sd->async_list);
e9e31049
GL
289
290 list_for_each_entry(notifier, &notifier_list, list) {
b426b3a6 291 struct v4l2_async_subdev *asd = v4l2_async_belongs(notifier, sd);
e9e31049 292 if (asd) {
b426b3a6 293 int ret = v4l2_async_test_notify(notifier, sd, asd);
e9e31049
GL
294 mutex_unlock(&list_lock);
295 return ret;
296 }
297 }
298
299 /* None matched, wait for hot-plugging */
b426b3a6 300 list_add(&sd->async_list, &subdev_list);
e9e31049
GL
301
302 mutex_unlock(&list_lock);
303
304 return 0;
305}
306EXPORT_SYMBOL(v4l2_async_register_subdev);
307
308void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
309{
b426b3a6 310 struct v4l2_async_notifier *notifier = sd->notifier;
e9e31049 311
b426b3a6
SN
312 if (!sd->asd) {
313 if (!list_empty(&sd->async_list))
314 v4l2_async_cleanup(sd);
e9e31049
GL
315 return;
316 }
317
318 mutex_lock(&list_lock);
319
b426b3a6 320 list_add(&sd->asd->list, &notifier->waiting);
e9e31049 321
b426b3a6 322 v4l2_async_cleanup(sd);
e9e31049
GL
323
324 if (notifier->unbind)
b426b3a6 325 notifier->unbind(notifier, sd, sd->asd);
e9e31049
GL
326
327 mutex_unlock(&list_lock);
328}
329EXPORT_SYMBOL(v4l2_async_unregister_subdev);