/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Based on elements of hwmon and input subsystems.
 */
11 | ||
12 | #include <linux/kernel.h> | |
13 | #include <linux/module.h> | |
14 | #include <linux/idr.h> | |
15 | #include <linux/kdev_t.h> | |
16 | #include <linux/err.h> | |
17 | #include <linux/device.h> | |
18 | #include <linux/fs.h> | |
19 | #include <linux/interrupt.h> | |
20 | #include <linux/poll.h> | |
ffc18afa | 21 | #include <linux/sched.h> |
4439c935 | 22 | #include <linux/wait.h> |
847ec80b | 23 | #include <linux/cdev.h> |
5a0e3ad6 | 24 | #include <linux/slab.h> |
847ec80b JC |
25 | #include "iio.h" |
26 | #include "trigger_consumer.h" | |
27 | ||
28 | #define IIO_ID_PREFIX "device" | |
29 | #define IIO_ID_FORMAT IIO_ID_PREFIX "%d" | |
30 | ||
/* IDR to assign each registered device a unique id */
static DEFINE_IDR(iio_idr);

/* IDR for general event identifiers */
static DEFINE_IDR(iio_event_idr);
/* IDR to allocate character device minor numbers */
static DEFINE_IDR(iio_chrdev_idr);
/* Lock used to protect all of the above */
static DEFINE_SPINLOCK(iio_idr_lock);

dev_t iio_devt;
EXPORT_SYMBOL(iio_devt);

#define IIO_DEV_MAX 256
static char *iio_devnode(struct device *dev, mode_t *mode)
{
        return kasprintf(GFP_KERNEL, "iio/%s", dev_name(dev));
}

struct class iio_class = {
        .name = "iio",
        .devnode = iio_devnode,
};
EXPORT_SYMBOL(iio_class);
55 | ||
56 | void __iio_change_event(struct iio_detected_event_list *ev, | |
57 | int ev_code, | |
58 | s64 timestamp) | |
59 | { | |
60 | ev->ev.id = ev_code; | |
61 | ev->ev.timestamp = timestamp; | |
62 | } | |
63 | EXPORT_SYMBOL(__iio_change_event); | |
64 | ||
/* Used both in the interrupt line put events and the ring buffer ones */

/* Note that in its current form someone has to be listening before events
 * are queued. Hence a client MUST open the chrdev before the ring buffer is
 * switched on.
 */
int __iio_push_event(struct iio_event_interface *ev_int,
                     int ev_code,
                     s64 timestamp,
                     struct iio_shared_ev_pointer *shared_pointer_p)
{
        struct iio_detected_event_list *ev;
        int ret = 0;

        /* Does anyone care? */
        mutex_lock(&ev_int->event_list_lock);
        if (test_bit(IIO_BUSY_BIT_POS, &ev_int->handler.flags)) {
                if (ev_int->current_events == ev_int->max_events) {
                        mutex_unlock(&ev_int->event_list_lock);
                        return 0;
                }
                ev = kmalloc(sizeof(*ev), GFP_KERNEL);
                if (ev == NULL) {
                        ret = -ENOMEM;
                        mutex_unlock(&ev_int->event_list_lock);
                        goto error_ret;
                }
                ev->ev.id = ev_code;
                ev->ev.timestamp = timestamp;
                ev->shared_pointer = shared_pointer_p;
                if (ev->shared_pointer)
                        shared_pointer_p->ev_p = ev;

                list_add_tail(&ev->list, &ev_int->det_events.list);
                ev_int->current_events++;
                mutex_unlock(&ev_int->event_list_lock);
                wake_up_interruptible(&ev_int->wait);
        } else
                mutex_unlock(&ev_int->event_list_lock);

error_ret:
        return ret;
}
EXPORT_SYMBOL(__iio_push_event);
110 | ||
111 | int iio_push_event(struct iio_dev *dev_info, | |
112 | int ev_line, | |
113 | int ev_code, | |
114 | s64 timestamp) | |
115 | { | |
116 | return __iio_push_event(&dev_info->event_interfaces[ev_line], | |
117 | ev_code, timestamp, NULL); | |
118 | } | |
119 | EXPORT_SYMBOL(iio_push_event); | |
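
/*
 * Example (illustrative only, not part of the core): because the push path
 * above takes a mutex and uses GFP_KERNEL, drivers call iio_push_event()
 * from process context (e.g. a workqueue bottom half), not from the hard
 * interrupt handler. The foo_* names and event code are driver placeholders;
 * only the call shape is taken from the functions above.
 *
 *	static void foo_thresh_work(struct work_struct *work)
 *	{
 *		struct foo_state *st = container_of(work, struct foo_state,
 *						    thresh_work);
 *
 *		iio_push_event(st->indio_dev, 0, FOO_EVENT_CODE_THRESH,
 *			       iio_get_time_ns());
 *	}
 */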
120 | ||
121 | /* Generic interrupt line interrupt handler */ | |
77712e5f | 122 | static irqreturn_t iio_interrupt_handler(int irq, void *_int_info) |
847ec80b JC |
123 | { |
124 | struct iio_interrupt *int_info = _int_info; | |
125 | struct iio_dev *dev_info = int_info->dev_info; | |
126 | struct iio_event_handler_list *p; | |
127 | s64 time_ns; | |
128 | unsigned long flags; | |
129 | ||
130 | spin_lock_irqsave(&int_info->ev_list_lock, flags); | |
131 | if (list_empty(&int_info->ev_list)) { | |
132 | spin_unlock_irqrestore(&int_info->ev_list_lock, flags); | |
133 | return IRQ_NONE; | |
134 | } | |
135 | ||
136 | time_ns = iio_get_time_ns(); | |
137 | /* detect single element list*/ | |
138 | if (list_is_singular(&int_info->ev_list)) { | |
139 | disable_irq_nosync(irq); | |
140 | p = list_first_entry(&int_info->ev_list, | |
141 | struct iio_event_handler_list, | |
142 | list); | |
143 | /* single event handler - maybe shared */ | |
144 | p->handler(dev_info, 1, time_ns, !(p->refcount > 1)); | |
145 | } else | |
146 | list_for_each_entry(p, &int_info->ev_list, list) { | |
147 | disable_irq_nosync(irq); | |
148 | p->handler(dev_info, 1, time_ns, 0); | |
149 | } | |
150 | spin_unlock_irqrestore(&int_info->ev_list_lock, flags); | |
151 | ||
152 | return IRQ_HANDLED; | |
153 | } | |
154 | ||
static struct iio_interrupt *iio_allocate_interrupt(void)
{
        struct iio_interrupt *i = kmalloc(sizeof *i, GFP_KERNEL);
        if (i) {
                spin_lock_init(&i->ev_list_lock);
                INIT_LIST_HEAD(&i->ev_list);
        }
        return i;
}
164 | ||
165 | /* Confirming the validity of supplied irq is left to drivers.*/ | |
166 | int iio_register_interrupt_line(unsigned int irq, | |
167 | struct iio_dev *dev_info, | |
168 | int line_number, | |
169 | unsigned long type, | |
170 | const char *name) | |
171 | { | |
172 | int ret; | |
173 | ||
174 | dev_info->interrupts[line_number] = iio_allocate_interrupt(); | |
175 | if (dev_info->interrupts[line_number] == NULL) { | |
176 | ret = -ENOMEM; | |
177 | goto error_ret; | |
178 | } | |
179 | dev_info->interrupts[line_number]->line_number = line_number; | |
180 | dev_info->interrupts[line_number]->irq = irq; | |
181 | dev_info->interrupts[line_number]->dev_info = dev_info; | |
182 | ||
183 | /* Possibly only request on demand? | |
184 | * Can see this may complicate the handling of interrupts. | |
185 | * However, with this approach we might end up handling lots of | |
186 | * events no-one cares about.*/ | |
187 | ret = request_irq(irq, | |
188 | &iio_interrupt_handler, | |
189 | type, | |
190 | name, | |
191 | dev_info->interrupts[line_number]); | |
192 | ||
193 | error_ret: | |
194 | return ret; | |
195 | } | |
196 | EXPORT_SYMBOL(iio_register_interrupt_line); | |
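
/*
 * Example (illustrative only): a driver that has declared one interrupt line
 * might wire it up in probe roughly as follows. The irq source, device name
 * and error label are the driver's own; IRQF_TRIGGER_RISING is just one
 * possible trigger type.
 *
 *	ret = iio_register_interrupt_line(irq, indio_dev, 0,
 *					  IRQF_TRIGGER_RISING, "foo_dev");
 *	if (ret)
 *		goto error_unregister_device;
 */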
197 | ||
198 | /* This turns up an awful lot */ | |
199 | ssize_t iio_read_const_attr(struct device *dev, | |
200 | struct device_attribute *attr, | |
201 | char *buf) | |
202 | { | |
203 | return sprintf(buf, "%s\n", to_iio_const_attr(attr)->string); | |
204 | } | |
205 | EXPORT_SYMBOL(iio_read_const_attr); | |
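
/*
 * Example (illustrative only): drivers normally reach this helper through a
 * constant-string attribute wrapper rather than calling it directly.
 * Assuming the IIO_CONST_ATTR() helper from the subsystem headers, a fixed
 * list of available sampling frequencies could be exposed as:
 *
 *	static IIO_CONST_ATTR(sampling_frequency_available, "100 200 400");
 */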
206 | ||
207 | /* Before this runs the interrupt generator must have been disabled */ | |
208 | void iio_unregister_interrupt_line(struct iio_dev *dev_info, int line_number) | |
209 | { | |
210 | /* make sure the interrupt handlers are all done */ | |
211 | flush_scheduled_work(); | |
212 | free_irq(dev_info->interrupts[line_number]->irq, | |
213 | dev_info->interrupts[line_number]); | |
214 | kfree(dev_info->interrupts[line_number]); | |
215 | } | |
216 | EXPORT_SYMBOL(iio_unregister_interrupt_line); | |
217 | ||
218 | /* Reference counted add and remove */ | |
219 | void iio_add_event_to_list(struct iio_event_handler_list *el, | |
220 | struct list_head *head) | |
221 | { | |
222 | unsigned long flags; | |
223 | struct iio_interrupt *inter = to_iio_interrupt(head); | |
224 | ||
225 | /* take mutex to protect this element */ | |
226 | mutex_lock(&el->exist_lock); | |
227 | if (el->refcount == 0) { | |
228 | /* Take the event list spin lock */ | |
229 | spin_lock_irqsave(&inter->ev_list_lock, flags); | |
230 | list_add(&el->list, head); | |
231 | spin_unlock_irqrestore(&inter->ev_list_lock, flags); | |
232 | } | |
233 | el->refcount++; | |
234 | mutex_unlock(&el->exist_lock); | |
235 | } | |
236 | EXPORT_SYMBOL(iio_add_event_to_list); | |
237 | ||
238 | void iio_remove_event_from_list(struct iio_event_handler_list *el, | |
239 | struct list_head *head) | |
240 | { | |
241 | unsigned long flags; | |
242 | struct iio_interrupt *inter = to_iio_interrupt(head); | |
243 | ||
244 | mutex_lock(&el->exist_lock); | |
245 | el->refcount--; | |
246 | if (el->refcount == 0) { | |
247 | /* Take the event list spin lock */ | |
248 | spin_lock_irqsave(&inter->ev_list_lock, flags); | |
249 | list_del_init(&el->list); | |
250 | spin_unlock_irqrestore(&inter->ev_list_lock, flags); | |
251 | } | |
252 | mutex_unlock(&el->exist_lock); | |
253 | } | |
254 | EXPORT_SYMBOL(iio_remove_event_from_list); | |
255 | ||
77712e5f MB |
256 | static ssize_t iio_event_chrdev_read(struct file *filep, |
257 | char __user *buf, | |
258 | size_t count, | |
259 | loff_t *f_ps) | |
847ec80b JC |
260 | { |
261 | struct iio_event_interface *ev_int = filep->private_data; | |
262 | struct iio_detected_event_list *el; | |
263 | int ret; | |
264 | size_t len; | |
265 | ||
266 | mutex_lock(&ev_int->event_list_lock); | |
267 | if (list_empty(&ev_int->det_events.list)) { | |
268 | if (filep->f_flags & O_NONBLOCK) { | |
269 | ret = -EAGAIN; | |
270 | goto error_mutex_unlock; | |
271 | } | |
272 | mutex_unlock(&ev_int->event_list_lock); | |
273 | /* Blocking on device; waiting for something to be there */ | |
274 | ret = wait_event_interruptible(ev_int->wait, | |
275 | !list_empty(&ev_int | |
276 | ->det_events.list)); | |
277 | if (ret) | |
278 | goto error_ret; | |
279 | /* Single access device so noone else can get the data */ | |
280 | mutex_lock(&ev_int->event_list_lock); | |
281 | } | |
282 | ||
283 | el = list_first_entry(&ev_int->det_events.list, | |
284 | struct iio_detected_event_list, | |
285 | list); | |
286 | len = sizeof el->ev; | |
287 | if (copy_to_user(buf, &(el->ev), len)) { | |
288 | ret = -EFAULT; | |
289 | goto error_mutex_unlock; | |
290 | } | |
291 | list_del(&el->list); | |
292 | ev_int->current_events--; | |
293 | mutex_unlock(&ev_int->event_list_lock); | |
294 | /* | |
295 | * Possible concurency issue if an update of this event is on its way | |
75b16013 JC |
296 | * through. May lead to new event being removed whilst the reported |
297 | * event was the unescalated event. In typical use case this is not a | |
298 | * problem as userspace will say read half the buffer due to a 50% | |
299 | * full event which would make the correct 100% full incorrect anyway. | |
847ec80b | 300 | */ |
75b16013 JC |
301 | if (el->shared_pointer) { |
302 | spin_lock(&el->shared_pointer->lock); | |
847ec80b | 303 | (el->shared_pointer->ev_p) = NULL; |
75b16013 JC |
304 | spin_unlock(&el->shared_pointer->lock); |
305 | } | |
847ec80b JC |
306 | kfree(el); |
307 | ||
308 | return len; | |
309 | ||
310 | error_mutex_unlock: | |
311 | mutex_unlock(&ev_int->event_list_lock); | |
312 | error_ret: | |
313 | ||
314 | return ret; | |
315 | } | |
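
/*
 * Example (illustrative only): userspace consumes events by reading fixed
 * size records from the event_lineN character device. The struct layout
 * below is an assumption that mirrors the id/timestamp pair copied out
 * above; the node path follows from the "iio/%s" devnode callback.
 *
 *	struct { int id; long long timestamp; } ev;
 *	int fd = open("/dev/iio/event_line0", O_RDONLY);
 *
 *	if (read(fd, &ev, sizeof(ev)) == sizeof(ev))
 *		printf("event %d at %lld ns\n", ev.id, ev.timestamp);
 */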
316 | ||
77712e5f | 317 | static int iio_event_chrdev_release(struct inode *inode, struct file *filep) |
847ec80b JC |
318 | { |
319 | struct iio_handler *hand = iio_cdev_to_handler(inode->i_cdev); | |
320 | struct iio_event_interface *ev_int = hand->private; | |
321 | struct iio_detected_event_list *el, *t; | |
322 | ||
323 | mutex_lock(&ev_int->event_list_lock); | |
324 | clear_bit(IIO_BUSY_BIT_POS, &ev_int->handler.flags); | |
325 | /* | |
326 | * In order to maintain a clean state for reopening, | |
327 | * clear out any awaiting events. The mask will prevent | |
328 | * any new __iio_push_event calls running. | |
329 | */ | |
330 | list_for_each_entry_safe(el, t, &ev_int->det_events.list, list) { | |
331 | list_del(&el->list); | |
332 | kfree(el); | |
333 | } | |
334 | mutex_unlock(&ev_int->event_list_lock); | |
335 | ||
336 | return 0; | |
337 | } | |
338 | ||
static int iio_event_chrdev_open(struct inode *inode, struct file *filep)
{
        struct iio_handler *hand = iio_cdev_to_handler(inode->i_cdev);
        struct iio_event_interface *ev_int = hand->private;

        mutex_lock(&ev_int->event_list_lock);
        if (test_and_set_bit(IIO_BUSY_BIT_POS, &hand->flags)) {
                fops_put(filep->f_op);
                mutex_unlock(&ev_int->event_list_lock);
                return -EBUSY;
        }
        filep->private_data = hand->private;
        mutex_unlock(&ev_int->event_list_lock);

        return 0;
}

static const struct file_operations iio_event_chrdev_fileops = {
        .read = iio_event_chrdev_read,
        .release = iio_event_chrdev_release,
        .open = iio_event_chrdev_open,
        .owner = THIS_MODULE,
};
362 | ||
363 | static void iio_event_dev_release(struct device *dev) | |
364 | { | |
365 | struct iio_event_interface *ev_int | |
366 | = container_of(dev, struct iio_event_interface, dev); | |
367 | cdev_del(&ev_int->handler.chrdev); | |
368 | iio_device_free_chrdev_minor(MINOR(dev->devt)); | |
369 | }; | |
370 | ||
371 | static struct device_type iio_event_type = { | |
372 | .release = iio_event_dev_release, | |
373 | }; | |
374 | ||
375 | int iio_device_get_chrdev_minor(void) | |
376 | { | |
377 | int ret, val; | |
378 | ||
379 | idr_again: | |
380 | if (unlikely(idr_pre_get(&iio_chrdev_idr, GFP_KERNEL) == 0)) | |
381 | return -ENOMEM; | |
382 | spin_lock(&iio_idr_lock); | |
383 | ret = idr_get_new(&iio_chrdev_idr, NULL, &val); | |
384 | spin_unlock(&iio_idr_lock); | |
385 | if (unlikely(ret == -EAGAIN)) | |
386 | goto idr_again; | |
387 | else if (unlikely(ret)) | |
388 | return ret; | |
389 | if (val > IIO_DEV_MAX) | |
390 | return -ENOMEM; | |
391 | return val; | |
392 | } | |
393 | ||
394 | void iio_device_free_chrdev_minor(int val) | |
395 | { | |
396 | spin_lock(&iio_idr_lock); | |
397 | idr_remove(&iio_chrdev_idr, val); | |
398 | spin_unlock(&iio_idr_lock); | |
399 | } | |
400 | ||
401 | int iio_setup_ev_int(struct iio_event_interface *ev_int, | |
402 | const char *name, | |
403 | struct module *owner, | |
404 | struct device *dev) | |
405 | { | |
406 | int ret, minor; | |
407 | ||
408 | ev_int->dev.class = &iio_class; | |
409 | ev_int->dev.parent = dev; | |
410 | ev_int->dev.type = &iio_event_type; | |
411 | device_initialize(&ev_int->dev); | |
412 | ||
413 | minor = iio_device_get_chrdev_minor(); | |
414 | if (minor < 0) { | |
415 | ret = minor; | |
416 | goto error_device_put; | |
417 | } | |
418 | ev_int->dev.devt = MKDEV(MAJOR(iio_devt), minor); | |
419 | dev_set_name(&ev_int->dev, "%s", name); | |
420 | ||
421 | ret = device_add(&ev_int->dev); | |
422 | if (ret) | |
423 | goto error_free_minor; | |
424 | ||
425 | cdev_init(&ev_int->handler.chrdev, &iio_event_chrdev_fileops); | |
426 | ev_int->handler.chrdev.owner = owner; | |
427 | ||
428 | mutex_init(&ev_int->event_list_lock); | |
429 | /* discussion point - make this variable? */ | |
430 | ev_int->max_events = 10; | |
431 | ev_int->current_events = 0; | |
432 | INIT_LIST_HEAD(&ev_int->det_events.list); | |
433 | init_waitqueue_head(&ev_int->wait); | |
434 | ev_int->handler.private = ev_int; | |
435 | ev_int->handler.flags = 0; | |
436 | ||
437 | ret = cdev_add(&ev_int->handler.chrdev, ev_int->dev.devt, 1); | |
438 | if (ret) | |
439 | goto error_unreg_device; | |
440 | ||
441 | return 0; | |
442 | ||
443 | error_unreg_device: | |
444 | device_unregister(&ev_int->dev); | |
445 | error_free_minor: | |
446 | iio_device_free_chrdev_minor(minor); | |
447 | error_device_put: | |
448 | put_device(&ev_int->dev); | |
449 | ||
450 | return ret; | |
451 | } | |
452 | ||
453 | void iio_free_ev_int(struct iio_event_interface *ev_int) | |
454 | { | |
455 | device_unregister(&ev_int->dev); | |
456 | put_device(&ev_int->dev); | |
457 | } | |
458 | ||
459 | static int __init iio_dev_init(void) | |
460 | { | |
461 | int err; | |
462 | ||
463 | err = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio"); | |
464 | if (err < 0) | |
465 | printk(KERN_ERR "%s: failed to allocate char dev region\n", | |
466 | __FILE__); | |
467 | ||
468 | return err; | |
469 | } | |
470 | ||
471 | static void __exit iio_dev_exit(void) | |
472 | { | |
473 | if (iio_devt) | |
474 | unregister_chrdev_region(iio_devt, IIO_DEV_MAX); | |
475 | } | |
476 | ||
477 | static int __init iio_init(void) | |
478 | { | |
479 | int ret; | |
480 | ||
481 | /* Create sysfs class */ | |
482 | ret = class_register(&iio_class); | |
483 | if (ret < 0) { | |
484 | printk(KERN_ERR | |
485 | "%s could not create sysfs class\n", | |
486 | __FILE__); | |
487 | goto error_nothing; | |
488 | } | |
489 | ||
490 | ret = iio_dev_init(); | |
491 | if (ret < 0) | |
492 | goto error_unregister_class; | |
493 | ||
494 | return 0; | |
495 | ||
496 | error_unregister_class: | |
497 | class_unregister(&iio_class); | |
498 | error_nothing: | |
499 | return ret; | |
500 | } | |
501 | ||
502 | static void __exit iio_exit(void) | |
503 | { | |
504 | iio_dev_exit(); | |
505 | class_unregister(&iio_class); | |
506 | } | |
507 | ||
508 | static int iio_device_register_sysfs(struct iio_dev *dev_info) | |
509 | { | |
510 | int ret = 0; | |
511 | ||
512 | ret = sysfs_create_group(&dev_info->dev.kobj, dev_info->attrs); | |
513 | if (ret) { | |
514 | dev_err(dev_info->dev.parent, | |
515 | "Failed to register sysfs hooks\n"); | |
516 | goto error_ret; | |
517 | } | |
518 | ||
519 | if (dev_info->scan_el_attrs) { | |
520 | ret = sysfs_create_group(&dev_info->dev.kobj, | |
521 | dev_info->scan_el_attrs); | |
522 | if (ret) | |
523 | dev_err(&dev_info->dev, | |
524 | "Failed to add sysfs scan els\n"); | |
525 | } | |
526 | ||
527 | error_ret: | |
528 | return ret; | |
529 | } | |
530 | ||
531 | static void iio_device_unregister_sysfs(struct iio_dev *dev_info) | |
532 | { | |
533 | if (dev_info->scan_el_attrs) | |
534 | sysfs_remove_group(&dev_info->dev.kobj, | |
535 | dev_info->scan_el_attrs); | |
536 | ||
537 | sysfs_remove_group(&dev_info->dev.kobj, dev_info->attrs); | |
538 | } | |
539 | ||
540 | int iio_get_new_idr_val(struct idr *this_idr) | |
541 | { | |
542 | int ret; | |
543 | int val; | |
544 | ||
545 | idr_again: | |
546 | if (unlikely(idr_pre_get(this_idr, GFP_KERNEL) == 0)) | |
547 | return -ENOMEM; | |
548 | ||
549 | spin_lock(&iio_idr_lock); | |
550 | ret = idr_get_new(this_idr, NULL, &val); | |
551 | spin_unlock(&iio_idr_lock); | |
552 | if (unlikely(ret == -EAGAIN)) | |
553 | goto idr_again; | |
554 | else if (unlikely(ret)) | |
555 | return ret; | |
556 | ||
557 | return val; | |
558 | } | |
559 | EXPORT_SYMBOL(iio_get_new_idr_val); | |
560 | ||
561 | void iio_free_idr_val(struct idr *this_idr, int id) | |
562 | { | |
563 | spin_lock(&iio_idr_lock); | |
564 | idr_remove(this_idr, id); | |
565 | spin_unlock(&iio_idr_lock); | |
566 | } | |
567 | EXPORT_SYMBOL(iio_free_idr_val); | |
568 | ||
569 | static int iio_device_register_id(struct iio_dev *dev_info, | |
570 | struct idr *this_idr) | |
571 | { | |
572 | ||
573 | dev_info->id = iio_get_new_idr_val(&iio_idr); | |
574 | if (dev_info->id < 0) | |
575 | return dev_info->id; | |
576 | return 0; | |
577 | } | |
578 | ||
579 | static void iio_device_unregister_id(struct iio_dev *dev_info) | |
580 | { | |
581 | iio_free_idr_val(&iio_idr, dev_info->id); | |
582 | } | |
583 | ||
584 | static inline int __iio_add_event_config_attrs(struct iio_dev *dev_info, int i) | |
585 | { | |
586 | int ret; | |
587 | /*p for adding, q for removing */ | |
588 | struct attribute **attrp, **attrq; | |
589 | ||
590 | if (dev_info->event_conf_attrs && dev_info->event_conf_attrs[i].attrs) { | |
591 | attrp = dev_info->event_conf_attrs[i].attrs; | |
592 | while (*attrp) { | |
593 | ret = sysfs_add_file_to_group(&dev_info->dev.kobj, | |
594 | *attrp, | |
595 | dev_info | |
596 | ->event_attrs[i].name); | |
597 | if (ret) | |
598 | goto error_ret; | |
599 | attrp++; | |
600 | } | |
601 | } | |
602 | return 0; | |
603 | ||
604 | error_ret: | |
605 | attrq = dev_info->event_conf_attrs[i].attrs; | |
606 | while (attrq != attrp) { | |
607 | sysfs_remove_file_from_group(&dev_info->dev.kobj, | |
608 | *attrq, | |
609 | dev_info->event_attrs[i].name); | |
610 | attrq++; | |
611 | } | |
612 | ||
613 | return ret; | |
614 | } | |
615 | ||
616 | static inline int __iio_remove_event_config_attrs(struct iio_dev *dev_info, | |
617 | int i) | |
618 | { | |
619 | struct attribute **attrq; | |
620 | ||
621 | if (dev_info->event_conf_attrs | |
622 | && dev_info->event_conf_attrs[i].attrs) { | |
623 | attrq = dev_info->event_conf_attrs[i].attrs; | |
624 | while (*attrq) { | |
625 | sysfs_remove_file_from_group(&dev_info->dev.kobj, | |
626 | *attrq, | |
627 | dev_info | |
628 | ->event_attrs[i].name); | |
629 | attrq++; | |
630 | } | |
631 | } | |
632 | ||
633 | return 0; | |
634 | } | |
635 | ||
636 | static int iio_device_register_eventset(struct iio_dev *dev_info) | |
637 | { | |
638 | int ret = 0, i, j; | |
639 | ||
640 | if (dev_info->num_interrupt_lines == 0) | |
641 | return 0; | |
642 | ||
643 | dev_info->event_interfaces = | |
644 | kzalloc(sizeof(struct iio_event_interface) | |
645 | *dev_info->num_interrupt_lines, | |
646 | GFP_KERNEL); | |
647 | if (dev_info->event_interfaces == NULL) { | |
648 | ret = -ENOMEM; | |
649 | goto error_ret; | |
650 | } | |
651 | ||
652 | dev_info->interrupts = kzalloc(sizeof(struct iio_interrupt *) | |
653 | *dev_info->num_interrupt_lines, | |
654 | GFP_KERNEL); | |
655 | if (dev_info->interrupts == NULL) { | |
656 | ret = -ENOMEM; | |
657 | goto error_free_event_interfaces; | |
658 | } | |
659 | ||
660 | for (i = 0; i < dev_info->num_interrupt_lines; i++) { | |
661 | dev_info->event_interfaces[i].owner = dev_info->driver_module; | |
662 | ret = iio_get_new_idr_val(&iio_event_idr); | |
663 | if (ret) | |
664 | goto error_free_setup_ev_ints; | |
665 | else | |
666 | dev_info->event_interfaces[i].id = ret; | |
667 | ||
668 | snprintf(dev_info->event_interfaces[i]._name, 20, | |
669 | "event_line%d", | |
670 | dev_info->event_interfaces[i].id); | |
671 | ||
672 | ret = iio_setup_ev_int(&dev_info->event_interfaces[i], | |
673 | (const char *)(dev_info | |
674 | ->event_interfaces[i] | |
675 | ._name), | |
676 | dev_info->driver_module, | |
677 | &dev_info->dev); | |
678 | if (ret) { | |
679 | dev_err(&dev_info->dev, | |
680 | "Could not get chrdev interface\n"); | |
681 | iio_free_idr_val(&iio_event_idr, | |
682 | dev_info->event_interfaces[i].id); | |
683 | goto error_free_setup_ev_ints; | |
684 | } | |
685 | } | |
686 | ||
687 | for (i = 0; i < dev_info->num_interrupt_lines; i++) { | |
688 | snprintf(dev_info->event_interfaces[i]._attrname, 20, | |
689 | "event_line%d_sources", i); | |
690 | dev_info->event_attrs[i].name | |
691 | = (const char *) | |
692 | (dev_info->event_interfaces[i]._attrname); | |
693 | ret = sysfs_create_group(&dev_info->dev.kobj, | |
694 | &dev_info->event_attrs[i]); | |
695 | if (ret) { | |
696 | dev_err(&dev_info->dev, | |
697 | "Failed to register sysfs for event attrs"); | |
698 | goto error_remove_sysfs_interfaces; | |
699 | } | |
700 | } | |
701 | ||
702 | for (i = 0; i < dev_info->num_interrupt_lines; i++) { | |
703 | ret = __iio_add_event_config_attrs(dev_info, i); | |
704 | if (ret) | |
705 | goto error_unregister_config_attrs; | |
706 | } | |
707 | ||
708 | return 0; | |
709 | ||
710 | error_unregister_config_attrs: | |
711 | for (j = 0; j < i; j++) | |
712 | __iio_remove_event_config_attrs(dev_info, i); | |
713 | i = dev_info->num_interrupt_lines - 1; | |
714 | error_remove_sysfs_interfaces: | |
715 | for (j = 0; j < i; j++) | |
716 | sysfs_remove_group(&dev_info->dev.kobj, | |
717 | &dev_info->event_attrs[j]); | |
718 | i = dev_info->num_interrupt_lines - 1; | |
719 | error_free_setup_ev_ints: | |
720 | for (j = 0; j < i; j++) { | |
721 | iio_free_idr_val(&iio_event_idr, | |
722 | dev_info->event_interfaces[i].id); | |
723 | iio_free_ev_int(&dev_info->event_interfaces[j]); | |
724 | } | |
725 | kfree(dev_info->interrupts); | |
726 | error_free_event_interfaces: | |
727 | kfree(dev_info->event_interfaces); | |
728 | error_ret: | |
729 | ||
730 | return ret; | |
731 | } | |
732 | ||
733 | static void iio_device_unregister_eventset(struct iio_dev *dev_info) | |
734 | { | |
735 | int i; | |
736 | ||
737 | if (dev_info->num_interrupt_lines == 0) | |
738 | return; | |
739 | for (i = 0; i < dev_info->num_interrupt_lines; i++) | |
740 | sysfs_remove_group(&dev_info->dev.kobj, | |
741 | &dev_info->event_attrs[i]); | |
742 | ||
743 | for (i = 0; i < dev_info->num_interrupt_lines; i++) { | |
744 | iio_free_idr_val(&iio_event_idr, | |
745 | dev_info->event_interfaces[i].id); | |
746 | iio_free_ev_int(&dev_info->event_interfaces[i]); | |
747 | } | |
748 | kfree(dev_info->interrupts); | |
749 | kfree(dev_info->event_interfaces); | |
750 | } | |
751 | ||
752 | static void iio_dev_release(struct device *device) | |
753 | { | |
754 | struct iio_dev *dev = to_iio_dev(device); | |
755 | ||
756 | iio_put(); | |
757 | kfree(dev); | |
758 | } | |
759 | ||
760 | static struct device_type iio_dev_type = { | |
761 | .name = "iio_device", | |
762 | .release = iio_dev_release, | |
763 | }; | |
764 | ||
765 | struct iio_dev *iio_allocate_device(void) | |
766 | { | |
767 | struct iio_dev *dev = kzalloc(sizeof *dev, GFP_KERNEL); | |
768 | ||
769 | if (dev) { | |
770 | dev->dev.type = &iio_dev_type; | |
771 | dev->dev.class = &iio_class; | |
772 | device_initialize(&dev->dev); | |
773 | dev_set_drvdata(&dev->dev, (void *)dev); | |
774 | mutex_init(&dev->mlock); | |
775 | iio_get(); | |
776 | } | |
777 | ||
778 | return dev; | |
779 | } | |
780 | EXPORT_SYMBOL(iio_allocate_device); | |
781 | ||
782 | void iio_free_device(struct iio_dev *dev) | |
783 | { | |
784 | if (dev) | |
785 | iio_put_device(dev); | |
786 | } | |
787 | EXPORT_SYMBOL(iio_free_device); | |
788 | ||
789 | int iio_device_register(struct iio_dev *dev_info) | |
790 | { | |
791 | int ret; | |
792 | ||
793 | ret = iio_device_register_id(dev_info, &iio_idr); | |
794 | if (ret) { | |
795 | dev_err(&dev_info->dev, "Failed to get id\n"); | |
796 | goto error_ret; | |
797 | } | |
798 | dev_set_name(&dev_info->dev, "device%d", dev_info->id); | |
799 | ||
800 | ret = device_add(&dev_info->dev); | |
801 | if (ret) | |
802 | goto error_free_idr; | |
803 | ret = iio_device_register_sysfs(dev_info); | |
804 | if (ret) { | |
805 | dev_err(dev_info->dev.parent, | |
806 | "Failed to register sysfs interfaces\n"); | |
807 | goto error_del_device; | |
808 | } | |
809 | ret = iio_device_register_eventset(dev_info); | |
810 | if (ret) { | |
811 | dev_err(dev_info->dev.parent, | |
812 | "Failed to register event set \n"); | |
813 | goto error_free_sysfs; | |
814 | } | |
815 | if (dev_info->modes & INDIO_RING_TRIGGERED) | |
816 | iio_device_register_trigger_consumer(dev_info); | |
817 | ||
818 | return 0; | |
819 | ||
820 | error_free_sysfs: | |
821 | iio_device_unregister_sysfs(dev_info); | |
822 | error_del_device: | |
823 | device_del(&dev_info->dev); | |
824 | error_free_idr: | |
825 | iio_device_unregister_id(dev_info); | |
826 | error_ret: | |
827 | return ret; | |
828 | } | |
829 | EXPORT_SYMBOL(iio_device_register); | |
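
/*
 * Example (illustrative only): a minimal driver probe sequence built on the
 * allocation and registration helpers above. Only fields referenced in this
 * file are set here; the parent device and attribute group are the driver's
 * own placeholders.
 *
 *	indio_dev = iio_allocate_device();
 *	if (indio_dev == NULL)
 *		return -ENOMEM;
 *	indio_dev->dev.parent = parent_dev;
 *	indio_dev->attrs = &foo_attribute_group;
 *	indio_dev->num_interrupt_lines = 1;
 *	indio_dev->driver_module = THIS_MODULE;
 *
 *	ret = iio_device_register(indio_dev);
 *	if (ret)
 *		iio_free_device(indio_dev);
 */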
830 | ||
831 | void iio_device_unregister(struct iio_dev *dev_info) | |
832 | { | |
833 | if (dev_info->modes & INDIO_RING_TRIGGERED) | |
834 | iio_device_unregister_trigger_consumer(dev_info); | |
835 | iio_device_unregister_eventset(dev_info); | |
836 | iio_device_unregister_sysfs(dev_info); | |
837 | iio_device_unregister_id(dev_info); | |
838 | device_unregister(&dev_info->dev); | |
839 | } | |
840 | EXPORT_SYMBOL(iio_device_unregister); | |
841 | ||
842 | void iio_put(void) | |
843 | { | |
844 | module_put(THIS_MODULE); | |
845 | } | |
846 | ||
847 | void iio_get(void) | |
848 | { | |
849 | __module_get(THIS_MODULE); | |
850 | } | |
851 | ||
852 | subsys_initcall(iio_init); | |
853 | module_exit(iio_exit); | |
854 | ||
855 | MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>"); | |
856 | MODULE_DESCRIPTION("Industrial I/O core"); | |
857 | MODULE_LICENSE("GPL"); |