Commit | Line | Data |
---|---|---|
beafc54c HK |
1 | /* |
2 | * drivers/uio/uio.c | |
3 | * | |
4 | * Copyright(C) 2005, Benedikt Spranger <b.spranger@linutronix.de> | |
5 | * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de> | |
6 | * Copyright(C) 2006, Hans J. Koch <hjk@linutronix.de> | |
7 | * Copyright(C) 2006, Greg Kroah-Hartman <greg@kroah.com> | |
8 | * | |
9 | * Userspace IO | |
10 | * | |
11 | * Base Functions | |
12 | * | |
13 | * Licensed under the GPLv2 only. | |
14 | */ | |
15 | ||
16 | #include <linux/module.h> | |
17 | #include <linux/init.h> | |
18 | #include <linux/poll.h> | |
19 | #include <linux/device.h> | |
20 | #include <linux/mm.h> | |
21 | #include <linux/idr.h> | |
22 | #include <linux/string.h> | |
23 | #include <linux/kobject.h> | |
24 | #include <linux/uio_driver.h> | |
25 | ||
26 | #define UIO_MAX_DEVICES 255 | |
27 | ||
/*
 * Per-device state for one registered UIO device.  Allocated in
 * __uio_register_device() and freed in uio_unregister_device().
 */
struct uio_device {
	struct module *owner;		/* module providing info->open/release/mmap */
	struct device *dev;		/* class device created under /sys/class/uio */
	int minor;			/* chrdev minor, also the idr id */
	atomic_t event;			/* interrupt event counter, bumped by uio_event_notify() */
	struct fasync_struct *async_queue; /* SIGIO subscribers (uio_fasync) */
	wait_queue_head_t wait;		/* readers/pollers blocked waiting for events */
	int vma_count;			/* number of live mmap()ings (vma open/close) */
	struct uio_info *info;		/* driver-supplied capabilities/callbacks */
	struct kobject *map_dir;	/* the sysfs "maps" directory kobject */
};
39 | ||
static int uio_major;			/* chrdev major, set by uio_major_init() */
static DEFINE_IDR(uio_idr);		/* minor -> struct uio_device lookup */
static struct file_operations uio_fops;	/* defined below, needed by uio_major_init() */

/*
 * UIO class infrastructure.  A single refcounted instance shared by all
 * registered devices; created on first registration, destroyed on last
 * unregistration (see init_uio_class()/uio_class_destroy()).
 */
static struct uio_class {
	struct kref kref;
	struct class *class;
} *uio_class;
49 | ||
50 | /* | |
51 | * attributes | |
52 | */ | |
53 | ||
81e7c6a6 GKH |
/*
 * One sysfs "mapX" directory: ties a kobject to the uio_mem region it
 * describes.  Freed by map_release() when the kobject's refcount drops.
 */
struct uio_map {
	struct kobject kobj;
	struct uio_mem *mem;
};
#define to_map(map) container_of(map, struct uio_map, kobj)
beafc54c | 59 | |
beafc54c | 60 | |
81e7c6a6 | 61 | static ssize_t map_attr_show(struct kobject *kobj, struct kobj_attribute *attr, |
beafc54c HK |
62 | char *buf) |
63 | { | |
81e7c6a6 GKH |
64 | struct uio_map *map = to_map(kobj); |
65 | struct uio_mem *mem = map->mem; | |
beafc54c | 66 | |
81e7c6a6 | 67 | if (strncmp(attr->attr.name, "addr", 4) == 0) |
beafc54c HK |
68 | return sprintf(buf, "0x%lx\n", mem->addr); |
69 | ||
81e7c6a6 | 70 | if (strncmp(attr->attr.name, "size", 4) == 0) |
beafc54c HK |
71 | return sprintf(buf, "0x%lx\n", mem->size); |
72 | ||
73 | return -ENODEV; | |
74 | } | |
75 | ||
81e7c6a6 GKH |
/*
 * NOTE(review): "attr_attribute" backs the "addr" file — the name is
 * misleading; "addr_attribute" would match size_attribute's pattern.
 * Left as-is because attrs[] below references it by this name.
 */
static struct kobj_attribute attr_attribute =
	__ATTR(addr, S_IRUGO, map_attr_show, NULL);
static struct kobj_attribute size_attribute =
	__ATTR(size, S_IRUGO, map_attr_show, NULL);
beafc54c | 80 | |
81e7c6a6 GKH |
/* Default attributes created for every mapX kobject (via map_attr_type). */
static struct attribute *attrs[] = {
	&attr_attribute.attr,	/* the "addr" file */
	&size_attribute.attr,	/* the "size" file */
	NULL,	/* need to NULL terminate the list of attributes */
};
86 | ||
81e7c6a6 GKH |
/* kobject release: frees the containing uio_map once the last ref drops. */
static void map_release(struct kobject *kobj)
{
	kfree(to_map(kobj));
}
92 | ||
/* ktype for mapX kobjects: default addr/size files plus release hook. */
static struct kobj_type map_attr_type = {
	.release	= map_release,
	.default_attrs	= attrs,
};
97 | ||
98 | static ssize_t show_name(struct device *dev, | |
99 | struct device_attribute *attr, char *buf) | |
100 | { | |
101 | struct uio_device *idev = dev_get_drvdata(dev); | |
102 | if (idev) | |
103 | return sprintf(buf, "%s\n", idev->info->name); | |
104 | else | |
105 | return -ENODEV; | |
106 | } | |
107 | static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); | |
108 | ||
109 | static ssize_t show_version(struct device *dev, | |
110 | struct device_attribute *attr, char *buf) | |
111 | { | |
112 | struct uio_device *idev = dev_get_drvdata(dev); | |
113 | if (idev) | |
114 | return sprintf(buf, "%s\n", idev->info->version); | |
115 | else | |
116 | return -ENODEV; | |
117 | } | |
118 | static DEVICE_ATTR(version, S_IRUGO, show_version, NULL); | |
119 | ||
120 | static ssize_t show_event(struct device *dev, | |
121 | struct device_attribute *attr, char *buf) | |
122 | { | |
123 | struct uio_device *idev = dev_get_drvdata(dev); | |
124 | if (idev) | |
125 | return sprintf(buf, "%u\n", | |
126 | (unsigned int)atomic_read(&idev->event)); | |
127 | else | |
128 | return -ENODEV; | |
129 | } | |
130 | static DEVICE_ATTR(event, S_IRUGO, show_event, NULL); | |
131 | ||
/* Attribute group (name/version/event) attached to every uio class device. */
static struct attribute *uio_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_version.attr,
	&dev_attr_event.attr,
	NULL,
};

static struct attribute_group uio_attr_grp = {
	.attrs = uio_attrs,
};
142 | ||
143 | /* | |
144 | * device functions | |
145 | */ | |
146 | static int uio_dev_add_attributes(struct uio_device *idev) | |
147 | { | |
148 | int ret; | |
149 | int mi; | |
150 | int map_found = 0; | |
151 | struct uio_mem *mem; | |
81e7c6a6 | 152 | struct uio_map *map; |
beafc54c HK |
153 | |
154 | ret = sysfs_create_group(&idev->dev->kobj, &uio_attr_grp); | |
155 | if (ret) | |
156 | goto err_group; | |
157 | ||
158 | for (mi = 0; mi < MAX_UIO_MAPS; mi++) { | |
159 | mem = &idev->info->mem[mi]; | |
160 | if (mem->size == 0) | |
161 | break; | |
162 | if (!map_found) { | |
163 | map_found = 1; | |
81e7c6a6 GKH |
164 | idev->map_dir = kobject_create_and_add("maps", |
165 | &idev->dev->kobj); | |
166 | if (!idev->map_dir) | |
167 | goto err; | |
beafc54c | 168 | } |
81e7c6a6 GKH |
169 | map = kzalloc(sizeof(*map), GFP_KERNEL); |
170 | if (!map) | |
171 | goto err; | |
f9cb074b | 172 | kobject_init(&map->kobj, &map_attr_type); |
81e7c6a6 GKH |
173 | map->mem = mem; |
174 | mem->map = map; | |
b2d6db58 | 175 | ret = kobject_add(&map->kobj, idev->map_dir, "map%d", mi); |
81e7c6a6 GKH |
176 | if (ret) |
177 | goto err; | |
178 | ret = kobject_uevent(&map->kobj, KOBJ_ADD); | |
beafc54c | 179 | if (ret) |
81e7c6a6 | 180 | goto err; |
beafc54c HK |
181 | } |
182 | ||
183 | return 0; | |
184 | ||
81e7c6a6 | 185 | err: |
beafc54c HK |
186 | for (mi--; mi>=0; mi--) { |
187 | mem = &idev->info->mem[mi]; | |
81e7c6a6 | 188 | map = mem->map; |
c10997f6 | 189 | kobject_put(&map->kobj); |
beafc54c | 190 | } |
c10997f6 | 191 | kobject_put(idev->map_dir); |
beafc54c HK |
192 | sysfs_remove_group(&idev->dev->kobj, &uio_attr_grp); |
193 | err_group: | |
194 | dev_err(idev->dev, "error creating sysfs files (%d)\n", ret); | |
195 | return ret; | |
196 | } | |
197 | ||
/*
 * Tear down everything uio_dev_add_attributes() created: drop each map's
 * kobject (map_release() frees the uio_map), then the "maps" directory,
 * then the name/version/event attribute group.
 */
static void uio_dev_del_attributes(struct uio_device *idev)
{
	int mi;
	struct uio_mem *mem;
	for (mi = 0; mi < MAX_UIO_MAPS; mi++) {
		mem = &idev->info->mem[mi];
		if (mem->size == 0)	/* same list-terminator rule as add */
			break;
		kobject_put(&mem->map->kobj);
	}
	kobject_put(idev->map_dir);
	sysfs_remove_group(&idev->dev->kobj, &uio_attr_grp);
}
211 | ||
/*
 * Allocate a minor number for idev via the (pre-preload-era) two-step idr
 * API and store it in idev->minor.  Returns 0 on success, negative errno
 * on failure; -EAGAIN from idr_get_new (pre-get raced away) is reported
 * as -ENOMEM.
 *
 * NOTE(review): the id is truncated with MAX_ID_MASK but never checked
 * against UIO_MAX_DEVICES, so ids beyond the mask could alias — presumably
 * never hit in practice; verify if the device count can grow.
 */
static int uio_get_minor(struct uio_device *idev)
{
	static DEFINE_MUTEX(minor_lock);	/* serialises idr_pre_get + idr_get_new */
	int retval = -ENOMEM;
	int id;

	mutex_lock(&minor_lock);
	if (idr_pre_get(&uio_idr, GFP_KERNEL) == 0)
		goto exit;

	retval = idr_get_new(&uio_idr, idev, &id);
	if (retval < 0) {
		if (retval == -EAGAIN)
			retval = -ENOMEM;
		goto exit;
	}
	idev->minor = id & MAX_ID_MASK;
exit:
	mutex_unlock(&minor_lock);
	return retval;
}
233 | ||
234 | static void uio_free_minor(struct uio_device *idev) | |
235 | { | |
236 | idr_remove(&uio_idr, idev->minor); | |
237 | } | |
238 | ||
239 | /** | |
240 | * uio_event_notify - trigger an interrupt event | |
241 | * @info: UIO device capabilities | |
242 | */ | |
243 | void uio_event_notify(struct uio_info *info) | |
244 | { | |
245 | struct uio_device *idev = info->uio_dev; | |
246 | ||
247 | atomic_inc(&idev->event); | |
248 | wake_up_interruptible(&idev->wait); | |
249 | kill_fasync(&idev->async_queue, SIGIO, POLL_IN); | |
250 | } | |
251 | EXPORT_SYMBOL_GPL(uio_event_notify); | |
252 | ||
253 | /** | |
254 | * uio_interrupt - hardware interrupt handler | |
255 | * @irq: IRQ number, can be UIO_IRQ_CYCLIC for cyclic timer | |
256 | * @dev_id: Pointer to the devices uio_device structure | |
257 | */ | |
258 | static irqreturn_t uio_interrupt(int irq, void *dev_id) | |
259 | { | |
260 | struct uio_device *idev = (struct uio_device *)dev_id; | |
261 | irqreturn_t ret = idev->info->handler(irq, idev->info); | |
262 | ||
263 | if (ret == IRQ_HANDLED) | |
264 | uio_event_notify(idev->info); | |
265 | ||
266 | return ret; | |
267 | } | |
268 | ||
/*
 * Per-open-file state: which device this fd refers to and the last event
 * count this listener has consumed (compared against idev->event).
 */
struct uio_listener {
	struct uio_device *dev;
	s32 event_count;
};
273 | ||
274 | static int uio_open(struct inode *inode, struct file *filep) | |
275 | { | |
276 | struct uio_device *idev; | |
277 | struct uio_listener *listener; | |
278 | int ret = 0; | |
279 | ||
280 | idev = idr_find(&uio_idr, iminor(inode)); | |
281 | if (!idev) | |
282 | return -ENODEV; | |
283 | ||
284 | listener = kmalloc(sizeof(*listener), GFP_KERNEL); | |
285 | if (!listener) | |
286 | return -ENOMEM; | |
287 | ||
288 | listener->dev = idev; | |
289 | listener->event_count = atomic_read(&idev->event); | |
290 | filep->private_data = listener; | |
291 | ||
292 | if (idev->info->open) { | |
293 | if (!try_module_get(idev->owner)) | |
294 | return -ENODEV; | |
295 | ret = idev->info->open(idev->info, inode); | |
296 | module_put(idev->owner); | |
297 | } | |
298 | ||
299 | if (ret) | |
300 | kfree(listener); | |
301 | ||
302 | return ret; | |
303 | } | |
304 | ||
305 | static int uio_fasync(int fd, struct file *filep, int on) | |
306 | { | |
307 | struct uio_listener *listener = filep->private_data; | |
308 | struct uio_device *idev = listener->dev; | |
309 | ||
310 | return fasync_helper(fd, filep, on, &idev->async_queue); | |
311 | } | |
312 | ||
313 | static int uio_release(struct inode *inode, struct file *filep) | |
314 | { | |
315 | int ret = 0; | |
316 | struct uio_listener *listener = filep->private_data; | |
317 | struct uio_device *idev = listener->dev; | |
318 | ||
319 | if (idev->info->release) { | |
320 | if (!try_module_get(idev->owner)) | |
321 | return -ENODEV; | |
322 | ret = idev->info->release(idev->info, inode); | |
323 | module_put(idev->owner); | |
324 | } | |
325 | if (filep->f_flags & FASYNC) | |
326 | ret = uio_fasync(-1, filep, 0); | |
327 | kfree(listener); | |
328 | return ret; | |
329 | } | |
330 | ||
331 | static unsigned int uio_poll(struct file *filep, poll_table *wait) | |
332 | { | |
333 | struct uio_listener *listener = filep->private_data; | |
334 | struct uio_device *idev = listener->dev; | |
335 | ||
336 | if (idev->info->irq == UIO_IRQ_NONE) | |
337 | return -EIO; | |
338 | ||
339 | poll_wait(filep, &idev->wait, wait); | |
340 | if (listener->event_count != atomic_read(&idev->event)) | |
341 | return POLLIN | POLLRDNORM; | |
342 | return 0; | |
343 | } | |
344 | ||
/*
 * read() handler: blocking read of the interrupt event counter.
 * Userspace must read exactly one s32 — the current event count.  Blocks
 * until the count differs from what this listener last saw, unless
 * O_NONBLOCK is set.  Uses the classic add_wait_queue/set_current_state/
 * schedule loop; the statement order is significant to avoid missing a
 * wakeup between the counter check and sleeping.
 */
static ssize_t uio_read(struct file *filep, char __user *buf,
			size_t count, loff_t *ppos)
{
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t retval;
	s32 event_count;

	/* devices without an interrupt source cannot produce events */
	if (idev->info->irq == UIO_IRQ_NONE)
		return -EIO;

	/* the protocol is exactly one 32-bit counter per read */
	if (count != sizeof(s32))
		return -EINVAL;

	add_wait_queue(&idev->wait, &wait);

	do {
		/* must go INTERRUPTIBLE before re-checking the counter */
		set_current_state(TASK_INTERRUPTIBLE);

		event_count = atomic_read(&idev->event);
		if (event_count != listener->event_count) {
			if (copy_to_user(buf, &event_count, count))
				retval = -EFAULT;
			else {
				/* mark these events as consumed by this fd */
				listener->event_count = event_count;
				retval = count;
			}
			break;
		}

		if (filep->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		schedule();
	} while (1);

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&idev->wait, &wait);

	return retval;
}
393 | ||
394 | static int uio_find_mem_index(struct vm_area_struct *vma) | |
395 | { | |
396 | int mi; | |
397 | struct uio_device *idev = vma->vm_private_data; | |
398 | ||
399 | for (mi = 0; mi < MAX_UIO_MAPS; mi++) { | |
400 | if (idev->info->mem[mi].size == 0) | |
401 | return -1; | |
402 | if (vma->vm_pgoff == mi) | |
403 | return mi; | |
404 | } | |
405 | return -1; | |
406 | } | |
407 | ||
408 | static void uio_vma_open(struct vm_area_struct *vma) | |
409 | { | |
410 | struct uio_device *idev = vma->vm_private_data; | |
411 | idev->vma_count++; | |
412 | } | |
413 | ||
414 | static void uio_vma_close(struct vm_area_struct *vma) | |
415 | { | |
416 | struct uio_device *idev = vma->vm_private_data; | |
417 | idev->vma_count--; | |
418 | } | |
419 | ||
/*
 * Fault handler (old ->nopage API) for UIO_MEM_LOGICAL/UIO_MEM_VIRTUAL
 * mappings: resolves the region's kernel address to a struct page and
 * takes a reference on it.
 *
 * NOTE(review): the faulting 'address' is never used — every fault in the
 * VMA resolves to the page at mem[mi].addr, so multi-page logical/virtual
 * regions would map the same page repeatedly.  Presumably only one-page
 * regions were used with this path; confirm before relying on larger ones.
 */
static struct page *uio_vma_nopage(struct vm_area_struct *vma,
				unsigned long address, int *type)
{
	struct uio_device *idev = vma->vm_private_data;
	struct page* page = NOPAGE_SIGBUS;	/* default: invalid access */

	int mi = uio_find_mem_index(vma);
	if (mi < 0)
		return page;

	/* LOGICAL = kmalloc'd (direct-mapped); otherwise vmalloc'd */
	if (idev->info->mem[mi].memtype == UIO_MEM_LOGICAL)
		page = virt_to_page(idev->info->mem[mi].addr);
	else
		page = vmalloc_to_page((void*)idev->info->mem[mi].addr);
	get_page(page);		/* reference handed to the fault machinery */
	if (type)
		*type = VM_FAULT_MINOR;
	return page;
}
439 | ||
/* VM ops for logical/virtual mappings (physical uses remap_pfn_range). */
static struct vm_operations_struct uio_vm_ops = {
	.open = uio_vma_open,
	.close = uio_vma_close,
	.nopage = uio_vma_nopage,
};
445 | ||
446 | static int uio_mmap_physical(struct vm_area_struct *vma) | |
447 | { | |
448 | struct uio_device *idev = vma->vm_private_data; | |
449 | int mi = uio_find_mem_index(vma); | |
450 | if (mi < 0) | |
451 | return -EINVAL; | |
452 | ||
453 | vma->vm_flags |= VM_IO | VM_RESERVED; | |
454 | ||
455 | return remap_pfn_range(vma, | |
456 | vma->vm_start, | |
457 | idev->info->mem[mi].addr >> PAGE_SHIFT, | |
458 | vma->vm_end - vma->vm_start, | |
459 | vma->vm_page_prot); | |
460 | } | |
461 | ||
462 | static int uio_mmap_logical(struct vm_area_struct *vma) | |
463 | { | |
464 | vma->vm_flags |= VM_RESERVED; | |
465 | vma->vm_ops = &uio_vm_ops; | |
466 | uio_vma_open(vma); | |
467 | return 0; | |
468 | } | |
469 | ||
470 | static int uio_mmap(struct file *filep, struct vm_area_struct *vma) | |
471 | { | |
472 | struct uio_listener *listener = filep->private_data; | |
473 | struct uio_device *idev = listener->dev; | |
474 | int mi; | |
475 | unsigned long requested_pages, actual_pages; | |
476 | int ret = 0; | |
477 | ||
478 | if (vma->vm_end < vma->vm_start) | |
479 | return -EINVAL; | |
480 | ||
481 | vma->vm_private_data = idev; | |
482 | ||
483 | mi = uio_find_mem_index(vma); | |
484 | if (mi < 0) | |
485 | return -EINVAL; | |
486 | ||
487 | requested_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; | |
488 | actual_pages = (idev->info->mem[mi].size + PAGE_SIZE -1) >> PAGE_SHIFT; | |
489 | if (requested_pages > actual_pages) | |
490 | return -EINVAL; | |
491 | ||
492 | if (idev->info->mmap) { | |
493 | if (!try_module_get(idev->owner)) | |
494 | return -ENODEV; | |
495 | ret = idev->info->mmap(idev->info, vma); | |
496 | module_put(idev->owner); | |
497 | return ret; | |
498 | } | |
499 | ||
500 | switch (idev->info->mem[mi].memtype) { | |
501 | case UIO_MEM_PHYS: | |
502 | return uio_mmap_physical(vma); | |
503 | case UIO_MEM_LOGICAL: | |
504 | case UIO_MEM_VIRTUAL: | |
505 | return uio_mmap_logical(vma); | |
506 | default: | |
507 | return -EINVAL; | |
508 | } | |
509 | } | |
510 | ||
/* Character-device entry points for /dev/uioN. */
static struct file_operations uio_fops = {
	.owner		= THIS_MODULE,
	.open		= uio_open,
	.release	= uio_release,
	.read		= uio_read,	/* event counter reads */
	.mmap		= uio_mmap,	/* memory region mappings */
	.poll		= uio_poll,
	.fasync		= uio_fasync,
};
520 | ||
521 | static int uio_major_init(void) | |
522 | { | |
523 | uio_major = register_chrdev(0, "uio", &uio_fops); | |
524 | if (uio_major < 0) | |
525 | return uio_major; | |
526 | return 0; | |
527 | } | |
528 | ||
529 | static void uio_major_cleanup(void) | |
530 | { | |
531 | unregister_chrdev(uio_major, "uio"); | |
532 | } | |
533 | ||
534 | static int init_uio_class(void) | |
535 | { | |
536 | int ret = 0; | |
537 | ||
538 | if (uio_class != NULL) { | |
539 | kref_get(&uio_class->kref); | |
540 | goto exit; | |
541 | } | |
542 | ||
543 | /* This is the first time in here, set everything up properly */ | |
544 | ret = uio_major_init(); | |
545 | if (ret) | |
546 | goto exit; | |
547 | ||
548 | uio_class = kzalloc(sizeof(*uio_class), GFP_KERNEL); | |
549 | if (!uio_class) { | |
550 | ret = -ENOMEM; | |
551 | goto err_kzalloc; | |
552 | } | |
553 | ||
554 | kref_init(&uio_class->kref); | |
555 | uio_class->class = class_create(THIS_MODULE, "uio"); | |
556 | if (IS_ERR(uio_class->class)) { | |
557 | ret = IS_ERR(uio_class->class); | |
558 | printk(KERN_ERR "class_create failed for uio\n"); | |
559 | goto err_class_create; | |
560 | } | |
561 | return 0; | |
562 | ||
563 | err_class_create: | |
564 | kfree(uio_class); | |
565 | uio_class = NULL; | |
566 | err_kzalloc: | |
567 | uio_major_cleanup(); | |
568 | exit: | |
569 | return ret; | |
570 | } | |
571 | ||
572 | static void release_uio_class(struct kref *kref) | |
573 | { | |
574 | /* Ok, we cheat as we know we only have one uio_class */ | |
575 | class_destroy(uio_class->class); | |
576 | kfree(uio_class); | |
577 | uio_major_cleanup(); | |
578 | uio_class = NULL; | |
579 | } | |
580 | ||
581 | static void uio_class_destroy(void) | |
582 | { | |
583 | if (uio_class) | |
584 | kref_put(&uio_class->kref, release_uio_class); | |
585 | } | |
586 | ||
/**
 * uio_register_device - register a new userspace IO device
 * @owner: module that creates the new device
 * @parent: parent device
 * @info: UIO device capabilities
 *
 * returns zero on success or a negative error code.
 *
 * Setup order: class ref -> uio_device alloc -> minor -> class device ->
 * sysfs attributes -> irq.  The error labels unwind in exactly the
 * reverse order; keep them matched if adding steps.
 */
int __uio_register_device(struct module *owner,
			  struct device *parent,
			  struct uio_info *info)
{
	struct uio_device *idev;
	int ret = 0;

	if (!parent || !info || !info->name || !info->version)
		return -EINVAL;

	/* cleared so a failed registration leaves no dangling back-pointer */
	info->uio_dev = NULL;

	ret = init_uio_class();
	if (ret)
		return ret;

	idev = kzalloc(sizeof(*idev), GFP_KERNEL);
	if (!idev) {
		ret = -ENOMEM;
		goto err_kzalloc;
	}

	idev->owner = owner;
	idev->info = info;
	init_waitqueue_head(&idev->wait);
	atomic_set(&idev->event, 0);

	ret = uio_get_minor(idev);
	if (ret)
		goto err_get_minor;

	/* creates /dev/uio<minor> via the class device */
	idev->dev = device_create(uio_class->class, parent,
				  MKDEV(uio_major, idev->minor),
				  "uio%d", idev->minor);
	if (IS_ERR(idev->dev)) {
		printk(KERN_ERR "UIO: device register failed\n");
		ret = PTR_ERR(idev->dev);
		goto err_device_create;
	}
	dev_set_drvdata(idev->dev, idev);

	ret = uio_dev_add_attributes(idev);
	if (ret)
		goto err_uio_dev_add_attributes;

	/* from here on uio_event_notify() can resolve info -> idev */
	info->uio_dev = idev;

	/* negative irq values (e.g. UIO_IRQ_NONE) mean "no interrupt" */
	if (idev->info->irq >= 0) {
		ret = request_irq(idev->info->irq, uio_interrupt,
				  idev->info->irq_flags, idev->info->name, idev);
		if (ret)
			goto err_request_irq;
	}

	return 0;

err_request_irq:
	uio_dev_del_attributes(idev);
err_uio_dev_add_attributes:
	device_destroy(uio_class->class, MKDEV(uio_major, idev->minor));
err_device_create:
	uio_free_minor(idev);
err_get_minor:
	kfree(idev);
err_kzalloc:
	uio_class_destroy();
	return ret;
}
EXPORT_SYMBOL_GPL(__uio_register_device);
664 | ||
/**
 * uio_unregister_device - unregister a industrial IO device
 * @info: UIO device capabilities
 *
 * Tears down everything __uio_register_device() created, in roughly
 * reverse order.
 *
 * NOTE(review): the minor is removed from the idr before the irq is
 * freed and attributes are removed — presumably to block new uio_open()
 * lookups first; an open() racing with this teardown is not otherwise
 * synchronised here, so verify callers quiesce users before calling.
 */
void uio_unregister_device(struct uio_info *info)
{
	struct uio_device *idev;

	if (!info || !info->uio_dev)
		return;

	idev = info->uio_dev;

	uio_free_minor(idev);

	/* matches the irq >= 0 condition in __uio_register_device() */
	if (info->irq >= 0)
		free_irq(info->irq, idev);

	uio_dev_del_attributes(idev);

	dev_set_drvdata(idev->dev, NULL);
	device_destroy(uio_class->class, MKDEV(uio_major, idev->minor));
	kfree(idev);
	uio_class_destroy();	/* may free the shared class on last device */

	return;
}
EXPORT_SYMBOL_GPL(uio_unregister_device);
694 | ||
/*
 * Module init/exit are intentionally empty: all real setup is deferred to
 * the first __uio_register_device() call (init_uio_class()) and torn down
 * when the last device unregisters.
 */
static int __init uio_init(void)
{
	return 0;
}

static void __exit uio_exit(void)
{
}

module_init(uio_init)
module_exit(uio_exit)
MODULE_LICENSE("GPL v2");