/*
 * cdev.c - Application interfacing module for character devices
 *
 * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This file is licensed under GPLv2.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/poll.h>
#include <linux/kfifo.h>
#include <linux/uaccess.h>
#include <linux/idr.h>
#include "mostcore.h"
27 static dev_t aim_devno;
28 static struct class *aim_class;
29 static struct ida minor_id;
30 static unsigned int major;
31 static struct most_aim cdev_aim;
37 struct mutex io_mutex;
38 struct most_interface *iface;
39 struct most_channel_config *cfg;
40 unsigned int channel_id;
43 struct mbo *stacked_mbo;
44 DECLARE_KFIFO_PTR(fifo, typeof(struct mbo *));
46 struct list_head list;
49 #define to_channel(d) container_of(d, struct aim_channel, cdev)
50 static struct list_head channel_list;
51 static spinlock_t ch_list_lock;
53 static struct aim_channel *get_channel(struct most_interface *iface, int id)
55 struct aim_channel *channel, *tmp;
57 int found_channel = 0;
59 spin_lock_irqsave(&ch_list_lock, flags);
60 list_for_each_entry_safe(channel, tmp, &channel_list, list) {
61 if ((channel->iface == iface) && (channel->channel_id == id)) {
66 spin_unlock_irqrestore(&ch_list_lock, flags);
73 * aim_open - implements the syscall to open the device
74 * @inode: inode pointer
77 * This stores the channel pointer in the private data field of
78 * the file structure and activates the channel within the core.
80 static int aim_open(struct inode *inode, struct file *filp)
82 struct aim_channel *channel;
85 channel = to_channel(inode->i_cdev);
86 filp->private_data = channel;
88 if (((channel->cfg->direction == MOST_CH_RX) &&
89 ((filp->f_flags & O_ACCMODE) != O_RDONLY)) ||
90 ((channel->cfg->direction == MOST_CH_TX) &&
91 ((filp->f_flags & O_ACCMODE) != O_WRONLY))) {
92 pr_info("WARN: Access flags mismatch\n");
95 if (!atomic_inc_and_test(&channel->access_ref)) {
96 pr_info("WARN: Device is busy\n");
97 atomic_dec(&channel->access_ref);
101 ret = most_start_channel(channel->iface, channel->channel_id,
104 atomic_dec(&channel->access_ref);
109 * aim_close - implements the syscall to close the device
110 * @inode: inode pointer
111 * @filp: file pointer
113 * This stops the channel within the core.
115 static int aim_close(struct inode *inode, struct file *filp)
119 struct aim_channel *channel = to_channel(inode->i_cdev);
121 mutex_lock(&channel->io_mutex);
123 mutex_unlock(&channel->io_mutex);
124 atomic_dec(&channel->access_ref);
125 device_destroy(aim_class, channel->devno);
126 cdev_del(&channel->cdev);
127 kfifo_free(&channel->fifo);
128 list_del(&channel->list);
129 ida_simple_remove(&minor_id, MINOR(channel->devno));
130 wake_up_interruptible(&channel->wq);
134 mutex_unlock(&channel->io_mutex);
136 while (kfifo_out((struct kfifo *)&channel->fifo, &mbo, 1))
138 if (channel->stacked_mbo)
139 most_put_mbo(channel->stacked_mbo);
140 ret = most_stop_channel(channel->iface, channel->channel_id, &cdev_aim);
141 atomic_dec(&channel->access_ref);
142 wake_up_interruptible(&channel->wq);
147 * aim_write - implements the syscall to write to the device
148 * @filp: file pointer
149 * @buf: pointer to user buffer
150 * @count: number of bytes to write
151 * @offset: offset from where to start writing
153 static ssize_t aim_write(struct file *filp, const char __user *buf,
154 size_t count, loff_t *offset)
157 size_t actual_len = 0;
161 struct aim_channel *channel = filp->private_data;
163 mutex_lock(&channel->io_mutex);
164 if (unlikely(!channel->dev)) {
165 mutex_unlock(&channel->io_mutex);
168 mutex_unlock(&channel->io_mutex);
170 mbo = most_get_mbo(channel->iface, channel->channel_id, &cdev_aim);
173 if ((filp->f_flags & O_NONBLOCK))
175 if (wait_event_interruptible(
177 (mbo = most_get_mbo(channel->iface,
184 mutex_lock(&channel->io_mutex);
185 if (unlikely(!channel->dev)) {
186 mutex_unlock(&channel->io_mutex);
190 mutex_unlock(&channel->io_mutex);
192 max_len = channel->cfg->buffer_size;
193 actual_len = min(count, max_len);
194 mbo->buffer_length = actual_len;
196 retval = copy_from_user(mbo->virt_address, buf, mbo->buffer_length);
202 ret = most_submit_mbo(mbo);
204 pr_info("submitting MBO to core failed\n");
208 return actual_len - retval;
215 * aim_read - implements the syscall to read from the device
216 * @filp: file pointer
217 * @buf: pointer to user buffer
218 * @count: number of bytes to read
219 * @offset: offset from where to start reading
222 aim_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
224 size_t to_copy, not_copied, copied;
226 struct aim_channel *channel = filp->private_data;
228 if (channel->stacked_mbo) {
229 mbo = channel->stacked_mbo;
232 while ((!kfifo_out(&channel->fifo, &mbo, 1)) && (channel->dev)) {
233 if (filp->f_flags & O_NONBLOCK)
235 if (wait_event_interruptible(channel->wq,
236 (!kfifo_is_empty(&channel->fifo) ||
242 /* make sure we don't submit to gone devices */
243 mutex_lock(&channel->io_mutex);
244 if (unlikely(!channel->dev)) {
245 mutex_unlock(&channel->io_mutex);
249 to_copy = min_t(size_t,
251 mbo->processed_length - channel->mbo_offs);
253 not_copied = copy_to_user(buf,
254 mbo->virt_address + channel->mbo_offs,
257 copied = to_copy - not_copied;
259 if (count < mbo->processed_length) {
260 channel->mbo_offs = copied;
261 channel->stacked_mbo = mbo;
264 channel->mbo_offs = 0;
265 channel->stacked_mbo = NULL;
267 mutex_unlock(&channel->io_mutex);
271 static inline bool __must_check IS_ERR_OR_FALSE(int x)
276 static unsigned int aim_poll(struct file *filp, poll_table *wait)
278 struct aim_channel *c = filp->private_data;
279 unsigned int mask = 0;
281 poll_wait(filp, &c->wq, wait);
283 if (c->cfg->direction == MOST_CH_RX) {
284 if (!kfifo_is_empty(&c->fifo))
285 mask |= POLLIN | POLLRDNORM;
287 if (!IS_ERR_OR_FALSE(channel_has_mbo(c->iface, c->channel_id)))
288 mask |= POLLOUT | POLLWRNORM;
294 * Initialization of struct file_operations
296 static const struct file_operations channel_fops = {
297 .owner = THIS_MODULE,
301 .release = aim_close,
306 * aim_disconnect_channel - disconnect a channel
307 * @iface: pointer to interface instance
308 * @channel_id: channel index
310 * This frees allocated memory and removes the cdev that represents this
311 * channel in user space.
313 static int aim_disconnect_channel(struct most_interface *iface, int channel_id)
315 struct aim_channel *channel;
319 pr_info("Bad interface pointer\n");
323 channel = get_channel(iface, channel_id);
327 mutex_lock(&channel->io_mutex);
329 mutex_unlock(&channel->io_mutex);
331 if (atomic_read(&channel->access_ref)) {
332 device_destroy(aim_class, channel->devno);
333 cdev_del(&channel->cdev);
334 kfifo_free(&channel->fifo);
335 ida_simple_remove(&minor_id, MINOR(channel->devno));
336 spin_lock_irqsave(&ch_list_lock, flags);
337 list_del(&channel->list);
338 spin_unlock_irqrestore(&ch_list_lock, flags);
341 wake_up_interruptible(&channel->wq);
347 * aim_rx_completion - completion handler for rx channels
348 * @mbo: pointer to buffer object that has completed
350 * This searches for the channel linked to this MBO and stores it in the local
353 static int aim_rx_completion(struct mbo *mbo)
355 struct aim_channel *channel;
360 channel = get_channel(mbo->ifp, mbo->hdm_channel_id);
364 kfifo_in(&channel->fifo, &mbo, 1);
366 if (kfifo_is_full(&channel->fifo))
367 pr_info("WARN: Fifo is full\n");
369 wake_up_interruptible(&channel->wq);
374 * aim_tx_completion - completion handler for tx channels
375 * @iface: pointer to interface instance
376 * @channel_id: channel index/ID
378 * This wakes sleeping processes in the wait-queue.
380 static int aim_tx_completion(struct most_interface *iface, int channel_id)
382 struct aim_channel *channel;
385 pr_info("Bad interface pointer\n");
388 if ((channel_id < 0) || (channel_id >= iface->num_channels)) {
389 pr_info("Channel ID out of range\n");
393 channel = get_channel(iface, channel_id);
396 wake_up_interruptible(&channel->wq);
400 static struct most_aim cdev_aim;
403 * aim_probe - probe function of the driver module
404 * @iface: pointer to interface instance
405 * @channel_id: channel index/ID
406 * @cfg: pointer to actual channel configuration
407 * @parent: pointer to kobject (needed for sysfs hook-up)
408 * @name: name of the device to be created
410 * This allocates achannel object and creates the device node in /dev
412 * Returns 0 on success or error code otherwise.
414 static int aim_probe(struct most_interface *iface, int channel_id,
415 struct most_channel_config *cfg,
416 struct kobject *parent, char *name)
418 struct aim_channel *channel;
419 unsigned long cl_flags;
423 if ((!iface) || (!cfg) || (!parent) || (!name)) {
424 pr_info("Probing AIM with bad arguments");
427 channel = get_channel(iface, channel_id);
431 current_minor = ida_simple_get(&minor_id, 0, 0, GFP_KERNEL);
432 if (current_minor < 0)
433 return current_minor;
435 channel = kzalloc(sizeof(*channel), GFP_KERNEL);
438 goto error_alloc_channel;
441 channel->devno = MKDEV(major, current_minor);
442 cdev_init(&channel->cdev, &channel_fops);
443 channel->cdev.owner = THIS_MODULE;
444 cdev_add(&channel->cdev, channel->devno, 1);
445 channel->iface = iface;
447 channel->channel_id = channel_id;
448 channel->mbo_offs = 0;
449 atomic_set(&channel->access_ref, -1);
450 INIT_KFIFO(channel->fifo);
451 retval = kfifo_alloc(&channel->fifo, cfg->num_buffers, GFP_KERNEL);
453 pr_info("failed to alloc channel kfifo");
454 goto error_alloc_kfifo;
456 init_waitqueue_head(&channel->wq);
457 mutex_init(&channel->io_mutex);
458 spin_lock_irqsave(&ch_list_lock, cl_flags);
459 list_add_tail(&channel->list, &channel_list);
460 spin_unlock_irqrestore(&ch_list_lock, cl_flags);
461 channel->dev = device_create(aim_class,
467 retval = IS_ERR(channel->dev);
469 pr_info("failed to create new device node %s\n", name);
470 goto error_create_device;
472 kobject_uevent(&channel->dev->kobj, KOBJ_ADD);
476 kfifo_free(&channel->fifo);
477 list_del(&channel->list);
479 cdev_del(&channel->cdev);
482 ida_simple_remove(&minor_id, current_minor);
486 static struct most_aim cdev_aim = {
488 .probe_channel = aim_probe,
489 .disconnect_channel = aim_disconnect_channel,
490 .rx_completion = aim_rx_completion,
491 .tx_completion = aim_tx_completion,
494 static int __init mod_init(void)
498 INIT_LIST_HEAD(&channel_list);
499 spin_lock_init(&ch_list_lock);
502 if (alloc_chrdev_region(&aim_devno, 0, 50, "cdev") < 0)
504 major = MAJOR(aim_devno);
506 aim_class = class_create(THIS_MODULE, "most_cdev_aim");
507 if (IS_ERR(aim_class)) {
508 pr_err("no udev support\n");
512 if (most_register_aim(&cdev_aim))
517 class_destroy(aim_class);
519 unregister_chrdev_region(aim_devno, 1);
523 static void __exit mod_exit(void)
525 struct aim_channel *channel, *tmp;
527 pr_info("exit module\n");
529 most_deregister_aim(&cdev_aim);
531 list_for_each_entry_safe(channel, tmp, &channel_list, list) {
532 device_destroy(aim_class, channel->devno);
533 cdev_del(&channel->cdev);
534 kfifo_free(&channel->fifo);
535 list_del(&channel->list);
536 ida_simple_remove(&minor_id, MINOR(channel->devno));
539 class_destroy(aim_class);
540 unregister_chrdev_region(aim_devno, 1);
541 ida_destroy(&minor_id);
544 module_init(mod_init);
545 module_exit(mod_exit);
546 MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
547 MODULE_LICENSE("GPL");
548 MODULE_DESCRIPTION("character device AIM for mostcore");