1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 2006-2009 Nokia Corporation. All rights reserved.
6 * Copyright (C) 2013-2021 Texas Instruments Incorporated - https://www.ti.com
8 * Contact: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
9 * Suman Anna <s-anna@ti.com>
12 #include <linux/interrupt.h>
13 #include <linux/spinlock.h>
14 #include <linux/mutex.h>
15 #include <linux/slab.h>
16 #include <linux/kfifo.h>
17 #include <linux/err.h>
18 #include <linux/module.h>
20 #include <linux/platform_device.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/omap-mailbox.h>
23 #include <linux/mailbox_controller.h>
24 #include <linux/mailbox_client.h>
/* Mailbox h/w register offsets, relative to the ioremapped base */
#define MAILBOX_REVISION		0x000
#define MAILBOX_MESSAGE(m)		(0x040 + 4 * (m))	/* fifo m data */
#define MAILBOX_FIFOSTATUS(m)		(0x080 + 4 * (m))	/* non-zero: fifo m full */
#define MAILBOX_MSGSTATUS(m)		(0x0c0 + 4 * (m))	/* unread count in fifo m */

/* Per-user interrupt registers, OMAP2/3 layout (8-byte stride per user) */
#define OMAP2_MAILBOX_IRQSTATUS(u)	(0x100 + 8 * (u))
#define OMAP2_MAILBOX_IRQENABLE(u)	(0x104 + 8 * (u))

/* Per-user interrupt registers, OMAP4+ layout (16-byte stride, set/clr pair) */
#define OMAP4_MAILBOX_IRQSTATUS(u)	(0x104 + 0x10 * (u))
#define OMAP4_MAILBOX_IRQENABLE(u)	(0x108 + 0x10 * (u))
#define OMAP4_MAILBOX_IRQENABLE_CLR(u)	(0x10c + 0x10 * (u))

/* Pick the register layout from the interrupt configuration type below */
#define MAILBOX_IRQSTATUS(type, u)	(type ? OMAP4_MAILBOX_IRQSTATUS(u) : \
						OMAP2_MAILBOX_IRQSTATUS(u))
#define MAILBOX_IRQENABLE(type, u)	(type ? OMAP4_MAILBOX_IRQENABLE(u) : \
						OMAP2_MAILBOX_IRQENABLE(u))
#define MAILBOX_IRQDISABLE(type, u)	(type ? OMAP4_MAILBOX_IRQENABLE_CLR(u) \
						: OMAP2_MAILBOX_IRQENABLE(u))

/* Each fifo m owns two event bits: new-message and fifo-not-full */
#define MAILBOX_IRQ_NEWMSG(m)		(1 << (2 * (m)))
#define MAILBOX_IRQ_NOTFULL(m)		(1 << (2 * (m) + 1))

/* Interrupt register configuration types */
#define MBOX_INTR_CFG_TYPE1		0	/* used with omap2/omap3 match data below */
#define MBOX_INTR_CFG_TYPE2		1	/* used with omap4+ match data below */
/*
 * Register offsets describing one direction (tx or rx) of a mailbox.
 * NOTE(review): this view appears incomplete — the 'msg' and 'intr_bit'
 * members referenced elsewhere in this file are not visible here.
 */
struct omap_mbox_fifo {
	unsigned long fifo_stat;	/* MAILBOX_FIFOSTATUS offset */
	unsigned long msg_stat;		/* MAILBOX_MSGSTATUS offset */
	unsigned long irqenable;	/* per-user irq enable reg offset */
	unsigned long irqstatus;	/* per-user irq status reg offset */
	unsigned long irqdisable;	/* per-user irq disable reg offset */
/* Rx-side buffering: messages queued from irq context, drained by a work item */
struct omap_mbox_queue {
	struct work_struct work;	/* bottom half, see mbox_rx_work() */
	struct omap_mbox *mbox;		/* owning mailbox instance */
/* Per-compatible match data: selects the interrupt register layout type */
struct omap_mbox_match_data {
/* State for one mailbox IP instance; provides one mbox_controller */
struct omap_mbox_device {
	struct mutex cfg_lock;		/* serializes channel startup/shutdown */
	void __iomem *mbox_base;	/* ioremapped register base */
	struct omap_mbox **mboxes;	/* NULL-terminated array of sub-mailboxes */
	struct mbox_controller controller;
	struct list_head elem;		/* entry on the global omap_mbox_devices list */
/* Fifo/irq/user ids parsed from one DT child node (filled in probe) */
struct omap_mbox_fifo_info {
	/*
	 * NOTE(review): lines appear elided here — the members below
	 * (rxq/parent/fifos/chan) are used as fields of the per-mailbox
	 * state (struct omap_mbox) elsewhere in this file, not of
	 * omap_mbox_fifo_info; confirm against the full source.
	 */
	struct omap_mbox_queue *rxq;	/* rx buffering, drained by mbox_rx_work() */
	struct omap_mbox_device *parent;	/* containing device instance */
	struct omap_mbox_fifo tx_fifo;	/* register offsets, transmit side */
	struct omap_mbox_fifo rx_fifo;	/* register offsets, receive side */
	struct mbox_chan *chan;		/* framework channel bound to this mbox */
/* global variables for the mailbox devices */
static DEFINE_MUTEX(omap_mbox_devices_lock);	/* protects omap_mbox_devices */
static LIST_HEAD(omap_mbox_devices);		/* all registered mdev instances */

/* rx kfifo size in bytes; load-time settable, sysfs-readable (S_IRUGO) */
static unsigned int mbox_kfifo_size = CONFIG_OMAP_MBOX_KFIFO_SIZE;
module_param(mbox_kfifo_size, uint, S_IRUGO);
MODULE_PARM_DESC(mbox_kfifo_size, "Size of omap's mailbox kfifo (bytes)");
128 static struct omap_mbox *mbox_chan_to_omap_mbox(struct mbox_chan *chan)
130 if (!chan || !chan->con_priv)
133 return (struct omap_mbox *)chan->con_priv;
137 unsigned int mbox_read_reg(struct omap_mbox_device *mdev, size_t ofs)
139 return __raw_readl(mdev->mbox_base + ofs);
143 void mbox_write_reg(struct omap_mbox_device *mdev, u32 val, size_t ofs)
145 __raw_writel(val, mdev->mbox_base + ofs);
148 /* Mailbox FIFO handle functions */
149 static u32 mbox_fifo_read(struct omap_mbox *mbox)
151 struct omap_mbox_fifo *fifo = &mbox->rx_fifo;
153 return mbox_read_reg(mbox->parent, fifo->msg);
156 static void mbox_fifo_write(struct omap_mbox *mbox, u32 msg)
158 struct omap_mbox_fifo *fifo = &mbox->tx_fifo;
160 mbox_write_reg(mbox->parent, msg, fifo->msg);
163 static int mbox_fifo_empty(struct omap_mbox *mbox)
165 struct omap_mbox_fifo *fifo = &mbox->rx_fifo;
167 return (mbox_read_reg(mbox->parent, fifo->msg_stat) == 0);
170 static int mbox_fifo_full(struct omap_mbox *mbox)
172 struct omap_mbox_fifo *fifo = &mbox->tx_fifo;
174 return mbox_read_reg(mbox->parent, fifo->fifo_stat);
177 /* Mailbox IRQ handle functions */
178 static void ack_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
180 struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
181 &mbox->tx_fifo : &mbox->rx_fifo;
182 u32 bit = fifo->intr_bit;
183 u32 irqstatus = fifo->irqstatus;
185 mbox_write_reg(mbox->parent, bit, irqstatus);
187 /* Flush posted write for irq status to avoid spurious interrupts */
188 mbox_read_reg(mbox->parent, irqstatus);
191 static int is_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
193 struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
194 &mbox->tx_fifo : &mbox->rx_fifo;
195 u32 bit = fifo->intr_bit;
196 u32 irqenable = fifo->irqenable;
197 u32 irqstatus = fifo->irqstatus;
199 u32 enable = mbox_read_reg(mbox->parent, irqenable);
200 u32 status = mbox_read_reg(mbox->parent, irqstatus);
202 return (int)(enable & status & bit);
205 static void omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
208 struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
209 &mbox->tx_fifo : &mbox->rx_fifo;
210 u32 bit = fifo->intr_bit;
211 u32 irqenable = fifo->irqenable;
213 l = mbox_read_reg(mbox->parent, irqenable);
215 mbox_write_reg(mbox->parent, l, irqenable);
218 static void omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
220 struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
221 &mbox->tx_fifo : &mbox->rx_fifo;
222 u32 bit = fifo->intr_bit;
223 u32 irqdisable = fifo->irqdisable;
226 * Read and update the interrupt configuration register for pre-OMAP4.
227 * OMAP4 and later SoCs have a dedicated interrupt disabling register.
229 if (!mbox->intr_type)
230 bit = mbox_read_reg(mbox->parent, irqdisable) & ~bit;
232 mbox_write_reg(mbox->parent, bit, irqdisable);
236 * Message receiver(workqueue)
238 static void mbox_rx_work(struct work_struct *work)
240 struct omap_mbox_queue *mq =
241 container_of(work, struct omap_mbox_queue, work);
246 while (kfifo_len(&mq->fifo) >= sizeof(msg)) {
247 len = kfifo_out(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
248 WARN_ON(len != sizeof(msg));
251 mbox_chan_received_data(mq->mbox->chan, (void *)data);
252 spin_lock_irq(&mq->lock);
255 omap_mbox_enable_irq(mq->mbox, IRQ_RX);
257 spin_unlock_irq(&mq->lock);
262 * Mailbox interrupt handler
264 static void __mbox_tx_interrupt(struct omap_mbox *mbox)
266 omap_mbox_disable_irq(mbox, IRQ_TX);
267 ack_mbox_irq(mbox, IRQ_TX);
268 mbox_chan_txdone(mbox->chan, 0);
271 static void __mbox_rx_interrupt(struct omap_mbox *mbox)
273 struct omap_mbox_queue *mq = mbox->rxq;
277 while (!mbox_fifo_empty(mbox)) {
278 if (unlikely(kfifo_avail(&mq->fifo) < sizeof(msg))) {
279 omap_mbox_disable_irq(mbox, IRQ_RX);
284 msg = mbox_fifo_read(mbox);
286 len = kfifo_in(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
287 WARN_ON(len != sizeof(msg));
290 /* no more messages in the fifo. clear IRQ source. */
291 ack_mbox_irq(mbox, IRQ_RX);
293 schedule_work(&mbox->rxq->work);
296 static irqreturn_t mbox_interrupt(int irq, void *p)
298 struct omap_mbox *mbox = p;
300 if (is_mbox_irq(mbox, IRQ_TX))
301 __mbox_tx_interrupt(mbox);
303 if (is_mbox_irq(mbox, IRQ_RX))
304 __mbox_rx_interrupt(mbox);
309 static struct omap_mbox_queue *mbox_queue_alloc(struct omap_mbox *mbox,
310 void (*work)(struct work_struct *))
312 struct omap_mbox_queue *mq;
317 mq = kzalloc(sizeof(*mq), GFP_KERNEL);
321 spin_lock_init(&mq->lock);
323 if (kfifo_alloc(&mq->fifo, mbox_kfifo_size, GFP_KERNEL))
326 INIT_WORK(&mq->work, work);
334 static void mbox_queue_free(struct omap_mbox_queue *q)
336 kfifo_free(&q->fifo);
340 static int omap_mbox_startup(struct omap_mbox *mbox)
343 struct omap_mbox_queue *mq;
345 mq = mbox_queue_alloc(mbox, mbox_rx_work);
351 ret = request_irq(mbox->irq, mbox_interrupt, IRQF_SHARED,
354 pr_err("failed to register mailbox interrupt:%d\n", ret);
355 goto fail_request_irq;
358 if (mbox->send_no_irq)
359 mbox->chan->txdone_method = TXDONE_BY_ACK;
361 omap_mbox_enable_irq(mbox, IRQ_RX);
366 mbox_queue_free(mbox->rxq);
370 static void omap_mbox_fini(struct omap_mbox *mbox)
372 omap_mbox_disable_irq(mbox, IRQ_RX);
373 free_irq(mbox->irq, mbox);
374 flush_work(&mbox->rxq->work);
375 mbox_queue_free(mbox->rxq);
378 static struct omap_mbox *omap_mbox_device_find(struct omap_mbox_device *mdev,
379 const char *mbox_name)
381 struct omap_mbox *_mbox, *mbox = NULL;
382 struct omap_mbox **mboxes = mdev->mboxes;
388 for (i = 0; (_mbox = mboxes[i]); i++) {
389 if (!strcmp(_mbox->name, mbox_name)) {
/* sysfs class under which per-mailbox devices are created in register() */
static struct class omap_mbox_class = { .name = "mbox", };
399 static int omap_mbox_register(struct omap_mbox_device *mdev)
403 struct omap_mbox **mboxes;
405 if (!mdev || !mdev->mboxes)
408 mboxes = mdev->mboxes;
409 for (i = 0; mboxes[i]; i++) {
410 struct omap_mbox *mbox = mboxes[i];
412 mbox->dev = device_create(&omap_mbox_class, mdev->dev,
413 0, mbox, "%s", mbox->name);
414 if (IS_ERR(mbox->dev)) {
415 ret = PTR_ERR(mbox->dev);
420 mutex_lock(&omap_mbox_devices_lock);
421 list_add(&mdev->elem, &omap_mbox_devices);
422 mutex_unlock(&omap_mbox_devices_lock);
424 ret = devm_mbox_controller_register(mdev->dev, &mdev->controller);
429 device_unregister(mboxes[i]->dev);
434 static int omap_mbox_unregister(struct omap_mbox_device *mdev)
437 struct omap_mbox **mboxes;
439 if (!mdev || !mdev->mboxes)
442 mutex_lock(&omap_mbox_devices_lock);
443 list_del(&mdev->elem);
444 mutex_unlock(&omap_mbox_devices_lock);
446 mboxes = mdev->mboxes;
447 for (i = 0; mboxes[i]; i++)
448 device_unregister(mboxes[i]->dev);
452 static int omap_mbox_chan_startup(struct mbox_chan *chan)
454 struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
455 struct omap_mbox_device *mdev = mbox->parent;
458 mutex_lock(&mdev->cfg_lock);
459 pm_runtime_get_sync(mdev->dev);
460 ret = omap_mbox_startup(mbox);
462 pm_runtime_put_sync(mdev->dev);
463 mutex_unlock(&mdev->cfg_lock);
467 static void omap_mbox_chan_shutdown(struct mbox_chan *chan)
469 struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
470 struct omap_mbox_device *mdev = mbox->parent;
472 mutex_lock(&mdev->cfg_lock);
473 omap_mbox_fini(mbox);
474 pm_runtime_put_sync(mdev->dev);
475 mutex_unlock(&mdev->cfg_lock);
478 static int omap_mbox_chan_send_noirq(struct omap_mbox *mbox, u32 msg)
482 if (!mbox_fifo_full(mbox)) {
483 omap_mbox_enable_irq(mbox, IRQ_RX);
484 mbox_fifo_write(mbox, msg);
486 omap_mbox_disable_irq(mbox, IRQ_RX);
488 /* we must read and ack the interrupt directly from here */
489 mbox_fifo_read(mbox);
490 ack_mbox_irq(mbox, IRQ_RX);
496 static int omap_mbox_chan_send(struct omap_mbox *mbox, u32 msg)
500 if (!mbox_fifo_full(mbox)) {
501 mbox_fifo_write(mbox, msg);
505 /* always enable the interrupt */
506 omap_mbox_enable_irq(mbox, IRQ_TX);
510 static int omap_mbox_chan_send_data(struct mbox_chan *chan, void *data)
512 struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
514 u32 msg = omap_mbox_message(data);
519 if (mbox->send_no_irq)
520 ret = omap_mbox_chan_send_noirq(mbox, msg);
522 ret = omap_mbox_chan_send(mbox, msg);
/* Channel operations plugged into the mailbox framework controller */
static const struct mbox_chan_ops omap_mbox_chan_ops = {
	.startup = omap_mbox_chan_startup,
	.send_data = omap_mbox_chan_send_data,
	.shutdown = omap_mbox_chan_shutdown,
533 #ifdef CONFIG_PM_SLEEP
534 static int omap_mbox_suspend(struct device *dev)
536 struct omap_mbox_device *mdev = dev_get_drvdata(dev);
539 if (pm_runtime_status_suspended(dev))
542 for (fifo = 0; fifo < mdev->num_fifos; fifo++) {
543 if (mbox_read_reg(mdev, MAILBOX_MSGSTATUS(fifo))) {
544 dev_err(mdev->dev, "fifo %d has unexpected unread messages\n",
550 for (usr = 0; usr < mdev->num_users; usr++) {
551 reg = MAILBOX_IRQENABLE(mdev->intr_type, usr);
552 mdev->irq_ctx[usr] = mbox_read_reg(mdev, reg);
558 static int omap_mbox_resume(struct device *dev)
560 struct omap_mbox_device *mdev = dev_get_drvdata(dev);
563 if (pm_runtime_status_suspended(dev))
566 for (usr = 0; usr < mdev->num_users; usr++) {
567 reg = MAILBOX_IRQENABLE(mdev->intr_type, usr);
568 mbox_write_reg(mdev, mdev->irq_ctx[usr], reg);
/* System-sleep hooks only; runtime PM is driven via pm_runtime_* calls */
static const struct dev_pm_ops omap_mbox_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(omap_mbox_suspend, omap_mbox_resume)
/* OMAP2/3 use the TYPE1 interrupt register layout; OMAP4 and later, TYPE2 */
static const struct omap_mbox_match_data omap2_data = { MBOX_INTR_CFG_TYPE1 };
static const struct omap_mbox_match_data omap4_data = { MBOX_INTR_CFG_TYPE2 };

/*
 * NOTE(review): table entry braces/.data assignments appear elided in
 * this view; each compatible should carry its match data above.
 */
static const struct of_device_id omap_mailbox_of_match[] = {
	.compatible = "ti,omap2-mailbox",
	.compatible = "ti,omap3-mailbox",
	.compatible = "ti,omap4-mailbox",
	.compatible = "ti,am654-mailbox",
	.compatible = "ti,am64-mailbox",
MODULE_DEVICE_TABLE(of, omap_mailbox_of_match);
609 static struct mbox_chan *omap_mbox_of_xlate(struct mbox_controller *controller,
610 const struct of_phandle_args *sp)
612 phandle phandle = sp->args[0];
613 struct device_node *node;
614 struct omap_mbox_device *mdev;
615 struct omap_mbox *mbox;
617 mdev = container_of(controller, struct omap_mbox_device, controller);
619 return ERR_PTR(-EINVAL);
621 node = of_find_node_by_phandle(phandle);
623 pr_err("%s: could not find node phandle 0x%x\n",
625 return ERR_PTR(-ENODEV);
628 mbox = omap_mbox_device_find(mdev, node->name);
630 return mbox ? mbox->chan : ERR_PTR(-ENOENT);
/*
 * Probe: parse the DT node and its sub-mailbox children, map the register
 * space, build the omap_mbox / mbox_chan arrays and register the
 * controller plus per-mailbox sysfs devices.
 *
 * NOTE(review): many lines (braces, error returns, devm_kcalloc argument
 * continuations, loop-variable declarations) appear elided in this view
 * of the source; comments below describe only what is visible.
 */
static int omap_mbox_probe(struct platform_device *pdev)
	struct mbox_chan *chnls;
	struct omap_mbox **list, *mbox, *mboxblk;
	struct omap_mbox_fifo_info *finfo, *finfoblk;
	struct omap_mbox_device *mdev;
	struct omap_mbox_fifo *fifo;
	struct device_node *node = pdev->dev.of_node;
	struct device_node *child;
	const struct omap_mbox_match_data *match_data;
	u32 intr_type, info_count;
	u32 num_users, num_fifos;

	/* error path for a missing DT node (guard lines appear elided) */
	pr_err("%s: only DT-based devices are supported\n", __func__);

	/* interrupt register layout type comes from the compatible match */
	match_data = of_device_get_match_data(&pdev->dev);
	intr_type = match_data->intr_type;

	/* h/w capacity limits, used below to validate each child's ids */
	if (of_property_read_u32(node, "ti,mbox-num-users", &num_users))
	if (of_property_read_u32(node, "ti,mbox-num-fifos", &num_fifos))

	/* one sub-mailbox per available DT child node */
	info_count = of_get_available_child_count(node);
	dev_err(&pdev->dev, "no available mbox devices found\n");

	finfoblk = devm_kcalloc(&pdev->dev, info_count, sizeof(*finfoblk),

	/* first pass: parse fifo/irq/user ids and the name of each child */
	for (i = 0; i < info_count; i++, finfo++) {
		child = of_get_next_available_child(node, child);
		ret = of_property_read_u32_array(child, "ti,mbox-tx", tmp,
		finfo->tx_id = tmp[0];
		finfo->tx_irq = tmp[1];
		finfo->tx_usr = tmp[2];
		ret = of_property_read_u32_array(child, "ti,mbox-rx", tmp,
		finfo->rx_id = tmp[0];
		finfo->rx_irq = tmp[1];
		finfo->rx_usr = tmp[2];
		finfo->name = child->name;
		/* opt-in polled tx mode, see omap_mbox_chan_send_noirq() */
		finfo->send_no_irq = of_property_read_bool(child, "ti,mbox-send-noirq");
		/* reject ids beyond what this instance's h/w provides */
		if (finfo->tx_id >= num_fifos || finfo->rx_id >= num_fifos ||
		    finfo->tx_usr >= num_users || finfo->rx_usr >= num_users)
	mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
	mdev->mbox_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mdev->mbox_base))
		return PTR_ERR(mdev->mbox_base);
	/* per-user irq mask save area used by suspend/resume */
	mdev->irq_ctx = devm_kcalloc(&pdev->dev, num_users, sizeof(u32),
	/* allocate one extra for marking end of list */
	list = devm_kcalloc(&pdev->dev, info_count + 1, sizeof(*list),
	chnls = devm_kcalloc(&pdev->dev, info_count + 1, sizeof(*chnls),
	mboxblk = devm_kcalloc(&pdev->dev, info_count, sizeof(*mbox),
	/* second pass: translate parsed ids into register offsets per mbox */
	for (i = 0; i < info_count; i++, finfo++) {
		fifo = &mbox->tx_fifo;
		fifo->msg = MAILBOX_MESSAGE(finfo->tx_id);
		fifo->fifo_stat = MAILBOX_FIFOSTATUS(finfo->tx_id);
		fifo->intr_bit = MAILBOX_IRQ_NOTFULL(finfo->tx_id);
		fifo->irqenable = MAILBOX_IRQENABLE(intr_type, finfo->tx_usr);
		fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, finfo->tx_usr);
		fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, finfo->tx_usr);
		fifo = &mbox->rx_fifo;
		fifo->msg = MAILBOX_MESSAGE(finfo->rx_id);
		fifo->msg_stat = MAILBOX_MSGSTATUS(finfo->rx_id);
		fifo->intr_bit = MAILBOX_IRQ_NEWMSG(finfo->rx_id);
		fifo->irqenable = MAILBOX_IRQENABLE(intr_type, finfo->rx_usr);
		fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, finfo->rx_usr);
		fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, finfo->rx_usr);
		mbox->send_no_irq = finfo->send_no_irq;
		mbox->intr_type = intr_type;
		mbox->name = finfo->name;
		mbox->irq = platform_get_irq(pdev, finfo->tx_irq);
		/* bidirectional link between framework channel and mbox */
		mbox->chan = &chnls[i];
		chnls[i].con_priv = mbox;
	mutex_init(&mdev->cfg_lock);
	mdev->dev = &pdev->dev;
	mdev->num_users = num_users;
	mdev->num_fifos = num_fifos;
	mdev->intr_type = intr_type;
	/*
	 * OMAP/K3 Mailbox IP does not have a Tx-Done IRQ, but rather a Tx-Ready
	 * IRQ and is needed to run the Tx state machine
	 */
	mdev->controller.txdone_irq = true;
	mdev->controller.dev = mdev->dev;
	mdev->controller.ops = &omap_mbox_chan_ops;
	mdev->controller.chans = chnls;
	mdev->controller.num_chans = info_count;
	mdev->controller.of_xlate = omap_mbox_of_xlate;
	ret = omap_mbox_register(mdev);
	platform_set_drvdata(pdev, mdev);
	pm_runtime_enable(mdev->dev);
	ret = pm_runtime_resume_and_get(mdev->dev);
	/*
	 * just print the raw revision register, the format is not
	 * uniform across all SoCs
	 */
	l = mbox_read_reg(mdev, MAILBOX_REVISION);
	dev_info(mdev->dev, "omap mailbox rev 0x%x\n", l);
	ret = pm_runtime_put_sync(mdev->dev);
	if (ret < 0 && ret != -ENOSYS)
	/* parse-time info no longer needed once the controller is set up */
	devm_kfree(&pdev->dev, finfoblk);
	/* error unwind: disable runtime PM and drop registrations */
	pm_runtime_disable(mdev->dev);
	omap_mbox_unregister(mdev);
815 static void omap_mbox_remove(struct platform_device *pdev)
817 struct omap_mbox_device *mdev = platform_get_drvdata(pdev);
819 pm_runtime_disable(mdev->dev);
820 omap_mbox_unregister(mdev);
static struct platform_driver omap_mbox_driver = {
	.probe	= omap_mbox_probe,
	.remove_new = omap_mbox_remove,
	/* NOTE(review): the .driver = { ... } wrapper lines appear elided */
	.name = "omap-mailbox",
	.pm = &omap_mbox_pm_ops,
	.of_match_table = of_match_ptr(omap_mailbox_of_match),
833 static int __init omap_mbox_init(void)
837 err = class_register(&omap_mbox_class);
841 /* kfifo size sanity check: alignment and minimal size */
842 mbox_kfifo_size = ALIGN(mbox_kfifo_size, sizeof(u32));
843 mbox_kfifo_size = max_t(unsigned int, mbox_kfifo_size, sizeof(u32));
845 err = platform_driver_register(&omap_mbox_driver);
847 class_unregister(&omap_mbox_class);
851 subsys_initcall(omap_mbox_init);
853 static void __exit omap_mbox_exit(void)
855 platform_driver_unregister(&omap_mbox_driver);
856 class_unregister(&omap_mbox_class);
858 module_exit(omap_mbox_exit);
/* Module metadata */
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("omap mailbox: interrupt driven messaging");
MODULE_AUTHOR("Toshihiro Kobayashi");
MODULE_AUTHOR("Hiroshi DOYU");