4 * Copyright (C) 2006-2009 Nokia Corporation. All rights reserved.
5 * Copyright (C) 2013-2014 Texas Instruments Inc.
7 * Contact: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
8 * Suman Anna <s-anna@ti.com>
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * version 2 as published by the Free Software Foundation.
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
26 #include <linux/interrupt.h>
27 #include <linux/spinlock.h>
28 #include <linux/mutex.h>
29 #include <linux/slab.h>
30 #include <linux/kfifo.h>
31 #include <linux/err.h>
32 #include <linux/notifier.h>
33 #include <linux/module.h>
34 #include <linux/of_device.h>
35 #include <linux/platform_device.h>
36 #include <linux/pm_runtime.h>
37 #include <linux/platform_data/mailbox-omap.h>
38 #include <linux/omap-mailbox.h>
/* Base mailbox registers: IP revision plus per-fifo message/status registers */
40 #define MAILBOX_REVISION 0x000
41 #define MAILBOX_MESSAGE(m) (0x040 + 4 * (m))
42 #define MAILBOX_FIFOSTATUS(m) (0x080 + 4 * (m))
43 #define MAILBOX_MSGSTATUS(m) (0x0c0 + 4 * (m))
/* Per-user interrupt registers, OMAP2/3 layout (8-byte stride, no dedicated disable reg) */
45 #define OMAP2_MAILBOX_IRQSTATUS(u) (0x100 + 8 * (u))
46 #define OMAP2_MAILBOX_IRQENABLE(u) (0x104 + 8 * (u))
/* Per-user interrupt registers, OMAP4+ layout (0x10 stride, separate enable-clear reg) */
48 #define OMAP4_MAILBOX_IRQSTATUS(u) (0x104 + 0x10 * (u))
49 #define OMAP4_MAILBOX_IRQENABLE(u) (0x108 + 0x10 * (u))
50 #define OMAP4_MAILBOX_IRQENABLE_CLR(u) (0x10c + 0x10 * (u))
/* Pick the register offset by interrupt configuration type (0 = OMAP2/3, non-zero = OMAP4+) */
52 #define MAILBOX_IRQSTATUS(type, u) (type ? OMAP4_MAILBOX_IRQSTATUS(u) : \
53 OMAP2_MAILBOX_IRQSTATUS(u))
54 #define MAILBOX_IRQENABLE(type, u) (type ? OMAP4_MAILBOX_IRQENABLE(u) : \
55 OMAP2_MAILBOX_IRQENABLE(u))
56 #define MAILBOX_IRQDISABLE(type, u) (type ? OMAP4_MAILBOX_IRQENABLE_CLR(u) \
57 : OMAP2_MAILBOX_IRQENABLE(u))
/* Two interrupt bits per mailbox fifo: new-message (RX) and not-full (TX) */
59 #define MAILBOX_IRQ_NEWMSG(m) (1 << (2 * (m)))
60 #define MAILBOX_IRQ_NOTFULL(m) (1 << (2 * (m) + 1))
/* Register file sizes used to dimension the ctx[] save/restore area */
62 #define MBOX_REG_SIZE 0x120
64 #define OMAP4_MBOX_REG_SIZE 0x130
66 #define MBOX_NR_REGS (MBOX_REG_SIZE / sizeof(u32))
67 #define OMAP4_MBOX_NR_REGS (OMAP4_MBOX_REG_SIZE / sizeof(u32))
/*
 * struct omap_mbox_fifo - per-direction (TX or RX) register offsets for one
 * hardware mailbox fifo, accessed via mbox_read_reg()/mbox_write_reg().
 * (Definition is partial in this view; the message-register offset and the
 * interrupt-bit members referenced elsewhere are not visible here.)
 */
69 struct omap_mbox_fifo {
71 unsigned long fifo_stat; /* FIFOSTATUS offset: non-zero when fifo is full (see mbox_fifo_full) */
72 unsigned long msg_stat; /* MSGSTATUS offset: zero when no messages pending (see mbox_fifo_empty) */
73 unsigned long irqenable; /* per-user interrupt enable register offset */
74 unsigned long irqstatus; /* per-user interrupt status register offset */
75 unsigned long irqdisable; /* enable-clear reg (OMAP4+) or shared enable reg (OMAP2/3) */
/*
 * struct omap_mbox_queue - software queue buffering messages between the
 * mailbox hardware and its users. (Partial view: the spinlock and kfifo
 * members referenced elsewhere as mq->lock / mq->fifo are not visible here.)
 */
79 struct omap_mbox_queue {
82 struct work_struct work; /* RX bottom half, see mbox_rx_work() */
83 struct tasklet_struct tasklet; /* TX bottom half, see mbox_tx_tasklet() */
84 struct omap_mbox *mbox; /* back-pointer to the owning mailbox */
/*
 * struct omap_mbox_device - one mailbox IP block instance, holding the
 * mapped register base and all mailboxes it contains. (Partial view: the
 * dev/num_users/num_fifos members assigned in probe are not visible here.)
 */
88 struct omap_mbox_device {
90 struct mutex cfg_lock; /* serializes startup/fini use-count handling */
91 void __iomem *mbox_base; /* ioremapped mailbox register space */
94 struct omap_mbox **mboxes; /* NULL-terminated array of mailboxes */
95 struct list_head elem; /* link on the global omap_mbox_devices list */
/*
 * struct omap_mbox_fifo_info - tx/rx fifo ids, irq ids, user ids and name
 * describing one mailbox instance (body not visible in this view; populated
 * in omap_mbox_probe() from DT properties or platform data).
 */
98 struct omap_mbox_fifo_info {
/*
 * NOTE(review): the fields below appear to belong to struct omap_mbox,
 * whose opening declaration falls outside this view - confirm against
 * the full file.
 */
113 struct omap_mbox_queue *txq, *rxq; /* software TX/RX message queues */
115 struct omap_mbox_device *parent; /* owning device; provides mbox_base */
116 struct omap_mbox_fifo tx_fifo; /* TX-side register offsets/bit */
117 struct omap_mbox_fifo rx_fifo; /* RX-side register offsets/bit */
118 u32 ctx[OMAP4_MBOX_NR_REGS]; /* register snapshot for save/restore_ctx */
121 struct blocking_notifier_head notifier; /* clients notified on message RX */
124 /* global variables for the mailbox devices */
125 static DEFINE_MUTEX(omap_mbox_devices_lock); /* protects the list below */
126 static LIST_HEAD(omap_mbox_devices); /* all registered omap_mbox_device's */
/* kfifo size in bytes; sanitized (aligned/min-clamped) in omap_mbox_init() */
128 static unsigned int mbox_kfifo_size = CONFIG_OMAP_MBOX_KFIFO_SIZE;
129 module_param(mbox_kfifo_size, uint, S_IRUGO);
130 MODULE_PARM_DESC(mbox_kfifo_size, "Size of omap's mailbox kfifo (bytes)");
133 unsigned int mbox_read_reg(struct omap_mbox_device *mdev, size_t ofs)
135 return __raw_readl(mdev->mbox_base + ofs);
139 void mbox_write_reg(struct omap_mbox_device *mdev, u32 val, size_t ofs)
141 __raw_writel(val, mdev->mbox_base + ofs);
144 /* Mailbox FIFO handle functions */
145 static mbox_msg_t mbox_fifo_read(struct omap_mbox *mbox)
147 struct omap_mbox_fifo *fifo = &mbox->rx_fifo;
148 return (mbox_msg_t) mbox_read_reg(mbox->parent, fifo->msg);
151 static void mbox_fifo_write(struct omap_mbox *mbox, mbox_msg_t msg)
153 struct omap_mbox_fifo *fifo = &mbox->tx_fifo;
154 mbox_write_reg(mbox->parent, msg, fifo->msg);
157 static int mbox_fifo_empty(struct omap_mbox *mbox)
159 struct omap_mbox_fifo *fifo = &mbox->rx_fifo;
160 return (mbox_read_reg(mbox->parent, fifo->msg_stat) == 0);
163 static int mbox_fifo_full(struct omap_mbox *mbox)
165 struct omap_mbox_fifo *fifo = &mbox->tx_fifo;
166 return mbox_read_reg(mbox->parent, fifo->fifo_stat);
169 /* Mailbox IRQ handle functions */
170 static void ack_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
172 struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
173 &mbox->tx_fifo : &mbox->rx_fifo;
174 u32 bit = fifo->intr_bit;
175 u32 irqstatus = fifo->irqstatus;
177 mbox_write_reg(mbox->parent, bit, irqstatus);
179 /* Flush posted write for irq status to avoid spurious interrupts */
180 mbox_read_reg(mbox->parent, irqstatus);
183 static int is_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
185 struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
186 &mbox->tx_fifo : &mbox->rx_fifo;
187 u32 bit = fifo->intr_bit;
188 u32 irqenable = fifo->irqenable;
189 u32 irqstatus = fifo->irqstatus;
191 u32 enable = mbox_read_reg(mbox->parent, irqenable);
192 u32 status = mbox_read_reg(mbox->parent, irqstatus);
194 return (int)(enable & status & bit);
/*
 * omap_mbox_msg_send - queue @msg for transmission on @mbox.
 * Fast path: if the software kfifo is empty and the hardware fifo has room,
 * the message goes straight to the hardware; otherwise it is buffered in the
 * kfifo and drained later by mbox_tx_tasklet(). Runs under the TX queue
 * spinlock with bottom halves disabled. (Several error/return lines between
 * the visible statements are not shown in this view.)
 */
200 int omap_mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg)
202 struct omap_mbox_queue *mq = mbox->txq;
205 spin_lock_bh(&mq->lock);
/* no room to buffer another message - presumably bails out with an error here; TODO confirm */
207 if (kfifo_avail(&mq->fifo) < sizeof(msg)) {
/* fast path: nothing queued ahead of us and hardware can take it now */
212 if (kfifo_is_empty(&mq->fifo) && !mbox_fifo_full(mbox)) {
213 mbox_fifo_write(mbox, msg);
/* slow path: buffer the message and let the TX tasklet drain it */
217 len = kfifo_in(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
218 WARN_ON(len != sizeof(msg));
220 tasklet_schedule(&mbox->txq->tasklet);
223 spin_unlock_bh(&mq->lock);
226 EXPORT_SYMBOL(omap_mbox_msg_send);
/*
 * omap_mbox_save_ctx - snapshot the mailbox register file into mbox->ctx[]
 * for a later omap_mbox_restore_ctx(). The register count depends on the
 * SoC interrupt layout (the intr_type conditional selecting between the two
 * counts is not visible in this view - confirm against the full file).
 */
228 void omap_mbox_save_ctx(struct omap_mbox *mbox)
234 nr_regs = OMAP4_MBOX_NR_REGS;
236 nr_regs = MBOX_NR_REGS;
237 for (i = 0; i < nr_regs; i++) {
238 mbox->ctx[i] = mbox_read_reg(mbox->parent, i * sizeof(u32));
/* trace each saved register offset and value */
240 dev_dbg(mbox->dev, "%s: [%02x] %08x\n", __func__,
244 EXPORT_SYMBOL(omap_mbox_save_ctx);
/*
 * omap_mbox_restore_ctx - write back the register snapshot captured by
 * omap_mbox_save_ctx() from mbox->ctx[]. Register count selection mirrors
 * save_ctx (the intr_type conditional is not visible in this view).
 */
246 void omap_mbox_restore_ctx(struct omap_mbox *mbox)
252 nr_regs = OMAP4_MBOX_NR_REGS;
254 nr_regs = MBOX_NR_REGS;
255 for (i = 0; i < nr_regs; i++) {
256 mbox_write_reg(mbox->parent, mbox->ctx[i], i * sizeof(u32));
/* trace each restored register offset and value */
258 dev_dbg(mbox->dev, "%s: [%02x] %08x\n", __func__,
262 EXPORT_SYMBOL(omap_mbox_restore_ctx);
/*
 * omap_mbox_enable_irq - enable the TX or RX interrupt source of @mbox via
 * a read-modify-write of its irqenable register. (The statement OR-ing
 * 'bit' into 'l' between the read and the write is not visible in this
 * view - confirm against the full file.)
 */
264 void omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
267 struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
268 &mbox->tx_fifo : &mbox->rx_fifo;
269 u32 bit = fifo->intr_bit;
270 u32 irqenable = fifo->irqenable;
272 l = mbox_read_reg(mbox->parent, irqenable);
274 mbox_write_reg(mbox->parent, l, irqenable);
276 EXPORT_SYMBOL(omap_mbox_enable_irq);
278 void omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
280 struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
281 &mbox->tx_fifo : &mbox->rx_fifo;
282 u32 bit = fifo->intr_bit;
283 u32 irqdisable = fifo->irqdisable;
286 * Read and update the interrupt configuration register for pre-OMAP4.
287 * OMAP4 and later SoCs have a dedicated interrupt disabling register.
289 if (!mbox->intr_type)
290 bit = mbox_read_reg(mbox->parent, irqdisable) & ~bit;
292 mbox_write_reg(mbox->parent, bit, irqdisable);
294 EXPORT_SYMBOL(omap_mbox_disable_irq);
/*
 * mbox_tx_tasklet - TX bottom half: drain messages buffered in the software
 * kfifo into the hardware fifo. When the hardware fifo fills up, the TX
 * "not-full" interrupt is re-enabled so draining can resume from the TX
 * interrupt handler. (The loop-exit 'break', the kfifo_out() continuation
 * line, and any locking around the kfifo are not visible in this view.)
 */
296 static void mbox_tx_tasklet(unsigned long tx_data)
298 struct omap_mbox *mbox = (struct omap_mbox *)tx_data;
299 struct omap_mbox_queue *mq = mbox->txq;
303 while (kfifo_len(&mq->fifo)) {
/* hardware fifo full: arm the TX irq and stop draining for now */
304 if (mbox_fifo_full(mbox)) {
305 omap_mbox_enable_irq(mbox, IRQ_TX);
309 ret = kfifo_out(&mq->fifo, (unsigned char *)&msg,
311 WARN_ON(ret != sizeof(msg));
313 mbox_fifo_write(mbox, msg);
318 * Message receiver(workqueue)
/*
 * mbox_rx_work - RX bottom half (workqueue): pop messages buffered by the
 * interrupt handler out of the kfifo and deliver each to the mailbox's
 * blocking notifier chain, then re-enable the RX interrupt under the queue
 * lock. (The enclosing "queue was full" condition around the irq re-enable
 * is not visible in this view - confirm against the full file.)
 */
320 static void mbox_rx_work(struct work_struct *work)
322 struct omap_mbox_queue *mq =
323 container_of(work, struct omap_mbox_queue, work);
327 while (kfifo_len(&mq->fifo) >= sizeof(msg)) {
328 len = kfifo_out(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
329 WARN_ON(len != sizeof(msg));
/* hand the message to clients registered via omap_mbox_get() */
331 blocking_notifier_call_chain(&mq->mbox->notifier, len,
333 spin_lock_irq(&mq->lock);
336 omap_mbox_enable_irq(mq->mbox, IRQ_RX);
338 spin_unlock_irq(&mq->lock);
343 * Mailbox interrupt handler
345 static void __mbox_tx_interrupt(struct omap_mbox *mbox)
347 omap_mbox_disable_irq(mbox, IRQ_TX);
348 ack_mbox_irq(mbox, IRQ_TX);
349 tasklet_schedule(&mbox->txq->tasklet);
/*
 * __mbox_rx_interrupt - RX interrupt: move every pending hardware message
 * into the software kfifo, ack the interrupt source, and schedule the
 * workqueue to deliver them. If the kfifo runs out of space the RX irq is
 * masked until the workqueue drains it. (The overflow bookkeeping after the
 * disable and the 'nomem' label are not visible in this view.)
 */
352 static void __mbox_rx_interrupt(struct omap_mbox *mbox)
354 struct omap_mbox_queue *mq = mbox->rxq;
358 while (!mbox_fifo_empty(mbox)) {
/* kfifo full: mask RX irq; draining resumes from mbox_rx_work() */
359 if (unlikely(kfifo_avail(&mq->fifo) < sizeof(msg))) {
360 omap_mbox_disable_irq(mbox, IRQ_RX);
365 msg = mbox_fifo_read(mbox);
367 len = kfifo_in(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
368 WARN_ON(len != sizeof(msg));
371 /* no more messages in the fifo. clear IRQ source. */
372 ack_mbox_irq(mbox, IRQ_RX);
374 schedule_work(&mbox->rxq->work);
377 static irqreturn_t mbox_interrupt(int irq, void *p)
379 struct omap_mbox *mbox = p;
381 if (is_mbox_irq(mbox, IRQ_TX))
382 __mbox_tx_interrupt(mbox);
384 if (is_mbox_irq(mbox, IRQ_RX))
385 __mbox_rx_interrupt(mbox);
/*
 * mbox_queue_alloc - allocate an omap_mbox_queue with its backing kfifo and
 * attach the given workqueue handler and/or tasklet handler (callers pass
 * NULL for the unused one). Returns the queue, or presumably NULL on
 * allocation failure - the guarding conditionals around INIT_WORK /
 * tasklet_init and the error unwinding are not visible in this view.
 */
390 static struct omap_mbox_queue *mbox_queue_alloc(struct omap_mbox *mbox,
391 void (*work) (struct work_struct *),
392 void (*tasklet)(unsigned long))
394 struct omap_mbox_queue *mq;
396 mq = kzalloc(sizeof(struct omap_mbox_queue), GFP_KERNEL)
400 spin_lock_init(&mq->lock);
/* the kfifo buffers messages between irq context and the bottom halves */
402 if (kfifo_alloc(&mq->fifo, mbox_kfifo_size, GFP_KERNEL))
406 INIT_WORK(&mq->work, work);
409 tasklet_init(&mq->tasklet, tasklet, (unsigned long)mbox);
/*
 * mbox_queue_free - release a queue's kfifo. (The kfree of the queue
 * structure itself is not visible in this view - confirm it follows,
 * otherwise the allocation from mbox_queue_alloc() would leak.)
 */
416 static void mbox_queue_free(struct omap_mbox_queue *q)
418 kfifo_free(&q->fifo);
/*
 * omap_mbox_startup - per-acquire initialization for @mbox: the first user
 * (use_count 0 -> 1) allocates the TX and RX queues, requests the shared
 * interrupt and enables the RX irq; subsequent users only bump use_count.
 * Runs under the device cfg_lock with the device runtime-resumed.
 * (Error-branch lines between the visible statements - queue-allocation
 * failure checks, labels, and returns - are not visible in this view.)
 */
422 static int omap_mbox_startup(struct omap_mbox *mbox)
425 struct omap_mbox_queue *mq;
426 struct omap_mbox_device *mdev = mbox->parent;
428 mutex_lock(&mdev->cfg_lock);
429 ret = pm_runtime_get_sync(mdev->dev);
430 if (unlikely(ret < 0))
/* only the first user sets up queues and the interrupt */
433 if (!mbox->use_count++) {
434 mq = mbox_queue_alloc(mbox, NULL, mbox_tx_tasklet);
441 mq = mbox_queue_alloc(mbox, mbox_rx_work, NULL);
448 ret = request_irq(mbox->irq, mbox_interrupt, IRQF_SHARED,
451 pr_err("failed to register mailbox interrupt:%d\n",
453 goto fail_request_irq;
456 omap_mbox_enable_irq(mbox, IRQ_RX);
458 mutex_unlock(&mdev->cfg_lock);
/* error unwinding: free queues and drop the runtime PM reference */
462 mbox_queue_free(mbox->rxq);
464 mbox_queue_free(mbox->txq);
466 pm_runtime_put_sync(mdev->dev);
469 mutex_unlock(&mdev->cfg_lock);
473 static void omap_mbox_fini(struct omap_mbox *mbox)
475 struct omap_mbox_device *mdev = mbox->parent;
477 mutex_lock(&mdev->cfg_lock);
479 if (!--mbox->use_count) {
480 omap_mbox_disable_irq(mbox, IRQ_RX);
481 free_irq(mbox->irq, mbox);
482 tasklet_kill(&mbox->txq->tasklet);
483 flush_work(&mbox->rxq->work);
484 mbox_queue_free(mbox->txq);
485 mbox_queue_free(mbox->rxq);
488 pm_runtime_put_sync(mdev->dev);
490 mutex_unlock(&mdev->cfg_lock);
/*
 * omap_mbox_device_find - look up a mailbox by name in @mdev's
 * NULL-terminated mboxes array. Presumably assigns the match and returns
 * it (or NULL) - the match assignment, loop close and final return are
 * not visible in this view.
 */
493 static struct omap_mbox *omap_mbox_device_find(struct omap_mbox_device *mdev,
494 const char *mbox_name)
496 struct omap_mbox *_mbox, *mbox = NULL;
497 struct omap_mbox **mboxes = mdev->mboxes;
503 for (i = 0; (_mbox = mboxes[i]); i++) {
504 if (!strcmp(_mbox->name, mbox_name)) {
/*
 * omap_mbox_get - acquire a mailbox handle by name: search every registered
 * mailbox device under the global lock, register @nb on the mailbox's
 * notifier chain, then start the mailbox up. On startup failure the
 * notifier is unregistered again and ERR_PTR(-ENODEV) is returned; an
 * unknown name yields ERR_PTR(-ENOENT). (The loop break, the !mbox test
 * and the success return are not visible in this view.)
 */
512 struct omap_mbox *omap_mbox_get(const char *name, struct notifier_block *nb)
514 struct omap_mbox *mbox = NULL;
515 struct omap_mbox_device *mdev;
518 mutex_lock(&omap_mbox_devices_lock);
519 list_for_each_entry(mdev, &omap_mbox_devices, elem) {
520 mbox = omap_mbox_device_find(mdev, name);
524 mutex_unlock(&omap_mbox_devices_lock);
527 return ERR_PTR(-ENOENT);
/* register the client before startup so no early notification is missed */
530 blocking_notifier_chain_register(&mbox->notifier, nb);
532 ret = omap_mbox_startup(mbox);
534 blocking_notifier_chain_unregister(&mbox->notifier, nb);
535 return ERR_PTR(-ENODEV);
540 EXPORT_SYMBOL(omap_mbox_get);
542 void omap_mbox_put(struct omap_mbox *mbox, struct notifier_block *nb)
544 blocking_notifier_chain_unregister(&mbox->notifier, nb);
545 omap_mbox_fini(mbox);
547 EXPORT_SYMBOL(omap_mbox_put);
549 static struct class omap_mbox_class = { .name = "mbox", };
/*
 * omap_mbox_register - create a class device for each mailbox in @mdev's
 * NULL-terminated array, initialize its notifier chain, then add @mdev to
 * the global device list. On a device_create() failure the error path
 * unregisters the devices created so far. (The -EINVAL return, goto, and
 * the err_out label/loop header are not visible in this view.)
 */
551 static int omap_mbox_register(struct omap_mbox_device *mdev)
555 struct omap_mbox **mboxes;
557 if (!mdev || !mdev->mboxes)
560 mboxes = mdev->mboxes;
561 for (i = 0; mboxes[i]; i++) {
562 struct omap_mbox *mbox = mboxes[i];
563 mbox->dev = device_create(&omap_mbox_class,
564 mdev->dev, 0, mbox, "%s", mbox->name);
565 if (IS_ERR(mbox->dev)) {
566 ret = PTR_ERR(mbox->dev);
570 BLOCKING_INIT_NOTIFIER_HEAD(&mbox->notifier);
573 mutex_lock(&omap_mbox_devices_lock);
574 list_add(&mdev->elem, &omap_mbox_devices);
575 mutex_unlock(&omap_mbox_devices_lock);
/* error unwinding: destroy the class devices created before the failure */
581 device_unregister(mboxes[i]->dev);
/*
 * omap_mbox_unregister - reverse of omap_mbox_register(): take @mdev off
 * the global device list, then unregister every per-mailbox class device.
 * (The -EINVAL and final return statements are not visible in this view.)
 */
585 static int omap_mbox_unregister(struct omap_mbox_device *mdev)
588 struct omap_mbox **mboxes;
590 if (!mdev || !mdev->mboxes)
593 mutex_lock(&omap_mbox_devices_lock);
594 list_del(&mdev->elem);
595 mutex_unlock(&omap_mbox_devices_lock);
597 mboxes = mdev->mboxes;
598 for (i = 0; mboxes[i]; i++)
599 device_unregister(mboxes[i]->dev);
/*
 * DT match table: OMAP2/3 use interrupt configuration type 1, OMAP4+ use
 * type 2 (selects register layout, see the MAILBOX_IRQ* macros). The match
 * data is decoded back in omap_mbox_probe().
 */
603 static const struct of_device_id omap_mailbox_of_match[] = {
605 .compatible = "ti,omap2-mailbox",
606 .data = (void *)MBOX_INTR_CFG_TYPE1,
609 .compatible = "ti,omap3-mailbox",
610 .data = (void *)MBOX_INTR_CFG_TYPE1,
613 .compatible = "ti,omap4-mailbox",
614 .data = (void *)MBOX_INTR_CFG_TYPE2,
620 MODULE_DEVICE_TABLE(of, omap_mailbox_of_match);
/*
 * omap_mbox_probe - device probe: gather per-mailbox fifo/irq/user info
 * (from DT child nodes or legacy platform data), validate it against the
 * advertised fifo/user counts, map the registers, build the omap_mbox
 * array, register the class devices, enable runtime PM and print the IP
 * revision. (Numerous error-check/return lines between the visible
 * statements are not shown in this view.)
 */
622 static int omap_mbox_probe(struct platform_device *pdev)
624 struct resource *mem;
626 struct omap_mbox **list, *mbox, *mboxblk;
627 struct omap_mbox_pdata *pdata = pdev->dev.platform_data;
628 struct omap_mbox_dev_info *info = NULL;
629 struct omap_mbox_fifo_info *finfo, *finfoblk;
630 struct omap_mbox_device *mdev;
631 struct omap_mbox_fifo *fifo;
632 struct device_node *node = pdev->dev.of_node;
633 struct device_node *child;
634 const struct of_device_id *match;
635 u32 intr_type, info_count;
636 u32 num_users, num_fifos;
/* need either a DT node or complete platform data */
641 if (!node && (!pdata || !pdata->info_cnt || !pdata->info)) {
642 pr_err("%s: platform not supported\n", __func__);
647 match = of_match_device(omap_mailbox_of_match, &pdev->dev);
/*
 * NOTE(review): casting the match data pointer straight to u32 truncates
 * on 64-bit builds; an intermediate (unsigned long)/(uintptr_t) cast would
 * be cleaner - confirm whether this driver only targets 32-bit OMAP.
 */
650 intr_type = (u32)match->data;
652 if (of_property_read_u32(node, "ti,mbox-num-users",
656 if (of_property_read_u32(node, "ti,mbox-num-fifos",
660 info_count = of_get_available_child_count(node);
662 dev_err(&pdev->dev, "no available mbox devices found\n");
665 } else { /* non-DT device creation */
666 info_count = pdata->info_cnt;
668 intr_type = pdata->intr_type;
669 num_users = pdata->num_users;
670 num_fifos = pdata->num_fifos;
673 finfoblk = devm_kzalloc(&pdev->dev, info_count * sizeof(*finfoblk),
/* collect tx/rx fifo ids, irq ids and user ids for every mailbox */
680 for (i = 0; i < info_count; i++, finfo++) {
682 child = of_get_next_available_child(node, child);
683 ret = of_property_read_u32_array(child, "ti,mbox-tx",
684 tmp, ARRAY_SIZE(tmp));
687 finfo->tx_id = tmp[0];
688 finfo->tx_irq = tmp[1];
689 finfo->tx_usr = tmp[2];
691 ret = of_property_read_u32_array(child, "ti,mbox-rx",
692 tmp, ARRAY_SIZE(tmp));
695 finfo->rx_id = tmp[0];
696 finfo->rx_irq = tmp[1];
697 finfo->rx_usr = tmp[2];
699 finfo->name = child->name;
/* legacy platform-data path: one shared user/irq id per direction pair */
701 finfo->tx_id = info->tx_id;
702 finfo->rx_id = info->rx_id;
703 finfo->tx_usr = info->usr_id;
704 finfo->tx_irq = info->irq_id;
705 finfo->rx_usr = info->usr_id;
706 finfo->rx_irq = info->irq_id;
707 finfo->name = info->name;
/* reject ids beyond what this mailbox IP instance provides */
710 if (finfo->tx_id >= num_fifos || finfo->rx_id >= num_fifos ||
711 finfo->tx_usr >= num_users || finfo->rx_usr >= num_users)
715 mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
719 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
720 mdev->mbox_base = devm_ioremap_resource(&pdev->dev, mem);
721 if (IS_ERR(mdev->mbox_base))
722 return PTR_ERR(mdev->mbox_base);
724 /* allocate one extra for marking end of list */
725 list = devm_kzalloc(&pdev->dev, (info_count + 1) * sizeof(*list),
730 mboxblk = devm_kzalloc(&pdev->dev, info_count * sizeof(*mbox),
/* translate the fifo/user ids into concrete register offsets and bits */
737 for (i = 0; i < info_count; i++, finfo++) {
738 fifo = &mbox->tx_fifo;
739 fifo->msg = MAILBOX_MESSAGE(finfo->tx_id);
740 fifo->fifo_stat = MAILBOX_FIFOSTATUS(finfo->tx_id);
741 fifo->intr_bit = MAILBOX_IRQ_NOTFULL(finfo->tx_id);
742 fifo->irqenable = MAILBOX_IRQENABLE(intr_type, finfo->tx_usr);
743 fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, finfo->tx_usr);
744 fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, finfo->tx_usr);
746 fifo = &mbox->rx_fifo;
747 fifo->msg = MAILBOX_MESSAGE(finfo->rx_id);
748 fifo->msg_stat = MAILBOX_MSGSTATUS(finfo->rx_id);
749 fifo->intr_bit = MAILBOX_IRQ_NEWMSG(finfo->rx_id);
750 fifo->irqenable = MAILBOX_IRQENABLE(intr_type, finfo->rx_usr);
751 fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, finfo->rx_usr);
752 fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, finfo->rx_usr);
754 mbox->intr_type = intr_type;
/* NOTE(review): the interrupt line is resolved from tx_irq only - confirm tx/rx share it */
757 mbox->name = finfo->name;
758 mbox->irq = platform_get_irq(pdev, finfo->tx_irq);
764 mutex_init(&mdev->cfg_lock);
765 mdev->dev = &pdev->dev;
766 mdev->num_users = num_users;
767 mdev->num_fifos = num_fifos;
769 ret = omap_mbox_register(mdev);
773 platform_set_drvdata(pdev, mdev);
774 pm_runtime_enable(mdev->dev);
776 ret = pm_runtime_get_sync(mdev->dev);
778 pm_runtime_put_noidle(mdev->dev);
783 * just print the raw revision register, the format is not
784 * uniform across all SoCs
786 l = mbox_read_reg(mdev, MAILBOX_REVISION);
787 dev_info(mdev->dev, "omap mailbox rev 0x%x\n", l);
789 ret = pm_runtime_put_sync(mdev->dev);
/* the fifo-info scratch block is only needed during probe */
793 devm_kfree(&pdev->dev, finfoblk);
/* error path: undo runtime PM enable and device registration */
797 pm_runtime_disable(mdev->dev);
798 omap_mbox_unregister(mdev);
802 static int omap_mbox_remove(struct platform_device *pdev)
804 struct omap_mbox_device *mdev = platform_get_drvdata(pdev);
806 pm_runtime_disable(mdev->dev);
807 omap_mbox_unregister(mdev);
/* platform driver glue: binds by name "omap-mailbox" or via the DT table */
812 static struct platform_driver omap_mbox_driver = {
813 .probe = omap_mbox_probe,
814 .remove = omap_mbox_remove,
816 .name = "omap-mailbox",
817 .owner = THIS_MODULE,
818 .of_match_table = of_match_ptr(omap_mailbox_of_match),
/*
 * omap_mbox_init - subsystem init (subsys_initcall): register the sysfs
 * class, sanitize the module-configurable kfifo size (aligned to the
 * message size and clamped to a minimum), then register the platform
 * driver. (The class_register() error check and the minimum-size argument
 * of max_t() are not visible in this view.)
 */
822 static int __init omap_mbox_init(void)
826 err = class_register(&omap_mbox_class);
830 /* kfifo size sanity check: alignment and minimal size */
831 mbox_kfifo_size = ALIGN(mbox_kfifo_size, sizeof(mbox_msg_t));
832 mbox_kfifo_size = max_t(unsigned int, mbox_kfifo_size,
835 return platform_driver_register(&omap_mbox_driver);
837 subsys_initcall(omap_mbox_init);
839 static void __exit omap_mbox_exit(void)
841 platform_driver_unregister(&omap_mbox_driver);
842 class_unregister(&omap_mbox_class);
844 module_exit(omap_mbox_exit);
/* module metadata */
846 MODULE_LICENSE("GPL v2");
847 MODULE_DESCRIPTION("omap mailbox: interrupt driven messaging");
848 MODULE_AUTHOR("Toshihiro Kobayashi");
849 MODULE_AUTHOR("Hiroshi DOYU");