[RFC 3/4] ARM: imx: add the platform related rpmsg implementation

From: Richard Zhu
Date: Wed Jan 06 2016 - 05:43:57 EST


From: Richard Zhu <Richard.Zhu@xxxxxxxxxxxxx>

- Add MU (Messaging Unit) driver support. The MU interrupt and its
4-byte messages are used as the interaction channel between the
Cortex-A core and the Cortex-M4 core on i.MX AMP platforms.
- Register a notifier in the MU interrupt handler that is consumed
by rpmsg.
- Instantiate the virtual processor and fill in the virtio_config_ops
in the platform-specific rpmsg implementation code.
- Hard-code the vring storage shared by the Cortex-A and Cortex-M4
cores on AMP SoCs.

Signed-off-by: Richard Zhu <hongxing.zhu@xxxxxxx>
---
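Not part of this patch -- just a sketch of how a client driver would sit on
top of the transport added here, based on the existing rpmsg client API
(cf. samples/rpmsg/rpmsg_client_sample.c). The channel name
"rpmsg-client-sample" and the payload are placeholders; the real channel is
announced by the M4 firmware through the rpmsg name-service.

/* Illustrative only -- not part of this patch. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rpmsg.h>

static void sample_cb(struct rpmsg_channel *rpdev, void *data, int len,
        void *priv, u32 src)
{
        dev_info(&rpdev->dev, "received %d bytes from 0x%x\n", len, src);
}

static int sample_probe(struct rpmsg_channel *rpdev)
{
        /* ends up in imx_rpmsg_notify(), i.e. an MU doorbell to the M4 */
        return rpmsg_send(rpdev, "hello M4", 9);
}

static void sample_remove(struct rpmsg_channel *rpdev)
{
}

static struct rpmsg_device_id sample_id_table[] = {
        { .name = "rpmsg-client-sample" }, /* assumed channel name */
        { },
};
MODULE_DEVICE_TABLE(rpmsg, sample_id_table);

static struct rpmsg_driver sample_client = {
        .drv.name = KBUILD_MODNAME,
        .drv.owner = THIS_MODULE,
        .id_table = sample_id_table,
        .probe = sample_probe,
        .callback = sample_cb,
        .remove = sample_remove,
};

static int __init sample_init(void)
{
        return register_rpmsg_driver(&sample_client);
}
module_init(sample_init);

static void __exit sample_exit(void)
{
        unregister_rpmsg_driver(&sample_client);
}
module_exit(sample_exit);

MODULE_LICENSE("GPL v2");

rpmsg_send() from such a client ends up in imx_rpmsg_notify() below, which
rings the M4 through the MU; traffic in the other direction arrives via the
MU interrupt, the notifier chain and vring_interrupt().
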
arch/arm/mach-imx/Kconfig | 12 ++
arch/arm/mach-imx/Makefile | 2 +
arch/arm/mach-imx/imx_rpmsg.c | 364 ++++++++++++++++++++++++++++++++++++++++++
arch/arm/mach-imx/mu.c | 217 +++++++++++++++++++++++++
include/linux/imx_rpmsg.h | 27 ++++
5 files changed, 622 insertions(+)
create mode 100644 arch/arm/mach-imx/imx_rpmsg.c
create mode 100644 arch/arm/mach-imx/mu.c
create mode 100644 include/linux/imx_rpmsg.h

diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index 8ceda28..a7bc41c 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -56,6 +56,12 @@ config HAVE_IMX_GPC
config HAVE_IMX_MMDC
bool

+config HAVE_IMX_MU
+ bool
+
+config HAVE_IMX_RPMSG
+ bool
+
config HAVE_IMX_SRC
def_bool y if SMP
select ARCH_HAS_RESET_CONTROLLER
@@ -544,6 +550,9 @@ config SOC_IMX6SX
bool "i.MX6 SoloX support"
select PINCTRL_IMX6SX
select SOC_IMX6
+ select HAVE_IMX_MU
+ select HAVE_IMX_RPMSG
+ select RPMSG

help
This enables support for Freescale i.MX6 SoloX processor.
@@ -562,6 +571,9 @@ config SOC_IMX7D
select ARM_GIC
select HAVE_IMX_ANATOP
select HAVE_IMX_MMDC
+ select HAVE_IMX_MU
+ select HAVE_IMX_RPMSG
+ select RPMSG
help
This enables support for Freescale i.MX7 Dual processor.

diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index fb689d8..a3c1814 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -74,7 +74,9 @@ obj-$(CONFIG_MACH_IMX35_DT) += imx35-dt.o
obj-$(CONFIG_HAVE_IMX_ANATOP) += anatop.o
obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o
obj-$(CONFIG_HAVE_IMX_MMDC) += mmdc.o
+obj-$(CONFIG_HAVE_IMX_MU) += mu.o
obj-$(CONFIG_HAVE_IMX_SRC) += src.o
+obj-$(CONFIG_HAVE_IMX_RPMSG) += imx_rpmsg.o
ifneq ($(CONFIG_SOC_IMX6)$(CONFIG_SOC_LS1021A),)
AFLAGS_headsmp.o :=-Wa,-march=armv7-a
obj-$(CONFIG_SMP) += headsmp.o platsmp.o
diff --git a/arch/arm/mach-imx/imx_rpmsg.c b/arch/arm/mach-imx/imx_rpmsg.c
new file mode 100644
index 0000000..ab6ba7a
--- /dev/null
+++ b/arch/arm/mach-imx/imx_rpmsg.c
@@ -0,0 +1,364 @@
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ *
+ * derived from the omap-rpmsg implementation.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/rpmsg.h>
+#include <linux/slab.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_ring.h>
+#include <linux/imx_rpmsg.h>
+
+struct imx_rpmsg_vproc {
+ struct virtio_device vdev;
+ unsigned int vring[2];
+ char *rproc_name;
+ struct mutex lock;
+ struct notifier_block nb;
+ struct virtqueue *vq[2];
+ int base_vq_id;
+ int num_of_vqs;
+};
+
+/*
+ * For now, allocate 256 buffers of 512 bytes for each side. Each buffer
+ * will then have 16B for the msg header and 496B for the payload.
+ * This will require a total space of 256KB for the buffers themselves, and
+ * 3 pages for every vring (the size of the vring depends on the number of
+ * buffers it supports).
+ */
+#define RPMSG_NUM_BUFS (512)
+#define RPMSG_BUF_SIZE (512)
+#define RPMSG_BUFS_SPACE (RPMSG_NUM_BUFS * RPMSG_BUF_SIZE)
+
+/*
+ * The alignment between the consumer and producer parts of the vring.
+ * Note: this is part of the "wire" protocol. If you change this, you need
+ * to update your BIOS image as well
+ */
+#define RPMSG_VRING_ALIGN (4096)
+
+/* With 256 buffers, our vring will occupy 3 pages */
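+/*
+ * (vring_size(256, 4096) = 256 * 16 descriptor bytes + 518 avail-ring bytes,
+ * rounded up to 8192, plus 2054 used-ring bytes = 10246 bytes -> 3 pages.)
+ */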
+#define RPMSG_RING_SIZE ((DIV_ROUND_UP(vring_size(RPMSG_NUM_BUFS / 2, \
+ RPMSG_VRING_ALIGN), PAGE_SIZE)) * PAGE_SIZE)
+
+#define to_imx_rpdev(vd) container_of(vd, struct imx_rpmsg_vproc, vdev)
+
+struct imx_rpmsg_vq_info {
+ __u16 num; /* number of entries in the virtio_ring */
+ __u16 vq_id; /* a globally unique index of this virtqueue */
+ void *addr; /* address where we mapped the virtio ring */
+ struct imx_rpmsg_vproc *rpdev;
+};
+
+static u64 imx_rpmsg_get_features(struct virtio_device *vdev)
+{
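+ /* the only feature we advertise: rpmsg name-service announcements */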
+ return 1 << VIRTIO_RPMSG_F_NS;
+}
+
+static int imx_rpmsg_finalize_features(struct virtio_device *vdev)
+{
+ /* Give virtio_ring a chance to accept features */
+ vring_transport_features(vdev);
+ return 0;
+}
+
+/* kick the remote processor, and let it know which virtqueue to poke at */
+static bool imx_rpmsg_notify(struct virtqueue *vq)
+{
+ int ret;
+ unsigned int mu_rpmsg = 0;
+ struct imx_rpmsg_vq_info *rpvq = vq->priv;
+
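+ /* carry the virtqueue index in the upper 16 bits of the 32-bit MU word */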
+ mu_rpmsg = rpvq->vq_id << 16;
+ mutex_lock(&rpvq->rpdev->lock);
+ /* send the index of the triggered virtqueue as the mu payload */
+ ret = imx_mu_rpmsg_send(mu_rpmsg);
+ mutex_unlock(&rpvq->rpdev->lock);
+ if (ret) {
+ pr_err("ugh, imx_mu_rpmsg_send() failed: %d\n", ret);
+ return false;
+ }
+
+ return true;
+}
+
+static int imx_mu_rpmsg_callback(struct notifier_block *this,
+ unsigned long index, void *data)
+{
+ u32 mu_msg = (u32) data;
+ struct imx_rpmsg_vproc *rpdev;
+
+ rpdev = container_of(this, struct imx_rpmsg_vproc, nb);
+
+ pr_debug("%s mu_msg: 0x%x\n", __func__, mu_msg);
+
+ /* the sender encodes the vq index in the upper 16 bits of the MU word */
+ mu_msg = mu_msg >> 16;
+
+ /* ignore vq indices which are clearly not for us */
+ if (mu_msg < rpdev->base_vq_id) {
+ pr_err("mu_msg: 0x%x is invalid\n", mu_msg);
+ return NOTIFY_DONE;
+ }
+
+ mu_msg -= rpdev->base_vq_id;
+
+ /*
+ * Currently both PENDING_MSG and explicit-virtqueue-index
+ * messaging are supported.
+ * Whatever approach is taken, at this point 'mu_msg' contains
+ * the index of the vring which was just triggered.
+ */
+ if (mu_msg < rpdev->num_of_vqs)
+ vring_interrupt(mu_msg, rpdev->vq[mu_msg]);
+
+ return NOTIFY_DONE;
+}
+
+static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
+ unsigned index,
+ void (*callback)(struct virtqueue *vq),
+ const char *name)
+{
+ struct imx_rpmsg_vproc *rpdev = to_imx_rpdev(vdev);
+ struct imx_rpmsg_vq_info *rpvq;
+ struct virtqueue *vq;
+ int err;
+
+ rpvq = kmalloc(sizeof(*rpvq), GFP_KERNEL);
+ if (!rpvq)
+ return ERR_PTR(-ENOMEM);
+
+ /* ioremap'ing normal memory, so we cast away sparse's complaints */
+ rpvq->addr = (__force void *) ioremap_nocache(rpdev->vring[index],
+ RPMSG_RING_SIZE);
+ if (!rpvq->addr) {
+ err = -ENOMEM;
+ goto free_rpvq;
+ }
+
+ memset(rpvq->addr, 0, RPMSG_RING_SIZE);
+
+ pr_debug("vring%d: phys 0x%x, virt 0x%x\n", index, rpdev->vring[index],
+ (unsigned int) rpvq->addr);
+
+ vq = vring_new_virtqueue(index, RPMSG_NUM_BUFS / 2, RPMSG_VRING_ALIGN,
+ vdev, true, rpvq->addr, imx_rpmsg_notify, callback,
+ name);
+ if (!vq) {
+ pr_err("vring_new_virtqueue failed\n");
+ err = -ENOMEM;
+ goto unmap_vring;
+ }
+
+ rpdev->vq[index] = vq;
+ vq->priv = rpvq;
+ /* system-wide unique id for this virtqueue */
+ rpvq->vq_id = rpdev->base_vq_id + index;
+ rpvq->rpdev = rpdev;
+ mutex_init(&rpdev->lock);
+
+ return vq;
+
+unmap_vring:
+ /* iounmap normal memory, so make sparse happy */
+ iounmap((__force void __iomem *) rpvq->addr);
+free_rpvq:
+ kfree(rpvq);
+ return ERR_PTR(err);
+}
+
+static void imx_rpmsg_del_vqs(struct virtio_device *vdev)
+{
+ struct virtqueue *vq, *n;
+ struct imx_rpmsg_vproc *rpdev = to_imx_rpdev(vdev);
+
+ list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
+ struct imx_rpmsg_vq_info *rpvq = vq->priv;
+
+ iounmap(rpvq->addr);
+ vring_del_virtqueue(vq);
+ kfree(rpvq);
+ }
+
+ imx_mu_rpmsg_unregister_nb((const char *)rpdev->rproc_name,
+ &rpdev->nb);
+}
+
+static int imx_rpmsg_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+ struct virtqueue *vqs[],
+ vq_callback_t *callbacks[],
+ const char *names[])
+{
+ struct imx_rpmsg_vproc *rpdev = to_imx_rpdev(vdev);
+ int i, err;
+
+ /* we maintain two virtqueues per remote processor (for RX and TX) */
+ if (nvqs != 2)
+ return -EINVAL;
+
+ for (i = 0; i < nvqs; ++i) {
+ vqs[i] = rp_find_vq(vdev, i, callbacks[i], names[i]);
+ if (IS_ERR(vqs[i])) {
+ err = PTR_ERR(vqs[i]);
+ goto error;
+ }
+ }
+
+ rpdev->num_of_vqs = nvqs;
+
+ rpdev->nb.notifier_call = imx_mu_rpmsg_callback;
+ imx_mu_rpmsg_register_nb((const char *)rpdev->rproc_name, &rpdev->nb);
+
+ return 0;
+
+error:
+ imx_rpmsg_del_vqs(vdev);
+ return err;
+}
+
+static void imx_rpmsg_reset(struct virtio_device *vdev)
+{
+ dev_dbg(&vdev->dev, "reset !\n");
+}
+
+static u8 imx_rpmsg_get_status(struct virtio_device *vdev)
+{
+ return 0;
+}
+
+static void imx_rpmsg_set_status(struct virtio_device *vdev, u8 status)
+{
+ dev_dbg(&vdev->dev, "%s new status: %d\n", __func__, status);
+}
+
+static void imx_rpmsg_vproc_release(struct device *dev)
+{
+ /* this handler is provided so driver core doesn't yell at us */
+}
+
+static struct virtio_config_ops imx_rpmsg_config_ops = {
+ .get_features = imx_rpmsg_get_features,
+ .finalize_features = imx_rpmsg_finalize_features,
+ .find_vqs = imx_rpmsg_find_vqs,
+ .del_vqs = imx_rpmsg_del_vqs,
+ .reset = imx_rpmsg_reset,
+ .set_status = imx_rpmsg_set_status,
+ .get_status = imx_rpmsg_get_status,
+};
+
+static struct imx_rpmsg_vproc imx_rpmsg_vprocs[] = {
+ {
+ .vdev.id.device = VIRTIO_ID_RPMSG,
+ .vdev.config = &imx_rpmsg_config_ops,
+ .rproc_name = "m4",
+ .base_vq_id = 0,
+ },
+};
+
+static const struct of_device_id imx_rpmsg_dt_ids[] = {
+ { .compatible = "fsl,imx6sx-rpmsg", },
+ { .compatible = "fsl,imx7d-rpmsg", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_rpmsg_dt_ids);
+
+static int imx_rpmsg_probe(struct platform_device *pdev)
+{
+ int i, ret = 0;
+ struct device_node *np = pdev->dev.of_node;
+
+ for (i = 0; i < ARRAY_SIZE(imx_rpmsg_vprocs); i++) {
+ struct imx_rpmsg_vproc *rpdev = &imx_rpmsg_vprocs[i];
+
+ if (!strcmp(rpdev->rproc_name, "m4")) {
+ ret = of_device_is_compatible(np, "fsl,imx7d-rpmsg");
+ ret |= of_device_is_compatible(np, "fsl,imx6sx-rpmsg");
+ if (ret) {
+ /* hard-coded vring addresses for now; must match the M4 firmware's layout */
+ rpdev->vring[0] = 0xBFFF0000;
+ rpdev->vring[1] = 0xBFFF8000;
+ }
+ } else {
+ break;
+ }
+
+ pr_debug("%s rpdev%d: vring0 0x%x, vring1 0x%x\n", __func__,
+ i, rpdev->vring[0], rpdev->vring[1]);
+
+ rpdev->vdev.dev.parent = &pdev->dev;
+ rpdev->vdev.dev.release = imx_rpmsg_vproc_release;
+
+ ret = register_virtio_device(&rpdev->vdev);
+ if (ret) {
+ pr_err("%s failed to register rpdev: %d\n",
+ __func__, ret);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int imx_rpmsg_remove(struct platform_device *pdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(imx_rpmsg_vprocs); i++) {
+ struct imx_rpmsg_vproc *rpdev = &imx_rpmsg_vprocs[i];
+
+ unregister_virtio_device(&rpdev->vdev);
+ }
+ return 0;
+}
+
+static struct platform_driver imx_rpmsg_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "imx-rpmsg",
+ .of_match_table = imx_rpmsg_dt_ids,
+ },
+ .probe = imx_rpmsg_probe,
+ .remove = imx_rpmsg_remove,
+};
+
+static int __init imx_rpmsg_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&imx_rpmsg_driver);
+ if (ret)
+ pr_err("Unable to initialize rpmsg driver\n");
+ else
+ pr_info("imx rpmsg driver is registered.\n");
+
+ return ret;
+}
+
+static void __exit imx_rpmsg_exit(void)
+{
+ pr_info("imx rpmsg driver is unregistered.\n");
+ platform_driver_unregister(&imx_rpmsg_driver);
+}
+
+module_exit(imx_rpmsg_exit);
+module_init(imx_rpmsg_init);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("iMX remote processor messaging virtio device");
+MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/mach-imx/mu.c b/arch/arm/mach-imx/mu.c
new file mode 100644
index 0000000..7b07f48
--- /dev/null
+++ b/arch/arm/mach-imx/mu.c
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+#include "common.h"
+#include "hardware.h"
+
+#define MU_ATR0_OFFSET 0x0
+#define MU_ARR0_OFFSET 0x10
+#define MU_ARR1_OFFSET 0x14
+#define MU_ASR 0x20
+#define MU_ACR 0x24
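+
+/*
+ * MU_ASR/MU_ACR bits as used below:
+ * ASR bit (23 - n): TEn, transmit register n empty
+ * ASR bit (27 - n): RFn, receive register n full
+ * ASR bit 4: EP, event pending
+ * ACR bit (27 - n): RIEn, receive interrupt enable for register n
+ */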
+
+#define MU_RPMSG_HANDSHAKE_INDEX 1
+
+struct imx_mu_rpmsg_box {
+ const char *name;
+ struct blocking_notifier_head notifier;
+};
+
+static struct imx_mu_rpmsg_box mu_rpmsg_box = {
+ .name = "m4",
+};
+
+static void __iomem *mu_base;
+static u32 m4_message;
+static struct delayed_work rpmsg_work;
+
+static int imx_mu_send_message(unsigned int index, unsigned int data)
+{
+ u32 val, ep;
+ int i, te_flag = 0;
+ unsigned long timeout = jiffies + msecs_to_jiffies(500);
+
+ /* wait for transfer buffer empty, and no event pending */
+ do {
+ val = readl_relaxed(mu_base + MU_ASR);
+ ep = val & BIT(4);
+ if (time_after(jiffies, timeout)) {
+ pr_err("Waiting MU transmit buffer empty timeout!\n");
+ return -EIO;
+ }
+ } while (((val & (1 << (20 + 3 - index))) == 0) || (ep == BIT(4)));
+
+ writel_relaxed(data, mu_base + index * 0x4 + MU_ATR0_OFFSET);
+
+ /*
+ * Double-check that TEn was cleared (i.e. the transmit register is
+ * no longer empty) right after the write.
+ */
+ val = readl_relaxed(mu_base + MU_ASR);
+ ep = val & BIT(4);
+ if (((val & (1 << (20 + (3 - index)))) == 0) || (ep == BIT(4)))
+ return 0;
+
+ /*
+ * Make sure the TEn flag toggles once ATRn has been filled.
+ */
+ for (i = 0; i < 100; i++) {
+ val = readl_relaxed(mu_base + MU_ASR);
+ ep = val & BIT(4);
+ if (((val & (1 << (20 + 3 - index))) == 0) || (ep == BIT(4))) {
+ /* TEn went 1->0: the write was accepted */
+ te_flag = 1;
+ break;
+ } else if (time_after(jiffies, timeout)) {
+ /* Can't see TEn 1->0, maybe already handled! */
+ te_flag = 1;
+ break;
+ }
+ }
+ if (te_flag == 0)
+ pr_info("TEn did not toggle after ATRn was filled.\n");
+
+ return 0;
+}
+
+int imx_mu_rpmsg_send(unsigned int rpmsg)
+{
+ return imx_mu_send_message(MU_RPMSG_HANDSHAKE_INDEX, rpmsg);
+}
+
+int imx_mu_rpmsg_register_nb(const char *name, struct notifier_block *nb)
+{
+ if ((name == NULL) || (nb == NULL))
+ return -EINVAL;
+
+ if (!strcmp(mu_rpmsg_box.name, name))
+ blocking_notifier_chain_register(&(mu_rpmsg_box.notifier), nb);
+ else
+ return -ENOENT;
+
+ return 0;
+}
+
+int imx_mu_rpmsg_unregister_nb(const char *name, struct notifier_block *nb)
+{
+ if ((name == NULL) || (nb == NULL))
+ return -EINVAL;
+
+ if (!strcmp(mu_rpmsg_box.name, name))
+ blocking_notifier_chain_unregister(&(mu_rpmsg_box.notifier),
+ nb);
+ else
+ return -ENOENT;
+
+ return 0;
+}
+
+static void rpmsg_work_handler(struct work_struct *work)
+{
+ blocking_notifier_call_chain(&(mu_rpmsg_box.notifier), 4,
+ (void *)m4_message);
+ m4_message = 0;
+}
+
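+/*
+ * The rpmsg notifier chain is a blocking one (notifiers may sleep), so the
+ * received MU word is handed off to a work item rather than delivered
+ * directly from hard-IRQ context.
+ */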
+static irqreturn_t imx_mu_isr(int irq, void *param)
+{
+ u32 irqs;
+
+ irqs = readl_relaxed(mu_base + MU_ASR);
+
+ /* RPMSG: RF1 (receive register 1 full) was set by the remote core */
+ if (irqs & (1 << 26)) {
+ /* get message from receive buffer */
+ m4_message = readl_relaxed(mu_base + MU_ARR1_OFFSET);
+ schedule_delayed_work(&rpmsg_work, 0);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int imx_mu_probe(struct platform_device *pdev)
+{
+ int ret;
+ int irq;
+ struct device_node *np;
+ struct clk *clk;
+
+ np = pdev->dev.of_node;
+ mu_base = of_iomap(np, 0);
+ if (WARN_ON(!mu_base))
+ return -ENOMEM;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = request_irq(irq, imx_mu_isr,
+ IRQF_EARLY_RESUME, "imx-mu", &mu_rpmsg_box);
+ if (ret) {
+ pr_err("%s: register interrupt %d failed, rc %d\n",
+ __func__, irq, ret);
+ return ret;
+ }
+
+ ret = of_device_is_compatible(np, "fsl,imx7d-mu");
+ if (ret) {
+ clk = devm_clk_get(&pdev->dev, "mu");
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev,
+ "mu clock source missing or invalid\n");
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "unable to enable mu clock\n");
+ return ret;
+ }
+ }
+
+ INIT_DELAYED_WORK(&rpmsg_work, rpmsg_work_handler);
+ /* enable the bit26(RIE1) of MU_ACR */
+ writel_relaxed(readl_relaxed(mu_base + MU_ACR) | BIT(26),
+ mu_base + MU_ACR);
+ BLOCKING_INIT_NOTIFIER_HEAD(&(mu_rpmsg_box.notifier));
+
+ pr_info("MU is ready for cross core communication!\n");
+
+ return 0;
+}
+
+static const struct of_device_id imx_mu_ids[] = {
+ { .compatible = "fsl,imx6sx-mu" },
+ { .compatible = "fsl,imx7d-mu" },
+ { }
+};
+
+static struct platform_driver imx_mu_driver = {
+ .driver = {
+ .name = "imx-mu",
+ .owner = THIS_MODULE,
+ .of_match_table = imx_mu_ids,
+ },
+ .probe = imx_mu_probe,
+};
+
+static int __init imx_mu_init(void)
+{
+ return platform_driver_register(&imx_mu_driver);
+}
+subsys_initcall(imx_mu_init);
diff --git a/include/linux/imx_rpmsg.h b/include/linux/imx_rpmsg.h
new file mode 100644
index 0000000..2b0dca5
--- /dev/null
+++ b/include/linux/imx_rpmsg.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU Lesser General
+ * Public License. You may obtain a copy of the GNU Lesser General
+ * Public License Version 2.1 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/lgpl-license.html
+ * http://www.gnu.org/copyleft/lgpl.html
+ */
+
+/*
+ * @file linux/imx_rpmsg.h
+ *
+ * @brief Global header file for imx RPMSG
+ *
+ * @ingroup RPMSG
+ */
+#ifndef __LINUX_IMX_RPMSG_H__
+#define __LINUX_IMX_RPMSG_H__
+
+#include <linux/notifier.h>
+
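+/*
+ * MU <-> rpmsg glue:
+ * imx_mu_rpmsg_send() kicks the remote core with a 32-bit MU message.
+ * imx_mu_rpmsg_register_nb()/imx_mu_rpmsg_unregister_nb() (un)register a
+ * notifier that is invoked with the MU word received from the remote core.
+ */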
+int imx_mu_rpmsg_send(unsigned int vq_id);
+int imx_mu_rpmsg_register_nb(const char *name, struct notifier_block *nb);
+int imx_mu_rpmsg_unregister_nb(const char *name, struct notifier_block *nb);
+#endif /* __LINUX_IMX_RPMSG_H__ */
--
1.9.1
