uio: Add snapshot of MSM sharedmem driver

This is a snapshot of the MSM sharedmem driver as of msm-3.14
commit:

commit 149717c082aab81 ("uio: msm_sharedmem: Add custom mmap")

The following changes are included:
02d55287 uio: msm_sharedmem: Restrict debugfs write to root.
de961fc7 uio: msm_sharedmem: Return ENOMEM if the shared mem addr
	 is zero.
b974ce64 uio: msm_sharedmem: Add addtional information to debugfs
c46af547 uio: msm_sharedmem: Add support for dynamic shared memory
	 allocation

Change-Id: I49902f018bde1d59d41027b7e46268cc17231a3e
Signed-off-by: Nikhilesh Reddy <reddyn@codeaurora.org>
[riteshh@codeaurora.org: fixed trivial merge conflicts]
Signed-off-by: Ritesh Harjani <riteshh@codeaurora.org>
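
For reference, a minimal userspace sketch of how a client might map its
shared memory region through the resulting UIO device is shown below. The
device node name (/dev/uio0) and the 4096-byte length are illustrative
assumptions only; the actual node index depends on probe order and the
usable size is whatever the platform resource provides. The driver exports
the region as UIO map 0 (UIO_MEM_PHYS) and installs the custom
sharedmem_mmap() handler, so a plain open()/mmap() pair is sufficient:

  /*
   * Hypothetical userspace sketch (not part of the driver). Assumes the
   * client's UIO device registered as /dev/uio0 and that 4096 bytes does
   * not exceed the region set up in msm_sharedmem_probe().
   */
  #include <fcntl.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <sys/mman.h>
  #include <unistd.h>

  int main(void)
  {
  	size_t len = 4096;	/* must be <= info->mem[0].size */
  	void *shm;
  	int fd;

  	fd = open("/dev/uio0", O_RDWR | O_SYNC);
  	if (fd < 0) {
  		perror("open /dev/uio0");
  		return EXIT_FAILURE;
  	}

  	/* offset 0 selects UIO map index 0, served by sharedmem_mmap() */
  	shm = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  	if (shm == MAP_FAILED) {
  		perror("mmap");
  		close(fd);
  		return EXIT_FAILURE;
  	}

  	/* ... exchange data with the remote client through shm ... */

  	munmap(shm, len);
  	close(fd);
  	return EXIT_SUCCESS;
  }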
diff --git a/drivers/uio/Kconfig b/drivers/uio/Kconfig
index 52c98ce..ee15d7d 100644
--- a/drivers/uio/Kconfig
+++ b/drivers/uio/Kconfig
@@ -155,4 +155,11 @@
 
 	  If you compile this as a module, it will be called uio_mf624.
 
+config UIO_MSM_SHAREDMEM
+	bool "MSM shared memory driver"
+	default n
+	help
+	  Provides clients with their respective allotted shared memory
+	  addresses, which are used as transport buffers.
+
 endif
diff --git a/drivers/uio/Makefile b/drivers/uio/Makefile
index 8560dad..2282a69 100644
--- a/drivers/uio/Makefile
+++ b/drivers/uio/Makefile
@@ -9,3 +9,4 @@
 obj-$(CONFIG_UIO_PRUSS)         += uio_pruss.o
 obj-$(CONFIG_UIO_MF624)         += uio_mf624.o
 obj-$(CONFIG_UIO_FSL_ELBC_GPCM)	+= uio_fsl_elbc_gpcm.o
+obj-$(CONFIG_UIO_MSM_SHAREDMEM) += msm_sharedmem/
diff --git a/drivers/uio/msm_sharedmem/Makefile b/drivers/uio/msm_sharedmem/Makefile
new file mode 100644
index 0000000..e6b8570
--- /dev/null
+++ b/drivers/uio/msm_sharedmem/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_UIO_MSM_SHAREDMEM) := \
+	msm_sharedmem.o \
+	remote_filesystem_access_v01.o \
+	sharedmem_qmi.o
diff --git a/drivers/uio/msm_sharedmem/msm_sharedmem.c b/drivers/uio/msm_sharedmem/msm_sharedmem.c
new file mode 100644
index 0000000..68f6211
--- /dev/null
+++ b/drivers/uio/msm_sharedmem/msm_sharedmem.c
@@ -0,0 +1,207 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define DRIVER_NAME "msm_sharedmem"
+#define pr_fmt(fmt) DRIVER_NAME ": %s: " fmt, __func__
+
+#include <linux/uio_driver.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/dma-mapping.h>
+#include "sharedmem_qmi.h"
+
+#define CLIENT_ID_PROP "qcom,client-id"
+
+static int uio_get_mem_index(struct uio_info *info, struct vm_area_struct *vma)
+{
+	if (vma->vm_pgoff >= MAX_UIO_MAPS)
+		return -EINVAL;
+
+	if (info->mem[vma->vm_pgoff].size == 0)
+		return -EINVAL;
+
+	return (int)vma->vm_pgoff;
+}
+
+static int sharedmem_mmap(struct uio_info *info, struct vm_area_struct *vma)
+{
+	int result;
+	struct uio_mem *mem;
+	int mem_index = uio_get_mem_index(info, vma);
+
+	if (mem_index < 0) {
+		pr_err("mem_index is invalid errno %d\n", mem_index);
+		return mem_index;
+	}
+
+	mem = info->mem + mem_index;
+
+	if (vma->vm_end - vma->vm_start > mem->size) {
+		pr_err("vm_end[%lu] - vm_start[%lu] [%lu] > mem->size[%lu]\n",
+			vma->vm_end, vma->vm_start,
+			(vma->vm_end - vma->vm_start), mem->size);
+		return -EINVAL;
+	}
+	pr_debug("Attempting to setup mmap.\n");
+
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+	result = remap_pfn_range(vma,
+				 vma->vm_start,
+				 mem->addr >> PAGE_SHIFT,
+				 vma->vm_end - vma->vm_start,
+				 vma->vm_page_prot);
+	if (result != 0)
+		pr_err("mmap Failed with errno %d\n", result);
+	else
+		pr_debug("mmap success\n");
+
+	return result;
+}
+
+static int msm_sharedmem_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct uio_info *info = NULL;
+	struct resource *clnt_res = NULL;
+	u32 client_id = ((u32)~0U);
+	u32 shared_mem_size = 0;
+	void *shared_mem = NULL;
+	phys_addr_t shared_mem_physical = 0;
+	bool is_addr_dynamic = false;
+	struct sharemem_qmi_entry qmi_entry;
+
+	/* Get the addresses from platform-data */
+	if (!pdev->dev.of_node) {
+		pr_err("Node not found\n");
+		ret = -ENODEV;
+		goto out;
+	}
+	clnt_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!clnt_res) {
+		pr_err("resource not found\n");
+		return -ENODEV;
+	}
+
+	ret = of_property_read_u32(pdev->dev.of_node, CLIENT_ID_PROP,
+				   &client_id);
+	if (ret) {
+		client_id = ((u32)~0U);
+		pr_warn("qcom,client-id property not found\n");
+	}
+
+	info = devm_kzalloc(&pdev->dev, sizeof(struct uio_info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	shared_mem_size = resource_size(clnt_res);
+	shared_mem_physical = clnt_res->start;
+
+	if (shared_mem_size == 0) {
+		pr_err("Shared memory size is zero\n");
+		return -EINVAL;
+	}
+
+	if (shared_mem_physical == 0) {
+		is_addr_dynamic = true;
+		shared_mem = dma_alloc_coherent(&pdev->dev, shared_mem_size,
+					&shared_mem_physical, GFP_KERNEL);
+		if (shared_mem == NULL) {
+			pr_err("Shared mem alloc client=%s, size=%u\n",
+				clnt_res->name, shared_mem_size);
+			return -ENOMEM;
+		}
+	}
+
+	/* Setup device */
+	info->mmap = sharedmem_mmap; /* Custom mmap function. */
+	info->name = clnt_res->name;
+	info->version = "1.0";
+	info->mem[0].addr = shared_mem_physical;
+	info->mem[0].size = shared_mem_size;
+	info->mem[0].memtype = UIO_MEM_PHYS;
+
+	ret = uio_register_device(&pdev->dev, info);
+	if (ret) {
+		pr_err("uio register failed ret=%d\n", ret);
+		goto out;
+	}
+	dev_set_drvdata(&pdev->dev, info);
+
+	qmi_entry.client_id = client_id;
+	qmi_entry.client_name = info->name;
+	qmi_entry.address = info->mem[0].addr;
+	qmi_entry.size = info->mem[0].size;
+	qmi_entry.is_addr_dynamic = is_addr_dynamic;
+
+	sharedmem_qmi_add_entry(&qmi_entry);
+	pr_info("Device created for client '%s'\n", clnt_res->name);
+out:
+	return ret;
+}
+
+static int msm_sharedmem_remove(struct platform_device *pdev)
+{
+	struct uio_info *info = dev_get_drvdata(&pdev->dev);
+
+	uio_unregister_device(info);
+
+	return 0;
+}
+
+static const struct of_device_id msm_sharedmem_of_match[] = {
+	{.compatible = "qcom,sharedmem-uio",},
+	{},
+};
+MODULE_DEVICE_TABLE(of, msm_sharedmem_of_match);
+
+static struct platform_driver msm_sharedmem_driver = {
+	.probe          = msm_sharedmem_probe,
+	.remove         = msm_sharedmem_remove,
+	.driver         = {
+		.name   = DRIVER_NAME,
+		.owner	= THIS_MODULE,
+		.of_match_table = msm_sharedmem_of_match,
+	},
+};
+
+
+static int __init msm_sharedmem_init(void)
+{
+	int result;
+
+	result = sharedmem_qmi_init();
+	if (result < 0) {
+		pr_err("sharedmem_qmi_init failed result = %d\n", result);
+		return result;
+	}
+
+	result = platform_driver_register(&msm_sharedmem_driver);
+	if (result != 0) {
+		pr_err("Platform driver registration failed\n");
+		return result;
+	}
+	return 0;
+}
+
+static void __exit msm_sharedmem_exit(void)
+{
+	platform_driver_unregister(&msm_sharedmem_driver);
+	sharedmem_qmi_exit();
+}
+
+module_init(msm_sharedmem_init);
+module_exit(msm_sharedmem_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/uio/msm_sharedmem/remote_filesystem_access_v01.c b/drivers/uio/msm_sharedmem/remote_filesystem_access_v01.c
new file mode 100644
index 0000000..b04c913
--- /dev/null
+++ b/drivers/uio/msm_sharedmem/remote_filesystem_access_v01.c
@@ -0,0 +1,80 @@
+ /* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License version 2 and
+  * only version 2 as published by the Free Software Foundation.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  * GNU General Public License for more details.
+  *
+  */
+#include <linux/qmi_encdec.h>
+
+#include <soc/qcom/msm_qmi_interface.h>
+
+#include "remote_filesystem_access_v01.h"
+
+struct elem_info rfsa_get_buff_addr_req_msg_v01_ei[] = {
+	{
+		.data_type   = QMI_UNSIGNED_4_BYTE,
+		.elem_len    = 1,
+		.elem_size   = sizeof(uint32_t),
+		.is_array    = NO_ARRAY,
+		.tlv_type    = 0x01,
+		.offset      = offsetof(struct rfsa_get_buff_addr_req_msg_v01,
+					   client_id),
+	},
+	{
+		.data_type   = QMI_UNSIGNED_4_BYTE,
+		.elem_len    = 1,
+		.elem_size   = sizeof(uint32_t),
+		.is_array    = NO_ARRAY,
+		.tlv_type    = 0x02,
+		.offset      = offsetof(struct rfsa_get_buff_addr_req_msg_v01,
+					   size),
+	},
+	{
+		.data_type   = QMI_EOTI,
+		.is_array    = NO_ARRAY,
+		.tlv_type    = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info rfsa_get_buff_addr_resp_msg_v01_ei[] = {
+	{
+		.data_type   = QMI_STRUCT,
+		.elem_len    = 1,
+		.elem_size   = sizeof(struct qmi_response_type_v01),
+		.is_array    = NO_ARRAY,
+		.tlv_type    = 0x02,
+		.offset      = offsetof(struct rfsa_get_buff_addr_resp_msg_v01,
+					   resp),
+		.ei_array    = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type   = QMI_OPT_FLAG,
+		.elem_len    = 1,
+		.elem_size   = sizeof(uint8_t),
+		.is_array    = NO_ARRAY,
+		.tlv_type    = 0x10,
+		.offset      = offsetof(struct rfsa_get_buff_addr_resp_msg_v01,
+					address_valid),
+	},
+	{
+		.data_type   = QMI_UNSIGNED_8_BYTE,
+		.elem_len    = 1,
+		.elem_size   = sizeof(uint64_t),
+		.is_array    = NO_ARRAY,
+		.tlv_type    = 0x10,
+		.offset      = offsetof(struct rfsa_get_buff_addr_resp_msg_v01,
+					address),
+	},
+	{
+		.data_type   = QMI_EOTI,
+		.is_array    = NO_ARRAY,
+		.tlv_type    = QMI_COMMON_TLV_TYPE,
+	},
+};
+
diff --git a/drivers/uio/msm_sharedmem/remote_filesystem_access_v01.h b/drivers/uio/msm_sharedmem/remote_filesystem_access_v01.h
new file mode 100644
index 0000000..7ea8ce6
--- /dev/null
+++ b/drivers/uio/msm_sharedmem/remote_filesystem_access_v01.h
@@ -0,0 +1,39 @@
+ /* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License version 2 and
+  * only version 2 as published by the Free Software Foundation.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  * GNU General Public License for more details.
+  *
+  */
+#ifndef __REMOTE_FILESYSTEM_ACCESS_V01_H__
+#define __REMOTE_FILESYSTEM_ACCESS_V01_H__
+
+#define RFSA_SERVICE_ID_V01 0x1C
+#define RFSA_SERVICE_VERS_V01 0x01
+
+#define QMI_RFSA_GET_BUFF_ADDR_REQ_MSG_V01 0x0023
+#define QMI_RFSA_GET_BUFF_ADDR_RESP_MSG_V01 0x0023
+
+#define RFSA_GET_BUFF_ADDR_REQ_MSG_MAX_LEN_V01 14
+#define RFSA_GET_BUFF_ADDR_RESP_MSG_MAX_LEN_V01 18
+
+extern struct elem_info rfsa_get_buff_addr_req_msg_v01_ei[];
+extern struct elem_info rfsa_get_buff_addr_resp_msg_v01_ei[];
+
+struct rfsa_get_buff_addr_req_msg_v01 {
+	uint32_t client_id;
+	uint32_t size;
+};
+
+struct rfsa_get_buff_addr_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	uint8_t address_valid;
+	uint64_t address;
+};
+
+#endif /* __REMOTE_FILESYSTEM_ACCESS_V01_H__ */
diff --git a/drivers/uio/msm_sharedmem/sharedmem_qmi.c b/drivers/uio/msm_sharedmem/sharedmem_qmi.c
new file mode 100644
index 0000000..48fb17e
--- /dev/null
+++ b/drivers/uio/msm_sharedmem/sharedmem_qmi.c
@@ -0,0 +1,442 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define DRIVER_NAME "msm_sharedmem"
+#define pr_fmt(fmt) DRIVER_NAME ": %s: " fmt, __func__
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/rwsem.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/debugfs.h>
+#include <soc/qcom/msm_qmi_interface.h>
+#include "sharedmem_qmi.h"
+#include "remote_filesystem_access_v01.h"
+
+#define RFSA_SERVICE_INSTANCE_NUM 1
+#define SHARED_ADDR_ENTRY_NAME_MAX_LEN 10
+
+struct shared_addr_entry {
+	u32 id;
+	u64 address;
+	u32 size;
+	u64 request_count;
+	bool is_addr_dynamic;
+	char name[SHARED_ADDR_ENTRY_NAME_MAX_LEN + 1];
+};
+
+struct shared_addr_list {
+	struct list_head node;
+	struct shared_addr_entry entry;
+};
+
+static struct shared_addr_list list;
+
+static struct qmi_handle *sharedmem_qmi_svc_handle;
+static void sharedmem_qmi_svc_recv_msg(struct work_struct *work);
+static DECLARE_DELAYED_WORK(work_recv_msg, sharedmem_qmi_svc_recv_msg);
+static struct workqueue_struct *sharedmem_qmi_svc_workqueue;
+static struct dentry *dir_ent;
+
+static u32 rfsa_count;
+static u32 rmts_count;
+
+static DECLARE_RWSEM(sharedmem_list_lock); /* declare list lock semaphore */
+
+static struct work_struct sharedmem_qmi_init_work;
+
+static struct msg_desc rfsa_get_buffer_addr_req_desc = {
+	.max_msg_len = RFSA_GET_BUFF_ADDR_REQ_MSG_MAX_LEN_V01,
+	.msg_id = QMI_RFSA_GET_BUFF_ADDR_REQ_MSG_V01,
+	.ei_array = rfsa_get_buff_addr_req_msg_v01_ei,
+};
+
+static struct msg_desc rfsa_get_buffer_addr_resp_desc = {
+	.max_msg_len = RFSA_GET_BUFF_ADDR_RESP_MSG_MAX_LEN_V01,
+	.msg_id = QMI_RFSA_GET_BUFF_ADDR_RESP_MSG_V01,
+	.ei_array = rfsa_get_buff_addr_resp_msg_v01_ei,
+};
+
+void sharedmem_qmi_add_entry(struct sharemem_qmi_entry *qmi_entry)
+{
+	struct shared_addr_list *list_entry;
+
+	list_entry = kzalloc(sizeof(*list_entry), GFP_KERNEL);
+
+	/* If we cannot add the entry, log the failure and bail. */
+	if (list_entry == NULL) {
+		pr_err("Alloc of new list entry failed\n");
+		return;
+	}
+
+	/* Copy as much of the client name as will fit in the entry. */
+	strlcpy(list_entry->entry.name, qmi_entry->client_name,
+		sizeof(list_entry->entry.name));
+
+	/* Setup the rest of the entry. */
+	list_entry->entry.id = qmi_entry->client_id;
+	list_entry->entry.address = qmi_entry->address;
+	list_entry->entry.size = qmi_entry->size;
+	list_entry->entry.is_addr_dynamic = qmi_entry->is_addr_dynamic;
+	list_entry->entry.request_count = 0;
+
+	down_write(&sharedmem_list_lock);
+	list_add_tail(&(list_entry->node), &(list.node));
+	up_write(&sharedmem_list_lock);
+	pr_debug("Added new entry to list\n");
+
+}
+
+static int get_buffer_for_client(u32 id, u32 size, u64 *address)
+{
+	int result = -ENOENT;
+	int client_found = 0;
+	struct list_head *curr_node;
+	struct shared_addr_list *list_entry;
+
+	if (size == 0)
+		return -ENOMEM;
+
+	down_read(&sharedmem_list_lock);
+
+	list_for_each(curr_node, &list.node) {
+		list_entry = list_entry(curr_node, struct shared_addr_list,
+					node);
+		if (list_entry->entry.id == id) {
+			if (list_entry->entry.size >= size) {
+				*address = list_entry->entry.address;
+				list_entry->entry.request_count++;
+				result = 0;
+			} else {
+				pr_err("Shared mem req too large for id=%u\n",
+					id);
+				result = -ENOMEM;
+			}
+			client_found = 1;
+			break;
+		}
+	}
+
+	up_read(&sharedmem_list_lock);
+
+	if (client_found != 1) {
+		pr_err("Unknown client id %u\n", id);
+		result = -ENOENT;
+	}
+	return result;
+}
+
+static int sharedmem_qmi_get_buffer(void *conn_h, void *req_handle, void *req)
+{
+	struct rfsa_get_buff_addr_req_msg_v01 *get_buffer_req;
+	struct rfsa_get_buff_addr_resp_msg_v01 get_buffer_resp;
+	int result;
+	u64 address = 0;
+
+	get_buffer_req = (struct rfsa_get_buff_addr_req_msg_v01 *)req;
+	pr_debug("req->client_id = 0x%X and req->size = %d\n",
+		get_buffer_req->client_id, get_buffer_req->size);
+
+	result = get_buffer_for_client(get_buffer_req->client_id,
+					get_buffer_req->size, &address);
+	if (result != 0)
+		return result;
+
+	if (address == 0) {
+		pr_err("Entry found for client id= 0x%X but address is zero\n",
+			get_buffer_req->client_id);
+		return -ENOMEM;
+	}
+
+	memset(&get_buffer_resp, 0, sizeof(get_buffer_resp));
+	get_buffer_resp.address_valid = 1;
+	get_buffer_resp.address = address;
+	get_buffer_resp.resp.result = QMI_RESULT_SUCCESS_V01;
+
+	result = qmi_send_resp_from_cb(sharedmem_qmi_svc_handle, conn_h,
+				req_handle,
+				&rfsa_get_buffer_addr_resp_desc,
+				&get_buffer_resp,
+				sizeof(get_buffer_resp));
+	return result;
+}
+
+
+static int sharedmem_qmi_connect_cb(struct qmi_handle *handle, void *conn_h)
+{
+	if (sharedmem_qmi_svc_handle != handle || !conn_h)
+		return -EINVAL;
+	return 0;
+}
+
+static int sharedmem_qmi_disconnect_cb(struct qmi_handle *handle, void *conn_h)
+{
+	if (sharedmem_qmi_svc_handle != handle || !conn_h)
+		return -EINVAL;
+	return 0;
+}
+
+static int sharedmem_qmi_req_desc_cb(unsigned int msg_id,
+				struct msg_desc **req_desc)
+{
+	int rc;
+
+	switch (msg_id) {
+	case QMI_RFSA_GET_BUFF_ADDR_REQ_MSG_V01:
+		*req_desc = &rfsa_get_buffer_addr_req_desc;
+		rc = sizeof(struct rfsa_get_buff_addr_req_msg_v01);
+		break;
+
+	default:
+		rc = -ENOTSUPP;
+		break;
+	}
+	return rc;
+}
+
+static int sharedmem_qmi_req_cb(struct qmi_handle *handle, void *conn_h,
+				void *req_handle, unsigned int msg_id,
+				void *req)
+{
+	int rc = -ENOTSUPP;
+
+	if (sharedmem_qmi_svc_handle != handle || !conn_h)
+		return -EINVAL;
+
+	if (msg_id == QMI_RFSA_GET_BUFF_ADDR_REQ_MSG_V01)
+		rc = sharedmem_qmi_get_buffer(conn_h, req_handle, req);
+
+	return rc;
+}
+
+#define DEBUG_BUF_SIZE (2048)
+static char *debug_buffer;
+static u32 debug_data_size;
+
+static ssize_t debug_read(struct file *file, char __user *buf,
+			  size_t count, loff_t *file_pos)
+{
+	return simple_read_from_buffer(buf, count, file_pos, debug_buffer,
+					debug_data_size);
+}
+
+static u32 fill_debug_info(char *buffer, u32 buffer_size)
+{
+	u32 size = 0;
+	struct list_head *curr_node;
+	struct shared_addr_list *list_entry;
+
+	memset(buffer, 0, buffer_size);
+	size += scnprintf(buffer + size, buffer_size - size, "\n");
+
+	down_read(&sharedmem_list_lock);
+	list_for_each(curr_node, &list.node) {
+		list_entry = list_entry(curr_node, struct shared_addr_list,
+					node);
+		size += scnprintf(buffer + size, buffer_size - size,
+				"Client_name: %s\n", list_entry->entry.name);
+		size += scnprintf(buffer + size, buffer_size - size,
+				"Client_id: 0x%08X\n", list_entry->entry.id);
+		size += scnprintf(buffer + size, buffer_size - size,
+				"Buffer Size: 0x%08X (%d)\n",
+				list_entry->entry.size,
+				list_entry->entry.size);
+		size += scnprintf(buffer + size, buffer_size - size,
+				"Address: 0x%016llX\n",
+				list_entry->entry.address);
+		size += scnprintf(buffer + size, buffer_size - size,
+				"Address Allocation: %s\n",
+				(list_entry->entry.is_addr_dynamic ?
+				"Dynamic" : "Static"));
+		size += scnprintf(buffer + size, buffer_size - size,
+				"Request count: %llu\n",
+				list_entry->entry.request_count);
+		size += scnprintf(buffer + size, buffer_size - size, "\n\n");
+	}
+	up_read(&sharedmem_list_lock);
+
+	size += scnprintf(buffer + size, buffer_size - size,
+			"RFSA server start count = %u\n", rfsa_count);
+	size += scnprintf(buffer + size, buffer_size - size,
+			"RMTS server start count = %u\n", rmts_count);
+
+	size += scnprintf(buffer + size, buffer_size - size, "\n");
+	return size;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+	u32 buffer_size;
+
+	if (debug_buffer != NULL)
+		return -EBUSY;
+	buffer_size = DEBUG_BUF_SIZE;
+	debug_buffer = kzalloc(buffer_size, GFP_KERNEL);
+	if (debug_buffer == NULL)
+		return -ENOMEM;
+	debug_data_size = fill_debug_info(debug_buffer, buffer_size);
+	return 0;
+}
+
+static int debug_close(struct inode *inode, struct file *file)
+{
+	kfree(debug_buffer);
+	debug_buffer = NULL;
+	debug_data_size = 0;
+	return 0;
+}
+
+static const struct file_operations debug_ops = {
+	.read = debug_read,
+	.open = debug_open,
+	.release = debug_close,
+};
+
+static int rfsa_increment(void *data, u64 val)
+{
+	if (rfsa_count != ~0)
+		rfsa_count++;
+	return 0;
+}
+
+static int rmts_increment(void *data, u64 val)
+{
+	if (rmts_count != ~0)
+		rmts_count++;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(rfsa_fops, NULL, rfsa_increment, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(rmts_fops, NULL, rmts_increment, "%llu\n");
+
+static void debugfs_init(void)
+{
+	struct dentry *f_ent;
+
+	dir_ent = debugfs_create_dir("rmt_storage", NULL);
+	if (IS_ERR(dir_ent)) {
+		pr_err("Failed to create debug_fs directory\n");
+		return;
+	}
+
+	f_ent = debugfs_create_file("info", 0400, dir_ent, NULL, &debug_ops);
+	if (IS_ERR(f_ent)) {
+		pr_err("Failed to create debug_fs info file\n");
+		return;
+	}
+
+	f_ent = debugfs_create_file("rfsa", 0200, dir_ent, NULL, &rfsa_fops);
+	if (IS_ERR(f_ent)) {
+		pr_err("Failed to create debug_fs rfsa file\n");
+		return;
+	}
+
+	f_ent = debugfs_create_file("rmts", 0200, dir_ent, NULL, &rmts_fops);
+	if (IS_ERR(f_ent)) {
+		pr_err("Failed to create debug_fs rmts file\n");
+		return;
+	}
+}
+
+static void debugfs_exit(void)
+{
+	debugfs_remove_recursive(dir_ent);
+}
+
+static void sharedmem_qmi_svc_recv_msg(struct work_struct *work)
+{
+	int rc;
+
+	do {
+		pr_debug("Notified about a Receive Event\n");
+	} while ((rc = qmi_recv_msg(sharedmem_qmi_svc_handle)) == 0);
+
+	if (rc != -ENOMSG)
+		pr_err("Error receiving message\n");
+}
+
+static void sharedmem_qmi_notify(struct qmi_handle *handle,
+		enum qmi_event_type event, void *priv)
+{
+	switch (event) {
+	case QMI_RECV_MSG:
+		queue_delayed_work(sharedmem_qmi_svc_workqueue,
+				   &work_recv_msg, 0);
+		break;
+	default:
+		break;
+	}
+}
+
+static struct qmi_svc_ops_options sharedmem_qmi_ops_options = {
+	.version = 1,
+	.service_id = RFSA_SERVICE_ID_V01,
+	.service_vers = RFSA_SERVICE_VERS_V01,
+	.service_ins = RFSA_SERVICE_INSTANCE_NUM,
+	.connect_cb = sharedmem_qmi_connect_cb,
+	.disconnect_cb = sharedmem_qmi_disconnect_cb,
+	.req_desc_cb = sharedmem_qmi_req_desc_cb,
+	.req_cb = sharedmem_qmi_req_cb,
+};
+
+
+static void sharedmem_register_qmi(void)
+{
+	int rc;
+
+	sharedmem_qmi_svc_workqueue =
+		create_singlethread_workqueue("sharedmem_qmi_work");
+	if (!sharedmem_qmi_svc_workqueue)
+		return;
+
+	sharedmem_qmi_svc_handle = qmi_handle_create(sharedmem_qmi_notify,
+							NULL);
+	if (!sharedmem_qmi_svc_handle) {
+		pr_err("Creating sharedmem_qmi qmi handle failed\n");
+		destroy_workqueue(sharedmem_qmi_svc_workqueue);
+		return;
+	}
+	rc = qmi_svc_register(sharedmem_qmi_svc_handle,
+				&sharedmem_qmi_ops_options);
+	if (rc < 0) {
+		pr_err("Registering sharedmem_qmi failed %d\n", rc);
+		qmi_handle_destroy(sharedmem_qmi_svc_handle);
+		destroy_workqueue(sharedmem_qmi_svc_workqueue);
+		return;
+	}
+	pr_info("qmi init successful\n");
+}
+
+static void sharedmem_qmi_init_worker(struct work_struct *work)
+{
+	sharedmem_register_qmi();
+	debugfs_init();
+}
+
+int sharedmem_qmi_init(void)
+{
+	INIT_LIST_HEAD(&list.node);
+	INIT_WORK(&sharedmem_qmi_init_work, sharedmem_qmi_init_worker);
+	schedule_work(&sharedmem_qmi_init_work);
+	return 0;
+}
+
+void sharedmem_qmi_exit(void)
+{
+	qmi_svc_unregister(sharedmem_qmi_svc_handle);
+	flush_workqueue(sharedmem_qmi_svc_workqueue);
+	qmi_handle_destroy(sharedmem_qmi_svc_handle);
+	destroy_workqueue(sharedmem_qmi_svc_workqueue);
+	debugfs_exit();
+}
diff --git a/drivers/uio/msm_sharedmem/sharedmem_qmi.h b/drivers/uio/msm_sharedmem/sharedmem_qmi.h
new file mode 100644
index 0000000..7353916
--- /dev/null
+++ b/drivers/uio/msm_sharedmem/sharedmem_qmi.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __SHAREDMEM_QMI_H__
+#define __SHAREDMEM_QMI_H__
+
+#include <linux/module.h>
+
+struct sharemem_qmi_entry {
+	const char *client_name;
+	u32 client_id;
+	u64 address;
+	u32 size;
+	bool is_addr_dynamic;
+};
+
+int sharedmem_qmi_init(void);
+
+void sharedmem_qmi_exit(void);
+
+void sharedmem_qmi_add_entry(struct sharemem_qmi_entry *qmi_entry);
+
+#endif /* __SHAREDMEM_QMI_H__ */