msm: rpm-smd: Support for buffering sleep set requests

Buffer the RPM sleep set requests and flush them only when entering RPM
assisted power collapse. This reduces the number of sleep set requests
sent to the RPM by aggregating them between sleep cycles.

Change-Id: I9d85f68806c48ee4f5660e28efb07ff47d438d1d
Signed-off-by: Mahesh Sivasubramanian <msivasub@codeaurora.org>
diff --git a/arch/arm/mach-msm/lpm_levels.c b/arch/arm/mach-msm/lpm_levels.c
index dec62f0..9bafa33 100644
--- a/arch/arm/mach-msm/lpm_levels.c
+++ b/arch/arm/mach-msm/lpm_levels.c
@@ -66,12 +66,6 @@
 	atomic_notifier_call_chain(&__get_cpu_var(lpm_notify_head),
 		MSM_LPM_STATE_ENTER, &sleep_data);
 
-	ret = msm_rpm_enter_sleep();
-	if (ret) {
-		pr_warn("%s(): RPM failed to enter sleep err:%d\n",
-				__func__, ret);
-		goto bail;
-	}
 	if (from_idle)
 		debug_mask = msm_lpm_lvl_dbg_msk &
 				MSM_LPM_LVL_DBG_IDLE_LIMITS;
@@ -88,6 +82,15 @@
 				l->vdd_dig_upper_bound);
 
 	ret = msm_lpmrs_enter_sleep(sclk_count, l, from_idle, notify_rpm);
+	if (ret) {
+		pr_warn("%s() LPM resources failed to enter sleep\n",
+				__func__);
+		goto bail;
+	}
+	ret = msm_rpm_enter_sleep(debug_mask);
+	if (ret)
+		pr_warn("%s(): RPM failed to enter sleep err:%d\n",
+				__func__, ret);
 bail:
 	return ret;
 }
@@ -95,9 +98,10 @@
 static void msm_lpm_exit_sleep(void *limits, bool from_idle,
 		bool notify_rpm, bool collapsed)
 {
-	msm_rpm_exit_sleep();
 	msm_lpmrs_exit_sleep((struct msm_rpmrs_limits *)limits,
 				from_idle, notify_rpm, collapsed);
+	msm_rpm_exit_sleep();
 	atomic_notifier_call_chain(&__get_cpu_var(lpm_notify_head),
 			MSM_LPM_STATE_EXIT, NULL);
 }
diff --git a/arch/arm/mach-msm/rpm-notifier.h b/arch/arm/mach-msm/rpm-notifier.h
index b9815a5..16de77e 100644
--- a/arch/arm/mach-msm/rpm-notifier.h
+++ b/arch/arm/mach-msm/rpm-notifier.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -40,8 +40,12 @@
 
 /**
  * msm_rpm_enter_sleep - Notify RPM driver to prepare for entering sleep
+ *
+ * @print: flag to print the contents of the sleep buffer.
+ *
+ * Return: 0 on success, errno on failure.
  */
-int msm_rpm_enter_sleep(void);
+int msm_rpm_enter_sleep(bool print);
 
 /**
  * msm_rpm_exit_sleep - Notify RPM driver about resuming from power collapse
diff --git a/arch/arm/mach-msm/rpm-smd.c b/arch/arm/mach-msm/rpm-smd.c
index 4295fd4..b84ade9 100644
--- a/arch/arm/mach-msm/rpm-smd.c
+++ b/arch/arm/mach-msm/rpm-smd.c
@@ -32,6 +32,7 @@
 #include <linux/platform_device.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
+#include <linux/rbtree.h>
 #include <mach/socinfo.h>
 #include <mach/msm_smd.h>
 #include <mach/rpm-smd.h>
@@ -62,6 +63,9 @@
 };
 
 #define DEFAULT_BUFFER_SIZE 256
+#define DEBUG_PRINT_BUFFER_SIZE 512
+#define MAX_SLEEP_BUFFER 128
+
 #define GFP_FLAG(noirq) (noirq ? GFP_ATOMIC : GFP_KERNEL)
 #define INV_RSC "resource does not exist"
 #define ERR "err\0"
@@ -106,6 +110,11 @@
 	uint32_t data_len;
 };
 
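+/*
+ * A key-value pair as packed into a request payload: the key, then the
+ * size of the data bytes that immediately follow this header.
+ */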
+struct kvp {
+	unsigned int k;
+	unsigned int s;
+};
+
 struct msm_rpm_kvp_data {
 	uint32_t key;
 	uint32_t nbytes; /* number of bytes */
@@ -113,6 +122,301 @@
 	bool valid;
 };
 
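+/*
+ * A buffered sleep set request. Buffered requests are kept in an
+ * rb-tree keyed by (resource type, resource id); @valid marks entries
+ * that have changed since they were last flushed to the RPM.
+ */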
+struct slp_buf {
+	struct rb_node node;
+	char ubuf[MAX_SLEEP_BUFFER];
+	char *buf;
+	bool valid;
+};
+
+static struct rb_root tr_root = RB_ROOT;
+
+static int msm_rpm_send_smd_buffer(char *buf, int size, bool noirq);
+static uint32_t msm_rpm_get_next_msg_id(void);
+
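+/*
+ * Each buffered request is a flat byte buffer laid out as a
+ * struct rpm_request_header, then a struct rpm_message_header, then
+ * the packed kvps. The accessors below pick fields out of that buffer.
+ */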
+static inline unsigned int get_rsc_type(char *buf)
+{
+	struct rpm_message_header *h;
+	h = (struct rpm_message_header *)
+		(buf + sizeof(struct rpm_request_header));
+	return h->resource_type;
+}
+
+static inline unsigned int get_rsc_id(char *buf)
+{
+	struct rpm_message_header *h;
+	h = (struct rpm_message_header *)
+		(buf + sizeof(struct rpm_request_header));
+	return h->resource_id;
+}
+
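+/*
+ * These accessors are macros rather than functions so that they expand
+ * to struct member accesses and can also be used as lvalues, e.g.
+ * get_msg_id(buf) = msm_rpm_get_next_msg_id().
+ */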
+#define get_data_len(buf) \
+	(((struct rpm_message_header *) \
+	  (buf + sizeof(struct rpm_request_header)))->data_len)
+
+#define get_req_len(buf) \
+	(((struct rpm_request_header *)(buf))->request_len)
+
+#define get_msg_id(buf) \
+	(((struct rpm_message_header *) \
+	  (buf + sizeof(struct rpm_request_header)))->msg_id)
+
+static inline int get_buf_len(char *buf)
+{
+	return get_req_len(buf) + sizeof(struct rpm_request_header);
+}
+
+static inline struct kvp *get_first_kvp(char *buf)
+{
+	return (struct kvp *)(buf + sizeof(struct rpm_request_header)
+			+ sizeof(struct rpm_message_header));
+}
+
+static inline struct kvp *get_next_kvp(struct kvp *k)
+{
+	return (struct kvp *)((void *)k + sizeof(*k) + k->s);
+}
+
+static inline void *get_data(struct kvp *k)
+{
+	return (void *)k + sizeof(*k);
+}
+
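+/*
+ * Remove kvp @d from request @msg by copying the kvps that follow it
+ * down over it and shrinking the data and request lengths to match.
+ */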
+static void delete_kvp(char *msg, struct kvp *d)
+{
+	struct kvp *n;
+	int dec, size;
+
+	n = get_next_kvp(d);
+	dec = (void *)n - (void *)d;
+	size = get_data_len(msg) - ((void *)n - (void *)get_first_kvp(msg));
+
+	memcpy((void *)d, (void *)n, size);
+
+	get_data_len(msg) -= dec;
+	get_req_len(msg) -= dec;
+}
+
+static inline void update_kvp_data(struct kvp *dest, struct kvp *src)
+{
+	memcpy(get_data(dest), get_data(src), src->s);
+}
+
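+/*
+ * Append kvp @n to the end of request @buf, growing the data and
+ * request lengths. The sleep buffer is fixed-size, so overflowing it
+ * is fatal.
+ */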
+static void add_kvp(char *buf, struct kvp *n)
+{
+	int inc = sizeof(*n) + n->s;
+	BUG_ON((get_req_len(buf) + inc) > MAX_SLEEP_BUFFER);
+
+	memcpy(buf + get_buf_len(buf), n, inc);
+
+	get_data_len(buf) += inc;
+	get_req_len(buf) += inc;
+}
+
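+/*
+ * Find the buffered request with the same (resource type, resource id)
+ * as @slp, if any.
+ */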
+static struct slp_buf *tr_search(struct rb_root *root, char *slp)
+{
+	unsigned int type = get_rsc_type(slp);
+	unsigned int id = get_rsc_id(slp);
+
+	struct rb_node *node = root->rb_node;
+
+	while (node) {
+		struct slp_buf *cur = rb_entry(node, struct slp_buf, node);
+		unsigned int ctype = get_rsc_type(cur->buf);
+		unsigned int cid = get_rsc_id(cur->buf);
+
+		if (type < ctype)
+			node = node->rb_left;
+		else if (type > ctype)
+			node = node->rb_right;
+		else if (id < cid)
+			node = node->rb_left;
+		else if (id > cid)
+			node = node->rb_right;
+		else
+			return cur;
+	}
+	return NULL;
+}
+
+static int tr_insert(struct rb_root *root, struct slp_buf *slp)
+{
+	unsigned int type = get_rsc_type(slp->buf);
+	unsigned int id = get_rsc_id(slp->buf);
+
+	struct rb_node **node = &(root->rb_node), *parent = NULL;
+
+	while (*node) {
+		struct slp_buf *curr = rb_entry(*node, struct slp_buf, node);
+		unsigned int ctype = get_rsc_type(curr->buf);
+		unsigned int cid = get_rsc_id(curr->buf);
+
+		parent = *node;
+
+		if (type < ctype)
+			node = &((*node)->rb_left);
+		else if (type > ctype)
+			node = &((*node)->rb_right);
+		else if (id < cid)
+			node = &((*node)->rb_left);
+		else if (id > cid)
+			node = &((*node)->rb_right);
+		else
+			return -EINVAL;
+	}
+
+	rb_link_node(&slp->node, parent, node);
+	rb_insert_color(&slp->node, root);
+	slp->valid = true;
+	return 0;
+}
+
+#define for_each_kvp(buf, k) \
+	for (k = (struct kvp *)get_first_kvp(buf); \
+		((void *)k - (void *)get_first_kvp(buf)) < get_data_len(buf);\
+		k = get_next_kvp(k))
+
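+/*
+ * Merge the new request @buf into the buffered request @s: matching
+ * keys are updated in place (re-added when the payload size changed)
+ * and keys not yet present are appended. @s is marked dirty only when
+ * something actually changed, so unchanged requests are not re-sent.
+ */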
+static void tr_update(struct slp_buf *s, char *buf)
+{
+	struct kvp *e, *n;
+
+	for_each_kvp(buf, n) {
+		bool found = false;
+
+		for_each_kvp(s->buf, e) {
+			if (n->k == e->k) {
+				found = true;
+				if (n->s == e->s) {
+					void *e_data = get_data(e);
+					void *n_data = get_data(n);
+					if (memcmp(e_data, n_data, n->s)) {
+						update_kvp_data(e, n);
+						s->valid = true;
+					}
+				} else {
+					delete_kvp(s->buf, e);
+					add_kvp(s->buf, n);
+					s->valid = true;
+				}
+				break;
+			}
+		}
+		/* Key not present in the buffered request; append it */
+		if (!found) {
+			add_kvp(s->buf, n);
+			s->valid = true;
+		}
+	}
+}
+
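+/*
+ * Stash a sleep set request in the local rb-tree instead of sending it
+ * to the RPM, merging it with any previously buffered request for the
+ * same resource. The buffer is flushed when entering RPM assisted
+ * power collapse, from msm_rpm_enter_sleep().
+ */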
+int msm_rpm_smd_buffer_request(char *buf, int size, gfp_t flag)
+{
+	struct slp_buf *slp;
+	static DEFINE_SPINLOCK(slp_buffer_lock);
+	unsigned long flags;
+
+	if (size > MAX_SLEEP_BUFFER)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&slp_buffer_lock, flags);
+	slp = tr_search(&tr_root, buf);
+
+	if (!slp) {
+		slp = kzalloc(sizeof(struct slp_buf), GFP_ATOMIC);
+		if (!slp) {
+			spin_unlock_irqrestore(&slp_buffer_lock, flags);
+			return -ENOMEM;
+		}
+		slp->buf = PTR_ALIGN(&slp->ubuf[0], sizeof(u32));
+		memcpy(slp->buf, buf, size);
+		if (tr_insert(&tr_root, slp))
+			pr_err("%s(): Error updating sleep request\n",
+					__func__);
+	} else {
+		/* handle unsent requests */
+		tr_update(slp, buf);
+	}
+
+	spin_unlock_irqrestore(&slp_buffer_lock, flags);
+
+	return 0;
+}
+
+static void msm_rpm_print_sleep_buffer(struct slp_buf *s)
+{
+	char buf[DEBUG_PRINT_BUFFER_SIZE] = {0};
+	int pos;
+	int buflen = DEBUG_PRINT_BUFFER_SIZE;
+	char ch[5] = {0};
+	u32 type;
+	struct kvp *e;
+
+	if (!s || !s->valid)
+		return;
+
+	type = get_rsc_type(s->buf);
+	memcpy(ch, &type, sizeof(u32));
+
+	pos = scnprintf(buf, buflen,
+			"Sleep request type = 0x%08x(%s)",
+			get_rsc_type(s->buf), ch);
+	pos += scnprintf(buf + pos, buflen - pos, " id = 0%x",
+			get_rsc_id(s->buf));
+	for_each_kvp(s->buf, e) {
+		int i;
+		char *data = get_data(e);
+
+		memcpy(ch, &e->k, sizeof(u32));
+
+		pos += scnprintf(buf + pos, buflen - pos,
+				"\n\t\tkey = 0x%08x(%s)",
+				e->k, ch);
+		pos += scnprintf(buf + pos, buflen - pos,
+				" sz= %d data =", e->s);
+
+		for (i = 0; i < e->s; i++)
+			pos += scnprintf(buf + pos, buflen - pos,
+					" 0x%02X", data[i]);
+	}
+	pos += scnprintf(buf + pos, buflen - pos, "\n");
+	printk("%s", buf);
+}
+
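+/*
+ * Send every dirty buffered sleep set request over SMD. ACKs are
+ * deliberately not waited for here; they are processed after waking up
+ * from sleep.
+ */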
+static int msm_rpm_flush_requests(bool print)
+{
+	struct rb_node *t;
+	int ret;
+
+	for (t = rb_first(&tr_root); t; t = rb_next(t)) {
+
+		struct slp_buf *s = rb_entry(t, struct slp_buf, node);
+
+		if (!s->valid)
+			continue;
+
+		if (print)
+			msm_rpm_print_sleep_buffer(s);
+
+		get_msg_id(s->buf) = msm_rpm_get_next_msg_id();
+		ret = msm_rpm_send_smd_buffer(s->buf,
+				get_buf_len(s->buf), true);
+		/* By not adding the message to a wait list we can reduce
+		 * the latency involved in waiting for an ACK from the RPM.
+		 * The ACK messages will be processed when we wake up from
+		 * sleep, but that processing should be minimal:
+		 * msm_rpm_wait_for_ack_noirq(get_msg_id(s->buf));
+		 */
+
+		WARN_ON(ret != get_buf_len(s->buf));
+
+		trace_rpm_send_message(true, MSM_RPM_CTX_SLEEP_SET,
+				get_rsc_type(s->buf),
+				get_rsc_id(s->buf),
+				get_msg_id(s->buf));
+
+		s->valid = false;
+	}
+	return 0;
+}
+
 static atomic_t msm_rpm_msg_id = ATOMIC_INIT(0);
 
 static struct msm_rpm_driver_data msm_rpm_data;
@@ -450,7 +754,12 @@
 		}
 		elem = NULL;
 	}
-	WARN_ON(!elem);
+	/* Special case where the sleep driver doesn't wait for ACKs.
+	 * This reduces the latency involved in entering RPM assisted
+	 * power collapse.
+	 */
+	if (!elem)
+		trace_rpm_ack_recd(0, msg_id);
 
 	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
 }
@@ -544,8 +853,6 @@
 	}
 }
 
-#define DEBUG_PRINT_BUFFER_SIZE 512
-
 static void msm_rpm_log_request(struct msm_rpm_request *cdata)
 {
 	char buf[DEBUG_PRINT_BUFFER_SIZE];
@@ -677,13 +984,42 @@
 	pos += scnprintf(buf + pos, buflen - pos, "\n");
 	printk(buf);
 }
+
+static int msm_rpm_send_smd_buffer(char *buf, int size, bool noirq)
+{
+	unsigned long flags;
+	int ret;
 
+	spin_lock_irqsave(&msm_rpm_data.smd_lock_write, flags);
+
+	while ((ret = smd_write_avail(msm_rpm_data.ch_info)) < size) {
+		if (ret < 0)
+			break;
+		if (!noirq) {
+			spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write,
+					flags);
+			cpu_relax();
+			spin_lock_irqsave(&msm_rpm_data.smd_lock_write, flags);
+		} else
+			udelay(5);
+	}
+
+	if (ret < 0) {
+		pr_err("%s(): SMD not initialized\n", __func__);
+		spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags);
+		return ret;
+	}
+
+	ret = smd_write(msm_rpm_data.ch_info, buf, size);
+	spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags);
+	return ret;
+}
+
 static int msm_rpm_send_data(struct msm_rpm_request *cdata,
 		int msg_type, bool noirq)
 {
 	uint8_t *tmpbuff;
 	int i, ret, msg_size;
-	unsigned long flags;
 
 	int req_hdr_sz, msg_hdr_sz;
 
@@ -695,8 +1031,6 @@
 
 	cdata->req_hdr.service_type = msm_rpm_request_service[msg_type];
 
-	cdata->msg_hdr.msg_id = msm_rpm_get_next_msg_id();
-
 	cdata->req_hdr.request_len = cdata->msg_hdr.data_len + msg_hdr_sz;
 	msg_size = cdata->req_hdr.request_len + req_hdr_sz;
 
@@ -714,8 +1048,6 @@
 
 	tmpbuff = cdata->buf;
 
-	memcpy(tmpbuff, &cdata->req_hdr, req_hdr_sz + msg_hdr_sz);
-
 	tmpbuff += req_hdr_sz + msg_hdr_sz;
 
 	for (i = 0; (i < cdata->write_idx); i++) {
@@ -740,6 +1072,17 @@
 
 	}
 
+	memcpy(cdata->buf, &cdata->req_hdr, req_hdr_sz + msg_hdr_sz);
+
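+	/*
+	 * Sleep set requests are not sent immediately; they are buffered
+	 * locally and flushed only when entering RPM assisted power
+	 * collapse.
+	 */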
+	if ((cdata->msg_hdr.set == MSM_RPM_CTX_SLEEP_SET) &&
+		!msm_rpm_smd_buffer_request(cdata->buf, msg_size,
+			GFP_FLAG(noirq)))
+		return 1;
+
+	cdata->msg_hdr.msg_id = msm_rpm_get_next_msg_id();
+
+	memcpy(cdata->buf + req_hdr_sz, &cdata->msg_hdr, msg_hdr_sz);
+
 	if (msm_rpm_debug_mask
 	    & (MSM_RPM_LOG_REQUEST_PRETTY | MSM_RPM_LOG_REQUEST_RAW))
 		msm_rpm_log_request(cdata);
@@ -755,29 +1098,7 @@
 
 	msm_rpm_add_wait_list(cdata->msg_hdr.msg_id);
 
-	spin_lock_irqsave(&msm_rpm_data.smd_lock_write, flags);
-
-	ret = smd_write_avail(msm_rpm_data.ch_info);
-
-	if (ret < 0) {
-		pr_err("%s(): SMD not initialized\n", __func__);
-		spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags);
-		return 0;
-	}
-
-	while ((ret < msg_size)) {
-		if (!noirq) {
-			spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write,
-					flags);
-			cpu_relax();
-			spin_lock_irqsave(&msm_rpm_data.smd_lock_write, flags);
-		} else
-			udelay(5);
-		ret = smd_write_avail(msm_rpm_data.ch_info);
-	}
-
-	ret = smd_write(msm_rpm_data.ch_info, &cdata->buf[0], msg_size);
-	spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags);
+	ret = msm_rpm_send_smd_buffer(&cdata->buf[0], msg_size, noirq);
 
 	if (ret == msg_size) {
 		trace_rpm_send_message(noirq, cdata->msg_hdr.set,
@@ -958,11 +1279,13 @@
  * During power collapse, the rpm driver disables the SMD interrupts to make
  * sure that the interrupt doesn't wake us from sleep.
  */
-int msm_rpm_enter_sleep(void)
+int msm_rpm_enter_sleep(bool print)
 {
 	if (standalone)
 		return 0;
 
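+	/* Flush buffered sleep set requests before masking the interrupt */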
+	msm_rpm_flush_requests(print);
+
 	return smd_mask_receive_interrupt(msm_rpm_data.ch_info, true);
 }
 EXPORT_SYMBOL(msm_rpm_enter_sleep);