Merge "soc: qcom: glink: Validate ctx before using" into msm-4.9
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index 7b93a00..a259901 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -30,6 +30,7 @@
 #include "glink_private.h"
 #include "glink_xprt_if.h"
 
+#define GLINK_CTX_CANARY 0x58544324 /* "$CTX" */
 /* Number of internal IPC Logging log pages */
 #define NUM_LOG_PAGES	10
 #define GLINK_PM_QOS_HOLDOFF_MS		10
@@ -38,6 +39,9 @@
 #define GLINK_QOS_DEF_MTU		2048
 
 #define GLINK_KTHREAD_PRIO 1
+
+/* Protects channel_ctx->magic_number; compile-time init via DEFINE_RWLOCK */
+static DEFINE_RWLOCK(magic_lock);
 /**
  * struct glink_qos_priority_bin - Packet Scheduler's priority bucket
  * @max_rate_kBps:	Maximum rate supported by the priority bucket.
@@ -309,6 +312,7 @@ struct channel_ctx {
 	unsigned long req_rate_kBps;
 	uint32_t tx_intent_cnt;
 	uint32_t tx_cnt;
+	uint32_t magic_number;
 };
 
 static struct glink_core_if core_impl;
@@ -437,6 +441,55 @@ static void glink_core_deinit_xprt_qos_cfg(
 #define GLINK_GET_CH_TX_STATE(ctx) \
 		((ctx)->tx_intent_cnt || (ctx)->tx_cnt)
 
+/**
+ * glink_get_ch_ctx() - Validate a client handle and pin the channel context
+ * @ctx:	Channel context (client handle) to validate.
+ *
+ * Checks the canary under magic_lock so stale or bogus handles are
+ * rejected, then takes a ch_state_lhb2 reference which the caller must
+ * drop with glink_put_ch_ctx().
+ *
+ * Return: 0 on success, -EINVAL if @ctx is NULL or not a live channel.
+ */
+static int glink_get_ch_ctx(struct channel_ctx *ctx)
+{
+	unsigned long flags;
+
+	if (!ctx)
+		return -EINVAL;
+	read_lock_irqsave(&magic_lock, flags);
+	if (ctx->magic_number != GLINK_CTX_CANARY) {
+		read_unlock_irqrestore(&magic_lock, flags);
+		return -EINVAL;
+	}
+	rwref_get(&ctx->ch_state_lhb2);
+	read_unlock_irqrestore(&magic_lock, flags);
+	return 0;
+}
+
+/**
+ * glink_put_ch_ctx() - Drop the reference taken by glink_get_ch_ctx()
+ * @ctx:		Channel context to release.
+ * @update_magic:	true to also clear the canary (final put from
+ *			glink_close()) so later client calls fail validation.
+ *
+ * Return: 0 always.
+ */
+static int glink_put_ch_ctx(struct channel_ctx *ctx, bool update_magic)
+{
+	unsigned long flags;
+
+	if (!update_magic) {
+		rwref_put(&ctx->ch_state_lhb2);
+		return 0;
+	}
+	write_lock_irqsave(&magic_lock, flags);
+	ctx->magic_number = 0;
+	rwref_put(&ctx->ch_state_lhb2);
+	write_unlock_irqrestore(&magic_lock, flags);
+	return 0;
+}
+
 /**
  * glink_ssr() - Clean up locally for SSR by simulating remote close
  * @subsystem:	The name of the subsystem being restarted
@@ -2583,7 +2618,7 @@ void *glink_open(const struct glink_open_config *cfg)
 
 	GLINK_INFO_CH(ctx, "%s: Created channel, sent OPEN command. ctx %p\n",
 			__func__, ctx);
-
+	ctx->magic_number = GLINK_CTX_CANARY;
 	return ctx;
 }
 EXPORT_SYMBOL(glink_open);
@@ -2683,15 +2718,19 @@ int glink_close(void *handle)
 	unsigned long flags;
 	bool is_empty = false;
 
-	if (!ctx)
-		return -EINVAL;
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
 
 	GLINK_INFO_CH(ctx, "%s: Closing channel, ctx: %p\n", __func__, ctx);
-	if (ctx->local_open_state == GLINK_CHANNEL_CLOSED)
+	if (ctx->local_open_state == GLINK_CHANNEL_CLOSED) {
+		glink_put_ch_ctx(ctx, false);
 		return 0;
+	}
 
 	if (ctx->local_open_state == GLINK_CHANNEL_CLOSING) {
 		/* close already pending */
+		glink_put_ch_ctx(ctx, false);
 		return -EBUSY;
 	}
 
@@ -2756,6 +2795,7 @@ relock: xprt_ctx = ctx->transport_ptr;
 
 	rwref_put(&ctx->ch_state_lhb2);
 	rwref_read_put(&xprt_ctx->xprt_state_lhb0);
+	glink_put_ch_ctx(ctx, true);
 	return ret;
 }
 EXPORT_SYMBOL(glink_close);
@@ -2814,29 +2854,30 @@ static int glink_tx_common(void *handle, void *pkt_priv,
 	if (!size)
 		return -EINVAL;
 
-	if (!ctx)
-		return -EINVAL;
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
 
 	rwref_read_get_atomic(&ctx->ch_state_lhb2, is_atomic);
 	if (!(vbuf_provider || pbuf_provider)) {
-		rwref_read_put(&ctx->ch_state_lhb2);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto glink_tx_common_err;
 	}
 
 	if (!ch_is_fully_opened(ctx)) {
-		rwref_read_put(&ctx->ch_state_lhb2);
-		return -EBUSY;
+		ret = -EBUSY;
+		goto glink_tx_common_err;
 	}
 
 	if (size > GLINK_MAX_PKT_SIZE) {
-		rwref_read_put(&ctx->ch_state_lhb2);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto glink_tx_common_err;
 	}
 
 	if (unlikely(tx_flags & GLINK_TX_TRACER_PKT)) {
 		if (!(ctx->transport_ptr->capabilities & GCAP_TRACER_PKT)) {
-			rwref_read_put(&ctx->ch_state_lhb2);
-			return -EOPNOTSUPP;
+			ret = -EOPNOTSUPP;
+			goto glink_tx_common_err;
 		}
 		tracer_pkt_log_event(data, GLINK_CORE_TX);
 	}
@@ -2848,16 +2889,16 @@ static int glink_tx_common(void *handle, void *pkt_priv,
 			GLINK_ERR_CH(ctx,
 				"%s: R[%u]:%zu Intent not present for lcid\n",
 				__func__, riid, size);
-			rwref_read_put(&ctx->ch_state_lhb2);
-			return -EAGAIN;
+			ret = -EAGAIN;
+			goto glink_tx_common_err;
 		}
 		if (is_atomic && !(ctx->transport_ptr->capabilities &
 					  GCAP_AUTO_QUEUE_RX_INT)) {
 			GLINK_ERR_CH(ctx,
 				"%s: Cannot request intent in atomic context\n",
 				__func__);
-			rwref_read_put(&ctx->ch_state_lhb2);
-			return -EINVAL;
+			ret = -EINVAL;
+			goto glink_tx_common_err;
 		}
 
 		/* request intent of correct size */
@@ -2867,20 +2908,18 @@ static int glink_tx_common(void *handle, void *pkt_priv,
 		if (ret) {
 			GLINK_ERR_CH(ctx, "%s: Request intent failed %d\n",
 					__func__, ret);
-			rwref_read_put(&ctx->ch_state_lhb2);
-			return ret;
+			goto glink_tx_common_err;
 		}
 
 		while (ch_pop_remote_rx_intent(ctx, size, &riid,
 						&intent_size, &cookie)) {
-			rwref_get(&ctx->ch_state_lhb2);
 			rwref_read_put(&ctx->ch_state_lhb2);
 			if (is_atomic) {
 				GLINK_ERR_CH(ctx,
 				    "%s Intent of size %zu not ready\n",
 				    __func__, size);
-				rwref_put(&ctx->ch_state_lhb2);
-				return -EAGAIN;
+				ret = -EAGAIN;
+				goto glink_tx_common_err_2;
 			}
 
 			if (ctx->transport_ptr->local_state == GLINK_XPRT_DOWN
@@ -2888,8 +2927,8 @@ static int glink_tx_common(void *handle, void *pkt_priv,
 				GLINK_ERR_CH(ctx,
 					"%s: Channel closed while waiting for intent\n",
 					__func__);
-				rwref_put(&ctx->ch_state_lhb2);
-				return -EBUSY;
+				ret = -EBUSY;
+				goto glink_tx_common_err_2;
 			}
 
 			/* wait for the remote intent req ack */
@@ -2899,8 +2938,8 @@ static int glink_tx_common(void *handle, void *pkt_priv,
 				GLINK_ERR_CH(ctx,
 					"%s: Intent request ack with size: %zu not granted for lcid\n",
 					__func__, size);
-				rwref_put(&ctx->ch_state_lhb2);
-				return -ETIMEDOUT;
+				ret = -ETIMEDOUT;
+				goto glink_tx_common_err_2;
 			}
 
 			if (!ctx->int_req_ack) {
@@ -2908,8 +2947,8 @@ static int glink_tx_common(void *handle, void *pkt_priv,
 				    "%s: Intent Request with size: %zu %s",
 				    __func__, size,
 				    "not granted for lcid\n");
-				rwref_put(&ctx->ch_state_lhb2);
-				return -EAGAIN;
+				ret = -EAGAIN;
+				goto glink_tx_common_err_2;
 			}
 
 			/* wait for the rx_intent from remote side */
@@ -2919,13 +2958,12 @@ static int glink_tx_common(void *handle, void *pkt_priv,
 				GLINK_ERR_CH(ctx,
 					"%s: Intent request with size: %zu not granted for lcid\n",
 					__func__, size);
-				rwref_put(&ctx->ch_state_lhb2);
-				return -ETIMEDOUT;
+				ret = -ETIMEDOUT;
+				goto glink_tx_common_err_2;
 			}
 
 			reinit_completion(&ctx->int_req_complete);
 			rwref_read_get(&ctx->ch_state_lhb2);
-			rwref_put(&ctx->ch_state_lhb2);
 		}
 	}
 
@@ -2945,8 +2983,8 @@ static int glink_tx_common(void *handle, void *pkt_priv,
 	if (!tx_info) {
 		GLINK_ERR_CH(ctx, "%s: No memory for allocation\n", __func__);
 		ch_push_remote_rx_intent(ctx, intent_size, riid, cookie);
-		rwref_read_put(&ctx->ch_state_lhb2);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto glink_tx_common_err;
 	}
 	rwref_lock_init(&tx_info->pkt_ref, glink_tx_pkt_release);
 	INIT_LIST_HEAD(&tx_info->list_done);
@@ -2972,7 +3010,10 @@ static int glink_tx_common(void *handle, void *pkt_priv,
 	else
 		xprt_schedule_tx(ctx->transport_ptr, ctx, tx_info);
 
+glink_tx_common_err:
 	rwref_read_put(&ctx->ch_state_lhb2);
+glink_tx_common_err_2:
+	glink_put_ch_ctx(ctx, false);
 	return ret;
 }
 
@@ -3013,13 +3054,15 @@ int glink_queue_rx_intent(void *handle, const void *pkt_priv, size_t size)
 	struct glink_core_rx_intent *intent_ptr;
 	int ret = 0;
 
-	if (!ctx)
-		return -EINVAL;
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
 
 	if (!ch_is_fully_opened(ctx)) {
 		/* Can only queue rx intents if channel is fully opened */
 		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
 			__func__);
+		glink_put_ch_ctx(ctx, false);
 		return -EBUSY;
 	}
 
@@ -3028,13 +3071,16 @@ int glink_queue_rx_intent(void *handle, const void *pkt_priv, size_t size)
 		GLINK_ERR_CH(ctx,
 			"%s: Intent pointer allocation failed size[%zu]\n",
 			__func__, size);
+		glink_put_ch_ctx(ctx, false);
 		return -ENOMEM;
 	}
 	GLINK_DBG_CH(ctx, "%s: L[%u]:%zu\n", __func__, intent_ptr->id,
 			intent_ptr->intent_size);
 
-	if (ctx->transport_ptr->capabilities & GCAP_INTENTLESS)
+	if (ctx->transport_ptr->capabilities & GCAP_INTENTLESS) {
+		glink_put_ch_ctx(ctx, false);
 		return ret;
+	}
 
 	/* notify remote side of rx intent */
 	ret = ctx->transport_ptr->ops->tx_cmd_local_rx_intent(
@@ -3042,7 +3088,7 @@ int glink_queue_rx_intent(void *handle, const void *pkt_priv, size_t size)
 	if (ret)
 		/* unable to transmit, dequeue intent */
 		ch_remove_local_rx_intent(ctx, intent_ptr->id);
-
+	glink_put_ch_ctx(ctx, false);
 	return ret;
 }
 EXPORT_SYMBOL(glink_queue_rx_intent);
@@ -3061,20 +3107,25 @@ bool glink_rx_intent_exists(void *handle, size_t size)
 	struct channel_ctx *ctx = (struct channel_ctx *)handle;
 	struct glink_core_rx_intent *intent;
 	unsigned long flags;
+	int ret;
 
 	if (!ctx || !ch_is_fully_opened(ctx))
 		return false;
 
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return false;
 	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
 	list_for_each_entry(intent, &ctx->local_rx_intent_list, list) {
 		if (size <= intent->intent_size) {
 			spin_unlock_irqrestore(
 				&ctx->local_rx_intent_lst_lock_lhc1, flags);
+			glink_put_ch_ctx(ctx, false);
 			return true;
 		}
 	}
 	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
-
+	glink_put_ch_ctx(ctx, false);
 	return false;
 }
 EXPORT_SYMBOL(glink_rx_intent_exists);
@@ -3095,11 +3146,15 @@ int glink_rx_done(void *handle, const void *ptr, bool reuse)
 	uint32_t id;
 	int ret = 0;
 
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
 	liid_ptr = ch_get_local_rx_intent_notified(ctx, ptr);
 
 	if (IS_ERR_OR_NULL(liid_ptr)) {
 		/* invalid pointer */
 		GLINK_ERR_CH(ctx, "%s: Invalid pointer %p\n", __func__, ptr);
+		glink_put_ch_ctx(ctx, false);
 		return -EINVAL;
 	}
 
@@ -3125,7 +3180,7 @@ int glink_rx_done(void *handle, const void *ptr, bool reuse)
 	/* send rx done */
 	ctx->transport_ptr->ops->tx_cmd_local_rx_done(ctx->transport_ptr->ops,
 			ctx->lcid, id, reuse);
-
+	glink_put_ch_ctx(ctx, false);
 	return ret;
 }
 EXPORT_SYMBOL(glink_rx_done);
@@ -3173,12 +3228,13 @@ int glink_sigs_set(void *handle, uint32_t sigs)
 	struct channel_ctx *ctx = (struct channel_ctx *)handle;
 	int ret;
 
-	if (!ctx)
-		return -EINVAL;
-
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
 	if (!ch_is_fully_opened(ctx)) {
 		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
 			__func__);
+		glink_put_ch_ctx(ctx, false);
 		return -EBUSY;
 	}
 
@@ -3188,6 +3244,7 @@ int glink_sigs_set(void *handle, uint32_t sigs)
 			ctx->lcid, ctx->lsigs);
 	GLINK_INFO_CH(ctx, "%s: Sent SIGNAL SET command\n", __func__);
 
+	glink_put_ch_ctx(ctx, false);
 	return ret;
 }
 EXPORT_SYMBOL(glink_sigs_set);
@@ -3203,17 +3260,22 @@ EXPORT_SYMBOL(glink_sigs_set);
 int glink_sigs_local_get(void *handle, uint32_t *sigs)
 {
 	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	int ret;
 
-	if (!ctx || !sigs)
+	if (!sigs)
 		return -EINVAL;
-
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
 	if (!ch_is_fully_opened(ctx)) {
 		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
 			__func__);
+		glink_put_ch_ctx(ctx, false);
 		return -EBUSY;
 	}
 
 	*sigs = ctx->lsigs;
+	glink_put_ch_ctx(ctx, false);
 	return 0;
 }
 EXPORT_SYMBOL(glink_sigs_local_get);
@@ -3229,17 +3291,23 @@ EXPORT_SYMBOL(glink_sigs_local_get);
 int glink_sigs_remote_get(void *handle, uint32_t *sigs)
 {
 	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	int ret;
 
-	if (!ctx || !sigs)
+	if (!sigs)
 		return -EINVAL;
 
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
 	if (!ch_is_fully_opened(ctx)) {
 		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
 			__func__);
+		glink_put_ch_ctx(ctx, false);
 		return -EBUSY;
 	}
 
 	*sigs = ctx->rsigs;
+	glink_put_ch_ctx(ctx, false);
 	return 0;
 }
 EXPORT_SYMBOL(glink_sigs_remote_get);
@@ -3334,12 +3402,16 @@ int glink_qos_latency(void *handle, unsigned long latency_us, size_t pkt_size)
 	int ret;
 	unsigned long req_rate_kBps;
 
-	if (!ctx || !latency_us || !pkt_size)
+	if (!latency_us || !pkt_size)
 		return -EINVAL;
 
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
 	if (!ch_is_fully_opened(ctx)) {
 		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
 			__func__);
+		glink_put_ch_ctx(ctx, false);
 		return -EBUSY;
 	}
 
@@ -3349,7 +3421,7 @@ int glink_qos_latency(void *handle, unsigned long latency_us, size_t pkt_size)
 	if (ret < 0)
 		GLINK_ERR_CH(ctx, "%s: QoS %lu:%zu cannot be met\n",
 			     __func__, latency_us, pkt_size);
-
+	glink_put_ch_ctx(ctx, false);
 	return ret;
 }
 EXPORT_SYMBOL(glink_qos_latency);
@@ -3367,16 +3439,18 @@ int glink_qos_cancel(void *handle)
 	struct channel_ctx *ctx = (struct channel_ctx *)handle;
 	int ret;
 
-	if (!ctx)
-		return -EINVAL;
-
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
 	if (!ch_is_fully_opened(ctx)) {
 		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
 			__func__);
+		glink_put_ch_ctx(ctx, false);
 		return -EBUSY;
 	}
 
 	ret = glink_qos_reset_priority(ctx);
+	glink_put_ch_ctx(ctx, false);
 	return ret;
 }
 EXPORT_SYMBOL(glink_qos_cancel);
@@ -3397,12 +3471,13 @@ int glink_qos_start(void *handle)
 	int ret;
 	unsigned long flags;
 
-	if (!ctx)
-		return -EINVAL;
-
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
 	if (!ch_is_fully_opened(ctx)) {
 		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
 			__func__);
+		glink_put_ch_ctx(ctx, false);
 		return -EBUSY;
 	}
 
@@ -3411,6 +3486,7 @@ int glink_qos_start(void *handle)
 	ret = glink_qos_add_ch_tx_intent(ctx);
 	spin_unlock(&ctx->tx_lists_lock_lhc3);
 	spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb3, flags);
+	glink_put_ch_ctx(ctx, false);
 	return ret;
 }
 EXPORT_SYMBOL(glink_qos_start);
@@ -3429,16 +3505,25 @@ EXPORT_SYMBOL(glink_qos_start);
 unsigned long glink_qos_get_ramp_time(void *handle, size_t pkt_size)
 {
 	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	int ret;
 
-	if (!ctx)
-		return (unsigned long)-EINVAL;
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return (unsigned long)ret;
 
 	if (!ch_is_fully_opened(ctx)) {
 		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
 			__func__);
+		glink_put_ch_ctx(ctx, false);
 		return (unsigned long)-EBUSY;
 	}
 
+	/*
+	 * FIXME: ctx->transport_ptr is dereferenced below after the channel
+	 * reference has been dropped; read the ramp time into a local before
+	 * calling glink_put_ch_ctx() to avoid a potential use-after-free.
+	 */
+	glink_put_ch_ctx(ctx, false);
 	return ctx->transport_ptr->ops->get_power_vote_ramp_time(
 			ctx->transport_ptr->ops,
 			glink_prio_to_power_state(ctx->transport_ptr,
@@ -3522,12 +3602,18 @@ EXPORT_SYMBOL(glink_rpm_mask_rx_interrupt);
 int glink_wait_link_down(void *handle)
 {
 	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	int ret;
 
-	if (!ctx)
-		return -EINVAL;
-	if (!ctx->transport_ptr)
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
+	if (!ctx->transport_ptr) {
+		glink_put_ch_ctx(ctx, false);
 		return -EOPNOTSUPP;
-
-	return ctx->transport_ptr->ops->wait_link_down(ctx->transport_ptr->ops);
+	}
+	/* keep ctx pinned across the transport call to avoid use-after-free */
+	ret = ctx->transport_ptr->ops->wait_link_down(ctx->transport_ptr->ops);
+	glink_put_ch_ctx(ctx, false);
+	return ret;
 }
 EXPORT_SYMBOL(glink_wait_link_down);
@@ -6024,6 +6108,7 @@ EXPORT_SYMBOL(glink_get_xprt_log_ctx);
 static int glink_init(void)
 {
+	rwlock_init(&magic_lock);
 	log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "glink", 0);
 	if (!log_ctx)
 		GLINK_ERR("%s: unable to create log context\n", __func__);
 	glink_debugfs_init();