drm/msm/sde: manage secure and non-secure transition in sde

This change adds support for performing the secure transitions
required to handle secure UI and secure camera use cases. With this
change, KMS queries the CRTC for the operations that must be performed
to move between the secure states.

CRs-Fixed: 2064272
Change-Id: Id1a22a0fbd1ed95c8762732103488a11d11ecfc1
Signed-off-by: Abhijit Kulkarni <kabhijit@codeaurora.org>
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index c6943de..c2f17c8 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -735,6 +735,7 @@
 void msm_gem_put_pages(struct drm_gem_object *obj);
 void msm_gem_put_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace);
+dma_addr_t msm_gem_get_dma_addr(struct drm_gem_object *obj);
 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 		struct drm_mode_create_dumb *args);
 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -777,6 +778,7 @@
 		struct msm_gem_address_space *aspace);
 uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
 		struct msm_gem_address_space *aspace, int plane);
+uint32_t msm_framebuffer_phys(struct drm_framebuffer *fb, int plane);
 struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
 const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
 struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 0a9f12d..f5cdf64 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -190,6 +190,22 @@
 	return msm_gem_iova(msm_fb->planes[plane], aspace) + fb->offsets[plane];
 }
 
+/**
+ * msm_framebuffer_phys - get the physical (dma) address of a fb plane
+ * @fb: framebuffer to query
+ * @plane: plane index within the framebuffer
+ *
+ * Returns 0 when the plane has no backing object or no dma address.
+ * NOTE(review): the return type is uint32_t while dma_addr_t may be
+ * 64-bit; an address above 4GB would be truncated — confirm the secure
+ * buffers handed to this path are allocated below 4GB.
+ */
+uint32_t msm_framebuffer_phys(struct drm_framebuffer *fb,
+		int plane)
+{
+	struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+	dma_addr_t phys_addr;
+
+	/* some formats do not populate every plane slot */
+	if (!msm_fb->planes[plane])
+		return 0;
+
+	phys_addr = msm_gem_get_dma_addr(msm_fb->planes[plane]);
+	if (!phys_addr)
+		return 0;
+
+	/* apply the per-plane byte offset within the backing object */
+	return phys_addr + fb->offsets[plane];
+}
+
 struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
 {
 	struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index d64dcc6..b829460 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -290,6 +290,19 @@
 	return offset;
 }
 
+/**
+ * msm_gem_get_dma_addr - get the dma address of a GEM object
+ * @obj: GEM object to query
+ *
+ * Returns 0 when the object has no valid scatter/gather table.
+ * NOTE(review): only the first sgl entry's address is returned —
+ * this assumes the buffer is physically contiguous; confirm this
+ * holds for the secure/contiguous allocations using this path.
+ */
+dma_addr_t msm_gem_get_dma_addr(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct drm_device *dev = obj->dev;
+
+	if (IS_ERR_OR_NULL(msm_obj->sgt)) {
+		dev_err(dev->dev, "invalid scatter/gather table\n");
+		return 0;
+	}
+
+	return sg_dma_address(msm_obj->sgt->sgl);
+}
+
 static void obj_remove_domain(struct msm_gem_vma *domain)
 {
 	if (domain) {
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 6f1d2b7..b5a846d 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -38,6 +38,15 @@
 #include "sde_power_handle.h"
 #include "sde_core_perf.h"
 #include "sde_trace.h"
+#include <soc/qcom/scm.h>
+#include "soc/qcom/secure_buffer.h"
+
+/* defines for secure channel call */
+#define SEC_SID_CNT               2
+#define SEC_SID_MASK_0            0x80881
+#define SEC_SID_MASK_1            0x80C81
+#define MEM_PROTECT_SD_CTRL_SWITCH 0x18
+#define MDP_DEVICE_ID            0x1A
 
 struct sde_crtc_irq_info {
 	struct sde_irq_callback irq;
@@ -1516,6 +1525,348 @@
 	_sde_crtc_program_lm_output_roi(crtc);
 }
 
+/**
+ * _sde_crtc_find_plane_fb_modes - count planes per fb translation mode
+ * @state: crtc state whose planes are inspected
+ * @fb_ns: out - number of planes in non-secure mode
+ * @fb_sec: out - number of planes in secure mode
+ * @fb_ns_dir: out - number of planes in non-secure direct-translation mode
+ * @fb_sec_dir: out - number of planes in secure direct-translation mode
+ * Returns 0 on success, negative errno on invalid state or mode.
+ */
+static int _sde_crtc_find_plane_fb_modes(struct drm_crtc_state *state,
+		uint32_t *fb_ns,
+		uint32_t *fb_sec,
+		uint32_t *fb_ns_dir,
+		uint32_t *fb_sec_dir)
+{
+	struct drm_plane *plane;
+	const struct drm_plane_state *pstate;
+	struct sde_plane_state *sde_pstate;
+	uint32_t mode = 0;
+	int rc;
+
+	if (!state) {
+		SDE_ERROR("invalid state\n");
+		return -EINVAL;
+	}
+
+	*fb_ns = 0;
+	*fb_sec = 0;
+	*fb_ns_dir = 0;
+	*fb_sec_dir = 0;
+	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
+		if (IS_ERR_OR_NULL(pstate)) {
+			rc = PTR_ERR(pstate);
+			SDE_ERROR("crtc%d failed to get plane%d state%d\n",
+					state->crtc->base.id,
+					plane->base.id, rc);
+			return rc;
+		}
+		sde_pstate = to_sde_plane_state(pstate);
+		mode = sde_plane_get_property(sde_pstate,
+				PLANE_PROP_FB_TRANSLATION_MODE);
+		switch (mode) {
+		case SDE_DRM_FB_NON_SEC:
+			(*fb_ns)++;
+			break;
+		case SDE_DRM_FB_SEC:
+			(*fb_sec)++;
+			break;
+		case SDE_DRM_FB_NON_SEC_DIR_TRANS:
+			(*fb_ns_dir)++;
+			break;
+		case SDE_DRM_FB_SEC_DIR_TRANS:
+			(*fb_sec_dir)++;
+			break;
+		default:
+			SDE_ERROR("Error: Plane[%d], fb_trans_mode:%d\n",
+					plane->base.id,
+					mode);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+/**
+ * sde_crtc_get_secure_transition_ops - determines the operations that
+ * need to be performed before transitioning to secure state
+ * This function should be called after swapping the new state
+ * @crtc: Pointer to drm crtc structure
+ * @old_crtc_state: Pointer to the crtc state before this commit
+ * @old_valid_fb: true if planes were staged with valid fbs before commit
+ * Returns the bitmask of operations need to be performed, -Error in
+ * case of error cases
+ */
+int sde_crtc_get_secure_transition_ops(struct drm_crtc *crtc,
+		struct drm_crtc_state *old_crtc_state,
+		bool old_valid_fb)
+{
+	struct drm_plane *plane;
+	struct drm_encoder *encoder;
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
+	struct sde_crtc_smmu_state_data *smmu_state;
+	uint32_t translation_mode = 0;
+	int ops  = 0;
+	bool post_commit = false;
+
+	if (!crtc || !crtc->state) {
+		SDE_ERROR("invalid crtc\n");
+		return -EINVAL;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	cstate = to_sde_crtc_state(crtc->state);
+	smmu_state = &sde_crtc->smmu_state;
+
+	SDE_DEBUG("crtc%d, secure_level%d\n",
+			crtc->base.id,
+			sde_crtc_get_secure_level(crtc, crtc->state));
+
+	/**
+	 * SMMU operations need to be delayed in case of
+	 * video mode panels when switching back to non_secure
+	 * mode
+	 */
+	drm_for_each_encoder(encoder, crtc->dev) {
+		if (encoder->crtc != crtc)
+			continue;
+
+		/*
+		 * OR in the video-mode condition; the previous
+		 * "post_commit &= ..." on a false-initialized flag
+		 * could never become true, so the POST_COMMIT delay
+		 * for video mode panels never triggered.
+		 */
+		post_commit |= !sde_encoder_is_cmd_mode(encoder);
+	}
+
+	drm_atomic_crtc_for_each_plane(plane, crtc) {
+		if (!plane->state)
+			continue;
+
+		translation_mode = sde_plane_get_property(
+				to_sde_plane_state(plane->state),
+				PLANE_PROP_FB_TRANSLATION_MODE);
+		if (translation_mode > SDE_DRM_FB_SEC_DIR_TRANS) {
+			SDE_ERROR("crtc%d, invalid translation_mode%d\n",
+					crtc->base.id,
+					translation_mode);
+			return -EINVAL;
+		}
+
+		/**
+		 * we can break if we find sec_dir or non_sec_dir
+		 * plane
+		 */
+		if ((translation_mode == SDE_DRM_FB_NON_SEC_DIR_TRANS) ||
+			(translation_mode == SDE_DRM_FB_SEC_DIR_TRANS))
+			break;
+	}
+
+	switch (translation_mode) {
+	case SDE_DRM_FB_NON_SEC_DIR_TRANS:
+		if (smmu_state->state == ATTACHED) {
+			smmu_state->state = DETACH_ALL_REQ;
+			smmu_state->transition_type = PRE_COMMIT;
+			ops |= SDE_KMS_OPS_CRTC_SECURE_STATE_CHANGE;
+			if (old_valid_fb) {
+				ops |= (SDE_KMS_OPS_WAIT_FOR_TX_DONE  |
+					SDE_KMS_OPS_CLEANUP_PLANE_FB);
+			}
+		}
+		break;
+	case SDE_DRM_FB_SEC_DIR_TRANS:
+		if (smmu_state->state == ATTACHED) {
+			smmu_state->state = DETACH_SEC_REQ;
+			smmu_state->transition_type = PRE_COMMIT;
+			ops |= SDE_KMS_OPS_CRTC_SECURE_STATE_CHANGE;
+		}
+		break;
+	case SDE_DRM_FB_SEC:
+	case SDE_DRM_FB_NON_SEC:
+		if (smmu_state->state == DETACHED_SEC) {
+			smmu_state->state = ATTACH_SEC_REQ;
+			smmu_state->transition_type = post_commit ?
+				POST_COMMIT : PRE_COMMIT;
+			ops |= SDE_KMS_OPS_CRTC_SECURE_STATE_CHANGE;
+			if (translation_mode == SDE_DRM_FB_SEC)
+				ops |= SDE_KMS_OPS_PREPARE_PLANE_FB;
+		} else if (smmu_state->state == DETACHED) {
+			smmu_state->state = ATTACH_ALL_REQ;
+			smmu_state->transition_type = post_commit ?
+				POST_COMMIT : PRE_COMMIT;
+			ops |= SDE_KMS_OPS_CRTC_SECURE_STATE_CHANGE |
+				SDE_KMS_OPS_PREPARE_PLANE_FB;
+			if (old_valid_fb)
+				ops |= (SDE_KMS_OPS_WAIT_FOR_TX_DONE |
+				 SDE_KMS_OPS_CLEANUP_PLANE_FB);
+		}
+		break;
+	default:
+		SDE_ERROR("invalid plane fb_mode:%d\n",
+				translation_mode);
+		ops = 0;
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("SMMU State:%d, type:%d ops:%x\n", smmu_state->state,
+			smmu_state->transition_type,
+			ops);
+	return ops;
+}
+
+/**
+ * _sde_crtc_scm_call - makes secure channel call to switch the VMIDs
+ * @vmid: switch the stage 2 translation to this VMID.
+ */
+static int _sde_crtc_scm_call(int vmid)
+{
+	struct scm_desc desc = {0};
+	uint32_t num_sids;
+	uint32_t *sec_sid;
+	uint32_t mem_protect_sd_ctrl_id = MEM_PROTECT_SD_CTRL_SWITCH;
+	int ret = 0;
+
+	/* This info should be queried from catalog */
+	num_sids = SEC_SID_CNT;
+	sec_sid = kcalloc(num_sids, sizeof(uint32_t), GFP_KERNEL);
+	if (!sec_sid)
+		return -ENOMEM;
+
+	/**
+	 * derive this info from device tree/catalog, this is combination of
+	 * smr mask and SID for secure
+	 */
+	sec_sid[0] = SEC_SID_MASK_0;
+	sec_sid[1] = SEC_SID_MASK_1;
+
+	/*
+	 * flush the allocated buffer, not the local pointer variable:
+	 * "&sec_sid" would flush the stack slot holding the pointer and
+	 * leave the SID array itself unflushed.
+	 */
+	dmac_flush_range(sec_sid, sec_sid + num_sids);
+
+	SDE_DEBUG("calling scm_call for vmid %d", vmid);
+
+	desc.arginfo = SCM_ARGS(4, SCM_VAL, SCM_RW, SCM_VAL, SCM_VAL);
+	desc.args[0] = MDP_DEVICE_ID;
+	/* pass the physical address of the buffer, not of the pointer */
+	desc.args[1] = SCM_BUFFER_PHYS(sec_sid);
+	desc.args[2] = sizeof(uint32_t) * num_sids;
+	desc.args[3] = vmid;
+
+	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
+				mem_protect_sd_ctrl_id), &desc);
+	if (ret) {
+		SDE_ERROR("Error:scm_call2, vmid (%lld): ret%d\n",
+				desc.args[3],
+				ret);
+	}
+
+	kfree(sec_sid);
+	return ret;
+}
+
+/**
+ * sde_crtc_secure_ctrl - Initiates the operations to switch between secure
+ *                       and non-secure mode
+ * @crtc: Pointer to crtc
+ * @post_commit: if this operation is triggered after commit
+ */
+int sde_crtc_secure_ctrl(struct drm_crtc *crtc, bool post_commit)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
+	struct sde_kms *sde_kms;
+	struct sde_crtc_smmu_state_data *smmu_state;
+	int ret = 0;
+	int old_smmu_state;
+
+	if (!crtc || !crtc->state) {
+		SDE_ERROR("invalid crtc\n");
+		return -EINVAL;
+	}
+
+	sde_kms = _sde_crtc_get_kms(crtc);
+	if (!sde_kms) {
+		SDE_ERROR("invalid kms\n");
+		return -EINVAL;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	cstate = to_sde_crtc_state(crtc->state);
+	smmu_state = &sde_crtc->smmu_state;
+	old_smmu_state = smmu_state->state;
+
+	if ((!smmu_state->transition_type) ||
+	    ((smmu_state->transition_type == POST_COMMIT) && !post_commit))
+		/* Bail out */
+		return 0;
+
+	/* Secure UI use case enable */
+	switch (smmu_state->state) {
+	case DETACH_ALL_REQ:
+		/* detach_all_contexts */
+		ret = sde_kms_mmu_detach(sde_kms, false);
+		if (ret) {
+			SDE_ERROR("crtc: %d, failed to detach %d\n",
+					crtc->base.id, ret);
+			goto error;
+		}
+
+		ret = _sde_crtc_scm_call(VMID_CP_SEC_DISPLAY);
+		if (ret)
+			goto error;
+
+		smmu_state->state = DETACHED;
+		break;
+	/* Secure UI use case disable */
+	case ATTACH_ALL_REQ:
+		ret = _sde_crtc_scm_call(VMID_CP_PIXEL);
+		if (ret)
+			goto error;
+
+		/* attach_all_contexts */
+		ret = sde_kms_mmu_attach(sde_kms, false);
+		if (ret) {
+			SDE_ERROR("crtc: %d, failed to attach %d\n",
+					crtc->base.id,
+					ret);
+			goto error;
+		}
+
+		smmu_state->state = ATTACHED;
+
+		break;
+	/* Secure preview enable */
+	case DETACH_SEC_REQ:
+		/* detach secure_context */
+		ret = sde_kms_mmu_detach(sde_kms, true);
+		if (ret) {
+			SDE_ERROR("crtc: %d, failed to detach %d\n",
+					crtc->base.id,
+					ret);
+			goto error;
+		}
+
+		smmu_state->state = DETACHED_SEC;
+		ret = _sde_crtc_scm_call(VMID_CP_CAMERA_PREVIEW);
+		if (ret)
+			goto error;
+
+		break;
+
+	/* Secure preview disable */
+	case ATTACH_SEC_REQ:
+		ret = _sde_crtc_scm_call(VMID_CP_PIXEL);
+		if (ret)
+			goto error;
+
+		ret = sde_kms_mmu_attach(sde_kms, true);
+		if (ret) {
+			SDE_ERROR("crtc: %d, failed to attach %d\n",
+					crtc->base.id,
+					ret);
+			goto error;
+		}
+		smmu_state->state = ATTACHED;
+		break;
+	default:
+		break;
+	}
+
+	SDE_DEBUG("crtc: %d, old_state %d new_state %d\n", crtc->base.id,
+			old_smmu_state,
+			smmu_state->state);
+	smmu_state->transition_error = false;
+	smmu_state->transition_type = NONE;
+
+	/*
+	 * success: return here instead of falling through into the
+	 * error path, which would incorrectly mark every successful
+	 * transition as failed.
+	 */
+	return 0;
+
+error:
+	smmu_state->transition_error = true;
+	return ret;
+}
+
 void sde_crtc_prepare_commit(struct drm_crtc *crtc,
 		struct drm_crtc_state *old_state)
 {
@@ -1755,6 +2106,26 @@
 	kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work);
 }
 
+/**
+ * sde_crtc_complete_commit - callback signalling completion of the commit
+ * @crtc: Pointer to drm crtc object
+ * @old_state: Pointer to drm crtc old state object (currently unused here)
+ *
+ * Finishes any secure transition that was deferred to after the commit
+ * (POST_COMMIT), e.g. re-attaching SMMU contexts for video mode panels.
+ */
+void sde_crtc_complete_commit(struct drm_crtc *crtc,
+		struct drm_crtc_state *old_state)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_smmu_state_data *smmu_state;
+
+	if (!crtc || !crtc->state) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	SDE_EVT32_VERBOSE(DRMID(crtc));
+	smmu_state = &sde_crtc->smmu_state;
+
+	/* complete secure transitions if any */
+	if (smmu_state->transition_type == POST_COMMIT)
+		sde_crtc_secure_ctrl(crtc, true);
+}
+
 /**
  * _sde_crtc_set_input_fence_timeout - update ns version of in fence timeout
  * @cstate: Pointer to sde crtc state
@@ -2155,8 +2526,11 @@
 	 *                      required writes/flushing before crtc's "flush
 	 *                      everything" call below.
 	 */
-	drm_atomic_crtc_for_each_plane(plane, crtc)
+	drm_atomic_crtc_for_each_plane(plane, crtc) {
+		if (sde_crtc->smmu_state.transition_error)
+			sde_plane_set_error(plane, true);
 		sde_plane_flush(plane);
+	}
 
 	/* Kickoff will be scheduled by outer layer */
 }
@@ -2860,61 +3234,6 @@
 	return rc;
 }
 
-static int _sde_crtc_find_plane_fb_modes(struct drm_crtc_state *state,
-		uint32_t *fb_ns,
-		uint32_t *fb_sec,
-		uint32_t *fb_ns_dir,
-		uint32_t *fb_sec_dir)
-{
-	struct drm_plane *plane;
-	const struct drm_plane_state *pstate;
-	struct sde_plane_state *sde_pstate;
-	uint32_t mode = 0;
-	int rc;
-
-	if (!state) {
-		SDE_ERROR("invalid state\n");
-		return -EINVAL;
-	}
-
-	*fb_ns = 0;
-	*fb_sec = 0;
-	*fb_ns_dir = 0;
-	*fb_sec_dir = 0;
-	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
-		if (IS_ERR_OR_NULL(pstate)) {
-			rc = PTR_ERR(pstate);
-			SDE_ERROR("crtc%d failed to get plane%d state%d\n",
-					state->crtc->base.id,
-					plane->base.id, rc);
-			return rc;
-		}
-		sde_pstate = to_sde_plane_state(pstate);
-		mode = sde_plane_get_property(sde_pstate,
-				PLANE_PROP_FB_TRANSLATION_MODE);
-		switch (mode) {
-		case SDE_DRM_FB_NON_SEC:
-			(*fb_ns)++;
-			break;
-		case SDE_DRM_FB_SEC:
-			(*fb_sec)++;
-			break;
-		case SDE_DRM_FB_NON_SEC_DIR_TRANS:
-			(*fb_ns_dir)++;
-			break;
-		case SDE_DRM_FB_SEC_DIR_TRANS:
-			(*fb_sec_dir)++;
-			break;
-		default:
-			SDE_ERROR("Error: Plane[%d], fb_trans_mode:%d",
-					plane->base.id,
-					mode);
-			return -EINVAL;
-		}
-	}
-	return 0;
-}
-
 static int _sde_crtc_check_secure_state(struct drm_crtc *crtc,
 		struct drm_crtc_state *state)
 {
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index f8ab110..43b3045 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -47,6 +47,50 @@
 };
 
 /**
+ * enum sde_crtc_smmu_state:	smmu state
+ * @ATTACHED:	 all the context banks are attached.
+ * @DETACHED:	 all the context banks are detached.
+ * @DETACHED_SEC:	 secure context bank is detached.
+ * @ATTACH_ALL_REQ:	 transient state of attaching context banks.
+ * @DETACH_ALL_REQ:	 transient state of detaching context banks.
+ * @DETACH_SEC_REQ:	 transient state of detaching secure context bank.
+ * @ATTACH_SEC_REQ:	 transient state of attaching secure context bank.
+ */
+enum sde_crtc_smmu_state {
+	ATTACHED = 0,
+	DETACHED,
+	DETACHED_SEC,
+	ATTACH_ALL_REQ,
+	DETACH_ALL_REQ,
+	DETACH_SEC_REQ,
+	ATTACH_SEC_REQ,
+};
+
+/**
+ * enum sde_crtc_smmu_state_transition_type: state transition type
+ * @NONE: no pending state transitions
+ * @PRE_COMMIT: state transitions should be done before processing the commit
+ * @POST_COMMIT: state transitions to be done after processing the commit.
+ */
+enum sde_crtc_smmu_state_transition_type {
+	NONE,
+	PRE_COMMIT,
+	POST_COMMIT
+};
+
+/**
+ * struct sde_crtc_smmu_state_data: stores the smmu state and transition type
+ * @state: current state of smmu context banks
+ * @transition_type: transition request type
+ * @transition_error: whether there is error while transitioning the state
+ */
+struct sde_crtc_smmu_state_data {
+	uint32_t state;
+	uint32_t transition_type;
+	uint32_t transition_error;
+};
+
+/**
  * struct sde_crtc_mixer: stores the map for each virtual pipeline in the CRTC
  * @hw_lm:	LM HW Driver context
  * @hw_ctl:	CTL Path HW driver context
@@ -211,6 +255,8 @@
 
 	struct mutex rp_lock;
 	struct list_head rp_head;
+
+	struct sde_crtc_smmu_state_data smmu_state;
 };
 
 #define to_sde_crtc(x) container_of(x, struct sde_crtc, base)
@@ -388,6 +434,14 @@
 		struct drm_crtc_state *old_state);
 
 /**
+ * sde_crtc_complete_commit - callback signalling completion of current commit
+ * @crtc: Pointer to drm crtc object
+ * @old_state: Pointer to drm crtc old state object
+ */
+void sde_crtc_complete_commit(struct drm_crtc *crtc,
+		struct drm_crtc_state *old_state);
+
+/**
  * sde_crtc_init - create a new crtc object
  * @dev: sde device
  * @plane: base plane
@@ -527,5 +581,25 @@
 			CRTC_PROP_SECURITY_LEVEL);
 }
 
+/**
+ * sde_crtc_get_secure_transition_ops - determines the operations to be
+ * performed before transitioning to secure state
+ * This function should be called after swapping the new state
+ * @crtc: Pointer to drm crtc structure
+ * @old_crtc_state: Pointer to previous CRTC state
+ * @old_valid_fb: true if planes were staged with valid fbs before commit
+ * Returns the bitmask of operations need to be performed, -Error in
+ * case of error cases
+ */
+int sde_crtc_get_secure_transition_ops(struct drm_crtc *crtc,
+		struct drm_crtc_state *old_crtc_state,
+		bool old_valid_fb);
+
+/**
+ * sde_crtc_secure_ctrl - Initiates the transition between secure and
+ *                          non-secure world
+ * @crtc: Pointer to crtc
+ * @post_commit: if this operation is triggered after commit
+ */
+int sde_crtc_secure_ctrl(struct drm_crtc *crtc, bool post_commit);
 
 #endif /* _SDE_CRTC_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.c b/drivers/gpu/drm/msm/sde/sde_formats.c
index 3acf4c9..2136f9c 100644
--- a/drivers/gpu/drm/msm/sde/sde_formats.c
+++ b/drivers/gpu/drm/msm/sde/sde_formats.c
@@ -847,7 +847,10 @@
 		return -EINVAL;
 	}
 
-	base_addr = msm_framebuffer_iova(fb, aspace, 0);
+	if (aspace)
+		base_addr = msm_framebuffer_iova(fb, aspace, 0);
+	else
+		base_addr = msm_framebuffer_phys(fb, 0);
 	if (!base_addr) {
 		DRM_ERROR("failed to retrieve base addr\n");
 		return -EFAULT;
@@ -943,7 +946,11 @@
 
 	/* Populate addresses for simple formats here */
 	for (i = 0; i < layout->num_planes; ++i) {
-		layout->plane_addr[i] = msm_framebuffer_iova(fb, aspace, i);
+		if (aspace)
+			layout->plane_addr[i] =
+				msm_framebuffer_iova(fb, aspace, i);
+		else
+			layout->plane_addr[i] = msm_framebuffer_phys(fb, i);
 		if (!layout->plane_addr[i]) {
 			DRM_ERROR("failed to retrieve base addr\n");
 			return -EFAULT;
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 8dd6448..8c9c4c7 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -348,66 +348,7 @@
 	sde_crtc_vblank(crtc, false);
 }
 
-static void sde_kms_prepare_commit(struct msm_kms *kms,
-		struct drm_atomic_state *state)
-{
-	struct sde_kms *sde_kms;
-	struct msm_drm_private *priv;
-	struct drm_device *dev;
-	struct drm_encoder *encoder;
-
-	if (!kms)
-		return;
-	sde_kms = to_sde_kms(kms);
-	dev = sde_kms->dev;
-
-	if (!dev || !dev->dev_private)
-		return;
-	priv = dev->dev_private;
-
-	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
-
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
-		if (encoder->crtc != NULL)
-			sde_encoder_prepare_commit(encoder);
-
-}
-
-static void sde_kms_commit(struct msm_kms *kms,
-		struct drm_atomic_state *old_state)
-{
-	struct drm_crtc *crtc;
-	struct drm_crtc_state *old_crtc_state;
-	int i;
-
-	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
-		if (crtc->state->active) {
-			SDE_EVT32(DRMID(crtc));
-			sde_crtc_commit_kickoff(crtc);
-		}
-	}
-}
-
-static void sde_kms_complete_commit(struct msm_kms *kms,
-		struct drm_atomic_state *old_state)
-{
-	struct sde_kms *sde_kms;
-	struct msm_drm_private *priv;
-
-	if (!kms || !old_state)
-		return;
-	sde_kms = to_sde_kms(kms);
-
-	if (!sde_kms->dev || !sde_kms->dev->dev_private)
-		return;
-	priv = sde_kms->dev->dev_private;
-
-	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
-
-	SDE_EVT32_VERBOSE(SDE_EVTLOG_FUNC_EXIT);
-}
-
-static void sde_kms_wait_for_tx_complete(struct msm_kms *kms,
+static void sde_kms_wait_for_frame_transfer_complete(struct msm_kms *kms,
 		struct drm_crtc *crtc)
 {
 	struct drm_encoder *encoder;
@@ -450,6 +391,178 @@
 	}
 }
 
+static int sde_kms_prepare_secure_transition(struct msm_kms *kms,
+		struct drm_atomic_state *state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state;
+
+	struct drm_plane *plane;
+	struct drm_plane_state *plane_state;
+	struct sde_kms *sde_kms = to_sde_kms(kms);
+	struct drm_device *dev = sde_kms->dev;
+	int i, j, ops = 0, ret = 0;
+	bool old_valid_fb = false;
+
+	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
+		if (!crtc->state || !crtc->state->active)
+			continue;
+		/*
+		 * It is safe to assume only one active crtc,
+		 * and compatible translation modes on the
+		 * planes staged on this crtc.
+		 * otherwise validation would have failed.
+		 * For this CRTC,
+		 */
+
+		/*
+		 * 1. Check if old state on the CRTC has planes
+		 * staged with valid fbs
+		 *
+		 * Use a separate index "j": reusing "i" here would
+		 * corrupt the outer crtc iteration.
+		 */
+		for_each_plane_in_state(state, plane, plane_state, j) {
+			if (!plane_state->crtc)
+				continue;
+			if (plane_state->fb) {
+				old_valid_fb = true;
+				break;
+			}
+		}
+
+		/*
+		 * 2.Get the operations needed to be performed before
+		 * secure transition can be initiated.
+		 */
+		ops = sde_crtc_get_secure_transition_ops(crtc,
+				old_crtc_state,
+				old_valid_fb);
+		if (ops < 0) {
+			SDE_ERROR("invalid secure operations %x\n", ops);
+			return ops;
+		}
+
+		if (!ops)
+			goto no_ops;
+
+		SDE_DEBUG("%d:secure operations(%x) started on state:%pK\n",
+				crtc->base.id,
+				ops,
+				crtc->state);
+
+		/* 3. Perform operations needed for secure transition */
+		if  (ops & SDE_KMS_OPS_WAIT_FOR_TX_DONE) {
+			SDE_DEBUG("wait_for_transfer_done\n");
+			sde_kms_wait_for_frame_transfer_complete(kms, crtc);
+		}
+		if (ops & SDE_KMS_OPS_CLEANUP_PLANE_FB) {
+			SDE_DEBUG("cleanup planes\n");
+			drm_atomic_helper_cleanup_planes(dev, state);
+		}
+		if (ops & SDE_KMS_OPS_CRTC_SECURE_STATE_CHANGE) {
+			SDE_DEBUG("secure ctrl\n");
+			sde_crtc_secure_ctrl(crtc, false);
+		}
+		if (ops & SDE_KMS_OPS_PREPARE_PLANE_FB) {
+			SDE_DEBUG("prepare planes %d",
+					crtc->state->plane_mask);
+			drm_atomic_crtc_for_each_plane(plane,
+					crtc) {
+				const struct drm_plane_helper_funcs *funcs;
+
+				plane_state = plane->state;
+				funcs = plane->helper_private;
+
+				/* plane->fb may be NULL; don't deref blindly */
+				SDE_DEBUG("psde:%d FB[%u]\n",
+						plane->base.id,
+						plane->fb ?
+							plane->fb->base.id : 0);
+				if (!funcs || !funcs->prepare_fb)
+					continue;
+
+				/*
+				 * call prepare_fb exactly once; the old
+				 * code invoked it twice (once in the "if"
+				 * condition), duplicating its side effects
+				 */
+				ret = funcs->prepare_fb(plane, plane_state);
+				if (ret)
+					return ret;
+			}
+		}
+		SDE_DEBUG("secure operations completed\n");
+	}
+
+no_ops:
+	return 0;
+}
+
+static void sde_kms_prepare_commit(struct msm_kms *kms,
+		struct drm_atomic_state *state)
+{
+	struct sde_kms *sde_kms;
+	struct msm_drm_private *priv;
+	struct drm_device *dev;
+	struct drm_encoder *encoder;
+
+	if (!kms)
+		return;
+	sde_kms = to_sde_kms(kms);
+	dev = sde_kms->dev;
+
+	if (!dev || !dev->dev_private)
+		return;
+	priv = dev->dev_private;
+
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+		if (encoder->crtc != NULL)
+			sde_encoder_prepare_commit(encoder);
+
+	/*
+	 * NOTE: for secure use cases we want to apply the new HW
+	 * configuration only after completing preparation for secure
+	 * transitions; prepare below if any transitions are required.
+	 * We cannot propagate the error from this void callback, so at
+	 * least log it instead of silently discarding it.
+	 */
+	if (sde_kms_prepare_secure_transition(kms, state))
+		SDE_ERROR("secure transition preparation failed\n");
+}
+
+/* kick off the committed configuration on every active crtc */
+static void sde_kms_commit(struct msm_kms *kms,
+		struct drm_atomic_state *old_state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state;
+	int i;
+
+	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+		if (crtc->state->active) {
+			SDE_EVT32(DRMID(crtc));
+			sde_crtc_commit_kickoff(crtc);
+		}
+	}
+}
+
+/*
+ * complete the commit on every crtc (finishing any POST_COMMIT secure
+ * transition), then drop the power vote taken in prepare_commit
+ */
+static void sde_kms_complete_commit(struct msm_kms *kms,
+		struct drm_atomic_state *old_state)
+{
+	struct sde_kms *sde_kms;
+	struct msm_drm_private *priv;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state;
+	int i;
+
+	if (!kms || !old_state)
+		return;
+	sde_kms = to_sde_kms(kms);
+
+	if (!sde_kms->dev || !sde_kms->dev->dev_private)
+		return;
+	priv = sde_kms->dev->dev_private;
+
+	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i)
+		sde_crtc_complete_commit(crtc, old_crtc_state);
+
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
+
+	SDE_EVT32_VERBOSE(SDE_EVTLOG_FUNC_EXIT);
+}
+
 static void sde_kms_wait_for_commit_done(struct msm_kms *kms,
 		struct drm_crtc *crtc)
 {
@@ -1574,7 +1687,7 @@
 	.commit          = sde_kms_commit,
 	.complete_commit = sde_kms_complete_commit,
 	.wait_for_crtc_commit_done = sde_kms_wait_for_commit_done,
-	.wait_for_tx_complete = sde_kms_wait_for_tx_complete,
+	.wait_for_tx_complete = sde_kms_wait_for_frame_transfer_complete,
 	.enable_vblank   = sde_kms_enable_vblank,
 	.disable_vblank  = sde_kms_disable_vblank,
 	.check_modified_format = sde_format_check_modified_format,
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index 4c0699e..0cb7008 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -89,7 +89,6 @@
 
 #define SDE_NAME_SIZE  12
 
-
 /* timeout in frames waiting for frame done */
 #define SDE_FRAME_DONE_TIMEOUT	60
 
@@ -102,6 +101,12 @@
 /* max virtual encoders per secure crtc */
 #define MAX_ALLOWED_ENCODER_CNT_PER_SECURE_CRTC	1
 
+/* defines the operations required for secure state transition */
+#define SDE_KMS_OPS_CRTC_SECURE_STATE_CHANGE               BIT(0)
+#define SDE_KMS_OPS_WAIT_FOR_TX_DONE                       BIT(1)
+#define SDE_KMS_OPS_CLEANUP_PLANE_FB                       BIT(2)
+#define SDE_KMS_OPS_PREPARE_PLANE_FB                       BIT(3)
+
 /*
  * struct sde_irq_callback - IRQ callback handlers
  * @list: list to callback
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 8a39a34..01bcd0f 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -2714,10 +2714,13 @@
 
 	new_rstate = &to_sde_plane_state(new_state)->rot;
 
-	ret = msm_framebuffer_prepare(new_rstate->out_fb, pstate->aspace);
-	if (ret) {
-		SDE_ERROR("failed to prepare framebuffer\n");
-		return ret;
+	if (pstate->aspace) {
+		ret = msm_framebuffer_prepare(new_rstate->out_fb,
+				pstate->aspace);
+		if (ret) {
+			SDE_ERROR("failed to prepare framebuffer\n");
+			return ret;
+		}
 	}
 
 	/* validate framebuffer layout before commit */
@@ -3184,6 +3187,21 @@
 		pstate->pending = false;
 }
 
+/**
+ * sde_plane_set_error: enable/disable error condition
+ * @plane: pointer to drm_plane structure
+ * @error: true to flag the plane as being in an error state
+ *
+ * Used e.g. when a secure SMMU transition failed, so the plane flush
+ * path can take the error handling route.
+ */
+void sde_plane_set_error(struct drm_plane *plane, bool error)
+{
+	struct sde_plane *psde;
+
+	if (!plane)
+		return;
+
+	psde = to_sde_plane(plane);
+	psde->is_error = error;
+}
+
 static int sde_plane_sspp_atomic_update(struct drm_plane *plane,
 				struct drm_plane_state *old_state)
 {
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h
index 325d342..913647f 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.h
+++ b/drivers/gpu/drm/msm/sde/sde_plane.h
@@ -233,6 +233,12 @@
 void sde_plane_kickoff(struct drm_plane *plane);
 
 /**
+ * sde_plane_set_error: enable/disable error condition
+ * @plane: pointer to drm_plane structure
+ */
+void sde_plane_set_error(struct drm_plane *plane, bool error);
+
+/**
  * sde_plane_init - create new sde plane for the given pipe
  * @dev:   Pointer to DRM device
  * @pipe:  sde hardware pipe identifier