Merge "drm/msm/sde: move rot flush config to crtc kickoff"
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 30cf3df..efbbd24 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -1263,7 +1263,7 @@
struct sde_hw_stage_cfg *stage_cfg;
struct sde_rect plane_crtc_roi;
- u32 flush_mask, flush_sbuf, flush_tmp;
+ u32 flush_mask, flush_sbuf;
uint32_t stage_idx, lm_idx;
int zpos_cnt[SDE_STAGE_MAX + 1] = { 0 };
int i;
@@ -1279,10 +1279,9 @@
lm = mixer->hw_lm;
stage_cfg = &sde_crtc->stage_cfg;
cstate = to_sde_crtc_state(crtc->state);
- flush_sbuf = 0x0;
- cstate->sbuf_cfg.rot_op_mode = SDE_CTL_ROT_OP_MODE_OFFLINE;
cstate->sbuf_prefill_line = 0;
+ sde_crtc->sbuf_flush_mask = 0x0;
drm_atomic_crtc_for_each_plane(plane, crtc) {
state = plane->state;
@@ -1297,17 +1296,14 @@
pstate = to_sde_plane_state(state);
fb = state->fb;
- if (sde_plane_is_sbuf_mode(plane, &prefill))
- cstate->sbuf_cfg.rot_op_mode =
- SDE_CTL_ROT_OP_MODE_INLINE_SYNC;
+ prefill = sde_plane_rot_calc_prefill(plane);
if (prefill > cstate->sbuf_prefill_line)
cstate->sbuf_prefill_line = prefill;
- sde_plane_get_ctl_flush(plane, ctl, &flush_mask, &flush_tmp);
+ sde_plane_get_ctl_flush(plane, ctl, &flush_mask, &flush_sbuf);
- /* persist rotator flush bit(s) for one more commit */
- flush_mask |= cstate->sbuf_flush_mask | flush_tmp;
- flush_sbuf |= flush_tmp;
+ /* save sbuf flush value for later */
+ sde_crtc->sbuf_flush_mask |= flush_sbuf;
SDE_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
crtc->base.id,
@@ -1331,8 +1327,7 @@
state->src_w >> 16, state->src_h >> 16,
state->crtc_x, state->crtc_y,
state->crtc_w, state->crtc_h,
- flush_tmp ? cstate->sbuf_cfg.rot_op_mode :
- SDE_CTL_ROT_OP_MODE_OFFLINE);
+ flush_sbuf != 0);
stage_idx = zpos_cnt[pstate->stage]++;
stage_cfg->stage[pstate->stage][stage_idx] =
@@ -1359,8 +1354,6 @@
}
}
- cstate->sbuf_flush_mask = flush_sbuf;
-
if (lm && lm->ops.setup_dim_layer) {
cstate = to_sde_crtc_state(crtc->state);
for (i = 0; i < cstate->num_dim_layers; i++)
@@ -2731,17 +2724,73 @@
return rc;
}
-void sde_crtc_commit_kickoff(struct drm_crtc *crtc)
+static void _sde_crtc_commit_kickoff_rot(struct drm_crtc *crtc,
+ struct sde_crtc_state *cstate)
{
struct drm_plane *plane;
+ struct sde_crtc *sde_crtc;
+ struct sde_hw_ctl *ctl, *master_ctl;
+ u32 flush_mask;
+ int i;
+
+ if (!crtc || !cstate)
+ return;
+
+ sde_crtc = to_sde_crtc(crtc);
+
+ /*
+ * Update sbuf configuration and flush bits if a flush
+ * mask has been defined for either the current or
+ * previous commit.
+ *
+ * Updates are also required for the first commit after
+ * sbuf_flush_mask becomes 0x0, to properly transition
+ * the hardware out of sbuf mode.
+ */
+ if (!sde_crtc->sbuf_flush_mask_old && !sde_crtc->sbuf_flush_mask)
+ return;
+
+ flush_mask = sde_crtc->sbuf_flush_mask_old | sde_crtc->sbuf_flush_mask;
+ sde_crtc->sbuf_flush_mask_old = sde_crtc->sbuf_flush_mask;
+
+ SDE_ATRACE_BEGIN("crtc_kickoff_rot");
+
+ if (cstate->sbuf_cfg.rot_op_mode != SDE_CTL_ROT_OP_MODE_OFFLINE) {
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ sde_plane_kickoff(plane);
+ }
+ }
+
+ master_ctl = NULL;
+ for (i = 0; i < sde_crtc->num_mixers; i++) {
+ ctl = sde_crtc->mixers[i].hw_ctl;
+ if (!ctl || !ctl->ops.setup_sbuf_cfg ||
+ !ctl->ops.update_pending_flush)
+ continue;
+
+ if (!master_ctl || master_ctl->idx > ctl->idx)
+ master_ctl = ctl;
+
+ ctl->ops.setup_sbuf_cfg(ctl, &cstate->sbuf_cfg);
+ ctl->ops.update_pending_flush(ctl, flush_mask);
+ }
+
+ if (cstate->sbuf_cfg.rot_op_mode == SDE_CTL_ROT_OP_MODE_INLINE_ASYNC &&
+ master_ctl && master_ctl->ops.trigger_rot_start)
+ master_ctl->ops.trigger_rot_start(master_ctl);
+
+ SDE_ATRACE_END("crtc_kickoff_rot");
+}
+
+void sde_crtc_commit_kickoff(struct drm_crtc *crtc)
+{
struct drm_encoder *encoder;
struct drm_device *dev;
struct sde_crtc *sde_crtc;
struct msm_drm_private *priv;
struct sde_kms *sde_kms;
struct sde_crtc_state *cstate;
- struct sde_hw_ctl *ctl;
- int ret, i;
+ int ret;
if (!crtc) {
SDE_ERROR("invalid argument\n");
@@ -2768,6 +2817,11 @@
return;
SDE_ATRACE_BEGIN("crtc_commit");
+
+ /* default to ASYNC mode for inline rotation */
+ cstate->sbuf_cfg.rot_op_mode = sde_crtc->sbuf_flush_mask ?
+ SDE_CTL_ROT_OP_MODE_INLINE_ASYNC : SDE_CTL_ROT_OP_MODE_OFFLINE;
+
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
struct sde_encoder_kickoff_params params = { 0 };
@@ -2782,8 +2836,27 @@
params.affected_displays = _sde_crtc_get_displays_affected(crtc,
crtc->state);
sde_encoder_prepare_for_kickoff(encoder, &params);
+
+ /*
+ * For inline ASYNC modes, the flush bits are not written
+ * to hardware atomically, so avoid using it if a video
+ * mode encoder is active on this CRTC.
+ */
+ if (cstate->sbuf_cfg.rot_op_mode ==
+ SDE_CTL_ROT_OP_MODE_INLINE_ASYNC &&
+ sde_encoder_get_intf_mode(encoder) ==
+ INTF_MODE_VIDEO)
+ cstate->sbuf_cfg.rot_op_mode =
+ SDE_CTL_ROT_OP_MODE_INLINE_SYNC;
}
+ /*
+ * For ASYNC inline modes, kick off the rotator now so that the H/W
+ * can start as soon as it's ready.
+ */
+ if (cstate->sbuf_cfg.rot_op_mode == SDE_CTL_ROT_OP_MODE_INLINE_ASYNC)
+ _sde_crtc_commit_kickoff_rot(crtc, cstate);
+
/* wait for frame_event_done completion */
SDE_ATRACE_BEGIN("wait_for_frame_done_event");
ret = _sde_crtc_wait_for_frame_done(crtc);
@@ -2798,24 +2871,24 @@
if (atomic_inc_return(&sde_crtc->frame_pending) == 1) {
/* acquire bandwidth and other resources */
SDE_DEBUG("crtc%d first commit\n", crtc->base.id);
- SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FUNC_CASE1);
+ SDE_EVT32(DRMID(crtc), cstate->sbuf_cfg.rot_op_mode,
+ SDE_EVTLOG_FUNC_CASE1);
} else {
SDE_DEBUG("crtc%d commit\n", crtc->base.id);
- SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FUNC_CASE2);
+ SDE_EVT32(DRMID(crtc), cstate->sbuf_cfg.rot_op_mode,
+ SDE_EVTLOG_FUNC_CASE2);
}
sde_crtc->play_count++;
- if (cstate->sbuf_cfg.rot_op_mode != SDE_CTL_ROT_OP_MODE_OFFLINE) {
- drm_atomic_crtc_for_each_plane(plane, crtc) {
- sde_plane_kickoff(plane);
- }
- }
-
- for (i = 0; i < sde_crtc->num_mixers; i++) {
- ctl = sde_crtc->mixers[i].hw_ctl;
- if (ctl && ctl->ops.setup_sbuf_cfg)
- ctl->ops.setup_sbuf_cfg(ctl, &cstate->sbuf_cfg);
- }
+ /*
+ * For SYNC inline modes, delay the kick off until after the
+ * wait for frame done in case the wait times out.
+ *
+ * Also perform a final kickoff when transitioning back to
+ * offline mode.
+ */
+ if (cstate->sbuf_cfg.rot_op_mode != SDE_CTL_ROT_OP_MODE_INLINE_ASYNC)
+ _sde_crtc_commit_kickoff_rot(crtc, cstate);
sde_vbif_clear_errors(sde_kms);
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index ea606b3..0783f11 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -204,6 +204,8 @@
* @misr_enable : boolean entry indicates misr enable/disable status.
* @misr_frame_count : misr frame count provided by client
* @misr_data : store misr data before turning off the clocks.
+ * @sbuf_flush_mask: flush mask for inline rotator
+ * @sbuf_flush_mask_old: inline rotator flush mask for previous commit
* @power_event : registered power event handle
* @cur_perf : current performance committed to clock/bandwidth driver
* @rp_lock : serialization lock for resource pool
@@ -264,6 +266,9 @@
u32 misr_frame_count;
u32 misr_data[CRTC_DUAL_MIXERS];
+ u32 sbuf_flush_mask;
+ u32 sbuf_flush_mask_old;
+
struct sde_power_event *power_event;
struct sde_core_perf_params cur_perf;
@@ -357,7 +362,6 @@
* @new_perf: new performance state being requested
* @sbuf_cfg: stream buffer configuration
* @sbuf_prefill_line: number of line for inline rotator prefetch
- * @sbuf_flush_mask: flush mask for inline rotator
*/
struct sde_crtc_state {
struct drm_crtc_state base;
@@ -385,7 +389,6 @@
struct sde_core_perf_params new_perf;
struct sde_ctl_sbuf_cfg sbuf_cfg;
u32 sbuf_prefill_line;
- u32 sbuf_flush_mask;
struct sde_crtc_respool rp;
};
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
index e027be7..95b7a6d 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
@@ -89,6 +89,11 @@
static inline void sde_hw_ctl_trigger_rot_start(struct sde_hw_ctl *ctx)
{
+ /* ROT flush bit is latched during ROT start, so set it first */
+ if (CTL_FLUSH_MASK_ROT & ctx->pending_flush_mask) {
+ ctx->pending_flush_mask &= ~CTL_FLUSH_MASK_ROT;
+ SDE_REG_WRITE(&ctx->hw, CTL_FLUSH, CTL_FLUSH_MASK_ROT);
+ }
SDE_REG_WRITE(&ctx->hw, CTL_ROT_START, BIT(0));
}
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index c915333..b22a0a3 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -1518,7 +1518,7 @@
* @plane: Pointer to drm plane
* return: prefill time in line
*/
-static u32 sde_plane_rot_calc_prefill(struct drm_plane *plane)
+u32 sde_plane_rot_calc_prefill(struct drm_plane *plane)
{
struct drm_plane_state *state;
struct sde_plane_state *pstate;
@@ -1554,26 +1554,6 @@
}
/**
- * sde_plane_is_sbuf_mode - check if sspp of given plane is in streaming
- * buffer mode
- * @plane: Pointer to drm plane
- * @prefill: Pointer to prefill line count
- * return: true if sspp is in stream buffer mode
- */
-bool sde_plane_is_sbuf_mode(struct drm_plane *plane, u32 *prefill)
-{
- struct sde_plane_state *pstate = plane && plane->state ?
- to_sde_plane_state(plane->state) : NULL;
- struct sde_plane_rot_state *rstate = pstate ? &pstate->rot : NULL;
- bool sbuf_mode = rstate ? rstate->out_sbuf : false;
-
- if (prefill)
- *prefill = sde_plane_rot_calc_prefill(plane);
-
- return sbuf_mode;
-}
-
-/**
* sde_plane_rot_calc_cfg - calculate rotator/sspp configuration by
* enumerating over all planes attached to the same rotator
* @plane: Pointer to drm plane
@@ -2752,7 +2732,7 @@
return;
*flush_rot = 0x0;
- if (sde_plane_is_sbuf_mode(plane, NULL) && rstate->rot_hw &&
+ if (rstate && rstate->out_sbuf && rstate->rot_hw &&
ctl->ops.get_bitmask_rot)
ctl->ops.get_bitmask_rot(ctl, flush_rot, rstate->rot_hw->idx);
}
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h
index 913647f..c956345 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.h
+++ b/drivers/gpu/drm/msm/sde/sde_plane.h
@@ -207,12 +207,11 @@
u32 *flush_sspp, u32 *flush_rot);
/**
- * sde_plane_is_sbuf_mode - return status of stream buffer mode
- * @plane: Pointer to DRM plane object
- * @prefill: Pointer to updated prefill in stream buffer mode (optional)
- * Returns: true if plane is in stream buffer mode
+ * sde_plane_rot_calc_prefill - calculate rotator start prefill
+ * @plane: Pointer to drm plane
+ * return: prefill time in line
*/
-bool sde_plane_is_sbuf_mode(struct drm_plane *plane, u32 *prefill);
+u32 sde_plane_rot_calc_prefill(struct drm_plane *plane);
/**
* sde_plane_restore - restore hw state if previously power collapsed