Merge "tun: Set CHECKSUM_UNNECESSARY if userspace passes this indication"
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
index 726a63f..83feac0 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
@@ -399,15 +399,15 @@
<&clock_gcc GCC_USB3_PRIM_PHY_PIPE_CLK>,
<&clock_dispcc DISP_CC_MDSS_DP_LINK_CLK>,
<&clock_dispcc DISP_CC_MDSS_DP_LINK_INTF_CLK>,
- <&clock_dispcc DISP_CC_MDSS_DP_CRYPTO_CLK>,
<&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_DP_CRYPTO_CLK>,
<&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK_SRC>,
<&mdss_dp_pll DP_VCO_DIVIDED_CLK_SRC_MUX>;
clock-names = "core_aux_clk", "core_usb_ref_clk_src",
"core_usb_ref_clk", "core_usb_cfg_ahb_clk",
"core_usb_pipe_clk", "ctrl_link_clk",
- "ctrl_link_iface_clk", "ctrl_crypto_clk",
- "ctrl_pixel_clk", "pixel_clk_rcg", "pixel_parent";
+ "ctrl_link_iface_clk", "ctrl_pixel_clk",
+ "crypto_clk", "pixel_clk_rcg", "pixel_parent";
qcom,dp-usbpd-detection = <&pmi8998_pdphy>;
@@ -545,22 +545,34 @@
qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07 07 05 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0d>;
qcom,mdss-dsi-t-clk-pre = <0x2d>;
+ qcom,display-topology = <1 0 1>,
+ <2 0 1>;
+ qcom,default-topology-index = <0>;
};
&dsi_dual_sim_vid {
qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07 07 05 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0d>;
qcom,mdss-dsi-t-clk-pre = <0x2d>;
+ qcom,display-topology = <2 0 2>,
+ <1 0 2>;
+ qcom,default-topology-index = <0>;
};
&dsi_sim_cmd {
qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07 07 05 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0d>;
qcom,mdss-dsi-t-clk-pre = <0x2d>;
+ qcom,display-topology = <1 0 1>,
+ <2 0 1>;
+ qcom,default-topology-index = <0>;
};
&dsi_dual_sim_cmd {
qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07 07 05 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0d>;
qcom,mdss-dsi-t-clk-pre = <0x2d>;
+ qcom,display-topology = <2 0 2>,
+ <1 0 2>;
+ qcom,default-topology-index = <0>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index 2ae3832..e31f8fd 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -209,13 +209,33 @@
/* data and reg bus scale settings */
qcom,sde-data-bus {
- qcom,msm-bus,name = "mdss_sde";
+ qcom,msm-bus,name = "mdss_sde_mnoc";
qcom,msm-bus,num-cases = <3>;
qcom,msm-bus,num-paths = <2>;
qcom,msm-bus,vectors-KBps =
- <22 512 0 0>, <23 512 0 0>,
- <22 512 0 6400000>, <23 512 0 6400000>,
- <22 512 0 6400000>, <23 512 0 6400000>;
+ <22 773 0 0>, <23 773 0 0>,
+ <22 773 0 6400000>, <23 773 0 6400000>,
+ <22 773 0 6400000>, <23 773 0 6400000>;
+ };
+
+ qcom,sde-llcc-bus {
+ qcom,msm-bus,name = "mdss_sde_llcc";
+ qcom,msm-bus,num-cases = <3>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <132 770 0 0>,
+ <132 770 0 6400000>,
+ <132 770 0 6400000>;
+ };
+
+ qcom,sde-ebi-bus {
+ qcom,msm-bus,name = "mdss_sde_ebi";
+ qcom,msm-bus,num-cases = <3>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <129 512 0 0>,
+ <129 512 0 6400000>,
+ <129 512 0 6400000>;
};
qcom,sde-reg-bus {
@@ -252,14 +272,36 @@
/* data and reg bus scale settings */
qcom,sde-data-bus {
- qcom,msm-bus,name = "disp_rsc";
+ qcom,msm-bus,name = "disp_rsc_mnoc";
qcom,msm-bus,active-only;
qcom,msm-bus,num-cases = <3>;
qcom,msm-bus,num-paths = <2>;
qcom,msm-bus,vectors-KBps =
- <20003 20512 0 0>, <20004 20512 0 0>,
- <20003 20512 0 6400000>, <20004 20512 0 6400000>,
- <20003 20512 0 6400000>, <20004 20512 0 6400000>;
+ <20003 20515 0 0>, <20004 20515 0 0>,
+ <20003 20515 0 6400000>, <20004 20515 0 6400000>,
+ <20003 20515 0 6400000>, <20004 20515 0 6400000>;
+ };
+
+ qcom,sde-llcc-bus {
+ qcom,msm-bus,name = "disp_rsc_llcc";
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,num-cases = <3>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <20001 20513 0 0>,
+ <20001 20513 0 6400000>,
+ <20001 20513 0 6400000>;
+ };
+
+ qcom,sde-ebi-bus {
+ qcom,msm-bus,name = "disp_rsc_ebi";
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,num-cases = <3>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <20000 20512 0 0>,
+ <20000 20512 0 6400000>,
+ <20000 20512 0 6400000>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 1eaeb59..2f718bb 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -3261,10 +3261,10 @@
};
};
- silver-virt-max-usr {
- polling-delay-passive = <100>;
- polling-delay = <100>;
- thermal-governor = "user_space";
+ silv-virt-max-step {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "step_wise";
trips {
silver-trip {
temperature = <120000>;
@@ -3274,10 +3274,10 @@
};
};
- gold-virt-max-usr {
- polling-delay-passive = <100>;
- polling-delay = <100>;
- thermal-governor = "user_space";
+ gold-virt-max-step {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "step_wise";
trips {
gold-trip {
temperature = <120000>;
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index 8b59bee..e091fd8 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -1045,7 +1045,6 @@
clear_predict_history();
clear_cl_predict_history();
- do_div(us, USEC_PER_SEC/SCLK_HZ);
system_sleep_enter(us);
}
/* Notify cluster enter event after successfully config completion */
diff --git a/drivers/edac/kryo3xx_arm64_edac.c b/drivers/edac/kryo3xx_arm64_edac.c
index 4ac880b..f5bb3ed 100644
--- a/drivers/edac/kryo3xx_arm64_edac.c
+++ b/drivers/edac/kryo3xx_arm64_edac.c
@@ -62,7 +62,7 @@
static inline void set_errxmisc_overflow(void)
{
- u64 val = 0x7F7F00000000;
+ u64 val = 0x7F7F00000000ULL;
asm volatile("msr s3_0_c5_c5_0, %0" : : "r" (val));
}
@@ -118,8 +118,9 @@
#define DATA_BUF_ERR 0x2
#define CACHE_DATA_ERR 0x6
#define CACHE_TAG_DIRTY_ERR 0x7
-#define TLB_PARITY_ERR 0x8
-#define BUS_ERROR 0x18
+#define TLB_PARITY_ERR_DATA 0x8
+#define TLB_PARITY_ERR_TAG 0x9
+#define BUS_ERROR 0x12
struct erp_drvdata {
struct edac_device_ctl_info *edev_ctl;
@@ -217,10 +218,13 @@
edac_printk(KERN_CRIT, EDAC_CPU, "ECC Error from cache tag or dirty RAM\n");
break;
- case TLB_PARITY_ERR:
+ case TLB_PARITY_ERR_DATA:
edac_printk(KERN_CRIT, EDAC_CPU, "Parity error on TLB RAM\n");
break;
+ case TLB_PARITY_ERR_TAG:
+		edac_printk(KERN_CRIT, EDAC_CPU, "Parity error on TLB TAG RAM\n");
+		break;
+
case BUS_ERROR:
edac_printk(KERN_CRIT, EDAC_CPU, "Bus Error\n");
break;
@@ -283,6 +287,16 @@
spin_unlock_irqrestore(&local_handler_lock, flags);
}
+static bool l3_is_bus_error(u64 errxstatus)
+{
+ if (KRYO3XX_ERRXSTATUS_SERR(errxstatus) == BUS_ERROR) {
+ edac_printk(KERN_CRIT, EDAC_CPU, "Bus Error\n");
+ return true;
+ }
+
+ return false;
+}
+
static void kryo3xx_check_l3_scu_error(struct edac_device_ctl_info *edev_ctl)
{
u64 errxstatus = 0;
@@ -296,6 +310,11 @@
if (KRYO3XX_ERRXSTATUS_VALID(errxstatus) &&
KRYO3XX_ERRXMISC_LVL(errxmisc) == L3) {
+ if (l3_is_bus_error(errxstatus)) {
+ if (edev_ctl->panic_on_ue)
+ panic("Causing panic due to Bus Error\n");
+ return;
+ }
if (KRYO3XX_ERRXSTATUS_UE(errxstatus)) {
edac_printk(KERN_CRIT, EDAC_CPU, "Detected L3 uncorrectable error\n");
dump_err_reg(KRYO3XX_L3_UE, L3, errxstatus, errxmisc,
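Note on the check above: KRYO3XX_ERRXSTATUS_SERR() extracts the architectural
SERR syndrome from ERR<n>STATUS, and 0x12 is the bus-error encoding this driver
now matches (the old 0x18 may have been decimal 18 written as hex). A minimal
sketch of the same decode, assuming the driver's SERR-in-bits-[7:0] layout:

	/* Sketch only; mirrors the SERR extraction assumed by this driver. */
	#define ERRXSTATUS_SERR(s)	((s) & 0xFF)	/* SERR is bits [7:0] */
	#define SERR_BUS_ERROR		0x12

	static bool is_bus_error(u64 errxstatus)
	{
		return ERRXSTATUS_SERR(errxstatus) == SERR_BUS_ERROR;
	}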
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index 4788f3b..e2a348d 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -9,7 +9,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
*/
#define pr_fmt(fmt) "dsi-ctrl:[%s] " fmt, __func__
@@ -884,7 +883,7 @@
const struct mipi_dsi_msg *msg,
u32 flags)
{
- int rc = 0;
+ int rc = 0, ret = 0;
struct mipi_dsi_packet packet;
struct dsi_ctrl_cmd_dma_fifo_info cmd;
struct dsi_ctrl_cmd_dma_info cmd_mem;
@@ -948,42 +947,59 @@
hw_flags |= (flags & DSI_CTRL_CMD_DEFER_TRIGGER) ?
DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER : 0;
- if (!(flags & DSI_CTRL_CMD_DEFER_TRIGGER))
- reinit_completion(&dsi_ctrl->int_info.cmd_dma_done);
-
- if (flags & DSI_CTRL_CMD_FETCH_MEMORY) {
- dsi_ctrl->hw.ops.kickoff_command(&dsi_ctrl->hw,
- &cmd_mem,
- hw_flags);
- } else if (flags & DSI_CTRL_CMD_FIFO_STORE) {
- dsi_ctrl->hw.ops.kickoff_fifo_command(&dsi_ctrl->hw,
- &cmd,
- hw_flags);
+ if (flags & DSI_CTRL_CMD_DEFER_TRIGGER) {
+ if (flags & DSI_CTRL_CMD_FETCH_MEMORY) {
+ dsi_ctrl->hw.ops.kickoff_command(&dsi_ctrl->hw,
+ &cmd_mem,
+ hw_flags);
+ } else if (flags & DSI_CTRL_CMD_FIFO_STORE) {
+ dsi_ctrl->hw.ops.kickoff_fifo_command(&dsi_ctrl->hw,
+ &cmd,
+ hw_flags);
+ }
}
if (!(flags & DSI_CTRL_CMD_DEFER_TRIGGER)) {
- u32 retry = 10;
- u32 status = 0;
- u64 error = 0;
- u32 mask = (DSI_CMD_MODE_DMA_DONE);
+ dsi_ctrl_enable_status_interrupt(dsi_ctrl,
+ DSI_SINT_CMD_MODE_DMA_DONE, NULL);
+ reinit_completion(&dsi_ctrl->irq_info.cmd_dma_done);
- while ((status == 0) && (retry > 0)) {
- udelay(1000);
- status = dsi_ctrl->hw.ops.get_interrupt_status(
- &dsi_ctrl->hw);
- error = dsi_ctrl->hw.ops.get_error_status(
- &dsi_ctrl->hw);
- status &= mask;
- retry--;
- dsi_ctrl->hw.ops.clear_interrupt_status(&dsi_ctrl->hw,
- status);
- dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
- error);
+ if (flags & DSI_CTRL_CMD_FETCH_MEMORY) {
+ dsi_ctrl->hw.ops.kickoff_command(&dsi_ctrl->hw,
+ &cmd_mem,
+ hw_flags);
+ } else if (flags & DSI_CTRL_CMD_FIFO_STORE) {
+ dsi_ctrl->hw.ops.kickoff_fifo_command(&dsi_ctrl->hw,
+ &cmd,
+ hw_flags);
}
- pr_debug("INT STATUS = %x, retry = %d\n", status, retry);
- if (retry == 0)
- pr_err("[DSI_%d]Command transfer failed\n",
- dsi_ctrl->cell_index);
+
+ ret = wait_for_completion_timeout(
+ &dsi_ctrl->irq_info.cmd_dma_done,
+ msecs_to_jiffies(DSI_CTRL_TX_TO_MS));
+
+ if (ret == 0) {
+ u32 status = 0;
+ u32 mask = DSI_CMD_MODE_DMA_DONE;
+
+			status = dsi_ctrl->hw.ops.get_interrupt_status(
+						&dsi_ctrl->hw);
+			if (status & mask) {
+ status |= (DSI_CMD_MODE_DMA_DONE |
+ DSI_BTA_DONE);
+ dsi_ctrl->hw.ops.clear_interrupt_status(
+ &dsi_ctrl->hw,
+ status);
+ dsi_ctrl_disable_status_interrupt(dsi_ctrl,
+ DSI_SINT_CMD_MODE_DMA_DONE);
+ complete_all(&dsi_ctrl->irq_info.cmd_dma_done);
+ pr_warn("dma_tx done but irq not triggered\n");
+ } else {
+ rc = -ETIMEDOUT;
+ dsi_ctrl_disable_status_interrupt(dsi_ctrl,
+ DSI_SINT_CMD_MODE_DMA_DONE);
+ pr_err("[DSI_%d]Command transfer failed\n",
+ dsi_ctrl->cell_index);
+ }
+ }
dsi_ctrl->hw.ops.reset_cmd_fifo(&dsi_ctrl->hw);
}
@@ -1152,15 +1168,6 @@
return rc;
}
-int dsi_ctrl_intr_deinit(struct dsi_ctrl *dsi_ctrl)
-{
- struct dsi_ctrl_interrupts *ints = &dsi_ctrl->int_info;
-
- devm_free_irq(&dsi_ctrl->pdev->dev, ints->irq, dsi_ctrl);
-
- return 0;
-}
-
static int dsi_ctrl_buffer_deinit(struct dsi_ctrl *dsi_ctrl)
{
if (dsi_ctrl->tx_cmd_buf) {
@@ -1259,6 +1266,10 @@
dsi_ctrl->cell_index = index;
dsi_ctrl->version = version;
+ dsi_ctrl->irq_info.irq_num = -1;
+ dsi_ctrl->irq_info.irq_stat_mask = 0x0;
+
+ spin_lock_init(&dsi_ctrl->irq_info.irq_lock);
dsi_ctrl->name = of_get_property(pdev->dev.of_node, "label", NULL);
if (!dsi_ctrl->name)
@@ -1677,6 +1688,236 @@
return 0;
}
+static void dsi_ctrl_handle_error_status(struct dsi_ctrl *dsi_ctrl,
+ unsigned long int error)
+{
+ pr_err("%s: %lu\n", __func__, error);
+
+ /* DTLN PHY error */
+ if (error & 0x3000e00)
+ if (dsi_ctrl->hw.ops.clear_error_status)
+ dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
+ 0x3000e00);
+
+ /* DSI FIFO OVERFLOW error */
+ if (error & 0xf0000) {
+ if (dsi_ctrl->hw.ops.clear_error_status)
+ dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
+ 0xf0000);
+ }
+
+ /* DSI FIFO UNDERFLOW error */
+ if (error & 0xf00000) {
+ if (dsi_ctrl->hw.ops.clear_error_status)
+ dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
+ 0xf00000);
+ }
+
+ /* DSI PLL UNLOCK error */
+ if (error & BIT(8))
+ if (dsi_ctrl->hw.ops.clear_error_status)
+ dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
+ BIT(8));
+}
+
+/**
+ * dsi_ctrl_isr - interrupt service routine for DSI CTRL component
+ * @irq: Incoming IRQ number
+ * @ptr: Pointer to user data structure (struct dsi_ctrl)
+ * Returns: IRQ_HANDLED if no further action required
+ */
+static irqreturn_t dsi_ctrl_isr(int irq, void *ptr)
+{
+ struct dsi_ctrl *dsi_ctrl;
+ struct dsi_event_cb_info cb_info;
+ unsigned long flags;
+ uint32_t cell_index, status, i;
+ uint64_t errors;
+
+ if (!ptr)
+ return IRQ_NONE;
+ dsi_ctrl = ptr;
+
+ /* clear status interrupts */
+ if (dsi_ctrl->hw.ops.get_interrupt_status)
+ status = dsi_ctrl->hw.ops.get_interrupt_status(&dsi_ctrl->hw);
+ else
+ status = 0x0;
+
+ if (dsi_ctrl->hw.ops.clear_interrupt_status)
+ dsi_ctrl->hw.ops.clear_interrupt_status(&dsi_ctrl->hw, status);
+
+ spin_lock_irqsave(&dsi_ctrl->irq_info.irq_lock, flags);
+ cell_index = dsi_ctrl->cell_index;
+ spin_unlock_irqrestore(&dsi_ctrl->irq_info.irq_lock, flags);
+
+ /* clear error interrupts */
+ if (dsi_ctrl->hw.ops.get_error_status)
+ errors = dsi_ctrl->hw.ops.get_error_status(&dsi_ctrl->hw);
+ else
+ errors = 0x0;
+
+ if (errors) {
+ /* handle DSI error recovery */
+ dsi_ctrl_handle_error_status(dsi_ctrl, errors);
+ if (dsi_ctrl->hw.ops.clear_error_status)
+ dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
+ errors);
+ }
+
+ if (status & DSI_CMD_MODE_DMA_DONE) {
+ dsi_ctrl_disable_status_interrupt(dsi_ctrl,
+ DSI_SINT_CMD_MODE_DMA_DONE);
+ complete_all(&dsi_ctrl->irq_info.cmd_dma_done);
+ }
+
+ if (status & DSI_CMD_FRAME_DONE) {
+ dsi_ctrl_disable_status_interrupt(dsi_ctrl,
+ DSI_SINT_CMD_FRAME_DONE);
+ complete_all(&dsi_ctrl->irq_info.cmd_frame_done);
+ }
+
+ if (status & DSI_VIDEO_MODE_FRAME_DONE) {
+ dsi_ctrl_disable_status_interrupt(dsi_ctrl,
+ DSI_SINT_VIDEO_MODE_FRAME_DONE);
+ complete_all(&dsi_ctrl->irq_info.vid_frame_done);
+ }
+
+ if (status & DSI_BTA_DONE) {
+ dsi_ctrl_disable_status_interrupt(dsi_ctrl,
+ DSI_SINT_BTA_DONE);
+ complete_all(&dsi_ctrl->irq_info.bta_done);
+ }
+
+ for (i = 0; status && i < DSI_STATUS_INTERRUPT_COUNT; ++i) {
+ if (status & 0x1) {
+ spin_lock_irqsave(&dsi_ctrl->irq_info.irq_lock, flags);
+ cb_info = dsi_ctrl->irq_info.irq_stat_cb[i];
+ spin_unlock_irqrestore(
+ &dsi_ctrl->irq_info.irq_lock, flags);
+
+ if (cb_info.event_cb)
+ (void)cb_info.event_cb(cb_info.event_usr_ptr,
+ cb_info.event_idx,
+ cell_index, irq, 0, 0, 0);
+ }
+ status >>= 1;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * dsi_ctrl_setup_isr - register ISR handler
+ * @dsi_ctrl: Pointer to associated dsi_ctrl structure
+ * Returns: Zero on success
+ */
+static int dsi_ctrl_setup_isr(struct dsi_ctrl *dsi_ctrl)
+{
+ int irq_num, rc;
+
+ if (!dsi_ctrl)
+ return -EINVAL;
+ if (dsi_ctrl->irq_info.irq_num != -1)
+ return 0;
+
+ init_completion(&dsi_ctrl->irq_info.cmd_dma_done);
+ init_completion(&dsi_ctrl->irq_info.vid_frame_done);
+ init_completion(&dsi_ctrl->irq_info.cmd_frame_done);
+ init_completion(&dsi_ctrl->irq_info.bta_done);
+
+ irq_num = platform_get_irq(dsi_ctrl->pdev, 0);
+ if (irq_num < 0) {
+ pr_err("[DSI_%d] Failed to get IRQ number, %d\n",
+ dsi_ctrl->cell_index, irq_num);
+ rc = irq_num;
+ } else {
+ rc = devm_request_threaded_irq(&dsi_ctrl->pdev->dev, irq_num,
+ dsi_ctrl_isr, NULL, 0, "dsi_ctrl", dsi_ctrl);
+ if (rc) {
+ pr_err("[DSI_%d] Failed to request IRQ, %d\n",
+ dsi_ctrl->cell_index, rc);
+ } else {
+ dsi_ctrl->irq_info.irq_num = irq_num;
+ disable_irq_nosync(irq_num);
+
+ pr_info("[DSI_%d] IRQ %d registered\n",
+ dsi_ctrl->cell_index, irq_num);
+ }
+ }
+ return rc;
+}
+
+/**
+ * _dsi_ctrl_destroy_isr - unregister ISR handler
+ * @dsi_ctrl: Pointer to associated dsi_ctrl structure
+ */
+static void _dsi_ctrl_destroy_isr(struct dsi_ctrl *dsi_ctrl)
+{
+ if (!dsi_ctrl || !dsi_ctrl->pdev || dsi_ctrl->irq_info.irq_num < 0)
+ return;
+
+ if (dsi_ctrl->irq_info.irq_num != -1) {
+ devm_free_irq(&dsi_ctrl->pdev->dev,
+ dsi_ctrl->irq_info.irq_num, dsi_ctrl);
+ dsi_ctrl->irq_info.irq_num = -1;
+ }
+}
+
+void dsi_ctrl_enable_status_interrupt(struct dsi_ctrl *dsi_ctrl,
+ uint32_t intr_idx, struct dsi_event_cb_info *event_info)
+{
+ unsigned long flags;
+
+ if (!dsi_ctrl || dsi_ctrl->irq_info.irq_num == -1 ||
+ intr_idx >= DSI_STATUS_INTERRUPT_COUNT)
+ return;
+
+ spin_lock_irqsave(&dsi_ctrl->irq_info.irq_lock, flags);
+
+ if (dsi_ctrl->irq_info.irq_stat_refcount[intr_idx] == 0) {
+ /* enable irq on first request */
+ if (dsi_ctrl->irq_info.irq_stat_mask == 0)
+ enable_irq(dsi_ctrl->irq_info.irq_num);
+
+ /* update hardware mask */
+ dsi_ctrl->irq_info.irq_stat_mask |= BIT(intr_idx);
+ dsi_ctrl->hw.ops.enable_status_interrupts(&dsi_ctrl->hw,
+ dsi_ctrl->irq_info.irq_stat_mask);
+ }
+ ++(dsi_ctrl->irq_info.irq_stat_refcount[intr_idx]);
+
+ if (event_info)
+ dsi_ctrl->irq_info.irq_stat_cb[intr_idx] = *event_info;
+
+ spin_unlock_irqrestore(&dsi_ctrl->irq_info.irq_lock, flags);
+}
+
+void dsi_ctrl_disable_status_interrupt(struct dsi_ctrl *dsi_ctrl,
+ uint32_t intr_idx)
+{
+ unsigned long flags;
+
+ if (!dsi_ctrl || dsi_ctrl->irq_info.irq_num == -1 ||
+ intr_idx >= DSI_STATUS_INTERRUPT_COUNT)
+ return;
+
+ spin_lock_irqsave(&dsi_ctrl->irq_info.irq_lock, flags);
+
+ if (dsi_ctrl->irq_info.irq_stat_refcount[intr_idx])
+ if (--(dsi_ctrl->irq_info.irq_stat_refcount[intr_idx]) == 0) {
+ dsi_ctrl->irq_info.irq_stat_mask &= ~BIT(intr_idx);
+ dsi_ctrl->hw.ops.enable_status_interrupts(&dsi_ctrl->hw,
+ dsi_ctrl->irq_info.irq_stat_mask);
+
+ /* don't need irq if no lines are enabled */
+ if (dsi_ctrl->irq_info.irq_stat_mask == 0)
+ disable_irq_nosync(dsi_ctrl->irq_info.irq_num);
+ }
+
+ spin_unlock_irqrestore(&dsi_ctrl->irq_info.irq_lock, flags);
+}
+
/**
* dsi_ctrl_host_init() - Initialize DSI host hardware.
* @dsi_ctrl: DSI controller handle.
@@ -1729,7 +1970,7 @@
&dsi_ctrl->host_config.video_timing);
}
-
+ dsi_ctrl_setup_isr(dsi_ctrl);
dsi_ctrl->hw.ops.enable_status_interrupts(&dsi_ctrl->hw, 0x0);
dsi_ctrl->hw.ops.enable_error_interrupts(&dsi_ctrl->hw, 0x0);
@@ -1777,6 +2018,8 @@
mutex_lock(&dsi_ctrl->ctrl_lock);
+ _dsi_ctrl_destroy_isr(dsi_ctrl);
+
rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_HOST_INIT, 0x0);
if (rc) {
pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
@@ -1933,7 +2176,7 @@
*/
int dsi_ctrl_cmd_tx_trigger(struct dsi_ctrl *dsi_ctrl, u32 flags)
{
- int rc = 0;
+ int rc = 0, ret = 0;
u32 status = 0;
u32 mask = (DSI_CMD_MODE_DMA_DONE);
@@ -1944,27 +2187,43 @@
mutex_lock(&dsi_ctrl->ctrl_lock);
- reinit_completion(&dsi_ctrl->int_info.cmd_dma_done);
-
- dsi_ctrl->hw.ops.trigger_command_dma(&dsi_ctrl->hw);
+ if (!(flags & DSI_CTRL_CMD_BROADCAST_MASTER))
+ dsi_ctrl->hw.ops.trigger_command_dma(&dsi_ctrl->hw);
if ((flags & DSI_CTRL_CMD_BROADCAST) &&
- (flags & DSI_CTRL_CMD_BROADCAST_MASTER)) {
- u32 retry = 10;
+ (flags & DSI_CTRL_CMD_BROADCAST_MASTER)) {
+ dsi_ctrl_enable_status_interrupt(dsi_ctrl,
+ DSI_SINT_CMD_MODE_DMA_DONE, NULL);
+ reinit_completion(&dsi_ctrl->irq_info.cmd_dma_done);
- while ((status == 0) && (retry > 0)) {
- udelay(1000);
+ /* trigger command */
+ dsi_ctrl->hw.ops.trigger_command_dma(&dsi_ctrl->hw);
+
+ ret = wait_for_completion_timeout(
+ &dsi_ctrl->irq_info.cmd_dma_done,
+ msecs_to_jiffies(DSI_CTRL_TX_TO_MS));
+
+ if (ret == 0) {
status = dsi_ctrl->hw.ops.get_interrupt_status(
&dsi_ctrl->hw);
- status &= mask;
- retry--;
- dsi_ctrl->hw.ops.clear_interrupt_status(&dsi_ctrl->hw,
+ if (status & mask) {
+ status |= (DSI_CMD_MODE_DMA_DONE |
+ DSI_BTA_DONE);
+ dsi_ctrl->hw.ops.clear_interrupt_status(
+ &dsi_ctrl->hw,
status);
+ dsi_ctrl_disable_status_interrupt(dsi_ctrl,
+ DSI_SINT_CMD_MODE_DMA_DONE);
+ complete_all(&dsi_ctrl->irq_info.cmd_dma_done);
+ pr_warn("dma_tx done but irq not triggered\n");
+ } else {
+ rc = -ETIMEDOUT;
+ dsi_ctrl_disable_status_interrupt(dsi_ctrl,
+ DSI_SINT_CMD_MODE_DMA_DONE);
+ pr_err("[DSI_%d]Command transfer failed\n",
+ dsi_ctrl->cell_index);
+ }
}
- pr_debug("INT STATUS = %x, retry = %d\n", status, retry);
- if (retry == 0)
- pr_err("[DSI_%d]Command transfer failed\n",
- dsi_ctrl->cell_index);
}
mutex_unlock(&dsi_ctrl->ctrl_lock);
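The rework above replaces the fixed 10 x 1ms polling loop with an
interrupt-driven completion plus a timeout fallback that re-reads the status
register in case the IRQ was lost. A minimal sketch of that pattern, with
illustrative names (kickoff and XFER_TIMEOUT_MS are not the driver's):

	#include <linux/completion.h>
	#include <linux/jiffies.h>

	#define XFER_TIMEOUT_MS	200	/* illustrative value */

	static DECLARE_COMPLETION(dma_done);	/* ISR calls complete_all() */

	static int start_and_wait(void (*kickoff)(void))
	{
		reinit_completion(&dma_done);	/* arm before kicking off */
		kickoff();

		if (!wait_for_completion_timeout(&dma_done,
				msecs_to_jiffies(XFER_TIMEOUT_MS))) {
			/* Timed out: the driver above re-checks the hardware
			 * status here before declaring failure, since the
			 * transfer may have finished with a missed IRQ.
			 */
			return -ETIMEDOUT;
		}
		return 0;
	}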
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
index 7f36fde..ec535ce11 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
@@ -138,33 +138,26 @@
/**
* struct dsi_ctrl_interrupts - define interrupt information
- * @irq: IRQ id for the DSI controller.
- * @intr_lock: Spinlock to protect access to interrupt registers.
- * @interrupt_status: Status interrupts which need to be serviced.
- * @error_status: Error interurpts which need to be serviced.
- * @interrupts_enabled: Status interrupts which are enabled.
- * @errors_enabled: Error interrupts which are enabled.
+ * @irq_lock: Spinlock for ISR handler.
+ * @irq_num: Linux interrupt number associated with device.
+ * @irq_stat_mask: Hardware mask of currently enabled interrupts.
+ * @irq_stat_refcount: Number of times each interrupt has been requested.
+ * @irq_stat_cb: Status IRQ callback definitions.
* @cmd_dma_done: Completion signal for DSI_CMD_MODE_DMA_DONE interrupt
* @vid_frame_done: Completion signal for DSI_VIDEO_MODE_FRAME_DONE int.
* @cmd_frame_done: Completion signal for DSI_CMD_FRAME_DONE interrupt.
- * @interrupt_done_work: Work item for servicing status interrupts.
- * @error_status_work: Work item for servicing error interrupts.
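+ * @bta_done:           Completion signal for DSI_BTA_DONE interrupt.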
*/
struct dsi_ctrl_interrupts {
- u32 irq;
- spinlock_t intr_lock; /* protects access to interrupt registers */
- u32 interrupt_status;
- u64 error_status;
-
- u32 interrupts_enabled;
- u64 errors_enabled;
+ spinlock_t irq_lock;
+ int irq_num;
+ uint32_t irq_stat_mask;
+ int irq_stat_refcount[DSI_STATUS_INTERRUPT_COUNT];
+ struct dsi_event_cb_info irq_stat_cb[DSI_STATUS_INTERRUPT_COUNT];
struct completion cmd_dma_done;
struct completion vid_frame_done;
struct completion cmd_frame_done;
-
- struct work_struct interrupt_done_work;
- struct work_struct error_status_work;
+ struct completion bta_done;
};
/**
@@ -180,7 +173,7 @@
* @hw: DSI controller hardware object.
* @current_state: Current driver and hardware state.
* @clk_cb: Callback for DSI clock control.
- * @int_info: Interrupt information.
+ * @irq_info: Interrupt information.
* @clk_info: Clock information.
* @clk_freq: DSi Link clock frequency information.
* @pwr_info: Power information.
@@ -212,7 +205,8 @@
struct dsi_ctrl_state_info current_state;
struct clk_ctrl_cb clk_cb;
- struct dsi_ctrl_interrupts int_info;
+ struct dsi_ctrl_interrupts irq_info;
+
/* Clock and power states */
struct dsi_ctrl_clk_info clk_info;
struct link_clk_freq clk_freq;
@@ -560,6 +554,23 @@
struct dsi_clk_link_set *source_clks);
/**
+ * dsi_ctrl_enable_status_interrupt() - enable status interrupts
+ * @dsi_ctrl: DSI controller handle.
+ * @intr_idx:      Index of interrupt to enable.
+ * @event_info: Pointer to event callback definition
+ */
+void dsi_ctrl_enable_status_interrupt(struct dsi_ctrl *dsi_ctrl,
+ uint32_t intr_idx, struct dsi_event_cb_info *event_info);
+
+/**
+ * dsi_ctrl_disable_status_interrupt() - disable status interrupts
+ * @dsi_ctrl: DSI controller handle.
+ * @intr_idx:      Index of interrupt to disable.
+ */
+void dsi_ctrl_disable_status_interrupt(
+ struct dsi_ctrl *dsi_ctrl, uint32_t intr_idx);
+
+/**
* dsi_ctrl_drv_register() - register platform driver for dsi controller
*/
void dsi_ctrl_drv_register(void);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
index bb72807..74be279 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
@@ -9,7 +9,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
*/
#ifndef _DSI_CTRL_HW_H_
@@ -84,6 +83,36 @@
};
/**
+ * enum dsi_status_int_index - index of interrupts generated by DSI controller
+ * @DSI_SINT_CMD_MODE_DMA_DONE: Command mode DMA packets are sent out.
+ * @DSI_SINT_CMD_STREAM0_FRAME_DONE: A frame of cmd mode stream0 is sent out.
+ * @DSI_SINT_CMD_STREAM1_FRAME_DONE: A frame of cmd mode stream1 is sent out.
+ * @DSI_SINT_CMD_STREAM2_FRAME_DONE: A frame of cmd mode stream2 is sent out.
+ * @DSI_SINT_VIDEO_MODE_FRAME_DONE: A frame of video mode stream is sent out.
+ * @DSI_SINT_BTA_DONE: A BTA is completed.
+ * @DSI_SINT_CMD_FRAME_DONE: A frame of selected cmd mode stream is
+ * sent out by MDP.
+ * @DSI_SINT_DYN_REFRESH_DONE: The dynamic refresh operation completed.
+ * @DSI_SINT_DESKEW_DONE:           The deskew calibration operation completed.
+ * @DSI_SINT_DYN_BLANK_DMA_DONE:    The dynamic blanking DMA operation has
+ *                                  completed.
+ */
+enum dsi_status_int_index {
+ DSI_SINT_CMD_MODE_DMA_DONE = 0,
+ DSI_SINT_CMD_STREAM0_FRAME_DONE = 1,
+ DSI_SINT_CMD_STREAM1_FRAME_DONE = 2,
+ DSI_SINT_CMD_STREAM2_FRAME_DONE = 3,
+ DSI_SINT_VIDEO_MODE_FRAME_DONE = 4,
+ DSI_SINT_BTA_DONE = 5,
+ DSI_SINT_CMD_FRAME_DONE = 6,
+ DSI_SINT_DYN_REFRESH_DONE = 7,
+ DSI_SINT_DESKEW_DONE = 8,
+ DSI_SINT_DYN_BLANK_DMA_DONE = 9,
+
+ DSI_STATUS_INTERRUPT_COUNT
+};
+
+/**
* enum dsi_status_int_type - status interrupts generated by DSI controller
* @DSI_CMD_MODE_DMA_DONE: Command mode DMA packets are sent out.
* @DSI_CMD_STREAM0_FRAME_DONE: A frame of command mode stream0 is sent out.
@@ -99,16 +128,89 @@
* completed.
*/
enum dsi_status_int_type {
- DSI_CMD_MODE_DMA_DONE = BIT(0),
- DSI_CMD_STREAM0_FRAME_DONE = BIT(1),
- DSI_CMD_STREAM1_FRAME_DONE = BIT(2),
- DSI_CMD_STREAM2_FRAME_DONE = BIT(3),
- DSI_VIDEO_MODE_FRAME_DONE = BIT(4),
- DSI_BTA_DONE = BIT(5),
- DSI_CMD_FRAME_DONE = BIT(6),
- DSI_DYN_REFRESH_DONE = BIT(7),
- DSI_DESKEW_DONE = BIT(8),
- DSI_DYN_BLANK_DMA_DONE = BIT(9)
+ DSI_CMD_MODE_DMA_DONE = BIT(DSI_SINT_CMD_MODE_DMA_DONE),
+ DSI_CMD_STREAM0_FRAME_DONE = BIT(DSI_SINT_CMD_STREAM0_FRAME_DONE),
+ DSI_CMD_STREAM1_FRAME_DONE = BIT(DSI_SINT_CMD_STREAM1_FRAME_DONE),
+ DSI_CMD_STREAM2_FRAME_DONE = BIT(DSI_SINT_CMD_STREAM2_FRAME_DONE),
+ DSI_VIDEO_MODE_FRAME_DONE = BIT(DSI_SINT_VIDEO_MODE_FRAME_DONE),
+ DSI_BTA_DONE = BIT(DSI_SINT_BTA_DONE),
+ DSI_CMD_FRAME_DONE = BIT(DSI_SINT_CMD_FRAME_DONE),
+ DSI_DYN_REFRESH_DONE = BIT(DSI_SINT_DYN_REFRESH_DONE),
+ DSI_DESKEW_DONE = BIT(DSI_SINT_DESKEW_DONE),
+ DSI_DYN_BLANK_DMA_DONE = BIT(DSI_SINT_DYN_BLANK_DMA_DONE)
+};
+
+/**
+ * enum dsi_error_int_index - index of error interrupts from DSI controller
+ * @DSI_EINT_RDBK_SINGLE_ECC_ERR: Single bit ECC error in read packet.
+ * @DSI_EINT_RDBK_MULTI_ECC_ERR: Multi bit ECC error in read packet.
+ * @DSI_EINT_RDBK_CRC_ERR: CRC error in read packet.
+ * @DSI_EINT_RDBK_INCOMPLETE_PKT: Incomplete read packet.
+ * @DSI_EINT_PERIPH_ERROR_PKT:        Error packet returned from peripheral.
+ * @DSI_EINT_LP_RX_TIMEOUT: Low power reverse transmission timeout.
+ * @DSI_EINT_HS_TX_TIMEOUT: High speed fwd transmission timeout.
+ * @DSI_EINT_BTA_TIMEOUT: BTA timeout.
+ * @DSI_EINT_PLL_UNLOCK: PLL has unlocked.
+ * @DSI_EINT_DLN0_ESC_ENTRY_ERR: Incorrect LP Rx escape entry.
+ * @DSI_EINT_DLN0_ESC_SYNC_ERR: LP Rx data is not byte aligned.
+ * @DSI_EINT_DLN0_LP_CONTROL_ERR: Incorrect LP Rx state sequence.
+ * @DSI_EINT_PENDING_HS_TX_TIMEOUT: Pending High-speed transfer timeout.
+ * @DSI_EINT_INTERLEAVE_OP_CONTENTION: Interleave operation contention.
+ * @DSI_EINT_CMD_DMA_FIFO_UNDERFLOW: Command mode DMA FIFO underflow.
+ * @DSI_EINT_CMD_MDP_FIFO_UNDERFLOW: Command MDP FIFO underflow (failed to
+ * receive one complete line from MDP).
+ * @DSI_EINT_DLN0_HS_FIFO_OVERFLOW: High speed FIFO data lane 0 overflows.
+ * @DSI_EINT_DLN1_HS_FIFO_OVERFLOW: High speed FIFO data lane 1 overflows.
+ * @DSI_EINT_DLN2_HS_FIFO_OVERFLOW: High speed FIFO data lane 2 overflows.
+ * @DSI_EINT_DLN3_HS_FIFO_OVERFLOW: High speed FIFO data lane 3 overflows.
+ * @DSI_EINT_DLN0_HS_FIFO_UNDERFLOW: High speed FIFO data lane 0 underflows.
+ * @DSI_EINT_DLN1_HS_FIFO_UNDERFLOW: High speed FIFO data lane 1 underflows.
+ * @DSI_EINT_DLN2_HS_FIFO_UNDERFLOW: High speed FIFO data lane 2 underflows.
+ * @DSI_EINT_DLN3_HS_FIFO_UNDERFLOW:  High speed FIFO data lane 3 underflows.
+ * @DSI_EINT_DLN0_LP0_CONTENTION:     PHY level contention while lane 0 is low.
+ * @DSI_EINT_DLN1_LP0_CONTENTION:     PHY level contention while lane 1 is low.
+ * @DSI_EINT_DLN2_LP0_CONTENTION:     PHY level contention while lane 2 is low.
+ * @DSI_EINT_DLN3_LP0_CONTENTION:     PHY level contention while lane 3 is low.
+ * @DSI_EINT_DLN0_LP1_CONTENTION:     PHY level contention while lane 0 is high.
+ * @DSI_EINT_DLN1_LP1_CONTENTION:     PHY level contention while lane 1 is high.
+ * @DSI_EINT_DLN2_LP1_CONTENTION:     PHY level contention while lane 2 is high.
+ */
+enum dsi_error_int_index {
+ DSI_EINT_RDBK_SINGLE_ECC_ERR = 0,
+ DSI_EINT_RDBK_MULTI_ECC_ERR = 1,
+ DSI_EINT_RDBK_CRC_ERR = 2,
+ DSI_EINT_RDBK_INCOMPLETE_PKT = 3,
+ DSI_EINT_PERIPH_ERROR_PKT = 4,
+ DSI_EINT_LP_RX_TIMEOUT = 5,
+ DSI_EINT_HS_TX_TIMEOUT = 6,
+ DSI_EINT_BTA_TIMEOUT = 7,
+ DSI_EINT_PLL_UNLOCK = 8,
+ DSI_EINT_DLN0_ESC_ENTRY_ERR = 9,
+ DSI_EINT_DLN0_ESC_SYNC_ERR = 10,
+ DSI_EINT_DLN0_LP_CONTROL_ERR = 11,
+ DSI_EINT_PENDING_HS_TX_TIMEOUT = 12,
+ DSI_EINT_INTERLEAVE_OP_CONTENTION = 13,
+ DSI_EINT_CMD_DMA_FIFO_UNDERFLOW = 14,
+ DSI_EINT_CMD_MDP_FIFO_UNDERFLOW = 15,
+ DSI_EINT_DLN0_HS_FIFO_OVERFLOW = 16,
+ DSI_EINT_DLN1_HS_FIFO_OVERFLOW = 17,
+ DSI_EINT_DLN2_HS_FIFO_OVERFLOW = 18,
+ DSI_EINT_DLN3_HS_FIFO_OVERFLOW = 19,
+ DSI_EINT_DLN0_HS_FIFO_UNDERFLOW = 20,
+ DSI_EINT_DLN1_HS_FIFO_UNDERFLOW = 21,
+ DSI_EINT_DLN2_HS_FIFO_UNDERFLOW = 22,
+ DSI_EINT_DLN3_HS_FIFO_UNDERFLOW = 23,
+ DSI_EINT_DLN0_LP0_CONTENTION = 24,
+ DSI_EINT_DLN1_LP0_CONTENTION = 25,
+ DSI_EINT_DLN2_LP0_CONTENTION = 26,
+ DSI_EINT_DLN3_LP0_CONTENTION = 27,
+ DSI_EINT_DLN0_LP1_CONTENTION = 28,
+ DSI_EINT_DLN1_LP1_CONTENTION = 29,
+ DSI_EINT_DLN2_LP1_CONTENTION = 30,
+ DSI_EINT_DLN3_LP1_CONTENTION = 31,
+
+ DSI_ERROR_INTERRUPT_COUNT
};
/**
@@ -148,38 +250,38 @@
* @DSI_DLN3_LP1_CONTENTION: PHY level contention while lane 3 is high.
*/
enum dsi_error_int_type {
- DSI_RDBK_SINGLE_ECC_ERR = BIT(0),
- DSI_RDBK_MULTI_ECC_ERR = BIT(1),
- DSI_RDBK_CRC_ERR = BIT(2),
- DSI_RDBK_INCOMPLETE_PKT = BIT(3),
- DSI_PERIPH_ERROR_PKT = BIT(4),
- DSI_LP_RX_TIMEOUT = BIT(5),
- DSI_HS_TX_TIMEOUT = BIT(6),
- DSI_BTA_TIMEOUT = BIT(7),
- DSI_PLL_UNLOCK = BIT(8),
- DSI_DLN0_ESC_ENTRY_ERR = BIT(9),
- DSI_DLN0_ESC_SYNC_ERR = BIT(10),
- DSI_DLN0_LP_CONTROL_ERR = BIT(11),
- DSI_PENDING_HS_TX_TIMEOUT = BIT(12),
- DSI_INTERLEAVE_OP_CONTENTION = BIT(13),
- DSI_CMD_DMA_FIFO_UNDERFLOW = BIT(14),
- DSI_CMD_MDP_FIFO_UNDERFLOW = BIT(15),
- DSI_DLN0_HS_FIFO_OVERFLOW = BIT(16),
- DSI_DLN1_HS_FIFO_OVERFLOW = BIT(17),
- DSI_DLN2_HS_FIFO_OVERFLOW = BIT(18),
- DSI_DLN3_HS_FIFO_OVERFLOW = BIT(19),
- DSI_DLN0_HS_FIFO_UNDERFLOW = BIT(20),
- DSI_DLN1_HS_FIFO_UNDERFLOW = BIT(21),
- DSI_DLN2_HS_FIFO_UNDERFLOW = BIT(22),
- DSI_DLN3_HS_FIFO_UNDERFLOW = BIT(23),
- DSI_DLN0_LP0_CONTENTION = BIT(24),
- DSI_DLN1_LP0_CONTENTION = BIT(25),
- DSI_DLN2_LP0_CONTENTION = BIT(26),
- DSI_DLN3_LP0_CONTENTION = BIT(27),
- DSI_DLN0_LP1_CONTENTION = BIT(28),
- DSI_DLN1_LP1_CONTENTION = BIT(29),
- DSI_DLN2_LP1_CONTENTION = BIT(30),
- DSI_DLN3_LP1_CONTENTION = BIT(31),
+ DSI_RDBK_SINGLE_ECC_ERR = BIT(DSI_EINT_RDBK_SINGLE_ECC_ERR),
+ DSI_RDBK_MULTI_ECC_ERR = BIT(DSI_EINT_RDBK_MULTI_ECC_ERR),
+ DSI_RDBK_CRC_ERR = BIT(DSI_EINT_RDBK_CRC_ERR),
+ DSI_RDBK_INCOMPLETE_PKT = BIT(DSI_EINT_RDBK_INCOMPLETE_PKT),
+ DSI_PERIPH_ERROR_PKT = BIT(DSI_EINT_PERIPH_ERROR_PKT),
+ DSI_LP_RX_TIMEOUT = BIT(DSI_EINT_LP_RX_TIMEOUT),
+ DSI_HS_TX_TIMEOUT = BIT(DSI_EINT_HS_TX_TIMEOUT),
+ DSI_BTA_TIMEOUT = BIT(DSI_EINT_BTA_TIMEOUT),
+ DSI_PLL_UNLOCK = BIT(DSI_EINT_PLL_UNLOCK),
+ DSI_DLN0_ESC_ENTRY_ERR = BIT(DSI_EINT_DLN0_ESC_ENTRY_ERR),
+ DSI_DLN0_ESC_SYNC_ERR = BIT(DSI_EINT_DLN0_ESC_SYNC_ERR),
+ DSI_DLN0_LP_CONTROL_ERR = BIT(DSI_EINT_DLN0_LP_CONTROL_ERR),
+ DSI_PENDING_HS_TX_TIMEOUT = BIT(DSI_EINT_PENDING_HS_TX_TIMEOUT),
+ DSI_INTERLEAVE_OP_CONTENTION = BIT(DSI_EINT_INTERLEAVE_OP_CONTENTION),
+ DSI_CMD_DMA_FIFO_UNDERFLOW = BIT(DSI_EINT_CMD_DMA_FIFO_UNDERFLOW),
+ DSI_CMD_MDP_FIFO_UNDERFLOW = BIT(DSI_EINT_CMD_MDP_FIFO_UNDERFLOW),
+ DSI_DLN0_HS_FIFO_OVERFLOW = BIT(DSI_EINT_DLN0_HS_FIFO_OVERFLOW),
+ DSI_DLN1_HS_FIFO_OVERFLOW = BIT(DSI_EINT_DLN1_HS_FIFO_OVERFLOW),
+ DSI_DLN2_HS_FIFO_OVERFLOW = BIT(DSI_EINT_DLN2_HS_FIFO_OVERFLOW),
+ DSI_DLN3_HS_FIFO_OVERFLOW = BIT(DSI_EINT_DLN3_HS_FIFO_OVERFLOW),
+ DSI_DLN0_HS_FIFO_UNDERFLOW = BIT(DSI_EINT_DLN0_HS_FIFO_UNDERFLOW),
+ DSI_DLN1_HS_FIFO_UNDERFLOW = BIT(DSI_EINT_DLN1_HS_FIFO_UNDERFLOW),
+ DSI_DLN2_HS_FIFO_UNDERFLOW = BIT(DSI_EINT_DLN2_HS_FIFO_UNDERFLOW),
+ DSI_DLN3_HS_FIFO_UNDERFLOW = BIT(DSI_EINT_DLN3_HS_FIFO_UNDERFLOW),
+ DSI_DLN0_LP0_CONTENTION = BIT(DSI_EINT_DLN0_LP0_CONTENTION),
+ DSI_DLN1_LP0_CONTENTION = BIT(DSI_EINT_DLN1_LP0_CONTENTION),
+ DSI_DLN2_LP0_CONTENTION = BIT(DSI_EINT_DLN2_LP0_CONTENTION),
+ DSI_DLN3_LP0_CONTENTION = BIT(DSI_EINT_DLN3_LP0_CONTENTION),
+ DSI_DLN0_LP1_CONTENTION = BIT(DSI_EINT_DLN0_LP1_CONTENTION),
+ DSI_DLN1_LP1_CONTENTION = BIT(DSI_EINT_DLN1_LP1_CONTENTION),
+ DSI_DLN2_LP1_CONTENTION = BIT(DSI_EINT_DLN2_LP1_CONTENTION),
+ DSI_DLN3_LP1_CONTENTION = BIT(DSI_EINT_DLN3_LP1_CONTENTION),
};
/**
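The refactor above derives each status/error mask from an index enum via
BIT(), so code that iterates interrupts by index and code that programs mask
registers can never drift apart. The shape of the idiom, with hypothetical
names:

	#include <linux/bitops.h>

	enum irq_idx {			/* hypothetical example */
		IRQ_IDX_DMA_DONE = 0,
		IRQ_IDX_FRAME_DONE = 1,
		IRQ_IDX_COUNT
	};

	enum irq_mask {
		IRQ_DMA_DONE	= BIT(IRQ_IDX_DMA_DONE),
		IRQ_FRAME_DONE	= BIT(IRQ_IDX_FRAME_DONE),
	};

	/* An ISR can walk indices while shifting the mask-based status. */
	static void dispatch(u32 status, void (*cb[])(void))
	{
		int i;

		for (i = 0; status && i < IRQ_IDX_COUNT; i++, status >>= 1)
			if ((status & 0x1) && cb[i])
				cb[i]();
	}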
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
index a024c43..0af6f25 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
@@ -706,6 +706,8 @@
{
u32 reg = 0;
+ reg = DSI_R32(ctrl, DSI_INT_CTRL);
+
if (ints & DSI_CMD_MODE_DMA_DONE)
reg |= BIT(0);
if (ints & DSI_CMD_FRAME_DONE)
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
index 77da9b4..1e6727b 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
@@ -9,7 +9,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
*/
#ifndef _DSI_DEFS_H_
@@ -447,5 +446,14 @@
r1->h == r2->h;
}
+struct dsi_event_cb_info {
+ uint32_t event_idx;
+ void *event_usr_ptr;
+
+ int (*event_cb)(void *event_usr_ptr,
+ uint32_t event_idx, uint32_t instance_idx,
+ uint32_t data0, uint32_t data1,
+ uint32_t data2, uint32_t data3);
+};
#endif /* _DSI_DEFS_H_ */
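The new struct dsi_event_cb_info bundles a callback, a user pointer, and an
event index so lower layers can notify upward without knowing connector types.
A hedged usage sketch (my_event_cb and the stored pointer are illustrative,
not part of the driver):

	#include <linux/types.h>

	static int my_event_cb(void *usr, uint32_t event_idx,
			uint32_t instance_idx, uint32_t d0,
			uint32_t d1, uint32_t d2, uint32_t d3)
	{
		/* usr is whatever the registrant stored in event_usr_ptr */
		return 0;
	}

	static struct dsi_event_cb_info example_info = {
		.event_idx = 0,
		.event_usr_ptr = NULL,	/* e.g. a connector pointer */
		.event_cb = my_event_cb,
	};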
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index 3dd4950..52b1dcb 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -19,6 +19,7 @@
#include <linux/err.h>
#include "msm_drv.h"
+#include "sde_connector.h"
#include "dsi_display.h"
#include "dsi_panel.h"
#include "dsi_ctrl.h"
@@ -499,7 +500,45 @@
return 0;
}
+void dsi_display_enable_event(struct dsi_display *display,
+ uint32_t event_idx, struct dsi_event_cb_info *event_info,
+ bool enable)
+{
+ uint32_t irq_status_idx = DSI_STATUS_INTERRUPT_COUNT;
+	int i;
+
+	if (!display) {
+ pr_err("invalid display\n");
+ return;
+ }
+
+ if (event_info)
+ event_info->event_idx = event_idx;
+
+ switch (event_idx) {
+ case SDE_CONN_EVENT_VID_DONE:
+ irq_status_idx = DSI_SINT_VIDEO_MODE_FRAME_DONE;
+ break;
+ case SDE_CONN_EVENT_CMD_DONE:
+ irq_status_idx = DSI_SINT_CMD_FRAME_DONE;
+ break;
+ default:
+ /* nothing to do */
+ pr_debug("[%s] unhandled event %d\n", display->name, event_idx);
+ return;
+ }
+
+ if (enable) {
+ for (i = 0; i < display->ctrl_count; i++)
+ dsi_ctrl_enable_status_interrupt(
+ display->ctrl[i].ctrl, irq_status_idx,
+ event_info);
+ } else {
+ for (i = 0; i < display->ctrl_count; i++)
+ dsi_ctrl_disable_status_interrupt(
+ display->ctrl[i].ctrl, irq_status_idx);
+ }
+}
static int dsi_display_ctrl_power_on(struct dsi_display *display)
{
@@ -1215,8 +1254,7 @@
goto error;
}
- rc = dsi_ctrl_cmd_tx_trigger(ctrl->ctrl,
- DSI_CTRL_CMD_BROADCAST);
+ rc = dsi_ctrl_cmd_tx_trigger(ctrl->ctrl, flags);
if (rc) {
pr_err("[%s] cmd trigger failed, rc=%d\n",
display->name, rc);
@@ -1224,9 +1262,7 @@
}
}
- rc = dsi_ctrl_cmd_tx_trigger(m_ctrl->ctrl,
- (DSI_CTRL_CMD_BROADCAST_MASTER |
- DSI_CTRL_CMD_BROADCAST));
+ rc = dsi_ctrl_cmd_tx_trigger(m_ctrl->ctrl, m_flags);
if (rc) {
pr_err("[%s] cmd trigger failed for master, rc=%d\n",
display->name, rc);
@@ -2780,6 +2816,9 @@
(void)_dsi_display_dev_deinit(main_display);
component_del(&main_display->pdev->dev,
&dsi_display_comp_ops);
+ mutex_lock(&dsi_display_list_lock);
+ list_del(&main_display->list);
+ mutex_unlock(&dsi_display_list_lock);
comp_add_success = false;
default_active_node = NULL;
pr_debug("removed the existing comp ops\n");
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
index 9aa3113..38af37b 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
@@ -444,6 +444,17 @@
int dsi_display_clock_gate(struct dsi_display *display, bool enable);
int dsi_dispaly_static_frame(struct dsi_display *display, bool enable);
+/**
+ * dsi_display_enable_event() - enable interrupt based connector event
+ * @display: Handle to display.
+ * @event_idx: Event index.
+ * @event_info: Event callback definition.
+ * @enable: Whether to enable/disable the event interrupt.
+ */
+void dsi_display_enable_event(struct dsi_display *display,
+ uint32_t event_idx, struct dsi_event_cb_info *event_info,
+ bool enable);
+
int dsi_display_set_backlight(void *display, u32 bl_lvl);
/**
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
index 4e09cfb..b499bd6 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
@@ -546,6 +546,19 @@
return dsi_display_pre_kickoff(display, params);
}
+void dsi_conn_enable_event(struct drm_connector *connector,
+ uint32_t event_idx, bool enable, void *display)
+{
+ struct dsi_event_cb_info event_info;
+
+ memset(&event_info, 0, sizeof(event_info));
+
+ event_info.event_cb = sde_connector_trigger_event;
+ event_info.event_usr_ptr = connector;
+
+ dsi_display_enable_event(display, event_idx, &event_info, enable);
+}
+
struct dsi_bridge *dsi_drm_bridge_init(struct dsi_display *display,
struct drm_device *dev,
struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
index 68520a8..45feec9 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
@@ -85,6 +85,16 @@
struct drm_display_mode *mode,
void *display);
+/**
+ * dsi_conn_enable_event - callback to notify DSI driver of event registration
+ * @connector: Pointer to drm connector structure
+ * @event_idx: Connector event index
+ * @enable: Whether or not the event is enabled
+ * @display: Pointer to private display handle
+ */
+void dsi_conn_enable_event(struct drm_connector *connector,
+ uint32_t event_idx, bool enable, void *display);
+
struct dsi_bridge *dsi_drm_bridge_init(struct dsi_display *display,
struct drm_device *dev,
struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 6551257..4ede271 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -546,7 +546,7 @@
return rc;
c_state->rois.roi[i] = roi_v1.roi[i];
- SDE_DEBUG_CONN(c_conn, "roi%d: roi 0x%x 0x%x 0x%x 0x%x\n", i,
+ SDE_DEBUG_CONN(c_conn, "roi%d: roi (%d,%d) (%d,%d)\n", i,
c_state->rois.roi[i].x1,
c_state->rois.roi[i].y1,
c_state->rois.roi[i].x2,
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 30bb72b..0f9d739 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -2634,7 +2634,7 @@
for (i = curr_cnt; i < cnt; i++) {
pstate = pstates[i].drm_pstate;
POPULATE_RECT(&dst_rect, pstate->crtc_x, pstate->crtc_y,
- pstate->crtc_w, pstate->crtc_h, true);
+ pstate->crtc_w, pstate->crtc_h, false);
sde_kms_rect_intersect(&dst_rect, excl_rect, &intersect);
if (intersect.w == excl_rect->w && intersect.h == excl_rect->h
@@ -2805,8 +2805,10 @@
sde_plane_clear_multirect(pipe_staged[i]);
if (is_sde_plane_virtual(pipe_staged[i]->plane)) {
- SDE_ERROR("invalid use of virtual plane: %d\n",
+ SDE_ERROR(
+ "r1 only virt plane:%d not supported\n",
pipe_staged[i]->plane->base.id);
+ rc = -EINVAL;
goto end;
}
}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ad4.c b/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
index b02cc06..35fc2b5 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
@@ -202,7 +202,7 @@
} else {
info[dspp->idx].state = ad4_state_run;
SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
- 0);
+ 0x100);
}
return 0;
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 966a988..d0ae889 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -771,33 +771,47 @@
mutex_lock(&drvdata->mem_lock);
- /*
- * ETR DDR memory is not allocated until user enables
- * tmc at least once. If user specifies different ETR
- * DDR size than the default size or switches between
- * contiguous or scatter-gather memory type after
- * enabling tmc; the new selection will be honored from
- * next tmc enable session.
- */
- if (drvdata->size != drvdata->mem_size ||
- drvdata->memtype != drvdata->mem_type) {
- tmc_etr_free_mem(drvdata);
- drvdata->size = drvdata->mem_size;
- drvdata->memtype = drvdata->mem_type;
- }
- ret = tmc_etr_alloc_mem(drvdata);
- if (ret) {
- pm_runtime_put(drvdata->dev);
- mutex_unlock(&drvdata->mem_lock);
- return ret;
- }
- mutex_unlock(&drvdata->mem_lock);
-
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
ret = -EBUSY;
- goto out;
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ mutex_unlock(&drvdata->mem_lock);
+ return ret;
}
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM) {
+ /*
+ * ETR DDR memory is not allocated until user enables
+ * tmc at least once. If user specifies different ETR
+ * DDR size than the default size or switches between
+ * contiguous or scatter-gather memory type after
+ * enabling tmc; the new selection will be honored from
+ * next tmc enable session.
+ */
+ if (drvdata->size != drvdata->mem_size ||
+ drvdata->memtype != drvdata->mem_type) {
+ tmc_etr_free_mem(drvdata);
+ drvdata->size = drvdata->mem_size;
+ drvdata->memtype = drvdata->mem_type;
+ }
+ ret = tmc_etr_alloc_mem(drvdata);
+ if (ret) {
+ mutex_unlock(&drvdata->mem_lock);
+ return ret;
+ }
+ } else {
+ drvdata->usbch = usb_qdss_open("qdss", drvdata,
+ usb_notifier);
+ if (IS_ERR_OR_NULL(drvdata->usbch)) {
+ dev_err(drvdata->dev, "usb_qdss_open failed\n");
+			ret = drvdata->usbch ? PTR_ERR(drvdata->usbch)
+					     : -ENODEV;
+ mutex_unlock(&drvdata->mem_lock);
+ return ret;
+ }
+ }
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
val = local_xchg(&drvdata->mode, mode);
/*
@@ -808,9 +822,14 @@
if (val == CS_MODE_SYSFS)
goto out;
- tmc_etr_enable_hw(drvdata);
+ if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM)
+ tmc_etr_enable_hw(drvdata);
+
+ drvdata->enable = true;
+ drvdata->sticky_enable = true;
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ mutex_unlock(&drvdata->mem_lock);
if (!ret)
dev_info(drvdata->dev, "TMC-ETR enabled\n");
@@ -880,8 +899,15 @@
val = local_xchg(&drvdata->mode, CS_MODE_DISABLED);
/* Disable the TMC only if it needs to */
- if (val != CS_MODE_DISABLED)
- tmc_etr_disable_hw(drvdata);
+ if (val != CS_MODE_DISABLED) {
+ if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB) {
+ __tmc_etr_disable_to_bam(drvdata);
+ tmc_etr_bam_disable(drvdata);
+ usb_qdss_close(drvdata->usbch);
+ } else {
+ tmc_etr_disable_hw(drvdata);
+ }
+ }
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -913,6 +939,11 @@
goto out;
}
+ if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB) {
+ ret = -EINVAL;
+ goto out;
+ }
+
val = local_read(&drvdata->mode);
/* Don't interfere if operated from Perf */
if (val == CS_MODE_PERF) {
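One subtlety in the USB branch above: usb_qdss_open() failures are tested with
IS_ERR_OR_NULL(), and PTR_ERR(NULL) evaluates to 0, so a NULL return must be
mapped to a real errno or the caller would see success. A generic sketch of
the pitfall (opener is an illustrative stand-in):

	#include <linux/err.h>
	#include <linux/errno.h>

	static int open_channel(void *(*opener)(void), void **out)
	{
		void *ch = opener();

		if (IS_ERR_OR_NULL(ch))
			return ch ? PTR_ERR(ch) : -ENODEV;

		*out = ch;
		return 0;
	}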
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 077cb45..012c56e 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -540,6 +540,7 @@
drvdata->memtype = TMC_ETR_MEM_TYPE_CONTIG;
drvdata->mem_size = drvdata->size;
drvdata->mem_type = drvdata->memtype;
+ drvdata->out_mode = TMC_ETR_OUT_MODE_MEM;
} else {
drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index aa6c522..743d2f7 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -57,6 +57,9 @@
#define DEFAULT_MAXLINEWIDTH 4096
+/* stride alignment requirement for avoiding partial writes */
+#define PARTIAL_WRITE_ALIGNMENT 0x1F
+
/* Macro for constructing the REGDMA command */
#define SDE_REGDMA_WRITE(p, off, data) \
do { \
@@ -869,6 +872,8 @@
SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_SIZE, 0x00010001);
SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_IMG_SIZE, 0x00010001);
SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY, 0);
+ SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG,
+ (ctx->rot->highest_bank & 0x3) << 8);
SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC, 0);
SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 1);
SDE_REGDMA_MODIFY(wrptr, REGDMA_TIMESTAMP_REG, mask, swts);
@@ -1270,7 +1275,7 @@
u32 *wrptr;
u32 pack = 0;
u32 dst_format = 0;
- u32 partial_write = 0;
+ u32 no_partial_writes = 0;
int i;
wrptr = sde_hw_rotator_get_regdma_segment(ctx);
@@ -1355,12 +1360,34 @@
(cfg->h_downscale_factor << 16));
/* partial write check */
- if (test_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map) &&
- !sde_mdp_is_ubwc_format(fmt))
- partial_write = BIT(10);
+ if (test_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map)) {
+ no_partial_writes = BIT(10);
+
+ /*
+ * For simplicity, don't disable partial writes if
+ * the ROI does not span the entire width of the
+ * output image, and require the total stride to
+ * also be properly aligned.
+ *
+ * This avoids having to determine the memory access
+ * alignment of the actual horizontal ROI on a per
+ * color format basis.
+ */
+ if (sde_mdp_is_ubwc_format(fmt)) {
+ no_partial_writes = 0x0;
+ } else if (cfg->dst_rect->x ||
+ cfg->dst_rect->w != cfg->img_width) {
+ no_partial_writes = 0x0;
+ } else {
+ for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
+ if (cfg->dst_plane.ystride[i] &
+ PARTIAL_WRITE_ALIGNMENT)
+ no_partial_writes = 0x0;
+ }
+ }
/* write config setup for bank configuration */
- SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG, partial_write |
+ SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG, no_partial_writes |
(ctx->rot->highest_bank & 0x3) << 8);
if (test_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map))
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 1d9cf34..fea2971 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -1824,7 +1824,7 @@
}
pdata->status_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);
- if (gpio_is_valid(pdata->status_gpio) & !(flags & OF_GPIO_ACTIVE_LOW))
+ if (gpio_is_valid(pdata->status_gpio) && !(flags & OF_GPIO_ACTIVE_LOW))
pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
of_property_read_u32(np, "qcom,bus-width", &bus_width);
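The one-character fix above swaps bitwise & for logical &&. Here both operands
happen to be 0/1 so the result is the same, but && states the intent,
short-circuits, and stays correct if either side ever returns a nonzero
"truthy" value other than 1. Standalone illustration of where they diverge:

	#include <stdio.h>

	int main(void)
	{
		int valid = 4;		/* truthy, but not 1 */
		int active_high = 1;

		printf("%d\n", valid & active_high);	/* 0 - wrong */
		printf("%d\n", valid && active_high);	/* 1 - intended */
		return 0;
	}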
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
index 5aa39b6..9b3b53d 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
@@ -2046,6 +2046,8 @@
if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI)
ipa_mhi_update_host_ch_state(true);
+ return 0;
+
fail_stop_event_update_dl_channel:
ipa_mhi_resume_channels(true,
ipa_mhi_client_ctx->dl_channels);
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
index cfa4ca9..91c9441 100644
--- a/drivers/soc/qcom/peripheral-loader.c
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -934,13 +934,13 @@
priv->region_start),
VMID_HLOS);
}
+ if (desc->clear_fw_region && priv->region_start)
+ pil_clear_segment(desc);
dma_free_attrs(desc->dev, priv->region_size,
priv->region, priv->region_start,
desc->attrs);
priv->region = NULL;
}
- if (desc->clear_fw_region && priv->region_start)
- pil_clear_segment(desc);
pil_release_mmap(desc);
}
return ret;
diff --git a/drivers/soc/qcom/pil-msa.c b/drivers/soc/qcom/pil-msa.c
index 4a586ac..20b9769 100644
--- a/drivers/soc/qcom/pil-msa.c
+++ b/drivers/soc/qcom/pil-msa.c
@@ -677,7 +677,15 @@
/* Load the MBA image into memory */
count = fw->size;
- memcpy(mba_dp_virt, data, count);
+ if (count <= SZ_1M) {
+		/* Only copy fw images up to 1MB in size */
+ memcpy(mba_dp_virt, data, count);
+ } else {
+		dev_err(pil->dev, "%s: fw image loading into memory failed due to fw size overflow\n",
+			__func__);
+ ret = -EINVAL;
+ goto err_mba_data;
+ }
/* Ensure memcpy of the MBA memory is done before loading the DP */
wmb();
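The added guard follows the standard validate-before-memcpy pattern for
lengths taken from external input (here, the firmware blob size). In general
form:

	#include <linux/errno.h>
	#include <linux/sizes.h>
	#include <linux/string.h>

	/* Sketch: dst is a fixed 1MB region, count comes from outside. */
	static int load_blob(void *dst, const void *src, size_t count)
	{
		if (count > SZ_1M)
			return -EINVAL;

		memcpy(dst, src, count);
		return 0;
	}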
diff --git a/drivers/thermal/qcom/qti_virtual_sensor.c b/drivers/thermal/qcom/qti_virtual_sensor.c
index 3064c74..923680a 100644
--- a/drivers/thermal/qcom/qti_virtual_sensor.c
+++ b/drivers/thermal/qcom/qti_virtual_sensor.c
@@ -29,7 +29,7 @@
.logic = VIRT_MAXIMUM,
},
{
- .virt_zone_name = "silver-virt-max-usr",
+ .virt_zone_name = "silv-virt-max-step",
.num_sensors = 4,
.sensor_names = {"cpu0-silver-usr",
"cpu1-silver-usr",
@@ -38,7 +38,7 @@
.logic = VIRT_MAXIMUM,
},
{
- .virt_zone_name = "gold-virt-max-usr",
+ .virt_zone_name = "gold-virt-max-step",
.num_sensors = 4,
.sensor_names = {"cpu0-gold-usr",
"cpu1-gold-usr",
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 4c1ccee..68d9feb 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -418,8 +418,9 @@
thermal_zone_device_set_polling(thermal_passive_wq,
tz, tz->passive_delay);
else if (tz->polling_delay)
- thermal_zone_device_set_polling(system_freezable_wq,
- tz, tz->polling_delay);
+ thermal_zone_device_set_polling(
+ system_freezable_power_efficient_wq,
+ tz, tz->polling_delay);
else
thermal_zone_device_set_polling(NULL, tz, 0);
@@ -2134,7 +2135,7 @@
/* Bind cooling devices for this zone */
bind_tz(tz);
- INIT_DELAYED_WORK(&(tz->poll_queue), thermal_zone_device_check);
+ INIT_DEFERRABLE_WORK(&(tz->poll_queue), thermal_zone_device_check);
thermal_zone_device_reset(tz);
/* Update the new thermal zone and mark it as already updated. */
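Both thermal changes above reduce wakeups: deferrable work lets an idle CPU
ignore the poll timer, and the power-efficient workqueue lets the scheduler
batch the work onto an already-awake CPU. A sketch of the combination
(poll_fn and the 1s period are illustrative):

	#include <linux/jiffies.h>
	#include <linux/workqueue.h>

	static struct delayed_work poll_work;

	static void poll_fn(struct work_struct *work)
	{
		/* ... read sensors ... then re-arm the poll */
		queue_delayed_work(system_freezable_power_efficient_wq,
				   &poll_work, msecs_to_jiffies(1000));
	}

	static void poll_start(void)
	{
		/* deferrable: an idle CPU is not woken just for this timer */
		INIT_DEFERRABLE_WORK(&poll_work, poll_fn);
		queue_delayed_work(system_freezable_power_efficient_wq,
				   &poll_work, msecs_to_jiffies(1000));
	}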
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 30a1b34..de1b3b7 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8052,6 +8052,20 @@
int ret;
set_cpu_active(cpu, false);
+ /*
+ * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
+ * users of this state to go away such that all new such users will
+ * observe it.
+ *
+ * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
+ * not imply sync_sched(), so wait for both.
+ *
+ * Do sync before park smpboot threads to take care the rcu boost case.
+ */
+ if (IS_ENABLED(CONFIG_PREEMPT))
+ synchronize_rcu_mult(call_rcu, call_rcu_sched);
+ else
+ synchronize_rcu();
if (!sched_smp_initialized)
return 0;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 45f404b..4d7c054 100755
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8430,7 +8430,8 @@
mcc->cpu = cpu;
#ifdef CONFIG_SCHED_DEBUG
raw_spin_unlock_irqrestore(&mcc->lock, flags);
- pr_info("CPU%d: update max cpu_capacity %lu\n", cpu, capacity);
+ printk_deferred(KERN_INFO "CPU%d: update max cpu_capacity %lu\n",
+ cpu, capacity);
goto skip_unlock;
#endif
}
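pr_info() from a path that can hold scheduler locks risks recursing into the
scheduler via the console code; printk_deferred() queues the message and emits
it later from irq_work, which is why the hunk above switches to it. A sketch
of the safe shape (example_lock stands in for the capacity lock used above):

	#include <linux/printk.h>
	#include <linux/spinlock.h>

	static DEFINE_RAW_SPINLOCK(example_lock);

	static void report(int cpu, unsigned long cap)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&example_lock, flags);
		/* safe: no console/semaphore work done inside the lock */
		printk_deferred(KERN_INFO "CPU%d: capacity %lu\n", cpu, cap);
		raw_spin_unlock_irqrestore(&example_lock, flags);
	}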
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 744cfe6c5..c2225cc 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1206,12 +1206,7 @@
qdisc_len = q->q.qlen;
if (q->ops->change(q, &req.attr))
pr_err("%s(): qdisc change failed", __func__);
- } else {
- WARN_ONCE(1, "%s(): called on queue which does %s",
- __func__, "not support change() operation");
}
- } else {
- WARN_ONCE(1, "%s(): called on bad queue", __func__);
}
return qdisc_len;
}