Merge "msm: kgsl: Check if GPU is hung when reserving space in rb" into msm-3.4
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 4f0c261..466d65b 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -136,6 +136,7 @@
select MSM_MULTIMEDIA_USE_ION
select MSM_PM8X60 if PM
select MSM_RUN_QUEUE_STATS
+ select ARM_HAS_SG_CHAIN
config ARCH_MSM8960
bool "MSM8960"
@@ -169,6 +170,7 @@
select MSM_PM8X60 if PM
select HOLES_IN_ZONE if SPARSEMEM
select MSM_RUN_QUEUE_STATS
+ select ARM_HAS_SG_CHAIN
config ARCH_MSM8930
bool "MSM8930"
@@ -199,6 +201,7 @@
select MULTI_IRQ_HANDLER
select MSM_PM8X60 if PM
select HOLES_IN_ZONE if SPARSEMEM
+ select ARM_HAS_SG_CHAIN
config ARCH_APQ8064
bool "APQ8064"
@@ -224,6 +227,7 @@
select QCACHE
select MIGHT_HAVE_PCI
select ARCH_SUPPORTS_MSI
+ select ARM_HAS_SG_CHAIN
config ARCH_MSM8974
bool "MSM8974"
@@ -247,6 +251,7 @@
select MSM_QDSP6V2_CODECS
select MSM_AUDIO_QDSP6V2 if SND_SOC
select MSM_RPM_REGULATOR_SMD
+ select ARM_HAS_SG_CHAIN
config ARCH_FSM9XXX
bool "FSM9XXX"
@@ -278,6 +283,7 @@
select MSM_QDSP6_APR
select MSM_AUDIO_QDSP6 if SND_SOC
select FIQ
+ select ARM_HAS_SG_CHAIN
config ARCH_MSM8625
bool "MSM8625"
diff --git a/arch/arm/mach-msm/clock-7x30.c b/arch/arm/mach-msm/clock-7x30.c
index 225ea2b..f3ac7d7 100644
--- a/arch/arm/mach-msm/clock-7x30.c
+++ b/arch/arm/mach-msm/clock-7x30.c
@@ -2328,60 +2328,53 @@
},
};
-static DEFINE_CLK_PCOM(adsp_clk, ADSP_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(codec_ssbi_clk, CODEC_SSBI_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(ebi1_clk, EBI1_CLK, CLKFLAG_SKIP_AUTO_OFF | CLKFLAG_MIN);
-static DEFINE_CLK_PCOM(ebi1_fixed_clk, EBI1_FIXED_CLK, CLKFLAG_MIN |
- CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(ecodec_clk, ECODEC_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(gp_clk, GP_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(adsp_clk, ADSP_CLK, 0);
+static DEFINE_CLK_PCOM(codec_ssbi_clk, CODEC_SSBI_CLK, 0);
+static DEFINE_CLK_PCOM(ebi1_clk, EBI1_CLK, CLKFLAG_MIN);
+static DEFINE_CLK_PCOM(ebi1_fixed_clk, EBI1_FIXED_CLK, CLKFLAG_MIN);
+static DEFINE_CLK_PCOM(ecodec_clk, ECODEC_CLK, 0);
+static DEFINE_CLK_PCOM(gp_clk, GP_CLK, 0);
static DEFINE_CLK_PCOM(uart3_clk, UART3_CLK, 0);
static DEFINE_CLK_PCOM(usb_phy_clk, USB_PHY_CLK, CLKFLAG_MIN);
-static DEFINE_CLK_PCOM(p_grp_2d_clk, GRP_2D_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_grp_2d_p_clk, GRP_2D_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_hdmi_clk, HDMI_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_grp_2d_clk, GRP_2D_CLK, 0);
+static DEFINE_CLK_PCOM(p_grp_2d_p_clk, GRP_2D_P_CLK, 0);
+static DEFINE_CLK_PCOM(p_hdmi_clk, HDMI_CLK, 0);
static DEFINE_CLK_PCOM(p_jpeg_clk, JPEG_CLK, CLKFLAG_MIN);
static DEFINE_CLK_PCOM(p_jpeg_p_clk, JPEG_P_CLK, 0);
-static DEFINE_CLK_PCOM(p_lpa_codec_clk, LPA_CODEC_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_lpa_core_clk, LPA_CORE_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_lpa_p_clk, LPA_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_mi2s_m_clk, MI2S_M_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_mi2s_s_clk, MI2S_S_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_mi2s_codec_rx_m_clk, MI2S_CODEC_RX_M_CLK,
- CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_mi2s_codec_rx_s_clk, MI2S_CODEC_RX_S_CLK,
- CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_mi2s_codec_tx_m_clk, MI2S_CODEC_TX_M_CLK,
- CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_mi2s_codec_tx_s_clk, MI2S_CODEC_TX_S_CLK,
- CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_lpa_codec_clk, LPA_CODEC_CLK, 0);
+static DEFINE_CLK_PCOM(p_lpa_core_clk, LPA_CORE_CLK, 0);
+static DEFINE_CLK_PCOM(p_lpa_p_clk, LPA_P_CLK, 0);
+static DEFINE_CLK_PCOM(p_mi2s_m_clk, MI2S_M_CLK, 0);
+static DEFINE_CLK_PCOM(p_mi2s_s_clk, MI2S_S_CLK, 0);
+static DEFINE_CLK_PCOM(p_mi2s_codec_rx_m_clk, MI2S_CODEC_RX_M_CLK, 0);
+static DEFINE_CLK_PCOM(p_mi2s_codec_rx_s_clk, MI2S_CODEC_RX_S_CLK, 0);
+static DEFINE_CLK_PCOM(p_mi2s_codec_tx_m_clk, MI2S_CODEC_TX_M_CLK, 0);
+static DEFINE_CLK_PCOM(p_mi2s_codec_tx_s_clk, MI2S_CODEC_TX_S_CLK, 0);
static DEFINE_CLK_PCOM(p_sdac_clk, SDAC_CLK, 0);
static DEFINE_CLK_PCOM(p_sdac_m_clk, SDAC_M_CLK, 0);
-static DEFINE_CLK_PCOM(p_vfe_clk, VFE_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_vfe_camif_clk, VFE_CAMIF_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_vfe_mdc_clk, VFE_MDC_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_vfe_clk, VFE_CLK, 0);
+static DEFINE_CLK_PCOM(p_vfe_camif_clk, VFE_CAMIF_CLK, 0);
+static DEFINE_CLK_PCOM(p_vfe_mdc_clk, VFE_MDC_CLK, 0);
static DEFINE_CLK_PCOM(p_vfe_p_clk, VFE_P_CLK, 0);
-static DEFINE_CLK_PCOM(p_grp_3d_clk, GRP_3D_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_grp_3d_p_clk, GRP_3D_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_grp_3d_clk, GRP_3D_CLK, 0);
+static DEFINE_CLK_PCOM(p_grp_3d_p_clk, GRP_3D_P_CLK, 0);
static DEFINE_CLK_PCOM(p_imem_clk, IMEM_CLK, 0);
-static DEFINE_CLK_PCOM(p_mdp_lcdc_pad_pclk_clk, MDP_LCDC_PAD_PCLK_CLK,
- CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_mdp_lcdc_pclk_clk, MDP_LCDC_PCLK_CLK,
- CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_mdp_p_clk, MDP_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_mdp_lcdc_pad_pclk_clk, MDP_LCDC_PAD_PCLK_CLK, 0);
+static DEFINE_CLK_PCOM(p_mdp_lcdc_pclk_clk, MDP_LCDC_PCLK_CLK, 0);
+static DEFINE_CLK_PCOM(p_mdp_p_clk, MDP_P_CLK, 0);
static DEFINE_CLK_PCOM(p_mdp_vsync_clk, MDP_VSYNC_CLK, 0);
-static DEFINE_CLK_PCOM(p_tsif_ref_clk, TSIF_REF_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_tsif_p_clk, TSIF_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_tv_dac_clk, TV_DAC_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_tv_enc_clk, TV_ENC_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_tsif_ref_clk, TSIF_REF_CLK, 0);
+static DEFINE_CLK_PCOM(p_tsif_p_clk, TSIF_P_CLK, 0);
+static DEFINE_CLK_PCOM(p_tv_dac_clk, TV_DAC_CLK, 0);
+static DEFINE_CLK_PCOM(p_tv_enc_clk, TV_ENC_CLK, 0);
static DEFINE_CLK_PCOM(p_emdh_clk, EMDH_CLK, CLKFLAG_MIN | CLKFLAG_MAX);
static DEFINE_CLK_PCOM(p_emdh_p_clk, EMDH_P_CLK, 0);
-static DEFINE_CLK_PCOM(p_i2c_clk, I2C_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_i2c_2_clk, I2C_2_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_mdc_clk, MDC_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_i2c_clk, I2C_CLK, 0);
+static DEFINE_CLK_PCOM(p_i2c_2_clk, I2C_2_CLK, 0);
+static DEFINE_CLK_PCOM(p_mdc_clk, MDC_CLK, 0);
static DEFINE_CLK_PCOM(p_pmdh_clk, PMDH_CLK, CLKFLAG_MIN | CLKFLAG_MAX);
-static DEFINE_CLK_PCOM(p_pmdh_p_clk, PMDH_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_pmdh_p_clk, PMDH_P_CLK, 0);
static DEFINE_CLK_PCOM(p_sdc1_clk, SDC1_CLK, 0);
static DEFINE_CLK_PCOM(p_sdc1_p_clk, SDC1_P_CLK, 0);
static DEFINE_CLK_PCOM(p_sdc2_clk, SDC2_CLK, 0);
@@ -2390,36 +2383,35 @@
static DEFINE_CLK_PCOM(p_sdc3_p_clk, SDC3_P_CLK, 0);
static DEFINE_CLK_PCOM(p_sdc4_clk, SDC4_CLK, 0);
static DEFINE_CLK_PCOM(p_sdc4_p_clk, SDC4_P_CLK, 0);
-static DEFINE_CLK_PCOM(p_uart2_clk, UART2_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_uart2_clk, UART2_CLK, 0);
static DEFINE_CLK_PCOM(p_usb_hs2_clk, USB_HS2_CLK, 0);
static DEFINE_CLK_PCOM(p_usb_hs2_core_clk, USB_HS2_CORE_CLK, 0);
static DEFINE_CLK_PCOM(p_usb_hs2_p_clk, USB_HS2_P_CLK, 0);
static DEFINE_CLK_PCOM(p_usb_hs3_clk, USB_HS3_CLK, 0);
static DEFINE_CLK_PCOM(p_usb_hs3_core_clk, USB_HS3_CORE_CLK, 0);
static DEFINE_CLK_PCOM(p_usb_hs3_p_clk, USB_HS3_P_CLK, 0);
-static DEFINE_CLK_PCOM(p_qup_i2c_clk, QUP_I2C_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_spi_clk, SPI_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_spi_p_clk, SPI_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_qup_i2c_clk, QUP_I2C_CLK, 0);
+static DEFINE_CLK_PCOM(p_spi_clk, SPI_CLK, 0);
+static DEFINE_CLK_PCOM(p_spi_p_clk, SPI_P_CLK, 0);
static DEFINE_CLK_PCOM(p_uart1_clk, UART1_CLK, 0);
static DEFINE_CLK_PCOM(p_uart1dm_clk, UART1DM_CLK, 0);
-static DEFINE_CLK_PCOM(p_uart2dm_clk, UART2DM_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_uart2dm_clk, UART2DM_CLK, 0);
static DEFINE_CLK_PCOM(p_usb_hs_clk, USB_HS_CLK, 0);
static DEFINE_CLK_PCOM(p_usb_hs_core_clk, USB_HS_CORE_CLK, 0);
static DEFINE_CLK_PCOM(p_usb_hs_p_clk, USB_HS_P_CLK, 0);
-static DEFINE_CLK_PCOM(p_cam_m_clk, CAM_M_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_cam_m_clk, CAM_M_CLK, 0);
static DEFINE_CLK_PCOM(p_camif_pad_p_clk, CAMIF_PAD_P_CLK, 0);
-static DEFINE_CLK_PCOM(p_csi0_clk, CSI0_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_csi0_vfe_clk, CSI0_VFE_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_csi0_p_clk, CSI0_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_csi0_clk, CSI0_CLK, 0);
+static DEFINE_CLK_PCOM(p_csi0_vfe_clk, CSI0_VFE_CLK, 0);
+static DEFINE_CLK_PCOM(p_csi0_p_clk, CSI0_P_CLK, 0);
static DEFINE_CLK_PCOM(p_mdp_clk, MDP_CLK, CLKFLAG_MIN);
-static DEFINE_CLK_PCOM(p_mfc_clk, MFC_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_mfc_div2_clk, MFC_DIV2_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_mfc_p_clk, MFC_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_vpe_clk, VPE_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_adm_clk, ADM_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_ce_clk, CE_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_axi_rotator_clk, AXI_ROTATOR_CLK,
- CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_mfc_clk, MFC_CLK, 0);
+static DEFINE_CLK_PCOM(p_mfc_div2_clk, MFC_DIV2_CLK, 0);
+static DEFINE_CLK_PCOM(p_mfc_p_clk, MFC_P_CLK, 0);
+static DEFINE_CLK_PCOM(p_vpe_clk, VPE_CLK, 0);
+static DEFINE_CLK_PCOM(p_adm_clk, ADM_CLK, 0);
+static DEFINE_CLK_PCOM(p_ce_clk, CE_CLK, 0);
+static DEFINE_CLK_PCOM(p_axi_rotator_clk, AXI_ROTATOR_CLK, 0);
static DEFINE_CLK_PCOM(p_rotator_imem_clk, ROTATOR_IMEM_CLK, 0);
static DEFINE_CLK_PCOM(p_rotator_p_clk, ROTATOR_P_CLK, 0);
diff --git a/arch/arm/mach-msm/clock-8960.c b/arch/arm/mach-msm/clock-8960.c
index fb0e168..7826498 100644
--- a/arch/arm/mach-msm/clock-8960.c
+++ b/arch/arm/mach-msm/clock-8960.c
@@ -2990,7 +2990,6 @@
static struct clk_ops clk_ops_pix_rdi_8960 = {
.enable = pix_rdi_clk_enable,
.disable = pix_rdi_clk_disable,
- .auto_off = pix_rdi_clk_disable,
.handoff = pix_rdi_clk_handoff,
.set_rate = pix_rdi_clk_set_rate,
.get_rate = pix_rdi_clk_get_rate,
diff --git a/arch/arm/mach-msm/clock-8x60.c b/arch/arm/mach-msm/clock-8x60.c
index 15cdacb..becb227 100644
--- a/arch/arm/mach-msm/clock-8x60.c
+++ b/arch/arm/mach-msm/clock-8x60.c
@@ -602,7 +602,6 @@
.c = {
.dbg_name = "smi_2x_axi_clk",
.ops = &clk_ops_branch,
- .flags = CLKFLAG_SKIP_AUTO_OFF,
CLK_INIT(smi_2x_axi_clk.c),
},
};
diff --git a/arch/arm/mach-msm/clock-9615.c b/arch/arm/mach-msm/clock-9615.c
index 57b7a76..f5ce5a7 100644
--- a/arch/arm/mach-msm/clock-9615.c
+++ b/arch/arm/mach-msm/clock-9615.c
@@ -250,7 +250,6 @@
static struct clk_ops clk_ops_pll_acpu_vote = {
.enable = pll_acpu_vote_clk_enable,
.disable = pll_acpu_vote_clk_disable,
- .auto_off = pll_acpu_vote_clk_disable,
.is_enabled = pll_vote_clk_is_enabled,
.get_parent = pll_vote_clk_get_parent,
};
diff --git a/arch/arm/mach-msm/clock-local.c b/arch/arm/mach-msm/clock-local.c
index 0f9404b..ca913dc 100644
--- a/arch/arm/mach-msm/clock-local.c
+++ b/arch/arm/mach-msm/clock-local.c
@@ -805,7 +805,6 @@
.enable_hwcg = branch_clk_enable_hwcg,
.disable_hwcg = branch_clk_disable_hwcg,
.in_hwcg_mode = branch_clk_in_hwcg_mode,
- .auto_off = branch_clk_disable,
.is_enabled = branch_clk_is_enabled,
.reset = branch_clk_reset,
.get_parent = branch_clk_get_parent,
@@ -828,7 +827,6 @@
.enable_hwcg = rcg_clk_enable_hwcg,
.disable_hwcg = rcg_clk_disable_hwcg,
.in_hwcg_mode = rcg_clk_in_hwcg_mode,
- .auto_off = rcg_clk_disable,
.handoff = rcg_clk_handoff,
.set_rate = rcg_clk_set_rate,
.list_rate = rcg_clk_list_rate,
@@ -941,7 +939,6 @@
.in_hwcg_mode = cdiv_clk_in_hwcg_mode,
.enable_hwcg = cdiv_clk_enable_hwcg,
.disable_hwcg = cdiv_clk_disable_hwcg,
- .auto_off = cdiv_clk_disable,
.handoff = cdiv_clk_handoff,
.set_rate = cdiv_clk_set_rate,
.get_rate = cdiv_clk_get_rate,
diff --git a/arch/arm/mach-msm/clock-local2.c b/arch/arm/mach-msm/clock-local2.c
index 9fe9591..23b4723 100644
--- a/arch/arm/mach-msm/clock-local2.c
+++ b/arch/arm/mach-msm/clock-local2.c
@@ -589,7 +589,6 @@
struct clk_ops clk_ops_branch = {
.enable = branch_clk_enable,
.disable = branch_clk_disable,
- .auto_off = branch_clk_disable,
.set_rate = branch_clk_set_rate,
.get_rate = branch_clk_get_rate,
.list_rate = branch_clk_list_rate,
@@ -602,7 +601,6 @@
struct clk_ops clk_ops_vote = {
.enable = local_vote_clk_enable,
.disable = local_vote_clk_disable,
- .auto_off = local_vote_clk_disable,
.reset = local_vote_clk_reset,
.handoff = local_vote_clk_handoff,
};
diff --git a/arch/arm/mach-msm/clock-pcom-lookup.c b/arch/arm/mach-msm/clock-pcom-lookup.c
index 83940cf..2a2cc01 100644
--- a/arch/arm/mach-msm/clock-pcom-lookup.c
+++ b/arch/arm/mach-msm/clock-pcom-lookup.c
@@ -22,19 +22,19 @@
#define PLLn_MODE(n) (MSM_CLK_CTL_BASE + 0x300 + 28 * (n))
#define PLL4_MODE (MSM_CLK_CTL_BASE + 0x374)
-static DEFINE_CLK_PCOM(adm_clk, ADM_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(adsp_clk, ADSP_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(ahb_m_clk, AHB_M_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(ahb_s_clk, AHB_S_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(cam_m_clk, CAM_M_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(adm_clk, ADM_CLK, 0);
+static DEFINE_CLK_PCOM(adsp_clk, ADSP_CLK, 0);
+static DEFINE_CLK_PCOM(ahb_m_clk, AHB_M_CLK, 0);
+static DEFINE_CLK_PCOM(ahb_s_clk, AHB_S_CLK, 0);
+static DEFINE_CLK_PCOM(cam_m_clk, CAM_M_CLK, 0);
static DEFINE_CLK_PCOM(axi_rotator_clk, AXI_ROTATOR_CLK, 0);
-static DEFINE_CLK_PCOM(ce_clk, CE_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(csi0_clk, CSI0_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(csi0_p_clk, CSI0_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(csi0_vfe_clk, CSI0_VFE_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(csi1_clk, CSI1_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(csi1_p_clk, CSI1_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(csi1_vfe_clk, CSI1_VFE_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(ce_clk, CE_CLK, 0);
+static DEFINE_CLK_PCOM(csi0_clk, CSI0_CLK, 0);
+static DEFINE_CLK_PCOM(csi0_p_clk, CSI0_P_CLK, 0);
+static DEFINE_CLK_PCOM(csi0_vfe_clk, CSI0_VFE_CLK, 0);
+static DEFINE_CLK_PCOM(csi1_clk, CSI1_CLK, 0);
+static DEFINE_CLK_PCOM(csi1_p_clk, CSI1_P_CLK, 0);
+static DEFINE_CLK_PCOM(csi1_vfe_clk, CSI1_VFE_CLK, 0);
static struct pll_shared_clk pll0_clk = {
.id = PLL_0,
@@ -113,38 +113,36 @@
};
static DEFINE_CLK_PCOM(dsi_ref_clk, DSI_REF_CLK, 0);
-static DEFINE_CLK_PCOM(ebi1_clk, EBI1_CLK,
- CLKFLAG_SKIP_AUTO_OFF | CLKFLAG_MIN);
-static DEFINE_CLK_PCOM(ebi2_clk, EBI2_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(ecodec_clk, ECODEC_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(ebi1_clk, EBI1_CLK, CLKFLAG_MIN);
+static DEFINE_CLK_PCOM(ebi2_clk, EBI2_CLK, 0);
+static DEFINE_CLK_PCOM(ecodec_clk, ECODEC_CLK, 0);
static DEFINE_CLK_PCOM(emdh_clk, EMDH_CLK, CLKFLAG_MIN | CLKFLAG_MAX);
-static DEFINE_CLK_PCOM(gp_clk, GP_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(grp_2d_clk, GRP_2D_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(grp_2d_p_clk, GRP_2D_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(gp_clk, GP_CLK, 0);
+static DEFINE_CLK_PCOM(grp_2d_clk, GRP_2D_CLK, 0);
+static DEFINE_CLK_PCOM(grp_2d_p_clk, GRP_2D_P_CLK, 0);
static DEFINE_CLK_PCOM(grp_3d_clk, GRP_3D_CLK, 0);
-static DEFINE_CLK_PCOM(grp_3d_p_clk, GRP_3D_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(grp_3d_p_clk, GRP_3D_P_CLK, 0);
static DEFINE_CLK_PCOM(gsbi1_qup_clk, GSBI1_QUP_CLK, 0);
static DEFINE_CLK_PCOM(gsbi1_qup_p_clk, GSBI1_QUP_P_CLK, 0);
static DEFINE_CLK_PCOM(gsbi2_qup_clk, GSBI2_QUP_CLK, 0);
static DEFINE_CLK_PCOM(gsbi2_qup_p_clk, GSBI2_QUP_P_CLK, 0);
-static DEFINE_CLK_PCOM(gsbi_clk, GSBI_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(gsbi_p_clk, GSBI_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(hdmi_clk, HDMI_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(i2c_clk, I2C_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(icodec_rx_clk, ICODEC_RX_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(icodec_tx_clk, ICODEC_TX_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(gsbi_clk, GSBI_CLK, 0);
+static DEFINE_CLK_PCOM(gsbi_p_clk, GSBI_P_CLK, 0);
+static DEFINE_CLK_PCOM(hdmi_clk, HDMI_CLK, 0);
+static DEFINE_CLK_PCOM(i2c_clk, I2C_CLK, 0);
+static DEFINE_CLK_PCOM(icodec_rx_clk, ICODEC_RX_CLK, 0);
+static DEFINE_CLK_PCOM(icodec_tx_clk, ICODEC_TX_CLK, 0);
static DEFINE_CLK_PCOM(imem_clk, IMEM_CLK, 0);
-static DEFINE_CLK_PCOM(mdc_clk, MDC_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(mdc_clk, MDC_CLK, 0);
static DEFINE_CLK_PCOM(mdp_clk, MDP_CLK, CLKFLAG_MIN);
static DEFINE_CLK_PCOM(mdp_lcdc_pad_pclk_clk, MDP_LCDC_PAD_PCLK_CLK,
- CLKFLAG_SKIP_AUTO_OFF);
+ 0);
static DEFINE_CLK_PCOM(mdp_lcdc_pclk_clk, MDP_LCDC_PCLK_CLK,
- CLKFLAG_SKIP_AUTO_OFF);
+ 0);
static DEFINE_CLK_PCOM(mdp_vsync_clk, MDP_VSYNC_CLK, 0);
static DEFINE_CLK_PCOM(mdp_dsi_p_clk, MDP_DSI_P_CLK, 0);
-static DEFINE_CLK_PCOM(pbus_clk, PBUS_CLK,
- CLKFLAG_SKIP_AUTO_OFF | CLKFLAG_MIN);
-static DEFINE_CLK_PCOM(pcm_clk, PCM_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(pbus_clk, PBUS_CLK, CLKFLAG_MIN);
+static DEFINE_CLK_PCOM(pcm_clk, PCM_CLK, 0);
static DEFINE_CLK_PCOM(pmdh_clk, PMDH_CLK, CLKFLAG_MIN | CLKFLAG_MAX);
static DEFINE_CLK_PCOM(sdac_clk, SDAC_CLK, 0);
static DEFINE_CLK_PCOM(sdc1_clk, SDC1_CLK, 0);
@@ -155,12 +153,12 @@
static DEFINE_CLK_PCOM(sdc3_p_clk, SDC3_P_CLK, 0);
static DEFINE_CLK_PCOM(sdc4_clk, SDC4_CLK, 0);
static DEFINE_CLK_PCOM(sdc4_p_clk, SDC4_P_CLK, 0);
-static DEFINE_CLK_PCOM(spi_clk, SPI_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(tsif_clk, TSIF_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(tsif_p_clk, TSIF_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(tsif_ref_clk, TSIF_REF_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(tv_dac_clk, TV_DAC_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(tv_enc_clk, TV_ENC_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(spi_clk, SPI_CLK, 0);
+static DEFINE_CLK_PCOM(tsif_clk, TSIF_CLK, 0);
+static DEFINE_CLK_PCOM(tsif_p_clk, TSIF_P_CLK, 0);
+static DEFINE_CLK_PCOM(tsif_ref_clk, TSIF_REF_CLK, 0);
+static DEFINE_CLK_PCOM(tv_dac_clk, TV_DAC_CLK, 0);
+static DEFINE_CLK_PCOM(tv_enc_clk, TV_ENC_CLK, 0);
static DEFINE_CLK_PCOM(uart1_clk, UART1_CLK, 0);
static DEFINE_CLK_PCOM(uart1dm_clk, UART1DM_CLK, 0);
static DEFINE_CLK_PCOM(uart2_clk, UART2_CLK, 0);
@@ -173,8 +171,8 @@
static DEFINE_CLK_PCOM(usb_hs_clk, USB_HS_CLK, 0);
static DEFINE_CLK_PCOM(usb_hs_core_clk, USB_HS_CORE_CLK, 0);
static DEFINE_CLK_PCOM(usb_hs_p_clk, USB_HS_P_CLK, 0);
-static DEFINE_CLK_PCOM(usb_otg_clk, USB_OTG_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(usb_phy_clk, USB_PHY_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(usb_otg_clk, USB_OTG_CLK, 0);
+static DEFINE_CLK_PCOM(usb_phy_clk, USB_PHY_CLK, 0);
static DEFINE_CLK_PCOM(vdc_clk, VDC_CLK, CLKFLAG_MIN);
static DEFINE_CLK_PCOM(vfe_axi_clk, VFE_AXI_CLK, 0);
static DEFINE_CLK_PCOM(vfe_clk, VFE_CLK, 0);
diff --git a/arch/arm/mach-msm/clock-pcom.c b/arch/arm/mach-msm/clock-pcom.c
index 02c8765..428423a 100644
--- a/arch/arm/mach-msm/clock-pcom.c
+++ b/arch/arm/mach-msm/clock-pcom.c
@@ -190,7 +190,6 @@
struct clk_ops clk_ops_pcom = {
.enable = pc_clk_enable,
.disable = pc_clk_disable,
- .auto_off = pc_clk_disable,
.reset = pc_reset,
.set_rate = pc_clk_set_rate,
.set_max_rate = pc_clk_set_max_rate,
@@ -205,7 +204,6 @@
struct clk_ops clk_ops_pcom_ext_config = {
.enable = pc_clk_enable,
.disable = pc_clk_disable,
- .auto_off = pc_clk_disable,
.reset = pc_reset,
.set_rate = pc_clk_set_ext_config,
.set_max_rate = pc_clk_set_max_rate,
diff --git a/arch/arm/mach-msm/clock-pll.c b/arch/arm/mach-msm/clock-pll.c
index 8c1f67e..2938135 100644
--- a/arch/arm/mach-msm/clock-pll.c
+++ b/arch/arm/mach-msm/clock-pll.c
@@ -121,7 +121,6 @@
struct clk_ops clk_ops_pll_vote = {
.enable = pll_vote_clk_enable,
.disable = pll_vote_clk_disable,
- .auto_off = pll_vote_clk_disable,
.is_enabled = pll_vote_clk_is_enabled,
.get_parent = pll_vote_clk_get_parent,
.handoff = pll_vote_clk_handoff,
@@ -300,7 +299,6 @@
struct clk_ops clk_ops_local_pll = {
.enable = local_pll_clk_enable,
.disable = local_pll_clk_disable,
- .auto_off = local_pll_clk_disable,
.handoff = local_pll_clk_handoff,
.get_parent = local_pll_clk_get_parent,
};
diff --git a/arch/arm/mach-msm/clock-rpm.h b/arch/arm/mach-msm/clock-rpm.h
index 107fb02..ce878ce 100644
--- a/arch/arm/mach-msm/clock-rpm.h
+++ b/arch/arm/mach-msm/clock-rpm.h
@@ -58,7 +58,6 @@
.rpmrs_data = (rpmrsdata),\
.c = { \
.ops = &clk_ops_rpm, \
- .flags = CLKFLAG_SKIP_AUTO_OFF, \
.dbg_name = #name, \
CLK_INIT(name.c), \
.depends = dep, \
@@ -74,7 +73,6 @@
.rpmrs_data = (rpmrsdata),\
.c = { \
.ops = &clk_ops_rpm, \
- .flags = CLKFLAG_SKIP_AUTO_OFF, \
.dbg_name = #active, \
CLK_INIT(active.c), \
.depends = dep, \
@@ -96,7 +94,6 @@
.rpmrs_data = (rpmrsdata),\
.c = { \
.ops = &clk_ops_rpm_branch, \
- .flags = CLKFLAG_SKIP_AUTO_OFF, \
.dbg_name = #name, \
.rate = (r), \
CLK_INIT(name.c), \
@@ -115,7 +112,6 @@
.rpmrs_data = (rpmrsdata),\
.c = { \
.ops = &clk_ops_rpm_branch, \
- .flags = CLKFLAG_SKIP_AUTO_OFF, \
.dbg_name = #active, \
.rate = (r), \
CLK_INIT(active.c), \
@@ -134,7 +130,6 @@
.rpmrs_data = (rpmrsdata),\
.c = { \
.ops = &clk_ops_rpm, \
- .flags = CLKFLAG_SKIP_AUTO_OFF, \
.dbg_name = #name, \
CLK_INIT(name.c), \
.warned = true, \
@@ -150,7 +145,6 @@
.rpmrs_data = (rpmrsdata),\
.c = { \
.ops = &clk_ops_rpm, \
- .flags = CLKFLAG_SKIP_AUTO_OFF, \
.dbg_name = #active, \
CLK_INIT(active.c), \
.warned = true, \
diff --git a/arch/arm/mach-msm/clock-voter.h b/arch/arm/mach-msm/clock-voter.h
index c9aebba..407aac6 100644
--- a/arch/arm/mach-msm/clock-voter.h
+++ b/arch/arm/mach-msm/clock-voter.h
@@ -34,7 +34,6 @@
.c = { \
.dbg_name = #clk_name, \
.ops = &clk_ops_voter, \
- .flags = CLKFLAG_SKIP_AUTO_OFF, \
.rate = _default_rate, \
CLK_INIT(clk_name.c), \
}, \
diff --git a/arch/arm/mach-msm/clock.c b/arch/arm/mach-msm/clock.c
index 8bf98fa..da8c3a9 100644
--- a/arch/arm/mach-msm/clock.c
+++ b/arch/arm/mach-msm/clock.c
@@ -506,34 +506,16 @@
clk_init_data->post_init();
}
-/*
- * The bootloader and/or AMSS may have left various clocks enabled.
- * Disable any clocks that have not been explicitly enabled by a
- * clk_enable() call and don't have the CLKFLAG_SKIP_AUTO_OFF flag.
- */
static int __init clock_late_init(void)
{
- unsigned n, count = 0;
struct handoff_clk *h, *h_temp;
- unsigned long flags;
- int ret = 0;
+ int n, ret = 0;
clock_debug_init(clk_init_data);
- for (n = 0; n < clk_init_data->size; n++) {
- struct clk *clk = clk_init_data->table[n].clk;
+ for (n = 0; n < clk_init_data->size; n++)
+ clock_debug_add(clk_init_data->table[n].clk);
- clock_debug_add(clk);
- spin_lock_irqsave(&clk->lock, flags);
- if (!(clk->flags & CLKFLAG_SKIP_AUTO_OFF)) {
- if (!clk->count && clk->ops->auto_off) {
- count++;
- clk->ops->auto_off(clk);
- }
- }
- spin_unlock_irqrestore(&clk->lock, flags);
- }
- pr_info("clock_late_init() disabled %d unused clocks\n", count);
-
+ pr_info("%s: Removing enables held for handed-off clocks\n", __func__);
list_for_each_entry_safe(h, h_temp, &handoff_list, list) {
clk_disable_unprepare(h->clk);
list_del(&h->list);
diff --git a/arch/arm/mach-msm/clock.h b/arch/arm/mach-msm/clock.h
index 51bfa67..56d3c6f 100644
--- a/arch/arm/mach-msm/clock.h
+++ b/arch/arm/mach-msm/clock.h
@@ -33,7 +33,6 @@
#define CLKFLAG_RETAIN 0x00000040
#define CLKFLAG_NORETAIN 0x00000080
#define CLKFLAG_SKIP_HANDOFF 0x00000100
-#define CLKFLAG_SKIP_AUTO_OFF 0x00000200
#define CLKFLAG_MIN 0x00000400
#define CLKFLAG_MAX 0x00000800
@@ -90,7 +89,6 @@
int (*enable)(struct clk *clk);
void (*disable)(struct clk *clk);
void (*unprepare)(struct clk *clk);
- void (*auto_off)(struct clk *clk);
void (*enable_hwcg)(struct clk *clk);
void (*disable_hwcg)(struct clk *clk);
int (*in_hwcg_mode)(struct clk *clk);
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_acdb.c b/arch/arm/mach-msm/qdsp6v2/audio_acdb.c
index 22779b4..1092c77 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_acdb.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_acdb.c
@@ -646,7 +646,7 @@
goto err;
}
- acdb_data.ion_handle = ion_import_fd(acdb_data.ion_client,
+ acdb_data.ion_handle = ion_import_dma_buf(acdb_data.ion_client,
atomic_read(&acdb_data.map_handle));
if (IS_ERR_OR_NULL(acdb_data.ion_handle)) {
pr_err("%s: Could not import map handle!!!\n", __func__);
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_lpa.c b/arch/arm/mach-msm/qdsp6v2/audio_lpa.c
index 0591a71..2502d61 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_lpa.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_lpa.c
@@ -481,7 +481,7 @@
goto client_error;
}
- handle = ion_import_fd(client, info->fd);
+ handle = ion_import_dma_buf(client, info->fd);
if (IS_ERR_OR_NULL(handle)) {
pr_err("%s: could not get handle of the given fd\n", __func__);
goto import_error;
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c b/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c
index fdc596d..1c29617 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c
@@ -699,7 +699,7 @@
goto client_error;
}
- handle = ion_import_fd(client, info->fd);
+ handle = ion_import_dma_buf(client, info->fd);
if (IS_ERR_OR_NULL(handle)) {
pr_err("%s: could not get handle of the given fd\n", __func__);
goto import_error;
diff --git a/drivers/char/msm_rotator.c b/drivers/char/msm_rotator.c
index 6cd1806..8650e83 100644
--- a/drivers/char/msm_rotator.c
+++ b/drivers/char/msm_rotator.c
@@ -177,13 +177,13 @@
if (!msm_rotator_dev->client)
return -EINVAL;
- *pihdl = ion_import_fd(msm_rotator_dev->client, mem_id);
+ *pihdl = ion_import_dma_buf(msm_rotator_dev->client, mem_id);
if (IS_ERR_OR_NULL(*pihdl)) {
- pr_err("ion_import_fd() failed\n");
+ pr_err("ion_import_dma_buf() failed\n");
return PTR_ERR(*pihdl);
}
- pr_debug("%s(): ion_hdl %p, ion_buf %p\n", __func__, *pihdl,
- ion_share(msm_rotator_dev->client, *pihdl));
+ pr_debug("%s(): ion_hdl %p, ion_fd %d\n", __func__, *pihdl,
+ ion_share_dma_buf(msm_rotator_dev->client, *pihdl));
if (ion_map_iommu(msm_rotator_dev->client,
*pihdl, ROTATOR_DOMAIN, GEN_POOL,
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index 5c7ab3a..4e34e89 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -22,6 +22,7 @@
#include <linux/anon_inodes.h>
#include <linux/ion.h>
#include <linux/list.h>
+#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
@@ -31,6 +32,7 @@
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
#include <mach/iommu_domains.h>
#include "ion_priv.h"
@@ -51,14 +53,12 @@
struct rb_root heaps;
long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
unsigned long arg);
- struct rb_root user_clients;
- struct rb_root kernel_clients;
+ struct rb_root clients;
struct dentry *debug_root;
};
/**
* struct ion_client - a process/hw block local address space
- * @ref: for reference counting the client
* @node: node in the tree of all clients
* @dev: backpointer to ion device
* @handles: an rb tree of all the handles in this client
@@ -72,7 +72,6 @@
* as well as the handles themselves, and should be held while modifying either.
*/
struct ion_client {
- struct kref ref;
struct rb_node node;
struct ion_device *dev;
struct rb_root handles;
@@ -92,7 +91,6 @@
* @node: node in the client's handle rbtree
* @kmap_cnt: count of times this client has mapped to kernel
* @dmap_cnt: count of times this client has mapped for dma
- * @usermap_cnt: count of times this client has mapped for userspace
*
* Modifications to node, map_cnt or mapping should be protected by the
* lock in the client. Other fields are never changed after initialization.
@@ -103,8 +101,6 @@
struct ion_buffer *buffer;
struct rb_node node;
unsigned int kmap_cnt;
- unsigned int dmap_cnt;
- unsigned int usermap_cnt;
unsigned int iommu_map_cnt;
};
@@ -217,6 +213,7 @@
unsigned long flags)
{
struct ion_buffer *buffer;
+ struct sg_table *table;
int ret;
buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
@@ -231,8 +228,18 @@
kfree(buffer);
return ERR_PTR(ret);
}
+
buffer->dev = dev;
buffer->size = len;
+
+ table = buffer->heap->ops->map_dma(buffer->heap, buffer);
+ if (IS_ERR_OR_NULL(table)) {
+ heap->ops->free(buffer);
+ kfree(buffer);
+ return ERR_PTR(PTR_ERR(table));
+ }
+ buffer->sg_table = table;
+
mutex_init(&buffer->lock);
ion_buffer_add(dev, buffer);
return buffer;
@@ -275,6 +282,11 @@
struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
struct ion_device *dev = buffer->dev;
+ if (WARN_ON(buffer->kmap_cnt > 0))
+ buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+
+ buffer->heap->ops->unmap_dma(buffer->heap, buffer);
+
ion_iommu_delayed_unmap(buffer);
buffer->heap->ops->free(buffer);
mutex_lock(&dev->lock);
@@ -310,17 +322,26 @@
return handle;
}
-/* Client lock must be locked when calling */
+static void ion_handle_kmap_put(struct ion_handle *);
+
static void ion_handle_destroy(struct kref *kref)
{
struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
- /* XXX Can a handle be destroyed while it's map count is non-zero?:
- if (handle->map_cnt) unmap
- */
- WARN_ON(handle->kmap_cnt || handle->dmap_cnt || handle->usermap_cnt);
- ion_buffer_put(handle->buffer);
+ struct ion_client *client = handle->client;
+ struct ion_buffer *buffer = handle->buffer;
+
+ mutex_lock(&client->lock);
+
+ mutex_lock(&buffer->lock);
+ while (handle->kmap_cnt)
+ ion_handle_kmap_put(handle);
+ mutex_unlock(&buffer->lock);
+
if (!RB_EMPTY_NODE(&handle->node))
- rb_erase(&handle->node, &handle->client->handles);
+ rb_erase(&handle->node, &client->handles);
+ mutex_unlock(&client->lock);
+
+ ion_buffer_put(buffer);
kfree(handle);
}
@@ -412,6 +433,11 @@
* request of the caller allocate from it. Repeat until allocate has
* succeeded or all heaps have been tried
*/
+ if (WARN_ON(!len))
+ return ERR_PTR(-EINVAL);
+
+ len = PAGE_ALIGN(len);
+
mutex_lock(&dev->lock);
for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
@@ -445,7 +471,10 @@
}
mutex_unlock(&dev->lock);
- if (IS_ERR_OR_NULL(buffer)) {
+ if (buffer == NULL)
+ return ERR_PTR(-ENODEV);
+
+ if (IS_ERR(buffer)) {
pr_debug("ION is unable to allocate 0x%x bytes (alignment: "
"0x%x) from heap(s) %sfor client %s with heap "
"mask 0x%x\n",
@@ -455,22 +484,19 @@
handle = ion_handle_create(client, buffer);
- if (IS_ERR_OR_NULL(handle))
- goto end;
-
/*
* ion_buffer_create will create a buffer with a ref_cnt of 1,
* and ion_handle_create will take a second reference, drop one here
*/
ion_buffer_put(buffer);
- mutex_lock(&client->lock);
- ion_handle_add(client, handle);
- mutex_unlock(&client->lock);
- return handle;
+ if (!IS_ERR(handle)) {
+ mutex_lock(&client->lock);
+ ion_handle_add(client, handle);
+ mutex_unlock(&client->lock);
+ }
-end:
- ion_buffer_put(buffer);
+
return handle;
}
EXPORT_SYMBOL(ion_alloc);
@@ -488,43 +514,11 @@
WARN(1, "%s: invalid handle passed to free.\n", __func__);
return;
}
- ion_handle_put(handle);
mutex_unlock(&client->lock);
+ ion_handle_put(handle);
}
EXPORT_SYMBOL(ion_free);
-static void ion_client_get(struct ion_client *client);
-static int ion_client_put(struct ion_client *client);
-
-static bool _ion_map(int *buffer_cnt, int *handle_cnt)
-{
- bool map;
-
- BUG_ON(*handle_cnt != 0 && *buffer_cnt == 0);
-
- if (*buffer_cnt)
- map = false;
- else
- map = true;
- if (*handle_cnt == 0)
- (*buffer_cnt)++;
- (*handle_cnt)++;
- return map;
-}
-
-static bool _ion_unmap(int *buffer_cnt, int *handle_cnt)
-{
- BUG_ON(*handle_cnt == 0);
- (*handle_cnt)--;
- if (*handle_cnt != 0)
- return false;
- BUG_ON(*buffer_cnt == 0);
- (*buffer_cnt)--;
- if (*buffer_cnt == 0)
- return true;
- return false;
-}
-
int ion_phys(struct ion_client *client, struct ion_handle *handle,
ion_phys_addr_t *addr, size_t *len)
{
@@ -551,52 +545,55 @@
}
EXPORT_SYMBOL(ion_phys);
-void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle,
- unsigned long flags)
+static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
- struct ion_buffer *buffer;
void *vaddr;
- mutex_lock(&client->lock);
- if (!ion_handle_validate(client, handle)) {
- pr_err("%s: invalid handle passed to map_kernel.\n",
- __func__);
- mutex_unlock(&client->lock);
- return ERR_PTR(-EINVAL);
+ if (buffer->kmap_cnt) {
+ buffer->kmap_cnt++;
+ return buffer->vaddr;
}
-
- buffer = handle->buffer;
- mutex_lock(&buffer->lock);
-
- if (!handle->buffer->heap->ops->map_kernel) {
- pr_err("%s: map_kernel is not implemented by this heap.\n",
- __func__);
- mutex_unlock(&buffer->lock);
- mutex_unlock(&client->lock);
- return ERR_PTR(-ENODEV);
- }
-
- if (ion_validate_buffer_flags(buffer, flags)) {
- vaddr = ERR_PTR(-EEXIST);
- goto out;
- }
-
- if (_ion_map(&buffer->kmap_cnt, &handle->kmap_cnt)) {
- vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer,
- flags);
- if (IS_ERR_OR_NULL(vaddr))
- _ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt);
- buffer->vaddr = vaddr;
- } else {
- vaddr = buffer->vaddr;
- }
-
-out:
- mutex_unlock(&buffer->lock);
- mutex_unlock(&client->lock);
+ vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
+ if (IS_ERR_OR_NULL(vaddr))
+ return vaddr;
+ buffer->vaddr = vaddr;
+ buffer->kmap_cnt++;
return vaddr;
}
-EXPORT_SYMBOL(ion_map_kernel);
+
+static void *ion_handle_kmap_get(struct ion_handle *handle)
+{
+ struct ion_buffer *buffer = handle->buffer;
+ void *vaddr;
+
+ if (handle->kmap_cnt) {
+ handle->kmap_cnt++;
+ return buffer->vaddr;
+ }
+ vaddr = ion_buffer_kmap_get(buffer);
+ if (IS_ERR_OR_NULL(vaddr))
+ return vaddr;
+ handle->kmap_cnt++;
+ return vaddr;
+}
+
+static void ion_buffer_kmap_put(struct ion_buffer *buffer)
+{
+ buffer->kmap_cnt--;
+ if (!buffer->kmap_cnt) {
+ buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+ buffer->vaddr = NULL;
+ }
+}
+
+static void ion_handle_kmap_put(struct ion_handle *handle)
+{
+ struct ion_buffer *buffer = handle->buffer;
+
+ handle->kmap_cnt--;
+ if (!handle->kmap_cnt)
+ ion_buffer_kmap_put(buffer);
+}
static struct ion_iommu_map *__ion_iommu_map(struct ion_buffer *buffer,
int domain_num, int partition_num, unsigned long align,
@@ -699,14 +696,10 @@
}
iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
- _ion_map(&buffer->iommu_map_cnt, &handle->iommu_map_cnt);
if (!iommu_map) {
iommu_map = __ion_iommu_map(buffer, domain_num, partition_num,
align, iova_length, flags, iova);
- if (IS_ERR_OR_NULL(iommu_map)) {
- _ion_unmap(&buffer->iommu_map_cnt,
- &handle->iommu_map_cnt);
- } else {
+ if (!IS_ERR_OR_NULL(iommu_map)) {
iommu_map->flags = iommu_flags;
if (iommu_map->flags & ION_IOMMU_UNMAP_DELAYED)
@@ -717,22 +710,20 @@
pr_err("%s: handle %p is already mapped with iommu flags %lx, trying to map with flags %lx\n",
__func__, handle,
iommu_map->flags, iommu_flags);
- _ion_unmap(&buffer->iommu_map_cnt,
- &handle->iommu_map_cnt);
ret = -EINVAL;
} else if (iommu_map->mapped_size != iova_length) {
pr_err("%s: handle %p is already mapped with length"
" %x, trying to map with length %lx\n",
__func__, handle, iommu_map->mapped_size,
iova_length);
- _ion_unmap(&buffer->iommu_map_cnt,
- &handle->iommu_map_cnt);
ret = -EINVAL;
} else {
kref_get(&iommu_map->ref);
*iova = iommu_map->iova_addr;
}
}
+ if (!ret)
+ buffer->iommu_map_cnt++;
*buffer_size = buffer->size;
out:
mutex_unlock(&buffer->lock);
@@ -771,9 +762,9 @@
goto out;
}
- _ion_unmap(&buffer->iommu_map_cnt, &handle->iommu_map_cnt);
kref_put(&iommu_map->ref, ion_iommu_release);
+ buffer->iommu_map_cnt--;
out:
mutex_unlock(&buffer->lock);
@@ -782,51 +773,40 @@
}
EXPORT_SYMBOL(ion_unmap_iommu);
-struct scatterlist *ion_map_dma(struct ion_client *client,
- struct ion_handle *handle,
- unsigned long flags)
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle,
+ unsigned long flags)
{
struct ion_buffer *buffer;
- struct scatterlist *sglist;
+ void *vaddr;
mutex_lock(&client->lock);
if (!ion_handle_validate(client, handle)) {
- pr_err("%s: invalid handle passed to map_dma.\n",
+ pr_err("%s: invalid handle passed to map_kernel.\n",
__func__);
mutex_unlock(&client->lock);
return ERR_PTR(-EINVAL);
}
- buffer = handle->buffer;
- mutex_lock(&buffer->lock);
- if (!handle->buffer->heap->ops->map_dma) {
+ buffer = handle->buffer;
+
+ if (!handle->buffer->heap->ops->map_kernel) {
pr_err("%s: map_kernel is not implemented by this heap.\n",
__func__);
- mutex_unlock(&buffer->lock);
mutex_unlock(&client->lock);
return ERR_PTR(-ENODEV);
}
if (ion_validate_buffer_flags(buffer, flags)) {
- sglist = ERR_PTR(-EEXIST);
- goto out;
+ mutex_unlock(&client->lock);
+ return ERR_PTR(-EEXIST);
}
- if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) {
- sglist = buffer->heap->ops->map_dma(buffer->heap, buffer);
- if (IS_ERR_OR_NULL(sglist))
- _ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt);
- buffer->sglist = sglist;
- } else {
- sglist = buffer->sglist;
- }
-
-out:
+ mutex_lock(&buffer->lock);
+ vaddr = ion_handle_kmap_get(handle);
mutex_unlock(&buffer->lock);
mutex_unlock(&client->lock);
- return sglist;
+ return vaddr;
}
-EXPORT_SYMBOL(ion_map_dma);
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
@@ -835,74 +815,10 @@
mutex_lock(&client->lock);
buffer = handle->buffer;
mutex_lock(&buffer->lock);
- if (_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt)) {
- buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
- buffer->vaddr = NULL;
- }
+ ion_handle_kmap_put(handle);
mutex_unlock(&buffer->lock);
mutex_unlock(&client->lock);
}
-EXPORT_SYMBOL(ion_unmap_kernel);
-
-void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle)
-{
- struct ion_buffer *buffer;
-
- mutex_lock(&client->lock);
- buffer = handle->buffer;
- mutex_lock(&buffer->lock);
- if (_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt)) {
- buffer->heap->ops->unmap_dma(buffer->heap, buffer);
- buffer->sglist = NULL;
- }
- mutex_unlock(&buffer->lock);
- mutex_unlock(&client->lock);
-}
-EXPORT_SYMBOL(ion_unmap_dma);
-
-struct ion_buffer *ion_share(struct ion_client *client,
- struct ion_handle *handle)
-{
- bool valid_handle;
-
- mutex_lock(&client->lock);
- valid_handle = ion_handle_validate(client, handle);
- mutex_unlock(&client->lock);
- if (!valid_handle) {
- WARN("%s: invalid handle passed to share.\n", __func__);
- return ERR_PTR(-EINVAL);
- }
-
- /* do not take an extra reference here, the burden is on the caller
- * to make sure the buffer doesn't go away while it's passing it
- * to another client -- ion_free should not be called on this handle
- * until the buffer has been imported into the other client
- */
- return handle->buffer;
-}
-EXPORT_SYMBOL(ion_share);
-
-struct ion_handle *ion_import(struct ion_client *client,
- struct ion_buffer *buffer)
-{
- struct ion_handle *handle = NULL;
-
- mutex_lock(&client->lock);
- /* if a handle exists for this buffer just take a reference to it */
- handle = ion_handle_lookup(client, buffer);
- if (!IS_ERR_OR_NULL(handle)) {
- ion_handle_get(handle);
- goto end;
- }
- handle = ion_handle_create(client, buffer);
- if (IS_ERR_OR_NULL(handle))
- goto end;
- ion_handle_add(client, handle);
-end:
- mutex_unlock(&client->lock);
- return handle;
-}
-EXPORT_SYMBOL(ion_import);
static int check_vaddr_bounds(unsigned long start, unsigned long end)
{
@@ -969,30 +885,6 @@
}
-static const struct file_operations ion_share_fops;
-
-struct ion_handle *ion_import_fd(struct ion_client *client, int fd)
-{
- struct file *file = fget(fd);
- struct ion_handle *handle;
-
- if (!file) {
- pr_err("%s: imported fd not found in file table.\n", __func__);
- return ERR_PTR(-EINVAL);
- }
- if (file->f_op != &ion_share_fops) {
- pr_err("%s: imported file %s is not a shared ion"
- " file.", __func__, file->f_dentry->d_name.name);
- handle = ERR_PTR(-EINVAL);
- goto end;
- }
- handle = ion_import(client, file->private_data);
-end:
- fput(file);
- return handle;
-}
-EXPORT_SYMBOL(ion_import_fd);
-
static int ion_debug_client_show(struct seq_file *s, void *unused)
{
struct ion_client *client = s->private;
@@ -1033,9 +925,6 @@
}
seq_printf(s, "\n");
}
-
- seq_printf(s, "%16.16s %d\n", "client refcount:",
- atomic_read(&client->ref.refcount));
mutex_unlock(&client->lock);
return 0;
@@ -1053,29 +942,6 @@
.release = single_release,
};
-static struct ion_client *ion_client_lookup(struct ion_device *dev,
- struct task_struct *task)
-{
- struct rb_node *n = dev->user_clients.rb_node;
- struct ion_client *client;
-
- mutex_lock(&dev->lock);
- while (n) {
- client = rb_entry(n, struct ion_client, node);
- if (task == client->task) {
- ion_client_get(client);
- mutex_unlock(&dev->lock);
- return client;
- } else if (task < client->task) {
- n = n->rb_left;
- } else if (task > client->task) {
- n = n->rb_right;
- }
- }
- mutex_unlock(&dev->lock);
- return NULL;
-}
-
struct ion_client *ion_client_create(struct ion_device *dev,
unsigned int heap_mask,
const char *name)
@@ -1107,19 +973,10 @@
}
task_unlock(current->group_leader);
- /* if this isn't a kernel thread, see if a client already
- exists */
- if (task) {
- client = ion_client_lookup(dev, task);
- if (!IS_ERR_OR_NULL(client)) {
- put_task_struct(current->group_leader);
- return client;
- }
- }
-
client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
if (!client) {
- put_task_struct(current->group_leader);
+ if (task)
+ put_task_struct(current->group_leader);
return ERR_PTR(-ENOMEM);
}
@@ -1139,36 +996,20 @@
client->heap_mask = heap_mask;
client->task = task;
client->pid = pid;
- kref_init(&client->ref);
mutex_lock(&dev->lock);
- if (task) {
- p = &dev->user_clients.rb_node;
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct ion_client, node);
+ p = &dev->clients.rb_node;
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_client, node);
- if (task < entry->task)
- p = &(*p)->rb_left;
- else if (task > entry->task)
- p = &(*p)->rb_right;
- }
- rb_link_node(&client->node, parent, p);
- rb_insert_color(&client->node, &dev->user_clients);
- } else {
- p = &dev->kernel_clients.rb_node;
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct ion_client, node);
-
- if (client < entry)
- p = &(*p)->rb_left;
- else if (client > entry)
- p = &(*p)->rb_right;
- }
- rb_link_node(&client->node, parent, p);
- rb_insert_color(&client->node, &dev->kernel_clients);
+ if (client < entry)
+ p = &(*p)->rb_left;
+ else if (client > entry)
+ p = &(*p)->rb_right;
}
+ rb_link_node(&client->node, parent, p);
+ rb_insert_color(&client->node, &dev->clients);
client->debug_root = debugfs_create_file(name, 0664,
@@ -1179,9 +1020,8 @@
return client;
}
-static void _ion_client_destroy(struct kref *kref)
+void ion_client_destroy(struct ion_client *client)
{
- struct ion_client *client = container_of(kref, struct ion_client, ref);
struct ion_device *dev = client->dev;
struct rb_node *n;
@@ -1192,12 +1032,9 @@
ion_handle_destroy(&handle->ref);
}
mutex_lock(&dev->lock);
- if (client->task) {
- rb_erase(&client->node, &dev->user_clients);
+ if (client->task)
put_task_struct(client->task);
- } else {
- rb_erase(&client->node, &dev->kernel_clients);
- }
+ rb_erase(&client->node, &dev->clients);
debugfs_remove_recursive(client->debug_root);
mutex_unlock(&dev->lock);
@@ -1205,23 +1042,6 @@
kfree(client);
}
-static void ion_client_get(struct ion_client *client)
-{
- kref_get(&client->ref);
-}
-
-static int ion_client_put(struct ion_client *client)
-{
- return kref_put(&client->ref, _ion_client_destroy);
-}
-
-void ion_client_destroy(struct ion_client *client)
-{
- if (client)
- ion_client_put(client);
-}
-EXPORT_SYMBOL(ion_client_destroy);
-
int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
unsigned long *flags)
{
@@ -1266,77 +1086,63 @@
}
EXPORT_SYMBOL(ion_handle_get_size);
-static int ion_share_release(struct inode *inode, struct file* file)
+struct sg_table *ion_sg_table(struct ion_client *client,
+ struct ion_handle *handle)
{
- struct ion_buffer *buffer = file->private_data;
+ struct ion_buffer *buffer;
+ struct sg_table *table;
- pr_debug("%s: %d\n", __func__, __LINE__);
- /* drop the reference to the buffer -- this prevents the
- buffer from going away because the client holding it exited
- while it was being passed */
- ion_buffer_put(buffer);
- return 0;
+ mutex_lock(&client->lock);
+ if (!ion_handle_validate(client, handle)) {
+ pr_err("%s: invalid handle passed to map_dma.\n",
+ __func__);
+ mutex_unlock(&client->lock);
+ return ERR_PTR(-EINVAL);
+ }
+ buffer = handle->buffer;
+ table = buffer->sg_table;
+ mutex_unlock(&client->lock);
+ return table;
+}
+
+static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
+{
+ struct dma_buf *dmabuf = attachment->dmabuf;
+ struct ion_buffer *buffer = dmabuf->priv;
+
+ return buffer->sg_table;
+}
+
+static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *table,
+ enum dma_data_direction direction)
+{
}
static void ion_vma_open(struct vm_area_struct *vma)
{
-
- struct ion_buffer *buffer = vma->vm_file->private_data;
- struct ion_handle *handle = vma->vm_private_data;
- struct ion_client *client;
+ struct ion_buffer *buffer = vma->vm_private_data;
pr_debug("%s: %d\n", __func__, __LINE__);
- /* check that the client still exists and take a reference so
- it can't go away until this vma is closed */
- client = ion_client_lookup(buffer->dev, current->group_leader);
- if (IS_ERR_OR_NULL(client)) {
- vma->vm_private_data = NULL;
- return;
- }
- ion_handle_get(handle);
+
mutex_lock(&buffer->lock);
buffer->umap_cnt++;
mutex_unlock(&buffer->lock);
- pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
- __func__, __LINE__,
- atomic_read(&client->ref.refcount),
- atomic_read(&handle->ref.refcount),
- atomic_read(&buffer->ref.refcount));
}
static void ion_vma_close(struct vm_area_struct *vma)
{
- struct ion_handle *handle = vma->vm_private_data;
- struct ion_buffer *buffer = vma->vm_file->private_data;
- struct ion_client *client;
+ struct ion_buffer *buffer = vma->vm_private_data;
pr_debug("%s: %d\n", __func__, __LINE__);
- /* this indicates the client is gone, nothing to do here */
- if (!handle)
- return;
- client = handle->client;
+
mutex_lock(&buffer->lock);
buffer->umap_cnt--;
mutex_unlock(&buffer->lock);
if (buffer->heap->ops->unmap_user)
buffer->heap->ops->unmap_user(buffer->heap, buffer);
-
-
- pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
- __func__, __LINE__,
- atomic_read(&client->ref.refcount),
- atomic_read(&handle->ref.refcount),
- atomic_read(&buffer->ref.refcount));
- mutex_lock(&client->lock);
- ion_handle_put(handle);
- mutex_unlock(&client->lock);
- ion_client_put(client);
- pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
- __func__, __LINE__,
- atomic_read(&client->ref.refcount),
- atomic_read(&handle->ref.refcount),
- atomic_read(&buffer->ref.refcount));
}
static struct vm_operations_struct ion_vm_ops = {
@@ -1344,127 +1150,198 @@
.close = ion_vma_close,
};
-static int ion_share_mmap(struct file *file, struct vm_area_struct *vma)
+static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
- struct ion_buffer *buffer = file->private_data;
- unsigned long size = vma->vm_end - vma->vm_start;
- struct ion_client *client;
- struct ion_handle *handle;
+ struct ion_buffer *buffer = dmabuf->priv;
int ret;
- unsigned long flags = file->f_flags & O_DSYNC ?
- ION_SET_CACHE(UNCACHED) :
- ION_SET_CACHE(CACHED);
-
- pr_debug("%s: %d\n", __func__, __LINE__);
- /* make sure the client still exists, it's possible for the client to
- have gone away but the map/share fd still to be around, take
- a reference to it so it can't go away while this mapping exists */
- client = ion_client_lookup(buffer->dev, current->group_leader);
- if (IS_ERR_OR_NULL(client)) {
- pr_err("%s: trying to mmap an ion handle in a process with no "
- "ion client\n", __func__);
+ if (!buffer->heap->ops->map_user) {
+ pr_err("%s: this heap does not define a method for mapping "
+ "to userspace\n", __func__);
return -EINVAL;
}
- if ((size > buffer->size) || (size + (vma->vm_pgoff << PAGE_SHIFT) >
- buffer->size)) {
- pr_err("%s: trying to map larger area than handle has available"
- "\n", __func__);
- ret = -EINVAL;
- goto err;
- }
-
- /* find the handle and take a reference to it */
- handle = ion_import(client, buffer);
- if (IS_ERR_OR_NULL(handle)) {
- ret = -EINVAL;
- goto err;
- }
-
- if (!handle->buffer->heap->ops->map_user) {
- pr_err("%s: this heap does not define a method for mapping "
- "to userspace\n", __func__);
- ret = -EINVAL;
- goto err1;
- }
-
mutex_lock(&buffer->lock);
-
- if (ion_validate_buffer_flags(buffer, flags)) {
- ret = -EEXIST;
- mutex_unlock(&buffer->lock);
- goto err1;
- }
-
/* now map it to userspace */
- ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma,
- flags);
+ ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
- buffer->umap_cnt++;
if (ret) {
+ mutex_unlock(&buffer->lock);
pr_err("%s: failure mapping buffer to userspace\n",
__func__);
- goto err2;
+ } else {
+ buffer->umap_cnt++;
+ mutex_unlock(&buffer->lock);
+
+ vma->vm_ops = &ion_vm_ops;
+ /*
+ * move the buffer into the vm_private_data so we can access it
+ * from vma_open/close
+ */
+ vma->vm_private_data = buffer;
}
- mutex_unlock(&buffer->lock);
-
- vma->vm_ops = &ion_vm_ops;
- /* move the handle into the vm_private_data so we can access it from
- vma_open/close */
- vma->vm_private_data = handle;
- pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
- __func__, __LINE__,
- atomic_read(&client->ref.refcount),
- atomic_read(&handle->ref.refcount),
- atomic_read(&buffer->ref.refcount));
- return 0;
-
-err2:
- buffer->umap_cnt--;
- mutex_unlock(&buffer->lock);
- /* drop the reference to the handle */
-err1:
- mutex_lock(&client->lock);
- ion_handle_put(handle);
- mutex_unlock(&client->lock);
-err:
- /* drop the reference to the client */
- ion_client_put(client);
return ret;
}
-static const struct file_operations ion_share_fops = {
- .owner = THIS_MODULE,
- .release = ion_share_release,
- .mmap = ion_share_mmap,
+static void ion_dma_buf_release(struct dma_buf *dmabuf)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+ ion_buffer_put(buffer);
+}
+
+static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+ return buffer->vaddr + offset;
+}
+
+static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
+ void *ptr)
+{
+ return;
+}
+
+static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
+ size_t len,
+ enum dma_data_direction direction)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+ void *vaddr;
+
+ if (!buffer->heap->ops->map_kernel) {
+ pr_err("%s: map kernel is not implemented by this heap.\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ mutex_lock(&buffer->lock);
+ vaddr = ion_buffer_kmap_get(buffer);
+ mutex_unlock(&buffer->lock);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+ if (!vaddr)
+ return -ENOMEM;
+ return 0;
+}
+
+static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
+ size_t len,
+ enum dma_data_direction direction)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+
+ mutex_lock(&buffer->lock);
+ ion_buffer_kmap_put(buffer);
+ mutex_unlock(&buffer->lock);
+}
+
+struct dma_buf_ops dma_buf_ops = {
+ .map_dma_buf = ion_map_dma_buf,
+ .unmap_dma_buf = ion_unmap_dma_buf,
+ .mmap = ion_mmap,
+ .release = ion_dma_buf_release,
+ .begin_cpu_access = ion_dma_buf_begin_cpu_access,
+ .end_cpu_access = ion_dma_buf_end_cpu_access,
+ .kmap_atomic = ion_dma_buf_kmap,
+ .kunmap_atomic = ion_dma_buf_kunmap,
+ .kmap = ion_dma_buf_kmap,
+ .kunmap = ion_dma_buf_kunmap,
};
-static int ion_ioctl_share(struct file *parent, struct ion_client *client,
- struct ion_handle *handle)
+static int ion_share_set_flags(struct ion_client *client,
+ struct ion_handle *handle,
+ unsigned long flags)
{
- int fd = get_unused_fd();
- struct file *file;
+ struct ion_buffer *buffer;
+ bool valid_handle;
+ unsigned long ion_flags = ION_SET_CACHE(CACHED);
+ if (flags & O_DSYNC)
+ ion_flags = ION_SET_CACHE(UNCACHED);
- if (fd < 0)
- return -ENFILE;
+ mutex_lock(&client->lock);
+ valid_handle = ion_handle_validate(client, handle);
+ mutex_unlock(&client->lock);
+ if (!valid_handle) {
+ WARN(1, "%s: invalid handle passed to set_flags.\n", __func__);
+ return -EINVAL;
+ }
- file = anon_inode_getfile("ion_share_fd", &ion_share_fops,
- handle->buffer, O_RDWR);
- if (IS_ERR_OR_NULL(file))
- goto err;
+ buffer = handle->buffer;
- if (parent->f_flags & O_DSYNC)
- file->f_flags |= O_DSYNC;
+ mutex_lock(&buffer->lock);
+ if (ion_validate_buffer_flags(buffer, ion_flags)) {
+ mutex_unlock(&buffer->lock);
+ return -EEXIST;
+ }
+ mutex_unlock(&buffer->lock);
+ return 0;
+}
- ion_buffer_get(handle->buffer);
- fd_install(fd, file);
+int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
+{
+ struct ion_buffer *buffer;
+ struct dma_buf *dmabuf;
+ bool valid_handle;
+ int fd;
+
+ mutex_lock(&client->lock);
+ valid_handle = ion_handle_validate(client, handle);
+ mutex_unlock(&client->lock);
+ if (!valid_handle) {
+ WARN("%s: invalid handle passed to share.\n", __func__);
+ return -EINVAL;
+ }
+
+ buffer = handle->buffer;
+ ion_buffer_get(buffer);
+ dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
+ if (IS_ERR(dmabuf)) {
+ ion_buffer_put(buffer);
+ return PTR_ERR(dmabuf);
+ }
+ fd = dma_buf_fd(dmabuf, O_CLOEXEC);
+ if (fd < 0) {
+ dma_buf_put(dmabuf);
+ ion_buffer_put(buffer);
+ }
return fd;
+}
-err:
- put_unused_fd(fd);
- return -ENFILE;
+struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
+{
+ struct dma_buf *dmabuf;
+ struct ion_buffer *buffer;
+ struct ion_handle *handle;
+
+ dmabuf = dma_buf_get(fd);
+ if (IS_ERR_OR_NULL(dmabuf))
+ return ERR_PTR(PTR_ERR(dmabuf));
+ /* if this memory came from ion */
+
+ if (dmabuf->ops != &dma_buf_ops) {
+ pr_err("%s: can not import dmabuf from another exporter\n",
+ __func__);
+ dma_buf_put(dmabuf);
+ return ERR_PTR(-EINVAL);
+ }
+ buffer = dmabuf->priv;
+
+ mutex_lock(&client->lock);
+ /* if a handle exists for this buffer just take a reference to it */
+ handle = ion_handle_lookup(client, buffer);
+ if (!IS_ERR_OR_NULL(handle)) {
+ ion_handle_get(handle);
+ goto end;
+ }
+ handle = ion_handle_create(client, buffer);
+ if (IS_ERR_OR_NULL(handle))
+ goto end;
+ ion_handle_add(client, handle);
+end:
+ mutex_unlock(&client->lock);
+ dma_buf_put(dmabuf);
+ return handle;
}
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
@@ -1481,11 +1358,13 @@
data.handle = ion_alloc(client, data.len, data.align,
data.flags);
- if (IS_ERR_OR_NULL(data.handle))
- return -ENOMEM;
+ if (IS_ERR(data.handle))
+ return PTR_ERR(data.handle);
- if (copy_to_user((void __user *)arg, &data, sizeof(data)))
+ if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
+ ion_free(client, data.handle);
return -EFAULT;
+ }
break;
}
case ION_IOC_FREE:
@@ -1508,18 +1387,15 @@
case ION_IOC_SHARE:
{
struct ion_fd_data data;
-
+ int ret;
if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
return -EFAULT;
- mutex_lock(&client->lock);
- if (!ion_handle_validate(client, data.handle)) {
- pr_err("%s: invalid handle passed to share ioctl.\n",
- __func__);
- mutex_unlock(&client->lock);
- return -EINVAL;
- }
- data.fd = ion_ioctl_share(filp, client, data.handle);
- mutex_unlock(&client->lock);
+
+ ret = ion_share_set_flags(client, data.handle, filp->f_flags);
+ if (ret)
+ return ret;
+
+ data.fd = ion_share_dma_buf(client, data.handle);
if (copy_to_user((void __user *)arg, &data, sizeof(data)))
return -EFAULT;
if (data.fd < 0)
@@ -1533,12 +1409,9 @@
if (copy_from_user(&data, (void __user *)arg,
sizeof(struct ion_fd_data)))
return -EFAULT;
-
- data.handle = ion_import_fd(client, data.fd);
- if (IS_ERR(data.handle)) {
- ret = PTR_ERR(data.handle);
+ data.handle = ion_import_dma_buf(client, data.fd);
+ if (IS_ERR(data.handle))
data.handle = NULL;
- }
if (copy_to_user((void __user *)arg, &data,
sizeof(struct ion_fd_data)))
return -EFAULT;
@@ -1581,8 +1454,8 @@
}
if (!data.handle) {
- handle = ion_import_fd(client, data.fd);
- if (IS_ERR_OR_NULL(handle)) {
+ handle = ion_import_dma_buf(client, data.fd);
+ if (IS_ERR(handle)) {
pr_info("%s: Could not import handle: %d\n",
__func__, (int)handle);
return -EINVAL;
@@ -1629,7 +1502,7 @@
struct ion_client *client = file->private_data;
pr_debug("%s: %d\n", __func__, __LINE__);
- ion_client_put(client);
+ ion_client_destroy(client);
return 0;
}
@@ -1739,14 +1612,7 @@
struct rb_node *j;
const char *client_name = NULL;
- for (j = rb_first(&dev->user_clients); j && !client_name;
- j = rb_next(j)) {
- struct ion_client *client = rb_entry(j, struct ion_client,
- node);
- if (ion_debug_find_buffer_owner(client, buffer))
- client_name = client->name;
- }
- for (j = rb_first(&dev->kernel_clients); j && !client_name;
+ for (j = rb_first(&dev->clients); j && !client_name;
j = rb_next(j)) {
struct ion_client *client = rb_entry(j, struct ion_client,
node);
@@ -1828,27 +1694,23 @@
mutex_lock(&dev->lock);
seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
- for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) {
- struct ion_client *client = rb_entry(n, struct ion_client,
- node);
- char task_comm[TASK_COMM_LEN];
- size_t size = ion_debug_heap_total(client, heap->id);
- if (!size)
- continue;
- get_task_comm(task_comm, client->task);
- seq_printf(s, "%16.s %16u %16x\n", task_comm, client->pid,
- size);
- }
-
- for (n = rb_first(&dev->kernel_clients); n; n = rb_next(n)) {
+ for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
struct ion_client *client = rb_entry(n, struct ion_client,
node);
size_t size = ion_debug_heap_total(client, heap->id);
if (!size)
continue;
- seq_printf(s, "%16.s %16u %16x\n", client->name, client->pid,
- size);
+ if (client->task) {
+ char task_comm[TASK_COMM_LEN];
+
+ get_task_comm(task_comm, client->task);
+ seq_printf(s, "%16.s %16u %16u\n", task_comm,
+ client->pid, size);
+ } else {
+ seq_printf(s, "%16.s %16u %16u\n", client->name,
+ client->pid, size);
+ }
}
ion_heap_print_debug(s, heap);
mutex_unlock(&dev->lock);
@@ -1873,6 +1735,11 @@
struct rb_node *parent = NULL;
struct ion_heap *entry;
+ if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
+ !heap->ops->unmap_dma)
+ pr_err("%s: can not add heap with invalid ops struct.\n",
+ __func__);
+
heap->dev = dev;
mutex_lock(&dev->lock);
while (*p) {
@@ -1970,7 +1837,7 @@
}
/* now see which buffers we can access */
- for (n = rb_first(&dev->kernel_clients); n; n = rb_next(n)) {
+ for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
struct ion_client *client = rb_entry(n, struct ion_client,
node);
@@ -1986,21 +1853,6 @@
}
- for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) {
- struct ion_client *client = rb_entry(n, struct ion_client,
- node);
-
- mutex_lock(&client->lock);
- for (n2 = rb_first(&client->handles); n2; n2 = rb_next(n2)) {
- struct ion_handle *handle = rb_entry(n2,
- struct ion_handle, node);
-
- handle->buffer->marked = 0;
-
- }
- mutex_unlock(&client->lock);
-
- }
/* And anyone still marked as a 1 means a leaked handle somewhere */
for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
@@ -2059,8 +1911,7 @@
idev->buffers = RB_ROOT;
mutex_init(&idev->lock);
idev->heaps = RB_ROOT;
- idev->user_clients = RB_ROOT;
- idev->kernel_clients = RB_ROOT;
+ idev->clients = RB_ROOT;
debugfs_create_file("check_leaked_fds", 0664, idev->debug_root, idev,
&debug_leak_fops);
return idev;
@@ -2072,3 +1923,19 @@
/* XXX need to free the heaps and clients ? */
kfree(dev);
}
+
+void __init ion_reserve(struct ion_platform_data *data)
+{
+ int i, ret;
+
+ for (i = 0; i < data->nr; i++) {
+ if (data->heaps[i].size == 0)
+ continue;
+ ret = memblock_reserve(data->heaps[i].base,
+ data->heaps[i].size);
+ if (ret)
+ pr_err("memblock reserve of %x@%lx failed\n",
+ data->heaps[i].size,
+ data->heaps[i].base);
+ }
+}
diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
index 1fdc1f9..a591eb4 100644
--- a/drivers/gpu/ion/ion_carveout_heap.c
+++ b/drivers/gpu/ion/ion_carveout_heap.c
@@ -108,28 +108,38 @@
buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL;
}
-struct scatterlist *ion_carveout_heap_map_dma(struct ion_heap *heap,
+struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
- struct scatterlist *sglist;
+ struct sg_table *table;
+ int ret;
- sglist = vmalloc(sizeof(struct scatterlist));
- if (!sglist)
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table)
return ERR_PTR(-ENOMEM);
- sg_init_table(sglist, 1);
- sglist->length = buffer->size;
- sglist->offset = 0;
- sglist->dma_address = buffer->priv_phys;
+ ret = sg_alloc_table(table, 1, GFP_KERNEL);
+ if (ret)
+ goto err0;
- return sglist;
+ table->sgl->length = buffer->size;
+ table->sgl->offset = 0;
+ table->sgl->dma_address = buffer->priv_phys;
+
+ return table;
+
+err0:
+ kfree(table);
+ return ERR_PTR(ret);
}
void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
- if (buffer->sglist)
- vfree(buffer->sglist);
+ if (buffer->sg_table)
+ sg_free_table(buffer->sg_table);
+ kfree(buffer->sg_table);
+ buffer->sg_table = 0;
}
static int ion_carveout_request_region(struct ion_carveout_heap *carveout_heap)
@@ -163,8 +173,7 @@
}
void *ion_carveout_heap_map_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer,
- unsigned long flags)
+ struct ion_buffer *buffer)
{
struct ion_carveout_heap *carveout_heap =
container_of(heap, struct ion_carveout_heap, heap);
@@ -173,7 +182,7 @@
if (ion_carveout_request_region(carveout_heap))
return NULL;
- if (ION_IS_CACHED(flags))
+ if (ION_IS_CACHED(buffer->flags))
ret_value = ioremap_cached(buffer->priv_phys, buffer->size);
else
ret_value = ioremap(buffer->priv_phys, buffer->size);
@@ -197,7 +206,7 @@
}
int ion_carveout_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
- struct vm_area_struct *vma, unsigned long flags)
+ struct vm_area_struct *vma)
{
struct ion_carveout_heap *carveout_heap =
container_of(heap, struct ion_carveout_heap, heap);
@@ -206,7 +215,7 @@
if (ion_carveout_request_region(carveout_heap))
return -EINVAL;
- if (!ION_IS_CACHED(flags))
+ if (!ION_IS_CACHED(buffer->flags))
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
ret_value = remap_pfn_range(vma, vma->vm_start,
diff --git a/drivers/gpu/ion/ion_cp_heap.c b/drivers/gpu/ion/ion_cp_heap.c
index c5e9caf..23ccab3 100644
--- a/drivers/gpu/ion/ion_cp_heap.c
+++ b/drivers/gpu/ion/ion_cp_heap.c
@@ -366,33 +366,42 @@
buffer->priv_phys = ION_CP_ALLOCATE_FAIL;
}
-struct scatterlist *ion_cp_heap_create_sglist(struct ion_buffer *buffer)
+struct sg_table *ion_cp_heap_create_sg_table(struct ion_buffer *buffer)
{
- struct scatterlist *sglist;
+ struct sg_table *table;
+ int ret;
- sglist = vmalloc(sizeof(*sglist));
- if (!sglist)
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table)
return ERR_PTR(-ENOMEM);
- sg_init_table(sglist, 1);
- sglist->length = buffer->size;
- sglist->offset = 0;
- sglist->dma_address = buffer->priv_phys;
+ ret = sg_alloc_table(table, 1, GFP_KERNEL);
+ if (ret)
+ goto err0;
- return sglist;
+ table->sgl->length = buffer->size;
+ table->sgl->offset = 0;
+ table->sgl->dma_address = buffer->priv_phys;
+
+ return table;
+err0:
+ kfree(table);
+ return ERR_PTR(ret);
}
-struct scatterlist *ion_cp_heap_map_dma(struct ion_heap *heap,
+struct sg_table *ion_cp_heap_map_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
- return ion_cp_heap_create_sglist(buffer);
+ return ion_cp_heap_create_sg_table(buffer);
}
void ion_cp_heap_unmap_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
- if (buffer->sglist)
- vfree(buffer->sglist);
+ if (buffer->sg_table)
+ sg_free_table(buffer->sg_table);
+ kfree(buffer->sg_table);
+ buffer->sg_table = 0;
}
/**
@@ -441,9 +450,7 @@
return NULL;
}
-void *ion_cp_heap_map_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer,
- unsigned long flags)
+void *ion_cp_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer)
{
struct ion_cp_heap *cp_heap =
container_of(heap, struct ion_cp_heap, heap);
@@ -452,7 +459,7 @@
mutex_lock(&cp_heap->lock);
if ((cp_heap->heap_protected == HEAP_NOT_PROTECTED) ||
((cp_heap->heap_protected == HEAP_PROTECTED) &&
- !ION_IS_CACHED(flags))) {
+ !ION_IS_CACHED(buffer->flags))) {
if (ion_cp_request_region(cp_heap)) {
mutex_unlock(&cp_heap->lock);
@@ -461,10 +468,10 @@
if (cp_heap->reusable) {
ret_value = ion_map_fmem_buffer(buffer, cp_heap->base,
- cp_heap->reserved_vrange, flags);
+ cp_heap->reserved_vrange, buffer->flags);
} else {
- if (ION_IS_CACHED(flags))
+ if (ION_IS_CACHED(buffer->flags))
ret_value = ioremap_cached(buffer->priv_phys,
buffer->size);
else
@@ -510,7 +517,7 @@
}
int ion_cp_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
- struct vm_area_struct *vma, unsigned long flags)
+ struct vm_area_struct *vma)
{
int ret_value = -EAGAIN;
struct ion_cp_heap *cp_heap =
@@ -523,7 +530,7 @@
return -EINVAL;
}
- if (!ION_IS_CACHED(flags))
+ if (!ION_IS_CACHED(buffer->flags))
vma->vm_page_prot = pgprot_writecombine(
vma->vm_page_prot);
@@ -764,7 +771,6 @@
struct iommu_domain *domain;
int ret = 0;
unsigned long extra;
- struct scatterlist *sglist = 0;
struct ion_cp_heap *cp_heap =
container_of(buffer->heap, struct ion_cp_heap, heap);
int prot = IOMMU_WRITE | IOMMU_READ;
@@ -819,12 +825,7 @@
goto out1;
}
- sglist = ion_cp_heap_create_sglist(buffer);
- if (IS_ERR_OR_NULL(sglist)) {
- ret = -ENOMEM;
- goto out1;
- }
- ret = iommu_map_range(domain, data->iova_addr, sglist,
+ ret = iommu_map_range(domain, data->iova_addr, buffer->sg_table->sgl,
buffer->size, prot);
if (ret) {
pr_err("%s: could not map %lx in domain %p\n",
@@ -839,14 +840,11 @@
if (ret)
goto out2;
}
- vfree(sglist);
return ret;
out2:
iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
- if (!IS_ERR_OR_NULL(sglist))
- vfree(sglist);
msm_free_iova_address(data->iova_addr, domain_num, partition_num,
data->mapped_size);
out:
diff --git a/drivers/gpu/ion/ion_iommu_heap.c b/drivers/gpu/ion/ion_iommu_heap.c
index 9ea6f2b..d0f101c 100644
--- a/drivers/gpu/ion/ion_iommu_heap.c
+++ b/drivers/gpu/ion/ion_iommu_heap.c
@@ -35,7 +35,6 @@
struct page **pages;
int nrpages;
unsigned long size;
- struct scatterlist *iommu_sglist;
};
static int ion_iommu_heap_allocate(struct ion_heap *heap,
@@ -47,6 +46,10 @@
struct ion_iommu_priv_data *data = NULL;
if (msm_use_iommu()) {
+ struct scatterlist *sg;
+ struct sg_table *table;
+ unsigned int i;
+
data = kmalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -59,25 +62,26 @@
ret = -ENOMEM;
goto err1;
}
- data->iommu_sglist = vmalloc(sizeof(*data->iommu_sglist) *
- data->nrpages);
- if (!data->iommu_sglist) {
+
+ table = buffer->sg_table =
+ kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+
+ if (!table) {
ret = -ENOMEM;
goto err1;
}
+ ret = sg_alloc_table(table, data->nrpages, GFP_KERNEL);
+ if (ret)
+ goto err2;
- sg_init_table(data->iommu_sglist, data->nrpages);
-
- for (i = 0; i < data->nrpages; i++) {
+ for_each_sg(table->sgl, sg, table->nents, i) {
data->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!data->pages[i])
- goto err2;
+ goto err3;
- sg_set_page(&data->iommu_sglist[i], data->pages[i],
- PAGE_SIZE, 0);
+ sg_set_page(sg, data->pages[i], PAGE_SIZE, 0);
}
-
buffer->priv_virt = data;
return 0;
@@ -86,9 +90,11 @@
}
+err3:
+ sg_free_table(buffer->sg_table);
err2:
- vfree(data->iommu_sglist);
- data->iommu_sglist = NULL;
+ kfree(buffer->sg_table);
+ buffer->sg_table = 0;
for (i = 0; i < data->nrpages; i++) {
if (data->pages[i])
@@ -111,16 +117,12 @@
for (i = 0; i < data->nrpages; i++)
__free_page(data->pages[i]);
- vfree(data->iommu_sglist);
- data->iommu_sglist = NULL;
-
kfree(data->pages);
kfree(data);
}
void *ion_iommu_heap_map_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer,
- unsigned long flags)
+ struct ion_buffer *buffer)
{
struct ion_iommu_priv_data *data = buffer->priv_virt;
pgprot_t page_prot = PAGE_KERNEL;
@@ -128,7 +130,7 @@
if (!data)
return NULL;
- if (!ION_IS_CACHED(flags))
+ if (!ION_IS_CACHED(buffer->flags))
page_prot = pgprot_noncached(page_prot);
buffer->vaddr = vmap(data->pages, data->nrpages, VM_IOREMAP, page_prot);
@@ -147,7 +149,7 @@
}
int ion_iommu_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
- struct vm_area_struct *vma, unsigned long flags)
+ struct vm_area_struct *vma)
{
struct ion_iommu_priv_data *data = buffer->priv_virt;
int i;
@@ -155,7 +157,7 @@
if (!data)
return -EINVAL;
- if (!ION_IS_CACHED(flags))
+ if (!ION_IS_CACHED(buffer->flags))
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
curr_addr = vma->vm_start;
@@ -183,7 +185,6 @@
struct iommu_domain *domain;
int ret = 0;
unsigned long extra;
- struct ion_iommu_priv_data *buffer_data = buffer->priv_virt;
int prot = IOMMU_WRITE | IOMMU_READ;
prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;
@@ -207,7 +208,8 @@
}
ret = iommu_map_range(domain, data->iova_addr,
- buffer_data->iommu_sglist, buffer->size, prot);
+ buffer->sg_table->sgl,
+ buffer->size, prot);
if (ret) {
pr_err("%s: could not map %lx in domain %p\n",
__func__, data->iova_addr, domain);
@@ -299,16 +301,19 @@
return 0;
}
-static struct scatterlist *ion_iommu_heap_map_dma(struct ion_heap *heap,
+static struct sg_table *ion_iommu_heap_map_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
- struct ion_iommu_priv_data *data = buffer->priv_virt;
- return data->iommu_sglist;
+ return buffer->sg_table;
}
static void ion_iommu_heap_unmap_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
+ if (buffer->sg_table)
+ sg_free_table(buffer->sg_table);
+ kfree(buffer->sg_table);
+ buffer->sg_table = 0;
}
static struct ion_heap_ops iommu_heap_ops = {
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
index 6940e2f..273e57e 100644
--- a/drivers/gpu/ion/ion_priv.h
+++ b/drivers/gpu/ion/ion_priv.h
@@ -26,18 +26,6 @@
#include <linux/iommu.h>
#include <linux/seq_file.h>
-struct ion_mapping;
-
-struct ion_dma_mapping {
- struct kref ref;
- struct scatterlist *sglist;
-};
-
-struct ion_kernel_mapping {
- struct kref ref;
- void *vaddr;
-};
-
enum {
DI_PARTITION_NUM = 0,
DI_DOMAIN_NUM = 1,
@@ -92,7 +80,7 @@
* @kmap_cnt: number of times the buffer is mapped to the kernel
* @vaddr: the kenrel mapping if kmap_cnt is not zero
* @dmap_cnt: number of times the buffer is mapped for dma
- * @sglist: the scatterlist for the buffer is dmap_cnt is not zero
+ * @sg_table: the sg table for the buffer if dmap_cnt is not zero
*/
struct ion_buffer {
struct kref ref;
@@ -109,7 +97,7 @@
int kmap_cnt;
void *vaddr;
int dmap_cnt;
- struct scatterlist *sglist;
+ struct sg_table *sg_table;
int umap_cnt;
unsigned int iommu_map_cnt;
struct rb_root iommu_maps;
@@ -136,14 +124,13 @@
void (*free) (struct ion_buffer *buffer);
int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer,
ion_phys_addr_t *addr, size_t *len);
- struct scatterlist *(*map_dma) (struct ion_heap *heap,
+ struct sg_table *(*map_dma) (struct ion_heap *heap,
struct ion_buffer *buffer);
void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer);
- void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer,
- unsigned long flags);
+ void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
- struct vm_area_struct *vma, unsigned long flags);
+ struct vm_area_struct *vma);
void (*unmap_user) (struct ion_heap *mapper, struct ion_buffer *buffer);
int (*cache_op)(struct ion_heap *heap, struct ion_buffer *buffer,
void *vaddr, unsigned int offset,
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
index 08b271b..c79c184 100644
--- a/drivers/gpu/ion/ion_system_heap.c
+++ b/drivers/gpu/ion/ion_system_heap.c
@@ -38,71 +38,90 @@
unsigned long size, unsigned long align,
unsigned long flags)
{
- buffer->priv_virt = vmalloc_user(size);
- if (!buffer->priv_virt)
- return -ENOMEM;
+ struct sg_table *table;
+ struct scatterlist *sg;
+ int i, j;
+ int npages = PAGE_ALIGN(size) / PAGE_SIZE;
+ table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+ i = sg_alloc_table(table, npages, GFP_KERNEL);
+ if (i)
+ goto err0;
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ struct page *page;
+ page = alloc_page(GFP_KERNEL|__GFP_ZERO);
+ if (!page)
+ goto err1;
+ sg_set_page(sg, page, PAGE_SIZE, 0);
+ }
+ buffer->priv_virt = table;
atomic_add(size, &system_heap_allocated);
return 0;
+err1:
+ for_each_sg(table->sgl, sg, i, j)
+ __free_page(sg_page(sg));
+ sg_free_table(table);
+err0:
+ kfree(table);
+ return -ENOMEM;
}
void ion_system_heap_free(struct ion_buffer *buffer)
{
- vfree(buffer->priv_virt);
+ int i;
+ struct scatterlist *sg;
+ struct sg_table *table = buffer->priv_virt;
+
+ for_each_sg(table->sgl, sg, table->nents, i)
+ __free_page(sg_page(sg));
+ if (buffer->sg_table)
+ sg_free_table(buffer->sg_table);
+ kfree(buffer->sg_table);
atomic_sub(buffer->size, &system_heap_allocated);
}
-struct scatterlist *ion_system_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
+struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
{
- struct scatterlist *sglist;
- struct page *page;
- int i;
- int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
- void *vaddr = buffer->priv_virt;
-
- sglist = vmalloc(npages * sizeof(struct scatterlist));
- if (!sglist)
- return ERR_PTR(-ENOMEM);
- memset(sglist, 0, npages * sizeof(struct scatterlist));
- sg_init_table(sglist, npages);
- for (i = 0; i < npages; i++) {
- page = vmalloc_to_page(vaddr);
- if (!page)
- goto end;
- sg_set_page(&sglist[i], page, PAGE_SIZE, 0);
- vaddr += PAGE_SIZE;
- }
- /* XXX do cache maintenance for dma? */
- return sglist;
-end:
- vfree(sglist);
- return NULL;
+ return buffer->priv_virt;
}
void ion_system_heap_unmap_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
- /* XXX undo cache maintenance for dma? */
- if (buffer->sglist)
- vfree(buffer->sglist);
+ return;
}
void *ion_system_heap_map_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer,
- unsigned long flags)
+ struct ion_buffer *buffer)
{
- if (ION_IS_CACHED(flags))
- return buffer->priv_virt;
- else {
+ if (!ION_IS_CACHED(buffer->flags)) {
pr_err("%s: cannot map system heap uncached\n", __func__);
return ERR_PTR(-EINVAL);
+ } else {
+ struct scatterlist *sg;
+ int i;
+ void *vaddr;
+ struct sg_table *table = buffer->priv_virt;
+ struct page **pages = kmalloc(
+ sizeof(struct page *) * table->nents,
+ GFP_KERNEL);
+
+ for_each_sg(table->sgl, sg, table->nents, i)
+ pages[i] = sg_page(sg);
+ vaddr = vmap(pages, table->nents, VM_MAP, PAGE_KERNEL);
+ kfree(pages);
+
+ return vaddr;
}
}
void ion_system_heap_unmap_kernel(struct ion_heap *heap,
struct ion_buffer *buffer)
{
+ vunmap(buffer->vaddr);
}
void ion_system_heap_unmap_iommu(struct ion_iommu_map *data)
@@ -132,14 +151,27 @@
}
int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
- struct vm_area_struct *vma, unsigned long flags)
+ struct vm_area_struct *vma)
{
- if (ION_IS_CACHED(flags))
- return remap_vmalloc_range(vma, buffer->priv_virt,
- vma->vm_pgoff);
- else {
+ if (!ION_IS_CACHED(buffer->flags)) {
pr_err("%s: cannot map system heap uncached\n", __func__);
return -EINVAL;
+ } else {
+ struct sg_table *table = buffer->priv_virt;
+ unsigned long addr = vma->vm_start;
+ unsigned long offset = vma->vm_pgoff;
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ if (offset) {
+ offset--;
+ continue;
+ }
+ vm_insert_page(vma, addr, sg_page(sg));
+ addr += PAGE_SIZE;
+ }
+ return 0;
}
}
@@ -168,42 +200,20 @@
if (system_heap_has_outer_cache) {
unsigned long pstart;
- void *vend;
- void *vtemp;
- unsigned long ln = 0;
- vend = buffer->priv_virt + buffer->size;
- vtemp = buffer->priv_virt + offset;
-
- if ((vtemp+length) > vend) {
- pr_err("Trying to flush outside of mapped range.\n");
- pr_err("End of mapped range: %p, trying to flush to "
- "address %p\n", vend, vtemp+length);
- WARN(1, "%s: called with heap name %s, buffer size 0x%x, "
- "vaddr 0x%p, offset 0x%x, length: 0x%x\n",
- __func__, heap->name, buffer->size, vaddr,
- offset, length);
- return -EINVAL;
- }
-
- for (; ln < length && vtemp < vend;
- vtemp += PAGE_SIZE, ln += PAGE_SIZE) {
- struct page *page = vmalloc_to_page(vtemp);
- if (!page) {
- WARN(1, "Could not find page for virt. address %p\n",
- vtemp);
- return -EINVAL;
- }
+ struct sg_table *table = buffer->priv_virt;
+ struct scatterlist *sg;
+ int i;
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ struct page *page = sg_page(sg);
pstart = page_to_phys(page);
/*
* If page -> phys is returning NULL, something
* has really gone wrong...
*/
if (!pstart) {
- WARN(1, "Could not translate %p to physical address\n",
- vtemp);
+ WARN(1, "Could not translate virtual address to physical address\n");
return -EINVAL;
}
-
outer_cache_op(pstart, pstart + PAGE_SIZE);
}
}
@@ -227,14 +237,11 @@
unsigned long iova_length,
unsigned long flags)
{
- int ret = 0, i;
+ int ret = 0;
struct iommu_domain *domain;
unsigned long extra;
unsigned long extra_iova_addr;
- struct page *page;
- int npages = buffer->size >> PAGE_SHIFT;
- void *vaddr = buffer->priv_virt;
- struct scatterlist *sglist = 0;
+ struct sg_table *table = buffer->priv_virt;
int prot = IOMMU_WRITE | IOMMU_READ;
prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;
@@ -261,23 +268,7 @@
goto out1;
}
-
- sglist = vmalloc(sizeof(*sglist) * npages);
- if (!sglist) {
- ret = -ENOMEM;
- goto out1;
- }
-
- sg_init_table(sglist, npages);
- for (i = 0; i < npages; i++) {
- page = vmalloc_to_page(vaddr);
- if (!page)
- goto out1;
- sg_set_page(&sglist[i], page, PAGE_SIZE, 0);
- vaddr += PAGE_SIZE;
- }
-
- ret = iommu_map_range(domain, data->iova_addr, sglist,
+ ret = iommu_map_range(domain, data->iova_addr, table->sgl,
buffer->size, prot);
if (ret) {
@@ -293,13 +284,11 @@
if (ret)
goto out2;
}
- vfree(sglist);
return ret;
out2:
iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
- vfree(sglist);
msm_free_iova_address(data->iova_addr, domain_num, partition_num,
data->mapped_size);
out:
@@ -366,27 +355,32 @@
return 0;
}
-struct scatterlist *ion_system_contig_heap_map_dma(struct ion_heap *heap,
+struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
- struct scatterlist *sglist;
+ struct sg_table *table;
+ int ret;
- sglist = vmalloc(sizeof(struct scatterlist));
- if (!sglist)
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table)
return ERR_PTR(-ENOMEM);
- sg_init_table(sglist, 1);
- sg_set_page(sglist, virt_to_page(buffer->priv_virt), buffer->size, 0);
- return sglist;
+ ret = sg_alloc_table(table, 1, GFP_KERNEL);
+ if (ret) {
+ kfree(table);
+ return ERR_PTR(ret);
+ }
+ sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
+ 0);
+ return table;
}
int ion_system_contig_heap_map_user(struct ion_heap *heap,
struct ion_buffer *buffer,
- struct vm_area_struct *vma,
- unsigned long flags)
+ struct vm_area_struct *vma)
{
unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));
- if (ION_IS_CACHED(flags))
+ if (ION_IS_CACHED(buffer->flags))
return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
vma->vm_end - vma->vm_start,
vma->vm_page_prot);
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index c597b42..a6b782a 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -249,13 +249,12 @@
kgsl_driver.stats.mapped -= entry->memdesc.size;
/*
- * Ion takes care of freeing the sglist for us (how nice </sarcasm>) so
- * unmap the dma before freeing the sharedmem so kgsl_sharedmem_free
+ * Ion takes care of freeing the sglist for us so
+ * clear the sg before freeing the sharedmem so kgsl_sharedmem_free
* doesn't try to free it again
*/
if (entry->memtype == KGSL_MEM_ENTRY_ION) {
- ion_unmap_dma(kgsl_ion_client, entry->priv_data);
entry->memdesc.sg = NULL;
}
@@ -1748,12 +1747,12 @@
{
struct ion_handle *handle;
struct scatterlist *s;
- unsigned long flags;
+ struct sg_table *sg_table;
if (IS_ERR_OR_NULL(kgsl_ion_client))
return -ENODEV;
- handle = ion_import_fd(kgsl_ion_client, fd);
+ handle = ion_import_dma_buf(kgsl_ion_client, fd);
if (IS_ERR_OR_NULL(handle))
return PTR_ERR(handle);
@@ -1762,13 +1761,12 @@
entry->memdesc.pagetable = pagetable;
entry->memdesc.size = 0;
- if (ion_handle_get_flags(kgsl_ion_client, handle, &flags))
+ sg_table = ion_sg_table(kgsl_ion_client, handle);
+
+ if (IS_ERR_OR_NULL(sg_table))
goto err;
- entry->memdesc.sg = ion_map_dma(kgsl_ion_client, handle, flags);
-
- if (IS_ERR_OR_NULL(entry->memdesc.sg))
- goto err;
+ entry->memdesc.sg = sg_table->sgl;
/* Calculate the size of the memdesc from the sglist */
@@ -1892,7 +1890,6 @@
fput(entry->priv_data);
break;
case KGSL_MEM_ENTRY_ION:
- ion_unmap_dma(kgsl_ion_client, entry->priv_data);
ion_free(kgsl_ion_client, entry->priv_data);
break;
default:
diff --git a/drivers/media/video/msm/gemini/msm_gemini_platform.c b/drivers/media/video/msm/gemini/msm_gemini_platform.c
index d7f6bd8..28d2439 100644
--- a/drivers/media/video/msm/gemini/msm_gemini_platform.c
+++ b/drivers/media/video/msm/gemini/msm_gemini_platform.c
@@ -47,7 +47,7 @@
unsigned long size;
int rc;
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
- *ionhandle = ion_import_fd(gemini_client, fd);
+ *ionhandle = ion_import_dma_buf(gemini_client, fd);
if (IS_ERR_OR_NULL(*ionhandle))
return 0;
diff --git a/drivers/media/video/msm/mercury/msm_mercury_platform.c b/drivers/media/video/msm/mercury/msm_mercury_platform.c
index 67ce82d..e90c63c 100644
--- a/drivers/media/video/msm/mercury/msm_mercury_platform.c
+++ b/drivers/media/video/msm/mercury/msm_mercury_platform.c
@@ -52,7 +52,7 @@
unsigned long size;
int rc;
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
- *ionhandle = ion_import_fd(mercury_client, fd);
+ *ionhandle = ion_import_dma_buf(mercury_client, fd);
if (IS_ERR_OR_NULL(*ionhandle))
return 0;
diff --git a/drivers/media/video/msm/msm_camera.c b/drivers/media/video/msm/msm_camera.c
index 9f32bfe..2f1d1ab 100644
--- a/drivers/media/video/msm/msm_camera.c
+++ b/drivers/media/video/msm/msm_camera.c
@@ -314,7 +314,7 @@
if (!region)
goto out;
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
- region->handle = ion_import_fd(client_for_ion, info->fd);
+ region->handle = ion_import_dma_buf(client_for_ion, info->fd);
if (IS_ERR_OR_NULL(region->handle))
goto out1;
ion_phys(client_for_ion, region->handle,
diff --git a/drivers/media/video/msm/msm_mem.c b/drivers/media/video/msm/msm_mem.c
index 0043f72..5d412db 100644
--- a/drivers/media/video/msm/msm_mem.c
+++ b/drivers/media/video/msm/msm_mem.c
@@ -132,7 +132,7 @@
if (!region)
goto out;
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
- region->handle = ion_import_fd(client, info->fd);
+ region->handle = ion_import_dma_buf(client, info->fd);
if (IS_ERR_OR_NULL(region->handle))
goto out1;
if (ion_map_iommu(client, region->handle, CAMERA_DOMAIN, GEN_POOL,
diff --git a/drivers/media/video/msm_vidc/msm_smem.c b/drivers/media/video/msm_vidc/msm_smem.c
index bdace3c..2913d74 100644
--- a/drivers/media/video/msm_vidc/msm_smem.c
+++ b/drivers/media/video/msm_vidc/msm_smem.c
@@ -26,7 +26,7 @@
unsigned long ionflag;
size_t len;
int rc = 0;
- hndl = ion_import_fd(client->clnt, fd);
+ hndl = ion_import_dma_buf(client->clnt, fd);
if (IS_ERR_OR_NULL(hndl)) {
pr_err("Failed to get handle: %p, %d, %d, %p\n",
client, fd, offset, hndl);
diff --git a/drivers/media/video/vcap_v4l2.c b/drivers/media/video/vcap_v4l2.c
index db0dbc2..0a033ae 100644
--- a/drivers/media/video/vcap_v4l2.c
+++ b/drivers/media/video/vcap_v4l2.c
@@ -339,7 +339,7 @@
buf = container_of(vb, struct vcap_buffer, vb);
- buf->ion_handle = ion_import_fd(dev->ion_client, b->m.userptr);
+ buf->ion_handle = ion_import_dma_buf(dev->ion_client, b->m.userptr);
if (IS_ERR((void *)buf->ion_handle)) {
pr_err("%s: Could not alloc memory\n", __func__);
buf->ion_handle = NULL;
diff --git a/drivers/media/video/videobuf2-msm-mem.c b/drivers/media/video/videobuf2-msm-mem.c
index 186195d..740d183 100644
--- a/drivers/media/video/videobuf2-msm-mem.c
+++ b/drivers/media/video/videobuf2-msm-mem.c
@@ -185,7 +185,7 @@
if (mem->phyaddr != 0)
return 0;
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
- mem->ion_handle = ion_import_fd(client, (int)mem->vaddr);
+ mem->ion_handle = ion_import_dma_buf(client, (int)mem->vaddr);
if (IS_ERR_OR_NULL(mem->ion_handle)) {
pr_err("%s ION import failed\n", __func__);
return PTR_ERR(mem->ion_handle);
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index f2ed62d..d4eb6e0 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -261,7 +261,8 @@
ion_phys_addr_t pa;
/* Get the handle of the shared fd */
- svc->ihandle = ion_import_fd(qseecom.ion_clnt, listener->ifd_data_fd);
+ svc->ihandle = ion_import_dma_buf(qseecom.ion_clnt,
+ listener->ifd_data_fd);
if (svc->ihandle == NULL) {
pr_err("Ion client could not retrieve the handle\n");
return -ENOMEM;
@@ -503,7 +504,8 @@
return -EFAULT;
/* Get the handle of the shared fd */
- data->client.ihandle = ion_import_fd(qseecom.ion_clnt, req.ifd_data_fd);
+ data->client.ihandle = ion_import_dma_buf(qseecom.ion_clnt,
+ req.ifd_data_fd);
if (IS_ERR_OR_NULL(data->client.ihandle)) {
pr_err("Ion client could not retrieve the handle\n");
return -ENOMEM;
@@ -665,7 +667,7 @@
pr_warn("App (%s) does not exist, loading apps for first time\n",
(char *)(req.app_name));
/* Get the handle of the shared fd */
- ihandle = ion_import_fd(qseecom.ion_clnt,
+ ihandle = ion_import_dma_buf(qseecom.ion_clnt,
load_img_req.ifd_data_fd);
if (IS_ERR_OR_NULL(ihandle)) {
pr_err("Ion client could not retrieve the handle\n");
@@ -1065,7 +1067,7 @@
for (i = 0; i < MAX_ION_FD; i++) {
if (req->ifd_data[i].fd > 0) {
/* Get the handle of the shared fd */
- ihandle = ion_import_fd(qseecom.ion_clnt,
+ ihandle = ion_import_dma_buf(qseecom.ion_clnt,
req->ifd_data[i].fd);
if (IS_ERR_OR_NULL(ihandle)) {
pr_err("Ion client can't retrieve the handle\n");
@@ -1299,7 +1301,7 @@
}
/* Get the handle of the shared fd */
- ihandle = ion_import_fd(qseecom.ion_clnt,
+ ihandle = ion_import_dma_buf(qseecom.ion_clnt,
load_img_req.ifd_data_fd);
if (IS_ERR_OR_NULL(ihandle)) {
pr_err("Ion client could not retrieve the handle\n");
diff --git a/drivers/tty/n_smux.c b/drivers/tty/n_smux.c
index d03b4b4..0d2fdb8 100644
--- a/drivers/tty/n_smux.c
+++ b/drivers/tty/n_smux.c
@@ -33,8 +33,6 @@
#define SMUX_NOTIFY_FIFO_SIZE 128
#define SMUX_TX_QUEUE_SIZE 256
-#define SMUX_WM_LOW 2
-#define SMUX_WM_HIGH 4
#define SMUX_PKT_LOG_SIZE 80
/* Maximum size we can accept in a single RX buffer */
@@ -83,6 +81,30 @@
pr_info(x); \
} while (0)
+#define SMUX_PWR_PKT_RX(pkt) do { \
+ if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
+ smux_log_pkt(pkt, 1); \
+} while (0)
+
+#define SMUX_PWR_PKT_TX(pkt) do { \
+ if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
+ if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
+ pkt->hdr.flags == SMUX_WAKEUP_ACK) \
+ pr_info("smux: TX Wakeup ACK\n"); \
+ else if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
+ pkt->hdr.flags == SMUX_WAKEUP_REQ) \
+ pr_info("smux: TX Wakeup REQ\n"); \
+ else \
+ smux_log_pkt(pkt, 0); \
+ } \
+} while (0)
+
+#define SMUX_PWR_BYTE_TX(pkt) do { \
+ if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
+ smux_log_pkt(pkt, 0); \
+ } \
+} while (0)
+
#define SMUX_LOG_PKT_RX(pkt) do { \
if (smux_debug_mask & MSM_SMUX_PKT) \
smux_log_pkt(pkt, 1); \
@@ -172,12 +194,15 @@
unsigned local_state;
unsigned local_mode;
uint8_t local_tiocm;
+ unsigned options;
unsigned remote_state;
unsigned remote_mode;
uint8_t remote_tiocm;
int tx_flow_control;
+ int rx_flow_control_auto;
+ int rx_flow_control_client;
/* client callbacks and private data */
void *priv;
@@ -330,6 +355,8 @@
static int ssr_notifier_cb(struct notifier_block *this,
unsigned long code,
void *data);
+static void smux_uart_power_on_atomic(void);
+static int smux_rx_flow_control_updated(struct smux_lch_t *ch);
/**
* Convert TTY Error Flags to string for logging purposes.
@@ -402,10 +429,13 @@
ch->local_state = SMUX_LCH_LOCAL_CLOSED;
ch->local_mode = SMUX_LCH_MODE_NORMAL;
ch->local_tiocm = 0x0;
+ ch->options = SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
ch->remote_mode = SMUX_LCH_MODE_NORMAL;
ch->remote_tiocm = 0x0;
ch->tx_flow_control = 0;
+ ch->rx_flow_control_auto = 0;
+ ch->rx_flow_control_client = 0;
ch->priv = 0;
ch->notify = 0;
ch->get_rx_buffer = 0;
@@ -486,6 +516,8 @@
ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
ch->remote_mode = SMUX_LCH_MODE_NORMAL;
ch->tx_flow_control = 0;
+ ch->rx_flow_control_auto = 0;
+ ch->rx_flow_control_client = 0;
/* Purge RX retry queue */
if (ch->rx_retry_queue_cnt)
@@ -537,67 +569,76 @@
char local_mode;
char remote_state;
char remote_mode;
- struct smux_lch_t *ch;
+ struct smux_lch_t *ch = NULL;
unsigned char *data;
- ch = &smux_lch[pkt->hdr.lcid];
+ if (!smux_assert_lch_id(pkt->hdr.lcid))
+ ch = &smux_lch[pkt->hdr.lcid];
- switch (ch->local_state) {
- case SMUX_LCH_LOCAL_CLOSED:
- local_state = 'C';
- break;
- case SMUX_LCH_LOCAL_OPENING:
- local_state = 'o';
- break;
- case SMUX_LCH_LOCAL_OPENED:
- local_state = 'O';
- break;
- case SMUX_LCH_LOCAL_CLOSING:
- local_state = 'c';
- break;
- default:
- local_state = 'U';
- break;
- }
+ if (ch) {
+ switch (ch->local_state) {
+ case SMUX_LCH_LOCAL_CLOSED:
+ local_state = 'C';
+ break;
+ case SMUX_LCH_LOCAL_OPENING:
+ local_state = 'o';
+ break;
+ case SMUX_LCH_LOCAL_OPENED:
+ local_state = 'O';
+ break;
+ case SMUX_LCH_LOCAL_CLOSING:
+ local_state = 'c';
+ break;
+ default:
+ local_state = 'U';
+ break;
+ }
- switch (ch->local_mode) {
- case SMUX_LCH_MODE_LOCAL_LOOPBACK:
- local_mode = 'L';
- break;
- case SMUX_LCH_MODE_REMOTE_LOOPBACK:
- local_mode = 'R';
- break;
- case SMUX_LCH_MODE_NORMAL:
- local_mode = 'N';
- break;
- default:
- local_mode = 'U';
- break;
- }
+ switch (ch->local_mode) {
+ case SMUX_LCH_MODE_LOCAL_LOOPBACK:
+ local_mode = 'L';
+ break;
+ case SMUX_LCH_MODE_REMOTE_LOOPBACK:
+ local_mode = 'R';
+ break;
+ case SMUX_LCH_MODE_NORMAL:
+ local_mode = 'N';
+ break;
+ default:
+ local_mode = 'U';
+ break;
+ }
- switch (ch->remote_state) {
- case SMUX_LCH_REMOTE_CLOSED:
- remote_state = 'C';
- break;
- case SMUX_LCH_REMOTE_OPENED:
- remote_state = 'O';
- break;
+ switch (ch->remote_state) {
+ case SMUX_LCH_REMOTE_CLOSED:
+ remote_state = 'C';
+ break;
+ case SMUX_LCH_REMOTE_OPENED:
+ remote_state = 'O';
+ break;
- default:
- remote_state = 'U';
- break;
- }
+ default:
+ remote_state = 'U';
+ break;
+ }
- switch (ch->remote_mode) {
- case SMUX_LCH_MODE_REMOTE_LOOPBACK:
- remote_mode = 'R';
- break;
- case SMUX_LCH_MODE_NORMAL:
- remote_mode = 'N';
- break;
- default:
- remote_mode = 'U';
- break;
+ switch (ch->remote_mode) {
+ case SMUX_LCH_MODE_REMOTE_LOOPBACK:
+ remote_mode = 'R';
+ break;
+ case SMUX_LCH_MODE_NORMAL:
+ remote_mode = 'N';
+ break;
+ default:
+ remote_mode = 'U';
+ break;
+ }
+ } else {
+ /* broadcast channel */
+ local_state = '-';
+ local_mode = '-';
+ remote_state = '-';
+ remote_mode = '-';
}
/* determine command type (ACK, etc) */
@@ -611,6 +652,11 @@
if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
break;
+
+ case SMUX_CMD_PWR_CTL:
+ if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK)
+ snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
+ break;
};
i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
@@ -1342,6 +1388,7 @@
uint8_t lcid;
int ret = 0;
int do_retry = 0;
+ int tx_ready = 0;
int tmp;
int rx_len;
struct smux_lch_t *ch;
@@ -1385,8 +1432,20 @@
if (!list_empty(&ch->rx_retry_queue)) {
do_retry = 1;
+
+ if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
+ !ch->rx_flow_control_auto &&
+ ((ch->rx_retry_queue_cnt + 1) >= SMUX_RX_WM_HIGH)) {
+ /* need to flow control RX */
+ ch->rx_flow_control_auto = 1;
+ tx_ready |= smux_rx_flow_control_updated(ch);
+ schedule_notify(ch->lcid, SMUX_RX_RETRY_HIGH_WM_HIT,
+ NULL);
+ }
if ((ch->rx_retry_queue_cnt + 1) > SMUX_RX_RETRY_MAX_PKTS) {
/* retry queue full */
+ pr_err("%s: ch %d RX retry queue full\n",
+ __func__, lcid);
schedule_notify(lcid, SMUX_READ_FAIL, NULL);
ret = -ENOMEM;
spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
@@ -1410,7 +1469,7 @@
}
ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
smux_tx_queue(ack_pkt, ch, 0);
- list_channel(ch);
+ tx_ready = 1;
} else {
pr_err("%s: Remote loopack allocation failure\n",
__func__);
@@ -1436,6 +1495,8 @@
/* buffer allocation failed - add to retry queue */
do_retry = 1;
} else if (tmp < 0) {
+ pr_err("%s: ch %d Client RX buffer alloc failed %d\n",
+ __func__, lcid, tmp);
schedule_notify(lcid, SMUX_READ_FAIL, NULL);
ret = -ENOMEM;
}
@@ -1482,6 +1543,8 @@
spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
}
+ if (tx_ready)
+ list_channel(ch);
out:
return ret;
}
@@ -1601,6 +1664,8 @@
struct smux_pkt_t *ack_pkt = NULL;
unsigned long flags;
+ SMUX_PWR_PKT_RX(pkt);
+
spin_lock_irqsave(&smux.tx_lock_lha2, flags);
if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
/* local sleep request ack */
@@ -1665,10 +1730,9 @@
{
int ret = -ENXIO;
- SMUX_LOG_PKT_RX(pkt);
-
switch (pkt->hdr.cmd) {
case SMUX_CMD_OPEN_LCH:
+ SMUX_LOG_PKT_RX(pkt);
if (smux_assert_lch_id(pkt->hdr.lcid)) {
pr_err("%s: invalid channel id %d\n",
__func__, pkt->hdr.lcid);
@@ -1678,6 +1742,7 @@
break;
case SMUX_CMD_DATA:
+ SMUX_LOG_PKT_RX(pkt);
if (smux_assert_lch_id(pkt->hdr.lcid)) {
pr_err("%s: invalid channel id %d\n",
__func__, pkt->hdr.lcid);
@@ -1687,6 +1752,7 @@
break;
case SMUX_CMD_CLOSE_LCH:
+ SMUX_LOG_PKT_RX(pkt);
if (smux_assert_lch_id(pkt->hdr.lcid)) {
pr_err("%s: invalid channel id %d\n",
__func__, pkt->hdr.lcid);
@@ -1696,6 +1762,7 @@
break;
case SMUX_CMD_STATUS:
+ SMUX_LOG_PKT_RX(pkt);
if (smux_assert_lch_id(pkt->hdr.lcid)) {
pr_err("%s: invalid channel id %d\n",
__func__, pkt->hdr.lcid);
@@ -1709,10 +1776,12 @@
break;
case SMUX_CMD_BYTE:
+ SMUX_LOG_PKT_RX(pkt);
ret = smux_handle_rx_byte_cmd(pkt);
break;
default:
+ SMUX_LOG_PKT_RX(pkt);
pr_err("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
ret = -EINVAL;
}
@@ -1828,9 +1897,11 @@
smux.rx_state = SMUX_RX_MAGIC;
break;
case SMUX_WAKEUP_REQ:
+ SMUX_PWR("smux: RX Wakeup REQ\n");
smux_handle_wakeup_req();
break;
case SMUX_WAKEUP_ACK:
+ SMUX_PWR("smux: RX Wakeup ACK\n");
smux_handle_wakeup_ack();
break;
default:
@@ -2048,8 +2119,10 @@
*/
static void smux_flush_tty(void)
{
+ mutex_lock(&smux.mutex_lha0);
if (!smux.tty) {
pr_err("%s: ldisc not loaded\n", __func__);
+ mutex_unlock(&smux.mutex_lha0);
return;
}
@@ -2058,6 +2131,8 @@
if (tty_chars_in_buffer(smux.tty) > 0)
pr_err("%s: unable to flush UART queue\n", __func__);
+
+ mutex_unlock(&smux.mutex_lha0);
}
/**
@@ -2106,8 +2181,10 @@
/**
* Power-up the UART.
+ *
+ * Must be called with smux.mutex_lha0 already locked.
*/
-static void smux_uart_power_on(void)
+static void smux_uart_power_on_atomic(void)
{
struct uart_state *state;
@@ -2121,19 +2198,32 @@
}
/**
+ * Power-up the UART.
+ */
+static void smux_uart_power_on(void)
+{
+ mutex_lock(&smux.mutex_lha0);
+ smux_uart_power_on_atomic();
+ mutex_unlock(&smux.mutex_lha0);
+}
+
+/**
* Power down the UART.
*/
static void smux_uart_power_off(void)
{
struct uart_state *state;
+ mutex_lock(&smux.mutex_lha0);
if (!smux.tty || !smux.tty->driver_data) {
pr_err("%s: unable to find UART port for tty %p\n",
__func__, smux.tty);
+ mutex_unlock(&smux.mutex_lha0);
return;
}
state = smux.tty->driver_data;
msm_hs_request_clock_off(state->uart_port);
+ mutex_unlock(&smux.mutex_lha0);
}
/**
@@ -2148,44 +2238,16 @@
{
unsigned long flags;
unsigned wakeup_delay;
- int complete = 0;
- while (!smux.in_reset) {
- spin_lock_irqsave(&smux.tx_lock_lha2, flags);
- if (smux.power_state == SMUX_PWR_ON) {
- /* wakeup complete */
- complete = 1;
- spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
- break;
- } else {
- /* retry */
- wakeup_delay = smux.pwr_wakeup_delay_us;
- smux.pwr_wakeup_delay_us <<= 1;
- if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
- smux.pwr_wakeup_delay_us =
- SMUX_WAKEUP_DELAY_MAX;
- }
+ if (smux.in_reset)
+ return;
+
+ spin_lock_irqsave(&smux.tx_lock_lha2, flags);
+ if (smux.power_state == SMUX_PWR_ON) {
+ /* wakeup complete */
spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
- SMUX_DBG("%s: triggering wakeup\n", __func__);
- smux_send_byte(SMUX_WAKEUP_REQ);
-
- if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
- SMUX_DBG("%s: sleeping for %u us\n", __func__,
- wakeup_delay);
- usleep_range(wakeup_delay, 2*wakeup_delay);
- } else {
- /* schedule delayed work */
- SMUX_DBG("%s: scheduling delayed wakeup in %u ms\n",
- __func__, wakeup_delay / 1000);
- queue_delayed_work(smux_tx_wq,
- &smux_wakeup_delayed_work,
- msecs_to_jiffies(wakeup_delay / 1000));
- break;
- }
- }
-
- if (complete) {
SMUX_DBG("%s: wakeup complete\n", __func__);
+
/*
* Cancel any pending retry. This avoids a race condition with
* a new power-up request because:
@@ -2194,6 +2256,31 @@
* workqueue as new TX wakeup requests
*/
cancel_delayed_work(&smux_wakeup_delayed_work);
+ } else {
+ /* retry wakeup */
+ wakeup_delay = smux.pwr_wakeup_delay_us;
+ smux.pwr_wakeup_delay_us <<= 1;
+ if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
+ smux.pwr_wakeup_delay_us =
+ SMUX_WAKEUP_DELAY_MAX;
+
+ spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+ SMUX_DBG("%s: triggering wakeup\n", __func__);
+ smux_send_byte(SMUX_WAKEUP_REQ);
+
+ if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
+ SMUX_DBG("%s: sleeping for %u us\n", __func__,
+ wakeup_delay);
+ usleep_range(wakeup_delay, 2*wakeup_delay);
+ queue_work(smux_tx_wq, &smux_wakeup_work);
+ } else {
+ /* schedule delayed work */
+ SMUX_DBG("%s: scheduling delayed wakeup in %u ms\n",
+ __func__, wakeup_delay / 1000);
+ queue_delayed_work(smux_tx_wq,
+ &smux_wakeup_delayed_work,
+ msecs_to_jiffies(wakeup_delay / 1000));
+ }
}
}
@@ -2275,18 +2362,32 @@
/**
* Remove RX retry packet from channel and free it.
*
- * Must be called with state_lock_lhb1 locked.
- *
* @ch Channel for retry packet
* @retry Retry packet to remove
+ *
+ * @returns 1 if flow control updated; 0 otherwise
+ *
+ * Must be called with state_lock_lhb1 locked.
*/
-void smux_remove_rx_retry(struct smux_lch_t *ch,
+int smux_remove_rx_retry(struct smux_lch_t *ch,
struct smux_rx_pkt_retry *retry)
{
+ int tx_ready = 0;
+
list_del(&retry->rx_retry_list);
--ch->rx_retry_queue_cnt;
smux_free_pkt(retry->pkt);
kfree(retry);
+
+ if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
+ (ch->rx_retry_queue_cnt <= SMUX_RX_WM_LOW) &&
+ ch->rx_flow_control_auto) {
+ ch->rx_flow_control_auto = 0;
+ smux_rx_flow_control_updated(ch);
+ schedule_notify(ch->lcid, SMUX_RX_RETRY_LOW_WM_HIT, NULL);
+ tx_ready = 1;
+ }
+ return tx_ready;
}
/**
@@ -2357,6 +2458,8 @@
union notifier_metadata metadata;
int tmp;
unsigned long flags;
+ int immediate_retry = 0;
+ int tx_ready = 0;
ch = container_of(work, struct smux_lch_t, rx_retry_work.work);
@@ -2368,7 +2471,7 @@
retry = list_first_entry(&ch->rx_retry_queue,
struct smux_rx_pkt_retry,
rx_retry_list);
- smux_remove_rx_retry(ch, retry);
+ (void)smux_remove_rx_retry(ch, retry);
}
}
@@ -2383,7 +2486,8 @@
rx_retry_list);
spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
- SMUX_DBG("%s: retrying rx pkt %p\n", __func__, retry);
+ SMUX_DBG("%s: ch %d retrying rx pkt %p\n",
+ __func__, ch->lcid, retry);
metadata.read.pkt_priv = 0;
metadata.read.buffer = 0;
tmp = ch->get_rx_buffer(ch->priv,
@@ -2392,33 +2496,44 @@
retry->pkt->hdr.payload_len);
if (tmp == 0 && metadata.read.buffer) {
/* have valid RX buffer */
+
memcpy(metadata.read.buffer, retry->pkt->payload,
retry->pkt->hdr.payload_len);
metadata.read.len = retry->pkt->hdr.payload_len;
spin_lock_irqsave(&ch->state_lock_lhb1, flags);
- smux_remove_rx_retry(ch, retry);
+ tx_ready = smux_remove_rx_retry(ch, retry);
spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
-
schedule_notify(ch->lcid, SMUX_READ_DONE, &metadata);
+ if (tx_ready)
+ list_channel(ch);
+
+ immediate_retry = 1;
} else if (tmp == -EAGAIN ||
(tmp == 0 && !metadata.read.buffer)) {
/* retry again */
retry->timeout_in_ms <<= 1;
if (retry->timeout_in_ms > SMUX_RX_RETRY_MAX_MS) {
/* timed out */
+ pr_err("%s: ch %d RX retry client timeout\n",
+ __func__, ch->lcid);
spin_lock_irqsave(&ch->state_lock_lhb1, flags);
- smux_remove_rx_retry(ch, retry);
- schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
+ tx_ready = smux_remove_rx_retry(ch, retry);
spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+ schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
+ if (tx_ready)
+ list_channel(ch);
}
} else {
/* client error - drop packet */
+ pr_err("%s: ch %d RX retry client failed (%d)\n",
+ __func__, ch->lcid, tmp);
spin_lock_irqsave(&ch->state_lock_lhb1, flags);
- smux_remove_rx_retry(ch, retry);
+ tx_ready = smux_remove_rx_retry(ch, retry);
spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
-
schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
+ if (tx_ready)
+ list_channel(ch);
}
/* schedule next retry */
@@ -2427,8 +2542,12 @@
retry = list_first_entry(&ch->rx_retry_queue,
struct smux_rx_pkt_retry,
rx_retry_list);
- queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
- msecs_to_jiffies(retry->timeout_in_ms));
+
+ if (immediate_retry)
+ queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
+ else
+ queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
+ msecs_to_jiffies(retry->timeout_in_ms));
}
spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
}
@@ -2497,7 +2616,7 @@
spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
/* send the packet */
- SMUX_LOG_PKT_TX(pkt);
+ SMUX_PWR_PKT_TX(pkt);
if (!smux_byte_loopback) {
smux_tx_tty(pkt);
smux_flush_tty();
@@ -2534,7 +2653,7 @@
if (smux.power_state != SMUX_PWR_ON) {
/* channel not ready to transmit */
- SMUX_DBG("%s: can not tx with power state %d\n",
+ SMUX_DBG("%s: waiting for link up (state %d)\n",
__func__,
smux.power_state);
spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
@@ -2577,7 +2696,7 @@
--ch->tx_pending_data_cnt;
if (ch->notify_lwm &&
ch->tx_pending_data_cnt
- <= SMUX_WM_LOW) {
+ <= SMUX_TX_WM_LOW) {
ch->notify_lwm = 0;
low_wm_notif = 1;
}
@@ -2604,6 +2723,34 @@
}
}
+/**
+ * Update the RX flow control (sent in the TIOCM Status command).
+ *
+ * @ch Channel for update
+ *
+ * @returns 1 for updated, 0 for not updated
+ *
+ * Must be called with ch->state_lock_lhb1 locked.
+ */
+static int smux_rx_flow_control_updated(struct smux_lch_t *ch)
+{
+ int updated = 0;
+ int prev_state;
+
+ prev_state = ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL;
+
+ if (ch->rx_flow_control_client || ch->rx_flow_control_auto)
+ ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
+ else
+ ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
+
+ if (prev_state != (ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL)) {
+ smux_send_status_cmd(ch);
+ updated = 1;
+ }
+
+ return updated;
+}
/**********************************************************************/
/* Kernel API */
@@ -2646,17 +2793,30 @@
if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
ch->local_mode = SMUX_LCH_MODE_NORMAL;
- /* Flow control */
+ /* RX Flow control */
if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
- ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
- ret = smux_send_status_cmd(ch);
- tx_ready = 1;
+ ch->rx_flow_control_client = 1;
+ tx_ready |= smux_rx_flow_control_updated(ch);
}
if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
- ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
- ret = smux_send_status_cmd(ch);
- tx_ready = 1;
+ ch->rx_flow_control_client = 0;
+ tx_ready |= smux_rx_flow_control_updated(ch);
+ }
+
+ /* Auto RX Flow Control */
+ if (set & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
+ SMUX_DBG("%s: auto rx flow control option enabled\n",
+ __func__);
+ ch->options |= SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
+ }
+
+ if (clear & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
+ SMUX_DBG("%s: auto rx flow control option disabled\n",
+ __func__);
+ ch->options &= ~SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
+ ch->rx_flow_control_auto = 0;
+ tx_ready |= smux_rx_flow_control_updated(ch);
}
spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
@@ -2880,16 +3040,16 @@
/* verify high watermark */
SMUX_DBG("%s: pending %d", __func__, ch->tx_pending_data_cnt);
- if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH) {
+ if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH) {
pr_err("%s: ch %d high watermark %d exceeded %d\n",
- __func__, lcid, SMUX_WM_HIGH,
+ __func__, lcid, SMUX_TX_WM_HIGH,
ch->tx_pending_data_cnt);
ret = -EAGAIN;
goto out_inner;
}
/* queue packet for transmit */
- if (++ch->tx_pending_data_cnt == SMUX_WM_HIGH) {
+ if (++ch->tx_pending_data_cnt == SMUX_TX_WM_HIGH) {
ch->notify_lwm = 1;
pr_err("%s: high watermark hit\n", __func__);
schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
@@ -2936,7 +3096,7 @@
ch = &smux_lch[lcid];
spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
- if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH)
+ if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH)
is_full = 1;
spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
@@ -2964,7 +3124,7 @@
ch = &smux_lch[lcid];
spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
- if (ch->tx_pending_data_cnt <= SMUX_WM_LOW)
+ if (ch->tx_pending_data_cnt <= SMUX_TX_WM_LOW)
is_low = 1;
spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
@@ -3264,7 +3424,7 @@
spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
if (power_up_uart)
- smux_uart_power_on();
+ smux_uart_power_on_atomic();
/* Disconnect from TTY */
smux.tty = NULL;
diff --git a/drivers/tty/smux_private.h b/drivers/tty/smux_private.h
index f644ff0..353c762 100644
--- a/drivers/tty/smux_private.h
+++ b/drivers/tty/smux_private.h
@@ -32,6 +32,10 @@
/* Maximum number of packets in retry queue */
#define SMUX_RX_RETRY_MAX_PKTS 32
+#define SMUX_RX_WM_HIGH 16
+#define SMUX_RX_WM_LOW 4
+#define SMUX_TX_WM_LOW 2
+#define SMUX_TX_WM_HIGH 4
struct tty_struct;
diff --git a/drivers/tty/smux_test.c b/drivers/tty/smux_test.c
index 62e9465..e488a63 100644
--- a/drivers/tty/smux_test.c
+++ b/drivers/tty/smux_test.c
@@ -43,37 +43,46 @@
* @failed - set to true if test fails
*/
#define UT_ASSERT_INT(a, cmp, b) \
- if (!((a)cmp(b))) { \
+ { \
+ int a_tmp = (a); \
+ int b_tmp = (b); \
+ if (!((a_tmp)cmp(b_tmp))) { \
i += scnprintf(buf + i, max - i, \
"%s:%d Fail: " #a "(%d) " #cmp " " #b "(%d)\n", \
__func__, __LINE__, \
- a, b); \
+ a_tmp, b_tmp); \
failed = 1; \
break; \
} \
- do {} while (0)
+ }
#define UT_ASSERT_PTR(a, cmp, b) \
- if (!((a)cmp(b))) { \
+ { \
+ void *a_tmp = (a); \
+ void *b_tmp = (b); \
+ if (!((a_tmp)cmp(b_tmp))) { \
i += scnprintf(buf + i, max - i, \
"%s:%d Fail: " #a "(%p) " #cmp " " #b "(%p)\n", \
__func__, __LINE__, \
- a, b); \
+ a_tmp, b_tmp); \
failed = 1; \
break; \
} \
- do {} while (0)
+ }
#define UT_ASSERT_UINT(a, cmp, b) \
- if (!((a)cmp(b))) { \
+ { \
+ unsigned a_tmp = (a); \
+ unsigned b_tmp = (b); \
+ if (!((a_tmp)cmp(b_tmp))) { \
i += scnprintf(buf + i, max - i, \
"%s:%d Fail: " #a "(%u) " #cmp " " #b "(%u)\n", \
__func__, __LINE__, \
- a, b); \
+ a_tmp, b_tmp); \
failed = 1; \
break; \
} \
- do {} while (0)
+ }
/**
* In-range unit test assertion for test cases.
@@ -94,16 +103,20 @@
* @failed - set to true if test fails
*/
#define UT_ASSERT_INT_IN_RANGE(a, minv, maxv) \
- if (((a) < (minv)) || ((a) > (maxv))) { \
+ { \
+ int a_tmp = (a); \
+ int minv_tmp = (minv); \
+ int maxv_tmp = (maxv); \
+ if (((a_tmp) < (minv_tmp)) || ((a_tmp) > (maxv_tmp))) { \
i += scnprintf(buf + i, max - i, \
"%s:%d Fail: " #a "(%d) < " #minv "(%d) or " \
#a "(%d) > " #maxv "(%d)\n", \
__func__, __LINE__, \
- a, minv, a, maxv); \
+ a_tmp, minv_tmp, a_tmp, maxv_tmp); \
failed = 1; \
break; \
} \
- do {} while (0)
+ }
static unsigned char test_array[] = {1, 1, 2, 3, 5, 8, 13, 21, 34, 55,
@@ -172,6 +185,8 @@
int event_disconnected_ssr;
int event_low_wm;
int event_high_wm;
+ int event_rx_retry_high_wm;
+ int event_rx_retry_low_wm;
/* TIOCM changes */
int event_tiocm;
@@ -222,6 +237,8 @@
cb->event_disconnected_ssr = 0;
cb->event_low_wm = 0;
cb->event_high_wm = 0;
+ cb->event_rx_retry_high_wm = 0;
+ cb->event_rx_retry_low_wm = 0;
cb->event_tiocm = 0;
cb->tiocm_meta.tiocm_old = 0;
cb->tiocm_meta.tiocm_new = 0;
@@ -282,15 +299,17 @@
"\tevent_disconnected_ssr=%d\n"
"\tevent_low_wm=%d\n"
"\tevent_high_wm=%d\n"
+ "\tevent_rx_retry_high_wm=%d\n"
+ "\tevent_rx_retry_low_wm=%d\n"
"\tevent_tiocm=%d\n"
"\tevent_read_done=%d\n"
"\tevent_read_failed=%d\n"
- "\tread_events=%d\n"
+ "\tread_events empty=%d\n"
"\tget_rx_retry=%d\n"
- "\tget_rx_retry_events=%d\n"
+ "\tget_rx_retry_events empty=%d\n"
"\tevent_write_done=%d\n"
"\tevent_write_failed=%d\n"
- "\twrite_events=%d\n",
+ "\twrite_events empty=%d\n",
cb->cb_count,
cb->cb_completion.done,
cb->event_connected,
@@ -298,12 +317,14 @@
cb->event_disconnected_ssr,
cb->event_low_wm,
cb->event_high_wm,
+ cb->event_rx_retry_high_wm,
+ cb->event_rx_retry_low_wm,
cb->event_tiocm,
cb->event_read_done,
cb->event_read_failed,
- !list_empty(&cb->read_events),
+ list_empty(&cb->read_events),
cb->get_rx_buff_retry_count,
- !list_empty(&cb->get_rx_buff_retry_events),
+ list_empty(&cb->get_rx_buff_retry_events),
cb->event_write_done,
cb->event_write_failed,
list_empty(&cb->write_events)
@@ -416,6 +437,19 @@
spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
break;
+ case SMUX_RX_RETRY_HIGH_WM_HIT:
+ spin_lock_irqsave(&cb_data_ptr->lock, flags);
+ ++cb_data_ptr->event_rx_retry_high_wm;
+ spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
+ break;
+
+ case SMUX_RX_RETRY_LOW_WM_HIT:
+ spin_lock_irqsave(&cb_data_ptr->lock, flags);
+ ++cb_data_ptr->event_rx_retry_low_wm;
+ spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
+ break;
+
+
case SMUX_TIOCM_UPDATE:
spin_lock_irqsave(&cb_data_ptr->lock, flags);
++cb_data_ptr->event_tiocm;
@@ -1315,7 +1349,7 @@
/* open port for loopback */
ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
SMUX_CH_OPTION_LOCAL_LOOPBACK,
- 0);
+ SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP);
UT_ASSERT_INT(ret, ==, 0);
ret = msm_smux_open(SMUX_TEST_LCID, &cb_data,
@@ -1568,6 +1602,132 @@
return i;
}
+/**
+ * Verify get_rx_buffer callback retry for auto-rx flow control.
+ *
+ * @buf Buffer for status message
+ * @max Size of buffer
+ *
+ * @returns Number of bytes written to @buf
+ */
+static int smux_ut_local_get_rx_buff_retry_auto(char *buf, int max)
+{
+ static struct smux_mock_callback cb_data;
+ static int cb_initialized;
+ int i = 0;
+ int failed = 0;
+ int ret;
+ int try;
+ int try_rx_retry_wm;
+
+ i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
+ pr_err("%s", buf);
+
+ if (!cb_initialized)
+ mock_cb_data_init(&cb_data);
+
+ mock_cb_data_reset(&cb_data);
+ smux_byte_loopback = SMUX_TEST_LCID;
+ while (!failed) {
+ /* open port for loopback */
+ ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
+ SMUX_CH_OPTION_LOCAL_LOOPBACK
+ | SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP,
+ 0);
+ UT_ASSERT_INT(ret, ==, 0);
+
+ ret = msm_smux_open(SMUX_TEST_LCID, &cb_data,
+ smux_mock_cb, get_rx_buffer_mock);
+ UT_ASSERT_INT(ret, ==, 0);
+ UT_ASSERT_INT(
+ (int)wait_for_completion_timeout(
+ &cb_data.cb_completion, HZ), >, 0);
+ UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+ UT_ASSERT_INT(cb_data.event_connected, ==, 1);
+ mock_cb_data_reset(&cb_data);
+
+ /* Test high rx-retry watermark */
+ get_rx_buffer_mock_fail = 1;
+ try_rx_retry_wm = 0;
+ for (try = 0; try < SMUX_RX_RETRY_MAX_PKTS; ++try) {
+ pr_err("%s: try %d\n", __func__, try);
+ ret = msm_smux_write(SMUX_TEST_LCID, (void *)1,
+ test_array, sizeof(test_array));
+ UT_ASSERT_INT(ret, ==, 0);
+ if (failed)
+ break;
+
+ if (!try_rx_retry_wm &&
+ cb_data.event_rx_retry_high_wm) {
+ /* RX high watermark hit */
+ try_rx_retry_wm = try + 1;
+ break;
+ }
+
+ while (cb_data.event_write_done <= try) {
+ UT_ASSERT_INT(
+ (int)wait_for_completion_timeout(
+ &cb_data.cb_completion, HZ),
+ >, 0);
+ INIT_COMPLETION(cb_data.cb_completion);
+ }
+ if (failed)
+ break;
+ }
+ if (failed)
+ break;
+
+ /* RX retry high watermark should have been set */
+ UT_ASSERT_INT(cb_data.event_rx_retry_high_wm, ==, 1);
+ UT_ASSERT_INT(try_rx_retry_wm, ==, SMUX_RX_WM_HIGH);
+
+ /*
+	 * Disable RX buffer allocation failure and wait for
+	 * SMUX_RX_WM_HIGH successful packets.
+ */
+ get_rx_buffer_mock_fail = 0;
+ while (cb_data.event_read_done < SMUX_RX_WM_HIGH) {
+ UT_ASSERT_INT(
+ (int)wait_for_completion_timeout(
+ &cb_data.cb_completion, 2*HZ),
+ >, 0);
+ INIT_COMPLETION(cb_data.cb_completion);
+ }
+ if (failed)
+ break;
+
+ UT_ASSERT_INT(0, ==, cb_data.event_read_failed);
+ UT_ASSERT_INT(SMUX_RX_WM_HIGH, ==,
+ cb_data.event_read_done);
+ UT_ASSERT_INT(cb_data.event_rx_retry_low_wm, ==, 1);
+ mock_cb_data_reset(&cb_data);
+
+ /* close port */
+ ret = msm_smux_close(SMUX_TEST_LCID);
+ UT_ASSERT_INT(ret, ==, 0);
+ UT_ASSERT_INT(
+ (int)wait_for_completion_timeout(
+ &cb_data.cb_completion, HZ),
+ >, 0);
+ UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+ UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
+ UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 0);
+ break;
+ }
+
+ if (!failed) {
+ i += scnprintf(buf + i, max - i, "\tOK\n");
+ } else {
+ pr_err("%s: Failed\n", __func__);
+ i += scnprintf(buf + i, max - i, "\tFailed\n");
+ i += mock_cb_data_print(&cb_data, buf + i, max - i);
+ msm_smux_close(SMUX_TEST_LCID);
+ }
+ smux_byte_loopback = 0;
+ mock_cb_data_reset(&cb_data);
+ return i;
+}
+
static char debug_buffer[DEBUG_BUFMAX];
static ssize_t debug_read(struct file *file, char __user *buf,
@@ -1631,6 +1791,8 @@
smux_ut_local_smuxld_receive_buf);
debug_create("ut_local_get_rx_buff_retry", 0444, dent,
smux_ut_local_get_rx_buff_retry);
+ debug_create("ut_local_get_rx_buff_retry_auto", 0444, dent,
+ smux_ut_local_get_rx_buff_retry_auto);
return 0;
}
diff --git a/drivers/video/msm/mdp4_overlay.c b/drivers/video/msm/mdp4_overlay.c
index 9b9654a..ad37d2f 100644
--- a/drivers/video/msm/mdp4_overlay.c
+++ b/drivers/video/msm/mdp4_overlay.c
@@ -116,13 +116,13 @@
if (!display_iclient)
return -EINVAL;
- *srcp_ihdl = ion_import_fd(display_iclient, mem_id);
+ *srcp_ihdl = ion_import_dma_buf(display_iclient, mem_id);
if (IS_ERR_OR_NULL(*srcp_ihdl)) {
- pr_err("ion_import_fd() failed\n");
+ pr_err("ion_import_dma_buf() failed\n");
return PTR_ERR(*srcp_ihdl);
}
- pr_debug("%s(): ion_hdl %p, ion_buf %p\n", __func__, *srcp_ihdl,
- ion_share(display_iclient, *srcp_ihdl));
+ pr_debug("%s(): ion_hdl %p, ion_buf %d\n", __func__, *srcp_ihdl,
+ ion_share_dma_buf(display_iclient, *srcp_ihdl));
pr_debug("mixer %u, pipe %u, plane %u\n", pipe->mixer_num,
pipe->pipe_ndx, plane);
if (ion_map_iommu(display_iclient, *srcp_ihdl,
diff --git a/drivers/video/msm/mdp4_overlay_writeback.c b/drivers/video/msm/mdp4_overlay_writeback.c
index 8dccf78..d59c4a8 100644
--- a/drivers/video/msm/mdp4_overlay_writeback.c
+++ b/drivers/video/msm/mdp4_overlay_writeback.c
@@ -413,7 +413,7 @@
else if (mfd->iclient) {
struct ion_handle *srcp_ihdl;
ulong len;
- srcp_ihdl = ion_import_fd(mfd->iclient,
+ srcp_ihdl = ion_import_dma_buf(mfd->iclient,
data->memory_id);
if (IS_ERR_OR_NULL(srcp_ihdl)) {
pr_err("%s: ion import fd failed\n", __func__);
diff --git a/drivers/video/msm/mdss/mdss_mdp_util.c b/drivers/video/msm/mdss/mdss_mdp_util.c
index 2e86806..f6b4fce 100644
--- a/drivers/video/msm/mdss/mdss_mdp_util.c
+++ b/drivers/video/msm/mdss/mdss_mdp_util.c
@@ -326,7 +326,7 @@
}
} else if (iclient) {
data->iclient = iclient;
- data->srcp_ihdl = ion_import_fd(iclient, img->memory_id);
+ data->srcp_ihdl = ion_import_dma_buf(iclient, img->memory_id);
if (IS_ERR_OR_NULL(data->srcp_ihdl))
return PTR_ERR(data->srcp_ihdl);
ret = ion_phys(iclient, data->srcp_ihdl,
diff --git a/drivers/video/msm/vidc/common/dec/vdec.c b/drivers/video/msm/vidc/common/dec/vdec.c
index ed8b452..634011b 100644
--- a/drivers/video/msm/vidc/common/dec/vdec.c
+++ b/drivers/video/msm/vidc/common/dec/vdec.c
@@ -891,7 +891,7 @@
vcd_h264_mv_buffer->client_data = (void *) mapped_buffer;
vcd_h264_mv_buffer->dev_addr = (u8 *)mapped_buffer->iova[0];
} else {
- client_ctx->h264_mv_ion_handle = ion_import_fd(
+ client_ctx->h264_mv_ion_handle = ion_import_dma_buf(
client_ctx->user_ion_client,
vcd_h264_mv_buffer->pmem_fd);
if (IS_ERR_OR_NULL(client_ctx->h264_mv_ion_handle)) {
@@ -1790,7 +1790,7 @@
}
put_pmem_file(pmem_file);
} else {
- client_ctx->seq_hdr_ion_handle = ion_import_fd(
+ client_ctx->seq_hdr_ion_handle = ion_import_dma_buf(
client_ctx->user_ion_client,
seq_header.pmem_fd);
if (!client_ctx->seq_hdr_ion_handle) {
diff --git a/drivers/video/msm/vidc/common/enc/venc_internal.c b/drivers/video/msm/vidc/common/enc/venc_internal.c
index 43e8d5e..9450ee7 100644
--- a/drivers/video/msm/vidc/common/enc/venc_internal.c
+++ b/drivers/video/msm/vidc/common/enc/venc_internal.c
@@ -1821,7 +1821,7 @@
control->client_data = (void *) mapped_buffer;
control->dev_addr = (u8 *)mapped_buffer->iova[0];
} else {
- client_ctx->recon_buffer_ion_handle[i] = ion_import_fd(
+ client_ctx->recon_buffer_ion_handle[i] = ion_import_dma_buf(
client_ctx->user_ion_client, control->pmem_fd);
if (IS_ERR_OR_NULL(client_ctx->recon_buffer_ion_handle[i])) {
ERR("%s(): get_ION_handle failed\n", __func__);
diff --git a/drivers/video/msm/vidc/common/init/vidc_init.c b/drivers/video/msm/vidc/common/init/vidc_init.c
index 23c990a..dcacb3c 100644
--- a/drivers/video/msm/vidc/common/init/vidc_init.c
+++ b/drivers/video/msm/vidc/common/init/vidc_init.c
@@ -627,7 +627,7 @@
buf_addr_table[*num_of_buffers].dev_addr =
mapped_buffer->iova[0];
} else {
- buff_ion_handle = ion_import_fd(
+ buff_ion_handle = ion_import_dma_buf(
client_ctx->user_ion_client, pmem_fd);
if (IS_ERR_OR_NULL(buff_ion_handle)) {
ERR("%s(): get_ION_handle failed\n",
diff --git a/include/linux/ion.h b/include/linux/ion.h
index fca5517..2519270 100644
--- a/include/linux/ion.h
+++ b/include/linux/ion.h
@@ -21,7 +21,6 @@
#include <linux/ioctl.h>
#include <linux/types.h>
-
struct ion_handle;
/**
* enum ion_heap_types - list of all possible types of heaps
@@ -261,6 +260,17 @@
#ifdef CONFIG_ION
/**
+ * ion_reserve() - reserve memory for ion heaps if applicable
+ * @data: platform data specifying starting physical address and
+ * size
+ *
+ * Calls memblock reserve to set aside memory for heaps that are
+ * located at specific memory addresses or of specific sizes not
+ * managed by the kernel
+ */
+void ion_reserve(struct ion_platform_data *data);
+
+/**
* ion_client_create() - allocate a client and returns it
* @dev: the global ion device
* @heap_mask: mask of heaps this client can allocate from
@@ -323,7 +333,7 @@
* This function queries the heap for a particular handle to get the
* handle's physical address. It't output is only correct if
* a heap returns physically contiguous memory -- in other cases
- * this api should not be implemented -- ion_map_dma should be used
+ * this api should not be implemented -- ion_sg_table should be used
* instead. Returns -EINVAL if the handle is invalid. This has
* no implications on the reference counting of the handle --
* the returned value may not be valid if the caller is not
@@ -333,6 +343,17 @@
ion_phys_addr_t *addr, size_t *len);
/**
+ * ion_map_dma - return an sg_table describing a handle
+ * @client: the client
+ * @handle: the handle
+ *
+ * This function returns the sg_table describing
+ * a particular ion handle.
+ */
+struct sg_table *ion_sg_table(struct ion_client *client,
+ struct ion_handle *handle);
+
+/**
* ion_map_kernel - create mapping for the given handle
* @client: the client
* @handle: handle to map
@@ -353,64 +374,22 @@
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);
/**
- * ion_map_dma - create a dma mapping for a given handle
+ * ion_share_dma_buf() - given an ion client, create a dma-buf fd
* @client: the client
- * @handle: handle to map
- *
- * Return an sglist describing the given handle
+ * @handle: the handle
*/
-struct scatterlist *ion_map_dma(struct ion_client *client,
- struct ion_handle *handle,
- unsigned long flags);
+int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle);
/**
- * ion_unmap_dma() - destroy a dma mapping for a handle
+ * ion_import_dma_buf() - given a dma-buf fd from the ion exporter, get a handle
* @client: the client
- * @handle: handle to unmap
- */
-void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle);
-
-/**
- * ion_share() - given a handle, obtain a buffer to pass to other clients
- * @client: the client
- * @handle: the handle to share
+ * @fd: the dma-buf fd
*
- * Given a handle, return a buffer, which exists in a global name
- * space, and can be passed to other clients. Should be passed into ion_import
- * to obtain a new handle for this buffer.
- *
- * NOTE: This function does do not an extra reference. The burden is on the
- * caller to make sure the buffer doesn't go away while it's being passed to
- * another client. That is, ion_free should not be called on this handle until
- * the buffer has been imported into the other client.
+ * Given a dma-buf fd that was allocated through ion via ion_share_dma_buf,
+ * import that fd and return a handle representing it. If a dma-buf from
+ * another exporter is passed in this function will return ERR_PTR(-EINVAL)
*/
-struct ion_buffer *ion_share(struct ion_client *client,
- struct ion_handle *handle);
-
-/**
- * ion_import() - given an buffer in another client, import it
- * @client: this blocks client
- * @buffer: the buffer to import (as obtained from ion_share)
- *
- * Given a buffer, add it to the client and return the handle to use to refer
- * to it further. This is called to share a handle from one kernel client to
- * another.
- */
-struct ion_handle *ion_import(struct ion_client *client,
- struct ion_buffer *buffer);
-
-/**
- * ion_import_fd() - given an fd obtained via ION_IOC_SHARE ioctl, import it
- * @client: this blocks client
- * @fd: the fd
- *
- * A helper function for drivers that will be recieving ion buffers shared
- * with them from userspace. These buffers are represented by a file
- * descriptor obtained as the return from the ION_IOC_SHARE ioctl.
- * This function coverts that fd into the underlying buffer, and returns
- * the handle to use to refer to it further.
- */
-struct ion_handle *ion_import_fd(struct ion_client *client, int fd);
+struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd);
/**
* ion_handle_get_flags - get the flags for a given handle
@@ -575,6 +554,11 @@
void *vaddr, unsigned long len, unsigned int cmd);
#else
+static inline void ion_reserve(struct ion_platform_data *data)
+{
+
+}
+
static inline struct ion_client *ion_client_create(struct ion_device *dev,
unsigned int heap_mask, const char *name)
{
@@ -605,6 +589,12 @@
return -ENODEV;
}
+static inline struct sg_table *ion_sg_table(struct ion_client *client,
+ struct ion_handle *handle)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline void *ion_map_kernel(struct ion_client *client,
struct ion_handle *handle, unsigned long flags)
{
@@ -614,29 +604,12 @@
static inline void ion_unmap_kernel(struct ion_client *client,
struct ion_handle *handle) { }
-static inline struct scatterlist *ion_map_dma(struct ion_client *client,
- struct ion_handle *handle, unsigned long flags)
+static inline int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
}
-static inline void ion_unmap_dma(struct ion_client *client,
- struct ion_handle *handle) { }
-
-static inline struct ion_buffer *ion_share(struct ion_client *client,
- struct ion_handle *handle)
-{
- return ERR_PTR(-ENODEV);
-}
-
-static inline struct ion_handle *ion_import(struct ion_client *client,
- struct ion_buffer *buffer)
-{
- return ERR_PTR(-ENODEV);
-}
-
-static inline struct ion_handle *ion_import_fd(struct ion_client *client,
- int fd)
+static inline struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
return ERR_PTR(-ENODEV);
}
diff --git a/include/linux/smux.h b/include/linux/smux.h
index 308f969..24a6371 100644
--- a/include/linux/smux.h
+++ b/include/linux/smux.h
@@ -77,6 +77,8 @@
SMUX_TIOCM_UPDATE,
SMUX_LOW_WM_HIT, /* @metadata is NULL */
SMUX_HIGH_WM_HIT, /* @metadata is NULL */
+ SMUX_RX_RETRY_HIGH_WM_HIT, /* @metadata is NULL */
+ SMUX_RX_RETRY_LOW_WM_HIT, /* @metadata is NULL */
};
/**
@@ -86,6 +88,7 @@
SMUX_CH_OPTION_LOCAL_LOOPBACK = 1 << 0,
SMUX_CH_OPTION_REMOTE_LOOPBACK = 1 << 1,
SMUX_CH_OPTION_REMOTE_TX_STOP = 1 << 2,
+ SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP = 1 << 3,
};
/**