Merge "msm: clock-local2: Use correct ops for extp_clk_src"
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
index 24b6b36..a0c7037 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
+++ b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
@@ -31,6 +31,10 @@
"HS200_1p2v" - indicates that host can support HS200 at 1.2v.
"DDR_1p8v" - indicates that host can support DDR mode at 1.8v.
"DDR_1p2v" - indicates that host can support DDR mode at 1.2v.
+ - qcom,cpu-dma-latency-us: specifies the acceptable CPU DMA latency in
+			microseconds. There is no default value; if this
+			property is not specified, the SDHC driver will not
+			vote for PM QoS.
In the following, <supply> can be vdd (flash core voltage) or vdd-io (I/O voltage).
- qcom,<supply>-always-on - specifies whether supply should be kept "on" always.
@@ -51,6 +55,18 @@
- qcom,pad-drv-off - Suspend drive strength configuration for sdc tlmm pins.
Tlmm pins are specified as <clk cmd data>
+ - qcom,bus-bw-vectors-bps: specifies an array of throughput values in
+   Bytes/sec. The values in the array correspond to the supported bus
+   speed modes. For example, if the host supports SDR12 mode, the
+   corresponding value is 13631488 Bytes/sec.
+ - Refer to "Documentation/devicetree/bindings/arm/msm/msm_bus.txt" for
+   the optional properties below:
+ - qcom,msm-bus,name
+ - qcom,msm-bus,num-cases
+ - qcom,msm-bus,active-only
+ - qcom,msm-bus,num-paths
+ - qcom,msm-bus,vectors-KBps
+
Example:
aliases {
@@ -86,6 +102,7 @@
<&msmgpio 36 0>, /* DATA2 */
<&msmgpio 35 0>; /* DATA3 */
qcom,gpio-names = "CLK", "CMD", "DAT0", "DAT1", "DAT2", "DAT3";
+ qcom,cpu-dma-latency-us = <200>;
};
sdhc_2: qcom,sdhc@f98a4900 {
@@ -104,4 +121,18 @@
qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
+
+ qcom,cpu-dma-latency-us = <200>;
+ qcom,msm-bus,name = "sdhc2";
+ qcom,msm-bus,num-cases = <7>;
+ qcom,msm-bus,active-only = <0>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps = <81 512 0 0>, /* No vote */
+ <81 512 6656 13312>, /* 13 MB/s*/
+ <81 512 13312 26624>, /* 26 MB/s */
+ <81 512 26624 53248>, /* 52 MB/s */
+ <81 512 53248 106496>, /* 104 MB/s */
+ <81 512 106496 212992>, /* 208 MB/s */
+ <81 512 2147483647 4294967295>; /* Max. bandwidth */
+ qcom,bus-bw-vectors-bps = <0 13631488 27262976 54525952 109051904 218103808 4294967295>;
};
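Illustrative sketch, not part of the patch: how a required throughput in Bytes/sec
can be mapped to an index into qcom,bus-bw-vectors-bps, mirroring the lookup the
sdhci-msm driver performs later in this series. The table values are the ones from
the example node above; bw_to_vote() and the numbers in main() are hypothetical and
for illustration only.

#include <stdio.h>

static const unsigned int bw_vecs_bps[] = {
	0, 13631488, 27262976, 54525952, 109051904, 218103808, 4294967295u
};
#define BW_VECS_SIZE (sizeof(bw_vecs_bps) / sizeof(bw_vecs_bps[0]))

/* Return the index of the first table entry that can satisfy bw */
static int bw_to_vote(unsigned int bw)
{
	int i;

	for (i = 0; i < (int)BW_VECS_SIZE; i++) {
		if (bw <= bw_vecs_bps[i])
			break;
	}
	/* Clamp to the last (maximum-bandwidth) entry if bw exceeds every value */
	if (i && i == (int)BW_VECS_SIZE)
		i--;

	return i;
}

int main(void)
{
	/* SDR12-class traffic (~13 MB/s) picks index 1, ~104 MB/s picks index 4 */
	printf("13631488 B/s  -> vote %d\n", bw_to_vote(13631488));
	printf("109051904 B/s -> vote %d\n", bw_to_vote(109051904));
	return 0;
}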
diff --git a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
index 8f602b6..df3f71c 100644
--- a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
+++ b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
@@ -17,14 +17,14 @@
- vdd_cx-supply: Reference to the regulator that supplies the vdd_cx domain.
- vdd_mx-supply: Reference to the regulator that supplies the memory rail.
- qcom,firmware-name: Base name of the firmware image. Ex. "mdsp"
-- qcom,pil-self-auth: <0> if the hardware does not require self-authenticating
- images and self-authentication is not desired;
- <1> if the hardware requires self-authenticating images.
-- qcom,is-loadable: if PIL is required to load the modem image
Optional properties:
- vdd_pll-supply: Reference to the regulator that supplies the PLL's rail.
- qcom,vdd_pll: Voltage to be set for the PLL's rail.
+- reg-names: "cxrail_bhs_reg" - control register for the modem power
+	      domain.
+- qcom,is-loadable: Boolean. Present if the image needs to be loaded.
+- qcom,pil-self-auth: Boolean. Present if the image requires self-authentication.
Example:
qcom,mss@fc880000 {
@@ -43,5 +43,5 @@
qcom,is-loadable;
qcom,firmware-name = "mba";
- qcom,pil-self-auth = <1>;
+ qcom,pil-self-auth;
};
diff --git a/arch/arm/boot/dts/msm8226.dtsi b/arch/arm/boot/dts/msm8226.dtsi
index 8111b40..46812e7 100644
--- a/arch/arm/boot/dts/msm8226.dtsi
+++ b/arch/arm/boot/dts/msm8226.dtsi
@@ -541,6 +541,29 @@
qcom,firmware-name = "adsp";
};
+ qcom,mss@fc880000 {
+ compatible = "qcom,pil-q6v5-mss";
+ reg = <0xfc880000 0x100>,
+ <0xfd485000 0x400>,
+ <0xfc820000 0x020>,
+ <0xfc401680 0x004>,
+ <0x0d1fc000 0x4000>,
+ <0xfd485194 0x4>;
+ reg-names = "qdsp6_base", "halt_base", "rmb_base",
+ "restart_reg", "metadata_base", "cxrail_bhs_reg";
+
+ interrupts = <0 24 1>;
+ vdd_mss-supply = <&pm8226_s1>;
+ vdd_cx-supply = <&pm8226_s1_corner>;
+ vdd_mx-supply = <&pm8226_l3>;
+ vdd_pll-supply = <&pm8226_l8>;
+ qcom,vdd_pll = <1800000>;
+
+ qcom,is-loadable;
+ qcom,firmware-name = "mba";
+ qcom,pil-self-auth;
+ };
+
qcom,msm-mem-hole {
compatible = "qcom,msm-mem-hole";
qcom,memblock-remove = <0x8400000 0x7b00000>; /* Address and Size of Hole */
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index ab6b7c8..1922591 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -344,6 +344,7 @@
interrupts = <0 123 0>, <0 138 0>;
interrupt-names = "hc_irq", "pwr_irq";
qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
+ qcom,cpu-dma-latency-us = <200>;
status = "disable";
};
@@ -355,6 +356,7 @@
interrupt-names = "hc_irq", "pwr_irq";
qcom,bus-width = <4>;
+ qcom,cpu-dma-latency-us = <200>;
status = "disable";
};
@@ -373,6 +375,7 @@
qcom,gpio-names = "CLK", "CMD", "DAT0", "DAT1", "DAT2", "DAT3";
qcom,bus-width = <4>;
+ qcom,cpu-dma-latency-us = <200>;
status = "disable";
};
@@ -391,6 +394,7 @@
qcom,gpio-names = "CLK", "CMD", "DAT0", "DAT1", "DAT2", "DAT3";
qcom,bus-width = <4>;
+ qcom,cpu-dma-latency-us = <200>;
status = "disable";
};
@@ -914,7 +918,7 @@
qcom,vdd_pll = <1800000>;
qcom,is-loadable;
qcom,firmware-name = "mba";
- qcom,pil-self-auth = <1>;
+ qcom,pil-self-auth;
};
qcom,pronto@fb21b000 {
diff --git a/arch/arm/configs/msm8974-perf_defconfig b/arch/arm/configs/msm8974-perf_defconfig
index c6be3c5..4b7065b 100644
--- a/arch/arm/configs/msm8974-perf_defconfig
+++ b/arch/arm/configs/msm8974-perf_defconfig
@@ -460,6 +460,6 @@
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_DEV_QCRYPTO=m
-CONFIG_CRYPTO_DEV_QCE=m
-CONFIG_CRYPTO_DEV_QCEDEV=m
+CONFIG_CRYPTO_DEV_QCE=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRC_CCITT=y
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index f77f04f..6a4737c 100644
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -485,6 +485,6 @@
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_DEV_QCRYPTO=m
-CONFIG_CRYPTO_DEV_QCE=m
-CONFIG_CRYPTO_DEV_QCEDEV=m
+CONFIG_CRYPTO_DEV_QCE=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRC_CCITT=y
diff --git a/arch/arm/mach-msm/clock-8226.c b/arch/arm/mach-msm/clock-8226.c
index b40617c..898b7e0 100644
--- a/arch/arm/mach-msm/clock-8226.c
+++ b/arch/arm/mach-msm/clock-8226.c
@@ -2559,7 +2559,7 @@
static struct branch_clk oxili_gfx3d_clk = {
.cbcr_reg = OXILI_GFX3D_CBCR,
- .has_sibling = 1,
+ .has_sibling = 0,
.max_div = 0,
.base = &virt_bases[MMSS_BASE],
.c = {
diff --git a/arch/arm/mach-msm/lpm_levels.c b/arch/arm/mach-msm/lpm_levels.c
index 539a4fe..8194721 100644
--- a/arch/arm/mach-msm/lpm_levels.c
+++ b/arch/arm/mach-msm/lpm_levels.c
@@ -205,6 +205,8 @@
int best_level_iter = msm_lpm_level_count + 1;
bool irqs_detect = false;
bool gpio_detect = false;
+ bool modify_event_timer;
+ uint32_t next_wakeup_us = time_param->sleep_us;
if (!msm_lpm_levels)
return NULL;
@@ -219,6 +221,8 @@
for (i = 0; i < msm_lpm_level_count; i++) {
struct msm_rpmrs_level *level = &msm_lpm_levels[i];
+ modify_event_timer = false;
+
if (!level->available)
continue;
@@ -228,6 +232,23 @@
if (time_param->latency_us < level->latency_us)
continue;
+ if (time_param->next_event_us &&
+ time_param->next_event_us < level->latency_us)
+ continue;
+
+ if (time_param->next_event_us) {
+ if ((time_param->next_event_us < time_param->sleep_us)
+ || ((time_param->next_event_us - level->latency_us) <
+ time_param->sleep_us)) {
+ modify_event_timer = true;
+ next_wakeup_us = time_param->next_event_us -
+ level->latency_us;
+ }
+ }
+
+ if (next_wakeup_us <= level->time_overhead_us)
+ continue;
+
if ((sleep_mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE) &&
!msm_lpm_irqs_detectable(&level->rs_limits,
irqs_detect, gpio_detect))
@@ -238,19 +259,19 @@
if (!cpu && msm_rpm_waiting_for_ack())
break;
- if (time_param->sleep_us <= 1) {
+ if (next_wakeup_us <= 1) {
pwr = level->energy_overhead;
- } else if (time_param->sleep_us <= level->time_overhead_us) {
- pwr = level->energy_overhead / time_param->sleep_us;
- } else if ((time_param->sleep_us >> 10)
+ } else if (next_wakeup_us <= level->time_overhead_us) {
+ pwr = level->energy_overhead / next_wakeup_us;
+ } else if ((next_wakeup_us >> 10)
> level->time_overhead_us) {
pwr = level->steady_state_power;
} else {
pwr = level->steady_state_power;
pwr -= (level->time_overhead_us *
level->steady_state_power) /
- time_param->sleep_us;
- pwr += level->energy_overhead / time_param->sleep_us;
+ next_wakeup_us;
+ pwr += level->energy_overhead / next_wakeup_us;
}
if (!best_level || best_level->rs_limits.power[cpu] >= pwr) {
@@ -261,6 +282,14 @@
best_level_iter = i;
if (power)
*power = pwr;
+ if (modify_event_timer &&
+ (sleep_mode !=
+ MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT))
+ time_param->modified_time_us =
+ time_param->next_event_us -
+ best_level->latency_us;
+ else
+ time_param->modified_time_us = 0;
}
}
if (best_level && !lpm_level_permitted(best_level_iter))
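For readability only, not part of the patch: the per-level average-power estimate
that the selection loop above now evaluates against next_wakeup_us rather than
time_param->sleep_us, pulled out into a standalone sketch with plain parameters
standing in for the struct msm_rpmrs_level fields. The numbers in main() are made up.

#include <stdint.h>
#include <stdio.h>

static uint32_t lpm_level_power(uint32_t sleep_us, uint32_t time_overhead_us,
				uint32_t energy_overhead,
				uint32_t steady_state_power)
{
	uint32_t pwr;

	if (sleep_us <= 1) {
		pwr = energy_overhead;
	} else if (sleep_us <= time_overhead_us) {
		/* Residency shorter than the entry/exit overhead: overhead dominates */
		pwr = energy_overhead / sleep_us;
	} else if ((sleep_us >> 10) > time_overhead_us) {
		/* Residency over ~1024x the overhead: overhead is negligible */
		pwr = steady_state_power;
	} else {
		/* Steady-state power, minus the time lost to overhead, plus the
		 * entry/exit energy amortized over the residency */
		pwr = steady_state_power;
		pwr -= (time_overhead_us * steady_state_power) / sleep_us;
		pwr += energy_overhead / sleep_us;
	}

	return pwr;
}

int main(void)
{
	/* Made-up numbers: a deep level only pays off for long residencies */
	printf("100 us residency:    %u\n", lpm_level_power(100, 500, 20000, 10));
	printf("100000 us residency: %u\n", lpm_level_power(100000, 500, 20000, 10));
	return 0;
}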
diff --git a/arch/arm/mach-msm/pil-q6v5-mss.c b/arch/arm/mach-msm/pil-q6v5-mss.c
index aa42f5b..1954ec3 100644
--- a/arch/arm/mach-msm/pil-q6v5-mss.c
+++ b/arch/arm/mach-msm/pil-q6v5-mss.c
@@ -70,6 +70,11 @@
#define MAX_SSR_REASON_LEN 81U
+/* External BHS */
+#define EXTERNAL_BHS_ON BIT(0)
+#define EXTERNAL_BHS_STATUS BIT(4)
+#define BHS_TIMEOUT_US 50
+
struct mba_data {
void __iomem *metadata_base;
void __iomem *rmb_base;
@@ -81,7 +86,7 @@
void *adsp_state_notifier;
u32 img_length;
struct q6v5_data *q6;
- int self_auth;
+ bool self_auth;
void *ramdump_dev;
void *smem_ramdump_dev;
bool crash_shutdown;
@@ -99,16 +104,34 @@
{
int ret;
struct device *dev = drv->desc.dev;
+ u32 regval;
ret = regulator_enable(drv->vreg);
if (ret)
dev_err(dev, "Failed to enable modem regulator.\n");
+ if (drv->cxrail_bhs) {
+ regval = readl_relaxed(drv->cxrail_bhs);
+ regval |= EXTERNAL_BHS_ON;
+ writel_relaxed(regval, drv->cxrail_bhs);
+
+ ret = readl_poll_timeout(drv->cxrail_bhs, regval,
+ regval & EXTERNAL_BHS_STATUS, 1, BHS_TIMEOUT_US);
+ }
+
return ret;
}
static int pil_mss_power_down(struct q6v5_data *drv)
{
+ u32 regval;
+
+ if (drv->cxrail_bhs) {
+ regval = readl_relaxed(drv->cxrail_bhs);
+ regval &= ~EXTERNAL_BHS_ON;
+ writel_relaxed(regval, drv->cxrail_bhs);
+ }
+
return regulator_disable(drv->vreg);
}
@@ -198,10 +221,15 @@
pil_q6v5_shutdown(pil);
pil_mss_disable_clks(drv);
- pil_mss_power_down(drv);
writel_relaxed(1, drv->restart_reg);
+	/*
+	 * Once the PBL starts executing, access to the cx_rail_bhs is
+	 * restricted until the gcc_mss reset is asserted.
+	 */
+ pil_mss_power_down(drv);
+
drv->is_booted = false;
return 0;
@@ -215,11 +243,6 @@
unsigned long start_addr = pil_get_entry_addr(pil);
int ret;
- /* Deassert reset to subsystem and wait for propagation */
- writel_relaxed(0, drv->restart_reg);
- mb();
- udelay(2);
-
/*
* Bring subsystem out of reset and enable required
* regulators and clocks.
@@ -228,6 +251,11 @@
if (ret)
goto err_power;
+ /* Deassert reset to subsystem and wait for propagation */
+ writel_relaxed(0, drv->restart_reg);
+ mb();
+ udelay(2);
+
ret = pil_mss_enable_clks(drv);
if (ret)
goto err_clks;
@@ -679,8 +707,8 @@
q6_desc->owner = THIS_MODULE;
q6_desc->proxy_timeout = PROXY_TIMEOUT_MS;
- of_property_read_u32(pdev->dev.of_node, "qcom,pil-self-auth",
- &drv->self_auth);
+ drv->self_auth = of_property_read_bool(pdev->dev.of_node,
+ "qcom,pil-self-auth");
if (drv->self_auth) {
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"rmb_base");
@@ -721,6 +749,13 @@
return ret;
}
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "cxrail_bhs_reg");
+ if (res)
+ q6->cxrail_bhs = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+
+
q6->ahb_clk = devm_clk_get(&pdev->dev, "iface_clk");
if (IS_ERR(q6->ahb_clk))
return PTR_ERR(q6->ahb_clk);
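Minimal userspace sketch, not part of the patch: the enable-and-poll sequence used
for the external BHS in pil_mss_power_up(). The bit positions and timeout match the
defines added in this patch; the MMIO register is replaced by a plain word and
readl_poll_timeout() by a hypothetical bounded busy-wait.

#include <stdint.h>
#include <stdio.h>

#define EXTERNAL_BHS_ON     (1u << 0)
#define EXTERNAL_BHS_STATUS (1u << 4)
#define BHS_TIMEOUT_US      50

/* Stand-in for MMIO: report the BHS as settled once it has been switched on */
static uint32_t bhs_read(const uint32_t *reg)
{
	return (*reg & EXTERNAL_BHS_ON) ? (*reg | EXTERNAL_BHS_STATUS) : *reg;
}

/* Set the ON bit, then poll the STATUS bit with a bounded busy-wait */
static int bhs_enable(uint32_t *reg)
{
	unsigned int waited_us = 0;

	*reg = bhs_read(reg) | EXTERNAL_BHS_ON;

	while (!(bhs_read(reg) & EXTERNAL_BHS_STATUS)) {
		if (++waited_us > BHS_TIMEOUT_US)
			return -1;	/* readl_poll_timeout() returns -ETIMEDOUT here */
	}
	return 0;
}

int main(void)
{
	uint32_t reg = 0;

	printf("bhs_enable: %d (reg = 0x%x)\n", bhs_enable(&reg), reg);
	return 0;
}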
diff --git a/arch/arm/mach-msm/pil-q6v5.h b/arch/arm/mach-msm/pil-q6v5.h
index 0d986a6..48d10df 100644
--- a/arch/arm/mach-msm/pil-q6v5.h
+++ b/arch/arm/mach-msm/pil-q6v5.h
@@ -22,12 +22,13 @@
struct q6v5_data {
void __iomem *reg_base;
- struct clk *xo; /* XO clock source */
- struct clk *ahb_clk; /* PIL access to registers */
- struct clk *axi_clk; /* CPU access to memory */
- struct clk *core_clk; /* CPU core */
- struct clk *reg_clk; /* CPU access registers */
- struct clk *rom_clk; /* Boot ROM */
+ void __iomem *cxrail_bhs; /* External BHS register */
+ struct clk *xo; /* XO clock source */
+ struct clk *ahb_clk; /* PIL access to registers */
+ struct clk *axi_clk; /* CPU access to memory */
+ struct clk *core_clk; /* CPU core */
+ struct clk *reg_clk; /* CPU access registers */
+ struct clk *rom_clk; /* Boot ROM */
void __iomem *axi_halt_base;
void __iomem *restart_reg;
struct regulator *vreg;
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index b1cab9b..c1f2423 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -127,7 +127,7 @@
void (*irq_control)(struct adreno_device *, int);
unsigned int (*irq_pending)(struct adreno_device *);
void * (*snapshot)(struct adreno_device *, void *, int *, int);
- void (*rb_init)(struct adreno_device *, struct adreno_ringbuffer *);
+ int (*rb_init)(struct adreno_device *, struct adreno_ringbuffer *);
void (*start)(struct adreno_device *);
unsigned int (*busy_cycles)(struct adreno_device *);
};
diff --git a/drivers/gpu/msm/adreno_a2xx.c b/drivers/gpu/msm/adreno_a2xx.c
index 6db6e7b..335d407 100644
--- a/drivers/gpu/msm/adreno_a2xx.c
+++ b/drivers/gpu/msm/adreno_a2xx.c
@@ -1825,13 +1825,16 @@
(mh & kgsl_mmu_get_int_mask())) ? 1 : 0;
}
-static void a2xx_rb_init(struct adreno_device *adreno_dev,
+static int a2xx_rb_init(struct adreno_device *adreno_dev,
struct adreno_ringbuffer *rb)
{
unsigned int *cmds, cmds_gpu;
/* ME_INIT */
cmds = adreno_ringbuffer_allocspace(rb, NULL, 19);
+ if (cmds == NULL)
+ return -ENOMEM;
+
cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*(rb->wptr-19);
GSL_RB_WRITE(cmds, cmds_gpu, cp_type3_packet(CP_ME_INIT, 18));
@@ -1884,6 +1887,8 @@
GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
adreno_ringbuffer_submit(rb);
+
+ return 0;
}
static unsigned int a2xx_busy_cycles(struct adreno_device *adreno_dev)
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index 73a7f52..08c800e 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -2486,11 +2486,14 @@
}
}
-static void a3xx_rb_init(struct adreno_device *adreno_dev,
+static int a3xx_rb_init(struct adreno_device *adreno_dev,
struct adreno_ringbuffer *rb)
{
unsigned int *cmds, cmds_gpu;
cmds = adreno_ringbuffer_allocspace(rb, NULL, 18);
+ if (cmds == NULL)
+ return -ENOMEM;
+
cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint) * (rb->wptr - 18);
GSL_RB_WRITE(cmds, cmds_gpu, cp_type3_packet(CP_ME_INIT, 17));
@@ -2514,6 +2517,8 @@
GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
adreno_ringbuffer_submit(rb);
+
+ return 0;
}
static void a3xx_err_callback(struct adreno_device *adreno_dev, int bit)
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 1d25646..5cdf911 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -445,7 +445,9 @@
adreno_regwrite(device, REG_CP_ME_CNTL, 0);
/* ME init is GPU specific, so jump into the sub-function */
- adreno_dev->gpudev->rb_init(adreno_dev, rb);
+ status = adreno_dev->gpudev->rb_init(adreno_dev, rb);
+ if (status)
+ return status;
/* idle device to validate ME INIT */
status = adreno_idle(device);
diff --git a/drivers/gpu/msm/adreno_snapshot.c b/drivers/gpu/msm/adreno_snapshot.c
index 26be9da..c8229e7 100644
--- a/drivers/gpu/msm/adreno_snapshot.c
+++ b/drivers/gpu/msm/adreno_snapshot.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -177,10 +177,11 @@
{ 8, 2 },
};
-static void ib_parse_load_state(struct kgsl_device *device, unsigned int *pkt,
+static int ib_parse_load_state(struct kgsl_device *device, unsigned int *pkt,
unsigned int ptbase)
{
unsigned int block, source, type;
+ int ret = 0;
/*
* The object here is to find indirect shaders i.e - shaders loaded from
@@ -192,7 +193,7 @@
*/
if (type3_pkt_size(pkt[0]) < 2)
- return;
+ return 0;
/*
* pkt[1] 18:16 - source
@@ -220,8 +221,14 @@
pkt[2] & 0xFFFFFFFC,
(((pkt[1] >> 22) & 0x03FF) * unitsize) << 2,
SNAPSHOT_GPU_OBJECT_SHADER);
+
+ if (ret < 0)
+ return -EINVAL;
+
snapshot_frozen_objsize += ret;
}
+
+ return ret;
}
/*
@@ -229,23 +236,31 @@
* visiblity stream size buffer.
*/
-static void ib_parse_set_bin_data(struct kgsl_device *device, unsigned int *pkt,
+static int ib_parse_set_bin_data(struct kgsl_device *device, unsigned int *pkt,
unsigned int ptbase)
{
int ret;
if (type3_pkt_size(pkt[0]) < 2)
- return;
+ return 0;
/* Visiblity stream buffer */
ret = kgsl_snapshot_get_object(device, ptbase, pkt[1], 0,
SNAPSHOT_GPU_OBJECT_GENERIC);
+
+ if (ret < 0)
+ return -EINVAL;
+
snapshot_frozen_objsize += ret;
/* visiblity stream size buffer (fixed size 8 dwords) */
ret = kgsl_snapshot_get_object(device, ptbase, pkt[2], 32,
SNAPSHOT_GPU_OBJECT_GENERIC);
- snapshot_frozen_objsize += ret;
+
+ if (ret >= 0)
+ snapshot_frozen_objsize += ret;
+
+ return ret;
}
/*
@@ -254,13 +269,13 @@
* buffers that are written to as frozen
*/
-static void ib_parse_mem_write(struct kgsl_device *device, unsigned int *pkt,
+static int ib_parse_mem_write(struct kgsl_device *device, unsigned int *pkt,
unsigned int ptbase)
{
int ret;
if (type3_pkt_size(pkt[0]) < 1)
- return;
+ return 0;
/*
* The address is where the data in the rest of this packet is written
@@ -272,7 +287,10 @@
ret = kgsl_snapshot_get_object(device, ptbase, pkt[1] & 0xFFFFFFFC, 0,
SNAPSHOT_GPU_OBJECT_GENERIC);
- snapshot_frozen_objsize += ret;
+ if (ret >= 0)
+ snapshot_frozen_objsize += ret;
+
+ return ret;
}
/*
@@ -282,19 +300,22 @@
* frozen with the others
*/
-static void ib_parse_draw_indx(struct kgsl_device *device, unsigned int *pkt,
+static int ib_parse_draw_indx(struct kgsl_device *device, unsigned int *pkt,
unsigned int ptbase)
{
- int ret, i;
+ int ret = 0, i;
if (type3_pkt_size(pkt[0]) < 3)
- return;
+ return 0;
/* DRAW_IDX may have a index buffer pointer */
if (type3_pkt_size(pkt[0]) > 3) {
ret = kgsl_snapshot_get_object(device, ptbase, pkt[4], pkt[5],
SNAPSHOT_GPU_OBJECT_GENERIC);
+ if (ret < 0)
+ return -EINVAL;
+
snapshot_frozen_objsize += ret;
}
@@ -310,6 +331,9 @@
ret = kgsl_snapshot_get_object(device, ptbase,
vsc_pipe[i].base, vsc_pipe[i].size,
SNAPSHOT_GPU_OBJECT_GENERIC);
+ if (ret < 0)
+ return -EINVAL;
+
snapshot_frozen_objsize += ret;
}
}
@@ -320,6 +344,9 @@
ret = kgsl_snapshot_get_object(device, ptbase,
vsc_size_address, 32,
SNAPSHOT_GPU_OBJECT_GENERIC);
+ if (ret < 0)
+ return -EINVAL;
+
snapshot_frozen_objsize += ret;
}
@@ -328,6 +355,9 @@
ret = kgsl_snapshot_get_object(device, ptbase,
sp_vs_pvt_mem_addr, 8192,
SNAPSHOT_GPU_OBJECT_GENERIC);
+ if (ret < 0)
+ return -EINVAL;
+
snapshot_frozen_objsize += ret;
sp_vs_pvt_mem_addr = 0;
}
@@ -336,6 +366,9 @@
ret = kgsl_snapshot_get_object(device, ptbase,
sp_fs_pvt_mem_addr, 8192,
SNAPSHOT_GPU_OBJECT_GENERIC);
+ if (ret < 0)
+ return -EINVAL;
+
snapshot_frozen_objsize += ret;
sp_fs_pvt_mem_addr = 0;
}
@@ -358,6 +391,9 @@
ret = kgsl_snapshot_get_object(device, ptbase,
vbo[i].base,
0, SNAPSHOT_GPU_OBJECT_GENERIC);
+ if (ret < 0)
+ return -EINVAL;
+
snapshot_frozen_objsize += ret;
}
@@ -367,6 +403,8 @@
vfd_control_0 = 0;
vfd_index_max = 0;
+
+ return ret;
}
/*
@@ -374,23 +412,21 @@
* such as additional GPU buffers to grab or a draw initator
*/
-static void ib_parse_type3(struct kgsl_device *device, unsigned int *ptr,
+static int ib_parse_type3(struct kgsl_device *device, unsigned int *ptr,
unsigned int ptbase)
{
- switch (cp_type3_opcode(*ptr)) {
- case CP_LOAD_STATE:
- ib_parse_load_state(device, ptr, ptbase);
- break;
- case CP_SET_BIN_DATA:
- ib_parse_set_bin_data(device, ptr, ptbase);
- break;
- case CP_MEM_WRITE:
- ib_parse_mem_write(device, ptr, ptbase);
- break;
- case CP_DRAW_INDX:
- ib_parse_draw_indx(device, ptr, ptbase);
- break;
- }
+ int opcode = cp_type3_opcode(*ptr);
+
+ if (opcode == CP_LOAD_STATE)
+ return ib_parse_load_state(device, ptr, ptbase);
+ else if (opcode == CP_SET_BIN_DATA)
+ return ib_parse_set_bin_data(device, ptr, ptbase);
+ else if (opcode == CP_MEM_WRITE)
+ return ib_parse_mem_write(device, ptr, ptbase);
+ else if (opcode == CP_DRAW_INDX)
+ return ib_parse_draw_indx(device, ptr, ptbase);
+
+ return 0;
}
/*
@@ -474,9 +510,12 @@
}
}
+static inline int parse_ib(struct kgsl_device *device, unsigned int ptbase,
+ unsigned int gpuaddr, unsigned int dwords);
+
/* Add an IB as a GPU object, but first, parse it to find more goodies within */
-static void ib_add_gpu_object(struct kgsl_device *device, unsigned int ptbase,
+static int ib_add_gpu_object(struct kgsl_device *device, unsigned int ptbase,
unsigned int gpuaddr, unsigned int dwords)
{
int i, ret, rem = dwords;
@@ -487,13 +526,13 @@
*/
if (kgsl_snapshot_have_object(device, ptbase, gpuaddr, dwords << 2))
- return;
+ return 0;
src = (unsigned int *) adreno_convertaddr(device, ptbase, gpuaddr,
dwords << 2);
if (src == NULL)
- return;
+ return -EINVAL;
for (i = 0; rem > 0; rem--, i++) {
int pktsize;
@@ -513,26 +552,23 @@
if (adreno_cmd_is_ib(src[i])) {
unsigned int gpuaddr = src[i + 1];
unsigned int size = src[i + 2];
- unsigned int ibbase;
- /* Address of the last processed IB2 */
- kgsl_regread(device, REG_CP_IB2_BASE, &ibbase);
+ ret = parse_ib(device, ptbase, gpuaddr, size);
+ /* If adding the IB failed then stop parsing */
+ if (ret < 0)
+ goto done;
+ } else {
+ ret = ib_parse_type3(device, &src[i], ptbase);
/*
- * If this is the last IB2 that was executed,
- * then push it to make sure it goes into the
- * static space
+ * If the parse function failed (probably
+ * because of a bad decode) then bail out and
+ * just capture the binary IB data
*/
- if (ibbase == gpuaddr)
- push_object(device,
- SNAPSHOT_OBJ_TYPE_IB, ptbase,
- gpuaddr, size);
- else
- ib_add_gpu_object(device, ptbase,
- gpuaddr, size);
- } else
- ib_parse_type3(device, &src[i], ptbase);
+ if (ret < 0)
+ goto done;
+ }
} else if (pkt_is_type0(src[i])) {
ib_parse_type0(device, &src[i], ptbase);
}
@@ -541,10 +577,44 @@
rem -= pktsize;
}
+done:
ret = kgsl_snapshot_get_object(device, ptbase, gpuaddr, dwords << 2,
SNAPSHOT_GPU_OBJECT_IB);
- snapshot_frozen_objsize += ret;
+ if (ret >= 0)
+ snapshot_frozen_objsize += ret;
+
+ return ret;
+}
+
+/*
+ * We want to store the last executed IB1 and IB2 in the static region to ensure
+ * that we get at least some information out of the snapshot even if we can't
+ * access the dynamic data from the sysfs file. Push all other IBs on the
+ * dynamic list
+ */
+static inline int parse_ib(struct kgsl_device *device, unsigned int ptbase,
+ unsigned int gpuaddr, unsigned int dwords)
+{
+ unsigned int ib1base, ib2base;
+ int ret = 0;
+
+ /*
+ * Check the IB address - if it is either the last executed IB1 or the
+ * last executed IB2 then push it into the static blob otherwise put
+ * it in the dynamic list
+ */
+
+ kgsl_regread(device, REG_CP_IB1_BASE, &ib1base);
+ kgsl_regread(device, REG_CP_IB2_BASE, &ib2base);
+
+ if (gpuaddr == ib1base || gpuaddr == ib2base)
+ push_object(device, SNAPSHOT_OBJ_TYPE_IB, ptbase,
+ gpuaddr, dwords);
+ else
+ ret = ib_add_gpu_object(device, ptbase, gpuaddr, dwords);
+
+ return ret;
}
/* Snapshot the ringbuffer memory */
@@ -697,12 +767,11 @@
* others get marked at GPU objects
*/
- if (ibaddr == ibbase || memdesc != NULL)
+ if (memdesc != NULL)
push_object(device, SNAPSHOT_OBJ_TYPE_IB,
ptbase, ibaddr, ibsize);
else
- ib_add_gpu_object(device, ptbase, ibaddr,
- ibsize);
+ parse_ib(device, ptbase, ibaddr, ibsize);
}
index = index + 1;
@@ -725,7 +794,7 @@
struct kgsl_snapshot_obj *obj = priv;
unsigned int *src = obj->ptr;
unsigned int *dst = snapshot + sizeof(*header);
- int i;
+ int i, ret;
if (remain < (obj->dwords << 2) + sizeof(*header)) {
KGSL_DRV_ERR(device,
@@ -747,10 +816,14 @@
continue;
if (adreno_cmd_is_ib(*src))
- push_object(device, SNAPSHOT_OBJ_TYPE_IB,
- obj->ptbase, src[1], src[2]);
+ ret = parse_ib(device, obj->ptbase, src[1],
+ src[2]);
else
- ib_parse_type3(device, src, obj->ptbase);
+ ret = ib_parse_type3(device, src, obj->ptbase);
+
+ /* Stop parsing if the type3 decode fails */
+ if (ret < 0)
+ break;
}
}
diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c
index f967cd2..76998db 100644
--- a/drivers/gpu/msm/kgsl_debugfs.c
+++ b/drivers/gpu/msm/kgsl_debugfs.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2008-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2008-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -247,8 +247,8 @@
kgsl_get_memory_usage(usage, sizeof(usage), m->flags);
- seq_printf(s, "%08x %8d %5d %5s %10s %16s %5d\n",
- m->gpuaddr, m->size, entry->id, flags,
+ seq_printf(s, "%08x %08lx %8d %5d %5s %10s %16s %5d\n",
+ m->gpuaddr, m->useraddr, m->size, entry->id, flags,
memtype_str(entry->memtype), usage, m->sglen);
}
@@ -259,8 +259,9 @@
struct kgsl_process_private *private = s->private;
int next = 0;
- seq_printf(s, "%8s %8s %5s %5s %10s %16s %5s\n",
- "gpuaddr", "size", "id", "flags", "type", "usage", "sglen");
+ seq_printf(s, "%8s %8s %8s %5s %5s %10s %16s %5s\n",
+ "gpuaddr", "useraddr", "size", "id", "flags", "type",
+ "usage", "sglen");
/* print all entries with a GPU address */
spin_lock(&private->mem_lock);
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index 18aed14..f71cf8c 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -68,8 +68,10 @@
device = kgsl_driver.devp[KGSL_DEVICE_3D0];
if (device->mmu.mmu_ops->mmu_setup_pt != NULL) {
status = device->mmu.mmu_ops->mmu_setup_pt(&device->mmu, pt);
- if (status)
+ if (status) {
+ i = KGSL_DEVICE_MAX - 1;
goto error_pt;
+ }
}
return status;
error_pt:
diff --git a/drivers/gpu/msm/kgsl_snapshot.c b/drivers/gpu/msm/kgsl_snapshot.c
index e9bbac8..0935f64 100644
--- a/drivers/gpu/msm/kgsl_snapshot.c
+++ b/drivers/gpu/msm/kgsl_snapshot.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -339,14 +339,14 @@
if (entry == NULL) {
KGSL_DRV_ERR(device, "Unable to find GPU buffer %8.8X\n",
gpuaddr);
- return 0;
+ return -EINVAL;
}
/* We can't freeze external memory, because we don't own it */
if (entry->memtype != KGSL_MEM_ENTRY_KERNEL) {
KGSL_DRV_ERR(device,
"Only internal GPU buffers can be frozen\n");
- return 0;
+ return -EINVAL;
}
/*
@@ -369,7 +369,7 @@
if (size + offset > entry->memdesc.size) {
KGSL_DRV_ERR(device, "Invalid size for GPU buffer %8.8X\n",
gpuaddr);
- return 0;
+ return -EINVAL;
}
/* If the buffer is already on the list, skip it */
@@ -386,14 +386,14 @@
if (kgsl_memdesc_map(&entry->memdesc) == NULL) {
KGSL_DRV_ERR(device, "Unable to map GPU buffer %X\n",
gpuaddr);
- return 0;
+ return -EINVAL;
}
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (obj == NULL) {
KGSL_DRV_ERR(device, "Unable to allocate memory\n");
- return 0;
+ return -EINVAL;
}
/* Ref count the mem entry */
@@ -518,6 +518,7 @@
struct kgsl_snapshot_header *header = device->snapshot;
int remain = device->snapshot_maxsize - sizeof(*header);
void *snapshot;
+ struct timespec boot;
/*
* The first hang is always the one we are interested in. To
@@ -559,7 +560,13 @@
snapshot = device->ftbl->snapshot(device, snapshot, &remain,
hang);
- device->snapshot_timestamp = get_seconds();
+ /*
+ * The timestamp is the seconds since boot so it is easier to match to
+ * the kernel log
+ */
+
+ getboottime(&boot);
+ device->snapshot_timestamp = get_seconds() - boot.tv_sec;
device->snapshot_size = (int) (snapshot - device->snapshot);
/* Freeze the snapshot on a hang until it gets read */
@@ -655,7 +662,7 @@
/* Show the timestamp of the last collected snapshot */
static ssize_t timestamp_show(struct kgsl_device *device, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%x\n", device->snapshot_timestamp);
+ return snprintf(buf, PAGE_SIZE, "%d\n", device->snapshot_timestamp);
}
/* manually trigger a new snapshot to be collected */
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 07850b9..27e6c79 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -33,6 +33,7 @@
#include <linux/slab.h>
#include <linux/mmc/mmc.h>
#include <mach/gpio.h>
+#include <mach/msm_bus.h>
#include "sdhci-pltfm.h"
@@ -78,6 +79,7 @@
/* 8KB descriptors */
#define SDHCI_MSM_MAX_SEGMENTS (1 << 13)
+#define SDHCI_MSM_MMC_CLK_GATE_DELAY 200 /* msecs */
static const u32 tuning_block_64[] = {
0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE,
@@ -179,6 +181,12 @@
struct sdhci_msm_pad_data *pad_data;
};
+struct sdhci_msm_bus_voting_data {
+ struct msm_bus_scale_pdata *bus_pdata;
+ unsigned int *bw_vecs;
+ unsigned int bw_vecs_size;
+};
+
struct sdhci_msm_pltfm_data {
/* Supported UHS-I Modes */
u32 caps;
@@ -191,17 +199,32 @@
struct sdhci_msm_slot_reg_data *vreg_data;
bool nonremovable;
struct sdhci_msm_pin_data *pin_data;
+ u32 cpu_dma_latency_us;
+ struct sdhci_msm_bus_voting_data *voting_data;
+};
+
+struct sdhci_msm_bus_vote {
+ uint32_t client_handle;
+ uint32_t curr_vote;
+ int min_bw_vote;
+ int max_bw_vote;
+ bool is_max_bw_needed;
+ struct delayed_work vote_work;
+ struct device_attribute max_bus_bw;
};
struct sdhci_msm_host {
+ struct platform_device *pdev;
void __iomem *core_mem; /* MSM SDCC mapped address */
struct clk *clk; /* main SD/MMC bus clock */
struct clk *pclk; /* SDHC peripheral bus clock */
struct clk *bus_clk; /* SDHC bus voter clock */
+ atomic_t clks_on; /* Set if clocks are enabled */
struct sdhci_msm_pltfm_data *pdata;
struct mmc_host *mmc;
struct sdhci_pltfm_data sdhci_msm_pdata;
wait_queue_head_t pwr_irq_wait;
+ struct sdhci_msm_bus_vote msm_bus_vote;
};
enum vdd_io_level {
@@ -1043,6 +1066,7 @@
struct sdhci_msm_pltfm_data *pdata = NULL;
struct device_node *np = dev->of_node;
u32 bus_width = 0;
+ u32 cpu_dma_latency;
int len, i;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
@@ -1061,6 +1085,10 @@
pdata->mmc_bus_width = 0;
}
+ if (!of_property_read_u32(np, "qcom,cpu-dma-latency-us",
+ &cpu_dma_latency))
+ pdata->cpu_dma_latency_us = cpu_dma_latency;
+
pdata->vreg_data = devm_kzalloc(dev, sizeof(struct
sdhci_msm_slot_reg_data),
GFP_KERNEL);
@@ -1118,6 +1146,218 @@
return NULL;
}
+/* Returns required bandwidth in Bytes per Sec */
+static unsigned int sdhci_get_bw_required(struct sdhci_host *host,
+ struct mmc_ios *ios)
+{
+ unsigned int bw;
+
+ bw = host->clock;
+ /*
+	 * For DDR mode, the SDCC controller clock runs at double the
+	 * rate of the actual clock that goes to the card.
+ */
+ if (ios->bus_width == MMC_BUS_WIDTH_4)
+ bw /= 2;
+ else if (ios->bus_width == MMC_BUS_WIDTH_1)
+ bw /= 8;
+
+ return bw;
+}
+
+static int sdhci_msm_bus_get_vote_for_bw(struct sdhci_msm_host *host,
+ unsigned int bw)
+{
+ unsigned int *table = host->pdata->voting_data->bw_vecs;
+ unsigned int size = host->pdata->voting_data->bw_vecs_size;
+ int i;
+
+ if (host->msm_bus_vote.is_max_bw_needed && bw)
+ return host->msm_bus_vote.max_bw_vote;
+
+ for (i = 0; i < size; i++) {
+ if (bw <= table[i])
+ break;
+ }
+
+ if (i && (i == size))
+ i--;
+
+ return i;
+}
+
+/*
+ * This function must be called with host lock acquired.
+ * Caller of this function should also ensure that msm bus client
+ * handle is not null.
+ */
+static inline int sdhci_msm_bus_set_vote(struct sdhci_msm_host *msm_host,
+ int vote,
+ unsigned long flags)
+{
+ struct sdhci_host *host = platform_get_drvdata(msm_host->pdev);
+ int rc = 0;
+
+ if (vote != msm_host->msm_bus_vote.curr_vote) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ rc = msm_bus_scale_client_update_request(
+ msm_host->msm_bus_vote.client_handle, vote);
+ spin_lock_irqsave(&host->lock, flags);
+ if (rc) {
+ pr_err("%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
+ mmc_hostname(host->mmc),
+ msm_host->msm_bus_vote.client_handle, vote, rc);
+ goto out;
+ }
+ msm_host->msm_bus_vote.curr_vote = vote;
+ }
+out:
+ return rc;
+}
+
+/*
+ * Internal work item that drops the msm bus bandwidth vote to 0.
+ */
+static void sdhci_msm_bus_work(struct work_struct *work)
+{
+ struct sdhci_msm_host *msm_host;
+ struct sdhci_host *host;
+ unsigned long flags;
+
+ msm_host = container_of(work, struct sdhci_msm_host,
+ msm_bus_vote.vote_work.work);
+ host = platform_get_drvdata(msm_host->pdev);
+
+ if (!msm_host->msm_bus_vote.client_handle)
+ return;
+
+ spin_lock_irqsave(&host->lock, flags);
+ /* don't vote for 0 bandwidth if any request is in progress */
+ if (!host->mrq) {
+ sdhci_msm_bus_set_vote(msm_host,
+ msm_host->msm_bus_vote.min_bw_vote, flags);
+ } else
+		pr_warning("%s: %s: Transfer in progress; skipping bus voting to 0 bandwidth\n",
+ mmc_hostname(host->mmc), __func__);
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+/*
+ * This function cancels any scheduled delayed work and sets the bus
+ * vote based on bw (bandwidth) argument.
+ */
+static void sdhci_msm_bus_cancel_work_and_set_vote(struct sdhci_host *host,
+ unsigned int bw)
+{
+ int vote;
+ unsigned long flags;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ cancel_delayed_work_sync(&msm_host->msm_bus_vote.vote_work);
+ spin_lock_irqsave(&host->lock, flags);
+ vote = sdhci_msm_bus_get_vote_for_bw(msm_host, bw);
+ sdhci_msm_bus_set_vote(msm_host, vote, flags);
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+#define MSM_MMC_BUS_VOTING_DELAY 200 /* msecs */
+
+/* This function queues a work item which will set the bandwidth requirement to 0 */
+static void sdhci_msm_bus_queue_work(struct sdhci_host *host)
+{
+ unsigned long flags;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (msm_host->msm_bus_vote.min_bw_vote !=
+ msm_host->msm_bus_vote.curr_vote)
+ queue_delayed_work(system_nrt_wq,
+ &msm_host->msm_bus_vote.vote_work,
+ msecs_to_jiffies(MSM_MMC_BUS_VOTING_DELAY));
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static int sdhci_msm_bus_register(struct sdhci_msm_host *host,
+ struct platform_device *pdev)
+{
+ int rc = 0;
+ struct msm_bus_scale_pdata *bus_pdata;
+
+ struct sdhci_msm_bus_voting_data *data;
+ struct device *dev = &pdev->dev;
+
+ data = devm_kzalloc(dev,
+ sizeof(struct sdhci_msm_bus_voting_data), GFP_KERNEL);
+ if (!data) {
+ dev_err(&pdev->dev,
+ "%s: failed to allocate memory\n", __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+ data->bus_pdata = msm_bus_cl_get_pdata(pdev);
+ if (data->bus_pdata) {
+ rc = sdhci_msm_dt_get_array(dev, "qcom,bus-bw-vectors-bps",
+ &data->bw_vecs, &data->bw_vecs_size, 0);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "%s: Failed to get bus-bw-vectors-bps\n",
+ __func__);
+ goto out;
+ }
+ host->pdata->voting_data = data;
+ }
+ if (host->pdata->voting_data &&
+ host->pdata->voting_data->bus_pdata &&
+ host->pdata->voting_data->bw_vecs &&
+ host->pdata->voting_data->bw_vecs_size) {
+
+ bus_pdata = host->pdata->voting_data->bus_pdata;
+ host->msm_bus_vote.client_handle =
+ msm_bus_scale_register_client(bus_pdata);
+ if (!host->msm_bus_vote.client_handle) {
+			dev_err(&pdev->dev, "msm_bus_scale_register_client() failed\n");
+ rc = -EFAULT;
+ goto out;
+ }
+ /* cache the vote index for minimum and maximum bandwidth */
+ host->msm_bus_vote.min_bw_vote =
+ sdhci_msm_bus_get_vote_for_bw(host, 0);
+ host->msm_bus_vote.max_bw_vote =
+ sdhci_msm_bus_get_vote_for_bw(host, UINT_MAX);
+ } else {
+ devm_kfree(dev, data);
+ }
+
+out:
+ return rc;
+}
+
+static void sdhci_msm_bus_unregister(struct sdhci_msm_host *host)
+{
+ if (host->msm_bus_vote.client_handle)
+ msm_bus_scale_unregister_client(
+ host->msm_bus_vote.client_handle);
+}
+
+static void sdhci_msm_bus_voting(struct sdhci_host *host, u32 enable)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ struct mmc_ios *ios = &host->mmc->ios;
+ unsigned int bw;
+
+ if (!msm_host->msm_bus_vote.client_handle)
+ return;
+
+ bw = sdhci_get_bw_required(host, ios);
+ if (enable)
+ sdhci_msm_bus_cancel_work_and_set_vote(host, bw);
+ else
+ sdhci_msm_bus_queue_work(host);
+}
+
/* Regulator utility functions */
static int sdhci_msm_vreg_init_reg(struct device *dev,
struct sdhci_msm_reg_data *vreg)
@@ -1480,6 +1720,35 @@
else
return 0;
}
+static ssize_t
+show_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ msm_host->msm_bus_vote.is_max_bw_needed);
+}
+
+static ssize_t
+store_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ uint32_t value;
+ unsigned long flags;
+
+ if (!kstrtou32(buf, 0, &value)) {
+ spin_lock_irqsave(&host->lock, flags);
+ msm_host->msm_bus_vote.is_max_bw_needed = !!value;
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
+ return count;
+}
static void sdhci_msm_check_power_status(struct sdhci_host *host)
{
@@ -1519,11 +1788,74 @@
return SDHCI_MSM_MAX_SEGMENTS;
}
+void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ int rc;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ unsigned long flags;
+
+ if (clock && !atomic_read(&msm_host->clks_on)) {
+ pr_debug("%s: request to enable clock at rate %u\n",
+ mmc_hostname(host->mmc), clock);
+ if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
+ rc = clk_prepare_enable(msm_host->bus_clk);
+ if (rc) {
+ pr_err("%s: %s: failed to enable the bus-clock with error %d\n",
+ mmc_hostname(host->mmc), __func__, rc);
+ goto out;
+ }
+ }
+ if (!IS_ERR(msm_host->pclk)) {
+ rc = clk_prepare_enable(msm_host->pclk);
+ if (rc) {
+ pr_err("%s: %s: failed to enable the pclk with error %d\n",
+ mmc_hostname(host->mmc), __func__, rc);
+ goto disable_bus_clk;
+ }
+ }
+ rc = clk_prepare_enable(msm_host->clk);
+ if (rc) {
+ pr_err("%s: %s: failed to enable the host-clk with error %d\n",
+ mmc_hostname(host->mmc), __func__, rc);
+ goto disable_pclk;
+ }
+ mb();
+ atomic_set(&msm_host->clks_on, 1);
+
+ } else if (!clock && atomic_read(&msm_host->clks_on)) {
+ pr_debug("%s: request to disable clocks\n",
+ mmc_hostname(host->mmc));
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+ mb();
+ clk_disable_unprepare(msm_host->clk);
+ if (!IS_ERR(msm_host->pclk))
+ clk_disable_unprepare(msm_host->pclk);
+ if (!IS_ERR_OR_NULL(msm_host->bus_clk))
+ clk_disable_unprepare(msm_host->bus_clk);
+ atomic_set(&msm_host->clks_on, 0);
+ }
+ spin_lock_irqsave(&host->lock, flags);
+ host->clock = clock;
+ spin_unlock_irqrestore(&host->lock, flags);
+ goto out;
+disable_pclk:
+ if (!IS_ERR_OR_NULL(msm_host->pclk))
+ clk_disable_unprepare(msm_host->pclk);
+disable_bus_clk:
+ if (!IS_ERR_OR_NULL(msm_host->bus_clk))
+ clk_disable_unprepare(msm_host->bus_clk);
+out:
+ return;
+}
+
static struct sdhci_ops sdhci_msm_ops = {
.check_power_status = sdhci_msm_check_power_status,
.execute_tuning = sdhci_msm_execute_tuning,
.toggle_cdr = sdhci_msm_toggle_cdr,
.get_max_segments = sdhci_msm_max_segs,
+ .set_clock = sdhci_msm_set_clock,
+ .platform_bus_voting = sdhci_msm_bus_voting,
};
static int __devinit sdhci_msm_probe(struct platform_device *pdev)
@@ -1555,6 +1887,7 @@
pltfm_host = sdhci_priv(host);
pltfm_host->priv = msm_host;
msm_host->mmc = host->mmc;
+ msm_host->pdev = pdev;
/* Extract platform data */
if (pdev->dev.of_node) {
@@ -1601,6 +1934,7 @@
if (ret)
goto pclk_disable;
+ atomic_set(&msm_host->clks_on, 1);
/* Setup regulators */
ret = sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, true);
if (ret) {
@@ -1671,6 +2005,9 @@
/* Enable pwr irq interrupts */
writel_relaxed(INT_MASK, (msm_host->core_mem + CORE_PWRCTL_MASK));
+ /* Set clock gating delay to be used when CONFIG_MMC_CLKGATE is set */
+ msm_host->mmc->clkgate_delay = SDHCI_MSM_MMC_CLK_GATE_DELAY;
+
/* Set host capabilities */
msm_host->mmc->caps |= msm_host->pdata->mmc_bus_width;
msm_host->mmc->caps |= msm_host->pdata->caps;
@@ -1704,10 +2041,20 @@
if (msm_host->pdata->nonremovable)
msm_host->mmc->caps |= MMC_CAP_NONREMOVABLE;
+ host->cpu_dma_latency_us = msm_host->pdata->cpu_dma_latency_us;
+
+ ret = sdhci_msm_bus_register(msm_host, pdev);
+ if (ret)
+ goto vreg_deinit;
+
+ if (msm_host->msm_bus_vote.client_handle)
+ INIT_DELAYED_WORK(&msm_host->msm_bus_vote.vote_work,
+ sdhci_msm_bus_work);
+
ret = sdhci_add_host(host);
if (ret) {
dev_err(&pdev->dev, "Add host failed (%d)\n", ret);
- goto vreg_deinit;
+ goto bus_unregister;
}
/* Set core clk rate, optionally override from dts */
@@ -1719,12 +2066,24 @@
goto remove_host;
}
+ msm_host->msm_bus_vote.max_bus_bw.show = show_sdhci_max_bus_bw;
+ msm_host->msm_bus_vote.max_bus_bw.store = store_sdhci_max_bus_bw;
+ sysfs_attr_init(&msm_host->msm_bus_vote.max_bus_bw.attr);
+ msm_host->msm_bus_vote.max_bus_bw.attr.name = "max_bus_bw";
+ msm_host->msm_bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
+ ret = device_create_file(&pdev->dev,
+ &msm_host->msm_bus_vote.max_bus_bw);
+ if (ret)
+ goto remove_host;
+
/* Successful initialization */
goto out;
remove_host:
dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
sdhci_remove_host(host, dead);
+bus_unregister:
+ sdhci_msm_bus_unregister(msm_host);
vreg_deinit:
sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
clk_disable:
@@ -1753,17 +2112,18 @@
0xffffffff);
pr_debug("%s: %s\n", dev_name(&pdev->dev), __func__);
+ device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
sdhci_remove_host(host, dead);
sdhci_pltfm_free(pdev);
sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
- if (!IS_ERR(msm_host->clk))
- clk_disable_unprepare(msm_host->clk);
- if (!IS_ERR(msm_host->pclk))
- clk_disable_unprepare(msm_host->pclk);
- if (!IS_ERR_OR_NULL(msm_host->bus_clk))
- clk_disable_unprepare(msm_host->bus_clk);
+
if (pdata->pin_data)
sdhci_msm_setup_pins(pdata, false);
+
+ if (msm_host->msm_bus_vote.client_handle) {
+ sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
+ sdhci_msm_bus_unregister(msm_host);
+ }
return 0;
}
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 326f220..97c1013 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -968,6 +968,8 @@
tasklet_schedule(&host->finish_tasklet);
}
+#define SDHCI_REQUEST_TIMEOUT 10 /* Default request timeout in seconds */
+
static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
int flags;
@@ -1001,7 +1003,11 @@
mdelay(1);
}
- mod_timer(&host->timer, jiffies + 10 * HZ);
+ mod_timer(&host->timer, jiffies + SDHCI_REQUEST_TIMEOUT * HZ);
+
+ if (cmd->cmd_timeout_ms > SDHCI_REQUEST_TIMEOUT * MSEC_PER_SEC)
+ mod_timer(&host->timer, jiffies +
+ (msecs_to_jiffies(cmd->cmd_timeout_ms * 2)));
host->cmd = cmd;
@@ -1088,19 +1094,24 @@
int real_div = div, clk_mul = 1;
u16 clk = 0;
unsigned long timeout;
+ unsigned long flags;
+ spin_lock_irqsave(&host->lock, flags);
if (clock && clock == host->clock)
- return;
+ goto ret;
host->mmc->actual_clock = 0;
if (host->ops->set_clock) {
+ spin_unlock_irqrestore(&host->lock, flags);
host->ops->set_clock(host, clock);
+ spin_lock_irqsave(&host->lock, flags);
if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK)
- return;
+ goto ret;
}
- sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+ if (host->clock)
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
if (clock == 0)
goto out;
@@ -1176,7 +1187,7 @@
pr_err("%s: Internal clock never "
"stabilised.\n", mmc_hostname(host->mmc));
sdhci_dumpregs(host);
- return;
+ goto ret;
}
timeout--;
mdelay(1);
@@ -1187,6 +1198,8 @@
out:
host->clock = clock;
+ret:
+ spin_unlock_irqrestore(&host->lock, flags);
}
static int sdhci_set_power(struct sdhci_host *host, unsigned short power)
@@ -1265,6 +1278,32 @@
* *
\*****************************************************************************/
+static int sdhci_enable(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (host->cpu_dma_latency_us)
+ pm_qos_update_request(&host->pm_qos_req_dma,
+ host->cpu_dma_latency_us);
+ if (host->ops->platform_bus_voting)
+ host->ops->platform_bus_voting(host, 1);
+
+ return 0;
+}
+
+static int sdhci_disable(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (host->cpu_dma_latency_us)
+ pm_qos_update_request(&host->pm_qos_req_dma,
+ PM_QOS_DEFAULT_VALUE);
+ if (host->ops->platform_bus_voting)
+ host->ops->platform_bus_voting(host, 0);
+
+ return 0;
+}
+
static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct sdhci_host *host;
@@ -1347,20 +1386,17 @@
return;
}
- /*
- * Reset the chip on each power off.
- * Should clear out any weird states.
- */
- if (ios->power_mode == MMC_POWER_OFF) {
- sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
- sdhci_reinit(host);
+ if (ios->clock)
+ sdhci_set_clock(host, ios->clock);
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (!host->clock) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ return;
}
+ spin_unlock_irqrestore(&host->lock, flags);
- sdhci_set_clock(host, ios->clock);
-
- if (ios->power_mode == MMC_POWER_OFF)
- vdd_bit = sdhci_set_power(host, -1);
- else
+ if (ios->power_mode & (MMC_POWER_UP | MMC_POWER_ON))
vdd_bit = sdhci_set_power(host, ios->vdd);
if (host->vmmc && vdd_bit != -1)
@@ -1448,10 +1484,11 @@
/* Re-enable SD Clock */
clock = host->clock;
host->clock = 0;
+ spin_unlock_irqrestore(&host->lock, flags);
sdhci_set_clock(host, clock);
+ spin_lock_irqsave(&host->lock, flags);
}
-
/* Reset SD Clock Enable */
clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
clk &= ~SDHCI_CLOCK_CARD_EN;
@@ -1481,10 +1518,13 @@
/* Re-enable SD Clock */
clock = host->clock;
host->clock = 0;
+ spin_unlock_irqrestore(&host->lock, flags);
sdhci_set_clock(host, clock);
+ spin_lock_irqsave(&host->lock, flags);
} else
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+ spin_unlock_irqrestore(&host->lock, flags);
/*
* Some (ENE) controllers go apeshit on some ios operation,
* signalling timeout and CRC errors even on CMD0. Resetting
@@ -1493,8 +1533,21 @@
if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+ /*
+ * Reset the chip on each power off.
+ * Should clear out any weird states.
+ */
+ if (ios->power_mode == MMC_POWER_OFF) {
+ sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
+ sdhci_reinit(host);
+ vdd_bit = sdhci_set_power(host, -1);
+ if (host->vmmc && vdd_bit != -1)
+ mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd_bit);
+ }
+ if (!ios->clock)
+ sdhci_set_clock(host, ios->clock);
+
mmiowb();
- spin_unlock_irqrestore(&host->lock, flags);
}
static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
@@ -1971,6 +2024,8 @@
.start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
.execute_tuning = sdhci_execute_tuning,
.enable_preset_value = sdhci_enable_preset_value,
+ .enable = sdhci_enable,
+ .disable = sdhci_disable,
};
/*****************************************************************************\
@@ -2048,7 +2103,9 @@
/* This is to force an update */
clock = host->clock;
host->clock = 0;
+ spin_unlock_irqrestore(&host->lock, flags);
sdhci_set_clock(host, clock);
+ spin_lock_irqsave(&host->lock, flags);
}
/* Spec says we should do both at the same time, but Ricoh
@@ -3095,6 +3152,9 @@
mmiowb();
+ if (host->cpu_dma_latency_us)
+ pm_qos_add_request(&host->pm_qos_req_dma,
+ PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
mmc_add_host(mmc);
pr_info("%s: SDHCI controller on %s [%s] using %s\n",
@@ -3142,6 +3202,8 @@
sdhci_disable_card_detection(host);
+ if (host->cpu_dma_latency_us)
+ pm_qos_remove_request(&host->pm_qos_req_dma);
mmc_remove_host(host->mmc);
#ifdef SDHCI_USE_LEDS_CLASS
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index c0ae39f..49d7957 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -281,6 +281,7 @@
int (*execute_tuning)(struct sdhci_host *host, u32 opcode);
void (*toggle_cdr)(struct sdhci_host *host, bool enable);
unsigned int (*get_max_segments)(void);
+ void (*platform_bus_voting)(struct sdhci_host *host, u32 enable);
};
#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index 9fb01f4..bd24575 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -16,6 +16,7 @@
#include <linux/types.h>
#include <linux/io.h>
#include <linux/mmc/host.h>
+#include <linux/pm_qos.h>
struct sdhci_host {
/* Data set by hardware interface driver */
@@ -187,6 +188,9 @@
#define SDHCI_TUNING_MODE_1 0
struct timer_list tuning_timer; /* Timer for tuning */
+ unsigned int cpu_dma_latency_us;
+ struct pm_qos_request pm_qos_req_dma;
+
unsigned long private[0] ____cacheline_aligned;
};
#endif /* LINUX_MMC_SDHCI_H */