Merge " ASoC: msm: qdsp6v2: Add SEC_I2S_RX port as EC reference"
diff --git a/Documentation/devicetree/bindings/fb/mdss-mdp.txt b/Documentation/devicetree/bindings/fb/mdss-mdp.txt
index 21e09ef..bd11551 100644
--- a/Documentation/devicetree/bindings/fb/mdss-mdp.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-mdp.txt
@@ -275,6 +275,12 @@
used to reduce the pending writes limit dynamically
and can be tuned to match performance requirements
depending upon system state.
+- qcom,mdss-clk-levels: This array lists the selectable mdp core clock levels.
+ The core clock requirement is calculated for each
+ frame, and the calculated value is rounded up to
+ the next level from this table.
+ Entries must be listed in ascending
+ order.
Fudge Factors: Fudge factors are used to boost demand for
resources like bus bandwidth, clk rate, etc. to
@@ -363,6 +369,9 @@
qcom,mdss-ib-factor = <3 2>; /* 1.5 times */
qcom,mdss-clk-factor = <5 4>; /* 1.25 times */
+ /* Clock levels */
+ qcom,mdss-clk-levels = <92310000 177780000 200000000>;
+
qcom,max-clk-rate = <320000000>;
qcom,vbif-settings = <0x0004 0x00000001>,
<0x00D8 0x00000707>;
diff --git a/Documentation/devicetree/bindings/slimbus/slim-msm-ctrl.txt b/Documentation/devicetree/bindings/slimbus/slim-msm-ctrl.txt
index e199e55..d25b456 100644
--- a/Documentation/devicetree/bindings/slimbus/slim-msm-ctrl.txt
+++ b/Documentation/devicetree/bindings/slimbus/slim-msm-ctrl.txt
@@ -54,11 +54,11 @@
are to be used, so that application processor can query
logical address of the ported generic device to be used.
Other than PC, fields of EA are same across platforms.
- -qcom,slim-mdm: This value indicates presence of slimbus component on
+ - qcom,slim-mdm: This value provides the identifier of slimbus component on
external mdm. This property enables the slimbus driver to
- receive subsytem restart notification from mdm and follow
- appropriate steps to ensure communication on the bus can be
- resumed after mdm-restart.
+ register and receive subsystem restart notification from mdm
+ and follow appropriate steps to ensure communication on the bus
+ can be resumed after mdm-restart.
Example:
slim@fe12f000 {
cell-index = <1>;
diff --git a/Documentation/devicetree/bindings/spi/spi_qsd.txt b/Documentation/devicetree/bindings/spi/spi_qsd.txt
index 1504dc0..da71e19 100644
--- a/Documentation/devicetree/bindings/spi/spi_qsd.txt
+++ b/Documentation/devicetree/bindings/spi/spi_qsd.txt
@@ -33,6 +33,9 @@
When this entry is not present, voting is done by the runtime-pm callbacks.
- qcom,master-id : Master endpoint number used for voting on clocks using the
bus-scaling driver.
+ - qcom,rt-priority : Boolean. When present, the spi message queue runs as a realtime task.
+ The transaction message pump then runs with high (realtime) priority to reduce the
+ transfer latency on the bus by minimising the delay between a transfer request and its execution.
Optional properties which are required for support of BAM-mode:
- qcom,ver-reg-exists : Boolean. When present, allows driver to verify if HW
@@ -92,4 +95,5 @@
qcom,bam-producer-pipe-index = <13>;
qcom,ver-reg-exists;
qcom,master-id = <86>;
+ qcom,rt-priority;
};
diff --git a/arch/arm/boot/dts/msm8226-mdss.dtsi b/arch/arm/boot/dts/msm8226-mdss.dtsi
index 1bbc517..5776926 100644
--- a/arch/arm/boot/dts/msm8226-mdss.dtsi
+++ b/arch/arm/boot/dts/msm8226-mdss.dtsi
@@ -36,6 +36,10 @@
qcom,mdss-ib-factor = <2 1>; /* 2 times */
qcom,mdss-clk-factor = <5 4>; /* 1.25 times */
+ /* Clock levels */
+ qcom,mdss-clk-levels = <92310000 100000000
+ 133330000 177780000 200000000>;
+
qcom,max-clk-rate = <200000000>;
qcom,mdss-pipe-vig-off = <0x00001200>;
qcom,mdss-pipe-rgb-off = <0x00001E00>;
diff --git a/arch/arm/boot/dts/msm8610-mdss.dtsi b/arch/arm/boot/dts/msm8610-mdss.dtsi
index 929659e..70d53e9 100644
--- a/arch/arm/boot/dts/msm8610-mdss.dtsi
+++ b/arch/arm/boot/dts/msm8610-mdss.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -16,6 +16,7 @@
reg = <0xfd900000 0x100000>;
reg-names = "mdp_phys";
interrupts = <0 72 0>;
+ vdd-cx-supply = <&pm8110_s1_corner>;
mdss_fb0: qcom,mdss_fb_primary {
cell-index = <0>;
diff --git a/arch/arm/boot/dts/msm8610-v2-mtp.dts b/arch/arm/boot/dts/msm8610-v2-mtp.dts
index 77f5276..debfc23 100644
--- a/arch/arm/boot/dts/msm8610-v2-mtp.dts
+++ b/arch/arm/boot/dts/msm8610-v2-mtp.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -22,4 +22,6 @@
qcom,board-id = <8 0>;
};
-
+&sdhc_2 {
+ qcom,pad-drv-on = <0x5 0x4 0x4>; /* 12mA, 10mA, 10mA */
+};
diff --git a/arch/arm/boot/dts/msm8610.dtsi b/arch/arm/boot/dts/msm8610.dtsi
index a409510..66b81ac 100644
--- a/arch/arm/boot/dts/msm8610.dtsi
+++ b/arch/arm/boot/dts/msm8610.dtsi
@@ -593,6 +593,7 @@
qcom,i2c-src-freq = <19200000>;
qcom,sda-gpio = <&msmgpio 10 0>;
qcom,scl-gpio = <&msmgpio 11 0>;
+ qcom,clk-ctl-xfer;
qcom,master-id = <86>;
};
diff --git a/arch/arm/boot/dts/msm8974pro.dtsi b/arch/arm/boot/dts/msm8974pro.dtsi
index ae0547f..d398f72 100644
--- a/arch/arm/boot/dts/msm8974pro.dtsi
+++ b/arch/arm/boot/dts/msm8974pro.dtsi
@@ -1723,7 +1723,7 @@
<3240000 1600000>,
<4048000 1600000>,
<4264000 1600000>;
- qcom,max-hw-load = <1281600>; /* max(4k X 2304 @ 24, 4k X 2160 @ 30) + 1080p @ 30 */
+ qcom,max-hw-load = <1216800>; /* 3840 X 2160 @ 30 fps + 1920 X 1088 @ 30 fps */
qcom,buffer-type-tz-usage-table = <0x241 0x1>,
<0x106 0x2>,
<0x480 0x3>;
diff --git a/arch/arm/configs/msm8226_defconfig b/arch/arm/configs/msm8226_defconfig
index a26e247..5f008d5 100644
--- a/arch/arm/configs/msm8226_defconfig
+++ b/arch/arm/configs/msm8226_defconfig
@@ -487,6 +487,7 @@
CONFIG_SCHEDSTATS=y
CONFIG_TIMER_STATS=y
CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000
CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
diff --git a/arch/arm/configs/msm8610_defconfig b/arch/arm/configs/msm8610_defconfig
index a908217..6874b28 100644
--- a/arch/arm/configs/msm8610_defconfig
+++ b/arch/arm/configs/msm8610_defconfig
@@ -453,6 +453,7 @@
CONFIG_SCHEDSTATS=y
CONFIG_TIMER_STATS=y
CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000
CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index a27baba..c5c16c2 100755
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -524,6 +524,7 @@
CONFIG_SCHEDSTATS=y
CONFIG_TIMER_STATS=y
CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000
CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
diff --git a/arch/arm/mach-msm/include/mach/msm_smem.h b/arch/arm/mach-msm/include/mach/msm_smem.h
index 19f9c0e..670efe6 100644
--- a/arch/arm/mach-msm/include/mach/msm_smem.h
+++ b/arch/arm/mach-msm/include/mach/msm_smem.h
@@ -40,6 +40,17 @@
#define SMEM_NUM_SMD_STREAM_CHANNELS 64
+/**
+ * OVERFLOW_ADD_UNSIGNED() - check for unsigned overflow
+ *
+ * @type: type to check for overflow
+ * @a: left value to use
+ * @b: right value to use
+ * @returns: true if a + b will result in overflow; false otherwise
+ */
+#define OVERFLOW_ADD_UNSIGNED(type, a, b) \
+ (((type)~0 - (a)) < (b) ? true : false)
+
enum {
/* fixed items */
SMEM_PROC_COMM = 0,
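
For reference, a minimal userspace sketch of how the OVERFLOW_ADD_UNSIGNED() check relocated above behaves; it is the same macro that guards the new BUG_ON() checks in smd.c later in this patch, and the values below are illustrative only:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define OVERFLOW_ADD_UNSIGNED(type, a, b) \
		(((type)~0 - (a)) < (b) ? true : false)

	int main(void)
	{
		uintptr_t near_top = UINTPTR_MAX - 16;

		/* prints 1: adding 64 to near_top would wrap past UINTPTR_MAX */
		printf("%d\n", OVERFLOW_ADD_UNSIGNED(uintptr_t, near_top, (uintptr_t)64));
		/* prints 0: 0x1000 + 64 fits without wrapping */
		printf("%d\n", OVERFLOW_ADD_UNSIGNED(uintptr_t, (uintptr_t)0x1000, (uintptr_t)64));
		return 0;
	}
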
diff --git a/arch/arm/mach-msm/include/mach/msm_spi.h b/arch/arm/mach-msm/include/mach/msm_spi.h
index 608927c..52d88a1 100644
--- a/arch/arm/mach-msm/include/mach/msm_spi.h
+++ b/arch/arm/mach-msm/include/mach/msm_spi.h
@@ -22,6 +22,9 @@
* runtime pm (optimizes for power).
* @master_id master id number of the controller's wrapper (BLSP or GSBI).
* When zero, clock path voting is disabled.
+ * @rt_priority when set, spi will pump transaction messages with high
+ * (realtime) priority to reduce the transfer latency on the bus by
+ * minimising the delay between a transfer request and its execution.
*/
struct msm_spi_platform_data {
u32 max_clock_speed;
@@ -37,4 +40,5 @@
bool use_bam;
u32 bam_consumer_pipe_index;
u32 bam_producer_pipe_index;
+ bool rt_priority;
};
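
As an illustration only (not part of this patch), a driver honouring the new rt_priority flag would typically promote its message-pump kthread to a realtime scheduling class, along these lines (the helper name is hypothetical):

	#include <linux/sched.h>

	/* Hypothetical helper: called when pdata->rt_priority is set. */
	static void msm_spi_set_pump_rt(struct task_struct *pump_thread)
	{
		struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

		sched_setscheduler(pump_thread, SCHED_FIFO, &param);
	}
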
diff --git a/arch/arm/mach-msm/krait-regulator-pmic.c b/arch/arm/mach-msm/krait-regulator-pmic.c
index 5081e7b..2f4185e 100644
--- a/arch/arm/mach-msm/krait-regulator-pmic.c
+++ b/arch/arm/mach-msm/krait-regulator-pmic.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -58,7 +58,8 @@
#define REG_GANG_CTL2 0xC1
#define GANG_EN_BIT BIT(7)
-#define REG_PWM_CL 0x60
+#define REG_PWM_CL 0x60
+#define REG_SEC_ACCESS 0xD0
struct krait_vreg_pmic_chip {
struct spmi_device *spmi;
@@ -89,11 +90,21 @@
return 0;
}
-static int write_byte(struct spmi_device *spmi, u16 addr, u8 *val)
+static int write_secure_byte(struct spmi_device *spmi, u16 base,
+ u16 addr, u8 *val)
{
int rc;
+ u8 sec_val = 0xA5;
- rc = spmi_ext_register_writel(spmi->ctrl, spmi->sid, addr, val, 1);
+ rc = spmi_ext_register_writel(spmi->ctrl, spmi->sid,
+ base + REG_SEC_ACCESS, &sec_val, 1);
+ if (rc) {
+ pr_err("SPMI write failed [%d,0x%04x] val = 0x%02x rc=%d\n",
+ spmi->sid, base + REG_SEC_ACCESS, sec_val, rc);
+ return rc;
+ }
+ rc = spmi_ext_register_writel(spmi->ctrl, spmi->sid,
+ base + addr, val, 1);
if (rc) {
pr_err("SPMI write failed [%d,0x%04x] val = 0x%02x rc=%d\n",
spmi->sid, addr, *val, rc);
@@ -127,7 +138,7 @@
bool krait_pmic_is_ready(void)
{
if (the_chip == NULL) {
- pr_debug("kait_regulator_pmic not ready yet\n");
+ pr_debug("krait_regulator_pmic not ready yet\n");
return false;
}
return true;
@@ -149,7 +160,7 @@
int rc;
if (the_chip == NULL) {
- pr_debug("kait_regulator_pmic not ready yet\n");
+ pr_debug("krait_regulator_pmic not ready yet\n");
return -ENXIO;
}
@@ -157,8 +168,8 @@
return 0;
setpoint = (I_PFM_MA - IOFFSET_MA) / ISTEP_MA;
- rc = write_byte(the_chip->spmi,
- the_chip->ps_base + REG_PWM_CL, &setpoint);
+ rc = write_secure_byte(the_chip->spmi,
+ the_chip->ps_base, REG_PWM_CL, &setpoint);
pr_debug("wrote 0x%02x->[%d 0x%04x] rc = %d\n", setpoint,
the_chip->spmi->sid,
the_chip->ps_base + REG_PWM_CL, rc);
@@ -180,7 +191,7 @@
int rc;
if (the_chip == NULL) {
- pr_debug("kait_regulator_pmic not ready yet\n");
+ pr_debug("krait_regulator_pmic not ready yet\n");
return -ENXIO;
}
@@ -190,8 +201,8 @@
udelay(50);
setpoint = (I_PWM_MA - IOFFSET_MA) / ISTEP_MA;
- rc = write_byte(the_chip->spmi,
- the_chip->ps_base + REG_PWM_CL, &setpoint);
+ rc = write_secure_byte(the_chip->spmi,
+ the_chip->ps_base, REG_PWM_CL, &setpoint);
pr_debug("wrote 0x%02x->[%d 0x%04x] rc = %d\n", setpoint,
the_chip->spmi->sid,
the_chip->ps_base + REG_PWM_CL, rc);
diff --git a/arch/arm/mach-msm/qdsp6v2/ultrasound/usf.c b/arch/arm/mach-msm/qdsp6v2/ultrasound/usf.c
index cf69e17..192aaf9 100644
--- a/arch/arm/mach-msm/qdsp6v2/ultrasound/usf.c
+++ b/arch/arm/mach-msm/qdsp6v2/ultrasound/usf.c
@@ -70,6 +70,7 @@
USF_OPENED_STATE,
USF_CONFIGURED_STATE,
USF_WORK_STATE,
+ USF_ADSP_RESTART_STATE,
USF_ERROR_STATE
};
@@ -406,6 +407,13 @@
case Q6USM_EVENT_WRITE_DONE:
wake_up(&usf_xx->wait);
break;
+
+ case RESET_EVENTS:
+ pr_err("%s: received RESET_EVENTS\n", __func__);
+ usf_xx->usf_state = USF_ADSP_RESTART_STATE;
+ wake_up(&usf_xx->wait);
+ break;
+
default:
break;
}
@@ -445,6 +453,12 @@
}
break;
+ case RESET_EVENTS:
+ pr_err("%s: received RESET_EVENTS\n", __func__);
+ usf_xx->usf_state = USF_ADSP_RESTART_STATE;
+ wake_up(&usf_xx->wait);
+ break;
+
default:
break;
}
@@ -865,7 +879,9 @@
if (detect_info.detect_timeout == USF_INFINITIVE_TIMEOUT) {
rc = wait_event_interruptible(usf_xx->wait,
(usf_xx->us_detect_type !=
- USF_US_DETECT_UNDEF));
+ USF_US_DETECT_UNDEF) ||
+ (usf_xx->usf_state ==
+ USF_ADSP_RESTART_STATE));
} else {
if (detect_info.detect_timeout == USF_DEFAULT_TIMEOUT)
timeout = USF_TIMEOUT_JIFFIES;
@@ -874,8 +890,14 @@
}
rc = wait_event_interruptible_timeout(usf_xx->wait,
(usf_xx->us_detect_type !=
- USF_US_DETECT_UNDEF),
- timeout);
+ USF_US_DETECT_UNDEF) ||
+ (usf_xx->usf_state ==
+ USF_ADSP_RESTART_STATE), timeout);
+
+ /* In the case of aDSP restart, "no US" is assumed */
+ if (usf_xx->usf_state == USF_ADSP_RESTART_STATE) {
+ rc = -EFAULT;
+ }
/* In the case of timeout, "no US" is assumed */
if (rc < 0)
pr_err("%s: Getting US detection failed rc[%d]\n",
@@ -1336,7 +1358,8 @@
case US_STOP_TX: {
usf_xx = &usf->usf_tx;
- if (usf_xx->usf_state == USF_WORK_STATE)
+ if ((usf_xx->usf_state == USF_WORK_STATE)
+ || (usf_xx->usf_state == USF_ADSP_RESTART_STATE))
rc = usf_stop_tx(usf);
else {
pr_err("%s: stop_tx: wrong state[%d]\n",
@@ -1349,7 +1372,8 @@
case US_STOP_RX: {
usf_xx = &usf->usf_rx;
- if (usf_xx->usf_state == USF_WORK_STATE)
+ if ((usf_xx->usf_state == USF_WORK_STATE)
+ || (usf_xx->usf_state == USF_ADSP_RESTART_STATE))
usf_disable(usf_xx);
else {
pr_err("%s: stop_rx: wrong state[%d]\n",
diff --git a/arch/arm/mach-msm/qdsp6v2/ultrasound/version_b/q6usm_b.c b/arch/arm/mach-msm/qdsp6v2/ultrasound/version_b/q6usm_b.c
index 51a51c5..af3c1f5 100644
--- a/arch/arm/mach-msm/qdsp6v2/ultrasound/version_b/q6usm_b.c
+++ b/arch/arm/mach-msm/qdsp6v2/ultrasound/version_b/q6usm_b.c
@@ -488,6 +488,24 @@
}
switch (data->opcode) {
+ case RESET_EVENTS: {
+ pr_err("%s: Reset event is received: %d %d\n",
+ __func__,
+ data->reset_event,
+ data->reset_proc);
+
+ opcode = RESET_EVENTS;
+
+ apr_reset(this_mmap.apr);
+ this_mmap.apr = NULL;
+
+ apr_reset(usc->apr);
+ usc->apr = NULL;
+
+ break;
+ }
+
+
case USM_DATA_EVENT_READ_DONE: {
struct us_port_data *port = &usc->port[OUT];
diff --git a/arch/arm/mach-msm/smd.c b/arch/arm/mach-msm/smd.c
index 1241e44..32f9b3b 100644
--- a/arch/arm/mach-msm/smd.c
+++ b/arch/arm/mach-msm/smd.c
@@ -1035,12 +1035,19 @@
{
unsigned head = ch->half_ch->get_head(ch->recv);
unsigned tail = ch->half_ch->get_tail(ch->recv);
- *ptr = (void *) (ch->recv_data + tail);
+ unsigned fifo_size = ch->fifo_size;
+ BUG_ON(fifo_size >= SZ_1M);
+ BUG_ON(head >= fifo_size);
+ BUG_ON(tail >= fifo_size);
+ BUG_ON(OVERFLOW_ADD_UNSIGNED(uintptr_t, (uintptr_t)ch->recv_data,
+ tail));
+
+ *ptr = (void *) (ch->recv_data + tail);
if (tail <= head)
return head - tail;
else
- return ch->fifo_size - tail;
+ return fifo_size - tail;
}
static int read_intr_blocked(struct smd_channel *ch)
@@ -1140,16 +1147,23 @@
{
unsigned head = ch->half_ch->get_head(ch->send);
unsigned tail = ch->half_ch->get_tail(ch->send);
- *ptr = (void *) (ch->send_data + head);
+ unsigned fifo_size = ch->fifo_size;
+ BUG_ON(fifo_size >= SZ_1M);
+ BUG_ON(head >= fifo_size);
+ BUG_ON(tail >= fifo_size);
+ BUG_ON(OVERFLOW_ADD_UNSIGNED(uintptr_t, (uintptr_t)ch->send_data,
+ head));
+
+ *ptr = (void *) (ch->send_data + head);
if (head < tail) {
return tail - head - SMD_FIFO_FULL_RESERVE;
} else {
if (tail < SMD_FIFO_FULL_RESERVE)
- return ch->fifo_size + tail - head
+ return fifo_size + tail - head
- SMD_FIFO_FULL_RESERVE;
else
- return ch->fifo_size - head;
+ return fifo_size - head;
}
}
diff --git a/arch/arm/mach-msm/smem.c b/arch/arm/mach-msm/smem.c
index f41240a..3c7cbeb 100644
--- a/arch/arm/mach-msm/smem.c
+++ b/arch/arm/mach-msm/smem.c
@@ -27,17 +27,6 @@
#include "smem_private.h"
-/**
- * OVERFLOW_ADD_UNSIGNED() - check for unsigned overflow
- *
- * @type: type to check for overflow
- * @a: left value to use
- * @b: right value to use
- * @returns: true if a + b will result in overflow; false otherwise
- */
-#define OVERFLOW_ADD_UNSIGNED(type, a, b) \
- (((type)~0 - (a)) < (b) ? true : false)
-
#define MODEM_SBL_VERSION_INDEX 7
#define SMEM_VERSION_INFO_SIZE (32 * 4)
#define SMEM_VERSION 0x000B
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
index 56f55b7..d7f0bcb 100644
--- a/drivers/crypto/msm/qcrypto.c
+++ b/drivers/crypto/msm/qcrypto.c
@@ -59,58 +59,60 @@
#define QCRYPTO_HIGH_BANDWIDTH_TIMEOUT 1000
struct crypto_stat {
- u32 aead_sha1_aes_enc;
- u32 aead_sha1_aes_dec;
- u32 aead_sha1_des_enc;
- u32 aead_sha1_des_dec;
- u32 aead_sha1_3des_enc;
- u32 aead_sha1_3des_dec;
- u32 aead_ccm_aes_enc;
- u32 aead_ccm_aes_dec;
- u32 aead_rfc4309_ccm_aes_enc;
- u32 aead_rfc4309_ccm_aes_dec;
- u32 aead_op_success;
- u32 aead_op_fail;
- u32 aead_bad_msg;
- u32 ablk_cipher_aes_enc;
- u32 ablk_cipher_aes_dec;
- u32 ablk_cipher_des_enc;
- u32 ablk_cipher_des_dec;
- u32 ablk_cipher_3des_enc;
- u32 ablk_cipher_3des_dec;
- u32 ablk_cipher_op_success;
- u32 ablk_cipher_op_fail;
- u32 sha1_digest;
- u32 sha256_digest;
- u32 sha_op_success;
- u32 sha_op_fail;
- u32 sha1_hmac_digest;
- u32 sha256_hmac_digest;
- u32 sha_hmac_op_success;
- u32 sha_hmac_op_fail;
+ u64 aead_sha1_aes_enc;
+ u64 aead_sha1_aes_dec;
+ u64 aead_sha1_des_enc;
+ u64 aead_sha1_des_dec;
+ u64 aead_sha1_3des_enc;
+ u64 aead_sha1_3des_dec;
+ u64 aead_ccm_aes_enc;
+ u64 aead_ccm_aes_dec;
+ u64 aead_rfc4309_ccm_aes_enc;
+ u64 aead_rfc4309_ccm_aes_dec;
+ u64 aead_op_success;
+ u64 aead_op_fail;
+ u64 aead_bad_msg;
+ u64 ablk_cipher_aes_enc;
+ u64 ablk_cipher_aes_dec;
+ u64 ablk_cipher_des_enc;
+ u64 ablk_cipher_des_dec;
+ u64 ablk_cipher_3des_enc;
+ u64 ablk_cipher_3des_dec;
+ u64 ablk_cipher_op_success;
+ u64 ablk_cipher_op_fail;
+ u64 sha1_digest;
+ u64 sha256_digest;
+ u64 sha_op_success;
+ u64 sha_op_fail;
+ u64 sha1_hmac_digest;
+ u64 sha256_hmac_digest;
+ u64 sha_hmac_op_success;
+ u64 sha_hmac_op_fail;
};
static struct crypto_stat _qcrypto_stat;
static struct dentry *_debug_dent;
static char _debug_read_buf[DEBUG_MAX_RW_BUF];
+static bool _qcrypto_init_assign;
struct crypto_priv;
struct crypto_engine {
struct list_head elist;
void *qce; /* qce handle */
struct platform_device *pdev; /* platform device */
struct crypto_async_request *req; /* current active request */
+ struct qcrypto_resp_ctx *arsp; /* rsp associated with req */
+ int res; /* execution result */
struct crypto_priv *pcp;
struct tasklet_struct done_tasklet;
uint32_t bus_scale_handle;
struct crypto_queue req_queue; /*
* request queue for those requests
- * that have this engine assgined
+ * that have this engine assigned
* waiting to be executed
*/
- u32 total_req;
- u32 err_req;
+ u64 total_req;
+ u64 err_req;
u32 unit;
u32 ce_device;
- int res; /* execution result */
unsigned int signature;
uint32_t high_bw_req_count;
bool high_bw_req;
@@ -140,6 +142,12 @@
int32_t total_units; /* total units of engines */
struct mutex engine_lock;
struct crypto_engine *next_engine; /* next assign engine */
+ struct crypto_queue req_queue; /*
+ * request queue for those requests
+ * that are waiting for an available
+ * engine.
+ */
+
};
static struct crypto_priv qcrypto_dev;
static struct crypto_engine *_qcrypto_static_assign_engine(
@@ -261,6 +269,11 @@
#define QCRYPTO_CCM4309_NONCE_LEN 3
struct qcrypto_cipher_ctx {
+ struct list_head rsp_queue; /* response queue */
+ struct crypto_engine *pengine; /* fixed engine assigned to this tfm */
+ struct crypto_priv *cp;
+ unsigned int flags;
+
u8 auth_key[QCRYPTO_MAX_KEY_SIZE];
u8 iv[QCRYPTO_MAX_IV_LENGTH];
@@ -270,13 +283,18 @@
unsigned int authsize;
unsigned int auth_key_len;
- struct crypto_priv *cp;
- unsigned int flags;
- struct crypto_engine *pengine; /* fixed engine assigned */
u8 ccm4309_nonce[QCRYPTO_CCM4309_NONCE_LEN];
};
+struct qcrypto_resp_ctx {
+ struct list_head list;
+ struct crypto_async_request *async_req; /* async req */
+ int res; /* execution result */
+};
+
struct qcrypto_cipher_req_ctx {
+ struct qcrypto_resp_ctx rsp_entry;/* rsp entry. */
+ struct crypto_engine *pengine; /* engine assigned to this request */
u8 *iv;
u8 rfc4309_iv[QCRYPTO_MAX_IV_LENGTH];
unsigned int ivsize;
@@ -301,6 +319,8 @@
#define SHA_MAX_STATE_SIZE (SHA256_DIGEST_SIZE / sizeof(u32))
#define SHA_MAX_DIGEST_SIZE SHA256_DIGEST_SIZE
+#define MSM_QCRYPTO_REQ_QUEUE_LENGTH 50
+
static uint8_t _std_init_vector_sha1_uint8[] = {
0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
@@ -316,18 +336,21 @@
};
struct qcrypto_sha_ctx {
+ struct list_head rsp_queue; /* response queue */
+ struct crypto_engine *pengine; /* fixed engine assigned to this tfm */
+ struct crypto_priv *cp;
+ unsigned int flags;
enum qce_hash_alg_enum alg;
uint32_t diglen;
uint32_t authkey_in_len;
uint8_t authkey[SHA_MAX_BLOCK_SIZE];
struct ahash_request *ahash_req;
struct completion ahash_req_complete;
- struct crypto_priv *cp;
- unsigned int flags;
- struct crypto_engine *pengine; /* fixed engine assigned */
};
struct qcrypto_sha_req_ctx {
+ struct qcrypto_resp_ctx rsp_entry;/* rsp entry. */
+ struct crypto_engine *pengine; /* engine assigned to this request */
struct scatterlist *src;
uint32_t nbytes;
@@ -411,11 +434,10 @@
int ret = 0;
if (high_bw_req && pengine->high_bw_req == false) {
- pm_stay_awake(&pengine->pdev->dev);
ret = qce_enable_clk(pengine->qce);
if (ret) {
pr_err("%s Unable enable clk\n", __func__);
- goto clk_err;
+ return;
}
ret = msm_bus_scale_client_update_request(
pengine->bus_scale_handle, 1);
@@ -423,7 +445,7 @@
pr_err("%s Unable to set to high bandwidth\n",
__func__);
qce_disable_clk(pengine->qce);
- goto clk_err;
+ return;
}
pengine->high_bw_req = true;
} else if (high_bw_req == false && pengine->high_bw_req == true) {
@@ -432,7 +454,7 @@
if (ret) {
pr_err("%s Unable to set to low bandwidth\n",
__func__);
- goto clk_err;
+ return;
}
ret = qce_disable_clk(pengine->qce);
if (ret) {
@@ -442,16 +464,10 @@
if (ret)
pr_err("%s Unable to set to high bandwidth\n",
__func__);
- goto clk_err;
+ return;
}
pengine->high_bw_req = false;
- pm_relax(&pengine->pdev->dev);
}
- return;
-clk_err:
- pm_relax(&pengine->pdev->dev);
- return;
-
}
static void qcrypto_bw_scale_down_timer_callback(unsigned long data)
@@ -473,20 +489,26 @@
add_timer(&(pengine->bw_scale_down_timer));
}
-static void qcrypto_ce_bw_scaling_req(struct crypto_engine *pengine,
+static void qcrypto_ce_bw_scaling_req(struct crypto_priv *cp,
bool high_bw_req)
{
- mutex_lock(&pengine->pcp->engine_lock);
- if (high_bw_req) {
- if (pengine->high_bw_req_count == 0)
- qcrypto_ce_set_bus(pengine, true);
- pengine->high_bw_req_count++;
- } else {
- pengine->high_bw_req_count--;
- if (pengine->high_bw_req_count == 0)
- qcrypto_bw_set_timeout(pengine);
+ struct crypto_engine *pengine;
+
+ if (cp->platform_support.bus_scale_table == NULL)
+ return;
+ mutex_lock(&cp->engine_lock);
+ list_for_each_entry(pengine, &cp->engine_list, elist) {
+ if (high_bw_req) {
+ if (pengine->high_bw_req_count == 0)
+ qcrypto_ce_set_bus(pengine, true);
+ pengine->high_bw_req_count++;
+ } else {
+ pengine->high_bw_req_count--;
+ if (pengine->high_bw_req_count == 0)
+ qcrypto_bw_set_timeout(pengine);
+ }
}
- mutex_unlock(&pengine->pcp->engine_lock);
+ mutex_unlock(&cp->engine_lock);
}
static void qcrypto_low_bw_req_work(struct work_struct *work)
@@ -597,11 +619,14 @@
/* random first IV */
get_random_bytes(ctx->iv, QCRYPTO_MAX_IV_LENGTH);
- ctx->pengine = _qcrypto_static_assign_engine(ctx->cp);
- if (ctx->pengine == NULL)
- return -ENODEV;
- if (ctx->cp->platform_support.bus_scale_table != NULL)
- qcrypto_ce_bw_scaling_req(ctx->pengine, true);
+ if (_qcrypto_init_assign) {
+ ctx->pengine = _qcrypto_static_assign_engine(ctx->cp);
+ if (ctx->pengine == NULL)
+ return -ENODEV;
+ } else
+ ctx->pengine = NULL;
+ qcrypto_ce_bw_scaling_req(ctx->cp, true);
+ INIT_LIST_HEAD(&ctx->rsp_queue);
return 0;
};
@@ -619,11 +644,14 @@
sha_ctx->cp = q_alg->cp;
sha_ctx->flags = 0;
sha_ctx->ahash_req = NULL;
- sha_ctx->pengine = _qcrypto_static_assign_engine(sha_ctx->cp);
- if (sha_ctx->pengine == NULL)
- return -ENODEV;
- if (sha_ctx->cp->platform_support.bus_scale_table != NULL)
- qcrypto_ce_bw_scaling_req(sha_ctx->pengine, true);
+ if (_qcrypto_init_assign) {
+ sha_ctx->pengine = _qcrypto_static_assign_engine(sha_ctx->cp);
+ if (sha_ctx->pengine == NULL)
+ return -ENODEV;
+ } else
+ sha_ctx->pengine = NULL;
+ qcrypto_ce_bw_scaling_req(sha_ctx->cp, true);
+ INIT_LIST_HEAD(&sha_ctx->rsp_queue);
return 0;
};
@@ -631,13 +659,13 @@
{
struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);
+ if (!list_empty(&sha_ctx->rsp_queue))
+ pr_err("_qcrypto_ahash_cra_exit: requests still outstanding\n");
if (sha_ctx->ahash_req != NULL) {
ahash_request_free(sha_ctx->ahash_req);
sha_ctx->ahash_req = NULL;
}
- if (sha_ctx->pengine &&
- sha_ctx->cp->platform_support.bus_scale_table != NULL)
- qcrypto_ce_bw_scaling_req(sha_ctx->pengine, false);
+ qcrypto_ce_bw_scaling_req(sha_ctx->cp, false);
};
@@ -686,16 +714,18 @@
{
struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- if (ctx->pengine && ctx->cp->platform_support.bus_scale_table != NULL)
- qcrypto_ce_bw_scaling_req(ctx->pengine, false);
+ if (!list_empty(&ctx->rsp_queue))
+ pr_err("_qcrypto_cra_ablkcipher_exit: requests still outstanding\n");
+ qcrypto_ce_bw_scaling_req(ctx->cp, false);
};
static void _qcrypto_cra_aead_exit(struct crypto_tfm *tfm)
{
struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- if (ctx->pengine && ctx->cp->platform_support.bus_scale_table != NULL)
- qcrypto_ce_bw_scaling_req(ctx->pengine, false);
+ if (!list_empty(&ctx->rsp_queue))
+ pr_err("_qcrypto_cra_aead_exit: requests still outstanding\n");
+ qcrypto_ce_bw_scaling_req(ctx->cp, false);
};
static int _disp_stats(int id)
@@ -708,117 +738,117 @@
pstat = &_qcrypto_stat;
len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
- "\nQualcomm crypto accelerator %d Statistics:\n",
+ "\nQualcomm crypto accelerator %d Statistics\n",
id + 1);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " ABLK AES CIPHER encryption : %d\n",
+ " ABLK AES CIPHER encryption : %llu\n",
pstat->ablk_cipher_aes_enc);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " ABLK AES CIPHER decryption : %d\n",
+ " ABLK AES CIPHER decryption : %llu\n",
pstat->ablk_cipher_aes_dec);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " ABLK DES CIPHER encryption : %d\n",
+ " ABLK DES CIPHER encryption : %llu\n",
pstat->ablk_cipher_des_enc);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " ABLK DES CIPHER decryption : %d\n",
+ " ABLK DES CIPHER decryption : %llu\n",
pstat->ablk_cipher_des_dec);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " ABLK 3DES CIPHER encryption : %d\n",
+ " ABLK 3DES CIPHER encryption : %llu\n",
pstat->ablk_cipher_3des_enc);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " ABLK 3DES CIPHER decryption : %d\n",
+ " ABLK 3DES CIPHER decryption : %llu\n",
pstat->ablk_cipher_3des_dec);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " ABLK CIPHER operation success: %d\n",
+ " ABLK CIPHER operation success : %llu\n",
pstat->ablk_cipher_op_success);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " ABLK CIPHER operation fail : %d\n",
+ " ABLK CIPHER operation fail : %llu\n",
pstat->ablk_cipher_op_fail);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AEAD SHA1-AES encryption : %d\n",
+ " AEAD SHA1-AES encryption : %llu\n",
pstat->aead_sha1_aes_enc);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AEAD SHA1-AES decryption : %d\n",
+ " AEAD SHA1-AES decryption : %llu\n",
pstat->aead_sha1_aes_dec);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AEAD SHA1-DES encryption : %d\n",
+ " AEAD SHA1-DES encryption : %llu\n",
pstat->aead_sha1_des_enc);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AEAD SHA1-DES decryption : %d\n",
+ " AEAD SHA1-DES decryption : %llu\n",
pstat->aead_sha1_des_dec);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AEAD SHA1-3DES encryption : %d\n",
+ " AEAD SHA1-3DES encryption : %llu\n",
pstat->aead_sha1_3des_enc);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AEAD SHA1-3DES decryption : %d\n",
+ " AEAD SHA1-3DES decryption : %llu\n",
pstat->aead_sha1_3des_dec);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AEAD CCM-AES encryption : %d\n",
+ " AEAD CCM-AES encryption : %llu\n",
pstat->aead_ccm_aes_enc);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AEAD CCM-AES decryption : %d\n",
+ " AEAD CCM-AES decryption : %llu\n",
pstat->aead_ccm_aes_dec);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AEAD RFC4309-CCM-AES encryption : %d\n",
+ " AEAD RFC4309-CCM-AES encryption : %llu\n",
pstat->aead_rfc4309_ccm_aes_enc);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AEAD RFC4309-CCM-AES decryption : %d\n",
+ " AEAD RFC4309-CCM-AES decryption : %llu\n",
pstat->aead_rfc4309_ccm_aes_dec);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AEAD operation success : %d\n",
+ " AEAD operation success : %llu\n",
pstat->aead_op_success);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AEAD operation fail : %d\n",
+ " AEAD operation fail : %llu\n",
pstat->aead_op_fail);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AEAD bad message : %d\n",
+ " AEAD bad message : %llu\n",
pstat->aead_bad_msg);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " SHA1 digest : %d\n",
+ " SHA1 digest : %llu\n",
pstat->sha1_digest);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " SHA256 digest : %d\n",
+ " SHA256 digest : %llu\n",
pstat->sha256_digest);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " SHA operation fail : %d\n",
+ " SHA operation fail : %llu\n",
pstat->sha_op_fail);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " SHA operation success : %d\n",
+ " SHA operation success : %llu\n",
pstat->sha_op_success);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " SHA1 HMAC digest : %d\n",
+ " SHA1 HMAC digest : %llu\n",
pstat->sha1_hmac_digest);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " SHA256 HMAC digest : %d\n",
+ " SHA256 HMAC digest : %llu\n",
pstat->sha256_hmac_digest);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " SHA HMAC operation fail : %d\n",
+ " SHA HMAC operation fail : %llu\n",
pstat->sha_hmac_op_fail);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " SHA HMAC operation success : %d\n",
+ " SHA HMAC operation success : %llu\n",
pstat->sha_hmac_op_success);
spin_lock_irqsave(&cp->lock, flags);
list_for_each_entry(pe, &cp->engine_list, elist) {
len += snprintf(
_debug_read_buf + len,
DEBUG_MAX_RW_BUF - len - 1,
- " Engine %d Req : %d\n",
+ " Engine %4d Req : %llu\n",
pe->unit,
pe->total_req
);
len += snprintf(
_debug_read_buf + len,
DEBUG_MAX_RW_BUF - len - 1,
- " Engine %d Req Error : %d\n",
+ " Engine %4d Req Error : %llu\n",
pe->unit,
pe->err_req
);
@@ -847,7 +877,6 @@
tasklet_kill(&pengine->done_tasklet);
cancel_work_sync(&pengine->low_bw_req_ws);
del_timer_sync(&pengine->bw_scale_down_timer);
- device_init_wakeup(&pengine->pdev->dev, false);
if (pengine->bus_scale_handle != 0)
msm_bus_scale_unregister_client(pengine->bus_scale_handle);
@@ -1020,27 +1049,73 @@
return 0;
};
+static void _qcrypto_tfm_complete(struct crypto_priv *cp, u32 type,
+ void *tfm_ctx)
+{
+ unsigned long flags;
+ struct qcrypto_resp_ctx *arsp;
+ struct list_head *plist;
+ struct crypto_async_request *areq;
+
+ switch (type) {
+ case CRYPTO_ALG_TYPE_AHASH:
+ plist = &((struct qcrypto_sha_ctx *) tfm_ctx)->rsp_queue;
+ break;
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ case CRYPTO_ALG_TYPE_AEAD:
+ default:
+ plist = &((struct qcrypto_cipher_ctx *) tfm_ctx)->rsp_queue;
+ break;
+ }
+again:
+ spin_lock_irqsave(&cp->lock, flags);
+ if (list_empty(plist)) {
+ arsp = NULL; /* nothing to do */
+ } else {
+ arsp = list_first_entry(plist,
+ struct qcrypto_resp_ctx, list);
+ if (arsp->res == -EINPROGRESS)
+ arsp = NULL; /* still in progress */
+ else
+ list_del(&arsp->list); /* request is complete */
+ }
+ spin_unlock_irqrestore(&cp->lock, flags);
+ if (arsp) {
+ areq = arsp->async_req;
+ areq->complete(areq, arsp->res);
+ goto again;
+ }
+}
+
static void req_done(unsigned long data)
{
struct crypto_async_request *areq;
struct crypto_engine *pengine = (struct crypto_engine *)data;
struct crypto_priv *cp;
unsigned long flags;
+ struct qcrypto_resp_ctx *arsp;
int res;
+ u32 type = 0;
+ void *tfm_ctx = NULL;
cp = pengine->pcp;
spin_lock_irqsave(&cp->lock, flags);
areq = pengine->req;
- pengine->req = NULL;
- res = pengine->res;
- spin_unlock_irqrestore(&cp->lock, flags);
- if (areq)
- areq->complete(areq, res);
- if (res)
- pengine->err_req++;
- _start_qcrypto_process(cp, pengine);
-};
+ arsp = pengine->arsp;
+ res = pengine->res;
+ pengine->req = NULL;
+ pengine->arsp = NULL;
+ if (areq) {
+ type = crypto_tfm_alg_type(areq->tfm);
+ tfm_ctx = crypto_tfm_ctx(areq->tfm);
+ arsp->res = res;
+ }
+ spin_unlock_irqrestore(&cp->lock, flags);
+ _start_qcrypto_process(cp, pengine);
+ if (areq)
+ _qcrypto_tfm_complete(cp, type, tfm_ctx);
+}
static void _qce_ahash_complete(void *cookie, unsigned char *digest,
unsigned char *authdata, int ret)
@@ -1057,7 +1132,7 @@
pstat = &_qcrypto_stat;
- pengine = sha_ctx->pengine;
+ pengine = rctx->pengine;
#ifdef QCRYPTO_DEBUG
dev_info(&pengine->pdev->dev, "_qce_ahash_complete: %p ret %d\n",
areq, ret);
@@ -1103,10 +1178,12 @@
struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
struct crypto_priv *cp = ctx->cp;
struct crypto_stat *pstat;
+ struct qcrypto_cipher_req_ctx *rctx;
struct crypto_engine *pengine;
pstat = &_qcrypto_stat;
- pengine = ctx->pengine;
+ rctx = ablkcipher_request_ctx(areq);
+ pengine = rctx->pengine;
#ifdef QCRYPTO_DEBUG
dev_info(&pengine->pdev->dev, "_qce_ablk_cipher_complete: %p ret %d\n",
areq, ret);
@@ -1158,8 +1235,8 @@
struct crypto_engine *pengine;
pstat = &_qcrypto_stat;
- pengine = ctx->pengine;
rctx = aead_request_ctx(areq);
+ pengine = rctx->pengine;
if (rctx->mode == QCE_MODE_CCM) {
if (cp->ce_support.aligned_only) {
@@ -1363,6 +1440,7 @@
req = container_of(async_req, struct ablkcipher_request, base);
cipher_ctx = crypto_tfm_ctx(async_req->tfm);
rctx = ablkcipher_request_ctx(req);
+ rctx->pengine = pengine;
tfm = crypto_ablkcipher_reqtfm(req);
if (pengine->pcp->ce_support.aligned_only) {
uint32_t bytes = 0;
@@ -1426,6 +1504,7 @@
struct ahash_request, base);
rctx = ahash_request_ctx(req);
sha_ctx = crypto_tfm_ctx(async_req->tfm);
+ rctx->pengine = pengine;
sreq.qce_cb = _qce_ahash_complete;
sreq.digest = &rctx->digest[0];
@@ -1481,6 +1560,7 @@
struct crypto_aead *aead = crypto_aead_reqtfm(req);
rctx = aead_request_ctx(req);
+ rctx->pengine = pengine;
cipher_ctx = crypto_tfm_ctx(async_req->tfm);
qreq.op = QCE_REQ_AEAD;
@@ -1695,28 +1775,93 @@
struct crypto_engine *pengine)
{
struct crypto_async_request *async_req = NULL;
- struct crypto_async_request *backlog = NULL;
+ struct crypto_async_request *backlog_eng = NULL;
+ struct crypto_async_request *backlog_cp = NULL;
unsigned long flags;
u32 type;
int ret = 0;
struct crypto_stat *pstat;
+ void *tfm_ctx;
+ struct qcrypto_cipher_req_ctx *cipher_rctx;
+ struct qcrypto_sha_req_ctx *ahash_rctx;
+ struct ablkcipher_request *ablkcipher_req;
+ struct ahash_request *ahash_req;
+ struct aead_request *aead_req;
+ struct qcrypto_resp_ctx *arsp;
pstat = &_qcrypto_stat;
again:
spin_lock_irqsave(&cp->lock, flags);
- if (pengine->req == NULL) {
- backlog = crypto_get_backlog(&pengine->req_queue);
- async_req = crypto_dequeue_request(&pengine->req_queue);
- pengine->req = async_req;
+ if (pengine->req) {
+ spin_unlock_irqrestore(&cp->lock, flags);
+ return 0;
}
- spin_unlock_irqrestore(&cp->lock, flags);
- if (!async_req)
- return ret;
- if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
- type = crypto_tfm_alg_type(async_req->tfm);
+ backlog_eng = crypto_get_backlog(&pengine->req_queue);
+
+ /* try to get request from request queue of the engine first */
+ async_req = crypto_dequeue_request(&pengine->req_queue);
+ if (!async_req) {
+ /*
+ * if no request from the engine,
+ * try to get from request queue of driver
+ */
+ backlog_cp = crypto_get_backlog(&cp->req_queue);
+ async_req = crypto_dequeue_request(&cp->req_queue);
+ if (!async_req) {
+ spin_unlock_irqrestore(&cp->lock, flags);
+ return 0;
+ }
+ }
+
+ /* add associated rsp entry to tfm response queue */
+ type = crypto_tfm_alg_type(async_req->tfm);
+ tfm_ctx = crypto_tfm_ctx(async_req->tfm);
+ switch (type) {
+ case CRYPTO_ALG_TYPE_AHASH:
+ ahash_req = container_of(async_req,
+ struct ahash_request, base);
+ ahash_rctx = ahash_request_ctx(ahash_req);
+ arsp = &ahash_rctx->rsp_entry;
+ list_add_tail(
+ &arsp->list,
+ &((struct qcrypto_sha_ctx *)tfm_ctx)
+ ->rsp_queue);
+ break;
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ ablkcipher_req = container_of(async_req,
+ struct ablkcipher_request, base);
+ cipher_rctx = ablkcipher_request_ctx(ablkcipher_req);
+ arsp = &cipher_rctx->rsp_entry;
+ list_add_tail(
+ &arsp->list,
+ &((struct qcrypto_sha_ctx *)tfm_ctx)
+ ->rsp_queue);
+ break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ default:
+ aead_req = container_of(async_req,
+ struct aead_request, base);
+ cipher_rctx = aead_request_ctx(aead_req);
+ arsp = &cipher_rctx->rsp_entry;
+ list_add_tail(
+ &arsp->list,
+ &((struct qcrypto_sha_ctx *)tfm_ctx)
+ ->rsp_queue);
+ break;
+ }
+
+ arsp->res = -EINPROGRESS;
+ arsp->async_req = async_req;
+ pengine->req = async_req;
+ pengine->arsp = arsp;
+
+ spin_unlock_irqrestore(&cp->lock, flags);
+ if (backlog_eng)
+ backlog_eng->complete(backlog_eng, -EINPROGRESS);
+ if (backlog_cp)
+ backlog_cp->complete(backlog_cp, -EINPROGRESS);
switch (type) {
case CRYPTO_ALG_TYPE_ABLKCIPHER:
ret = _qcrypto_process_ablkcipher(pengine, async_req);
@@ -1732,9 +1877,11 @@
};
pengine->total_req++;
if (ret) {
+ arsp->res = ret;
pengine->err_req++;
spin_lock_irqsave(&cp->lock, flags);
pengine->req = NULL;
+ pengine->arsp = NULL;
spin_unlock_irqrestore(&cp->lock, flags);
if (type == CRYPTO_ALG_TYPE_ABLKCIPHER)
@@ -1745,11 +1892,22 @@
else
pstat->aead_op_fail++;
- async_req->complete(async_req, ret);
+ _qcrypto_tfm_complete(cp, type, tfm_ctx);
goto again;
};
return ret;
-};
+}
+
+static struct crypto_engine *_avail_eng(struct crypto_priv *cp)
+{
+ struct crypto_engine *pe = NULL;
+
+ list_for_each_entry(pe, &cp->engine_list, elist) {
+ if (pe->req == NULL)
+ return pe;
+ }
+ return NULL;
+}
static int _qcrypto_queue_req(struct crypto_priv *cp,
struct crypto_engine *pengine,
@@ -1765,9 +1923,15 @@
}
spin_lock_irqsave(&cp->lock, flags);
- ret = crypto_enqueue_request(&pengine->req_queue, req);
+ if (pengine) {
+ ret = crypto_enqueue_request(&pengine->req_queue, req);
+ } else {
+ ret = crypto_enqueue_request(&cp->req_queue, req);
+ pengine = _avail_eng(cp);
+ }
spin_unlock_irqrestore(&cp->lock, flags);
- _start_qcrypto_process(cp, pengine);
+ if (pengine)
+ _start_qcrypto_process(cp, pengine);
return ret;
}
@@ -3997,6 +4161,7 @@
pengine->pcp = cp;
pengine->pdev = pdev;
pengine->req = NULL;
+ pengine->signature = 0xdeadbeef;
pengine->high_bw_req_count = 0;
pengine->high_bw_req = false;
@@ -4004,11 +4169,8 @@
INIT_WORK(&pengine->low_bw_req_ws, qcrypto_low_bw_req_work);
pengine->bw_scale_down_timer.function =
qcrypto_bw_scale_down_timer_callback;
-
- device_init_wakeup(&pengine->pdev->dev, true);
-
tasklet_init(&pengine->done_tasklet, req_done, (unsigned long)pengine);
- crypto_init_queue(&pengine->req_queue, 50);
+ crypto_init_queue(&pengine->req_queue, MSM_QCRYPTO_REQ_QUEUE_LENGTH);
mutex_lock(&cp->engine_lock);
cp->total_units++;
@@ -4083,6 +4245,7 @@
dev_err(&pdev->dev,
"The algorithm name %s is too long.\n",
q_alg->cipher_alg.cra_name);
+ kfree(q_alg);
goto err;
}
}
@@ -4116,6 +4279,7 @@
dev_err(&pdev->dev,
"The algorithm name %s is too long.\n",
q_alg->cipher_alg.cra_name);
+ kfree(q_alg);
goto err;
}
}
@@ -4152,6 +4316,7 @@
dev_err(&pdev->dev,
"The algorithm name %s is too long.\n",
q_alg->sha_alg.halg.base.cra_name);
+ kfree(q_alg);
goto err;
}
}
@@ -4188,6 +4353,7 @@
dev_err(&pdev->dev,
"The algorithm name %s is too long.\n",
q_alg->cipher_alg.cra_name);
+ kfree(q_alg);
goto err;
}
}
@@ -4226,6 +4392,7 @@
dev_err(&pdev->dev,
"The algorithm name %s is too long.\n",
q_alg->sha_alg.halg.base.cra_name);
+ kfree(q_alg);
goto err;
}
}
@@ -4262,6 +4429,7 @@
dev_err(&pdev->dev,
"The algorithm name %s is too long.\n",
q_alg->cipher_alg.cra_name);
+ kfree(q_alg);
goto err;
}
}
@@ -4523,6 +4691,7 @@
pcp->ce_lock_count = 0;
pcp->platform_support.bus_scale_table = NULL;
pcp->next_engine = NULL;
+ crypto_init_queue(&pcp->req_queue, MSM_QCRYPTO_REQ_QUEUE_LENGTH);
return platform_driver_register(&_qualcomm_crypto);
}
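
For reviewers, a stripped-down sketch (illustrative names, not the driver source) of the dispatch scheme the qcrypto changes above introduce: a request with no statically assigned engine is parked on the driver-wide queue and is started on the first idle engine found, mirroring _avail_eng():

	#include <linux/list.h>

	struct engine {
		struct list_head elist;
		void *req;	/* active request; NULL means the engine is idle */
	};

	static struct engine *pick_idle_engine(struct list_head *engine_list)
	{
		struct engine *pe;

		list_for_each_entry(pe, engine_list, elist)
			if (pe->req == NULL)
				return pe;
		return NULL;	/* all engines busy: request waits on the driver queue */
	}
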
diff --git a/drivers/leds/leds-qpnp.c b/drivers/leds/leds-qpnp.c
index eba5ca8..0659b77 100644
--- a/drivers/leds/leds-qpnp.c
+++ b/drivers/leds/leds-qpnp.c
@@ -484,6 +484,7 @@
};
static int num_kpbl_leds_on;
+static DEFINE_MUTEX(flash_lock);
static int
qpnp_led_masked_write(struct qpnp_led_data *led, u16 addr, u8 mask, u8 val)
@@ -1408,7 +1409,10 @@
{
int rc;
- mutex_lock(&led->lock);
+ if (led->id == QPNP_ID_FLASH1_LED0 || led->id == QPNP_ID_FLASH1_LED1)
+ mutex_lock(&flash_lock);
+ else
+ mutex_lock(&led->lock);
switch (led->id) {
case QPNP_ID_WLED:
@@ -1448,7 +1452,10 @@
dev_err(&led->spmi_dev->dev, "Invalid LED(%d)\n", led->id);
break;
}
- mutex_unlock(&led->lock);
+ if (led->id == QPNP_ID_FLASH1_LED0 || led->id == QPNP_ID_FLASH1_LED1)
+ mutex_unlock(&flash_lock);
+ else
+ mutex_unlock(&led->lock);
}
@@ -3314,7 +3321,9 @@
goto fail_id_check;
}
- mutex_init(&led->lock);
+ if (led->id != QPNP_ID_FLASH1_LED0 &&
+ led->id != QPNP_ID_FLASH1_LED1)
+ mutex_init(&led->lock);
INIT_WORK(&led->work, qpnp_led_work);
rc = qpnp_led_initialize(led);
@@ -3409,7 +3418,9 @@
fail_id_check:
for (i = 0; i < parsed_leds; i++) {
- mutex_destroy(&led_array[i].lock);
+ if (led_array[i].id != QPNP_ID_FLASH1_LED0 &&
+ led_array[i].id != QPNP_ID_FLASH1_LED1)
+ mutex_destroy(&led_array[i].lock);
led_classdev_unregister(&led_array[i].cdev);
}
@@ -3423,7 +3434,10 @@
for (i = 0; i < parsed_leds; i++) {
cancel_work_sync(&led_array[i].work);
- mutex_destroy(&led_array[i].lock);
+ if (led_array[i].id != QPNP_ID_FLASH1_LED0 &&
+ led_array[i].id != QPNP_ID_FLASH1_LED1)
+ mutex_destroy(&led_array[i].lock);
+
led_classdev_unregister(&led_array[i].cdev);
switch (led_array[i].id) {
case QPNP_ID_WLED:
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
index 8662657..3596a12 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
@@ -95,7 +95,7 @@
static void msm_csid_reset(struct csid_device *csid_dev)
{
msm_camera_io_w(CSID_RST_STB_ALL, csid_dev->base + CSID_RST_CMD_ADDR);
- wait_for_completion_interruptible(&csid_dev->reset_complete);
+ wait_for_completion(&csid_dev->reset_complete);
return;
}
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_qup_i2c.c b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_qup_i2c.c
index d5b89b7..6f9aeec 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_qup_i2c.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_qup_i2c.c
@@ -69,7 +69,7 @@
rc = i2c_transfer(dev_client->client->adapter, msg, 1);
if (rc < 0)
S_I2C_DBG("msm_camera_qup_i2c_txdata faild 0x%x\n", saddr);
- return 0;
+ return rc;
}
int32_t msm_camera_qup_i2c_read(struct msm_camera_i2c_client *client,
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index b3aad09..a8f4ca7 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -1171,70 +1171,6 @@
return rc;
}
-static int msm_vdec_queue_output_buffers(struct msm_vidc_inst *inst)
-{
- struct internal_buf *binfo;
- struct hfi_device *hdev;
- struct msm_smem *handle;
- struct vidc_frame_data frame_data = {0};
- struct hal_buffer_requirements *output_buf, *extradata_buf;
- int rc = 0;
-
- if (!inst || !inst->core || !inst->core->device) {
- dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
- return -EINVAL;
- }
-
- hdev = inst->core->device;
-
- output_buf = get_buff_req_buffer(inst, HAL_BUFFER_OUTPUT);
- if (!output_buf) {
- dprintk(VIDC_DBG,
- "This output buffer not required, buffer_type: %x\n",
- HAL_BUFFER_OUTPUT);
- return 0;
- }
- dprintk(VIDC_DBG,
- "output: num = %d, size = %d\n",
- output_buf->buffer_count_actual,
- output_buf->buffer_size);
-
- extradata_buf = get_buff_req_buffer(inst, HAL_BUFFER_EXTRADATA_OUTPUT);
- if (!extradata_buf) {
- dprintk(VIDC_DBG,
- "This extradata buffer not required, buffer_type: %x\n",
- HAL_BUFFER_EXTRADATA_OUTPUT);
- return 0;
- }
-
- hdev = inst->core->device;
-
- mutex_lock(&inst->lock);
- if (!list_empty(&inst->outputbufs)) {
- list_for_each_entry(binfo, &inst->outputbufs, list) {
- if (!binfo) {
- dprintk(VIDC_ERR, "Invalid parameter\n");
- mutex_unlock(&inst->lock);
- return -EINVAL;
- }
- handle = binfo->handle;
- frame_data.alloc_len = output_buf->buffer_size;
- frame_data.filled_len = 0;
- frame_data.offset = 0;
- frame_data.device_addr = handle->device_addr;
- frame_data.flags = 0;
- frame_data.extradata_addr = handle->device_addr +
- output_buf->buffer_size;
- frame_data.buffer_type = HAL_BUFFER_OUTPUT;
- rc = call_hfi_op(hdev, session_ftb,
- (void *) inst->session, &frame_data);
- binfo->buffer_ownership = FIRMWARE;
- }
- }
- mutex_unlock(&inst->lock);
- return 0;
-}
-
static inline int start_streaming(struct msm_vidc_inst *inst)
{
int rc = 0;
@@ -1285,7 +1221,7 @@
}
if (msm_comm_get_stream_output_mode(inst) ==
HAL_VIDEO_DECODER_SECONDARY) {
- rc = msm_vdec_queue_output_buffers(inst);
+ rc = msm_comm_queue_output_buffers(inst);
if (rc) {
dprintk(VIDC_ERR,
"Failed to queue output buffers: %d\n", rc);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 96a13ab..24bed94 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -14,6 +14,7 @@
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/workqueue.h>
#include <asm/div64.h>
#include <mach/subsystem_restart.h>
@@ -711,8 +712,8 @@
return;
}
if (binfo->buffer_ownership != DRIVER) {
- dprintk(VIDC_ERR,
- "Failed : This buffer is with FW 0x%lx\n",
+ dprintk(VIDC_DBG,
+ "This buffer is with FW 0x%lx\n",
binfo->handle->device_addr);
return;
}
@@ -726,16 +727,71 @@
return;
}
+
+int msm_comm_queue_output_buffers(struct msm_vidc_inst *inst)
+{
+ struct internal_buf *binfo;
+ struct hfi_device *hdev;
+ struct msm_smem *handle;
+ struct vidc_frame_data frame_data = {0};
+ struct hal_buffer_requirements *output_buf;
+ int rc = 0;
+
+ if (!inst || !inst->core || !inst->core->device) {
+ dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+ hdev = inst->core->device;
+ output_buf = get_buff_req_buffer(inst, HAL_BUFFER_OUTPUT);
+ if (!output_buf) {
+ dprintk(VIDC_DBG,
+ "This output buffer not required, buffer_type: %x\n",
+ HAL_BUFFER_OUTPUT);
+ return 0;
+ }
+ dprintk(VIDC_DBG,
+ "output: num = %d, size = %d\n",
+ output_buf->buffer_count_actual,
+ output_buf->buffer_size);
+
+ list_for_each_entry(binfo, &inst->outputbufs, list) {
+ if (binfo->buffer_ownership != DRIVER)
+ continue;
+ handle = binfo->handle;
+ frame_data.alloc_len = output_buf->buffer_size;
+ frame_data.filled_len = 0;
+ frame_data.offset = 0;
+ frame_data.device_addr = handle->device_addr;
+ frame_data.flags = 0;
+ frame_data.extradata_addr = handle->device_addr +
+ output_buf->buffer_size;
+ frame_data.buffer_type = HAL_BUFFER_OUTPUT;
+ rc = call_hfi_op(hdev, session_ftb,
+ (void *) inst->session, &frame_data);
+ binfo->buffer_ownership = FIRMWARE;
+ }
+ return 0;
+}
+
static void handle_session_flush(enum command_response cmd, void *data)
{
struct msm_vidc_cb_cmd_done *response = data;
struct msm_vidc_inst *inst;
+ int rc;
if (response) {
inst = (struct msm_vidc_inst *)response->session_id;
if (msm_comm_get_stream_output_mode(inst) ==
HAL_VIDEO_DECODER_SECONDARY) {
mutex_lock(&inst->lock);
validate_output_buffers(inst);
+ if (!inst->in_reconfig) {
+ rc = msm_comm_queue_output_buffers(inst);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "Failed to queue output buffers: %d\n",
+ rc);
+ }
+ }
mutex_unlock(&inst->lock);
}
msm_vidc_queue_v4l2_event(inst, V4L2_EVENT_MSM_VIDC_FLUSH_DONE);
@@ -764,95 +820,131 @@
"Failed to get valid response for session error\n");
}
}
+
+struct sys_err_handler_data {
+ struct msm_vidc_core *core;
+ struct delayed_work work;
+};
+
+
+void hw_sys_error_handler(struct work_struct *work)
+{
+ struct msm_vidc_core *core = NULL;
+ struct hfi_device *hdev = NULL;
+ struct sys_err_handler_data *handler = NULL;
+ int rc = 0;
+
+ handler = container_of(work, struct sys_err_handler_data, work.work);
+ if (!handler || !handler->core || !handler->core->device) {
+ dprintk(VIDC_ERR, "%s - invalid work or core handle\n",
+ __func__);
+ goto exit;
+ }
+
+ core = handler->core;
+ hdev = core->device;
+
+ mutex_lock(&core->sync_lock);
+ /*
+ * Restart the firmware to bring it out of the bad state.
+ */
+ if ((core->state == VIDC_CORE_INVALID) &&
+ hdev->resurrect_fw) {
+ mutex_lock(&core->lock);
+ rc = call_hfi_op(hdev, resurrect_fw,
+ hdev->hfi_device_data);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "%s - resurrect_fw failed: %d\n",
+ __func__, rc);
+ }
+ core->state = VIDC_CORE_LOADED;
+ mutex_unlock(&core->lock);
+ } else {
+ dprintk(VIDC_DBG,
+ "fw unloaded after sys error, no need to resurrect\n");
+ }
+ mutex_unlock(&core->sync_lock);
+
+exit:
+ /* free sys error handler, allocated in handle_sys_error() */
+ kfree(handler);
+}
+
static void handle_sys_error(enum command_response cmd, void *data)
{
struct msm_vidc_cb_cmd_done *response = data;
- struct msm_vidc_inst *inst = NULL ;
struct msm_vidc_core *core = NULL;
+ struct sys_err_handler_data *handler = NULL;
struct hfi_device *hdev = NULL;
+ struct msm_vidc_inst *inst = NULL;
int rc = 0;
subsystem_crashed("venus");
- if (response) {
- core = get_vidc_core(response->device_id);
- dprintk(VIDC_WARN, "SYS_ERROR received for core %p\n", core);
- if (core) {
- mutex_lock(&core->lock);
- core->state = VIDC_CORE_INVALID;
- mutex_unlock(&core->lock);
- mutex_lock(&core->sync_lock);
- list_for_each_entry(inst, &core->instances,
- list) {
- mutex_lock(&inst->lock);
- inst->state = MSM_VIDC_CORE_INVALID;
- if (inst->core)
- hdev = inst->core->device;
- if (hdev && inst->session) {
- dprintk(VIDC_DBG,
- "cleaning up inst: 0x%p", inst);
- rc = call_hfi_op(hdev, session_clean,
- (void *) inst->session);
- if (rc)
- dprintk(VIDC_ERR,
- "Sess clean failed :%p",
- inst);
- }
- inst->session = NULL;
- mutex_unlock(&inst->lock);
- msm_vidc_queue_v4l2_event(inst,
- V4L2_EVENT_MSM_VIDC_SYS_ERROR);
- }
- mutex_unlock(&core->sync_lock);
- } else {
- dprintk(VIDC_ERR,
- "Got SYS_ERR but unable to identify core");
- }
- } else {
+ if (!response) {
dprintk(VIDC_ERR,
"Failed to get valid response for sys error\n");
- }
-}
-
-static void handle_sys_watchdog_timeout(enum command_response cmd, void *data)
-{
- struct msm_vidc_cb_cmd_done *response = data;
- struct msm_vidc_inst *inst;
- struct msm_vidc_core *core = NULL;
- struct hfi_device *hdev = NULL;
- int rc = 0;
- dprintk(VIDC_ERR, "Venus Subsystem crashed\n");
- core = get_vidc_core(response->device_id);
- if (!core) {
- dprintk(VIDC_ERR, "Wrong device_id received\n");
return;
}
- subsystem_crashed("venus");
+
+ core = get_vidc_core(response->device_id);
+ if (!core) {
+ dprintk(VIDC_ERR,
+ "Got SYS_ERR but unable to identify core\n");
+ return;
+ }
+
+ dprintk(VIDC_WARN, "SYS_ERROR %d received for core %p\n", cmd, core);
mutex_lock(&core->lock);
core->state = VIDC_CORE_INVALID;
mutex_unlock(&core->lock);
- mutex_lock(&core->sync_lock);
- list_for_each_entry(inst, &core->instances, list) {
- if (inst) {
- msm_vidc_queue_v4l2_event(inst,
- V4L2_EVENT_MSM_VIDC_SYS_ERROR);
- mutex_lock(&inst->lock);
- inst->state = MSM_VIDC_CORE_INVALID;
- if (inst->core)
- hdev = inst->core->device;
- if (hdev && inst->session) {
- rc = call_hfi_op(hdev, session_clean,
- (void *) inst->session);
- if (rc)
- dprintk(VIDC_ERR,
- "Sess clean failed :%p",
- inst);
- }
- inst->session = NULL;
- mutex_unlock(&inst->lock);
+ /*
+ * 1. Delete each instance session from hfi list
+ * 2. Notify all clients about hardware error.
+ */
+ mutex_lock(&core->sync_lock);
+ list_for_each_entry(inst, &core->instances,
+ list) {
+ mutex_lock(&inst->lock);
+ inst->state = MSM_VIDC_CORE_INVALID;
+ if (inst->core)
+ hdev = inst->core->device;
+ if (hdev && inst->session) {
+ dprintk(VIDC_DBG,
+ "cleaning up inst: 0x%p\n", inst);
+ rc = call_hfi_op(hdev, session_clean,
+ (void *) inst->session);
+ if (rc)
+ dprintk(VIDC_ERR,
+ "Sess clean failed :%p\n",
+ inst);
}
+ inst->session = NULL;
+ mutex_unlock(&inst->lock);
+ msm_vidc_queue_v4l2_event(inst,
+ V4L2_EVENT_MSM_VIDC_SYS_ERROR);
}
mutex_unlock(&core->sync_lock);
+
+
+ handler = kzalloc(sizeof(*handler), GFP_KERNEL);
+ if (!handler) {
+ dprintk(VIDC_ERR,
+ "%s - failed to allocate sys error handler\n",
+ __func__);
+ return;
+ }
+ handler->core = core;
+ INIT_DELAYED_WORK(&handler->work, hw_sys_error_handler);
+
+ /*
+ * Delay the handler by 5 sec to ensure venus has completed
+ * any pending cache operations. Without this delay, we see
+ * a device reset when firmware is unloaded after a sys
+ * error.
+ */
+ schedule_delayed_work(&handler->work, msecs_to_jiffies(5000));
}
static void handle_session_close(enum command_response cmd, void *data)
@@ -1359,7 +1451,7 @@
handle_seq_hdr_done(cmd, data);
break;
case SYS_WATCHDOG_TIMEOUT:
- handle_sys_watchdog_timeout(cmd, data);
+ handle_sys_error(cmd, data);
break;
case SYS_ERROR:
handle_sys_error(cmd, data);
@@ -1539,11 +1631,14 @@
goto fail_scale_bus;
}
- rc = call_hfi_op(hdev, load_fw, hdev->hfi_device_data);
- if (rc) {
- dprintk(VIDC_ERR, "Failed to load video firmware\n");
- goto fail_load_fw;
+ if (core->state < VIDC_CORE_LOADED) {
+ rc = call_hfi_op(hdev, load_fw, hdev->hfi_device_data);
+ if (rc) {
+ dprintk(VIDC_ERR, "Failed to load video firmware\n");
+ goto fail_load_fw;
+ }
}
+
rc = msm_comm_scale_clocks(core);
if (rc) {
dprintk(VIDC_ERR, "Failed to scale clocks: %d\n", rc);
@@ -1592,19 +1687,25 @@
core->id, core->state);
goto core_already_uninited;
}
+
msm_comm_scale_clocks_and_bus(inst);
if (list_empty(&core->instances)) {
- if (core->resources.has_ocmem) {
- if (inst->state != MSM_VIDC_CORE_INVALID)
- msm_comm_unset_ocmem(core);
- call_hfi_op(hdev, free_ocmem, hdev->hfi_device_data);
- }
- dprintk(VIDC_DBG, "Calling vidc_hal_core_release\n");
- rc = call_hfi_op(hdev, core_release, hdev->hfi_device_data);
- if (rc) {
- dprintk(VIDC_ERR, "Failed to release core, id = %d\n",
- core->id);
- goto exit;
+ if (core->state > VIDC_CORE_INIT) {
+ if (core->resources.has_ocmem) {
+ if (inst->state != MSM_VIDC_CORE_INVALID)
+ msm_comm_unset_ocmem(core);
+ call_hfi_op(hdev, free_ocmem,
+ hdev->hfi_device_data);
+ }
+ dprintk(VIDC_DBG, "Calling vidc_hal_core_release\n");
+ rc = call_hfi_op(hdev, core_release,
+ hdev->hfi_device_data);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "Failed to release core, id = %d\n",
+ core->id);
+ goto exit;
+ }
}
mutex_lock(&core->lock);
core->state = VIDC_CORE_UNINIT;
@@ -1615,6 +1716,7 @@
else
msm_comm_unvote_buses(core, DDR_MEM);
}
+
core_already_uninited:
change_inst_state(inst, MSM_VIDC_CORE_UNINIT);
exit:
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.h b/drivers/media/platform/msm/vidc/msm_vidc_common.h
index 07dae8c..da71424 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.h
@@ -32,6 +32,7 @@
int msm_comm_set_scratch_buffers(struct msm_vidc_inst *inst);
int msm_comm_set_persist_buffers(struct msm_vidc_inst *inst);
int msm_comm_set_output_buffers(struct msm_vidc_inst *inst);
+int msm_comm_queue_output_buffers(struct msm_vidc_inst *inst);
int msm_comm_qbuf(struct vb2_buffer *vb);
void msm_comm_scale_clocks_and_bus(struct msm_vidc_inst *inst);
int msm_comm_flush(struct msm_vidc_inst *inst, u32 flags);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index 867b894..e9bf91d 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -68,6 +68,7 @@
enum vidc_core_state {
VIDC_CORE_UNINIT = 0,
+ VIDC_CORE_LOADED,
VIDC_CORE_INIT,
VIDC_CORE_INIT_DONE,
VIDC_CORE_INVALID
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 2266fd4..0092bcb 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -80,6 +80,17 @@
u32 spare; /*reserved for future, should be zero*/
};
+#define VENUS_SET_STATE(__device, __state) do { \
+ mutex_lock(&(__device)->write_lock); \
+ mutex_lock(&(__device)->read_lock); \
+ (__device)->state = __state; \
+ mutex_unlock(&(__device)->write_lock); \
+ mutex_unlock(&(__device)->read_lock); \
+} while (0)
+
+#define IS_VENUS_IN_VALID_STATE(__device) (\
+ (__device)->state != VENUS_STATE_DEINIT)
+
+static int venus_hfi_power_enable(void *dev);
static inline int venus_hfi_clk_gating_off(struct venus_hfi_device *device);
@@ -1387,11 +1398,24 @@
}
WARN(!mutex_is_locked(&device->write_lock),
"Cmd queue write lock must be acquired");
+ if (!IS_VENUS_IN_VALID_STATE(device)) {
+ dprintk(VIDC_ERR, "%s - fw not in init state\n", __func__);
+ result = -EINVAL;
+ goto err_q_null;
+ }
+
q_info = &device->iface_queues[VIDC_IFACEQ_CMDQ_IDX];
if (!q_info) {
dprintk(VIDC_ERR, "cannot write to shared Q's");
goto err_q_null;
}
+
+ if (!q_info->q_array.align_virtual_addr) {
+ dprintk(VIDC_ERR, "cannot write to shared CMD Q's\n");
+ result = -ENODATA;
+ goto err_q_null;
+ }
+
if (!venus_hfi_write_queue(q_info, (u8 *)pkt, &rx_req_is_set)) {
WARN(!mutex_is_locked(&device->clk_pwr_lock),
"Clock/power lock must be acquired");
@@ -1431,12 +1455,19 @@
return -EINVAL;
}
mutex_lock(&device->read_lock);
+ if (!IS_VENUS_IN_VALID_STATE(device)) {
+ dprintk(VIDC_ERR, "%s - fw not in init state\n", __func__);
+ rc = -EINVAL;
+ goto read_error_null;
+ }
+
if (device->iface_queues[VIDC_IFACEQ_MSGQ_IDX].
q_array.align_virtual_addr == 0) {
dprintk(VIDC_ERR, "cannot read from shared MSG Q's");
rc = -ENODATA;
goto read_error_null;
}
+
q_info = &device->iface_queues[VIDC_IFACEQ_MSGQ_IDX];
if (!venus_hfi_read_queue(q_info, (u8 *)pkt, &tx_req_is_set)) {
mutex_lock(&device->clk_pwr_lock);
@@ -1475,6 +1506,11 @@
return -EINVAL;
}
mutex_lock(&device->read_lock);
+ if (!IS_VENUS_IN_VALID_STATE(device)) {
+ dprintk(VIDC_ERR, "%s - fw not in init state\n", __func__);
+ rc = -EINVAL;
+ goto dbg_error_null;
+ }
if (device->iface_queues[VIDC_IFACEQ_DBGQ_IDX].
q_array.align_virtual_addr == 0) {
dprintk(VIDC_ERR, "cannot read from shared DBG Q's");
@@ -1560,15 +1596,19 @@
device->iface_queues[i].q_array.align_virtual_addr = NULL;
device->iface_queues[i].q_array.align_device_addr = NULL;
}
+ device->iface_q_table.mem_data = NULL;
device->iface_q_table.align_virtual_addr = NULL;
device->iface_q_table.align_device_addr = NULL;
+ device->qdss.mem_data = NULL;
device->qdss.align_virtual_addr = NULL;
device->qdss.align_device_addr = NULL;
+ device->sfr.mem_data = NULL;
device->sfr.align_virtual_addr = NULL;
device->sfr.align_device_addr = NULL;
+ device->mem_addr.mem_data = NULL;
device->mem_addr.align_virtual_addr = NULL;
device->mem_addr.align_device_addr = NULL;
@@ -1795,6 +1835,8 @@
return -ENODEV;
}
+ VENUS_SET_STATE(dev, VENUS_STATE_INIT);
+
dev->intr_status = 0;
INIT_LIST_HEAD(&dev->sess_head);
venus_hfi_set_registers(dev);
@@ -1847,6 +1889,7 @@
return rc;
err_core_init:
+ VENUS_SET_STATE(dev, VENUS_STATE_DEINIT);
disable_irq_nosync(dev->hal_data->irq);
return rc;
}
@@ -1861,6 +1904,7 @@
dprintk(VIDC_ERR, "invalid device");
return -ENODEV;
}
+
if (dev->hal_client) {
mutex_lock(&dev->clk_pwr_lock);
rc = venus_hfi_clk_gating_off(device);
@@ -1877,6 +1921,8 @@
dev->intr_status = 0;
mutex_unlock(&dev->clk_pwr_lock);
}
+ VENUS_SET_STATE(dev, VENUS_STATE_DEINIT);
+
dprintk(VIDC_INFO, "HAL exited\n");
return 0;
}
@@ -3538,6 +3584,50 @@
}
}
+static int venus_hfi_resurrect_fw(void *dev)
+{
+ struct venus_hfi_device *device = dev;
+ int rc = 0;
+
+ if (!device) {
+ dprintk(VIDC_ERR, "%s Invalid paramter: %p\n",
+ __func__, device);
+ return -EINVAL;
+ }
+
+ rc = venus_hfi_free_ocmem(device);
+ if (rc)
+ dprintk(VIDC_WARN, "%s - failed to free ocmem\n", __func__);
+
+ rc = venus_hfi_core_release(device);
+ if (rc) {
+ dprintk(VIDC_ERR, "%s - failed to release venus core rc = %d\n",
+ __func__, rc);
+ goto exit;
+ }
+
+ dprintk(VIDC_ERR, "praying for firmware resurrection\n");
+
+ venus_hfi_unload_fw(device);
+
+ rc = venus_hfi_scale_buses(device, DDR_MEM);
+ if (rc) {
+ dprintk(VIDC_ERR, "Failed to scale buses");
+ goto exit;
+ }
+
+ rc = venus_hfi_load_fw(device);
+ if (rc) {
+ dprintk(VIDC_ERR, "%s - failed to load venus fw rc = %d\n",
+ __func__, rc);
+ goto exit;
+ }
+
+ dprintk(VIDC_ERR, "Hurray!! firmware has restarted\n");
+exit:
+ return rc;
+}
+
static int venus_hfi_get_fw_info(void *dev, enum fw_info info)
{
int rc = 0;
@@ -3833,6 +3923,7 @@
hdev->iommu_get_domain_partition = venus_hfi_iommu_get_domain_partition;
hdev->load_fw = venus_hfi_load_fw;
hdev->unload_fw = venus_hfi_unload_fw;
+ hdev->resurrect_fw = venus_hfi_resurrect_fw;
hdev->get_fw_info = venus_hfi_get_fw_info;
hdev->get_info = venus_hfi_get_info;
hdev->get_stride_scanline = venus_hfi_get_stride_scanline;
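
The new resurrect_fw hook is reached through the same hfi_device ops table as load_fw/unload_fw, so the common layer can drive recovery without knowing venus_hfi internals. A minimal sketch of a caller (hypothetical helper, not part of the patch; the actual recovery entry point is the delayed hw_sys_error_handler work added above), using only the ops wired up in this diff:

	/* Hypothetical sketch only. */
	static void msm_comm_try_fw_recovery(struct msm_vidc_core *core)
	{
		struct hfi_device *hdev = core->device;
		int rc;

		rc = call_hfi_op(hdev, resurrect_fw, hdev->hfi_device_data);
		if (rc)
			dprintk(VIDC_ERR, "FW recovery failed: %d\n", rc);
	}
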
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.h b/drivers/media/platform/msm/vidc/venus_hfi.h
index d816df0..1c1ee59 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.h
+++ b/drivers/media/platform/msm/vidc/venus_hfi.h
@@ -177,6 +177,11 @@
struct on_chip_mem ocmem;
};
+enum venus_hfi_state {
+ VENUS_STATE_DEINIT = 1,
+ VENUS_STATE_INIT,
+};
+
struct venus_hfi_device {
struct list_head list;
struct list_head sess_head;
@@ -213,6 +218,7 @@
struct venus_resources resources;
struct msm_vidc_platform_resources *res;
struct regulator *gdsc;
+ enum venus_hfi_state state;
};
void venus_hfi_delete_device(void *device);
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index 614c78f..309164a 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -1164,6 +1164,7 @@
int *domain_num, int *partition_num);
int (*load_fw)(void *dev);
void (*unload_fw)(void *dev);
+ int (*resurrect_fw)(void *dev);
int (*get_fw_info)(void *dev, enum fw_info info);
int (*get_info) (void *dev, enum dev_info info);
int (*get_stride_scanline)(int color_fmt, int width,
diff --git a/drivers/slimbus/slim-msm-ngd.c b/drivers/slimbus/slim-msm-ngd.c
index 1247808..0035349 100644
--- a/drivers/slimbus/slim-msm-ngd.c
+++ b/drivers/slimbus/slim-msm-ngd.c
@@ -82,7 +82,7 @@
};
static int ngd_slim_runtime_resume(struct device *device);
-static int ngd_slim_power_up(struct msm_slim_ctrl *dev);
+static int ngd_slim_power_up(struct msm_slim_ctrl *dev, bool mdm_restart);
static irqreturn_t ngd_slim_interrupt(int irq, void *d)
{
@@ -97,7 +97,7 @@
writel_relaxed(stat, ngd + NGD_INT_CLR);
dev->err = -EIO;
- dev_err(dev->dev, "NGD interrupt error:0x%x, err:%d", stat,
+ SLIM_WARN(dev, "NGD interrupt error:0x%x, err:%d\n", stat,
dev->err);
/* Guarantee that error interrupts are cleared */
mb();
@@ -119,7 +119,7 @@
for (i = 1; i < ((len + 3) >> 2); i++) {
rx_buf[i] = readl_relaxed(ngd + NGD_RX_MSG +
(4 * i));
- dev_dbg(dev->dev, "REG-RX data: %x\n", rx_buf[i]);
+ SLIM_DBG(dev, "REG-RX data: %x\n", rx_buf[i]);
}
msm_slim_rx_enqueue(dev, rx_buf, len);
writel_relaxed(NGD_INT_RX_MSG_RCVD,
@@ -130,8 +130,7 @@
*/
mb();
if (dev->use_rx_msgqs == MSM_MSGQ_ENABLED)
- dev_err(dev->dev,
- "direct message received even with RX MSGQs");
+ SLIM_WARN(dev, "direct msg rcvd with RX MSGQs\n");
else
complete(&dev->rx_msgq_notify);
}
@@ -140,13 +139,13 @@
/* Guarantee RECONFIG DONE interrupt is cleared */
mb();
/* In satellite mode, just log the reconfig done IRQ */
- dev_dbg(dev->dev, "reconfig done IRQ for NGD");
+ SLIM_DBG(dev, "reconfig done IRQ for NGD\n");
}
if (stat & NGD_INT_IE_VE_CHG) {
writel_relaxed(NGD_INT_IE_VE_CHG, ngd + NGD_INT_CLR);
/* Guarantee IE VE change interrupt is cleared */
mb();
- dev_err(dev->dev, "NGD IE VE change");
+ SLIM_DBG(dev, "NGD IE VE change\n");
}
pstat = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_ST_EEn, dev->ver));
@@ -161,7 +160,7 @@
struct msm_slim_qmi *qmi = container_of(n, struct msm_slim_qmi, nb);
struct msm_slim_ctrl *dev =
container_of(qmi, struct msm_slim_ctrl, qmi);
- pr_info("Slimbus QMI NGD CB received event:%ld", code);
+ SLIM_INFO(dev, "Slimbus QMI NGD CB received event:%ld\n", code);
switch (code) {
case QMI_SERVER_ARRIVE:
schedule_work(&qmi->ssr_up);
@@ -183,32 +182,54 @@
static int mdm_ssr_notify_cb(struct notifier_block *n, unsigned long code,
void *_cmd)
{
+ void __iomem *ngd;
struct msm_slim_mdm *mdm = container_of(n, struct msm_slim_mdm, nb);
struct msm_slim_ctrl *dev = container_of(mdm, struct msm_slim_ctrl,
mdm);
- int ret;
+ struct slim_controller *ctrl = &dev->ctrl;
+ u32 laddr;
+ struct slim_device *sbdev;
switch (code) {
case SUBSYS_BEFORE_SHUTDOWN:
- /* make sure runtime-pm doesn't suspend during modem SSR */
- pm_runtime_get_noresume(dev->dev);
+ SLIM_INFO(dev, "SLIM %lu external_modem SSR notify cb\n", code);
+ /* vote for runtime-pm so that ADSP doesn't go down */
+ msm_slim_get_ctrl(dev);
+ /*
+ * checking framer here will wake-up ADSP and may avoid framer
+ * handover later
+ */
+ msm_slim_qmi_check_framer_request(dev);
+ dev->mdm.state = MSM_CTRL_DOWN;
+ msm_slim_put_ctrl(dev);
break;
case SUBSYS_AFTER_POWERUP:
- ret = msm_slim_qmi_check_framer_request(dev);
- dev_err(dev->dev,
- "%s:SLIM %lu external_modem SSR notify cb, ret %d",
- __func__, code, ret);
- /*
- * Next codec transaction will reinit the HW
- * if it was suspended
- */
- if (pm_runtime_suspended(dev->dev) ||
- dev->state >= MSM_CTRL_ASLEEP) {
- break;
- } else {
- ngd_slim_power_up(dev);
- msm_slim_put_ctrl(dev);
+ if (dev->mdm.state != MSM_CTRL_DOWN)
+ return NOTIFY_DONE;
+ SLIM_INFO(dev,
+ "SLIM %lu external_modem SSR notify cb\n", code);
+ /* vote for runtime-pm so that ADSP doesn't go down */
+ msm_slim_get_ctrl(dev);
+ msm_slim_qmi_check_framer_request(dev);
+ /* If NGD enumeration is lost, we will need to power up again */
+ ngd = dev->base + NGD_BASE(dev->ctrl.nr, dev->ver);
+ laddr = readl_relaxed(ngd + NGD_STATUS);
+ if (!(laddr & NGD_LADDR)) {
+ /* runtime-pm state should be consistent with HW */
+ pm_runtime_disable(dev->dev);
+ pm_runtime_set_suspended(dev->dev);
+ dev->state = MSM_CTRL_DOWN;
+ SLIM_INFO(dev,
+ "SLIM MDM SSR (active framer on MDM) dev-down\n");
+ list_for_each_entry(sbdev, &ctrl->devs, dev_list)
+ slim_report_absent(sbdev);
+ ngd_slim_power_up(dev, true);
+ pm_runtime_set_active(dev->dev);
+ pm_runtime_enable(dev->dev);
}
+ dev->mdm.state = MSM_CTRL_AWAKE;
+ msm_slim_put_ctrl(dev);
+ break;
default:
break;
}
@@ -302,7 +323,7 @@
if (dev->state == MSM_CTRL_DOWN) {
u8 mc = (u8)txn->mc;
int timeout;
- dev_err(dev->dev, "ADSP slimbus not up yet");
+ SLIM_INFO(dev, "ADSP slimbus not up yet\n");
/*
* Messages related to data channel management can't
* wait since they are holding reconfiguration lock.
@@ -362,7 +383,7 @@
mutex_lock(&dev->tx_lock);
if (report_sat == false && dev->state != MSM_CTRL_AWAKE) {
- dev_err(dev->dev, "controller not ready");
+ SLIM_ERR(dev, "controller not ready\n");
mutex_unlock(&dev->tx_lock);
msm_slim_put_ctrl(dev);
return -EREMOTEIO;
@@ -372,6 +393,14 @@
txn->mc == SLIM_MSG_MC_CONNECT_SINK ||
txn->mc == SLIM_MSG_MC_DISCONNECT_PORT)) {
int i = 0;
+ if (txn->mc != SLIM_MSG_MC_DISCONNECT_PORT)
+ SLIM_INFO(dev,
+ "Connect port: laddr 0x%x port_num %d chan_num %d\n",
+ txn->la, txn->wbuf[0], txn->wbuf[1]);
+ else
+ SLIM_INFO(dev,
+ "Disconnect port: laddr 0x%x port_num %d\n",
+ txn->la, txn->wbuf[0]);
txn->mt = SLIM_MSG_MT_DEST_REFERRED_USER;
if (txn->mc == SLIM_MSG_MC_CONNECT_SOURCE)
txn->mc = SLIM_USR_MC_CONNECT_SRC;
@@ -388,10 +417,11 @@
mutex_unlock(&dev->tx_lock);
ret = dev->ctrl.get_laddr(&dev->ctrl, ea, 6,
&dev->pgdla);
- pr_debug("SLIM PGD LA:0x%x, ret:%d", dev->pgdla,
- ret);
+ SLIM_DBG(dev, "SLIM PGD LA:0x%x, ret:%d\n",
+ dev->pgdla, ret);
if (ret) {
- pr_err("Incorrect SLIM-PGD EAPC:0x%x",
+ SLIM_ERR(dev,
+ "Incorrect SLIM-PGD EAPC:0x%x\n",
dev->pdata.eapc);
return ret;
}
@@ -406,7 +436,8 @@
wbuf[i++] = txn->wbuf[1];
ret = ngd_get_tid(ctrl, txn, &wbuf[i++], &done);
if (ret) {
- pr_err("TID for connect/disconnect fail:%d", ret);
+ SLIM_ERR(dev, "TID for connect/disconnect fail:%d\n",
+ ret);
goto ngd_xfer_err;
}
txn->len = i;
@@ -416,7 +447,7 @@
txn->rl--;
pbuf = msm_get_msg_buf(dev, txn->rl);
if (!pbuf) {
- dev_err(dev->dev, "Message buffer unavailable");
+ SLIM_ERR(dev, "Message buffer unavailable\n");
ret = -ENOMEM;
goto ngd_xfer_err;
}
@@ -470,7 +501,7 @@
return 0;
}
if (dev->err) {
- dev_err(dev->dev, "pipe-port connect err:%d", dev->err);
+ SLIM_ERR(dev, "pipe-port connect err:%d\n", dev->err);
goto ngd_xfer_err;
}
/* Add port-base to port number if this is manager side port */
@@ -509,7 +540,7 @@
u32 conf, stat, rx_msgq, int_stat, int_en, int_clr;
void __iomem *ngd = dev->base + NGD_BASE(dev->ctrl.nr,
dev->ver);
- dev_err(dev->dev, "TX failed :MC:0x%x,mt:0x%x, ret:%d, ver:%d",
+ SLIM_WARN(dev, "TX failed :MC:0x%x,mt:0x%x, ret:%d, ver:%d\n",
txn_mc, txn_mt, ret, dev->ver);
conf = readl_relaxed(ngd);
stat = readl_relaxed(ngd + NGD_STATUS);
@@ -518,9 +549,10 @@
int_en = readl_relaxed(ngd + NGD_INT_EN);
int_clr = readl_relaxed(ngd + NGD_INT_CLR);
- pr_err("conf:0x%x,stat:0x%x,rxmsgq:0x%x", conf, stat, rx_msgq);
- pr_err("int_stat:0x%x,int_en:0x%x,int_cll:0x%x", int_stat,
- int_en, int_clr);
+ SLIM_WARN(dev, "conf:0x%x,stat:0x%x,rxmsgq:0x%x\n",
+ conf, stat, rx_msgq);
+ SLIM_WARN(dev, "int_stat:0x%x,int_en:0x%x,int_cll:0x%x\n",
+ int_stat, int_en, int_clr);
} else if (txn_mt == SLIM_MSG_MT_DEST_REFERRED_USER &&
(txn_mc == SLIM_USR_MC_CONNECT_SRC ||
txn_mc == SLIM_USR_MC_CONNECT_SINK ||
@@ -534,8 +566,9 @@
else
ret = txn->ec;
if (ret) {
- pr_err("connect/disconnect:0x%x,tid:%d err:%d", txn->mc,
- txn->tid, ret);
+ SLIM_INFO(dev,
+ "connect/disconnect:0x%x,tid:%d err:%d\n",
+ txn->mc, txn->tid, ret);
mutex_lock(&ctrl->m_ctrl);
ctrl->txnt[txn->tid] = NULL;
mutex_unlock(&ctrl->m_ctrl);
@@ -590,6 +623,7 @@
static int ngd_xferandwait_ack(struct slim_controller *ctrl,
struct slim_msg_txn *txn)
{
+ struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
int ret = ngd_xfer_msg(ctrl, txn);
if (!ret) {
int timeout;
@@ -602,8 +636,8 @@
if (ret) {
if (ret != -EREMOTEIO || txn->mc != SLIM_USR_MC_CHAN_CTRL)
- pr_err("master msg:0x%x,tid:%d ret:%d", txn->mc,
- txn->tid, ret);
+ SLIM_ERR(dev, "master msg:0x%x,tid:%d ret:%d\n",
+ txn->mc, txn->tid, ret);
mutex_lock(&ctrl->m_ctrl);
ctrl->txnt[txn->tid] = NULL;
mutex_unlock(&ctrl->m_ctrl);
@@ -614,12 +648,13 @@
static int ngd_allocbw(struct slim_device *sb, int *subfrmc, int *clkgear)
{
- int ret;
+ int ret = 0, num_chan = 0;
struct slim_pending_ch *pch;
struct slim_msg_txn txn;
struct slim_controller *ctrl = sb->ctrl;
DECLARE_COMPLETION_ONSTACK(done);
u8 wbuf[SLIM_MSGQ_BUF_LEN];
+ struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
*clkgear = ctrl->clkgear;
*subfrmc = 0;
@@ -632,7 +667,7 @@
txn.rbuf = NULL;
if (ctrl->sched.msgsl != ctrl->sched.pending_msgsl) {
- pr_debug("slim reserve BW for messaging: req: %d",
+ SLIM_DBG(dev, "slim reserve BW for messaging: req: %d\n",
ctrl->sched.pending_msgsl);
txn.mc = SLIM_USR_MC_REQ_BW;
wbuf[txn.len++] = ((sb->laddr & 0x1f) |
@@ -663,7 +698,7 @@
struct slim_ich *slc;
slc = &ctrl->chans[pch->chan];
if (!slc) {
- pr_err("no channel in define?");
+ SLIM_WARN(dev, "no channel in define?\n");
return -ENXIO;
}
if (txn.len == 0) {
@@ -678,12 +713,14 @@
wbuf[txn.len++] = slc->prrate;
ret = ngd_get_tid(ctrl, &txn, &wbuf[txn.len++], &done);
if (ret) {
- pr_err("no tid for channel define?");
+ SLIM_WARN(dev, "no tid for channel define?\n");
return -ENXIO;
}
}
+ num_chan++;
wbuf[txn.len++] = slc->chan;
- pr_debug("slim define chan:%d, tid:0x%x", slc->chan, txn.tid);
+ SLIM_INFO(dev, "slim activate chan:%d, laddr: 0x%x\n",
+ slc->chan, sb->laddr);
}
if (txn.len) {
txn.mc = SLIM_USR_MC_DEF_ACT_CHAN;
@@ -708,7 +745,7 @@
struct slim_ich *slc;
slc = &ctrl->chans[pch->chan];
if (!slc) {
- pr_err("no channel in removal?");
+ SLIM_WARN(dev, "no channel in removal?\n");
return -ENXIO;
}
if (txn.len == 0) {
@@ -717,12 +754,13 @@
(sb->laddr & 0x1f);
ret = ngd_get_tid(ctrl, &txn, &wbuf[txn.len++], &done);
if (ret) {
- pr_err("no tid for channel define?");
+ SLIM_WARN(dev, "no tid for channel define?\n");
return -ENXIO;
}
}
wbuf[txn.len++] = slc->chan;
- pr_debug("slim remove chan:%d, tid:0x%x", slc->chan, txn.tid);
+ SLIM_INFO(dev, "slim remove chan:%d, laddr: 0x%x\n",
+ slc->chan, sb->laddr);
}
if (txn.len) {
txn.mc = SLIM_USR_MC_CHAN_CTRL;
@@ -830,7 +868,7 @@
wbuf[3] = SAT_MSG_PROT;
txn.wbuf = wbuf;
txn.len = 4;
- pr_info("SLIM SAT: Received master capability");
+ SLIM_INFO(dev, "SLIM SAT: Rcvd master capability\n");
if (dev->state >= MSM_CTRL_ASLEEP) {
ngd_slim_setup_msg_path(dev);
if (dev->use_rx_msgqs == MSM_MSGQ_ENABLED)
@@ -847,18 +885,20 @@
ret = ngd_xfer_msg(&dev->ctrl, &txn);
if (!ret) {
enum msm_ctrl_state prev_state = dev->state;
- pr_info("SLIM SAT: capability exchange successful");
+ SLIM_INFO(dev,
+ "SLIM SAT: capability exchange successful\n");
dev->state = MSM_CTRL_AWAKE;
if (prev_state >= MSM_CTRL_ASLEEP)
complete(&dev->reconf);
else
- pr_err("SLIM: unexpected capability, state:%d",
- prev_state);
+ SLIM_ERR(dev,
+ "SLIM: unexpected capability, state:%d\n",
+ prev_state);
/* ADSP SSR, send device_up notifications */
if (prev_state == MSM_CTRL_DOWN)
complete(&dev->qmi.slave_notify);
} else if (ret == -EIO) {
- pr_info("capability message NACKed, retrying");
+ SLIM_WARN(dev, "capability message NACKed, retrying\n");
if (retries < INIT_MX_RETRIES) {
msleep(DEF_RETRY_MS);
retries++;
@@ -881,8 +921,9 @@
mutex_lock(&dev->ctrl.m_ctrl);
txn = dev->ctrl.txnt[buf[3]];
if (!txn) {
- pr_err("LADDR response after timeout, tid:0x%x",
- buf[3]);
+ SLIM_WARN(dev,
+ "LADDR response after timeout, tid:0x%x\n",
+ buf[3]);
mutex_unlock(&dev->ctrl.m_ctrl);
return;
}
@@ -898,7 +939,7 @@
mutex_lock(&dev->ctrl.m_ctrl);
txn = dev->ctrl.txnt[buf[3]];
if (!txn) {
- pr_err("ACK received after timeout, tid:0x%x",
+ SLIM_WARN(dev, "ACK received after timeout, tid:0x%x\n",
buf[3]);
mutex_unlock(&dev->ctrl.m_ctrl);
return;
@@ -906,7 +947,7 @@
dev_dbg(dev->dev, "got response:tid:%d, response:0x%x",
(int)buf[3], buf[4]);
if (!(buf[4] & MSM_SAT_SUCCSS)) {
- dev_err(dev->dev, "TID:%d, NACK code:0x%x", (int)buf[3],
+ SLIM_WARN(dev, "TID:%d, NACK code:0x%x\n", (int)buf[3],
buf[4]);
txn->ec = -EIO;
}
@@ -916,7 +957,7 @@
}
}
-static int ngd_slim_power_up(struct msm_slim_ctrl *dev)
+static int ngd_slim_power_up(struct msm_slim_ctrl *dev, bool mdm_restart)
{
void __iomem *ngd;
int timeout, ret = 0;
@@ -927,18 +968,20 @@
NGD_INT_IE_VE_CHG | NGD_INT_DEV_ERR |
NGD_INT_TX_MSG_SENT | NGD_INT_RX_MSG_RCVD);
- if (cur_state == MSM_CTRL_DOWN) {
+ if (!mdm_restart && cur_state == MSM_CTRL_DOWN) {
int timeout = wait_for_completion_timeout(&dev->qmi.qmi_comp,
HZ);
if (!timeout)
- pr_err("slimbus QMI init timed out");
+ SLIM_ERR(dev, "slimbus QMI init timed out\n");
}
/* No need to vote if contorller is not in low power mode */
- if (cur_state == MSM_CTRL_DOWN || cur_state == MSM_CTRL_ASLEEP) {
+ if (!mdm_restart &&
+ (cur_state == MSM_CTRL_DOWN || cur_state == MSM_CTRL_ASLEEP)) {
ret = msm_slim_qmi_power_request(dev, true);
if (ret) {
- pr_err("SLIM QMI power request failed:%d", ret);
+ SLIM_ERR(dev, "SLIM QMI power request failed:%d\n",
+ ret);
return ret;
}
}
@@ -955,7 +998,7 @@
* For example, modem restarted when playback was active
*/
if (cur_state == MSM_CTRL_AWAKE) {
- pr_err("SLIM MDM restart: ADSP active framer:NO OP");
+ SLIM_INFO(dev, "Subsys restart: ADSP active framer\n");
return 0;
}
/*
@@ -964,24 +1007,27 @@
*/
ngd_slim_setup_msg_path(dev);
return 0;
- } else if (cur_state == MSM_CTRL_ASLEEP) {
- pr_debug("ADSP P.C. CTRL state:%d NGD not enumerated:0x%x",
- dev->state, laddr);
- } else if (cur_state == MSM_CTRL_IDLE || cur_state == MSM_CTRL_AWAKE) {
+ }
+
+ if (mdm_restart) {
/*
- * external MDM SSR when only voice call is in progress.
+ * external MDM SSR when MDM is active framer
* ADSP will reset slimbus HW. disconnect BAM pipes so that
* they can be connected after capability message is received.
* Set device state to ASLEEP to be synchronous with the HW
*/
- pr_err("SLIM MDM restart: MDM active framer: reinit HW");
- dev->state = MSM_CTRL_ASLEEP;
- msm_slim_disconnect_endp(dev, &dev->rx_msgq,
- &dev->use_rx_msgqs);
- msm_slim_disconnect_endp(dev, &dev->tx_msgq,
- &dev->use_tx_msgqs);
+ /* treat the current state as DOWN */
+ cur_state = MSM_CTRL_DOWN;
+ SLIM_INFO(dev,
+ "SLIM MDM restart: MDM active framer: reinit HW\n");
+ /* mark BAM pipes down; they are disconnected and reconnected below */
+ if (dev->use_rx_msgqs == MSM_MSGQ_ENABLED)
+ dev->use_rx_msgqs = MSM_MSGQ_DOWN;
+ if (dev->use_tx_msgqs == MSM_MSGQ_ENABLED)
+ dev->use_tx_msgqs = MSM_MSGQ_DOWN;
+ dev->state = MSM_CTRL_DOWN;
}
- /* ADSP SSR scenario, need to disconnect pipe before connecting */
+ /* SSR scenario, need to disconnect pipe before connecting */
if (dev->use_rx_msgqs == MSM_MSGQ_DOWN) {
struct msm_slim_endp *endpoint = &dev->rx_msgq;
sps_disconnect(endpoint->sps);
@@ -1010,11 +1056,14 @@
timeout = wait_for_completion_timeout(&dev->reconf, HZ);
if (!timeout) {
- pr_err("Failed to receive master capability");
+ SLIM_ERR(dev, "Failed to receive master capability\n");
return -ETIMEDOUT;
}
- if (cur_state == MSM_CTRL_DOWN)
+ if (cur_state == MSM_CTRL_DOWN) {
complete(&dev->ctrl_up);
+ /* Resetting the log level */
+ SLIM_RST_LOGLVL(dev);
+ }
return 0;
}
@@ -1038,7 +1087,7 @@
* framework state
*/
if (ret)
- ngd_slim_power_up(dev);
+ ngd_slim_power_up(dev, false);
if (!pm_runtime_enabled(dev->dev) ||
!pm_runtime_suspended(dev->dev))
ngd_slim_runtime_resume(dev->dev);
@@ -1047,7 +1096,7 @@
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put(dev->dev);
} else
- dev_err(dev->dev, "qmi init fail, ret:%d, state:%d",
+ SLIM_ERR(dev, "qmi init fail, ret:%d, state:%d\n",
ret, dev->state);
} else {
msm_slim_qmi_exit(dev);
@@ -1059,7 +1108,7 @@
static int ngd_clk_pause_wakeup(struct slim_controller *ctrl)
{
struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
- return ngd_slim_power_up(dev);
+ return ngd_slim_power_up(dev, false);
}
static int ngd_slim_rx_msgq_thread(void *data)
@@ -1083,7 +1132,7 @@
}
ret = msm_slim_rx_msgq_get(dev, buffer, index);
if (ret) {
- dev_err(dev->dev, "rx_msgq_get() failed 0x%x\n", ret);
+ SLIM_ERR(dev, "rx_msgq_get() failed 0x%x\n", ret);
continue;
}
@@ -1168,7 +1217,7 @@
/* device up should be called again after SSR */
list_for_each_entry(sbdev, &ctrl->devs, dev_list)
slim_report_absent(sbdev);
- pr_info("SLIM ADSP SSR (DOWN) done");
+ SLIM_INFO(dev, "SLIM ADSP SSR (DOWN) done\n");
}
static void ngd_adsp_up(struct work_struct *work)
@@ -1180,6 +1229,28 @@
ngd_slim_enable(dev, true);
}
+static ssize_t show_mask(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(device);
+ struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
+ return snprintf(buf, PAGE_SIZE, "%u\n", dev->ipc_log_mask);
+}
+
+static ssize_t set_mask(struct device *device, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(device);
+ struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
+
+ dev->ipc_log_mask = buf[0] - '0';
+ if (dev->ipc_log_mask > DBG_LEV)
+ dev->ipc_log_mask = DBG_LEV;
+ return count;
+}
+
+static DEVICE_ATTR(debug_mask, S_IRUGO | S_IWUSR, show_mask, set_mask);
+
static int __devinit ngd_slim_probe(struct platform_device *pdev)
{
struct msm_slim_ctrl *dev;
@@ -1189,6 +1260,7 @@
struct resource *irq, *bam_irq;
bool rxreg_access = false;
bool slim_mdm = false;
+ const char *ext_modem_id = NULL;
slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"slimbus_physical");
@@ -1223,6 +1295,26 @@
dev->dev = &pdev->dev;
platform_set_drvdata(pdev, dev);
slim_set_ctrldata(&dev->ctrl, dev);
+
+ /* Create IPC log context */
+ dev->ipc_slimbus_log = ipc_log_context_create(IPC_SLIMBUS_LOG_PAGES,
+ dev_name(dev->dev));
+ if (!dev->ipc_slimbus_log)
+ dev_err(&pdev->dev, "error creating ipc_logging context\n");
+ else {
+ /* Initialize the log mask */
+ dev->ipc_log_mask = INFO_LEV;
+ dev->default_ipc_log_mask = INFO_LEV;
+ SLIM_INFO(dev, "start logging for slim dev %s\n",
+ dev_name(dev->dev));
+ }
+ ret = sysfs_create_file(&dev->dev->kobj, &dev_attr_debug_mask.attr);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to create dev. attr\n");
+ dev->sysfs_created = false;
+ } else
+ dev->sysfs_created = true;
+
dev->base = ioremap(slim_mem->start, resource_size(slim_mem));
if (!dev->base) {
dev_err(&pdev->dev, "IOremap failed\n");
@@ -1249,8 +1341,10 @@
&dev->pdata.apps_pipes);
of_property_read_u32(pdev->dev.of_node, "qcom,ea-pc",
&dev->pdata.eapc);
- slim_mdm = of_property_read_bool(pdev->dev.of_node,
- "qcom,slim-mdm");
+ ret = of_property_read_string(pdev->dev.of_node,
+ "qcom,slim-mdm", &ext_modem_id);
+ if (!ret)
+ slim_mdm = true;
} else {
dev->ctrl.nr = pdev->id;
}
@@ -1325,7 +1419,7 @@
if (slim_mdm) {
dev->mdm.nb.notifier_call = mdm_ssr_notify_cb;
- dev->mdm.ssr = subsys_notif_register_notifier("external_modem",
+ dev->mdm.ssr = subsys_notif_register_notifier(ext_modem_id,
&dev->mdm.nb);
if (IS_ERR_OR_NULL(dev->mdm.ssr))
dev_err(dev->dev,
@@ -1363,7 +1457,7 @@
dev_err(dev->dev, "Failed to start notifier thread:%d\n", ret);
goto err_notify_thread_create_failed;
}
- dev_dbg(dev->dev, "NGD SB controller is up!\n");
+ SLIM_INFO(dev, "NGD SB controller is up!\n");
return 0;
err_notify_thread_create_failed:
@@ -1381,6 +1475,9 @@
err_ioremap_bam_failed:
iounmap(dev->base);
err_ioremap_failed:
+ if (dev->sysfs_created)
+ sysfs_remove_file(&dev->dev->kobj,
+ &dev_attr_debug_mask.attr);
kfree(dev);
return ret;
}
@@ -1389,6 +1486,9 @@
{
struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
ngd_slim_enable(dev, false);
+ if (dev->sysfs_created)
+ sysfs_remove_file(&dev->dev->kobj,
+ &dev_attr_debug_mask.attr);
qmi_svc_event_notifier_unregister(SLIMBUS_QMI_SVC_ID,
SLIMBUS_QMI_SVC_V1,
SLIMBUS_QMI_INS_ID, &dev->qmi.nb);
@@ -1434,10 +1534,11 @@
if (dev->state != MSM_CTRL_DOWN)
dev->state = MSM_CTRL_ASLEEP;
else
- dev_err(device, "HW wakeup attempt during SSR");
+ SLIM_WARN(dev, "HW wakeup attempt during SSR\n");
} else {
dev->state = MSM_CTRL_AWAKE;
}
+ SLIM_INFO(dev, "Slim runtime resume: ret %d\n", ret);
return ret;
}
@@ -1450,11 +1551,12 @@
ret = slim_ctrl_clk_pause(&dev->ctrl, false, SLIM_CLK_UNSPECIFIED);
if (ret) {
if (ret != -EBUSY)
- dev_err(device, "clk pause not entered:%d", ret);
+ SLIM_INFO(dev, "clk pause not entered:%d\n", ret);
dev->state = MSM_CTRL_AWAKE;
} else {
dev->state = MSM_CTRL_ASLEEP;
}
+ SLIM_INFO(dev, "Slim runtime suspend: ret %d\n", ret);
return ret;
}
@@ -1466,7 +1568,6 @@
if (!pm_runtime_enabled(dev) ||
(!pm_runtime_suspended(dev) &&
cdev->state == MSM_CTRL_IDLE)) {
- dev_dbg(dev, "system suspend");
ret = ngd_slim_runtime_suspend(dev);
/*
* If runtime-PM still thinks it's active, then make sure its
@@ -1493,16 +1594,20 @@
*/
ret = 0;
}
+ SLIM_INFO(cdev, "system suspend\n");
return ret;
}
static int ngd_slim_resume(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_slim_ctrl *cdev = platform_get_drvdata(pdev);
/*
* Rely on runtime-PM to call resume in case it is enabled.
* Even if it's not enabled, rely on 1st client transaction to do
* clock/power on
*/
+ SLIM_INFO(cdev, "system resume\n");
return 0;
}
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/slimbus/slim-msm.c b/drivers/slimbus/slim-msm.c
index 8589b9f..915bf88 100644
--- a/drivers/slimbus/slim-msm.c
+++ b/drivers/slimbus/slim-msm.c
@@ -51,7 +51,7 @@
if (ret >= 0) {
ref = atomic_read(&dev->dev->power.usage_count);
if (ref <= 0) {
- dev_err(dev->dev, "reference count -ve:%d", ref);
+ SLIM_WARN(dev, "reference count -ve:%d", ref);
ret = -ENODEV;
}
}
@@ -67,7 +67,7 @@
pm_runtime_mark_last_busy(dev->dev);
ref = atomic_read(&dev->dev->power.usage_count);
if (ref <= 0)
- dev_err(dev->dev, "reference count mismatch:%d", ref);
+ SLIM_WARN(dev, "reference count mismatch:%d", ref);
else
pm_runtime_put_sync(dev->dev);
#endif
@@ -109,7 +109,7 @@
/* clear port interrupts */
writel_relaxed(pstat, PGD_THIS_EE(PGD_PORT_INT_CL_EEn,
dev->ver));
- pr_info("disabled overflow/underflow for port 0x%x", pstat);
+ SLIM_INFO(dev, "disabled overflow/underflow for port 0x%x", pstat);
/*
* Guarantee that port interrupt bit(s) clearing writes go
@@ -1133,13 +1133,13 @@
rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, req, sizeof(*req),
&resp_desc, &resp, sizeof(resp), 5000);
if (rc < 0) {
- pr_err("%s: QMI send req failed %d\n", __func__, rc);
+ SLIM_ERR(dev, "%s: QMI send req failed %d\n", __func__, rc);
return rc;
}
/* Check the response */
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
- pr_err("%s: QMI request failed 0x%x (%s)\n", __func__,
+ SLIM_ERR(dev, "%s: QMI request failed 0x%x (%s)\n", __func__,
resp.resp.result, get_qmi_error(&resp.resp));
return -EREMOTEIO;
}
@@ -1165,13 +1165,13 @@
rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, req, sizeof(*req),
&resp_desc, &resp, sizeof(resp), 5000);
if (rc < 0) {
- pr_err("%s: QMI send req failed %d\n", __func__, rc);
+ SLIM_ERR(dev, "%s: QMI send req failed %d\n", __func__, rc);
return rc;
}
/* Check the response */
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
- pr_err("%s: QMI request failed 0x%x (%s)\n", __func__,
+ SLIM_ERR(dev, "%s: QMI request failed 0x%x (%s)\n", __func__,
resp.resp.result, get_qmi_error(&resp.resp));
return -EREMOTEIO;
}
@@ -1208,7 +1208,7 @@
SLIMBUS_QMI_SVC_V1,
SLIMBUS_QMI_INS_ID);
if (rc < 0) {
- pr_err("%s: QMI server not found\n", __func__);
+ SLIM_ERR(dev, "%s: QMI server not found\n", __func__);
goto qmi_connect_to_service_failed;
}
@@ -1281,12 +1281,12 @@
rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, NULL, 0,
&resp_desc, &resp, sizeof(resp), 5000);
if (rc < 0) {
- dev_err(dev->dev, "%s: QMI send req failed %d\n", __func__, rc);
+ SLIM_ERR(dev, "%s: QMI send req failed %d\n", __func__, rc);
return rc;
}
/* Check the response */
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
- dev_err(dev->dev, "%s: QMI request failed 0x%x (%s)\n",
+ SLIM_ERR(dev, "%s: QMI request failed 0x%x (%s)\n",
__func__, resp.resp.result, get_qmi_error(&resp.resp));
return -EREMOTEIO;
}
diff --git a/drivers/slimbus/slim-msm.h b/drivers/slimbus/slim-msm.h
index 63178cc..9673208 100644
--- a/drivers/slimbus/slim-msm.h
+++ b/drivers/slimbus/slim-msm.h
@@ -17,6 +17,7 @@
#include <linux/kthread.h>
#include <mach/msm_qmi_interface.h>
#include <mach/subsystem_notif.h>
+#include <mach/msm_ipc_logging.h>
/* Per spec.max 40 bytes per received message */
#define SLIM_MSGQ_BUF_LEN 40
@@ -216,6 +217,7 @@
struct msm_slim_mdm {
struct notifier_block nb;
void *ssr;
+ enum msm_ctrl_state state;
};
struct msm_slim_pdata {
@@ -266,6 +268,10 @@
struct msm_slim_qmi qmi;
struct msm_slim_pdata pdata;
struct msm_slim_mdm mdm;
+ int default_ipc_log_mask;
+ int ipc_log_mask;
+ bool sysfs_created;
+ void *ipc_slimbus_log;
};
struct msm_sat_chan {
@@ -299,6 +305,57 @@
};
+/* IPC logging stuff */
+#define IPC_SLIMBUS_LOG_PAGES 5
+
+/* Log levels */
+enum {
+ FATAL_LEV = 0U,
+ ERR_LEV = 1U,
+ WARN_LEV = 2U,
+ INFO_LEV = 3U,
+ DBG_LEV = 4U,
+};
+
+/* Default IPC log level INFO */
+#define SLIM_DBG(dev, x...) do { \
+ pr_debug(x); \
+ if (dev->ipc_slimbus_log && dev->ipc_log_mask >= DBG_LEV) { \
+ ipc_log_string(dev->ipc_slimbus_log, x); \
+ } \
+} while (0)
+
+#define SLIM_INFO(dev, x...) do { \
+ pr_debug(x); \
+ if (dev->ipc_slimbus_log && dev->ipc_log_mask >= INFO_LEV) {\
+ ipc_log_string(dev->ipc_slimbus_log, x); \
+ } \
+} while (0)
+
+/* warnings and errors show up on console always */
+#define SLIM_WARN(dev, x...) do { \
+ pr_warn(x); \
+ if (dev->ipc_slimbus_log && dev->ipc_log_mask >= WARN_LEV) \
+ ipc_log_string(dev->ipc_slimbus_log, x); \
+} while (0)
+
+/* An ERROR condition in the driver drops ipc_log_mask to
+ * FATAL_LEV, so that this message is not pushed out of the IPC
+ * log by later traffic. Further errors continue to log on the console.
+ */
+#define SLIM_ERR(dev, x...) do { \
+ pr_err(x); \
+ if (dev->ipc_slimbus_log && dev->ipc_log_mask >= ERR_LEV) { \
+ ipc_log_string(dev->ipc_slimbus_log, x); \
+ dev->default_ipc_log_mask = dev->ipc_log_mask; \
+ dev->ipc_log_mask = FATAL_LEV; \
+ } \
+} while (0)
+
+#define SLIM_RST_LOGLVL(dev) do { \
+ dev->ipc_log_mask = dev->default_ipc_log_mask; \
+} while (0)
+
int msm_slim_rx_enqueue(struct msm_slim_ctrl *dev, u32 *buf, u8 len);
int msm_slim_rx_dequeue(struct msm_slim_ctrl *dev, u8 *buf);
int msm_slim_get_ctrl(struct msm_slim_ctrl *dev);
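
To make the gating explicit (an illustration, not part of the patch): every SLIM_* call still goes to the console path (pr_debug/pr_warn/pr_err) and is additionally copied into the IPC log buffer only while ipc_log_mask is at or above the call's level. SLIM_ERR latches the mask down to FATAL_LEV so the error is not pushed out of the small IPC buffer by later chatter, and SLIM_RST_LOGLVL (called from ngd_slim_power_up once the controller is back up) restores the default; the new debug_mask sysfs attribute lets user space raise the mask up to DBG_LEV at runtime.

	/* Illustrative call sequence only. */
	static void slim_log_example(struct msm_slim_ctrl *dev)
	{
		SLIM_INFO(dev, "normal trace\n");  /* in IPC log while mask >= INFO_LEV */
		SLIM_ERR(dev, "fatal failure\n");  /* logged; mask drops to FATAL_LEV */
		SLIM_DBG(dev, "suppressed\n");     /* pr_debug only; IPC copy skipped */
		SLIM_RST_LOGLVL(dev);              /* back to default_ipc_log_mask */
	}
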
diff --git a/drivers/spi/spi_qsd.c b/drivers/spi/spi_qsd.c
index d3e4612..d670f8b 100644
--- a/drivers/spi/spi_qsd.c
+++ b/drivers/spi/spi_qsd.c
@@ -1957,15 +1957,40 @@
}
}
-/* workqueue - pull messages from queue & process */
-static void msm_spi_workq(struct work_struct *work)
+/**
+ * msm_spi_transfer_one_message - process one SPI message at a time
+ * @master: SPI master controller reference
+ * @msg: one multi-segment SPI transaction
+ *
+ * Return: zero on success or a negative error value
+ */
+static int msm_spi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
{
- struct msm_spi *dd =
- container_of(work, struct msm_spi, work_data);
+ struct msm_spi *dd;
+ struct spi_transfer *tr;
unsigned long flags;
- u32 status_error = 0;
+ int status_error = 0;
- pm_runtime_get_sync(dd->dev);
+ dd = spi_master_get_devdata(master);
+
+ if (list_empty(&msg->transfers) || !msg->complete)
+ return -EINVAL;
+
+ list_for_each_entry(tr, &msg->transfers, transfer_list) {
+ /* Check message parameters */
+ if (tr->speed_hz > dd->pdata->max_clock_speed ||
+ (tr->bits_per_word &&
+ (tr->bits_per_word < 4 || tr->bits_per_word > 32)) ||
+ (tr->tx_buf == NULL && tr->rx_buf == NULL)) {
+ dev_err(dd->dev,
+ "Invalid transfer: %d Hz, %d bpw tx=%p, rx=%p\n",
+ tr->speed_hz, tr->bits_per_word,
+ tr->tx_buf, tr->rx_buf);
+ status_error = -EINVAL;
+ goto out;
+ }
+ }
mutex_lock(&dd->core_lock);
@@ -1990,20 +2015,16 @@
status_error = 1;
}
spin_lock_irqsave(&dd->queue_lock, flags);
+ dd->transfer_pending = 1;
+ dd->cur_msg = msg;
+ spin_unlock_irqrestore(&dd->queue_lock, flags);
- while (!list_empty(&dd->queue)) {
- dd->cur_msg = list_entry(dd->queue.next,
- struct spi_message, queue);
- list_del_init(&dd->cur_msg->queue);
- spin_unlock_irqrestore(&dd->queue_lock, flags);
- if (status_error)
+ if (status_error)
dd->cur_msg->status = -EIO;
- else
- msm_spi_process_message(dd);
- if (dd->cur_msg->complete)
- dd->cur_msg->complete(dd->cur_msg->context);
- spin_lock_irqsave(&dd->queue_lock, flags);
- }
+ else
+ msm_spi_process_message(dd);
+
+ spin_lock_irqsave(&dd->queue_lock, flags);
dd->transfer_pending = 0;
spin_unlock_irqrestore(&dd->queue_lock, flags);
@@ -2012,44 +2033,33 @@
mutex_unlock(&dd->core_lock);
- pm_runtime_mark_last_busy(dd->dev);
- pm_runtime_put_autosuspend(dd->dev);
-
- /* If needed, this can be done after the current message is complete,
- and work can be continued upon resume. No motivation for now. */
+ /*
+ * If needed, this can be done after the current message is complete,
+ * and work can be continued upon resume. No motivation for now.
+ */
if (dd->suspended)
wake_up_interruptible(&dd->continue_suspend);
+
+out:
+ if (status_error)
+ msg->status = (status_error == 1) ? -EIO : status_error;
+ spi_finalize_current_message(master);
+ return 0;
}
-static int msm_spi_transfer(struct spi_device *spi, struct spi_message *msg)
+static int msm_spi_prepare_transfer_hardware(struct spi_master *master)
{
- struct msm_spi *dd;
- unsigned long flags;
- struct spi_transfer *tr;
+ struct msm_spi *dd = spi_master_get_devdata(master);
- dd = spi_master_get_devdata(spi->master);
+ pm_runtime_get_sync(dd->dev);
+ return 0;
+}
- if (list_empty(&msg->transfers) || !msg->complete)
- return -EINVAL;
+static int msm_spi_unprepare_transfer_hardware(struct spi_master *master)
+{
+ struct msm_spi *dd = spi_master_get_devdata(master);
- list_for_each_entry(tr, &msg->transfers, transfer_list) {
- /* Check message parameters */
- if (tr->speed_hz > dd->pdata->max_clock_speed ||
- (tr->bits_per_word &&
- (tr->bits_per_word < 4 || tr->bits_per_word > 32)) ||
- (tr->tx_buf == NULL && tr->rx_buf == NULL)) {
- dev_err(&spi->dev, "Invalid transfer: %d Hz, %d bpw"
- "tx=%p, rx=%p\n",
- tr->speed_hz, tr->bits_per_word,
- tr->tx_buf, tr->rx_buf);
- return -EINVAL;
- }
- }
-
- spin_lock_irqsave(&dd->queue_lock, flags);
- list_add_tail(&msg->queue, &dd->queue);
- spin_unlock_irqrestore(&dd->queue_lock, flags);
- queue_work(dd->workqueue, &dd->work_data);
+ pm_runtime_mark_last_busy(dd->dev);
+ pm_runtime_put_autosuspend(dd->dev);
return 0;
}
@@ -2701,6 +2711,8 @@
&dd->cs_gpios[2].gpio_num, DT_OPT, DT_GPIO, -1},
{"qcom,gpio-cs3",
&dd->cs_gpios[3].gpio_num, DT_OPT, DT_GPIO, -1},
+ {"qcom,rt-priority",
+ &pdata->rt_priority, DT_OPT, DT_BOOL, 0},
{NULL, NULL, 0, 0, 0},
};
@@ -2794,7 +2806,11 @@
master->mode_bits = SPI_SUPPORTED_MODES;
master->num_chipselect = SPI_NUM_CHIPSELECTS;
master->setup = msm_spi_setup;
- master->transfer = msm_spi_transfer;
+ master->prepare_transfer_hardware = msm_spi_prepare_transfer_hardware;
+ master->transfer_one_message = msm_spi_transfer_one_message;
+ master->unprepare_transfer_hardware
+ = msm_spi_unprepare_transfer_hardware;
+
platform_set_drvdata(pdev, master);
dd = spi_master_get_devdata(master);
@@ -2834,6 +2850,7 @@
for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i)
dd->cs_gpios[i].valid = 0;
+ master->rt = pdata->rt_priority;
dd->pdata = pdata;
resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!resource) {
@@ -2895,13 +2912,6 @@
spin_lock_init(&dd->queue_lock);
mutex_init(&dd->core_lock);
- INIT_LIST_HEAD(&dd->queue);
- INIT_WORK(&dd->work_data, msm_spi_workq);
- init_waitqueue_head(&dd->continue_suspend);
- dd->workqueue = create_singlethread_workqueue(
- dev_name(master->dev.parent));
- if (!dd->workqueue)
- goto err_probe_workq;
if (!devm_request_mem_region(&pdev->dev, dd->mem_phys_addr,
dd->mem_size, SPI_DRV_NAME)) {
@@ -3078,8 +3088,6 @@
}
err_probe_rlock_init:
err_probe_reqmem:
- destroy_workqueue(dd->workqueue);
-err_probe_workq:
err_probe_res:
spi_master_put(master);
err_probe_exit:
@@ -3242,7 +3250,6 @@
clk_put(dd->clk);
clk_put(dd->pclk);
msm_spi_clk_path_teardown(dd);
- destroy_workqueue(dd->workqueue);
platform_set_drvdata(pdev, 0);
spi_unregister_master(master);
spi_master_put(master);
diff --git a/drivers/spi/spi_qsd.h b/drivers/spi/spi_qsd.h
index 2a67a61..d538076 100644
--- a/drivers/spi/spi_qsd.h
+++ b/drivers/spi/spi_qsd.h
@@ -300,9 +300,6 @@
struct device *dev;
spinlock_t queue_lock;
struct mutex core_lock;
- struct list_head queue;
- struct workqueue_struct *workqueue;
- struct work_struct work_data;
struct spi_message *cur_msg;
struct spi_transfer *cur_transfer;
struct completion transfer_complete;
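
With the private workqueue gone, message flow is owned by the SPI core: the core's pump thread calls msm_spi_prepare_transfer_hardware() before servicing queued messages, msm_spi_transfer_one_message() per message, and msm_spi_unprepare_transfer_hardware() once the queue drains, while master->rt (driven by qcom,rt-priority) asks the core to run that pump as a realtime kthread. A hedged client-side sketch of what reaches this path (a generic spi_sync() user; names and payload are hypothetical):

	/* Hypothetical client: the message is queued by the SPI core and later
	 * handed to msm_spi_transfer_one_message() by the core's pump thread. */
	static int example_client_write(struct spi_device *spi)
	{
		u8 tx[2] = { 0x0f, 0xa5 };
		struct spi_transfer t = {
			.tx_buf = tx,
			.len = sizeof(tx),
		};
		struct spi_message m;

		spi_message_init(&m);
		spi_message_add_tail(&t, &m);
		return spi_sync(spi, &m);
	}
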
diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c
index 350fd41..c6287ea 100644
--- a/drivers/usb/otg/msm_otg.c
+++ b/drivers/usb/otg/msm_otg.c
@@ -2573,8 +2573,10 @@
bool work = 0, srp_reqd, dcp;
pm_runtime_resume(otg->phy->dev);
- if (motg->pm_done)
+ if (motg->pm_done) {
pm_runtime_get_sync(otg->phy->dev);
+ motg->pm_done = 0;
+ }
pr_debug("%s work\n", otg_state_string(otg->phy->state));
switch (otg->phy->state) {
case OTG_STATE_UNDEFINED:
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 669fca9..05292f9 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -235,6 +235,15 @@
sdev->use_rpm_auto = 1;
sdev->autosuspend_delay = us->sdev_autosuspend_delay;
}
+
+ /*
+ * This quirk enables sending consecutive TEST_UNIT_READY
+ * commands in WRITE(10) command processing context. Increase
+ * the timeout to 60 seconds.
+ */
+ if (us->fflags & US_FL_TUR_AFTER_WRITE)
+ blk_queue_rq_timeout(sdev->request_queue, (60 * HZ));
+
} else {
/* Non-disk-type devices don't need to blacklist any pages
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index c70109e..a710d9f 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -859,6 +859,42 @@
srb->result = DID_ERROR << 16;
last_sector_hacks(us, srb);
+
+ /*
+ * TMC UICC cards expect 5 TEST_UNIT_READY commands after
+ * writing some data. The card performs the flash related
+ * house keeping operations after receiving these commands.
+ * Send 5 TEST_UNIT_READY commands for every 8 WRITE_10
+ * commands.
+ */
+ if (unlikely((us->fflags & US_FL_TUR_AFTER_WRITE) &&
+ srb->cmnd[0] == WRITE_10)) {
+ int i;
+ int temp_result;
+ struct scsi_eh_save ses;
+ unsigned char cmd[] = {
+ TEST_UNIT_READY, 0, 0, 0, 0, 0,
+ };
+
+ us->tur_count[srb->device->lun]++;
+
+ if (us->tur_count[srb->device->lun] == 8) {
+
+ us->tur_count[srb->device->lun] = 0;
+
+ scsi_eh_prep_cmnd(srb, &ses, cmd, 6, 0);
+ for (i = 0; i < 5; i++) {
+ temp_result = us->transport(us->srb, us);
+ if (temp_result != USB_STOR_TRANSPORT_GOOD) {
+ US_DEBUGP("TUR failed %d %d\n",
+ i, temp_result);
+ break;
+ }
+ }
+ scsi_eh_restore_cmnd(srb, &ses);
+ }
+ }
+
return;
/* Error and abort processing: try to resynchronize with the device
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 856ad92..901f6fb 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2018,6 +2018,12 @@
"Digital MP3 Audio Player",
USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ),
+/* Reported by Pavankumar Kondeti <pkondeti@codeaurora.org> */
+UNUSUAL_DEV(0x0925, 0x9011, 0x0100, 0x0100,
+ "TMC",
+ "USB DISK",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_TUR_AFTER_WRITE),
+
/* Control/Bulk transport for all SubClass values */
USUAL_DEV(USB_SC_RBC, USB_PR_CB, USB_US_TYPE_STOR),
USUAL_DEV(USB_SC_8020, USB_PR_CB, USB_US_TYPE_STOR),
diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
index db75080..b079984 100644
--- a/drivers/usb/storage/usb.h
+++ b/drivers/usb/storage/usb.h
@@ -162,6 +162,8 @@
int use_last_sector_hacks;
int last_sector_retries;
int sdev_autosuspend_delay;
+ /* consecutive TEST_UNIT_READY commands during write */
+ int tur_count[16];
};
/* Convert between us_data and the corresponding Scsi_Host */
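
A small stand-alone model of the cadence these hunks implement (counting is per LUN, as in tur_count[]): the counter advances on every WRITE(10), and on each 8th write the transport sends five TEST_UNIT_READY commands before completing the command.

	#include <stdio.h>

	/* User-space model of US_FL_TUR_AFTER_WRITE: 5 TEST_UNIT_READY
	 * commands after every 8th WRITE(10) on a LUN. */
	int main(void)
	{
		int tur_count = 0;
		int write;

		for (write = 1; write <= 24; write++) {
			if (++tur_count == 8) {
				tur_count = 0;
				printf("WRITE(10) #%d -> 5x TEST_UNIT_READY\n", write);
			}
		}
		return 0;	/* prints for writes 8, 16 and 24 */
	}
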
diff --git a/drivers/video/msm/mdss/mdp3.c b/drivers/video/msm/mdss/mdp3.c
index 2fcbeac..ad7cb81 100644
--- a/drivers/video/msm/mdss/mdp3.c
+++ b/drivers/video/msm/mdss/mdp3.c
@@ -47,6 +47,7 @@
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
#include <mach/msm_memtypes.h>
+#include <mach/rpm-regulator-smd.h>
#include "mdp3.h"
#include "mdss_fb.h"
@@ -1204,6 +1205,54 @@
return 0;
}
+void msm_mdp3_cx_ctrl(int enable)
+{
+ int rc;
+
+ if (!mdp3_res->vdd_cx) {
+ mdp3_res->vdd_cx = devm_regulator_get(&mdp3_res->pdev->dev,
+ "vdd-cx");
+ if (IS_ERR_OR_NULL(mdp3_res->vdd_cx)) {
+ pr_debug("unable to get CX reg. rc=%d\n",
+ PTR_RET(mdp3_res->vdd_cx));
+ mdp3_res->vdd_cx = NULL;
+ return;
+ }
+ }
+
+ if (enable) {
+ rc = regulator_set_voltage(
+ mdp3_res->vdd_cx,
+ RPM_REGULATOR_CORNER_SVS_SOC,
+ RPM_REGULATOR_CORNER_SUPER_TURBO);
+ if (rc < 0)
+ goto vreg_set_voltage_fail;
+
+ rc = regulator_enable(mdp3_res->vdd_cx);
+ if (rc) {
+ pr_err("Failed to enable regulator vdd_cx.\n");
+ return;
+ }
+ } else {
+ rc = regulator_disable(mdp3_res->vdd_cx);
+ if (rc) {
+ pr_err("Failed to disable regulator vdd_cx.\n");
+ return;
+ }
+ rc = regulator_set_voltage(
+ mdp3_res->vdd_cx,
+ RPM_REGULATOR_CORNER_NONE,
+ RPM_REGULATOR_CORNER_SUPER_TURBO);
+ if (rc < 0)
+ goto vreg_set_voltage_fail;
+ }
+
+ return;
+vreg_set_voltage_fail:
+ pr_err("Set vltg failed\n");
+ return;
+}
+
void mdp3_batfet_ctrl(int enable)
{
int rc;
@@ -1236,6 +1285,12 @@
pr_err("%s: reg enable/disable failed", __func__);
}
+void mdp3_enable_regulator(int enable)
+{
+ msm_mdp3_cx_ctrl(enable);
+ mdp3_batfet_ctrl(enable);
+}
+
static void mdp3_iommu_heap_unmap_iommu(struct mdp3_iommu_meta *meta)
{
unsigned int domain_num;
@@ -1963,7 +2018,7 @@
else
mdp3_res->intf[MDP3_DMA_OUTPUT_SEL_DSI_CMD].active = 1;
- mdp3_batfet_ctrl(true);
+ mdp3_enable_regulator(true);
mdp3_res->cont_splash_en = 1;
return 0;
@@ -2320,13 +2375,13 @@
static int mdp3_suspend_sub(struct mdp3_hw_resource *mdata)
{
- mdp3_batfet_ctrl(false);
+ mdp3_enable_regulator(false);
return 0;
}
static int mdp3_resume_sub(struct mdp3_hw_resource *mdata)
{
- mdp3_batfet_ctrl(true);
+ mdp3_enable_regulator(true);
return 0;
}
diff --git a/drivers/video/msm/mdss/mdp3.h b/drivers/video/msm/mdss/mdp3.h
index 6650cf3..15aab59 100644
--- a/drivers/video/msm/mdss/mdp3.h
+++ b/drivers/video/msm/mdss/mdp3.h
@@ -164,6 +164,7 @@
bool batfet_required;
struct regulator *batfet;
+ struct regulator *vdd_cx;
};
struct mdp3_img_data {
@@ -207,6 +208,7 @@
int mdp3_misr_set(struct mdp_misr *misr_req);
int mdp3_misr_get(struct mdp_misr *misr_resp);
+void mdp3_enable_regulator(int enable);
#define MDP3_REG_WRITE(addr, val) writel_relaxed(val, mdp3_res->mdp_base + addr)
#define MDP3_REG_READ(addr) readl_relaxed(mdp3_res->mdp_base + addr)
diff --git a/drivers/video/msm/mdss/mdp3_ctrl.c b/drivers/video/msm/mdss/mdp3_ctrl.c
index 3ab6270..81ab41c 100644
--- a/drivers/video/msm/mdss/mdp3_ctrl.c
+++ b/drivers/video/msm/mdss/mdp3_ctrl.c
@@ -297,7 +297,7 @@
vsync_ticks = ktime_to_ns(mdp3_session->vsync_time);
pr_debug("fb%d vsync=%llu", mfd->index, vsync_ticks);
- rc = scnprintf(buf, PAGE_SIZE, "VSYNC=%llu", vsync_ticks);
+ rc = scnprintf(buf, PAGE_SIZE, "VSYNC=%llu\n", vsync_ticks);
return rc;
}
@@ -567,7 +567,7 @@
goto on_error;
}
- mdp3_batfet_ctrl(true);
+ mdp3_enable_regulator(true);
mdp3_ctrl_notifier_register(mdp3_session,
&mdp3_session->mfd->mdp_sync_pt_data.notifier);
@@ -638,6 +638,9 @@
panel = mdp3_session->panel;
mutex_lock(&mdp3_session->lock);
+ if (panel && panel->set_backlight)
+ panel->set_backlight(panel, 0);
+
if (!mdp3_session->status) {
pr_debug("fb%d is off already", mfd->index);
goto off_error;
@@ -675,7 +678,7 @@
mdp3_ctrl_notifier_unregister(mdp3_session,
&mdp3_session->mfd->mdp_sync_pt_data.notifier);
- mdp3_batfet_ctrl(false);
+ mdp3_enable_regulator(false);
mdp3_session->vsync_enabled = 0;
atomic_set(&mdp3_session->vsync_countdown, 0);
mdp3_session->clk_on = 0;
diff --git a/drivers/video/msm/mdss/mdss.h b/drivers/video/msm/mdss/mdss.h
index 478e704..72cceaa 100644
--- a/drivers/video/msm/mdss/mdss.h
+++ b/drivers/video/msm/mdss/mdss.h
@@ -152,6 +152,9 @@
struct mdss_fudge_factor ib_factor_overlap;
struct mdss_fudge_factor clk_factor;
+ u32 *clock_levels;
+ u32 nclk_lvl;
+
struct mdss_hw_settings *hw_settings;
struct mdss_mdp_pipe *vig_pipes;
diff --git a/drivers/video/msm/mdss/mdss_mdp.c b/drivers/video/msm/mdss/mdss_mdp.c
index 9214dcdc..667e4b7 100644
--- a/drivers/video/msm/mdss/mdss_mdp.c
+++ b/drivers/video/msm/mdss/mdss_mdp.c
@@ -2275,6 +2275,23 @@
if (rc)
pr_debug("max bandwidth (high) property not specified\n");
+ mdata->nclk_lvl = mdss_mdp_parse_dt_prop_len(pdev,
+ "qcom,mdss-clk-levels");
+
+ if (mdata->nclk_lvl) {
+ mdata->clock_levels = kzalloc(sizeof(u32) * mdata->nclk_lvl,
+ GFP_KERNEL);
+ if (!mdata->clock_levels) {
+ pr_err("no mem assigned for mdata clock_levels\n");
+ return -ENOMEM;
+ }
+
+ rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-clk-levels",
+ mdata->clock_levels, mdata->nclk_lvl);
+ if (rc)
+ pr_debug("clock levels not found\n");
+ }
+
return 0;
}
diff --git a/drivers/video/msm/mdss/mdss_mdp.h b/drivers/video/msm/mdss/mdss_mdp.h
index 1613757..e528219 100644
--- a/drivers/video/msm/mdss/mdss_mdp.h
+++ b/drivers/video/msm/mdss/mdss_mdp.h
@@ -199,6 +199,9 @@
struct mdss_panel_data *panel_data;
struct mdss_mdp_vsync_handler vsync_handler;
+ struct mdss_mdp_vsync_handler recover_underrun_handler;
+ struct work_struct recover_work;
+ struct work_struct remove_underrun_handler;
struct mdss_mdp_img_rect roi;
struct mdss_mdp_img_rect roi_bkup;
diff --git a/drivers/video/msm/mdss/mdss_mdp_ctl.c b/drivers/video/msm/mdss/mdss_mdp_ctl.c
index f188072..ba5791b 100644
--- a/drivers/video/msm/mdss/mdss_mdp_ctl.c
+++ b/drivers/video/msm/mdss/mdss_mdp_ctl.c
@@ -878,6 +878,29 @@
mutex_unlock(&mdss_mdp_ctl_lock);
}
+static int mdss_mdp_select_clk_lvl(struct mdss_mdp_ctl *ctl,
+ u32 clk_rate)
+{
+ int i;
+ struct mdss_data_type *mdata;
+
+ if (!ctl)
+ return -ENODEV;
+
+ mdata = ctl->mdata;
+
+ for (i = 0; i < mdata->nclk_lvl; i++) {
+ if (clk_rate > mdata->clock_levels[i]) {
+ continue;
+ } else {
+ clk_rate = mdata->clock_levels[i];
+ break;
+ }
+ }
+
+ return clk_rate;
+}
+
static void mdss_mdp_ctl_perf_update(struct mdss_mdp_ctl *ctl,
int params_changed)
{
@@ -945,6 +968,8 @@
clk_rate = max(ctl->cur_perf.mdp_clk_rate,
clk_rate);
}
+
+ clk_rate = mdss_mdp_select_clk_lvl(ctl, clk_rate);
mdss_mdp_set_clk_rate(clk_rate);
pr_debug("update clk rate = %d HZ\n", clk_rate);
}
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_video.c b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
index 4c1de32..a9595a1 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_video.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
@@ -468,6 +468,21 @@
return rc;
}
+static void recover_underrun_work(struct work_struct *work)
+{
+ struct mdss_mdp_ctl *ctl =
+ container_of(work, typeof(*ctl), recover_work);
+
+ if (!ctl || !ctl->add_vsync_handler) {
+ pr_err("ctl or vsync handler is NULL\n");
+ return;
+ }
+
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+ ctl->add_vsync_handler(ctl, &ctl->recover_underrun_handler);
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+}
+
static void mdss_mdp_video_underrun_intr_done(void *arg)
{
struct mdss_mdp_ctl *ctl = arg;
@@ -479,6 +494,9 @@
MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0", "dsi1", "edp", "hdmi", "panic");
pr_debug("display underrun detected for ctl=%d count=%d\n", ctl->num,
ctl->underrun_cnt);
+
+ if (ctl->opmode & MDSS_MDP_CTL_OP_PACK_3D_ENABLE)
+ schedule_work(&ctl->recover_work);
}
static int mdss_mdp_video_vfp_fps_update(struct mdss_mdp_ctl *ctl, int new_fps)
@@ -801,6 +819,7 @@
spin_lock_init(&ctx->vsync_lock);
mutex_init(&ctx->vsync_mtx);
atomic_set(&ctx->vsync_ref, 0);
+ INIT_WORK(&ctl->recover_work, recover_underrun_work);
mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_INTF_VSYNC, ctl->intf_num,
mdss_mdp_video_vsync_intr_done, ctl);
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index c8e4935..70b266d 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -1681,6 +1681,35 @@
mutex_unlock(&mdp5_data->ov_lock);
}
+static void remove_underrun_vsync_handler(struct work_struct *work)
+{
+ int rc;
+ struct mdss_mdp_ctl *ctl =
+ container_of(work, typeof(*ctl), remove_underrun_handler);
+
+ if (!ctl || !ctl->remove_vsync_handler) {
+ pr_err("ctl or vsync handler is NULL\n");
+ return;
+ }
+
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+ rc = ctl->remove_vsync_handler(ctl,
+ &ctl->recover_underrun_handler);
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+}
+
+static void mdss_mdp_recover_underrun_handler(struct mdss_mdp_ctl *ctl,
+ ktime_t t)
+{
+ if (!ctl) {
+ pr_err("ctl is NULL\n");
+ return;
+ }
+
+ mdss_mdp_ctl_reset(ctl);
+ schedule_work(&ctl->remove_underrun_handler);
+}
+
/* function is called in irq context should have minimum processing */
static void mdss_mdp_overlay_handle_vsync(struct mdss_mdp_ctl *ctl,
ktime_t t)
@@ -2646,6 +2675,13 @@
mdss_mdp_overlay_handle_vsync;
ctl->vsync_handler.cmd_post_flush = false;
+ ctl->recover_underrun_handler.vsync_handler =
+ mdss_mdp_recover_underrun_handler;
+ ctl->recover_underrun_handler.cmd_post_flush = false;
+
+ INIT_WORK(&ctl->remove_underrun_handler,
+ remove_underrun_vsync_handler);
+
if (mfd->split_display && pdata->next) {
/* enable split display */
rc = mdss_mdp_ctl_split_display_setup(ctl, pdata->next);
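
Read together with the mdss_mdp_intf_video.c hunks above, the two work items form a small recovery loop for 3D-packed video paths; the condensed sequence (a summary, not additional code in the patch) is:

	underrun IRQ (OP_PACK_3D)  -> schedule_work(&ctl->recover_work)
	recover_work               -> clk on, add recover_underrun_handler, clk off
	next vsync                 -> mdss_mdp_ctl_reset(ctl),
	                              schedule_work(&ctl->remove_underrun_handler)
	remove_underrun_handler    -> clk on, remove the vsync handler, clk off
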
diff --git a/drivers/video/msm/mdss/mdss_mdp_rotator.c b/drivers/video/msm/mdss/mdss_mdp_rotator.c
old mode 100644
new mode 100755
index 91e6373..8d6d41d
--- a/drivers/video/msm/mdss/mdss_mdp_rotator.c
+++ b/drivers/video/msm/mdss/mdss_mdp_rotator.c
@@ -226,7 +226,7 @@
ret = mdss_mdp_smp_reserve(pipe);
if (ret) {
- pr_err("unable to mdss_mdp_smp_reserve rot data\n");
+ pr_debug("unable to mdss_mdp_smp_reserve rot data\n");
return ret;
}
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index d797797..0b95337 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -370,8 +370,18 @@
* %NL80211_ATTR_SSID attribute, and can optionally specify the association
* IEs in %NL80211_ATTR_IE, %NL80211_ATTR_AUTH_TYPE, %NL80211_ATTR_USE_MFP,
* %NL80211_ATTR_MAC, %NL80211_ATTR_WIPHY_FREQ, %NL80211_ATTR_CONTROL_PORT,
- * %NL80211_ATTR_CONTROL_PORT_ETHERTYPE and
- * %NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT.
+ * %NL80211_ATTR_CONTROL_PORT_ETHERTYPE,
+ * %NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT, %NL80211_ATTR_MAC_HINT, and
+ * %NL80211_ATTR_WIPHY_FREQ_HINT.
+ * If included, %NL80211_ATTR_MAC and %NL80211_ATTR_WIPHY_FREQ are
+ * restrictions on BSS selection, i.e., they effectively prevent roaming
+ * within the ESS. %NL80211_ATTR_MAC_HINT and %NL80211_ATTR_WIPHY_FREQ_HINT
+ * can be included to provide a recommendation of the initial BSS while
+ * allowing the driver to roam to other BSSes within the ESS and also to
+ * ignore this recommendation if the indicated BSS is not ideal. Only one
+ * set of BSSID,frequency parameters is used (i.e., either the enforcing
+ * %NL80211_ATTR_MAC,%NL80211_ATTR_WIPHY_FREQ or the less strict
+ * %NL80211_ATTR_MAC_HINT and %NL80211_ATTR_WIPHY_FREQ_HINT).
* Background scan period can optionally be
* specified in %NL80211_ATTR_BG_SCAN_PERIOD,
* if not specified default background scan configuration
@@ -637,6 +647,12 @@
* (&struct nl80211_vendor_cmd_info) of the supported vendor commands.
* This may also be sent as an event with the same attributes.
*
+ * @NL80211_CMD_SET_QOS_MAP: Set Interworking QoS mapping for IP DSCP values.
+ * The QoS mapping information is included in %NL80211_ATTR_QOS_MAP. If
+ * that attribute is not included, QoS mapping is disabled. Since this
+ * QoS mapping is relevant for IP packets, it is only valid during an
+ * association. This is cleared on disassociation and AP restart.
+ *
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
@@ -807,6 +823,8 @@
NL80211_CMD_VENDOR,
+ NL80211_CMD_SET_QOS_MAP,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -1446,6 +1464,23 @@
* @NL80211_ATTR_VENDOR_EVENTS: used for event list advertising in the wiphy
* info, containing a nested array of possible events
*
+ * @NL80211_ATTR_QOS_MAP: IP DSCP mapping for Interworking QoS mapping. This
+ * data is in the format defined for the payload of the QoS Map Set element
+ * in IEEE Std 802.11-2012, 8.4.2.97.
+ *
+ * @NL80211_ATTR_MAC_HINT: MAC address recommendation as initial BSS
+ * @NL80211_ATTR_WIPHY_FREQ_HINT: frequency of the recommended initial BSS
+ *
+ * @NL80211_ATTR_MAX_AP_ASSOC_STA: Device attribute that indicates how many
+ * associated stations are supported in AP mode (including P2P GO); u32.
+ * Since drivers may not have a fixed limit on the maximum number (e.g.,
+ * other concurrent operations may affect this), drivers are allowed to
+ * advertise values that cannot always be met. In such cases, an attempt
+ * to add a new station entry with @NL80211_CMD_NEW_STATION may fail.
+ *
+ * @NL80211_ATTR_TDLS_PEER_CAPABILITY: flags for TDLS peer capabilities, u32.
+ * As specified in the &enum nl80211_tdls_peer_capability.
+ *
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
*/
@@ -1773,6 +1808,15 @@
NL80211_ATTR_VENDOR_EVENTS,
+ NL80211_ATTR_QOS_MAP,
+
+ NL80211_ATTR_MAC_HINT,
+ NL80211_ATTR_WIPHY_FREQ_HINT,
+
+ NL80211_ATTR_MAX_AP_ASSOC_STA,
+
+ NL80211_ATTR_TDLS_PEER_CAPABILITY,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -3256,4 +3300,20 @@
__u32 subcmd;
};
+/**
+ * enum nl80211_tdls_peer_capability - TDLS peer flags.
+ *
+ * Used by tdls_mgmt() to determine which conditional elements need
+ * to be added to TDLS Setup frames.
+ *
+ * @NL80211_TDLS_PEER_HT: TDLS peer is HT capable.
+ * @NL80211_TDLS_PEER_VHT: TDLS peer is VHT capable.
+ * @NL80211_TDLS_PEER_WMM: TDLS peer is WMM capable.
+ */
+enum nl80211_tdls_peer_capability {
+ NL80211_TDLS_PEER_HT = 1<<0,
+ NL80211_TDLS_PEER_VHT = 1<<1,
+ NL80211_TDLS_PEER_WMM = 1<<2,
+};
+
#endif /* __LINUX_NL80211_H */
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
index 195800f..4ecacc7 100644
--- a/include/linux/usb/msm_hsusb.h
+++ b/include/linux/usb/msm_hsusb.h
@@ -346,7 +346,10 @@
* @chg_check_timer: The timer used to implement the workaround to detect
* very slow plug in of wall charger.
* @ui_enabled: USB Intterupt is enabled or disabled.
- * @pm_done: Indicates whether USB is PM resumed
+ * @pm_done: Used to increment the PM usage counter via pm_runtime_get_sync.
+ * This handles the race where the PM resume thread returns before
+ * charger detection starts. pm_done is set to true when USB is
+ * disconnected.
*/
struct msm_otg {
struct usb_phy phy;
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index 17df360..ff30988 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -64,7 +64,9 @@
US_FLAG(NO_READ_CAPACITY_16, 0x00080000) \
/* cannot handle READ_CAPACITY_16 */ \
US_FLAG(INITIAL_READ10, 0x00100000) \
- /* Initial READ(10) (and others) must be retried */
+ /* Initial READ(10) (and others) must be retried */ \
+ US_FLAG(TUR_AFTER_WRITE, 0x00200000) \
+ /* 5 TEST_UNIT_READY after 8 WRITE(10) */ \
#define US_FLAG(name, value) US_FL_##name = value ,
enum { US_DO_ALL_FLAGS };
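
For context, the US_FLAG entries form an X-macro list: each entry is expanded by whichever US_FLAG definition is in effect at the point of use, which is why the INITIAL_READ10 comment line above now needs the trailing backslash before TUR_AFTER_WRITE can be appended. A standalone sketch of the same pattern (hypothetical FLAG/MY_FLAGS names, not the kernel header):

#include <stdio.h>

/* X-macro list: each FLAG(name, value) expands under a later definition. */
#define MY_FLAGS \
	FLAG(INITIAL_READ10,  0x00100000) \
	FLAG(TUR_AFTER_WRITE, 0x00200000)

/* First expansion: turn the list into enum constants. */
#define FLAG(name, value) FL_##name = value,
enum { MY_FLAGS };
#undef FLAG

int main(void)
{
	printf("FL_TUR_AFTER_WRITE = 0x%08x\n", FL_TUR_AFTER_WRITE);
	return 0;
}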
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index ecaef21..f6e37c4 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -58,6 +58,8 @@
* structures here describe these capabilities in detail.
*/
+#define TDLS_MGMT_VERSION2 1
+
/*
* wireless hardware capability structures
*/
@@ -1263,8 +1265,14 @@
*
* @channel: The channel to use or %NULL if not specified (auto-select based
* on scan results)
+ * @channel_hint: The channel of the recommended BSS for initial connection or
+ * %NULL if not specified
* @bssid: The AP BSSID or %NULL if not specified (auto-select based on scan
* results)
+ * @bssid_hint: The recommended AP BSSID for initial connection to the BSS or
+ * %NULL if not specified. Unlike the @bssid parameter, the driver is
+ * allowed to ignore this @bssid_hint if it has knowledge of a better BSS
+ * to use.
* @ssid: SSID
* @ssid_len: Length of ssid in octets
* @auth_type: Authentication type (algorithm)
@@ -1285,7 +1293,9 @@
*/
struct cfg80211_connect_params {
struct ieee80211_channel *channel;
+ struct ieee80211_channel *channel_hint;
u8 *bssid;
+ const u8 *bssid_hint;
u8 *ssid;
size_t ssid_len;
enum nl80211_auth_type auth_type;
@@ -1407,6 +1417,50 @@
};
/**
+ * struct cfg80211_dscp_exception - DSCP exception
+ *
+ * @dscp: DSCP value that does not adhere to the user priority range definition
+ * @up: user priority value to which the corresponding DSCP value belongs
+ */
+struct cfg80211_dscp_exception {
+ u8 dscp;
+ u8 up;
+};
+
+/**
+ * struct cfg80211_dscp_range - DSCP range definition for user priority
+ *
+ * @low: lowest DSCP value of this user priority range, inclusive
+ * @high: highest DSCP value of this user priority range, inclusive
+ */
+struct cfg80211_dscp_range {
+ u8 low;
+ u8 high;
+};
+
+/* QoS Map Set element length defined in IEEE Std 802.11-2012, 8.4.2.97 */
+#define IEEE80211_QOS_MAP_MAX_EX 21
+#define IEEE80211_QOS_MAP_LEN_MIN 16
+#define IEEE80211_QOS_MAP_LEN_MAX \
+ (IEEE80211_QOS_MAP_LEN_MIN + 2 * IEEE80211_QOS_MAP_MAX_EX)
+
+/**
+ * struct cfg80211_qos_map - QoS Map Information
+ *
+ * This struct defines the Interworking QoS map setting for DSCP values
+ *
+ * @num_des: number of DSCP exceptions (0..21)
+ * @dscp_exception: optionally, up to a maximum of 21 DSCP exceptions that
+ * override the user priority DSCP range definition
+ * @up: DSCP range definition for a particular user priority
+ */
+struct cfg80211_qos_map {
+ u8 num_des;
+ struct cfg80211_dscp_exception dscp_exception[IEEE80211_QOS_MAP_MAX_EX];
+ struct cfg80211_dscp_range up[8];
+};
+
+/**
* struct cfg80211_ops - backend description for wireless configuration
*
* This struct is registered by fullmac card drivers and/or wireless stacks
@@ -1597,6 +1651,7 @@
* when number of MAC addresses entries is passed as 0. Drivers which
* advertise the support for MAC based ACL have to implement this callback.
*
+ * @set_qos_map: Set QoS mapping information to the driver
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -1781,7 +1836,8 @@
int (*tdls_mgmt)(struct wiphy *wiphy, struct net_device *dev,
u8 *peer, u8 action_code, u8 dialog_token,
- u16 status_code, const u8 *buf, size_t len);
+ u16 status_code, u32 peer_capability,
+ const u8 *buf, size_t len);
int (*tdls_oper)(struct wiphy *wiphy, struct net_device *dev,
u8 *peer, enum nl80211_tdls_operation oper);
@@ -1798,6 +1854,10 @@
int (*set_mac_acl)(struct wiphy *wiphy, struct net_device *dev,
const struct cfg80211_acl_data *params);
+ int (*set_qos_map)(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_qos_map *qos_map);
+
};
/*
@@ -2177,6 +2237,11 @@
* @n_vendor_commands: number of vendor commands
* @vendor_events: array of vendor events supported by the hardware
* @n_vendor_events: number of vendor events
+ *
+ * @max_ap_assoc_sta: maximum number of associated stations supported in AP mode
+ * (including P2P GO) or 0 to indicate no such limit is advertised. The
+ * driver is allowed to advertise a theoretical limit that it can reach in
+ * some cases, but may not always reach.
*/
struct wiphy {
/* assign these fields before you register the wiphy */
@@ -2285,6 +2350,8 @@
const struct nl80211_vendor_cmd_info *vendor_events;
int n_vendor_commands, n_vendor_events;
+ u16 max_ap_assoc_sta;
+
char priv[0] __attribute__((__aligned__(NETDEV_ALIGN)));
};
@@ -2705,8 +2772,10 @@
/**
* cfg80211_classify8021d - determine the 802.1p/1d tag for a data frame
* @skb: the data frame
+ * @qos_map: Interworking QoS mapping or %NULL if not in use
*/
-unsigned int cfg80211_classify8021d(struct sk_buff *skb);
+unsigned int cfg80211_classify8021d(struct sk_buff *skb,
+ struct cfg80211_qos_map *qos_map);
/**
* cfg80211_find_ie - find information element in data
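
A driver adopting the new set_qos_map op is expected to keep its own copy of the map, since nl80211 frees the cfg80211_qos_map it passes in once the callback returns (see the nl80211_set_qos_map handler below). A rough sketch of such an implementation, with struct my_vif and my_vif_from_netdev() as purely hypothetical driver-private pieces:

/* Rough sketch of a driver-side set_qos_map implementation.  struct my_vif
 * and my_vif_from_netdev() are hypothetical driver-private pieces. */
struct my_vif {
	struct cfg80211_qos_map *qos_map;	/* NULL when mapping is disabled */
};

static struct my_vif *my_vif_from_netdev(struct net_device *dev);

static int my_set_qos_map(struct wiphy *wiphy, struct net_device *dev,
			  struct cfg80211_qos_map *qos_map)
{
	struct my_vif *vif = my_vif_from_netdev(dev);
	struct cfg80211_qos_map *copy = NULL;

	if (qos_map) {
		/* nl80211 frees its qos_map after this returns; keep a copy */
		copy = kmemdup(qos_map, sizeof(*qos_map), GFP_KERNEL);
		if (!copy)
			return -ENOMEM;
	}
	kfree(vif->qos_map);
	vif->qos_map = copy;
	return 0;
}

/* On transmit the stored map feeds the classifier:
 *	skb->priority = cfg80211_classify8021d(skb, vif->qos_map);
 */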
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
index 1c6ea04..3b8b1cc 100644
--- a/include/sound/apr_audio-v2.h
+++ b/include/sound/apr_audio-v2.h
@@ -2264,6 +2264,7 @@
#define VPM_TX_SM_ECNS_COPP_TOPOLOGY 0x00010F71
#define VPM_TX_DM_FLUENCE_COPP_TOPOLOGY 0x00010F72
#define VPM_TX_QMIC_FLUENCE_COPP_TOPOLOGY 0x00010F75
+#define VPM_TX_DM_RFECNS_COPP_TOPOLOGY 0x00010F86
/* Memory map regions command payload used by the
* #ASM_CMD_SHARED_MEM_MAP_REGIONS ,#ADM_CMD_SHARED_MEM_MAP_REGIONS
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 685553b..ff11148 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -2440,8 +2440,8 @@
static int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
u8 *peer, u8 action_code, u8 dialog_token,
- u16 status_code, const u8 *extra_ies,
- size_t extra_ies_len)
+ u16 status_code, u32 peer_capability,
+ const u8 *extra_ies, size_t extra_ies_len)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 89511be..262c305 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -134,7 +134,7 @@
/* use the data classifier to determine what 802.1d tag the
* data frame has */
- skb->priority = cfg80211_classify8021d(skb);
+ skb->priority = cfg80211_classify8021d(skb, NULL);
return ieee80211_downgrade_queue(local, skb);
}
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 30f20fe..9890342 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -155,6 +155,10 @@
kfree(wdev->connect_keys);
wdev->connect_keys = NULL;
+ if (rdev->ops->set_qos_map) {
+ rdev->ops->set_qos_map(&rdev->wiphy, dev, NULL);
+ }
+
/*
* Delete all the keys ... pairwise keys can't really
* exist any more anyway, but default keys might.
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index ba21ab2..c892cce 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -152,8 +152,12 @@
return -ENOTCONN;
err = rdev->ops->leave_mesh(&rdev->wiphy, dev);
- if (!err)
+ if (!err) {
wdev->mesh_id_len = 0;
+ if (rdev->ops->set_qos_map) {
+ rdev->ops->set_qos_map(&rdev->wiphy, dev, NULL);
+ }
+ }
return err;
}
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 4a3719b..c3adef8 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -237,6 +237,11 @@
[NL80211_ATTR_VENDOR_ID] = { .type = NLA_U32 },
[NL80211_ATTR_VENDOR_SUBCMD] = { .type = NLA_U32 },
[NL80211_ATTR_VENDOR_DATA] = { .type = NLA_BINARY },
+ [NL80211_ATTR_QOS_MAP] = { .type = NLA_BINARY,
+ .len = IEEE80211_QOS_MAP_LEN_MAX },
+ [NL80211_ATTR_MAC_HINT] = { .len = ETH_ALEN },
+ [NL80211_ATTR_WIPHY_FREQ_HINT] = { .type = NLA_U32 },
+ [NL80211_ATTR_TDLS_PEER_CAPABILITY] = { .type = NLA_U32 },
};
/* policy for the key attributes */
@@ -950,6 +955,7 @@
i++;
NLA_PUT_U32(msg, i, NL80211_CMD_REGISTER_BEACONS);
}
+ CMD(set_qos_map, SET_QOS_MAP);
#ifdef CONFIG_NL80211_TESTMODE
CMD(testmode_cmd, TESTMODE);
@@ -1088,6 +1094,11 @@
NLA_PUT_U32(msg, NL80211_ATTR_MAC_ACL_MAX,
dev->wiphy.max_acl_mac_addrs);
+ if (dev->wiphy.max_ap_assoc_sta &&
+ nla_put_u32(msg, NL80211_ATTR_MAX_AP_ASSOC_STA,
+ dev->wiphy.max_ap_assoc_sta))
+ goto nla_put_failure;
+
if (dev->wiphy.n_vendor_commands) {
const struct nl80211_vendor_cmd_info *info;
struct nlattr *nested;
@@ -5530,6 +5541,9 @@
if (info->attrs[NL80211_ATTR_MAC])
connect.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
+ else if (info->attrs[NL80211_ATTR_MAC_HINT])
+ connect.bssid_hint =
+ nla_data(info->attrs[NL80211_ATTR_MAC_HINT]);
connect.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
connect.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
@@ -5554,6 +5568,14 @@
if (!connect.channel ||
connect.channel->flags & IEEE80211_CHAN_DISABLED)
return -EINVAL;
+ } else if (info->attrs[NL80211_ATTR_WIPHY_FREQ_HINT]) {
+ connect.channel_hint =
+ ieee80211_get_channel(wiphy,
+ nla_get_u32(
+ info->attrs[NL80211_ATTR_WIPHY_FREQ_HINT]));
+ if (!connect.channel_hint ||
+ connect.channel_hint->flags & IEEE80211_CHAN_DISABLED)
+ return -EINVAL;
}
if (connect.privacy && info->attrs[NL80211_ATTR_KEYS]) {
@@ -5693,6 +5715,7 @@
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
u8 action_code, dialog_token;
+ u32 peer_capability = 0;
u16 status_code;
u8 *peer;
@@ -5711,9 +5734,12 @@
action_code = nla_get_u8(info->attrs[NL80211_ATTR_TDLS_ACTION]);
status_code = nla_get_u16(info->attrs[NL80211_ATTR_STATUS_CODE]);
dialog_token = nla_get_u8(info->attrs[NL80211_ATTR_TDLS_DIALOG_TOKEN]);
+ if (info->attrs[NL80211_ATTR_TDLS_PEER_CAPABILITY])
+ peer_capability =
+ nla_get_u32(info->attrs[NL80211_ATTR_TDLS_PEER_CAPABILITY]);
return rdev->ops->tdls_mgmt(&rdev->wiphy, dev, peer, action_code,
- dialog_token, status_code,
+ dialog_token, status_code, peer_capability,
nla_data(info->attrs[NL80211_ATTR_IE]),
nla_len(info->attrs[NL80211_ATTR_IE]));
}
@@ -6796,6 +6822,57 @@
}
EXPORT_SYMBOL(cfg80211_vendor_cmd_reply);
+static int nl80211_set_qos_map(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct cfg80211_qos_map *qos_map = NULL;
+ struct net_device *dev = info->user_ptr[1];
+ u8 *pos, len, num_des, des_len, des;
+ int ret;
+
+ if (!rdev->ops->set_qos_map)
+ return -EOPNOTSUPP;
+
+ if (info->attrs[NL80211_ATTR_QOS_MAP]) {
+ pos = nla_data(info->attrs[NL80211_ATTR_QOS_MAP]);
+ len = nla_len(info->attrs[NL80211_ATTR_QOS_MAP]);
+
+ if (len % 2 || len < IEEE80211_QOS_MAP_LEN_MIN ||
+ len > IEEE80211_QOS_MAP_LEN_MAX)
+ return -EINVAL;
+
+ qos_map = kzalloc(sizeof(struct cfg80211_qos_map), GFP_KERNEL);
+ if (!qos_map)
+ return -ENOMEM;
+
+ num_des = (len - IEEE80211_QOS_MAP_LEN_MIN) >> 1;
+ if (num_des) {
+ des_len = num_des *
+ sizeof(struct cfg80211_dscp_exception);
+ memcpy(qos_map->dscp_exception, pos, des_len);
+ qos_map->num_des = num_des;
+ for (des = 0; des < num_des; des++) {
+ if (qos_map->dscp_exception[des].up > 7) {
+ kfree(qos_map);
+ return -EINVAL;
+ }
+ }
+ pos += des_len;
+ }
+ memcpy(qos_map->up, pos, IEEE80211_QOS_MAP_LEN_MIN);
+ }
+
+ wdev_lock(dev->ieee80211_ptr);
+ ret = nl80211_key_allowed(dev->ieee80211_ptr);
+ if (!ret)
+ ret = rdev->ops->set_qos_map(&rdev->wiphy, dev, qos_map);
+ wdev_unlock(dev->ieee80211_ptr);
+
+ kfree(qos_map);
+ return ret;
+}
+
#define NL80211_FLAG_NEED_WIPHY 0x01
#define NL80211_FLAG_NEED_NETDEV 0x02
#define NL80211_FLAG_NEED_RTNL 0x04
@@ -7408,6 +7485,14 @@
.internal_flags = NL80211_FLAG_NEED_NETDEV |
NL80211_FLAG_NEED_RTNL,
},
+ {
+ .cmd = NL80211_CMD_SET_QOS_MAP,
+ .doit = nl80211_set_qos_map,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
};
static struct genl_multicast_group nl80211_mlme_mcgrp = {
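
From userspace, the new command travels over the nl80211 generic netlink family. A minimal libnl-3 sketch follows (assuming a UAPI header that carries the new NL80211_CMD_SET_QOS_MAP and NL80211_ATTR_QOS_MAP values; error handling and ack processing elided, payload built as in the earlier sketch):

#include <stddef.h>
#include <stdint.h>
#include <net/if.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>

/* Minimal libnl-3 sketch: push a QoS Map payload to an interface. */
static int send_qos_map(const char *ifname, const uint8_t *payload, size_t len)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	int family, err;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "nl80211");

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_SET_QOS_MAP, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, if_nametoindex(ifname));
	nla_put(msg, NL80211_ATTR_QOS_MAP, len, payload);

	err = nl_send_auto(sk, msg);
	nlmsg_free(msg);
	nl_socket_free(sk);
	return err < 0 ? err : 0;
}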
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index ab91446..87547ca 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -721,6 +721,10 @@
for (i = 0; i < 6; i++)
rdev->ops->del_key(wdev->wiphy, dev, i, false, NULL);
+ if (rdev->ops->set_qos_map) {
+ rdev->ops->set_qos_map(&rdev->wiphy, dev, NULL);
+ }
+
#ifdef CONFIG_CFG80211_WEXT
memset(&wrqu, 0, sizeof(wrqu));
wrqu.ap_addr.sa_family = ARPHRD_ETHER;
diff --git a/net/wireless/util.c b/net/wireless/util.c
index b89fb94..1ba7232 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -641,7 +641,8 @@
EXPORT_SYMBOL(ieee80211_amsdu_to_8023s);
/* Given a data frame determine the 802.1p/1d tag to use. */
-unsigned int cfg80211_classify8021d(struct sk_buff *skb)
+unsigned int cfg80211_classify8021d(struct sk_buff *skb,
+ struct cfg80211_qos_map *qos_map)
{
unsigned int dscp;
@@ -664,6 +665,21 @@
return 0;
}
+ if (qos_map) {
+ unsigned int i, tmp_dscp = dscp >> 2;
+
+ for (i = 0; i < qos_map->num_des; i++) {
+ if (tmp_dscp == qos_map->dscp_exception[i].dscp)
+ return qos_map->dscp_exception[i].up;
+ }
+
+ for (i = 0; i < 8; i++) {
+ if (tmp_dscp >= qos_map->up[i].low &&
+ tmp_dscp <= qos_map->up[i].high)
+ return i;
+ }
+ }
+
return dscp >> 5;
}
EXPORT_SYMBOL(cfg80211_classify8021d);
@@ -817,6 +833,9 @@
dev->ieee80211_ptr->use_4addr = false;
dev->ieee80211_ptr->mesh_id_up_len = 0;
+ if (rdev->ops->set_qos_map) {
+ rdev->ops->set_qos_map(&rdev->wiphy, dev, NULL);
+ }
switch (otype) {
case NL80211_IFTYPE_ADHOC:
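
The classifier above consults the map in a fixed order: DSCP exceptions first, then the eight user-priority ranges, and only then the legacy precedence fallback (dscp >> 5). A small standalone illustration of that ordering with made-up values:

#include <stdio.h>
#include <stdint.h>

/* Stand-alone illustration of the lookup order in cfg80211_classify8021d():
 * DSCP exceptions first, then the eight UP ranges, then the legacy
 * precedence bits (tos >> 5).  Hypothetical values, not kernel code. */
int main(void)
{
	uint8_t tos = 0xB8;			/* DSCP 46 (Expedited Forwarding) */
	uint8_t dscp = tos >> 2;		/* 46 */
	uint8_t exc_dscp = 46, exc_up = 6;	/* one configured exception */

	printf("legacy UP : %u\n", tos >> 5);		/* prints 5 */
	if (dscp == exc_dscp)
		printf("mapped UP : %u\n", exc_up);	/* prints 6 */
	return 0;
}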
diff --git a/sound/soc/codecs/wcd9306.c b/sound/soc/codecs/wcd9306.c
index f59456e..fccb70c 100644
--- a/sound/soc/codecs/wcd9306.c
+++ b/sound/soc/codecs/wcd9306.c
@@ -5786,7 +5786,17 @@
const char *name)
{
int i;
- struct wcd9xxx *core = dev_get_drvdata(codec->dev->parent);
+ struct wcd9xxx *core = NULL;
+
+ if (codec == NULL) {
+ dev_err(codec->dev, "%s: codec not initialized\n", __func__);
+ return NULL;
+ }
+ core = dev_get_drvdata(codec->dev->parent);
+ if (core == NULL) {
+ dev_err(codec->dev, "%s: core not initialized\n", __func__);
+ return NULL;
+ }
for (i = 0; i < core->num_of_supplies; i++) {
if (core->supplies[i].supply &&
diff --git a/sound/soc/codecs/wcd9xxx-mbhc.c b/sound/soc/codecs/wcd9xxx-mbhc.c
index b2ec347..34cb21a 100644
--- a/sound/soc/codecs/wcd9xxx-mbhc.c
+++ b/sound/soc/codecs/wcd9xxx-mbhc.c
@@ -188,7 +188,7 @@
static bool wcd9xxx_mbhc_polling(struct wcd9xxx_mbhc *mbhc)
{
- return mbhc->polling_active;
+ return snd_soc_read(mbhc->codec, WCD9XXX_A_CDC_MBHC_EN_CTL) & 0x1;
}
static void wcd9xxx_turn_onoff_override(struct wcd9xxx_mbhc *mbhc, bool on)
@@ -542,13 +542,13 @@
if (cfilt_mode.cur_mode_val
!= cfilt_mode.reg_mode_val) {
- if (mbhc->polling_active)
+ if (mbhc->polling_active && wcd9xxx_mbhc_polling(mbhc))
wcd9xxx_pause_hs_polling(mbhc);
snd_soc_update_bits(codec,
mbhc->mbhc_bias_regs.cfilt_ctl,
cfilt_mode.reg_mask,
cfilt_mode.reg_mode_val);
- if (mbhc->polling_active)
+ if (mbhc->polling_active && wcd9xxx_mbhc_polling(mbhc))
wcd9xxx_start_hs_polling(mbhc);
pr_debug("%s: CFILT mode change (%x to %x)\n", __func__,
cfilt_mode.cur_mode_val,
diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c
index 2a6ce43..7b10815 100644
--- a/sound/soc/msm/qdsp6v2/q6adm.c
+++ b/sound/soc/msm/qdsp6v2/q6adm.c
@@ -1167,7 +1167,8 @@
open.topology_id = topology;
if ((open.topology_id == VPM_TX_SM_ECNS_COPP_TOPOLOGY) ||
- (open.topology_id == VPM_TX_DM_FLUENCE_COPP_TOPOLOGY))
+ (open.topology_id == VPM_TX_DM_FLUENCE_COPP_TOPOLOGY) ||
+ (open.topology_id == VPM_TX_DM_RFECNS_COPP_TOPOLOGY))
rate = 16000;
if (perf_mode == ULTRA_LOW_LATENCY_PCM_MODE) {
@@ -1175,6 +1176,10 @@
rate = ULL_SUPPORTED_SAMPLE_RATE;
if(channel_mode > ULL_MAX_SUPPORTED_CHANNEL)
channel_mode = ULL_MAX_SUPPORTED_CHANNEL;
+ } else if (perf_mode == LOW_LATENCY_PCM_MODE) {
+ if ((open.topology_id == DOLBY_ADM_COPP_TOPOLOGY_ID) ||
+ (open.topology_id == SRS_TRUMEDIA_TOPOLOGY_ID))
+ open.topology_id = DEFAULT_COPP_TOPOLOGY;
}
open.dev_num_channel = channel_mode & 0x00FF;
open.bit_width = bits_per_sample;