Merge "msm: mdss: Return suspend without delay in NOTIFY UPDATE"
diff --git a/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt b/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
index 01cc798..2ae9821 100644
--- a/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
+++ b/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
@@ -16,6 +16,12 @@
Optional properties:
- qcom,ce-hw-shared : optional, indicates if the hardware is shared between EE.
- qcom,ce-hw-key : optional, indicates if the hardware supports use of HW KEY.
+ - qcom,use-sw-aes-cbc-ecb-ctr-algo : optional, indicates if the SW aes-cbc/ecb/ctr algorithm should be used.
+ - qcom,use-sw-aes-xts-algo : optional, indicates if the SW aes-xts algorithm should be used.
+ - qcom,use-sw-aead-algo : optional, indicates if the SW aead algorithm should be used.
+ - qcom,use-sw-ahash-algo : optional, indicates if the SW hash algorithm should be used.
+ - qcom,use-sw-hmac-algo : optional, indicates if the SW hmac algorithm should be used.
+ - qcom,use-sw-aes-ccm-algo : optional, indicates if the SW aes-ccm algorithm should be used.
Example:
diff --git a/Documentation/devicetree/bindings/fb/msm-hdmi-tx.txt b/Documentation/devicetree/bindings/fb/msm-hdmi-tx.txt
index 8579ec0..b4bfb92 100644
--- a/Documentation/devicetree/bindings/fb/msm-hdmi-tx.txt
+++ b/Documentation/devicetree/bindings/fb/msm-hdmi-tx.txt
@@ -28,6 +28,7 @@
- qcom,hdmi-tx-mux-sel: gpio required to toggle HDMI output between
docking station, type A, and liquid device, type D, ports. Required
property for liquid devices.
+- qcom,hdmi-tx-ddc-mux-sel: gpio used for DDC mux selection.
- qcom,hdmi-tx-mux-en: gpio required to enable mux for HDMI output
on liquid devices. Required property for liquid devices.
@@ -57,6 +58,7 @@
qcom,hdmi-tx-max-voltage-level = <0 0 1800000 1800000>;
qcom,hdmi-tx-peak-current = <0 0 1800000 0>;
+ qcom,hdmi-tx-ddc-mux-sel = <&pma8084_gpios 6 0>;
qcom,hdmi-tx-cec = <&msmgpio 31 0>;
qcom,hdmi-tx-ddc-clk = <&msmgpio 32 0>;
qcom,hdmi-tx-ddc-data = <&msmgpio 33 0>;
diff --git a/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt b/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt
index 1be5504..dd0c440 100644
--- a/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt
+++ b/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt
@@ -49,6 +49,7 @@
2 : {1, 4}
3 : {1, 6}
4 : {1, 20}
+ 5 : {1, 8}
- qcom,calibration-type : Reference voltage to use for channel calibration.
Channel calibration is dependendent on the channel.
Certain channels like XO_THERM, BATT_THERM use ratiometric
diff --git a/Documentation/devicetree/bindings/qseecom/qseecom.txt b/Documentation/devicetree/bindings/qseecom/qseecom.txt
index ac82387..9e582e2 100644
--- a/Documentation/devicetree/bindings/qseecom/qseecom.txt
+++ b/Documentation/devicetree/bindings/qseecom/qseecom.txt
@@ -11,6 +11,9 @@
- qcom, msm_bus,num_paths: The paths for source and destination ports
- qcom, msm_bus,vectors: Vectors for bus topology.
+Optional properties:
+ - qcom,support-bus-scaling : optional, indicates if the driver supports scaling the bus for crypto operations.
+
Example:
qcom,qseecom@fe806000 {
compatible = "qcom,qseecom";
diff --git a/Documentation/devicetree/bindings/thermal/qpnp-adc-tm.txt b/Documentation/devicetree/bindings/thermal/qpnp-adc-tm.txt
index e1681ca..e4e05d1 100644
--- a/Documentation/devicetree/bindings/thermal/qpnp-adc-tm.txt
+++ b/Documentation/devicetree/bindings/thermal/qpnp-adc-tm.txt
@@ -53,6 +53,7 @@
2 : pre-div ratio of {1, 4}
3 : pre-div ratio of {1, 6}
4 : pre-div ratio of {1, 20}
+ 5 : pre-div ratio of {1, 8}
- qcom,calibration-type : Reference voltage to use for channel calibration.
Channel calibration is dependendent on the channel.
Certain channels like XO_THERM, BATT_THERM use ratiometric
diff --git a/arch/arm/boot/dts/dsi-panel-hx8394a-720p-video.dtsi b/arch/arm/boot/dts/dsi-panel-hx8394a-720p-video.dtsi
index 5302d8ae..760ecd7 100644
--- a/arch/arm/boot/dts/dsi-panel-hx8394a-720p-video.dtsi
+++ b/arch/arm/boot/dts/dsi-panel-hx8394a-720p-video.dtsi
@@ -77,6 +77,8 @@
qcom,mdss-dsi-mdp-trigger = "none";
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-reset-sequence = <1 20>, <0 1>, <1 20>;
+ qcom,mdss-pan-physical-width-dimension = <59>;
+ qcom,mdss-pan-physical-height-dimension = <104>;
};
};
diff --git a/arch/arm/boot/dts/dsi-panel-ssd2080m-720p-video.dtsi b/arch/arm/boot/dts/dsi-panel-ssd2080m-720p-video.dtsi
index 5ebb516..39b10b3 100644
--- a/arch/arm/boot/dts/dsi-panel-ssd2080m-720p-video.dtsi
+++ b/arch/arm/boot/dts/dsi-panel-ssd2080m-720p-video.dtsi
@@ -118,5 +118,7 @@
qcom,mdss-dsi-mdp-trigger = "none";
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-reset-sequence = <1 20>, <0 2>, <1 20>;
+ qcom,mdss-dsi-lp11-init;
+ qcom,mdss-dsi-init-delay-us = <50000>;
};
};
diff --git a/arch/arm/boot/dts/msm8974-v2.2.dtsi b/arch/arm/boot/dts/msm8974-v2.2.dtsi
index 3ed5720..14897ba 100644
--- a/arch/arm/boot/dts/msm8974-v2.2.dtsi
+++ b/arch/arm/boot/dts/msm8974-v2.2.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -93,3 +93,7 @@
qcom,retain-periph;
qcom,retain-mem;
};
+
+&gdsc_venus {
+ qcom,skip-logic-collapse;
+};
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index 8b52a3f..f4aa25b 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -215,7 +215,7 @@
vdd-supply = <&gdsc_venus>;
qcom,hfi = "venus";
qcom,has-ocmem;
- qcom,max-hw-load = <1224450>; /* 4k @ 30 + 1080p @ 30*/
+ qcom,max-hw-load = <1216800>; /* 3840 x 2160 @ 30 + 1080p @ 30*/
};
qcom,vidc {
@@ -2001,8 +2001,8 @@
qcom,msm-bus,num-cases = <2>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
- <1 618 0 0>,
- <1 618 0 800>;
+ <88 618 0 0>,
+ <88 618 0 800>;
};
qseecom: qcom,qseecom@7b00000 {
diff --git a/arch/arm/boot/dts/msm8974pro-ab-pm8941-mtp.dts b/arch/arm/boot/dts/msm8974pro-ab-pm8941-mtp.dts
index 6b62391..d4bb37b 100644
--- a/arch/arm/boot/dts/msm8974pro-ab-pm8941-mtp.dts
+++ b/arch/arm/boot/dts/msm8974pro-ab-pm8941-mtp.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -22,5 +22,5 @@
};
&sdhc_1 {
- qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
};
diff --git a/arch/arm/boot/dts/msm8974pro.dtsi b/arch/arm/boot/dts/msm8974pro.dtsi
index 874ce05..f9cdb6e 100755
--- a/arch/arm/boot/dts/msm8974pro.dtsi
+++ b/arch/arm/boot/dts/msm8974pro.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1701,7 +1701,7 @@
qcom,dec-ocmem-ab-ib = <0 0>,
<176000 519000>,
<456000 519000>,
- <864000 519000>,
+ <864000 629000>,
<1728000 1038000>,
<2766000 1661000>,
<3456000 2076000>,
@@ -1751,3 +1751,7 @@
qcom,memblock-remove = <0x05a00000 0x7800000
0x0fa00000 0x500000>; /* Address and size of the hole */
};
+
+&gdsc_venus {
+ qcom,skip-logic-collapse;
+};
diff --git a/arch/arm/mach-msm/lpm_levels.c b/arch/arm/mach-msm/lpm_levels.c
index 06160f7..7553f82 100644
--- a/arch/arm/mach-msm/lpm_levels.c
+++ b/arch/arm/mach-msm/lpm_levels.c
@@ -271,7 +271,7 @@
{
int best_level = -1;
int i;
- uint32_t best_level_pwr = ~0UL;
+ uint32_t best_level_pwr = ~0U;
uint32_t pwr;
uint32_t latency_us = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
@@ -490,7 +490,7 @@
static noinline int lpm_cpu_power_select(struct cpuidle_device *dev, int *index)
{
int best_level = -1;
- uint32_t best_level_pwr = ~0UL;
+ uint32_t best_level_pwr = ~0U;
uint32_t latency_us = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
uint32_t sleep_us =
(uint32_t)(ktime_to_us(tick_nohz_get_sleep_length()));
@@ -720,8 +720,6 @@
int idx;
struct lpm_cpu_level *cpu_level = &system_state->cpu_level[cpu_index];
- cpu_level = &system_state->cpu_level[cpu_index];
-
lpm_cpu_prepare(system_state, cpu_index, from_idle);
idx = lpm_system_select(system_state, cpu_index, from_idle);
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_arb.c b/arch/arm/mach-msm/msm_bus/msm_bus_arb.c
index 696103c..72e3ec3 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_arb.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_arb.c
@@ -299,12 +299,16 @@
static uint64_t get_node_maxib(struct msm_bus_inode_info *info)
{
- int i;
+ int i, ctx;
uint64_t maxib = 0;
- for (i = 0; i <= info->num_pnodes; i++)
- maxib = max(*info->pnode[i].sel_clk, maxib);
+ for (i = 0; i <= info->num_pnodes; i++) {
+ for (ctx = 0; ctx < NUM_CTX; ctx++)
+ maxib = max(info->pnode[i].clk[ctx], maxib);
+ }
+ MSM_BUS_DBG("%s: Node %d numpnodes %d maxib %llu", __func__,
+ info->num_pnodes, info->node_info->id, maxib);
return maxib;
}
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_of.c b/arch/arm/mach-msm/msm_bus/msm_bus_of.c
index 5e803a1..52195c7 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_of.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_of.c
@@ -173,7 +173,7 @@
of_node = pdev->dev.of_node;
pdata = get_pdata(pdev, of_node);
if (!pdata) {
- pr_err("Error getting bus pdata!\n");
+ pr_err("client has to provide missing entry for successful registration\n");
return NULL;
}
@@ -214,7 +214,7 @@
pdata = get_pdata(pdev, of_node);
if (!pdata) {
- pr_err("Error getting bus pdata!\n");
+ pr_err("client has to provide missing entry for successful registration\n");
return NULL;
}
diff --git a/arch/arm/mach-msm/qdsp6v2/apr_tal.c b/arch/arm/mach-msm/qdsp6v2/apr_tal.c
index 8826a35..e917f31 100644
--- a/arch/arm/mach-msm/qdsp6v2/apr_tal.c
+++ b/arch/arm/mach-msm/qdsp6v2/apr_tal.c
@@ -190,6 +190,9 @@
apr_tal_close(&apr_svc_ch[dl][dest][svc]);
return NULL;
}
+
+ smd_disable_read_intr(apr_svc_ch[dl][dest][svc].ch);
+
if (!apr_svc_ch[dl][dest][svc].dest_state) {
apr_svc_ch[dl][dest][svc].dest_state = 1;
pr_debug("apr_tal:Waiting for apr svc init\n");
diff --git a/arch/arm/mach-msm/smp2p.c b/arch/arm/mach-msm/smp2p.c
index df241f8..5574eae 100644
--- a/arch/arm/mach-msm/smp2p.c
+++ b/arch/arm/mach-msm/smp2p.c
@@ -940,7 +940,7 @@
if (size < sizeof(struct smp2p_smem)) {
SMP2P_ERR(
- "%s pid %d item size too small; expected: %d actual: %d\n",
+ "%s pid %d item size too small; expected: %zu actual: %d\n",
__func__, remote_pid,
sizeof(struct smp2p_smem), size);
smem_item = NULL;
diff --git a/arch/arm/mach-msm/smp2p_private.h b/arch/arm/mach-msm/smp2p_private.h
index 8e0d7a3..7174950 100644
--- a/arch/arm/mach-msm/smp2p_private.h
+++ b/arch/arm/mach-msm/smp2p_private.h
@@ -1,6 +1,6 @@
/* arch/arm/mach-msm/smp2p_private.h
*
- * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -44,10 +44,10 @@
#define SMP2P_GET_BITS(hdr_val, mask, bit) \
(((hdr_val) & (mask)) >> (bit))
#define SMP2P_SET_BITS(hdr_val, mask, bit, new_value) \
- do {\
+ {\
hdr_val = (hdr_val & ~(mask)) \
| (((new_value) << (bit)) & (mask)); \
- } while (0)
+ }
#define SMP2P_GET_LOCAL_PID(hdr) \
SMP2P_GET_BITS(hdr, SMP2P_LOCAL_PID_MASK, SMP2P_LOCAL_PID_BIT)
diff --git a/drivers/crypto/msm/qce.h b/drivers/crypto/msm/qce.h
index afd5141..4250202 100644
--- a/drivers/crypto/msm/qce.h
+++ b/drivers/crypto/msm/qce.h
@@ -1,6 +1,6 @@
/* Qualcomm Crypto Engine driver API
*
- * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -116,6 +116,12 @@
bool bam;
bool is_shared;
bool hw_key;
+ bool use_sw_aes_cbc_ecb_ctr_algo;
+ bool use_sw_aead_algo;
+ bool use_sw_aes_xts_algo;
+ bool use_sw_ahash_algo;
+ bool use_sw_hmac_algo;
+ bool use_sw_aes_ccm_algo;
};
/* Sha operation parameters */
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
index 7f6498a..63e18c9 100644
--- a/drivers/crypto/msm/qce50.c
+++ b/drivers/crypto/msm/qce50.c
@@ -1,6 +1,6 @@
/* Qualcomm Crypto Engine driver.
*
- * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -38,6 +38,7 @@
#include "qce.h"
#include "qce50.h"
#include "qcryptohw_50.h"
+#include "qce_ota.h"
#define CRYPTO_CONFIG_RESET 0xE001F
#define QCE_MAX_NUM_DSCR 0x500
@@ -96,6 +97,17 @@
enum qce_cipher_mode_enum mode;
struct qce_ce_cfg_reg_setting reg;
struct ce_sps_data ce_sps;
+ uint32_t engines_avail;
+ dma_addr_t phy_ota_src;
+ dma_addr_t phy_ota_dst;
+ unsigned int ota_size;
+
+ bool use_sw_aes_cbc_ecb_ctr_algo;
+ bool use_sw_aead_algo;
+ bool use_sw_aes_xts_algo;
+ bool use_sw_ahash_algo;
+ bool use_sw_hmac_algo;
+ bool use_sw_aes_ccm_algo;
};
/* Standard initialization vector for SHA-1, source: FIPS 180-2 */
@@ -202,6 +214,8 @@
};
pce_dev->ce_sps.minor_version = min_rev;
+ pce_dev->engines_avail = readl_relaxed(pce_dev->iobase +
+ CRYPTO_ENGINES_AVAIL);
dev_info(pce_dev->pdev, "Qualcomm Crypto %d.%d.%d device found @0x%x\n",
maj_rev, min_rev, step_rev, pce_dev->phy_iobase);
@@ -212,12 +226,14 @@
"Consumer (IN) PIPE %d, "
"Producer (OUT) PIPE %d\n"
"IO base BAM = 0x%x\n"
- "BAM IRQ %d\n",
+ "BAM IRQ %d\n"
+ "Engines Availability = 0x%x\n",
(uint32_t) pce_dev->iobase,
pce_dev->ce_sps.dest_pipe_index,
pce_dev->ce_sps.src_pipe_index,
(uint32_t)pce_dev->ce_sps.bam_iobase,
- pce_dev->ce_sps.bam_irq);
+ pce_dev->ce_sps.bam_irq,
+ pce_dev->engines_avail);
return 0;
};
@@ -268,31 +284,38 @@
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
bool sha1 = false;
struct sps_command_element *pce = NULL;
+ bool use_hw_key = false;
+ bool use_pipe_key = false;
+ uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t);
+ uint32_t auth_cfg;
if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
(sreq->alg == QCE_HASH_SHA256_HMAC) ||
(sreq->alg == QCE_HASH_AES_CMAC)) {
- uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t);
- _byte_stream_to_net_words(mackey32, sreq->authkey,
- sreq->authklen);
- /* check for null key. If null, use hw key*/
- for (i = 0; i < authk_size_in_word; i++) {
- if (mackey32[i] != 0)
- break;
- }
-
+ /* no more check for null key. use flag */
+ if ((sreq->flags & QCRYPTO_CTX_USE_HW_KEY)
+ == QCRYPTO_CTX_USE_HW_KEY)
+ use_hw_key = true;
+ else if ((sreq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
+ QCRYPTO_CTX_USE_PIPE_KEY)
+ use_pipe_key = true;
pce = cmdlistinfo->go_proc;
- if (i == authk_size_in_word) {
+ if (use_hw_key == true) {
pce->addr = (uint32_t)(CRYPTO_GOPROC_QC_KEY_REG +
pce_dev->phy_iobase);
} else {
pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
pce_dev->phy_iobase);
pce = cmdlistinfo->auth_key;
- for (i = 0; i < authk_size_in_word; i++, pce++)
- pce->data = mackey32[i];
+ if (use_pipe_key == false) {
+ _byte_stream_to_net_words(mackey32,
+ sreq->authkey,
+ sreq->authklen);
+ for (i = 0; i < authk_size_in_word; i++, pce++)
+ pce->data = mackey32[i];
+ }
}
}
@@ -347,14 +370,19 @@
/* Set/reset last bit in CFG register */
pce = cmdlistinfo->auth_seg_cfg;
+ auth_cfg = pce->data & ~(1 << CRYPTO_LAST |
+ 1 << CRYPTO_FIRST |
+ 1 << CRYPTO_USE_PIPE_KEY_AUTH |
+ 1 << CRYPTO_USE_HW_KEY_AUTH);
if (sreq->last_blk)
- pce->data |= 1 << CRYPTO_LAST;
- else
- pce->data &= ~(1 << CRYPTO_LAST);
+ auth_cfg |= 1 << CRYPTO_LAST;
if (sreq->first_blk)
- pce->data |= 1 << CRYPTO_FIRST;
- else
- pce->data &= ~(1 << CRYPTO_FIRST);
+ auth_cfg |= 1 << CRYPTO_FIRST;
+ if (use_hw_key)
+ auth_cfg |= 1 << CRYPTO_USE_HW_KEY_AUTH;
+ if (use_pipe_key)
+ auth_cfg |= 1 << CRYPTO_USE_PIPE_KEY_AUTH;
+ pce->data = auth_cfg;
go_proc:
/* write auth seg size */
pce = cmdlistinfo->auth_seg_size;
@@ -443,7 +471,7 @@
uint32_t totallen_in, uint32_t coffset,
struct qce_cmdlist_info *cmdlistinfo)
{
- int32_t authk_size_in_word = q_req->authklen/sizeof(uint32_t);
+ int32_t authk_size_in_word = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
int i;
uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {0};
struct sps_command_element *pce;
@@ -804,12 +832,20 @@
}
/* write xts du size */
pce = cmdlistinfo->encr_xts_du_size;
- if (!(creq->flags & QCRYPTO_CTX_XTS_MASK))
- pce->data = creq->cryptlen;
- else
+ switch (creq->flags & QCRYPTO_CTX_XTS_MASK) {
+ case QCRYPTO_CTX_XTS_DU_SIZE_512B:
pce->data = min((unsigned int)QCE_SECTOR_SIZE,
creq->cryptlen);
-
+ break;
+ case QCRYPTO_CTX_XTS_DU_SIZE_1KB:
+ pce->data =
+ min((unsigned int)QCE_SECTOR_SIZE * 2,
+ creq->cryptlen);
+ break;
+ default:
+ pce->data = creq->cryptlen;
+ break;
+ }
}
if (creq->mode != QCE_MODE_ECB) {
if (creq->mode == QCE_MODE_XTS)
@@ -889,15 +925,141 @@
return 0;
};
+static int _ce_f9_setup(struct qce_device *pce_dev, struct qce_f9_req *req,
+ struct qce_cmdlist_info *cmdlistinfo)
+{
+ uint32_t ikey32[OTA_KEY_SIZE/sizeof(uint32_t)];
+ uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
+ uint32_t cfg;
+ struct sps_command_element *pce;
+ int i;
+
+ switch (req->algorithm) {
+ case QCE_OTA_ALGO_KASUMI:
+ cfg = pce_dev->reg.auth_cfg_kasumi;
+ break;
+ case QCE_OTA_ALGO_SNOW3G:
+ default:
+ cfg = pce_dev->reg.auth_cfg_snow3g;
+ break;
+ };
+
+ /* write key in CRYPTO_AUTH_IV0-3_REG */
+ _byte_stream_to_net_words(ikey32, &req->ikey[0], OTA_KEY_SIZE);
+ pce = cmdlistinfo->auth_iv;
+ for (i = 0; i < key_size_in_word; i++, pce++)
+ pce->data = ikey32[i];
+
+ /* write last bits in CRYPTO_AUTH_IV4_REG */
+ pce->data = req->last_bits;
+
+ /* write fresh to CRYPTO_AUTH_BYTECNT0_REG */
+ pce = cmdlistinfo->auth_bytecount;
+ pce->data = req->fresh;
+
+ /* write count-i to CRYPTO_AUTH_BYTECNT1_REG */
+ pce++;
+ pce->data = req->count_i;
+
+ /* write auth seg cfg */
+ pce = cmdlistinfo->auth_seg_cfg;
+ if (req->direction == QCE_OTA_DIR_DOWNLINK)
+ cfg |= BIT(CRYPTO_F9_DIRECTION);
+ pce->data = cfg;
+
+ /* write auth seg size */
+ pce = cmdlistinfo->auth_seg_size;
+ pce->data = req->msize;
+
+ /* write auth seg start*/
+ pce = cmdlistinfo->auth_seg_start;
+ pce->data = 0;
+
+ /* write seg size */
+ pce = cmdlistinfo->seg_size;
+ pce->data = req->msize;
+
+
+ /* write go */
+ pce = cmdlistinfo->go_proc;
+ pce->addr = (uint32_t)(CRYPTO_GOPROC_REG + pce_dev->phy_iobase);
+ return 0;
+}
+
+static int _ce_f8_setup(struct qce_device *pce_dev, struct qce_f8_req *req,
+ bool key_stream_mode, uint16_t npkts, uint16_t cipher_offset,
+ uint16_t cipher_size,
+ struct qce_cmdlist_info *cmdlistinfo)
+{
+ uint32_t ckey32[OTA_KEY_SIZE/sizeof(uint32_t)];
+ uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
+ uint32_t cfg;
+ struct sps_command_element *pce;
+ int i;
+
+ switch (req->algorithm) {
+ case QCE_OTA_ALGO_KASUMI:
+ cfg = pce_dev->reg.encr_cfg_kasumi;
+ break;
+ case QCE_OTA_ALGO_SNOW3G:
+ default:
+ cfg = pce_dev->reg.encr_cfg_snow3g;
+ break;
+ };
+ /* write key */
+ _byte_stream_to_net_words(ckey32, &req->ckey[0], OTA_KEY_SIZE);
+ pce = cmdlistinfo->encr_key;
+ for (i = 0; i < key_size_in_word; i++, pce++)
+ pce->data = ckey32[i];
+
+ /* write encr seg cfg */
+ pce = cmdlistinfo->encr_seg_cfg;
+ if (key_stream_mode)
+ cfg |= BIT(CRYPTO_F8_KEYSTREAM_ENABLE);
+ if (req->direction == QCE_OTA_DIR_DOWNLINK)
+ cfg |= BIT(CRYPTO_F8_DIRECTION);
+ pce->data = cfg;
+
+ /* write encr seg start */
+ pce = cmdlistinfo->encr_seg_start;
+ pce->data = (cipher_offset & 0xffff);
+
+ /* write encr seg size */
+ pce = cmdlistinfo->encr_seg_size;
+ pce->data = cipher_size;
+
+ /* write seg size */
+ pce = cmdlistinfo->seg_size;
+ pce->data = req->data_len;
+
+ /* write cntr0_iv0 for countC */
+ pce = cmdlistinfo->encr_cntr_iv;
+ pce->data = req->count_c;
+ /* write cntr1_iv1 for nPkts, and bearer */
+ pce++;
+ if (npkts == 1)
+ npkts = 0;
+ pce->data = req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER |
+ npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT;
+
+ /* write go */
+ pce = cmdlistinfo->go_proc;
+ pce->addr = (uint32_t)(CRYPTO_GOPROC_REG + pce_dev->phy_iobase);
+
+ return 0;
+}
+
static int _ce_setup_hash_direct(struct qce_device *pce_dev,
struct qce_sha_req *sreq)
{
uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
uint32_t diglen;
bool use_hw_key = false;
+ bool use_pipe_key = false;
int i;
uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t);
bool sha1 = false;
uint32_t auth_cfg = 0;
@@ -944,25 +1106,25 @@
if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
(sreq->alg == QCE_HASH_SHA256_HMAC) ||
(sreq->alg == QCE_HASH_AES_CMAC)) {
- uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t);
_byte_stream_to_net_words(mackey32, sreq->authkey,
sreq->authklen);
- /* check for null key. If null, use hw key*/
- for (i = 0; i < authk_size_in_word; i++) {
- if (mackey32[i] != 0)
- break;
- }
+ /* no more check for null key. use flag to check*/
- if (i == authk_size_in_word)
+ if ((sreq->flags & QCRYPTO_CTX_USE_HW_KEY) ==
+ QCRYPTO_CTX_USE_HW_KEY) {
use_hw_key = true;
- else
- /* Clear auth_ivn, auth_keyn registers */
+ } else if ((sreq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
+ QCRYPTO_CTX_USE_PIPE_KEY) {
+ use_pipe_key = true;
+ } else {
+ /* setup key */
for (i = 0; i < authk_size_in_word; i++)
writel_relaxed(mackey32[i], (pce_dev->iobase +
(CRYPTO_AUTH_KEY0_REG +
i*sizeof(uint32_t))));
+ }
}
if (sreq->alg == QCE_HASH_AES_CMAC)
@@ -1036,6 +1198,10 @@
auth_cfg |= 1 << CRYPTO_FIRST;
else
auth_cfg &= ~(1 << CRYPTO_FIRST);
+ if (use_hw_key)
+ auth_cfg |= 1 << CRYPTO_USE_HW_KEY_AUTH;
+ if (use_pipe_key)
+ auth_cfg |= 1 << CRYPTO_USE_PIPE_KEY_AUTH;
go_proc:
/* write seg_cfg */
writel_relaxed(auth_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
@@ -1071,7 +1237,7 @@
static int _ce_setup_aead_direct(struct qce_device *pce_dev,
struct qce_req *q_req, uint32_t totallen_in, uint32_t coffset)
{
- int32_t authk_size_in_word = q_req->authklen/sizeof(uint32_t);
+ int32_t authk_size_in_word = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
int i;
uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {0};
uint32_t a_cfg;
@@ -1454,15 +1620,25 @@
(i * sizeof(uint32_t)));
}
/* write xts du size */
- if (use_pipe_key == true)
- writel_relaxed(min((uint32_t)QCE_SECTOR_SIZE,
- creq->cryptlen),
- pce_dev->iobase +
- CRYPTO_ENCR_XTS_DU_SIZE_REG);
- else
- writel_relaxed(creq->cryptlen ,
- pce_dev->iobase +
- CRYPTO_ENCR_XTS_DU_SIZE_REG);
+ switch (creq->flags & QCRYPTO_CTX_XTS_MASK) {
+ case QCRYPTO_CTX_XTS_DU_SIZE_512B:
+ writel_relaxed(
+ min((uint32_t)QCE_SECTOR_SIZE,
+ creq->cryptlen), pce_dev->iobase +
+ CRYPTO_ENCR_XTS_DU_SIZE_REG);
+ break;
+ case QCRYPTO_CTX_XTS_DU_SIZE_1KB:
+ writel_relaxed(
+ min((uint32_t)(QCE_SECTOR_SIZE * 2),
+ creq->cryptlen), pce_dev->iobase +
+ CRYPTO_ENCR_XTS_DU_SIZE_REG);
+ break;
+ default:
+ writel_relaxed(creq->cryptlen,
+ pce_dev->iobase +
+ CRYPTO_ENCR_XTS_DU_SIZE_REG);
+ break;
+ }
}
if (creq->mode != QCE_MODE_ECB) {
if (creq->mode == QCE_MODE_XTS)
@@ -1556,6 +1732,168 @@
return 0;
};
+static int _ce_f9_setup_direct(struct qce_device *pce_dev,
+ struct qce_f9_req *req)
+{
+ uint32_t ikey32[OTA_KEY_SIZE/sizeof(uint32_t)];
+ uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
+ uint32_t auth_cfg;
+ int i;
+
+ switch (req->algorithm) {
+ case QCE_OTA_ALGO_KASUMI:
+ auth_cfg = pce_dev->reg.auth_cfg_kasumi;
+ break;
+ case QCE_OTA_ALGO_SNOW3G:
+ default:
+ auth_cfg = pce_dev->reg.auth_cfg_snow3g;
+ break;
+ };
+
+ /* clear status */
+ writel_relaxed(0, pce_dev->iobase + CRYPTO_STATUS_REG);
+
+ /* set big endian configuration */
+ writel_relaxed(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
+ CRYPTO_CONFIG_REG));
+ /*
+ * Ensure previous instructions (setting the CONFIG register)
+ * was completed before issuing starting to set other config register
+ * This is to ensure the configurations are done in correct endian-ness
+ * as set in the CONFIG registers
+ */
+ mb();
+
+ /* write enc_seg_cfg */
+ writel_relaxed(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+
+ /* write ecn_seg_size */
+ writel_relaxed(0, pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
+
+ /* write key in CRYPTO_AUTH_IV0-3_REG */
+ _byte_stream_to_net_words(ikey32, &req->ikey[0], OTA_KEY_SIZE);
+ for (i = 0; i < key_size_in_word; i++)
+ writel_relaxed(ikey32[i], (pce_dev->iobase +
+ (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
+
+ /* write last bits in CRYPTO_AUTH_IV4_REG */
+ writel_relaxed(req->last_bits, (pce_dev->iobase +
+ CRYPTO_AUTH_IV4_REG));
+
+ /* write fresh to CRYPTO_AUTH_BYTECNT0_REG */
+ writel_relaxed(req->fresh, (pce_dev->iobase +
+ CRYPTO_AUTH_BYTECNT0_REG));
+
+ /* write count-i to CRYPTO_AUTH_BYTECNT1_REG */
+ writel_relaxed(req->count_i, (pce_dev->iobase +
+ CRYPTO_AUTH_BYTECNT1_REG));
+
+ /* write auth seg cfg */
+ if (req->direction == QCE_OTA_DIR_DOWNLINK)
+ auth_cfg |= BIT(CRYPTO_F9_DIRECTION);
+ writel_relaxed(auth_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+
+ /* write auth seg size */
+ writel_relaxed(req->msize, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
+
+ /* write auth seg start*/
+ writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
+
+ /* write seg size */
+ writel_relaxed(req->msize, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+ /* set little endian configuration before go*/
+ writel_relaxed(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
+ CRYPTO_CONFIG_REG));
+ /* write go */
+ writel_relaxed(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+ pce_dev->iobase + CRYPTO_GOPROC_REG);
+ /*
+ * Ensure previous instructions (setting the GO register)
+ * was completed before issuing a DMA transfer request
+ */
+ mb();
+ return 0;
+}
+
+static int _ce_f8_setup_direct(struct qce_device *pce_dev,
+ struct qce_f8_req *req, bool key_stream_mode,
+ uint16_t npkts, uint16_t cipher_offset, uint16_t cipher_size)
+{
+ int i = 0;
+ uint32_t encr_cfg = 0;
+ uint32_t ckey32[OTA_KEY_SIZE/sizeof(uint32_t)];
+ uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
+
+ switch (req->algorithm) {
+ case QCE_OTA_ALGO_KASUMI:
+ encr_cfg = pce_dev->reg.encr_cfg_kasumi;
+ break;
+ case QCE_OTA_ALGO_SNOW3G:
+ default:
+ encr_cfg = pce_dev->reg.encr_cfg_snow3g;
+ break;
+ };
+ /* clear status */
+ writel_relaxed(0, pce_dev->iobase + CRYPTO_STATUS_REG);
+ /* set big endian configuration */
+ writel_relaxed(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
+ CRYPTO_CONFIG_REG));
+ /* write auth seg configuration */
+ writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+ /* write auth seg size */
+ writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
+
+ /* write key */
+ _byte_stream_to_net_words(ckey32, &req->ckey[0], OTA_KEY_SIZE);
+
+ for (i = 0; i < key_size_in_word; i++)
+ writel_relaxed(ckey32[i], (pce_dev->iobase +
+ (CRYPTO_ENCR_KEY0_REG + i*sizeof(uint32_t))));
+ /* write encr seg cfg */
+ if (key_stream_mode)
+ encr_cfg |= BIT(CRYPTO_F8_KEYSTREAM_ENABLE);
+ if (req->direction == QCE_OTA_DIR_DOWNLINK)
+ encr_cfg |= BIT(CRYPTO_F8_DIRECTION);
+ writel_relaxed(encr_cfg, pce_dev->iobase +
+ CRYPTO_ENCR_SEG_CFG_REG);
+
+ /* write encr seg start */
+ writel_relaxed((cipher_offset & 0xffff), pce_dev->iobase +
+ CRYPTO_ENCR_SEG_START_REG);
+ /* write encr seg size */
+ writel_relaxed(cipher_size, pce_dev->iobase +
+ CRYPTO_ENCR_SEG_SIZE_REG);
+
+ /* write seg size */
+ writel_relaxed(req->data_len, pce_dev->iobase +
+ CRYPTO_SEG_SIZE_REG);
+
+ /* write cntr0_iv0 for countC */
+ writel_relaxed(req->count_c, pce_dev->iobase +
+ CRYPTO_CNTR0_IV0_REG);
+ /* write cntr1_iv1 for nPkts, and bearer */
+ if (npkts == 1)
+ npkts = 0;
+ writel_relaxed(req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER |
+ npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT,
+ pce_dev->iobase + CRYPTO_CNTR1_IV1_REG);
+
+ /* set little endian configuration before go*/
+ writel_relaxed(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
+ CRYPTO_CONFIG_REG));
+ /* write go */
+ writel_relaxed(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+ pce_dev->iobase + CRYPTO_GOPROC_REG);
+ /*
+ * Ensure previous instructions (setting the GO register)
+ * was completed before issuing a DMA transfer request
+ */
+ mb();
+ return 0;
+}
+
+
static int _qce_unlock_other_pipes(struct qce_device *pce_dev)
{
int rc = 0;
@@ -1705,6 +2043,48 @@
return 0;
};
+static int _f9_complete(struct qce_device *pce_dev)
+{
+ uint32_t mac_i;
+ uint32_t status;
+ int32_t result_status;
+
+ dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
+ pce_dev->ota_size, DMA_TO_DEVICE);
+ _byte_stream_to_net_words(&mac_i,
+ (char *)(&pce_dev->ce_sps.result->auth_iv[0]),
+ CRYPTO_REG_SIZE);
+ /* read status before unlock */
+ status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
+ if (_qce_unlock_other_pipes(pce_dev)) {
+ pce_dev->qce_cb(pce_dev->areq, NULL, NULL, -ENXIO);
+ return -ENXIO;
+ }
+ if (status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
+ | (1 << CRYPTO_HSD_ERR))) {
+ pr_err("f9 operation error. Status %x\n", status);
+ result_status = -ENXIO;
+ } else if (pce_dev->ce_sps.consumer_status |
+ pce_dev->ce_sps.producer_status) {
+ pr_err("f9 sps operation error. sps status %x %x\n",
+ pce_dev->ce_sps.consumer_status,
+ pce_dev->ce_sps.producer_status);
+ result_status = -ENXIO;
+ } else if ((status & (1 << CRYPTO_OPERATION_DONE)) == 0) {
+ pr_err("f9 operation not done? Status %x, sps status %x %x\n",
+ status,
+ pce_dev->ce_sps.consumer_status,
+ pce_dev->ce_sps.producer_status);
+ result_status = -ENXIO;
+ } else {
+ result_status = 0;
+ }
+ pce_dev->qce_cb(pce_dev->areq, (void *) mac_i, NULL,
+ result_status);
+
+ return 0;
+}
+
static int _ablk_cipher_complete(struct qce_device *pce_dev)
{
struct ablkcipher_request *areq;
@@ -1811,6 +2191,47 @@
return 0;
};
+static int _f8_complete(struct qce_device *pce_dev)
+{
+ uint32_t status;
+ int32_t result_status;
+
+ if (pce_dev->phy_ota_dst != 0)
+ dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst,
+ pce_dev->ota_size, DMA_FROM_DEVICE);
+ if (pce_dev->phy_ota_src != 0)
+ dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
+ pce_dev->ota_size, (pce_dev->phy_ota_dst) ?
+ DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
+ /* read status before unlock */
+ status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
+ if (_qce_unlock_other_pipes(pce_dev)) {
+ pce_dev->qce_cb(pce_dev->areq, NULL, NULL, -ENXIO);
+ return -ENXIO;
+ }
+ if (status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
+ | (1 << CRYPTO_HSD_ERR))) {
+ pr_err("f8 operation error. Status %x\n", status);
+ result_status = -ENXIO;
+ } else if (pce_dev->ce_sps.consumer_status |
+ pce_dev->ce_sps.producer_status) {
+ pr_err("f8 sps operation error. sps status %x %x\n",
+ pce_dev->ce_sps.consumer_status,
+ pce_dev->ce_sps.producer_status);
+ result_status = -ENXIO;
+ } else if ((status & (1 << CRYPTO_OPERATION_DONE)) == 0) {
+ pr_err("f8 operation not done? Status %x, sps status %x %x\n",
+ status,
+ pce_dev->ce_sps.consumer_status,
+ pce_dev->ce_sps.producer_status);
+ result_status = -ENXIO;
+ } else {
+ result_status = 0;
+ }
+ pce_dev->qce_cb(pce_dev->areq, NULL, NULL, result_status);
+ return 0;
+}
+
#ifdef QCE_DEBUG
static void _qce_dump_descr_fifos(struct qce_device *pce_dev)
{
@@ -1903,8 +2324,11 @@
static void _qce_set_flag(struct sps_transfer *sps_bam_pipe, uint32_t flag)
{
- struct sps_iovec *iovec = sps_bam_pipe->iovec +
- (sps_bam_pipe->iovec_count - 1);
+ struct sps_iovec *iovec;
+
+ if (sps_bam_pipe->iovec_count == 0)
+ return;
+ iovec = sps_bam_pipe->iovec + (sps_bam_pipe->iovec_count - 1);
iovec->flags |= flag;
}
@@ -1913,16 +2337,26 @@
{
struct sps_iovec *iovec = sps_bam_pipe->iovec +
sps_bam_pipe->iovec_count;
- if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
- pr_err("Num of descrptor %d exceed max (%d)",
- sps_bam_pipe->iovec_count, (uint32_t)QCE_MAX_NUM_DSCR);
- return -ENOMEM;
- }
- if (len) {
- iovec->size = len;
+ uint32_t data_cnt;
+
+ while (len > 0) {
+ if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
+ pr_err("Num of descrptor %d exceed max (%d)",
+ sps_bam_pipe->iovec_count,
+ (uint32_t)QCE_MAX_NUM_DSCR);
+ return -ENOMEM;
+ }
+ if (len > SPS_MAX_PKT_SIZE)
+ data_cnt = SPS_MAX_PKT_SIZE;
+ else
+ data_cnt = len;
+ iovec->size = data_cnt;
iovec->addr = addr;
iovec->flags = 0;
sps_bam_pipe->iovec_count++;
+ iovec++;
+ addr += data_cnt;
+ len -= data_cnt;
}
return 0;
}
@@ -1988,13 +2422,15 @@
int rc = 0;
_qce_dump_descr_fifos(pce_dev);
- rc = sps_transfer(pce_dev->ce_sps.consumer.pipe,
+ if (pce_dev->ce_sps.in_transfer.iovec_count) {
+ rc = sps_transfer(pce_dev->ce_sps.consumer.pipe,
&pce_dev->ce_sps.in_transfer);
- if (rc) {
- pr_err("sps_xfr() fail (consumer pipe=0x%x) rc = %d,",
+ if (rc) {
+ pr_err("sps_xfr() fail (consumer pipe=0x%x) rc = %d,",
(u32)pce_dev->ce_sps.consumer.pipe, rc);
- _qce_dump_descr_fifos_fail(pce_dev);
- return rc;
+ _qce_dump_descr_fifos_fail(pce_dev);
+ return rc;
+ }
}
rc = sps_transfer(pce_dev->ce_sps.producer.pipe,
&pce_dev->ce_sps.out_transfer);
@@ -2255,6 +2691,10 @@
CRYPTO_BAM_CNFG_BITS_REG);
pbam->support_cmd_dscr = (bam_cfg & CRYPTO_BAM_CD_ENABLE_MASK) ?
true : false;
+ if (pbam->support_cmd_dscr == false) {
+ pr_info("qce50 don't support command descriptor. bam_cfg%x\n",
+ bam_cfg);
+ }
pce_dev->support_cmd_dscr = pbam->support_cmd_dscr;
bam.phys_addr = pce_dev->ce_sps.bam_mem;
@@ -2282,6 +2722,7 @@
bam.manage = SPS_BAM_MGR_DEVICE_REMOTE;
else
bam.manage = SPS_BAM_MGR_LOCAL;
+
bam.ee = 1;
pr_debug("bam physical base=0x%x\n", (u32)bam.phys_addr);
@@ -2419,6 +2860,55 @@
_sha_complete(pce_dev);
};
+static void _f9_sps_producer_callback(struct sps_event_notify *notify)
+{
+ struct qce_device *pce_dev = (struct qce_device *)
+ ((struct sps_event_notify *)notify)->user;
+
+ pce_dev->ce_sps.notify = *notify;
+ pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
+ notify->event_id,
+ notify->data.transfer.iovec.addr,
+ notify->data.transfer.iovec.size,
+ notify->data.transfer.iovec.flags);
+ /* done */
+ _f9_complete(pce_dev);
+}
+
+static void _f8_sps_producer_callback(struct sps_event_notify *notify)
+{
+ struct qce_device *pce_dev = (struct qce_device *)
+ ((struct sps_event_notify *)notify)->user;
+
+ pce_dev->ce_sps.notify = *notify;
+ pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
+ notify->event_id,
+ notify->data.transfer.iovec.addr,
+ notify->data.transfer.iovec.size,
+ notify->data.transfer.iovec.flags);
+
+ if (pce_dev->ce_sps.producer_state == QCE_PIPE_STATE_COMP) {
+ pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;
+ /* done */
+ _f8_complete(pce_dev);
+ } else {
+ int rc = 0;
+ pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP;
+ pce_dev->ce_sps.out_transfer.iovec_count = 0;
+ _qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
+ CRYPTO_RESULT_DUMP_SIZE,
+ &pce_dev->ce_sps.out_transfer);
+ _qce_set_flag(&pce_dev->ce_sps.out_transfer,
+ SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_INT);
+ rc = sps_transfer(pce_dev->ce_sps.producer.pipe,
+ &pce_dev->ce_sps.out_transfer);
+ if (rc) {
+ pr_err("sps_xfr() fail (producer pipe=0x%x) rc = %d,",
+ (u32)pce_dev->ce_sps.producer.pipe, rc);
+ }
+ }
+}
+
static void _ablk_cipher_sps_producer_callback(struct sps_event_notify *notify)
{
struct qce_device *pce_dev = (struct qce_device *)
@@ -3251,6 +3741,172 @@
return 0;
}
+static int _setup_f8_cmdlistptrs(struct qce_device *pdev,
+ unsigned char **pvaddr, enum qce_ota_algo_enum alg)
+{
+ struct sps_command_element *ce_vaddr;
+ uint32_t ce_vaddr_start;
+ struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr;
+ struct qce_cmdlist_info *pcl_info = NULL;
+ int i = 0;
+ uint32_t encr_cfg = 0;
+ uint32_t key_reg = 4;
+
+ *pvaddr = (unsigned char *) ALIGN(((unsigned int)(*pvaddr)),
+ pdev->ce_sps.ce_burst_size);
+ ce_vaddr = (struct sps_command_element *)(*pvaddr);
+ ce_vaddr_start = (uint32_t)(*pvaddr);
+
+ /*
+ * Designate chunks of the allocated memory to various
+ * command list pointers related to f8 cipher algorithm defined
+ * in ce_cmdlistptrs_ops structure.
+ */
+
+ switch (alg) {
+ case QCE_OTA_ALGO_KASUMI:
+ cmdlistptr->f8_kasumi.cmdlist = (uint32_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->f8_kasumi);
+ encr_cfg = pdev->reg.encr_cfg_kasumi;
+ break;
+
+ case QCE_OTA_ALGO_SNOW3G:
+ default:
+ cmdlistptr->f8_snow3g.cmdlist = (uint32_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->f8_snow3g);
+ encr_cfg = pdev->reg.encr_cfg_snow3g;
+ break;
+ }
+ /* clear status register */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
+ 0, NULL);
+ /* set config to big endian */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+ &pcl_info->seg_size);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
+ &pcl_info->encr_seg_cfg);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+ &pcl_info->encr_seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+ &pcl_info->encr_seg_start);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
+ &pcl_info->auth_seg_cfg);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
+ 0, &pcl_info->auth_seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG,
+ 0, &pcl_info->auth_seg_start);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+ &pcl_info->encr_key);
+ for (i = 1; i < key_reg; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+ &pcl_info->encr_cntr_iv);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR1_IV1_REG, 0,
+ NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_le, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+ ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+ &pcl_info->go_proc);
+
+ pcl_info->size = (uint32_t)ce_vaddr - (uint32_t)ce_vaddr_start;
+ *pvaddr = (unsigned char *) ce_vaddr;
+
+ return 0;
+}
+
+static int _setup_f9_cmdlistptrs(struct qce_device *pdev,
+ unsigned char **pvaddr, enum qce_ota_algo_enum alg)
+{
+ struct sps_command_element *ce_vaddr;
+ uint32_t ce_vaddr_start;
+ struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr;
+ struct qce_cmdlist_info *pcl_info = NULL;
+ int i = 0;
+ uint32_t auth_cfg = 0;
+ uint32_t iv_reg = 0;
+
+ *pvaddr = (unsigned char *) ALIGN(((unsigned int)(*pvaddr)),
+ pdev->ce_sps.ce_burst_size);
+ ce_vaddr_start = (uint32_t)(*pvaddr);
+ ce_vaddr = (struct sps_command_element *)(*pvaddr);
+
+ /*
+ * Designate chunks of the allocated memory to various
+ * command list pointers related to authentication operations
+ * defined in ce_cmdlistptrs_ops structure.
+ */
+ switch (alg) {
+ case QCE_OTA_ALGO_KASUMI:
+ cmdlistptr->f9_kasumi.cmdlist = (uint32_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->f9_kasumi);
+ auth_cfg = pdev->reg.auth_cfg_kasumi;
+ break;
+
+ case QCE_OTA_ALGO_SNOW3G:
+ default:
+ cmdlistptr->f9_snow3g.cmdlist = (uint32_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->f9_snow3g);
+ auth_cfg = pdev->reg.auth_cfg_snow3g;
+ };
+
+ /* clear status register */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
+ 0, NULL);
+ /* set config to big endian */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+ iv_reg = 5;
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+ &pcl_info->seg_size);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0,
+ &pcl_info->encr_seg_cfg);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+ auth_cfg, &pcl_info->auth_seg_cfg);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
+ &pcl_info->auth_seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+ &pcl_info->auth_seg_start);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
+ &pcl_info->auth_iv);
+ for (i = 1; i < iv_reg; i++) {
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
+ 0, NULL);
+ }
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+ 0, &pcl_info->auth_bytecount);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_le, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+ ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+ &pcl_info->go_proc);
+
+ pcl_info->size = (uint32_t)ce_vaddr - (uint32_t)ce_vaddr_start;
+ *pvaddr = (unsigned char *) ce_vaddr;
+
+ return 0;
+}
+
static int _setup_unlock_pipe_cmdlistptrs(struct qce_device *pdev,
unsigned char **pvaddr)
{
@@ -3334,6 +3990,10 @@
_setup_aead_ccm_cmdlistptrs(pdev, pvaddr, true);
_setup_aead_ccm_cmdlistptrs(pdev, pvaddr, false);
+ _setup_f8_cmdlistptrs(pdev, pvaddr, QCE_OTA_ALGO_KASUMI);
+ _setup_f8_cmdlistptrs(pdev, pvaddr, QCE_OTA_ALGO_SNOW3G);
+ _setup_f9_cmdlistptrs(pdev, pvaddr, QCE_OTA_ALGO_KASUMI);
+ _setup_f9_cmdlistptrs(pdev, pvaddr, QCE_OTA_ALGO_SNOW3G);
_setup_unlock_pipe_cmdlistptrs(pdev, pvaddr);
return 0;
@@ -3461,6 +4121,13 @@
(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
(CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
+ /* Initialize encr_cfg register for kasumi/snow3g alg */
+ pce_dev->reg.encr_cfg_kasumi =
+ (CRYPTO_ENCR_ALG_KASUMI << CRYPTO_ENCR_ALG);
+
+ pce_dev->reg.encr_cfg_snow3g =
+ (CRYPTO_ENCR_ALG_SNOW_3G << CRYPTO_ENCR_ALG);
+
/* Initialize auth_cfg register for CMAC alg */
pce_dev->reg.auth_cfg_cmac_128 =
(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
@@ -3531,6 +4198,13 @@
((MAX_NONCE/sizeof(uint32_t)) << CRYPTO_AUTH_NONCE_NUM_WORDS);
pce_dev->reg.auth_cfg_aes_ccm_256 &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
+ /* Initialize auth_cfg register for kasumi/snow3g */
+ pce_dev->reg.auth_cfg_kasumi =
+ (CRYPTO_AUTH_ALG_KASUMI << CRYPTO_AUTH_ALG) |
+ BIT(CRYPTO_FIRST) | BIT(CRYPTO_LAST);
+ pce_dev->reg.auth_cfg_snow3g =
+ (CRYPTO_AUTH_ALG_SNOW3G << CRYPTO_AUTH_ALG) |
+ BIT(CRYPTO_FIRST) | BIT(CRYPTO_LAST);
return 0;
}
@@ -4105,6 +4779,302 @@
}
EXPORT_SYMBOL(qce_process_sha_req);
+int qce_f8_req(void *handle, struct qce_f8_req *req,
+ void *cookie, qce_comp_func_ptr_t qce_cb)
+{
+ struct qce_device *pce_dev = (struct qce_device *) handle;
+ bool key_stream_mode;
+ dma_addr_t dst;
+ int rc;
+ struct qce_cmdlist_info *cmdlistinfo;
+
+ switch (req->algorithm) {
+ case QCE_OTA_ALGO_KASUMI:
+ cmdlistinfo = &pce_dev->ce_sps.cmdlistptr.f8_kasumi;
+ break;
+ case QCE_OTA_ALGO_SNOW3G:
+ cmdlistinfo = &pce_dev->ce_sps.cmdlistptr.f8_snow3g;
+ break;
+ default:
+ return -EINVAL;
+ };
+
+ key_stream_mode = (req->data_in == NULL);
+
+ if ((key_stream_mode && (req->data_len & 0xf)) ||
+ (req->bearer >= QCE_OTA_MAX_BEARER))
+ return -EINVAL;
+
+ /* F8 cipher input */
+ if (key_stream_mode)
+ pce_dev->phy_ota_src = 0;
+ else {
+ pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev,
+ req->data_in, req->data_len,
+ (req->data_in == req->data_out) ?
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+ }
+
+ /* F8 cipher output */
+ if (req->data_in != req->data_out) {
+ dst = dma_map_single(pce_dev->pdev, req->data_out,
+ req->data_len, DMA_FROM_DEVICE);
+ pce_dev->phy_ota_dst = dst;
+ } else {
+ /* in-place ciphering */
+ dst = pce_dev->phy_ota_src;
+ pce_dev->phy_ota_dst = 0;
+ }
+ pce_dev->ota_size = req->data_len;
+
+
+ /* set up crypto device */
+ if (pce_dev->support_cmd_dscr)
+ rc = _ce_f8_setup(pce_dev, req, key_stream_mode, 1, 0,
+ req->data_len, cmdlistinfo);
+ else
+ rc = _ce_f8_setup_direct(pce_dev, req, key_stream_mode, 1, 0,
+ req->data_len);
+ if (rc < 0)
+ goto bad;
+
+ /* setup for callback, and issue command to sps */
+ pce_dev->areq = cookie;
+ pce_dev->qce_cb = qce_cb;
+
+ /* Register producer callback event for DESC_DONE event. */
+ pce_dev->ce_sps.producer.event.callback =
+ _f8_sps_producer_callback;
+ pce_dev->ce_sps.producer.event.options = SPS_O_DESC_DONE;
+ rc = sps_register_event(pce_dev->ce_sps.producer.pipe,
+ &pce_dev->ce_sps.producer.event);
+ if (rc) {
+ pr_err("Producer callback registration failed rc = %d\n", rc);
+ goto bad;
+ }
+ _qce_sps_iovec_count_init(pce_dev);
+
+ if (pce_dev->support_cmd_dscr)
+ _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+ &pce_dev->ce_sps.in_transfer);
+
+ if (!key_stream_mode) {
+ _qce_sps_add_data((uint32_t)pce_dev->phy_ota_src, req->data_len,
+ &pce_dev->ce_sps.in_transfer);
+ _qce_set_flag(&pce_dev->ce_sps.in_transfer,
+ SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+ }
+
+ _qce_sps_add_data((uint32_t)dst, req->data_len,
+ &pce_dev->ce_sps.out_transfer);
+
+ if (req->data_len > SPS_MAX_PKT_SIZE) {
+ _qce_set_flag(&pce_dev->ce_sps.out_transfer,
+ SPS_IOVEC_FLAG_INT);
+ pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;
+ } else {
+ pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP;
+ _qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
+ CRYPTO_RESULT_DUMP_SIZE,
+ &pce_dev->ce_sps.out_transfer);
+ _qce_set_flag(&pce_dev->ce_sps.out_transfer,
+ SPS_IOVEC_FLAG_INT);
+ }
+ rc = _qce_sps_transfer(pce_dev);
+ if (rc)
+ goto bad;
+ return 0;
+bad:
+ if (pce_dev->phy_ota_dst != 0)
+ dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst,
+ req->data_len, DMA_FROM_DEVICE);
+ if (pce_dev->phy_ota_src != 0)
+ dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
+ req->data_len,
+ (req->data_in == req->data_out) ?
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+ return rc;
+}
+EXPORT_SYMBOL(qce_f8_req);
+
+int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *mreq,
+ void *cookie, qce_comp_func_ptr_t qce_cb)
+{
+ struct qce_device *pce_dev = (struct qce_device *) handle;
+ uint16_t num_pkt = mreq->num_pkt;
+ uint16_t cipher_start = mreq->cipher_start;
+ uint16_t cipher_size = mreq->cipher_size;
+ struct qce_f8_req *req = &mreq->qce_f8_req;
+ uint32_t total;
+ dma_addr_t dst = 0;
+ int rc = 0;
+ struct qce_cmdlist_info *cmdlistinfo;
+
+ switch (req->algorithm) {
+ case QCE_OTA_ALGO_KASUMI:
+ cmdlistinfo = &pce_dev->ce_sps.cmdlistptr.f8_kasumi;
+ break;
+ case QCE_OTA_ALGO_SNOW3G:
+ cmdlistinfo = &pce_dev->ce_sps.cmdlistptr.f8_snow3g;
+ break;
+ default:
+ return -EINVAL;
+ };
+
+ total = num_pkt * req->data_len;
+
+ /* F8 cipher input */
+ pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev,
+ req->data_in, total,
+ (req->data_in == req->data_out) ?
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+
+ /* F8 cipher output */
+ if (req->data_in != req->data_out) {
+ dst = dma_map_single(pce_dev->pdev, req->data_out, total,
+ DMA_FROM_DEVICE);
+ pce_dev->phy_ota_dst = dst;
+ } else {
+ /* in place ciphering */
+ dst = pce_dev->phy_ota_src;
+ pce_dev->phy_ota_dst = 0;
+ }
+
+ pce_dev->ota_size = total;
+
+ /* set up crypto device */
+ if (pce_dev->support_cmd_dscr)
+ rc = _ce_f8_setup(pce_dev, req, false, num_pkt, cipher_start,
+ cipher_size, cmdlistinfo);
+ else
+ rc = _ce_f8_setup_direct(pce_dev, req, false, num_pkt,
+ cipher_start, cipher_size);
+ if (rc)
+ goto bad;
+
+ /* setup for callback, and issue command to sps */
+ pce_dev->areq = cookie;
+ pce_dev->qce_cb = qce_cb;
+
+ /* Register producer callback event for DESC_DONE event. */
+ pce_dev->ce_sps.producer.event.callback =
+ _f8_sps_producer_callback;
+ pce_dev->ce_sps.producer.event.options = SPS_O_DESC_DONE;
+ rc = sps_register_event(pce_dev->ce_sps.producer.pipe,
+ &pce_dev->ce_sps.producer.event);
+ if (rc) {
+ pr_err("Producer callback registration failed rc = %d\n", rc);
+ goto bad;
+ }
+ _qce_sps_iovec_count_init(pce_dev);
+
+ if (pce_dev->support_cmd_dscr)
+ _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+ &pce_dev->ce_sps.in_transfer);
+
+ _qce_sps_add_data((uint32_t)pce_dev->phy_ota_src, total,
+ &pce_dev->ce_sps.in_transfer);
+ _qce_set_flag(&pce_dev->ce_sps.in_transfer,
+ SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+ _qce_sps_add_data((uint32_t)dst, total,
+ &pce_dev->ce_sps.out_transfer);
+
+ if (total > SPS_MAX_PKT_SIZE) {
+ _qce_set_flag(&pce_dev->ce_sps.out_transfer,
+ SPS_IOVEC_FLAG_INT);
+ pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;
+ } else {
+ pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP;
+ _qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
+ CRYPTO_RESULT_DUMP_SIZE,
+ &pce_dev->ce_sps.out_transfer);
+ _qce_set_flag(&pce_dev->ce_sps.out_transfer,
+ SPS_IOVEC_FLAG_INT);
+ }
+ rc = _qce_sps_transfer(pce_dev);
+
+ if (rc == 0)
+ return 0;
+bad:
+ if (pce_dev->phy_ota_dst)
+ dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst, total,
+ DMA_FROM_DEVICE);
+ dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src, total,
+ (req->data_in == req->data_out) ?
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+ return rc;
+}
+EXPORT_SYMBOL(qce_f8_multi_pkt_req);
+
+int qce_f9_req(void *handle, struct qce_f9_req *req, void *cookie,
+ qce_comp_func_ptr_t qce_cb)
+{
+ struct qce_device *pce_dev = (struct qce_device *) handle;
+ int rc;
+ struct qce_cmdlist_info *cmdlistinfo;
+
+ switch (req->algorithm) {
+ case QCE_OTA_ALGO_KASUMI:
+ cmdlistinfo = &pce_dev->ce_sps.cmdlistptr.f9_kasumi;
+ break;
+ case QCE_OTA_ALGO_SNOW3G:
+ cmdlistinfo = &pce_dev->ce_sps.cmdlistptr.f9_snow3g;
+ break;
+ default:
+ return -EINVAL;
+ };
+
+ pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev, req->message,
+ req->msize, DMA_TO_DEVICE);
+
+ pce_dev->ota_size = req->msize;
+
+ if (pce_dev->support_cmd_dscr)
+ rc = _ce_f9_setup(pce_dev, req, cmdlistinfo);
+ else
+ rc = _ce_f9_setup_direct(pce_dev, req);
+ if (rc < 0)
+ goto bad;
+
+ /* setup for callback, and issue command to sps */
+ pce_dev->areq = cookie;
+ pce_dev->qce_cb = qce_cb;
+
+ /* Register producer callback event for DESC_DONE event. */
+ pce_dev->ce_sps.producer.event.callback = _f9_sps_producer_callback;
+ pce_dev->ce_sps.producer.event.options = SPS_O_DESC_DONE;
+ rc = sps_register_event(pce_dev->ce_sps.producer.pipe,
+ &pce_dev->ce_sps.producer.event);
+ if (rc) {
+ pr_err("Producer callback registration failed rc = %d\n", rc);
+ goto bad;
+ }
+
+ _qce_sps_iovec_count_init(pce_dev);
+ if (pce_dev->support_cmd_dscr)
+ _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+ &pce_dev->ce_sps.in_transfer);
+ _qce_sps_add_data((uint32_t)pce_dev->phy_ota_src, req->msize,
+ &pce_dev->ce_sps.in_transfer);
+ _qce_set_flag(&pce_dev->ce_sps.in_transfer,
+ SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+ _qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
+ CRYPTO_RESULT_DUMP_SIZE,
+ &pce_dev->ce_sps.out_transfer);
+ _qce_set_flag(&pce_dev->ce_sps.out_transfer, SPS_IOVEC_FLAG_INT);
+ rc = _qce_sps_transfer(pce_dev);
+ if (rc)
+ goto bad;
+ return 0;
+bad:
+ dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
+ req->msize, DMA_TO_DEVICE);
+ return rc;
+}
+EXPORT_SYMBOL(qce_f9_req);
+
static int __qce_get_device_tree_data(struct platform_device *pdev,
struct qce_device *pce_dev)
{
@@ -4115,6 +5085,26 @@
"qcom,ce-hw-shared");
pce_dev->support_hw_key = of_property_read_bool((&pdev->dev)->of_node,
"qcom,ce-hw-key");
+
+ pce_dev->use_sw_aes_cbc_ecb_ctr_algo =
+ of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,use-sw-aes-cbc-ecb-ctr-algo");
+ pce_dev->use_sw_aead_algo =
+ of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,use-sw-aead-algo");
+ pce_dev->use_sw_aes_xts_algo =
+ of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,use-sw-aes-xts-algo");
+ pce_dev->use_sw_ahash_algo =
+ of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,use-sw-ahash-algo");
+ pce_dev->use_sw_hmac_algo =
+ of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,use-sw-hmac-algo");
+ pce_dev->use_sw_aes_ccm_algo =
+ of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,use-sw-aes-ccm-algo");
+
if (of_property_read_u32((&pdev->dev)->of_node,
"qcom,bam-pipe-pair",
&pce_dev->ce_sps.pipe_pair_index)) {
@@ -4339,7 +5329,7 @@
goto err_pce_dev;
}
- pce_dev->memsize = 9 * PAGE_SIZE;
+ pce_dev->memsize = 10 * PAGE_SIZE;
pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev,
pce_dev->memsize, &pce_dev->coh_pmem, GFP_KERNEL);
if (pce_dev->coh_vmem == NULL) {
@@ -4354,7 +5344,7 @@
*rc = qce_enable_clk(pce_dev);
if (*rc)
- goto err;
+ goto err_enable_clk;
if (_probe_ce_engine(pce_dev)) {
*rc = -ENXIO;
@@ -4363,12 +5353,17 @@
*rc = 0;
qce_init_ce_cfg_val(pce_dev);
- qce_sps_init(pce_dev);
+ *rc = qce_sps_init(pce_dev);
+ if (*rc)
+ goto err;
qce_setup_ce_sps_data(pce_dev);
qce_disable_clk(pce_dev);
return pce_dev;
err:
+ qce_disable_clk(pce_dev);
+
+err_enable_clk:
__qce_deinit_clk(pce_dev);
err_mem:
@@ -4410,6 +5405,11 @@
}
EXPORT_SYMBOL(qce_close);
+#define OTA_SUPPORT_MASK (1 << CRYPTO_ENCR_SNOW3G_SEL |\
+ 1 << CRYPTO_ENCR_KASUMI_SEL |\
+ 1 << CRYPTO_AUTH_SNOW3G_SEL |\
+ 1 << CRYPTO_AUTH_KASUMI_SEL)
+
int qce_hw_support(void *handle, struct ce_hw_support *ce_support)
{
struct qce_device *pce_dev = (struct qce_device *)handle;
@@ -4424,7 +5424,10 @@
ce_support->cmac = true;
ce_support->aes_key_192 = false;
ce_support->aes_xts = true;
- ce_support->ota = false;
+ if ((pce_dev->engines_avail & OTA_SUPPORT_MASK) == OTA_SUPPORT_MASK)
+ ce_support->ota = true;
+ else
+ ce_support->ota = false;
ce_support->bam = true;
ce_support->is_shared = (pce_dev->is_shared == 1) ? true : false;
ce_support->hw_key = pce_dev->support_hw_key;
@@ -4433,6 +5436,19 @@
ce_support->aligned_only = false;
else
ce_support->aligned_only = true;
+
+ ce_support->use_sw_aes_cbc_ecb_ctr_algo =
+ pce_dev->use_sw_aes_cbc_ecb_ctr_algo;
+ ce_support->use_sw_aead_algo =
+ pce_dev->use_sw_aead_algo;
+ ce_support->use_sw_aes_xts_algo =
+ pce_dev->use_sw_aes_xts_algo;
+ ce_support->use_sw_ahash_algo =
+ pce_dev->use_sw_ahash_algo;
+ ce_support->use_sw_hmac_algo =
+ pce_dev->use_sw_hmac_algo;
+ ce_support->use_sw_aes_ccm_algo =
+ pce_dev->use_sw_aes_ccm_algo;
return 0;
}
EXPORT_SYMBOL(qce_hw_support);
diff --git a/drivers/crypto/msm/qce50.h b/drivers/crypto/msm/qce50.h
index adab5d4..fc387aa 100644
--- a/drivers/crypto/msm/qce50.h
+++ b/drivers/crypto/msm/qce50.h
@@ -113,6 +113,10 @@
struct qce_cmdlist_info aead_hmac_sha1_ecb_3des;
struct qce_cmdlist_info aead_aes_128_ccm;
struct qce_cmdlist_info aead_aes_256_ccm;
+ struct qce_cmdlist_info f8_kasumi;
+ struct qce_cmdlist_info f8_snow3g;
+ struct qce_cmdlist_info f9_kasumi;
+ struct qce_cmdlist_info f9_snow3g;
struct qce_cmdlist_info unlock_all_pipes;
};
@@ -140,6 +144,8 @@
uint32_t encr_cfg_3des_cbc;
uint32_t encr_cfg_3des_ecb;
+ uint32_t encr_cfg_kasumi;
+ uint32_t encr_cfg_snow3g;
uint32_t auth_cfg_cmac_128;
uint32_t auth_cfg_cmac_256;
@@ -154,7 +160,8 @@
uint32_t auth_cfg_aes_ccm_256;
uint32_t auth_cfg_aead_sha1_hmac;
uint32_t auth_cfg_aead_sha256_hmac;
-
+ uint32_t auth_cfg_kasumi;
+ uint32_t auth_cfg_snow3g;
};
/* DM data structure with buffers, commandlists & commmand pointer lists */
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
index 6606706..974606d 100644
--- a/drivers/crypto/msm/qcrypto.c
+++ b/drivers/crypto/msm/qcrypto.c
@@ -1,6 +1,6 @@
/* Qualcomm Crypto driver
*
- * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -24,6 +24,10 @@
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/cache.h>
#include <crypto/ctr.h>
#include <crypto/des.h>
@@ -51,6 +55,8 @@
*/
#define MAX_ALIGN_SIZE 0x40
+#define QCRYPTO_HIGH_BANDWIDTH_TIMEOUT 1000
+
struct crypto_stat {
u32 aead_sha1_aes_enc;
u32 aead_sha1_aes_dec;
@@ -83,6 +89,30 @@
static struct crypto_stat _qcrypto_stat;
static struct dentry *_debug_dent;
static char _debug_read_buf[DEBUG_MAX_RW_BUF];
+struct crypto_priv;
+struct crypto_engine {
+ struct list_head elist;
+ void *qce; /* qce handle */
+ struct platform_device *pdev; /* platform device */
+ struct crypto_async_request *req; /* current active request */
+ struct crypto_priv *pcp;
+ struct tasklet_struct done_tasklet;
+ uint32_t bus_scale_handle;
+ struct crypto_queue req_queue; /*
+ * request queue for those requests
+ * that have this engine assigned
+ * waiting to be executed
+ */
+ u32 total_req;
+ u32 err_req;
+ u32 unit;
+ int res; /* execution result */
+ unsigned int signature;
+ uint32_t high_bw_req_count;
+ bool high_bw_req;
+ struct timer_list bw_scale_down_timer;
+ struct work_struct low_bw_req_ws;
+};
struct crypto_priv {
/* CE features supported by target device*/
@@ -91,34 +121,25 @@
/* CE features/algorithms supported by HW engine*/
struct ce_hw_support ce_support;
- uint32_t bus_scale_handle;
/* the lock protects queue and req*/
spinlock_t lock;
- /* qce handle */
- void *qce;
-
/* list of registered algorithms */
struct list_head alg_list;
- /* platform device */
- struct platform_device *pdev;
-
/* current active request */
struct crypto_async_request *req;
- int res;
-
- /* request queue */
- struct crypto_queue queue;
uint32_t ce_lock_count;
- uint32_t high_bw_req_count;
-
struct work_struct unlock_ce_ws;
-
- struct tasklet_struct done_tasklet;
+ struct list_head engine_list; /* list of qcrypto engines */
+ int32_t total_units; /* total units of engines */
+ struct mutex engine_lock;
+ struct crypto_engine *next_engine; /* next assign engine */
};
-
+static struct crypto_priv qcrypto_dev;
+static struct crypto_engine *_qcrypto_static_assign_engine(
+ struct crypto_priv *cp);
/*-------------------------------------------------------------------------
* Resource Locking Service
@@ -129,8 +150,6 @@
#define NUM_RETRY 1000
#define CE_BUSY 55
-static DEFINE_MUTEX(qcrypto_sent_bw_req);
-
static int qcrypto_scm_cmd(int resource, int cmd, int *response)
{
#ifdef CONFIG_MSM_SCM
@@ -224,6 +243,7 @@
struct crypto_priv *cp;
unsigned int flags;
+ struct crypto_engine *pengine; /* fixed engine assigned */
};
struct qcrypto_cipher_req_ctx {
@@ -266,30 +286,18 @@
struct qcrypto_sha_ctx {
enum qce_hash_alg_enum alg;
- uint32_t byte_count[4];
- uint8_t digest[SHA_MAX_DIGEST_SIZE];
uint32_t diglen;
- uint8_t *tmp_tbuf;
- uint8_t *trailing_buf;
- uint8_t *in_buf;
uint32_t authkey_in_len;
- uint32_t trailing_buf_len;
- uint8_t first_blk;
- uint8_t last_blk;
uint8_t authkey[SHA_MAX_BLOCK_SIZE];
struct ahash_request *ahash_req;
struct completion ahash_req_complete;
- struct scatterlist *sg;
- struct scatterlist tmp_sg;
struct crypto_priv *cp;
unsigned int flags;
+ struct crypto_engine *pengine; /* fixed engine assigned */
};
struct qcrypto_sha_req_ctx {
- union {
- struct sha1_state sha1_state_ctx;
- struct sha256_state sha256_state_ctx;
- };
+
struct scatterlist *src;
uint32_t nbytes;
@@ -297,6 +305,20 @@
struct scatterlist dsg; /* Data sg */
unsigned char *data; /* Incoming data pointer*/
unsigned char *data2; /* Updated data pointer*/
+
+ uint32_t byte_count[4];
+ u64 count;
+ uint8_t first_blk;
+ uint8_t last_blk;
+ uint8_t trailing_buf[SHA_MAX_BLOCK_SIZE];
+ uint32_t trailing_buf_len;
+
+ /* DMA buffer; internal use only */
+ uint8_t staging_dmabuf
+ [SHA_MAX_BLOCK_SIZE+SHA_MAX_DIGEST_SIZE+MAX_ALIGN_SIZE];
+
+ uint8_t digest[SHA_MAX_DIGEST_SIZE];
+ struct scatterlist sg[2];
};
static void _byte_stream_to_words(uint32_t *iv, unsigned char *b,
@@ -304,7 +326,7 @@
{
unsigned n;
- n = len / sizeof(uint32_t) ;
+ n = len / sizeof(uint32_t);
for (; n > 0; n--) {
*iv = ((*b << 24) & 0xff000000) |
(((*(b+1)) << 16) & 0xff0000) |
@@ -318,12 +340,12 @@
if (n == 3) {
*iv = ((*b << 24) & 0xff000000) |
(((*(b+1)) << 16) & 0xff0000) |
- (((*(b+2)) << 8) & 0xff00) ;
+ (((*(b+2)) << 8) & 0xff00);
} else if (n == 2) {
*iv = ((*b << 24) & 0xff000000) |
- (((*(b+1)) << 16) & 0xff0000) ;
+ (((*(b+1)) << 16) & 0xff0000);
} else if (n == 1) {
- *iv = ((*b << 24) & 0xff000000) ;
+ *iv = ((*b << 24) & 0xff000000);
}
}
@@ -352,58 +374,103 @@
}
}
-static void qcrypto_ce_high_bw_req(struct crypto_priv *cp, bool high_bw_req)
+static void qcrypto_ce_set_bus(struct crypto_engine *pengine,
+ bool high_bw_req)
{
int ret = 0;
- mutex_lock(&qcrypto_sent_bw_req);
- if (high_bw_req) {
- if (cp->high_bw_req_count == 0) {
- ret = qce_enable_clk(cp->qce);
- if (ret) {
- pr_err("%s Unable enable clk\n", __func__);
- mutex_unlock(&qcrypto_sent_bw_req);
- return;
- }
+ if (high_bw_req && pengine->high_bw_req == false) {
+ pm_stay_awake(&pengine->pdev->dev);
+ ret = qce_enable_clk(pengine->qce);
+ if (ret) {
+ pr_err("%s Unable enable clk\n", __func__);
+ goto clk_err;
+ }
+ ret = msm_bus_scale_client_update_request(
+ pengine->bus_scale_handle, 1);
+ if (ret) {
+ pr_err("%s Unable to set to high bandwidth\n",
+ __func__);
+ qce_disable_clk(pengine->qce);
+ goto clk_err;
+ }
+ pengine->high_bw_req = true;
+ } else if (high_bw_req == false && pengine->high_bw_req == true) {
+ ret = msm_bus_scale_client_update_request(
+ pengine->bus_scale_handle, 0);
+ if (ret) {
+ pr_err("%s Unable to set to low bandwidth\n",
+ __func__);
+ goto clk_err;
+ }
+ ret = qce_disable_clk(pengine->qce);
+ if (ret) {
+ pr_err("%s Unable disable clk\n", __func__);
ret = msm_bus_scale_client_update_request(
- cp->bus_scale_handle, 1);
- if (ret) {
+ pengine->bus_scale_handle, 1);
+ if (ret)
pr_err("%s Unable to set to high bandwidth\n",
- __func__);
- qce_disable_clk(cp->qce);
- mutex_unlock(&qcrypto_sent_bw_req);
- return;
- }
+ __func__);
+ goto clk_err;
}
- cp->high_bw_req_count++;
- } else {
- if (cp->high_bw_req_count == 1) {
- ret = msm_bus_scale_client_update_request(
- cp->bus_scale_handle, 0);
- if (ret) {
- pr_err("%s Unable to set to low bandwidth\n",
- __func__);
- mutex_unlock(&qcrypto_sent_bw_req);
- return;
- }
- ret = qce_disable_clk(cp->qce);
- if (ret) {
- pr_err("%s Unable disable clk\n", __func__);
- ret = msm_bus_scale_client_update_request(
- cp->bus_scale_handle, 1);
- if (ret)
- pr_err("%s Unable to set to high bandwidth\n",
- __func__);
- mutex_unlock(&qcrypto_sent_bw_req);
- return;
- }
- }
- cp->high_bw_req_count--;
+ pengine->high_bw_req = false;
+ pm_relax(&pengine->pdev->dev);
}
- mutex_unlock(&qcrypto_sent_bw_req);
+ return;
+clk_err:
+ pm_relax(&pengine->pdev->dev);
+ return;
+
}
-static int _start_qcrypto_process(struct crypto_priv *cp);
+static void qcrypto_bw_scale_down_timer_callback(unsigned long data)
+{
+ struct crypto_engine *pengine = (struct crypto_engine *)data;
+
+ schedule_work(&pengine->low_bw_req_ws);
+
+ return;
+}
+
+static void qcrypto_bw_set_timeout(struct crypto_engine *pengine)
+{
+ del_timer_sync(&(pengine->bw_scale_down_timer));
+ pengine->bw_scale_down_timer.data =
+ (unsigned long)(pengine);
+ pengine->bw_scale_down_timer.expires = jiffies +
+ msecs_to_jiffies(QCRYPTO_HIGH_BANDWIDTH_TIMEOUT);
+ add_timer(&(pengine->bw_scale_down_timer));
+}
+
+static void qcrypto_ce_bw_scaling_req(struct crypto_engine *pengine,
+ bool high_bw_req)
+{
+ mutex_lock(&pengine->pcp->engine_lock);
+ if (high_bw_req) {
+ if (pengine->high_bw_req_count == 0)
+ qcrypto_ce_set_bus(pengine, true);
+ pengine->high_bw_req_count++;
+ } else {
+ pengine->high_bw_req_count--;
+ if (pengine->high_bw_req_count == 0)
+ qcrypto_bw_set_timeout(pengine);
+ }
+ mutex_unlock(&pengine->pcp->engine_lock);
+}
+
+static void qcrypto_low_bw_req_work(struct work_struct *work)
+{
+ struct crypto_engine *pengine = container_of(work,
+ struct crypto_engine, low_bw_req_ws);
+
+ mutex_lock(&pengine->pcp->engine_lock);
+ if (pengine->high_bw_req_count == 0)
+ qcrypto_ce_set_bus(pengine, false);
+ mutex_unlock(&pengine->pcp->engine_lock);
+}
+
+static int _start_qcrypto_process(struct crypto_priv *cp,
+ struct crypto_engine *pengine);
static int qcrypto_count_sg(struct scatterlist *sg, int nbytes)
{
@@ -499,9 +566,11 @@
/* random first IV */
get_random_bytes(ctx->iv, QCRYPTO_MAX_IV_LENGTH);
+ ctx->pengine = _qcrypto_static_assign_engine(ctx->cp);
+ if (ctx->pengine == NULL)
+ return -ENODEV;
if (ctx->cp->platform_support.bus_scale_table != NULL)
- qcrypto_ce_high_bw_req(ctx->cp, true);
-
+ qcrypto_ce_bw_scaling_req(ctx->pengine, true);
return 0;
};
@@ -517,30 +586,13 @@
crypto_ahash_set_reqsize(ahash, sizeof(struct qcrypto_sha_req_ctx));
/* update context with ptr to cp */
sha_ctx->cp = q_alg->cp;
- sha_ctx->sg = NULL;
sha_ctx->flags = 0;
-
- sha_ctx->tmp_tbuf = kzalloc(SHA_MAX_BLOCK_SIZE +
- SHA_MAX_DIGEST_SIZE, GFP_KERNEL);
- if (sha_ctx->tmp_tbuf == NULL) {
- pr_err("qcrypto Can't Allocate mem: sha_ctx->tmp_tbuf, error %ld\n",
- PTR_ERR(sha_ctx->tmp_tbuf));
- return -ENOMEM;
- }
-
- sha_ctx->trailing_buf = kzalloc(SHA_MAX_BLOCK_SIZE, GFP_KERNEL);
- if (sha_ctx->trailing_buf == NULL) {
- kfree(sha_ctx->tmp_tbuf);
- sha_ctx->tmp_tbuf = NULL;
- pr_err("qcrypto Can't Allocate mem: sha_ctx->trailing_buf, error %ld\n",
- PTR_ERR(sha_ctx->trailing_buf));
- return -ENOMEM;
- }
-
sha_ctx->ahash_req = NULL;
+ sha_ctx->pengine = _qcrypto_static_assign_engine(sha_ctx->cp);
+ if (sha_ctx->pengine == NULL)
+ return -ENODEV;
if (sha_ctx->cp->platform_support.bus_scale_table != NULL)
- qcrypto_ce_high_bw_req(sha_ctx->cp, true);
-
+ qcrypto_ce_bw_scaling_req(sha_ctx->pengine, true);
return 0;
};
@@ -548,20 +600,13 @@
{
struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);
- kfree(sha_ctx->tmp_tbuf);
- sha_ctx->tmp_tbuf = NULL;
- kfree(sha_ctx->trailing_buf);
- sha_ctx->trailing_buf = NULL;
- if (sha_ctx->sg != NULL) {
- kfree(sha_ctx->sg);
- sha_ctx->sg = NULL;
- }
if (sha_ctx->ahash_req != NULL) {
ahash_request_free(sha_ctx->ahash_req);
sha_ctx->ahash_req = NULL;
}
- if (sha_ctx->cp->platform_support.bus_scale_table != NULL)
- qcrypto_ce_high_bw_req(sha_ctx->cp, false);
+ if (sha_ctx->pengine &&
+ sha_ctx->cp->platform_support.bus_scale_table != NULL)
+ qcrypto_ce_bw_scaling_req(sha_ctx->pengine, false);
};
@@ -610,22 +655,25 @@
{
struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- if (ctx->cp->platform_support.bus_scale_table != NULL)
- qcrypto_ce_high_bw_req(ctx->cp, false);
+ if (ctx->pengine && ctx->cp->platform_support.bus_scale_table != NULL)
+ qcrypto_ce_bw_scaling_req(ctx->pengine, false);
};
static void _qcrypto_cra_aead_exit(struct crypto_tfm *tfm)
{
struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- if (ctx->cp->platform_support.bus_scale_table != NULL)
- qcrypto_ce_high_bw_req(ctx->cp, false);
+ if (ctx->pengine && ctx->cp->platform_support.bus_scale_table != NULL)
+ qcrypto_ce_bw_scaling_req(ctx->pengine, false);
};
static int _disp_stats(int id)
{
struct crypto_stat *pstat;
int len = 0;
+ unsigned long flags;
+ struct crypto_priv *cp = &qcrypto_dev;
+ struct crypto_engine *pe;
pstat = &_qcrypto_stat;
len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
@@ -722,22 +770,55 @@
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" SHA HMAC operation success : %d\n",
pstat->sha_hmac_op_success);
+ spin_lock_irqsave(&cp->lock, flags);
+ list_for_each_entry(pe, &cp->engine_list, elist) {
+ len += snprintf(
+ _debug_read_buf + len,
+ DEBUG_MAX_RW_BUF - len - 1,
+ " Engine %d Req : %d\n",
+ pe->unit,
+ pe->total_req
+ );
+ len += snprintf(
+ _debug_read_buf + len,
+ DEBUG_MAX_RW_BUF - len - 1,
+ " Engine %d Req Error : %d\n",
+ pe->unit,
+ pe->err_req
+ );
+ }
+ spin_unlock_irqrestore(&cp->lock, flags);
return len;
}
-static int _qcrypto_remove(struct platform_device *pdev)
+static void _qcrypto_remove_engine(struct crypto_engine *pengine)
{
struct crypto_priv *cp;
struct qcrypto_alg *q_alg;
struct qcrypto_alg *n;
+ unsigned long flags;
- cp = platform_get_drvdata(pdev);
+ cp = pengine->pcp;
- if (!cp)
- return 0;
+ spin_lock_irqsave(&cp->lock, flags);
+ list_del(&pengine->elist);
+ if (cp->next_engine == pengine)
+ cp->next_engine = NULL;
+ spin_unlock_irqrestore(&cp->lock, flags);
- if (cp->platform_support.bus_scale_table != NULL)
- msm_bus_scale_unregister_client(cp->bus_scale_handle);
+ cp->total_units--;
+
+ tasklet_kill(&pengine->done_tasklet);
+ cancel_work_sync(&pengine->low_bw_req_ws);
+ del_timer_sync(&pengine->bw_scale_down_timer);
+ device_init_wakeup(&pengine->pdev->dev, false);
+
+ if (pengine->bus_scale_handle != 0)
+ msm_bus_scale_unregister_client(pengine->bus_scale_handle);
+ pengine->bus_scale_handle = 0;
+
+ if (cp->total_units)
+ return;
list_for_each_entry_safe(q_alg, n, &cp->alg_list, entry) {
if (q_alg->alg_type == QCRYPTO_ALG_CIPHER)
@@ -747,14 +828,26 @@
list_del(&q_alg->entry);
kfree(q_alg);
}
+}
- if (cp->qce)
- qce_close(cp->qce);
- tasklet_kill(&cp->done_tasklet);
- kfree(cp);
+static int _qcrypto_remove(struct platform_device *pdev)
+{
+ struct crypto_engine *pengine;
+ struct crypto_priv *cp;
+
+ pengine = platform_get_drvdata(pdev);
+
+ if (!pengine)
+ return 0;
+ cp = pengine->pcp;
+ mutex_lock(&cp->engine_lock);
+ _qcrypto_remove_engine(pengine);
+ mutex_unlock(&cp->engine_lock);
+ if (pengine->qce)
+ qce_close(pengine->qce);
+ kfree(pengine);
return 0;
-};
-
+}
static int _qcrypto_check_aes_keylen(struct crypto_ablkcipher *cipher,
struct crypto_priv *cp, unsigned int len)
@@ -894,54 +987,24 @@
static void req_done(unsigned long data)
{
struct crypto_async_request *areq;
- struct crypto_priv *cp = (struct crypto_priv *)data;
+ struct crypto_engine *pengine = (struct crypto_engine *)data;
+ struct crypto_priv *cp;
unsigned long flags;
+ int res;
+ cp = pengine->pcp;
spin_lock_irqsave(&cp->lock, flags);
- areq = cp->req;
- cp->req = NULL;
+ areq = pengine->req;
+ pengine->req = NULL;
+ res = pengine->res;
spin_unlock_irqrestore(&cp->lock, flags);
-
if (areq)
- areq->complete(areq, cp->res);
- _start_qcrypto_process(cp);
+ areq->complete(areq, res);
+ if (res)
+ pengine->err_req++;
+ _start_qcrypto_process(cp, pengine);
};
-static void _update_sha1_ctx(struct ahash_request *req)
-{
- struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct sha1_state *sha_state_ctx = &rctx->sha1_state_ctx;
- struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
-
- if (sha_ctx->last_blk == 1)
- memset(sha_state_ctx, 0x00, sizeof(struct sha1_state));
- else {
- memset(sha_state_ctx->buffer, 0x00, SHA1_BLOCK_SIZE);
- memcpy(sha_state_ctx->buffer, sha_ctx->trailing_buf,
- sha_ctx->trailing_buf_len);
- _byte_stream_to_words(sha_state_ctx->state , sha_ctx->digest,
- SHA1_DIGEST_SIZE);
- }
- return;
-}
-
-static void _update_sha256_ctx(struct ahash_request *req)
-{
- struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct sha256_state *sha_state_ctx = &rctx->sha256_state_ctx;
- struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
-
- if (sha_ctx->last_blk == 1)
- memset(sha_state_ctx, 0x00, sizeof(struct sha256_state));
- else {
- memset(sha_state_ctx->buf, 0x00, SHA256_BLOCK_SIZE);
- memcpy(sha_state_ctx->buf, sha_ctx->trailing_buf,
- sha_ctx->trailing_buf_len);
- _byte_stream_to_words(sha_state_ctx->state, sha_ctx->digest,
- SHA256_DIGEST_SIZE);
- }
- return;
-}
static void _qce_ahash_complete(void *cookie, unsigned char *digest,
unsigned char *authdata, int ret)
@@ -954,44 +1017,36 @@
struct crypto_stat *pstat;
uint32_t diglen = crypto_ahash_digestsize(ahash);
uint32_t *auth32 = (uint32_t *)authdata;
+ struct crypto_engine *pengine;
pstat = &_qcrypto_stat;
+ pengine = sha_ctx->pengine;
#ifdef QCRYPTO_DEBUG
- dev_info(&cp->pdev->dev, "_qce_ahash_complete: %p ret %d\n",
+ dev_info(&pengine->pdev->dev, "_qce_ahash_complete: %p ret %d\n",
areq, ret);
#endif
if (digest) {
- memcpy(sha_ctx->digest, digest, diglen);
+ memcpy(rctx->digest, digest, diglen);
memcpy(areq->result, digest, diglen);
}
if (authdata) {
- sha_ctx->byte_count[0] = auth32[0];
- sha_ctx->byte_count[1] = auth32[1];
- sha_ctx->byte_count[2] = auth32[2];
- sha_ctx->byte_count[3] = auth32[3];
+ rctx->byte_count[0] = auth32[0];
+ rctx->byte_count[1] = auth32[1];
+ rctx->byte_count[2] = auth32[2];
+ rctx->byte_count[3] = auth32[3];
}
areq->src = rctx->src;
areq->nbytes = rctx->nbytes;
- if (sha_ctx->sg != NULL) {
- kfree(sha_ctx->sg);
- sha_ctx->sg = NULL;
- }
-
- if (sha_ctx->alg == QCE_HASH_SHA1)
- _update_sha1_ctx(areq);
- if (sha_ctx->alg == QCE_HASH_SHA256)
- _update_sha256_ctx(areq);
-
- sha_ctx->last_blk = 0;
- sha_ctx->first_blk = 0;
+ rctx->last_blk = 0;
+ rctx->first_blk = 0;
if (ret) {
- cp->res = -ENXIO;
+ pengine->res = -ENXIO;
pstat->sha_op_fail++;
} else {
- cp->res = 0;
+ pengine->res = 0;
pstat->sha_op_success++;
}
if (cp->ce_support.aligned_only) {
@@ -1001,7 +1056,7 @@
if (cp->platform_support.ce_shared)
schedule_work(&cp->unlock_ce_ws);
- tasklet_schedule(&cp->done_tasklet);
+ tasklet_schedule(&pengine->done_tasklet);
};
static void _qce_ablk_cipher_complete(void *cookie, unsigned char *icb,
@@ -1012,21 +1067,22 @@
struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
struct crypto_priv *cp = ctx->cp;
struct crypto_stat *pstat;
+ struct crypto_engine *pengine;
pstat = &_qcrypto_stat;
-
+ pengine = ctx->pengine;
#ifdef QCRYPTO_DEBUG
- dev_info(&cp->pdev->dev, "_qce_ablk_cipher_complete: %p ret %d\n",
+ dev_info(&pengine->pdev->dev, "_qce_ablk_cipher_complete: %p ret %d\n",
areq, ret);
#endif
if (iv)
memcpy(ctx->iv, iv, crypto_ablkcipher_ivsize(ablk));
if (ret) {
- cp->res = -ENXIO;
+ pengine->res = -ENXIO;
pstat->ablk_cipher_op_fail++;
} else {
- cp->res = 0;
+ pengine->res = 0;
pstat->ablk_cipher_op_success++;
}
@@ -1050,7 +1106,7 @@
if (cp->platform_support.ce_shared)
schedule_work(&cp->unlock_ce_ws);
- tasklet_schedule(&cp->done_tasklet);
+ tasklet_schedule(&pengine->done_tasklet);
};
@@ -1063,9 +1119,10 @@
struct crypto_priv *cp = ctx->cp;
struct qcrypto_cipher_req_ctx *rctx;
struct crypto_stat *pstat;
+ struct crypto_engine *pengine;
pstat = &_qcrypto_stat;
-
+ pengine = ctx->pengine;
rctx = aead_request_ctx(areq);
if (rctx->mode == QCE_MODE_CCM) {
@@ -1162,11 +1219,11 @@
else
pstat->aead_op_success++;
- cp->res = ret;
+ pengine->res = ret;
if (cp->platform_support.ce_shared)
schedule_work(&cp->unlock_ce_ws);
- tasklet_schedule(&cp->done_tasklet);
+ tasklet_schedule(&pengine->done_tasklet);
}
static int aead_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
@@ -1251,7 +1308,7 @@
return 0;
}
-static int _qcrypto_process_ablkcipher(struct crypto_priv *cp,
+static int _qcrypto_process_ablkcipher(struct crypto_engine *pengine,
struct crypto_async_request *async_req)
{
struct qce_req qreq;
@@ -1265,7 +1322,7 @@
cipher_ctx = crypto_tfm_ctx(async_req->tfm);
rctx = ablkcipher_request_ctx(req);
tfm = crypto_ablkcipher_reqtfm(req);
- if (cp->ce_support.aligned_only) {
+ if (pengine->pcp->ce_support.aligned_only) {
uint32_t bytes = 0;
uint32_t num_sg = 0;
@@ -1306,35 +1363,37 @@
qreq.flags = cipher_ctx->flags;
if ((cipher_ctx->enc_key_len == 0) &&
- (cp->platform_support.hw_key_support == 0))
+ (pengine->pcp->platform_support.hw_key_support == 0))
ret = -EINVAL;
else
- ret = qce_ablk_cipher_req(cp->qce, &qreq);
+ ret = qce_ablk_cipher_req(pengine->qce, &qreq);
return ret;
}
-static int _qcrypto_process_ahash(struct crypto_priv *cp,
+static int _qcrypto_process_ahash(struct crypto_engine *pengine,
struct crypto_async_request *async_req)
{
struct ahash_request *req;
struct qce_sha_req sreq;
+ struct qcrypto_sha_req_ctx *rctx;
struct qcrypto_sha_ctx *sha_ctx;
int ret = 0;
req = container_of(async_req,
struct ahash_request, base);
+ rctx = ahash_request_ctx(req);
sha_ctx = crypto_tfm_ctx(async_req->tfm);
sreq.qce_cb = _qce_ahash_complete;
- sreq.digest = &sha_ctx->digest[0];
+ sreq.digest = &rctx->digest[0];
sreq.src = req->src;
- sreq.auth_data[0] = sha_ctx->byte_count[0];
- sreq.auth_data[1] = sha_ctx->byte_count[1];
- sreq.auth_data[2] = sha_ctx->byte_count[2];
- sreq.auth_data[3] = sha_ctx->byte_count[3];
- sreq.first_blk = sha_ctx->first_blk;
- sreq.last_blk = sha_ctx->last_blk;
+ sreq.auth_data[0] = rctx->byte_count[0];
+ sreq.auth_data[1] = rctx->byte_count[1];
+ sreq.auth_data[2] = rctx->byte_count[2];
+ sreq.auth_data[3] = rctx->byte_count[3];
+ sreq.first_blk = rctx->first_blk;
+ sreq.last_blk = rctx->last_blk;
sreq.size = req->nbytes;
sreq.areq = req;
sreq.flags = sha_ctx->flags;
@@ -1363,12 +1422,12 @@
ret = -1;
break;
};
- ret = qce_process_sha_req(cp->qce, &sreq);
+ ret = qce_process_sha_req(pengine->qce, &sreq);
return ret;
}
-static int _qcrypto_process_aead(struct crypto_priv *cp,
+static int _qcrypto_process_aead(struct crypto_engine *pengine,
struct crypto_async_request *async_req)
{
struct qce_req qreq;
@@ -1417,7 +1476,7 @@
if (ret)
return ret;
- if (cp->ce_support.aligned_only) {
+ if (pengine->pcp->ce_support.aligned_only) {
uint32_t bytes = 0;
uint32_t num_sg = 0;
@@ -1487,7 +1546,7 @@
sg_mark_end(req->assoc);
} else {
/* for aead operations, other than aes(ccm) */
- if (cp->ce_support.aligned_only) {
+ if (pengine->pcp->ce_support.aligned_only) {
uint32_t bytes = 0;
uint32_t num_sg = 0;
@@ -1558,12 +1617,36 @@
req->dst = &rctx->dsg;
}
}
- ret = qce_aead_req(cp->qce, &qreq);
+ ret = qce_aead_req(pengine->qce, &qreq);
return ret;
}
+#define list_next_entry(pos, member) \
+ list_entry(pos->member.next, typeof(*pos), member)
+static struct crypto_engine *_qcrypto_static_assign_engine(
+ struct crypto_priv *cp)
+{
+ struct crypto_engine *pengine;
+ unsigned long flags;
-static int _start_qcrypto_process(struct crypto_priv *cp)
+ spin_lock_irqsave(&cp->lock, flags);
+ if (cp->next_engine)
+ pengine = cp->next_engine;
+ else
+ pengine = list_first_entry(&cp->engine_list,
+ struct crypto_engine, elist);
+
+ if (list_is_last(&pengine->elist, &cp->engine_list))
+ cp->next_engine = list_first_entry(
+ &cp->engine_list, struct crypto_engine, elist);
+ else
+ cp->next_engine = list_next_entry(pengine, elist);
+ spin_unlock_irqrestore(&cp->lock, flags);
+ return pengine;
+}
+
+static int _start_qcrypto_process(struct crypto_priv *cp,
+ struct crypto_engine *pengine)
{
struct crypto_async_request *async_req = NULL;
struct crypto_async_request *backlog = NULL;
@@ -1576,10 +1659,10 @@
again:
spin_lock_irqsave(&cp->lock, flags);
- if (cp->req == NULL) {
- backlog = crypto_get_backlog(&cp->queue);
- async_req = crypto_dequeue_request(&cp->queue);
- cp->req = async_req;
+ if (pengine->req == NULL) {
+ backlog = crypto_get_backlog(&pengine->req_queue);
+ async_req = crypto_dequeue_request(&pengine->req_queue);
+ pengine->req = async_req;
}
spin_unlock_irqrestore(&cp->lock, flags);
if (!async_req)
@@ -1590,21 +1673,22 @@
switch (type) {
case CRYPTO_ALG_TYPE_ABLKCIPHER:
- ret = _qcrypto_process_ablkcipher(cp, async_req);
+ ret = _qcrypto_process_ablkcipher(pengine, async_req);
break;
case CRYPTO_ALG_TYPE_AHASH:
- ret = _qcrypto_process_ahash(cp, async_req);
+ ret = _qcrypto_process_ahash(pengine, async_req);
break;
case CRYPTO_ALG_TYPE_AEAD:
- ret = _qcrypto_process_aead(cp, async_req);
+ ret = _qcrypto_process_aead(pengine, async_req);
break;
default:
ret = -EINVAL;
};
-
+ pengine->total_req++;
if (ret) {
+ pengine->err_req++;
spin_lock_irqsave(&cp->lock, flags);
- cp->req = NULL;
+ pengine->req = NULL;
spin_unlock_irqrestore(&cp->lock, flags);
if (type == CRYPTO_ALG_TYPE_ABLKCIPHER)
@@ -1622,6 +1706,7 @@
};
static int _qcrypto_queue_req(struct crypto_priv *cp,
+ struct crypto_engine *pengine,
struct crypto_async_request *req)
{
int ret;
@@ -1634,9 +1719,9 @@
}
spin_lock_irqsave(&cp->lock, flags);
- ret = crypto_enqueue_request(&cp->queue, req);
+ ret = crypto_enqueue_request(&pengine->req_queue, req);
spin_unlock_irqrestore(&cp->lock, flags);
- _start_qcrypto_process(cp);
+ _start_qcrypto_process(cp, pengine);
return ret;
}
@@ -1653,7 +1738,7 @@
BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
CRYPTO_ALG_TYPE_ABLKCIPHER);
#ifdef QCRYPTO_DEBUG
- dev_info(&cp->pdev->dev, "_qcrypto_enc_aes_ecb: %p\n", req);
+ dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_ecb: %p\n", req);
#endif
rctx = ablkcipher_request_ctx(req);
rctx->aead = 0;
@@ -1662,7 +1747,7 @@
rctx->mode = QCE_MODE_ECB;
pstat->ablk_cipher_aes_enc++;
- return _qcrypto_queue_req(cp, &req->base);
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
};
static int _qcrypto_enc_aes_cbc(struct ablkcipher_request *req)
@@ -1677,7 +1762,7 @@
BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
CRYPTO_ALG_TYPE_ABLKCIPHER);
#ifdef QCRYPTO_DEBUG
- dev_info(&cp->pdev->dev, "_qcrypto_enc_aes_cbc: %p\n", req);
+ dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_cbc: %p\n", req);
#endif
rctx = ablkcipher_request_ctx(req);
rctx->aead = 0;
@@ -1686,7 +1771,7 @@
rctx->mode = QCE_MODE_CBC;
pstat->ablk_cipher_aes_enc++;
- return _qcrypto_queue_req(cp, &req->base);
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
};
static int _qcrypto_enc_aes_ctr(struct ablkcipher_request *req)
@@ -1701,7 +1786,7 @@
BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
CRYPTO_ALG_TYPE_ABLKCIPHER);
#ifdef QCRYPTO_DEBUG
- dev_info(&cp->pdev->dev, "_qcrypto_enc_aes_ctr: %p\n", req);
+ dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_ctr: %p\n", req);
#endif
rctx = ablkcipher_request_ctx(req);
rctx->aead = 0;
@@ -1710,7 +1795,7 @@
rctx->mode = QCE_MODE_CTR;
pstat->ablk_cipher_aes_enc++;
- return _qcrypto_queue_req(cp, &req->base);
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
};
static int _qcrypto_enc_aes_xts(struct ablkcipher_request *req)
@@ -1731,7 +1816,7 @@
rctx->mode = QCE_MODE_XTS;
pstat->ablk_cipher_aes_enc++;
- return _qcrypto_queue_req(cp, &req->base);
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
};
static int _qcrypto_aead_encrypt_aes_ccm(struct aead_request *req)
@@ -1757,7 +1842,7 @@
rctx->iv = req->iv;
pstat->aead_ccm_aes_enc++;
- return _qcrypto_queue_req(cp, &req->base);
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
}
static int _qcrypto_enc_des_ecb(struct ablkcipher_request *req)
@@ -1778,7 +1863,7 @@
rctx->mode = QCE_MODE_ECB;
pstat->ablk_cipher_des_enc++;
- return _qcrypto_queue_req(cp, &req->base);
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
};
static int _qcrypto_enc_des_cbc(struct ablkcipher_request *req)
@@ -1799,7 +1884,7 @@
rctx->mode = QCE_MODE_CBC;
pstat->ablk_cipher_des_enc++;
- return _qcrypto_queue_req(cp, &req->base);
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
};
static int _qcrypto_enc_3des_ecb(struct ablkcipher_request *req)
@@ -1820,7 +1905,7 @@
rctx->mode = QCE_MODE_ECB;
pstat->ablk_cipher_3des_enc++;
- return _qcrypto_queue_req(cp, &req->base);
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
};
static int _qcrypto_enc_3des_cbc(struct ablkcipher_request *req)
@@ -1841,7 +1926,7 @@
rctx->mode = QCE_MODE_CBC;
pstat->ablk_cipher_3des_enc++;
- return _qcrypto_queue_req(cp, &req->base);
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
};
static int _qcrypto_dec_aes_ecb(struct ablkcipher_request *req)
@@ -1856,7 +1941,7 @@
BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
CRYPTO_ALG_TYPE_ABLKCIPHER);
#ifdef QCRYPTO_DEBUG
- dev_info(&cp->pdev->dev, "_qcrypto_dec_aes_ecb: %p\n", req);
+ dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_ecb: %p\n", req);
#endif
rctx = ablkcipher_request_ctx(req);
rctx->aead = 0;
@@ -1865,7 +1950,7 @@
rctx->mode = QCE_MODE_ECB;
pstat->ablk_cipher_aes_dec++;
- return _qcrypto_queue_req(cp, &req->base);
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
};
static int _qcrypto_dec_aes_cbc(struct ablkcipher_request *req)
@@ -1880,7 +1965,7 @@
BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
CRYPTO_ALG_TYPE_ABLKCIPHER);
#ifdef QCRYPTO_DEBUG
- dev_info(&cp->pdev->dev, "_qcrypto_dec_aes_cbc: %p\n", req);
+ dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_cbc: %p\n", req);
#endif
rctx = ablkcipher_request_ctx(req);
@@ -1890,7 +1975,7 @@
rctx->mode = QCE_MODE_CBC;
pstat->ablk_cipher_aes_dec++;
- return _qcrypto_queue_req(cp, &req->base);
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
};
static int _qcrypto_dec_aes_ctr(struct ablkcipher_request *req)
@@ -1905,7 +1990,7 @@
BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
CRYPTO_ALG_TYPE_ABLKCIPHER);
#ifdef QCRYPTO_DEBUG
- dev_info(&cp->pdev->dev, "_qcrypto_dec_aes_ctr: %p\n", req);
+ dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_ctr: %p\n", req);
#endif
rctx = ablkcipher_request_ctx(req);
rctx->aead = 0;
@@ -1916,7 +2001,7 @@
rctx->dir = QCE_ENCRYPT;
pstat->ablk_cipher_aes_dec++;
- return _qcrypto_queue_req(cp, &req->base);
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
};
static int _qcrypto_dec_des_ecb(struct ablkcipher_request *req)
@@ -1937,7 +2022,7 @@
rctx->mode = QCE_MODE_ECB;
pstat->ablk_cipher_des_dec++;
- return _qcrypto_queue_req(cp, &req->base);
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
};
static int _qcrypto_dec_des_cbc(struct ablkcipher_request *req)
@@ -1958,7 +2043,7 @@
rctx->mode = QCE_MODE_CBC;
pstat->ablk_cipher_des_dec++;
- return _qcrypto_queue_req(cp, &req->base);
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
};
static int _qcrypto_dec_3des_ecb(struct ablkcipher_request *req)
@@ -1979,7 +2064,7 @@
rctx->mode = QCE_MODE_ECB;
pstat->ablk_cipher_3des_dec++;
- return _qcrypto_queue_req(cp, &req->base);
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
};
static int _qcrypto_dec_3des_cbc(struct ablkcipher_request *req)
@@ -2000,7 +2085,7 @@
rctx->mode = QCE_MODE_CBC;
pstat->ablk_cipher_3des_dec++;
- return _qcrypto_queue_req(cp, &req->base);
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
};
static int _qcrypto_dec_aes_xts(struct ablkcipher_request *req)
@@ -2021,7 +2106,7 @@
rctx->dir = QCE_DECRYPT;
pstat->ablk_cipher_aes_dec++;
- return _qcrypto_queue_req(cp, &req->base);
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
};
@@ -2048,7 +2133,7 @@
rctx->iv = req->iv;
pstat->aead_ccm_aes_dec++;
- return _qcrypto_queue_req(cp, &req->base);
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
}
static int _qcrypto_aead_setauthsize(struct crypto_aead *authenc,
@@ -2156,7 +2241,8 @@
pstat = &_qcrypto_stat;
#ifdef QCRYPTO_DEBUG
- dev_info(&cp->pdev->dev, "_qcrypto_aead_encrypt_aes_cbc: %p\n", req);
+ dev_info(&ctx->pengine->pdev->dev,
+ "_qcrypto_aead_encrypt_aes_cbc: %p\n", req);
#endif
rctx = aead_request_ctx(req);
@@ -2167,7 +2253,7 @@
rctx->iv = req->iv;
pstat->aead_sha1_aes_enc++;
- return _qcrypto_queue_req(cp, &req->base);
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
}
static int _qcrypto_aead_decrypt_aes_cbc(struct aead_request *req)
@@ -2180,7 +2266,8 @@
pstat = &_qcrypto_stat;
#ifdef QCRYPTO_DEBUG
- dev_info(&cp->pdev->dev, "_qcrypto_aead_decrypt_aes_cbc: %p\n", req);
+ dev_info(&ctx->pengine->pdev->dev,
+ "_qcrypto_aead_decrypt_aes_cbc: %p\n", req);
#endif
rctx = aead_request_ctx(req);
rctx->aead = 1;
@@ -2190,7 +2277,7 @@
rctx->iv = req->iv;
pstat->aead_sha1_aes_dec++;
- return _qcrypto_queue_req(cp, &req->base);
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
}
static int _qcrypto_aead_givencrypt_aes_cbc(struct aead_givcrypt_request *req)
@@ -2215,7 +2302,7 @@
/* avoid consecutive packets going out with same IV */
*(__be64 *)req->giv ^= cpu_to_be64(req->seq);
pstat->aead_sha1_aes_enc++;
- return _qcrypto_queue_req(cp, &areq->base);
+ return _qcrypto_queue_req(cp, ctx->pengine, &areq->base);
}
#ifdef QCRYPTO_AEAD_AES_CTR
@@ -2236,7 +2323,7 @@
rctx->iv = req->iv;
pstat->aead_sha1_aes_enc++;
- return _qcrypto_queue_req(cp, &req->base);
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
}
static int _qcrypto_aead_decrypt_aes_ctr(struct aead_request *req)
@@ -2259,7 +2346,7 @@
rctx->iv = req->iv;
pstat->aead_sha1_aes_dec++;
- return _qcrypto_queue_req(cp, &req->base);
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
}
static int _qcrypto_aead_givencrypt_aes_ctr(struct aead_givcrypt_request *req)
@@ -2284,7 +2371,7 @@
/* avoid consecutive packets going out with same IV */
*(__be64 *)req->giv ^= cpu_to_be64(req->seq);
pstat->aead_sha1_aes_enc++;
- return _qcrypto_queue_req(cp, &areq->base);
+ return _qcrypto_queue_req(cp, ctx->pengine, &areq->base);
};
#endif /* QCRYPTO_AEAD_AES_CTR */
@@ -2305,7 +2392,7 @@
rctx->iv = req->iv;
pstat->aead_sha1_des_enc++;
- return _qcrypto_queue_req(cp, &req->base);
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
}
static int _qcrypto_aead_decrypt_des_cbc(struct aead_request *req)
@@ -2325,7 +2412,7 @@
rctx->iv = req->iv;
pstat->aead_sha1_des_dec++;
- return _qcrypto_queue_req(cp, &req->base);
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
}
static int _qcrypto_aead_givencrypt_des_cbc(struct aead_givcrypt_request *req)
@@ -2350,7 +2437,7 @@
/* avoid consecutive packets going out with same IV */
*(__be64 *)req->giv ^= cpu_to_be64(req->seq);
pstat->aead_sha1_des_enc++;
- return _qcrypto_queue_req(cp, &areq->base);
+ return _qcrypto_queue_req(cp, ctx->pengine, &areq->base);
}
static int _qcrypto_aead_encrypt_3des_cbc(struct aead_request *req)
@@ -2370,7 +2457,7 @@
rctx->iv = req->iv;
pstat->aead_sha1_3des_enc++;
- return _qcrypto_queue_req(cp, &req->base);
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
}
static int _qcrypto_aead_decrypt_3des_cbc(struct aead_request *req)
@@ -2390,7 +2477,7 @@
rctx->iv = req->iv;
pstat->aead_sha1_3des_dec++;
- return _qcrypto_queue_req(cp, &req->base);
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
}
static int _qcrypto_aead_givencrypt_3des_cbc(struct aead_givcrypt_request *req)
@@ -2415,18 +2502,21 @@
/* avoid consecutive packets going out with same IV */
*(__be64 *)req->giv ^= cpu_to_be64(req->seq);
pstat->aead_sha1_3des_enc++;
- return _qcrypto_queue_req(cp, &areq->base);
+ return _qcrypto_queue_req(cp, ctx->pengine, &areq->base);
}
-static int _sha_init(struct qcrypto_sha_ctx *ctx)
+static int _sha_init(struct ahash_request *req)
{
- ctx->first_blk = 1;
- ctx->last_blk = 0;
- ctx->byte_count[0] = 0;
- ctx->byte_count[1] = 0;
- ctx->byte_count[2] = 0;
- ctx->byte_count[3] = 0;
- ctx->trailing_buf_len = 0;
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+ rctx->first_blk = 1;
+ rctx->last_blk = 0;
+ rctx->byte_count[0] = 0;
+ rctx->byte_count[1] = 0;
+ rctx->byte_count[2] = 0;
+ rctx->byte_count[3] = 0;
+ rctx->trailing_buf_len = 0;
+ rctx->count = 0;
return 0;
};
@@ -2435,18 +2525,17 @@
{
struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
struct crypto_stat *pstat;
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
pstat = &_qcrypto_stat;
- _sha_init(sha_ctx);
+ _sha_init(req);
sha_ctx->alg = QCE_HASH_SHA1;
- memset(&sha_ctx->trailing_buf[0], 0x00, SHA1_BLOCK_SIZE);
- memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0],
+ memset(&rctx->trailing_buf[0], 0x00, SHA1_BLOCK_SIZE);
+ memcpy(&rctx->digest[0], &_std_init_vector_sha1_uint8[0],
SHA1_DIGEST_SIZE);
sha_ctx->diglen = SHA1_DIGEST_SIZE;
- _update_sha1_ctx(req);
-
pstat->sha1_digest++;
return 0;
};
@@ -2455,18 +2544,17 @@
{
struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
struct crypto_stat *pstat;
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
pstat = &_qcrypto_stat;
- _sha_init(sha_ctx);
+ _sha_init(req);
sha_ctx->alg = QCE_HASH_SHA256;
- memset(&sha_ctx->trailing_buf[0], 0x00, SHA256_BLOCK_SIZE);
- memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0],
+ memset(&rctx->trailing_buf[0], 0x00, SHA256_BLOCK_SIZE);
+ memcpy(&rctx->digest[0], &_std_init_vector_sha256_uint8[0],
SHA256_DIGEST_SIZE);
sha_ctx->diglen = SHA256_DIGEST_SIZE;
- _update_sha256_ctx(req);
-
pstat->sha256_digest++;
return 0;
};
@@ -2475,82 +2563,126 @@
static int _sha1_export(struct ahash_request *req, void *out)
{
struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct sha1_state *sha_state_ctx = &rctx->sha1_state_ctx;
struct sha1_state *out_ctx = (struct sha1_state *)out;
- out_ctx->count = sha_state_ctx->count;
- memcpy(out_ctx->state, sha_state_ctx->state, sizeof(out_ctx->state));
- memcpy(out_ctx->buffer, sha_state_ctx->buffer, SHA1_BLOCK_SIZE);
+ out_ctx->count = rctx->count;
+ _byte_stream_to_words(out_ctx->state, rctx->digest, SHA1_DIGEST_SIZE);
+ memcpy(out_ctx->buffer, rctx->trailing_buf, SHA1_BLOCK_SIZE);
return 0;
};
+static int _sha1_hmac_export(struct ahash_request *req, void *out)
+{
+ return _sha1_export(req, out);
+}
+
+/* crypto hw padding constant for hmac first operation */
+#define HMAC_PADDING 64
+
+static int __sha1_import_common(struct ahash_request *req, const void *in,
+ bool hmac)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct sha1_state *in_ctx = (struct sha1_state *)in;
+ u64 hw_count = in_ctx->count;
+
+ rctx->count = in_ctx->count;
+ memcpy(rctx->trailing_buf, in_ctx->buffer, SHA1_BLOCK_SIZE);
+ if (in_ctx->count <= SHA1_BLOCK_SIZE) {
+ rctx->first_blk = 1;
+ } else {
+ rctx->first_blk = 0;
+ /*
+ * For hmac, there is a hardware padding done
+ * when first is set. So the byte_count will be
+		 * incremented by 64 after the operation of first
+ */
+ if (hmac)
+ hw_count += HMAC_PADDING;
+ }
+ rctx->byte_count[0] = (uint32_t)(hw_count & 0xFFFFFFC0);
+ rctx->byte_count[1] = (uint32_t)(hw_count >> 32);
+ _words_to_byte_stream(in_ctx->state, rctx->digest, sha_ctx->diglen);
+
+ rctx->trailing_buf_len = (uint32_t)(in_ctx->count &
+ (SHA1_BLOCK_SIZE-1));
+ return 0;
+}
+
static int _sha1_import(struct ahash_request *req, const void *in)
{
- struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
- struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct sha1_state *sha_state_ctx = &rctx->sha1_state_ctx;
- struct sha1_state *in_ctx = (struct sha1_state *)in;
-
- sha_state_ctx->count = in_ctx->count;
- memcpy(sha_state_ctx->state, in_ctx->state, sizeof(in_ctx->state));
- memcpy(sha_state_ctx->buffer, in_ctx->buffer, SHA1_BLOCK_SIZE);
- memcpy(sha_ctx->trailing_buf, in_ctx->buffer, SHA1_BLOCK_SIZE);
-
- sha_ctx->byte_count[0] = (uint32_t)(in_ctx->count & 0xFFFFFFC0);
- sha_ctx->byte_count[1] = (uint32_t)(in_ctx->count >> 32);
- _words_to_byte_stream(in_ctx->state, sha_ctx->digest, sha_ctx->diglen);
-
- sha_ctx->trailing_buf_len = (uint32_t)(in_ctx->count &
- (SHA1_BLOCK_SIZE-1));
-
- if (!(in_ctx->count))
- sha_ctx->first_blk = 1;
- else
- sha_ctx->first_blk = 0;
-
- return 0;
+ return __sha1_import_common(req, in, false);
}
+
+static int _sha1_hmac_import(struct ahash_request *req, const void *in)
+{
+ return __sha1_import_common(req, in, true);
+}
+
static int _sha256_export(struct ahash_request *req, void *out)
{
struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct sha256_state *sha_state_ctx = &rctx->sha256_state_ctx;
struct sha256_state *out_ctx = (struct sha256_state *)out;
- out_ctx->count = sha_state_ctx->count;
- memcpy(out_ctx->state, sha_state_ctx->state, sizeof(out_ctx->state));
- memcpy(out_ctx->buf, sha_state_ctx->buf, SHA256_BLOCK_SIZE);
+ out_ctx->count = rctx->count;
+ _byte_stream_to_words(out_ctx->state, rctx->digest, SHA256_DIGEST_SIZE);
+ memcpy(out_ctx->buf, rctx->trailing_buf, SHA256_BLOCK_SIZE);
return 0;
};
-static int _sha256_import(struct ahash_request *req, const void *in)
+static int _sha256_hmac_export(struct ahash_request *req, void *out)
+{
+ return _sha256_export(req, out);
+}
+
+static int __sha256_import_common(struct ahash_request *req, const void *in,
+ bool hmac)
{
struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct sha256_state *sha_state_ctx = &rctx->sha256_state_ctx;
struct sha256_state *in_ctx = (struct sha256_state *)in;
+ u64 hw_count = in_ctx->count;
- sha_state_ctx->count = in_ctx->count;
- memcpy(sha_state_ctx->state, in_ctx->state, sizeof(in_ctx->state));
- memcpy(sha_state_ctx->buf, in_ctx->buf, SHA256_BLOCK_SIZE);
- memcpy(sha_ctx->trailing_buf, in_ctx->buf, SHA256_BLOCK_SIZE);
+ rctx->count = in_ctx->count;
+ memcpy(rctx->trailing_buf, in_ctx->buf, SHA256_BLOCK_SIZE);
- sha_ctx->byte_count[0] = (uint32_t)(in_ctx->count & 0xFFFFFFC0);
- sha_ctx->byte_count[1] = (uint32_t)(in_ctx->count >> 32);
- _words_to_byte_stream(in_ctx->state, sha_ctx->digest, sha_ctx->diglen);
+ if (in_ctx->count <= SHA256_BLOCK_SIZE) {
+ rctx->first_blk = 1;
+ } else {
+ rctx->first_blk = 0;
+ /*
+ * for hmac, there is a hardware padding done
+ * when first is set. So the byte_count will be
+		 * incremented by 64 after the operation of first
+ */
+ if (hmac)
+ hw_count += HMAC_PADDING;
+ }
- sha_ctx->trailing_buf_len = (uint32_t)(in_ctx->count &
+ rctx->byte_count[0] = (uint32_t)(hw_count & 0xFFFFFFC0);
+ rctx->byte_count[1] = (uint32_t)(hw_count >> 32);
+ _words_to_byte_stream(in_ctx->state, rctx->digest, sha_ctx->diglen);
+
+ rctx->trailing_buf_len = (uint32_t)(in_ctx->count &
(SHA256_BLOCK_SIZE-1));
- if (!(in_ctx->count))
- sha_ctx->first_blk = 1;
- else
- sha_ctx->first_blk = 0;
return 0;
}
+static int _sha256_import(struct ahash_request *req, const void *in)
+{
+ return __sha256_import_common(req, in, false);
+}
+
+static int _sha256_hmac_import(struct ahash_request *req, const void *in)
+{
+ return __sha256_import_common(req, in, true);
+}
+
static int _copy_source(struct ahash_request *req)
{
struct qcrypto_sha_req_ctx *srctx = NULL;
@@ -2593,23 +2725,19 @@
uint32_t nbytes;
uint32_t offset = 0;
uint32_t bytes = 0;
-
+ uint8_t *staging;
int ret = 0;
/* check for trailing buffer from previous updates and append it */
- total = req->nbytes + sha_ctx->trailing_buf_len;
+ total = req->nbytes + rctx->trailing_buf_len;
len = req->nbytes;
if (total <= sha_block_size) {
- k_src = &sha_ctx->trailing_buf[sha_ctx->trailing_buf_len];
+ k_src = &rctx->trailing_buf[rctx->trailing_buf_len];
num_sg = qcrypto_count_sg(req->src, len);
bytes = qcrypto_sg_copy_to_buffer(req->src, num_sg, k_src, len);
- sha_ctx->trailing_buf_len = total;
- if (sha_ctx->alg == QCE_HASH_SHA1)
- _update_sha1_ctx(req);
- if (sha_ctx->alg == QCE_HASH_SHA256)
- _update_sha256_ctx(req);
+ rctx->trailing_buf_len = total;
return 0;
}
@@ -2617,9 +2745,10 @@
rctx->src = req->src;
rctx->nbytes = req->nbytes;
- memcpy(sha_ctx->tmp_tbuf, sha_ctx->trailing_buf,
- sha_ctx->trailing_buf_len);
- k_src = &sha_ctx->trailing_buf[0];
+ staging = (uint8_t *) ALIGN(((unsigned int)rctx->staging_dmabuf),
+ L1_CACHE_BYTES);
+ memcpy(staging, rctx->trailing_buf, rctx->trailing_buf_len);
+ k_src = &rctx->trailing_buf[0];
/* get new trailing buffer */
sha_pad_len = ALIGN(total, sha_block_size) - total;
trailing_buf_len = sha_block_size - sha_pad_len;
@@ -2632,7 +2761,7 @@
nbytes = total - trailing_buf_len;
num_sg = qcrypto_count_sg(req->src, req->nbytes);
- len = sha_ctx->trailing_buf_len;
+ len = rctx->trailing_buf_len;
sg_last = req->src;
while (len < nbytes) {
@@ -2641,56 +2770,41 @@
len += sg_last->length;
sg_last = scatterwalk_sg_next(sg_last);
}
- if (sha_ctx->trailing_buf_len) {
+ if (rctx->trailing_buf_len) {
if (cp->ce_support.aligned_only) {
- sha_ctx->sg = kzalloc(sizeof(struct scatterlist),
- GFP_ATOMIC);
- if (sha_ctx->sg == NULL) {
- pr_err("MemAlloc fail sha_ctx->sg, error %ld\n",
- PTR_ERR(sha_ctx->sg));
- return -ENOMEM;
- }
rctx->data2 = kzalloc((req->nbytes + 64), GFP_ATOMIC);
if (rctx->data2 == NULL) {
pr_err("Mem Alloc fail srctx->data2, err %ld\n",
PTR_ERR(rctx->data2));
- kfree(sha_ctx->sg);
return -ENOMEM;
}
- memcpy(rctx->data2, sha_ctx->tmp_tbuf,
- sha_ctx->trailing_buf_len);
- memcpy((rctx->data2 + sha_ctx->trailing_buf_len),
+ memcpy(rctx->data2, staging,
+ rctx->trailing_buf_len);
+ memcpy((rctx->data2 + rctx->trailing_buf_len),
rctx->data, req->src->length);
kfree(rctx->data);
rctx->data = rctx->data2;
- sg_set_buf(&sha_ctx->sg[0], rctx->data,
- (sha_ctx->trailing_buf_len +
+ sg_set_buf(&rctx->sg[0], rctx->data,
+ (rctx->trailing_buf_len +
req->src->length));
- req->src = sha_ctx->sg;
- sg_mark_end(&sha_ctx->sg[0]);
+ req->src = rctx->sg;
+ sg_mark_end(&rctx->sg[0]);
} else {
sg_mark_end(sg_last);
- sha_ctx->sg = kzalloc(2 * (sizeof(struct scatterlist)),
- GFP_ATOMIC);
- if (sha_ctx->sg == NULL) {
- pr_err("MEMalloc fail sha_ctx->sg, error %ld\n",
- PTR_ERR(sha_ctx->sg));
- return -ENOMEM;
- }
-
- sg_set_buf(&sha_ctx->sg[0], sha_ctx->tmp_tbuf,
- sha_ctx->trailing_buf_len);
- sg_mark_end(&sha_ctx->sg[1]);
- sg_chain(sha_ctx->sg, 2, req->src);
- req->src = sha_ctx->sg;
+ memset(rctx->sg, 0, sizeof(rctx->sg));
+ sg_set_buf(&rctx->sg[0], staging,
+ rctx->trailing_buf_len);
+ sg_mark_end(&rctx->sg[1]);
+ sg_chain(rctx->sg, 2, req->src);
+ req->src = rctx->sg;
}
} else
sg_mark_end(sg_last);
req->nbytes = nbytes;
- sha_ctx->trailing_buf_len = trailing_buf_len;
+ rctx->trailing_buf_len = trailing_buf_len;
- ret = _qcrypto_queue_req(cp, &req->base);
+ ret = _qcrypto_queue_req(cp, sha_ctx->pengine, &req->base);
return ret;
};
@@ -2698,7 +2812,6 @@
static int _sha1_update(struct ahash_request *req)
{
struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct sha1_state *sha_state_ctx = &rctx->sha1_state_ctx;
struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
struct crypto_priv *cp = sha_ctx->cp;
@@ -2706,14 +2819,13 @@
if (_copy_source(req))
return -ENOMEM;
}
- sha_state_ctx->count += req->nbytes;
+ rctx->count += req->nbytes;
return _sha_update(req, SHA1_BLOCK_SIZE);
}
static int _sha256_update(struct ahash_request *req)
{
struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct sha256_state *sha_state_ctx = &rctx->sha256_state_ctx;
struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
struct crypto_priv *cp = sha_ctx->cp;
@@ -2722,7 +2834,7 @@
return -ENOMEM;
}
- sha_state_ctx->count += req->nbytes;
+ rctx->count += req->nbytes;
return _sha_update(req, SHA256_BLOCK_SIZE);
}
@@ -2732,26 +2844,29 @@
struct crypto_priv *cp = sha_ctx->cp;
struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
int ret = 0;
+ uint8_t *staging;
if (cp->ce_support.aligned_only) {
if (_copy_source(req))
return -ENOMEM;
}
- sha_ctx->last_blk = 1;
+ rctx->last_blk = 1;
/* save the original req structure fields*/
rctx->src = req->src;
rctx->nbytes = req->nbytes;
- sg_set_buf(&sha_ctx->tmp_sg, sha_ctx->trailing_buf,
- sha_ctx->trailing_buf_len);
- sg_mark_end(&sha_ctx->tmp_sg);
+ staging = (uint8_t *) ALIGN(((unsigned int)rctx->staging_dmabuf),
+ L1_CACHE_BYTES);
+ memcpy(staging, rctx->trailing_buf, rctx->trailing_buf_len);
+ sg_set_buf(&rctx->sg[0], staging, rctx->trailing_buf_len);
+ sg_mark_end(&rctx->sg[0]);
- req->src = &sha_ctx->tmp_sg;
- req->nbytes = sha_ctx->trailing_buf_len;
+ req->src = &rctx->sg[0];
+ req->nbytes = rctx->trailing_buf_len;
- ret = _qcrypto_queue_req(cp, &req->base);
+ ret = _qcrypto_queue_req(cp, sha_ctx->pengine, &req->base);
return ret;
};
@@ -2781,9 +2896,9 @@
/* save the original req structure fields*/
rctx->src = req->src;
rctx->nbytes = req->nbytes;
- sha_ctx->first_blk = 1;
- sha_ctx->last_blk = 1;
- ret = _qcrypto_queue_req(cp, &req->base);
+ rctx->first_blk = 1;
+ rctx->last_blk = 1;
+ ret = _qcrypto_queue_req(cp, sha_ctx->pengine, &req->base);
return ret;
}
@@ -2814,32 +2929,49 @@
unsigned int len)
{
struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
+ uint8_t *in_buf;
int ret = 0;
+ struct scatterlist sg;
+ struct ahash_request *ahash_req;
+ struct completion ahash_req_complete;
- sha_ctx->in_buf = kzalloc(len + 64, GFP_KERNEL);
- if (sha_ctx->in_buf == NULL) {
- pr_err("qcrypto Can't Allocate mem: sha_ctx->in_buf, error %ld\n",
- PTR_ERR(sha_ctx->in_buf));
+ ahash_req = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (ahash_req == NULL)
+ return -ENOMEM;
+ init_completion(&ahash_req_complete);
+ ahash_request_set_callback(ahash_req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ _crypto_sha_hmac_ahash_req_complete,
+ &ahash_req_complete);
+ crypto_ahash_clear_flags(tfm, ~0);
+
+ in_buf = kzalloc(len + 64, GFP_KERNEL);
+ if (in_buf == NULL) {
+ pr_err("qcrypto Can't Allocate mem: in_buf, error %ld\n",
+ PTR_ERR(in_buf));
+ ahash_request_free(ahash_req);
return -ENOMEM;
}
- memcpy(sha_ctx->in_buf, key, len);
- sg_set_buf(&sha_ctx->tmp_sg, sha_ctx->in_buf, len);
- sg_mark_end(&sha_ctx->tmp_sg);
+ memcpy(in_buf, key, len);
+ sg_set_buf(&sg, in_buf, len);
+ sg_mark_end(&sg);
- ahash_request_set_crypt(sha_ctx->ahash_req, &sha_ctx->tmp_sg,
+ ahash_request_set_crypt(ahash_req, &sg,
&sha_ctx->authkey[0], len);
- ret = _sha_digest(sha_ctx->ahash_req);
+ if (sha_ctx->alg == QCE_HASH_SHA1)
+ ret = _sha1_digest(ahash_req);
+ else
+ ret = _sha256_digest(ahash_req);
if (ret == -EINPROGRESS || ret == -EBUSY) {
ret =
wait_for_completion_interruptible(
- &sha_ctx->ahash_req_complete);
+ &ahash_req_complete);
INIT_COMPLETION(sha_ctx->ahash_req_complete);
}
- sha_ctx->authkey_in_len = len;
- kfree(sha_ctx->in_buf);
- sha_ctx->in_buf = NULL;
+ kfree(in_buf);
+ ahash_request_free(ahash_req);
return ret;
}
@@ -2848,16 +2980,15 @@
unsigned int len)
{
struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
-
- if (len <= SHA1_BLOCK_SIZE)
+ memset(&sha_ctx->authkey[0], 0, SHA1_BLOCK_SIZE);
+ if (len <= SHA1_BLOCK_SIZE) {
memcpy(&sha_ctx->authkey[0], key, len);
- else {
- _sha_init(sha_ctx);
+ sha_ctx->authkey_in_len = len;
+ } else {
sha_ctx->alg = QCE_HASH_SHA1;
- memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0],
- SHA1_DIGEST_SIZE);
sha_ctx->diglen = SHA1_DIGEST_SIZE;
_sha_hmac_setkey(tfm, key, len);
+ sha_ctx->authkey_in_len = SHA1_BLOCK_SIZE;
}
return 0;
}
@@ -2867,15 +2998,15 @@
{
struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
- if (len <= SHA256_BLOCK_SIZE)
+ memset(&sha_ctx->authkey[0], 0, SHA256_BLOCK_SIZE);
+ if (len <= SHA256_BLOCK_SIZE) {
memcpy(&sha_ctx->authkey[0], key, len);
- else {
- _sha_init(sha_ctx);
+ sha_ctx->authkey_in_len = len;
+ } else {
sha_ctx->alg = QCE_HASH_SHA256;
- memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0],
- SHA256_DIGEST_SIZE);
sha_ctx->diglen = SHA256_DIGEST_SIZE;
_sha_hmac_setkey(tfm, key, len);
+ sha_ctx->authkey_in_len = SHA256_BLOCK_SIZE;
}
return 0;
@@ -2885,11 +3016,12 @@
uint32_t sha_block_size)
{
struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
int i;
for (i = 0; i < sha_block_size; i++)
- sha_ctx->trailing_buf[i] = sha_ctx->authkey[i] ^ 0x36;
- sha_ctx->trailing_buf_len = sha_block_size;
+ rctx->trailing_buf[i] = sha_ctx->authkey[i] ^ 0x36;
+ rctx->trailing_buf_len = sha_block_size;
return 0;
}
@@ -2900,16 +3032,16 @@
struct crypto_priv *cp = sha_ctx->cp;
struct crypto_stat *pstat;
int ret = 0;
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
pstat = &_qcrypto_stat;
pstat->sha1_hmac_digest++;
- _sha_init(sha_ctx);
- memset(&sha_ctx->trailing_buf[0], 0x00, SHA1_BLOCK_SIZE);
- memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0],
+ _sha_init(req);
+ memset(&rctx->trailing_buf[0], 0x00, SHA1_BLOCK_SIZE);
+ memcpy(&rctx->digest[0], &_std_init_vector_sha1_uint8[0],
SHA1_DIGEST_SIZE);
sha_ctx->diglen = SHA1_DIGEST_SIZE;
- _update_sha1_ctx(req);
if (cp->ce_support.sha_hmac)
sha_ctx->alg = QCE_HASH_SHA1_HMAC;
@@ -2927,16 +3059,17 @@
struct crypto_priv *cp = sha_ctx->cp;
struct crypto_stat *pstat;
int ret = 0;
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
pstat = &_qcrypto_stat;
pstat->sha256_hmac_digest++;
- _sha_init(sha_ctx);
- memset(&sha_ctx->trailing_buf[0], 0x00, SHA256_BLOCK_SIZE);
- memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0],
+ _sha_init(req);
+
+ memset(&rctx->trailing_buf[0], 0x00, SHA256_BLOCK_SIZE);
+ memcpy(&rctx->digest[0], &_std_init_vector_sha256_uint8[0],
SHA256_DIGEST_SIZE);
sha_ctx->diglen = SHA256_DIGEST_SIZE;
- _update_sha256_ctx(req);
if (cp->ce_support.sha_hmac)
sha_ctx->alg = QCE_HASH_SHA256_HMAC;
@@ -2965,36 +3098,39 @@
struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
struct crypto_priv *cp = sha_ctx->cp;
int i;
+ uint8_t *staging;
+ uint8_t *p;
+ staging = (uint8_t *) ALIGN(((unsigned int)rctx->staging_dmabuf),
+ L1_CACHE_BYTES);
+ p = staging;
for (i = 0; i < sha_block_size; i++)
- sha_ctx->tmp_tbuf[i] = sha_ctx->authkey[i] ^ 0x5c;
+ *p++ = sha_ctx->authkey[i] ^ 0x5c;
+ memcpy(p, &rctx->digest[0], sha_digest_size);
+ sg_set_buf(&rctx->sg[0], staging, sha_block_size +
+ sha_digest_size);
+ sg_mark_end(&rctx->sg[0]);
/* save the original req structure fields*/
rctx->src = req->src;
rctx->nbytes = req->nbytes;
- memcpy(&sha_ctx->tmp_tbuf[sha_block_size], &sha_ctx->digest[0],
- sha_digest_size);
-
- sg_set_buf(&sha_ctx->tmp_sg, sha_ctx->tmp_tbuf, sha_block_size +
- sha_digest_size);
- sg_mark_end(&sha_ctx->tmp_sg);
- req->src = &sha_ctx->tmp_sg;
+ req->src = &rctx->sg[0];
req->nbytes = sha_block_size + sha_digest_size;
- _sha_init(sha_ctx);
+ _sha_init(req);
if (sha_ctx->alg == QCE_HASH_SHA1) {
- memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0],
+ memcpy(&rctx->digest[0], &_std_init_vector_sha1_uint8[0],
SHA1_DIGEST_SIZE);
sha_ctx->diglen = SHA1_DIGEST_SIZE;
} else {
- memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0],
+ memcpy(&rctx->digest[0], &_std_init_vector_sha256_uint8[0],
SHA256_DIGEST_SIZE);
sha_ctx->diglen = SHA256_DIGEST_SIZE;
}
- sha_ctx->last_blk = 1;
- return _qcrypto_queue_req(cp, &req->base);
+ rctx->last_blk = 1;
+ return _qcrypto_queue_req(cp, sha_ctx->pengine, &req->base);
}
static int _sha_hmac_inner_hash(struct ahash_request *req,
@@ -3004,17 +3140,19 @@
struct ahash_request *areq = sha_ctx->ahash_req;
struct crypto_priv *cp = sha_ctx->cp;
int ret = 0;
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+ uint8_t *staging;
- sha_ctx->last_blk = 1;
+ staging = (uint8_t *) ALIGN(((unsigned int)rctx->staging_dmabuf),
+ L1_CACHE_BYTES);
+ memcpy(staging, rctx->trailing_buf, rctx->trailing_buf_len);
+ sg_set_buf(&rctx->sg[0], staging, rctx->trailing_buf_len);
+ sg_mark_end(&rctx->sg[0]);
- sg_set_buf(&sha_ctx->tmp_sg, sha_ctx->trailing_buf,
- sha_ctx->trailing_buf_len);
- sg_mark_end(&sha_ctx->tmp_sg);
-
- ahash_request_set_crypt(areq, &sha_ctx->tmp_sg, &sha_ctx->digest[0],
- sha_ctx->trailing_buf_len);
- sha_ctx->last_blk = 1;
- ret = _qcrypto_queue_req(cp, &areq->base);
+ ahash_request_set_crypt(areq, &rctx->sg[0], &rctx->digest[0],
+ rctx->trailing_buf_len);
+ rctx->last_blk = 1;
+ ret = _qcrypto_queue_req(cp, sha_ctx->pengine, &areq->base);
if (ret == -EINPROGRESS || ret == -EBUSY) {
ret =
@@ -3067,12 +3205,13 @@
{
struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
struct crypto_stat *pstat;
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
pstat = &_qcrypto_stat;
pstat->sha1_hmac_digest++;
- _sha_init(sha_ctx);
- memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0],
+ _sha_init(req);
+ memcpy(&rctx->digest[0], &_std_init_vector_sha1_uint8[0],
SHA1_DIGEST_SIZE);
sha_ctx->diglen = SHA1_DIGEST_SIZE;
sha_ctx->alg = QCE_HASH_SHA1_HMAC;
@@ -3084,12 +3223,13 @@
{
struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
struct crypto_stat *pstat;
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
pstat = &_qcrypto_stat;
pstat->sha256_hmac_digest++;
- _sha_init(sha_ctx);
- memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0],
+ _sha_init(req);
+ memcpy(&rctx->digest[0], &_std_init_vector_sha256_uint8[0],
SHA256_DIGEST_SIZE);
sha_ctx->diglen = SHA256_DIGEST_SIZE;
sha_ctx->alg = QCE_HASH_SHA256_HMAC;
@@ -3097,6 +3237,16 @@
return _sha_digest(req);
}
+static int _qcrypto_prefix_alg_cra_name(char cra_name[], unsigned int size)
+{
+ char new_cra_name[CRYPTO_MAX_ALG_NAME] = "qcom-";
+ if (CRYPTO_MAX_ALG_NAME < size + 5)
+ return -EINVAL;
+ strlcat(new_cra_name, cra_name, CRYPTO_MAX_ALG_NAME);
+ strlcpy(cra_name, new_cra_name, CRYPTO_MAX_ALG_NAME);
+ return 0;
+}
+
int qcrypto_cipher_set_flag(struct ablkcipher_request *req, unsigned int flags)
{
struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
@@ -3252,8 +3402,8 @@
.init = _sha1_hmac_init,
.update = _sha1_hmac_update,
.final = _sha1_hmac_final,
- .export = _sha1_export,
- .import = _sha1_import,
+ .export = _sha1_hmac_export,
+ .import = _sha1_hmac_import,
.digest = _sha1_hmac_digest,
.setkey = _sha1_hmac_setkey,
.halg = {
@@ -3280,8 +3430,8 @@
.init = _sha256_hmac_init,
.update = _sha256_hmac_update,
.final = _sha256_hmac_final,
- .export = _sha256_export,
- .import = _sha256_import,
+ .export = _sha256_hmac_export,
+ .import = _sha256_hmac_import,
.digest = _sha256_hmac_digest,
.setkey = _sha256_hmac_setkey,
.halg = {
@@ -3627,38 +3777,59 @@
{
int rc = 0;
void *handle;
- struct crypto_priv *cp;
+ struct crypto_priv *cp = &qcrypto_dev;
int i;
struct msm_ce_hw_support *platform_support;
+ struct crypto_engine *pengine;
+ unsigned long flags;
- cp = kzalloc(sizeof(*cp), GFP_KERNEL);
- if (!cp) {
+ pengine = kzalloc(sizeof(*pengine), GFP_KERNEL);
+ if (!pengine) {
pr_err("qcrypto Memory allocation of q_alg FAIL, error %ld\n",
- PTR_ERR(cp));
+ PTR_ERR(pengine));
return -ENOMEM;
}
/* open qce */
handle = qce_open(pdev, &rc);
if (handle == NULL) {
- kfree(cp);
+ kfree(pengine);
platform_set_drvdata(pdev, NULL);
return rc;
}
- INIT_LIST_HEAD(&cp->alg_list);
- platform_set_drvdata(pdev, cp);
- spin_lock_init(&cp->lock);
- tasklet_init(&cp->done_tasklet, req_done, (unsigned long)cp);
- crypto_init_queue(&cp->queue, 50);
- cp->qce = handle;
- cp->pdev = pdev;
- qce_hw_support(cp->qce, &cp->ce_support);
+ platform_set_drvdata(pdev, pengine);
+ pengine->qce = handle;
+ pengine->pcp = cp;
+ pengine->pdev = pdev;
+ pengine->req = NULL;
+
+ pengine->high_bw_req_count = 0;
+ pengine->high_bw_req = false;
+ init_timer(&(pengine->bw_scale_down_timer));
+ INIT_WORK(&pengine->low_bw_req_ws, qcrypto_low_bw_req_work);
+ pengine->bw_scale_down_timer.function =
+ qcrypto_bw_scale_down_timer_callback;
+
+ device_init_wakeup(&pengine->pdev->dev, true);
+
+ tasklet_init(&pengine->done_tasklet, req_done, (unsigned long)pengine);
+ crypto_init_queue(&pengine->req_queue, 50);
+
+ mutex_lock(&cp->engine_lock);
+ cp->total_units++;
+ pengine->unit = cp->total_units;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ list_add_tail(&pengine->elist, &cp->engine_list);
+ cp->next_engine = pengine;
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ qce_hw_support(pengine->qce, &cp->ce_support);
if (cp->ce_support.bam) {
cp->platform_support.ce_shared = cp->ce_support.is_shared;
cp->platform_support.shared_ce_resource = 0;
cp->platform_support.hw_key_support = cp->ce_support.hw_key;
- cp->platform_support.bus_scale_table = NULL;
cp->platform_support.sha_hmac = 1;
cp->platform_support.bus_scale_table =
@@ -3678,26 +3849,25 @@
platform_support->bus_scale_table;
cp->platform_support.sha_hmac = platform_support->sha_hmac;
}
- cp->high_bw_req_count = 0;
- cp->ce_lock_count = 0;
-
-
- if (cp->platform_support.ce_shared)
- INIT_WORK(&cp->unlock_ce_ws, qcrypto_unlock_ce);
-
+ pengine->bus_scale_handle = 0;
if (cp->platform_support.bus_scale_table != NULL) {
- cp->bus_scale_handle =
+ pengine->bus_scale_handle =
msm_bus_scale_register_client(
(struct msm_bus_scale_pdata *)
cp->platform_support.bus_scale_table);
- if (!cp->bus_scale_handle) {
- printk(KERN_ERR "%s not able to get bus scale\n",
+ if (!pengine->bus_scale_handle) {
+ pr_err("%s not able to get bus scale\n",
__func__);
rc = -ENOMEM;
goto err;
}
}
+ if (cp->total_units != 1) {
+ mutex_unlock(&cp->engine_lock);
+ return 0;
+ }
+
/* register crypto cipher algorithms the device supports */
for (i = 0; i < ARRAY_SIZE(_qcrypto_ablk_cipher_algos); i++) {
struct qcrypto_alg *q_alg;
@@ -3708,6 +3878,17 @@
rc = PTR_ERR(q_alg);
goto err;
}
+ if (cp->ce_support.use_sw_aes_cbc_ecb_ctr_algo) {
+ rc = _qcrypto_prefix_alg_cra_name(
+ q_alg->cipher_alg.cra_name,
+ strlen(q_alg->cipher_alg.cra_name));
+ if (rc) {
+ dev_err(&pdev->dev,
+ "The algorithm name %s is too long.\n",
+ q_alg->cipher_alg.cra_name);
+ goto err;
+ }
+ }
rc = crypto_register_alg(&q_alg->cipher_alg);
if (rc) {
dev_err(&pdev->dev, "%s alg registration failed\n",
@@ -3730,6 +3911,17 @@
rc = PTR_ERR(q_alg);
goto err;
}
+ if (cp->ce_support.use_sw_aes_xts_algo) {
+ rc = _qcrypto_prefix_alg_cra_name(
+ q_alg->cipher_alg.cra_name,
+ strlen(q_alg->cipher_alg.cra_name));
+ if (rc) {
+ dev_err(&pdev->dev,
+ "The algorithm name %s is too long.\n",
+ q_alg->cipher_alg.cra_name);
+ goto err;
+ }
+ }
rc = crypto_register_alg(&q_alg->cipher_alg);
if (rc) {
dev_err(&pdev->dev, "%s alg registration failed\n",
@@ -3755,7 +3947,17 @@
rc = PTR_ERR(q_alg);
goto err;
}
-
+ if (cp->ce_support.use_sw_ahash_algo) {
+ rc = _qcrypto_prefix_alg_cra_name(
+ q_alg->sha_alg.halg.base.cra_name,
+ strlen(q_alg->sha_alg.halg.base.cra_name));
+ if (rc) {
+ dev_err(&pdev->dev,
+ "The algorithm name %s is too long.\n",
+ q_alg->sha_alg.halg.base.cra_name);
+ goto err;
+ }
+ }
rc = crypto_register_ahash(&q_alg->sha_alg);
if (rc) {
dev_err(&pdev->dev, "%s alg registration failed\n",
@@ -3781,7 +3983,17 @@
rc = PTR_ERR(q_alg);
goto err;
}
-
+ if (cp->ce_support.use_sw_aead_algo) {
+ rc = _qcrypto_prefix_alg_cra_name(
+ q_alg->cipher_alg.cra_name,
+ strlen(q_alg->cipher_alg.cra_name));
+ if (rc) {
+ dev_err(&pdev->dev,
+ "The algorithm name %s is too long.\n",
+ q_alg->cipher_alg.cra_name);
+ goto err;
+ }
+ }
rc = crypto_register_alg(&q_alg->cipher_alg);
if (rc) {
dev_err(&pdev->dev,
@@ -3808,7 +4020,18 @@
rc = PTR_ERR(q_alg);
goto err;
}
-
+ if (cp->ce_support.use_sw_hmac_algo) {
+ rc = _qcrypto_prefix_alg_cra_name(
+ q_alg->sha_alg.halg.base.cra_name,
+ strlen(
+ q_alg->sha_alg.halg.base.cra_name));
+ if (rc) {
+ dev_err(&pdev->dev,
+ "The algorithm name %s is too long.\n",
+ q_alg->sha_alg.halg.base.cra_name);
+ goto err;
+ }
+ }
rc = crypto_register_ahash(&q_alg->sha_alg);
if (rc) {
dev_err(&pdev->dev,
@@ -3834,6 +4057,17 @@
rc = PTR_ERR(q_alg);
goto err;
}
+ if (cp->ce_support.use_sw_aes_ccm_algo) {
+ rc = _qcrypto_prefix_alg_cra_name(
+ q_alg->cipher_alg.cra_name,
+ strlen(q_alg->cipher_alg.cra_name));
+ if (rc) {
+ dev_err(&pdev->dev,
+ "The algorithm name %s is too long.\n",
+ q_alg->cipher_alg.cra_name);
+ goto err;
+ }
+ }
rc = crypto_register_alg(&q_alg->cipher_alg);
if (rc) {
dev_err(&pdev->dev, "%s alg registration failed\n",
@@ -3846,9 +4080,14 @@
}
}
+ mutex_unlock(&cp->engine_lock);
return 0;
err:
- _qcrypto_remove(pdev);
+ _qcrypto_remove_engine(pengine);
+ mutex_unlock(&cp->engine_lock);
+ if (pengine->qce)
+ qce_close(pengine->qce);
+ kfree(pengine);
return rc;
};
@@ -3895,10 +4134,19 @@
static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
+ unsigned long flags;
+ struct crypto_priv *cp = &qcrypto_dev;
+ struct crypto_engine *pe;
memset((char *)&_qcrypto_stat, 0, sizeof(struct crypto_stat));
+ spin_lock_irqsave(&cp->lock, flags);
+ list_for_each_entry(pe, &cp->engine_list, elist) {
+ pe->total_req = 0;
+ pe->err_req = 0;
+ }
+ spin_unlock_irqrestore(&cp->lock, flags);
return count;
-};
+}
static const struct file_operations _debug_stats_ops = {
.open = _debug_stats_open,
@@ -3938,11 +4186,20 @@
static int __init _qcrypto_init(void)
{
int rc;
+ struct crypto_priv *pcp = &qcrypto_dev;
rc = _qcrypto_debug_init();
if (rc)
return rc;
-
+ INIT_LIST_HEAD(&pcp->alg_list);
+ INIT_LIST_HEAD(&pcp->engine_list);
+ INIT_WORK(&pcp->unlock_ce_ws, qcrypto_unlock_ce);
+ spin_lock_init(&pcp->lock);
+ mutex_init(&pcp->engine_lock);
+ pcp->total_units = 0;
+ pcp->ce_lock_count = 0;
+ pcp->platform_support.bus_scale_table = NULL;
+ pcp->next_engine = NULL;
return platform_driver_register(&_qualcomm_crypto);
}
diff --git a/drivers/devfreq/governor_cpubw_hwmon.c b/drivers/devfreq/governor_cpubw_hwmon.c
index fb5a562..e7d373b 100644
--- a/drivers/devfreq/governor_cpubw_hwmon.c
+++ b/drivers/devfreq/governor_cpubw_hwmon.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -77,13 +77,15 @@
static int l2pm_irq;
static unsigned int bytes_per_beat;
-static unsigned int sample_ms = 50;
static unsigned int tolerance_percent = 10;
static unsigned int guard_band_mbps = 100;
static unsigned int decay_rate = 90;
-static unsigned int io_percent = 15;
-static unsigned int bw_step = 200;
+static unsigned int io_percent = 16;
+static unsigned int bw_step = 190;
+#define MIN_MS 10U
+#define MAX_MS 500U
+static unsigned int sample_ms = 50;
static u32 prev_r_start_val;
static u32 prev_w_start_val;
static unsigned long prev_ab;
@@ -245,7 +247,7 @@
}
*ab = roundup(mbps, bw_step);
- *freq = roundup((mbps * 100) / io_percent, bw_step);
+ *freq = (mbps * 100) / io_percent;
}
#define TOO_SOON_US (1 * USEC_PER_MSEC)
@@ -343,7 +345,6 @@
return 0;
}
-gov_attr(sample_ms, 10U, 500U);
gov_attr(tolerance_percent, 0U, 30U);
gov_attr(guard_band_mbps, 0U, 2000U);
gov_attr(decay_rate, 0U, 100U);
@@ -351,7 +352,6 @@
gov_attr(bw_step, 50U, 1000U);
static struct attribute *dev_attr[] = {
- &dev_attr_sample_ms.attr,
&dev_attr_tolerance_percent.attr,
&dev_attr_guard_band_mbps.attr,
&dev_attr_decay_rate.attr,
@@ -378,7 +378,13 @@
ret = sysfs_create_group(&df->dev.kobj, &dev_attr_group);
if (ret)
return ret;
+
+ sample_ms = df->profile->polling_ms;
+ sample_ms = max(MIN_MS, sample_ms);
+ sample_ms = min(MAX_MS, sample_ms);
+ df->profile->polling_ms = sample_ms;
devfreq_monitor_start(df);
+
pr_debug("Enabled CPU BW HW monitor governor\n");
break;
@@ -391,7 +397,10 @@
break;
case DEVFREQ_GOV_INTERVAL:
- devfreq_interval_update(df, (unsigned int *)data);
+ sample_ms = *(unsigned int *)data;
+ sample_ms = max(MIN_MS, sample_ms);
+ sample_ms = min(MAX_MS, sample_ms);
+ devfreq_interval_update(df, &sample_ms);
break;
}
diff --git a/drivers/gpu/ion/ion_heap.c b/drivers/gpu/ion/ion_heap.c
index 73dd868..6261d89 100644
--- a/drivers/gpu/ion/ion_heap.c
+++ b/drivers/gpu/ion/ion_heap.c
@@ -240,43 +240,6 @@
return ret;
}
-int ion_heap_buffer_zero_old(struct ion_buffer *buffer)
-{
- struct sg_table *table = buffer->sg_table;
- pgprot_t pgprot;
- struct scatterlist *sg;
- struct vm_struct *vm_struct;
- int i, j, ret = 0;
-
- if (buffer->flags & ION_FLAG_CACHED)
- pgprot = PAGE_KERNEL;
- else
- pgprot = pgprot_writecombine(PAGE_KERNEL);
-
- vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC);
- if (!vm_struct)
- return -ENOMEM;
-
- for_each_sg(table->sgl, sg, table->nents, i) {
- struct page *page = sg_page(sg);
- unsigned long len = sg_dma_len(sg);
-
- for (j = 0; j < len / PAGE_SIZE; j++) {
- struct page *sub_page = page + j;
- struct page **pages = &sub_page;
- ret = map_vm_area(vm_struct, pgprot, &pages);
- if (ret)
- goto end;
- memset(vm_struct->addr, 0, PAGE_SIZE);
- unmap_kernel_range((unsigned long)vm_struct->addr,
- PAGE_SIZE);
- }
- }
-end:
- free_vm_area(vm_struct);
- return ret;
-}
-
void ion_heap_free_page(struct ion_buffer *buffer, struct page *page,
unsigned int order)
{
@@ -327,11 +290,11 @@
if (total_drained >= size)
break;
list_del(&buffer->list);
- ion_buffer_destroy(buffer);
heap->free_list_size -= buffer->size;
if (skip_pools)
buffer->flags |= ION_FLAG_FREED_FROM_SHRINKER;
total_drained += buffer->size;
+ ion_buffer_destroy(buffer);
}
rt_mutex_unlock(&heap->lock);
diff --git a/drivers/gpu/msm/adreno_a3xx_snapshot.c b/drivers/gpu/msm/adreno_a3xx_snapshot.c
index 56c4305..9f5765d 100644
--- a/drivers/gpu/msm/adreno_a3xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a3xx_snapshot.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -492,6 +492,22 @@
/* Reading these will hang the GPU if it isn't already hung */
if (hang) {
+ unsigned int reg;
+
+ /*
+	 * Reading the microcode while the CP is running will
+	 * basically move the CP instruction pointer to
+ * whatever address we read. Big badaboom ensues. Stop the CP
+ * (if it isn't already stopped) to ensure that we are safe.
+ * We do this here and not earlier to avoid corrupting the RBBM
+ * status and CP registers - by the time we get here we don't
+ * care about the contents of the CP anymore.
+ */
+
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_ME_CNTL, ®);
+ reg |= (1 << 27) | (1 << 28);
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_CNTL, reg);
+
snapshot = kgsl_snapshot_add_section(device,
KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
a3xx_snapshot_cp_pfp_ram, NULL);
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index a39ceef..95e4017 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -397,7 +397,7 @@
*/
if (count)
- wake_up_interruptible_all(&drawctxt->wq);
+ wake_up_all(&drawctxt->wq);
/*
* Return positive if the context submitted commands or if we figured
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index 6007a3f..d727423 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -138,7 +138,7 @@
u32 timestamp, u32 type)
{
struct adreno_context *drawctxt = priv;
- wake_up_interruptible_all(&drawctxt->waiting);
+ wake_up_all(&drawctxt->waiting);
}
#define adreno_wait_event_interruptible_timeout(wq, condition, timeout, io) \
@@ -266,7 +266,7 @@
{
struct adreno_context *drawctxt = priv;
- wake_up_interruptible_all(&drawctxt->waiting);
+ wake_up_all(&drawctxt->waiting);
kgsl_context_put(&drawctxt->base);
}
@@ -310,7 +310,7 @@
mutex_unlock(&device->mutex);
if (timeout) {
- ret = (int) wait_event_interruptible_timeout(drawctxt->waiting,
+ ret = (int) wait_event_timeout(drawctxt->waiting,
_check_global_timestamp(device, timestamp),
msecs_to_jiffies(timeout));
@@ -319,7 +319,7 @@
else if (ret > 0)
ret = 0;
} else {
- ret = (int) wait_event_interruptible(drawctxt->waiting,
+ wait_event(drawctxt->waiting,
_check_global_timestamp(device, timestamp));
}
@@ -386,8 +386,8 @@
mutex_unlock(&drawctxt->mutex);
/* Give the bad news to everybody waiting around */
- wake_up_interruptible_all(&drawctxt->waiting);
- wake_up_interruptible_all(&drawctxt->wq);
+ wake_up_all(&drawctxt->waiting);
+ wake_up_all(&drawctxt->wq);
}
/**
@@ -543,6 +543,14 @@
ret = adreno_drawctxt_wait_global(adreno_dev, context,
drawctxt->internal_timestamp, 10 * 1000);
+ /*
+ * If the wait for global fails then nothing after this point is likely
+ * to work very well - BUG_ON() so we can take advantage of the debug
+ * tools to figure out what the h - e - double hockey sticks happened
+ */
+
+ BUG_ON(ret);
+
kgsl_sharedmem_writel(device, &device->memstore,
KGSL_MEMSTORE_OFFSET(context->id, soptimestamp),
drawctxt->timestamp);
@@ -557,8 +565,8 @@
drawctxt->ops->detach(drawctxt);
/* wake threads waiting to submit commands from this context */
- wake_up_interruptible_all(&drawctxt->waiting);
- wake_up_interruptible_all(&drawctxt->wq);
+ wake_up_all(&drawctxt->waiting);
+ wake_up_all(&drawctxt->wq);
return ret;
}
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 1383a20..b23d3ab 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -665,7 +665,8 @@
total_sizedwords += (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE) ? 2 : 0;
/* Add two dwords for the CP_INTERRUPT */
- total_sizedwords += drawctxt ? 2 : 0;
+ total_sizedwords +=
+ (drawctxt || (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) ? 2 : 0;
if (adreno_is_a3xx(adreno_dev))
total_sizedwords += 7;
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index c6996a4..cd6989c 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -2550,6 +2550,15 @@
.resume = mxt_resume,
#endif
};
+#else
+static int mxt_suspend(struct device *dev)
+{
+ return 0;
+};
+static int mxt_resume(struct device *dev)
+{
+ return 0;
+};
#endif
static int mxt_debugfs_object_show(struct seq_file *m, void *v)
diff --git a/drivers/input/touchscreen/gt9xx/gt9xx.c b/drivers/input/touchscreen/gt9xx/gt9xx.c
index a71526a..912d87c 100644
--- a/drivers/input/touchscreen/gt9xx/gt9xx.c
+++ b/drivers/input/touchscreen/gt9xx/gt9xx.c
@@ -1,6 +1,6 @@
/* drivers/input/touchscreen/gt9xx.c
*
- * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
*
* Linux Foundation chooses to take subject only to the GPLv2 license
* terms, and distributes only under these terms.
@@ -121,6 +121,8 @@
#define GTP_DEBUGFS_DIR "ts_debug"
#define GTP_DEBUGFS_FILE_SUSPEND "suspend"
+#define GTP_DEBUGFS_FILE_DATA "data"
+#define GTP_DEBUGFS_FILE_ADDR "addr"
/*******************************************************
Function:
@@ -1528,12 +1530,62 @@
return size;
}
+static ssize_t gtp_fw_upgrade_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct goodix_ts_data *ts = dev_get_drvdata(dev);
+ return snprintf(buf, 2, "%d\n", ts->fw_loading);
+}
+
+static ssize_t gtp_fw_upgrade_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct goodix_ts_data *ts = dev_get_drvdata(dev);
+ unsigned long val;
+ int ret;
+
+ if (size > 2)
+ return -EINVAL;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret != 0)
+ return ret;
+
+ if (ts->gtp_is_suspend) {
+ dev_err(&ts->client->dev,
+ "Can't start fw upgrade. Device is in suspend state.");
+ return -EBUSY;
+ }
+
+ mutex_lock(&ts->input_dev->mutex);
+ if (!ts->fw_loading && val) {
+ disable_irq(ts->client->irq);
+ ts->fw_loading = true;
+ if (config_enabled(CONFIG_GT9XX_TOUCHPANEL_UPDATE)) {
+ ret = gup_update_proc(NULL);
+ if (ret == FAIL)
+ dev_err(&ts->client->dev,
+ "Fail to update GTP firmware.\n");
+ }
+ ts->fw_loading = false;
+ enable_irq(ts->client->irq);
+ }
+ mutex_unlock(&ts->input_dev->mutex);
+
+ return size;
+}
+
static DEVICE_ATTR(fw_name, (S_IRUGO | S_IWUSR | S_IWGRP),
gtp_fw_name_show,
gtp_fw_name_store);
+static DEVICE_ATTR(fw_upgrade, (S_IRUGO | S_IWUSR | S_IWGRP),
+ gtp_fw_upgrade_show,
+ gtp_fw_upgrade_store);
static struct attribute *gtp_attrs[] = {
&dev_attr_fw_name.attr,
+ &dev_attr_fw_upgrade.attr,
NULL
};
@@ -1541,6 +1593,84 @@
.attrs = gtp_attrs,
};
+static int gtp_debug_addr_is_valid(u16 addr)
+{
+ if (addr < GTP_VALID_ADDR_START || addr > GTP_VALID_ADDR_END) {
+ pr_err("GTP reg address is invalid: 0x%x\n", addr);
+ return false;
+ }
+
+ return true;
+}
+
+static int gtp_debug_data_set(void *_data, u64 val)
+{
+ struct goodix_ts_data *ts = _data;
+
+ mutex_lock(&ts->input_dev->mutex);
+ if (gtp_debug_addr_is_valid(ts->addr))
+ dev_err(&ts->client->dev,
+ "Writing to GTP registers not supported.\n");
+ mutex_unlock(&ts->input_dev->mutex);
+
+ return 0;
+}
+
+static int gtp_debug_data_get(void *_data, u64 *val)
+{
+ struct goodix_ts_data *ts = _data;
+ int ret;
+ u8 buf[3] = {0};
+
+ mutex_lock(&ts->input_dev->mutex);
+ buf[0] = ts->addr >> 8;
+ buf[1] = ts->addr & 0x00ff;
+
+ if (gtp_debug_addr_is_valid(ts->addr)) {
+ ret = gtp_i2c_read(ts->client, buf, 3);
+ if (ret < 0)
+ dev_err(&ts->client->dev,
+ "GTP read register 0x%x failed (%d)\n",
+ ts->addr, ret);
+ else
+ *val = buf[2];
+ }
+ mutex_unlock(&ts->input_dev->mutex);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(debug_data_fops, gtp_debug_data_get,
+ gtp_debug_data_set, "%llx\n");
+
+static int gtp_debug_addr_set(void *_data, u64 val)
+{
+ struct goodix_ts_data *ts = _data;
+
+ if (gtp_debug_addr_is_valid(val)) {
+ mutex_lock(&ts->input_dev->mutex);
+ ts->addr = val;
+ mutex_unlock(&ts->input_dev->mutex);
+ }
+
+ return 0;
+}
+
+static int gtp_debug_addr_get(void *_data, u64 *val)
+{
+ struct goodix_ts_data *ts = _data;
+
+ mutex_lock(&ts->input_dev->mutex);
+ if (gtp_debug_addr_is_valid(ts->addr))
+ *val = ts->addr;
+ mutex_unlock(&ts->input_dev->mutex);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(debug_addr_fops, gtp_debug_addr_get,
+ gtp_debug_addr_set, "%llx\n");
+
static int gtp_debug_suspend_set(void *_data, u64 val)
{
struct goodix_ts_data *ts = _data;
@@ -1574,7 +1704,7 @@
data->debug_base = debugfs_create_dir(GTP_DEBUGFS_DIR, NULL);
if (IS_ERR_OR_NULL(data->debug_base)) {
- pr_err("Failed to create debugfs dir.\n");
+ dev_err(&data->client->dev, "Failed to create debugfs dir.\n");
return -EINVAL;
}
@@ -1583,7 +1713,27 @@
data->debug_base,
data,
&debug_suspend_fops)))) {
- pr_err("Failed to create suspend file.\n");
+ dev_err(&data->client->dev, "Failed to create suspend file.\n");
+ debugfs_remove_recursive(data->debug_base);
+ return -EINVAL;
+ }
+
+ if ((IS_ERR_OR_NULL(debugfs_create_file(GTP_DEBUGFS_FILE_DATA,
+ S_IWUSR | S_IWGRP | S_IRUSR | S_IRGRP,
+ data->debug_base,
+ data,
+ &debug_data_fops)))) {
+ dev_err(&data->client->dev, "Failed to create data file.\n");
+ debugfs_remove_recursive(data->debug_base);
+ return -EINVAL;
+ }
+
+ if ((IS_ERR_OR_NULL(debugfs_create_file(GTP_DEBUGFS_FILE_ADDR,
+ S_IWUSR | S_IWGRP | S_IRUSR | S_IRGRP,
+ data->debug_base,
+ data,
+ &debug_addr_fops)))) {
+ dev_err(&data->client->dev, "Failed to create addr file.\n");
debugfs_remove_recursive(data->debug_base);
return -EINVAL;
}
@@ -1771,9 +1921,7 @@
return -EINVAL;
}
-#if GTP_ESD_PROTECT
i2c_connect_client = client;
-#endif
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
dev_err(&client->dev, "GTP I2C not supported\n");
@@ -1827,15 +1975,14 @@
strlcpy(ts->fw_name, pdata->fw_name,
strlen(pdata->fw_name) + 1);
-#ifdef CONFIG_GT9XX_TOUCHPANEL_UPDATE
- ret = gup_init_update_proc(ts);
- if (ret < 0) {
- dev_err(&client->dev,
- "GTP Create firmware update thread error.\n");
- goto exit_power_off;
+ if (config_enabled(CONFIG_GT9XX_TOUCHPANEL_UPDATE)) {
+ ret = gup_init_update_proc(ts);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "GTP Create firmware update thread error.\n");
+ goto exit_power_off;
+ }
}
-#endif
-
ret = gtp_init_panel(ts);
if (ret < 0) {
dev_err(&client->dev, "GTP init panel failed.\n");
@@ -2032,6 +2179,14 @@
}
mutex_lock(&ts->lock);
+
+ if (ts->fw_loading) {
+ dev_info(&ts->client->dev,
+ "Fw upgrade in progress, can't go to suspend.");
+ mutex_unlock(&ts->lock);
+ return 0;
+ }
+
#if GTP_ESD_PROTECT
gtp_esd_switch(ts->client, SWITCH_OFF);
#endif
diff --git a/drivers/input/touchscreen/gt9xx/gt9xx.h b/drivers/input/touchscreen/gt9xx/gt9xx.h
index 4d656dd..7a1af23 100644
--- a/drivers/input/touchscreen/gt9xx/gt9xx.h
+++ b/drivers/input/touchscreen/gt9xx/gt9xx.h
@@ -1,6 +1,6 @@
/* drivers/input/touchscreen/gt9xx.h
*
- * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
*
* Linux Foundation chooses to take subject only to the GPLv2 license
* terms, and distributes only under these terms.
@@ -78,6 +78,7 @@
s32 use_irq;
u16 abs_x_max;
u16 abs_y_max;
+ u16 addr;
u8 max_touch_num;
u8 int_trigger_type;
u8 green_wake_mode;
@@ -92,6 +93,7 @@
u8 fw_error;
bool power_on;
struct mutex lock;
+ bool fw_loading;
struct regulator *avdd;
struct regulator *vdd;
struct regulator *vcc_i2c;
@@ -175,6 +177,8 @@
/* HIGH: 0x28/0x29, LOW: 0xBA/0xBB */
#define GTP_I2C_ADDRESS_HIGH 0x14
#define GTP_I2C_ADDRESS_LOW 0x5D
+#define GTP_VALID_ADDR_START 0x8040
+#define GTP_VALID_ADDR_END 0x8177
#define CFG_GROUP_LEN(p_cfg_grp) (sizeof(p_cfg_grp) / sizeof(p_cfg_grp[0]))
@@ -215,11 +219,9 @@
void uninit_wr_node(void);
#endif
-#ifdef CONFIG_GT9XX_TOUCHPANEL_UPDATE
-extern u8 gup_init_update_proc(struct goodix_ts_data *ts);
+u8 gup_init_update_proc(struct goodix_ts_data *ts);
s32 gup_enter_update_mode(struct i2c_client *client);
void gup_leave_update_mode(struct i2c_client *client);
s32 gup_update_proc(void *dir);
extern struct i2c_client *i2c_connect_client;
-#endif
#endif /* _GOODIX_GT9XX_H_ */
diff --git a/drivers/input/touchscreen/synaptics_fw_update.c b/drivers/input/touchscreen/synaptics_fw_update.c
index 7da0376..e577353 100644
--- a/drivers/input/touchscreen/synaptics_fw_update.c
+++ b/drivers/input/touchscreen/synaptics_fw_update.c
@@ -1559,7 +1559,7 @@
}
dev_dbg(&fwu->rmi4_data->i2c_client->dev,
- "%s: Firmware image size = %d\n",
+ "%s: Firmware image size = %zu\n",
__func__, fw_entry->size);
fwu->data_buffer = fw_entry->data;
@@ -1670,7 +1670,7 @@
if (count < fwu->config_size) {
dev_err(&rmi4_data->i2c_client->dev,
- "%s: Not enough space (%d bytes) in buffer\n",
+ "%s: Not enough space (%zu bytes) in buffer\n",
__func__, count);
return -EINVAL;
}
diff --git a/drivers/iommu/msm_iommu_sec.c b/drivers/iommu/msm_iommu_sec.c
index e293279..38dd41b 100644
--- a/drivers/iommu/msm_iommu_sec.c
+++ b/drivers/iommu/msm_iommu_sec.c
@@ -61,6 +61,12 @@
static struct iommu_access_ops *iommu_access_ops;
+static const struct of_device_id msm_smmu_list[] = {
+ { .compatible = "qcom,msm-smmu-v1", },
+ { .compatible = "qcom,msm-smmu-v2", },
+ { }
+};
+
struct msm_scm_paddr_list {
unsigned int list;
unsigned int list_size;
@@ -296,8 +302,9 @@
int ret, ptbl_ret = 0;
int version;
- for_each_compatible_node(np, NULL, "qcom,msm-smmu-v1")
- if (of_find_property(np, "qcom,iommu-secure-id", NULL))
+ for_each_matching_node(np, msm_smmu_list)
+ if (of_find_property(np, "qcom,iommu-secure-id", NULL) &&
+ of_device_is_available(np))
break;
if (!np)
diff --git a/drivers/leds/leds-qpnp.c b/drivers/leds/leds-qpnp.c
index da90440..a40b68a 100644
--- a/drivers/leds/leds-qpnp.c
+++ b/drivers/leds/leds-qpnp.c
@@ -136,11 +136,11 @@
#define FLASH_SELFCHECK_ENABLE 0x80
#define FLASH_RAMP_STEP_27US 0xBF
-#define FLASH_STROBE_SW 0xC0
-#define FLASH_STROBE_HW 0x04
+#define FLASH_HW_SW_STROBE_SEL_MASK 0x04
#define FLASH_STROBE_MASK 0xC7
#define FLASH_LED_0_OUTPUT 0x80
#define FLASH_LED_1_OUTPUT 0x40
+#define FLASH_TORCH_OUTPUT 0xC0
#define FLASH_CURRENT_PRGM_MIN 1
#define FLASH_CURRENT_PRGM_SHIFT 1
@@ -1001,6 +1001,13 @@
goto error_reg_write;
}
+ if (!led->flash_cfg->strobe_type)
+ led->flash_cfg->trigger_flash &=
+ ~FLASH_HW_SW_STROBE_SEL_MASK;
+ else
+ led->flash_cfg->trigger_flash |=
+ FLASH_HW_SW_STROBE_SEL_MASK;
+
rc = qpnp_led_masked_write(led,
FLASH_LED_STROBE_CTRL(led->base),
led->flash_cfg->trigger_flash,
@@ -1080,30 +1087,22 @@
*/
usleep(FLASH_RAMP_UP_DELAY_US);
- if (!led->flash_cfg->strobe_type) {
- rc = qpnp_led_masked_write(led,
- FLASH_LED_STROBE_CTRL(led->base),
- led->flash_cfg->trigger_flash,
- led->flash_cfg->trigger_flash);
- if (rc) {
- dev_err(&led->spmi_dev->dev,
- "LED %d strobe reg write failed(%d)\n",
- led->id, rc);
- goto error_flash_set;
- }
- } else {
- rc = qpnp_led_masked_write(led,
- FLASH_LED_STROBE_CTRL(led->base),
- (led->flash_cfg->trigger_flash |
- FLASH_STROBE_HW),
- (led->flash_cfg->trigger_flash |
- FLASH_STROBE_HW));
- if (rc) {
- dev_err(&led->spmi_dev->dev,
- "LED %d strobe reg write failed(%d)\n",
- led->id, rc);
- goto error_flash_set;
- }
+ if (!led->flash_cfg->strobe_type)
+ led->flash_cfg->trigger_flash &=
+ ~FLASH_HW_SW_STROBE_SEL_MASK;
+ else
+ led->flash_cfg->trigger_flash |=
+ FLASH_HW_SW_STROBE_SEL_MASK;
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_LED_STROBE_CTRL(led->base),
+ led->flash_cfg->trigger_flash,
+ led->flash_cfg->trigger_flash);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "LED %d strobe reg write failed(%d)\n",
+ led->id, rc);
+ goto error_flash_set;
}
}
} else {
@@ -2733,7 +2732,7 @@
led->flash_cfg->enable_module = FLASH_ENABLE_MODULE;
} else
led->flash_cfg->enable_module = FLASH_ENABLE_ALL;
- led->flash_cfg->trigger_flash = FLASH_STROBE_SW;
+ led->flash_cfg->trigger_flash = FLASH_TORCH_OUTPUT;
}
rc = of_property_read_u32(node, "qcom,current", &val);
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v1.h b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v1.h
index c8a4366..567a263 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v1.h
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v1.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -34,12 +34,20 @@
#define ISPIF_VFE_m_INTF_CMD_0(m) (0x0004 + ISPIF_VFE(m))
#define ISPIF_VFE_m_INTF_CMD_1(m) (0x0030 + ISPIF_VFE(m))
#define ISPIF_VFE_m_PIX_INTF_n_CID_MASK(m, n) (0x0010 + ISPIF_VFE(m) + 4*(n))
-#define ISPIF_VFE_m_RDI_INTF_n_CID_MASK(m, n) (0x0014 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_RDI_INTF_n_CID_MASK(m, n) (0x0014 + ISPIF_VFE(m) + \
+ ((n > 0) ? (0x20) : 0) \
+ + 8*(n))
#define ISPIF_VFE_m_PIX_OUTPUT_n_MISR(m, n) (0x0290 + ISPIF_VFE(m) + 4*(n))
-#define ISPIF_VFE_m_RDI_OUTPUT_n_MISR_0(m, n) (0x0298 + ISPIF_VFE(m) + 8*(n))
-#define ISPIF_VFE_m_RDI_OUTPUT_n_MISR_1(m, n) (0x029C + ISPIF_VFE(m) + 8*(n))
+#define ISPIF_VFE_m_RDI_OUTPUT_n_MISR_0(m, n) (0x001C + ISPIF_VFE(m) + \
+ ((n > 0) ? (0x24) : 0) \
+ + 0xc*(n))
+#define ISPIF_VFE_m_RDI_OUTPUT_n_MISR_1(m, n) (0x0020 + ISPIF_VFE(m) + \
+ ((n > 0) ? (0x24) : 0) \
+ + 0xc*(n))
#define ISPIF_VFE_m_PIX_INTF_n_STATUS(m, n) (0x0024 + ISPIF_VFE(m) + 4*(n))
-#define ISPIF_VFE_m_RDI_INTF_n_STATUS(m, n) (0x0028 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_RDI_INTF_n_STATUS(m, n) (0x0028 + ISPIF_VFE(m) + \
+ ((n > 0) ? (0x34) : 0) \
+ + 8*(n))
/* Defines for compatibility with newer ISPIF versions */
#define ISPIF_RST_CMD_1_ADDR (0x0000)
diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c
index acdd5d0..7e84e7b 100755
--- a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1874,11 +1874,11 @@
s_ctrl->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_SENSOR;
s_ctrl->msm_sd.sd.entity.name =
s_ctrl->msm_sd.sd.name;
- mount_pos = s_ctrl->sensordata->sensor_init_params->position;
- mount_pos = mount_pos << 8;
- mount_pos = mount_pos |
- (s_ctrl->sensordata->sensor_init_params->sensor_mount_angle / 90);
- s_ctrl->msm_sd.sd.entity.flags = mount_pos;
+
+ mount_pos = s_ctrl->sensordata->sensor_init_params->position << 16;
+ mount_pos = mount_pos | ((s_ctrl->sensordata->sensor_init_params->
+ sensor_mount_angle / 90) << 8);
+ s_ctrl->msm_sd.sd.entity.flags = mount_pos | MEDIA_ENT_FL_DEFAULT;
rc = camera_init_v4l2(&s_ctrl->pdev->dev, &session_id);
CDBG("%s rc %d session_id %d\n", __func__, rc, session_id);
@@ -1995,11 +1995,10 @@
s_ctrl->msm_sd.sd.entity.name =
s_ctrl->msm_sd.sd.name;
- mount_pos = s_ctrl->sensordata->sensor_init_params->position;
- mount_pos = mount_pos << 8;
- mount_pos = mount_pos |
- (s_ctrl->sensordata->sensor_init_params->sensor_mount_angle / 90);
- s_ctrl->msm_sd.sd.entity.flags = mount_pos;
+ mount_pos = s_ctrl->sensordata->sensor_init_params->position << 16;
+ mount_pos = mount_pos | ((s_ctrl->sensordata->sensor_init_params->
+ sensor_mount_angle / 90) << 8);
+ s_ctrl->msm_sd.sd.entity.flags = mount_pos | MEDIA_ENT_FL_DEFAULT;
rc = camera_init_v4l2(&s_ctrl->sensor_i2c_client->client->dev,
&session_id);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 035fc4b..9774c3c 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -246,38 +246,38 @@
return ret;
}
-struct buffer_info *get_same_fd_buffer(struct msm_vidc_inst *inst,
- struct list_head *list, int fd, int *plane)
+struct msm_smem *get_same_fd_buffer(struct msm_vidc_inst *inst,
+ struct list_head *list, int fd)
{
struct buffer_info *temp;
- struct buffer_info *ret = NULL;
+ struct msm_smem *same_fd_handle = NULL;
+
int i;
if (fd == 0)
return NULL;
- if (!list || fd < 0 || !plane) {
+ if (!list || fd < 0) {
dprintk(VIDC_ERR, "Invalid input\n");
goto err_invalid_input;
}
- *plane = 0;
mutex_lock(&inst->lock);
list_for_each_entry(temp, list, list) {
for (i = 0; (i < temp->num_planes)
&& (i < VIDEO_MAX_PLANES); i++) {
- if (temp && temp->fd[i] == fd) {
+ if (temp && (temp->fd[i] == fd) &&
+ temp->handle[i] && temp->mapped[i]) {
temp->same_fd_ref[i]++;
dprintk(VIDC_INFO,
"Found same fd buffer\n");
- ret = temp;
- *plane = i;
+ same_fd_handle = temp->handle[i];
break;
}
}
- if (ret)
+ if (same_fd_handle)
break;
}
mutex_unlock(&inst->lock);
err_invalid_input:
- return ret;
+ return same_fd_handle;
}
struct buffer_info *device_to_uvaddr(struct msm_vidc_inst *inst,
@@ -423,6 +423,7 @@
struct buffer_info *temp = NULL;
int plane = 0;
int i = 0, rc = 0;
+ struct msm_smem *same_fd_handle = NULL;
if (!b || !inst) {
dprintk(VIDC_ERR, "%s: invalid input\n", __func__);
@@ -480,16 +481,17 @@
if (rc < 0)
goto exit;
- temp = get_same_fd_buffer(inst, &inst->registered_bufs,
- b->m.planes[i].reserved[0], &plane);
+ same_fd_handle = get_same_fd_buffer(inst,
+ &inst->registered_bufs,
+ b->m.planes[i].reserved[0]);
populate_buf_info(binfo, b, i);
- if (temp) {
+ if (same_fd_handle) {
binfo->device_addr[i] =
- temp->handle[plane]->device_addr + binfo->buff_off[i];
+ same_fd_handle->device_addr + binfo->buff_off[i];
b->m.planes[i].m.userptr = binfo->device_addr[i];
binfo->mapped[i] = false;
- binfo->handle[i] = temp->handle[i];
+ binfo->handle[i] = same_fd_handle;
} else {
if (inst->map_output_buffer) {
binfo->handle[i] =
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index 5b2ec1f..ca28003 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -2348,6 +2348,7 @@
int ret = 0;
switch (cmd) {
+ case VIDIOC_PREPARE_BUF:
case VIDIOC_QUERYBUF:
case VIDIOC_QBUF:
case VIDIOC_DQBUF: {
diff --git a/drivers/mfd/wcd9xxx-core-resource.c b/drivers/mfd/wcd9xxx-core-resource.c
index 1791d72..1d0f894 100644
--- a/drivers/mfd/wcd9xxx-core-resource.c
+++ b/drivers/mfd/wcd9xxx-core-resource.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -55,6 +55,8 @@
int (*codec_read)(struct wcd9xxx_core_resource*, unsigned short),
int (*codec_write)(struct wcd9xxx_core_resource*, unsigned short, u8),
int (*codec_bulk_read) (struct wcd9xxx_core_resource*, unsigned short,
+ int, u8*),
+ int (*codec_bulk_write) (struct wcd9xxx_core_resource*, unsigned short,
int, u8*))
{
mutex_init(&wcd9xxx_core_res->pm_lock);
@@ -68,6 +70,7 @@
wcd9xxx_core_res->codec_reg_read = codec_read;
wcd9xxx_core_res->codec_reg_write = codec_write;
wcd9xxx_core_res->codec_bulk_read = codec_bulk_read;
+ wcd9xxx_core_res->codec_bulk_write = codec_bulk_write;
wcd9xxx_core_res->num_irqs = num_irqs;
wcd9xxx_core_res->num_irq_regs = num_irq_regs;
diff --git a/drivers/mfd/wcd9xxx-core.c b/drivers/mfd/wcd9xxx-core.c
index 64053de..1c42431 100644
--- a/drivers/mfd/wcd9xxx-core.c
+++ b/drivers/mfd/wcd9xxx-core.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -605,7 +605,7 @@
wcd9xxx->codec_type->num_irqs,
wcd9xxx_num_irq_regs(wcd9xxx),
wcd9xxx_reg_read, wcd9xxx_reg_write,
- wcd9xxx_bulk_read);
+ wcd9xxx_bulk_read, wcd9xxx_bulk_write);
if (wcd9xxx_core_irq_init(&wcd9xxx->core_res))
goto err;
diff --git a/drivers/mfd/wcd9xxx-irq.c b/drivers/mfd/wcd9xxx-irq.c
index 9209f0b..7644984 100644
--- a/drivers/mfd/wcd9xxx-irq.c
+++ b/drivers/mfd/wcd9xxx-irq.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -316,10 +316,12 @@
}
memset(status, 0xff, num_irq_regs);
- wcd9xxx_bulk_write(wcd9xxx_res, WCD9XXX_A_INTR_CLEAR0,
- num_irq_regs, status);
+
+ ret = wcd9xxx_res->codec_bulk_write(wcd9xxx_res,
+ WCD9XXX_A_INTR_CLEAR0,
+ num_irq_regs, status);
if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
- wcd9xxx_reg_write(wcd9xxx_res,
+ wcd9xxx_res->codec_reg_write(wcd9xxx_res,
WCD9XXX_A_INTR_MODE, 0x02);
}
wcd9xxx_unlock_sleep(wcd9xxx_res);
diff --git a/drivers/misc/isa1200.c b/drivers/misc/isa1200.c
index 8090b95..d7fa87b 100644
--- a/drivers/misc/isa1200.c
+++ b/drivers/misc/isa1200.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2009 Samsung Electronics
* Kyungmin Park <kyungmin.park@samsung.com>
- * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -318,7 +318,7 @@
value |= (haptic->pdata->mode_ctrl << 3) |
(haptic->pdata->overdrive_high << 5) |
- (haptic->pdata->overdrive_en << 5) |
+ (haptic->pdata->overdrive_en << 6) |
(haptic->pdata->chip_en << 7);
rc = isa1200_write_reg(client, ISA1200_HCTRL0, value);
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 2fe9c72..aad7fb3 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -71,6 +71,9 @@
#define RPMB_SERVICE 0x2000
+#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT 2000
+#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT 2000
+
enum qseecom_clk_definitions {
CLK_DFAB = 0,
CLK_SFPB,
@@ -162,6 +165,12 @@
struct qseecom_clk qsee;
struct qseecom_clk ce_drv;
struct cdev cdev;
+
+ bool support_bus_scaling;
+ uint32_t cumulative_mode;
+ enum qseecom_bandwidth_request_mode current_mode;
+ struct timer_list bw_scale_down_timer;
+ struct work_struct bw_inactive_req_ws;
};
struct qseecom_client_handle {
@@ -191,6 +200,7 @@
atomic_t ioctl_count;
bool perf_enabled;
bool fast_load_enabled;
+ enum qseecom_bandwidth_request_mode mode;
};
enum qseecom_set_clear_key_flag {
@@ -328,6 +338,7 @@
return -EFAULT;
data->listener.id = 0;
+ data->type = QSEECOM_LISTENER_SERVICE;
if (!__qseecom_is_svc_unique(data, &rcvd_lstnr)) {
pr_err("Service is not unique and is already registered\n");
data->released = true;
@@ -437,6 +448,157 @@
return ret;
}
+static int __qseecom_set_msm_bus_request(uint32_t mode)
+{
+ int ret = 0;
+ struct qseecom_clk *qclk;
+
+ qclk = &qseecom.qsee;
+ if (qclk->ce_core_src_clk != NULL) {
+ if (mode == INACTIVE) {
+ __qseecom_disable_clk(CLK_QSEE);
+ } else {
+ ret = __qseecom_enable_clk(CLK_QSEE);
+ if (ret)
+ pr_err("CLK enabling failed (%d) MODE (%d)\n",
+ ret, mode);
+ }
+ }
+
+ if ((!ret) && (qseecom.current_mode != mode)) {
+ ret = msm_bus_scale_client_update_request(
+ qseecom.qsee_perf_client, mode);
+ if (ret) {
+ pr_err("Bandwidth req failed(%d) MODE (%d)\n",
+ ret, mode);
+ if (qclk->ce_core_src_clk != NULL) {
+ if (mode == INACTIVE)
+ __qseecom_enable_clk(CLK_QSEE);
+ else
+ __qseecom_disable_clk(CLK_QSEE);
+ }
+ }
+ qseecom.current_mode = mode;
+ }
+ return ret;
+}
+
+static void qseecom_bw_inactive_req_work(struct work_struct *work)
+{
+ mutex_lock(&app_access_lock);
+ mutex_lock(&qsee_bw_mutex);
+ __qseecom_set_msm_bus_request(INACTIVE);
+ pr_debug("current_mode = %d, cumulative_mode = %d\n",
+ qseecom.current_mode, qseecom.cumulative_mode);
+ mutex_unlock(&qsee_bw_mutex);
+ mutex_unlock(&app_access_lock);
+ return;
+}
+
+static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
+{
+ schedule_work(&qseecom.bw_inactive_req_ws);
+ return;
+}
+
+static int qseecom_scale_bus_bandwidth_timer(uint32_t mode, uint32_t duration)
+{
+ int32_t ret = 0;
+ int32_t request_mode = INACTIVE;
+
+ mutex_lock(&qsee_bw_mutex);
+ if (mode == 0) {
+ if (qseecom.cumulative_mode > MEDIUM)
+ request_mode = HIGH;
+ else
+ request_mode = qseecom.cumulative_mode;
+ } else {
+ request_mode = mode;
+ }
+ __qseecom_set_msm_bus_request(request_mode);
+
+ del_timer_sync(&(qseecom.bw_scale_down_timer));
+ qseecom.bw_scale_down_timer.expires = jiffies +
+ msecs_to_jiffies(duration);
+ add_timer(&(qseecom.bw_scale_down_timer));
+
+ mutex_unlock(&qsee_bw_mutex);
+ return ret;
+}
+
+
+static int qseecom_unregister_bus_bandwidth_needs(
+ struct qseecom_dev_handle *data)
+{
+ int32_t ret = 0;
+
+ qseecom.cumulative_mode -= data->mode;
+ data->mode = INACTIVE;
+
+ return ret;
+}
+
+static int __qseecom_register_bus_bandwidth_needs(
+ struct qseecom_dev_handle *data, uint32_t request_mode)
+{
+ int32_t ret = 0;
+
+ if (data->mode == INACTIVE) {
+ qseecom.cumulative_mode += request_mode;
+ data->mode = request_mode;
+ } else {
+ if (data->mode != request_mode) {
+ qseecom.cumulative_mode -= data->mode;
+ qseecom.cumulative_mode += request_mode;
+ data->mode = request_mode;
+ }
+ }
+ return ret;
+}
+
+static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
+ void __user *argp)
+{
+ int32_t ret = 0;
+ int32_t req_mode;
+
+ ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
+ if (ret) {
+ pr_err("copy_from_user failed\n");
+ return ret;
+ }
+ if (req_mode > HIGH) {
+ pr_err("Invalid bandwidth mode (%d)\n", req_mode);
+ return ret;
+ }
+ mutex_lock(&qsee_bw_mutex);
+ ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
+ mutex_unlock(&qsee_bw_mutex);
+
+ return ret;
+}
+
+static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
+{
+ if (!qseecom.support_bus_scaling)
+ qsee_disable_clock_vote(data, CLK_SFPB);
+ return;
+}
+
+static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
+{
+ int ret = 0;
+ if (qseecom.support_bus_scaling) {
+ qseecom_scale_bus_bandwidth_timer(
+ MEDIUM, QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
+ } else {
+ ret = qsee_vote_for_clock(data, CLK_SFPB);
+ if (ret)
+ pr_err("Fail vote for clk SFPB ret %d\n", ret);
+ }
+ return ret;
+}
+
static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
void __user *argp)
{
@@ -626,7 +788,7 @@
u32 app_id = 0;
struct ion_handle *ihandle; /* Ion handle */
struct qseecom_load_img_req load_img_req;
- int32_t ret;
+ int32_t ret = 0;
ion_phys_addr_t pa = 0;
uint32_t len;
struct qseecom_command_scm_resp resp;
@@ -641,16 +803,16 @@
return -EFAULT;
}
/* Vote for the SFPB clock */
- ret = qsee_vote_for_clock(data, CLK_SFPB);
+ ret = __qseecom_enable_clk_scale_up(data);
if (ret)
- pr_warning("Unable to vote for SFPB clock");
+ return ret;
req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
memcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
ret = __qseecom_check_app_exists(req);
if (ret < 0) {
- qsee_disable_clock_vote(data, CLK_SFPB);
+ __qseecom_disable_clk_scale_down(data);
return ret;
}
@@ -676,7 +838,7 @@
load_img_req.ifd_data_fd);
if (IS_ERR_OR_NULL(ihandle)) {
pr_err("Ion client could not retrieve the handle\n");
- qsee_disable_clock_vote(data, CLK_SFPB);
+ __qseecom_disable_clk_scale_down(data);
return -ENOMEM;
}
@@ -701,7 +863,7 @@
pr_err("scm_call to load app failed\n");
if (!IS_ERR_OR_NULL(ihandle))
ion_free(qseecom.ion_clnt, ihandle);
- qsee_disable_clock_vote(data, CLK_SFPB);
+ __qseecom_disable_clk_scale_down(data);
return -EINVAL;
}
@@ -709,7 +871,7 @@
pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
if (!IS_ERR_OR_NULL(ihandle))
ion_free(qseecom.ion_clnt, ihandle);
- qsee_disable_clock_vote(data, CLK_SFPB);
+ __qseecom_disable_clk_scale_down(data);
return -EFAULT;
}
@@ -720,7 +882,7 @@
ret);
if (!IS_ERR_OR_NULL(ihandle))
ion_free(qseecom.ion_clnt, ihandle);
- qsee_disable_clock_vote(data, CLK_SFPB);
+ __qseecom_disable_clk_scale_down(data);
return ret;
}
}
@@ -730,7 +892,7 @@
resp.result);
if (!IS_ERR_OR_NULL(ihandle))
ion_free(qseecom.ion_clnt, ihandle);
- qsee_disable_clock_vote(data, CLK_SFPB);
+ __qseecom_disable_clk_scale_down(data);
return -EFAULT;
}
@@ -739,7 +901,7 @@
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
pr_err("kmalloc failed\n");
- qsee_disable_clock_vote(data, CLK_SFPB);
+ __qseecom_disable_clk_scale_down(data);
return -ENOMEM;
}
entry->app_id = app_id;
@@ -762,10 +924,10 @@
if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
pr_err("copy_to_user failed\n");
kzfree(entry);
- qsee_disable_clock_vote(data, CLK_SFPB);
+ __qseecom_disable_clk_scale_down(data);
return -EFAULT;
}
- qsee_disable_clock_vote(data, CLK_SFPB);
+ __qseecom_disable_clk_scale_down(data);
return 0;
}
@@ -953,6 +1115,8 @@
return -EINVAL;
}
+ data->type = QSEECOM_SECURE_SERVICE;
+
switch (req.cmd_id) {
case QSEOS_RPMB_PROVISION_KEY_COMMAND:
case QSEOS_RPMB_ERASE_COMMAND:
@@ -965,15 +1129,25 @@
return -EINVAL;
}
- ret = qsee_vote_for_clock(data, CLK_DFAB);
- if (ret) {
- pr_err("Failed to vote for DFAB clock%d\n", ret);
- return ret;
- }
- ret = qsee_vote_for_clock(data, CLK_SFPB);
- if (ret) {
- pr_err("Failed to vote for SFPB clock%d\n", ret);
- goto exit_reset_dfab_freq;
+ if (qseecom.support_bus_scaling) {
+ qseecom_scale_bus_bandwidth_timer(HIGH,
+ QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+ if (ret) {
+ pr_err("Fail to set bw HIGH%d\n", ret);
+ return ret;
+ }
+ } else {
+ ret = qsee_vote_for_clock(data, CLK_DFAB);
+ if (ret) {
+ pr_err("Failed to vote for DFAB clock%d\n", ret);
+ return ret;
+ }
+ ret = qsee_vote_for_clock(data, CLK_SFPB);
+ if (ret) {
+ qsee_disable_clock_vote(data, CLK_DFAB);
+ pr_err("Failed to vote for SFPB clock%d\n", ret);
+ goto exit;
+ }
}
msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
@@ -987,7 +1161,11 @@
ION_IOC_INV_CACHES);
if (ret) {
pr_err("qseecom_scm_call failed with err: %d\n", ret);
- goto exit_reset_sdfab_freq;
+ if (!qseecom.support_bus_scaling) {
+ qsee_disable_clock_vote(data, CLK_DFAB);
+ qsee_disable_clock_vote(data, CLK_SFPB);
+ }
+ goto exit;
}
switch (resp.result) {
@@ -1010,10 +1188,7 @@
ret = -EINVAL;
break;
}
-exit_reset_sdfab_freq:
- qsee_disable_clock_vote(data, CLK_SFPB);
-exit_reset_dfab_freq:
- qsee_disable_clock_vote(data, CLK_DFAB);
+exit:
return ret;
}
@@ -1484,10 +1659,9 @@
/* Populate the remaining parameters */
load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
memcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
- ret = qsee_vote_for_clock(data, CLK_SFPB);
+ ret = __qseecom_enable_clk_scale_up(data);
if (ret) {
kzfree(img_data);
- pr_warning("Unable to vote for SFPB clock");
return -EIO;
}
@@ -1499,7 +1673,7 @@
kzfree(img_data);
if (ret) {
pr_err("scm_call to load failed : ret %d\n", ret);
- qsee_disable_clock_vote(data, CLK_SFPB);
+ __qseecom_disable_clk_scale_down(data);
return -EIO;
}
@@ -1522,7 +1696,7 @@
ret = -EINVAL;
break;
}
- qsee_disable_clock_vote(data, CLK_SFPB);
+ __qseecom_disable_clk_scale_down(data);
return ret;
}
@@ -1551,9 +1725,8 @@
/* Populate the remaining parameters */
load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
/* Vote for the SFPB clock */
- ret = qsee_vote_for_clock(data, CLK_SFPB);
+ ret = __qseecom_enable_clk_scale_up(data);
if (ret) {
- pr_err("Unable to vote for SFPB clock: ret = %d", ret);
kzfree(img_data);
return -EIO;
}
@@ -1589,7 +1762,7 @@
}
}
kzfree(img_data);
- qsee_disable_clock_vote(data, CLK_SFPB);
+ __qseecom_disable_clk_scale_down(data);
return ret;
}
@@ -1799,10 +1972,24 @@
pr_err("Unable to find the handle, exiting\n");
else
ret = qseecom_unload_app(data);
- if (data->fast_load_enabled == true)
- qsee_disable_clock_vote(data, CLK_SFPB);
- if (data->perf_enabled == true)
- qsee_disable_clock_vote(data, CLK_DFAB);
+
+ if (qseecom.support_bus_scaling) {
+ mutex_lock(&qsee_bw_mutex);
+ if (data->mode != INACTIVE) {
+ qseecom_unregister_bus_bandwidth_needs(data);
+ if (qseecom.cumulative_mode == INACTIVE) {
+ ret = __qseecom_set_msm_bus_request(INACTIVE);
+ if (ret)
+ pr_err("Fail to scale down bus\n");
+ }
+ }
+ mutex_unlock(&qsee_bw_mutex);
+ } else {
+ if (data->fast_load_enabled == true)
+ qsee_disable_clock_vote(data, CLK_SFPB);
+ if (data->perf_enabled == true)
+ qsee_disable_clock_vote(data, CLK_DFAB);
+ }
if (ret == 0) {
kzfree(data);
kzfree(*handle);
@@ -1833,7 +2020,9 @@
mutex_lock(&app_access_lock);
atomic_inc(&data->ioctl_count);
-
+ if (qseecom.support_bus_scaling)
+ qseecom_scale_bus_bandwidth_timer(INACTIVE,
+ QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
ret = __qseecom_send_cmd(data, &req);
atomic_dec(&data->ioctl_count);
@@ -1856,17 +2045,30 @@
return -EINVAL;
}
if (high) {
- ret = qsee_vote_for_clock(handle->dev, CLK_DFAB);
- if (ret)
- pr_err("Failed to vote for DFAB clock%d\n", ret);
- ret = qsee_vote_for_clock(handle->dev, CLK_SFPB);
- if (ret) {
- pr_err("Failed to vote for SFPB clock%d\n", ret);
- qsee_disable_clock_vote(handle->dev, CLK_DFAB);
+ if (qseecom.support_bus_scaling) {
+ mutex_lock(&qsee_bw_mutex);
+ __qseecom_register_bus_bandwidth_needs(handle->dev,
+ HIGH);
+ mutex_unlock(&qsee_bw_mutex);
+			if (ret)
+				pr_err("Failed to scale bus (high) %d\n", ret);
+ } else {
+ ret = qsee_vote_for_clock(handle->dev, CLK_DFAB);
+ if (ret)
+ pr_err("Failed to vote for DFAB clock%d\n",
+ ret);
+ ret = qsee_vote_for_clock(handle->dev, CLK_SFPB);
+ if (ret) {
+ pr_err("Failed to vote for SFPB clock%d\n",
+ ret);
+ qsee_disable_clock_vote(handle->dev, CLK_DFAB);
+ }
}
} else {
- qsee_disable_clock_vote(handle->dev, CLK_DFAB);
- qsee_disable_clock_vote(handle->dev, CLK_SFPB);
+ if (!qseecom.support_bus_scaling) {
+ qsee_disable_clock_vote(handle->dev, CLK_DFAB);
+ qsee_disable_clock_vote(handle->dev, CLK_SFPB);
+ }
}
return ret;
}
@@ -2231,9 +2433,8 @@
}
/* Vote for the SFPB clock */
- ret = qsee_vote_for_clock(data, CLK_SFPB);
+ ret = __qseecom_enable_clk_scale_up(data);
if (ret) {
- pr_err("Unable to vote for SFPB clock: ret = %d", ret);
ret = -EIO;
goto exit_cpu_restore;
}
@@ -2271,7 +2472,7 @@
}
exit_disable_clock:
- qsee_disable_clock_vote(data, CLK_SFPB);
+ __qseecom_disable_clk_scale_down(data);
exit_cpu_restore:
/* Restore the CPU mask */
mask = CPU_MASK_ALL;
@@ -2914,6 +3115,7 @@
break;
}
case QSEECOM_IOCTL_SEND_CMD_REQ: {
+ pr_debug("qseecom.current_mode %d\n", qseecom.current_mode);
if ((data->client.app_id == 0) ||
(data->type != QSEECOM_CLIENT_APP)) {
pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
@@ -2923,6 +3125,9 @@
}
/* Only one client allowed here at a time */
mutex_lock(&app_access_lock);
+ if (qseecom.support_bus_scaling)
+ qseecom_scale_bus_bandwidth_timer(INACTIVE,
+ QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
atomic_inc(&data->ioctl_count);
ret = qseecom_send_cmd(data, argp);
atomic_dec(&data->ioctl_count);
@@ -2933,6 +3138,7 @@
break;
}
case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ: {
+ pr_debug("qseecom.current_mode %d\n", qseecom.current_mode);
if ((data->client.app_id == 0) ||
(data->type != QSEECOM_CLIENT_APP)) {
pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
@@ -2942,6 +3148,9 @@
}
/* Only one client allowed here at a time */
mutex_lock(&app_access_lock);
+ if (qseecom.support_bus_scaling)
+ qseecom_scale_bus_bandwidth_timer(INACTIVE,
+ QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
atomic_inc(&data->ioctl_count);
ret = qseecom_send_modfd_cmd(data, argp);
atomic_dec(&data->ioctl_count);
@@ -3068,12 +3277,18 @@
break;
}
atomic_inc(&data->ioctl_count);
- ret = qsee_vote_for_clock(data, CLK_DFAB);
- if (ret)
- pr_err("Failed to vote for DFAB clock%d\n", ret);
- ret = qsee_vote_for_clock(data, CLK_SFPB);
- if (ret)
- pr_err("Failed to vote for SFPB clock%d\n", ret);
+ if (qseecom.support_bus_scaling) {
+ mutex_lock(&qsee_bw_mutex);
+ __qseecom_register_bus_bandwidth_needs(data, HIGH);
+ mutex_unlock(&qsee_bw_mutex);
+ } else {
+ ret = qsee_vote_for_clock(data, CLK_DFAB);
+ if (ret)
+ pr_err("Fail to vote for DFAB clock%d\n", ret);
+ ret = qsee_vote_for_clock(data, CLK_SFPB);
+ if (ret)
+ pr_err("Fail to vote for SFPB clock%d\n", ret);
+ }
atomic_dec(&data->ioctl_count);
break;
}
@@ -3093,8 +3308,24 @@
break;
}
atomic_inc(&data->ioctl_count);
+ if (!qseecom.support_bus_scaling) {
qsee_disable_clock_vote(data, CLK_DFAB);
qsee_disable_clock_vote(data, CLK_SFPB);
+ }
+ atomic_dec(&data->ioctl_count);
+ break;
+ }
+
+ case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
+ if ((data->client.app_id == 0) ||
+ (data->type != QSEECOM_CLIENT_APP)) {
+ pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
+ data->type, data->client.app_id);
+ ret = -EINVAL;
+ break;
+ }
+ atomic_inc(&data->ioctl_count);
+ ret = qseecom_scale_bus_bandwidth(data, argp);
atomic_dec(&data->ioctl_count);
break;
}
@@ -3292,6 +3523,7 @@
data->abort = 0;
data->type = QSEECOM_GENERIC;
data->released = false;
+ data->mode = INACTIVE;
init_waitqueue_head(&data->abort_wq);
atomic_set(&data->ioctl_count, 0);
@@ -3304,8 +3536,8 @@
int ret = 0;
if (data->released == false) {
- pr_warn("data: released = false, type = %d, data = 0x%x\n",
- data->type, (u32)data);
+ pr_warn("data: released=false, type=%d, mode=%d, data=0x%x\n",
+ data->type, data->mode, (u32)data);
switch (data->type) {
case QSEECOM_LISTENER_SERVICE:
ret = qseecom_unregister_listener(data);
@@ -3328,11 +3560,23 @@
}
}
- if (data->fast_load_enabled == true)
- qsee_disable_clock_vote(data, CLK_SFPB);
- if (data->perf_enabled == true)
- qsee_disable_clock_vote(data, CLK_DFAB);
-
+ if (qseecom.support_bus_scaling) {
+ mutex_lock(&qsee_bw_mutex);
+ if (data->mode != INACTIVE) {
+ qseecom_unregister_bus_bandwidth_needs(data);
+ if (qseecom.cumulative_mode == INACTIVE) {
+ ret = __qseecom_set_msm_bus_request(INACTIVE);
+ if (ret)
+ pr_err("Fail to scale down bus\n");
+ }
+ }
+ mutex_unlock(&qsee_bw_mutex);
+ } else {
+ if (data->fast_load_enabled == true)
+ qsee_disable_clock_vote(data, CLK_SFPB);
+ if (data->perf_enabled == true)
+ qsee_disable_clock_vote(data, CLK_DFAB);
+ }
kfree(data);
return ret;
@@ -3475,6 +3719,10 @@
qseecom.qsee.ce_core_src_clk = NULL;
qseecom.qsee.ce_bus_clk = NULL;
+ qseecom.cumulative_mode = 0;
+ qseecom.current_mode = INACTIVE;
+ qseecom.support_bus_scaling = false;
+
qseecom.ce_drv.ce_core_clk = NULL;
qseecom.ce_drv.ce_clk = NULL;
qseecom.ce_drv.ce_core_src_clk = NULL;
@@ -3554,7 +3802,11 @@
/* register client for bus scaling */
if (pdev->dev.of_node) {
-
+ qseecom.support_bus_scaling =
+ of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,support-bus-scaling");
+		pr_warn("support_bus_scaling=0x%x\n",
+			qseecom.support_bus_scaling);
if (of_property_read_u32((&pdev->dev)->of_node,
"qcom,disk-encrypt-pipe-pair",
&qseecom.ce_info.disk_encrypt_pipe)) {
@@ -3647,7 +3899,13 @@
qseecom_platform_support = (struct msm_bus_scale_pdata *)
pdev->dev.platform_data;
}
-
+ if (qseecom.support_bus_scaling) {
+ init_timer(&(qseecom.bw_scale_down_timer));
+ INIT_WORK(&qseecom.bw_inactive_req_ws,
+ qseecom_bw_inactive_req_work);
+ qseecom.bw_scale_down_timer.function =
+ qseecom_scale_bus_bandwidth_timer_callback;
+ }
qseecom.qsee_perf_client = msm_bus_scale_register_client(
qseecom_platform_support);
@@ -3713,6 +3971,11 @@
if (pdev->dev.platform_data != NULL)
msm_bus_scale_unregister_client(qseecom.qsee_perf_client);
+ if (qseecom.support_bus_scaling) {
+ cancel_work_sync(&qseecom.bw_inactive_req_ws);
+ del_timer_sync(&qseecom.bw_scale_down_timer);
+ }
+
/* register client for bus scaling */
if (pdev->dev.of_node) {
__qseecom_deinit_clk(CLK_QSEE);
diff --git a/drivers/net/ethernet/msm/msm_rmnet_bam.c b/drivers/net/ethernet/msm/msm_rmnet_bam.c
index 9f06258..7ec317a 100644
--- a/drivers/net/ethernet/msm/msm_rmnet_bam.c
+++ b/drivers/net/ethernet/msm/msm_rmnet_bam.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -341,6 +341,8 @@
if (bam_ret != 0 && bam_ret != -EAGAIN && bam_ret != -EFAULT) {
pr_err("[%s] %s: write returned error %d",
dev->name, __func__, bam_ret);
+ if (RMNET_IS_MODE_QOS(opmode))
+ skb_pull(skb, sizeof(struct QMI_QOS_HDR_S));
return -EPERM;
}
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index b743bd6..48959ab 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1773,6 +1773,12 @@
*hba_handle = hba;
+ /*
+ * The device-initialize-sequence hasn't been invoked yet.
+ * Set the device to power-off state
+ */
+ ufshcd_set_ufs_dev_poweroff(hba);
+
async_schedule(ufshcd_async_scan, hba);
return 0;
diff --git a/drivers/slimbus/slim-msm-ngd.c b/drivers/slimbus/slim-msm-ngd.c
index adca457..dc728eb 100644
--- a/drivers/slimbus/slim-msm-ngd.c
+++ b/drivers/slimbus/slim-msm-ngd.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -268,8 +268,25 @@
* Messages related to data channel management can't
* wait since they are holding reconfiguration lock.
* clk_pause in resume (which can change state back to
- * MSM_CTRL_AWAKE), will need that lock
+ * MSM_CTRL_AWAKE), will need that lock.
+ * Port disconnection, channel removal calls should pass
+ * through since there is no activity on the bus and
+ * those calls are triggered by clients due to
+ * device_down callback in that situation.
+ * Returning 0 on the disconnections and
+ * removals will ensure consistent state of channels,
+ * ports with the HW
*/
+ if ((txn->mt == SLIM_MSG_MT_DEST_REFERRED_USER) &&
+ ((mc == SLIM_USR_MC_CHAN_CTRL ||
+ mc == SLIM_USR_MC_DISCONNECT_PORT ||
+ mc == SLIM_USR_MC_RECONFIG_NOW)))
+ return 0;
+ if ((txn->mt == SLIM_MSG_MT_CORE) &&
+ ((mc == SLIM_MSG_MC_DISCONNECT_PORT ||
+ mc == SLIM_MSG_MC_NEXT_REMOVE_CHANNEL ||
+ mc == SLIM_USR_MC_RECONFIG_NOW)))
+ return 0;
if ((txn->mt == SLIM_MSG_MT_CORE) &&
((mc >= SLIM_MSG_MC_CONNECT_SOURCE &&
mc <= SLIM_MSG_MC_CHANGE_CONTENT) ||
@@ -278,7 +295,7 @@
return -EREMOTEIO;
if ((txn->mt == SLIM_MSG_MT_DEST_REFERRED_USER) &&
((mc >= SLIM_USR_MC_DEFINE_CHAN &&
- mc <= SLIM_USR_MC_DISCONNECT_PORT)))
+ mc < SLIM_USR_MC_DISCONNECT_PORT)))
return -EREMOTEIO;
timeout = wait_for_completion_timeout(&dev->ctrl_up,
HZ);
diff --git a/drivers/thermal/msm_thermal.c b/drivers/thermal/msm_thermal.c
index c366086..61217dc 100644
--- a/drivers/thermal/msm_thermal.c
+++ b/drivers/thermal/msm_thermal.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -156,7 +156,7 @@
#define VDD_RES_RO_ATTRIB(_rail, ko_attr, j, _name) \
ko_attr.attr.name = __stringify(_name); \
- ko_attr.attr.mode = 444; \
+ ko_attr.attr.mode = 0444; \
ko_attr.show = vdd_rstr_reg_##_name##_show; \
ko_attr.store = NULL; \
sysfs_attr_init(&ko_attr.attr); \
@@ -164,7 +164,7 @@
#define VDD_RES_RW_ATTRIB(_rail, ko_attr, j, _name) \
ko_attr.attr.name = __stringify(_name); \
- ko_attr.attr.mode = 644; \
+ ko_attr.attr.mode = 0644; \
ko_attr.show = vdd_rstr_reg_##_name##_show; \
ko_attr.store = vdd_rstr_reg_##_name##_store; \
sysfs_attr_init(&ko_attr.attr); \
@@ -181,7 +181,7 @@
#define OCR_RW_ATTRIB(_rail, ko_attr, j, _name) \
ko_attr.attr.name = __stringify(_name); \
- ko_attr.attr.mode = 644; \
+ ko_attr.attr.mode = 0644; \
ko_attr.show = ocr_reg_##_name##_show; \
ko_attr.store = ocr_reg_##_name##_store; \
sysfs_attr_init(&ko_attr.attr); \
@@ -189,7 +189,7 @@
#define PSM_RW_ATTRIB(_rail, ko_attr, j, _name) \
ko_attr.attr.name = __stringify(_name); \
- ko_attr.attr.mode = 644; \
+ ko_attr.attr.mode = 0644; \
ko_attr.show = psm_reg_##_name##_show; \
ko_attr.store = psm_reg_##_name##_store; \
sysfs_attr_init(&ko_attr.attr); \
@@ -425,7 +425,7 @@
static struct vdd_rstr_enable vdd_rstr_en = {
.ko_attr.attr.name = __stringify(enabled),
- .ko_attr.attr.mode = 644,
+ .ko_attr.attr.mode = 0644,
.ko_attr.show = vdd_rstr_en_show,
.ko_attr.store = vdd_rstr_en_store,
.enabled = 1,
diff --git a/drivers/video/msm/mdss/mdp3_ctrl.c b/drivers/video/msm/mdss/mdp3_ctrl.c
index 44a7e51..8b390ca 100644
--- a/drivers/video/msm/mdss/mdp3_ctrl.c
+++ b/drivers/video/msm/mdss/mdp3_ctrl.c
@@ -708,9 +708,11 @@
off_error:
mdp3_session->status = 0;
mdp3_bufq_deinit(&mdp3_session->bufq_out);
+ if (mdp3_session->overlay.id != MSMFB_NEW_REQUEST) {
+ mdp3_session->overlay.id = MSMFB_NEW_REQUEST;
+ mdp3_bufq_deinit(&mdp3_session->bufq_in);
+ }
mutex_unlock(&mdp3_session->lock);
- if (mdp3_session->overlay.id != MSMFB_NEW_REQUEST)
- mdp3_overlay_unset(mfd, mdp3_session->overlay.id);
return 0;
}
@@ -890,11 +892,7 @@
mdp3_session->overlay = *req;
if (req->id == MSMFB_NEW_REQUEST) {
if (dma->source_config.stride != stride ||
- dma->source_config.width != req->src.width ||
- dma->source_config.height != req->src.height ||
dma->source_config.format != format) {
- dma->source_config.width = req->src.width;
- dma->source_config.height = req->src.height,
dma->source_config.format = format;
dma->source_config.stride = stride;
mdp3_clk_enable(1, 0);
@@ -916,7 +914,6 @@
struct mdp3_session_data *mdp3_session = mfd->mdp.private1;
struct fb_info *fbi = mfd->fbi;
struct fb_fix_screeninfo *fix;
- struct mdss_panel_info *panel_info = mfd->panel_info;
int format;
fix = &fbi->fix;
@@ -925,8 +922,6 @@
if (mdp3_session->overlay.id == ndx && ndx == 1) {
struct mdp3_dma *dma = mdp3_session->dma;
- dma->source_config.width = panel_info->xres,
- dma->source_config.height = panel_info->yres,
dma->source_config.format = format;
dma->source_config.stride = fix->line_length;
mdp3_clk_enable(1, 0);
diff --git a/drivers/video/msm/mdss/mdss.h b/drivers/video/msm/mdss/mdss.h
index ab54cbe..53292f8 100644
--- a/drivers/video/msm/mdss/mdss.h
+++ b/drivers/video/msm/mdss/mdss.h
@@ -165,6 +165,8 @@
int current_bus_idx;
bool mixer_switched;
struct mdss_panel_cfg pan_cfg;
+
+ int handoff_pending;
};
extern struct mdss_data_type *mdss_res;
diff --git a/drivers/video/msm/mdss/mdss_dsi_panel.c b/drivers/video/msm/mdss/mdss_dsi_panel.c
index 60e2cf9..6e44099 100644
--- a/drivers/video/msm/mdss/mdss_dsi_panel.c
+++ b/drivers/video/msm/mdss/mdss_dsi_panel.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -829,7 +829,7 @@
rc = of_property_read_u32(np, "qcom,mdss-dsi-virtual-channel-id", &tmp);
pinfo->mipi.vc = (!rc ? tmp : 0);
pinfo->mipi.rgb_swap = DSI_RGB_SWAP_RGB;
- data = of_get_property(np, "mdss-dsi-color-order", NULL);
+ data = of_get_property(np, "qcom,mdss-dsi-color-order", NULL);
if (data) {
if (!strcmp(data, "rgb_swap_rbg"))
pinfo->mipi.rgb_swap = DSI_RGB_SWAP_RBG;
diff --git a/drivers/video/msm/mdss/mdss_hdmi_tx.c b/drivers/video/msm/mdss/mdss_hdmi_tx.c
index a29fb751..426855c 100644
--- a/drivers/video/msm/mdss/mdss_hdmi_tx.c
+++ b/drivers/video/msm/mdss/mdss_hdmi_tx.c
@@ -20,6 +20,7 @@
#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/types.h>
+#include <linux/msm_hdmi.h>
#include <mach/msm_hdmi_audio_codec.h>
#define REG_DUMP 0
@@ -133,6 +134,7 @@
};
struct dss_gpio ddc_gpio_config[] = {
+ {0, 1, COMPATIBLE_NAME "-ddc-mux-sel"},
{0, 1, COMPATIBLE_NAME "-ddc-clk"},
{0, 1, COMPATIBLE_NAME "-ddc-data"}
};
@@ -222,6 +224,77 @@
{20480, 247500} } },
};
+int register_hdmi_cable_notification(struct hdmi_cable_notify *handler)
+{
+ struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+ struct list_head *pos;
+
+ if (!hdmi_tx_hw.ptr) {
+ DEV_WARN("%s: HDMI Tx core not ready\n", __func__);
+ return -EPROBE_DEFER;
+ }
+
+ if (!handler) {
+ DEV_ERR("%s: Empty handler\n", __func__);
+ return -ENODEV;
+ }
+
+ hdmi_ctrl = (struct hdmi_tx_ctrl *) hdmi_tx_hw.ptr;
+
+ mutex_lock(&hdmi_ctrl->cable_notify_mutex);
+ handler->status = hdmi_ctrl->hpd_state;
+	/* append at tail (equivalent to the empty-bodied list_for_each walk) */
+	list_add_tail(&handler->link, &hdmi_ctrl->cable_notify_handlers);
+ mutex_unlock(&hdmi_ctrl->cable_notify_mutex);
+
+ return handler->status;
+} /* register_hdmi_cable_notification */
+
+int unregister_hdmi_cable_notification(struct hdmi_cable_notify *handler)
+{
+ struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+
+ if (!hdmi_tx_hw.ptr) {
+ DEV_WARN("%s: HDMI Tx core not ready\n", __func__);
+ return -ENODEV;
+ }
+
+ if (!handler) {
+ DEV_ERR("%s: Empty handler\n", __func__);
+ return -ENODEV;
+ }
+
+ hdmi_ctrl = (struct hdmi_tx_ctrl *) hdmi_tx_hw.ptr;
+
+ mutex_lock(&hdmi_ctrl->cable_notify_mutex);
+ list_del(&handler->link);
+ mutex_unlock(&hdmi_ctrl->cable_notify_mutex);
+
+ return 0;
+} /* unregister_hdmi_cable_notification */
+
+static void hdmi_tx_cable_notify_work(struct work_struct *work)
+{
+ struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+ struct hdmi_cable_notify *pos;
+
+ hdmi_ctrl = container_of(work, struct hdmi_tx_ctrl, cable_notify_work);
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid hdmi data\n", __func__);
+ return;
+ }
+
+ mutex_lock(&hdmi_ctrl->cable_notify_mutex);
+ list_for_each_entry(pos, &hdmi_ctrl->cable_notify_handlers, link) {
+ if (pos->status != hdmi_ctrl->hpd_state) {
+ pos->status = hdmi_ctrl->hpd_state;
+ pos->hpd_notify(pos);
+ }
+ }
+ mutex_unlock(&hdmi_ctrl->cable_notify_mutex);
+} /* hdmi_tx_cable_notify_work */
+
static bool hdmi_tx_is_cea_format(int mode)
{
bool cea_fmt;
@@ -327,6 +400,9 @@
if (!hdmi_ctrl->pdata.primary && (hdmi_ctrl->sdev.state != val))
switch_set_state(&hdmi_ctrl->sdev, val);
+
+ /* Notify all registered modules of cable connection status */
+ schedule_work(&hdmi_ctrl->cable_notify_work);
} /* hdmi_tx_send_cable_notification */
static inline u32 hdmi_tx_is_dvi_mode(struct hdmi_tx_ctrl *hdmi_ctrl)
@@ -2835,6 +2911,7 @@
if (hdmi_ctrl->workq)
destroy_workqueue(hdmi_ctrl->workq);
mutex_destroy(&hdmi_ctrl->lut_lock);
+ mutex_destroy(&hdmi_ctrl->cable_notify_mutex);
mutex_destroy(&hdmi_ctrl->mutex);
hdmi_tx_hw.ptr = NULL;
@@ -2864,6 +2941,10 @@
hdmi_setup_video_mode_lut();
mutex_init(&hdmi_ctrl->mutex);
mutex_init(&hdmi_ctrl->lut_lock);
+ mutex_init(&hdmi_ctrl->cable_notify_mutex);
+
+ INIT_LIST_HEAD(&hdmi_ctrl->cable_notify_handlers);
+
hdmi_ctrl->workq = create_workqueue("hdmi_tx_workq");
if (!hdmi_ctrl->workq) {
DEV_ERR("%s: hdmi_tx_workq creation failed.\n", __func__);
@@ -2881,8 +2962,9 @@
hdmi_ctrl->hpd_initialized = false;
hdmi_ctrl->hpd_off_pending = false;
init_completion(&hdmi_ctrl->hpd_done);
- INIT_WORK(&hdmi_ctrl->hpd_int_work, hdmi_tx_hpd_int_work);
+ INIT_WORK(&hdmi_ctrl->hpd_int_work, hdmi_tx_hpd_int_work);
+ INIT_WORK(&hdmi_ctrl->cable_notify_work, hdmi_tx_cable_notify_work);
INIT_WORK(&hdmi_ctrl->power_off_work, hdmi_tx_power_off_work);
spin_lock_init(&hdmi_ctrl->hpd_state_lock);
diff --git a/drivers/video/msm/mdss/mdss_hdmi_tx.h b/drivers/video/msm/mdss/mdss_hdmi_tx.h
index 0787dee..8233ba8 100644
--- a/drivers/video/msm/mdss/mdss_hdmi_tx.h
+++ b/drivers/video/msm/mdss/mdss_hdmi_tx.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -55,6 +55,8 @@
struct mutex mutex;
struct mutex lut_lock;
+ struct mutex cable_notify_mutex;
+ struct list_head cable_notify_handlers;
struct kobject *kobj;
struct switch_dev sdev;
struct switch_dev audio_sdev;
@@ -78,6 +80,7 @@
struct work_struct hpd_int_work;
struct work_struct power_off_work;
+ struct work_struct cable_notify_work;
bool hdcp_feature_on;
u32 present_hdcp;
diff --git a/drivers/video/msm/mdss/mdss_mdp.c b/drivers/video/msm/mdss/mdss_mdp.c
index fe33331..a42aa87 100644
--- a/drivers/video/msm/mdss/mdss_mdp.c
+++ b/drivers/video/msm/mdss/mdss_mdp.c
@@ -80,6 +80,7 @@
static DEFINE_SPINLOCK(mdp_lock);
static DEFINE_MUTEX(mdp_clk_lock);
static DEFINE_MUTEX(bus_bw_lock);
+static DEFINE_MUTEX(mdp_iommu_lock);
#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \
{ \
@@ -654,6 +655,8 @@
pm_runtime_get_sync(&mdata->pdev->dev);
msm_bus_scale_client_update_request(
mdata->bus_hdl, mdata->current_bus_idx);
+ if (!mdata->handoff_pending)
+ mdss_iommu_attach(mdata);
}
}
@@ -784,8 +787,10 @@
struct mdss_iommu_map_type *iomap;
int i;
+ mutex_lock(&mdp_iommu_lock);
if (mdata->iommu_attached) {
pr_debug("mdp iommu already attached\n");
+ mutex_unlock(&mdp_iommu_lock);
return 0;
}
@@ -802,6 +807,7 @@
}
mdata->iommu_attached = true;
+ mutex_unlock(&mdp_iommu_lock);
return 0;
}
@@ -812,8 +818,10 @@
struct mdss_iommu_map_type *iomap;
int i;
+ mutex_lock(&mdp_iommu_lock);
if (!mdata->iommu_attached) {
pr_debug("mdp iommu already dettached\n");
+ mutex_unlock(&mdp_iommu_lock);
return 0;
}
@@ -830,6 +838,7 @@
}
mdata->iommu_attached = false;
+ mutex_unlock(&mdp_iommu_lock);
return 0;
}
@@ -1048,11 +1057,13 @@
if (mdata != NULL) {
if (on) {
pr_debug("Enable MDP FS for splash.\n");
+ mdata->handoff_pending = true;
regulator_enable(mdata->fs);
mdss_hw_init(mdata);
} else {
pr_debug("Disable MDP FS for splash.\n");
regulator_disable(mdata->fs);
+ mdata->handoff_pending = false;
}
} else {
pr_warn("mdss mdata not initialized\n");
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index 51ecdbd..8ea86d0 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -2550,8 +2550,8 @@
* increasing ref_cnt to help balance clocks once done.
*/
if (pdata->panel_info.cont_splash_enabled) {
- mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
mdss_mdp_footswitch_ctrl_splash(1);
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
}
return 0;
}
diff --git a/drivers/video/msm/mdss/mdss_mdp_pipe.c b/drivers/video/msm/mdss/mdss_mdp_pipe.c
index 88a2a69..4ade335 100644
--- a/drivers/video/msm/mdss/mdss_mdp_pipe.c
+++ b/drivers/video/msm/mdss/mdss_mdp_pipe.c
@@ -505,7 +505,7 @@
* shared as long as its attached to a writeback mixer
*/
pipe = mdata->dma_pipes + mixer->num;
- mdss_mdp_pipe_map(pipe);
+ atomic_inc(&pipe->ref_cnt);
pr_debug("pipe sharing for pipe=%d\n", pipe->num);
} else {
pr_err("no %d type pipes available\n", type);
@@ -624,6 +624,7 @@
mdss_mdp_smp_free(pipe);
pipe->flags = 0;
pipe->bwc_mode = 0;
+ pipe->mfd = NULL;
memset(&pipe->scale, 0, sizeof(struct mdp_scale_data));
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
diff --git a/include/linux/mfd/wcd9xxx/core-resource.h b/include/linux/mfd/wcd9xxx/core-resource.h
index 442496e..b45cf6a 100644
--- a/include/linux/mfd/wcd9xxx/core-resource.h
+++ b/include/linux/mfd/wcd9xxx/core-resource.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -67,6 +67,8 @@
unsigned short, u8);
int (*codec_bulk_read) (struct wcd9xxx_core_resource *,
unsigned short, int, u8 *);
+ int (*codec_bulk_write) (struct wcd9xxx_core_resource *,
+ unsigned short, int, u8 *);
/* Pointer to parent container data structure */
void *parent;
@@ -80,6 +82,8 @@
int (*codec_read)(struct wcd9xxx_core_resource *, unsigned short),
int (*codec_write)(struct wcd9xxx_core_resource *, unsigned short, u8),
int (*codec_bulk_read) (struct wcd9xxx_core_resource *, unsigned short,
+ int, u8 *),
+ int (*codec_bulk_write) (struct wcd9xxx_core_resource *, unsigned short,
int, u8 *));
extern void wcd9xxx_core_res_deinit(
diff --git a/include/linux/msm_hdmi.h b/include/linux/msm_hdmi.h
new file mode 100644
index 0000000..70fae94
--- /dev/null
+++ b/include/linux/msm_hdmi.h
@@ -0,0 +1,57 @@
+/* include/linux/msm_hdmi.h
+ *
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MSM_HDMI_H_
+#define _MSM_HDMI_H_
+
+/*
+ * HDMI cable notify handler sturcture.
+ * link A link for the linked list
+ * status Current status of HDMI cable connection
+ * hpd_notify Callback function to provide cable status
+ */
+struct hdmi_cable_notify {
+ struct list_head link;
+ int status;
+ void (*hpd_notify) (struct hdmi_cable_notify *h);
+};
+
+#ifdef CONFIG_FB_MSM_MDSS_HDMI_PANEL
+/*
+ * Register for HDMI cable connect or disconnect notification.
+ * @param handler callback handler for notification
+ * @return negative value as error otherwise current status of cable
+ */
+int register_hdmi_cable_notification(
+ struct hdmi_cable_notify *handler);
+
+/*
+ * Un-register for HDMI cable connect or disconnect notification.
+ * @param handler callback handler for notification
+ * @return negative value as error
+ */
+int unregister_hdmi_cable_notification(
+ struct hdmi_cable_notify *handler);
+#else
+static inline int register_hdmi_cable_notification(
+		struct hdmi_cable_notify *handler) {
+	return 0;
+}
+
+static inline int unregister_hdmi_cable_notification(
+		struct hdmi_cable_notify *handler) {
+	return 0;
+}
+#endif /* CONFIG_FB_MSM_MDSS_HDMI_PANEL */
+
+#endif /*_MSM_HDMI_H_*/
diff --git a/include/linux/qpnp/qpnp-adc.h b/include/linux/qpnp/qpnp-adc.h
index e6dcc3b..2b47b88 100644
--- a/include/linux/qpnp/qpnp-adc.h
+++ b/include/linux/qpnp/qpnp-adc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -190,6 +190,7 @@
* %CHAN_PATH_SCALING2: ratio of {1, 4}
* %CHAN_PATH_SCALING3: ratio of {1, 6}
* %CHAN_PATH_SCALING4: ratio of {1, 20}
+ * %CHAN_PATH_SCALING5: ratio of {1, 8}
* %CHAN_PATH_NONE: Do not use this pre-scaling ratio type.
*
* The pre-scaling is applied for signals to be within the voltage range
@@ -201,6 +202,7 @@
PATH_SCALING2,
PATH_SCALING3,
PATH_SCALING4,
+ PATH_SCALING5,
PATH_SCALING_NONE,
};
@@ -893,7 +895,8 @@
{1, 3},
{1, 4},
{1, 6},
- {1, 20}
+ {1, 20},
+ {1, 8}
};
/**
diff --git a/include/linux/qrng.h b/include/linux/qrng.h
index 35708e3..8c09627 100644
--- a/include/linux/qrng.h
+++ b/include/linux/qrng.h
@@ -1,5 +1,5 @@
-#ifndef __QRNG_H_
-#define __QRNG_H_
+#ifndef _QRNG_H_
+#define _QRNG_H_
#include <linux/types.h>
#include <linux/ioctl.h>
@@ -9,4 +9,4 @@
#define QRNG_IOCTL_RESET_BUS_BANDWIDTH\
_IO(QRNG_IOC_MAGIC, 1)
-#endif /* __QRNG_H_ */
+#endif /* _QRNG_H_ */
diff --git a/lib/Kconfig b/lib/Kconfig
index 8437e36..09f0d8a 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -379,7 +379,7 @@
Implementation is done using GnuPG MPI library
config QMI_ENCDEC
- bool
+ bool "QMI Encode/Decode Library"
help
Library to encode & decode QMI messages from within
the kernel. The kernel drivers encode the C structure into
@@ -388,7 +388,7 @@
and then decode it into a C structure.
config QMI_ENCDEC_DEBUG
- bool
+ bool "QMI Encode/Decode Library Debug"
help
Kernel config option to enable debugging QMI Encode/Decode
library. This will log the information regarding the element
diff --git a/net/wireless/db.txt b/net/wireless/db.txt
index 24bb5d0..35b86ff 100644
--- a/net/wireless/db.txt
+++ b/net/wireless/db.txt
@@ -909,7 +909,7 @@
(5250 - 5330 @ 80), (3, 24), DFS
(5490 - 5600 @ 80), (3, 24), DFS
(5650 - 5710 @ 40), (3, 24), DFS
- (5735 - 5835 @ 80), (3, 30)
+ (5710 - 5835 @ 80), (3, 30)
# 60g band
# reference: http://cfr.regstoday.com/47cfr15.aspx#47_CFR_15p255
# channels 1,2,3, EIRP=40dBm(43dBm peak)
diff --git a/sound/soc/codecs/msm8x10-wcd.c b/sound/soc/codecs/msm8x10-wcd.c
index 87416dd..7706e3e 100644
--- a/sound/soc/codecs/msm8x10-wcd.c
+++ b/sound/soc/codecs/msm8x10-wcd.c
@@ -397,6 +397,32 @@
return temp;
}
+static int __msm8x10_wcd_bulk_write(struct msm8x10_wcd *msm8x10_wcd,
+ unsigned short reg, int count, u8 *buf)
+{
+ int ret = -EINVAL;
+ mutex_lock(&msm8x10_wcd->io_lock);
+ if (MSM8X10_WCD_IS_HELICON_REG(reg))
+ ret = msm8x10_wcd_i2c_write(reg, count, buf);
+ else if (MSM8X10_WCD_IS_DINO_REG(reg))
+ ret = msm8x10_wcd_abh_write_device(msm8x10_wcd, reg,
+ buf, count);
+ if (ret < 0)
+ dev_err(msm8x10_wcd->dev,
+ "%s: codec bulk write failed\n", __func__);
+ mutex_unlock(&msm8x10_wcd->io_lock);
+ return ret;
+}
+
+int msm8x10_wcd_bulk_write(struct wcd9xxx_core_resource *core_res,
+ unsigned short reg, int count, u8 *buf)
+{
+ struct msm8x10_wcd *msm8x10_wcd =
+ (struct msm8x10_wcd *) core_res->parent;
+ return __msm8x10_wcd_bulk_write(msm8x10_wcd, reg, count, buf);
+}
+EXPORT_SYMBOL(msm8x10_wcd_bulk_write);
+
int msm8x10_wcd_reg_read(struct wcd9xxx_core_resource *core_res,
unsigned short reg)
{
@@ -3618,7 +3644,8 @@
MSM8X10_WCD_NUM_IRQ_REGS,
msm8x10_wcd_reg_read,
msm8x10_wcd_reg_write,
- msm8x10_wcd_bulk_read);
+ msm8x10_wcd_bulk_read,
+ msm8x10_wcd_bulk_write);
if (wcd9xxx_core_irq_init(core_res)) {
dev_err(msm8x10->dev,
"%s: irq initialization failed\n", __func__);
diff --git a/sound/soc/msm/qdsp6v2/audio_ocmem.c b/sound/soc/msm/qdsp6v2/audio_ocmem.c
index 4c76168..3fe8033 100644
--- a/sound/soc/msm/qdsp6v2/audio_ocmem.c
+++ b/sound/soc/msm/qdsp6v2/audio_ocmem.c
@@ -536,6 +536,7 @@
mutex_unlock(&audio_ocmem_lcl.state_process_lock);
fail_cmd:
pr_debug("%s: exit\n", __func__);
+ audio_ocmem_lcl.buf = NULL;
audio_ocmem_lcl.audio_ocmem_running = false;
return ret;
}
@@ -834,6 +835,7 @@
ret = ocmem_free(OCMEM_LP_AUDIO, audio_ocmem_lcl.buf);
if (ret)
pr_err("%s: ocmem_free failed\n", __func__);
+ audio_ocmem_lcl.buf = NULL;
}
static int lpass_notifier_cb(struct notifier_block *this, unsigned long code,
diff --git a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
index 5f512ae..6394f0b 100644
--- a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
@@ -409,7 +409,11 @@
case FORMAT_MPEG4_AAC:
memset(&aac_cfg, 0x0, sizeof(struct asm_aac_cfg));
aac_cfg.aot = AAC_ENC_MODE_EAAC_P;
- aac_cfg.format = 0x03;
+ if (prtd->codec_param.codec.format ==
+ SND_AUDIOSTREAMFORMAT_MP4ADTS)
+ aac_cfg.format = 0x0;
+ else
+ aac_cfg.format = 0x03;
aac_cfg.ch_cfg = prtd->num_channels;
aac_cfg.sample_rate = prtd->sample_rate;
ret = q6asm_stream_media_format_block_aac(prtd->audio_client,
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index e60cd26..dc57996 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -1219,6 +1219,9 @@
}
if (data->opcode == RESET_EVENTS) {
+		if (ac->apr == NULL) {
+			ac->apr = ac->apr2;
+		}
pr_debug("q6asm_callback: Reset event is received: %d %d apr[%p]\n",
data->reset_event, data->reset_proc, ac->apr);
if (ac->cb)