Merge "USB: HSIC SMSC HUB: Add support for standalone HSIC configuration"
diff --git a/Documentation/devicetree/bindings/power/qpnp-charger.txt b/Documentation/devicetree/bindings/power/qpnp-charger.txt
index 50381a2..87ecc64 100644
--- a/Documentation/devicetree/bindings/power/qpnp-charger.txt
+++ b/Documentation/devicetree/bindings/power/qpnp-charger.txt
@@ -106,6 +106,10 @@
- qcom,vbatdet-maxerr-mv	This property in mV is a hysteresis value for the charge
resume voltage property qcom,vbatdet-delta-mv. If this
property is not defined it defaults to 50 mV.
+- qcom,parallel-ovp-mode	When this property is set, charging is allowed through
+				both the DC and USB OVP FETs. Note that this should
+				only be enabled on PM8941 board designs where DC_IN
+				and USB_IN are connected via a short.
Sub node required structure:
- A qcom,chg node must be a child of an SPMI node that has specified
diff --git a/arch/arm/boot/dts/msm8226-1080p-mtp.dtsi b/arch/arm/boot/dts/msm8226-1080p-mtp.dtsi
index a99df65..3734273 100644
--- a/arch/arm/boot/dts/msm8226-1080p-mtp.dtsi
+++ b/arch/arm/boot/dts/msm8226-1080p-mtp.dtsi
@@ -120,6 +120,7 @@
qcom,cdc-mclk-gpios = <&pm8226_gpios 1 0>;
qcom,cdc-vdd-spkr-gpios = <&pm8226_gpios 2 0>;
+ qcom,cdc-us-euro-gpios = <&msmgpio 69 0>;
};
sound-9302 {
@@ -137,6 +138,7 @@
qcom,cdc-mclk-gpios = <&pm8226_gpios 1 0>;
qcom,cdc-vdd-spkr-gpios = <&pm8226_gpios 2 0>;
+ qcom,cdc-us-euro-gpios = <&msmgpio 69 0>;
};
};
diff --git a/arch/arm/boot/dts/msm8226-720p-mtp.dtsi b/arch/arm/boot/dts/msm8226-720p-mtp.dtsi
index 7f4f8fc..b4d9139 100644
--- a/arch/arm/boot/dts/msm8226-720p-mtp.dtsi
+++ b/arch/arm/boot/dts/msm8226-720p-mtp.dtsi
@@ -110,6 +110,7 @@
qcom,cdc-mclk-gpios = <&pm8226_gpios 1 0>;
qcom,cdc-vdd-spkr-gpios = <&pm8226_gpios 2 0>;
+ qcom,cdc-us-euro-gpios = <&msmgpio 69 0>;
};
sound-9302 {
@@ -127,6 +128,7 @@
qcom,cdc-mclk-gpios = <&pm8226_gpios 1 0>;
qcom,cdc-vdd-spkr-gpios = <&pm8226_gpios 2 0>;
+ qcom,cdc-us-euro-gpios = <&msmgpio 69 0>;
};
};
diff --git a/arch/arm/boot/dts/msm8610-v1.dtsi b/arch/arm/boot/dts/msm8610-v1.dtsi
index 9d8c411..8965e93 100644
--- a/arch/arm/boot/dts/msm8610-v1.dtsi
+++ b/arch/arm/boot/dts/msm8610-v1.dtsi
@@ -20,6 +20,7 @@
/include/ "msm8610-v1-pm.dtsi"
/ {
- qcom,msm-id = <147 0>, <165 0>, <161 0>, <162 0>,
- <163 0>, <164 0>, <166 0>;
+ qcom,msm-id = <147 1000>, <165 1000>, <161 1000>, <162 1000>,
+ <163 1000>, <164 1000>, <166 1000>, <225 1000>,
+ <226 1000>;
};
diff --git a/arch/arm/boot/dts/msm8610-v2.dtsi b/arch/arm/boot/dts/msm8610-v2.dtsi
index a1f466a..221598d 100644
--- a/arch/arm/boot/dts/msm8610-v2.dtsi
+++ b/arch/arm/boot/dts/msm8610-v2.dtsi
@@ -21,5 +21,6 @@
/ {
qcom,msm-id = <147 0x10001>, <165 0x10001>, <161 0x10001>, <162 0x10001>,
- <163 0x10001>, <164 0x10001>, <166 0x10001>;
+ <163 0x10001>, <164 0x10001>, <166 0x10001>,
+ <225 0x10001>, <226 0x10001>;
};
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index f1c285d..e72e5ff 100755
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -276,6 +276,7 @@
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
+CONFIG_DM_REQ_CRYPT=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
CONFIG_TUN=y
@@ -540,8 +541,9 @@
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_ARC4=y
CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_XTS=y
CONFIG_NFC_QNCI=y
-CONFIG_CRYPTO_DEV_QCRYPTO=m
+CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCE=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_SND_SOC_MSM_HDMI_CODEC_RX=y
diff --git a/arch/arm/mach-msm/socinfo.c b/arch/arm/mach-msm/socinfo.c
index e7cec58..9f50547 100644
--- a/arch/arm/mach-msm/socinfo.c
+++ b/arch/arm/mach-msm/socinfo.c
@@ -393,6 +393,8 @@
[164] = MSM_CPU_8610,
[165] = MSM_CPU_8610,
[166] = MSM_CPU_8610,
+ [225] = MSM_CPU_8610,
+ [226] = MSM_CPU_8610,
/* 8064AB IDs */
[153] = MSM_CPU_8064AB,
diff --git a/block/blk-core.c b/block/blk-core.c
index 00eab3b..7afd6cf 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1221,8 +1221,9 @@
elv_completed_request(q, req);
- /* this is a bio leak */
- WARN_ON(req->bio != NULL);
+ /* this is a bio leak if the bio is not tagged with BIO_DONTFREE */
+ WARN_ON(req->bio && !bio_flagged(req->bio, BIO_DONTFREE));
+
/*
* Request may not have originated from ll_rw_blk. if not,
@@ -2274,6 +2275,15 @@
blk_account_io_completion(req, nr_bytes);
total_bytes = bio_nbytes = 0;
+
+	/*
+	 * Check whether the bio is flagged: request-based dm needs to
+	 * perform post-processing, so don't end the bios or the request
+	 * here; the dm layer takes care of that.
+	 */
+ if (bio_flagged(req->bio, BIO_DONTFREE))
+ return false;
+
while ((bio = req->bio) != NULL) {
int nbytes;
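A condensed view of the contract this hunk introduces; the producer side is what the dm-req-crypt target added later in this patch does for each cloned bio (sketch only, not an applied hunk):

	/* producer: a request-based dm target flags each cloned bio before dispatch */
	bio->bi_flags |= 1 << BIO_DONTFREE;

	/* consumer: blk_update_request(), as changed above */
	if (bio_flagged(req->bio, BIO_DONTFREE))
		return false;	/* dm completes and frees the bios itself */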
diff --git a/drivers/gpu/ion/ion_heap.c b/drivers/gpu/ion/ion_heap.c
index 6261d89..aa2551a 100644
--- a/drivers/gpu/ion/ion_heap.c
+++ b/drivers/gpu/ion/ion_heap.c
@@ -141,9 +141,11 @@
if (!ptr)
return -ENOMEM;
- memset(ptr, 0, npages_to_vmap * PAGE_SIZE);
/*
- * invalidate the cache to pick up the zeroing
+ * We have to invalidate the cache here because there
+ * might be dirty lines to these physical pages (which
+ * we don't care about) that could get written out at
+ * any moment.
*/
for (k = 0; k < npages_to_vmap; k++) {
void *p = kmap_atomic(pages[i + k]);
@@ -154,6 +156,7 @@
outer_inv_range(phys, phys + PAGE_SIZE);
kunmap_atomic(p);
}
+ memset(ptr, 0, npages_to_vmap * PAGE_SIZE);
vunmap(ptr);
}
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 7d009ce..5f63cb6 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -723,4 +723,23 @@
return ret;
}
+/**
+ * kgsl_sysfs_store() - parse a string from a sysfs store function
+ * @buf: Incoming string to parse
+ * @ptr: Pointer to an unsigned int to store the value
+ */
+static inline int kgsl_sysfs_store(const char *buf, unsigned int *ptr)
+{
+ unsigned int val;
+ int rc;
+
+ rc = kstrtou32(buf, 0, &val);
+ if (rc)
+ return rc;
+
+ if (ptr)
+ *ptr = val;
+
+ return 0;
+}
#endif /* __KGSL_DEVICE_H */
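The kgsl_pwrctrl.c conversions below all follow the same calling pattern; a minimal sketch of a store handler built on this helper (foo_store and the elided body are illustrative, not part of the driver):

	static ssize_t foo_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
	{
		unsigned int val = 0;
		int ret;

		ret = kgsl_sysfs_store(buf, &val);
		if (ret)
			return ret;	/* propagate -EINVAL/-ERANGE */

		/* ... apply val under the device mutex ... */
		return count;
	}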
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 5c7e7b8..9353b2e 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -220,19 +220,18 @@
{
struct kgsl_device *device = kgsl_device_from_dev(dev);
struct kgsl_pwrctrl *pwr;
- int ret, level;
+ int ret;
+ unsigned int level = 0;
if (device == NULL)
return 0;
pwr = &device->pwrctrl;
- ret = sscanf(buf, "%d", &level);
- if (ret != 1)
- return count;
+ ret = kgsl_sysfs_store(buf, &level);
- if (level < 0)
- return count;
+ if (ret)
+ return ret;
mutex_lock(&device->mutex);
@@ -274,20 +273,17 @@
{
struct kgsl_device *device = kgsl_device_from_dev(dev);
struct kgsl_pwrctrl *pwr;
- int ret, level, max_level;
+ int ret, max_level;
+ unsigned int level = 0;
if (device == NULL)
return 0;
pwr = &device->pwrctrl;
- ret = sscanf(buf, "%d", &level);
- if (ret != 1)
- return count;
-
- /* If the use specifies a negative number, then don't change anything */
- if (level < 0)
- return count;
+ ret = kgsl_sysfs_store(buf, &level);
+ if (ret)
+ return ret;
mutex_lock(&device->mutex);
@@ -329,20 +325,17 @@
const char *buf, size_t count)
{
struct kgsl_device *device = kgsl_device_from_dev(dev);
struct kgsl_pwrctrl *pwr;
- int ret, level, min_level;
+ int ret, min_level;
+ unsigned int level = 0;
if (device == NULL)
return 0;
pwr = &device->pwrctrl;
- ret = sscanf(buf, "%d", &level);
- if (ret != 1)
- return count;
-
- /* Don't do anything on obviously incorrect values */
- if (level < 0)
- return count;
+ ret = kgsl_sysfs_store(buf, &level);
+ if (ret)
+ return ret;
mutex_lock(&device->mutex);
if (level > pwr->num_pwrlevels - 2)
@@ -413,7 +406,7 @@
{
struct kgsl_device *device = kgsl_device_from_dev(dev);
struct kgsl_pwrctrl *pwr;
- unsigned long val;
+ unsigned int val = 0;
int ret, level;
if (device == NULL)
@@ -421,9 +414,9 @@
pwr = &device->pwrctrl;
- ret = sscanf(buf, "%ld", &val);
- if (ret != 1)
- return count;
+ ret = kgsl_sysfs_store(buf, &val);
+ if (ret)
+ return ret;
mutex_lock(&device->mutex);
level = _get_nearest_pwrlevel(pwr, val);
@@ -465,7 +458,7 @@
{
struct kgsl_device *device = kgsl_device_from_dev(dev);
struct kgsl_pwrctrl *pwr;
- unsigned long val;
+ unsigned int val = 0;
int ret, level;
if (device == NULL)
@@ -473,9 +466,9 @@
pwr = &device->pwrctrl;
- ret = sscanf(buf, "%ld", &val);
- if (ret != 1)
- return count;
+ ret = kgsl_sysfs_store(buf, &val);
+ if (ret)
+ return ret;
mutex_lock(&device->mutex);
level = _get_nearest_pwrlevel(pwr, val);
@@ -502,22 +495,19 @@
struct device_attribute *attr,
const char *buf, size_t count)
{
- char temp[20];
- unsigned long val;
+ unsigned int val = 0;
struct kgsl_device *device = kgsl_device_from_dev(dev);
struct kgsl_pwrctrl *pwr;
const long div = 1000/HZ;
- int rc;
+ int ret;
if (device == NULL)
return 0;
pwr = &device->pwrctrl;
- snprintf(temp, sizeof(temp), "%.*s",
- (int)min(count, sizeof(temp) - 1), buf);
- rc = strict_strtoul(temp, 0, &val);
- if (rc)
- return rc;
+ ret = kgsl_sysfs_store(buf, &val);
+ if (ret)
+ return ret;
mutex_lock(&device->mutex);
@@ -547,19 +537,16 @@
struct device_attribute *attr,
const char *buf, size_t count)
{
- char temp[20];
- unsigned long val;
+ unsigned int val = 0;
struct kgsl_device *device = kgsl_device_from_dev(dev);
- int rc;
+ int ret;
if (device == NULL)
return 0;
- snprintf(temp, sizeof(temp), "%.*s",
- (int)min(count, sizeof(temp) - 1), buf);
- rc = kstrtoul(temp, 0, &val);
- if (rc)
- return rc;
+ ret = kgsl_sysfs_store(buf, &val);
+ if (ret)
+ return ret;
mutex_lock(&device->mutex);
device->pwrctrl.pm_qos_latency = val;
@@ -694,19 +681,16 @@
const char *buf, size_t count,
int flag)
{
- char temp[20];
- unsigned long val;
+ unsigned int val = 0;
struct kgsl_device *device = kgsl_device_from_dev(dev);
- int rc;
+ int ret;
if (device == NULL)
return 0;
- snprintf(temp, sizeof(temp), "%.*s",
- (int)min(count, sizeof(temp) - 1), buf);
- rc = kstrtoul(temp, 0, &val);
- if (rc)
- return rc;
+ ret = kgsl_sysfs_store(buf, &val);
+ if (ret)
+ return ret;
mutex_lock(&device->mutex);
__force_on(device, flag, val);
@@ -772,19 +756,16 @@
struct device_attribute *attr,
const char *buf, size_t count)
{
- char temp[20];
- unsigned long val;
+ unsigned int val = 0;
struct kgsl_device *device = kgsl_device_from_dev(dev);
- int rc;
+ int ret;
if (device == NULL)
return 0;
- snprintf(temp, sizeof(temp), "%.*s",
- (int)min(count, sizeof(temp) - 1), buf);
- rc = kstrtoul(temp, 0, &val);
- if (rc)
- return rc;
+ ret = kgsl_sysfs_store(buf, &val);
+ if (ret)
+ return ret;
mutex_lock(&device->mutex);
device->pwrctrl.bus_control = val ? true : false;
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 10f122a..67df573 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -237,6 +237,23 @@
If unsure, say N.
+config DM_REQ_CRYPT
+	tristate "Request based crypt target support"
+ depends on BLK_DEV_DM
+ select XTS
+ select CRYPTO_XTS
+ ---help---
+	  This request-based device-mapper target allows you to create a
+	  device that transparently encrypts the data on it. You'll need
+	  to activate the ciphers you're going to use in the cryptoapi
+	  configuration. dm-req-crypt operates on whole requests (larger
+	  payloads) to make better use of crypto hardware.
+
+ To compile this code as a module, choose M here: the module will
+ be called dm-req-crypt.
+
+ If unsure, say N.
+
config DM_SNAPSHOT
tristate "Snapshot target"
depends on BLK_DEV_DM
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 8b2e0df..7b16079 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -43,7 +43,7 @@
obj-$(CONFIG_DM_RAID) += dm-raid.o
obj-$(CONFIG_DM_THIN_PROVISIONING) += dm-thin-pool.o
obj-$(CONFIG_DM_VERITY) += dm-verity.o
-
+obj-$(CONFIG_DM_REQ_CRYPT) += dm-req-crypt.o
ifeq ($(CONFIG_DM_UEVENT),y)
dm-mod-objs += dm-uevent.o
endif
diff --git a/drivers/md/dm-req-crypt.c b/drivers/md/dm-req-crypt.c
new file mode 100644
index 0000000..16141b5
--- /dev/null
+++ b/drivers/md/dm-req-crypt.c
@@ -0,0 +1,752 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/completion.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/mempool.h>
+#include <linux/slab.h>
+#include <linux/crypto.h>
+#include <linux/workqueue.h>
+#include <linux/backing-dev.h>
+#include <linux/atomic.h>
+#include <linux/scatterlist.h>
+#include <crypto/scatterwalk.h>
+#include <asm/page.h>
+#include <asm/unaligned.h>
+#include <crypto/hash.h>
+#include <crypto/md5.h>
+#include <crypto/algapi.h>
+#include <mach/qcrypto.h>
+
+#include <linux/device-mapper.h>
+
+
+#define DM_MSG_PREFIX "req-crypt"
+
+#define MAX_SG_LIST 1024
+#define REQ_DM_512_KB (512*1024)
+#define MAX_ENCRYPTION_BUFFERS 1
+#define MIN_IOS 16
+#define MIN_POOL_PAGES 32
+#define KEY_SIZE_XTS 32
+#define AES_XTS_IV_LEN 16
+
+#define DM_REQ_CRYPT_ERROR -1
+
+struct req_crypt_result {
+ struct completion completion;
+ int err;
+};
+
+struct dm_dev *dev;
+static struct kmem_cache *_req_crypt_io_pool;
+sector_t start_sector_orig;
+struct workqueue_struct *req_crypt_queue;
+mempool_t *req_io_pool;
+mempool_t *req_page_pool;
+struct crypto_ablkcipher *tfm;
+
+struct req_dm_crypt_io {
+ struct work_struct work;
+ struct request *cloned_request;
+ int error;
+ atomic_t pending;
+ struct timespec start_time;
+};
+
+static void req_crypt_cipher_complete
+ (struct crypto_async_request *req, int err);
+
+
+static void req_crypt_inc_pending(struct req_dm_crypt_io *io)
+{
+ atomic_inc(&io->pending);
+}
+
+static void req_crypt_dec_pending_encrypt(struct req_dm_crypt_io *io)
+{
+ int error = 0;
+ struct request *clone = NULL;
+
+ if (io) {
+ error = io->error;
+ if (io->cloned_request) {
+ clone = io->cloned_request;
+ } else {
+ DMERR("%s io->cloned_request is NULL\n",
+ __func__);
+ /*
+ * If Clone is NULL we cannot do anything,
+ * this should never happen
+ */
+ BUG();
+ }
+ } else {
+ DMERR("%s io is NULL\n", __func__);
+ /*
+		 * If io is NULL we cannot do anything,
+		 * this should never happen
+ */
+ BUG();
+ }
+
+ atomic_dec(&io->pending);
+
+ if (error < 0)
+ dm_kill_unmapped_request(clone, error);
+ else
+ dm_dispatch_request(clone);
+}
+
+static void req_crypt_dec_pending_decrypt(struct req_dm_crypt_io *io)
+{
+ int error = 0;
+ struct request *clone = NULL;
+
+ if (io) {
+ error = io->error;
+ if (io->cloned_request) {
+ clone = io->cloned_request;
+ } else {
+ DMERR("%s io->cloned_request is NULL\n",
+ __func__);
+ /*
+ * If Clone is NULL we cannot do anything,
+ * this should never happen
+ */
+ BUG();
+ }
+ } else {
+ DMERR("%s io is NULL\n",
+ __func__);
+ /*
+		 * If io is NULL we cannot do anything,
+		 * this should never happen
+ */
+ BUG();
+ }
+
+ /* Should never get here if io or Clone is NULL */
+ dm_end_request(clone, error);
+ atomic_dec(&io->pending);
+ mempool_free(io, req_io_pool);
+}
+
+/*
+ * The callback that will be called by the worker queue to perform
+ * decryption for reads and then use the dm functions to complete the
+ * bios and the request.
+ */
+static void req_cryptd_crypt_read_convert(struct req_dm_crypt_io *io)
+{
+ struct request *clone = NULL;
+ int error = 0;
+ int total_sg_len = 0, rc = 0, total_bytes_in_req = 0;
+ struct ablkcipher_request *req = NULL;
+ struct req_crypt_result result;
+ struct scatterlist *req_sg_read = NULL;
+ int err = 0;
+ struct req_iterator iter2;
+ struct bio_vec *bvec = NULL;
+ u8 IV[AES_XTS_IV_LEN];
+
+ if (io) {
+ error = io->error;
+ if (io->cloned_request) {
+ clone = io->cloned_request;
+ } else {
+ DMERR("%s io->cloned_request is NULL\n",
+ __func__);
+ error = DM_REQ_CRYPT_ERROR;
+ goto submit_request;
+ }
+ } else {
+ DMERR("%s io is NULL\n",
+ __func__);
+ error = DM_REQ_CRYPT_ERROR;
+ goto submit_request;
+ }
+
+ req_crypt_inc_pending(io);
+
+ if (error != 0) {
+ err = error;
+ goto submit_request;
+ }
+
+ req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
+ if (!req) {
+ DMERR("%s ablkcipher request allocation failed\n", __func__);
+ err = DM_REQ_CRYPT_ERROR;
+ goto ablkcipher_req_alloc_failure;
+ }
+
+ ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ req_crypt_cipher_complete, &result);
+ init_completion(&result.completion);
+ qcrypto_cipher_set_flag(req,
+ QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B);
+ crypto_ablkcipher_clear_flags(tfm, ~0);
+ crypto_ablkcipher_setkey(tfm, NULL, KEY_SIZE_XTS);
+
+ req_sg_read = kzalloc(sizeof(struct scatterlist) *
+ MAX_SG_LIST, GFP_KERNEL);
+ if (!req_sg_read) {
+ DMERR("%s req_sg_read allocation failed\n",
+ __func__);
+ err = DM_REQ_CRYPT_ERROR;
+ goto ablkcipher_req_alloc_failure;
+ }
+
+ total_sg_len = blk_rq_map_sg(clone->q, clone, req_sg_read);
+ if ((total_sg_len <= 0) || (total_sg_len > MAX_SG_LIST)) {
+		DMERR("%s Request Error %d", __func__, total_sg_len);
+ err = DM_REQ_CRYPT_ERROR;
+ goto ablkcipher_req_alloc_failure;
+ }
+
+
+ /* total bytes to copy */
+ bvec = NULL;
+ rq_for_each_segment(bvec, clone, iter2) {
+ total_bytes_in_req = total_bytes_in_req + bvec->bv_len;
+ }
+
+ memset(IV, 0, AES_XTS_IV_LEN);
+ memcpy(IV, &clone->__sector, sizeof(sector_t));
+
+ ablkcipher_request_set_crypt(req, req_sg_read, req_sg_read,
+ total_bytes_in_req, (void *) IV);
+
+ rc = crypto_ablkcipher_decrypt(req);
+
+ switch (rc) {
+ case 0:
+ break;
+
+ case -EBUSY:
+ /*
+ * Lets make this synchronous request by waiting on
+ * in progress as well
+ */
+ case -EINPROGRESS:
+ wait_for_completion_io(&result.completion);
+ if (result.err) {
+			DMERR("%s error = %d decrypting the request\n",
+ __func__, result.err);
+ err = DM_REQ_CRYPT_ERROR;
+ }
+ break;
+
+ default:
+ err = DM_REQ_CRYPT_ERROR;
+ break;
+ }
+
+ablkcipher_req_alloc_failure:
+
+ if (req)
+ ablkcipher_request_free(req);
+
+ kfree(req_sg_read);
+
+submit_request:
+ io->error = err;
+ req_crypt_dec_pending_decrypt(io);
+}
+
+/*
+ * The callback that will be called by the worker queue to perform
+ * encryption for writes and submit the request using the elevator.
+ */
+static void req_cryptd_crypt_write_convert(struct req_dm_crypt_io *io)
+{
+ struct request *clone = NULL;
+ struct bio *bio_src = NULL;
+ unsigned int total_sg_len_req_in = 0, total_sg_len_req_out = 0,
+ total_bytes_in_req = 0, error = DM_MAPIO_REMAPPED, rc = 0;
+ struct req_iterator iter;
+ struct ablkcipher_request *req = NULL;
+ struct req_crypt_result result;
+ struct bio_vec *bvec = NULL;
+ struct scatterlist *req_sg_in = NULL;
+ struct scatterlist *req_sg_out = NULL;
+ int copy_bio_sector_to_req = 0;
+ gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
+ struct page *page = NULL;
+ u8 IV[AES_XTS_IV_LEN];
+ int remaining_size = 0;
+
+ if (io) {
+ if (io->cloned_request) {
+ clone = io->cloned_request;
+ } else {
+ DMERR("%s io->cloned_request is NULL\n",
+ __func__);
+ error = DM_REQ_CRYPT_ERROR;
+ goto submit_request;
+ }
+ } else {
+ DMERR("%s io is NULL\n",
+ __func__);
+ error = DM_REQ_CRYPT_ERROR;
+ goto submit_request;
+ }
+
+ req_crypt_inc_pending(io);
+
+ req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
+ if (!req) {
+ DMERR("%s ablkcipher request allocation failed\n",
+ __func__);
+ error = DM_REQ_CRYPT_ERROR;
+ goto ablkcipher_req_alloc_failure;
+ }
+
+ ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ req_crypt_cipher_complete, &result);
+
+ init_completion(&result.completion);
+ qcrypto_cipher_set_flag(req,
+ QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B);
+ crypto_ablkcipher_clear_flags(tfm, ~0);
+ crypto_ablkcipher_setkey(tfm, NULL, KEY_SIZE_XTS);
+
+ req_sg_in = kzalloc(sizeof(struct scatterlist) * MAX_SG_LIST,
+ GFP_KERNEL);
+ if (!req_sg_in) {
+ DMERR("%s req_sg_in allocation failed\n",
+ __func__);
+ error = DM_REQ_CRYPT_ERROR;
+ goto ablkcipher_req_alloc_failure;
+ }
+
+ req_sg_out = kzalloc(sizeof(struct scatterlist) * MAX_SG_LIST,
+ GFP_KERNEL);
+ if (!req_sg_out) {
+ DMERR("%s req_sg_out allocation failed\n",
+ __func__);
+ error = DM_REQ_CRYPT_ERROR;
+ goto ablkcipher_req_alloc_failure;
+ }
+
+ total_sg_len_req_in = blk_rq_map_sg(clone->q, clone, req_sg_in);
+ if ((total_sg_len_req_in <= 0) ||
+ (total_sg_len_req_in > MAX_SG_LIST)) {
+		DMERR("%s Request Error %d", __func__, total_sg_len_req_in);
+ error = DM_REQ_CRYPT_ERROR;
+ goto ablkcipher_req_alloc_failure;
+ }
+
+
+ rq_for_each_segment(bvec, clone, iter) {
+try_again:
+ if (bvec->bv_len > remaining_size) {
+ page = NULL;
+ page = mempool_alloc(req_page_pool, gfp_mask);
+ if (!page) {
+ DMERR("%s Crypt page alloc failed", __func__);
+ congestion_wait(BLK_RW_ASYNC, HZ/100);
+ goto try_again;
+ }
+ bvec->bv_page = page;
+ bvec->bv_offset = 0;
+ total_bytes_in_req = total_bytes_in_req + bvec->bv_len;
+ remaining_size = PAGE_SIZE - bvec->bv_len;
+ if (remaining_size < 0)
+ BUG();
+ } else {
+ bvec->bv_page = page;
+ bvec->bv_offset = PAGE_SIZE - remaining_size;
+ remaining_size = remaining_size - bvec->bv_len;
+ total_bytes_in_req = total_bytes_in_req + bvec->bv_len;
+ }
+ }
+
+ total_sg_len_req_out = blk_rq_map_sg(clone->q, clone, req_sg_out);
+ if ((total_sg_len_req_out <= 0) ||
+ (total_sg_len_req_out > MAX_SG_LIST)) {
+ DMERR("%s Request Error %d", __func__, total_sg_len_req_out);
+ error = DM_REQ_CRYPT_ERROR;
+ goto ablkcipher_req_alloc_failure;
+ }
+
+ memset(IV, 0, AES_XTS_IV_LEN);
+ memcpy(IV, &clone->__sector, sizeof(sector_t));
+
+ ablkcipher_request_set_crypt(req, req_sg_in, req_sg_out,
+ total_bytes_in_req, (void *) IV);
+
+ rc = crypto_ablkcipher_encrypt(req);
+
+ switch (rc) {
+ case 0:
+ break;
+
+ case -EBUSY:
+ /*
+ * Lets make this synchronous request by waiting on
+ * in progress as well
+ */
+ case -EINPROGRESS:
+ wait_for_completion_interruptible(&result.completion);
+ if (result.err) {
+ DMERR("%s error = %d encrypting the request\n",
+ __func__, result.err);
+ error = DM_REQ_CRYPT_ERROR;
+ goto ablkcipher_req_alloc_failure;
+ }
+ break;
+
+ default:
+ error = DM_REQ_CRYPT_ERROR;
+ goto ablkcipher_req_alloc_failure;
+ }
+
+ __rq_for_each_bio(bio_src, clone) {
+ if (copy_bio_sector_to_req == 0) {
+ clone->buffer = bio_data(bio_src);
+ copy_bio_sector_to_req++;
+ }
+ blk_queue_bounce(clone->q, &bio_src);
+ }
+
+
+ablkcipher_req_alloc_failure:
+ if (req)
+ ablkcipher_request_free(req);
+
+
+ kfree(req_sg_in);
+
+ kfree(req_sg_out);
+
+submit_request:
+ io->error = error;
+ req_crypt_dec_pending_encrypt(io);
+}
+
+/* Workqueue callback: dispatch the cloned request to the read or write path */
+static void req_cryptd_crypt(struct work_struct *work)
+{
+ struct req_dm_crypt_io *io =
+ container_of(work, struct req_dm_crypt_io, work);
+
+ if (rq_data_dir(io->cloned_request) == WRITE)
+ req_cryptd_crypt_write_convert(io);
+ else if (rq_data_dir(io->cloned_request) == READ)
+ req_cryptd_crypt_read_convert(io);
+ else
+		DMERR("%s received non-read/write request for clone %p\n",
+				__func__, io->cloned_request);
+}
+
+static void req_cryptd_queue_crypt(struct req_dm_crypt_io *io)
+{
+ INIT_WORK(&io->work, req_cryptd_crypt);
+ queue_work(req_crypt_queue, &io->work);
+}
+
+/*
+ * Cipher complete callback, this is triggered by the Linux crypto api once
+ * the operation is done. This signals the waiting thread that the crypto
+ * operation is complete.
+ */
+static void req_crypt_cipher_complete(struct crypto_async_request *req, int err)
+{
+ struct req_crypt_result *res = req->data;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ res->err = err;
+ complete(&res->completion);
+}
+
+/*
+ * If bio->bi_bdev points to a partition, remap the location
+ */
+static inline void req_crypt_blk_partition_remap(struct bio *bio)
+{
+ struct block_device *bdev = bio->bi_bdev;
+
+ if (bio_sectors(bio) && bdev != bdev->bd_contains) {
+ struct hd_struct *p = bdev->bd_part;
+
+ bio->bi_sector += p->start_sect;
+ bio->bi_bdev = bdev->bd_contains;
+ }
+}
+
+/*
+ * The endio function is called from ksoftirqd context (atomic).
+ * For write operations the new pages created from the mempool
+ * are freed and returned. For read operations, decryption is
+ * required; since this is called in an atomic context, the
+ * request is sent to a worker queue to complete the decryption and
+ * free the request once done.
+ */
+static int req_crypt_endio(struct dm_target *ti, struct request *clone,
+ int error, union map_info *map_context)
+{
+ int err = 0;
+ struct req_iterator iter1;
+ struct bio_vec *bvec = NULL;
+ struct req_dm_crypt_io *req_io = map_context->ptr;
+
+	/* For writes, free the pages allocated from the mempool and return. */
+ bvec = NULL;
+ if (rq_data_dir(clone) == WRITE) {
+		rq_for_each_segment(bvec, clone, iter1) {
+			if (bvec->bv_offset == 0)
+				mempool_free(bvec->bv_page, req_page_pool);
+			bvec->bv_page = NULL;
+		}
+ mempool_free(req_io, req_io_pool);
+ goto submit_request;
+ } else if (rq_data_dir(clone) == READ) {
+ req_io->error = error;
+ req_cryptd_queue_crypt(req_io);
+ err = DM_ENDIO_INCOMPLETE;
+ goto submit_request;
+ }
+
+submit_request:
+ return err;
+}
+
+/*
+ * This function is called with interrupts disabled.
+ * It remaps the clone for the underlying device.
+ * If it is a write request, it calls into the worker queue to
+ * encrypt the data and submits the request directly using the
+ * elevator. For a read request no pre-processing is required; the
+ * request is returned to dm once mapping is done.
+ */
+
+static int req_crypt_map(struct dm_target *ti, struct request *clone,
+ union map_info *map_context)
+{
+ struct req_dm_crypt_io *req_io = NULL;
+ int error = DM_MAPIO_REMAPPED, copy_bio_sector_to_req = 0;
+ struct bio *bio_src = NULL;
+
+ req_io = mempool_alloc(req_io_pool, GFP_NOWAIT);
+ if (!req_io) {
+ DMERR("%s req_io allocation failed\n", __func__);
+ error = DM_REQ_CRYPT_ERROR;
+ goto submit_request;
+ }
+
+ /* Save the clone in the req_io, the callback to the worker
+ * queue will get the req_io
+ */
+ req_io->cloned_request = clone;
+ map_context->ptr = req_io;
+ atomic_set(&req_io->pending, 0);
+
+ /* Get the queue of the underlying original device */
+ clone->q = bdev_get_queue(dev->bdev);
+ clone->rq_disk = dev->bdev->bd_disk;
+
+ __rq_for_each_bio(bio_src, clone) {
+ bio_src->bi_bdev = dev->bdev;
+		/*
+		 * Currently the way req-dm works is that once the underlying
+		 * device driver completes the request, the block layer
+		 * completes the bios (clones) and then the cloned request.
+		 * This is undesirable for req-dm-crypt, hence the added flag
+		 * BIO_DONTFREE: it ensures that the blk layer does not
+		 * complete the cloned bios before completing the request.
+		 * When the crypt endio is called, post-processing is done and
+		 * then the dm layer will complete the bios (clones) and free
+		 * them.
+		 */
+ bio_src->bi_flags |= 1 << BIO_DONTFREE;
+
+ /*
+ * If this device has partitions, remap block n
+ * of partition p to block n+start(p) of the disk.
+ */
+ req_crypt_blk_partition_remap(bio_src);
+ if (copy_bio_sector_to_req == 0) {
+ clone->__sector = bio_src->bi_sector;
+ clone->buffer = bio_data(bio_src);
+ copy_bio_sector_to_req++;
+ }
+ blk_queue_bounce(clone->q, &bio_src);
+ }
+
+ if (rq_data_dir(clone) == READ) {
+ error = DM_MAPIO_REMAPPED;
+ goto submit_request;
+ } else if (rq_data_dir(clone) == WRITE) {
+ req_cryptd_queue_crypt(req_io);
+ error = DM_MAPIO_SUBMITTED;
+ goto submit_request;
+ } else {
+ error = DM_REQ_CRYPT_ERROR;
+ DMERR("%s Unknown request\n", __func__);
+ }
+
+submit_request:
+ return error;
+
+}
+
+static int req_crypt_status(struct dm_target *ti, status_type_t type,
+ char *result, unsigned maxlen)
+{
+ return 0;
+}
+
+static void req_crypt_dtr(struct dm_target *ti)
+{
+ if (req_crypt_queue)
+ destroy_workqueue(req_crypt_queue);
+ if (req_io_pool)
+ mempool_destroy(req_io_pool);
+ if (req_page_pool)
+ mempool_destroy(req_page_pool);
+ if (tfm)
+ crypto_free_ablkcipher(tfm);
+}
+
+
+/*
+ * Construct an encryption mapping:
+ * <cipher> <key> <iv_offset> <dev_path> <start>
+ */
+static int req_crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+ unsigned long long tmpll;
+ char dummy;
+
+	if (argc < 5) {
+		DMERR("%s Insufficient arguments\n", __func__);
+		return DM_REQ_CRYPT_ERROR;
+	}
+	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &dev)) {
+		DMERR("%s Device Lookup failed\n", __func__);
+		return DM_REQ_CRYPT_ERROR;
+	}
+
+ if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
+ DMERR("%s Invalid device sector\n", __func__);
+ return DM_REQ_CRYPT_ERROR;
+ }
+ start_sector_orig = tmpll;
+
+ req_crypt_queue = alloc_workqueue("req_cryptd",
+ WQ_HIGHPRI |
+ WQ_CPU_INTENSIVE|
+ WQ_MEM_RECLAIM,
+ 1);
+ if (!req_crypt_queue) {
+ DMERR("%s req_crypt_queue not allocated\n", __func__);
+ return DM_REQ_CRYPT_ERROR;
+ }
+
+ /* Allocate the crypto alloc blk cipher and keep the handle */
+ tfm = crypto_alloc_ablkcipher("qcom-xts(aes)", 0, 0);
+ if (IS_ERR(tfm)) {
+ DMERR("%s ablkcipher tfm allocation failed : error = %lu\n",
+ __func__, PTR_ERR(tfm));
+ return DM_REQ_CRYPT_ERROR;
+ }
+
+ req_io_pool = mempool_create_slab_pool(MIN_IOS, _req_crypt_io_pool);
+ if (!req_io_pool) {
+ DMERR("%s req_io_pool not allocated\n", __func__);
+ return DM_REQ_CRYPT_ERROR;
+ }
+
+ req_page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
+ if (!req_page_pool) {
+ DMERR("%s req_page_pool not allocated\n", __func__);
+ return DM_REQ_CRYPT_ERROR;
+ }
+
+ return 0;
+}
+
+static void req_crypt_postsuspend(struct dm_target *ti)
+{
+}
+
+static int req_crypt_preresume(struct dm_target *ti)
+{
+ return 0;
+}
+
+static void req_crypt_resume(struct dm_target *ti)
+{
+}
+
+/* Message interface
+ * key set <key>
+ * key wipe
+ */
+static int req_crypt_message(struct dm_target *ti, unsigned argc, char **argv)
+{
+ return 0;
+}
+
+static int req_crypt_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ return fn(ti, dev, start_sector_orig, ti->len, data);
+}
+
+static struct target_type req_crypt_target = {
+ .name = "req-crypt",
+ .version = {1, 0, 0},
+ .module = THIS_MODULE,
+ .ctr = req_crypt_ctr,
+ .dtr = req_crypt_dtr,
+ .map_rq = req_crypt_map,
+ .rq_end_io = req_crypt_endio,
+ .status = req_crypt_status,
+ .postsuspend = req_crypt_postsuspend,
+ .preresume = req_crypt_preresume,
+ .resume = req_crypt_resume,
+ .message = req_crypt_message,
+ .iterate_devices = req_crypt_iterate_devices,
+};
+
+static int __init req_dm_crypt_init(void)
+{
+ int r;
+
+ _req_crypt_io_pool = KMEM_CACHE(req_dm_crypt_io, 0);
+ if (!_req_crypt_io_pool)
+ return -ENOMEM;
+
+ r = dm_register_target(&req_crypt_target);
+ if (r < 0)
+ DMERR("register failed %d", r);
+
+ return r;
+}
+
+static void __exit req_dm_crypt_exit(void)
+{
+ kmem_cache_destroy(_req_crypt_io_pool);
+ dm_unregister_target(&req_crypt_target);
+}
+
+module_init(req_dm_crypt_init);
+module_exit(req_dm_crypt_exit);
+
+MODULE_DESCRIPTION(DM_NAME " target for request based transparent encryption / decryption");
+MODULE_LICENSE("GPL v2");
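Both conversion paths above repeat the same switch on the ablkcipher return code; a hypothetical helper (not in this patch) would express the wait-for-completion pattern once:

	static int req_crypt_wait_cipher(struct req_crypt_result *res, int rc)
	{
		switch (rc) {
		case 0:			/* completed synchronously */
			return 0;
		case -EBUSY:		/* backlogged; wait as if in progress */
		case -EINPROGRESS:
			wait_for_completion_io(&res->completion);
			return res->err;
		default:
			return rc;
		}
	}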
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index e24143c..5d240d1 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -775,7 +775,7 @@
* Complete the clone and the original request.
* Must be called without queue lock.
*/
-static void dm_end_request(struct request *clone, int error)
+void dm_end_request(struct request *clone, int error)
{
int rw = rq_data_dir(clone);
struct dm_rq_target_io *tio = clone->end_io_data;
diff --git a/drivers/power/qpnp-bms.c b/drivers/power/qpnp-bms.c
index bfe1f43..a0d9a24 100644
--- a/drivers/power/qpnp-bms.c
+++ b/drivers/power/qpnp-bms.c
@@ -606,12 +606,14 @@
temp_current = div_s64((vsense_uv * 1000000LL),
(int)chip->r_sense_uohm);
+ *result_ua = temp_current;
rc = qpnp_iadc_comp_result(chip->iadc_dev, &temp_current);
if (rc)
pr_debug("error compensation failed: %d\n", rc);
+	pr_debug("uncompensated ibat=%duA, err compensated ibat=%llduA\n",
+		*result_ua, temp_current);
*result_ua = temp_current;
- pr_debug("err compensated ibat=%duA\n", *result_ua);
return 0;
}
diff --git a/drivers/power/qpnp-charger.c b/drivers/power/qpnp-charger.c
index 7689265..2dc77e6 100644
--- a/drivers/power/qpnp-charger.c
+++ b/drivers/power/qpnp-charger.c
@@ -114,6 +114,9 @@
#define USB_OCP_CLR 0x53
#define BAT_IF_TEMP_STATUS 0x09
#define BOOST_ILIM 0x78
+#define USB_SPARE 0xDF
+#define DC_COMP_OVR1 0xE9
+#define CHGR_COMP_OVR1 0xEE
#define REG_OFFSET_PERP_SUBTYPE 0x05
@@ -319,6 +322,7 @@
bool aicl_settled;
bool use_external_rsense;
bool fastchg_on;
+ bool parallel_ovp_mode;
unsigned int bpd_detection;
unsigned int max_bat_chg_current;
unsigned int warm_bat_chg_ma;
@@ -385,6 +389,8 @@
bool power_stage_workaround_enable;
};
+static void
+qpnp_chg_set_appropriate_battery_current(struct qpnp_chg_chip *chip);
static struct of_device_id qpnp_charger_match_table[] = {
{ .compatible = QPNP_CHARGER_DEV_NAME, },
@@ -1011,10 +1017,68 @@
return iusbmax_ma;
}
+#define ILIMIT_OVR_0 0x02
+static int
+override_dcin_ilimit(struct qpnp_chg_chip *chip, bool override)
+{
+ int rc;
+
+ pr_debug("override %d\n", override);
+ rc = qpnp_chg_masked_write(chip,
+ chip->dc_chgpth_base + SEC_ACCESS,
+ 0xA5,
+ 0xA5, 1);
+ rc |= qpnp_chg_masked_write(chip,
+ chip->dc_chgpth_base + DC_COMP_OVR1,
+ 0xFF,
+ override ? ILIMIT_OVR_0 : 0, 1);
+ if (rc) {
+ pr_err("Failed to override dc ilimit rc = %d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+#define DUAL_PATH_EN BIT(7)
+static int
+switch_parallel_ovp_mode(struct qpnp_chg_chip *chip, bool enable)
+{
+ int rc = 0;
+
+ if (!chip->usb_chgpth_base || !chip->dc_chgpth_base)
+ return rc;
+
+ pr_debug("enable %d\n", enable);
+ rc = override_dcin_ilimit(chip, 1);
+ udelay(10);
+
+ /* enable/disable dual path mode */
+ rc = qpnp_chg_masked_write(chip,
+ chip->usb_chgpth_base + SEC_ACCESS,
+ 0xA5,
+ 0xA5, 1);
+ rc |= qpnp_chg_masked_write(chip,
+ chip->usb_chgpth_base + USB_SPARE,
+ 0xFF,
+ enable ? DUAL_PATH_EN : 0, 1);
+ if (rc) {
+		pr_err("Failed to switch parallel ovp mode rc = %d\n", rc);
+ return rc;
+ }
+
+ rc = override_dcin_ilimit(chip, 0);
+ return rc;
+}
+
#define USB_SUSPEND_BIT BIT(0)
static int
qpnp_chg_usb_suspend_enable(struct qpnp_chg_chip *chip, int enable)
{
+ /* Turn off DC OVP FET when going into USB suspend */
+ if (chip->parallel_ovp_mode && enable)
+ switch_parallel_ovp_mode(chip, 0);
+
return qpnp_chg_masked_write(chip,
chip->usb_chgpth_base + CHGR_USB_USB_SUSP,
USB_SUSPEND_BIT,
@@ -1796,6 +1860,29 @@
return (chgr_sts & FAST_CHG_ON_IRQ) ? 1 : 0;
}
+#define VBATDET_BYPASS 0x01
+static int
+bypass_vbatdet_comp(struct qpnp_chg_chip *chip, bool bypass)
+{
+ int rc;
+
+ pr_debug("bypass %d\n", bypass);
+ rc = qpnp_chg_masked_write(chip,
+ chip->chgr_base + SEC_ACCESS,
+ 0xA5,
+ 0xA5, 1);
+ rc |= qpnp_chg_masked_write(chip,
+ chip->chgr_base + CHGR_COMP_OVR1,
+ 0xFF,
+ bypass ? VBATDET_BYPASS : 0, 1);
+ if (rc) {
+ pr_err("Failed to bypass vbatdet comp rc = %d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
static irqreturn_t
qpnp_chg_chgr_chg_fastchg_irq_handler(int irq, void *_chip)
{
@@ -1823,6 +1910,11 @@
if (fastchg_on) {
chip->chg_done = false;
+ bypass_vbatdet_comp(chip, 1);
+ if (chip->bat_is_warm || chip->bat_is_cool) {
+ qpnp_chg_set_appropriate_vddmax(chip);
+ qpnp_chg_set_appropriate_battery_current(chip);
+ }
if (chip->resuming_charging) {
chip->resuming_charging = false;
@@ -1834,6 +1926,13 @@
msecs_to_jiffies(EOC_CHECK_PERIOD_MS));
pm_stay_awake(chip->dev);
}
+ if (chip->parallel_ovp_mode)
+ switch_parallel_ovp_mode(chip, 1);
+ } else {
+ if (chip->parallel_ovp_mode)
+ switch_parallel_ovp_mode(chip, 0);
+ if (!chip->bat_is_warm && !chip->bat_is_cool)
+ bypass_vbatdet_comp(chip, 0);
}
}
@@ -1950,6 +2049,9 @@
if (qpnp_chg_is_otg_en_set(chip))
return 0;
+ if (chip->parallel_ovp_mode)
+ switch_parallel_ovp_mode(chip, 0);
+
if (chip->type == SMBBP) {
rc = qpnp_chg_masked_write(chip,
chip->boost_base + BOOST_ILIM,
@@ -2359,7 +2461,7 @@
pr_debug("Unable to read batt temperature rc=%d\n", rc);
return 0;
}
- pr_debug("get_bat_temp %d %lld\n",
+ pr_debug("get_bat_temp %d, %lld\n",
results.adc_code, results.physical);
return (int)results.physical;
@@ -2701,6 +2803,25 @@
return 0;
}
+static void
+qpnp_chg_set_appropriate_battery_current(struct qpnp_chg_chip *chip)
+{
+ unsigned int chg_current = chip->max_bat_chg_current;
+
+ if (chip->bat_is_cool)
+ chg_current = min(chg_current, chip->cool_bat_chg_ma);
+
+ if (chip->bat_is_warm)
+ chg_current = min(chg_current, chip->warm_bat_chg_ma);
+
+ if (chip->therm_lvl_sel != 0 && chip->thermal_mitigation)
+ chg_current = min(chg_current,
+ chip->thermal_mitigation[chip->therm_lvl_sel]);
+
+ pr_debug("setting %d mA\n", chg_current);
+ qpnp_chg_ibatmax_set(chip, chg_current);
+}
+
static int
qpnp_chg_vddsafe_set(struct qpnp_chg_chip *chip, int voltage)
{
@@ -2909,25 +3030,6 @@
}
static void
-qpnp_chg_set_appropriate_battery_current(struct qpnp_chg_chip *chip)
-{
- unsigned int chg_current = chip->max_bat_chg_current;
-
- if (chip->bat_is_cool)
- chg_current = min(chg_current, chip->cool_bat_chg_ma);
-
- if (chip->bat_is_warm)
- chg_current = min(chg_current, chip->warm_bat_chg_ma);
-
- if (chip->therm_lvl_sel != 0 && chip->thermal_mitigation)
- chg_current = min(chg_current,
- chip->thermal_mitigation[chip->therm_lvl_sel]);
-
- pr_debug("setting %d mA\n", chg_current);
- qpnp_chg_ibatmax_set(chip, chg_current);
-}
-
-static void
qpnp_batt_system_temp_level_set(struct qpnp_chg_chip *chip, int lvl_sel)
{
if (lvl_sel >= 0 && lvl_sel < chip->thermal_levels) {
@@ -3501,9 +3603,6 @@
chip->bat_is_cool = bat_cool;
chip->bat_is_warm = bat_warm;
- if (bat_cool || bat_warm)
- chip->resuming_charging = false;
-
/**
* set appropriate voltages and currents.
*
@@ -3511,9 +3610,25 @@
* driver will not resume with SoC. Only vbatdet is used to
* determine resume of charging.
*/
- qpnp_chg_set_appropriate_vddmax(chip);
- qpnp_chg_set_appropriate_battery_current(chip);
- qpnp_chg_set_appropriate_vbatdet(chip);
+ if (bat_cool || bat_warm) {
+ chip->resuming_charging = false;
+ qpnp_chg_set_appropriate_vbatdet(chip);
+
+ /* To avoid ARB, only vbatdet is configured in
+ * warm/cold zones. Once vbat < vbatdet the
+ * appropriate vddmax/ibatmax adjustments will
+ * be made in the fast charge interrupt. */
+ bypass_vbatdet_comp(chip, 1);
+ qpnp_chg_charge_en(chip, !chip->charging_disabled);
+ qpnp_chg_charge_en(chip, chip->charging_disabled);
+ qpnp_chg_charge_en(chip, !chip->charging_disabled);
+ } else {
+ bypass_vbatdet_comp(chip, 0);
+ /* restore normal parameters */
+ qpnp_chg_set_appropriate_vbatdet(chip);
+ qpnp_chg_set_appropriate_vddmax(chip);
+ qpnp_chg_set_appropriate_battery_current(chip);
+ }
}
pr_debug("warm %d, cool %d, low = %d deciDegC, high = %d deciDegC\n",
@@ -4643,6 +4758,9 @@
chip->ibat_calibration_enabled =
of_property_read_bool(chip->spmi->dev.of_node,
"qcom,ibat-calibration-enabled");
+ chip->parallel_ovp_mode =
+ of_property_read_bool(chip->spmi->dev.of_node,
+ "qcom,parallel-ovp-mode");
of_get_property(chip->spmi->dev.of_node, "qcom,thermal-mitigation",
&(chip->thermal_levels));
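override_dcin_ilimit(), switch_parallel_ovp_mode() and bypass_vbatdet_comp() all unlock a secure register page (writing 0xA5 to SEC_ACCESS) before the actual write; a hypothetical helper built on the qpnp_chg_masked_write() calls used in this patch could factor that out:

	static int
	qpnp_chg_sec_masked_write(struct qpnp_chg_chip *chip, u16 base,
				u16 offset, u8 mask, u8 val)
	{
		int rc;

		/* unlock the secure register page for the next write */
		rc = qpnp_chg_masked_write(chip, base + SEC_ACCESS,
				0xA5, 0xA5, 1);
		if (rc)
			return rc;

		return qpnp_chg_masked_write(chip, base + offset, mask, val, 1);
	}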
diff --git a/drivers/slimbus/slim-msm-ngd.c b/drivers/slimbus/slim-msm-ngd.c
index 0b8c931..f858822 100644
--- a/drivers/slimbus/slim-msm-ngd.c
+++ b/drivers/slimbus/slim-msm-ngd.c
@@ -92,25 +92,25 @@
u32 stat = readl_relaxed(ngd + NGD_INT_STAT);
u32 pstat;
- if (stat & NGD_INT_TX_MSG_SENT) {
+ if ((stat & NGD_INT_MSG_BUF_CONTE) ||
+ (stat & NGD_INT_MSG_TX_INVAL) || (stat & NGD_INT_DEV_ERR) ||
+ (stat & NGD_INT_TX_NACKED_2)) {
+ writel_relaxed(stat, ngd + NGD_INT_CLR);
+ dev->err = -EIO;
+
+ dev_err(dev->dev, "NGD interrupt error:0x%x, err:%d", stat,
+ dev->err);
+ /* Guarantee that error interrupts are cleared */
+ mb();
+ if (dev->wr_comp)
+ complete(dev->wr_comp);
+
+ } else if (stat & NGD_INT_TX_MSG_SENT) {
writel_relaxed(NGD_INT_TX_MSG_SENT, ngd + NGD_INT_CLR);
/* Make sure interrupt is cleared */
mb();
if (dev->wr_comp)
complete(dev->wr_comp);
- } else if ((stat & NGD_INT_MSG_BUF_CONTE) ||
- (stat & NGD_INT_MSG_TX_INVAL) || (stat & NGD_INT_DEV_ERR) ||
- (stat & NGD_INT_TX_NACKED_2)) {
- dev_err(dev->dev, "NGD interrupt error:0x%x", stat);
- writel_relaxed(stat, ngd + NGD_INT_CLR);
- /* Guarantee that error interrupts are cleared */
- mb();
- if (((stat & NGD_INT_TX_NACKED_2) ||
- (stat & NGD_INT_MSG_TX_INVAL))) {
- dev->err = -EIO;
- if (dev->wr_comp)
- complete(dev->wr_comp);
- }
}
if (stat & NGD_INT_RX_MSG_RCVD) {
u32 rx_buf[10];
diff --git a/drivers/video/msm/mdss/mdp3_ctrl.c b/drivers/video/msm/mdss/mdp3_ctrl.c
index eff1e81..b324130 100644
--- a/drivers/video/msm/mdss/mdp3_ctrl.c
+++ b/drivers/video/msm/mdss/mdp3_ctrl.c
@@ -792,16 +792,16 @@
if (panel && panel->set_backlight)
panel->set_backlight(panel, 0);
+ rc = panel->event_handler(panel, MDSS_EVENT_PANEL_OFF, NULL);
+ if (rc)
+ pr_err("fail to turn off panel\n");
+
rc = mdp3_dma->stop(mdp3_dma, mdp3_session->intf);
if (rc) {
pr_err("fail to stop the MDP3 dma\n");
goto reset_error;
}
- rc = panel->event_handler(panel, MDSS_EVENT_PANEL_OFF, NULL);
- if (rc)
- pr_err("fail to turn off panel\n");
-
rc = mdp3_put_mdp_dsi_clk();
if (rc) {
pr_err("fail to release mdp clocks\n");
@@ -1608,6 +1608,43 @@
return rc;
}
+static int mdp3_overlay_prepare(struct msm_fb_data_type *mfd,
+ struct mdp_overlay_list __user *user_ovlist)
+{
+ struct mdp_overlay_list ovlist;
+ struct mdp3_session_data *mdp3_session = mfd->mdp.private1;
+ struct mdp_overlay *req;
+ int rc;
+
+ if (!mdp3_session)
+ return -ENODEV;
+
+ req = &mdp3_session->req_overlay;
+
+ if (copy_from_user(&ovlist, user_ovlist, sizeof(ovlist)))
+ return -EFAULT;
+
+ if (ovlist.num_overlays != 1) {
+ pr_err("OV_PREPARE failed: only 1 overlay allowed\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(req, ovlist.overlay_list[0], sizeof(*req)))
+ return -EFAULT;
+
+ rc = mdp3_overlay_set(mfd, req);
+ if (!IS_ERR_VALUE(rc)) {
+ if (copy_to_user(ovlist.overlay_list[0], req, sizeof(*req)))
+ return -EFAULT;
+ }
+
+ if (put_user(IS_ERR_VALUE(rc) ? 0 : 1,
+ &user_ovlist->processed_overlays))
+ return -EFAULT;
+
+ return rc;
+}
+
static int mdp3_ctrl_ioctl_handler(struct msm_fb_data_type *mfd,
u32 cmd, void __user *argp)
{
@@ -1707,6 +1744,9 @@
if (rc)
pr_err("OVERLAY_PLAY failed (%d)\n", rc);
break;
+ case MSMFB_OVERLAY_PREPARE:
+ rc = mdp3_overlay_prepare(mfd, argp);
+ break;
default:
break;
}
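For reference, the new MSMFB_OVERLAY_PREPARE path accepts exactly one overlay; a hedged userspace sketch, assuming the struct mdp_overlay_list and struct mdp_overlay definitions from msm_mdp.h and an already-open framebuffer descriptor (fb_fd and use_overlay are illustrative):

	struct mdp_overlay req;			/* fill src/dst rects, format, etc. */
	struct mdp_overlay *req_list[] = { &req };
	struct mdp_overlay_list ovlist = {
		.num_overlays = 1,
		.overlay_list = req_list,
	};

	/* on success the driver copies the processed request back into req */
	if (ioctl(fb_fd, MSMFB_OVERLAY_PREPARE, &ovlist) == 0 &&
			ovlist.processed_overlays == 1)
		use_overlay(&req);		/* hypothetical consumer */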
diff --git a/drivers/video/msm/mdss/mhl_sii8334.c b/drivers/video/msm/mdss/mhl_sii8334.c
index 0f84b2d..c5d5366 100644
--- a/drivers/video/msm/mdss/mhl_sii8334.c
+++ b/drivers/video/msm/mdss/mhl_sii8334.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -194,6 +194,7 @@
static void mhl_init_reg_settings(struct mhl_tx_ctrl *mhl_ctrl,
bool mhl_disc_en);
static int mhl_gpio_config(struct mhl_tx_ctrl *mhl_ctrl, int on);
+static int mhl_vreg_config(struct mhl_tx_ctrl *mhl_ctrl, uint8_t on);
int mhl_i2c_reg_read(struct i2c_client *client,
uint8_t slave_addr_index, uint8_t reg_offset)
@@ -385,15 +386,69 @@
return 0;
}
+static int mhl_sii_config(struct mhl_tx_ctrl *mhl_ctrl, bool on)
+{
+ int rc = 0;
+ struct i2c_client *client = NULL;
+
+ if (!mhl_ctrl) {
+ pr_err("%s: ctrl is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ client = mhl_ctrl->i2c_handle;
+
+ if (on) {
+ rc = mhl_vreg_config(mhl_ctrl, 1);
+ if (rc) {
+ pr_err("%s: vreg init failed [%d]\n",
+ __func__, rc);
+ return -ENODEV;
+ }
+
+ rc = mhl_gpio_config(mhl_ctrl, 1);
+ if (rc) {
+ pr_err("%s: gpio init failed [%d]\n",
+ __func__, rc);
+ return -ENODEV;
+ }
+
+ rc = request_threaded_irq(mhl_ctrl->i2c_handle->irq, NULL,
+ &mhl_tx_isr, IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ client->dev.driver->name, mhl_ctrl);
+ if (rc) {
+ pr_err("%s: request_threaded_irq failed, status: %d\n",
+ __func__, rc);
+ return -ENODEV;
+ } else {
+ mhl_ctrl->irq_req_done = true;
+ }
+ } else {
+ free_irq(mhl_ctrl->i2c_handle->irq, mhl_ctrl);
+ mhl_gpio_config(mhl_ctrl, 0);
+ mhl_vreg_config(mhl_ctrl, 0);
+ mhl_ctrl->irq_req_done = false;
+ }
+
+ return rc;
+}
+
+static void mhl_sii_disc_intr_work(struct work_struct *work)
+{
+ struct mhl_tx_ctrl *mhl_ctrl = NULL;
+
+ mhl_ctrl = container_of(work, struct mhl_tx_ctrl, mhl_intr_work);
+
+ mhl_sii_config(mhl_ctrl, false);
+}
+
/* USB_HANDSHAKING FUNCTIONS */
static int mhl_sii_device_discovery(void *data, int id,
void (*usb_notify_cb)(void *, int), void *ctx)
{
int rc;
struct mhl_tx_ctrl *mhl_ctrl = data;
- struct i2c_client *client = mhl_ctrl->i2c_handle;
unsigned long flags;
- int discovery_retry = 5;
if (id) {
/* When MHL cable is disconnected we get a sii8334
@@ -413,18 +468,14 @@
mhl_ctrl->notify_usb_online = usb_notify_cb;
mhl_ctrl->notify_ctx = ctx;
}
-again:
+
+ flush_work(&mhl_ctrl->mhl_intr_work);
+
if (!mhl_ctrl->irq_req_done) {
- rc = request_threaded_irq(mhl_ctrl->i2c_handle->irq, NULL,
- &mhl_tx_isr, IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- client->dev.driver->name, mhl_ctrl);
+ rc = mhl_sii_config(mhl_ctrl, true);
if (rc) {
- pr_debug("request_threaded_irq failed, status: %d\n",
- rc);
- return -EINVAL;
- } else {
- pr_debug("request_threaded_irq succeeded\n");
- mhl_ctrl->irq_req_done = true;
+ pr_err("%s: Failed to config vreg/gpio\n", __func__);
+ return rc;
}
/* wait for i2c interrupt line to be activated */
@@ -448,23 +499,9 @@
if (mhl_sii_wait_for_rgnd(mhl_ctrl)) {
pr_err("%s: discovery timeout\n", __func__);
- free_irq(mhl_ctrl->i2c_handle->irq, mhl_ctrl);
- mhl_gpio_config(mhl_ctrl, 0);
- mhl_ctrl->irq_req_done = false;
+ mhl_sii_config(mhl_ctrl, false);
- msleep(100);
-
- mhl_gpio_config(mhl_ctrl, 1);
- if (discovery_retry--) {
- pr_debug("%s: retrying discovery\n", __func__);
- goto again;
- } else {
- pr_err("%s: discovery failed, ret to USB\n",
- __func__);
- if (mhl_ctrl->notify_usb_online)
- mhl_ctrl->notify_usb_online(
- mhl_ctrl->notify_ctx, 0);
- }
+ return -EAGAIN;
}
} else {
if (mhl_ctrl->cur_state == POWER_STATE_D3) {
@@ -1059,13 +1096,8 @@
mhl_msm_connection(mhl_ctrl);
} else if (status & BIT3) {
pr_debug("%s: uUSB-a type dev detct\n", __func__);
-
- /* Short RGND */
- MHL_SII_REG_NAME_MOD(REG_DISC_STAT2, BIT0 | BIT1, 0x00);
- mhl_msm_disconnection(mhl_ctrl);
power_supply_changed(&mhl_ctrl->mhl_psy);
- if (mhl_ctrl->notify_usb_online)
- mhl_ctrl->notify_usb_online(mhl_ctrl->notify_ctx, 0);
+ mhl_drive_hpd(mhl_ctrl, HPD_DOWN);
return 0;
}
@@ -1081,6 +1113,9 @@
power_supply_changed(&mhl_ctrl->mhl_psy);
if (mhl_ctrl->notify_usb_online)
mhl_ctrl->notify_usb_online(mhl_ctrl->notify_ctx, 0);
+
+ queue_work(mhl_ctrl->mhl_workq, &mhl_ctrl->mhl_intr_work);
+
return 0;
}
@@ -1495,6 +1530,27 @@
int rc = -EINVAL;
pr_debug("%s\n", __func__);
+
+ if (!enable) {
+ regulator_disable(reg_8941_vdda);
+ regulator_put(reg_8941_vdda);
+ reg_8941_vdda = NULL;
+
+ regulator_disable(reg_8941_smps3a);
+ regulator_put(reg_8941_smps3a);
+ reg_8941_smps3a = NULL;
+
+ regulator_disable(reg_8941_l02);
+ regulator_put(reg_8941_l02);
+ reg_8941_l02 = NULL;
+
+ regulator_disable(reg_8941_l24);
+ regulator_put(reg_8941_l24);
+ reg_8941_l24 = NULL;
+
+ return 0;
+ }
+
if (!reg_8941_l24) {
reg_8941_l24 = regulator_get(&client->dev,
"avcc_18");
@@ -1736,26 +1792,6 @@
}
/*
- * Regulator init
- */
- rc = mhl_vreg_config(mhl_ctrl, 1);
- if (rc) {
- pr_err("%s: vreg init failed [%d]\n",
- __func__, rc);
- goto failed_probe;
- }
-
- /*
- * GPIO init
- */
- rc = mhl_gpio_config(mhl_ctrl, 1);
- if (rc) {
- pr_err("%s: gpio init failed [%d]\n",
- __func__, rc);
- goto failed_probe;
- }
-
- /*
* Other initializations
* such tx specific
*/
@@ -1767,6 +1803,9 @@
spin_lock_init(&mhl_ctrl->lock);
mhl_ctrl->msc_send_workqueue = create_singlethread_workqueue
("mhl_msc_cmd_queue");
+ mhl_ctrl->mhl_workq = create_singlethread_workqueue("mhl_workq");
+
+ INIT_WORK(&mhl_ctrl->mhl_intr_work, mhl_sii_disc_intr_work);
mhl_ctrl->input = input_allocate_device();
if (mhl_ctrl->input) {
@@ -1894,9 +1933,7 @@
failed_probe_pwr:
power_supply_unregister(&mhl_ctrl->mhl_psy);
failed_probe:
- free_irq(mhl_ctrl->i2c_handle->irq, mhl_ctrl);
- mhl_gpio_config(mhl_ctrl, 0);
- mhl_vreg_config(mhl_ctrl, 0);
+ mhl_sii_config(mhl_ctrl, false);
/* do not deep-free */
if (mhl_info)
devm_kfree(&client->dev, mhl_info);
@@ -1923,9 +1960,10 @@
return -EINVAL;
}
- free_irq(mhl_ctrl->i2c_handle->irq, mhl_ctrl);
- mhl_gpio_config(mhl_ctrl, 0);
- mhl_vreg_config(mhl_ctrl, 0);
+ mhl_sii_config(mhl_ctrl, false);
+
+ destroy_workqueue(mhl_ctrl->mhl_workq);
+
if (mhl_ctrl->mhl_info)
devm_kfree(&client->dev, mhl_ctrl->mhl_info);
if (mhl_ctrl->pdata)
@@ -1949,17 +1987,19 @@
pr_debug("%s\n", __func__);
- if (!mhl_ctrl)
+ if (!mhl_ctrl) {
+ pr_err("%s: invalid ctrl data\n", __func__);
return 0;
-
- free_irq(mhl_ctrl->i2c_handle->irq, mhl_ctrl);
- mhl_ctrl->irq_req_done = false;
+ }
if (mhl_ctrl->mhl_mode) {
mhl_ctrl->mhl_mode = 0;
+
power_supply_changed(&mhl_ctrl->mhl_psy);
if (mhl_ctrl->notify_usb_online)
mhl_ctrl->notify_usb_online(mhl_ctrl->notify_ctx, 0);
+
+ mhl_sii_config(mhl_ctrl, false);
}
return 0;
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index 341f753..8db494d 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -329,10 +329,14 @@
/* fat/misc.c */
extern __printf(3, 4) __cold
void __fat_fs_error(struct super_block *sb, int report, const char *fmt, ...);
-#define fat_fs_error(sb, fmt, args...) \
- __fat_fs_error(sb, 1, fmt , ## args)
#define fat_fs_error_ratelimit(sb, fmt, args...) \
__fat_fs_error(sb, __ratelimit(&MSDOS_SB(sb)->ratelimit), fmt , ## args)
+/*
+ * If a removable device with a FAT fs is removed without an unmount,
+ * further accesses to the device by applications cause a large number of
+ * error prints and in some cases lead to a watchdog bark.
+ */
+#define fat_fs_error(sb, fmt, args...) fat_fs_error_ratelimit(sb, fmt, ## args)
__printf(3, 4) __cold
void fat_msg(struct super_block *sb, const char *level, const char *fmt, ...);
extern int fat_clusters_flush(struct super_block *sb);
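With this change every fat_fs_error() call is gated by the per-superblock ratelimit state already used by fat_fs_error_ratelimit(). The gating mechanism is the standard <linux/ratelimit.h> pattern, sketched here with a standalone state (fat_rs and report_fat_error are illustrative; the real state lives in MSDOS_SB(sb)->ratelimit):

	#include <linux/printk.h>
	#include <linux/ratelimit.h>

	static DEFINE_RATELIMIT_STATE(fat_rs, DEFAULT_RATELIMIT_INTERVAL,
					DEFAULT_RATELIMIT_BURST);

	static void report_fat_error(void)
	{
		/* __ratelimit() returns 0 once the burst is exhausted,
		 * suppressing further prints until the interval elapses */
		if (__ratelimit(&fat_rs))
			pr_err("FAT-fs: example error print\n");
	}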
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index ac750ea..3f62354 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -95,6 +95,12 @@
#define BIO_FS_INTEGRITY 9 /* fs owns integrity data, not block layer */
#define BIO_QUIET 10 /* Make BIO Quiet */
#define BIO_MAPPED_INTEGRITY 11/* integrity metadata has been remapped */
+/*
+ * Added for request-based dm, which needs to perform post-processing.
+ * This flag ensures blk_update_request() does not free the bios or the
+ * request; that is done at the dm level.
+ */
+#define BIO_DONTFREE 12
#define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag)))
/*
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 98f34b8..546871b 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -510,5 +510,6 @@
void dm_requeue_unmapped_request(struct request *rq);
void dm_kill_unmapped_request(struct request *rq, int error);
int dm_underlying_device_busy(struct request_queue *q);
+void dm_end_request(struct request *clone, int error);
#endif /* _LINUX_DEVICE_MAPPER_H */
diff --git a/include/linux/mhl_8334.h b/include/linux/mhl_8334.h
index 71dec42..42ee81e 100644
--- a/include/linux/mhl_8334.h
+++ b/include/linux/mhl_8334.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -155,6 +155,8 @@
struct list_head list_cmd;
struct input_dev *input;
struct workqueue_struct *msc_send_workqueue;
+ struct workqueue_struct *mhl_workq;
+ struct work_struct mhl_intr_work;
u16 *rcp_key_code_tbl;
size_t rcp_key_code_tbl_len;
struct scrpd_struct scrpd;
diff --git a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
index 41ef3e3..143508f 100644
--- a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
@@ -472,11 +472,12 @@
COMPR_PLAYBACK_MIN_NUM_FRAGMENTS;
prtd->compr_cap.max_fragments =
COMPR_PLAYBACK_MAX_NUM_FRAGMENTS;
- prtd->compr_cap.num_codecs = 4;
+ prtd->compr_cap.num_codecs = 5;
prtd->compr_cap.codecs[0] = SND_AUDIOCODEC_MP3;
prtd->compr_cap.codecs[1] = SND_AUDIOCODEC_AAC;
prtd->compr_cap.codecs[2] = SND_AUDIOCODEC_AC3;
prtd->compr_cap.codecs[3] = SND_AUDIOCODEC_EAC3;
+ prtd->compr_cap.codecs[4] = SND_AUDIOCODEC_PCM;
}
static int msm_compr_send_media_format_block(struct snd_compr_stream *cstream,
@@ -486,8 +487,22 @@
struct msm_compr_audio *prtd = runtime->private_data;
struct asm_aac_cfg aac_cfg;
int ret = 0;
+ uint16_t bit_width = 16;
switch (prtd->codec) {
+ case FORMAT_LINEAR_PCM:
+ pr_debug("SND_AUDIOCODEC_PCM\n");
+ if (prtd->codec_param.codec.format == SNDRV_PCM_FORMAT_S24_LE)
+ bit_width = 24;
+ ret = q6asm_media_format_block_pcm_format_support(
+ prtd->audio_client,
+ prtd->sample_rate,
+ prtd->num_channels,
+ bit_width);
+ if (ret < 0)
+ pr_err("%s: CMD Format block failed\n", __func__);
+
+ break;
case FORMAT_MP3:
/* no media format block needed */
break;
@@ -744,6 +759,7 @@
if ((stream_index < MAX_NUMBER_OF_STREAMS && stream_index >= 0) &&
(prtd->gapless_state.stream_opened[stream_index])) {
+ prtd->gapless_state.stream_opened[stream_index] = 0;
spin_unlock_irqrestore(&prtd->lock, flags);
pr_debug(" close stream %d", NEXT_STREAM_ID(stream_id));
q6asm_stream_cmd(ac, CMD_CLOSE, NEXT_STREAM_ID(stream_id));
@@ -753,6 +769,7 @@
stream_index = STREAM_ARRAY_INDEX(stream_id);
if ((stream_index < MAX_NUMBER_OF_STREAMS && stream_index >= 0) &&
(prtd->gapless_state.stream_opened[stream_index])) {
+ prtd->gapless_state.stream_opened[stream_index] = 0;
spin_unlock_irqrestore(&prtd->lock, flags);
pr_debug("close stream %d", stream_id);
q6asm_stream_cmd(ac, CMD_CLOSE, stream_id);
@@ -826,6 +843,12 @@
pr_debug("%s: sample_rate %d\n", __func__, prtd->sample_rate);
switch (params->codec.id) {
+ case SND_AUDIOCODEC_PCM: {
+ pr_debug("SND_AUDIOCODEC_PCM\n");
+ prtd->codec = FORMAT_LINEAR_PCM;
+ break;
+ }
+
case SND_AUDIOCODEC_MP3: {
pr_debug("SND_AUDIOCODEC_MP3\n");
prtd->codec = FORMAT_MP3;
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index 32e6b2b..e72502c 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -1476,7 +1476,7 @@
msm_route_ext_ec_ref = AFE_PORT_INVALID;
break;
}
- if (voc_set_ext_ec_ref(msm_route_ext_ec_ref, state)) {
+ if (!voc_set_ext_ec_ref(msm_route_ext_ec_ref, state)) {
mutex_unlock(&routing_lock);
snd_soc_dapm_mux_update_power(widget, kcontrol, 1, mux, e);
} else {