Merge "ASoC: wcd9306: Update condition to disable HPH PA after impedance detection"
diff --git a/arch/arm/configs/msm8226-perf_defconfig b/arch/arm/configs/msm8226-perf_defconfig
index dd355af..6447dcc 100644
--- a/arch/arm/configs/msm8226-perf_defconfig
+++ b/arch/arm/configs/msm8226-perf_defconfig
@@ -176,6 +176,7 @@
 CONFIG_NETFILTER_XT_MATCH_STRING=y
 CONFIG_NETFILTER_XT_MATCH_TIME=y
 CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
 CONFIG_NF_CONNTRACK_IPV4=y
 CONFIG_IP_NF_IPTABLES=y
 CONFIG_IP_NF_MATCH_AH=y
diff --git a/arch/arm/configs/msm8226_defconfig b/arch/arm/configs/msm8226_defconfig
index 5f008d5..525def5 100644
--- a/arch/arm/configs/msm8226_defconfig
+++ b/arch/arm/configs/msm8226_defconfig
@@ -176,6 +176,7 @@
 CONFIG_NETFILTER_XT_MATCH_STRING=y
 CONFIG_NETFILTER_XT_MATCH_TIME=y
 CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
 CONFIG_NF_CONNTRACK_IPV4=y
 CONFIG_IP_NF_IPTABLES=y
 CONFIG_IP_NF_MATCH_AH=y
diff --git a/arch/arm/configs/msm8974-perf_defconfig b/arch/arm/configs/msm8974-perf_defconfig
index 913d798..7c299d7 100755
--- a/arch/arm/configs/msm8974-perf_defconfig
+++ b/arch/arm/configs/msm8974-perf_defconfig
@@ -192,6 +192,7 @@
 CONFIG_NETFILTER_XT_MATCH_STRING=y
 CONFIG_NETFILTER_XT_MATCH_TIME=y
 CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
 CONFIG_NF_CONNTRACK_IPV4=y
 CONFIG_IP_NF_IPTABLES=y
 CONFIG_IP_NF_MATCH_AH=y
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index d588fc6..e8a1cde 100755
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -199,6 +199,7 @@
 CONFIG_NETFILTER_XT_MATCH_STRING=y
 CONFIG_NETFILTER_XT_MATCH_TIME=y
 CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
 CONFIG_NF_CONNTRACK_IPV4=y
 CONFIG_IP_NF_IPTABLES=y
 CONFIG_IP_NF_MATCH_AH=y
diff --git a/arch/arm/mach-msm/qdsp6v2/msm_audio_ion.c b/arch/arm/mach-msm/qdsp6v2/msm_audio_ion.c
index df7760a..f19ad59 100644
--- a/arch/arm/mach-msm/qdsp6v2/msm_audio_ion.c
+++ b/arch/arm/mach-msm/qdsp6v2/msm_audio_ion.c
@@ -29,7 +29,7 @@
 	bool smmu_enabled;
 	bool audioheap_enabled;
 	struct iommu_group *group;
-	u32 domain_id;
+	int32_t domain_id;
 	struct iommu_domain *domain;
 };
 
diff --git a/arch/arm/mach-msm/qdsp6v2/voice_svc.c b/arch/arm/mach-msm/qdsp6v2/voice_svc.c
old mode 100644
new mode 100755
index 92b3003..5bf86dc
--- a/arch/arm/mach-msm/qdsp6v2/voice_svc.c
+++ b/arch/arm/mach-msm/qdsp6v2/voice_svc.c
@@ -56,8 +56,15 @@
 
 static struct voice_svc_device *voice_svc_dev;
 static struct class *voice_svc_class;
+static bool reg_dummy_sess;
+static void *dummy_q6_mvm;
+static void *dummy_q6_cvs;
 dev_t device_num;
 
+static int voice_svc_dummy_reg(void);
+static int32_t qdsp_dummy_apr_callback(struct apr_client_data *data,
+					void *priv);
+
 static int32_t qdsp_apr_callback(struct apr_client_data *data, void *priv)
 {
 	struct voice_svc_prvt *prtd;
@@ -127,6 +134,12 @@
 	return 0;
 }
 
+static int32_t qdsp_dummy_apr_callback(struct apr_client_data *data, void *priv)
+{
+	/* Do Nothing */
+	return 0;
+}
+
 static void voice_svc_update_hdr(struct voice_svc_cmd_request* apr_req_data,
 			    struct apr_data *aprdata,
 			    struct voice_svc_prvt *prtd)
@@ -223,6 +236,13 @@
 		goto done;
 	}
 
+	if (src_port == (APR_MAX_PORTS - 1)) {
+		pr_err("%s: SRC port reserved for dummy session\n", __func__);
+		pr_err("%s: Unable to register %s\n", __func__, svc);
+		ret = -EINVAL;
+		goto done;
+	}
+
 	*handle = apr_register("ADSP",
 		svc, qdsp_apr_callback,
 		((src_port) << 8 | 0x0001),
@@ -449,6 +469,37 @@
 	return ret;
 }
 
+static int voice_svc_dummy_reg(void)
+{
+	uint32_t src_port = APR_MAX_PORTS - 1;
+
+	pr_debug("%s\n", __func__);
+	dummy_q6_mvm = apr_register("ADSP", "MVM",
+				qdsp_dummy_apr_callback,
+				src_port,
+				NULL);
+	if (dummy_q6_mvm == NULL) {
+		pr_err("%s: Unable to register dummy MVM\n", __func__);
+		goto err;
+	}
+
+	dummy_q6_cvs = apr_register("ADSP", "CVS",
+				qdsp_dummy_apr_callback,
+				src_port,
+				NULL);
+	if (dummy_q6_cvs == NULL) {
+		pr_err("%s: Unable to register dummy CVS\n", __func__);
+		goto err;
+	}
+	return 0;
+err:
+	if (dummy_q6_mvm != NULL) {
+		apr_deregister(dummy_q6_mvm);
+		dummy_q6_mvm = NULL;
+	}
+	return -EINVAL;
+}
+
 static int voice_svc_open(struct inode *inode, struct file *file)
 {
 	struct voice_svc_prvt *prtd = NULL;
@@ -472,6 +523,16 @@
 
 	file->private_data = (void*)prtd;
 
+	/*
+	 * The current APR implementation doesn't support session-based
+	 * registration of multiple services. apr_deregister() resets the
+	 * destination and client IDs to zero when it is called for a
+	 * single service instance. To avoid this, register a dummy
+	 * session for additional services.
+	 */
+	if (!reg_dummy_sess) {
+		voice_svc_dummy_reg();
+		reg_dummy_sess = true;
+	}
 	return 0;
 }
 
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index eb8a75b..831f033 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -1483,7 +1483,9 @@
 		*(uint16_t *)(driver->apps_rsp_buf + 94) = MSG_SSID_21_LAST;
 		*(uint16_t *)(driver->apps_rsp_buf + 96) = MSG_SSID_22;
 		*(uint16_t *)(driver->apps_rsp_buf + 98) = MSG_SSID_22_LAST;
-		encode_rsp_and_send(99);
+		*(uint16_t *)(driver->apps_rsp_buf + 100) = MSG_SSID_23;
+		*(uint16_t *)(driver->apps_rsp_buf + 102) = MSG_SSID_23_LAST;
+		encode_rsp_and_send(103);
 		return 0;
 	}
 	/* Check for Apps Only Respond to Get Subsys Build mask */
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 68616b8..87938e0 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -254,6 +254,8 @@
 config HW_RANDOM_MSM
 	tristate "Qualcomm MSM Random Number Generator support"
 	depends on HW_RANDOM && ARCH_MSM
+	select CRYPTO_AES
+	select CRYPTO_ECB
 	default n
 	---help---
 	  This driver provides kernel-side support for the Random Number
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index c24305d..d0369fd 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -22,4 +22,4 @@
 obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
 obj-$(CONFIG_HW_RANDOM_PICOXCELL) += picoxcell-rng.o
 obj-$(CONFIG_HW_RANDOM_PPC4XX) += ppc4xx-rng.o
-obj-$(CONFIG_HW_RANDOM_MSM) += msm_rng.o
+obj-$(CONFIG_HW_RANDOM_MSM) += msm_rng.o fips_drbg.o ctr_drbg.o msm_fips_selftest.o
diff --git a/drivers/char/hw_random/ctr_drbg.c b/drivers/char/hw_random/ctr_drbg.c
new file mode 100644
index 0000000..d8da08e
--- /dev/null
+++ b/drivers/char/hw_random/ctr_drbg.c
@@ -0,0 +1,938 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/types.h>
+#include <mach/msm_bus.h>
+#include <linux/qrng.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+
+#include <linux/errno.h>
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+#include <linux/string.h>
+#include <linux/platform_data/qcom_crypto_device.h>
+
+#include "ctr_drbg.h"
+#include "fips_drbg.h"
+
+#define E_FAILURE 0xffff
+#define E_SUCCESS 0
+
+#define AES128_KEY_SIZE   (16)
+#define AES128_BLOCK_SIZE (16)
+
+#define AES_TEXT_LENGTH (64)
+#define MAX_TEXT_LENGTH (2048)
+
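+/*
+ * Fixed derivation-function key 0x00 0x01 .. 0x0f, as specified for
+ * Block_Cipher_df in NIST SP 800-90.
+ */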
+uint8_t df_initial_k[16] = "\x0\x1\x2\x3\x4\x5\x6\x7\x8\x9\xa\xb\xc\xd\xe\xf";
+
+static void _crypto_cipher_test_complete(struct crypto_async_request *req,
+				int err)
+{
+	struct msm_ctr_tcrypt_result_s *res = NULL;
+
+	if (!req)
+		return;
+
+	res = req->data;
+	if (!res)
+		return;
+
+	if (err == -EINPROGRESS)
+		return;
+	res->err = err;
+	complete(&res->completion);
+}
+
+static int ctr_aes_init(struct ctr_drbg_ctx_s *ctx)
+{
+	int status = 0;
+
+	ctx->aes_ctx.tfm = crypto_alloc_ablkcipher("qcom-ecb(aes)", 0, 0);
+	if (IS_ERR(ctx->aes_ctx.tfm) || (NULL == ctx->aes_ctx.tfm)) {
+		pr_info("%s: qcom-ecb(aes) failed", __func__);
+		ctx->aes_ctx.tfm = crypto_alloc_ablkcipher("ecb(aes)", 0, 0);
+		pr_info("ctx->aes_ctx.tfm = %p\n", ctx->aes_ctx.tfm);
+		if (IS_ERR(ctx->aes_ctx.tfm) || (NULL == ctx->aes_ctx.tfm)) {
+			pr_err("%s: qcom-ecb(aes) failed\n", __func__);
+			status = -E_FAILURE;
+			goto out;
+		}
+	}
+
+	ctx->aes_ctx.req = ablkcipher_request_alloc(ctx->aes_ctx.tfm,
+							GFP_KERNEL);
+	if (IS_ERR(ctx->aes_ctx.req) || (NULL == ctx->aes_ctx.req)) {
+		pr_info("%s: Failed to allocate request.\n", __func__);
+		status = -E_FAILURE;
+		goto clr_tfm;
+	}
+
+	ablkcipher_request_set_callback(ctx->aes_ctx.req,
+				CRYPTO_TFM_REQ_MAY_BACKLOG,
+				_crypto_cipher_test_complete,
+				&ctx->aes_ctx.result);
+
+	memset(&ctx->aes_ctx.input, 0, sizeof(struct msm_ctr_buffer_s));
+	memset(&ctx->aes_ctx.output, 0, sizeof(struct msm_ctr_buffer_s));
+
+	/* Allocate memory. */
+	ctx->aes_ctx.input.virt_addr  = kmalloc(AES128_BLOCK_SIZE,
+						GFP_KERNEL | __GFP_DMA);
+	if (NULL == ctx->aes_ctx.input.virt_addr) {
+		pr_debug("%s: Failed to input memory.\n", __func__);
+		status = -E_FAILURE;
+		goto clr_req;
+	}
+	ctx->aes_ctx.output.virt_addr = kmalloc(AES128_BLOCK_SIZE,
+						GFP_KERNEL | __GFP_DMA);
+	if (NULL == ctx->aes_ctx.output.virt_addr) {
+		pr_debug("%s: Failed to output memory.\n", __func__);
+		status = -E_FAILURE;
+		goto clr_input;
+	}
+
+	/* Set up the AES context for the derivation function (DF). */
+	ctx->df_aes_ctx.tfm = crypto_alloc_ablkcipher("qcom-ecb(aes)", 0, 0);
+	if ((NULL == ctx->df_aes_ctx.tfm) || IS_ERR(ctx->df_aes_ctx.tfm)) {
+		pr_info("%s: qcom-ecb(aes) failed", __func__);
+		ctx->df_aes_ctx.tfm = crypto_alloc_ablkcipher("ecb(aes)", 0, 0);
+		if (IS_ERR(ctx->df_aes_ctx.tfm) ||
+			(NULL == ctx->df_aes_ctx.tfm)) {
+			pr_err("%s: ecb(aes) failed", __func__);
+			status = -E_FAILURE;
+			goto clr_output;
+		}
+	}
+
+	ctx->df_aes_ctx.req = ablkcipher_request_alloc(ctx->df_aes_ctx.tfm,
+							GFP_KERNEL);
+	if (IS_ERR(ctx->df_aes_ctx.req) || (NULL == ctx->df_aes_ctx.req)) {
+		pr_debug(": Failed to allocate request.\n");
+		status = -E_FAILURE;
+		goto clr_df_tfm;
+	}
+
+	ablkcipher_request_set_callback(ctx->df_aes_ctx.req,
+				CRYPTO_TFM_REQ_MAY_BACKLOG,
+				_crypto_cipher_test_complete,
+				&ctx->df_aes_ctx.result);
+
+	memset(&ctx->df_aes_ctx.input, 0, sizeof(struct msm_ctr_buffer_s));
+	memset(&ctx->df_aes_ctx.output, 0, sizeof(struct msm_ctr_buffer_s));
+
+	ctx->df_aes_ctx.input.virt_addr  = kmalloc(AES128_BLOCK_SIZE,
+						GFP_KERNEL | __GFP_DMA);
+	if (NULL == ctx->df_aes_ctx.input.virt_addr) {
+		pr_debug(": Failed to input memory.\n");
+		status = -E_FAILURE;
+		goto clr_df_req;
+	}
+
+	ctx->df_aes_ctx.output.virt_addr = kmalloc(AES128_BLOCK_SIZE,
+						GFP_KERNEL | __GFP_DMA);
+	if (NULL == ctx->df_aes_ctx.output.virt_addr) {
+		pr_debug(": Failed to output memory.\n");
+		status = -E_FAILURE;
+		goto clr_df_input;
+	}
+
+	goto out;
+
+clr_df_input:
+	if (ctx->df_aes_ctx.input.virt_addr) {
+		kzfree(ctx->df_aes_ctx.input.virt_addr);
+		ctx->df_aes_ctx.input.virt_addr = NULL;
+	}
+clr_df_req:
+	if (ctx->df_aes_ctx.req) {
+		ablkcipher_request_free(ctx->df_aes_ctx.req);
+		ctx->df_aes_ctx.req = NULL;
+	}
+clr_df_tfm:
+	if (ctx->df_aes_ctx.tfm) {
+		crypto_free_ablkcipher(ctx->df_aes_ctx.tfm);
+		ctx->df_aes_ctx.tfm = NULL;
+	}
+clr_output:
+	if (ctx->aes_ctx.output.virt_addr) {
+		kzfree(ctx->aes_ctx.output.virt_addr);
+		ctx->aes_ctx.output.virt_addr = NULL;
+	}
+clr_input:
+	if (ctx->aes_ctx.input.virt_addr) {
+		kzfree(ctx->aes_ctx.input.virt_addr);
+		ctx->aes_ctx.input.virt_addr = NULL;
+	}
+clr_req:
+	if (ctx->aes_ctx.req) {
+		ablkcipher_request_free(ctx->aes_ctx.req);
+		ctx->aes_ctx.req = NULL;
+	}
+clr_tfm:
+	if (ctx->aes_ctx.tfm) {
+		crypto_free_ablkcipher(ctx->aes_ctx.tfm);
+		ctx->aes_ctx.tfm = NULL;
+	}
+out:
+	return status;
+}
+
+/*
+ * Increments the V field in *ctx
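+ * (for example, a V ending in ...00 ff increments to ...01 00, and a
+ * V of all 0xff bytes wraps to all zeroes, since the carry propagates
+ * big-endian from the last byte)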
+ */
+static void increment_V(struct ctr_drbg_ctx_s *ctx)
+{
+	unsigned sum = 1;
+	int i;
+	uint8_t *p = &ctx->seed.key_V.V[0];
+
+	/*
+	 * To make known answer tests work, this has to be done big-endian,
+	 * so we just do it by bytes.
+	 * Since we are using AES-128, V is 16 bytes (one block).
+	 */
+	for (i = 15; sum != 0 && i >= 0; --i) {
+		sum += p[i];
+		p[i] = (sum & 0xff);
+		sum >>= 8;
+	}
+
+	return;
+}
+
+/*
+ * The NIST update function.  It updates the key and V to new values
+ * (to prevent backtracking) and optionally stirs in data.  data may
+ * be NULL; otherwise *data is from 0 to 256 bits long.  No changes
+ * are made to *ctx until it is assured that there will be no
+ * failures.  Note that data_len is in bytes.  (That may not be the
+ * official NIST
+ * recommendation, but I do it anyway; they say "or equivalent" and
+ * this is equivalent enough.)
+ */
+static enum ctr_drbg_status_t
+update(struct ctr_drbg_ctx_s *ctx, const uint8_t *data, size_t data_len)
+{
+	uint8_t temp[32];
+	unsigned int i;
+	int rc;
+	struct scatterlist sg_in, sg_out;
+
+	for (i = 0; i < 2; ++i) {
+		increment_V(ctx);
+		init_completion(&ctx->aes_ctx.result.completion);
+
+		/*
+		 * Note: personalize these called routines for
+		 * specific testing.
+		 */
+		memcpy(ctx->aes_ctx.input.virt_addr,
+			ctx->seed.key_V.V,
+			CTR_DRBG_BLOCK_LEN_BYTES);
+
+		crypto_ablkcipher_clear_flags(ctx->aes_ctx.tfm, ~0);
+
+		/* Encrypt some clear text! */
+
+		sg_init_one(&sg_in,
+			ctx->aes_ctx.input.virt_addr,
+			AES128_BLOCK_SIZE);
+		sg_init_one(&sg_out,
+			ctx->aes_ctx.output.virt_addr,
+			AES128_BLOCK_SIZE);
+		ablkcipher_request_set_crypt(ctx->aes_ctx.req,
+						&sg_in,
+						&sg_out,
+						CTR_DRBG_BLOCK_LEN_BYTES,
+						NULL);
+
+		rc = crypto_ablkcipher_encrypt(ctx->aes_ctx.req);
+
+		switch (rc) {
+		case 0:
+			break;
+		case -EINPROGRESS:
+		case -EBUSY:
+			rc = wait_for_completion_interruptible(
+				&ctx->aes_ctx.result.completion);
+			if (!rc && !ctx->aes_ctx.result.err) {
+				INIT_COMPLETION(ctx->aes_ctx.result.completion);
+				break;
+			}
+		/* fall through */
+		default:
+			pr_debug("crypto_ablkcipher_encrypt returned");
+			pr_debug(" with %d result %d on iteration\n",
+				rc,
+				ctx->aes_ctx.result.err);
+			break;
+		}
+
+		init_completion(&ctx->aes_ctx.result.completion);
+
+		memcpy(temp + AES128_BLOCK_SIZE * i,
+			ctx->aes_ctx.output.virt_addr,
+			AES128_BLOCK_SIZE);
+	}
+
+	if (data_len > 0)
+		pr_debug("in upadte, data_len = %zu\n", data_len);
+
+	for (i = 0; i < data_len; ++i)
+		ctx->seed.as_bytes[i] = temp[i] ^ data[i];
+
+	/* now copy the rest of temp to key and V */
+	if (32 > data_len) {
+		memcpy(ctx->seed.as_bytes + data_len,
+			temp + data_len,
+			32 - data_len);
+	}
+
+	memset(temp, 0, 32);
+	return CTR_DRBG_SUCCESS;
+}
+
+/*
+ * Reseeds the CTR_DRBG instance with entropy.  entropy_len_bits must
+ * be exactly 256.
+ */
+enum ctr_drbg_status_t ctr_drbg_reseed(struct ctr_drbg_ctx_s *ctx,
+					const void     *entropy,
+					size_t         entropy_len_bits)
+{
+	enum ctr_drbg_status_t update_rv;
+	uint8_t           seed_material[32];
+	int               rc;
+
+	if (ctx == NULL || entropy == NULL)
+		return CTR_DRBG_INVALID_ARG;
+
+	update_rv = block_cipher_df(ctx,
+				(uint8_t *)entropy,
+				(entropy_len_bits / 8),
+				seed_material,
+				32
+				);
+	if (CTR_DRBG_SUCCESS != update_rv) {
+		memset(seed_material, 0, 32);
+		return CTR_DRBG_GENERAL_ERROR;
+	}
+
+	rc = crypto_ablkcipher_setkey(ctx->aes_ctx.tfm,
+				ctx->seed.key_V.key,
+				AES128_KEY_SIZE
+				);
+	if (rc) {
+		memset(seed_material, 0, 32);
+		pr_debug("set-key in Instantiate failed, returns with %d", rc);
+		return CTR_DRBG_GENERAL_ERROR;
+	}
+
+	pr_debug("ctr_drbg_reseed, to call update\n");
+	update_rv = update(ctx, (const uint8_t *)seed_material, 32);
+	pr_debug("ctr_drbg_reseed, after called update\n");
+	if (update_rv != CTR_DRBG_SUCCESS) {
+		memset(seed_material, 0, 32);
+		return update_rv;
+	}
+	ctx->reseed_counter = 1;  /* think 0 but SP 800-90 says 1 */
+
+	memset(seed_material, 0, 32);
+
+	return CTR_DRBG_SUCCESS;
+}
+
+/*
+ * The NIST instantiate function.  entropy_len_bits must be exactly
+ * 256.  After reseed_interval generate requests, further generate
+ * requests will fail until the CTR_DRBG instance is reseeded.  As
+ * per NIST SP
+ * 800-90, an error is returned if reseed_interval > 2^48.
+ */
+
+enum ctr_drbg_status_t
+ctr_drbg_instantiate(struct ctr_drbg_ctx_s *ctx,
+			const uint8_t *entropy,
+			size_t entropy_len_bits,
+			const uint8_t *nonce,
+			size_t nonce_len_bits,
+			unsigned long long reseed_interval)
+{
+
+	enum ctr_drbg_status_t update_rv;
+	uint8_t           seed_material[32];
+	uint8_t           df_input[32];
+	int               rc;
+
+	if (ctx == NULL || entropy == NULL || nonce == NULL)
+		return CTR_DRBG_INVALID_ARG;
+	if (((nonce_len_bits / 8) + (entropy_len_bits / 8)) > 32) {
+		pr_info("\nentropy_len_bits + nonce_len_bits is too long!");
+		pr_info("\nnonce len: %zu, entropy: %zu\n",
+			nonce_len_bits, entropy_len_bits);
+		return CTR_DRBG_INVALID_ARG + 1;
+	}
+
+	if (reseed_interval > (1ULL << 48))
+		return CTR_DRBG_INVALID_ARG + 2;
+
+	if (ctr_aes_init(ctx))
+		return CTR_DRBG_GENERAL_ERROR;
+
+	memset(ctx->seed.as_bytes, 0, sizeof(ctx->seed.as_bytes));
+	memcpy(df_input, (uint8_t *)entropy, entropy_len_bits / 8);
+	memcpy(df_input + (entropy_len_bits / 8), nonce, nonce_len_bits / 8);
+
+	update_rv = block_cipher_df(ctx, df_input,
+				(entropy_len_bits + nonce_len_bits) / 8,
+				seed_material, 32);
+	memset(df_input, 0, 32);
+
+	if (CTR_DRBG_SUCCESS != update_rv) {
+		pr_debug("block_cipher_df failed, returns %d", update_rv);
+		memset(seed_material, 0, 32);
+		return CTR_DRBG_GENERAL_ERROR;
+	}
+
+	rc = crypto_ablkcipher_setkey(ctx->aes_ctx.tfm,
+				ctx->seed.key_V.key,
+				AES128_KEY_SIZE);
+	if (rc) {
+		pr_debug("crypto_ablkcipher_setkey API failed: %d", rc);
+		memset(seed_material, 0, 32);
+		return CTR_DRBG_GENERAL_ERROR;
+	}
+	update_rv = update(ctx, (const uint8_t *)seed_material, 32);
+	if (update_rv != CTR_DRBG_SUCCESS) {
+		memset(seed_material, 0, 32);
+		return update_rv;
+	}
+
+	ctx->reseed_counter = 1;  /* think 0 but SP 800-90 says 1 */
+	ctx->reseed_interval = reseed_interval;
+
+	memset(seed_material, 0, 32);
+
+	pr_debug(" return from ctr_drbg_instantiate\n");
+
+	return CTR_DRBG_SUCCESS;
+}
+
+/*
+ * Generate random bits. len_bits is specified in bits, as required by
+ * NIST SP800-90.  It fails with CTR_DRBG_NEEDS_RESEED if the number
+ * of generates since instantiation or the last reseed >= the
+ * reseed_interval supplied at instantiation.  len_bits must be a
+ * multiple of 8.  len_bits must not exceed 2^19, as per NIST SP
+ * 800-90. Optionally stirs in additional_input which is
+ * additional_input_len_bits long, and is silently rounded up to a
+ * multiple of 8.  CTR_DRBG_INVALID_ARG is returned if any pointer arg
+ * is null and the corresponding length is non-zero or if
+ * additional_input_len_bits > 256.
+ */
+enum ctr_drbg_status_t
+ctr_drbg_generate_w_data(struct ctr_drbg_ctx_s *ctx,
+			void   *additional_input,
+			size_t additional_input_len_bits,
+			void   *buffer,
+			size_t len_bits)
+{
+	size_t total_blocks = (len_bits + 127) / 128;
+	enum ctr_drbg_status_t update_rv;
+	int rv = 0;
+	size_t i;
+	int rc;
+	struct scatterlist sg_in, sg_out;
+
+	if (ctx == NULL)
+		return CTR_DRBG_INVALID_ARG;
+	if (buffer == NULL && len_bits > 0)
+		return CTR_DRBG_INVALID_ARG;
+	if (len_bits % 8 != 0)
+		return CTR_DRBG_INVALID_ARG;
+	if (len_bits > (1<<19))
+		return CTR_DRBG_INVALID_ARG;
+
+	if ((additional_input == NULL && additional_input_len_bits > 0) ||
+		additional_input_len_bits > CTR_DRBG_SEED_LEN_BITS)
+		return CTR_DRBG_INVALID_ARG;
+	if (ctx->reseed_counter > ctx->reseed_interval)
+		return CTR_DRBG_NEEDS_RESEED;
+
+	rc = crypto_ablkcipher_setkey(ctx->aes_ctx.tfm,
+				ctx->seed.key_V.key,
+				AES128_KEY_SIZE);
+	if (rc) {
+		pr_debug("crypto_ablkcipher_setkey API failed: %d", rc);
+		return CTR_DRBG_GENERAL_ERROR;
+	}
+
+	if (!ctx->continuous_test_started) {
+		increment_V(ctx);
+		init_completion(&ctx->aes_ctx.result.completion);
+		crypto_ablkcipher_clear_flags(ctx->aes_ctx.tfm, ~0);
+		memcpy(ctx->aes_ctx.input.virt_addr, ctx->seed.key_V.V, 16);
+		sg_init_one(&sg_in, ctx->aes_ctx.input.virt_addr, 16);
+		sg_init_one(&sg_out, ctx->aes_ctx.output.virt_addr, 16);
+		ablkcipher_request_set_crypt(ctx->aes_ctx.req, &sg_in, &sg_out,
+					CTR_DRBG_BLOCK_LEN_BYTES, NULL);
+		rc = crypto_ablkcipher_encrypt(ctx->aes_ctx.req);
+		switch (rc) {
+		case 0:
+			break;
+		case -EINPROGRESS:
+		case -EBUSY:
+			rc = wait_for_completion_interruptible(
+				&ctx->aes_ctx.result.completion);
+			if (!rc && !ctx->aes_ctx.result.err) {
+				INIT_COMPLETION(ctx->aes_ctx.result.completion);
+				break;
+			}
+			/* fall through */
+		default:
+			pr_debug(":crypto_ablkcipher_encrypt returned with %d result %d on iteration\n",
+				rc,
+				ctx->aes_ctx.result.err);
+			break;
+		}
+		init_completion(&ctx->aes_ctx.result.completion);
+
+		memcpy(ctx->prev_drn, ctx->aes_ctx.output.virt_addr, 16);
+		ctx->continuous_test_started = 1;
+	}
+
+	/* Generate the output */
+	for (i = 0; i < total_blocks; ++i) {
+		/* Increment the counter */
+		increment_V(ctx);
+		if (((len_bits % 128) != 0) && (i == (total_blocks - 1))) {
+			/* last block and it's a fragment */
+			init_completion(&ctx->aes_ctx.result.completion);
+
+			/*
+			 * Note: personalize these called routines for
+			 * specific testing.
+			 */
+
+			crypto_ablkcipher_clear_flags(ctx->aes_ctx.tfm, ~0);
+
+			/* Encrypt some clear text! */
+
+			memcpy(ctx->aes_ctx.input.virt_addr,
+				ctx->seed.key_V.V,
+				16);
+			sg_init_one(&sg_in,
+				ctx->aes_ctx.input.virt_addr,
+				16);
+			sg_init_one(&sg_out,
+				ctx->aes_ctx.output.virt_addr,
+				16);
+			ablkcipher_request_set_crypt(ctx->aes_ctx.req,
+				&sg_in,
+				&sg_out,
+				CTR_DRBG_BLOCK_LEN_BYTES,
+				NULL);
+
+			rc = crypto_ablkcipher_encrypt(ctx->aes_ctx.req);
+
+			switch (rc) {
+			case 0:
+				break;
+			case -EINPROGRESS:
+			case -EBUSY:
+				rc = wait_for_completion_interruptible(
+					&ctx->aes_ctx.result.completion);
+				if (!rc && !ctx->aes_ctx.result.err) {
+					INIT_COMPLETION(
+						ctx->aes_ctx.result.completion);
+					break;
+				}
+				/* fall through */
+			default:
+				break;
+			}
+
+			init_completion(&ctx->aes_ctx.result.completion);
+
+			if (!memcmp(ctx->prev_drn,
+					ctx->aes_ctx.output.virt_addr,
+					16))
+				return CTR_DRBG_GENERAL_ERROR;
+			else
+				memcpy(ctx->prev_drn,
+					ctx->aes_ctx.output.virt_addr,
+					16);
+			rv = 0;
+			memcpy((uint8_t *)buffer + 16*i,
+				ctx->aes_ctx.output.virt_addr,
+				(len_bits % 128)/8);
+		} else {
+			/* normal case: encrypt direct to target buffer */
+
+			init_completion(&ctx->aes_ctx.result.completion);
+
+			/*
+			 * Note: personalize these called routines for
+			 * specific testing.
+			 */
+
+			crypto_ablkcipher_clear_flags(ctx->aes_ctx.tfm, ~0);
+
+			/* Encrypt some clear text! */
+
+			memcpy(ctx->aes_ctx.input.virt_addr,
+				ctx->seed.key_V.V,
+				16);
+			sg_init_one(&sg_in,
+				ctx->aes_ctx.input.virt_addr,
+				16);
+			sg_init_one(&sg_out,
+				ctx->aes_ctx.output.virt_addr,
+				16);
+			ablkcipher_request_set_crypt(ctx->aes_ctx.req,
+						&sg_in,
+						&sg_out,
+						CTR_DRBG_BLOCK_LEN_BYTES,
+						NULL);
+
+			rc = crypto_ablkcipher_encrypt(ctx->aes_ctx.req);
+
+			switch (rc) {
+			case 0:
+				break;
+			case -EINPROGRESS:
+			case -EBUSY:
+				rc = wait_for_completion_interruptible(
+					&ctx->aes_ctx.result.completion);
+				if (!rc && !ctx->aes_ctx.result.err) {
+					INIT_COMPLETION(
+					ctx->aes_ctx.result.completion);
+					break;
+				}
+				/* fall through */
+			default:
+				break;
+			}
+
+			if (!memcmp(ctx->prev_drn,
+				ctx->aes_ctx.output.virt_addr,
+				16))
+				return CTR_DRBG_GENERAL_ERROR;
+			else
+				memcpy(ctx->prev_drn,
+					ctx->aes_ctx.output.virt_addr,
+					16);
+
+			memcpy((uint8_t *)buffer + 16*i,
+				ctx->aes_ctx.output.virt_addr,
+				16);
+			rv = 0;
+		}
+	}
+
+	update_rv = update(ctx,
+			additional_input,
+			(additional_input_len_bits + 7) / 8); /* round up */
+	if (update_rv != CTR_DRBG_SUCCESS)
+		return update_rv;
+
+	ctx->reseed_counter += 1;
+
+	return CTR_DRBG_SUCCESS;
+}
+
+/*
+ * Generate random bits, but with no provided data. See notes on
+ * ctr_drbg_generate_w_data()
+ */
+enum ctr_drbg_status_t
+ctr_drbg_generate(struct ctr_drbg_ctx_s *ctx,
+		void *buffer,
+		size_t len_bits)
+
+{
+	return ctr_drbg_generate_w_data(ctx, NULL, 0, buffer, len_bits);
+}
+
+void ctr_aes_deinit(struct ctr_drbg_ctx_s *ctx)
+{
+	if (ctx->aes_ctx.req) {
+		ablkcipher_request_free(ctx->aes_ctx.req);
+		ctx->aes_ctx.req = NULL;
+	}
+	if (ctx->aes_ctx.tfm) {
+		crypto_free_ablkcipher(ctx->aes_ctx.tfm);
+		ctx->aes_ctx.tfm = NULL;
+	}
+	if (ctx->aes_ctx.input.virt_addr) {
+		kzfree(ctx->aes_ctx.input.virt_addr);
+		ctx->aes_ctx.input.virt_addr = NULL;
+	}
+	if (ctx->aes_ctx.output.virt_addr) {
+		kzfree(ctx->aes_ctx.output.virt_addr);
+		ctx->aes_ctx.output.virt_addr = NULL;
+	}
+	if (ctx->df_aes_ctx.req) {
+		ablkcipher_request_free(ctx->df_aes_ctx.req);
+		ctx->df_aes_ctx.req = NULL;
+	}
+	if (ctx->df_aes_ctx.tfm) {
+		crypto_free_ablkcipher(ctx->df_aes_ctx.tfm);
+		ctx->df_aes_ctx.tfm = NULL;
+	}
+	if (ctx->df_aes_ctx.input.virt_addr) {
+		kzfree(ctx->df_aes_ctx.input.virt_addr);
+		ctx->df_aes_ctx.input.virt_addr = NULL;
+	}
+	if (ctx->df_aes_ctx.output.virt_addr) {
+		kzfree(ctx->df_aes_ctx.output.virt_addr);
+		ctx->df_aes_ctx.output.virt_addr = NULL;
+	}
+
+}
+
+/*
+ * Zeroizes the context structure. In some future implementation it
+ * could also free resources.  So do call it.
+ */
+void
+ctr_drbg_uninstantiate(struct ctr_drbg_ctx_s *ctx)
+{
+	ctr_aes_deinit(ctx);
+	memset(ctx, 0, sizeof(*ctx));
+}
+
+/*
+ * BCC helper for the block cipher derivation function, used to
+ * condition biased entropy input.
+ */
+enum ctr_drbg_status_t df_bcc_func(struct ctr_drbg_ctx_s *ctx,
+		uint8_t *key,
+		uint8_t *input,
+		uint32_t input_size,
+		uint8_t *output)
+{
+	enum ctr_drbg_status_t ret_val = CTR_DRBG_SUCCESS;
+	uint8_t *p;
+	int rc;
+	int i;
+	int n;
+	struct scatterlist sg_in, sg_out;
+
+	if (0 != (input_size % CTR_DRBG_BLOCK_LEN_BYTES))
+		return CTR_DRBG_INVALID_ARG;
+
+	n = input_size / CTR_DRBG_BLOCK_LEN_BYTES;
+
+	for (i = 0; i < CTR_DRBG_BLOCK_LEN_BYTES; i++)
+		ctx->df_aes_ctx.output.virt_addr[i] = 0;
+
+	rc = crypto_ablkcipher_setkey(ctx->df_aes_ctx.tfm,
+					key,
+					AES128_KEY_SIZE);
+	if (rc) {
+		pr_debug("crypto_ablkcipher_setkey API failed: %d\n", rc);
+		return CTR_DRBG_GENERAL_ERROR;
+	}
+
+	p = input;
+	while (n > 0) {
+		for (i = 0; i < CTR_DRBG_BLOCK_LEN_BYTES; i++, p++)
+			ctx->df_aes_ctx.input.virt_addr[i] =
+				ctx->df_aes_ctx.output.virt_addr[i] ^ (*p);
+
+		init_completion(&ctx->df_aes_ctx.result.completion);
+
+		/*
+		 * Note: personalize these called routines for
+		 * specific testing.
+		 */
+
+		crypto_ablkcipher_clear_flags(ctx->df_aes_ctx.tfm, ~0);
+
+		/* Encrypt some clear text! */
+
+		sg_init_one(&sg_in, ctx->df_aes_ctx.input.virt_addr, 16);
+		sg_init_one(&sg_out, ctx->df_aes_ctx.output.virt_addr, 16);
+
+		ablkcipher_request_set_crypt(ctx->df_aes_ctx.req,
+					&sg_in,
+					&sg_out,
+					CTR_DRBG_BLOCK_LEN_BYTES,
+					NULL);
+
+		rc = crypto_ablkcipher_encrypt(ctx->df_aes_ctx.req);
+
+		switch (rc) {
+		case 0:
+			break;
+		case -EINPROGRESS:
+		case -EBUSY:
+			rc = wait_for_completion_interruptible(
+				&ctx->df_aes_ctx.result.completion);
+			if (!rc && !ctx->df_aes_ctx.result.err) {
+				INIT_COMPLETION(
+				ctx->df_aes_ctx.result.completion);
+				break;
+			}
+			/* fall through */
+		default:
+			break;
+		}
+
+		init_completion(&ctx->df_aes_ctx.result.completion);
+		n--;
+	}
+
+	for (i = 0; i < CTR_DRBG_BLOCK_LEN_BYTES; i++)
+		output[i] = ctx->df_aes_ctx.output.virt_addr[i];
+
+	return ret_val;
+}
+
+/* output_size must be <= 512 bits (<= 64 bytes) */
+enum ctr_drbg_status_t
+block_cipher_df(struct ctr_drbg_ctx_s *ctx,
+		const uint8_t *input,
+		uint32_t input_size,
+		uint8_t *output,
+		uint32_t output_size)
+{
+	enum ctr_drbg_status_t ret_val = CTR_DRBG_SUCCESS;
+	uint32_t          s_len = 0;
+	uint32_t          s_pad_len = 0;
+	uint8_t           temp[32];
+	uint32_t          out_len = 0;
+	uint8_t           siv_string[64];
+	uint8_t          *p_s_string = NULL;
+	int               rc;
+	struct scatterlist sg_in, sg_out;
+
+	if (output_size > 64)
+		return CTR_DRBG_INVALID_ARG;
+
+	s_len = input_size + 9;
+
+	s_pad_len = s_len % 16;
+
+	if (0 != s_pad_len)
+		s_len += (16 - s_pad_len);
+
+	/* add the length of IV */
+	s_len += 16;
+
+	if (s_len > 64)
+		pr_debug("error! s_len is too big!!!!!!!!!!!!\n");
+
+	memset(siv_string, 0, 64);
+
+	p_s_string = siv_string + 16;
+
+	p_s_string[3] = input_size;
+	p_s_string[7] = output_size;
+	memcpy(p_s_string + 8, input, input_size);
+	p_s_string[8 + input_size] = 0x80;
+	if (0 < s_pad_len)
+		memset(p_s_string + 9 + input_size, '\0', s_pad_len);
+
+	ret_val = df_bcc_func(ctx, df_initial_k, siv_string, s_len, temp);
+
+	if (CTR_DRBG_SUCCESS != ret_val) {
+		pr_debug("df_bcc_func failed, returned %d", ret_val);
+		goto out;
+	}
+
+	siv_string[3] = 0x1;
+	ret_val = df_bcc_func(ctx, df_initial_k, siv_string, s_len, temp + 16);
+
+	if (CTR_DRBG_SUCCESS != ret_val)
+		goto out;
+
+	out_len = 0;
+	rc = crypto_ablkcipher_setkey(ctx->df_aes_ctx.tfm,
+				temp,
+				AES128_KEY_SIZE);
+	if (rc) {
+		pr_debug("crypto_ablkcipher_setkey API failed: %d", rc);
+		goto out;
+	}
+	memcpy(ctx->df_aes_ctx.input.virt_addr, temp + 16, 16);
+
+	while (out_len < output_size) {
+
+		init_completion(&ctx->df_aes_ctx.result.completion);
+
+		/*
+		 * Note: personalize these called routines for
+		 * specific testing.
+		 */
+
+		crypto_ablkcipher_clear_flags(ctx->df_aes_ctx.tfm, ~0);
+
+		/* Encrypt some clear text! */
+
+		sg_init_one(&sg_in, ctx->df_aes_ctx.input.virt_addr, 16);
+		sg_init_one(&sg_out, ctx->df_aes_ctx.output.virt_addr, 16);
+		ablkcipher_request_set_crypt(ctx->df_aes_ctx.req,
+					&sg_in,
+					&sg_out,
+					CTR_DRBG_BLOCK_LEN_BYTES,
+					NULL);
+
+		rc = crypto_ablkcipher_encrypt(ctx->df_aes_ctx.req);
+
+		switch (rc) {
+		case 0:
+			break;
+		case -EINPROGRESS:
+		case -EBUSY:
+			rc = wait_for_completion_interruptible(
+				&ctx->df_aes_ctx.result.completion);
+			if (!rc && !ctx->df_aes_ctx.result.err) {
+				INIT_COMPLETION(
+					ctx->df_aes_ctx.result.completion);
+				break;
+			}
+			/* fall through */
+		default:
+			break;
+		}
+
+
+		init_completion(&ctx->df_aes_ctx.result.completion);
+
+		memcpy(output + out_len, ctx->df_aes_ctx.output.virt_addr, 16);
+		memcpy(ctx->df_aes_ctx.input.virt_addr, output + out_len, 16);
+		out_len += 16;
+	}
+
+out:
+	memset(siv_string, 0, 64);
+	memset(temp, 0, 32);
+	return ret_val;
+}
+
diff --git a/drivers/char/hw_random/ctr_drbg.h b/drivers/char/hw_random/ctr_drbg.h
new file mode 100644
index 0000000..55a9988
--- /dev/null
+++ b/drivers/char/hw_random/ctr_drbg.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MSM_CTR_DRBG_H__
+#define __MSM_CTR_DRBG_H__
+
+/* This is the module that actually follows the details of NIST SP
+ * 800-90 so it can claim to use a FIPS-approved algorithm.
+ */
+
+/* Added ctr_drbg_generate_w_data which supplies
+ * additional input to the generate operation.
+ */
+
+
+#define CTR_DRBG_MAX_REQ_LEN_BITS	(1 << 19)
+#define CTR_DRBG_SEED_LEN_BITS		256
+#define CTR_DRBG_BLOCK_LEN_BITS		128
+#define CTR_DRBG_BLOCK_LEN_BYTES	(CTR_DRBG_BLOCK_LEN_BITS/8)
+#define CTR_DRBG_MAX_RESEED_INTERVAL	(1ULL << 48)
+
+#define MSM_AES128_BLOCK_SIZE   (16)
+#define MSM_ENTROPY_BUFFER_SIZE (16)
+#define MSM_NONCE_BUFFER_SIZE   (8)
+
+enum ctr_drbg_status_t {
+	CTR_DRBG_SUCCESS = 0,
+	CTR_DRBG_NEEDS_RESEED,
+	CTR_DRBG_INVALID_ARG,
+	CTR_DRBG_GENERAL_ERROR = 0xFF,
+};
+
+union ctr_drbg_seed_t {
+	uint8_t as_bytes[32];
+	uint32_t as_words[8];
+	uint64_t as_64[4];
+	struct {
+		uint8_t key[16];
+		uint8_t V[16];
+	} key_V;
+};
+
+struct msm_ctr_tcrypt_result_s {
+	struct completion completion;
+	int err;
+};
+
+struct msm_ctr_buffer_s {
+	unsigned char *virt_addr;
+};
+
+struct aes_struct_s {
+	struct crypto_ablkcipher	*tfm;
+	struct ablkcipher_request	*req;
+	struct msm_ctr_buffer_s		input;
+	struct msm_ctr_buffer_s		output;
+	struct msm_ctr_tcrypt_result_s	result;
+};
+
+struct ctr_drbg_ctx_s {
+	unsigned long long reseed_counter;  /* starts at 1 as per SP
+					     * 800-90
+					     */
+	unsigned long long	reseed_interval;
+	union ctr_drbg_seed_t	seed;
+	struct aes_struct_s	aes_ctx;
+	struct aes_struct_s	df_aes_ctx;
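+	/* previous output block, retained for the FIPS 140-2
+	 * continuous random number generator test
+	 */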
+	uint8_t			prev_drn[MSM_AES128_BLOCK_SIZE];
+	uint8_t			continuous_test_started;
+};
+
+enum ctr_drbg_status_t ctr_drbg_instantiate(struct ctr_drbg_ctx_s *ctx,
+					const uint8_t *entropy,
+					size_t entropy_len_bits,
+					const uint8_t *nonce,
+					size_t nonce_len_bits,
+					unsigned long long reseed_interval);
+
+enum ctr_drbg_status_t ctr_drbg_reseed(struct ctr_drbg_ctx_s *ctx,
+				const void *entropy,
+				size_t entropy_len_bits);
+
+enum ctr_drbg_status_t ctr_drbg_generate_w_data(struct ctr_drbg_ctx_s *ctx,
+			void *additional_input,
+			size_t additional_input_len_bits,
+			void *buffer,
+			size_t len_bits);
+
+enum ctr_drbg_status_t ctr_drbg_generate(struct ctr_drbg_ctx_s *ctx,
+				void *buffer,
+				size_t len_bits);
+
+void ctr_drbg_uninstantiate(struct ctr_drbg_ctx_s *ctx);
+
+enum ctr_drbg_status_t block_cipher_df(struct ctr_drbg_ctx_s *ctx,
+				const uint8_t *input,
+				uint32_t input_size,
+				uint8_t *output,
+				uint32_t output_size
+				);
+void ctr_aes_deinit(struct ctr_drbg_ctx_s *ctx);
+
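+/*
+ * Typical call sequence (an illustrative sketch only; entropy and
+ * nonce sourcing is the caller's responsibility, and the bit lengths
+ * shown match what the fips_drbg wrapper passes in):
+ *
+ *	struct ctr_drbg_ctx_s ctx;
+ *	uint8_t out[16];
+ *
+ *	if (ctr_drbg_instantiate(&ctx, entropy, 8 * 16, nonce, 8 * 8,
+ *				 1ULL << 19) == CTR_DRBG_SUCCESS) {
+ *		ctr_drbg_generate(&ctx, out, 8 * sizeof(out));
+ *		ctr_drbg_uninstantiate(&ctx);
+ *	}
+ */
+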
+#endif /* __MSM_CTR_DRBG_H__ */
diff --git a/drivers/char/hw_random/fips_drbg.c b/drivers/char/hw_random/fips_drbg.c
new file mode 100644
index 0000000..7b4225e
--- /dev/null
+++ b/drivers/char/hw_random/fips_drbg.c
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/types.h>
+#include <mach/msm_bus.h>
+#include <linux/qrng.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+
+#include <linux/errno.h>
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+#include <linux/string.h>
+
+#include "msm_rng.h"
+#include "fips_drbg.h"
+
+/* The fips-140 random number generator is a wrapper around the CTR_DRBG
+ * random number generator, which is built according to the
+ * specifications in NIST SP 800-90 using AES-128.
+ *
+ * This wrapper has the following functionality
+ * a. Entropy collection is via a callback.
+ * b. A failure of CTR_DRBG because reseeding is needed invisibly
+ *    causes the underlying CTR_DRBG instance to be reseeded with
+ *    new random data and then the generate request is retried.
+ * c. Limitations in CTR_DRBG (like not allowing more than 65536 bytes
+ *    to be generated in one request) are worked around.  At this level
+ *    it just works.
+ * d. On success the return value is zero.  If the callback was invoked
+ *    and returned a non-zero value, that value is returned.  On all other
+ *    errors -1 is returned.
+ */
+
+/*  32 bytes = 256 bits = seed length */
+#define MAGIC 0xab10d161
+
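+/* Reseed automatically after 2^31 generate calls; this stays well
+ * under the NIST SP 800-90 cap of 2^48 (CTR_DRBG_MAX_RESEED_INTERVAL).
+ */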
+#define RESEED_INTERVAL (1ULL << 31)
+
+int get_entropy_callback(void *ctx, void *buf)
+{
+	struct msm_rng_device *msm_rng_dev = (struct msm_rng_device *)ctx;
+	int ret_val = -1;
+
+	if (NULL == ctx)
+		return FIPS140_PRNG_ERR;
+
+	if (NULL == buf)
+		return FIPS140_PRNG_ERR;
+
+	ret_val = msm_rng_direct_read(msm_rng_dev, buf);
+	if ((size_t)ret_val != Q_HW_DRBG_BLOCK_BYTES)
+		return ret_val;
+
+	return 0;
+}
+
+/* Initialize *ctx. Automatically reseed after reseed_interval calls
+ * to fips_drbg_gen.  The underlying CTR_DRBG will automatically be
+ * reseeded every reseed_interval requests.  Values over
+ * CTR_DRBG_MAX_RESEED_INTERVAL (2^48) or that are zero are silently
+ * converted to CTR_DRBG_MAX_RESEED_INTERVAL.  (It is easy to justify
+ * lowering values that are too large to CTR_DRBG_MAX_RESEED_INTERVAL
+ * (the NIST SP800-90 limit): just silently enforcing the rules.
+ * Silently converting 0 to CTR_DRBG_MAX_RESEED_INTERVAL is harder.
+ * The alternative is to return an error.  But since
+ * CTR_DRBG_MAX_RESEED is safe, we relieve the caller of one more
+ * error to worry about.)
+ */
+static int
+do_fips_drbg_init(struct fips_drbg_ctx_s *ctx,
+	      get_entropy_callback_t callback,
+	      void *callback_ctx,
+	      unsigned long long reseed_interval)
+{
+	uint8_t entropy_pool[Q_HW_DRBG_BLOCK_BYTES];
+	enum ctr_drbg_status_t init_rv;
+	int rv = -1;
+
+	if (ctx == NULL)
+		return FIPS140_PRNG_ERR;
+	if (callback == NULL)
+		return FIPS140_PRNG_ERR;
+	if (reseed_interval == 0 ||
+		reseed_interval > CTR_DRBG_MAX_RESEED_INTERVAL)
+		reseed_interval = CTR_DRBG_MAX_RESEED_INTERVAL;
+
+	/* fill in callback related fields in ctx */
+	ctx->get_entropy_callback = callback;
+	ctx->get_entropy_callback_ctx = callback_ctx;
+
+	if (!ctx->fips_drbg_started) {
+		rv = (*ctx->get_entropy_callback)(ctx->get_entropy_callback_ctx,
+			ctx->prev_hw_drbg_block
+			);
+		if (rv != 0)
+			return FIPS140_PRNG_ERR;
+		ctx->fips_drbg_started = 1;
+	}
+
+	rv = (*ctx->get_entropy_callback)(ctx->get_entropy_callback_ctx,
+		entropy_pool
+		);
+	if (rv != 0) {
+		memset(entropy_pool, 0, Q_HW_DRBG_BLOCK_BYTES);
+		return FIPS140_PRNG_ERR;
+	}
+
+	if (!memcmp(entropy_pool,
+			ctx->prev_hw_drbg_block,
+			Q_HW_DRBG_BLOCK_BYTES)) {
+		memset(entropy_pool, 0, Q_HW_DRBG_BLOCK_BYTES);
+		return FIPS140_PRNG_ERR;
+	} else
+		memcpy(ctx->prev_hw_drbg_block,
+			entropy_pool,
+			Q_HW_DRBG_BLOCK_BYTES);
+
+
+	init_rv = ctr_drbg_instantiate(&ctx->ctr_drbg_ctx,
+		entropy_pool,
+		8 * MSM_ENTROPY_BUFFER_SIZE,
+		entropy_pool + MSM_ENTROPY_BUFFER_SIZE,
+		8 * 8,
+		reseed_interval);
+
+	memset(entropy_pool, 0, Q_HW_DRBG_BLOCK_BYTES);
+
+	if (init_rv != CTR_DRBG_SUCCESS)
+		return FIPS140_PRNG_ERR;
+
+	ctx->magic = MAGIC;
+	return 0;
+}
+
+int fips_drbg_init(struct msm_rng_device *msm_rng_ctx)
+{
+	int ret_val = 0;
+
+	ret_val = do_fips_drbg_init(msm_rng_ctx->drbg_ctx,
+			get_entropy_callback,
+			msm_rng_ctx,
+			RESEED_INTERVAL
+			);
+	if (ret_val != 0)
+		ret_val = FIPS140_PRNG_ERR;
+
+	return ret_val;
+}
+
+/* Push new entropy into the CTR_DRBG instance in ctx, combining
+ * it with the entropy already there.  On success, 0 is returned.  If
+ * the callback returns a non-zero value, that value is returned.
+ * Other errors return -1.
+ */
+static int
+fips_drbg_reseed(struct fips_drbg_ctx_s *ctx)
+{
+	uint8_t entropy_pool[Q_HW_DRBG_BLOCK_BYTES];
+	int rv;
+	enum ctr_drbg_status_t init_rv;
+
+	if (ctx == NULL)
+		return FIPS140_PRNG_ERR;
+
+	if (!ctx->fips_drbg_started) {
+		rv = (*ctx->get_entropy_callback)(ctx->get_entropy_callback_ctx,
+			ctx->prev_hw_drbg_block
+			);
+		if (rv != 0)
+			return FIPS140_PRNG_ERR;
+		ctx->fips_drbg_started = 1;
+	}
+
+	rv = (*ctx->get_entropy_callback)(ctx->get_entropy_callback_ctx,
+		entropy_pool
+		);
+	if (rv != 0) {
+		memset(entropy_pool, 0, Q_HW_DRBG_BLOCK_BYTES);
+		return FIPS140_PRNG_ERR;
+	}
+
+	if (!memcmp(entropy_pool,
+		    ctx->prev_hw_drbg_block,
+		    Q_HW_DRBG_BLOCK_BYTES)) {
+		memset(entropy_pool, 0, Q_HW_DRBG_BLOCK_BYTES);
+		return FIPS140_PRNG_ERR;
+	} else
+		memcpy(ctx->prev_hw_drbg_block,
+		       entropy_pool,
+		       Q_HW_DRBG_BLOCK_BYTES);
+
+	init_rv = ctr_drbg_reseed(&ctx->ctr_drbg_ctx,
+				  entropy_pool,
+				  8 * MSM_ENTROPY_BUFFER_SIZE);
+
+	/* Zeroize the buffer for security. */
+	memset(entropy_pool, 0, Q_HW_DRBG_BLOCK_BYTES);
+
+	return (init_rv == CTR_DRBG_SUCCESS ?
+				FIPS140_PRNG_OK :
+				FIPS140_PRNG_ERR);
+}
+
+/* Generate random bytes.  len is in bytes.  On success returns 0.  If
+ * the callback returns a non-zero value, that is returned.  Other
+ * errors return -1. */
+int
+fips_drbg_gen(struct fips_drbg_ctx_s *ctx, void *tgt, size_t len)
+{
+
+	/*
+	 * The contorted flow in this function is so that the CTR_DRBG
+	 * stuff can follow NIST SP 800-90, which has the generate
+	 * function fail and return a special code if a reseed is
+	 * needed.  We also work around the CTR_DRBG limitation of the
+	 * maximum request size being 2^19 bits.
+	 */
+
+	enum ctr_drbg_status_t gen_rv;
+	int rv;
+
+	if (ctx == NULL || ctx->magic != MAGIC)
+		return FIPS140_PRNG_ERR;
+	if (tgt == NULL && len > 0)
+		return FIPS140_PRNG_ERR;
+	while (len > 0) {
+		size_t req_len;
+
+		if (len < (CTR_DRBG_MAX_REQ_LEN_BITS / 8))
+			req_len = len;
+		else
+			req_len = CTR_DRBG_MAX_REQ_LEN_BITS / 8;
+
+		gen_rv = ctr_drbg_generate(&ctx->ctr_drbg_ctx,
+					   tgt,
+					   8*req_len);
+		switch (gen_rv) {
+		case CTR_DRBG_SUCCESS:
+			tgt = (uint8_t *)tgt + req_len;
+			len -= req_len;
+			break;
+		case CTR_DRBG_NEEDS_RESEED:
+			rv = fips_drbg_reseed(ctx);
+			if (rv != 0)
+				return rv;
+			break;
+		default:
+			return FIPS140_PRNG_ERR;
+		}
+	}
+
+	return 0;
+}
+
+/* free resources and zeroize state */
+void
+fips_drbg_final(struct fips_drbg_ctx_s *ctx)
+{
+	ctr_drbg_uninstantiate(&ctx->ctr_drbg_ctx);
+	ctx->get_entropy_callback     = 0;
+	ctx->get_entropy_callback_ctx = 0;
+	ctx->fips_drbg_started        = 0;
+	memset(ctx->prev_hw_drbg_block, 0, Q_HW_DRBG_BLOCK_BYTES);
+	ctx->magic = 0;
+}
+
diff --git a/drivers/char/hw_random/fips_drbg.h b/drivers/char/hw_random/fips_drbg.h
new file mode 100644
index 0000000..06da362
--- /dev/null
+++ b/drivers/char/hw_random/fips_drbg.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MSM_FIPS_DRBG_H__
+#define __MSM_FIPS_DRBG_H__
+
+#include "ctr_drbg.h"
+#include "msm_rng.h"
+
+#define FIPS140_PRNG_OK  (0)
+#define FIPS140_PRNG_ERR (-1)
+
+typedef int (*get_entropy_callback_t)(void *ctx, void *buf);
+
+struct fips_drbg_ctx_s {
+	uint32_t magic;		/* for checking that ctx is likely valid */
+	get_entropy_callback_t get_entropy_callback;
+	void *get_entropy_callback_ctx;
+	struct ctr_drbg_ctx_s ctr_drbg_ctx;
+	uint8_t fips_drbg_started;
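+	/* last raw hardware block, compared against fresh entropy
+	 * to detect a stuck hardware RNG
+	 */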
+	uint8_t prev_hw_drbg_block[Q_HW_DRBG_BLOCK_BYTES];
+};
+
+/*
+ * initialize *ctx, requesting automatic reseed after reseed_interval
+ * calls to fips_drbg_gen.  callback is a function to get entropy.
+ * callback_ctx is a pointer to any context structure that function
+ * may need.  (Pass NULL if no context structure is needed.) callback
+ * must return zero or a positive number on success, and a
+ * negative number on an error.
+ */
+int fips_drbg_init(struct msm_rng_device *msm_rng_ctx);
+
+/* generated random data.  Returns 0 on success, -1 on failures */
+int fips_drbg_gen(struct fips_drbg_ctx_s *ctx, void *tgt, size_t len);
+
+
+/* free resources and zeroize state */
+/* Failure to call fips_drbg_final is not a security issue, since
+ * CTR_DRBG provides backtracking resistance by updating Key and V
+ * immediately after the data has been generated but before the
+ * generate function returns.  But it is a resource issue, as it
+ * leaks the crypto transforms and buffers held by the context.
+ */
+void fips_drbg_final(struct fips_drbg_ctx_s *ctx);
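+
+/*
+ * Illustrative call sequence (a sketch only: msm_rng_dev is assumed
+ * to be a fully probed struct msm_rng_device with drbg_ctx allocated,
+ * and use_random_bytes() stands in for the caller's consumer):
+ *
+ *	uint8_t buf[64];
+ *
+ *	if (fips_drbg_init(msm_rng_dev) == FIPS140_PRNG_OK &&
+ *	    fips_drbg_gen(msm_rng_dev->drbg_ctx, buf, sizeof(buf)) == 0)
+ *		use_random_bytes(buf);
+ *	fips_drbg_final(msm_rng_dev->drbg_ctx);
+ */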
+
+#endif /* __MSM_FIPS_DRBG_H__ */
diff --git a/drivers/char/hw_random/msm_fips_selftest.c b/drivers/char/hw_random/msm_fips_selftest.c
new file mode 100644
index 0000000..3c23605
--- /dev/null
+++ b/drivers/char/hw_random/msm_fips_selftest.c
@@ -0,0 +1,346 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "fips_drbg.h"
+#include "ctr_drbg.h"
+#include "msm_rng.h"
+#include "msm_fips_selftest.h"
+
+#define CTRAES128_ENTROPY_BYTES       (16)
+#define CTRAES128_NONCE_BYTES         (8)
+#define CTRAES128_MAX_OUTPUT_BYTES    (64)
+
+struct ctr_drbg_testcase_s {
+	char *name;
+	char *entropy_string;
+	char *nonce_string;
+	char *reseed_entropy_string;
+	char *expected_string;
+};
+
+static struct ctr_drbg_testcase_s t0 = {
+	.name = "use_pr_0",
+	.entropy_string = "\x8f\xb9\x57\x3a\x54\x62\x53\xcd"
+			  "\xbf\x62\x15\xa1\x80\x5a\x41\x38",
+	.nonce_string   = "\x7c\x2c\xe6\x54\x02\xbc\xa6\x83",
+	.reseed_entropy_string = "\xbc\x5a\xd8\x9a\xe1\x8c\x49\x1f"
+				 "\x90\xa2\xae\x9e\x7e\x2c\xf9\x9d",
+	.expected_string = "\x07\x62\x82\xe8\x0e\x65\xd7\x70"
+			   "\x1a\x35\xb3\x44\x63\x68\xb6\x16"
+			   "\xf8\xd9\x62\x23\xb9\xb5\x11\x64"
+			   "\x23\xa3\xa2\x32\xc7\x2c\xea\xbf"
+			   "\x4a\xcc\xc4\x0a\xc6\x19\xd6\xaa"
+			   "\x68\xae\xdb\x8b\x26\x70\xb8\x07"
+			   "\xcc\xe9\x9f\xc2\x1b\x8f\xa5\x16"
+			   "\xef\x75\xb6\x8f\xc0\x6c\x87\xc7",
+};
+
+static struct ctr_drbg_testcase_s t1 = {
+	.name = "use_pr_1",
+	.entropy_string = "\xa3\x56\xf3\x9a\xce\x48\x59\xb1"
+			  "\xe1\x99\x49\x40\x22\x8e\xa4\xeb",
+	.nonce_string   = "\xff\x33\xe9\x51\x39\xf7\x67\xf1",
+	.reseed_entropy_string = "\x66\x8f\x0f\xe2\xd8\xa9\xa9\x29"
+				 "\x20\xfc\xb9\xf3\x55\xd6\xc3\x4c",
+	.expected_string = "\xa1\x06\x61\x65\x7b\x98\x0f\xac"
+			   "\xce\x77\x91\xde\x7f\x6f\xe6\x1e"
+			   "\x88\x15\xe5\xe2\x4c\xce\xb8\xa6"
+			   "\x63\xf2\xe8\x2f\x5b\xfb\x16\x92"
+			   "\x06\x2a\xf3\xa8\x59\x05\xe0\x5a"
+			   "\x92\x9a\x07\x65\xc7\x41\x29\x3a"
+			   "\x4b\x1d\x15\x3e\x02\x14\x7b\xdd"
+			   "\x74\x5e\xbd\x70\x07\x4d\x6c\x08",
+};
+
+static struct ctr_drbg_testcase_s *testlist[] = {
+	&t0, &t1
+};
+
+static int allzeroP(void *p, size_t len)
+{
+	size_t i;
+
+	for (i = 0; i < len; ++i)
+		if (((uint8_t *)p)[i] != 0)
+			return 0;
+
+	return 1;
+}
+
+/*
+ * basic test.  return value is error count.
+ */
+int fips_ctraes128_df_known_answer_test(struct ctr_debg_test_inputs_s *tcase)
+{
+	struct ctr_drbg_ctx_s ctx;
+	enum ctr_drbg_status_t rv;
+
+	if (tcase->observed_string_len > CTRAES128_MAX_OUTPUT_BYTES) {
+		pr_debug("known answer test output is bigger than 64!\n");
+		return 1;
+	}
+
+	memset(&ctx, 0, sizeof(ctx));
+
+	ctx.continuous_test_started = 1;
+
+	rv = ctr_drbg_instantiate(&ctx,
+				  tcase->entropy_string,
+				  8 * CTRAES128_ENTROPY_BYTES,
+				  tcase->nonce_string,
+				  8 * CTRAES128_NONCE_BYTES,
+				  1<<19);
+	if (rv != CTR_DRBG_SUCCESS) {
+		pr_err("test instantiate failed with code %d\n", rv);
+		return 1;
+	}
+
+	rv = ctr_drbg_reseed(&ctx,
+			     tcase->reseed_entropy_string,
+			     8 * CTRAES128_ENTROPY_BYTES);
+	if (rv != CTR_DRBG_SUCCESS) {
+		pr_err("test reseed failed with code %d\n", rv);
+		return 1;
+	}
+
+	rv = ctr_drbg_generate(&ctx,
+			       tcase->observed_string,
+			       tcase->observed_string_len * 8);
+	if (rv != CTR_DRBG_SUCCESS) {
+		pr_err("test generate (2) failed with code %d\n", rv);
+		return 1;
+	}
+
+	rv = ctr_drbg_generate(&ctx,
+			       tcase->observed_string,
+			       tcase->observed_string_len * 8);
+	if (rv != CTR_DRBG_SUCCESS) {
+		pr_err("test generate (2) failed with code %d\n", rv);
+		return 1;
+	}
+
+	ctr_drbg_uninstantiate(&ctx);
+
+	if (!allzeroP(&ctx.seed, sizeof(ctx.seed))) {
+		pr_err("test Final failed to zeroize the context\n");
+		return 1;
+	}
+
+	pr_info("\n DRBG counter test done");
+	return 0;
+
+}
+
+static int fips_drbg_healthcheck_sanitytest(void)
+{
+	struct ctr_drbg_ctx_s *p_ctx = NULL;
+	enum ctr_drbg_status_t rv = CTR_DRBG_SUCCESS;
+	char entropy_string[MSM_ENTROPY_BUFFER_SIZE];
+	char nonce[MSM_NONCE_BUFFER_SIZE];
+	char buffer[32];
+
+	pr_info("start DRBG health check sanity test.\n");
+	p_ctx = kzalloc(sizeof(struct ctr_drbg_ctx_s), GFP_KERNEL);
+	if (NULL == p_ctx) {
+		rv = CTR_DRBG_GENERAL_ERROR;
+		pr_err("p_ctx kzalloc fail\n");
+		goto outbuf;
+	}
+
+	/*
+	 * Test DRBG instantiation function error handling.
+	 * Sends a NULL pointer as the CTR-DRBG context.
+	 */
+	rv = ctr_drbg_instantiate(NULL,
+				  entropy_string,
+				  8 * CTRAES128_ENTROPY_BYTES,
+				  nonce,
+				  8 * CTRAES128_NONCE_BYTES,
+				  1<<19);
+	if (CTR_DRBG_SUCCESS == rv) {
+		rv = CTR_DRBG_INVALID_ARG;
+		pr_err("failed to handle NULL pointer of CTR context\n");
+		goto outbuf;
+	}
+
+	/*
+	 * Test DRBG instantiation function error handling.
+	 * Sends a NULL pointer as the entropy input.
+	 */
+	rv = ctr_drbg_instantiate(p_ctx,
+				  NULL,
+				  8 * CTRAES128_ENTROPY_BYTES,
+				  nonce,
+				  8 * CTRAES128_NONCE_BYTES,
+				  1<<19);
+	if (CTR_DRBG_SUCCESS == rv) {
+		rv = CTR_DRBG_INVALID_ARG;
+		pr_err("failed to handle NULL pointer of entropy string\n");
+		goto outbuf;
+	}
+
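+	/*
+	 * Test DRBG instantiation function error handling.
+	 * Sends a NULL pointer as the nonce input.
+	 */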
+	rv = ctr_drbg_instantiate(p_ctx,
+				  entropy_string,
+				  8 * CTRAES128_ENTROPY_BYTES,
+				  NULL,
+				  8 * CTRAES128_NONCE_BYTES,
+				  1<<19);
+	if (CTR_DRBG_SUCCESS == rv) {
+		rv = CTR_DRBG_INVALID_ARG;
+		pr_err("failed to handle NULL pointer of nonce string\n");
+		goto outbuf;
+	}
+
+	/*
+	 * Test DRBG instantiation function error handling.
+	 * Sends an overly long seed length.
+	 */
+	rv = ctr_drbg_instantiate(p_ctx,
+				  entropy_string,
+				  8 * CTRAES128_ENTROPY_BYTES,
+				  nonce,
+				  32 * CTRAES128_NONCE_BYTES,
+				  1<<19);
+	if (CTR_DRBG_SUCCESS == rv) {
+		rv = CTR_DRBG_INVALID_ARG;
+		pr_err("failed to handle incorrect seed size\n");
+		goto outbuf;
+	}
+
+
+	rv = ctr_drbg_instantiate(p_ctx,
+				  entropy_string,
+				  8 * CTRAES128_ENTROPY_BYTES,
+				  nonce,
+				  8 * CTRAES128_NONCE_BYTES,
+				  1<<19);
+	if (CTR_DRBG_SUCCESS != rv) {
+		pr_err("Instantiation failed to handle CTR-DRBG instance\n");
+		goto outbuf;
+	}
+
+	/*
+	 * Test DRBG generate function error handling.
+	 * Sets the output buffer to NULL.
+	 */
+	rv = ctr_drbg_generate(p_ctx, NULL, 256);
+	if (CTR_DRBG_SUCCESS == rv) {
+		pr_err("failed to handle incorrect buffer pointer\n");
+		rv = CTR_DRBG_INVALID_ARG;
+		goto outdrbg;
+	}
+
+	rv = ctr_drbg_generate(p_ctx, buffer, 1 << 20);
+	if (CTR_DRBG_SUCCESS == rv) {
+		pr_err("failed to handle too long output length\n");
+		rv = CTR_DRBG_INVALID_ARG;
+		goto outdrbg;
+	}
+
+	rv = ctr_drbg_generate(p_ctx, buffer, 177);
+	if (CTR_DRBG_SUCCESS == rv) {
+		pr_err("failed to handle incorrect output length\n");
+		rv = CTR_DRBG_INVALID_ARG;
+		goto outdrbg;
+	}
+
+	pr_info("DRBG health check sanity test passed.\n");
+	rv = CTR_DRBG_SUCCESS;
+
+outdrbg:
+	ctr_drbg_uninstantiate(p_ctx);
+
+outbuf:
+	kzfree(p_ctx);
+	p_ctx = NULL;
+
+	memset(buffer, 0, 32);
+	memset(nonce, 0, MSM_NONCE_BUFFER_SIZE);
+	memset(entropy_string, 0, MSM_ENTROPY_BUFFER_SIZE);
+
+	return rv;
+}
+
+int fips_self_test(void)
+{
+	struct ctr_debg_test_inputs_s cavs_input;
+	uint8_t entropy[CTRAES128_ENTROPY_BYTES];
+	uint8_t nonce[CTRAES128_NONCE_BYTES];
+	uint8_t reseed_entropy[CTRAES128_ENTROPY_BYTES];
+	uint8_t expected[CTRAES128_MAX_OUTPUT_BYTES];
+	uint8_t observed[CTRAES128_MAX_OUTPUT_BYTES];
+	unsigned int i;
+	int errors = 0;
+	int ret;
+
+	cavs_input.entropy_string = entropy;
+	cavs_input.nonce_string = nonce;
+	cavs_input.reseed_entropy_string = reseed_entropy;
+	cavs_input.observed_string = observed;
+	cavs_input.observed_string_len = CTRAES128_MAX_OUTPUT_BYTES;
+
+
+	ret = fips_drbg_healthcheck_sanitytest();
+	if (CTR_DRBG_SUCCESS != ret) {
+		pr_err("DRBG health check fail\n");
+		errors++;
+		return errors;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(testlist); ++i) {
+		memcpy(entropy,
+			testlist[i]->entropy_string,
+			CTRAES128_ENTROPY_BYTES);
+		memcpy(nonce,
+			testlist[i]->nonce_string,
+			CTRAES128_NONCE_BYTES);
+		memcpy(reseed_entropy,
+			testlist[i]->reseed_entropy_string,
+			CTRAES128_ENTROPY_BYTES);
+		memcpy(expected,
+			testlist[i]->expected_string,
+			CTRAES128_MAX_OUTPUT_BYTES);
+
+		pr_debug("starting test %s\n", testlist[i]->name);
+		ret = fips_ctraes128_df_known_answer_test(&cavs_input);
+		pr_debug("completed test %s\n\n", testlist[i]->name);
+		if (0 != ret) {
+			pr_debug("got error from drbg known answer test!\n");
+			return 1;
+		}
+
+		if (memcmp(expected,
+			cavs_input.observed_string,
+			CTRAES128_MAX_OUTPUT_BYTES) != 0) {
+			errors++;
+			pr_info("%s: generate failed\n", testlist[i]->name);
+			return 1;
+		} else
+			pr_info("%s: generate PASSED!\n", testlist[i]->name);
+	}
+
+	if (errors == 0)
+		pr_debug("All tests passed\n");
+	else
+		pr_debug("%d tests failed\n", errors);
+
+	return errors;
+}
+
diff --git a/drivers/char/hw_random/msm_fips_selftest.h b/drivers/char/hw_random/msm_fips_selftest.h
new file mode 100644
index 0000000..090ae01
--- /dev/null
+++ b/drivers/char/hw_random/msm_fips_selftest.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MSM_FIPS_SELFTEST_H__
+#define __MSM_FIPS_SELFTEST_H__
+
+struct ctr_debg_test_inputs_s {
+	char *entropy_string;		/* must be 16 bytes */
+	char *nonce_string;		/* must be 8 bytes */
+	char *reseed_entropy_string;	/* must be 16 bytes */
+	char *observed_string;		/* length is given in
+					 * observed_string_len */
+	int  observed_string_len;
+};
+
+int fips_ctraes128_df_known_answer_test(struct ctr_debg_test_inputs_s *tcase);
+
+int fips_self_test(void);
+
+#endif  /* __MSM_FIPS_SELFTEST_H__ */
diff --git a/drivers/char/hw_random/msm_rng.c b/drivers/char/hw_random/msm_rng.c
index 4118a7a..e7b9a27 100644
--- a/drivers/char/hw_random/msm_rng.c
+++ b/drivers/char/hw_random/msm_rng.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -29,6 +29,13 @@
 #include <linux/fs.h>
 #include <linux/cdev.h>
 
+#include <linux/platform_data/qcom_crypto_device.h>
+
+#include "msm_rng.h"
+#include "ctr_drbg.h"
+#include "fips_drbg.h"
+#include "msm_fips_selftest.h"
+
 #define DRIVER_NAME "msm_rng"
 
 /* Device specific register offsets */
@@ -46,16 +53,34 @@
 #define MAX_HW_FIFO_DEPTH 16                     /* FIFO is 16 words deep */
 #define MAX_HW_FIFO_SIZE (MAX_HW_FIFO_DEPTH * 4) /* FIFO is 32 bits wide  */
 
+/* Global FIPS status  */
+#ifdef CONFIG_FIPS_ENABLE
+enum fips_status g_fips140_status = FIPS140_STATUS_FAIL;
+EXPORT_SYMBOL(g_fips140_status);
 
-struct msm_rng_device {
-	struct platform_device *pdev;
-	void __iomem *base;
-	struct clk *prng_clk;
-	uint32_t qrng_perf_client;
+#else
+enum fips_status g_fips140_status = FIPS140_STATUS_NA;
+EXPORT_SYMBOL(g_fips140_status);
+
+#endif
+
+/* FIPS140-2 callback for DRBG self test */
+void *drbg_call_back;
+EXPORT_SYMBOL(drbg_call_back);
+
+
+enum {
+	FIPS_NOT_STARTED = 0,
+	DRBG_FIPS_STARTED
 };
 
 struct msm_rng_device msm_rng_device_info;
 
+#ifdef CONFIG_FIPS_ENABLE
+static int fips_mode_enabled = FIPS_NOT_STARTED;
+#endif
+
 static long msm_rng_ioctl(struct file *filp, unsigned int cmd,
 				unsigned long arg)
 {
@@ -77,28 +102,24 @@
 	return ret;
 }
 
-static int msm_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+/*
+ * This function reads the hardware random bit generator directly and
+ * returns the data to the caller.
+ */
+int msm_rng_direct_read(struct msm_rng_device *msm_rng_dev, void *data)
 {
-	struct msm_rng_device *msm_rng_dev;
 	struct platform_device *pdev;
 	void __iomem *base;
-	size_t maxsize;
 	size_t currsize = 0;
 	unsigned long val;
 	unsigned long *retdata = data;
 	int ret;
 
-	msm_rng_dev = (struct msm_rng_device *)rng->priv;
 	pdev = msm_rng_dev->pdev;
 	base = msm_rng_dev->base;
 
-	/* calculate max size bytes to transfer back to caller */
-	maxsize = min_t(size_t, MAX_HW_FIFO_SIZE, max);
-
-	/* no room for word data */
-	if (maxsize < 4)
-		return 0;
-
 	/* enable PRNG clock */
 	ret = clk_prepare_enable(msm_rng_dev->prng_clk);
 	if (ret) {
@@ -120,6 +141,74 @@
 		*(retdata++) = val;
 		currsize += 4;
 
+	} while (currsize < Q_HW_DRBG_BLOCK_BYTES);
+
+	/* vote to turn off clock */
+	clk_disable_unprepare(msm_rng_dev->prng_clk);
+
+	val = 0L;
+	return currsize;
+}
+
+static int msm_rng_drbg_read(struct hwrng *rng,
+			void *data, size_t max, bool wait)
+{
+	struct msm_rng_device *msm_rng_dev;
+	struct platform_device *pdev;
+	void __iomem *base;
+	size_t maxsize;
+	size_t currsize = 0;
+	unsigned long val;
+	unsigned long *retdata = data;
+	int ret, ret1;
+
+	msm_rng_dev = (struct msm_rng_device *)rng->priv;
+	pdev = msm_rng_dev->pdev;
+	base = msm_rng_dev->base;
+
+	down(&msm_rng_dev->drbg_sem);
+
+	/* calculate max size bytes to transfer back to caller */
+	maxsize = min_t(size_t, MAX_HW_FIFO_SIZE, max);
+
+	/* no room for word data */
+	if (maxsize < 4) {
+		up(&msm_rng_dev->drbg_sem);
+		return 0;
+	}
+
+	/* read random data from CTR-AES based DRBG */
+	if (FIPS140_DRBG_ENABLED == msm_rng_dev->fips140_drbg_enabled) {
+		ret1 = fips_drbg_gen(msm_rng_dev->drbg_ctx, data, maxsize);
+		if (FIPS140_PRNG_ERR == ret1)
+			panic("random number generator error.\n");
+	} else
+		ret1 = 1;
+
+	/* read random data from h/w */
+	/* enable PRNG clock */
+	ret = clk_prepare_enable(msm_rng_dev->prng_clk);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to enable clock in callback\n");
+		up(&msm_rng_dev->drbg_sem);
+		return 0;
+	}
+	/* read random data from h/w */
+	do {
+		/* check status bit if data is available */
+		if (!(readl_relaxed(base + PRNG_STATUS_OFFSET) & 0x00000001))
+			break;	/* no data to read so just bail */
+
+		/* read FIFO */
+		val = readl_relaxed(base + PRNG_DATA_OUT_OFFSET);
+		if (!val)
+			break;	/* no data to read so just bail */
+
+		/* write data back to callers pointer */
+		if (0 != ret1)
+			*(retdata++) = val;
+		currsize += 4;
+
 		/* make sure we stay on 32bit boundary */
 		if ((maxsize - currsize) < 4)
 			break;
@@ -127,9 +216,112 @@
 	/* vote to turn off clock */
 	clk_disable_unprepare(msm_rng_dev->prng_clk);
 
+	up(&msm_rng_dev->drbg_sem);
+
 	return currsize;
 }
 
+#ifdef CONFIG_FIPS_ENABLE
+static void _fips_drbg_init_error(struct msm_rng_device  *msm_rng_dev)
+{
+	unregister_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME);
+	clk_put(msm_rng_dev->prng_clk);
+	iounmap(msm_rng_dev->base);
+	kzfree(msm_rng_dev->drbg_ctx);
+	kzfree(msm_rng_dev);
+	panic("software random number generator initialization error.\n");
+}
+#else
+static inline void _fips_drbg_init_error(struct msm_rng_device *msm_rng_dev)
+{
+	return;
+}
+
+#endif
+
+#ifdef CONFIG_FIPS_ENABLE
+int _do_msm_fips_drbg_init(void *rng_dev)
+{
+	struct msm_rng_device *msm_rng_dev = (struct msm_rng_device *) rng_dev;
+
+	int ret;
+
+	if (NULL == msm_rng_dev)
+		return 1;
+
+	ret = fips_drbg_init(msm_rng_dev);
+	if (0 == ret) {
+		pr_debug("start fips self test\n");
+		ret = fips_self_test();
+		if (ret) {
+			msm_rng_dev->fips140_drbg_enabled =
+				FIPS140_DRBG_DISABLED;
+			_fips_drbg_init_error(msm_rng_dev);
+		} else {
+			msm_rng_dev->fips140_drbg_enabled =
+				FIPS140_DRBG_ENABLED;
+		}
+	} else {
+		msm_rng_dev->fips140_drbg_enabled = FIPS140_DRBG_DISABLED;
+		_fips_drbg_init_error(msm_rng_dev);
+	}
+
+	return ret;
+}
+#else
+int _do_msm_fips_drbg_init(void *rng_dev)
+{
+	return 0;
+}
+#endif
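+
+/*
+ * Init-flow sketch: with CONFIG_FIPS_ENABLE, probe() only registers
+ * the device pointer via fips_reg_drbg_callback(); the DRBG is then
+ * brought up later from the qcedev FIPS-status IOCTL through
+ * _do_msm_fips_drbg_init(drbg_call_back). Without CONFIG_FIPS_ENABLE
+ * both paths are no-ops.
+ */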
+
+#ifdef CONFIG_FIPS_ENABLE
+static int msm_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+	struct msm_rng_device *msm_rng_dev = (struct msm_rng_device *)rng->priv;
+	unsigned char a[Q_HW_DRBG_BLOCK_BYTES];
+	int read_size;
+	unsigned char *p = data;
+
+	switch (fips_mode_enabled) {
+	case DRBG_FIPS_STARTED:
+		return msm_rng_drbg_read(rng, data, max, wait);
+	case FIPS_NOT_STARTED:
+		if (g_fips140_status != FIPS140_STATUS_PASS) {
+			do {
+				read_size = msm_rng_direct_read(msm_rng_dev, a);
+				if (read_size <= 0)
+					break;
+				if (max > (size_t)read_size) {
+					memcpy(p, a, read_size);
+					p += read_size;
+					max -= read_size;
+				} else {
+					memcpy(p, a, max);
+					p += max;
+					break;
+				}
+			} while (1);
+			return p - (unsigned char *)data;
+		} else {
+			fips_mode_enabled = DRBG_FIPS_STARTED;
+			return msm_rng_drbg_read(rng, data, max, wait);
+		}
+	default:
+		return 0;
+	}
+
+	return 0;
+}
+#else
+static int msm_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+	return msm_rng_drbg_read(rng, data, max, wait);
+}
+#endif
+
 static struct hwrng msm_rng = {
 	.name = DRIVER_NAME,
 	.read = msm_rng_read,
@@ -187,6 +379,20 @@
 static struct class *msm_rng_class;
 static struct cdev msm_rng_cdev;
 
+#ifdef CONFIG_FIPS_ENABLE
+
+static void _first_msm_drbg_init(struct msm_rng_device *msm_rng_dev)
+{
+	fips_reg_drbg_callback((void *)msm_rng_dev);
+	return;
+}
+#else
+static void _first_msm_drbg_init(struct msm_rng_device *msm_rng_dev)
+{
+	_do_msm_fips_drbg_init(msm_rng_dev);
+}
+#endif
+
 static int __devinit msm_rng_probe(struct platform_device *pdev)
 {
 	struct resource *res;
@@ -205,7 +411,7 @@
 		goto err_exit;
 	}
 
-	msm_rng_dev = kzalloc(sizeof(msm_rng_dev), GFP_KERNEL);
+	msm_rng_dev = kzalloc(sizeof(struct msm_rng_device), GFP_KERNEL);
 	if (!msm_rng_dev) {
 		dev_err(&pdev->dev, "cannot allocate memory\n");
 		error = -ENOMEM;
@@ -220,6 +426,14 @@
 	}
 	msm_rng_dev->base = base;
 
+	msm_rng_dev->drbg_ctx = kzalloc(sizeof(struct fips_drbg_ctx_s),
+					GFP_KERNEL);
+	if (!msm_rng_dev->drbg_ctx) {
+		dev_err(&pdev->dev, "cannot allocate memory\n");
+		error = -ENOMEM;
+		goto err_clk_get;
+	}
+
 	/* create a handle for clock control */
 	if ((pdev->dev.of_node) && (of_property_read_bool(pdev->dev.of_node,
 					"qcom,msm-rng-iface-clk")))
@@ -279,7 +493,11 @@
 	}
 	cdev_init(&msm_rng_cdev, &msm_rng_fops);
 
-	return ret;
+	sema_init(&msm_rng_dev->drbg_sem, 1);
+
+	_first_msm_drbg_init(msm_rng_dev);
+
+	return error;
 
 unregister_chrdev:
 	unregister_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME);
@@ -288,7 +506,8 @@
 err_clk_get:
 	iounmap(msm_rng_dev->base);
 err_iomap:
-	kfree(msm_rng_dev);
+	kzfree(msm_rng_dev->drbg_ctx);
+	kzfree(msm_rng_dev);
 err_exit:
 	return error;
 }
@@ -296,6 +515,9 @@
 static int __devexit msm_rng_remove(struct platform_device *pdev)
 {
 	struct msm_rng_device *msm_rng_dev = platform_get_drvdata(pdev);
+
+	fips_drbg_final(msm_rng_dev->drbg_ctx);
+
 	unregister_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME);
 	hwrng_unregister(&msm_rng);
 	clk_put(msm_rng_dev->prng_clk);
@@ -303,7 +525,11 @@
 	platform_set_drvdata(pdev, NULL);
 	if (msm_rng_dev->qrng_perf_client)
 		msm_bus_scale_unregister_client(msm_rng_dev->qrng_perf_client);
-	kfree(msm_rng_dev);
+	if (msm_rng_dev->drbg_ctx) {
+		kzfree(msm_rng_dev->drbg_ctx);
+		msm_rng_dev->drbg_ctx = NULL;
+	}
+	kzfree(msm_rng_dev);
 	return 0;
 }
 
@@ -336,6 +562,10 @@
 }
 
 module_exit(msm_rng_exit);
+#ifdef CONFIG_FIPS_ENABLE
+EXPORT_SYMBOL(fips_ctraes128_df_known_answer_test);
+#endif
+EXPORT_SYMBOL(_do_msm_fips_drbg_init);
 
 MODULE_AUTHOR("The Linux Foundation");
 MODULE_DESCRIPTION("Qualcomm MSM Random Number Driver");
diff --git a/drivers/char/hw_random/msm_rng.h b/drivers/char/hw_random/msm_rng.h
new file mode 100644
index 0000000..b79ba46
--- /dev/null
+++ b/drivers/char/hw_random/msm_rng.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __MSM_RNG_HEADER__
+#define __MSM_RNG_HEADER__
+
+#include <linux/semaphore.h>
+#include <linux/qcedev.h>
+
+struct fips_drbg_ctx_s;
+
+#define FIPS140_DRBG_ENABLED  (1)
+#define FIPS140_DRBG_DISABLED (0)
+
+#define Q_HW_DRBG_BLOCK_BYTES (32)
+
+extern void fips_reg_drbg_callback(void *src);
+
+struct msm_rng_device {
+	struct platform_device *pdev;
+	void __iomem *base;
+	struct clk *prng_clk;
+	uint32_t qrng_perf_client;
+	struct  semaphore drbg_sem;
+	struct fips_drbg_ctx_s *drbg_ctx;
+	int    fips140_drbg_enabled;
+};
+
+/*
+ * This function reads the hardware random bit generator directly and
+ * returns the data to the caller.
+ */
+int msm_rng_direct_read(struct msm_rng_device *msm_rng_dev, void *data);
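+
+/*
+ * Minimal usage sketch (illustrative only): the destination buffer is
+ * assumed to hold at least Q_HW_DRBG_BLOCK_BYTES bytes, and the return
+ * value is the number of bytes actually read from the hardware FIFO.
+ *
+ *	unsigned char buf[Q_HW_DRBG_BLOCK_BYTES];
+ *	int n = msm_rng_direct_read(msm_rng_dev, buf);
+ */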
+
+#endif
diff --git a/drivers/cpufreq/cpu-boost.c b/drivers/cpufreq/cpu-boost.c
index b4aec53..f50c050 100644
--- a/drivers/cpufreq/cpu-boost.c
+++ b/drivers/cpufreq/cpu-boost.c
@@ -140,7 +140,8 @@
 	unsigned long flags;
 
 	while(1) {
-		wait_event(s->sync_wq, s->pending || kthread_should_stop());
+		wait_event_interruptible(s->sync_wq, s->pending ||
+					kthread_should_stop());
 
 		if (kthread_should_stop())
 			break;
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 3422f05..fd6ac3d 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -291,6 +291,15 @@
 config CRYPTO_DEV_QCE50
 	bool
 
+config FIPS_ENABLE
+	bool "FIPS140-2 compliant build"
+	default n
+	help
+	  Enable this option to make the build FIPS140-2 compliant.
+	  It compiles in the code that performs the FIPS140-2
+	  known answer tests and integrity checks. Say Y here
+	  to enable.
+
 config CRYPTO_DEV_QCRYPTO
 	tristate "Qualcomm Crypto accelerator"
 	select CRYPTO_DES
diff --git a/drivers/crypto/msm/Makefile b/drivers/crypto/msm/Makefile
index df9acf2..ee8bc93 100644
--- a/drivers/crypto/msm/Makefile
+++ b/drivers/crypto/msm/Makefile
@@ -1,3 +1,6 @@
+ifeq ($(CONFIG_FIPS_ENABLE), y)
+	obj-$(CONFIG_CRYPTO_DEV_QCEDEV) += qcedev_fips.o
+endif
 obj-$(CONFIG_CRYPTO_DEV_QCEDEV) += qcedev.o
 ifeq ($(CONFIG_CRYPTO_DEV_QCE50), y)
 	obj-$(CONFIG_CRYPTO_DEV_QCE) += qce50.o
@@ -8,5 +11,8 @@
 		obj-$(CONFIG_CRYPTO_DEV_QCE) += qce.o
 	endif
 endif
+ifeq ($(CONFIG_FIPS_ENABLE), y)
+	obj-$(CONFIG_CRYPTO_DEV_QCRYPTO) += qcrypto_fips.o
+endif
 obj-$(CONFIG_CRYPTO_DEV_QCRYPTO) += qcrypto.o
 obj-$(CONFIG_CRYPTO_DEV_OTA_CRYPTO) += ota_crypto.o
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
index d1e35ec..9839279 100644
--- a/drivers/crypto/msm/qcedev.c
+++ b/drivers/crypto/msm/qcedev.c
@@ -1,6 +1,6 @@
 /* Qualcomm CE device driver.
  *
- * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -33,12 +33,16 @@
 #include <mach/scm.h>
 #include <mach/msm_bus.h>
 #include <linux/qcedev.h>
+#include "qcedevi.h"
 #include "qce.h"
 
 
 #define CACHE_LINE_SIZE 32
 #define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
 
+/* Have the FIPS integrity tests completed? */
+bool is_fips_qcedev_integritytest_done;
+
 static uint8_t  _std_init_vector_sha1_uint8[] =   {
 	0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
 	0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
@@ -52,98 +56,8 @@
 	0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
 };
 
-enum qcedev_crypto_oper_type {
-  QCEDEV_CRYPTO_OPER_CIPHER	= 0,
-  QCEDEV_CRYPTO_OPER_SHA	= 1,
-  QCEDEV_CRYPTO_OPER_LAST
-};
-
-struct qcedev_handle;
-
-struct qcedev_cipher_req {
-	struct ablkcipher_request creq;
-	void *cookie;
-};
-
-struct qcedev_sha_req {
-	struct ahash_request sreq;
-	void *cookie;
-};
-
-struct	qcedev_sha_ctxt {
-	uint32_t	auth_data[4];
-	uint8_t		digest[QCEDEV_MAX_SHA_DIGEST];
-	uint32_t	diglen;
-	uint8_t		trailing_buf[64];
-	uint32_t	trailing_buf_len;
-	uint8_t		first_blk;
-	uint8_t		last_blk;
-	uint8_t		authkey[QCEDEV_MAX_SHA_BLOCK_SIZE];
-	bool		init_done;
-};
-
-struct qcedev_async_req {
-	struct list_head			list;
-	struct completion			complete;
-	enum qcedev_crypto_oper_type		op_type;
-	union {
-		struct qcedev_cipher_op_req	cipher_op_req;
-		struct qcedev_sha_op_req	sha_op_req;
-	};
-	union{
-		struct qcedev_cipher_req	cipher_req;
-		struct qcedev_sha_req		sha_req;
-	};
-	struct qcedev_handle			*handle;
-	int					err;
-};
-
 static DEFINE_MUTEX(send_cmd_lock);
 static DEFINE_MUTEX(qcedev_sent_bw_req);
-/**********************************************************************
- * Register ourselves as a misc device to be able to access the dev driver
- * from userspace. */
-
-
-#define QCEDEV_DEV	"qcedev"
-
-struct qcedev_control{
-
-	/* CE features supported by platform */
-	struct msm_ce_hw_support platform_support;
-
-	uint32_t ce_lock_count;
-	uint32_t high_bw_req_count;
-
-	/* CE features/algorithms supported by HW engine*/
-	struct ce_hw_support ce_support;
-
-	uint32_t  bus_scale_handle;
-
-	/* misc device */
-	struct miscdevice miscdevice;
-
-	/* qce handle */
-	void *qce;
-
-	/* platform device */
-	struct platform_device *pdev;
-
-	unsigned magic;
-
-	struct list_head ready_commands;
-	struct qcedev_async_req *active_command;
-	spinlock_t lock;
-	struct tasklet_struct done_tasklet;
-};
-
-struct qcedev_handle {
-	/* qcedev control handle */
-	struct qcedev_control *cntl;
-	/* qce internal sha context*/
-	struct	qcedev_sha_ctxt sha_ctxt;
-};
-
 /*-------------------------------------------------------------------------
 * Resource Locking Service
 * ------------------------------------------------------------------------*/
@@ -345,6 +259,12 @@
 	struct qcedev_handle *handle;
 	struct qcedev_control *podev;
 
+	/* If the FIPS tests have not passed, return an error */
+	if (((g_fips140_status == FIPS140_STATUS_FAIL) ||
+		(g_fips140_status == FIPS140_STATUS_PASS_CRYPTO)) &&
+		is_fips_qcedev_integritytest_done)
+		return -ENXIO;
+
 	podev = qcedev_minor_to_control(MINOR(inode->i_rdev));
 	if (podev == NULL) {
 		pr_err("%s: no such device %d\n", __func__,
@@ -427,7 +347,7 @@
 	return;
 }
 
-static void qcedev_sha_req_cb(void *cookie, unsigned char *digest,
+void qcedev_sha_req_cb(void *cookie, unsigned char *digest,
 	unsigned char *authdata, int ret)
 {
 	struct qcedev_sha_req *areq;
@@ -454,7 +374,7 @@
 };
 
 
-static void qcedev_cipher_req_cb(void *cookie, unsigned char *icv,
+void qcedev_cipher_req_cb(void *cookie, unsigned char *icv,
 	unsigned char *iv, int ret)
 {
 	struct qcedev_cipher_req *areq;
@@ -825,7 +745,7 @@
 	if (user_src && __copy_from_user(k_src,
 				(void __user *)user_src,
 				qcedev_areq->sha_op_req.data[0].len)) {
-		kfree(k_buf_src);
+		kzfree(k_buf_src);
 		return -EFAULT;
 	}
 	k_src += qcedev_areq->sha_op_req.data[0].len;
@@ -834,7 +754,7 @@
 		if (user_src && __copy_from_user(k_src,
 					(void __user *)user_src,
 					qcedev_areq->sha_op_req.data[i].len)) {
-			kfree(k_buf_src);
+			kzfree(k_buf_src);
 			return -EFAULT;
 		}
 		k_src += qcedev_areq->sha_op_req.data[i].len;
@@ -865,7 +785,7 @@
 	handle->sha_ctxt.last_blk = 0;
 	handle->sha_ctxt.first_blk = 0;
 
-	kfree(k_buf_src);
+	kzfree(k_buf_src);
 	return err;
 }
 
@@ -979,7 +899,7 @@
 		}
 		sreq->entries = saved_req->entries;
 		sreq->data_len = saved_req->data_len;
-		kfree(saved_req);
+		kzfree(saved_req);
 	} else
 		err = qcedev_sha_update_max_xfer(qcedev_areq, handle, sg_src);
 
@@ -1038,7 +958,7 @@
 	handle->sha_ctxt.init_done = false;
 	memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);
 
-	kfree(k_buf_src);
+	kzfree(k_buf_src);
 	return err;
 }
 
@@ -1090,7 +1010,7 @@
 			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
 		if (user_src && __copy_from_user(k_src, (void __user *)user_src,
 				qcedev_areq->sha_op_req.data[i].len)) {
-			kfree(k_buf_src);
+			kzfree(k_buf_src);
 			return -EFAULT;
 		}
 		k_src += qcedev_areq->sha_op_req.data[i].len;
@@ -1104,7 +1024,7 @@
 	handle->sha_ctxt.diglen = qcedev_areq->sha_op_req.diglen;
 	err = submit_req(qcedev_areq, handle);
 
-	kfree(k_buf_src);
+	kzfree(k_buf_src);
 	return err;
 }
 
@@ -1224,7 +1144,7 @@
 	handle->sha_ctxt.last_blk = 0;
 	handle->sha_ctxt.first_blk = 0;
 
-	kfree(k_src);
+	kzfree(k_src);
 	return err;
 }
 
@@ -1507,8 +1427,8 @@
 				err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
 						&di, handle, k_align_src);
 				if (err < 0) {
-					kfree(k_buf_src);
-					kfree(saved_req);
+					kzfree(k_buf_src);
+					kzfree(saved_req);
 					return err;
 				}
 
@@ -1549,8 +1469,8 @@
 				err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
 						&di, handle, k_align_src);
 				if (err < 0) {
-					kfree(k_buf_src);
-					kfree(saved_req);
+					kzfree(k_buf_src);
+					kzfree(saved_req);
 					return err;
 				}
 
@@ -1593,8 +1513,8 @@
 	creq->data_len = saved_req->data_len;
 	creq->byteoffset = saved_req->byteoffset;
 
-	kfree(saved_req);
-	kfree(k_buf_src);
+	kzfree(saved_req);
+	kzfree(k_buf_src);
 	return err;
 
 }
@@ -1703,6 +1623,18 @@
 		goto error;
 	}
 
+	/* Ensure IV size */
+	if (req->ivlen > QCEDEV_MAX_IV_SIZE) {
+		pr_err("%s: ivlen is not correct: %u\n", __func__, req->ivlen);
+		goto error;
+	}
+
+	/* Ensure Key size */
+	if (req->encklen > QCEDEV_MAX_KEY_SIZE) {
+		pr_err("%s: Klen is not correct: %u\n", __func__, req->encklen);
+		goto error;
+	}
+
 	/* Ensure zero ivlen for ECB mode */
 	if (req->ivlen > 0) {
 		if ((req->mode == QCEDEV_AES_MODE_ECB) ||
@@ -1718,8 +1650,8 @@
 		}
 	}
 	/* Check for sum of all dst length is equal to data_len  */
-	for (i = 0; (i < QCEDEV_MAX_BUFFERS) && (total < req->data_len); i++) {
-		if (req->vbuf.dst[i].len > ULONG_MAX - total) {
+	for (i = 0; i < req->entries; i++) {
+		if (req->vbuf.dst[i].len >= ULONG_MAX - total) {
 			pr_err("%s: Integer overflow on total req dst vbuf length\n",
 				__func__);
 			goto error;
@@ -2001,6 +1933,58 @@
 		}
 		break;
 
+	/*
+	 * This IOCTL can be issued only once,
+	 * by the FIPS integrity test.
+	 */
+	case QCEDEV_IOCTL_UPDATE_FIPS_STATUS:
+		{
+		enum fips_status status;
+		if (is_fips_qcedev_integritytest_done)
+			return -EPERM;
+
+		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+			sizeof(enum fips_status)))
+			return -EFAULT;
+
+		if (__copy_from_user(&status, (void __user *)arg,
+			sizeof(enum fips_status)))
+			return -EFAULT;
+
+		g_fips140_status = _fips_update_status(status);
+		pr_info("qcedev: FIPS140-2 Global status flag: %d\n",
+			g_fips140_status);
+		is_fips_qcedev_integritytest_done = true;
+
+		if (g_fips140_status == FIPS140_STATUS_FAIL) {
+			pr_info("qcedev: FIPS140-2 Integrity test failed\n");
+			break;
+		}
+
+		if (!(_do_msm_fips_drbg_init(drbg_call_back)) &&
+			(g_fips140_status != FIPS140_STATUS_NA))
+			g_fips140_status = FIPS140_STATUS_PASS;
+		}
+
+		pr_info("qcedev: FIPS140-2 Global status flag: %d\n",
+			g_fips140_status);
+
+		break;
+
+	/*
+	 * Read-only IOCTL to query the
+	 * current FIPS140-2 status.
+	 */
+	case QCEDEV_IOCTL_QUERY_FIPS_STATUS:
+		{
+		enum fips_status status;
+		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+			sizeof(enum fips_status)))
+			return -EFAULT;
+
+		status = g_fips140_status;
+		if (__copy_to_user((void __user *)arg, &status,
+			sizeof(enum fips_status)))
+			return -EFAULT;
+
+		}
+		break;
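+		/*
+		 * Illustrative userspace query (a sketch; assumes the
+		 * device node is /dev/qcedev):
+		 *
+		 *	enum fips_status st;
+		 *	int fd = open("/dev/qcedev", O_RDWR);
+		 *
+		 *	if (fd >= 0 &&
+		 *	    !ioctl(fd, QCEDEV_IOCTL_QUERY_FIPS_STATUS, &st))
+		 *		printf("FIPS140-2 status: %d\n", st);
+		 */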
 	default:
 		return -ENOTTY;
 	}
@@ -2076,6 +2060,25 @@
 			goto err;
 		}
 	}
+
+	/*
+	 * FIPS140-2 Known Answer Tests:
+	 * in case of any failure, do not init the module.
+	 */
+	is_fips_qcedev_integritytest_done = false;
+	if (g_fips140_status != FIPS140_STATUS_NA) {
+		if (_fips_qcedev_cipher_selftest(&qce_dev[0]) ||
+			_fips_qcedev_sha_selftest(&qce_dev[0])) {
+			pr_err("qcedev: FIPS140-2 Known Answer Tests : Failed\n");
+			panic("SYSTEM CAN NOT BOOT !!!");
+			rc = -1;
+		} else {
+			pr_info("qcedev: FIPS140-2 Known Answer Tests : Successful\n");
+			rc = 0;
+		}
+	} else
+		pr_info("qcedev: FIPS140-2 Known Answer Tests : Skipped\n");
+
 	if (rc >= 0)
 		return 0;
 	else
diff --git a/drivers/crypto/msm/qcedev_fips.c b/drivers/crypto/msm/qcedev_fips.c
new file mode 100644
index 0000000..fde1b88
--- /dev/null
+++ b/drivers/crypto/msm/qcedev_fips.c
@@ -0,0 +1,489 @@
+/* FIPS known answer tests for QCEDEV / FIPS-non-FIPS separation.
+ *
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/mman.h>
+#include <linux/types.h>
+#include <linux/export.h>
+#include <linux/qcedev.h>
+#include "qcedevi.h"
+#include "qcedev_fips.h"
+
+/*
+ * Initiate the session handle (like open /dev/qce)
+ */
+static int _fips_initiate_qcedev_handle(struct qcedev_control *podev,
+				struct qcedev_async_req *qcedev_areq)
+{
+	struct  qcedev_handle *handle;
+
+	handle = kzalloc(sizeof(struct qcedev_handle), GFP_KERNEL);
+	if (handle == NULL) {
+		pr_err("%s: failed to allocate qcedev handle\n", __func__);
+		return -ENOMEM;
+	}
+
+	handle->cntl = podev;
+	qcedev_areq->handle = handle;
+	return 0;
+}
+
+/*
+ * Initiate a QCEDEV request for SHA/HMAC
+ */
+static
+int _fips_initiate_qcedev_async_req_sha(struct qcedev_async_req *qcedev_areq,
+		struct scatterlist *fips_sg,
+		int tv_index)
+{
+	qcedev_areq->sha_op_req.alg =
+		fips_test_vector_sha_hmac[tv_index].hash_alg;
+
+	/* For HMAC/CMAC, set up the key; otherwise make the key length zero */
+	if ((qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) ||
+		(qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) ||
+		(qcedev_areq->sha_op_req.alg == QCEDEV_ALG_AES_CMAC)) {
+		qcedev_areq->sha_op_req.authkey =
+			&fips_test_vector_sha_hmac[tv_index].key[0];
+		qcedev_areq->sha_op_req.authklen  =
+			fips_test_vector_sha_hmac[tv_index].klen;
+	} else
+		qcedev_areq->sha_op_req.authklen = 0;
+
+	/* Setup input and digest */
+	qcedev_areq->sha_op_req.data[0].vaddr =
+		&fips_test_vector_sha_hmac[tv_index].input[0];
+	qcedev_areq->sha_op_req.data[0].len =
+		fips_test_vector_sha_hmac[tv_index].ilen;
+	qcedev_areq->sha_op_req.data_len =
+		fips_test_vector_sha_hmac[tv_index].ilen;
+
+	/* Setup sha context and other parameters */
+	qcedev_areq->sha_op_req.entries = 1;
+	qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA;
+	memset(&qcedev_areq->handle->sha_ctxt, 0,
+		sizeof(struct qcedev_sha_ctxt));
+	qcedev_areq->handle->sha_ctxt.first_blk = 1;
+
+	/* Initialize digest and digest length */
+	memset(&qcedev_areq->sha_op_req.digest[0], 0, QCEDEV_MAX_SHA_DIGEST);
+	qcedev_areq->sha_op_req.diglen =
+		fips_test_vector_sha_hmac[tv_index].diglen;
+	switch (qcedev_areq->sha_op_req.alg) {
+	case QCEDEV_ALG_SHA1:
+	case QCEDEV_ALG_SHA1_HMAC:
+		memcpy(&qcedev_areq->handle->sha_ctxt.digest[0],
+			&_std_init_vector_sha1_uint8[0],
+			SHA1_DIGEST_SIZE);
+		break;
+	case QCEDEV_ALG_SHA256:
+	case QCEDEV_ALG_SHA256_HMAC:
+		memcpy(&qcedev_areq->handle->sha_ctxt.digest[0],
+			&_std_init_vector_sha256_uint8[0],
+			SHA256_DIGEST_SIZE);
+		break;
+	case QCEDEV_ALG_AES_CMAC:
+		qcedev_areq->handle->sha_ctxt.diglen =
+			fips_test_vector_sha_hmac[tv_index].diglen;
+		break;
+	default:
+		pr_err("%s: invalid algorithm\n", __func__);
+		return -EINVAL;
+	}
+
+	qcedev_areq->handle->sha_ctxt.init_done = true;
+	qcedev_areq->handle->sha_ctxt.trailing_buf_len =
+		qcedev_areq->sha_op_req.data_len;
+	memcpy(&qcedev_areq->handle->sha_ctxt.trailing_buf[0],
+		fips_test_vector_sha_hmac[tv_index].input,
+		fips_test_vector_sha_hmac[tv_index].ilen);
+	qcedev_areq->handle->sha_ctxt.last_blk = 1;
+	qcedev_areq->sha_req.sreq.nbytes = qcedev_areq->sha_op_req.data_len;
+	qcedev_areq->sha_req.cookie = qcedev_areq->handle;
+	qcedev_areq->sha_req.sreq.src = fips_sg;
+	sg_set_buf(qcedev_areq->sha_req.sreq.src,
+		&qcedev_areq->handle->sha_ctxt.trailing_buf[0],
+		qcedev_areq->sha_op_req.data_len);
+	sg_mark_end(qcedev_areq->sha_req.sreq.src);
+	return 0;
+}
+
+/*
+ * Clean up of sha context after request completion
+ */
+static void _fips_clear_qcedev_handle(struct qcedev_sha_ctxt *sha_ctxt)
+{
+	sha_ctxt->first_blk = 0;
+	sha_ctxt->last_blk = 0;
+	sha_ctxt->auth_data[0] = 0;
+	sha_ctxt->auth_data[1] = 0;
+	sha_ctxt->trailing_buf_len = 0;
+	sha_ctxt->init_done = false;
+	memset(&sha_ctxt->trailing_buf[0], 0, 64);
+}
+
+/*
+ * Self test for SHA / HMAC
+ */
+int _fips_qcedev_sha_selftest(struct qcedev_control *podev)
+{
+	int ret = 0, tv_index, num_tv;
+	struct qce_sha_req sreq;
+	struct qcedev_async_req qcedev_areq;
+	struct scatterlist fips_sg;
+
+	/* Initiate handle */
+	if (_fips_initiate_qcedev_handle(podev, &qcedev_areq))
+		return -ENOMEM;
+
+	num_tv = ARRAY_SIZE(fips_test_vector_sha_hmac);
+
+	/* Tests one by one */
+	for (tv_index = 0; tv_index < num_tv; tv_index++) {
+		init_completion(&qcedev_areq.complete);
+
+		/* Initiate the qcedev request */
+		if (_fips_initiate_qcedev_async_req_sha(&qcedev_areq,
+			&fips_sg, tv_index))
+			return -EINVAL;
+
+		podev->active_command = &qcedev_areq;
+
+		/* Initiate qce hash request */
+		sreq.qce_cb = qcedev_sha_req_cb;
+		if (qcedev_areq.sha_op_req.alg != QCEDEV_ALG_AES_CMAC) {
+			sreq.digest = &qcedev_areq.handle->sha_ctxt.digest[0];
+			sreq.first_blk = qcedev_areq.handle->sha_ctxt.first_blk;
+			sreq.last_blk = qcedev_areq.handle->sha_ctxt.last_blk;
+			sreq.auth_data[0] =
+				qcedev_areq.handle->sha_ctxt.auth_data[0];
+			sreq.auth_data[1] =
+				qcedev_areq.handle->sha_ctxt.auth_data[1];
+			sreq.auth_data[2] =
+				qcedev_areq.handle->sha_ctxt.auth_data[2];
+			sreq.auth_data[3] =
+				qcedev_areq.handle->sha_ctxt.auth_data[3];
+		}
+
+		sreq.size = qcedev_areq.sha_req.sreq.nbytes;
+		sreq.src = qcedev_areq.sha_req.sreq.src;
+		sreq.areq = (void *)&qcedev_areq.sha_req;
+		sreq.flags = 0;
+		switch (qcedev_areq.sha_op_req.alg) {
+		case QCEDEV_ALG_SHA1:
+			sreq.alg = QCE_HASH_SHA1;
+			break;
+		case QCEDEV_ALG_SHA256:
+			sreq.alg = QCE_HASH_SHA256;
+			break;
+		case QCEDEV_ALG_SHA1_HMAC:
+			sreq.alg = QCE_HASH_SHA1_HMAC;
+			sreq.authkey = &qcedev_areq.sha_op_req.authkey[0];
+			sreq.authklen = qcedev_areq.sha_op_req.authklen;
+			break;
+		case QCEDEV_ALG_SHA256_HMAC:
+			sreq.alg = QCE_HASH_SHA256_HMAC;
+			sreq.authkey =
+				&qcedev_areq.sha_op_req.authkey[0];
+			sreq.authklen =
+				qcedev_areq.sha_op_req.authklen;
+			break;
+		case QCEDEV_ALG_AES_CMAC:
+			sreq.alg = QCE_HASH_AES_CMAC;
+			sreq.authkey =
+				&qcedev_areq.sha_op_req.authkey[0];
+			sreq.authklen =
+				qcedev_areq.sha_op_req.authklen;
+			break;
+		default:
+			ret = -EINVAL;
+			goto handle_free;
+		}
+
+		/* qce call */
+		ret = qce_process_sha_req(podev->qce, &sreq);
+		if (ret == 0)
+			wait_for_completion(&qcedev_areq.complete);
+		else
+			goto handle_free;
+
+		/* Known answer test */
+		if (memcmp(&qcedev_areq.handle->sha_ctxt.digest[0],
+			fips_test_vector_sha_hmac[tv_index].digest,
+			fips_test_vector_sha_hmac[tv_index].diglen)) {
+			ret = -1;
+			goto handle_free;
+		}
+		_fips_clear_qcedev_handle(&qcedev_areq.handle->sha_ctxt);
+	}
+
+handle_free:
+	kzfree(qcedev_areq.handle);
+	return ret;
+}
+
+/*
+ * Initiate QCEDEV request for cipher (Encryption/ Decryption requests)
+ */
+static
+void _fips_initiate_qcedev_async_req_cipher(
+			struct qcedev_async_req *qcedev_areq,
+			enum qcedev_oper_enum qcedev_oper,
+			struct scatterlist *fips_sg,
+			uint8_t *k_align_src,
+			int tv_index)
+{
+	uint8_t *k_align_dst = k_align_src;
+
+	/* Setup Key */
+	memset(qcedev_areq->cipher_op_req.enckey, 0,
+		fips_test_vector_cipher[tv_index].klen);
+	memcpy(qcedev_areq->cipher_op_req.enckey,
+		fips_test_vector_cipher[tv_index].key,
+		fips_test_vector_cipher[tv_index].klen);
+	qcedev_areq->cipher_op_req.encklen =
+		fips_test_vector_cipher[tv_index].klen;
+
+	/* Setup IV */
+	memset(qcedev_areq->cipher_op_req.iv, 0,
+		fips_test_vector_cipher[tv_index].ivlen);
+	memcpy(qcedev_areq->cipher_op_req.iv,
+		fips_test_vector_cipher[tv_index].iv,
+		fips_test_vector_cipher[tv_index].ivlen);
+	qcedev_areq->cipher_op_req.ivlen =
+		fips_test_vector_cipher[tv_index].ivlen;
+
+	/* Setup other parameters */
+	qcedev_areq->cipher_op_req.byteoffset  = 0;
+	qcedev_areq->cipher_op_req.alg =
+		fips_test_vector_cipher[tv_index].enc_alg;
+	qcedev_areq->cipher_op_req.mode =
+		fips_test_vector_cipher[tv_index].mode;
+	qcedev_areq->cipher_op_req.use_pmem = 0;
+	qcedev_areq->cipher_op_req.in_place_op = 1;
+	qcedev_areq->cipher_op_req.entries = 1;
+	qcedev_areq->cipher_op_req.op = qcedev_oper;
+	qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_CIPHER;
+
+	/* Setup Input and output buffers */
+	if (qcedev_oper == QCEDEV_OPER_ENC) {
+		qcedev_areq->cipher_op_req.data_len =
+			fips_test_vector_cipher[tv_index].pln_txt_len;
+		qcedev_areq->cipher_op_req.vbuf.src[0].len =
+			fips_test_vector_cipher[tv_index].pln_txt_len;
+	} else {
+		qcedev_areq->cipher_op_req.data_len =
+			fips_test_vector_cipher[tv_index].enc_txt_len;
+		qcedev_areq->cipher_op_req.vbuf.src[0].len =
+			fips_test_vector_cipher[tv_index].enc_txt_len;
+	}
+
+	qcedev_areq->cipher_op_req.vbuf.src[0].vaddr =
+		&k_align_src[0];
+	qcedev_areq->cipher_op_req.vbuf.dst[0].vaddr =
+		&k_align_dst[0];
+	qcedev_areq->cipher_op_req.vbuf.dst[0].len =
+		fips_test_vector_cipher[tv_index].enc_txt_len;
+
+	qcedev_areq->cipher_req.creq.src = fips_sg;
+	qcedev_areq->cipher_req.creq.dst = fips_sg;
+	sg_set_buf(qcedev_areq->cipher_req.creq.src,
+		k_align_src,
+		qcedev_areq->cipher_op_req.data_len);
+	sg_mark_end(qcedev_areq->cipher_req.creq.src);
+
+	qcedev_areq->cipher_req.creq.nbytes =
+		qcedev_areq->cipher_op_req.data_len;
+	qcedev_areq->cipher_req.creq.info =
+		qcedev_areq->cipher_op_req.iv;
+	qcedev_areq->cipher_req.cookie = qcedev_areq->handle;
+}
+
+/*
+ * Initiate QCE request for cipher (Encryption/ Decryption requests)
+ */
+static int _fips_initiate_qce_req_cipher(struct qcedev_async_req *qcedev_areq,
+			struct qce_req *creq,
+			enum qce_cipher_dir_enum cipher_dir)
+{
+	creq->dir = cipher_dir;
+	creq->iv = &qcedev_areq->cipher_op_req.iv[0];
+	creq->ivsize = qcedev_areq->cipher_op_req.ivlen;
+	creq->enckey =  &qcedev_areq->cipher_op_req.enckey[0];
+	creq->encklen = qcedev_areq->cipher_op_req.encklen;
+	creq->cryptlen = qcedev_areq->cipher_op_req.data_len;
+	creq->op = QCE_REQ_ABLK_CIPHER;
+	creq->qce_cb = qcedev_cipher_req_cb;
+	creq->areq = (void *)&qcedev_areq->cipher_req;
+	creq->flags = 0;
+	switch (qcedev_areq->cipher_op_req.alg) {
+	case QCEDEV_ALG_3DES:
+		creq->alg = CIPHER_ALG_3DES;
+		break;
+	case QCEDEV_ALG_AES:
+		creq->alg = CIPHER_ALG_AES;
+		break;
+	default:
+		pr_err("%s: invalid algorithm\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (qcedev_areq->cipher_op_req.mode) {
+	case QCEDEV_AES_MODE_CBC:
+	case QCEDEV_DES_MODE_CBC:
+		creq->mode = QCE_MODE_CBC;
+		break;
+	case QCEDEV_AES_MODE_ECB:
+	case QCEDEV_DES_MODE_ECB:
+		creq->mode = QCE_MODE_ECB;
+		break;
+	case QCEDEV_AES_MODE_CTR:
+		creq->mode = QCE_MODE_CTR;
+		break;
+	case QCEDEV_AES_MODE_XTS:
+		creq->mode = QCE_MODE_XTS;
+		break;
+	case QCEDEV_AES_MODE_CCM:
+		creq->mode = QCE_MODE_CCM;
+		break;
+	default:
+		pr_err("%s: invalid cipher mode\n", __func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Self test for Cipher algorithms
+ */
+int _fips_qcedev_cipher_selftest(struct qcedev_control *podev)
+{
+	int ret = 0, tv_index = 0, num_tv;
+	struct qcedev_async_req qcedev_areq;
+	struct qce_req creq;
+	struct scatterlist fips_sg;
+	uint8_t *k_align_src = NULL;
+
+	/* initiate handle */
+	if (_fips_initiate_qcedev_handle(podev, &qcedev_areq))
+		return -ENOMEM;
+
+	num_tv = ARRAY_SIZE(fips_test_vector_cipher);
+
+	/* tests one by one */
+	for (tv_index = 0; tv_index < num_tv; tv_index++) {
+
+		/* Allocate single buffer for in-place operation */
+		k_align_src = kzalloc(QCE_MAX_OPER_DATA, GFP_KERNEL);
+		if (k_align_src == NULL) {
+			pr_err("qcedev: failed to allocate memory for k_align_src\n");
+			kzfree(qcedev_areq.handle);
+			return -ENOMEM;
+		}
+
+		/**************** Encryption Tests *****************/
+		init_completion(&qcedev_areq.complete);
+		memcpy(&k_align_src[0],
+			fips_test_vector_cipher[tv_index].pln_txt,
+			fips_test_vector_cipher[tv_index].pln_txt_len);
+
+		/* Initiate qcedev request */
+		_fips_initiate_qcedev_async_req_cipher(&qcedev_areq,
+			QCEDEV_OPER_ENC, &fips_sg,
+			k_align_src, tv_index);
+		podev->active_command = &qcedev_areq;
+
+		/* Initiate qce cipher request */
+		if (_fips_initiate_qce_req_cipher(&qcedev_areq,
+			&creq, QCE_ENCRYPT)) {
+			ret = -EINVAL;
+			kzfree(k_align_src);
+			goto free_handle;
+		}
+
+		/* qce call */
+		ret = qce_ablk_cipher_req(podev->qce, &creq);
+		if (ret == 0)
+			wait_for_completion(&qcedev_areq.complete);
+		else {
+			kzfree(k_align_src);
+			goto free_handle;
+		}
+
+		/* Known answer test for encryption */
+		if (memcmp(k_align_src,
+			fips_test_vector_cipher[tv_index].enc_txt,
+			fips_test_vector_cipher[tv_index].enc_txt_len)) {
+			ret = -1;
+			kzfree(k_align_src);
+			goto free_handle;
+		}
+
+		/**************** Decryption Tests *****************/
+		init_completion(&qcedev_areq.complete);
+		memset(&k_align_src[0], 0,
+			fips_test_vector_cipher[tv_index].pln_txt_len);
+		memcpy(&k_align_src[0],
+			fips_test_vector_cipher[tv_index].enc_txt,
+			fips_test_vector_cipher[tv_index].enc_txt_len);
+
+		/* Initiate qcedev request */
+		_fips_initiate_qcedev_async_req_cipher(&qcedev_areq,
+			QCEDEV_OPER_DEC, &fips_sg,
+			k_align_src, tv_index);
+		podev->active_command = &qcedev_areq;
+
+		/* Initiate qce cipher request */
+		if (_fips_initiate_qce_req_cipher(&qcedev_areq,
+			&creq, QCE_DECRYPT)) {
+			ret = -EINVAL;
+			kzfree(k_align_src);
+			goto free_handle;
+		}
+
+		/* qce call */
+		ret = qce_ablk_cipher_req(podev->qce, &creq);
+		if (ret == 0)
+			wait_for_completion(&qcedev_areq.complete);
+		else {
+			kzfree(k_align_src);
+			goto free_handle;
+		}
+
+		/* Known answer test for Decryption */
+		if (memcmp(k_align_src,
+			fips_test_vector_cipher[tv_index].pln_txt,
+			fips_test_vector_cipher[tv_index].pln_txt_len)) {
+			ret = -1;
+			kzfree(k_align_src);
+			goto free_handle;
+		}
+		podev->active_command = NULL;
+		kzfree(k_align_src);
+	}
+
+free_handle:
+	kzfree(qcedev_areq.handle);
+	return ret;
+}
+
+void fips_reg_drbg_callback(void *src)
+{
+	drbg_call_back = src;
+}
+EXPORT_SYMBOL(fips_reg_drbg_callback);
diff --git a/drivers/crypto/msm/qcedev_fips.h b/drivers/crypto/msm/qcedev_fips.h
new file mode 100644
index 0000000..6f8ae7c
--- /dev/null
+++ b/drivers/crypto/msm/qcedev_fips.h
@@ -0,0 +1,438 @@
+/* Test vectors: FIPS known answer tests for QCEDEV.
+ *
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CRYPTO_MSM_QCEDEV_FIPS_H
+#define __CRYPTO_MSM_QCEDEV_FIPS_H
+
+#include "qce.h"
+
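+/* standard initialization vector for SHA-1, source: FIPS 180-2 */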
+static uint8_t  _std_init_vector_sha1_uint8[] =   {
+	0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
+	0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
+	0xC3, 0xD2, 0xE1, 0xF0
+};
+
+/* standard initialization vector for SHA-256, source: FIPS 180-2 */
+static uint8_t _std_init_vector_sha256_uint8[] = {
+	0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85,
+	0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A,
+	0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C,
+	0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
+};
+
+/*
+ * For Hashing / HMAC algorithms
+ */
+struct _fips_test_vector_sha_hmac {
+	char *key;
+	unsigned char klen;
+	char *input;
+	unsigned char ilen;
+	char *digest;
+	unsigned char diglen;
+	enum qcedev_sha_alg_enum hash_alg;
+};
+
+/*
+ * For cipher algorithms
+ */
+struct _fips_test_vector_cipher {
+	char *key;
+	unsigned char klen;
+	char *iv;
+	unsigned char ivlen;
+	char *pln_txt;
+	unsigned int pln_txt_len;
+	char *enc_txt;
+	unsigned int enc_txt_len;
+	enum qcedev_cipher_alg_enum enc_alg;
+	enum qcedev_cipher_mode_enum mode;
+};
+
+
+/*
+ * Test vectors for SHA and HMAC algorithms
+ */
+static struct _fips_test_vector_sha_hmac fips_test_vector_sha_hmac[] = {
+/* http://csrc.nist.gov/groups/ST/toolkit/documents/Examples/SHA1.pdf */
+	{/* SHA1 */
+		.hash_alg	=	QCEDEV_ALG_SHA1,
+		.input		=	"abc",
+		.ilen		=	3,
+		.digest	=	"\xa9\x99\x3e\x36\x47\x06\x81\x6a"
+					"\xba\x3e\x25\x71\x78\x50\xc2\x6c"
+					"\x9c\xd0\xd8\x9d",
+		.diglen	=	SHA1_DIGEST_SIZE,
+	},
+/* http://csrc.nist.gov/groups/ST/toolkit/documents/Examples/SHA256.pdf */
+	{/* SHA256 */
+		.hash_alg	=	QCEDEV_ALG_SHA256,
+		.input		=	"abc",
+		.ilen		=	3,
+		.digest	=	"\xba\x78\x16\xbf\x8f\x01\xcf\xea"
+					"\x41\x41\x40\xde\x5d\xae\x22\x23"
+					"\xb0\x03\x61\xa3\x96\x17\x7a\x9c"
+					"\xb4\x10\xff\x61\xf2\x00\x15\xad",
+		.diglen	=	SHA256_DIGEST_SIZE,
+	},
+/* http://csrc.nist.gov/groups/ST/toolkit/documents/Examples/HMAC_SHA1.pdf */
+	{/* HMAC-SHA1 */
+		.hash_alg	=	QCEDEV_ALG_SHA1_HMAC,
+		.key		=	"\x00\x01\x02\x03\x04\x05\x06\x07"
+					"\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F"
+					"\x10\x11\x12\x13",
+		.klen		=	20,
+		.input		=	"Sample message for keylen<blocklen",
+		.ilen		=	34,
+		.digest	=	"\x4C\x99\xFF\x0C\xB1\xB3\x1B\xD3"
+					"\x3F\x84\x31\xDB\xAF\x4D\x17\xFC"
+					"\xD3\x56\xA8\x07",
+		.diglen	=	SHA1_DIGEST_SIZE,
+	},
+/* http://csrc.nist.gov/groups/ST/toolkit/documents/Examples/HMAC_SHA256.pdf */
+	{/* HMAC-SHA256 */
+		.hash_alg	=	QCEDEV_ALG_SHA256_HMAC,
+		.key		=	"\x00\x01\x02\x03\x04\x05\x06\x07"
+					"\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F"
+					"\x10\x11\x12\x13\x14\x15\x16\x17"
+					"\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F"
+					"\x20\x21\x22\x23\x24\x25\x26\x27"
+					"\x28\x29\x2A\x2B\x2C\x2D\x2E\x2F"
+					"\x30\x31\x32\x33\x34\x35\x36\x37"
+					"\x38\x39\x3A\x3B\x3C\x3D\x3E\x3F",
+		.klen		=	64,
+		.input		=	"Sample message for keylen=blocklen",
+		.ilen		=	34,
+		.digest	=	"\x8B\xB9\xA1\xDB\x98\x06\xF2\x0D"
+					"\xF7\xF7\x7B\x82\x13\x8C\x79\x14"
+					"\xD1\x74\xD5\x9E\x13\xDC\x4D\x01"
+					"\x69\xC9\x05\x7B\x13\x3E\x1D\x62",
+		.diglen	=	SHA256_DIGEST_SIZE,
+	},
+/* From NIST Special Publication 800-38B Appendix D.1 */
+	{/* AES 128-CMAC */
+		.hash_alg	=	QCEDEV_ALG_AES_CMAC,
+		.key		=	"\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+					"\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+		.klen		=	16,
+		.input		=	"\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+					"\xe9\x3d\x7e\x11\x73\x93\x17\x2a",
+		.ilen		=	16,
+		.digest	=	"\x07\x0a\x16\xb4\x6b\x4d\x41\x44"
+					"\xf7\x9b\xdd\x9d\xd0\x4a\x28\x7c",
+		.diglen	=	16,
+	},
+	/* From NIST Special Publication 800-38B Appendix D.3 */
+	{/* AES 256-CMAC */
+		.hash_alg	=	QCEDEV_ALG_AES_CMAC,
+		.key		=	"\x60\x3d\xeb\x10\x15\xca\x71\xbe"
+					"\x2b\x73\xae\xf0\x85\x7d\x77\x81"
+					"\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
+					"\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
+		.klen		=	32,
+		.input		=	"\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+					"\xe9\x3d\x7e\x11\x73\x93\x17\x2a",
+		.ilen		=	16,
+		.digest	=	"\x28\xa7\x02\x3f\x45\x2e\x8f\x82"
+					"\xbd\x4b\xf2\x8d\x8c\x37\xc3\x5c",
+		.diglen	=	16,
+	},
+};
+
+/*
+ * Test vectors for cipher algorithms
+ */
+static struct _fips_test_vector_cipher fips_test_vector_cipher[] = {
+	/* From NIST Special Publication 800-38A, Appendix F.1 */
+	{/* AES-128 ECB */
+		.enc_alg	=	QCEDEV_ALG_AES,
+		.mode		=	QCEDEV_AES_MODE_ECB,
+		.key		=	"\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+					"\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+		.klen		=	16,
+		.ivlen		=	0,
+		.pln_txt	=	"\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+					"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+					"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+					"\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+					"\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+					"\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+					"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+					"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+		.pln_txt_len	=	64,
+		.enc_txt	=	"\x3a\xd7\x7b\xb4\x0d\x7a\x36\x60"
+					"\xa8\x9e\xca\xf3\x24\x66\xef\x97"
+					"\xf5\xd3\xd5\x85\x03\xb9\x69\x9d"
+					"\xe7\x85\x89\x5a\x96\xfd\xba\xaf"
+					"\x43\xb1\xcd\x7f\x59\x8e\xce\x23"
+					"\x88\x1b\x00\xe3\xed\x03\x06\x88"
+					"\x7b\x0c\x78\x5e\x27\xe8\xad\x3f"
+					"\x82\x23\x20\x71\x04\x72\x5d\xd4",
+		.enc_txt_len	=	64,
+	},
+	/* From NIST Special Publication 800-38A, Appendix F.1 */
+	{/* AES-256 ECB */
+		.enc_alg	=	QCEDEV_ALG_AES,
+		.mode		=	QCEDEV_AES_MODE_ECB,
+		.key		=	"\x60\x3d\xeb\x10\x15\xca\x71\xbe"
+					"\x2b\x73\xae\xf0\x85\x7d\x77\x81"
+					"\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
+					"\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
+		.klen		=	32,
+		.ivlen		=	0,
+		.pln_txt	=	"\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+					"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+					"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+					"\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+					"\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+					"\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+					"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+					"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+		.pln_txt_len	=	64,
+		.enc_txt	=	"\xf3\xee\xd1\xbd\xb5\xd2\xa0\x3c"
+					"\x06\x4b\x5a\x7e\x3d\xb1\x81\xf8"
+					"\x59\x1c\xcb\x10\xd4\x10\xed\x26"
+					"\xdc\x5b\xa7\x4a\x31\x36\x28\x70"
+					"\xb6\xed\x21\xb9\x9c\xa6\xf4\xf9"
+					"\xf1\x53\xe7\xb1\xbe\xaf\xed\x1d"
+					"\x23\x30\x4b\x7a\x39\xf9\xf3\xff"
+					"\x06\x7d\x8d\x8f\x9e\x24\xec\xc7",
+		.enc_txt_len	=	64,
+	},
+	/* From NIST Special Publication 800-38A, Appendix F.2 */
+	{/* AES-128 CBC */
+		.enc_alg	=	QCEDEV_ALG_AES,
+		.mode		=	QCEDEV_AES_MODE_CBC,
+		.key		=	"\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+					"\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+		.klen		=	16,
+		.iv		=	"\x00\x01\x02\x03\x04\x05\x06\x07"
+					"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.ivlen		=	16,
+		.pln_txt	=	"\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+					"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+					"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+					"\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+					"\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+					"\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+					"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+					"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+		.pln_txt_len	=	64,
+		.enc_txt	=	"\x76\x49\xab\xac\x81\x19\xb2\x46"
+					"\xce\xe9\x8e\x9b\x12\xe9\x19\x7d"
+					"\x50\x86\xcb\x9b\x50\x72\x19\xee"
+					"\x95\xdb\x11\x3a\x91\x76\x78\xb2"
+					"\x73\xbe\xd6\xb8\xe3\xc1\x74\x3b"
+					"\x71\x16\xe6\x9e\x22\x22\x95\x16"
+					"\x3f\xf1\xca\xa1\x68\x1f\xac\x09"
+					"\x12\x0e\xca\x30\x75\x86\xe1\xa7",
+		.enc_txt_len	=	64,
+	},
+	/* From NIST Special Publication 800-38A, Appendix F.2 */
+	{/* AES-256 CBC */
+		.enc_alg	=	QCEDEV_ALG_AES,
+		.mode		=	QCEDEV_AES_MODE_CBC,
+		.key		=	"\x60\x3d\xeb\x10\x15\xca\x71\xbe"
+					"\x2b\x73\xae\xf0\x85\x7d\x77\x81"
+					"\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
+					"\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
+		.klen		=	32,
+		.iv		=	"\x00\x01\x02\x03\x04\x05\x06\x07"
+					"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.ivlen		=	16,
+		.pln_txt	=	"\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+					"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+					"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+					"\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+					"\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+					"\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+					"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+					"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+		.pln_txt_len	=	64,
+		.enc_txt	=	"\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba"
+					"\x77\x9e\xab\xfb\x5f\x7b\xfb\xd6"
+					"\x9c\xfc\x4e\x96\x7e\xdb\x80\x8d"
+					"\x67\x9f\x77\x7b\xc6\x70\x2c\x7d"
+					"\x39\xf2\x33\x69\xa9\xd9\xba\xcf"
+					"\xa5\x30\xe2\x63\x04\x23\x14\x61"
+					"\xb2\xeb\x05\xe2\xc3\x9b\xe9\xfc"
+					"\xda\x6c\x19\x07\x8c\x6a\x9d\x1b",
+		.enc_txt_len	=	64,
+	},
+	/* From NIST Special Publication 800-38A, Appendix F.5 */
+	{/* AES-128 CTR */
+		.enc_alg	=	QCEDEV_ALG_AES,
+		.mode		=	QCEDEV_AES_MODE_CTR,
+		.key		=	"\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+					"\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+		.klen		=	16,
+		.iv		=	"\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+					"\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+		.ivlen		=	16,
+		.pln_txt	=	"\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+					"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+					"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+					"\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+					"\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+					"\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+					"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+					"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+		.pln_txt_len	=	64,
+		.enc_txt	=	"\x87\x4d\x61\x91\xb6\x20\xe3\x26"
+					"\x1b\xef\x68\x64\x99\x0d\xb6\xce"
+					"\x98\x06\xf6\x6b\x79\x70\xfd\xff"
+					"\x86\x17\x18\x7b\xb9\xff\xfd\xff"
+					"\x5a\xe4\xdf\x3e\xdb\xd5\xd3\x5e"
+					"\x5b\x4f\x09\x02\x0d\xb0\x3e\xab"
+					"\x1e\x03\x1d\xda\x2f\xbe\x03\xd1"
+					"\x79\x21\x70\xa0\xf3\x00\x9c\xee",
+		.enc_txt_len	=	64,
+	},
+	/* From NIST Special Publication 800-38A, Appendix F.5 */
+	{/* AES-256 CTR */
+		.enc_alg	=	QCEDEV_ALG_AES,
+		.mode		=	QCEDEV_AES_MODE_CTR,
+		.key		=	"\x60\x3d\xeb\x10\x15\xca\x71\xbe"
+					"\x2b\x73\xae\xf0\x85\x7d\x77\x81"
+					"\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
+					"\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
+		.klen		=	32,
+		.iv		=	"\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+					"\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+		.ivlen		=	16,
+		.pln_txt	=	"\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+					"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+					"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+					"\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+					"\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+					"\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+					"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+					"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+		.pln_txt_len	=	64,
+		.enc_txt	=	"\x60\x1e\xc3\x13\x77\x57\x89\xa5"
+					"\xb7\xa7\xf5\x04\xbb\xf3\xd2\x28"
+					"\xf4\x43\xe3\xca\x4d\x62\xb5\x9a"
+					"\xca\x84\xe9\x90\xca\xca\xf5\xc5"
+					"\x2b\x09\x30\xda\xa2\x3d\xe9\x4c"
+					"\xe8\x70\x17\xba\x2d\x84\x98\x8d"
+					"\xdf\xc9\xc5\x8d\xb6\x7a\xad\xa6"
+					"\x13\xc2\xdd\x08\x45\x79\x41\xa6",
+		.enc_txt_len	=	64,
+	},
+	/* Derived from NIST Special Publication 800-38A */
+	{/* AES-128 XTS requires two keys, so the key length is doubled. */
+		.enc_alg	=	QCEDEV_ALG_AES,
+		.mode		=	QCEDEV_AES_MODE_XTS,
+		.key		=	"\x60\x3d\xeb\x10\x15\xca\x71\xbe"
+					"\x2b\x73\xae\xf0\x85\x7d\x77\x81"
+					"\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
+					"\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
+		.klen		=	32,
+		.iv		=	"\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+					"\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+		.ivlen		=	16,
+		.pln_txt	=	"\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+					"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+					"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+					"\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+					"\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+					"\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+					"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+					"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+		.pln_txt_len	=	64,
+		.enc_txt	=	"\xba\x2a\x7d\x50\x7b\x60\x63\x3e"
+					"\xf3\x1b\x06\x14\xb4\x45\xb5\xb5"
+					"\x42\x0d\x12\x57\x28\x15\x2e\x5d"
+					"\x5a\x54\xbe\x46\x5c\x9d\x1f\x2e"
+					"\x18\x8e\x79\x07\xc7\xdf\xe7\xf8"
+					"\x78\xa6\x53\x2a\x80\xb4\xd9\xce"
+					"\x1d\xbe\x75\x7e\xb6\x11\xef\x1e"
+					"\x51\x5d\xd6\x70\x03\x51\xcc\x94",
+		.enc_txt_len	=	64,
+	},
+	/* Derived from NIST Special Publication 800-38A */
+	{/* AES-256 XTS requires two keys, so the key length is doubled. */
+		.enc_alg	=	QCEDEV_ALG_AES,
+		.mode		=	QCEDEV_AES_MODE_XTS,
+		.key		=	"\x3a\xd7\x7b\xb4\x0d\x7a\x36\x60"
+					"\xa8\x9e\xca\xf3\x24\x66\xef\x97"
+					"\xf5\xd3\xd5\x85\x03\xb9\x69\x9d"
+					"\xe7\x85\x89\x5a\x96\xfd\xba\xaf"
+					"\x43\xb1\xcd\x7f\x59\x8e\xce\x23"
+					"\x88\x1b\x00\xe3\xed\x03\x06\x88"
+					"\x7b\x0c\x78\x5e\x27\xe8\xad\x3f"
+					"\x82\x23\x20\x71\x04\x72\x5d\xd4",
+		.klen		=	64,
+		.iv		=	"\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+					"\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+		.ivlen		=	16,
+		.pln_txt	=	"\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+					"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+					"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+					"\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+					"\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+					"\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+					"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+					"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+		.pln_txt_len	=	64,
+		.enc_txt	=	"\xd7\x2b\x90\x02\x6f\xf0\xd2\x39"
+					"\x7b\x1a\x57\x92\xd0\x1e\xc1\xb6"
+					"\x04\x8c\x08\x8e\xa4\x1f\xa0\x0f"
+					"\x5e\xd8\xaf\xda\x6e\xd2\x4e\x5b"
+					"\x23\xde\x09\xa4\x19\x79\xda\xd4"
+					"\xe9\x4b\xbc\x05\x2e\xca\x20\x7d"
+					"\xd5\x0f\x89\x88\xa3\xda\x46\x1f"
+					"\x1e\xde\x53\x78\x90\xb2\x9a\x2c",
+		.enc_txt_len	=	64,
+	},
+	/* From NIST Special Publication 800-67, Appendix B.1 */
+	{/* 3DES ECB */
+		.enc_alg	=	QCEDEV_ALG_3DES,
+		.mode		=	QCEDEV_DES_MODE_ECB,
+		.key		=	"\x01\x23\x45\x67\x89\xAB\xCD\xEF"
+					"\x23\x45\x67\x89\xAB\xCD\xEF\x01"
+					"\x45\x67\x89\xAB\xCD\xEF\x01\x23",
+		.klen		=	24,
+		.ivlen		=	0,
+		.pln_txt	=	"\x54\x68\x65\x20\x71\x75\x66\x63"
+					"\x6B\x20\x62\x72\x6F\x77\x6E\x20"
+					"\x66\x6F\x78\x20\x6A\x75\x6D\x70",
+		.pln_txt_len	=	24,
+		.enc_txt	=	"\xA8\x26\xFD\x8C\xE5\x3B\x85\x5F"
+					"\xCC\xE2\x1C\x81\x12\x25\x6F\xE6"
+					"\x68\xD5\xC0\x5D\xD9\xB6\xB9\x00",
+		.enc_txt_len	=	24,
+	},
+	/* Derived from NIST Special Publications 800-38A and 800-67 */
+	{/* 3DES CBC */
+		.enc_alg	=	QCEDEV_ALG_3DES,
+		.mode		=	QCEDEV_DES_MODE_CBC,
+		.key		=	"\x01\x23\x45\x67\x89\xAB\xCD\xEF"
+					"\x23\x45\x67\x89\xAB\xCD\xEF\x01"
+					"\x45\x67\x89\xAB\xCD\xEF\x01\x23",
+		.klen		=	24,
+		.iv		=	"\x00\x01\x02\x03\x04\x05\x06\x07",
+		.ivlen		=	8,
+		.pln_txt	=	"\x54\x68\x65\x20\x71\x75\x66\x63"
+					"\x6B\x20\x62\x72\x6F\x77\x6E\x20"
+					"\x66\x6F\x78\x20\x6A\x75\x6D\x70",
+		.pln_txt_len	=	24,
+		.enc_txt	=	"\xf3\x68\xd0\x6f\x3b\xbd\x61\x4e"
+					"\x60\xf2\xd0\x24\x5c\xad\x3f\x81"
+					"\x8d\x5c\x69\xf2\xcb\x3f\xd5\xc7",
+		.enc_txt_len	=	24,
+	},
+};
+#endif	/* __CRYPTO_MSM_QCEDEV_FIPS_H */
diff --git a/drivers/crypto/msm/qcedevi.h b/drivers/crypto/msm/qcedevi.h
new file mode 100644
index 0000000..361050a
--- /dev/null
+++ b/drivers/crypto/msm/qcedevi.h
@@ -0,0 +1,173 @@
+/* QTI crypto Driver
+ *
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CRYPTO_MSM_QCEDEVI_H
+#define __CRYPTO_MSM_QCEDEVI_H
+
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+#include <crypto/hash.h>
+#include <linux/platform_data/qcom_crypto_device.h>
+#include <linux/fips_status.h>
+#include "qce.h"
+
+#define CACHE_LINE_SIZE 32
+#define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
+
+/* FIPS global status variable */
+extern enum fips_status g_fips140_status;
+
+/* FIPS140-2 callback for DRBG self test */
+extern void *drbg_call_back;
+
+enum qcedev_crypto_oper_type {
+	QCEDEV_CRYPTO_OPER_CIPHER = 0,
+	QCEDEV_CRYPTO_OPER_SHA = 1,
+	QCEDEV_CRYPTO_OPER_LAST
+};
+
+struct qcedev_handle;
+
+struct qcedev_cipher_req {
+	struct ablkcipher_request creq;
+	void *cookie;
+};
+
+struct qcedev_sha_req {
+	struct ahash_request sreq;
+	void *cookie;
+};
+
+struct	qcedev_sha_ctxt {
+	uint32_t	auth_data[4];
+	uint8_t	digest[QCEDEV_MAX_SHA_DIGEST];
+	uint32_t	diglen;
+	uint8_t	trailing_buf[64];
+	uint32_t	trailing_buf_len;
+	uint8_t	first_blk;
+	uint8_t	last_blk;
+	uint8_t	authkey[QCEDEV_MAX_SHA_BLOCK_SIZE];
+	bool		init_done;
+};
+
+struct qcedev_async_req {
+	struct list_head			list;
+	struct completion			complete;
+	enum qcedev_crypto_oper_type		op_type;
+	union {
+		struct qcedev_cipher_op_req	cipher_op_req;
+		struct qcedev_sha_op_req	sha_op_req;
+	};
+
+	union {
+		struct qcedev_cipher_req	cipher_req;
+		struct qcedev_sha_req		sha_req;
+	};
+	struct qcedev_handle			*handle;
+	int					err;
+};
+
+/**********************************************************************
+ * Register ourselves as a misc device to be able to access the dev driver
+ * from userspace. */
+
+#define QCEDEV_DEV	"qcedev"
+
+struct qcedev_control {
+
+	/* CE features supported by platform */
+	struct msm_ce_hw_support platform_support;
+
+	uint32_t ce_lock_count;
+	uint32_t high_bw_req_count;
+
+	/* CE features/algorithms supported by HW engine*/
+	struct ce_hw_support ce_support;
+
+	uint32_t  bus_scale_handle;
+
+	/* misc device */
+	struct miscdevice miscdevice;
+
+	/* qce handle */
+	void *qce;
+
+	/* platform device */
+	struct platform_device *pdev;
+
+	unsigned magic;
+
+	struct list_head ready_commands;
+	struct qcedev_async_req *active_command;
+	spinlock_t lock;
+	struct tasklet_struct done_tasklet;
+};
+
+struct qcedev_handle {
+	/* qcedev control handle */
+	struct qcedev_control *cntl;
+	/* qce internal sha context*/
+	struct qcedev_sha_ctxt sha_ctxt;
+};
+
+void qcedev_cipher_req_cb(void *cookie, unsigned char *icv,
+	unsigned char *iv, int ret);
+
+void qcedev_sha_req_cb(void *cookie, unsigned char *digest,
+	unsigned char *authdata, int ret);
+
+extern int _do_msm_fips_drbg_init(void *rng_dev);
+
+#ifdef CONFIG_FIPS_ENABLE
+
+/*
+ * Self test for Cipher algorithms
+ */
+int _fips_qcedev_cipher_selftest(struct qcedev_control *podev);
+
+/*
+ * Self test for SHA / HMAC
+ */
+
+int _fips_qcedev_sha_selftest(struct qcedev_control *podev);
+
+/*
+ * Update the FIPS global status
+ */
+static inline enum fips_status _fips_update_status(enum fips_status status)
+{
+	return (status == FIPS140_STATUS_PASS) ?
+		FIPS140_STATUS_QCRYPTO_ALLOWED :
+		FIPS140_STATUS_FAIL;
+}
+
+#else
+
+static inline int _fips_qcedev_cipher_selftest(struct qcedev_control *podev)
+{
+	return 0;
+}
+static inline int _fips_qcedev_sha_selftest(struct qcedev_control *podev)
+{
+	return 0;
+}
+
+static inline enum fips_status _fips_update_status(enum fips_status status)
+{
+	return FIPS140_STATUS_NA;
+}
+
+#endif  /* CONFIG_FIPS_ENABLE */
+
+#endif  /* __CRYPTO_MSM_QCEDEVI_H */
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
index c247189..b8975aa 100644
--- a/drivers/crypto/msm/qcrypto.c
+++ b/drivers/crypto/msm/qcrypto.c
@@ -45,9 +45,10 @@
 #include <linux/platform_data/qcom_crypto_device.h>
 #include <mach/msm_bus.h>
 #include <mach/qcrypto.h>
+#include <linux/fips_status.h>
+#include "qcryptoi.h"
 #include "qce.h"
 
-
 #define DEBUG_MAX_FNAME  16
 #define DEBUG_MAX_RW_BUF 2048
 
@@ -58,6 +59,9 @@
 
 #define QCRYPTO_HIGH_BANDWIDTH_TIMEOUT 1000
 
+/* Have the FIPS self tests completed? */
+static bool is_fips_qcrypto_tests_done;
+
 enum qcrypto_bus_state {
 	BUS_NO_BANDWIDTH = 0,
 	BUS_HAS_BANDWIDTH,
@@ -683,6 +687,12 @@
 	struct qcrypto_alg *q_alg;
 	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
 
+	/* IF FIPS tests not passed, return error */
+	if (((g_fips140_status == FIPS140_STATUS_FAIL) ||
+		(g_fips140_status == FIPS140_STATUS_PASS_CRYPTO)) &&
+		is_fips_qcrypto_tests_done)
+		return -ENXIO;
+
 	q_alg = container_of(alg, struct qcrypto_alg, cipher_alg);
 	ctx->flags = 0;
 
@@ -710,6 +720,12 @@
 	struct qcrypto_alg *q_alg = container_of(alg, struct qcrypto_alg,
 								sha_alg);
 
+	/* IF FIPS tests not passed, return error */
+	if (((g_fips140_status == FIPS140_STATUS_FAIL) ||
+		(g_fips140_status == FIPS140_STATUS_PASS_CRYPTO)) &&
+		is_fips_qcrypto_tests_done)
+		return -ENXIO;
+
 	crypto_ahash_set_reqsize(ahash, sizeof(struct qcrypto_sha_req_ctx));
 	/* update context with ptr to cp */
 	sha_ctx->cp = q_alg->cp;
@@ -959,7 +975,7 @@
 		if (q_alg->alg_type == QCRYPTO_ALG_SHA)
 			crypto_unregister_ahash(&q_alg->sha_alg);
 		list_del(&q_alg->entry);
-		kfree(q_alg);
+		kzfree(q_alg);
 	}
 }
 
@@ -978,7 +994,7 @@
 	mutex_unlock(&cp->engine_lock);
 	if (pengine->qce)
 		qce_close(pengine->qce);
-	kfree(pengine);
+	kzfree(pengine);
 	return 0;
 }
 
@@ -1282,7 +1298,7 @@
 		if (bytes != areq->nbytes)
 			pr_warn("bytes copied=0x%x bytes to copy= 0x%x", bytes,
 								areq->nbytes);
-		kfree(rctx->data);
+		kzfree(rctx->data);
 	}
 
 	if (cp->platform_support.ce_shared)
@@ -1329,7 +1345,7 @@
 			if (bytes != nbytes)
 				pr_warn("bytes copied=0x%x bytes to copy= 0x%x",
 						bytes, nbytes);
-			kfree(rctx->data);
+			kzfree(rctx->data);
 		}
 		kzfree(rctx->assoc);
 		areq->assoc = rctx->assoc_sg;
@@ -1363,7 +1379,7 @@
 			if (bytes != nbytes)
 				pr_warn("bytes copied=0x%x bytes to copy= 0x%x",
 						bytes, nbytes);
-			kfree(rctx->data);
+			kzfree(rctx->data);
 		}
 
 		if (ret == 0) {
@@ -3163,7 +3179,7 @@
 						rctx->trailing_buf_len);
 			memcpy((rctx->data2 + rctx->trailing_buf_len),
 					rctx->data, req->src->length);
-			kfree(rctx->data);
+			kzfree(rctx->data);
 			rctx->data = rctx->data2;
 			sg_set_buf(&rctx->sg[0], rctx->data,
 					(rctx->trailing_buf_len +
@@ -3351,7 +3367,7 @@
 		INIT_COMPLETION(sha_ctx->ahash_req_complete);
 	}
 
-	kfree(in_buf);
+	kzfree(in_buf);
 	ahash_request_free(ahash_req);
 
 	return ret;
@@ -3628,6 +3644,23 @@
 	return 0;
 }
 
+/*
+ * Fill up fips_selftest_data structure
+ */
+
+static void _qcrypto_fips_selftest_d(struct fips_selftest_data *selftest_d,
+					struct ce_hw_support *ce_support,
+					char *prefix)
+{
+	strlcpy(selftest_d->algo_prefix, prefix, CRYPTO_MAX_ALG_NAME);
+	selftest_d->prefix_ahash_algo = ce_support->use_sw_ahash_algo;
+	selftest_d->prefix_hmac_algo = ce_support->use_sw_hmac_algo;
+	selftest_d->prefix_aes_xts_algo = ce_support->use_sw_aes_xts_algo;
+	selftest_d->prefix_aes_cbc_ecb_ctr_algo =
+		ce_support->use_sw_aes_cbc_ecb_ctr_algo;
+	selftest_d->prefix_aead_algo = ce_support->use_sw_aead_algo;
+	selftest_d->ce_device = ce_support->ce_device;
+}
 
 int qcrypto_cipher_set_device(struct ablkcipher_request *req, unsigned int dev)
 {
@@ -4235,6 +4268,10 @@
 	struct crypto_engine *pengine;
 	unsigned long flags;
 
+	/* For FIPS140-2 Power on self tests */
+	struct fips_selftest_data selftest_d;
+	char prefix[10] = "";
+
 	pengine = kzalloc(sizeof(*pengine), GFP_KERNEL);
 	if (!pengine) {
 		pr_err("qcrypto Memory allocation of q_alg FAIL, error %ld\n",
@@ -4245,7 +4282,7 @@
 	/* open qce */
 	handle = qce_open(pdev, &rc);
 	if (handle == NULL) {
-		kfree(pengine);
+		kzfree(pengine);
 		platform_set_drvdata(pdev, NULL);
 		return rc;
 	}
@@ -4328,7 +4365,7 @@
 
 	if (cp->total_units != 1) {
 		mutex_unlock(&cp->engine_lock);
-		return 0;
+		goto fips_selftest;
 	}
 
 	/* register crypto cipher algorithms the device supports */
@@ -4357,7 +4394,7 @@
 		if (rc) {
 			dev_err(&pdev->dev, "%s alg registration failed\n",
 					q_alg->cipher_alg.cra_driver_name);
-			kfree(q_alg);
+			kzfree(q_alg);
 		} else {
 			list_add_tail(&q_alg->entry, &cp->alg_list);
 			dev_info(&pdev->dev, "%s\n",
@@ -4391,7 +4428,7 @@
 		if (rc) {
 			dev_err(&pdev->dev, "%s alg registration failed\n",
 					q_alg->cipher_alg.cra_driver_name);
-			kfree(q_alg);
+			kzfree(q_alg);
 		} else {
 			list_add_tail(&q_alg->entry, &cp->alg_list);
 			dev_info(&pdev->dev, "%s\n",
@@ -4428,7 +4465,7 @@
 		if (rc) {
 			dev_err(&pdev->dev, "%s alg registration failed\n",
 				q_alg->sha_alg.halg.base.cra_driver_name);
-			kfree(q_alg);
+			kzfree(q_alg);
 		} else {
 			list_add_tail(&q_alg->entry, &cp->alg_list);
 			dev_info(&pdev->dev, "%s\n",
@@ -4505,7 +4542,7 @@
 				dev_err(&pdev->dev,
 				"%s alg registration failed\n",
 				q_alg->sha_alg.halg.base.cra_driver_name);
-				kfree(q_alg);
+				kzfree(q_alg);
 			} else {
 				list_add_tail(&q_alg->entry, &cp->alg_list);
 				dev_info(&pdev->dev, "%s\n",
@@ -4541,7 +4578,7 @@
 		if (rc) {
 			dev_err(&pdev->dev, "%s alg registration failed\n",
 					q_alg->cipher_alg.cra_driver_name);
-			kfree(q_alg);
+			kzfree(q_alg);
 		} else {
 			list_add_tail(&q_alg->entry, &cp->alg_list);
 			dev_info(&pdev->dev, "%s\n",
@@ -4580,13 +4617,42 @@
 	}
 
 	mutex_unlock(&cp->engine_lock);
+
+fips_selftest:
+	/*
+	* FIPS140-2 Known Answer Tests :
+	* IN case of any failure, do not Init the module
+	*/
+	is_fips_qcrypto_tests_done = false;
+
+	if (g_fips140_status != FIPS140_STATUS_NA) {
+
+		_qcrypto_prefix_alg_cra_name(prefix, 0);
+		_qcrypto_fips_selftest_d(&selftest_d, &cp->ce_support, prefix);
+		if (_fips_qcrypto_sha_selftest(&selftest_d) ||
+			_fips_qcrypto_cipher_selftest(&selftest_d) ||
+			_fips_qcrypto_aead_selftest(&selftest_d)) {
+			pr_err("qcrypto: FIPS140-2 Known Answer Tests : Failed\n");
+			panic("SYSTEM CAN NOT BOOT!!!");
+			rc = -1;
+			goto err;
+		} else
+			pr_info("qcrypto: FIPS140-2 Known Answer Tests: Successful\n");
+		if (g_fips140_status != FIPS140_STATUS_PASS)
+			g_fips140_status = FIPS140_STATUS_PASS_CRYPTO;
+
+	} else
+		pr_info("qcrypto: FIPS140-2 Known Answer Tests: Skipped\n");
+
+	is_fips_qcrypto_tests_done = true;
+
 	return 0;
 err:
 	_qcrypto_remove_engine(pengine);
 	mutex_unlock(&cp->engine_lock);
 	if (pengine->qce)
 		qce_close(pengine->qce);
-	kfree(pengine);
+	kzfree(pengine);
 	return rc;
 };
 
diff --git a/drivers/crypto/msm/qcrypto_fips.c b/drivers/crypto/msm/qcrypto_fips.c
new file mode 100644
index 0000000..a53690f
--- /dev/null
+++ b/drivers/crypto/msm/qcrypto_fips.c
@@ -0,0 +1,511 @@
+/* Qcrypto: FIPS 140-2 Selftests
+ *
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+#include <crypto/hash.h>
+#include <crypto/ctr.h>
+#include <crypto/des.h>
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <crypto/algapi.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/hash.h>
+#include <linux/platform_data/qcom_crypto_device.h>
+#include <mach/qcrypto.h>
+#include "qcryptoi.h"
+#include "qcrypto_fips.h"
+
+/*
+ * Callback function
+ */
+static void _fips_cb(struct crypto_async_request *crypto_async_req, int err)
+{
+	struct _fips_completion *fips_completion = crypto_async_req->data;
+	if (err == -EINPROGRESS)
+		return;
+
+	fips_completion->err = err;
+	complete(&fips_completion->completion);
+}
+
+/*
+ * Function to prefix if needed
+ */
+static int _fips_get_alg_cra_name(char cra_name[],
+				char *prefix, unsigned int size)
+{
+	char new_cra_name[CRYPTO_MAX_ALG_NAME];
+	strlcpy(new_cra_name, prefix, CRYPTO_MAX_ALG_NAME);
+	if (CRYPTO_MAX_ALG_NAME < size + strlen(prefix))
+		return -EINVAL;
+
+	strlcat(new_cra_name, cra_name, CRYPTO_MAX_ALG_NAME);
+	strlcpy(cra_name, new_cra_name, CRYPTO_MAX_ALG_NAME);
+	return 0;
+}
+
+/*
+ * Sha/HMAC self tests
+ */
+int _fips_qcrypto_sha_selftest(struct fips_selftest_data *selftest_d)
+{
+	int rc = 0, err, tv_index = 0, num_tv;
+	char *k_out_buf = NULL;
+	struct scatterlist fips_sg;
+	struct crypto_ahash *tfm;
+	struct ahash_request *ahash_req;
+	struct _fips_completion fips_completion;
+	struct _fips_test_vector_sha_hmac tv_sha_hmac;
+
+	num_tv = (sizeof(fips_test_vector_sha_hmac)) /
+	(sizeof(struct _fips_test_vector_sha_hmac));
+
+	/* One-by-one testing */
+	for (tv_index = 0; tv_index < num_tv; tv_index++) {
+		memcpy(&tv_sha_hmac, &fips_test_vector_sha_hmac[tv_index],
+			(sizeof(struct _fips_test_vector_sha_hmac)));
+		k_out_buf = kzalloc(tv_sha_hmac.diglen, GFP_KERNEL);
+		if (k_out_buf == NULL) {
+			pr_err("qcrypto: Failed to allocate memory for k_out_buf %ld\n",
+				PTR_ERR(k_out_buf));
+			return -ENOMEM;
+		}
+
+		memset(k_out_buf, 0, tv_sha_hmac.diglen);
+		init_completion(&fips_completion.completion);
+
+		/* use_sw flags are set in dtsi file which makes
+		default Linux API calls to go to s/w crypto instead
+		of h/w crypto. This code makes sure that all selftests
+		calls always go to h/w, independent of DTSI flags. */
+		if (tv_sha_hmac.klen == 0) {
+			if (selftest_d->prefix_ahash_algo)
+				if (_fips_get_alg_cra_name(tv_sha_hmac
+					.hash_alg, selftest_d->algo_prefix,
+					strlen(tv_sha_hmac.hash_alg))) {
+					rc = -1;
+					pr_err("Algo Name is too long for tv %d\n",
+					tv_index);
+					goto clr_buf;
+				}
+		} else {
+			if (selftest_d->prefix_hmac_algo)
+				if (_fips_get_alg_cra_name(tv_sha_hmac
+					.hash_alg, selftest_d->algo_prefix,
+					strlen(tv_sha_hmac.hash_alg))) {
+					rc = -1;
+					pr_err("Algo Name is too long for tv %d\n",
+					tv_index);
+					goto clr_buf;
+				}
+		}
+
+		tfm = crypto_alloc_ahash(tv_sha_hmac.hash_alg, 0, 0);
+		if (IS_ERR(tfm)) {
+			pr_err("qcrypto: %s algorithm not found\n",
+			tv_sha_hmac.hash_alg);
+			rc = PTR_ERR(tfm);
+			goto clr_buf;
+		}
+
+		ahash_req = ahash_request_alloc(tfm, GFP_KERNEL);
+		if (!ahash_req) {
+			pr_err("qcrypto: ahash_request_alloc failed\n");
+			rc = -ENOMEM;
+			goto clr_tfm;
+		}
+		rc = qcrypto_ahash_set_device(ahash_req, selftest_d->ce_device);
+		if (rc != 0) {
+			pr_err("%s qcrypto_cipher_set_device failed with err %d\n",
+				__func__, rc);
+			goto clr_ahash_req;
+		}
+		ahash_request_set_callback(ahash_req,
+			CRYPTO_TFM_REQ_MAY_BACKLOG,
+			_fips_cb, &fips_completion);
+
+		sg_init_one(&fips_sg, &tv_sha_hmac.input[0], tv_sha_hmac.ilen);
+
+		crypto_ahash_clear_flags(tfm, ~0);
+		if (tv_sha_hmac.klen != 0) {
+			rc = crypto_ahash_setkey(tfm, tv_sha_hmac.key,
+				tv_sha_hmac.klen);
+			if (rc) {
+				pr_err("qcrypto: crypto_ahash_setkey failed\n");
+				goto clr_ahash_req;
+			}
+		}
+
+		ahash_request_set_crypt(ahash_req, &fips_sg, k_out_buf,
+			tv_sha_hmac.ilen);
+		rc = crypto_ahash_digest(ahash_req);
+		if (rc == -EINPROGRESS || rc == -EBUSY) {
+			rc = wait_for_completion_interruptible(
+				&fips_completion.completion);
+			err = fips_completion.err;
+			if (!rc && !err) {
+				INIT_COMPLETION(fips_completion.completion);
+			} else {
+				pr_err("qcrypto:SHA: wait_for_completion failed\n");
+				goto clr_ahash_req;
+			}
+
+		}
+
+		if (memcmp(k_out_buf, tv_sha_hmac.digest,
+			tv_sha_hmac.diglen))
+			rc = -1;
+
+clr_ahash_req:
+		ahash_request_free(ahash_req);
+clr_tfm:
+		crypto_free_ahash(tfm);
+clr_buf:
+		kzfree(k_out_buf);
+
+	/* For any failure, return error */
+		if (rc)
+			return rc;
+
+	}
+	return rc;
+}
+
+/*
+* Cipher algorithm self tests
+*/
+int _fips_qcrypto_cipher_selftest(struct fips_selftest_data *selftest_d)
+{
+	int rc = 0, err, tv_index, num_tv;
+	struct crypto_ablkcipher *tfm;
+	struct ablkcipher_request *ablkcipher_req;
+	struct _fips_completion fips_completion;
+	char *k_align_src = NULL;
+	struct scatterlist fips_sg;
+	struct _fips_test_vector_cipher tv_cipher;
+
+	num_tv = (sizeof(fips_test_vector_cipher)) /
+		(sizeof(struct _fips_test_vector_cipher));
+
+	/* One-by-one testing */
+	for (tv_index = 0; tv_index < num_tv; tv_index++) {
+
+		memcpy(&tv_cipher, &fips_test_vector_cipher[tv_index],
+			(sizeof(struct _fips_test_vector_cipher)));
+
+		/* Single buffer allocation for in place operation */
+		k_align_src = kzalloc(tv_cipher.pln_txt_len, GFP_KERNEL);
+		if (k_align_src == NULL) {
+			pr_err("qcrypto:, Failed to allocate memory for k_align_src %ld\n",
+			PTR_ERR(k_align_src));
+			return -ENOMEM;
+		}
+
+		memcpy(&k_align_src[0], tv_cipher.pln_txt,
+			tv_cipher.pln_txt_len);
+
+		/* use_sw flags are set in dtsi file which makes
+		default Linux API calls to go to s/w crypto instead
+		of h/w crypto. This code makes sure that all selftests
+		calls always go to h/w, independent of DTSI flags. */
+		if (!strcmp(tv_cipher.mod_alg, "xts(aes)")) {
+			if (selftest_d->prefix_aes_xts_algo)
+				if (_fips_get_alg_cra_name(
+					tv_cipher.mod_alg,
+					selftest_d->algo_prefix,
+					strlen(tv_cipher.mod_alg))) {
+					rc = -1;
+					pr_err("Algo Name is too long for tv %d\n",
+					tv_index);
+					goto clr_buf;
+				}
+		} else {
+			if (selftest_d->prefix_aes_cbc_ecb_ctr_algo)
+				if (_fips_get_alg_cra_name(
+					tv_cipher.mod_alg,
+					selftest_d->algo_prefix,
+					strlen(tv_cipher.mod_alg))) {
+					rc = -1;
+					pr_err("Algo Name is too long for tv %d\n",
+					tv_index);
+					goto clr_buf;
+				}
+		}
+
+		tfm = crypto_alloc_ablkcipher(tv_cipher.mod_alg, 0, 0);
+		if (IS_ERR(tfm)) {
+			pr_err("qcrypto: %s algorithm not found\n",
+			tv_cipher.mod_alg);
+			rc = -ENOMEM;
+			goto clr_buf;
+		}
+
+		ablkcipher_req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
+		if (!ablkcipher_req) {
+			pr_err("qcrypto: ablkcipher_request_alloc failed\n");
+			rc = -ENOMEM;
+			goto clr_tfm;
+		}
+		rc = qcrypto_cipher_set_device(ablkcipher_req,
+			selftest_d->ce_device);
+		if (rc != 0) {
+			pr_err("%s qcrypto_cipher_set_device failed with err %d\n",
+				__func__, rc);
+			goto clr_ablkcipher_req;
+		}
+		ablkcipher_request_set_callback(ablkcipher_req,
+			CRYPTO_TFM_REQ_MAY_BACKLOG,
+			_fips_cb, &fips_completion);
+
+		crypto_ablkcipher_clear_flags(tfm, ~0);
+		rc = crypto_ablkcipher_setkey(tfm, tv_cipher.key,
+			tv_cipher.klen);
+		if (rc) {
+			pr_err("qcrypto: crypto_ablkcipher_setkey failed\n");
+			goto clr_ablkcipher_req;
+		}
+		sg_set_buf(&fips_sg, k_align_src, tv_cipher.enc_txt_len);
+		sg_mark_end(&fips_sg);
+		ablkcipher_request_set_crypt(ablkcipher_req,
+			&fips_sg, &fips_sg, tv_cipher.pln_txt_len,
+			tv_cipher.iv);
+
+		/**** Encryption Test ****/
+		init_completion(&fips_completion.completion);
+		rc = crypto_ablkcipher_encrypt(ablkcipher_req);
+		if (rc == -EINPROGRESS || rc == -EBUSY) {
+			rc = wait_for_completion_interruptible(
+				&fips_completion.completion);
+			err = fips_completion.err;
+			if (!rc && !err) {
+				INIT_COMPLETION(fips_completion.completion);
+			} else {
+				pr_err("qcrypto:cipher:ENC, wait_for_completion failed\n");
+				goto clr_ablkcipher_req;
+			}
+
+		}
+
+		if (memcmp(k_align_src, tv_cipher.enc_txt,
+			tv_cipher.enc_txt_len)) {
+			rc = -1;
+			goto clr_ablkcipher_req;
+		}
+
+		/**** Decryption test ****/
+		init_completion(&fips_completion.completion);
+		rc = crypto_ablkcipher_decrypt(ablkcipher_req);
+		if (rc == -EINPROGRESS || rc == -EBUSY) {
+			rc = wait_for_completion_interruptible(
+				&fips_completion.completion);
+			err = fips_completion.err;
+			if (!rc && !err) {
+				INIT_COMPLETION(fips_completion.completion);
+			} else {
+				pr_err("qcrypto:cipher:DEC, wait_for_completion failed\n");
+				goto clr_ablkcipher_req;
+			}
+
+		}
+
+		if (memcmp(k_align_src, tv_cipher.pln_txt,
+			tv_cipher.pln_txt_len))
+			rc = -1;
+
+clr_ablkcipher_req:
+		ablkcipher_request_free(ablkcipher_req);
+clr_tfm:
+		crypto_free_ablkcipher(tfm);
+clr_buf:
+		kzfree(k_align_src);
+
+		if (rc)
+			return rc;
+
+	}
+	return rc;
+}
+
+/*
+ * AEAD algorithm self tests
+ */
+int _fips_qcrypto_aead_selftest(struct fips_selftest_data *selftest_d)
+{
+	int rc = 0, err, tv_index, num_tv, authsize, buf_length;
+	struct crypto_aead *tfm;
+	struct aead_request *aead_req;
+	struct _fips_completion fips_completion;
+	struct scatterlist fips_sg, fips_assoc_sg;
+	char *k_align_src = NULL;
+	struct _fips_test_vector_aead tv_aead;
+
+	num_tv = (sizeof(fips_test_vector_aead)) /
+		(sizeof(struct _fips_test_vector_aead));
+
+	/* One-by-one testing */
+	for (tv_index = 0; tv_index < num_tv; tv_index++) {
+
+		memcpy(&tv_aead, &fips_test_vector_aead[tv_index],
+			(sizeof(struct _fips_test_vector_aead)));
+
+		if (tv_aead.pln_txt_len > tv_aead.enc_txt_len)
+			buf_length = tv_aead.pln_txt_len;
+		else
+			buf_length = tv_aead.enc_txt_len;
+
+		/* Single buffer allocation for in place operation */
+		k_align_src = kzalloc(buf_length, GFP_KERNEL);
+		if (k_align_src == NULL) {
+			pr_err("qcrypto:, Failed to allocate memory for k_align_src %ld\n",
+				PTR_ERR(k_align_src));
+			return -ENOMEM;
+		}
+		memcpy(&k_align_src[0], tv_aead.pln_txt,
+			tv_aead.pln_txt_len);
+
+		/* use_sw flags are set in dtsi file which makes
+		default Linux API calls to go to s/w crypto instead
+		of h/w crypto. This code makes sure that all selftests
+		calls always go to h/w, independent of DTSI flags. */
+		if (selftest_d->prefix_aead_algo) {
+			if (_fips_get_alg_cra_name(tv_aead.mod_alg,
+				selftest_d->algo_prefix,
+				strlen(tv_aead.mod_alg))) {
+				rc = -1;
+				pr_err("Algo Name is too long for tv %d\n",
+					tv_index);
+				goto clr_buf;
+			}
+		}
+		tfm = crypto_alloc_aead(tv_aead.mod_alg, 0, 0);
+		if (IS_ERR(tfm)) {
+			pr_err("qcrypto: %s algorithm not found\n",
+				tv_aead.mod_alg);
+			rc = -ENOMEM;
+			goto clr_buf;
+		}
+		aead_req = aead_request_alloc(tfm, GFP_KERNEL);
+		if (!aead_req) {
+			pr_err("qcrypto:aead_request_alloc failed\n");
+			rc = -ENOMEM;
+			goto clr_tfm;
+		}
+		rc = qcrypto_aead_set_device(aead_req, selftest_d->ce_device);
+		if (rc != 0) {
+			pr_err("%s qcrypto_cipher_set_device failed with err %d\n",
+				__func__, rc);
+			goto clr_aead_req;
+		}
+		init_completion(&fips_completion.completion);
+		aead_request_set_callback(aead_req,
+			CRYPTO_TFM_REQ_MAY_BACKLOG,
+			_fips_cb, &fips_completion);
+		crypto_aead_clear_flags(tfm, ~0);
+		rc = crypto_aead_setkey(tfm, tv_aead.key, tv_aead.klen);
+		if (rc) {
+			pr_err("qcrypto:crypto_aead_setkey failed\n");
+			goto clr_aead_req;
+		}
+		authsize = abs(tv_aead.enc_txt_len - tv_aead.pln_txt_len);
+		rc = crypto_aead_setauthsize(tfm, authsize);
+		if (rc) {
+			pr_err("qcrypto:crypto_aead_setauthsize failed\n");
+			goto clr_aead_req;
+		}
+		sg_init_one(&fips_sg, k_align_src,
+		tv_aead.pln_txt_len + authsize);
+		aead_request_set_crypt(aead_req, &fips_sg, &fips_sg,
+			tv_aead.pln_txt_len , tv_aead.iv);
+		sg_init_one(&fips_assoc_sg, tv_aead.assoc, tv_aead.alen);
+		aead_request_set_assoc(aead_req, &fips_assoc_sg, tv_aead.alen);
+		/**** Encryption test ****/
+		rc = crypto_aead_encrypt(aead_req);
+		if (rc == -EINPROGRESS || rc == -EBUSY) {
+			rc = wait_for_completion_interruptible(
+				&fips_completion.completion);
+			err = fips_completion.err;
+			if (!rc && !err) {
+				INIT_COMPLETION(fips_completion.completion);
+			} else {
+				pr_err("qcrypto:aead:ENC, wait_for_completion failed\n");
+				goto clr_aead_req;
+			}
+
+		}
+		if (memcmp(k_align_src, tv_aead.enc_txt, tv_aead.enc_txt_len)) {
+			rc = -1;
+			goto clr_aead_req;
+		}
+
+		/** Decryption test **/
+		init_completion(&fips_completion.completion);
+		aead_request_set_callback(aead_req,
+			CRYPTO_TFM_REQ_MAY_BACKLOG,
+			_fips_cb, &fips_completion);
+		crypto_aead_clear_flags(tfm, ~0);
+		rc = crypto_aead_setkey(tfm, tv_aead.key, tv_aead.klen);
+		if (rc) {
+			pr_err("qcrypto:aead:DEC, crypto_aead_setkey failed\n");
+			goto clr_aead_req;
+		}
+
+		authsize = abs(tv_aead.enc_txt_len - tv_aead.pln_txt_len);
+		rc = crypto_aead_setauthsize(tfm, authsize);
+		if (rc) {
+			pr_err("qcrypto:aead:DEC, crypto_aead_setauthsize failed\n");
+			goto clr_aead_req;
+		}
+
+		sg_init_one(&fips_sg, k_align_src,
+			tv_aead.enc_txt_len + authsize);
+		aead_request_set_crypt(aead_req, &fips_sg, &fips_sg,
+			tv_aead.enc_txt_len, tv_aead.iv);
+		sg_init_one(&fips_assoc_sg, tv_aead.assoc, tv_aead.alen);
+		aead_request_set_assoc(aead_req, &fips_assoc_sg,
+			tv_aead.alen);
+		rc = crypto_aead_decrypt(aead_req);
+		if (rc == -EINPROGRESS || rc == -EBUSY) {
+			rc = wait_for_completion_interruptible(
+				&fips_completion.completion);
+			err = fips_completion.err;
+			if (!rc && !err) {
+				INIT_COMPLETION(fips_completion.completion);
+			} else {
+				pr_err("qcrypto:aead:DEC, wait_for_completion failed\n");
+				goto clr_aead_req;
+			}
+
+		}
+
+		if (memcmp(k_align_src, tv_aead.pln_txt, tv_aead.pln_txt_len)) {
+			rc = -1;
+			goto clr_aead_req;
+		}
+clr_aead_req:
+		aead_request_free(aead_req);
+clr_tfm:
+		crypto_free_aead(tfm);
+clr_buf:
+		kzfree(k_align_src);
+	/* In case of any failure, return error */
+		if (rc)
+			return rc;
+	}
+	return rc;
+}
+
diff --git a/drivers/crypto/msm/qcrypto_fips.h b/drivers/crypto/msm/qcrypto_fips.h
new file mode 100644
index 0000000..9624adc
--- /dev/null
+++ b/drivers/crypto/msm/qcrypto_fips.h
@@ -0,0 +1,446 @@
+/* FIPS Known answer tests for Qcrypto.
+ *
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CRYPTO_MSM_QCRYPTO_FIPS_H
+#define __CRYPTO_MSM_QCRYPTO_FIPS_H
+
+struct _fips_completion {
+	struct completion completion;
+	int err;
+};
+
+/*
+ *   For cipher algorithms.
+ */
+struct _fips_test_vector_cipher {
+	char *key;
+	unsigned char klen;
+	char iv[16];
+	unsigned char ivlen;
+	char *pln_txt;
+	unsigned int pln_txt_len;
+	char *enc_txt;
+	unsigned int enc_txt_len;
+	char mod_alg[CRYPTO_MAX_ALG_NAME];
+};
+
+/*
+ * For Hashing / HMAC algorithms.
+ */
+struct _fips_test_vector_sha_hmac {
+	char *key;
+	unsigned char klen;
+	char *input;
+	unsigned char ilen;
+	char *digest;
+	unsigned char diglen;
+	char hash_alg[CRYPTO_MAX_ALG_NAME];
+};
+
+/*
+ *For AEAD algorithms
+ */
+struct _fips_test_vector_aead {
+	char *key;
+	unsigned char klen;
+	char iv[16];
+	unsigned char ivlen;
+	char assoc[32];
+	unsigned char alen;
+	char *pln_txt;
+	unsigned int pln_txt_len;
+	char *enc_txt;
+	unsigned int enc_txt_len;
+	char mod_alg[CRYPTO_MAX_ALG_NAME];
+};
+
+/*
+ *Test vectors for sha/hmac tests
+ */
+static struct _fips_test_vector_sha_hmac fips_test_vector_sha_hmac[] = {
+/*http://csrc.nist.gov/groups/ST/toolkit/documents/Examples/SHA1.pdf*/
+	{ /* SHA1 */
+		.hash_alg	=	"sha1",
+		.klen		=	0,
+		.input		=	"abc",
+		.ilen		=	3,
+		.digest	=	"\xa9\x99\x3e\x36\x47\x06\x81\x6a"
+					"\xba\x3e\x25\x71\x78\x50\xc2\x6c"
+					"\x9c\xd0\xd8\x9d",
+		.diglen	=	SHA1_DIGEST_SIZE,
+	},
+/* http://csrc.nist.gov/groups/ST/toolkit/documents/Examples/SHA256.pdf */
+	{/* SHA256 */
+		.hash_alg	=	"sha256",
+		.klen		=	0,
+		.input		=	"abc",
+		.ilen		=	3,
+		.digest	=	"\xba\x78\x16\xbf\x8f\x01\xcf\xea"
+					"\x41\x41\x40\xde\x5d\xae\x22\x23"
+					"\xb0\x03\x61\xa3\x96\x17\x7a\x9c"
+					"\xb4\x10\xff\x61\xf2\x00\x15\xad",
+		.diglen	=	SHA256_DIGEST_SIZE,
+	},
+/* http://csrc.nist.gov/groups/ST/toolkit/documents/Examples/HMAC_SHA1.pdf */
+	{/* HMAC-SHA1 */
+		.hash_alg	=	"hmac(sha1)",
+		.key		=	"\x00\x01\x02\x03\x04\x05\x06\x07"
+					"\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F"
+					"\x10\x11\x12\x13",
+		.klen		=	20,
+		.input		=	"Sample message for keylen<blocklen",
+		.ilen		=	34,
+		.digest	=	"\x4C\x99\xFF\x0C\xB1\xB3\x1B\xD3"
+					"\x3F\x84\x31\xDB\xAF\x4D\x17\xFC"
+					"\xD3\x56\xA8\x07",
+		.diglen	=	SHA1_DIGEST_SIZE,
+	},
+/* http://csrc.nist.gov/groups/ST/toolkit/documents/Examples/HMAC_SHA256.pdf */
+	{/* HMAC-SHA256 */
+		.hash_alg	=	"hmac(sha256)",
+		.key		=	"\x00\x01\x02\x03\x04\x05\x06\x07"
+					"\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F"
+					"\x10\x11\x12\x13\x14\x15\x16\x17"
+					"\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F"
+					"\x20\x21\x22\x23\x24\x25\x26\x27"
+					"\x28\x29\x2A\x2B\x2C\x2D\x2E\x2F"
+					"\x30\x31\x32\x33\x34\x35\x36\x37"
+					"\x38\x39\x3A\x3B\x3C\x3D\x3E\x3F",
+		.klen		=	64,
+		.input		=	"Sample message for keylen=blocklen",
+		.ilen		=	34,
+		.digest	=	"\x8B\xB9\xA1\xDB\x98\x06\xF2\x0D"
+					"\xF7\xF7\x7B\x82\x13\x8C\x79\x14"
+					"\xD1\x74\xD5\x9E\x13\xDC\x4D\x01"
+					"\x69\xC9\x05\x7B\x13\x3E\x1D\x62",
+		.diglen	=	SHA256_DIGEST_SIZE,
+	},
+};
+
+/*
+ *Test vectors For cipher algorithms
+ */
+static struct _fips_test_vector_cipher fips_test_vector_cipher[] = {
+	/* From NIST Special Publication 800-38A, Appendix F.1 */
+	{/* AES-128 ECB */
+		.mod_alg	=	"ecb(aes)",
+		.key		=	"\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+					"\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+		.klen		=	16,
+		.ivlen		=	0,
+		.pln_txt	=	"\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+					"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+					"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+					"\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+					"\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+					"\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+					"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+					"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+		.pln_txt_len	=	64,
+		.enc_txt	=	"\x3a\xd7\x7b\xb4\x0d\x7a\x36\x60"
+					"\xa8\x9e\xca\xf3\x24\x66\xef\x97"
+					"\xf5\xd3\xd5\x85\x03\xb9\x69\x9d"
+					"\xe7\x85\x89\x5a\x96\xfd\xba\xaf"
+					"\x43\xb1\xcd\x7f\x59\x8e\xce\x23"
+					"\x88\x1b\x00\xe3\xed\x03\x06\x88"
+					"\x7b\x0c\x78\x5e\x27\xe8\xad\x3f"
+					"\x82\x23\x20\x71\x04\x72\x5d\xd4",
+		.enc_txt_len	=	64,
+	},
+	/* From NIST Special Publication 800-38A, Appendix F.1 */
+	{/* AES-256 ECB */
+		.mod_alg	=	"ecb(aes)",
+		.key		=	"\x60\x3d\xeb\x10\x15\xca\x71\xbe"
+					"\x2b\x73\xae\xf0\x85\x7d\x77\x81"
+					"\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
+					"\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
+		.klen		=	32,
+		.ivlen		=	0,
+		.pln_txt	=	"\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+					"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+					"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+					"\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+					"\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+					"\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+					"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+					"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+		.pln_txt_len	=	64,
+		.enc_txt	=	"\xf3\xee\xd1\xbd\xb5\xd2\xa0\x3c"
+					"\x06\x4b\x5a\x7e\x3d\xb1\x81\xf8"
+					"\x59\x1c\xcb\x10\xd4\x10\xed\x26"
+					"\xdc\x5b\xa7\x4a\x31\x36\x28\x70"
+					"\xb6\xed\x21\xb9\x9c\xa6\xf4\xf9"
+					"\xf1\x53\xe7\xb1\xbe\xaf\xed\x1d"
+					"\x23\x30\x4b\x7a\x39\xf9\xf3\xff"
+					"\x06\x7d\x8d\x8f\x9e\x24\xec\xc7",
+		.enc_txt_len	=	64,
+	},
+	/* From NIST Special Publication 800-38A, Appendix F.2 */
+	{/* AES-128 CBC */
+		.mod_alg	=	"cbc(aes)",
+		.key		=	"\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+					"\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+		.klen		=	16,
+		.iv		=	"\x00\x01\x02\x03\x04\x05\x06\x07"
+					"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.ivlen		=	16,
+		.pln_txt	=	"\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+					"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+					"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+					"\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+					"\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+					"\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+					"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+					"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+		.pln_txt_len	=	64,
+		.enc_txt	=	"\x76\x49\xab\xac\x81\x19\xb2\x46"
+					"\xce\xe9\x8e\x9b\x12\xe9\x19\x7d"
+					"\x50\x86\xcb\x9b\x50\x72\x19\xee"
+					"\x95\xdb\x11\x3a\x91\x76\x78\xb2"
+					"\x73\xbe\xd6\xb8\xe3\xc1\x74\x3b"
+					"\x71\x16\xe6\x9e\x22\x22\x95\x16"
+					"\x3f\xf1\xca\xa1\x68\x1f\xac\x09"
+					"\x12\x0e\xca\x30\x75\x86\xe1\xa7",
+		.enc_txt_len	=	64,
+	},
+	/* From NIST Special Publication 800-38A, Appendix F.2 */
+	{/* AES-256 CBC */
+		.mod_alg	=	"cbc(aes)",
+		.key		=	"\x60\x3d\xeb\x10\x15\xca\x71\xbe"
+					"\x2b\x73\xae\xf0\x85\x7d\x77\x81"
+					"\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
+					"\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
+		.klen		=	32,
+		.iv		=	"\x00\x01\x02\x03\x04\x05\x06\x07"
+					"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.ivlen		=	16,
+		.pln_txt	=	"\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+					"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+					"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+					"\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+					"\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+					"\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+					"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+					"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+		.pln_txt_len	=	64,
+		.enc_txt	=	"\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba"
+					"\x77\x9e\xab\xfb\x5f\x7b\xfb\xd6"
+					"\x9c\xfc\x4e\x96\x7e\xdb\x80\x8d"
+					"\x67\x9f\x77\x7b\xc6\x70\x2c\x7d"
+					"\x39\xf2\x33\x69\xa9\xd9\xba\xcf"
+					"\xa5\x30\xe2\x63\x04\x23\x14\x61"
+					"\xb2\xeb\x05\xe2\xc3\x9b\xe9\xfc"
+					"\xda\x6c\x19\x07\x8c\x6a\x9d\x1b",
+		.enc_txt_len	=	64,
+	},
+	/* From NIST Special Publication 800-38A, Appendix F.5 */
+	{/* AES-128 CTR */
+		.mod_alg	=	"ctr(aes)",
+		.key		=	"\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+					"\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+		.klen		=	16,
+		.iv		=	"\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+					"\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+		.ivlen		=	16,
+		.pln_txt	=	"\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+					"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+					"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+					"\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+					"\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+					"\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+					"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+					"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+		.pln_txt_len	=	64,
+		.enc_txt	=	"\x87\x4d\x61\x91\xb6\x20\xe3\x26"
+					"\x1b\xef\x68\x64\x99\x0d\xb6\xce"
+					"\x98\x06\xf6\x6b\x79\x70\xfd\xff"
+					"\x86\x17\x18\x7b\xb9\xff\xfd\xff"
+					"\x5a\xe4\xdf\x3e\xdb\xd5\xd3\x5e"
+					"\x5b\x4f\x09\x02\x0d\xb0\x3e\xab"
+					"\x1e\x03\x1d\xda\x2f\xbe\x03\xd1"
+					"\x79\x21\x70\xa0\xf3\x00\x9c\xee",
+		.enc_txt_len	=	64,
+	},
+	/* From NIST Special Publication 800-38A, Appendix F.5 */
+	{/* AES-256 CTR */
+		.mod_alg	=	"ctr(aes)",
+		.key		=	"\x60\x3d\xeb\x10\x15\xca\x71\xbe"
+					"\x2b\x73\xae\xf0\x85\x7d\x77\x81"
+					"\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
+					"\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
+		.klen		=	32,
+		.iv		=	"\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+					"\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+		.ivlen		=	16,
+		.pln_txt	=	"\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+					"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+					"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+					"\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+					"\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+					"\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+					"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+					"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+		.pln_txt_len	=	64,
+		.enc_txt	=	"\x60\x1e\xc3\x13\x77\x57\x89\xa5"
+					"\xb7\xa7\xf5\x04\xbb\xf3\xd2\x28"
+					"\xf4\x43\xe3\xca\x4d\x62\xb5\x9a"
+					"\xca\x84\xe9\x90\xca\xca\xf5\xc5"
+					"\x2b\x09\x30\xda\xa2\x3d\xe9\x4c"
+					"\xe8\x70\x17\xba\x2d\x84\x98\x8d"
+					"\xdf\xc9\xc5\x8d\xb6\x7a\xad\xa6"
+					"\x13\xc2\xdd\x08\x45\x79\x41\xa6",
+		.enc_txt_len	=	64,
+	},
+	/* Derived From From NIST Special Publication 800-38A */
+	{/* AES-128 XTS requires 2 keys and thus length of key is twice. */
+		.mod_alg	=	"xts(aes)",
+		.key		=	"\x60\x3d\xeb\x10\x15\xca\x71\xbe"
+					"\x2b\x73\xae\xf0\x85\x7d\x77\x81"
+					"\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
+					"\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
+		.klen		=	32,
+		.iv		=	"\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+					"\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+		.ivlen		=	16,
+		.pln_txt	=	"\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+					"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+					"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+					"\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+					"\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+					"\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+					"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+					"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+		.pln_txt_len	=	64,
+		.enc_txt	=	"\xba\x2a\x7d\x50\x7b\x60\x63\x3e"
+					"\xf3\x1b\x06\x14\xb4\x45\xb5\xb5"
+					"\x42\x0d\x12\x57\x28\x15\x2e\x5d"
+					"\x5a\x54\xbe\x46\x5c\x9d\x1f\x2e"
+					"\x18\x8e\x79\x07\xc7\xdf\xe7\xf8"
+					"\x78\xa6\x53\x2a\x80\xb4\xd9\xce"
+					"\x1d\xbe\x75\x7e\xb6\x11\xef\x1e"
+					"\x51\x5d\xd6\x70\x03\x51\xcc\x94",
+		.enc_txt_len	=	64,
+	},
+	/* Derived From From NIST Special Publication 800-38A */
+	{/* AES-256 XTS requires 2 keys and thus length of key is twice */
+		.mod_alg	=	"xts(aes)",
+		.key		=	"\x3a\xd7\x7b\xb4\x0d\x7a\x36\x60"
+					"\xa8\x9e\xca\xf3\x24\x66\xef\x97"
+					"\xf5\xd3\xd5\x85\x03\xb9\x69\x9d"
+					"\xe7\x85\x89\x5a\x96\xfd\xba\xaf"
+					"\x43\xb1\xcd\x7f\x59\x8e\xce\x23"
+					"\x88\x1b\x00\xe3\xed\x03\x06\x88"
+					"\x7b\x0c\x78\x5e\x27\xe8\xad\x3f"
+					"\x82\x23\x20\x71\x04\x72\x5d\xd4",
+		.klen		=	64,
+		.iv		=	"\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+					"\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+		.ivlen		=	16,
+		.pln_txt	=	"\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+					"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+					"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+					"\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+					"\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+					"\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+					"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+					"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+		.pln_txt_len	=	64,
+		.enc_txt	=	"\xd7\x2b\x90\x02\x6f\xf0\xd2\x39"
+					"\x7b\x1a\x57\x92\xd0\x1e\xc1\xb6"
+					"\x04\x8c\x08\x8e\xa4\x1f\xa0\x0f"
+					"\x5e\xd8\xaf\xda\x6e\xd2\x4e\x5b"
+					"\x23\xde\x09\xa4\x19\x79\xda\xd4"
+					"\xe9\x4b\xbc\x05\x2e\xca\x20\x7d"
+					"\xd5\x0f\x89\x88\xa3\xda\x46\x1f"
+					"\x1e\xde\x53\x78\x90\xb2\x9a\x2c",
+		.enc_txt_len	=	64,
+	},
+	/* From NIST Special Publication 800-67, Appendix B.1 */
+	{/* 3DES ECB */
+		.mod_alg	=	"ecb(des3_ede)",
+		.key		=	"\x01\x23\x45\x67\x89\xAB\xCD\xEF"
+					"\x23\x45\x67\x89\xAB\xCD\xEF\x01"
+					"\x45\x67\x89\xAB\xCD\xEF\x01\x23",
+		.klen		=	24,
+		.ivlen		=	0,
+		.pln_txt	=	"\x54\x68\x65\x20\x71\x75\x66\x63"
+					"\x6B\x20\x62\x72\x6F\x77\x6E\x20"
+					"\x66\x6F\x78\x20\x6A\x75\x6D\x70",
+		.pln_txt_len	=	24,
+		.enc_txt	=	"\xA8\x26\xFD\x8C\xE5\x3B\x85\x5F"
+					"\xCC\xE2\x1C\x81\x12\x25\x6F\xE6"
+					"\x68\xD5\xC0\x5D\xD9\xB6\xB9\x00",
+		.enc_txt_len	=	24,
+	},
+	/* Derived From From NIST Special Publication 800-38A  and 800-67 */
+	{/* 3DES CBC */
+		.mod_alg	=	"cbc(des3_ede)",
+		.key		=	"\x01\x23\x45\x67\x89\xAB\xCD\xEF"
+					"\x23\x45\x67\x89\xAB\xCD\xEF\x01"
+					"\x45\x67\x89\xAB\xCD\xEF\x01\x23",
+		.klen		=	24,
+		.iv		=	"\x00\x01\x02\x03\x04\x05\x06\x07",
+		.ivlen		=	8,
+		.pln_txt	=	"\x54\x68\x65\x20\x71\x75\x66\x63"
+					"\x6B\x20\x62\x72\x6F\x77\x6E\x20"
+					"\x66\x6F\x78\x20\x6A\x75\x6D\x70",
+		.pln_txt_len	=	24,
+		.enc_txt	=	"\xf3\x68\xd0\x6f\x3b\xbd\x61\x4e"
+					"\x60\xf2\xd0\x24\x5c\xad\x3f\x81"
+					"\x8d\x5c\x69\xf2\xcb\x3f\xd5\xc7",
+		.enc_txt_len	=	24,
+	},
+};
+
+/*
+ *Test vectors For AEAD algorithms
+ */
+static struct _fips_test_vector_aead fips_test_vector_aead[] = {
+	/* From NIST Special Publication 800-38C: Appendix C.1  */
+	{ /*AES 128-CCM */
+		.mod_alg	=	"ccm(aes)",
+		.key		=	"\x40\x41\x42\x43\x44\x45\x46\x47"
+					"\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f",
+		.klen		=	16,
+		.iv		=	"\x07\x10\x11\x12\x13\x14\x15\x16"
+					"\x00\x00\x00\x00\x00\x00\x00\x00",
+		.ivlen		=	16,
+		.assoc		=	"\x00\x01\x02\x03\x04\x05\x06\x07",
+		.alen		=	8,
+		.pln_txt	=	"\x20\x21\x22\x23",
+		.pln_txt_len	=	4,
+		.enc_txt	=	"\x71\x62\x01\x5b\x4d\xac\x25\x5d",
+		.enc_txt_len	=	8,
+	},
+	/* Derived From NIST Special Publication 800-38C: Appendix C.1  */
+	{ /*AES 256-CCM */
+		.mod_alg	=	"ccm(aes)",
+		.key		=	"\x60\x3d\xeb\x10\x15\xca\x71\xbe"
+					"\x2b\x73\xae\xf0\x85\x7d\x77\x81"
+					"\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
+					"\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
+		.klen		=	32,
+		.iv		=	"\x07\x10\x11\x12\x13\x14\x15\x16"
+					"\x00\x00\x00\x00\x00\x00\x00\x00",
+		.ivlen		=	16,
+		.assoc		=	"\x00\x01\x02\x03\x04\x05\x06\x07",
+		.alen		=	8,
+		.pln_txt	=	"\x20\x21\x22\x23",
+		.pln_txt_len	=	4,
+		.enc_txt	=	"\xa8\xc7\xa9\x6a\x3a\x5b\x15\xe1",
+		.enc_txt_len	=	8,
+	},
+};
+
+#endif /* __CRYPTO_MSM_QCRYPTO_FIPS_H */
diff --git a/drivers/crypto/msm/qcryptoi.h b/drivers/crypto/msm/qcryptoi.h
new file mode 100644
index 0000000..64963ad
--- /dev/null
+++ b/drivers/crypto/msm/qcryptoi.h
@@ -0,0 +1,74 @@
+/* QTI Crypto driver
+ *
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CRYPTO_MSM_QCRYPTOI_H
+#define __CRYPTO_MSM_QCRYPTOI_H
+
+/* FIPS global status variable */
+extern enum fips_status g_fips140_status;
+
+/* The structure to hold data
+ * that selftests require
+ */
+struct fips_selftest_data {
+
+	char algo_prefix[10];
+	unsigned int ce_device;
+	bool prefix_ahash_algo;
+	bool prefix_hmac_algo;
+	bool prefix_aes_xts_algo;
+	bool prefix_aes_cbc_ecb_ctr_algo;
+	bool prefix_aead_algo;
+};
+
+#ifdef CONFIG_FIPS_ENABLE
+/*
+ * Sha/HMAC self tests
+ */
+int _fips_qcrypto_sha_selftest(struct fips_selftest_data *selftest_d);
+
+/*
+* Cipher algorithm self tests
+*/
+int _fips_qcrypto_cipher_selftest(struct fips_selftest_data *selftest_d);
+
+/*
+ * AEAD algorithm self tests
+ */
+int _fips_qcrypto_aead_selftest(struct fips_selftest_data *selftest_d);
+
+#else
+
+static inline
+int _fips_qcrypto_sha_selftest(struct fips_selftest_data *selftest_d)
+{
+	return 0;
+}
+
+static inline
+int _fips_qcrypto_cipher_selftest(struct fips_selftest_data *selftest_d)
+{
+	return 0;
+}
+
+static
+inline int _fips_qcrypto_aead_selftest(struct fips_selftest_data *selftest_d)
+{
+	return 0;
+}
+
+#endif  /* CONFIG_FIPS_ENABLE*/
+
+#endif  /* __CRYPTO_MSM_QCRYPTOI_H */
+
diff --git a/drivers/gpu/ion/ion_heap.c b/drivers/gpu/ion/ion_heap.c
index aa2551a..0b193a0 100644
--- a/drivers/gpu/ion/ion_heap.c
+++ b/drivers/gpu/ion/ion_heap.c
@@ -2,7 +2,7 @@
  * drivers/gpu/ion/ion_heap.c
  *
  * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -26,6 +26,7 @@
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/highmem.h>
+#include <linux/dma-mapping.h>
 #include "ion_priv.h"
 
 void *ion_heap_map_kernel(struct ion_heap *heap,
@@ -107,16 +108,15 @@
  * chunks to minimize the number of memsets and vmaps/vunmaps.
  *
  * Note that the `pages' array should be composed of all 4K pages.
+ *
+ * NOTE: This function does not guarantee synchronization of the caches
+ * and thus caller is responsible for handling any cache maintenance
+ * operations needed.
  */
 int ion_heap_pages_zero(struct page **pages, int num_pages)
 {
-	int i, j, k, npages_to_vmap;
+	int i, j, npages_to_vmap;
 	void *ptr = NULL;
-	/*
-	 * It's cheaper just to use writecombine memory and skip the
-	 * cache vs. using a cache memory and trying to flush it afterwards
-	 */
-	pgprot_t pgprot = pgprot_writecombine(pgprot_kernel);
 
 	/*
 	 * As an optimization, we manually zero out all of the pages
@@ -132,7 +132,7 @@
 		for (j = 0; j < MAX_VMAP_RETRIES && npages_to_vmap;
 			++j) {
 			ptr = vmap(&pages[i], npages_to_vmap,
-					VM_IOREMAP, pgprot);
+					VM_IOREMAP, PAGE_KERNEL);
 			if (ptr)
 				break;
 			else
@@ -141,21 +141,6 @@
 		if (!ptr)
 			return -ENOMEM;
 
-		/*
-		 * We have to invalidate the cache here because there
-		 * might be dirty lines to these physical pages (which
-		 * we don't care about) that could get written out at
-		 * any moment.
-		 */
-		for (k = 0; k < npages_to_vmap; k++) {
-			void *p = kmap_atomic(pages[i + k]);
-			phys_addr_t phys = page_to_phys(
-				pages[i + k]);
-
-			dmac_inv_range(p, p + PAGE_SIZE);
-			outer_inv_range(phys, phys + PAGE_SIZE);
-			kunmap_atomic(p);
-		}
 		memset(ptr, 0, npages_to_vmap * PAGE_SIZE);
 		vunmap(ptr);
 	}
@@ -163,11 +148,12 @@
 	return 0;
 }
 
-static int ion_heap_alloc_pages_mem(int page_tbl_size,
-				struct pages_mem *pages_mem)
+int ion_heap_alloc_pages_mem(struct pages_mem *pages_mem)
 {
 	struct page **pages;
+	unsigned int page_tbl_size;
 	pages_mem->free_fn = kfree;
+	page_tbl_size = sizeof(struct page *) * (pages_mem->size >> PAGE_SHIFT);
 	if (page_tbl_size > SZ_8K) {
 		/*
 		 * Do fallback to ensure we have a balance between
@@ -191,7 +177,7 @@
 	return 0;
 }
 
-static void ion_heap_free_pages_mem(struct pages_mem *pages_mem)
+void ion_heap_free_pages_mem(struct pages_mem *pages_mem)
 {
 	pages_mem->free_fn(pages_mem->pages);
 }
@@ -201,15 +187,17 @@
 	int i, ret;
 	struct pages_mem pages_mem;
 	int npages = 1 << order;
-	int page_tbl_size = sizeof(struct page *) * npages;
+	pages_mem.size = npages * PAGE_SIZE;
 
-	if (ion_heap_alloc_pages_mem(page_tbl_size, &pages_mem))
+	if (ion_heap_alloc_pages_mem(&pages_mem))
 		return -ENOMEM;
 
 	for (i = 0; i < (1 << order); ++i)
 		pages_mem.pages[i] = page + i;
 
 	ret = ion_heap_pages_zero(pages_mem.pages, npages);
+	dma_sync_single_for_device(NULL, page_to_phys(page), pages_mem.size,
+					DMA_BIDIRECTIONAL);
 	ion_heap_free_pages_mem(&pages_mem);
 	return ret;
 }
@@ -218,16 +206,12 @@
 {
 	struct sg_table *table = buffer->sg_table;
 	struct scatterlist *sg;
-	int i, j, ret = 0, npages = 0, page_tbl_size = 0;
+	int i, j, ret = 0, npages = 0;
 	struct pages_mem pages_mem;
 
-	for_each_sg(table->sgl, sg, table->nents, i) {
-		unsigned long len = sg_dma_len(sg);
-		int nrpages = len >> PAGE_SHIFT;
-		page_tbl_size += sizeof(struct page *) * nrpages;
-	}
+	pages_mem.size = PAGE_ALIGN(buffer->size);
 
-	if (ion_heap_alloc_pages_mem(page_tbl_size, &pages_mem))
+	if (ion_heap_alloc_pages_mem(&pages_mem))
 		return -ENOMEM;
 
 	for_each_sg(table->sgl, sg, table->nents, i) {
@@ -239,6 +223,8 @@
 	}
 
 	ret = ion_heap_pages_zero(pages_mem.pages, npages);
+	dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+					DMA_BIDIRECTIONAL);
 	ion_heap_free_pages_mem(&pages_mem);
 	return ret;
 }
diff --git a/drivers/gpu/ion/ion_page_pool.c b/drivers/gpu/ion/ion_page_pool.c
index cc2a36d..a1845de 100644
--- a/drivers/gpu/ion/ion_page_pool.c
+++ b/drivers/gpu/ion/ion_page_pool.c
@@ -32,7 +32,6 @@
 static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
 {
 	struct page *page;
-	struct scatterlist sg;
 
 	page = alloc_pages(pool->gfp_mask & ~__GFP_ZERO, pool->order);
 
@@ -43,11 +42,6 @@
 		if (ion_heap_high_order_page_zero(page, pool->order))
 			goto error_free_pages;
 
-	sg_init_table(&sg, 1);
-	sg_set_page(&sg, page, PAGE_SIZE << pool->order, 0);
-	sg_dma_address(&sg) = sg_phys(&sg);
-	dma_sync_sg_for_device(NULL, &sg, 1, DMA_BIDIRECTIONAL);
-
 	return page;
 error_free_pages:
 	__free_pages(page, pool->order);
@@ -104,22 +98,25 @@
 	return page;
 }
 
-void *ion_page_pool_alloc(struct ion_page_pool *pool)
+void *ion_page_pool_alloc(struct ion_page_pool *pool, bool *from_pool)
 {
 	struct page *page = NULL;
 
 	BUG_ON(!pool);
 
-	mutex_lock(&pool->mutex);
-	if (pool->high_count)
-		page = ion_page_pool_remove(pool, true);
-	else if (pool->low_count)
-		page = ion_page_pool_remove(pool, false);
-	mutex_unlock(&pool->mutex);
+	*from_pool = true;
 
-	if (!page)
+	if (mutex_trylock(&pool->mutex)) {
+		if (pool->high_count)
+			page = ion_page_pool_remove(pool, true);
+		else if (pool->low_count)
+			page = ion_page_pool_remove(pool, false);
+		mutex_unlock(&pool->mutex);
+	}
+	if (!page) {
 		page = ion_page_pool_alloc_pages(pool);
-
+		*from_pool = false;
+	}
 	return page;
 }
 
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
index c57efc1..1f78cb1 100644
--- a/drivers/gpu/ion/ion_priv.h
+++ b/drivers/gpu/ion/ion_priv.h
@@ -2,7 +2,7 @@
  * drivers/gpu/ion/ion_priv.h
  *
  * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -223,6 +223,7 @@
 
 struct pages_mem {
 	struct page **pages;
+	u32 size;
 	void (*free_fn) (const void *);
 };
 
@@ -237,6 +238,8 @@
 int ion_heap_pages_zero(struct page **pages, int num_pages);
 int ion_heap_buffer_zero(struct ion_buffer *buffer);
 int ion_heap_high_order_page_zero(struct page *page, int order);
+int ion_heap_alloc_pages_mem(struct pages_mem *pages_mem);
+void ion_heap_free_pages_mem(struct pages_mem *pages_mem);
 
 /**
  * ion_heap_init_deferred_free -- initialize deferred free functionality
@@ -374,7 +377,7 @@
 
 struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
 void ion_page_pool_destroy(struct ion_page_pool *);
-void *ion_page_pool_alloc(struct ion_page_pool *);
+void *ion_page_pool_alloc(struct ion_page_pool *, bool *from_pool);
 void ion_page_pool_free(struct ion_page_pool *, struct page *);
 
 /** ion_page_pool_shrink - shrinks the size of the memory cached in the pool
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
index cfdd5f4..b7ad01f 100644
--- a/drivers/gpu/ion/ion_system_heap.c
+++ b/drivers/gpu/ion/ion_system_heap.c
@@ -29,10 +29,10 @@
 #include <linux/dma-mapping.h>
 #include <trace/events/kmem.h>
 
-static unsigned int high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
+static unsigned int high_order_gfp_flags = (GFP_HIGHUSER |
 					    __GFP_NOWARN | __GFP_NORETRY |
 					    __GFP_NO_KSWAPD) & ~__GFP_WAIT;
-static unsigned int low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO |
+static unsigned int low_order_gfp_flags  = (GFP_HIGHUSER |
 					 __GFP_NOWARN);
 static const unsigned int orders[] = {9, 8, 4, 0};
 static const int num_orders = ARRAY_SIZE(orders);
@@ -59,13 +59,15 @@
 
 struct page_info {
 	struct page *page;
+	bool from_pool;
 	unsigned int order;
 	struct list_head list;
 };
 
 static struct page *alloc_buffer_page(struct ion_system_heap *heap,
 				      struct ion_buffer *buffer,
-				      unsigned long order)
+				      unsigned long order,
+				      bool *from_pool)
 {
 	bool cached = ion_buffer_cached(buffer);
 	bool split_pages = ion_buffer_fault_user_mappings(buffer);
@@ -76,7 +78,7 @@
 		pool = heap->uncached_pools[order_to_index(order)];
 	else
 		pool = heap->cached_pools[order_to_index(order)];
-	page = ion_page_pool_alloc(pool);
+	page = ion_page_pool_alloc(pool, from_pool);
 	if (!page)
 		return 0;
 
@@ -119,14 +121,14 @@
 	struct page *page;
 	struct page_info *info;
 	int i;
-
+	bool from_pool;
 	for (i = 0; i < num_orders; i++) {
 		if (size < order_to_size(orders[i]))
 			continue;
 		if (max_order < orders[i])
 			continue;
 
-		page = alloc_buffer_page(heap, buffer, orders[i]);
+		page = alloc_buffer_page(heap, buffer, orders[i], &from_pool);
 		if (!page)
 			continue;
 
@@ -134,11 +136,39 @@
 		if (info) {
 			info->page = page;
 			info->order = orders[i];
+			info->from_pool = from_pool;
 		}
 		return info;
 	}
 	return NULL;
 }
+static unsigned int process_info(struct page_info *info,
+				 struct scatterlist *sg,
+				 struct scatterlist *sg_sync,
+				 struct pages_mem *data, unsigned int i)
+{
+	struct page *page = info->page;
+	unsigned int j;
+
+	if (sg_sync) {
+		sg_set_page(sg_sync, page, (1 << info->order) * PAGE_SIZE, 0);
+		sg_dma_address(sg_sync) = page_to_phys(page);
+	}
+	sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE, 0);
+	/*
+	 * This is not correct - sg_dma_address needs a dma_addr_t
+	 * that is valid for the the targeted device, but this works
+	 * on the currently targeted hardware.
+	 */
+	sg_dma_address(sg) = page_to_phys(page);
+	if (data) {
+		for (j = 0; j < (1 << info->order); ++j)
+			data->pages[i++] = nth_page(page, j);
+	}
+	list_del(&info->list);
+	kfree(info);
+	return i;
+}
 
 static int ion_system_heap_allocate(struct ion_heap *heap,
 				     struct ion_buffer *buffer,
@@ -149,29 +179,51 @@
 							struct ion_system_heap,
 							heap);
 	struct sg_table *table;
+	struct sg_table table_sync;
 	struct scatterlist *sg;
+	struct scatterlist *sg_sync;
 	int ret;
 	struct list_head pages;
+	struct list_head pages_from_pool;
 	struct page_info *info, *tmp_info;
 	int i = 0;
+	unsigned int nents_sync = 0;
 	unsigned long size_remaining = PAGE_ALIGN(size);
 	unsigned int max_order = orders[0];
+	struct pages_mem data;
+	unsigned int sz;
 	bool split_pages = ion_buffer_fault_user_mappings(buffer);
 
+	data.size = 0;
 	INIT_LIST_HEAD(&pages);
+	INIT_LIST_HEAD(&pages_from_pool);
 	while (size_remaining > 0) {
 		info = alloc_largest_available(sys_heap, buffer, size_remaining, max_order);
 		if (!info)
 			goto err;
-		list_add_tail(&info->list, &pages);
-		size_remaining -= (1 << info->order) * PAGE_SIZE;
+
+		sz = (1 << info->order) * PAGE_SIZE;
+
+		if (info->from_pool) {
+			list_add_tail(&info->list, &pages_from_pool);
+		} else {
+			list_add_tail(&info->list, &pages);
+			data.size += sz;
+			++nents_sync;
+		}
+		size_remaining -= sz;
 		max_order = info->order;
 		i++;
 	}
 
+	ret = ion_heap_alloc_pages_mem(&data);
+
+	if (ret)
+		goto err;
+
 	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
 	if (!table)
-		goto err;
+		goto err_free_data_pages;
 
 	if (split_pages)
 		ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE,
@@ -182,32 +234,91 @@
 	if (ret)
 		goto err1;
 
-	sg = table->sgl;
-	list_for_each_entry_safe(info, tmp_info, &pages, list) {
-		struct page *page = info->page;
-		if (split_pages) {
-			for (i = 0; i < (1 << info->order); i++) {
-				sg_set_page(sg, page + i, PAGE_SIZE, 0);
-				sg = sg_next(sg);
-			}
-		} else {
-			sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE,
-				    0);
-			sg = sg_next(sg);
-		}
-		list_del(&info->list);
-		kfree(info);
+	if (nents_sync) {
+		ret = sg_alloc_table(&table_sync, nents_sync, GFP_KERNEL);
+		if (ret)
+			goto err_free_sg;
 	}
 
+	i = 0;
+	sg = table->sgl;
+	sg_sync = table_sync.sgl;
+
+	/*
+	 * We now have two separate lists. One list contains pages from the
+	 * pool and the other pages from buddy. We want to merge these
+	 * together while preserving the ordering of the pages (higher order
+	 * first).
+	 */
+	do {
+		if (!list_empty(&pages))
+			info = list_first_entry(&pages, struct page_info, list);
+		else
+			info = NULL;
+		if (!list_empty(&pages_from_pool))
+			tmp_info = list_first_entry(&pages_from_pool,
+							struct page_info, list);
+		else
+			tmp_info = NULL;
+
+		if (info && tmp_info) {
+			if (info->order >= tmp_info->order) {
+				i = process_info(info, sg, sg_sync, &data, i);
+				sg_sync = sg_next(sg_sync);
+			} else {
+				i = process_info(tmp_info, sg, 0, 0, i);
+			}
+		} else if (info) {
+			i = process_info(info, sg, sg_sync, &data, i);
+			sg_sync = sg_next(sg_sync);
+		} else if (tmp_info) {
+			i = process_info(tmp_info, sg, 0, 0, i);
+		} else {
+			BUG();
+		}
+		sg = sg_next(sg);
+
+	} while (sg);
+
+	ret = ion_heap_pages_zero(data.pages, data.size >> PAGE_SHIFT);
+	if (ret) {
+		pr_err("Unable to zero pages\n");
+		goto err_free_sg2;
+	}
+
+	if (nents_sync)
+		dma_sync_sg_for_device(NULL, table_sync.sgl, table_sync.nents,
+				       DMA_BIDIRECTIONAL);
+
 	buffer->priv_virt = table;
+	if (nents_sync)
+		sg_free_table(&table_sync);
+	ion_heap_free_pages_mem(&data);
 	return 0;
+err_free_sg2:
+	/* We failed to zero buffers. Bypass pool */
+	buffer->flags |= ION_FLAG_FREED_FROM_SHRINKER;
+
+	for_each_sg(table->sgl, sg, table->nents, i)
+		free_buffer_page(sys_heap, buffer, sg_page(sg),
+				get_order(sg->length));
+	if (nents_sync)
+		sg_free_table(&table_sync);
+err_free_sg:
+	sg_free_table(table);
 err1:
 	kfree(table);
+err_free_data_pages:
+	ion_heap_free_pages_mem(&data);
 err:
 	list_for_each_entry_safe(info, tmp_info, &pages, list) {
 		free_buffer_page(sys_heap, buffer, info->page, info->order);
 		kfree(info);
 	}
+	list_for_each_entry_safe(info, tmp_info, &pages_from_pool, list) {
+		free_buffer_page(sys_heap, buffer, info->page, info->order);
+		kfree(info);
+	}
 	return -ENOMEM;
 }
 
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index c690801..d37cb20 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -311,6 +311,7 @@
 	ADRENO_REG_CP_IB2_BASE,
 	ADRENO_REG_CP_IB2_BUFSZ,
 	ADRENO_REG_CP_TIMESTAMP,
+	ADRENO_REG_CP_HW_FAULT,
 	ADRENO_REG_SCRATCH_ADDR,
 	ADRENO_REG_SCRATCH_UMSK,
 	ADRENO_REG_SCRATCH_REG2,
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index b0851a2..6189c20 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -13,6 +13,7 @@
 
 #include <linux/delay.h>
 #include <linux/sched.h>
+#include <linux/ratelimit.h>
 #include <mach/socinfo.h>
 
 #include "kgsl.h"
@@ -3061,7 +3062,6 @@
 static void a3xx_err_callback(struct adreno_device *adreno_dev, int bit)
 {
 	struct kgsl_device *device = &adreno_dev->dev;
-	const char *err = "";
 
 	switch (bit) {
 	case A3XX_INT_RBBM_AHB_ERROR: {
@@ -3082,35 +3082,46 @@
 
 		/* Clear the error */
 		kgsl_regwrite(device, A3XX_RBBM_AHB_CMD, (1 << 3));
-		return;
+		break;
 	}
 	case A3XX_INT_RBBM_REG_TIMEOUT:
-		err = "RBBM: AHB register timeout";
+		KGSL_DRV_CRIT_RATELIMIT(device, "RBBM: AHB register timeout\n");
 		break;
 	case A3XX_INT_RBBM_ME_MS_TIMEOUT:
-		err = "RBBM: ME master split timeout";
+		KGSL_DRV_CRIT_RATELIMIT(device,
+			"RBBM: ME master split timeout\n");
 		break;
 	case A3XX_INT_RBBM_PFP_MS_TIMEOUT:
-		err = "RBBM: PFP master split timeout";
+		KGSL_DRV_CRIT_RATELIMIT(device,
+			"RBBM: PFP master split timeout\n");
 		break;
 	case A3XX_INT_RBBM_ATB_BUS_OVERFLOW:
-		err = "RBBM: ATB bus oveflow";
+		KGSL_DRV_CRIT_RATELIMIT(device,
+			"RBBM: ATB bus oveflow\n");
 		break;
 	case A3XX_INT_VFD_ERROR:
-		err = "VFD: Out of bounds access";
+		KGSL_DRV_CRIT_RATELIMIT(device,
+			"VFD: Out of bounds access\n");
 		break;
 	case A3XX_INT_CP_T0_PACKET_IN_IB:
-		err = "ringbuffer TO packet in IB interrupt";
+		KGSL_DRV_CRIT_RATELIMIT(device,
+			"ringbuffer TO packet in IB interrupt\n");
 		break;
 	case A3XX_INT_CP_OPCODE_ERROR:
-		err = "ringbuffer opcode error interrupt";
+		KGSL_DRV_CRIT_RATELIMIT(device,
+			"ringbuffer opcode error interrupt\n");
 		break;
 	case A3XX_INT_CP_RESERVED_BIT_ERROR:
-		err = "ringbuffer reserved bit error interrupt";
+		KGSL_DRV_CRIT_RATELIMIT(device,
+				"ringbuffer reserved bit error interrupt\n");
 		break;
-	case A3XX_INT_CP_HW_FAULT:
-		err = "ringbuffer hardware fault";
+	case A3XX_INT_CP_HW_FAULT: {
+		unsigned int reg;
+		adreno_readreg(adreno_dev, ADRENO_REG_CP_HW_FAULT, &reg);
+		KGSL_DRV_CRIT_RATELIMIT(device,
+			"CP | Ringbuffer HW fault | status=%x\n", reg);
 		break;
+	}
 	case A3XX_INT_CP_REG_PROTECT_FAULT: {
 		unsigned int reg;
 		kgsl_regread(device, A3XX_CP_PROTECT_STATUS, &reg);
@@ -3119,17 +3130,16 @@
 			"CP | Protected mode error| %s | addr=%x\n",
 			reg & (1 << 24) ? "WRITE" : "READ",
 			(reg & 0x1FFFF) >> 2);
+		break;
 	}
 	case A3XX_INT_CP_AHB_ERROR_HALT:
-		err = "ringbuffer AHB error interrupt";
+		KGSL_DRV_CRIT(device, "ringbuffer AHB error interrupt\n");
 		break;
 	case A3XX_INT_UCHE_OOB_ACCESS:
-		err = "UCHE:  Out of bounds access";
+		KGSL_DRV_CRIT_RATELIMIT(device,
+			"UCHE:  Out of bounds access\n");
 		break;
-	default:
-		return;
 	}
-	KGSL_DRV_CRIT(device, "%s\n", err);
 }
 
 static void a3xx_cp_callback(struct adreno_device *adreno_dev, int irq)
@@ -4563,6 +4573,7 @@
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_TIMESTAMP, REG_CP_TIMESTAMP),
 	ADRENO_REG_DEFINE(ADRENO_REG_SCRATCH_ADDR, REG_SCRATCH_ADDR),
 	ADRENO_REG_DEFINE(ADRENO_REG_SCRATCH_UMSK, REG_SCRATCH_UMSK),
+	ADRENO_REG_DEFINE(ADRENO_REG_CP_HW_FAULT, A3XX_CP_HW_FAULT),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS, A3XX_RBBM_STATUS),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_CTL, A3XX_RBBM_PERFCTR_CTL),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD0,
diff --git a/drivers/gpu/msm/kgsl_log.h b/drivers/gpu/msm/kgsl_log.h
index f90627e..04ca7a5 100644
--- a/drivers/gpu/msm/kgsl_log.h
+++ b/drivers/gpu/msm/kgsl_log.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2008-2011,2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2008-2011,2013-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -56,6 +56,26 @@
 	} \
 })
 
+#define dev_crit_ratelimited(dev, fmt, ...)				\
+	dev_level_ratelimited(dev_crit, dev, fmt, ##__VA_ARGS__)
+
+#define dev_level_ratelimited(dev_level, dev, fmt, ...)			\
+do {									\
+	static DEFINE_RATELIMIT_STATE(_rs,				\
+				      DEFAULT_RATELIMIT_INTERVAL,	\
+				      DEFAULT_RATELIMIT_BURST);		\
+	if (__ratelimit(&_rs))						\
+		dev_level(dev, fmt, ##__VA_ARGS__);			\
+} while (0)
+
+#define KGSL_LOG_CRIT_RATELIMITED(dev, lvl, fmt, args...) \
+	do { \
+		if ((lvl) >= 2) \
+			dev_crit_ratelimited(dev, "|%s| " fmt, \
+					__func__, ##args); \
+	} while (0)
+
+
 #define KGSL_DRV_INFO(_dev, fmt, args...) \
 KGSL_LOG_INFO(_dev->dev, _dev->drv_log, fmt, ##args)
 #define KGSL_DRV_WARN(_dev, fmt, args...) \
@@ -64,6 +84,8 @@
 KGSL_LOG_ERR(_dev->dev, _dev->drv_log, fmt, ##args)
 #define KGSL_DRV_CRIT(_dev, fmt, args...) \
 KGSL_LOG_CRIT(_dev->dev, _dev->drv_log, fmt, ##args)
+#define KGSL_DRV_CRIT_RATELIMIT(_dev, fmt, args...) \
+KGSL_LOG_CRIT_RATELIMITED(_dev->dev, _dev->drv_log, fmt, ##args)
 
 #define KGSL_CMD_INFO(_dev, fmt, args...) \
 KGSL_LOG_INFO(_dev->dev, _dev->cmd_log, fmt, ##args)
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
index 353b55f..c92b29d 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
@@ -352,6 +352,10 @@
 	msm_camera_io_w_mb(0xFEFFFFFF, vfe_dev->vfe_base + 0x34);
 	msm_camera_io_w(vfe_dev->stats_data.stats_mask,
 		vfe_dev->vfe_base + 0x44);
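+	/* Assumption: 0x24 is the IRQ command register and 0x30/0x34 the IRQ
+	 * clear registers; latch the pending clears, zero both clear
+	 * registers, then latch again so no stale interrupts remain. */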
+	msm_camera_io_w(1, vfe_dev->vfe_base + 0x24);
+	msm_camera_io_w(0, vfe_dev->vfe_base + 0x30);
+	msm_camera_io_w_mb(0, vfe_dev->vfe_base + 0x34);
+	msm_camera_io_w(1, vfe_dev->vfe_base + 0x24);
 }
 
 static void msm_vfe40_process_reset_irq(struct vfe_device *vfe_dev,
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
index 1c219f2..b4196e9 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
@@ -56,6 +56,16 @@
 #define MSM_CPP_NOMINAL_CLOCK 266670000
 #define MSM_CPP_TURBO_CLOCK 320000000
 
+#define CPP_FW_VERSION_1_2_0	0x10020000
+#define CPP_FW_VERSION_1_4_0	0x10040000
+#define CPP_FW_VERSION_1_6_0	0x10060000
+#define CPP_FW_VERSION_1_8_0	0x10080000
+
+/* stripe information offsets in frame command */
+#define STRIPE_BASE_FW_1_2_0	130
+#define STRIPE_BASE_FW_1_4_0	140
+#define STRIPE_BASE_FW_1_6_0	464
+
 struct msm_cpp_timer_data_t {
 	struct cpp_device *cpp_dev;
 	struct msm_cpp_frame_info_t *processed_frame;
@@ -918,7 +928,8 @@
 	msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_CMD);
 	msm_cpp_poll(cpp_dev->base, 0x2);
 	msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_FW_VER);
-	pr_info("CPP FW Version: 0x%x\n", msm_cpp_read(cpp_dev->base));
+	cpp_dev->fw_version = msm_cpp_read(cpp_dev->base);
+	pr_info("CPP FW Version: 0x%08x\n", cpp_dev->fw_version);
 	msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_TRAILER);
 
 	/*Disable MC clock*/
@@ -1285,9 +1296,8 @@
 	uint16_t num_stripes = 0;
 	struct msm_buf_mngr_info buff_mgr_info, dup_buff_mgr_info;
 	int32_t status = 0;
-	uint8_t fw_version_1_2_x = 0;
 	int in_fd;
-
+	int32_t stripe_base = 0;
 	int i = 0;
 	if (!new_frame) {
 		pr_err("Insufficient memory. return\n");
@@ -1328,7 +1338,16 @@
 	}
 
 	new_frame->cpp_cmd_msg = cpp_frame_msg;
-
+	if (cpp_frame_msg == NULL ||
+		(new_frame->msg_len < MSM_CPP_MIN_FRAME_LENGTH)) {
+		pr_err("%s:%d Frame message missing or length incorrect\n",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+	if (cpp_frame_msg[new_frame->msg_len - 1] != MSM_CPP_MSG_ID_TRAILER) {
+		pr_err("%s %d Invalid frame message\n", __func__, __LINE__);
+		return -EINVAL;
+	}
 	in_phyaddr = msm_cpp_fetch_buffer_info(cpp_dev,
 		&new_frame->input_buffer_info,
 		((new_frame->input_buffer_info.identity >> 16) & 0xFFFF),
@@ -1402,22 +1421,36 @@
 		((cpp_frame_msg[12] >> 10) & 0x3FF) +
 		(cpp_frame_msg[12] & 0x3FF);
 
-	fw_version_1_2_x = 0;
-	if ((cpp_dev->hw_info.cpp_hw_version == CPP_HW_VERSION_1_1_0) ||
-		(cpp_dev->hw_info.cpp_hw_version == CPP_HW_VERSION_1_1_1) ||
-		(cpp_dev->hw_info.cpp_hw_version == CPP_HW_VERSION_2_0_0))
-		fw_version_1_2_x = 2;
+	if ((cpp_dev->fw_version & 0xffff0000) ==
+		CPP_FW_VERSION_1_2_0) {
+		stripe_base = STRIPE_BASE_FW_1_2_0;
+	} else if ((cpp_dev->fw_version & 0xffff0000) ==
+		CPP_FW_VERSION_1_4_0) {
+		stripe_base = STRIPE_BASE_FW_1_4_0;
+	} else if ((cpp_dev->fw_version & 0xffff0000) ==
+		CPP_FW_VERSION_1_6_0) {
+		stripe_base = STRIPE_BASE_FW_1_6_0;
+	} else {
+		pr_err("invalid fw version %08x\n", cpp_dev->fw_version);
+	}
+
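+	/* Each stripe occupies 27 words starting at stripe_base; one more
+	 * word holds the MSM_CPP_MSG_ID_TRAILER validated above. */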
+	if ((stripe_base + num_stripes * 27 + 1) != new_frame->msg_len) {
+		pr_err("Invalid frame message\n");
+		rc = -EINVAL;
+		goto ERROR3;
+	}
+
 
 	for (i = 0; i < num_stripes; i++) {
-		cpp_frame_msg[(133 + fw_version_1_2_x) + i * 27] +=
+		cpp_frame_msg[stripe_base + 5 + i * 27] +=
 			(uint32_t) in_phyaddr;
-		cpp_frame_msg[(139 + fw_version_1_2_x) + i * 27] +=
+		cpp_frame_msg[stripe_base + 11 + i * 27] +=
 			(uint32_t) out_phyaddr0;
-		cpp_frame_msg[(140 + fw_version_1_2_x) + i * 27] +=
+		cpp_frame_msg[stripe_base + 12 + i * 27] +=
 			(uint32_t) out_phyaddr1;
-		cpp_frame_msg[(141 + fw_version_1_2_x) + i * 27] +=
+		cpp_frame_msg[stripe_base + 13 + i * 27] +=
 			(uint32_t) out_phyaddr0;
-		cpp_frame_msg[(142 + fw_version_1_2_x) + i * 27] +=
+		cpp_frame_msg[stripe_base + 14 + i * 27] +=
 			(uint32_t) out_phyaddr1;
 	}
 
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.h b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.h
index bd73ab2..af1af2d 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.h
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.h
@@ -189,6 +189,7 @@
 	char *fw_name_bin;
 	struct workqueue_struct *timer_wq;
 	struct msm_cpp_work_t *work;
+	uint32_t fw_version;
 	uint8_t stream_cnt;
 	uint8_t timeout_trial_cnt;
 
diff --git a/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c b/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
index cce6525..7fb312a 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
@@ -135,66 +135,57 @@
 {
 	int32_t rc = -EFAULT;
 	int32_t i = 0;
+	enum msm_camera_i2c_reg_addr_type save_addr_type;
 	CDBG("Enter\n");
 
+	save_addr_type = a_ctrl->i2c_client.addr_type;
 	for (i = 0; i < size; i++) {
+
+		switch (settings[i].addr_type) {
+		case MSM_ACTUATOR_BYTE_ADDR:
+			a_ctrl->i2c_client.addr_type = MSM_CAMERA_I2C_BYTE_ADDR;
+			break;
+		case MSM_ACTUATOR_WORD_ADDR:
+			a_ctrl->i2c_client.addr_type = MSM_CAMERA_I2C_WORD_ADDR;
+			break;
+		default:
+			pr_err("Unsupported addr type: %d\n",
+				settings[i].addr_type);
+			break;
+		}
+
 		switch (settings[i].i2c_operation) {
-		case MSM_ACT_WRITE: {
-			switch (settings[i].data_type) {
-			case MSM_ACTUATOR_BYTE_DATA:
-				rc = a_ctrl->i2c_client.i2c_func_tbl->i2c_write(
-					&a_ctrl->i2c_client,
-					settings[i].reg_addr,
-					settings[i].reg_data,
-					MSM_CAMERA_I2C_BYTE_DATA);
-				break;
-			case MSM_ACTUATOR_WORD_DATA:
-				rc = a_ctrl->i2c_client.i2c_func_tbl->i2c_write(
-					&a_ctrl->i2c_client,
-					settings[i].reg_addr,
-					settings[i].reg_data,
-					MSM_CAMERA_I2C_WORD_DATA);
-				break;
-			default:
-				pr_err("Unsupport data type: %d\n",
-					settings[i].i2c_operation);
-				break;
-			}
+		case MSM_ACT_WRITE:
+			rc = a_ctrl->i2c_client.i2c_func_tbl->i2c_write(
+				&a_ctrl->i2c_client,
+				settings[i].reg_addr,
+				settings[i].reg_data,
+				settings[i].data_type);
 			break;
-		}
-		case MSM_ACT_POLL: {
-			switch (settings[i].data_type) {
-			case MSM_ACTUATOR_BYTE_DATA:
-				rc = a_ctrl->i2c_client.i2c_func_tbl->i2c_poll(
-					&a_ctrl->i2c_client,
-					settings[i].reg_addr,
-					settings[i].reg_data,
-					MSM_CAMERA_I2C_BYTE_DATA);
-				break;
-			case MSM_ACTUATOR_WORD_DATA:
-				rc = a_ctrl->i2c_client.i2c_func_tbl->i2c_poll(
-					&a_ctrl->i2c_client,
-					settings[i].reg_addr,
-					settings[i].reg_data,
-					MSM_CAMERA_I2C_WORD_DATA);
-				break;
-			default:
-				pr_err("Unsupport data type: %d\n",
-					settings[i].i2c_operation);
-				break;
-			}
+		case MSM_ACT_POLL:
+			rc = a_ctrl->i2c_client.i2c_func_tbl->i2c_poll(
+				&a_ctrl->i2c_client,
+				settings[i].reg_addr,
+				settings[i].reg_data,
+				settings[i].data_type);
 			break;
-		}
-		}
+		default:
+			pr_err("Unsupported i2c_operation: %d\n",
+				settings[i].i2c_operation);
+			break;
+		}
 
 		if (0 != settings[i].delay)
 			msleep(settings[i].delay);
 
 		if (rc < 0)
 			break;
 	}
 
 	a_ctrl->curr_step_pos = 0;
+	/* Restore the saved register addr_type after the
+	 * init settings have been written. */
+	a_ctrl->i2c_client.addr_type = save_addr_type;
 	CDBG("Exit\n");
 	return rc;
 }
diff --git a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.h b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.h
index 6067f26..44c134e 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.h
@@ -27,6 +27,8 @@
 #define TRUE  1
 #define FALSE 0
 
+#define CCI_NUM_CLK_MAX	16
+
 enum cci_i2c_queue_t {
 	QUEUE_0,
 	QUEUE_1,
@@ -128,7 +130,7 @@
 	uint8_t ref_count;
 	enum msm_cci_state_t cci_state;
 
-	struct clk *cci_clk[5];
+	struct clk *cci_clk[CCI_NUM_CLK_MAX];
 	struct msm_camera_cci_i2c_queue_info
 		cci_i2c_queue_info[NUM_MASTERS][NUM_QUEUES];
 	struct msm_camera_cci_master_info cci_master_info[NUM_MASTERS];
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.h b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.h
index fd4db79..0a17d93 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.h
@@ -20,6 +20,8 @@
 #include <media/msm_cam_sensor.h>
 #include "msm_sd.h"
 
+#define CSID_NUM_CLK_MAX  16
+
 enum msm_csid_state_t {
 	CSID_POWER_UP,
 	CSID_POWER_DOWN,
@@ -38,7 +40,7 @@
 	uint32_t hw_version;
 	enum msm_csid_state_t csid_state;
 
-	struct clk *csid_clk[11];
+	struct clk *csid_clk[CSID_NUM_CLK_MAX];
 };
 
 #define VIDIOC_MSM_CSID_RELEASE \
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
index a11b958..35b9ca1 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
@@ -21,6 +21,7 @@
 #include "msm_sd.h"
 
 #define MAX_CSIPHY 3
+#define CSIPHY_NUM_CLK_MAX  16
 
 enum msm_csiphy_state_t {
 	CSIPHY_POWER_UP,
@@ -41,8 +42,7 @@
 	struct mutex mutex;
 	uint32_t hw_version;
 	enum msm_csiphy_state_t csiphy_state;
-
-	struct clk *csiphy_clk[4];
+	struct clk *csiphy_clk[CSIPHY_NUM_CLK_MAX];
 	uint8_t ref_count;
 	uint16_t lane_mask[MAX_CSIPHY];
 };
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index a563f68..0e38b18 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -1447,7 +1447,6 @@
 		hfi->ltrcount = hal->ltrcount;
 		hfi->trustmode = hal->trustmode;
 		pkt->size += sizeof(u32) + sizeof(struct hfi_ltrmode);
-		pr_err("SET LTR\n");
 		break;
 	}
 	case HAL_CONFIG_VENC_USELTRFRAME:
@@ -1461,7 +1460,6 @@
 		hfi->refltr = hal->refltr;
 		hfi->useconstrnt = hal->useconstrnt;
 		pkt->size += sizeof(u32) + sizeof(struct hfi_ltruse);
-		pr_err("USE LTR\n");
 		break;
 	}
 	case HAL_CONFIG_VENC_MARKLTRFRAME:
@@ -1473,7 +1471,6 @@
 		hfi = (struct hfi_ltrmark *) &pkt->rg_property_data[1];
 		hfi->markframe = hal->markframe;
 		pkt->size += sizeof(u32) + sizeof(struct hfi_ltrmark);
-		pr_err("MARK LTR\n");
 		break;
 	}
 	case HAL_PARAM_VENC_HIER_P_MAX_ENH_LAYERS:
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index bcd13b8..701a8cc 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -723,7 +723,7 @@
 	{
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_USELTRFRAME,
 		.name = "H264 Use LTR",
-		.type = V4L2_CTRL_TYPE_BUTTON,
+		.type = V4L2_CTRL_TYPE_INTEGER,
 		.minimum = 0,
 		.maximum = (MAX_LTR_FRAME_COUNT - 1),
 		.default_value = 0,
@@ -753,7 +753,7 @@
 	{
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_MARKLTRFRAME,
 		.name = "H264 Mark LTR",
-		.type = V4L2_CTRL_TYPE_BUTTON,
+		.type = V4L2_CTRL_TYPE_INTEGER,
 		.minimum = 0,
 		.maximum = (MAX_LTR_FRAME_COUNT - 1),
 		.default_value = 0,
@@ -2179,7 +2179,7 @@
 	}
 	case V4L2_CID_MPEG_VIDC_VIDEO_USELTRFRAME:
 		property_id = HAL_CONFIG_VENC_USELTRFRAME;
-		useltr.refltr = ctrl->val;
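+		/* refltr is a bitmask, so turn the LTR index into its bit */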
+		useltr.refltr = (1 << ctrl->val);
 		useltr.useconstrnt = false;
 		useltr.frames = 0;
 		pdata = &useltr;
@@ -2302,7 +2302,6 @@
 		rc = call_hfi_op(hdev, session_set_property,
 				(void *)inst->session, property_id, pdata);
 	}
-	pr_err("Returning from %s\n", __func__);
 	return rc;
 }
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 84b0780..8176b06 100755
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -3164,6 +3164,40 @@
 	mutex_unlock(&inst->lock);
 }
 
+void msm_comm_flush_pending_dynamic_buffers(struct msm_vidc_inst *inst)
+{
+	struct buffer_info *binfo = NULL;
+	struct list_head *list = NULL;
+
+	if (!inst)
+		return;
+
+	if (inst->buffer_mode_set[CAPTURE_PORT] != HAL_BUFFER_MODE_DYNAMIC)
+		return;
+
+	if (list_empty(&inst->pendingq) || list_empty(&inst->registered_bufs))
+		return;
+
+	list = &inst->registered_bufs;
+
+	/*
+	 * Dynamic Buffer mode - Since pendingq is not empty,
+	 * no output buffers have been sent to firmware yet.
+	 * Hence remove reference to all pendingq o/p buffers
+	 * before flushing them.
+	 */
+
+	list_for_each_entry(binfo, list, list) {
+		if (binfo && binfo->type ==
+			V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+			dprintk(VIDC_DBG,
+				"%s: binfo = %p device_addr = %pa\n",
+				__func__, binfo, &binfo->device_addr[0]);
+			buf_ref_put(inst, binfo);
+		}
+	}
+}
+
 int msm_comm_flush(struct msm_vidc_inst *inst, u32 flags)
 {
 	int rc =  0;
@@ -3229,6 +3263,8 @@
 
 	} else {
 		if (!list_empty(&inst->pendingq)) {
+			msm_comm_flush_pending_dynamic_buffers(inst);
+
 			/*If flush is called after queueing buffers but before
 			 * streamon driver should flush the pending queue*/
 			list_for_each_safe(ptr, next, &inst->pendingq) {
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 7c20d1e..9d294f0 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -116,6 +116,7 @@
 	struct list_head                 list;
 	u32  app_id;
 	u32  ref_cnt;
+	char app_name[MAX_APP_NAME_SIZE];
 };
 
 struct qseecom_registered_kclient_list {
@@ -185,6 +186,7 @@
 	uint32_t user_virt_sb_base;
 	size_t sb_length;
 	struct ion_handle *ihandle;		/* Retrieve phy addr */
+	char app_name[MAX_APP_NAME_SIZE];
 };
 
 struct qseecom_listener_handle {
@@ -999,7 +1001,9 @@
 		}
 		entry->app_id = app_id;
 		entry->ref_cnt = 1;
-
+		memset((void *)entry->app_name, 0, MAX_APP_NAME_SIZE);
+		memcpy((void *)entry->app_name,
+			(void *)load_img_req.img_name, MAX_APP_NAME_SIZE);
 		/* Deallocate the handle */
 		if (!IS_ERR_OR_NULL(ihandle))
 			ion_free(qseecom.ion_clnt, ihandle);
@@ -1013,6 +1017,9 @@
 		(char *)(load_img_req.img_name));
 	}
 	data->client.app_id = app_id;
+	memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
+	memcpy((void *)data->client.app_name,
+		(void *)load_img_req.img_name, MAX_APP_NAME_SIZE);
 	load_img_req.app_id = app_id;
 	if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
 		pr_err("copy_to_user failed\n");
@@ -1061,53 +1068,55 @@
 				bool app_crash)
 {
 	unsigned long flags;
+	unsigned long flags1;
 	int ret = 0;
 	struct qseecom_command_scm_resp resp;
-	struct qseecom_registered_app_list *ptr_app;
+	struct qseecom_registered_app_list *ptr_app = NULL;
 	bool unload = false;
 	bool found_app = false;
+	bool found_dead_app = false;
+
+	if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
+		pr_warn("Do not unload keymaster app from tz\n");
+		return 0;
+	}
 
 	if (data->client.app_id > 0) {
 		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
 		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
 									list) {
 			if (ptr_app->app_id == data->client.app_id) {
-				found_app = true;
-				if (app_crash) {
-					ptr_app->ref_cnt = 0;
-					unload = true;
+				if (!memcmp((void *)ptr_app->app_name,
+					(void *)data->client.app_name,
+					strlen(data->client.app_name))) {
+					found_app = true;
+					if (app_crash || ptr_app->ref_cnt == 1)
+						unload = true;
 					break;
 				} else {
-					if (ptr_app->ref_cnt == 1) {
-						unload = true;
-						break;
-					} else {
-						ptr_app->ref_cnt--;
-						pr_debug("Can't unload app(%d) inuse\n",
-						ptr_app->app_id);
-						break;
-					}
+					found_dead_app = true;
+					break;
 				}
 			}
 		}
 		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
 								flags);
-		if (found_app == false) {
-			pr_err("Cannot find app with id = %d\n",
-					data->client.app_id);
+		if (found_app == false && found_dead_app == false) {
+			pr_err("Cannot find app with id = %d (%s)\n",
+				data->client.app_id,
+				(char *)data->client.app_name);
 			return -EINVAL;
 		}
 	}
 
+	if (found_dead_app) {
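+		/* The app_id matched but the name did not: treat the entry
+		 * as a stale (dead) app and clean up this client only. */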
+		pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
+			(char *)data->client.app_name);
+		__qseecom_cleanup_app(data);
+	}
+
 	if (unload) {
 		struct qseecom_unload_app_ireq req;
-
-		__qseecom_cleanup_app(data);
-		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
-		list_del(&ptr_app->list);
-		kzfree(ptr_app);
-		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
-								flags);
 		/* Populate the structure for sending scm call to load image */
 		req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
 		req.app_id = data->client.app_id;
@@ -1123,6 +1132,15 @@
 		} else {
 			pr_warn("App id %d now unloaded\n", req.app_id);
 		}
+		if (resp.result == QSEOS_RESULT_FAILURE) {
+			pr_err("app (%d) unload failed\n",
+					data->client.app_id);
+			return -EFAULT;
+		}
+		if (resp.result == QSEOS_RESULT_SUCCESS)
+			pr_info("App (%d) is unloaded\n",
+					data->client.app_id);
+		__qseecom_cleanup_app(data);
 		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
 			ret = __qseecom_process_incomplete_cmd(data, &resp);
 			if (ret) {
@@ -1132,6 +1150,29 @@
 			}
 		}
 	}
+
+	if (found_app) {
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
+		if (app_crash) {
+			ptr_app->ref_cnt = 0;
+			pr_debug("app_crash: ref_count = 0\n");
+		} else {
+			if (ptr_app->ref_cnt == 1) {
+				ptr_app->ref_cnt = 0;
+				pr_info("ref_count set to 0\n");
+			} else {
+				ptr_app->ref_cnt--;
+				pr_info("Can't unload app(%d) in use\n",
+					ptr_app->app_id);
+			}
+		}
+		if (unload) {
+			list_del(&ptr_app->list);
+			kzfree(ptr_app);
+		}
+		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+								flags1);
+	}
 	qseecom_unmap_ion_allocated_memory(data);
 	data->released = true;
 	return ret;
@@ -1326,6 +1367,9 @@
 	u32 reqd_len_sb_in = 0;
 	struct qseecom_client_send_data_ireq send_data_req;
 	struct qseecom_command_scm_resp resp;
+	unsigned long flags;
+	struct qseecom_registered_app_list *ptr_app;
+	bool found_app = false;
 
 	if (req->cmd_req_buf == NULL || req->resp_buf == NULL) {
 		pr_err("cmd buffer or response buffer is null\n");
@@ -1367,6 +1411,26 @@
 		return -ENOMEM;
 	}
 
+	/* find app_id & img_name from list */
+	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
+							list) {
+		if ((ptr_app->app_id == data->client.app_id) &&
+			 (!memcmp((void *)ptr_app->app_name,
+				(void *)data->client.app_name,
+				strlen(data->client.app_name)))) {
+			found_app = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
+
+	if (!found_app) {
+		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
+			(char *)data->client.app_name);
+		return -EINVAL;
+	}
+
 	send_data_req.qsee_cmd_id = QSEOS_CLIENT_SEND_DATA_COMMAND;
 	send_data_req.app_id = data->client.app_id;
 	send_data_req.req_ptr = (void *)(__qseecom_uvirt_to_kphys(data,
@@ -2841,7 +2905,9 @@
 				&qseecom.registered_app_list_lock, flags);
 		data->client.app_id = ret;
 		query_req.app_id = ret;
-
+		memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
+		memcpy((void *)data->client.app_name,
+				(void *)query_req.app_name, MAX_APP_NAME_SIZE);
 		if (copy_to_user(argp, &query_req, sizeof(query_req))) {
 			pr_err("copy_to_user failed\n");
 			return -EFAULT;
@@ -3516,7 +3582,6 @@
 			ret = -EINVAL;
 			break;
 		}
-		data->type = QSEECOM_CLIENT_APP;
 		pr_debug("SET_MEM_PARAM: qseecom addr = 0x%x\n", (u32)data);
 		ret = qseecom_set_client_mem_param(data, argp);
 		if (ret)
@@ -3850,6 +3915,7 @@
 	data->abort = 0;
 	data->type = QSEECOM_GENERIC;
 	data->released = false;
+	memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
 	data->mode = INACTIVE;
 	init_waitqueue_head(&data->abort_wq);
 	atomic_set(&data->ioctl_count, 0);
diff --git a/drivers/net/wireless/wcnss/wcnss_wlan.c b/drivers/net/wireless/wcnss/wcnss_wlan.c
index d06ec85..2d260be 100644
--- a/drivers/net/wireless/wcnss/wcnss_wlan.c
+++ b/drivers/net/wireless/wcnss/wcnss_wlan.c
@@ -207,7 +207,7 @@
 
 /* max 20mhz channel count */
 #define WCNSS_MAX_CH_NUM			45
-#define WCNSS_MAX_PIL_RETRY			3
+#define WCNSS_MAX_PIL_RETRY			2
 
 #define VALID_VERSION(version) \
 	((strncmp(version, "INVALID", WCNSS_VERSION_LEN)) ? 1 : 0)
diff --git a/drivers/platform/msm/qpnp-pwm.c b/drivers/platform/msm/qpnp-pwm.c
index 588afc6..80b0892 100644
--- a/drivers/platform/msm/qpnp-pwm.c
+++ b/drivers/platform/msm/qpnp-pwm.c
@@ -136,12 +136,6 @@
 	(value |= (BIT(id) >> QPNP_RAMP_CONTROL_SHIFT)); \
 } while (0)
 
-#define QPNP_DISABLE_LUT_V1(value, id) \
-do { \
-	(id < 8) ? (value &= ~BIT(id)) : \
-	(value &= (~BIT(id) >> QPNP_RAMP_CONTROL_SHIFT)); \
-} while (0)
-
 /* LPG Control for RAMP_STEP_DURATION_LSB */
 #define QPNP_RAMP_STEP_DURATION_LSB_MASK	0xFF
 
@@ -1018,7 +1012,6 @@
 			QPNP_ENABLE_LUT_V1(value1, pwm->pwm_config.channel_id);
 			value2 = QPNP_ENABLE_LPG_MODE;
 		} else {
-			QPNP_DISABLE_LUT_V1(value1, pwm->pwm_config.channel_id);
 			value2 = QPNP_DISABLE_LPG_MODE;
 		}
 		mask1 = value1;
@@ -1038,8 +1031,10 @@
 	if (rc)
 		return rc;
 
-	return qpnp_lpg_save_and_write(value1, mask1, reg1,
+	if (state == QPNP_LUT_ENABLE || chip->revision == QPNP_LPG_REVISION_0)
+		rc = qpnp_lpg_save_and_write(value1, mask1, reg1,
 					addr1, 1, chip);
+	return rc;
 }
 
 static inline int qpnp_enable_pwm_mode(struct qpnp_pwm_config *pwm_conf)
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index 6be6599..84288a7 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -1539,6 +1539,7 @@
 
 	if (msm_uport->clk_state != MSM_HS_CLK_ON) {
 		MSM_HS_WARN("%s: Failed.Clocks are OFF\n", __func__);
+		return;
 	}
 	if ((msm_uport->tx.tx_ready_int_en == 0) &&
 		(msm_uport->tx.dma_in_flight == 0))
@@ -2132,6 +2133,8 @@
 	case MSM_HS_CLK_ON:
 		break;
 	case MSM_HS_CLK_PORT_OFF:
+		MSM_HS_ERR("%s: Clock ON failed; UART port is closed\n",
+								__func__);
 		break;
 	}
 
@@ -2852,6 +2855,11 @@
 
 	msm_uport = devm_kzalloc(&pdev->dev, sizeof(struct msm_hs_port),
 			GFP_KERNEL);
+	if (!msm_uport) {
+		MSM_HS_ERR("Memory allocation failed\n");
+		return -ENOMEM;
+	}
+
 	msm_uport->uport.type = PORT_UNKNOWN;
 	uport = &msm_uport->uport;
 	uport->dev = &pdev->dev;
@@ -3018,15 +3026,16 @@
 	msm_hs_write(uport, UART_DM_MR2, data);
 	mb();
 
-	msm_uport->clk_state = MSM_HS_CLK_PORT_OFF;
 	hrtimer_init(&msm_uport->clk_off_timer, CLOCK_MONOTONIC,
 		     HRTIMER_MODE_REL);
 	msm_uport->clk_off_timer.function = msm_hs_clk_off_retry;
 	msm_uport->clk_off_delay = ktime_set(0, 1000000);  /* 1ms */
 
 	ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_clock.attr);
-	if (unlikely(ret))
+	if (unlikely(ret)) {
+		MSM_HS_ERR("Probe failed: sysfs file creation failed\n");
 		goto err_clock;
+	}
 
 	msm_serial_debugfs_init(msm_uport, pdev->id);
 
@@ -3036,6 +3045,7 @@
 	ret = uart_add_one_port(&msm_hs_driver, uport);
 	if (!ret) {
 		msm_hs_clock_unvote(msm_uport);
+		msm_uport->clk_state = MSM_HS_CLK_PORT_OFF;
 		return ret;
 	}
 
diff --git a/drivers/video/msm/mdss/mdss_dsi.c b/drivers/video/msm/mdss/mdss_dsi.c
index 99fd195..50e4a01 100644
--- a/drivers/video/msm/mdss/mdss_dsi.c
+++ b/drivers/video/msm/mdss/mdss_dsi.c
@@ -1080,6 +1080,7 @@
 	case MDSS_EVENT_REGISTER_RECOVERY_HANDLER:
 		rc = mdss_dsi_register_recovery_handler(ctrl_pdata,
 			(struct mdss_panel_recovery *)arg);
+		break;
 	case MDSS_EVENT_DSI_DYNAMIC_SWITCH:
 		rc = mdss_dsi_update_panel_config(ctrl_pdata,
 					(int)(unsigned long) arg);
diff --git a/drivers/video/msm/mdss/mdss_hdmi_tx.c b/drivers/video/msm/mdss/mdss_hdmi_tx.c
index 2b409f5..1bf389a 100644
--- a/drivers/video/msm/mdss/mdss_hdmi_tx.c
+++ b/drivers/video/msm/mdss/mdss_hdmi_tx.c
@@ -3340,7 +3340,7 @@
 
 	switch (module_type) {
 	case HDMI_TX_HPD_PM:
-		mp->num_clk = 3;
+		mp->num_clk = 4;
 		mp->clk_config = devm_kzalloc(dev, sizeof(struct dss_clk) *
 			mp->num_clk, GFP_KERNEL);
 		if (!mp->clk_config) {
@@ -3366,10 +3366,14 @@
 		snprintf(mp->clk_config[2].clk_name, 32, "%s", "mdp_core_clk");
 		mp->clk_config[2].type = DSS_CLK_AHB;
 		mp->clk_config[2].rate = 0;
+
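+		/* alt_iface_clk is now voted from HPD_PM (moved out of CORE_PM) */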
+		snprintf(mp->clk_config[3].clk_name, 32, "%s", "alt_iface_clk");
+		mp->clk_config[3].type = DSS_CLK_AHB;
+		mp->clk_config[3].rate = 0;
 		break;
 
 	case HDMI_TX_CORE_PM:
-		mp->num_clk = 2;
+		mp->num_clk = 1;
 		mp->clk_config = devm_kzalloc(dev, sizeof(struct dss_clk) *
 			mp->num_clk, GFP_KERNEL);
 		if (!mp->clk_config) {
@@ -3382,10 +3386,6 @@
 		mp->clk_config[0].type = DSS_CLK_PCLK;
 		/* This rate will be overwritten when core is powered on */
 		mp->clk_config[0].rate = 148500000;
-
-		snprintf(mp->clk_config[1].clk_name, 32, "%s", "alt_iface_clk");
-		mp->clk_config[1].type = DSS_CLK_AHB;
-		mp->clk_config[1].rate = 0;
 		break;
 
 	case HDMI_TX_DDC_PM:
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c b/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
index e619e6b..6808313 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
@@ -405,14 +405,6 @@
 		return;
 	}
 
-	mutex_lock(&ctx->clk_mtx);
-	if (ctx->clk_enabled) {
-		mutex_unlock(&ctx->clk_mtx);
-		pr_warn("Cannot enter ulps mode if DSI clocks are on\n");
-		return;
-	}
-	mutex_unlock(&ctx->clk_mtx);
-
 	if (!ctx->panel_on) {
 		pr_err("Panel is off. skipping ULPS configuration\n");
 		return;
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index 837b276..41a7d9d 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -56,6 +56,7 @@
 static int mdss_mdp_overlay_fb_parse_dt(struct msm_fb_data_type *mfd);
 static int mdss_mdp_overlay_off(struct msm_fb_data_type *mfd);
 static void __overlay_kickoff_requeue(struct msm_fb_data_type *mfd);
+static void __vsync_retire_signal(struct msm_fb_data_type *mfd, int val);
 
 static int mdss_mdp_overlay_sd_ctrl(struct msm_fb_data_type *mfd,
 					unsigned int enable)
@@ -2790,6 +2791,20 @@
 		mdss_mdp_overlay_kickoff(mfd, NULL);
 	}
 
+	/*
+	 * If retire fences are still active wait for a vsync time
+	 * for retire fence to be updated.
+	 * As a last resort signal the timeline if vsync doesn't arrive.
+	 */
+	if (mdp5_data->retire_cnt) {
+		u32 fps = mdss_panel_get_framerate(mfd->panel_info);
+		u32 vsync_time = 1000 / (fps ? : DEFAULT_FRAME_RATE);
+
+		msleep(vsync_time);
+
+		__vsync_retire_signal(mfd, mdp5_data->retire_cnt);
+	}
+
 	rc = mdss_mdp_ctl_stop(mdp5_data->ctl);
 	if (rc == 0) {
 		__mdss_mdp_overlay_free_list_purge(mfd);
@@ -2968,7 +2983,6 @@
 {
 	struct mdss_overlay_private *mdp5_data =
 		container_of(work, typeof(*mdp5_data), retire_work);
-	struct msm_sync_pt_data *sync_pt_data;
 
 	if (!mdp5_data->ctl || !mdp5_data->ctl->mfd)
 		return;
@@ -2976,12 +2990,18 @@
 	if (!mdp5_data->ctl->remove_vsync_handler)
 		return;
 
-	sync_pt_data = &mdp5_data->ctl->mfd->mdp_sync_pt_data;
-	mutex_lock(&sync_pt_data->sync_mutex);
-	if (mdp5_data->retire_cnt > 0) {
-		sw_sync_timeline_inc(mdp5_data->vsync_timeline, 1);
+	__vsync_retire_signal(mdp5_data->ctl->mfd, 1);
+}
 
-		mdp5_data->retire_cnt--;
+static void __vsync_retire_signal(struct msm_fb_data_type *mfd, int val)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+	mutex_lock(&mfd->mdp_sync_pt_data.sync_mutex);
+	if (mdp5_data->retire_cnt > 0) {
+		sw_sync_timeline_inc(mdp5_data->vsync_timeline, val);
+
+		mdp5_data->retire_cnt -= min(val, mdp5_data->retire_cnt);
 		if (mdp5_data->retire_cnt == 0) {
 			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
 			mdp5_data->ctl->remove_vsync_handler(mdp5_data->ctl,
@@ -2989,7 +3009,7 @@
 			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
 		}
 	}
-	mutex_unlock(&sync_pt_data->sync_mutex);
+	mutex_unlock(&mfd->mdp_sync_pt_data.sync_mutex);
 }
 
 static struct sync_fence *
@@ -3015,7 +3035,7 @@
 		return ERR_PTR(-EPERM);
 	}
 
-	if (mdp5_data->retire_cnt == 0) {
+	if (!mdp5_data->vsync_retire_handler.enabled) {
 		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
 		rc = ctl->add_vsync_handler(ctl,
 				&mdp5_data->vsync_retire_handler);
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
old mode 100755
new mode 100644
index c5adf38..117c5de85
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -133,6 +133,7 @@
 header-y += fib_rules.h
 header-y += fiemap.h
 header-y += filter.h
+header-y += fips_status.h
 header-y += firewire-cdev.h
 header-y += firewire-constants.h
 header-y += flat.h
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index 1e15415..c1c440f 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -124,11 +124,11 @@
 
 /* This needs to be modified manually now, when we add
  a new RANGE of SSIDs to the msg_mask_tbl */
-#define MSG_MASK_TBL_CNT		24
-#define EVENT_LAST_ID			0x09F6
+#define MSG_MASK_TBL_CNT		25
+#define EVENT_LAST_ID			0x0A22
 
 #define MSG_SSID_0			0
-#define MSG_SSID_0_LAST			101
+#define MSG_SSID_0_LAST			105
 #define MSG_SSID_1			500
 #define MSG_SSID_1_LAST			506
 #define MSG_SSID_2			1000
@@ -142,7 +142,7 @@
 #define MSG_SSID_6			4500
 #define MSG_SSID_6_LAST			4526
 #define MSG_SSID_7			4600
-#define MSG_SSID_7_LAST			4614
+#define MSG_SSID_7_LAST			4615
 #define MSG_SSID_8			5000
 #define MSG_SSID_8_LAST			5031
 #define MSG_SSID_9			5500
@@ -173,8 +173,10 @@
 #define MSG_SSID_21_LAST		10300
 #define MSG_SSID_22			10350
 #define MSG_SSID_22_LAST		10377
-#define MSG_SSID_23			0xC000
-#define MSG_SSID_23_LAST		0xC063
+#define MSG_SSID_23			10400
+#define MSG_SSID_23_LAST		10414
+#define MSG_SSID_24			0xC000
+#define MSG_SSID_24_LAST		0xC063
 
 struct diagpkt_delay_params {
 	void *rsp_ptr;
@@ -302,7 +304,11 @@
 	MSG_LVL_HIGH,
 	MSG_LVL_LOW,
 	MSG_LVL_HIGH,
-	MSG_LVL_HIGH
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+	MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR,
+	MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR,
+	MSG_LVL_MED|MSG_LVL_HIGH,
+	MSG_LVL_MED|MSG_LVL_HIGH
 };
 
 static const uint32_t msg_bld_masks_1[] = {
@@ -417,7 +423,8 @@
 	MSG_LVL_MED,
 	MSG_LVL_LOW,
 	MSG_LVL_LOW,
-	MSG_LVL_LOW
+	MSG_LVL_LOW,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL
 };
 
 static const uint32_t msg_bld_masks_8[] = {
@@ -742,10 +749,28 @@
 	MSG_LVL_LOW
 };
 
+static const uint32_t msg_bld_masks_23[] = {
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW
+};
+
 /* LOG CODES */
 static const uint32_t log_code_last_tbl[] = {
 	0x0,	/* EQUIP ID 0 */
-	0x184A,	/* EQUIP ID 1 */
+	0x1871,	/* EQUIP ID 1 */
 	0x0,	/* EQUIP ID 2 */
 	0x0,	/* EQUIP ID 3 */
 	0x4910,	/* EQUIP ID 4 */
diff --git a/include/linux/fips_status.h b/include/linux/fips_status.h
new file mode 100644
index 0000000..7daf27b
--- /dev/null
+++ b/include/linux/fips_status.h
@@ -0,0 +1,33 @@
+#ifndef _UAPI_FIPS_STATUS__H
+#define _UAPI_FIPS_STATUS__H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/**
+ * enum fips_status - global FIPS140-2 status
+ * @FIPS140_STATUS_NA:
+ *					Not a FIPS140-2 compliant build.
+ *					The flag status won't change
+ *					throughout the lifetime
+ *					of the build.
+ * @FIPS140_STATUS_PASS_CRYPTO:
+ *					KAT self tests have passed.
+ * @FIPS140_STATUS_QCRYPTO_ALLOWED:
+ *					Integrity test has passed.
+ * @FIPS140_STATUS_PASS:
+ *					All tests have passed and the build
+ *					is in FIPS140-2 mode.
+ * @FIPS140_STATUS_FAIL:
+ *					One of the tests has failed.
+ *					This will block all requests
+ *					to the crypto modules.
+ */
+enum fips_status {
+		FIPS140_STATUS_NA				= 0,
+		FIPS140_STATUS_PASS_CRYPTO		= 1,
+		FIPS140_STATUS_QCRYPTO_ALLOWED	= 2,
+		FIPS140_STATUS_PASS				= 3,
+		FIPS140_STATUS_FAIL				= 0xFF
+};
+#endif /* _UAPI_FIPS_STATUS__H */
diff --git a/include/linux/qcedev.h b/include/linux/qcedev.h
index 87040df..655f2ce 100644
--- a/include/linux/qcedev.h
+++ b/include/linux/qcedev.h
@@ -3,6 +3,7 @@
 
 #include <linux/types.h>
 #include <linux/ioctl.h>
+#include "fips_status.h"
 
 #define QCEDEV_MAX_SHA_BLOCK_SIZE	64
 #define QCEDEV_MAX_BEARER	31
@@ -217,6 +218,16 @@
 	enum qcedev_sha_alg_enum	alg;
 };
 
+/**
+ * struct qfips_verify_t - Holds data for the FIPS integrity test
+ * @kernel_size  (IN):		Size of the kernel image
+ * @kernel       (IN):		Pointer to a buffer containing the kernel image
+ */
+struct qfips_verify_t {
+	unsigned kernel_size;
+	void *kernel;
+};
+
 
 #define QCEDEV_IOC_MAGIC	0x87
 
@@ -238,4 +249,8 @@
 	_IO(QCEDEV_IOC_MAGIC, 8)
 #define QCEDEV_IOCTL_GET_CMAC_REQ	\
 	_IOWR(QCEDEV_IOC_MAGIC, 9, struct qcedev_cipher_op_req)
+#define QCEDEV_IOCTL_UPDATE_FIPS_STATUS		\
+	_IOWR(QCEDEV_IOC_MAGIC, 10, enum fips_status)
+#define QCEDEV_IOCTL_QUERY_FIPS_STATUS	\
+	_IOR(QCEDEV_IOC_MAGIC, 11, enum fips_status)
 #endif /* _QCEDEV__H */
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 20fb776..3ba696b 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -190,6 +190,7 @@
 extern void rcu_idle_exit(void);
 extern void rcu_irq_enter(void);
 extern void rcu_irq_exit(void);
+extern void exit_rcu(void);
 
 /**
  * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index e93df77..4e56a9c 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -91,23 +91,20 @@
 {
 }
 
-static inline void exit_rcu(void)
+static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
 {
-}
-
-static inline int rcu_needs_cpu(int cpu)
-{
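+	/* A delta of ULONG_MAX means no RCU-motivated wakeup is ever needed. */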
+	*delta_jiffies = ULONG_MAX;
 	return 0;
 }
 
 #else /* #ifdef CONFIG_TINY_RCU */
 
 void rcu_preempt_note_context_switch(void);
-extern void exit_rcu(void);
 int rcu_preempt_needs_cpu(void);
 
-static inline int rcu_needs_cpu(int cpu)
+static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
 {
+	*delta_jiffies = ULONG_MAX;
 	return rcu_preempt_needs_cpu();
 }
 
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index e8ee5dd..624e6e9 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -32,7 +32,7 @@
 
 extern void rcu_init(void);
 extern void rcu_note_context_switch(int cpu);
-extern int rcu_needs_cpu(int cpu);
+extern int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies);
 extern void rcu_cpu_stall_reset(void);
 
 /*
@@ -45,18 +45,6 @@
 	rcu_note_context_switch(cpu);
 }
 
-#ifdef CONFIG_TREE_PREEMPT_RCU
-
-extern void exit_rcu(void);
-
-#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-
-static inline void exit_rcu(void)
-{
-}
-
-#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
-
 extern void synchronize_rcu_bh(void);
 extern void synchronize_sched_expedited(void);
 extern void synchronize_rcu_expedited(void);
diff --git a/include/media/msm_cam_sensor.h b/include/media/msm_cam_sensor.h
index d583601..15edaf9 100644
--- a/include/media/msm_cam_sensor.h
+++ b/include/media/msm_cam_sensor.h
@@ -53,6 +53,11 @@
 #define MAX_AF_ITERATIONS 3
 #define MAX_NUMBER_OF_STEPS 47
 
+typedef enum sensor_stats_type {
+	YRGB,
+	YYYY,
+} sensor_stats_type_t;
+
 enum flash_type {
 	LED_FLASH = 1,
 	STROBE_FLASH,
diff --git a/include/media/msmb_pproc.h b/include/media/msmb_pproc.h
index 59dcca9..f5a53a8 100644
--- a/include/media/msmb_pproc.h
+++ b/include/media/msmb_pproc.h
@@ -13,7 +13,8 @@
 
 #define MAX_NUM_CPP_STRIPS 8
 #define MSM_CPP_MAX_NUM_PLANES 3
-#define MSM_CPP_MAX_FRAME_LENGTH 1024
+#define MSM_CPP_MIN_FRAME_LENGTH 13
+#define MSM_CPP_MAX_FRAME_LENGTH 2048
 #define MSM_CPP_MAX_FW_NAME_LEN 32
 #define MAX_FREQ_TBL 10
 
diff --git a/include/sound/q6asm-v2.h b/include/sound/q6asm-v2.h
index 07199e0..ec3038b 100644
--- a/include/sound/q6asm-v2.h
+++ b/include/sound/q6asm-v2.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -165,6 +165,7 @@
 	/* Relative or absolute TS */
 	atomic_t	       time_flag;
 	atomic_t	       nowait_cmd_cnt;
+	atomic_t               mem_state;
 	void		       *priv;
 	uint32_t               io_mode;
 	uint64_t	       time_stamp;
@@ -176,6 +177,7 @@
 	struct audio_port_data port[2];
 	wait_queue_head_t      cmd_wait;
 	wait_queue_head_t      time_wait;
+	wait_queue_head_t      mem_wait;
 	int                    perf_mode;
 	int					   stream_id;
 	/* audio cache operations fptr*/
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 3370997..d274734 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -289,9 +289,12 @@
  *	"In holdoff": Nothing to do, holding off after unsuccessful attempt.
  *	"Begin holdoff": Attempt failed, don't retry until next jiffy.
  *	"Dyntick with callbacks": Entering dyntick-idle despite callbacks.
+ *	"Dyntick with lazy callbacks": Entering dyntick-idle w/lazy callbacks.
  *	"More callbacks": Still more callbacks, try again to clear them out.
  *	"Callbacks drained": All callbacks processed, off to dyntick idle!
  *	"Timer": Timer fired to cause CPU to continue processing callbacks.
+ *	"Demigrate": Timer fired on wrong CPU, woke up correct CPU.
+ *	"Cleanup after idle": Idle exited, timer canceled.
  */
 TRACE_EVENT(rcu_prep_idle,
 
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 5746f18..c1a835c 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -53,6 +53,33 @@
 #include "rcu.h"
 
 module_param(rcu_expedited, int, 0);
+#ifdef CONFIG_PREEMPT_RCU
+
+/*
+ * Check for a task exiting while in a preemptible-RCU read-side
+ * critical section, clean up if so.  No need to issue warnings,
+ * as debug_check_no_locks_held() already does this if lockdep
+ * is enabled.
+ */
+void exit_rcu(void)
+{
+	struct task_struct *t = current;
+
+	if (likely(list_empty(&current->rcu_node_entry)))
+		return;
+	t->rcu_read_lock_nesting = 1;
+	barrier();
+	t->rcu_read_unlock_special = RCU_READ_UNLOCK_BLOCKED;
+	__rcu_read_unlock();
+}
+
+#else /* #ifdef CONFIG_PREEMPT_RCU */
+
+void exit_rcu(void)
+{
+}
+
+#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 static struct lock_class_key rcu_lock_key;
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
index 5a0f324..977250f 100644
--- a/kernel/rcutiny_plugin.h
+++ b/kernel/rcutiny_plugin.h
@@ -854,22 +854,6 @@
 	return rcu_preempt_ctrlblk.rcb.rcucblist != NULL;
 }
 
-/*
- * Check for a task exiting while in a preemptible -RCU read-side
- * critical section, clean up if so.  No need to issue warnings,
- * as debug_check_no_locks_held() already does this if lockdep
- * is enabled.
- */
-void exit_rcu(void)
-{
-	struct task_struct *t = current;
-
-	if (t->rcu_read_lock_nesting == 0)
-		return;
-	t->rcu_read_lock_nesting = 1;
-	__rcu_read_unlock();
-}
-
 #else /* #ifdef CONFIG_TINY_PREEMPT_RCU */
 
 #ifdef CONFIG_RCU_TRACE
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index e269782..aaf798b 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -75,6 +75,8 @@
 	.gpnum = -300, \
 	.completed = -300, \
 	.onofflock = __RAW_SPIN_LOCK_UNLOCKED(&structname##_state.onofflock), \
+	.orphan_nxttail = &structname##_state.orphan_nxtlist, \
+	.orphan_donetail = &structname##_state.orphan_donelist, \
 	.fqslock = __RAW_SPIN_LOCK_UNLOCKED(&structname##_state.fqslock), \
 	.n_force_qs = 0, \
 	.n_force_qs_ngp = 0, \
@@ -145,6 +147,13 @@
 unsigned long rcutorture_testseq;
 unsigned long rcutorture_vernum;
 
+/* State information for rcu_barrier() and friends. */
+
+static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
+static atomic_t rcu_barrier_cpu_count;
+static DEFINE_MUTEX(rcu_barrier_mutex);
+static struct completion rcu_barrier_completion;
+
 /*
  * Return true if an RCU grace period is in progress.  The ACCESS_ONCE()s
  * permit this function to be invoked without holding the root rcu_node
@@ -1313,95 +1322,133 @@
 #ifdef CONFIG_HOTPLUG_CPU
 
 /*
- * Move a dying CPU's RCU callbacks to online CPU's callback list.
- * Also record a quiescent state for this CPU for the current grace period.
- * Synchronization and interrupt disabling are not required because
- * this function executes in stop_machine() context.  Therefore, cleanup
- * operations that might block must be done later from the CPU_DEAD
- * notifier.
- *
- * Note that the outgoing CPU's bit has already been cleared in the
- * cpu_online_mask.  This allows us to randomly pick a callback
- * destination from the bits set in that mask.
+ * Send the specified CPU's RCU callbacks to the orphanage.  The
+ * specified CPU must be offline, and the caller must hold the
+ * ->onofflock.
  */
-static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
+static void
+rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
+			  struct rcu_node *rnp, struct rcu_data *rdp)
 {
 	int i;
-	unsigned long mask;
-	int receive_cpu = cpumask_any(cpu_online_mask);
-	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
-	struct rcu_data *receive_rdp = per_cpu_ptr(rsp->rda, receive_cpu);
-	RCU_TRACE(struct rcu_node *rnp = rdp->mynode); /* For dying CPU. */
 
-	/* First, adjust the counts. */
+	/*
+	 * Orphan the callbacks.  First adjust the counts.  This is safe
+	 * because ->onofflock excludes _rcu_barrier()'s adoption of
+	 * the callbacks, thus no memory barrier is required.
+	 */
 	if (rdp->nxtlist != NULL) {
-		receive_rdp->qlen_lazy += rdp->qlen_lazy;
-		receive_rdp->qlen += rdp->qlen;
+		rsp->qlen_lazy += rdp->qlen_lazy;
+		rsp->qlen += rdp->qlen;
+		rdp->n_cbs_orphaned += rdp->qlen;
 		rdp->qlen_lazy = 0;
 		rdp->qlen = 0;
 	}
 
 	/*
-	 * Next, move ready-to-invoke callbacks to be invoked on some
-	 * other CPU.  These will not be required to pass through another
-	 * grace period:  They are done, regardless of CPU.
+	 * Next, move those callbacks still needing a grace period to
+	 * the orphanage, where some other CPU will pick them up.
+	 * Some of the callbacks might have gone partway through a grace
+	 * period, but that is too bad.  They get to start over because we
+	 * cannot assume that grace periods are synchronized across CPUs.
+	 * We don't bother updating the ->nxttail[] array yet, instead
+	 * we just reset the whole thing later on.
 	 */
-	if (rdp->nxtlist != NULL &&
-	    rdp->nxttail[RCU_DONE_TAIL] != &rdp->nxtlist) {
-		struct rcu_head *oldhead;
-		struct rcu_head **oldtail;
-		struct rcu_head **newtail;
-
-		oldhead = rdp->nxtlist;
-		oldtail = receive_rdp->nxttail[RCU_DONE_TAIL];
-		rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
-		*rdp->nxttail[RCU_DONE_TAIL] = *oldtail;
-		*receive_rdp->nxttail[RCU_DONE_TAIL] = oldhead;
-		newtail = rdp->nxttail[RCU_DONE_TAIL];
-		for (i = RCU_DONE_TAIL; i < RCU_NEXT_SIZE; i++) {
-			if (receive_rdp->nxttail[i] == oldtail)
-				receive_rdp->nxttail[i] = newtail;
-			if (rdp->nxttail[i] == newtail)
-				rdp->nxttail[i] = &rdp->nxtlist;
-		}
+	if (*rdp->nxttail[RCU_DONE_TAIL] != NULL) {
+		*rsp->orphan_nxttail = *rdp->nxttail[RCU_DONE_TAIL];
+		rsp->orphan_nxttail = rdp->nxttail[RCU_NEXT_TAIL];
+		*rdp->nxttail[RCU_DONE_TAIL] = NULL;
 	}
 
 	/*
-	 * Finally, put the rest of the callbacks at the end of the list.
-	 * The ones that made it partway through get to start over:  We
-	 * cannot assume that grace periods are synchronized across CPUs.
-	 * (We could splice RCU_WAIT_TAIL into RCU_NEXT_READY_TAIL, but
-	 * this does not seem compelling.  Not yet, anyway.)
+	 * Then move the ready-to-invoke callbacks to the orphanage,
+	 * where some other CPU will pick them up.  These will not be
+	 * required to pass through another grace period: They are done.
 	 */
 	if (rdp->nxtlist != NULL) {
-		*receive_rdp->nxttail[RCU_NEXT_TAIL] = rdp->nxtlist;
-		receive_rdp->nxttail[RCU_NEXT_TAIL] =
-				rdp->nxttail[RCU_NEXT_TAIL];
-		receive_rdp->n_cbs_adopted += rdp->qlen;
-		rdp->n_cbs_orphaned += rdp->qlen;
-
-		rdp->nxtlist = NULL;
-		for (i = 0; i < RCU_NEXT_SIZE; i++)
-			rdp->nxttail[i] = &rdp->nxtlist;
+		*rsp->orphan_donetail = rdp->nxtlist;
+		rsp->orphan_donetail = rdp->nxttail[RCU_DONE_TAIL];
 	}
 
+	/* Finally, initialize the rcu_data structure's list to empty.  */
+	rdp->nxtlist = NULL;
+	for (i = 0; i < RCU_NEXT_SIZE; i++)
+		rdp->nxttail[i] = &rdp->nxtlist;
+}
+
+/*
+ * Adopt the RCU callbacks from the specified rcu_state structure's
+ * orphanage.  The caller must hold the ->onofflock.
+ */
+static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
+{
+	int i;
+	struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
+
 	/*
-	 * Record a quiescent state for the dying CPU.  This is safe
-	 * only because we have already cleared out the callbacks.
-	 * (Otherwise, the RCU core might try to schedule the invocation
-	 * of callbacks on this now-offline CPU, which would be bad.)
+	 * If there is an rcu_barrier() operation in progress, then
+	 * only the task doing that operation is permitted to adopt
+	 * callbacks.  To do otherwise breaks rcu_barrier() and friends
+	 * by causing them to fail to wait for the callbacks in the
+	 * orphanage.
 	 */
-	mask = rdp->grpmask;	/* rnp->grplo is constant. */
+	if (rsp->rcu_barrier_in_progress &&
+	    rsp->rcu_barrier_in_progress != current)
+		return;
+
+	/* Do the accounting first. */
+	rdp->qlen_lazy += rsp->qlen_lazy;
+	rdp->qlen += rsp->qlen;
+	rdp->n_cbs_adopted += rsp->qlen;
+	rsp->qlen_lazy = 0;
+	rsp->qlen = 0;
+
+	/*
+	 * We do not need a memory barrier here because the only way we
+	 * can get here if there is an rcu_barrier() in flight is if
+	 * we are the task doing the rcu_barrier().
+	 */
+
+	/* First adopt the ready-to-invoke callbacks. */
+	if (rsp->orphan_donelist != NULL) {
+		*rsp->orphan_donetail = *rdp->nxttail[RCU_DONE_TAIL];
+		*rdp->nxttail[RCU_DONE_TAIL] = rsp->orphan_donelist;
+		for (i = RCU_NEXT_SIZE - 1; i >= RCU_DONE_TAIL; i--)
+			if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
+				rdp->nxttail[i] = rsp->orphan_donetail;
+		rsp->orphan_donelist = NULL;
+		rsp->orphan_donetail = &rsp->orphan_donelist;
+	}
+
+	/* And then adopt the callbacks that still need a grace period. */
+	if (rsp->orphan_nxtlist != NULL) {
+		*rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxtlist;
+		rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxttail;
+		rsp->orphan_nxtlist = NULL;
+		rsp->orphan_nxttail = &rsp->orphan_nxtlist;
+	}
+}
+
+/*
+ * Trace the fact that this CPU is going offline.
+ */
+static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
+{
+	RCU_TRACE(unsigned long mask);
+	RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda));
+	RCU_TRACE(struct rcu_node *rnp = rdp->mynode);
+
+	RCU_TRACE(mask = rdp->grpmask);
 	trace_rcu_grace_period(rsp->name,
 			       rnp->gpnum + 1 - !!(rnp->qsmask & mask),
 			       "cpuofl");
-	rcu_report_qs_rdp(smp_processor_id(), rsp, rdp, rsp->gpnum);
-	/* Note that rcu_report_qs_rdp() might call trace_rcu_grace_period(). */
 }
 
 /*
  * The CPU has been completely removed, and some other CPU is reporting
- * this fact from process context.  Do the remainder of the cleanup.
+ * this fact from process context.  Do the remainder of the cleanup,
+ * including orphaning the outgoing CPU's RCU callbacks, and also
+ * adopting them, if there is no _rcu_barrier() instance running.
  * There can only be one CPU hotplug operation at a time, so no other
  * CPU can be attempting to update rcu_cpu_kthread_task.
  */
@@ -1411,17 +1458,21 @@
 	unsigned long mask;
 	int need_report = 0;
 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
-	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rnp. */
+	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
 
 	/* Adjust any no-longer-needed kthreads. */
 	rcu_stop_cpu_kthread(cpu);
 	rcu_node_kthread_setaffinity(rnp, -1);
 
-	/* Remove the dying CPU from the bitmasks in the rcu_node hierarchy. */
+	/* Remove the dead CPU from the bitmasks in the rcu_node hierarchy. */
 
 	/* Exclude any attempts to start a new grace period. */
 	raw_spin_lock_irqsave(&rsp->onofflock, flags);
 
+	/* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
+	rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
+	rcu_adopt_orphan_cbs(rsp);
+
 	/* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
 	mask = rdp->grpmask;	/* rnp->grplo is constant. */
 	do {
@@ -1458,6 +1509,10 @@
 
 #else /* #ifdef CONFIG_HOTPLUG_CPU */
 
+static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
+{
+}
+
 static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
 {
 }
@@ -1476,7 +1531,7 @@
 {
 	unsigned long flags;
 	struct rcu_head *next, *list, **tail;
-	int bl, count, count_lazy;
+	int bl, count, count_lazy, i;
 
 	/* If no callbacks are ready, just return.*/
 	if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
@@ -1499,9 +1554,9 @@
 	rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
 	*rdp->nxttail[RCU_DONE_TAIL] = NULL;
 	tail = rdp->nxttail[RCU_DONE_TAIL];
-	for (count = RCU_NEXT_SIZE - 1; count >= 0; count--)
-		if (rdp->nxttail[count] == rdp->nxttail[RCU_DONE_TAIL])
-			rdp->nxttail[count] = &rdp->nxtlist;
+	for (i = RCU_NEXT_SIZE - 1; i >= 0; i--)
+		if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
+			rdp->nxttail[i] = &rdp->nxtlist;
 	local_irq_restore(flags);
 
 	/* Invoke callbacks. */
@@ -1526,18 +1581,19 @@
 			    rcu_is_callbacks_kthread());
 
 	/* Update count, and requeue any remaining callbacks. */
-	rdp->qlen_lazy -= count_lazy;
-	rdp->qlen -= count;
-	rdp->n_cbs_invoked += count;
 	if (list != NULL) {
 		*tail = rdp->nxtlist;
 		rdp->nxtlist = list;
-		for (count = 0; count < RCU_NEXT_SIZE; count++)
-			if (&rdp->nxtlist == rdp->nxttail[count])
-				rdp->nxttail[count] = tail;
+		for (i = 0; i < RCU_NEXT_SIZE; i++)
+			if (&rdp->nxtlist == rdp->nxttail[i])
+				rdp->nxttail[i] = tail;
 			else
 				break;
 	}
+	smp_mb(); /* List handling before counting for rcu_barrier(). */
+	rdp->qlen_lazy -= count_lazy;
+	rdp->qlen -= count;
+	rdp->n_cbs_invoked += count;
 
 	/* Reinstate batch limit if we have worked down the excess. */
 	if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
@@ -1825,11 +1881,14 @@
 	rdp = this_cpu_ptr(rsp->rda);
 
 	/* Add the callback to our list. */
-	*rdp->nxttail[RCU_NEXT_TAIL] = head;
-	rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
 	rdp->qlen++;
 	if (lazy)
 		rdp->qlen_lazy++;
+	else
+		rcu_idle_count_callbacks_posted();
+	smp_mb();  /* Count before adding callback for rcu_barrier(). */
+	*rdp->nxttail[RCU_NEXT_TAIL] = head;
+	rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
 
 	if (__is_kfree_rcu_offset((unsigned long)func))
 		trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
@@ -2174,11 +2233,10 @@
 	       rcu_preempt_cpu_has_callbacks(cpu);
 }
 
-static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
-static atomic_t rcu_barrier_cpu_count;
-static DEFINE_MUTEX(rcu_barrier_mutex);
-static struct completion rcu_barrier_completion;
-
+/*
+ * RCU callback function for _rcu_barrier().  If we are last, wake
+ * up the task executing _rcu_barrier().
+ */
 static void rcu_barrier_callback(struct rcu_head *notused)
 {
 	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
@@ -2208,27 +2266,94 @@
 			 void (*call_rcu_func)(struct rcu_head *head,
 					       void (*func)(struct rcu_head *head)))
 {
-	BUG_ON(in_interrupt());
+	int cpu;
+	unsigned long flags;
+	struct rcu_data *rdp;
+	struct rcu_head rh;
+
+	init_rcu_head_on_stack(&rh);
+
 	/* Take mutex to serialize concurrent rcu_barrier() requests. */
 	mutex_lock(&rcu_barrier_mutex);
-	init_completion(&rcu_barrier_completion);
+
+	smp_mb();  /* Prevent any prior operations from leaking in. */
+
 	/*
-	 * Initialize rcu_barrier_cpu_count to 1, then invoke
-	 * rcu_barrier_func() on each CPU, so that each CPU also has
-	 * incremented rcu_barrier_cpu_count.  Only then is it safe to
-	 * decrement rcu_barrier_cpu_count -- otherwise the first CPU
-	 * might complete its grace period before all of the other CPUs
-	 * did their increment, causing this function to return too
-	 * early.  Note that on_each_cpu() disables irqs, which prevents
-	 * any CPUs from coming online or going offline until each online
-	 * CPU has queued its RCU-barrier callback.
+	 * Initialize the count to one rather than to zero in order to
+	 * avoid a too-soon return to zero in case of a short grace period
+	 * (or preemption of this task).  Also flag this task as doing
+	 * an rcu_barrier().  This will prevent anyone else from adopting
+	 * orphaned callbacks, which could otherwise cause failure if a
+	 * CPU went offline and quickly came back online.  To see this,
+	 * consider the following sequence of events:
+	 *
+	 * 1.	We cause CPU 0 to post an rcu_barrier_callback() callback.
+	 * 2.	CPU 1 goes offline, orphaning its callbacks.
+	 * 3.	CPU 0 adopts CPU 1's orphaned callbacks.
+	 * 4.	CPU 1 comes back online.
+	 * 5.	We cause CPU 1 to post an rcu_barrier_callback() callback.
+	 * 6.	Both rcu_barrier_callback() callbacks are invoked, awakening
+	 *	us -- but before CPU 1's orphaned callbacks are invoked!!!
 	 */
+	init_completion(&rcu_barrier_completion);
 	atomic_set(&rcu_barrier_cpu_count, 1);
-	on_each_cpu(rcu_barrier_func, (void *)call_rcu_func, 1);
+	raw_spin_lock_irqsave(&rsp->onofflock, flags);
+	rsp->rcu_barrier_in_progress = current;
+	raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
+
+	/*
+	 * Force every CPU with callbacks to register a new callback
+	 * that will tell us when all the preceding callbacks have
+	 * been invoked.  If an offline CPU has callbacks, wait for
+	 * it to either come back online or to finish orphaning those
+	 * callbacks.
+	 */
+	for_each_possible_cpu(cpu) {
+		preempt_disable();
+		rdp = per_cpu_ptr(rsp->rda, cpu);
+		if (cpu_is_offline(cpu)) {
+			preempt_enable();
+			while (cpu_is_offline(cpu) && ACCESS_ONCE(rdp->qlen))
+				schedule_timeout_interruptible(1);
+		} else if (ACCESS_ONCE(rdp->qlen)) {
+			smp_call_function_single(cpu, rcu_barrier_func,
+						 (void *)call_rcu_func, 1);
+			preempt_enable();
+		} else {
+			preempt_enable();
+		}
+	}
+
+	/*
+	 * Now that all online CPUs have rcu_barrier_callback() callbacks
+	 * posted, we can adopt all of the orphaned callbacks and place
+	 * an rcu_barrier_callback() callback after them.  When that is done,
+	 * we are guaranteed to have an rcu_barrier_callback() callback
+	 * following every callback that could possibly have been
+	 * registered before _rcu_barrier() was called.
+	 */
+	raw_spin_lock_irqsave(&rsp->onofflock, flags);
+	rcu_adopt_orphan_cbs(rsp);
+	rsp->rcu_barrier_in_progress = NULL;
+	raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
+	atomic_inc(&rcu_barrier_cpu_count);
+	smp_mb__after_atomic_inc(); /* Ensure atomic_inc() before callback. */
+	call_rcu_func(&rh, rcu_barrier_callback);
+
+	/*
+	 * Now that we have an rcu_barrier_callback() callback on each
+	 * CPU, and thus each counted, remove the initial count.
+	 */
 	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
 		complete(&rcu_barrier_completion);
+
+	/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
 	wait_for_completion(&rcu_barrier_completion);
+
+	/* Other rcu_barrier() invocations can now safely proceed. */
 	mutex_unlock(&rcu_barrier_mutex);
+
+	destroy_rcu_head_on_stack(&rh);
 }
 
 /**
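
Aside: the count-starts-at-one trick used by _rcu_barrier() above is easy to get wrong.  Below is a minimal userspace sketch of the same pattern (an analogue of the rcu_barrier_cpu_count / rcu_barrier_completion pair) using pthreads and C11 atomics; all names are illustrative, not kernel code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int barrier_count;
static pthread_mutex_t done_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done_cv = PTHREAD_COND_INITIALIZER;
static int done;

/* Analogue of rcu_barrier_callback(): the last decrement signals completion. */
static void barrier_callback(void)
{
	if (atomic_fetch_sub(&barrier_count, 1) == 1) {
		pthread_mutex_lock(&done_lock);
		done = 1;
		pthread_cond_signal(&done_cv);
		pthread_mutex_unlock(&done_lock);
	}
}

static void *worker(void *arg)
{
	/* ... pretend to invoke previously queued callbacks ... */
	barrier_callback();
	return NULL;
}

int main(void)
{
	pthread_t tids[4];
	int i;

	/*
	 * Start at 1, not 0: if an early worker finished before the rest
	 * were posted, the count could reach zero too soon.
	 */
	atomic_store(&barrier_count, 1);
	for (i = 0; i < 4; i++) {
		atomic_fetch_add(&barrier_count, 1);	/* count before create */
		pthread_create(&tids[i], NULL, worker, NULL);
	}
	barrier_callback();	/* drop the initial self-reference, then wait */
	pthread_mutex_lock(&done_lock);
	while (!done)
		pthread_cond_wait(&done_cv, &done_lock);
	pthread_mutex_unlock(&done_lock);
	for (i = 0; i < 4; i++)
		pthread_join(tids[i], NULL);
	puts("barrier complete");
	return 0;
}
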
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index cdd1be0..aa5676b 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -88,6 +88,20 @@
 				    /* Process level is worth LLONG_MAX/2. */
 	int dynticks_nmi_nesting;   /* Track NMI nesting level. */
 	atomic_t dynticks;	    /* Even value for idle, else odd. */
+#ifdef CONFIG_RCU_FAST_NO_HZ
+	int dyntick_drain;	    /* Prepare-for-idle state variable. */
+	unsigned long dyntick_holdoff;
+				    /* No retries for the jiffy of failure. */
+	struct timer_list idle_gp_timer;
+				    /* Wake up CPU sleeping with callbacks. */
+	unsigned long idle_gp_timer_expires;
+				    /* When to wake up CPU (for repost). */
+	bool idle_first_pass;	    /* First pass of attempt to go idle? */
+	unsigned long nonlazy_posted;
+				    /* # times non-lazy CBs posted to CPU. */
+	unsigned long nonlazy_posted_snap;
+				    /* idle-period nonlazy_posted snapshot. */
+#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
 };
 
 /* RCU's kthread states for tracing. */
@@ -371,6 +385,17 @@
 
 	raw_spinlock_t onofflock;		/* exclude on/offline and */
 						/*  starting new GP. */
+	struct rcu_head *orphan_nxtlist;	/* Orphaned callbacks that */
+						/*  need a grace period. */
+	struct rcu_head **orphan_nxttail;	/* Tail of above. */
+	struct rcu_head *orphan_donelist;	/* Orphaned callbacks that */
+						/*  are ready to invoke. */
+	struct rcu_head **orphan_donetail;	/* Tail of above. */
+	long qlen_lazy;				/* Number of lazy callbacks. */
+	long qlen;				/* Total number of callbacks. */
+	struct task_struct *rcu_barrier_in_progress;
+						/* Task doing rcu_barrier(), */
+						/*  or NULL if no barrier. */
 	raw_spinlock_t fqslock;			/* Only one task forcing */
 						/*  quiescent states. */
 	unsigned long jiffies_force_qs;		/* Time at which to invoke */
@@ -471,6 +496,7 @@
 static void rcu_prepare_for_idle_init(int cpu);
 static void rcu_cleanup_after_idle(int cpu);
 static void rcu_prepare_for_idle(int cpu);
+static void rcu_idle_count_callbacks_posted(void);
 static void print_cpu_stall_info_begin(void);
 static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
 static void print_cpu_stall_info_end(void);
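
Aside: the orphan_nxtlist/orphan_nxttail and orphan_donelist/orphan_donetail pairs added above follow RCU's usual head-plus-tail-pointer list convention, which gives O(1) append and O(1) splice.  A minimal userspace sketch of that convention, with illustrative names:

#include <stddef.h>
#include <stdio.h>

struct rcu_head_demo {
	struct rcu_head_demo *next;
};

/* Empty list: head is NULL and the tail points at the head pointer itself. */
static struct rcu_head_demo *orphan_list;
static struct rcu_head_demo **orphan_tail = &orphan_list;

static void enqueue(struct rcu_head_demo *rhp)
{
	rhp->next = NULL;
	*orphan_tail = rhp;		/* link after the current last element */
	orphan_tail = &rhp->next;	/* tail now points at its ->next field */
}

/* Splice the whole orphan list onto a destination list in O(1). */
static void adopt(struct rcu_head_demo ***dst_tail)
{
	if (orphan_list == NULL)
		return;
	**dst_tail = orphan_list;	/* append at destination's tail */
	*dst_tail = orphan_tail;	/* destination inherits our tail */
	orphan_list = NULL;		/* reset to the empty state */
	orphan_tail = &orphan_list;
}

int main(void)
{
	struct rcu_head_demo a, b;
	struct rcu_head_demo *done_list = NULL;
	struct rcu_head_demo **done_tail = &done_list;

	enqueue(&a);
	enqueue(&b);
	adopt(&done_tail);
	printf("adopted in order: %d\n", done_list == &a && a.next == &b);
	return 0;
}
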
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index beafb9c..9e501bd 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -972,22 +972,6 @@
 	rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
 }
 
-/*
- * Check for a task exiting while in a preemptible-RCU read-side
- * critical section, clean up if so.  No need to issue warnings,
- * as debug_check_no_locks_held() already does this if lockdep
- * is enabled.
- */
-void exit_rcu(void)
-{
-	struct task_struct *t = current;
-
-	if (t->rcu_read_lock_nesting == 0)
-		return;
-	t->rcu_read_lock_nesting = 1;
-	__rcu_read_unlock();
-}
-
 #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 
 static struct rcu_state *rcu_state = &rcu_sched_state;
@@ -1913,8 +1897,9 @@
 * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
  * any flavor of RCU.
  */
-int rcu_needs_cpu(int cpu)
+int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
 {
+	*delta_jiffies = ULONG_MAX;
 	return rcu_cpu_has_callbacks(cpu);
 }
 
@@ -1941,6 +1926,14 @@
 {
 }
 
+/*
+ * Don't bother keeping a running count of the number of RCU callbacks
+ * posted because CONFIG_RCU_FAST_NO_HZ=n.
+ */
+static void rcu_idle_count_callbacks_posted(void)
+{
+}
+
 #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
 
 /*
@@ -1981,30 +1974,6 @@
 #define RCU_IDLE_GP_DELAY 6		/* Roughly one grace period. */
 #define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)	/* Roughly six seconds. */
 
-static DEFINE_PER_CPU(int, rcu_dyntick_drain);
-static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
-static DEFINE_PER_CPU(struct hrtimer, rcu_idle_gp_timer);
-static ktime_t rcu_idle_gp_wait;	/* If some non-lazy callbacks. */
-static ktime_t rcu_idle_lazy_gp_wait;	/* If only lazy callbacks. */
-
-/*
- * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
- * callbacks on this CPU, (2) this CPU has not yet attempted to enter
- * dyntick-idle mode, or (3) this CPU is in the process of attempting to
- * enter dyntick-idle mode.  Otherwise, if we have recently tried and failed
- * to enter dyntick-idle mode, we refuse to try to enter it.  After all,
- * it is better to incur scheduling-clock interrupts than to spin
- * continuously for the same time duration!
- */
-int rcu_needs_cpu(int cpu)
-{
-	/* If no callbacks, RCU doesn't need the CPU. */
-	if (!rcu_cpu_has_callbacks(cpu))
-		return 0;
-	/* Otherwise, RCU needs the CPU only if it recently tried and failed. */
-	return per_cpu(rcu_dyntick_holdoff, cpu) == jiffies;
-}
-
 /*
  * Does the specified flavor of RCU have non-lazy callbacks pending on
  * the specified CPU?  Both RCU flavor and CPU are specified by the
@@ -2048,16 +2017,75 @@
 }
 
 /*
+ * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
+ * callbacks on this CPU, (2) this CPU has not yet attempted to enter
+ * dyntick-idle mode, or (3) this CPU is in the process of attempting to
+ * enter dyntick-idle mode.  Otherwise, if we have recently tried and failed
+ * to enter dyntick-idle mode, we refuse to try to enter it.  After all,
+ * it is better to incur scheduling-clock interrupts than to spin
+ * continuously for the same time duration!
+ *
+ * The delta_jiffies argument is used to store the time when RCU is
+ * going to need the CPU again if it still has callbacks.  The reason
+ * for this is that rcu_prepare_for_idle() might need to post a timer,
+ * but if so, it will do so after tick_nohz_stop_sched_tick() has set
+ * the wakeup time for this CPU.  This means that RCU's timer can be
+ * delayed until the wakeup time, which defeats the purpose of posting
+ * a timer.
+ */
+int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
+{
+	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+	/* Flag a new idle sojourn to the idle-entry state machine. */
+	rdtp->idle_first_pass = 1;
+	/* If no callbacks, RCU doesn't need the CPU. */
+	if (!rcu_cpu_has_callbacks(cpu)) {
+		*delta_jiffies = ULONG_MAX;
+		return 0;
+	}
+	if (rdtp->dyntick_holdoff == jiffies) {
+		/* RCU recently tried and failed, so don't try again. */
+		*delta_jiffies = 1;
+		return 1;
+	}
+	/* Set up for the possibility that RCU will post a timer. */
+	if (rcu_cpu_has_nonlazy_callbacks(cpu))
+		*delta_jiffies = RCU_IDLE_GP_DELAY;
+	else
+		*delta_jiffies = RCU_IDLE_LAZY_GP_DELAY;
+	return 0;
+}
+
+/*
+ * Handler for smp_call_function_single().  The only point of this
+ * handler is to wake the CPU up, so the handler does only tracing.
+ */
+void rcu_idle_demigrate(void *unused)
+{
+	trace_rcu_prep_idle("Demigrate");
+}
+
+/*
  * Timer handler used to force CPU to start pushing its remaining RCU
  * callbacks in the case where it entered dyntick-idle mode with callbacks
 * pending.  The handler doesn't really need to do anything because the
  * real work is done upon re-entry to idle, or by the next scheduling-clock
  * interrupt should idle not be re-entered.
+ *
+ * One special case: the timer gets migrated without awakening the CPU
+ * on which the timer was scheduled.  In this case, we must wake up
+ * that CPU.  We do so with smp_call_function_single().
  */
-static enum hrtimer_restart rcu_idle_gp_timer_func(struct hrtimer *hrtp)
+static void rcu_idle_gp_timer_func(unsigned long cpu_in)
 {
+	int cpu = (int)cpu_in;
+
 	trace_rcu_prep_idle("Timer");
-	return HRTIMER_NORESTART;
+	if (cpu != smp_processor_id())
+		smp_call_function_single(cpu, rcu_idle_demigrate, NULL, 0);
+	else
+		WARN_ON_ONCE(1); /* Getting here can hang the system... */
 }
 
 /*
@@ -2065,29 +2093,25 @@
  */
 static void rcu_prepare_for_idle_init(int cpu)
 {
-	static int firsttime = 1;
-	struct hrtimer *hrtp = &per_cpu(rcu_idle_gp_timer, cpu);
+	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
 
-	hrtimer_init(hrtp, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	hrtp->function = rcu_idle_gp_timer_func;
-	if (firsttime) {
-		unsigned int upj = jiffies_to_usecs(RCU_IDLE_GP_DELAY);
-
-		rcu_idle_gp_wait = ns_to_ktime(upj * (u64)1000);
-		upj = jiffies_to_usecs(RCU_IDLE_LAZY_GP_DELAY);
-		rcu_idle_lazy_gp_wait = ns_to_ktime(upj * (u64)1000);
-		firsttime = 0;
-	}
+	rdtp->dyntick_holdoff = jiffies - 1;
+	setup_timer(&rdtp->idle_gp_timer, rcu_idle_gp_timer_func, cpu);
+	rdtp->idle_gp_timer_expires = jiffies - 1;
+	rdtp->idle_first_pass = 1;
 }
 
 /*
  * Clean up for exit from idle.  Because we are exiting from idle, there
- * is no longer any point to rcu_idle_gp_timer, so cancel it.  This will
+ * is no longer any point to ->idle_gp_timer, so cancel it.  This will
  * do nothing if this timer is not active, so just cancel it unconditionally.
  */
 static void rcu_cleanup_after_idle(int cpu)
 {
-	hrtimer_cancel(&per_cpu(rcu_idle_gp_timer, cpu));
+	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+	del_timer(&rdtp->idle_gp_timer);
+	trace_rcu_prep_idle("Cleanup after idle");
 }
 
 /*
@@ -2105,19 +2129,41 @@
  * Because it is not legal to invoke rcu_process_callbacks() with irqs
 * disabled, we do one pass of force_quiescent_state(), then do an
  * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
- * later.  The per-cpu rcu_dyntick_drain variable controls the sequencing.
+ * later.  The ->dyntick_drain field controls the sequencing.
  *
  * The caller must have disabled interrupts.
  */
 static void rcu_prepare_for_idle(int cpu)
 {
+	struct timer_list *tp;
+	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+	/*
+	 * If this is an idle re-entry, for example, due to use of
+	 * RCU_NONIDLE() or the new idle-loop tracing API within the idle
+	 * loop, then don't take any state-machine actions, unless the
+	 * momentary exit from idle queued additional non-lazy callbacks.
+	 * Instead, repost the ->idle_gp_timer if this CPU has callbacks
+	 * pending.
+	 */
+	if (!rdtp->idle_first_pass &&
+	    (rdtp->nonlazy_posted == rdtp->nonlazy_posted_snap)) {
+		if (rcu_cpu_has_callbacks(cpu)) {
+			tp = &rdtp->idle_gp_timer;
+			mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
+		}
+		return;
+	}
+	rdtp->idle_first_pass = 0;
+	rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted - 1;
+
 	/*
 	 * If there are no callbacks on this CPU, enter dyntick-idle mode.
 	 * Also reset state to avoid prejudicing later attempts.
 	 */
 	if (!rcu_cpu_has_callbacks(cpu)) {
-		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
-		per_cpu(rcu_dyntick_drain, cpu) = 0;
+		rdtp->dyntick_holdoff = jiffies - 1;
+		rdtp->dyntick_drain = 0;
 		trace_rcu_prep_idle("No callbacks");
 		return;
 	}
@@ -2126,32 +2172,37 @@
 	 * If in holdoff mode, just return.  We will presumably have
 	 * refrained from disabling the scheduling-clock tick.
 	 */
-	if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) {
+	if (rdtp->dyntick_holdoff == jiffies) {
 		trace_rcu_prep_idle("In holdoff");
 		return;
 	}
 
-	/* Check and update the rcu_dyntick_drain sequencing. */
-	if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
+	/* Check and update the ->dyntick_drain sequencing. */
+	if (rdtp->dyntick_drain <= 0) {
 		/* First time through, initialize the counter. */
-		per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES;
-	} else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES &&
+		rdtp->dyntick_drain = RCU_IDLE_FLUSHES;
+	} else if (rdtp->dyntick_drain <= RCU_IDLE_OPT_FLUSHES &&
 		   !rcu_pending(cpu) &&
 		   !local_softirq_pending()) {
 		/* Can we go dyntick-idle despite still having callbacks? */
-		trace_rcu_prep_idle("Dyntick with callbacks");
-		per_cpu(rcu_dyntick_drain, cpu) = 0;
-		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
-		if (rcu_cpu_has_nonlazy_callbacks(cpu))
-			hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
-				      rcu_idle_gp_wait, HRTIMER_MODE_REL);
-		else
-			hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
-				      rcu_idle_lazy_gp_wait, HRTIMER_MODE_REL);
+		rdtp->dyntick_drain = 0;
+		rdtp->dyntick_holdoff = jiffies;
+		if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
+			trace_rcu_prep_idle("Dyntick with callbacks");
+			rdtp->idle_gp_timer_expires =
+					   jiffies + RCU_IDLE_GP_DELAY;
+		} else {
+			rdtp->idle_gp_timer_expires =
+					   jiffies + RCU_IDLE_LAZY_GP_DELAY;
+			trace_rcu_prep_idle("Dyntick with lazy callbacks");
+		}
+		tp = &rdtp->idle_gp_timer;
+		mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
+		rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
 		return; /* Nothing more to do immediately. */
-	} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
+	} else if (--(rdtp->dyntick_drain) <= 0) {
 		/* We have hit the limit, so time to give up. */
-		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
+		rdtp->dyntick_holdoff = jiffies;
 		trace_rcu_prep_idle("Begin holdoff");
 		invoke_rcu_core();  /* Force the CPU out of dyntick-idle. */
 		return;
@@ -2187,6 +2238,19 @@
 		trace_rcu_prep_idle("Callbacks drained");
 }
 
+/*
+ * Keep a running count of the number of non-lazy callbacks posted
+ * on this CPU.  This running counter (which is never decremented) allows
+ * rcu_prepare_for_idle() to detect when something out of the idle loop
+ * posts a callback, even if an equal number of callbacks are invoked.
+ * Of course, callbacks should only be posted from within a trace event
+ * designed to be called from idle or from within RCU_NONIDLE().
+ */
+static void rcu_idle_count_callbacks_posted(void)
+{
+	__this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
+}
+
 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
 
 #ifdef CONFIG_RCU_CPU_STALL_INFO
@@ -2195,14 +2259,13 @@
 
 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
 {
-	struct hrtimer *hrtp = &per_cpu(rcu_idle_gp_timer, cpu);
+	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+	struct timer_list *tltp = &rdtp->idle_gp_timer;
 
-	sprintf(cp, "drain=%d %c timer=%lld",
-		per_cpu(rcu_dyntick_drain, cpu),
-		per_cpu(rcu_dyntick_holdoff, cpu) == jiffies ? 'H' : '.',
-		hrtimer_active(hrtp)
-			? ktime_to_us(hrtimer_get_remaining(hrtp))
-			: -1);
+	sprintf(cp, "drain=%d %c timer=%lu",
+		rdtp->dyntick_drain,
+		rdtp->dyntick_holdoff == jiffies ? 'H' : '.',
+		timer_pending(tltp) ? tltp->expires - jiffies : -1);
 }
 
 #else /* #ifdef CONFIG_RCU_FAST_NO_HZ */
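
Aside: ->nonlazy_posted is deliberately never decremented; rcu_prepare_for_idle() compares it against a snapshot taken at idle entry, which detects newly posted callbacks even when an equal number were invoked in the meantime (so the queue length alone would look unchanged).  A stripped-down userspace sketch of that idiom, with illustrative names:

#include <stdio.h>

/* Monotonic count of work posted; only ever grows. */
static unsigned long posted;
static unsigned long posted_snap;

static void post_work(void)
{
	posted++;	/* analogue of rcu_idle_count_callbacks_posted() */
}

static void enter_idle(void)
{
	posted_snap = posted;	/* snapshot at the start of the idle sojourn */
}

/*
 * On idle re-entry: did anything arrive since the snapshot?  Because the
 * counter only grows, a posting is visible even if the work was already
 * consumed again, which a simple queue-length check would miss.
 */
static int work_arrived_while_awake(void)
{
	return posted != posted_snap;
}

int main(void)
{
	enter_idle();
	printf("%d\n", work_arrived_while_awake());	/* prints 0 */
	post_work();
	printf("%d\n", work_arrived_while_awake());	/* prints 1 */
	return 0;
}
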
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index ed459ed..d4bc16d 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -271,13 +271,13 @@
 
 	gpnum = rsp->gpnum;
 	seq_printf(m, "c=%lu g=%lu s=%d jfq=%ld j=%x "
-		      "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu\n",
+		      "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld/%ld\n",
 		   rsp->completed, gpnum, rsp->fqs_state,
 		   (long)(rsp->jiffies_force_qs - jiffies),
 		   (int)(jiffies & 0xffff),
 		   rsp->n_force_qs, rsp->n_force_qs_ngp,
 		   rsp->n_force_qs - rsp->n_force_qs_ngp,
-		   rsp->n_force_qs_lh);
+		   rsp->n_force_qs_lh, rsp->qlen_lazy, rsp->qlen);
 	for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < NUM_RCU_NODES; rnp++) {
 		if (rnp->level != level) {
 			seq_puts(m, "\n");
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index faeb4b7..1e5bfd8 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1609,8 +1609,7 @@
 
 	smp_wmb();
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
-	src_cpu = task_cpu(p);
-	cpu = src_cpu;
+	src_cpu = cpu = task_cpu(p);
 
 	if (!(p->state & state))
 		goto out;
@@ -1652,6 +1651,9 @@
 		p->sched_class->task_waking(p);
 
 	cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
+
+	/* Refresh src_cpu as it could have changed since we last read it */
+	src_cpu = task_cpu(p);
 	if (src_cpu != cpu) {
 		wake_flags |= WF_MIGRATED;
 		set_task_cpu(p, cpu);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index c6cd85b..0f30374 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -280,6 +280,7 @@
 static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
 {
 	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
+	unsigned long rcu_delta_jiffies;
 	ktime_t last_update, expires, now;
 	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
 	u64 time_delta;
@@ -328,7 +329,7 @@
 		time_delta = timekeeping_max_deferment();
 	} while (read_seqretry(&xtime_lock, seq));
 
-	if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
+	if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) ||
 	    arch_needs_cpu(cpu)) {
 		next_jiffies = last_jiffies + 1;
 		delta_jiffies = 1;
@@ -336,6 +337,10 @@
 		/* Get the next timer wheel timer */
 		next_jiffies = get_next_timer_interrupt(last_jiffies);
 		delta_jiffies = next_jiffies - last_jiffies;
+		if (rcu_delta_jiffies < delta_jiffies) {
+			next_jiffies = last_jiffies + rcu_delta_jiffies;
+			delta_jiffies = rcu_delta_jiffies;
+		}
 	}
 	/*
 	 * Do not stop the tick, if we are only one off
diff --git a/kernel/timer.c b/kernel/timer.c
index cf7217a..a851025 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -862,7 +862,13 @@
  *
  * mod_timer_pinned() is a way to update the expire field of an
  * active timer (if the timer is inactive it will be activated)
- * and not allow the timer to be migrated to a different CPU.
+ * and to ensure that the timer is scheduled on the current CPU.
+ *
+ * Note that this does not prevent the timer from being migrated
+ * when the current CPU goes offline.  If this is a problem for
+ * you, use CPU-hotplug notifiers to handle it correctly, for
+ * example, cancelling the timer when the corresponding CPU goes
+ * offline.
  *
  * mod_timer_pinned(timer, expires) is equivalent to:
  *
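
Aside: the caveat added to the mod_timer_pinned() comment above matters to any caller.  A sketch of the pattern it recommends, assuming a hypothetical driver that wants its timer to stay on the arming CPU; the timer and hotplug-notifier calls are the real 3.x-era APIs, but the driver itself is illustrative:

#include <linux/init.h>
#include <linux/timer.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

static struct timer_list my_timer;	/* illustrative per-driver timer */

static void my_timer_fn(unsigned long data)
{
	/* Expects to run on the CPU that armed it -- usually, but not
	 * always, true: see the hotplug caveat below. */
}

static void arm_on_this_cpu(unsigned long delay)
{
	/* Pins the *arming* to this CPU; it does not pin the timer forever. */
	mod_timer_pinned(&my_timer, jiffies + delay);
}

/*
 * CPU hotplug can still migrate a pending timer off a dying CPU, so
 * cancel it ourselves when a CPU goes down.  (A real driver would also
 * check hcpu against the CPU it armed the timer on.)
 */
static int my_cpu_notify(struct notifier_block *nb,
			 unsigned long action, void *hcpu)
{
	if (action == CPU_DOWN_PREPARE)
		del_timer_sync(&my_timer);
	return NOTIFY_OK;
}

static struct notifier_block my_cpu_nb = {
	.notifier_call = my_cpu_notify,
};

static int __init my_init(void)
{
	setup_timer(&my_timer, my_timer_fn, 0);
	register_hotcpu_notifier(&my_cpu_nb);
	return 0;
}
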
diff --git a/net/wireless/db.txt b/net/wireless/db.txt
index 6a77ffc..91591c7 100644
--- a/net/wireless/db.txt
+++ b/net/wireless/db.txt
@@ -7,6 +7,8 @@
 	(2474 - 2494 @ 20), (3, 20), PASSIVE-SCAN, NO-IBSS, NO-OFDM
 	# Channel 36 - 48
 	(5170 - 5250 @ 80), (3, 20), PASSIVE-SCAN, NO-IBSS
+	(5250 - 5330 @ 80), (3, 20), PASSIVE-SCAN, NO-IBSS
+	(5490 - 5710 @ 80), (3, 20), PASSIVE-SCAN, NO-IBSS
 	# NB: 5260 MHz - 5700 MHz requires DFS
 	# Channel 149 - 165
 	(5735 - 5835 @ 80), (3, 20), PASSIVE-SCAN, NO-IBSS
diff --git a/sound/soc/codecs/wcd9xxx-common.c b/sound/soc/codecs/wcd9xxx-common.c
index 675e378..c4c219c 100644
--- a/sound/soc/codecs/wcd9xxx-common.c
+++ b/sound/soc/codecs/wcd9xxx-common.c
@@ -657,14 +657,38 @@
 }
 EXPORT_SYMBOL(wcd9xxx_restore_registers);
 
+static void wcd9xxx_dynamic_bypass_buck_ctrl_lo(struct snd_soc_codec *cdc,
+						bool enable)
+{
+	int i;
+	const struct wcd9xxx_reg_mask_val reg_set[] = {
+		{WCD9XXX_A_BUCK_MODE_3, (0x1 << 3), (enable << 3)},
+		{WCD9XXX_A_BUCK_MODE_5, enable ? 0xFF : 0x02, 0x02},
+		{WCD9XXX_A_BUCK_MODE_5, 0x1, 0x01}
+	};
+
+	if (!enable) {
+		snd_soc_update_bits(cdc, WCD9XXX_A_BUCK_MODE_1,
+					(0x1 << 3), 0x00);
+		snd_soc_update_bits(cdc, WCD9XXX_A_BUCK_MODE_4,
+					0xFF, BUCK_VREF_2V);
+	}
+	for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+		snd_soc_update_bits(cdc, reg_set[i].reg, reg_set[i].mask,
+							reg_set[i].val);
+
+	/* 50us sleep is required as per the class H HW design sequence */
+	usleep_range(BUCK_SETTLE_TIME_US, BUCK_SETTLE_TIME_US+10);
+}
+
 static void wcd9xxx_dynamic_bypass_buck_ctrl(struct snd_soc_codec *cdc,
 						bool enable)
 {
 	int i;
 	const struct wcd9xxx_reg_mask_val reg_set[] = {
 		{WCD9XXX_A_BUCK_MODE_3, (0x1 << 3), (enable << 3)},
-		{WCD9XXX_A_BUCK_MODE_5, (0x1 << 1), (enable << 1)},
-		{WCD9XXX_A_BUCK_MODE_5, 0x1, enable}
+		{WCD9XXX_A_BUCK_MODE_5, (0x1 << 1), ((!enable) << 1)},
+		{WCD9XXX_A_BUCK_MODE_5, 0x1, !enable}
 	};
 	if (!enable) {
 		snd_soc_update_bits(cdc, WCD9XXX_A_BUCK_MODE_1,
@@ -929,7 +953,7 @@
 	if (is_enable) {
 		if ((clsh_d->state == WCD9XXX_CLSH_STATE_LO) ||
 			(req_state == WCD9XXX_CLSH_STATE_LO)) {
-			wcd9xxx_dynamic_bypass_buck_ctrl(codec, false);
+			wcd9xxx_dynamic_bypass_buck_ctrl_lo(codec, false);
 			wcd9xxx_enable_buck(codec, clsh_d, true);
 			wcd9xxx_set_fclk_get_ncp(codec, clsh_d,
 						NCP_FCLK_LEVEL_8);
@@ -951,7 +975,7 @@
 		case WCD9XXX_CLSH_STATE_LO:
 			snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC,
 						0x20, 0x00);
-			wcd9xxx_dynamic_bypass_buck_ctrl(codec, true);
+			wcd9xxx_dynamic_bypass_buck_ctrl_lo(codec, true);
 			break;
 		case WCD9XXX_CLSH_STATE_HPHL:
 			wcd9xxx_clsh_comp_req(codec, clsh_d,
diff --git a/sound/soc/msm/qdsp6v2/q6afe.c b/sound/soc/msm/qdsp6v2/q6afe.c
index 774a33c..fc82215 100644
--- a/sound/soc/msm/qdsp6v2/q6afe.c
+++ b/sound/soc/msm/qdsp6v2/q6afe.c
@@ -2580,7 +2580,7 @@
 				goto afe_error;
 			}
 
-			if (param[1] < 0 || param[1] > 100) {
+			if (param[1] > 100) {
 				pr_err("%s: Error, volume shoud be 0 to 100 percentage param = %lu\n",
 					__func__, param[1]);
 				rc = -EINVAL;
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index 4b9d079..9a3cb87 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -445,7 +445,7 @@
 	q6asm_add_hdr_custom_topology(ac, &asm_top.hdr,
 				      APR_PKT_SIZE(APR_HDR_SIZE,
 					sizeof(asm_top)), TRUE);
-	atomic_set(&ac->cmd_state, 1);
+	atomic_set(&ac->mem_state, 1);
 	asm_top.hdr.opcode = ASM_CMD_ADD_TOPOLOGIES;
 	asm_top.payload_addr_lsw = cal_block.cal_paddr;
 	asm_top.payload_addr_msw = 0;
@@ -463,8 +463,8 @@
 		goto err_unmap;
 	}
 
-	result = wait_event_timeout(ac->cmd_wait,
-			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	result = wait_event_timeout(ac->mem_wait,
+			(atomic_read(&ac->mem_state) == 0), 5*HZ);
 	if (!result) {
 		pr_err("%s: Set topologies failed after timedout payload = 0x%x\n",
 			__func__, cal_block.cal_paddr);
@@ -884,6 +884,7 @@
 
 	init_waitqueue_head(&ac->cmd_wait);
 	init_waitqueue_head(&ac->time_wait);
+	init_waitqueue_head(&ac->mem_wait);
 	atomic_set(&ac->time_flag, 1);
 	INIT_LIST_HEAD(&ac->port[0].mem_map_handle);
 	INIT_LIST_HEAD(&ac->port[1].mem_map_handle);
@@ -895,6 +896,7 @@
 	}
 	atomic_set(&ac->cmd_state, 0);
 	atomic_set(&ac->nowait_cmd_cnt, 0);
+	atomic_set(&ac->mem_state, 0);
 
 	send_asm_custom_topology(ac);
 
@@ -1185,10 +1187,8 @@
 					atomic_set(&ac->unmap_cb_success, 1);
 			}
 
-			if (atomic_read(&ac->cmd_state)) {
-				atomic_set(&ac->cmd_state, 0);
-				wake_up(&ac->cmd_wait);
-			}
+			if (atomic_cmpxchg(&ac->mem_state, 1, 0))
+				wake_up(&ac->mem_wait);
 			pr_debug("%s:Payload = [0x%x] status[0x%x]\n",
 					__func__, payload[0], payload[1]);
 			break;
@@ -1208,10 +1208,9 @@
 		pr_debug("%s:PL#0[0x%x]PL#1 [0x%x] dir=%x s_id=%x\n",
 				__func__, payload[0], payload[1], dir, sid);
 		spin_lock_irqsave(&port->dsp_lock, dsp_flags);
-		if (atomic_read(&ac->cmd_state)) {
+		if (atomic_cmpxchg(&ac->mem_state, 1, 0)) {
 			ac->port[dir].tmp_hdl = payload[0];
-			atomic_set(&ac->cmd_state, 0);
-			wake_up(&ac->cmd_wait);
+			wake_up(&ac->mem_wait);
 		}
 		spin_unlock_irqrestore(&port->dsp_lock, dsp_flags);
 		break;
@@ -1220,10 +1219,8 @@
 		pr_debug("%s:PL#0[0x%x]PL#1 [0x%x]\n",
 					__func__, payload[0], payload[1]);
 		spin_lock_irqsave(&port->dsp_lock, dsp_flags);
-		if (atomic_read(&ac->cmd_state)) {
-			atomic_set(&ac->cmd_state, 0);
-			wake_up(&ac->cmd_wait);
-		}
+		if (atomic_cmpxchg(&ac->mem_state, 1, 0))
+			wake_up(&ac->mem_wait);
 		spin_unlock_irqrestore(&port->dsp_lock, dsp_flags);
 
 		break;
@@ -1343,7 +1340,6 @@
 		case ASM_STREAM_CMD_OPEN_LOOPBACK_V2:
 		case ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2:
 		case ASM_STREAM_CMD_SET_ENCDEC_PARAM:
-		case ASM_CMD_ADD_TOPOLOGIES:
 		case ASM_DATA_CMD_REMOVE_INITIAL_SILENCE:
 		case ASM_DATA_CMD_REMOVE_TRAILING_SILENCE:
 		case ASM_SESSION_CMD_REGISTER_FOR_RX_UNDERFLOW_EVENTS:
@@ -1357,6 +1353,17 @@
 				ac->cb(data->opcode, data->token,
 					(uint32_t *)data->payload, ac->priv);
 			break;
+		case ASM_CMD_ADD_TOPOLOGIES:
+			pr_debug("%s:Payload = [0x%x]stat[0x%x]\n",
+				__func__, payload[0], payload[1]);
+			if (atomic_read(&ac->mem_state) && wakeup_flag) {
+				atomic_set(&ac->mem_state, 0);
+				wake_up(&ac->mem_wait);
+			}
+			if (ac->cb)
+				ac->cb(data->opcode, data->token,
+					(uint32_t *)data->payload, ac->priv);
+			break;
 		case ASM_STREAM_CMD_GET_PP_PARAMS_V2:
 			pr_debug("%s: ASM_STREAM_CMD_GET_PP_PARAMS_V2\n",
 				__func__);
@@ -3130,7 +3137,7 @@
 							mmap_region_cmd;
 	q6asm_add_mmaphdr(ac, &mmap_regions->hdr, cmd_size,
 			TRUE, ((ac->session << 8) | dir));
-	atomic_set(&ac->cmd_state, 1);
+	atomic_set(&ac->mem_state, 1);
 	mmap_regions->hdr.opcode = ASM_CMD_SHARED_MEM_MAP_REGIONS;
 	mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL;
 	mmap_regions->num_regions = bufcnt & 0x00ff;
@@ -3157,8 +3164,8 @@
 		goto fail_cmd;
 	}
 
-	rc = wait_event_timeout(ac->cmd_wait,
-			(atomic_read(&ac->cmd_state) == 0 &&
+	rc = wait_event_timeout(ac->mem_wait,
+			(atomic_read(&ac->mem_state) == 0 &&
 			 ac->port[dir].tmp_hdl), 5*HZ);
 	if (!rc) {
 		pr_err("timeout. waited for memory_map\n");
@@ -3194,7 +3201,7 @@
 	q6asm_add_mmaphdr(ac, &mem_unmap.hdr,
 			sizeof(struct avs_cmd_shared_mem_unmap_regions),
 			TRUE, ((ac->session << 8) | dir));
-	atomic_set(&ac->cmd_state, 1);
+	atomic_set(&ac->mem_state, 1);
 	mem_unmap.hdr.opcode = ASM_CMD_SHARED_MEM_UNMAP_REGIONS;
 	mem_unmap.mem_map_handle = 0;
 	list_for_each_safe(ptr, next, &ac->port[dir].mem_map_handle) {
@@ -3222,8 +3229,8 @@
 		goto fail_cmd;
 	}
 
-	rc = wait_event_timeout(ac->cmd_wait,
-			(atomic_read(&ac->cmd_state) == 0), 5 * HZ);
+	rc = wait_event_timeout(ac->mem_wait,
+			(atomic_read(&ac->mem_state) == 0), 5 * HZ);
 	if (!rc) {
 		pr_err("%s timeout. waited for memory_unmap of handle 0x%x\n",
 			__func__, mem_unmap.mem_map_handle);
@@ -3305,7 +3312,7 @@
 							mmap_region_cmd;
 	q6asm_add_mmaphdr(ac, &mmap_regions->hdr, cmd_size, TRUE,
 					((ac->session << 8) | dir));
-	atomic_set(&ac->cmd_state, 1);
+	atomic_set(&ac->mem_state, 1);
 	pr_debug("mmap_region=0x%p token=0x%x\n",
 		mmap_regions, ((ac->session << 8) | dir));
 
@@ -3338,8 +3345,8 @@
 		goto fail_cmd;
 	}
 
-	rc = wait_event_timeout(ac->cmd_wait,
-			(atomic_read(&ac->cmd_state) == 0)
+	rc = wait_event_timeout(ac->mem_wait,
+			(atomic_read(&ac->mem_state) == 0)
 			 , 5*HZ);
 	if (!rc) {
 		pr_err("timeout. waited for memory_map\n");
@@ -3387,7 +3394,7 @@
 	cmd_size = sizeof(struct avs_cmd_shared_mem_unmap_regions);
 	q6asm_add_mmaphdr(ac, &mem_unmap.hdr, cmd_size,
 			TRUE, ((ac->session << 8) | dir));
-	atomic_set(&ac->cmd_state, 1);
+	atomic_set(&ac->mem_state, 1);
 	port = &ac->port[dir];
 	buf_add = (uint32_t)port->buf->phys;
 	mem_unmap.hdr.opcode = ASM_CMD_SHARED_MEM_UNMAP_REGIONS;
@@ -3417,8 +3424,8 @@
 		goto fail_cmd;
 	}
 
-	rc = wait_event_timeout(ac->cmd_wait,
-			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	rc = wait_event_timeout(ac->mem_wait,
+			(atomic_read(&ac->mem_state) == 0), 5*HZ);
 	if (!rc) {
 		pr_err("%s timeout. waited for memory_unmap of handle 0x%x\n",
 			__func__, mem_unmap.mem_map_handle);
@@ -4650,6 +4657,7 @@
 	common_client.port[1].buf = &common_buf[1];
 	init_waitqueue_head(&common_client.cmd_wait);
 	init_waitqueue_head(&common_client.time_wait);
+	init_waitqueue_head(&common_client.mem_wait);
 	atomic_set(&common_client.time_flag, 1);
 	INIT_LIST_HEAD(&common_client.port[0].mem_map_handle);
 	INIT_LIST_HEAD(&common_client.port[1].mem_map_handle);
@@ -4660,6 +4668,7 @@
 	}
 	atomic_set(&common_client.cmd_state, 0);
 	atomic_set(&common_client.nowait_cmd_cnt, 0);
+	atomic_set(&common_client.mem_state, 0);
 
 	config_debug_fs_init();
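
Aside: the q6asm change above moves memory-map completions onto their own mem_state/mem_wait pair and replaces the read-then-clear of the flag with atomic_cmpxchg(), so testing and clearing become a single atomic step and only one path can consume the pending completion and issue the wakeup.  A userspace sketch of this claim-then-wake idiom in C11 atomics, with illustrative names:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int mem_state;	/* 1 = one command outstanding */

/* Waiter side: announce one outstanding command before sending it. */
static void post_command(void)
{
	atomic_store(&mem_state, 1);
	/* ... send the command, then sleep until mem_state reads 0 ... */
}

/*
 * Callback side: claim the flag and wake the waiter exactly once.
 * compare-exchange makes read-check-clear one atomic step, so two
 * concurrent callbacks cannot both see 1 and both issue the wakeup.
 */
static int claim_and_wake(void)
{
	int expected = 1;

	if (atomic_compare_exchange_strong(&mem_state, &expected, 0)) {
		/* the wake_up(&mem_wait) analogue would go here */
		return 1;
	}
	return 0;	/* someone else already consumed the flag */
}

int main(void)
{
	post_command();
	printf("first claim:  %d\n", claim_and_wake());	/* prints 1 */
	printf("second claim: %d\n", claim_and_wake());	/* prints 0 */
	return 0;
}
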